@@ -149,17 +149,13 @@ static void mmc_request_fn(struct request_queue *q)
149149 wake_up_process (mq -> thread );
150150}
151151
152- static struct scatterlist * mmc_alloc_sg (int sg_len , int * err )
152+ static struct scatterlist * mmc_alloc_sg (int sg_len )
153153{
154154 struct scatterlist * sg ;
155155
156156 sg = kmalloc_array (sg_len , sizeof (* sg ), GFP_KERNEL );
157- if (!sg )
158- * err = - ENOMEM ;
159- else {
160- * err = 0 ;
157+ if (sg )
161158 sg_init_table (sg , sg_len );
162- }
163159
164160 return sg ;
165161}
@@ -185,6 +181,32 @@ static void mmc_queue_setup_discard(struct request_queue *q,
185181 queue_flag_set_unlocked (QUEUE_FLAG_SECERASE , q );
186182}
187183
184+ static void mmc_queue_req_free_bufs (struct mmc_queue_req * mqrq )
185+ {
186+ kfree (mqrq -> bounce_sg );
187+ mqrq -> bounce_sg = NULL ;
188+
189+ kfree (mqrq -> sg );
190+ mqrq -> sg = NULL ;
191+
192+ kfree (mqrq -> bounce_buf );
193+ mqrq -> bounce_buf = NULL ;
194+ }
195+
196+ static void mmc_queue_reqs_free_bufs (struct mmc_queue_req * mqrq , int qdepth )
197+ {
198+ int i ;
199+
200+ for (i = 0 ; i < qdepth ; i ++ )
201+ mmc_queue_req_free_bufs (& mqrq [i ]);
202+ }
203+
/* Free each request's buffers, then the mqrq array itself. */
static void mmc_queue_free_mqrqs(struct mmc_queue_req *mqrq, int qdepth)
{
	mmc_queue_reqs_free_bufs(mqrq, qdepth);
	kfree(mqrq);
}
209+
188210static struct mmc_queue_req * mmc_queue_alloc_mqrqs (int qdepth )
189211{
190212 struct mmc_queue_req * mqrq ;
@@ -200,79 +222,137 @@ static struct mmc_queue_req *mmc_queue_alloc_mqrqs(int qdepth)
200222}
201223
202224#ifdef CONFIG_MMC_BLOCK_BOUNCE
203- static bool mmc_queue_alloc_bounce_bufs (struct mmc_queue * mq ,
204- unsigned int bouncesz )
225+ static int mmc_queue_alloc_bounce_bufs (struct mmc_queue_req * mqrq , int qdepth ,
226+ unsigned int bouncesz )
205227{
206228 int i ;
207229
208- for (i = 0 ; i < mq -> qdepth ; i ++ ) {
209- mq -> mqrq [i ].bounce_buf = kmalloc (bouncesz , GFP_KERNEL );
210- if (!mq -> mqrq [i ].bounce_buf )
211- goto out_err ;
212- }
230+ for (i = 0 ; i < qdepth ; i ++ ) {
231+ mqrq [i ].bounce_buf = kmalloc (bouncesz , GFP_KERNEL );
232+ if (!mqrq [i ].bounce_buf )
233+ return - ENOMEM ;
213234
214- return true;
235+ mqrq [i ].sg = mmc_alloc_sg (1 );
236+ if (!mqrq [i ].sg )
237+ return - ENOMEM ;
215238
216- out_err :
217- while (-- i >= 0 ) {
218- kfree (mq -> mqrq [i ].bounce_buf );
219- mq -> mqrq [i ].bounce_buf = NULL ;
239+ mqrq [i ].bounce_sg = mmc_alloc_sg (bouncesz / 512 );
240+ if (!mqrq [i ].bounce_sg )
241+ return - ENOMEM ;
220242 }
221- pr_warn ("%s: unable to allocate bounce buffers\n" ,
222- mmc_card_name (mq -> card ));
223- return false;
243+
244+ return 0 ;
224245}
225246
226- static int mmc_queue_alloc_bounce_sgs (struct mmc_queue * mq ,
227- unsigned int bouncesz )
247+ static bool mmc_queue_alloc_bounce (struct mmc_queue_req * mqrq , int qdepth ,
248+ unsigned int bouncesz )
228249{
229- int i , ret ;
250+ int ret ;
230251
231- for (i = 0 ; i < mq -> qdepth ; i ++ ) {
232- mq -> mqrq [i ].sg = mmc_alloc_sg (1 , & ret );
233- if (ret )
234- return ret ;
252+ ret = mmc_queue_alloc_bounce_bufs (mqrq , qdepth , bouncesz );
253+ if (ret )
254+ mmc_queue_reqs_free_bufs (mqrq , qdepth );
235255
236- mq -> mqrq [i ].bounce_sg = mmc_alloc_sg (bouncesz / 512 , & ret );
237- if (ret )
238- return ret ;
239- }
256+ return !ret ;
257+ }
258+
259+ static unsigned int mmc_queue_calc_bouncesz (struct mmc_host * host )
260+ {
261+ unsigned int bouncesz = MMC_QUEUE_BOUNCESZ ;
262+
263+ if (host -> max_segs != 1 )
264+ return 0 ;
265+
266+ if (bouncesz > host -> max_req_size )
267+ bouncesz = host -> max_req_size ;
268+ if (bouncesz > host -> max_seg_size )
269+ bouncesz = host -> max_seg_size ;
270+ if (bouncesz > host -> max_blk_count * 512 )
271+ bouncesz = host -> max_blk_count * 512 ;
272+
273+ if (bouncesz <= 512 )
274+ return 0 ;
275+
276+ return bouncesz ;
277+ }
278+ #else
/* Bounce buffers compiled out (!CONFIG_MMC_BLOCK_BOUNCE): never allocate. */
static inline bool mmc_queue_alloc_bounce(struct mmc_queue_req *mqrq,
					  int qdepth, unsigned int bouncesz)
{
	return false;
}
240284
/*
 * Bounce buffers compiled out: report a bounce size of zero so callers
 * take the regular sg-allocation path.  Marked inline for consistency
 * with the mmc_queue_alloc_bounce() stub above.
 */
static inline unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
{
	return 0;
}
243289#endif
244290
245- static int mmc_queue_alloc_sgs (struct mmc_queue * mq , int max_segs )
291+ static int mmc_queue_alloc_sgs (struct mmc_queue_req * mqrq , int qdepth ,
292+ int max_segs )
246293{
247- int i , ret ;
294+ int i ;
248295
249- for (i = 0 ; i < mq -> qdepth ; i ++ ) {
250- mq -> mqrq [i ].sg = mmc_alloc_sg (max_segs , & ret );
251- if (ret )
252- return ret ;
296+ for (i = 0 ; i < qdepth ; i ++ ) {
297+ mqrq [i ].sg = mmc_alloc_sg (max_segs );
298+ if (! mqrq [ i ]. sg )
299+ return - ENOMEM ;
253300 }
254301
255302 return 0 ;
256303}
257304
258- static void mmc_queue_req_free_bufs (struct mmc_queue_req * mqrq )
305+ void mmc_queue_free_shared_queue (struct mmc_card * card )
259306{
260- kfree (mqrq -> bounce_sg );
261- mqrq -> bounce_sg = NULL ;
307+ if (card -> mqrq ) {
308+ mmc_queue_free_mqrqs (card -> mqrq , card -> qdepth );
309+ card -> mqrq = NULL ;
310+ }
311+ }
262312
263- kfree (mqrq -> sg );
264- mqrq -> sg = NULL ;
313+ static int __mmc_queue_alloc_shared_queue (struct mmc_card * card , int qdepth )
314+ {
315+ struct mmc_host * host = card -> host ;
316+ struct mmc_queue_req * mqrq ;
317+ unsigned int bouncesz ;
318+ int ret = 0 ;
265319
266- kfree (mqrq -> bounce_buf );
267- mqrq -> bounce_buf = NULL ;
320+ if (card -> mqrq )
321+ return - EINVAL ;
322+
323+ mqrq = mmc_queue_alloc_mqrqs (qdepth );
324+ if (!mqrq )
325+ return - ENOMEM ;
326+
327+ card -> mqrq = mqrq ;
328+ card -> qdepth = qdepth ;
329+
330+ bouncesz = mmc_queue_calc_bouncesz (host );
331+
332+ if (bouncesz && !mmc_queue_alloc_bounce (mqrq , qdepth , bouncesz )) {
333+ bouncesz = 0 ;
334+ pr_warn ("%s: unable to allocate bounce buffers\n" ,
335+ mmc_card_name (card ));
336+ }
337+
338+ card -> bouncesz = bouncesz ;
339+
340+ if (!bouncesz ) {
341+ ret = mmc_queue_alloc_sgs (mqrq , qdepth , host -> max_segs );
342+ if (ret )
343+ goto out_err ;
344+ }
345+
346+ return ret ;
347+
348+ out_err :
349+ mmc_queue_free_shared_queue (card );
350+ return ret ;
268351}
269352
/*
 * Allocate the shared queue resources for @card with a queue depth of 2.
 * NOTE(review): qdepth 2 presumably allows one request to be prepared
 * while another is in flight — confirm against the block driver's usage.
 */
int mmc_queue_alloc_shared_queue(struct mmc_card *card)
{
	return __mmc_queue_alloc_shared_queue(card, 2);
}
277357
278358/**
@@ -289,7 +369,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
289369{
290370 struct mmc_host * host = card -> host ;
291371 u64 limit = BLK_BOUNCE_HIGH ;
292- bool bounce = false;
293372 int ret = - ENOMEM ;
294373
295374 if (mmc_dev (host )-> dma_mask && * mmc_dev (host )-> dma_mask )
@@ -300,10 +379,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
300379 if (!mq -> queue )
301380 return - ENOMEM ;
302381
303- mq -> qdepth = 2 ;
304- mq -> mqrq = mmc_queue_alloc_mqrqs (mq -> qdepth );
305- if (!mq -> mqrq )
306- goto blk_cleanup ;
382+ mq -> mqrq = card -> mqrq ;
383+ mq -> qdepth = card -> qdepth ;
307384 mq -> queue -> queuedata = mq ;
308385
309386 blk_queue_prep_rq (mq -> queue , mmc_prep_request );
@@ -312,44 +389,17 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
312389 if (mmc_can_erase (card ))
313390 mmc_queue_setup_discard (mq -> queue , card );
314391
315- #ifdef CONFIG_MMC_BLOCK_BOUNCE
316- if (host -> max_segs == 1 ) {
317- unsigned int bouncesz ;
318-
319- bouncesz = MMC_QUEUE_BOUNCESZ ;
320-
321- if (bouncesz > host -> max_req_size )
322- bouncesz = host -> max_req_size ;
323- if (bouncesz > host -> max_seg_size )
324- bouncesz = host -> max_seg_size ;
325- if (bouncesz > (host -> max_blk_count * 512 ))
326- bouncesz = host -> max_blk_count * 512 ;
327-
328- if (bouncesz > 512 &&
329- mmc_queue_alloc_bounce_bufs (mq , bouncesz )) {
330- blk_queue_bounce_limit (mq -> queue , BLK_BOUNCE_ANY );
331- blk_queue_max_hw_sectors (mq -> queue , bouncesz / 512 );
332- blk_queue_max_segments (mq -> queue , bouncesz / 512 );
333- blk_queue_max_segment_size (mq -> queue , bouncesz );
334-
335- ret = mmc_queue_alloc_bounce_sgs (mq , bouncesz );
336- if (ret )
337- goto cleanup_queue ;
338- bounce = true;
339- }
340- }
341- #endif
342-
343- if (!bounce ) {
392+ if (card -> bouncesz ) {
393+ blk_queue_bounce_limit (mq -> queue , BLK_BOUNCE_ANY );
394+ blk_queue_max_hw_sectors (mq -> queue , card -> bouncesz / 512 );
395+ blk_queue_max_segments (mq -> queue , card -> bouncesz / 512 );
396+ blk_queue_max_segment_size (mq -> queue , card -> bouncesz );
397+ } else {
344398 blk_queue_bounce_limit (mq -> queue , limit );
345399 blk_queue_max_hw_sectors (mq -> queue ,
346400 min (host -> max_blk_count , host -> max_req_size / 512 ));
347401 blk_queue_max_segments (mq -> queue , host -> max_segs );
348402 blk_queue_max_segment_size (mq -> queue , host -> max_seg_size );
349-
350- ret = mmc_queue_alloc_sgs (mq , host -> max_segs );
351- if (ret )
352- goto cleanup_queue ;
353403 }
354404
355405 sema_init (& mq -> thread_sem , 1 );
@@ -364,11 +414,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
364414
365415 return 0 ;
366416
367- cleanup_queue :
368- mmc_queue_reqs_free_bufs (mq );
369- kfree (mq -> mqrq );
417+ cleanup_queue :
370418 mq -> mqrq = NULL ;
371- blk_cleanup :
372419 blk_cleanup_queue (mq -> queue );
373420 return ret ;
374421}
@@ -390,10 +437,7 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
390437 blk_start_queue (q );
391438 spin_unlock_irqrestore (q -> queue_lock , flags );
392439
393- mmc_queue_reqs_free_bufs (mq );
394- kfree (mq -> mqrq );
395440 mq -> mqrq = NULL ;
396-
397441 mq -> card = NULL ;
398442}
399443EXPORT_SYMBOL (mmc_cleanup_queue );
0 commit comments