/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

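/*
 * Size of the bounce buffer used when the host controller can only handle a
 * single segment (CONFIG_MMC_BLOCK_BOUNCE); it is further capped to the
 * host's request, segment and block-count limits in mmc_init_queue().
 */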
#define MMC_QUEUE_BOUNCESZ	65536

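/* mq->flags bit: the block queue is stopped and the dispatch thread is held */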
#define MMC_QUEUE_SUSPENDED	(1 << 0)

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	if (mq && mmc_card_removed(mq->card))
		return BLKPREP_KILL;

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

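	/*
	 * Mark the thread as a memory allocator: it may dip into emergency
	 * reserves, so writing dirty pages out to the card cannot deadlock
	 * on memory allocation.
	 */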
	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;
		struct mmc_queue_req *tmp;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

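		/*
		 * Call the issue function even when there is no new request
		 * as long as a previous request is still outstanding, so an
		 * asynchronous (double-buffered) transfer can be completed.
		 */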
		if (req || mq->mqrq_prev->req) {
			set_current_state(TASK_RUNNING);
			mq->issue_fn(mq, req);
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}

		/* Current request becomes previous request and vice versa. */
		mq->mqrq_prev->brq.mrq.data = NULL;
		mq->mqrq_prev->req = NULL;
		tmp = mq->mqrq_prev;
		mq->mqrq_prev = mq->mqrq_cur;
		mq->mqrq_cur = tmp;
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;

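	/*
	 * The queue is being torn down: mmc_cleanup_queue() has cleared
	 * queuedata, so fail any requests that are still pending.
	 */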
	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

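	/*
	 * Only wake the thread when it is idle; if a request is already in
	 * flight it will fetch the new one itself on its next loop iteration.
	 */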
	if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
		wake_up_process(mq->thread);
}

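/*
 * Allocate and initialise a scatterlist with sg_len entries.  On failure
 * *err is set to -ENOMEM and NULL is returned.
 */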
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
	if (!sg)
		*err = -ENOMEM;
	else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}

static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	q->limits.max_discard_sectors = max_discard;
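	/* An erased_byte of 0 means discarded regions read back as zeroes */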
	if (card->erased_byte == 0)
		q->limits.discard_zeroes_data = 1;
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur));
	memset(&mq->mqrq_prev, 0, sizeof(mq->mqrq_prev));
	mq->mqrq_cur = mqrq_cur;
	mq->mqrq_prev = mqrq_prev;
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
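	/*
	 * Hosts that can only handle a single segment per request get a
	 * contiguous bounce buffer, so multi-segment block requests can
	 * still be issued as one large transfer.
	 */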
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

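		/* A bounce buffer is only worthwhile above one sector */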
		if (bouncesz > 512) {
			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_cur->bounce_buf) {
				pr_warning("%s: unable to allocate bounce cur buffer\n",
					   mmc_card_name(card));
			}
			mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_prev->bounce_buf) {
				pr_warning("%s: unable to allocate bounce prev buffer\n",
					   mmc_card_name(card));
				kfree(mqrq_cur->bounce_buf);
				mqrq_cur->bounce_buf = NULL;
			}
		}

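		/*
		 * Express the queue limits in terms of the bounce buffer:
		 * each request uses one real sg entry for the buffer itself
		 * plus a separate scatterlist for the bounced pages.
		 */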
		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_cur->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;
		}
	}
#endif

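	/* No bounce buffers: expose the host's native DMA limits directly */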
	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;

		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;
	}

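	/*
	 * thread_sem serialises the dispatch thread with suspend/resume:
	 * mmc_queue_suspend() holds it to wait out in-flight requests, and
	 * the thread releases it while sleeping.
	 */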
	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

 cleanup_queue:
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;
	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);
	return ret;
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
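	/* mmc_request() now sees queuedata == NULL and fails anything left */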
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;

	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;

	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;

	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

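		/* Wait for the thread to finish any request it is processing */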
		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mqrq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);

	BUG_ON(!mqrq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

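	/*
	 * Collapse the request into the single bounce buffer: add up the
	 * length of the bounced segments and present the buffer as one
	 * scatterlist entry to the host driver.
	 */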
	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}