/*
 * Functions related to tagged command queuing
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

#include "blk.h"

/**
 * blk_queue_find_tag - find a request by its tag and queue
 * @q:	 The request queue for the device
 * @tag: The tag of the request
 *
 * Notes:
 *    Should be used when a device returns a tag and you want to match
 *    it with a request.
 *
 *    no locks need be held.
 **/
struct request *blk_queue_find_tag(struct request_queue *q, int tag)
{
	return blk_map_queue_find_tag(q->queue_tags, tag);
}
EXPORT_SYMBOL(blk_queue_find_tag);
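
/*
 * Usage sketch (hypothetical driver code, not part of this file): when the
 * hardware completes a command and reports only its tag, the driver can map
 * that tag back to the request it issued:
 *
 *	struct request *rq = blk_queue_find_tag(q, hw_tag);
 *
 *	if (!rq)
 *		return;		(stale or invalid tag)
 *
 * followed by the normal completion path for the request.  Here hw_tag is
 * whatever tag value the device reported.
 */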

/**
 * __blk_free_tags - release a given set of tag maintenance info
 * @bqt:	the tag map to free
 *
 * Tries to free the specified @bqt.  Returns true if it was
 * actually freed and false if there are still references using it
 */
static int __blk_free_tags(struct blk_queue_tag *bqt)
{
	int retval;

	retval = atomic_dec_and_test(&bqt->refcnt);
	if (retval) {
		BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
							bqt->max_depth);

		kfree(bqt->tag_index);
		bqt->tag_index = NULL;

		kfree(bqt->tag_map);
		bqt->tag_map = NULL;

		kfree(bqt);
	}

	return retval;
}

/**
 * __blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 *  Notes:
 *    blk_cleanup_queue() will take care of calling this function, if tagging
 *    has been used. So there's no need to call this directly.
 **/
void __blk_queue_free_tags(struct request_queue *q)
{
	struct blk_queue_tag *bqt = q->queue_tags;

	if (!bqt)
		return;

	__blk_free_tags(bqt);

	q->queue_tags = NULL;
	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}

/**
 * blk_free_tags - release a given set of tag maintenance info
 * @bqt:	the tag map to free
 *
 * For an externally managed @bqt, frees the map.  Callers of this
 * function must guarantee to have released all the queues that
 * might have been using this tag map.
 */
void blk_free_tags(struct blk_queue_tag *bqt)
{
	if (unlikely(!__blk_free_tags(bqt)))
		BUG();
}
EXPORT_SYMBOL(blk_free_tags);

/**
 * blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 *  Notes:
 *	This is used to disable tagged queuing to a device, yet leave the
 *	queue itself functional.
 **/
void blk_queue_free_tags(struct request_queue *q)
{
	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}
EXPORT_SYMBOL(blk_queue_free_tags);

static int
init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
{
	struct request **tag_index;
	unsigned long *tag_map;
	int nr_ulongs;

	/* cap the depth so the tag map never exceeds twice nr_requests */
	if (q && depth > q->nr_requests * 2) {
		depth = q->nr_requests * 2;
		printk(KERN_ERR "%s: adjusted depth to %d\n",
		       __func__, depth);
	}

	tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
	if (!tag_index)
		goto fail;

	/* round the bitmap up to a whole number of unsigned longs */
	nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
	tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
	if (!tag_map)
		goto fail;

	tags->real_max_depth = depth;
	tags->max_depth = depth;
	tags->tag_index = tag_index;
	tags->tag_map = tag_map;

	return 0;
fail:
	kfree(tag_index);
	return -ENOMEM;
}

static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
						   int depth)
{
	struct blk_queue_tag *tags;

	tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
	if (!tags)
		goto fail;

	if (init_tag_map(q, tags, depth))
		goto fail;

	atomic_set(&tags->refcnt, 1);
	return tags;
fail:
	kfree(tags);
	return NULL;
}

/**
 * blk_init_tags - initialize the tag info for an external tag map
 * @depth:	the maximum queue depth supported
 **/
struct blk_queue_tag *blk_init_tags(int depth)
{
	return __blk_queue_init_tags(NULL, depth);
}
EXPORT_SYMBOL(blk_init_tags);

/**
 * blk_queue_init_tags - initialize the queue tag info
 * @q:  the request queue for the device
 * @depth:  the maximum queue depth supported
 * @tags: the tag map to share, or %NULL to allocate a private one
 *
 * Queue lock must be held here if the function is called to resize an
 * existing map.
 **/
int blk_queue_init_tags(struct request_queue *q, int depth,
			struct blk_queue_tag *tags)
{
	int rc;

	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);

	if (!tags && !q->queue_tags) {
		tags = __blk_queue_init_tags(q, depth);

		if (!tags)
			goto fail;
	} else if (q->queue_tags) {
		rc = blk_queue_resize_tags(q, depth);
		if (rc)
			return rc;
		queue_flag_set(QUEUE_FLAG_QUEUED, q);
		return 0;
	} else
		atomic_inc(&tags->refcnt);

	/*
	 * assign it, all done
	 */
	q->queue_tags = tags;
	queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
	INIT_LIST_HEAD(&q->tag_busy_list);
	return 0;
fail:
	kfree(tags);
	return -ENOMEM;
}
EXPORT_SYMBOL(blk_queue_init_tags);
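
/*
 * Usage sketch (hypothetical driver code, not part of this file): a driver
 * that exposes several request queues behind one hardware tag space would
 * allocate one external map with blk_init_tags() and hand it to every queue:
 *
 *	struct blk_queue_tag *shared = blk_init_tags(depth);
 *
 *	if (!shared)
 *		return -ENOMEM;
 *	blk_queue_init_tags(q0, depth, shared);
 *	blk_queue_init_tags(q1, depth, shared);
 *
 * Once every queue using it has been released, the map itself is dropped
 * with blk_free_tags(shared).  Passing tags == NULL instead makes
 * blk_queue_init_tags() allocate a private map for the queue.
 */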

/**
 * blk_queue_resize_tags - change the queueing depth
 * @q:  the request queue for the device
 * @new_depth: the new max command queueing depth
 *
 *  Notes:
 *    Must be called with the queue lock held.
 **/
int blk_queue_resize_tags(struct request_queue *q, int new_depth)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	struct request **tag_index;
	unsigned long *tag_map;
	int max_depth, nr_ulongs;

	if (!bqt)
		return -ENXIO;

	/*
	 * If we already have a large enough real_max_depth, just adjust
	 * max_depth.  *NOTE* as requests with tag values between new_depth
	 * and real_max_depth can be in-flight, the tag map cannot be
	 * shrunk blindly here.
	 */
	if (new_depth <= bqt->real_max_depth) {
		bqt->max_depth = new_depth;
		return 0;
	}

	/*
	 * Currently cannot replace a shared tag map with a new
	 * one, so error out if this is the case
	 */
	if (atomic_read(&bqt->refcnt) != 1)
		return -EBUSY;

	/*
	 * save the old state info, so we can copy it back
	 */
	tag_index = bqt->tag_index;
	tag_map = bqt->tag_map;
	max_depth = bqt->real_max_depth;

	if (init_tag_map(q, bqt, new_depth))
		return -ENOMEM;

	memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
	nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
	memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));

	kfree(tag_index);
	kfree(tag_map);
	return 0;
}
EXPORT_SYMBOL(blk_queue_resize_tags);
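
/*
 * Usage sketch (hypothetical caller, not part of this file): the depth
 * change must happen under the queue lock, e.g.
 *
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	err = blk_queue_resize_tags(q, new_depth);
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 *
 * where new_depth and flags are caller-supplied.
 */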

/**
 * blk_queue_end_tag - end tag operations for a request
 * @q:  the request queue for the device
 * @rq: the request that has completed
 *
 *  Description:
 *    Typically called when end_that_request_first() returns %0, meaning
 *    all transfers have been done for a request. It's important to call
 *    this function before end_that_request_last(), as that will put the
 *    request back on the free list thus corrupting the internal tag list.
 *
 *  Notes:
 *   queue lock must be held.
 **/
void blk_queue_end_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	unsigned tag = rq->tag; /* negative tags invalid */

	BUG_ON(tag >= bqt->real_max_depth);

	list_del_init(&rq->queuelist);
	rq->cmd_flags &= ~REQ_QUEUED;
	rq->tag = -1;

	if (unlikely(bqt->tag_index[tag] == NULL))
		printk(KERN_ERR "%s: tag %d is missing\n",
		       __func__, tag);

	bqt->tag_index[tag] = NULL;

	if (unlikely(!test_bit(tag, bqt->tag_map))) {
		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
		       __func__, tag);
		return;
	}
	/*
	 * The tag_map bit acts as a lock for tag_index[bit], so we need
	 * unlock memory barrier semantics.
	 */
	clear_bit_unlock(tag, bqt->tag_map);
}
EXPORT_SYMBOL(blk_queue_end_tag);
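
/*
 * Usage sketch (hypothetical completion path, not part of this file): under
 * the queue lock, the tag is released before the request is completed, so
 * the request never sits on the free list while still indexed in the tag map:
 *
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	blk_queue_end_tag(q, rq);
 *	__blk_end_request_all(rq, error);
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 */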

/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q:  the request queue for the device
 * @rq:  the block request that needs tagging
 *
 *  Description:
 *    This can either be used as a stand-alone helper, or possibly be
 *    assigned as the queue &prep_rq_fn (in which case &struct request
 *    automagically gets a tag assigned). Note that this function
 *    assumes that any type of request can be queued! If this is not
 *    true for your device, you must check the request type before
 *    calling this function.  The request will also be removed from
 *    the request queue, so it's the driver's responsibility to re-add
 *    it if it should need to be restarted for some reason.
 *
 *  Notes:
 *   queue lock must be held.
 **/
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	unsigned max_depth;
	int tag;

	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
		printk(KERN_ERR
		       "%s: request %p for device [%s] already tagged %d",
		       __func__, rq,
		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
		BUG();
	}

	/*
	 * Protect against shared tag maps, as we may not have exclusive
	 * access to the tag map.
	 *
	 * We reserve a few tags just for sync IO, since we don't want
	 * to starve sync IO on behalf of flooding async IO.
	 */
	max_depth = bqt->max_depth;
	if (!rq_is_sync(rq) && max_depth > 1) {
		max_depth -= 2;
		if (!max_depth)
			max_depth = 1;
		if (q->in_flight[BLK_RW_ASYNC] > max_depth)
			return 1;
	}

	do {
		tag = find_first_zero_bit(bqt->tag_map, max_depth);
		if (tag >= max_depth)
			return 1;

	} while (test_and_set_bit_lock(tag, bqt->tag_map));
	/*
	 * We need lock ordering semantics given by test_and_set_bit_lock.
	 * See blk_queue_end_tag for details.
	 */

	rq->cmd_flags |= REQ_QUEUED;
	rq->tag = tag;
	bqt->tag_index[tag] = rq;
	blk_start_request(rq);
	list_add(&rq->queuelist, &q->tag_busy_list);
	return 0;
}
EXPORT_SYMBOL(blk_queue_start_tag);
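
/*
 * Usage sketch (hypothetical request_fn, not part of this file): a driver's
 * strategy routine can let blk_queue_start_tag() do the dequeue (it calls
 * blk_start_request() internally), bailing out when no tag is available:
 *
 *	while ((rq = blk_peek_request(q)) != NULL) {
 *		if (blk_queue_start_tag(q, rq))
 *			break;			(no tag free, retry later)
 *		issue_to_hardware(rq);		(hypothetical helper)
 *	}
 *
 * When the command finishes, the tag is released with blk_queue_end_tag()
 * as shown above.
 */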

/**
 * blk_queue_invalidate_tags - invalidate all pending tags
 * @q:  the request queue for the device
 *
 *  Description:
 *   Hardware conditions may dictate a need to stop all pending requests.
 *   In this case, we will safely clear the block side of the tag queue and
 *   re-add all requests to the request queue in the right order.
 *
 *  Notes:
 *   queue lock must be held.
 **/
void blk_queue_invalidate_tags(struct request_queue *q)
{
	struct list_head *tmp, *n;

	list_for_each_safe(tmp, n, &q->tag_busy_list)
		blk_requeue_request(q, list_entry_rq(tmp));
}
EXPORT_SYMBOL(blk_queue_invalidate_tags);
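
/*
 * Usage sketch (hypothetical error-recovery path, not part of this file):
 * after resetting the hardware, all outstanding tagged requests can be
 * pushed back onto the queue for re-issue:
 *
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	blk_queue_invalidate_tags(q);
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 *
 * blk_requeue_request() drops the tag of a tagged request before requeueing
 * it, so the requests come back untagged and are re-tagged when re-issued.
 */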