xref: /linux/block/elevator.c (revision 04225d13aef11b2a539014def5e47d8c21fd74a5)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  Block device elevator/IO-scheduler.
4  *
5  *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
6  *
7  * 30042000 Jens Axboe <axboe@kernel.dk> :
8  *
9  * Split the elevator a bit so that it is possible to choose a different
10  * one or even write a new "plug in". There are three pieces:
11  * - elevator_fn, inserts a new request in the queue list
12  * - elevator_merge_fn, decides whether a new buffer can be merged with
13  *   an existing request
14  * - elevator_dequeue_fn, called when a request is taken off the active list
15  *
16  * 20082000 Dave Jones <davej@suse.de> :
17  * Removed tests for max-bomb-segments, which was breaking elvtune
18  *  when run without -bN
19  *
20  * Jens:
21  * - Rework again to work with bio instead of buffer_heads
22  * - lose bi_dev comparisons, partition handling is right now
23  * - completely modularize elevator setup and teardown
24  *
25  */
26 #include <linux/kernel.h>
27 #include <linux/fs.h>
28 #include <linux/blkdev.h>
29 #include <linux/bio.h>
30 #include <linux/module.h>
31 #include <linux/slab.h>
32 #include <linux/init.h>
33 #include <linux/compiler.h>
34 #include <linux/blktrace_api.h>
35 #include <linux/hash.h>
36 #include <linux/uaccess.h>
37 #include <linux/pm_runtime.h>
38 
39 #include <trace/events/block.h>
40 
41 #include "elevator.h"
42 #include "blk.h"
43 #include "blk-mq-sched.h"
44 #include "blk-pm.h"
45 #include "blk-wbt.h"
46 #include "blk-cgroup.h"
47 
48 /* Context data held while changing the elevator */
49 struct elv_change_ctx {
50 	const char *name;
51 	bool no_uevent;
52 
53 	/* for unregistering old elevator */
54 	struct elevator_queue *old;
55 	/* for registering new elevator */
56 	struct elevator_queue *new;
57 	/* holds sched tags data */
58 	struct elevator_tags *et;
59 };
60 
61 static DEFINE_SPINLOCK(elv_list_lock);
62 static LIST_HEAD(elv_list);
63 
64 /*
65  * Merge hash stuff.
66  */
67 #define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
68 
69 /*
70  * Query the io scheduler to see if a bio issued by the current process may
71  * be merged with rq.
72  */
73 static bool elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
74 {
75 	struct request_queue *q = rq->q;
76 	struct elevator_queue *e = q->elevator;
77 
78 	if (e->type->ops.allow_merge)
79 		return e->type->ops.allow_merge(q, rq, bio);
80 
81 	return true;
82 }
83 
84 /*
85  * can we safely merge with this request?
86  */
87 bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
88 {
89 	if (!blk_rq_merge_ok(rq, bio))
90 		return false;
91 
92 	if (!elv_iosched_allow_bio_merge(rq, bio))
93 		return false;
94 
95 	return true;
96 }
97 EXPORT_SYMBOL(elv_bio_merge_ok);
98 
99 /**
100  * elevator_match - Check whether @e's name or alias matches @name
101  * @e: Scheduler to test
102  * @name: Elevator name to test
103  *
104  * Return true if the elevator @e's name or alias matches @name.
105  */
106 static bool elevator_match(const struct elevator_type *e, const char *name)
107 {
108 	return !strcmp(e->elevator_name, name) ||
109 		(e->elevator_alias && !strcmp(e->elevator_alias, name));
110 }
111 
112 static struct elevator_type *__elevator_find(const char *name)
113 {
114 	struct elevator_type *e;
115 
116 	list_for_each_entry(e, &elv_list, list)
117 		if (elevator_match(e, name))
118 			return e;
119 	return NULL;
120 }
121 
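/*
 * Look up an elevator type by name and take a reference on it. Returns
 * NULL if no matching type is registered or the reference cannot be taken
 * (e.g. the owning module is being unloaded).
 */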
122 static struct elevator_type *elevator_find_get(const char *name)
123 {
124 	struct elevator_type *e;
125 
126 	spin_lock(&elv_list_lock);
127 	e = __elevator_find(name);
128 	if (e && (!elevator_tryget(e)))
129 		e = NULL;
130 	spin_unlock(&elv_list_lock);
131 	return e;
132 }
133 
134 static const struct kobj_type elv_ktype;
135 
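/*
 * Allocate and initialize an elevator_queue for @q: take a reference on the
 * elevator type @e, set up the sysfs kobject and the back-merge hash, and
 * record the pre-allocated scheduler tags @et.
 */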
136 struct elevator_queue *elevator_alloc(struct request_queue *q,
137 		struct elevator_type *e, struct elevator_tags *et)
138 {
139 	struct elevator_queue *eq;
140 
141 	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
142 	if (unlikely(!eq))
143 		return NULL;
144 
145 	__elevator_get(e);
146 	eq->type = e;
147 	kobject_init(&eq->kobj, &elv_ktype);
148 	mutex_init(&eq->sysfs_lock);
149 	hash_init(eq->hash);
150 	eq->et = et;
151 
152 	return eq;
153 }
154 
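/* kobject release callback: drop the elevator type reference and free the queue */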
155 static void elevator_release(struct kobject *kobj)
156 {
157 	struct elevator_queue *e;
158 
159 	e = container_of(kobj, struct elevator_queue, kobj);
160 	elevator_put(e->type);
161 	kfree(e);
162 }
163 
164 static void elevator_exit(struct request_queue *q)
165 {
166 	struct elevator_queue *e = q->elevator;
167 
168 	lockdep_assert_held(&q->elevator_lock);
169 
170 	ioc_clear_queue(q);
171 
172 	mutex_lock(&e->sysfs_lock);
173 	blk_mq_exit_sched(q, e);
174 	mutex_unlock(&e->sysfs_lock);
175 }
176 
177 static inline void __elv_rqhash_del(struct request *rq)
178 {
179 	hash_del(&rq->hash);
180 	rq->rq_flags &= ~RQF_HASHED;
181 }
182 
183 void elv_rqhash_del(struct request_queue *q, struct request *rq)
184 {
185 	if (ELV_ON_HASH(rq))
186 		__elv_rqhash_del(rq);
187 }
188 EXPORT_SYMBOL_GPL(elv_rqhash_del);
189 
190 void elv_rqhash_add(struct request_queue *q, struct request *rq)
191 {
192 	struct elevator_queue *e = q->elevator;
193 
194 	BUG_ON(ELV_ON_HASH(rq));
195 	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
196 	rq->rq_flags |= RQF_HASHED;
197 }
198 EXPORT_SYMBOL_GPL(elv_rqhash_add);
199 
200 void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
201 {
202 	__elv_rqhash_del(rq);
203 	elv_rqhash_add(q, rq);
204 }
205 
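/*
 * Find a request in the merge hash whose end sector matches @offset (i.e. a
 * back-merge candidate). Entries that are no longer mergeable are removed
 * from the hash as they are encountered.
 */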
206 struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
207 {
208 	struct elevator_queue *e = q->elevator;
209 	struct hlist_node *next;
210 	struct request *rq;
211 
212 	hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
213 		BUG_ON(!ELV_ON_HASH(rq));
214 
215 		if (unlikely(!rq_mergeable(rq))) {
216 			__elv_rqhash_del(rq);
217 			continue;
218 		}
219 
220 		if (rq_hash_key(rq) == offset)
221 			return rq;
222 	}
223 
224 	return NULL;
225 }
226 
227 /*
228  * RB-tree support functions for inserting/lookup/removal of requests
229  * in a sorted RB tree.
230  */
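/*
 * Schedulers that keep a per-direction sorted tree typically wrap these
 * helpers roughly as follows (illustrative sketch; "sort_list" and
 * "data_dir" are placeholder names, not fields defined here):
 *
 *	elv_rb_add(&sort_list[data_dir], rq);			// on insert
 *	elv_rb_del(&sort_list[data_dir], rq);			// on dispatch/removal
 *	rq = elv_rb_find(&sort_list[data_dir], sector);		// front-merge lookup
 */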
231 void elv_rb_add(struct rb_root *root, struct request *rq)
232 {
233 	struct rb_node **p = &root->rb_node;
234 	struct rb_node *parent = NULL;
235 	struct request *__rq;
236 
237 	while (*p) {
238 		parent = *p;
239 		__rq = rb_entry(parent, struct request, rb_node);
240 
241 		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
242 			p = &(*p)->rb_left;
243 		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
244 			p = &(*p)->rb_right;
245 	}
246 
247 	rb_link_node(&rq->rb_node, parent, p);
248 	rb_insert_color(&rq->rb_node, root);
249 }
250 EXPORT_SYMBOL(elv_rb_add);
251 
252 void elv_rb_del(struct rb_root *root, struct request *rq)
253 {
254 	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
255 	rb_erase(&rq->rb_node, root);
256 	RB_CLEAR_NODE(&rq->rb_node);
257 }
258 EXPORT_SYMBOL(elv_rb_del);
259 
260 struct request *elv_rb_find(struct rb_root *root, sector_t sector)
261 {
262 	struct rb_node *n = root->rb_node;
263 	struct request *rq;
264 
265 	while (n) {
266 		rq = rb_entry(n, struct request, rb_node);
267 
268 		if (sector < blk_rq_pos(rq))
269 			n = n->rb_left;
270 		else if (sector > blk_rq_pos(rq))
271 			n = n->rb_right;
272 		else
273 			return rq;
274 	}
275 
276 	return NULL;
277 }
278 EXPORT_SYMBOL(elv_rb_find);
279 
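/*
 * Decide whether and how @bio can be merged into an existing request. The
 * one-hit cache (q->last_merge) and the back-merge hash are tried first;
 * otherwise the decision is handed to the scheduler's ->request_merge()
 * hook. On success *req points at the request to merge into and the merge
 * type is returned.
 */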
280 enum elv_merge elv_merge(struct request_queue *q, struct request **req,
281 		struct bio *bio)
282 {
283 	struct elevator_queue *e = q->elevator;
284 	struct request *__rq;
285 
286 	/*
287 	 * Levels of merges:
288 	 * 	nomerges:  No merges at all attempted
289 	 * 	noxmerges: Only simple one-hit cache try
290 	 * 	merges:	   All merge tries attempted
291 	 */
292 	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
293 		return ELEVATOR_NO_MERGE;
294 
295 	/*
296 	 * First try one-hit cache.
297 	 */
298 	if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
299 		enum elv_merge ret = blk_try_merge(q->last_merge, bio);
300 
301 		if (ret != ELEVATOR_NO_MERGE) {
302 			*req = q->last_merge;
303 			return ret;
304 		}
305 	}
306 
307 	if (blk_queue_noxmerges(q))
308 		return ELEVATOR_NO_MERGE;
309 
310 	/*
311 	 * See if our hash lookup can find a potential backmerge.
312 	 */
313 	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
314 	if (__rq && elv_bio_merge_ok(__rq, bio)) {
315 		*req = __rq;
316 
317 		if (blk_discard_mergable(__rq))
318 			return ELEVATOR_DISCARD_MERGE;
319 		return ELEVATOR_BACK_MERGE;
320 	}
321 
322 	if (e->type->ops.request_merge)
323 		return e->type->ops.request_merge(q, req, bio);
324 
325 	return ELEVATOR_NO_MERGE;
326 }
327 
328 /*
329  * Attempt to do an insertion back merge. Only check for the case where
330  * we can append 'rq' to an existing request, so we can throw 'rq' away
331  * afterwards.
332  *
333  * Returns true if we merged, false otherwise. 'free' will contain all
334  * requests that need to be freed.
335  */
336 bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq,
337 			      struct list_head *free)
338 {
339 	struct request *__rq;
340 	bool ret;
341 
342 	if (blk_queue_nomerges(q))
343 		return false;
344 
345 	/*
346 	 * First try one-hit cache.
347 	 */
348 	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) {
349 		list_add(&rq->queuelist, free);
350 		return true;
351 	}
352 
353 	if (blk_queue_noxmerges(q))
354 		return false;
355 
356 	ret = false;
357 	/*
358 	 * See if our hash lookup can find a potential backmerge.
359 	 */
360 	while (1) {
361 		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
362 		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
363 			break;
364 
365 		list_add(&rq->queuelist, free);
366 		/* The merged request could be merged with others, try again */
367 		ret = true;
368 		rq = __rq;
369 	}
370 
371 	return ret;
372 }
373 
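/*
 * Called after a bio has been merged into @rq: let the scheduler update its
 * state, reposition @rq in the back-merge hash if its end sector changed
 * (back merge), and refresh the one-hit cache.
 */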
374 void elv_merged_request(struct request_queue *q, struct request *rq,
375 		enum elv_merge type)
376 {
377 	struct elevator_queue *e = q->elevator;
378 
379 	if (e->type->ops.request_merged)
380 		e->type->ops.request_merged(q, rq, type);
381 
382 	if (type == ELEVATOR_BACK_MERGE)
383 		elv_rqhash_reposition(q, rq);
384 
385 	q->last_merge = rq;
386 }
387 
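/*
 * Called when @next has been merged into @rq and is about to be freed:
 * notify the scheduler, reposition @rq in the back-merge hash and update
 * the one-hit cache.
 */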
388 void elv_merge_requests(struct request_queue *q, struct request *rq,
389 			     struct request *next)
390 {
391 	struct elevator_queue *e = q->elevator;
392 
393 	if (e->type->ops.requests_merged)
394 		e->type->ops.requests_merged(q, rq, next);
395 
396 	elv_rqhash_reposition(q, rq);
397 	q->last_merge = rq;
398 }
399 
400 struct request *elv_latter_request(struct request_queue *q, struct request *rq)
401 {
402 	struct elevator_queue *e = q->elevator;
403 
404 	if (e->type->ops.next_request)
405 		return e->type->ops.next_request(q, rq);
406 
407 	return NULL;
408 }
409 
410 struct request *elv_former_request(struct request_queue *q, struct request *rq)
411 {
412 	struct elevator_queue *e = q->elevator;
413 
414 	if (e->type->ops.former_request)
415 		return e->type->ops.former_request(q, rq);
416 
417 	return NULL;
418 }
419 
420 #define to_elv(atr) container_of_const((atr), struct elv_fs_entry, attr)
421 
422 static ssize_t
423 elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
424 {
425 	const struct elv_fs_entry *entry = to_elv(attr);
426 	struct elevator_queue *e;
427 	ssize_t error = -ENODEV;
428 
429 	if (!entry->show)
430 		return -EIO;
431 
432 	e = container_of(kobj, struct elevator_queue, kobj);
433 	mutex_lock(&e->sysfs_lock);
434 	if (!test_bit(ELEVATOR_FLAG_DYING, &e->flags))
435 		error = entry->show(e, page);
436 	mutex_unlock(&e->sysfs_lock);
437 	return error;
438 }
439 
440 static ssize_t
441 elv_attr_store(struct kobject *kobj, struct attribute *attr,
442 	       const char *page, size_t length)
443 {
444 	const struct elv_fs_entry *entry = to_elv(attr);
445 	struct elevator_queue *e;
446 	ssize_t error = -ENODEV;
447 
448 	if (!entry->store)
449 		return -EIO;
450 
451 	e = container_of(kobj, struct elevator_queue, kobj);
452 	mutex_lock(&e->sysfs_lock);
453 	if (!test_bit(ELEVATOR_FLAG_DYING, &e->flags))
454 		error = entry->store(e, page, length);
455 	mutex_unlock(&e->sysfs_lock);
456 	return error;
457 }
458 
459 static const struct sysfs_ops elv_sysfs_ops = {
460 	.show	= elv_attr_show,
461 	.store	= elv_attr_store,
462 };
463 
464 static const struct kobj_type elv_ktype = {
465 	.sysfs_ops	= &elv_sysfs_ops,
466 	.release	= elevator_release,
467 };
468 
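/*
 * Register the elevator's "iosched" kobject under the disk's queue
 * directory, create its sysfs attributes and debugfs entries, and
 * optionally emit a KOBJ_ADD uevent.
 */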
469 static int elv_register_queue(struct request_queue *q,
470 			      struct elevator_queue *e,
471 			      bool uevent)
472 {
473 	int error;
474 
475 	error = kobject_add(&e->kobj, &q->disk->queue_kobj, "iosched");
476 	if (!error) {
477 		const struct elv_fs_entry *attr = e->type->elevator_attrs;
478 		if (attr) {
479 			while (attr->attr.name) {
480 				if (sysfs_create_file(&e->kobj, &attr->attr))
481 					break;
482 				attr++;
483 			}
484 		}
485 		if (uevent)
486 			kobject_uevent(&e->kobj, KOBJ_ADD);
487 
488 		/*
489 		 * The scheduler is initialized and ready to be exported via
490 		 * debugfs.
491 		 */
492 		blk_mq_sched_reg_debugfs(q);
493 		set_bit(ELEVATOR_FLAG_REGISTERED, &e->flags);
494 	}
495 	return error;
496 }
497 
498 static void elv_unregister_queue(struct request_queue *q,
499 				 struct elevator_queue *e)
500 {
501 	if (e && test_and_clear_bit(ELEVATOR_FLAG_REGISTERED, &e->flags)) {
502 		kobject_uevent(&e->kobj, KOBJ_REMOVE);
503 		kobject_del(&e->kobj);
504 
505 		/* unexport via debugfs before exiting sched */
506 		blk_mq_sched_unreg_debugfs(q);
507 	}
508 }
509 
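/*
 * Called by I/O scheduler modules (built-in or loadable) to make themselves
 * available. A minimal registration looks roughly like this (illustrative
 * sketch, not copied from a particular scheduler):
 *
 *	static struct elevator_type foo_sched = {
 *		.ops = {
 *			.insert_requests	= foo_insert_requests,
 *			.dispatch_request	= foo_dispatch_request,
 *			.finish_request		= foo_finish_request,
 *		},
 *		.elevator_name	= "foo",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return elv_register(&foo_sched);
 *	}
 */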
510 int elv_register(struct elevator_type *e)
511 {
512 	/* finish request is mandatory */
513 	if (WARN_ON_ONCE(!e->ops.finish_request))
514 		return -EINVAL;
515 	/* insert_requests and dispatch_request are mandatory */
516 	if (WARN_ON_ONCE(!e->ops.insert_requests || !e->ops.dispatch_request))
517 		return -EINVAL;
518 
519 	/* create icq_cache if requested */
520 	if (e->icq_size) {
521 		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
522 		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
523 			return -EINVAL;
524 
525 		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
526 			 "%s_io_cq", e->elevator_name);
527 		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
528 						 e->icq_align, 0, NULL);
529 		if (!e->icq_cache)
530 			return -ENOMEM;
531 	}
532 
533 	/* register, don't allow duplicate names */
534 	spin_lock(&elv_list_lock);
535 	if (__elevator_find(e->elevator_name)) {
536 		spin_unlock(&elv_list_lock);
537 		kmem_cache_destroy(e->icq_cache);
538 		return -EBUSY;
539 	}
540 	list_add_tail(&e->list, &elv_list);
541 	spin_unlock(&elv_list_lock);
542 
543 	printk(KERN_INFO "io scheduler %s registered\n", e->elevator_name);
544 
545 	return 0;
546 }
547 EXPORT_SYMBOL_GPL(elv_register);
548 
549 void elv_unregister(struct elevator_type *e)
550 {
551 	/* unregister */
552 	spin_lock(&elv_list_lock);
553 	list_del_init(&e->list);
554 	spin_unlock(&elv_list_lock);
555 
556 	/*
557 	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
558 	 * sure all RCU operations are complete before proceeding.
559 	 */
560 	if (e->icq_cache) {
561 		rcu_barrier();
562 		kmem_cache_destroy(e->icq_cache);
563 		e->icq_cache = NULL;
564 	}
565 }
566 EXPORT_SYMBOL_GPL(elv_unregister);
567 
568 /*
569  * Switch to new_e io scheduler.
570  *
571  * If switching fails, we are most likely running out of memory and not able
572  * to restore the old io scheduler, so leave the io scheduler set to none.
573  */
574 static int elevator_switch(struct request_queue *q, struct elv_change_ctx *ctx)
575 {
576 	struct elevator_type *new_e = NULL;
577 	int ret = 0;
578 
579 	WARN_ON_ONCE(q->mq_freeze_depth == 0);
580 	lockdep_assert_held(&q->elevator_lock);
581 
582 	if (strncmp(ctx->name, "none", 4)) {
583 		new_e = elevator_find_get(ctx->name);
584 		if (!new_e)
585 			return -EINVAL;
586 	}
587 
588 	blk_mq_quiesce_queue(q);
589 
590 	if (q->elevator) {
591 		ctx->old = q->elevator;
592 		elevator_exit(q);
593 	}
594 
595 	if (new_e) {
596 		ret = blk_mq_init_sched(q, new_e, ctx->et);
597 		if (ret)
598 			goto out_unfreeze;
599 		ctx->new = q->elevator;
600 	} else {
601 		blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
602 		q->elevator = NULL;
603 		q->nr_requests = q->tag_set->queue_depth;
604 	}
605 	blk_add_trace_msg(q, "elv switch: %s", ctx->name);
606 
607 out_unfreeze:
608 	blk_mq_unquiesce_queue(q);
609 
610 	if (ret) {
611 		pr_warn("elv: switch to \"%s\" failed, falling back to \"none\"\n",
612 			new_e->elevator_name);
613 	}
614 
615 	if (new_e)
616 		elevator_put(new_e);
617 	return ret;
618 }
619 
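/*
 * Tear down the current elevator under a queue freeze, then release its
 * scheduler tags and kobject. Used when registering the new elevator's
 * sysfs files fails after a switch.
 */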
620 static void elv_exit_and_release(struct request_queue *q)
621 {
622 	struct elevator_queue *e;
623 	unsigned memflags;
624 
625 	memflags = blk_mq_freeze_queue(q);
626 	mutex_lock(&q->elevator_lock);
627 	e = q->elevator;
628 	elevator_exit(q);
629 	mutex_unlock(&q->elevator_lock);
630 	blk_mq_unfreeze_queue(q, memflags);
631 	if (e) {
632 		blk_mq_free_sched_tags(e->et, q->tag_set);
633 		kobject_put(&e->kobj);
634 	}
635 }
636 
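/*
 * Second stage of an elevator change, run after the queue has been
 * unfrozen: unregister and release the old elevator (re-enabling wbt if it
 * was disabled on its behalf) and register the new one in sysfs.
 */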
637 static int elevator_change_done(struct request_queue *q,
638 				struct elv_change_ctx *ctx)
639 {
640 	int ret = 0;
641 
642 	if (ctx->old) {
643 		bool enable_wbt = test_bit(ELEVATOR_FLAG_ENABLE_WBT_ON_EXIT,
644 				&ctx->old->flags);
645 
646 		elv_unregister_queue(q, ctx->old);
647 		blk_mq_free_sched_tags(ctx->old->et, q->tag_set);
648 		kobject_put(&ctx->old->kobj);
649 		if (enable_wbt)
650 			wbt_enable_default(q->disk);
651 	}
652 	if (ctx->new) {
653 		ret = elv_register_queue(q, ctx->new, !ctx->no_uevent);
654 		if (ret)
655 			elv_exit_and_release(q);
656 	}
657 	return ret;
658 }
659 
660 /*
661  * Switch this queue to the given IO scheduler.
662  */
663 static int elevator_change(struct request_queue *q, struct elv_change_ctx *ctx)
664 {
665 	unsigned int memflags;
666 	struct blk_mq_tag_set *set = q->tag_set;
667 	int ret = 0;
668 
669 	lockdep_assert_held(&set->update_nr_hwq_lock);
670 
671 	if (strncmp(ctx->name, "none", 4)) {
672 		ctx->et = blk_mq_alloc_sched_tags(set, set->nr_hw_queues);
673 		if (!ctx->et)
674 			return -ENOMEM;
675 	}
676 
677 	memflags = blk_mq_freeze_queue(q);
678 	/*
679 	 * May be called before the disk is added, when there isn't any FS I/O,
680 	 * so freezing the queue plus canceling dispatch work is enough to
681 	 * drain any dispatch activity originating from passthrough requests;
682 	 * there is then no need to quiesce the queue, which may add long boot
683 	 * latency, especially when lots of disks are involved.
684 	 *
685 	 * The disk isn't added yet, so the queue lock can only be verified manually.
686 	 */
687 	blk_mq_cancel_work_sync(q);
688 	mutex_lock(&q->elevator_lock);
689 	if (!(q->elevator && elevator_match(q->elevator->type, ctx->name)))
690 		ret = elevator_switch(q, ctx);
691 	mutex_unlock(&q->elevator_lock);
692 	blk_mq_unfreeze_queue(q, memflags);
693 	if (!ret)
694 		ret = elevator_change_done(q, ctx);
695 	/*
696 	 * Free sched tags if they were allocated but we couldn't switch the elevator.
697 	 */
698 	if (ctx->et && !ctx->new)
699 		blk_mq_free_sched_tags(ctx->et, set);
700 
701 	return ret;
702 }
703 
704 /*
705  * The I/O scheduler depends on the number of hardware queues, so this forces
706  * a reattachment when nr_hw_queues changes.
707  */
708 void elv_update_nr_hw_queues(struct request_queue *q, struct elevator_type *e,
709 		struct elevator_tags *t)
710 {
711 	struct blk_mq_tag_set *set = q->tag_set;
712 	struct elv_change_ctx ctx = {};
713 	int ret = -ENODEV;
714 
715 	WARN_ON_ONCE(q->mq_freeze_depth == 0);
716 
717 	if (e && !blk_queue_dying(q) && blk_queue_registered(q)) {
718 		ctx.name = e->elevator_name;
719 		ctx.et = t;
720 
721 		mutex_lock(&q->elevator_lock);
722 		/* force the elevator to reattach after nr_hw_queues is updated */
723 		ret = elevator_switch(q, &ctx);
724 		mutex_unlock(&q->elevator_lock);
725 	}
726 	blk_mq_unfreeze_queue_nomemrestore(q);
727 	if (!ret)
728 		WARN_ON_ONCE(elevator_change_done(q, &ctx));
729 	/*
730 	 * Free sched tags if they were allocated but we couldn't switch the elevator.
731 	 */
732 	if (t && !ctx.new)
733 		blk_mq_free_sched_tags(t, set);
734 }
735 
736 /*
737  * Use the default elevator settings. If the chosen elevator initialization
738  * fails, fall back to the "none" elevator (no elevator).
739  */
740 void elevator_set_default(struct request_queue *q)
741 {
742 	struct elv_change_ctx ctx = {
743 		.name = "mq-deadline",
744 		.no_uevent = true,
745 	};
746 	int err;
747 	struct elevator_type *e;
748 
749 	/* now we allow switching the elevator */
750 	blk_queue_flag_clear(QUEUE_FLAG_NO_ELV_SWITCH, q);
751 
752 	if (q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT)
753 		return;
754 
755 	/*
756 	 * For single queue devices, default to using mq-deadline. If we
757 	 * have multiple queues or mq-deadline is not available, default
758 	 * to "none".
759 	 */
760 	e = elevator_find_get(ctx.name);
761 	if (!e)
762 		return;
763 
764 	if ((q->nr_hw_queues == 1 ||
765 			blk_mq_is_shared_tags(q->tag_set->flags))) {
766 		err = elevator_change(q, &ctx);
767 		if (err < 0)
768 			pr_warn("\"%s\" elevator initialization failed %d, falling back to \"none\"\n",
769 					ctx.name, err);
770 	}
771 	elevator_put(e);
772 }
773 
774 void elevator_set_none(struct request_queue *q)
775 {
776 	struct elv_change_ctx ctx = {
777 		.name	= "none",
778 	};
779 	int err;
780 
781 	err = elevator_change(q, &ctx);
782 	if (err < 0)
783 		pr_warn("%s: set none elevator failed %d\n", __func__, err);
784 }
785 
786 static void elv_iosched_load_module(const char *elevator_name)
787 {
788 	struct elevator_type *found;
789 
790 	spin_lock(&elv_list_lock);
791 	found = __elevator_find(elevator_name);
792 	spin_unlock(&elv_list_lock);
793 
794 	if (!found)
795 		request_module("%s-iosched", elevator_name);
796 }
797 
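/*
 * sysfs store handler for the queue's "scheduler" attribute: parse the
 * requested elevator name, load the scheduler module if necessary, and
 * switch the queue to it.
 */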
798 ssize_t elv_iosched_store(struct gendisk *disk, const char *buf,
799 			  size_t count)
800 {
801 	char elevator_name[ELV_NAME_MAX];
802 	struct elv_change_ctx ctx = {};
803 	int ret;
804 	struct request_queue *q = disk->queue;
805 	struct blk_mq_tag_set *set = q->tag_set;
806 
807 	/* Make sure queue is not in the middle of being removed */
808 	if (!blk_queue_registered(q))
809 		return -ENOENT;
810 
811 	/*
812 	 * If the attribute needs to load a module, do it before freezing the
813 	 * queue to ensure that the module file can be read when the request
814 	 * queue is the one for the device storing the module file.
815 	 */
816 	strscpy(elevator_name, buf, sizeof(elevator_name));
817 	ctx.name = strstrip(elevator_name);
818 
819 	elv_iosched_load_module(ctx.name);
820 
821 	down_read(&set->update_nr_hwq_lock);
822 	if (!blk_queue_no_elv_switch(q)) {
823 		ret = elevator_change(q, &ctx);
824 		if (!ret)
825 			ret = count;
826 	} else {
827 		ret = -ENOENT;
828 	}
829 	up_read(&set->update_nr_hwq_lock);
830 	return ret;
831 }
832 
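/*
 * sysfs show handler for the queue's "scheduler" attribute: list all
 * registered elevators (plus "none"), with the active one in brackets.
 */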
833 ssize_t elv_iosched_show(struct gendisk *disk, char *name)
834 {
835 	struct request_queue *q = disk->queue;
836 	struct elevator_type *cur = NULL, *e;
837 	int len = 0;
838 
839 	mutex_lock(&q->elevator_lock);
840 	if (!q->elevator) {
841 		len += sprintf(name+len, "[none] ");
842 	} else {
843 		len += sprintf(name+len, "none ");
844 		cur = q->elevator->type;
845 	}
846 
847 	spin_lock(&elv_list_lock);
848 	list_for_each_entry(e, &elv_list, list) {
849 		if (e == cur)
850 			len += sprintf(name+len, "[%s] ", e->elevator_name);
851 		else
852 			len += sprintf(name+len, "%s ", e->elevator_name);
853 	}
854 	spin_unlock(&elv_list_lock);
855 
856 	len += sprintf(name+len, "\n");
857 	mutex_unlock(&q->elevator_lock);
858 
859 	return len;
860 }
861 
862 struct request *elv_rb_former_request(struct request_queue *q,
863 				      struct request *rq)
864 {
865 	struct rb_node *rbprev = rb_prev(&rq->rb_node);
866 
867 	if (rbprev)
868 		return rb_entry_rq(rbprev);
869 
870 	return NULL;
871 }
872 EXPORT_SYMBOL(elv_rb_former_request);
873 
874 struct request *elv_rb_latter_request(struct request_queue *q,
875 				      struct request *rq)
876 {
877 	struct rb_node *rbnext = rb_next(&rq->rb_node);
878 
879 	if (rbnext)
880 		return rb_entry_rq(rbnext);
881 
882 	return NULL;
883 }
884 EXPORT_SYMBOL(elv_rb_latter_request);
885 
886 static int __init elevator_setup(char *str)
887 {
888 	pr_warn("Kernel parameter elevator= does not have any effect anymore.\n"
889 		"Please use sysfs to set IO scheduler for individual devices.\n");
890 	return 1;
891 }
892 
893 __setup("elevator=", elevator_setup);
894