xref: /linux/block/elevator.c (revision a7f557923441186a3cdbabc54f1bcacf42b63bf5)
/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <trace/block.h>
#include <linux/hash.h>
#include <linux/uaccess.h>

#include "blk.h"

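/*
 * Registry of available elevator types; additions, removals and
 * lookups are serialized by elv_list_lock.
 */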
static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

DEFINE_TRACE(block_rq_abort);

/*
 * Merge hash stuff.
 */
static const int elv_hash_shift = 6;
#define ELV_HASH_BLOCK(sec)     ((sec) >> 3)
#define ELV_HASH_FN(sec)        \
                (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES        (1 << elv_hash_shift)
#define rq_hash_key(rq)         ((rq)->sector + (rq)->nr_sectors)
#define ELV_ON_HASH(rq)         (!hlist_unhashed(&(rq)->hash))
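/*
 * Note that rq_hash_key() is the *end* sector of a request, so a hash
 * lookup on a bio's start sector finds requests the bio could be
 * back-merged onto; see elv_rqhash_find() and elv_merge() below.
 */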

DEFINE_TRACE(block_rq_insert);
DEFINE_TRACE(block_rq_issue);

/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;

        if (e->ops->elevator_allow_merge_fn)
                return e->ops->elevator_allow_merge_fn(q, rq, bio);

        return 1;
}

/*
 * can we safely merge with this request?
 */
int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
        if (!rq_mergeable(rq))
                return 0;

        /*
         * Don't merge file system requests and discard requests
         */
        if (bio_discard(bio) != bio_discard(rq->bio))
                return 0;

        /*
         * different data direction or already started, don't merge
         */
        if (bio_data_dir(bio) != rq_data_dir(rq))
                return 0;

        /*
         * must be same device and not a special request
         */
        if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
                return 0;

        /*
         * only merge an integrity-protected bio into a request that is
         * integrity protected as well
         */
        if (bio_integrity(bio) != blk_integrity_rq(rq))
                return 0;

        if (!elv_iosched_allow_merge(rq, bio))
                return 0;

        return 1;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

static inline int elv_try_merge(struct request *__rq, struct bio *bio)
{
        int ret = ELEVATOR_NO_MERGE;

        /*
         * check whether merging is allowed at all, then whether the bio
         * is adjacent to the back or the front of the request
         */
        if (elv_rq_merge_ok(__rq, bio)) {
                if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
                        ret = ELEVATOR_BACK_MERGE;
                else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
                        ret = ELEVATOR_FRONT_MERGE;
        }

        return ret;
}

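/*
 * Look up a registered elevator type by name.  Must be called with
 * elv_list_lock held.
 */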
static struct elevator_type *elevator_find(const char *name)
{
        struct elevator_type *e;

        list_for_each_entry(e, &elv_list, list) {
                if (!strcmp(e->elevator_name, name))
                        return e;
        }

        return NULL;
}

static void elevator_put(struct elevator_type *e)
{
        module_put(e->elevator_owner);
}

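/*
 * Find the named elevator type, loading the "<name>-iosched" module if
 * it is not registered yet, and take a reference on its module.  The
 * reference is dropped again via elevator_put().
 */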
static struct elevator_type *elevator_get(const char *name)
{
        struct elevator_type *e;

        spin_lock(&elv_list_lock);

        e = elevator_find(name);
        if (!e) {
                char elv[ELV_NAME_MAX + strlen("-iosched")];

                spin_unlock(&elv_list_lock);

                if (!strcmp(name, "anticipatory"))
                        sprintf(elv, "as-iosched");
                else
                        sprintf(elv, "%s-iosched", name);

                request_module("%s", elv);
                spin_lock(&elv_list_lock);
                e = elevator_find(name);
        }

        if (e && !try_module_get(e->elevator_owner))
                e = NULL;

        spin_unlock(&elv_list_lock);

        return e;
}

static void *elevator_init_queue(struct request_queue *q,
                                 struct elevator_queue *eq)
{
        return eq->ops->elevator_init_fn(q);
}

static void elevator_attach(struct request_queue *q, struct elevator_queue *eq,
                            void *data)
{
        q->elevator = eq;
        eq->elevator_data = data;
}

static char chosen_elevator[16];

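/*
 * Parse the "elevator=" boot parameter; the chosen name becomes the
 * default scheduler when queues are initialized.
 */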
static int __init elevator_setup(char *str)
{
        /*
         * Be backwards-compatible with previous kernels, so users
         * won't get the wrong elevator.
         */
        if (!strcmp(str, "as"))
                strcpy(chosen_elevator, "anticipatory");
        else
                strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
        return 1;
}

__setup("elevator=", elevator_setup);

static struct kobj_type elv_ktype;

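/*
 * Allocate an elevator_queue, including its back-merge hash table, on
 * the queue's home node.  On failure the type reference taken by
 * elevator_get() is dropped.
 */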
static struct elevator_queue *elevator_alloc(struct request_queue *q,
                                  struct elevator_type *e)
{
        struct elevator_queue *eq;
        int i;

        eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
        if (unlikely(!eq))
                goto err;

        eq->ops = &e->ops;
        eq->elevator_type = e;
        kobject_init(&eq->kobj, &elv_ktype);
        mutex_init(&eq->sysfs_lock);

        eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
                                        GFP_KERNEL, q->node);
        if (!eq->hash)
                goto err;

        for (i = 0; i < ELV_HASH_ENTRIES; i++)
                INIT_HLIST_HEAD(&eq->hash[i]);

        return eq;
err:
        kfree(eq);
        elevator_put(e);
        return NULL;
}

static void elevator_release(struct kobject *kobj)
{
        struct elevator_queue *e;

        e = container_of(kobj, struct elevator_queue, kobj);
        elevator_put(e->elevator_type);
        kfree(e->hash);
        kfree(e);
}

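/*
 * Pick and initialize an elevator for @q.  Preference order: the name
 * passed by the caller, then the "elevator=" boot parameter, then
 * CONFIG_DEFAULT_IOSCHED, with noop as the fallback of last resort.
 */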
int elevator_init(struct request_queue *q, char *name)
{
        struct elevator_type *e = NULL;
        struct elevator_queue *eq;
        int ret = 0;
        void *data;

        INIT_LIST_HEAD(&q->queue_head);
        q->last_merge = NULL;
        q->end_sector = 0;
        q->boundary_rq = NULL;

        if (name) {
                e = elevator_get(name);
                if (!e)
                        return -EINVAL;
        }

        if (!e && *chosen_elevator) {
                e = elevator_get(chosen_elevator);
                if (!e)
                        printk(KERN_ERR "I/O scheduler %s not found\n",
                                                        chosen_elevator);
        }

        if (!e) {
                e = elevator_get(CONFIG_DEFAULT_IOSCHED);
                if (!e) {
                        printk(KERN_ERR
                                "Default I/O scheduler not found. " \
                                "Using noop.\n");
                        e = elevator_get("noop");
                }
        }

        eq = elevator_alloc(q, e);
        if (!eq)
                return -ENOMEM;

        data = elevator_init_queue(q, eq);
        if (!data) {
                kobject_put(&eq->kobj);
                return -ENOMEM;
        }

        elevator_attach(q, eq, data);
        return ret;
}
EXPORT_SYMBOL(elevator_init);

void elevator_exit(struct elevator_queue *e)
{
        mutex_lock(&e->sysfs_lock);
        if (e->ops->elevator_exit_fn)
                e->ops->elevator_exit_fn(e);
        e->ops = NULL;
        mutex_unlock(&e->sysfs_lock);

        kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);

static void elv_activate_rq(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->ops->elevator_activate_req_fn)
                e->ops->elevator_activate_req_fn(q, rq);
}

static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->ops->elevator_deactivate_req_fn)
                e->ops->elevator_deactivate_req_fn(q, rq);
}

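/*
 * Back-merge hash helpers: mergeable requests are hashed by the sector
 * they end at, so elv_merge() can cheaply find a request that a new
 * bio would extend.
 */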
static inline void __elv_rqhash_del(struct request *rq)
{
        hlist_del_init(&rq->hash);
}

static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
        if (ELV_ON_HASH(rq))
                __elv_rqhash_del(rq);
}

static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        BUG_ON(ELV_ON_HASH(rq));
        hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
}

static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
        __elv_rqhash_del(rq);
        elv_rqhash_add(q, rq);
}

static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
        struct elevator_queue *e = q->elevator;
        struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
        struct hlist_node *entry, *next;
        struct request *rq;

        hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
                BUG_ON(!ELV_ON_HASH(rq));

                if (unlikely(!rq_mergeable(rq))) {
                        __elv_rqhash_del(rq);
                        continue;
                }

                if (rq_hash_key(rq) == offset)
                        return rq;
        }

        return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
struct request *elv_rb_add(struct rb_root *root, struct request *rq)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct request *__rq;

        while (*p) {
                parent = *p;
                __rq = rb_entry(parent, struct request, rb_node);

                if (rq->sector < __rq->sector)
                        p = &(*p)->rb_left;
                else if (rq->sector > __rq->sector)
                        p = &(*p)->rb_right;
                else
                        return __rq;
        }

        rb_link_node(&rq->rb_node, parent, p);
        rb_insert_color(&rq->rb_node, root);
        return NULL;
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
        BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
        rb_erase(&rq->rb_node, root);
        RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
        struct rb_node *n = root->rb_node;
        struct request *rq;

        while (n) {
                rq = rb_entry(n, struct request, rb_node);

                if (sector < rq->sector)
                        n = n->rb_left;
                else if (sector > rq->sector)
                        n = n->rb_right;
                else
                        return rq;
        }

        return NULL;
}
EXPORT_SYMBOL(elv_rb_find);

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
        sector_t boundary;
        struct list_head *entry;
        int stop_flags;

        if (q->last_merge == rq)
                q->last_merge = NULL;

        elv_rqhash_del(q, rq);

        q->nr_sorted--;

        boundary = q->end_sector;
        stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED;
        list_for_each_prev(entry, &q->queue_head) {
                struct request *pos = list_entry_rq(entry);

                if (blk_discard_rq(rq) != blk_discard_rq(pos))
                        break;
                if (rq_data_dir(rq) != rq_data_dir(pos))
                        break;
                if (pos->cmd_flags & stop_flags)
                        break;
                if (rq->sector >= boundary) {
                        if (pos->sector < boundary)
                                continue;
                } else {
                        if (pos->sector >= boundary)
                                break;
                }
                if (rq->sector >= pos->sector)
                        break;
        }

        list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
        if (q->last_merge == rq)
                q->last_merge = NULL;

        elv_rqhash_del(q, rq);

        q->nr_sorted--;

        q->end_sector = rq_end_sector(rq);
        q->boundary_rq = rq;
        list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);

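/*
 * Decide whether @bio can be merged with a pending request.  Tries the
 * one-hit last_merge cache first, then a back-merge via the hash, and
 * finally defers to the elevator's own merge function.  On success the
 * matching request is returned through *req.
 */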
int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
        struct elevator_queue *e = q->elevator;
        struct request *__rq;
        int ret;

        /*
         * First try one-hit cache.
         */
        if (q->last_merge) {
                ret = elv_try_merge(q->last_merge, bio);
                if (ret != ELEVATOR_NO_MERGE) {
                        *req = q->last_merge;
                        return ret;
                }
        }

        if (blk_queue_nomerges(q))
                return ELEVATOR_NO_MERGE;

        /*
         * See if our hash lookup can find a potential backmerge.
         */
        __rq = elv_rqhash_find(q, bio->bi_sector);
        if (__rq && elv_rq_merge_ok(__rq, bio)) {
                *req = __rq;
                return ELEVATOR_BACK_MERGE;
        }

        if (e->ops->elevator_merge_fn)
                return e->ops->elevator_merge_fn(q, req, bio);

        return ELEVATOR_NO_MERGE;
}

void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
        struct elevator_queue *e = q->elevator;

        if (e->ops->elevator_merged_fn)
                e->ops->elevator_merged_fn(q, rq, type);

        if (type == ELEVATOR_BACK_MERGE)
                elv_rqhash_reposition(q, rq);

        q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
                             struct request *next)
{
        struct elevator_queue *e = q->elevator;

        if (e->ops->elevator_merge_req_fn)
                e->ops->elevator_merge_req_fn(q, rq, next);

        elv_rqhash_reposition(q, rq);
        elv_rqhash_del(q, next);

        q->nr_sorted--;
        q->last_merge = rq;
}

void elv_requeue_request(struct request_queue *q, struct request *rq)
{
        /*
         * it already went through dequeue, we need to decrement the
         * in_flight count again
         */
        if (blk_account_rq(rq)) {
                q->in_flight--;
                if (blk_sorted_rq(rq))
                        elv_deactivate_rq(q, rq);
        }

        rq->cmd_flags &= ~REQ_STARTED;

        elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
}

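/*
 * Force the elevator to dispatch everything it is holding back.  If
 * requests remain sorted afterwards, forced dispatching in the active
 * scheduler is broken; complain, but at most ten times per boot.
 */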
void elv_drain_elevator(struct request_queue *q)
{
        static int printed;
        while (q->elevator->ops->elevator_dispatch_fn(q, 1))
                ;
        if (q->nr_sorted == 0)
                return;
        if (printed++ < 10) {
                printk(KERN_ERR "%s: forced dispatching is broken "
                       "(nr_sorted=%u), please report this\n",
                       q->elevator->elevator_type->elevator_name, q->nr_sorted);
        }
}

/*
 * Call with queue lock held, interrupts disabled
 */
void elv_quiesce_start(struct request_queue *q)
{
        queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);

        /*
         * make sure we don't have any requests in flight
         */
        elv_drain_elevator(q);
        while (q->rq.elvpriv) {
                __blk_run_queue(q);
                spin_unlock_irq(q->queue_lock);
                msleep(10);
                spin_lock_irq(q->queue_lock);
                elv_drain_elevator(q);
        }
}

void elv_quiesce_end(struct request_queue *q)
{
        queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
}

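/*
 * Core insertion path: place @rq according to @where - at the front or
 * back of the dispatch list, sorted into the elevator, or requeued in
 * flush-sequence order - and kick or unplug the queue as needed.
 */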
void elv_insert(struct request_queue *q, struct request *rq, int where)
{
        struct list_head *pos;
        unsigned ordseq;
        int unplug_it = 1;

        trace_block_rq_insert(q, rq);

        rq->q = q;

        switch (where) {
        case ELEVATOR_INSERT_FRONT:
                rq->cmd_flags |= REQ_SOFTBARRIER;

                list_add(&rq->queuelist, &q->queue_head);
                break;

        case ELEVATOR_INSERT_BACK:
                rq->cmd_flags |= REQ_SOFTBARRIER;
                elv_drain_elevator(q);
                list_add_tail(&rq->queuelist, &q->queue_head);
                /*
                 * We kick the queue here for the following reasons.
                 * - The elevator might have returned NULL previously
                 *   to delay requests and returned them now.  As the
                 *   queue wasn't empty before this request, ll_rw_blk
                 *   won't run the queue on return, resulting in hang.
                 * - Usually, back inserted requests won't be merged
                 *   with anything.  There's no point in delaying queue
                 *   processing.
                 */
                __blk_run_queue(q);
                break;

        case ELEVATOR_INSERT_SORT:
                BUG_ON(!blk_fs_request(rq) && !blk_discard_rq(rq));
                rq->cmd_flags |= REQ_SORTED;
                q->nr_sorted++;
                if (rq_mergeable(rq)) {
                        elv_rqhash_add(q, rq);
                        if (!q->last_merge)
                                q->last_merge = rq;
                }

                /*
                 * Some ioscheds (cfq) run q->request_fn directly, so
                 * rq cannot be accessed after calling
                 * elevator_add_req_fn.
                 */
                q->elevator->ops->elevator_add_req_fn(q, rq);
                break;

        case ELEVATOR_INSERT_REQUEUE:
                /*
                 * If ordered flush isn't in progress, we do front
                 * insertion; otherwise, requests should be requeued
                 * in ordseq order.
                 */
                rq->cmd_flags |= REQ_SOFTBARRIER;

                /*
                 * Most requeues happen because of a busy condition,
                 * don't force unplug of the queue for that case.
                 */
                unplug_it = 0;

                if (q->ordseq == 0) {
                        list_add(&rq->queuelist, &q->queue_head);
                        break;
                }

                ordseq = blk_ordered_req_seq(rq);

                list_for_each(pos, &q->queue_head) {
                        struct request *pos_rq = list_entry_rq(pos);
                        if (ordseq <= blk_ordered_req_seq(pos_rq))
                                break;
                }

                list_add_tail(&rq->queuelist, pos);
                break;

        default:
                printk(KERN_ERR "%s: bad insertion point %d\n",
                       __func__, where);
                BUG();
        }

        if (unplug_it && blk_queue_plugged(q)) {
                int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC]
                        - q->in_flight;

                if (nrq >= q->unplug_thresh)
                        __generic_unplug_device(q);
        }
}

void __elv_add_request(struct request_queue *q, struct request *rq, int where,
                       int plug)
{
        if (q->ordcolor)
                rq->cmd_flags |= REQ_ORDERED_COLOR;

        if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
                /*
                 * toggle ordered color
                 */
                if (blk_barrier_rq(rq))
                        q->ordcolor ^= 1;

                /*
                 * barriers implicitly indicate back insertion
                 */
                if (where == ELEVATOR_INSERT_SORT)
                        where = ELEVATOR_INSERT_BACK;

                /*
                 * this request is a scheduling boundary, update
                 * end_sector
                 */
                if (blk_fs_request(rq) || blk_discard_rq(rq)) {
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = rq;
                }
        } else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
                    where == ELEVATOR_INSERT_SORT)
                where = ELEVATOR_INSERT_BACK;

        if (plug)
                blk_plug_device(q);

        elv_insert(q, rq, where);
}
EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where,
                     int plug)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        __elv_add_request(q, rq, where, plug);
        spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);

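/*
 * Fetch the next dispatchable request, asking the elevator to refill
 * the dispatch list when it runs empty.  blk_do_ordered() may hold
 * back or replace a request while an ordered (barrier) sequence is in
 * progress.
 */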
static inline struct request *__elv_next_request(struct request_queue *q)
{
        struct request *rq;

        while (1) {
                while (!list_empty(&q->queue_head)) {
                        rq = list_entry_rq(q->queue_head.next);
                        if (blk_do_ordered(q, &rq))
                                return rq;
                }

                if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
                        return NULL;
        }
}

struct request *elv_next_request(struct request_queue *q)
{
        struct request *rq;
        int ret;

        while ((rq = __elv_next_request(q)) != NULL) {
                if (!(rq->cmd_flags & REQ_STARTED)) {
                        /*
                         * This is the first time the device driver
                         * sees this request (possibly after
                         * requeueing).  Notify IO scheduler.
                         */
                        if (blk_sorted_rq(rq))
                                elv_activate_rq(q, rq);

                        /*
                         * just mark as started even if we don't start
                         * it, a request that has been delayed should
                         * not be passed by new incoming requests
                         */
                        rq->cmd_flags |= REQ_STARTED;
                        trace_block_rq_issue(q, rq);
                }

                if (!q->boundary_rq || q->boundary_rq == rq) {
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = NULL;
                }

                if (rq->cmd_flags & REQ_DONTPREP)
                        break;

                if (q->dma_drain_size && rq->data_len) {
                        /*
                         * make sure space for the drain appears; we
                         * know we can do this because max_hw_segments
                         * has been adjusted to be one fewer than the
                         * device can handle
                         */
                        rq->nr_phys_segments++;
                }

                if (!q->prep_rq_fn)
                        break;

                ret = q->prep_rq_fn(q, rq);
                if (ret == BLKPREP_OK) {
                        break;
                } else if (ret == BLKPREP_DEFER) {
                        /*
                         * the request may have been (partially) prepped.
                         * we need to keep this request in the front to
                         * avoid resource deadlock.  REQ_STARTED will
                         * prevent other fs requests from passing this one.
                         */
                        if (q->dma_drain_size && rq->data_len &&
                            !(rq->cmd_flags & REQ_DONTPREP)) {
                                /*
                                 * remove the space for the drain we added
                                 * so that we don't add it again
                                 */
                                --rq->nr_phys_segments;
                        }

                        rq = NULL;
                        break;
                } else if (ret == BLKPREP_KILL) {
                        rq->cmd_flags |= REQ_QUIET;
                        __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
                } else {
                        printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
                        break;
                }
        }

        return rq;
}
EXPORT_SYMBOL(elv_next_request);

void elv_dequeue_request(struct request_queue *q, struct request *rq)
{
        BUG_ON(list_empty(&rq->queuelist));
        BUG_ON(ELV_ON_HASH(rq));

        list_del_init(&rq->queuelist);

        /*
         * the time frame between a request being removed from the lists
         * and when it is freed is accounted as io that is in progress at
         * the driver side.
         */
        if (blk_account_rq(rq))
                q->in_flight++;
}

int elv_queue_empty(struct request_queue *q)
{
        struct elevator_queue *e = q->elevator;

        if (!list_empty(&q->queue_head))
                return 0;

        if (e->ops->elevator_queue_empty_fn)
                return e->ops->elevator_queue_empty_fn(q);

        return 1;
}
EXPORT_SYMBOL(elv_queue_empty);

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->ops->elevator_latter_req_fn)
                return e->ops->elevator_latter_req_fn(q, rq);
        return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->ops->elevator_former_req_fn)
                return e->ops->elevator_former_req_fn(q, rq);
        return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
        struct elevator_queue *e = q->elevator;

        if (e->ops->elevator_set_req_fn)
                return e->ops->elevator_set_req_fn(q, rq, gfp_mask);

        rq->elevator_private = NULL;
        return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->ops->elevator_put_req_fn)
                e->ops->elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, int rw)
{
        struct elevator_queue *e = q->elevator;

        if (e->ops->elevator_may_queue_fn)
                return e->ops->elevator_may_queue_fn(q, rw);

        return ELV_MQUEUE_MAY;
}

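/*
 * Fail every request still sitting on the dispatch list with -EIO,
 * for instance when the device has gone away.  Queue lock must be
 * held.
 */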
void elv_abort_queue(struct request_queue *q)
{
        struct request *rq;

        while (!list_empty(&q->queue_head)) {
                rq = list_entry_rq(q->queue_head.next);
                rq->cmd_flags |= REQ_QUIET;
                trace_block_rq_abort(q, rq);
                __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
        }
}
EXPORT_SYMBOL(elv_abort_queue);

void elv_completed_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        /*
         * request is released from the driver, io must be done
         */
        if (blk_account_rq(rq)) {
                q->in_flight--;
                if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
                        e->ops->elevator_completed_req_fn(q, rq);
        }

        /*
         * Check if the queue is waiting for fs requests to be
         * drained for flush sequence.
         */
        if (unlikely(q->ordseq)) {
                struct request *next = NULL;

                if (!list_empty(&q->queue_head))
                        next = list_entry_rq(q->queue_head.next);

                if (!q->in_flight &&
                    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
                    (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) {
                        blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
                        __blk_run_queue(q);
                }
        }
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct elv_fs_entry *entry = to_elv(attr);
        struct elevator_queue *e;
        ssize_t error;

        if (!entry->show)
                return -EIO;

        e = container_of(kobj, struct elevator_queue, kobj);
        mutex_lock(&e->sysfs_lock);
        error = e->ops ? entry->show(e, page) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
               const char *page, size_t length)
{
        struct elv_fs_entry *entry = to_elv(attr);
        struct elevator_queue *e;
        ssize_t error;

        if (!entry->store)
                return -EIO;

        e = container_of(kobj, struct elevator_queue, kobj);
        mutex_lock(&e->sysfs_lock);
        error = e->ops ? entry->store(e, page, length) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}

static struct sysfs_ops elv_sysfs_ops = {
        .show   = elv_attr_show,
        .store  = elv_attr_store,
};

static struct kobj_type elv_ktype = {
        .sysfs_ops      = &elv_sysfs_ops,
        .release        = elevator_release,
};

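/*
 * Register the elevator's "iosched" kobject under the queue kobject
 * and create the scheduler's sysfs attributes, if it has any.
 */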
int elv_register_queue(struct request_queue *q)
{
        struct elevator_queue *e = q->elevator;
        int error;

        error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
        if (!error) {
                struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
                if (attr) {
                        while (attr->attr.name) {
                                if (sysfs_create_file(&e->kobj, &attr->attr))
                                        break;
                                attr++;
                        }
                }
                kobject_uevent(&e->kobj, KOBJ_ADD);
        }
        return error;
}

static void __elv_unregister_queue(struct elevator_queue *e)
{
        kobject_uevent(&e->kobj, KOBJ_REMOVE);
        kobject_del(&e->kobj);
}

void elv_unregister_queue(struct request_queue *q)
{
        if (q)
                __elv_unregister_queue(q->elevator);
}

void elv_register(struct elevator_type *e)
{
        char *def = "";

        spin_lock(&elv_list_lock);
        BUG_ON(elevator_find(e->elevator_name));
        list_add_tail(&e->list, &elv_list);
        spin_unlock(&elv_list_lock);

        if (!strcmp(e->elevator_name, chosen_elevator) ||
                        (!*chosen_elevator &&
                         !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
                                def = " (default)";

        printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
                                                                def);
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
        struct task_struct *g, *p;

        /*
         * Iterate over every thread in the system and trim its io
         * context, so no task is left referencing state the exiting
         * scheduler attached to it.
         */
        if (e->ops.trim) {
                read_lock(&tasklist_lock);
                do_each_thread(g, p) {
                        task_lock(p);
                        if (p->io_context)
                                e->ops.trim(p->io_context);
                        task_unlock(p);
                } while_each_thread(g, p);
                read_unlock(&tasklist_lock);
        }

        spin_lock(&elv_list_lock);
        list_del_init(&e->list);
        spin_unlock(&elv_list_lock);
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * Switch to the new_e io scheduler.  Be careful not to introduce
 * deadlocks: we don't free the old io scheduler before we have
 * allocated what we need for the new one, so we have a chance of going
 * back to the old one if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
        struct elevator_queue *old_elevator, *e;
        void *data;

        /*
         * Allocate new elevator
         */
        e = elevator_alloc(q, new_e);
        if (!e)
                return 0;

        data = elevator_init_queue(q, e);
        if (!data) {
                kobject_put(&e->kobj);
                return 0;
        }

        /*
         * Turn on BYPASS and drain all requests w/ elevator private data
         */
        spin_lock_irq(q->queue_lock);
        elv_quiesce_start(q);

        /*
         * Remember old elevator.
         */
        old_elevator = q->elevator;

        /*
         * attach and start new elevator
         */
        elevator_attach(q, e, data);

        spin_unlock_irq(q->queue_lock);

        __elv_unregister_queue(old_elevator);

        if (elv_register_queue(q))
                goto fail_register;

        /*
         * finally exit old elevator and turn off BYPASS.
         */
        elevator_exit(old_elevator);
        spin_lock_irq(q->queue_lock);
        elv_quiesce_end(q);
        spin_unlock_irq(q->queue_lock);

        blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);

        return 1;

fail_register:
        /*
         * switch failed, exit the new io scheduler and reattach the old
         * one again (along with re-adding the sysfs dir)
         */
        elevator_exit(e);
        q->elevator = old_elevator;
        elv_register_queue(q);

        spin_lock_irq(q->queue_lock);
        queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
        spin_unlock_irq(q->queue_lock);

        return 0;
}

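/*
 * sysfs store handler for /sys/block/<dev>/queue/scheduler: look up
 * the requested scheduler and switch the queue over to it.  Note that
 * a failed switch is only logged; the write still returns success.
 */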
ssize_t elv_iosched_store(struct request_queue *q, const char *name,
                          size_t count)
{
        char elevator_name[ELV_NAME_MAX];
        struct elevator_type *e;

        strlcpy(elevator_name, name, sizeof(elevator_name));
        strstrip(elevator_name);

        e = elevator_get(elevator_name);
        if (!e) {
                printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
                return -EINVAL;
        }

        if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
                elevator_put(e);
                return count;
        }

        if (!elevator_switch(q, e))
                printk(KERN_ERR "elevator: switch to %s failed\n",
                                                        elevator_name);
        return count;
}

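/*
 * sysfs show handler: list every registered scheduler, with the one
 * currently active on this queue in square brackets.
 */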
ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
        struct elevator_queue *e = q->elevator;
        struct elevator_type *elv = e->elevator_type;
        struct elevator_type *__e;
        int len = 0;

        spin_lock(&elv_list_lock);
        list_for_each_entry(__e, &elv_list, list) {
                if (!strcmp(elv->elevator_name, __e->elevator_name))
                        len += sprintf(name+len, "[%s] ", elv->elevator_name);
                else
                        len += sprintf(name+len, "%s ", __e->elevator_name);
        }
        spin_unlock(&elv_list_lock);

        len += sprintf(len+name, "\n");
        return len;
}

struct request *elv_rb_former_request(struct request_queue *q,
                                      struct request *rq)
{
        struct rb_node *rbprev = rb_prev(&rq->rb_node);

        if (rbprev)
                return rb_entry_rq(rbprev);

        return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
                                      struct request *rq)
{
        struct rb_node *rbnext = rb_next(&rq->rb_node);

        if (rbnext)
                return rb_entry_rq(rbnext);

        return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);