/*
 * Block device elevator/IO-scheduler.
 *
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <trace/block.h>
#include <linux/hash.h>
#include <linux/uaccess.h>

#include "blk.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

DEFINE_TRACE(block_rq_abort);

/*
 * Merge hash stuff.
 */
static const int elv_hash_shift = 6;
#define ELV_HASH_BLOCK(sec)	((sec) >> 3)
#define ELV_HASH_FN(sec) \
		(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
#define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))

DEFINE_TRACE(block_rq_insert);
DEFINE_TRACE(block_rq_issue);
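
/*
 * Worked example (illustrative only): a request covering sectors
 * [2048, 2056) has rq_hash_key() == 2056, i.e. the first sector
 * *after* the request, so a bio starting at sector 2056 is a back
 * merge candidate.  Looking it up means searching hash bucket
 * ELV_HASH_FN(2056) == hash_long(2056 >> 3, elv_hash_shift).
 */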

/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_allow_merge_fn)
		return e->ops->elevator_allow_merge_fn(q, rq, bio);

	return 1;
}

/*
 * can we safely merge with this request?
 */
int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq))
		return 0;

	/*
	 * Don't merge file system requests and discard requests
	 */
	if (bio_discard(bio) != bio_discard(rq->bio))
		return 0;

	/*
	 * different data direction or already started, don't merge
	 */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return 0;

	/*
	 * must be same device and not a special request
	 */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
		return 0;

	/*
	 * only merge integrity protected bio into ditto rq
	 */
	if (bio_integrity(bio) != blk_integrity_rq(rq))
		return 0;

	if (!elv_iosched_allow_merge(rq, bio))
		return 0;

	return 1;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

static inline int elv_try_merge(struct request *__rq, struct bio *bio)
{
	int ret = ELEVATOR_NO_MERGE;

	/*
	 * we can merge and sequence is ok, check if it's possible
	 */
	if (elv_rq_merge_ok(__rq, bio)) {
		if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
			ret = ELEVATOR_BACK_MERGE;
		else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
			ret = ELEVATOR_FRONT_MERGE;
	}

	return ret;
}
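
/*
 * Merge classification by example (illustrative only): for __rq
 * covering sectors [100, 108), i.e. sector == 100, nr_sectors == 8:
 *
 *	- a bio with bi_sector == 108 satisfies
 *	  __rq->sector + __rq->nr_sectors == bio->bi_sector and is
 *	  reported as ELEVATOR_BACK_MERGE;
 *	- an 8-sector bio with bi_sector == 92 satisfies
 *	  __rq->sector - bio_sectors(bio) == bio->bi_sector and is
 *	  reported as ELEVATOR_FRONT_MERGE;
 *	- anything else yields ELEVATOR_NO_MERGE.
 */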

static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (!strcmp(e->elevator_name, name))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(const char *name)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name);
	if (!e) {
		char elv[ELV_NAME_MAX + strlen("-iosched")];

		spin_unlock(&elv_list_lock);

		if (!strcmp(name, "anticipatory"))
			sprintf(elv, "as-iosched");
		else
			sprintf(elv, "%s-iosched", name);

		request_module("%s", elv);
		spin_lock(&elv_list_lock);
		e = elevator_find(name);
	}

	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);

	return e;
}
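
/*
 * Module loading by example (illustrative only): elevator_get("cfq")
 * first looks for a registered "cfq" elevator; if none is found, it
 * drops elv_list_lock, calls request_module("cfq-iosched") and then
 * searches the list again.  The one special case is "anticipatory",
 * whose module is named "as-iosched".
 */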

static void *elevator_init_queue(struct request_queue *q,
				 struct elevator_queue *eq)
{
	return eq->ops->elevator_init_fn(q);
}

static void elevator_attach(struct request_queue *q, struct elevator_queue *eq,
			    void *data)
{
	q->elevator = eq;
	eq->elevator_data = data;
}

static char chosen_elevator[16];

static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	if (!strcmp(str, "as"))
		strcpy(chosen_elevator, "anticipatory");
	else
		strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);

static struct kobj_type elv_ktype;

static struct elevator_queue *elevator_alloc(struct request_queue *q,
					     struct elevator_type *e)
{
	struct elevator_queue *eq;
	int i;

	eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
	if (unlikely(!eq))
		goto err;

	eq->ops = &e->ops;
	eq->elevator_type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);

	eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
				GFP_KERNEL, q->node);
	if (!eq->hash)
		goto err;

	for (i = 0; i < ELV_HASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&eq->hash[i]);

	return eq;
err:
	kfree(eq);
	elevator_put(e);
	return NULL;
}

static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->elevator_type);
	kfree(e->hash);
	kfree(e);
}
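
/*
 * Selection order in elevator_init() below, by example (illustrative
 * only): a queue initialized with name == NULL on a kernel booted
 * with "elevator=deadline" gets deadline; without the boot parameter
 * it gets CONFIG_DEFAULT_IOSCHED; and if even that lookup fails, noop
 * is used as the last resort.
 */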

int elevator_init(struct request_queue *q, char *name)
{
	struct elevator_type *e = NULL;
	struct elevator_queue *eq;
	int ret = 0;
	void *data;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name) {
		e = elevator_get(name);
		if (!e)
			return -EINVAL;
	}

	if (!e && *chosen_elevator) {
		e = elevator_get(chosen_elevator);
		if (!e)
			printk(KERN_ERR "I/O scheduler %s not found\n",
							chosen_elevator);
	}

	if (!e) {
		e = elevator_get(CONFIG_DEFAULT_IOSCHED);
		if (!e) {
			printk(KERN_ERR
				"Default I/O scheduler not found. "
				"Using noop.\n");
			e = elevator_get("noop");
		}
	}

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	data = elevator_init_queue(q, eq);
	if (!data) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}

	elevator_attach(q, eq, data);
	return ret;
}
EXPORT_SYMBOL(elevator_init);

void elevator_exit(struct elevator_queue *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->ops->elevator_exit_fn)
		e->ops->elevator_exit_fn(e);
	e->ops = NULL;
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);

static void elv_activate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_activate_req_fn)
		e->ops->elevator_activate_req_fn(q, rq);
}

static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_deactivate_req_fn)
		e->ops->elevator_deactivate_req_fn(q, rq);
}

static inline void __elv_rqhash_del(struct request *rq)
{
	hlist_del_init(&rq->hash);
}

static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}

static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
}

static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
	struct hlist_node *entry, *next;
	struct request *rq;

	hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
struct request *elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (rq->sector < __rq->sector)
			p = &(*p)->rb_left;
		else if (rq->sector > __rq->sector)
			p = &(*p)->rb_right;
		else
			return __rq;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
	return NULL;
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < rq->sector)
			n = n->rb_left;
		else if (sector > rq->sector)
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
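
/*
 * Usage sketch (illustrative only), assuming an io scheduler that
 * keeps its pending requests in a local struct rb_root sort_list:
 *
 *	struct request *__alias = elv_rb_add(&sort_list, rq);
 *
 * returns NULL on success, or the already-inserted request starting
 * at the same sector;
 *
 *	rq = elv_rb_find(&sort_list, sector);
 *
 * returns NULL unless a request starts exactly at sector; and
 *
 *	elv_rb_del(&sort_list, rq);
 *
 * removes rq again, e.g. before moving it to the dispatch queue.
 */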

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;
	int stop_flags;

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;
	stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED;
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if (blk_discard_rq(rq) != blk_discard_rq(pos))
			break;
		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
		if (pos->cmd_flags & stop_flags)
			break;
		if (rq->sector >= boundary) {
			if (pos->sector < boundary)
				continue;
		} else {
			if (pos->sector >= boundary)
				break;
		}
		if (rq->sector >= pos->sector)
			break;
	}

	list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);
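
/*
 * Worked example for elv_dispatch_sort() above (illustrative only):
 * with boundary q->end_sector == 500 and a dispatch list holding
 * sectors 600, 700, 120 (one ascending sweep that wraps past the
 * boundary), a request at sector 650 is inserted between 600 and 700,
 * and a request at sector 100 between 700 and 120, preserving the
 * single elevator pass that starts at the boundary.
 */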

int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;
	int ret;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge) {
		ret = elv_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_nomerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_sector);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->ops->elevator_merge_fn)
		return e->ops->elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}

void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_merged_fn)
		e->ops->elevator_merged_fn(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			struct request *next)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_merge_req_fn)
		e->ops->elevator_merge_req_fn(q, rq, next);

	elv_rqhash_reposition(q, rq);
	elv_rqhash_del(q, next);

	q->nr_sorted--;
	q->last_merge = rq;
}

void elv_requeue_request(struct request_queue *q, struct request *rq)
{
	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight--;
		if (blk_sorted_rq(rq))
			elv_deactivate_rq(q, rq);
	}

	rq->cmd_flags &= ~REQ_STARTED;

	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
}

void elv_drain_elevator(struct request_queue *q)
{
	static int printed;
	while (q->elevator->ops->elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted == 0)
		return;
	if (printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->elevator_type->elevator_name, q->nr_sorted);
	}
}

/*
 * Call with queue lock held, interrupts disabled
 */
void elv_quiesce_start(struct request_queue *q)
{
	queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);

	/*
	 * make sure we don't have any requests in flight
	 */
	elv_drain_elevator(q);
	while (q->rq.elvpriv) {
		blk_start_queueing(q);
		spin_unlock_irq(q->queue_lock);
		msleep(10);
		spin_lock_irq(q->queue_lock);
		elv_drain_elevator(q);
	}
}

void elv_quiesce_end(struct request_queue *q)
{
	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
}
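
/*
 * Usage sketch (illustrative only; this is the pattern that
 * elevator_switch() below follows):
 *
 *	spin_lock_irq(q->queue_lock);
 *	elv_quiesce_start(q);		(drains; may drop and retake
 *					 the queue lock while sleeping)
 *	... swap q->elevator ...
 *	spin_unlock_irq(q->queue_lock);
 *	...
 *	spin_lock_irq(q->queue_lock);
 *	elv_quiesce_end(q);
 *	spin_unlock_irq(q->queue_lock);
 */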

void elv_insert(struct request_queue *q, struct request *rq, int where)
{
	struct list_head *pos;
	unsigned ordseq;
	int unplug_it = 1;

	trace_block_rq_insert(q, rq);

	rq->q = q;

	switch (where) {
	case ELEVATOR_INSERT_FRONT:
		rq->cmd_flags |= REQ_SOFTBARRIER;

		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		blk_remove_plug(q);
		blk_start_queueing(q);
		break;

	case ELEVATOR_INSERT_SORT:
		BUG_ON(!blk_fs_request(rq) && !blk_discard_rq(rq));
		rq->cmd_flags |= REQ_SORTED;
		q->nr_sorted++;
		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->ops->elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_REQUEUE:
		/*
		 * If ordered flush isn't in progress, we do front
		 * insertion; otherwise, requests should be requeued
		 * in ordseq order.
		 */
		rq->cmd_flags |= REQ_SOFTBARRIER;

		/*
		 * Most requeues happen because of a busy condition,
		 * don't force unplug of the queue for that case.
		 */
		unplug_it = 0;

		if (q->ordseq == 0) {
			list_add(&rq->queuelist, &q->queue_head);
			break;
		}

		ordseq = blk_ordered_req_seq(rq);

		list_for_each(pos, &q->queue_head) {
			struct request *pos_rq = list_entry_rq(pos);
			if (ordseq <= blk_ordered_req_seq(pos_rq))
				break;
		}

		list_add_tail(&rq->queuelist, pos);
		break;

	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __func__, where);
		BUG();
	}

	if (unplug_it && blk_queue_plugged(q)) {
		int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC]
				- q->in_flight;

		if (nrq >= q->unplug_thresh)
			__generic_unplug_device(q);
	}
}
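
/*
 * Insertion types by example (illustrative only): a requeued request
 * re-enters via elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE) (see
 * elv_requeue_request() above); ordinary fs requests normally arrive
 * as ELEVATOR_INSERT_SORT and are handed to the io scheduler; and
 * barriers are forced to ELEVATOR_INSERT_BACK by __elv_add_request()
 * below.
 */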

void __elv_add_request(struct request_queue *q, struct request *rq, int where,
		       int plug)
{
	if (q->ordcolor)
		rq->cmd_flags |= REQ_ORDERED_COLOR;

	if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
		/*
		 * toggle ordered color
		 */
		if (blk_barrier_rq(rq))
			q->ordcolor ^= 1;

		/*
		 * barriers implicitly indicate back insertion
		 */
		if (where == ELEVATOR_INSERT_SORT)
			where = ELEVATOR_INSERT_BACK;

		/*
		 * this request is scheduling boundary, update
		 * end_sector
		 */
		if (blk_fs_request(rq) || blk_discard_rq(rq)) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
		    where == ELEVATOR_INSERT_SORT)
		where = ELEVATOR_INSERT_BACK;

	if (plug)
		blk_plug_device(q);

	elv_insert(q, rq, where);
}
EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where,
		     int plug)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where, plug);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);

static inline struct request *__elv_next_request(struct request_queue *q)
{
	struct request *rq;

	while (1) {
		while (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			if (blk_do_ordered(q, &rq))
				return rq;
		}

		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
			return NULL;
	}
}

struct request *elv_next_request(struct request_queue *q)
{
	struct request *rq;
	int ret;

	while ((rq = __elv_next_request(q)) != NULL) {
		if (!(rq->cmd_flags & REQ_STARTED)) {
			/*
			 * This is the first time the device driver
			 * sees this request (possibly after
			 * requeueing).  Notify IO scheduler.
			 */
			if (blk_sorted_rq(rq))
				elv_activate_rq(q, rq);

			/*
			 * just mark as started even if we don't start
			 * it, a request that has been delayed should
			 * not be passed by new incoming requests
			 */
			rq->cmd_flags |= REQ_STARTED;
			trace_block_rq_issue(q, rq);
		}

		if (!q->boundary_rq || q->boundary_rq == rq) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = NULL;
		}

		if (rq->cmd_flags & REQ_DONTPREP)
			break;

		if (q->dma_drain_size && rq->data_len) {
			/*
			 * make sure space for the drain appears.  We
			 * know we can do this because max_hw_segments
			 * has been adjusted to be one fewer than the
			 * device can handle
			 */
			rq->nr_phys_segments++;
		}

		if (!q->prep_rq_fn)
			break;

		ret = q->prep_rq_fn(q, rq);
		if (ret == BLKPREP_OK) {
			break;
		} else if (ret == BLKPREP_DEFER) {
			/*
			 * the request may have been (partially) prepped.
			 * we need to keep this request in the front to
			 * avoid resource deadlock.  REQ_STARTED will
			 * prevent other fs requests from passing this one.
			 */
			if (q->dma_drain_size && rq->data_len &&
			    !(rq->cmd_flags & REQ_DONTPREP)) {
				/*
				 * remove the space for the drain we added
				 * so that we don't add it again
				 */
				--rq->nr_phys_segments;
			}

			rq = NULL;
			break;
		} else if (ret == BLKPREP_KILL) {
			rq->cmd_flags |= REQ_QUIET;
			__blk_end_request(rq, -EIO, blk_rq_bytes(rq));
		} else {
			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
			break;
		}
	}

	return rq;
}
EXPORT_SYMBOL(elv_next_request);
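
/*
 * Prep outcomes by example (illustrative only): a driver's prep_rq_fn
 * returning BLKPREP_OK hands the request out to the caller;
 * BLKPREP_DEFER leaves it at the head (REQ_STARTED keeps later fs
 * requests from passing it) and makes elv_next_request() return NULL;
 * BLKPREP_KILL completes the request with -EIO and moves on to the
 * next one.
 */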

void elv_dequeue_request(struct request_queue *q, struct request *rq)
{
	BUG_ON(list_empty(&rq->queuelist));
	BUG_ON(ELV_ON_HASH(rq));

	list_del_init(&rq->queuelist);

	/*
	 * the time frame between a request being removed from the lists
	 * and when it is freed is accounted as io that is in progress at
	 * the driver side.
	 */
	if (blk_account_rq(rq))
		q->in_flight++;
}

int elv_queue_empty(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;

	if (!list_empty(&q->queue_head))
		return 0;

	if (e->ops->elevator_queue_empty_fn)
		return e->ops->elevator_queue_empty_fn(q);

	return 1;
}
EXPORT_SYMBOL(elv_queue_empty);

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_latter_req_fn)
		return e->ops->elevator_latter_req_fn(q, rq);
	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_former_req_fn)
		return e->ops->elevator_former_req_fn(q, rq);
	return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_set_req_fn)
		return e->ops->elevator_set_req_fn(q, rq, gfp_mask);

	rq->elevator_private = NULL;
	return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_put_req_fn)
		e->ops->elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, int rw)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_may_queue_fn)
		return e->ops->elevator_may_queue_fn(q, rw);

	return ELV_MQUEUE_MAY;
}

void elv_abort_queue(struct request_queue *q)
{
	struct request *rq;

	while (!list_empty(&q->queue_head)) {
		rq = list_entry_rq(q->queue_head.next);
		rq->cmd_flags |= REQ_QUIET;
		trace_block_rq_abort(q, rq);
		__blk_end_request(rq, -EIO, blk_rq_bytes(rq));
	}
}
EXPORT_SYMBOL(elv_abort_queue);

void elv_completed_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight--;
		if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
			e->ops->elevator_completed_req_fn(q, rq);
	}

	/*
	 * Check if the queue is waiting for fs requests to be
	 * drained for flush sequence.
	 */
	if (unlikely(q->ordseq)) {
		struct request *next = NULL;

		if (!list_empty(&q->queue_head))
			next = list_entry_rq(q->queue_head.next);

		if (!q->in_flight &&
		    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
		    (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) {
			blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
			blk_start_queueing(q);
		}
	}
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}
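
/*
 * Attribute plumbing by example (illustrative only; the names are made
 * up): an io scheduler exposes tunables through its elevator_type:
 *
 *	static struct elv_fs_entry example_attrs[] = {
 *		__ATTR(quantum, S_IRUGO|S_IWUSR, example_quantum_show,
 *		       example_quantum_store),
 *		__ATTR_NULL
 *	};
 *
 * elv_attr_show()/elv_attr_store() above then route reads and writes
 * of /sys/block/<dev>/queue/iosched/quantum to those handlers under
 * e->sysfs_lock.
 */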

static struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

int elv_register_queue(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;
	int error;

	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
	}
	return error;
}

static void __elv_unregister_queue(struct elevator_queue *e)
{
	kobject_uevent(&e->kobj, KOBJ_REMOVE);
	kobject_del(&e->kobj);
}

void elv_unregister_queue(struct request_queue *q)
{
	if (q)
		__elv_unregister_queue(q->elevator);
}

void elv_register(struct elevator_type *e)
{
	char *def = "";

	spin_lock(&elv_list_lock);
	BUG_ON(elevator_find(e->elevator_name));
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	if (!strcmp(e->elevator_name, chosen_elevator) ||
			(!*chosen_elevator &&
			 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
				def = " (default)";

	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
								def);
}
EXPORT_SYMBOL_GPL(elv_register);
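
/*
 * Registration sketch (illustrative only; the names are made up):
 *
 *	static struct elevator_type iosched_example = {
 *		.ops		= { ... },
 *		.elevator_name	= "example",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		elv_register(&iosched_example);
 *		return 0;
 *	}
 *
 * The matching module exit path calls elv_unregister(&iosched_example)
 * (below).
 */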

void elv_unregister(struct elevator_type *e)
{
	struct task_struct *g, *p;

	/*
	 * Iterate over every thread in the system to remove the io contexts.
	 */
	if (e->ops.trim) {
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			task_lock(p);
			if (p->io_context)
				e->ops.trim(p->io_context);
			task_unlock(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler, before we have allocated what we
 * need for the new one. this way we have a chance of going back to the old
 * one, if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	struct elevator_queue *old_elevator, *e;
	void *data;

	/*
	 * Allocate new elevator
	 */
	e = elevator_alloc(q, new_e);
	if (!e)
		return 0;

	data = elevator_init_queue(q, e);
	if (!data) {
		kobject_put(&e->kobj);
		return 0;
	}

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data
	 */
	spin_lock_irq(q->queue_lock);
	elv_quiesce_start(q);

	/*
	 * Remember old elevator.
	 */
	old_elevator = q->elevator;

	/*
	 * attach and start new elevator
	 */
	elevator_attach(q, e, data);

	spin_unlock_irq(q->queue_lock);

	__elv_unregister_queue(old_elevator);

	if (elv_register_queue(q))
		goto fail_register;

	/*
	 * finally exit old elevator and turn off BYPASS.
	 */
	elevator_exit(old_elevator);
	spin_lock_irq(q->queue_lock);
	elv_quiesce_end(q);
	spin_unlock_irq(q->queue_lock);

	blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);

	return 1;

fail_register:
	/*
	 * switch failed, exit the new io scheduler and reattach the old
	 * one again (along with re-adding the sysfs dir)
	 */
	elevator_exit(e);
	q->elevator = old_elevator;
	elv_register_queue(q);

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
	spin_unlock_irq(q->queue_lock);

	return 0;
}

ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;

	strlcpy(elevator_name, name, sizeof(elevator_name));
	strstrip(elevator_name);

	e = elevator_get(elevator_name);
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
		elevator_put(e);
		return count;
	}

	if (!elevator_switch(q, e))
		printk(KERN_ERR "elevator: switch to %s failed\n",
							elevator_name);
	return count;
}

ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	struct elevator_queue *e = q->elevator;
	struct elevator_type *elv = e->elevator_type;
	struct elevator_type *__e;
	int len = 0;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (!strcmp(elv->elevator_name, __e->elevator_name))
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
		else
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	len += sprintf(len+name, "\n");
	return len;
}
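
/*
 * Runtime switching by example (illustrative only): elv_iosched_show()
 * above backs "cat /sys/block/<dev>/queue/scheduler" and prints e.g.
 * "noop anticipatory deadline [cfq]"; elv_iosched_store() backs
 * "echo deadline > /sys/block/<dev>/queue/scheduler", which ends up in
 * elevator_switch().
 */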

struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);