/* xref: /linux/block/blk-settings.c (revision e692cb668fdd5a712c6ed2a2d6f2a36ee83997b4) */
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>

#include "blk.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q:		queue
 * @pfn:	prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback which
 * is invoked before the request is handed to the request_fn. The goal of
 * the function is to prepare a request for I/O; it can be used to build a
 * cdb from the request data, for instance.
 *
 */
void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
	q->prep_rq_fn = pfn;
}
EXPORT_SYMBOL(blk_queue_prep_rq);
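
/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * driver could register a prepare callback that builds its command block
 * before the request reaches ->request_fn.  foo_prep_rq() and
 * foo_build_cdb() are made-up names; BLKPREP_OK/BLKPREP_KILL are the
 * standard return codes:
 *
 *	static int foo_prep_rq(struct request_queue *q, struct request *rq)
 *	{
 *		if (!foo_build_cdb(rq))
 *			return BLKPREP_KILL;
 *		return BLKPREP_OK;
 *	}
 *
 *	blk_queue_prep_rq(q, foo_prep_rq);
 */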

/**
 * blk_queue_unprep_rq - set an unprepare_request function for queue
 * @q:		queue
 * @ufn:	unprepare_request function
 *
 * It's possible for a queue to register an unprepare_request callback
 * which is invoked before the request is finally completed. The goal
 * of the function is to deallocate any data that was allocated in the
 * prepare_request callback.
 *
 */
void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
{
	q->unprep_rq_fn = ufn;
}
EXPORT_SYMBOL(blk_queue_unprep_rq);

/**
 * blk_queue_merge_bvec - set a merge_bvec function for queue
 * @q:		queue
 * @mbfn:	merge_bvec_fn
 *
 * Usually queues have static limitations on the max sectors or segments that
 * we can put in a request. Stacking drivers may have some settings that
 * are dynamic, and thus we have to query the queue whether it is ok to
 * add a new bio_vec to a bio at a given offset or not. If the block device
 * has such limitations, it needs to register a merge_bvec_fn to control
 * the size of bios sent to it. Note that a block device *must* allow a
 * single page to be added to an empty bio. The block device driver may want
 * to use the bio_split() function to deal with these bios. By default
 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
 * honored.
 */
void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
{
	q->merge_bvec_fn = mbfn;
}
EXPORT_SYMBOL(blk_queue_merge_bvec);
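
/*
 * Example (sketch with made-up names): a stacking driver whose internal
 * chunk boundaries must not be crossed could register a merge_bvec_fn
 * along these lines.  The callback returns how many bytes of @biovec may
 * be added at the proposed offset; an empty bio must always be allowed
 * to take at least one page:
 *
 *	static int foo_merge_bvec(struct request_queue *q,
 *				  struct bvec_merge_data *bvm,
 *				  struct bio_vec *biovec)
 *	{
 *		unsigned int max;
 *
 *		max = foo_bytes_to_chunk_end(bvm->bi_sector) - bvm->bi_size;
 *
 *		if (max <= biovec->bv_len && bvm->bi_size == 0)
 *			return biovec->bv_len;
 *		return max;
 *	}
 *
 *	blk_queue_merge_bvec(q, foo_merge_bvec);
 */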

void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
	q->softirq_done_fn = fn;
}
EXPORT_SYMBOL(blk_queue_softirq_done);

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
{
	q->rq_timed_out_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);

void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
{
	q->lld_busy_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);

/**
 * blk_set_default_limits - reset limits to default values
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limits struct to its default state.  Can be used by
 *   stacking drivers like DM that stage table swaps and reuse an
 *   existing device queue.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
	lim->max_segments = BLK_MAX_SEGMENTS;
	lim->max_integrity_segments = 0;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
	lim->max_sectors = BLK_DEF_MAX_SECTORS;
	lim->max_hw_sectors = INT_MAX;
	lim->max_discard_sectors = 0;
	lim->discard_granularity = 0;
	lim->discard_alignment = 0;
	lim->discard_misaligned = 0;
	lim->discard_zeroes_data = -1;
	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
	lim->alignment_offset = 0;
	lim->io_opt = 0;
	lim->misaligned = 0;
	lim->cluster = 1;
}
EXPORT_SYMBOL(blk_set_default_limits);
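
/*
 * Example (sketch): a stacking driver building a new table can start from
 * pristine limits and then fold in each component device before committing
 * the result to its queue.  ti_limits is a hypothetical name:
 *
 *	struct queue_limits ti_limits;
 *
 *	blk_set_default_limits(&ti_limits);
 *	(then combine each component with bdev_stack_limits(), see below)
 */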

/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q:  the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *    The normal way for &struct bios to be passed to a device
 *    driver is for them to be collected into requests on a request
 *    queue, and then to allow the device driver to select requests
 *    off that queue when it is ready.  This works well for many block
 *    devices. However, some block devices (typically virtual devices
 *    such as md or lvm) do not benefit from the processing on the
 *    request queue, and are served best by having the requests passed
 *    directly to them.  This can be achieved by providing a function
 *    to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately
 *    with buffers in "highmem". This can be accomplished by either calling
 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 *    blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
	/*
	 * set defaults
	 */
	q->nr_requests = BLKDEV_MAX_RQ;

	q->make_request_fn = mfn;
	blk_queue_dma_alignment(q, 511);
	blk_queue_congestion_threshold(q);
	q->nr_batching = BLK_BATCH_REQ;

	q->unplug_thresh = 4;		/* hmm */
	q->unplug_delay = msecs_to_jiffies(3);	/* 3 milliseconds */
	if (q->unplug_delay == 0)
		q->unplug_delay = 1;

	q->unplug_timer.function = blk_unplug_timeout;
	q->unplug_timer.data = (unsigned long)q;

	blk_set_default_limits(&q->limits);
	blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);

	/*
	 * If the caller didn't supply a lock, fall back to our embedded
	 * per-queue locks
	 */
	if (!q->queue_lock)
		q->queue_lock = &q->__queue_lock;

	/*
	 * by default assume old behaviour and bounce for any highmem page
	 */
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
EXPORT_SYMBOL(blk_queue_make_request);
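
/*
 * Example (sketch, made-up names): a bio-based virtual driver such as a
 * ramdisk typically allocates a bare queue and installs its own
 * make_request function instead of using the request queue machinery:
 *
 *	static int foo_make_request(struct request_queue *q, struct bio *bio)
 *	{
 *		foo_handle_bio(bio);
 *		return 0;
 *	}
 *
 *	q = blk_alloc_queue(GFP_KERNEL);
 *	blk_queue_make_request(q, foo_make_request);
 */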

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @dma_mask: the maximum address the device can handle
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @dma_mask.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
{
	unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
	int dma = 0;

	q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
	/*
	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
	 * some IOMMUs can handle everything, but I don't know of a
	 * way to test this here.
	 */
	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
		dma = 1;
	q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
#else
	if (b_pfn < blk_max_low_pfn)
		dma = 1;
	q->limits.bounce_pfn = b_pfn;
#endif
	if (dma) {
		init_emergency_isa_pool();
		q->bounce_gfp = GFP_NOIO | GFP_DMA;
		q->limits.bounce_pfn = b_pfn;
	}
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
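
/*
 * Example (sketch): a driver whose hardware can only address the low
 * 32 bits of memory would ask the block layer to bounce anything above
 * 4GB:
 *
 *	blk_queue_bounce_limit(q, DMA_BIT_MASK(32));
 *
 * Drivers that can reach all of memory pass BLK_BOUNCE_ANY instead.
 */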

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
 *    the device driver based upon the combined capabilities of I/O
 *    controller and storage device.
 *
 *    max_sectors is a soft limit imposed by the block layer for
 *    filesystem type requests.  This value can be overridden on a
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit cannot exceed max_hw_sectors.
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
	if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
		max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_hw_sectors);
	}

	q->limits.max_hw_sectors = max_hw_sectors;
	q->limits.max_sectors = min_t(unsigned int, max_hw_sectors,
				      BLK_DEF_MAX_SECTORS);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
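
/*
 * Example (sketch): a controller that cannot transfer more than 1MB per
 * command would cap requests at 2048 512-byte sectors:
 *
 *	blk_queue_max_hw_sectors(q, 2048);
 *
 * The block layer then clamps the soft max_sectors limit to
 * BLK_DEF_MAX_SECTORS or max_hw_sectors, whichever is smaller.
 */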

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q:  the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors)
{
	q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_CACHE_SIZE) {
		max_size = PAGE_CACHE_SIZE;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_size);
	}

	q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
{
	q->limits.logical_block_size = size;

	if (q->limits.physical_block_size < size)
		q->limits.physical_block_size = size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);
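
/*
 * Example (sketch): a 4K-native device that cannot address individual
 * 512-byte sectors would report a 4096-byte logical block size.  Note
 * that this also pulls physical_block_size and io_min up to at least
 * 4096:
 *
 *	blk_queue_logical_block_size(q, 4096);
 */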

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
	q->limits.physical_block_size = size;

	if (q->limits.physical_block_size < q->limits.logical_block_size)
		q->limits.physical_block_size = q->limits.logical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:	the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
	q->limits.alignment_offset =
		offset & (q->limits.physical_block_size - 1);
	q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);
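
/*
 * Example (sketch): a 512-byte emulated drive with 4KiB physical blocks
 * that are shifted to line up with the legacy 63-sector partition start
 * might, for instance, report that the first physical block boundary
 * falls 3584 bytes into the device:
 *
 *	blk_queue_physical_block_size(q, 4096);
 *	blk_queue_alignment_offset(q, 3584);
 */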

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:	the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
	blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
	limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:	the request queue for the device
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
	blk_limits_io_opt(&q->limits, opt);
}
EXPORT_SYMBOL(blk_queue_io_opt);
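
/*
 * Example (sketch): a RAID5 array built with a 64KiB chunk size and four
 * data disks would typically export the chunk size as the minimum I/O
 * and the full stripe width as the optimal I/O:
 *
 *	blk_queue_io_min(q, 64 * 1024);
 *	blk_queue_io_opt(q, 4 * 64 * 1024);
 */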

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t:	the stacking driver (top)
 * @b:  the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
	blk_stack_limits(&t->limits, &b->limits, 0);
}
EXPORT_SYMBOL(blk_queue_stack_limits);

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		     sector_t start)
{
	unsigned int top, bottom, alignment, ret = 0;

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
						 b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	t->misaligned |= b->misaligned;

	alignment = queue_limit_alignment_offset(b, start);

	/* Bottom device has different alignment.  Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) & (min(top, bottom) - 1)) {
			t->misaligned = 1;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm(t->io_opt, b->io_opt);

	t->cluster &= b->cluster;
	t->discard_zeroes_data &= b->discard_zeroes_data;

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->misaligned = 1;
		ret = -1;
	}

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm(t->alignment_offset, alignment)
		& (max(t->physical_block_size, t->io_min) - 1);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->misaligned = 1;
		ret = -1;
	}

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		if (t->discard_granularity != 0 &&
		    t->discard_alignment != alignment) {
			top = t->discard_granularity + t->discard_alignment;
			bottom = b->discard_granularity + alignment;

			/* Verify that top and bottom intervals line up */
			if (max(top, bottom) & (min(top, bottom) - 1))
				t->discard_misaligned = 1;
		}

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm(t->discard_alignment, alignment) &
			(t->discard_granularity - 1);
	}

	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);
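
/*
 * Example (sketch, hypothetical loop and names): a stacking driver
 * combines its component devices one by one and checks whether the
 * result is still properly aligned:
 *
 *	struct queue_limits lim;
 *	int i;
 *
 *	blk_set_default_limits(&lim);
 *
 *	for (i = 0; i < nr_devs; i++)
 *		if (blk_stack_limits(&lim, &devs[i]->queue->limits,
 *				     data_start[i]) < 0)
 *			printk(KERN_WARNING "component %d misaligned\n", i);
 */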

/**
 * bdev_stack_limits - adjust queue limits for stacked drivers
 * @t:	the stacking driver limits (top device)
 * @bdev:  the component block_device (bottom)
 * @start:  first data sector within component device
 *
 * Description:
 *    Merges queue limits for a top device and a block_device.  Returns
 *    0 if alignment didn't change.  Returns -1 if adding the bottom
 *    device caused misalignment.
 */
int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
		      sector_t start)
{
	struct request_queue *bq = bdev_get_queue(bdev);

	start += get_start_sect(bdev);

	return blk_stack_limits(t, &bq->limits, start);
}
EXPORT_SYMBOL(bdev_stack_limits);

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges the limits for a top level gendisk and a bottom level
 *    block_device.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
		       sector_t offset)
{
	struct request_queue *t = disk->queue;

	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

		disk_name(disk, 0, top);
		bdevname(bdev, bottom);

		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
		       top, bottom);
	}
}
EXPORT_SYMBOL(disk_stack_limits);
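
/*
 * Example (sketch): an MD-style driver adding a member disk whose data
 * area starts data_offset bytes into the component device would call:
 *
 *	disk_stack_limits(mddev_gendisk, rdev_bdev, data_offset);
 *
 * mddev_gendisk, rdev_bdev and data_offset are placeholders for the
 * driver's own gendisk, component block_device and byte offset.
 */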

/**
 * blk_queue_dma_pad - set pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Set dma pad mask.
 *
 * Appending a pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
	q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_dma_pad);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending a pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);
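
/*
 * Example (sketch): a controller that requires transfer lengths to be
 * padded to a multiple of 4 bytes would set a pad mask of 3:
 *
 *	blk_queue_update_dma_pad(q, 4 - 1);
 */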

/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q:  the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf:	physically contiguous buffer
 * @size:	size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer.  They have to have a
 * real area of memory to transfer it into.  The use case for this is
 * ATAPI devices in DMA mode.  If the packet command causes a transfer
 * bigger than the transfer size, some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer.  What this API
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
 * Note: This routine adjusts max_segments to make room for appending
 * the drain buffer.  If you call blk_queue_max_segments() after calling
 * this routine, you must set the limit to one fewer than your device
 * can support, otherwise there won't be room for the drain buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size)
{
	if (queue_max_segments(q) < 2)
		return -EINVAL;
	/* make room for appending the drain */
	blk_queue_max_segments(q, queue_max_segments(q) - 1);
	q->dma_drain_needed = dma_drain_needed;
	q->dma_drain_buffer = buf;
	q->dma_drain_size = size;

	return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
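
/*
 * Example (sketch, loosely modelled on how libata uses this hook): an
 * ATAPI driver allocates a small buffer and a predicate that requests a
 * drain for packet commands.  foo_drain_needed() and FOO_DRAIN_SIZE are
 * made-up names:
 *
 *	static int foo_drain_needed(struct request *rq)
 *	{
 *		return rq->cmd_type == REQ_TYPE_BLOCK_PC;
 *	}
 *
 *	buf = kmalloc(FOO_DRAIN_SIZE, GFP_KERNEL);
 *	blk_queue_dma_drain(q, foo_drain_needed, buf, FOO_DRAIN_SIZE);
 */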

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_CACHE_SIZE - 1) {
		mask = PAGE_CACHE_SIZE - 1;
		printk(KERN_INFO "%s: set to minimum %lx\n",
		       __func__, mask);
	}

	q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Set required memory and length alignment for direct DMA transactions.
 *    This is used when building direct I/O requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Update required memory and length alignment for direct DMA transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->dma_alignment)
		q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);

/**
 * blk_queue_flush - configure queue's cache flush capability
 * @q:		the request queue for the device
 * @flush:	0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
 *
 * Tell the block layer about the cache flush capability of @q.  If it
 * supports flushing, REQ_FLUSH should be set.  If it supports bypassing
 * the write cache for individual writes, REQ_FUA should be set.
 */
void blk_queue_flush(struct request_queue *q, unsigned int flush)
{
	WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));

	if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
		flush &= ~REQ_FUA;

	q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
}
EXPORT_SYMBOL_GPL(blk_queue_flush);
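
/*
 * Example (sketch): a driver for a device with a volatile write cache
 * that also honors FUA writes would advertise both capabilities; a
 * device without a write cache would simply pass 0:
 *
 *	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
 */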

static int __init blk_settings_init(void)
{
	blk_max_low_pfn = max_low_pfn - 1;
	blk_max_pfn = max_pfn - 1;
	return 0;
}
subsys_initcall(blk_settings_init);