xref: /linux/block/blk-settings.c (revision c49825facfd4969585224a896a5e717f88450cad)
186db1e29SJens Axboe /*
286db1e29SJens Axboe  * Functions related to setting various queue properties from drivers
386db1e29SJens Axboe  */
486db1e29SJens Axboe #include <linux/kernel.h>
586db1e29SJens Axboe #include <linux/module.h>
686db1e29SJens Axboe #include <linux/init.h>
786db1e29SJens Axboe #include <linux/bio.h>
886db1e29SJens Axboe #include <linux/blkdev.h>
986db1e29SJens Axboe #include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
1070dd5bf3SMartin K. Petersen #include <linux/gcd.h>
112cda2728SMartin K. Petersen #include <linux/lcm.h>
12ad5ebd2fSRandy Dunlap #include <linux/jiffies.h>
135a0e3ad6STejun Heo #include <linux/gfp.h>
1486db1e29SJens Axboe 
1586db1e29SJens Axboe #include "blk.h"
1686db1e29SJens Axboe 
176728cb0eSJens Axboe unsigned long blk_max_low_pfn;
1886db1e29SJens Axboe EXPORT_SYMBOL(blk_max_low_pfn);
196728cb0eSJens Axboe 
206728cb0eSJens Axboe unsigned long blk_max_pfn;
2186db1e29SJens Axboe 
2286db1e29SJens Axboe /**
2386db1e29SJens Axboe  * blk_queue_prep_rq - set a prepare_request function for queue
2486db1e29SJens Axboe  * @q:		queue
2586db1e29SJens Axboe  * @pfn:	prepare_request function
2686db1e29SJens Axboe  *
2786db1e29SJens Axboe  * It's possible for a queue to register a prepare_request callback which
2886db1e29SJens Axboe  * is invoked before the request is handed to the request_fn. The goal of
2986db1e29SJens Axboe  * the function is to prepare a request for I/O; it can be used to build a
3086db1e29SJens Axboe  * CDB from the request data, for instance.
3186db1e29SJens Axboe  *
3286db1e29SJens Axboe  */
3386db1e29SJens Axboe void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
3486db1e29SJens Axboe {
3586db1e29SJens Axboe 	q->prep_rq_fn = pfn;
3686db1e29SJens Axboe }
3786db1e29SJens Axboe EXPORT_SYMBOL(blk_queue_prep_rq);
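
/*
 * Example (editor's sketch, not part of the original file): a driver
 * might install a prepare_request callback roughly as follows; the
 * callback name and the command layout are hypothetical.
 *
 *	static int example_prep_rq(struct request_queue *q, struct request *rq)
 *	{
 *		// build the command block (e.g. a CDB) from rq here
 *		rq->cmd_flags |= REQ_DONTPREP;	// prepare only once
 *		return BLKPREP_OK;		// or BLKPREP_DEFER/BLKPREP_KILL
 *	}
 *
 *	blk_queue_prep_rq(q, example_prep_rq);
 */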
3886db1e29SJens Axboe 
3986db1e29SJens Axboe /**
4028018c24SJames Bottomley  * blk_queue_unprep_rq - set an unprepare_request function for queue
4128018c24SJames Bottomley  * @q:		queue
4228018c24SJames Bottomley  * @ufn:	unprepare_request function
4328018c24SJames Bottomley  *
4428018c24SJames Bottomley  * It's possible for a queue to register an unprepare_request callback
4528018c24SJames Bottomley  * which is invoked before the request is finally completed. The goal
4628018c24SJames Bottomley  * of the function is to deallocate any data that was allocated in the
4728018c24SJames Bottomley  * prepare_request callback.
4828018c24SJames Bottomley  *
4928018c24SJames Bottomley  */
5028018c24SJames Bottomley void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
5128018c24SJames Bottomley {
5228018c24SJames Bottomley 	q->unprep_rq_fn = ufn;
5328018c24SJames Bottomley }
5428018c24SJames Bottomley EXPORT_SYMBOL(blk_queue_unprep_rq);
5528018c24SJames Bottomley 
5628018c24SJames Bottomley /**
5786db1e29SJens Axboe  * blk_queue_merge_bvec - set a merge_bvec function for queue
5886db1e29SJens Axboe  * @q:		queue
5986db1e29SJens Axboe  * @mbfn:	merge_bvec_fn
6086db1e29SJens Axboe  *
6186db1e29SJens Axboe  * Usually queues have static limitations on the max sectors or segments that
6286db1e29SJens Axboe  * we can put in a request. Stacking drivers may have some settings that
6386db1e29SJens Axboe  * are dynamic, and thus we have to query the queue whether it is OK to
6486db1e29SJens Axboe  * add a new bio_vec to a bio at a given offset or not. If the block device
6586db1e29SJens Axboe  * has such limitations, it needs to register a merge_bvec_fn to control
6686db1e29SJens Axboe  * the size of bios sent to it. Note that a block device *must* allow a
6786db1e29SJens Axboe  * single page to be added to an empty bio. The block device driver may want
6886db1e29SJens Axboe  * to use the bio_split() function to deal with these bios. By default
6986db1e29SJens Axboe  * no merge_bvec_fn is defined for a queue, and only the fixed limits are
7086db1e29SJens Axboe  * honored.
7186db1e29SJens Axboe  */
7286db1e29SJens Axboe void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
7386db1e29SJens Axboe {
7486db1e29SJens Axboe 	q->merge_bvec_fn = mbfn;
7586db1e29SJens Axboe }
7686db1e29SJens Axboe EXPORT_SYMBOL(blk_queue_merge_bvec);
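
/*
 * Example (editor's sketch): a stacking driver with a dynamic boundary
 * could bound additions like this.  example_max_bytes() is a
 * hypothetical helper returning how many bytes may start at @bi_sector.
 *
 *	static int example_merge_bvec(struct request_queue *q,
 *				      struct bvec_merge_data *bvm,
 *				      struct bio_vec *biovec)
 *	{
 *		unsigned int max = example_max_bytes(bvm->bi_bdev,
 *						     bvm->bi_sector);
 *
 *		if (bvm->bi_size + biovec->bv_len > max)
 *			// still honour the one-page-in-an-empty-bio rule
 *			return bvm->bi_size ? 0 : biovec->bv_len;
 *		return biovec->bv_len;	// bytes of @biovec accepted
 *	}
 *
 *	blk_queue_merge_bvec(q, example_merge_bvec);
 */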
7786db1e29SJens Axboe 
7886db1e29SJens Axboe void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
7986db1e29SJens Axboe {
8086db1e29SJens Axboe 	q->softirq_done_fn = fn;
8186db1e29SJens Axboe }
8286db1e29SJens Axboe EXPORT_SYMBOL(blk_queue_softirq_done);
8386db1e29SJens Axboe 
84242f9dcbSJens Axboe void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
85242f9dcbSJens Axboe {
86242f9dcbSJens Axboe 	q->rq_timeout = timeout;
87242f9dcbSJens Axboe }
88242f9dcbSJens Axboe EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
89242f9dcbSJens Axboe 
90242f9dcbSJens Axboe void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
91242f9dcbSJens Axboe {
92242f9dcbSJens Axboe 	q->rq_timed_out_fn = fn;
93242f9dcbSJens Axboe }
94242f9dcbSJens Axboe EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);
95242f9dcbSJens Axboe 
96ef9e3facSKiyoshi Ueda void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
97ef9e3facSKiyoshi Ueda {
98ef9e3facSKiyoshi Ueda 	q->lld_busy_fn = fn;
99ef9e3facSKiyoshi Ueda }
100ef9e3facSKiyoshi Ueda EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
101ef9e3facSKiyoshi Ueda 
10286db1e29SJens Axboe /**
103e475bba2SMartin K. Petersen  * blk_set_default_limits - reset limits to default values
104f740f5caSRandy Dunlap  * @lim:  the queue_limits structure to reset
105e475bba2SMartin K. Petersen  *
106e475bba2SMartin K. Petersen  * Description:
107e475bba2SMartin K. Petersen  *   Returns a queue_limits struct to its default state.  Can be used by
108e475bba2SMartin K. Petersen  *   stacking drivers like DM that stage table swaps and reuse an
109e475bba2SMartin K. Petersen  *   existing device queue.
110e475bba2SMartin K. Petersen  */
111e475bba2SMartin K. Petersen void blk_set_default_limits(struct queue_limits *lim)
112e475bba2SMartin K. Petersen {
1138a78362cSMartin K. Petersen 	lim->max_segments = BLK_MAX_SEGMENTS;
11413f05c8dSMartin K. Petersen 	lim->max_integrity_segments = 0;
115e475bba2SMartin K. Petersen 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
116eb28d31bSMartin K. Petersen 	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
1175dee2477SMartin K. Petersen 	lim->max_sectors = BLK_DEF_MAX_SECTORS;
1185dee2477SMartin K. Petersen 	lim->max_hw_sectors = INT_MAX;
11986b37281SMartin K. Petersen 	lim->max_discard_sectors = 0;
12086b37281SMartin K. Petersen 	lim->discard_granularity = 0;
12186b37281SMartin K. Petersen 	lim->discard_alignment = 0;
12286b37281SMartin K. Petersen 	lim->discard_misaligned = 0;
12398262f27SMartin K. Petersen 	lim->discard_zeroes_data = -1;
124e475bba2SMartin K. Petersen 	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
1253a02c8e8SMartin K. Petersen 	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
126e475bba2SMartin K. Petersen 	lim->alignment_offset = 0;
127e475bba2SMartin K. Petersen 	lim->io_opt = 0;
128e475bba2SMartin K. Petersen 	lim->misaligned = 0;
129e475bba2SMartin K. Petersen 	lim->no_cluster = 0;
130e475bba2SMartin K. Petersen }
131e475bba2SMartin K. Petersen EXPORT_SYMBOL(blk_set_default_limits);
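
/*
 * Example (editor's sketch): a stacking driver rebuilding its table
 * would typically reset a scratch queue_limits before combining the
 * component devices again:
 *
 *	struct queue_limits lim;
 *
 *	blk_set_default_limits(&lim);
 *	// stack each component device into &lim (see blk_stack_limits()
 *	// below), then publish the result to the queue
 */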
132e475bba2SMartin K. Petersen 
133e475bba2SMartin K. Petersen /**
13486db1e29SJens Axboe  * blk_queue_make_request - define an alternate make_request function for a device
13586db1e29SJens Axboe  * @q:  the request queue for the device to be affected
13686db1e29SJens Axboe  * @mfn: the alternate make_request function
13786db1e29SJens Axboe  *
13886db1e29SJens Axboe  * Description:
13986db1e29SJens Axboe  *    The normal way for &struct bios to be passed to a device
14086db1e29SJens Axboe  *    driver is for them to be collected into requests on a request
14186db1e29SJens Axboe  *    queue, and then to allow the device driver to select requests
14286db1e29SJens Axboe  *    off that queue when it is ready.  This works well for many block
14386db1e29SJens Axboe  *    devices. However some block devices (typically virtual devices
14486db1e29SJens Axboe  *    such as md or lvm) do not benefit from the processing on the
14586db1e29SJens Axboe  *    request queue, and are served best by having the requests passed
14686db1e29SJens Axboe  *    directly to them.  This can be achieved by providing a function
14786db1e29SJens Axboe  *    to blk_queue_make_request().
14886db1e29SJens Axboe  *
14986db1e29SJens Axboe  * Caveat:
15086db1e29SJens Axboe  *    The driver that does this *must* be able to deal appropriately
15186db1e29SJens Axboe  *    with buffers in "highmemory". This can be accomplished by either calling
15286db1e29SJens Axboe  *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
15386db1e29SJens Axboe  *    blk_queue_bounce() to create a buffer in normal memory.
15486db1e29SJens Axboe  **/
15586db1e29SJens Axboe void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
15686db1e29SJens Axboe {
15786db1e29SJens Axboe 	/*
15886db1e29SJens Axboe 	 * set defaults
15986db1e29SJens Axboe 	 */
16086db1e29SJens Axboe 	q->nr_requests = BLKDEV_MAX_RQ;
1610e435ac2SMilan Broz 
16286db1e29SJens Axboe 	q->make_request_fn = mfn;
16386db1e29SJens Axboe 	blk_queue_dma_alignment(q, 511);
16486db1e29SJens Axboe 	blk_queue_congestion_threshold(q);
16586db1e29SJens Axboe 	q->nr_batching = BLK_BATCH_REQ;
16686db1e29SJens Axboe 
16786db1e29SJens Axboe 	q->unplug_thresh = 4;		/* arbitrary default */
168ad5ebd2fSRandy Dunlap 	q->unplug_delay = msecs_to_jiffies(3);	/* 3 milliseconds */
16986db1e29SJens Axboe 	if (q->unplug_delay == 0)
17086db1e29SJens Axboe 		q->unplug_delay = 1;
17186db1e29SJens Axboe 
17286db1e29SJens Axboe 	q->unplug_timer.function = blk_unplug_timeout;
17386db1e29SJens Axboe 	q->unplug_timer.data = (unsigned long)q;
17486db1e29SJens Axboe 
175e475bba2SMartin K. Petersen 	blk_set_default_limits(&q->limits);
176086fa5ffSMartin K. Petersen 	blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);
177e475bba2SMartin K. Petersen 
17886db1e29SJens Axboe 	/*
179a4e7d464SJens Axboe 	 * If the caller didn't supply a lock, fall back to our embedded
180a4e7d464SJens Axboe 	 * per-queue locks
181a4e7d464SJens Axboe 	 */
182a4e7d464SJens Axboe 	if (!q->queue_lock)
183a4e7d464SJens Axboe 		q->queue_lock = &q->__queue_lock;
184a4e7d464SJens Axboe 
185a4e7d464SJens Axboe 	/*
18686db1e29SJens Axboe 	 * by default assume old behaviour and bounce for any highmem page
18786db1e29SJens Axboe 	 */
18886db1e29SJens Axboe 	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
18986db1e29SJens Axboe }
19086db1e29SJens Axboe EXPORT_SYMBOL(blk_queue_make_request);
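
/*
 * Example (editor's sketch): a bio-based virtual driver (md/dm style)
 * services bios directly instead of queueing requests.  The handler
 * name is hypothetical.
 *
 *	static int example_make_request(struct request_queue *q,
 *					struct bio *bio)
 *	{
 *		// remap, split or complete @bio here
 *		bio_endio(bio, 0);
 *		return 0;
 *	}
 *
 *	q = blk_alloc_queue(GFP_KERNEL);
 *	if (!q)
 *		return -ENOMEM;
 *	blk_queue_make_request(q, example_make_request);
 */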
19186db1e29SJens Axboe 
19286db1e29SJens Axboe /**
19386db1e29SJens Axboe  * blk_queue_bounce_limit - set bounce buffer limit for queue
19486db1e29SJens Axboe  * @q: the request queue for the device
195cd0aca2dSTejun Heo  * @dma_mask: the maximum address the device can handle
19686db1e29SJens Axboe  *
19786db1e29SJens Axboe  * Description:
19886db1e29SJens Axboe  *    Different hardware can have different requirements as to what pages
19986db1e29SJens Axboe  *    it can do I/O directly to. A low level driver can call
20086db1e29SJens Axboe  *    blk_queue_bounce_limit() to have lower memory pages allocated as bounce
201cd0aca2dSTejun Heo  *    buffers for doing I/O to pages residing above @dma_mask.
20286db1e29SJens Axboe  **/
203cd0aca2dSTejun Heo void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
20486db1e29SJens Axboe {
205cd0aca2dSTejun Heo 	unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
20686db1e29SJens Axboe 	int dma = 0;
20786db1e29SJens Axboe 
20886db1e29SJens Axboe 	q->bounce_gfp = GFP_NOIO;
20986db1e29SJens Axboe #if BITS_PER_LONG == 64
210cd0aca2dSTejun Heo 	/*
211cd0aca2dSTejun Heo 	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
212cd0aca2dSTejun Heo 	 * some IOMMUs can handle everything, but I don't know of a
213cd0aca2dSTejun Heo 	 * way to test this here.
214cd0aca2dSTejun Heo 	 */
215cd0aca2dSTejun Heo 	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
21686db1e29SJens Axboe 		dma = 1;
21786db1e29SJens Axboe #else
2186728cb0eSJens Axboe 	if (b_pfn < blk_max_low_pfn)
21986db1e29SJens Axboe 		dma = 1;
22086db1e29SJens Axboe #endif
221*c49825faSMalahal Naineni 	q->limits.bounce_pfn = b_pfn;
22286db1e29SJens Axboe 	if (dma) {
22386db1e29SJens Axboe 		init_emergency_isa_pool();
22486db1e29SJens Axboe 		q->bounce_gfp = GFP_NOIO | GFP_DMA;
22586db1e29SJens Axboe 	}
22686db1e29SJens Axboe }
22786db1e29SJens Axboe EXPORT_SYMBOL(blk_queue_bounce_limit);
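
/*
 * Example (editor's sketch): a PCI driver whose controller can only
 * address 32 bits would bounce anything above 4GB:
 *
 *	u64 mask = DMA_BIT_MASK(32);
 *
 *	if (!pci_set_dma_mask(pdev, mask))
 *		blk_queue_bounce_limit(q, mask);
 */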
22886db1e29SJens Axboe 
22986db1e29SJens Axboe /**
230086fa5ffSMartin K. Petersen  * blk_queue_max_hw_sectors - set max sectors for a request for this queue
23186db1e29SJens Axboe  * @q:  the request queue for the device
2322800aac1SMartin K. Petersen  * @max_hw_sectors:  max hardware sectors in the usual 512b unit
23386db1e29SJens Axboe  *
23486db1e29SJens Axboe  * Description:
2352800aac1SMartin K. Petersen  *    Enables a low level driver to set a hard upper limit,
2362800aac1SMartin K. Petersen  *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
2372800aac1SMartin K. Petersen  *    the device driver based upon the combined capabilities of I/O
2382800aac1SMartin K. Petersen  *    controller and storage device.
2392800aac1SMartin K. Petersen  *
2402800aac1SMartin K. Petersen  *    max_sectors is a soft limit imposed by the block layer for
2412800aac1SMartin K. Petersen  *    filesystem type requests.  This value can be overridden on a
2422800aac1SMartin K. Petersen  *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
2432800aac1SMartin K. Petersen  *    The soft limit cannot exceed max_hw_sectors.
24486db1e29SJens Axboe  **/
245086fa5ffSMartin K. Petersen void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
24686db1e29SJens Axboe {
2472800aac1SMartin K. Petersen 	if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
2482800aac1SMartin K. Petersen 		max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
24924c03d47SHarvey Harrison 		printk(KERN_INFO "%s: set to minimum %d\n",
2502800aac1SMartin K. Petersen 		       __func__, max_hw_sectors);
25186db1e29SJens Axboe 	}
25286db1e29SJens Axboe 
2532800aac1SMartin K. Petersen 	q->limits.max_hw_sectors = max_hw_sectors;
2542800aac1SMartin K. Petersen 	q->limits.max_sectors = min_t(unsigned int, max_hw_sectors,
2552800aac1SMartin K. Petersen 				      BLK_DEF_MAX_SECTORS);
25686db1e29SJens Axboe }
257086fa5ffSMartin K. Petersen EXPORT_SYMBOL(blk_queue_max_hw_sectors);
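
/*
 * Example (editor's sketch): a controller limited to 64KB per request
 * would report 128 512-byte sectors:
 *
 *	blk_queue_max_hw_sectors(q, 65536 >> 9);
 *
 * max_sectors, the filesystem soft limit, is then clamped to
 * min(128, BLK_DEF_MAX_SECTORS).
 */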
25886db1e29SJens Axboe 
25986db1e29SJens Axboe /**
26067efc925SChristoph Hellwig  * blk_queue_max_discard_sectors - set max sectors for a single discard
26167efc925SChristoph Hellwig  * @q:  the request queue for the device
262c7ebf065SRandy Dunlap  * @max_discard_sectors: maximum number of sectors to discard
26367efc925SChristoph Hellwig  **/
26467efc925SChristoph Hellwig void blk_queue_max_discard_sectors(struct request_queue *q,
26567efc925SChristoph Hellwig 		unsigned int max_discard_sectors)
26667efc925SChristoph Hellwig {
26767efc925SChristoph Hellwig 	q->limits.max_discard_sectors = max_discard_sectors;
26867efc925SChristoph Hellwig }
26967efc925SChristoph Hellwig EXPORT_SYMBOL(blk_queue_max_discard_sectors);
27067efc925SChristoph Hellwig 
27167efc925SChristoph Hellwig /**
2728a78362cSMartin K. Petersen  * blk_queue_max_segments - set max hw segments for a request for this queue
27386db1e29SJens Axboe  * @q:  the request queue for the device
27486db1e29SJens Axboe  * @max_segments:  max number of segments
27586db1e29SJens Axboe  *
27686db1e29SJens Axboe  * Description:
27786db1e29SJens Axboe  *    Enables a low level driver to set an upper limit on the number of
2788a78362cSMartin K. Petersen  *    hw data segments in a request.
27986db1e29SJens Axboe  **/
2808a78362cSMartin K. Petersen void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
28186db1e29SJens Axboe {
28286db1e29SJens Axboe 	if (!max_segments) {
28386db1e29SJens Axboe 		max_segments = 1;
28424c03d47SHarvey Harrison 		printk(KERN_INFO "%s: set to minimum %d\n",
28524c03d47SHarvey Harrison 		       __func__, max_segments);
28686db1e29SJens Axboe 	}
28786db1e29SJens Axboe 
2888a78362cSMartin K. Petersen 	q->limits.max_segments = max_segments;
28986db1e29SJens Axboe }
2908a78362cSMartin K. Petersen EXPORT_SYMBOL(blk_queue_max_segments);
29186db1e29SJens Axboe 
29286db1e29SJens Axboe /**
29386db1e29SJens Axboe  * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
29486db1e29SJens Axboe  * @q:  the request queue for the device
29586db1e29SJens Axboe  * @max_size:  max size of segment in bytes
29686db1e29SJens Axboe  *
29786db1e29SJens Axboe  * Description:
29886db1e29SJens Axboe  *    Enables a low level driver to set an upper limit on the size of a
29986db1e29SJens Axboe  *    coalesced segment.
30086db1e29SJens Axboe  **/
30186db1e29SJens Axboe void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
30286db1e29SJens Axboe {
30386db1e29SJens Axboe 	if (max_size < PAGE_CACHE_SIZE) {
30486db1e29SJens Axboe 		max_size = PAGE_CACHE_SIZE;
30524c03d47SHarvey Harrison 		printk(KERN_INFO "%s: set to minimum %d\n",
30624c03d47SHarvey Harrison 		       __func__, max_size);
30786db1e29SJens Axboe 	}
30886db1e29SJens Axboe 
309025146e1SMartin K. Petersen 	q->limits.max_segment_size = max_size;
31086db1e29SJens Axboe }
31186db1e29SJens Axboe EXPORT_SYMBOL(blk_queue_max_segment_size);
31286db1e29SJens Axboe 
31386db1e29SJens Axboe /**
314e1defc4fSMartin K. Petersen  * blk_queue_logical_block_size - set logical block size for the queue
31586db1e29SJens Axboe  * @q:  the request queue for the device
316e1defc4fSMartin K. Petersen  * @size:  the logical block size, in bytes
31786db1e29SJens Axboe  *
31886db1e29SJens Axboe  * Description:
319e1defc4fSMartin K. Petersen  *   This should be set to the lowest possible block size that the
320e1defc4fSMartin K. Petersen  *   storage device can address.  The default of 512 covers most
321e1defc4fSMartin K. Petersen  *   hardware.
32286db1e29SJens Axboe  **/
323e1defc4fSMartin K. Petersen void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
32486db1e29SJens Axboe {
325025146e1SMartin K. Petersen 	q->limits.logical_block_size = size;
326c72758f3SMartin K. Petersen 
327c72758f3SMartin K. Petersen 	if (q->limits.physical_block_size < size)
328c72758f3SMartin K. Petersen 		q->limits.physical_block_size = size;
329c72758f3SMartin K. Petersen 
330c72758f3SMartin K. Petersen 	if (q->limits.io_min < q->limits.physical_block_size)
331c72758f3SMartin K. Petersen 		q->limits.io_min = q->limits.physical_block_size;
33286db1e29SJens Axboe }
333e1defc4fSMartin K. Petersen EXPORT_SYMBOL(blk_queue_logical_block_size);
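
/*
 * Example (editor's sketch): a device that can only address 4096-byte
 * blocks would report:
 *
 *	blk_queue_logical_block_size(q, 4096);
 *
 * which also pulls physical_block_size and io_min up to at least 4096.
 */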
33486db1e29SJens Axboe 
335c72758f3SMartin K. Petersen /**
336c72758f3SMartin K. Petersen  * blk_queue_physical_block_size - set physical block size for the queue
337c72758f3SMartin K. Petersen  * @q:  the request queue for the device
338c72758f3SMartin K. Petersen  * @size:  the physical block size, in bytes
339c72758f3SMartin K. Petersen  *
340c72758f3SMartin K. Petersen  * Description:
341c72758f3SMartin K. Petersen  *   This should be set to the lowest possible sector size that the
342c72758f3SMartin K. Petersen  *   hardware can operate on without reverting to read-modify-write
343c72758f3SMartin K. Petersen  *   operations.
344c72758f3SMartin K. Petersen  */
345c72758f3SMartin K. Petersen void blk_queue_physical_block_size(struct request_queue *q, unsigned short size)
346c72758f3SMartin K. Petersen {
347c72758f3SMartin K. Petersen 	q->limits.physical_block_size = size;
348c72758f3SMartin K. Petersen 
349c72758f3SMartin K. Petersen 	if (q->limits.physical_block_size < q->limits.logical_block_size)
350c72758f3SMartin K. Petersen 		q->limits.physical_block_size = q->limits.logical_block_size;
351c72758f3SMartin K. Petersen 
352c72758f3SMartin K. Petersen 	if (q->limits.io_min < q->limits.physical_block_size)
353c72758f3SMartin K. Petersen 		q->limits.io_min = q->limits.physical_block_size;
354c72758f3SMartin K. Petersen }
355c72758f3SMartin K. Petersen EXPORT_SYMBOL(blk_queue_physical_block_size);
356c72758f3SMartin K. Petersen 
357c72758f3SMartin K. Petersen /**
358c72758f3SMartin K. Petersen  * blk_queue_alignment_offset - set physical block alignment offset
359c72758f3SMartin K. Petersen  * @q:	the request queue for the device
3608ebf9756SRandy Dunlap  * @offset: alignment offset in bytes
361c72758f3SMartin K. Petersen  *
362c72758f3SMartin K. Petersen  * Description:
363c72758f3SMartin K. Petersen  *   Some devices are naturally misaligned to compensate for things like
364c72758f3SMartin K. Petersen  *   the legacy DOS partition table 63-sector offset.  Low-level drivers
365c72758f3SMartin K. Petersen  *   should call this function for devices whose first sector is not
366c72758f3SMartin K. Petersen  *   naturally aligned.
367c72758f3SMartin K. Petersen  */
368c72758f3SMartin K. Petersen void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
369c72758f3SMartin K. Petersen {
370c72758f3SMartin K. Petersen 	q->limits.alignment_offset =
371c72758f3SMartin K. Petersen 		offset & (q->limits.physical_block_size - 1);
372c72758f3SMartin K. Petersen 	q->limits.misaligned = 0;
373c72758f3SMartin K. Petersen }
374c72758f3SMartin K. Petersen EXPORT_SYMBOL(blk_queue_alignment_offset);
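
/*
 * Example (editor's sketch): a 4KB-physical drive whose first physical
 * block boundary falls 3584 bytes into the logical address space might
 * report:
 *
 *	blk_queue_physical_block_size(q, 4096);
 *	blk_queue_alignment_offset(q, 3584);
 */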
375c72758f3SMartin K. Petersen 
376c72758f3SMartin K. Petersen /**
3777c958e32SMartin K. Petersen  * blk_limits_io_min - set minimum request size for a device
3787c958e32SMartin K. Petersen  * @limits: the queue limits
3797c958e32SMartin K. Petersen  * @min:  smallest I/O size in bytes
3807c958e32SMartin K. Petersen  *
3817c958e32SMartin K. Petersen  * Description:
3827c958e32SMartin K. Petersen  *   Some devices have an internal block size bigger than the reported
3837c958e32SMartin K. Petersen  *   hardware sector size.  This function can be used to signal the
3847c958e32SMartin K. Petersen  *   smallest I/O the device can perform without incurring a performance
3857c958e32SMartin K. Petersen  *   penalty.
3867c958e32SMartin K. Petersen  */
3877c958e32SMartin K. Petersen void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
3887c958e32SMartin K. Petersen {
3897c958e32SMartin K. Petersen 	limits->io_min = min;
3907c958e32SMartin K. Petersen 
3917c958e32SMartin K. Petersen 	if (limits->io_min < limits->logical_block_size)
3927c958e32SMartin K. Petersen 		limits->io_min = limits->logical_block_size;
3937c958e32SMartin K. Petersen 
3947c958e32SMartin K. Petersen 	if (limits->io_min < limits->physical_block_size)
3957c958e32SMartin K. Petersen 		limits->io_min = limits->physical_block_size;
3967c958e32SMartin K. Petersen }
3977c958e32SMartin K. Petersen EXPORT_SYMBOL(blk_limits_io_min);
3987c958e32SMartin K. Petersen 
3997c958e32SMartin K. Petersen /**
400c72758f3SMartin K. Petersen  * blk_queue_io_min - set minimum request size for the queue
401c72758f3SMartin K. Petersen  * @q:	the request queue for the device
4028ebf9756SRandy Dunlap  * @min:  smallest I/O size in bytes
403c72758f3SMartin K. Petersen  *
404c72758f3SMartin K. Petersen  * Description:
4057e5f5fb0SMartin K. Petersen  *   Storage devices may report a granularity or preferred minimum I/O
4067e5f5fb0SMartin K. Petersen  *   size which is the smallest request the device can perform without
4077e5f5fb0SMartin K. Petersen  *   incurring a performance penalty.  For disk drives this is often the
4087e5f5fb0SMartin K. Petersen  *   physical block size.  For RAID arrays it is often the stripe chunk
4097e5f5fb0SMartin K. Petersen  *   size.  A properly aligned multiple of minimum_io_size is the
4107e5f5fb0SMartin K. Petersen  *   preferred request size for workloads where a high number of I/O
4117e5f5fb0SMartin K. Petersen  *   operations is desired.
412c72758f3SMartin K. Petersen  */
413c72758f3SMartin K. Petersen void blk_queue_io_min(struct request_queue *q, unsigned int min)
414c72758f3SMartin K. Petersen {
4157c958e32SMartin K. Petersen 	blk_limits_io_min(&q->limits, min);
416c72758f3SMartin K. Petersen }
417c72758f3SMartin K. Petersen EXPORT_SYMBOL(blk_queue_io_min);
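
/*
 * Example (editor's sketch): a RAID driver with a 64KB chunk size could
 * advertise the chunk as the smallest efficient request:
 *
 *	blk_queue_io_min(q, 64 * 1024);
 */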
418c72758f3SMartin K. Petersen 
419c72758f3SMartin K. Petersen /**
4203c5820c7SMartin K. Petersen  * blk_limits_io_opt - set optimal request size for a device
4213c5820c7SMartin K. Petersen  * @limits: the queue limits
4223c5820c7SMartin K. Petersen  * @opt:  optimal request size in bytes
4233c5820c7SMartin K. Petersen  *
4243c5820c7SMartin K. Petersen  * Description:
4253c5820c7SMartin K. Petersen  *   Storage devices may report an optimal I/O size, which is the
4263c5820c7SMartin K. Petersen  *   device's preferred unit for sustained I/O.  This is rarely reported
4273c5820c7SMartin K. Petersen  *   for disk drives.  For RAID arrays it is usually the stripe width or
4283c5820c7SMartin K. Petersen  *   the internal track size.  A properly aligned multiple of
4293c5820c7SMartin K. Petersen  *   optimal_io_size is the preferred request size for workloads where
4303c5820c7SMartin K. Petersen  *   sustained throughput is desired.
4313c5820c7SMartin K. Petersen  */
4323c5820c7SMartin K. Petersen void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
4333c5820c7SMartin K. Petersen {
4343c5820c7SMartin K. Petersen 	limits->io_opt = opt;
4353c5820c7SMartin K. Petersen }
4363c5820c7SMartin K. Petersen EXPORT_SYMBOL(blk_limits_io_opt);
4373c5820c7SMartin K. Petersen 
4383c5820c7SMartin K. Petersen /**
439c72758f3SMartin K. Petersen  * blk_queue_io_opt - set optimal request size for the queue
440c72758f3SMartin K. Petersen  * @q:	the request queue for the device
4418ebf9756SRandy Dunlap  * @opt:  optimal request size in bytes
442c72758f3SMartin K. Petersen  *
443c72758f3SMartin K. Petersen  * Description:
4447e5f5fb0SMartin K. Petersen  *   Storage devices may report an optimal I/O size, which is the
4457e5f5fb0SMartin K. Petersen  *   device's preferred unit for sustained I/O.  This is rarely reported
4467e5f5fb0SMartin K. Petersen  *   for disk drives.  For RAID arrays it is usually the stripe width or
4477e5f5fb0SMartin K. Petersen  *   the internal track size.  A properly aligned multiple of
4487e5f5fb0SMartin K. Petersen  *   optimal_io_size is the preferred request size for workloads where
4497e5f5fb0SMartin K. Petersen  *   sustained throughput is desired.
450c72758f3SMartin K. Petersen  */
451c72758f3SMartin K. Petersen void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
452c72758f3SMartin K. Petersen {
4533c5820c7SMartin K. Petersen 	blk_limits_io_opt(&q->limits, opt);
454c72758f3SMartin K. Petersen }
455c72758f3SMartin K. Petersen EXPORT_SYMBOL(blk_queue_io_opt);
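
/*
 * Example (editor's sketch): a striped array with four data disks and a
 * 64KB chunk could report the full stripe width as the optimal size:
 *
 *	blk_queue_io_min(q, 64 * 1024);		// chunk
 *	blk_queue_io_opt(q, 4 * 64 * 1024);	// stripe width
 */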
456c72758f3SMartin K. Petersen 
45786db1e29SJens Axboe /**
45886db1e29SJens Axboe  * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
45986db1e29SJens Axboe  * @t:	the stacking driver (top)
46086db1e29SJens Axboe  * @b:  the underlying device (bottom)
46186db1e29SJens Axboe  **/
46286db1e29SJens Axboe void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
46386db1e29SJens Axboe {
464fef24667SMartin K. Petersen 	blk_stack_limits(&t->limits, &b->limits, 0);
465025146e1SMartin K. Petersen 
466e7e72bf6SNeil Brown 	if (!t->queue_lock)
467e7e72bf6SNeil Brown 		WARN_ON_ONCE(1);
468e7e72bf6SNeil Brown 	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
469e7e72bf6SNeil Brown 		unsigned long flags;
470e7e72bf6SNeil Brown 		spin_lock_irqsave(t->queue_lock, flags);
47175ad23bcSNick Piggin 		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
472e7e72bf6SNeil Brown 		spin_unlock_irqrestore(t->queue_lock, flags);
473e7e72bf6SNeil Brown 	}
47486db1e29SJens Axboe }
47586db1e29SJens Axboe EXPORT_SYMBOL(blk_queue_stack_limits);
47686db1e29SJens Axboe 
47786db1e29SJens Axboe /**
478c72758f3SMartin K. Petersen  * blk_stack_limits - adjust queue_limits for stacked devices
47981744ee4SMartin K. Petersen  * @t:	the stacking driver limits (top device)
48081744ee4SMartin K. Petersen  * @b:  the underlying queue limits (bottom, component device)
481e03a72e1SMartin K. Petersen  * @start:  first data sector within component device
482c72758f3SMartin K. Petersen  *
483c72758f3SMartin K. Petersen  * Description:
48481744ee4SMartin K. Petersen  *    This function is used by stacking drivers like MD and DM to ensure
48581744ee4SMartin K. Petersen  *    that all component devices have compatible block sizes and
48681744ee4SMartin K. Petersen  *    alignments.  The stacking driver must provide a queue_limits
48781744ee4SMartin K. Petersen  *    struct (top) and then iteratively call the stacking function for
48881744ee4SMartin K. Petersen  *    all component (bottom) devices.  The stacking function will
48981744ee4SMartin K. Petersen  *    attempt to combine the values and ensure proper alignment.
49081744ee4SMartin K. Petersen  *
49181744ee4SMartin K. Petersen  *    Returns 0 if the top and bottom queue_limits are compatible.  The
49281744ee4SMartin K. Petersen  *    top device's block sizes and alignment offsets may be adjusted to
49381744ee4SMartin K. Petersen  *    ensure alignment with the bottom device. If no compatible sizes
49481744ee4SMartin K. Petersen  *    and alignments exist, -1 is returned and the resulting top
49581744ee4SMartin K. Petersen  *    queue_limits will have the misaligned flag set to indicate that
49681744ee4SMartin K. Petersen  *    the alignment_offset is undefined.
497c72758f3SMartin K. Petersen  */
498c72758f3SMartin K. Petersen int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
499e03a72e1SMartin K. Petersen 		     sector_t start)
500c72758f3SMartin K. Petersen {
501e03a72e1SMartin K. Petersen 	unsigned int top, bottom, alignment, ret = 0;
50286b37281SMartin K. Petersen 
503c72758f3SMartin K. Petersen 	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
504c72758f3SMartin K. Petersen 	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
50577634f33SMartin K. Petersen 	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
506c72758f3SMartin K. Petersen 
507c72758f3SMartin K. Petersen 	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
508c72758f3SMartin K. Petersen 					    b->seg_boundary_mask);
509c72758f3SMartin K. Petersen 
5108a78362cSMartin K. Petersen 	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
51113f05c8dSMartin K. Petersen 	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
51213f05c8dSMartin K. Petersen 						 b->max_integrity_segments);
513c72758f3SMartin K. Petersen 
514c72758f3SMartin K. Petersen 	t->max_segment_size = min_not_zero(t->max_segment_size,
515c72758f3SMartin K. Petersen 					   b->max_segment_size);
516c72758f3SMartin K. Petersen 
517fe0b393fSMartin K. Petersen 	t->misaligned |= b->misaligned;
518fe0b393fSMartin K. Petersen 
519e03a72e1SMartin K. Petersen 	alignment = queue_limit_alignment_offset(b, start);
5209504e086SMartin K. Petersen 
52181744ee4SMartin K. Petersen 	/* Bottom device has different alignment.  Check that it is
52281744ee4SMartin K. Petersen 	 * compatible with the current top alignment.
52381744ee4SMartin K. Petersen 	 */
5249504e086SMartin K. Petersen 	if (t->alignment_offset != alignment) {
5259504e086SMartin K. Petersen 
5269504e086SMartin K. Petersen 		top = max(t->physical_block_size, t->io_min)
5279504e086SMartin K. Petersen 			+ t->alignment_offset;
52881744ee4SMartin K. Petersen 		bottom = max(b->physical_block_size, b->io_min) + alignment;
5299504e086SMartin K. Petersen 
53081744ee4SMartin K. Petersen 		/* Verify that top and bottom intervals line up */
531fe0b393fSMartin K. Petersen 		if (max(top, bottom) & (min(top, bottom) - 1)) {
5329504e086SMartin K. Petersen 			t->misaligned = 1;
533fe0b393fSMartin K. Petersen 			ret = -1;
534fe0b393fSMartin K. Petersen 		}
5359504e086SMartin K. Petersen 	}
5369504e086SMartin K. Petersen 
537c72758f3SMartin K. Petersen 	t->logical_block_size = max(t->logical_block_size,
538c72758f3SMartin K. Petersen 				    b->logical_block_size);
539c72758f3SMartin K. Petersen 
540c72758f3SMartin K. Petersen 	t->physical_block_size = max(t->physical_block_size,
541c72758f3SMartin K. Petersen 				     b->physical_block_size);
542c72758f3SMartin K. Petersen 
543c72758f3SMartin K. Petersen 	t->io_min = max(t->io_min, b->io_min);
5449504e086SMartin K. Petersen 	t->io_opt = lcm(t->io_opt, b->io_opt);
5459504e086SMartin K. Petersen 
546c72758f3SMartin K. Petersen 	t->no_cluster |= b->no_cluster;
54798262f27SMartin K. Petersen 	t->discard_zeroes_data &= b->discard_zeroes_data;
548c72758f3SMartin K. Petersen 
54981744ee4SMartin K. Petersen 	/* Physical block size a multiple of the logical block size? */
5509504e086SMartin K. Petersen 	if (t->physical_block_size & (t->logical_block_size - 1)) {
5519504e086SMartin K. Petersen 		t->physical_block_size = t->logical_block_size;
552c72758f3SMartin K. Petersen 		t->misaligned = 1;
553fe0b393fSMartin K. Petersen 		ret = -1;
55486b37281SMartin K. Petersen 	}
55586b37281SMartin K. Petersen 
55681744ee4SMartin K. Petersen 	/* Minimum I/O a multiple of the physical block size? */
5579504e086SMartin K. Petersen 	if (t->io_min & (t->physical_block_size - 1)) {
5589504e086SMartin K. Petersen 		t->io_min = t->physical_block_size;
5599504e086SMartin K. Petersen 		t->misaligned = 1;
560fe0b393fSMartin K. Petersen 		ret = -1;
5619504e086SMartin K. Petersen 	}
5629504e086SMartin K. Petersen 
56381744ee4SMartin K. Petersen 	/* Optimal I/O a multiple of the physical block size? */
5649504e086SMartin K. Petersen 	if (t->io_opt & (t->physical_block_size - 1)) {
5659504e086SMartin K. Petersen 		t->io_opt = 0;
5669504e086SMartin K. Petersen 		t->misaligned = 1;
567fe0b393fSMartin K. Petersen 		ret = -1;
5689504e086SMartin K. Petersen 	}
5699504e086SMartin K. Petersen 
57081744ee4SMartin K. Petersen 	/* Find lowest common alignment_offset */
5719504e086SMartin K. Petersen 	t->alignment_offset = lcm(t->alignment_offset, alignment)
5729504e086SMartin K. Petersen 		& (max(t->physical_block_size, t->io_min) - 1);
5739504e086SMartin K. Petersen 
57481744ee4SMartin K. Petersen 	/* Verify that new alignment_offset is on a logical block boundary */
575fe0b393fSMartin K. Petersen 	if (t->alignment_offset & (t->logical_block_size - 1)) {
5769504e086SMartin K. Petersen 		t->misaligned = 1;
577fe0b393fSMartin K. Petersen 		ret = -1;
578fe0b393fSMartin K. Petersen 	}
5799504e086SMartin K. Petersen 
5809504e086SMartin K. Petersen 	/* Discard alignment and granularity */
5819504e086SMartin K. Petersen 	if (b->discard_granularity) {
582e03a72e1SMartin K. Petersen 		alignment = queue_limit_discard_alignment(b, start);
5839504e086SMartin K. Petersen 
5849504e086SMartin K. Petersen 		if (t->discard_granularity != 0 &&
5859504e086SMartin K. Petersen 		    t->discard_alignment != alignment) {
5869504e086SMartin K. Petersen 			top = t->discard_granularity + t->discard_alignment;
5879504e086SMartin K. Petersen 			bottom = b->discard_granularity + alignment;
5889504e086SMartin K. Petersen 
5899504e086SMartin K. Petersen 			/* Verify that top and bottom intervals line up */
5909504e086SMartin K. Petersen 			if (max(top, bottom) & (min(top, bottom) - 1))
59186b37281SMartin K. Petersen 				t->discard_misaligned = 1;
592c72758f3SMartin K. Petersen 		}
593c72758f3SMartin K. Petersen 
59481744ee4SMartin K. Petersen 		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
59581744ee4SMartin K. Petersen 						      b->max_discard_sectors);
5969504e086SMartin K. Petersen 		t->discard_granularity = max(t->discard_granularity,
59786b37281SMartin K. Petersen 					     b->discard_granularity);
5989504e086SMartin K. Petersen 		t->discard_alignment = lcm(t->discard_alignment, alignment) &
5999504e086SMartin K. Petersen 			(t->discard_granularity - 1);
6009504e086SMartin K. Petersen 	}
60170dd5bf3SMartin K. Petersen 
602fe0b393fSMartin K. Petersen 	return ret;
603c72758f3SMartin K. Petersen }
6045d85d324SMike Snitzer EXPORT_SYMBOL(blk_stack_limits);
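
/*
 * Example (editor's sketch): the usual calling pattern for a stacking
 * driver; the component list and its fields are hypothetical.
 *
 *	struct queue_limits lim;
 *
 *	blk_set_default_limits(&lim);
 *	list_for_each_entry(c, &components, list)
 *		if (bdev_stack_limits(&lim, c->bdev, c->start_sector) < 0)
 *			printk(KERN_WARNING "component %s is misaligned\n",
 *			       c->name);
 */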
605c72758f3SMartin K. Petersen 
606c72758f3SMartin K. Petersen /**
60717be8c24SMartin K. Petersen  * bdev_stack_limits - adjust queue limits for stacked drivers
60817be8c24SMartin K. Petersen  * @t:	the stacking driver limits (top device)
60917be8c24SMartin K. Petersen  * @bdev:  the component block_device (bottom)
61017be8c24SMartin K. Petersen  * @start:  first data sector within component device
61117be8c24SMartin K. Petersen  *
61217be8c24SMartin K. Petersen  * Description:
61317be8c24SMartin K. Petersen  *    Merges queue limits for a top device and a block_device.  Returns
61417be8c24SMartin K. Petersen  *    0 if alignment didn't change.  Returns -1 if adding the bottom
61517be8c24SMartin K. Petersen  *    device caused misalignment.
61617be8c24SMartin K. Petersen  */
61717be8c24SMartin K. Petersen int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
61817be8c24SMartin K. Petersen 		      sector_t start)
61917be8c24SMartin K. Petersen {
62017be8c24SMartin K. Petersen 	struct request_queue *bq = bdev_get_queue(bdev);
62117be8c24SMartin K. Petersen 
62217be8c24SMartin K. Petersen 	start += get_start_sect(bdev);
62317be8c24SMartin K. Petersen 
624e03a72e1SMartin K. Petersen 	return blk_stack_limits(t, &bq->limits, start);
62517be8c24SMartin K. Petersen }
62617be8c24SMartin K. Petersen EXPORT_SYMBOL(bdev_stack_limits);
62717be8c24SMartin K. Petersen 
62817be8c24SMartin K. Petersen /**
629c72758f3SMartin K. Petersen  * disk_stack_limits - adjust queue limits for stacked drivers
63077634f33SMartin K. Petersen  * @disk:  MD/DM gendisk (top)
631c72758f3SMartin K. Petersen  * @bdev:  the underlying block device (bottom)
632c72758f3SMartin K. Petersen  * @offset:  offset to beginning of data within component device
633c72758f3SMartin K. Petersen  *
634c72758f3SMartin K. Petersen  * Description:
635e03a72e1SMartin K. Petersen  *    Merges the limits for a top level gendisk and a bottom level
636e03a72e1SMartin K. Petersen  *    block_device.
637c72758f3SMartin K. Petersen  */
638c72758f3SMartin K. Petersen void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
639c72758f3SMartin K. Petersen 		       sector_t offset)
640c72758f3SMartin K. Petersen {
641c72758f3SMartin K. Petersen 	struct request_queue *t = disk->queue;
642c72758f3SMartin K. Petersen 	struct request_queue *b = bdev_get_queue(bdev);
643c72758f3SMartin K. Petersen 
644e03a72e1SMartin K. Petersen 	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
645c72758f3SMartin K. Petersen 		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
646c72758f3SMartin K. Petersen 
647c72758f3SMartin K. Petersen 		disk_name(disk, 0, top);
648c72758f3SMartin K. Petersen 		bdevname(bdev, bottom);
649c72758f3SMartin K. Petersen 
650c72758f3SMartin K. Petersen 		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
651c72758f3SMartin K. Petersen 		       top, bottom);
652c72758f3SMartin K. Petersen 	}
653c72758f3SMartin K. Petersen 
654c72758f3SMartin K. Petersen 	if (!t->queue_lock)
655c72758f3SMartin K. Petersen 		WARN_ON_ONCE(1);
656c72758f3SMartin K. Petersen 	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
657c72758f3SMartin K. Petersen 		unsigned long flags;
658c72758f3SMartin K. Petersen 
659c72758f3SMartin K. Petersen 		spin_lock_irqsave(t->queue_lock, flags);
660c72758f3SMartin K. Petersen 		if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
661c72758f3SMartin K. Petersen 			queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
662c72758f3SMartin K. Petersen 		spin_unlock_irqrestore(t->queue_lock, flags);
663c72758f3SMartin K. Petersen 	}
664c72758f3SMartin K. Petersen }
665c72758f3SMartin K. Petersen EXPORT_SYMBOL(disk_stack_limits);
666c72758f3SMartin K. Petersen 
667c72758f3SMartin K. Petersen /**
668e3790c7dSTejun Heo  * blk_queue_dma_pad - set pad mask
669e3790c7dSTejun Heo  * @q:     the request queue for the device
670e3790c7dSTejun Heo  * @mask:  pad mask
671e3790c7dSTejun Heo  *
67227f8221aSFUJITA Tomonori  * Set dma pad mask.
673e3790c7dSTejun Heo  *
67427f8221aSFUJITA Tomonori  * Appending a pad buffer to a request modifies the last entry of a
67527f8221aSFUJITA Tomonori  * scatter list such that it includes the pad buffer.
676e3790c7dSTejun Heo  **/
677e3790c7dSTejun Heo void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
678e3790c7dSTejun Heo {
679e3790c7dSTejun Heo 	q->dma_pad_mask = mask;
680e3790c7dSTejun Heo }
681e3790c7dSTejun Heo EXPORT_SYMBOL(blk_queue_dma_pad);
682e3790c7dSTejun Heo 
683e3790c7dSTejun Heo /**
68427f8221aSFUJITA Tomonori  * blk_queue_update_dma_pad - update pad mask
68527f8221aSFUJITA Tomonori  * @q:     the request queue for the device
68627f8221aSFUJITA Tomonori  * @mask:  pad mask
68727f8221aSFUJITA Tomonori  *
68827f8221aSFUJITA Tomonori  * Update dma pad mask.
68927f8221aSFUJITA Tomonori  *
69027f8221aSFUJITA Tomonori  * Appending a pad buffer to a request modifies the last entry of a
69127f8221aSFUJITA Tomonori  * scatter list such that it includes the pad buffer.
69227f8221aSFUJITA Tomonori  **/
69327f8221aSFUJITA Tomonori void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
69427f8221aSFUJITA Tomonori {
69527f8221aSFUJITA Tomonori 	if (mask > q->dma_pad_mask)
69627f8221aSFUJITA Tomonori 		q->dma_pad_mask = mask;
69727f8221aSFUJITA Tomonori }
69827f8221aSFUJITA Tomonori EXPORT_SYMBOL(blk_queue_update_dma_pad);
69927f8221aSFUJITA Tomonori 
70027f8221aSFUJITA Tomonori /**
70186db1e29SJens Axboe  * blk_queue_dma_drain - Set up a drain buffer for excess dma.
70286db1e29SJens Axboe  * @q:  the request queue for the device
7032fb98e84STejun Heo  * @dma_drain_needed: fn which returns non-zero if drain is necessary
70486db1e29SJens Axboe  * @buf:	physically contiguous buffer
70586db1e29SJens Axboe  * @size:	size of the buffer in bytes
70686db1e29SJens Axboe  *
70786db1e29SJens Axboe  * Some devices have excess DMA problems and can't simply discard (or
70886db1e29SJens Axboe  * zero fill) the unwanted piece of the transfer.  They have to have a
70986db1e29SJens Axboe  * real area of memory to transfer it into.  The use case for this is
71086db1e29SJens Axboe  * ATAPI devices in DMA mode.  If the packet command causes a transfer
71186db1e29SJens Axboe  * bigger than the transfer size some HBAs will lock up if there
71286db1e29SJens Axboe  * aren't DMA elements to contain the excess transfer.  What this API
71386db1e29SJens Axboe  * does is adjust the queue so that the buf is always appended
71486db1e29SJens Axboe  * silently to the scatterlist.
71586db1e29SJens Axboe  *
7168a78362cSMartin K. Petersen  * Note: This routine adjusts max_segments to make room for appending
7178a78362cSMartin K. Petersen  * the drain buffer.  If you call blk_queue_max_segments() after calling
7188a78362cSMartin K. Petersen  * this routine, you must set the limit to one fewer than your device
7198a78362cSMartin K. Petersen  * can support otherwise there won't be room for the drain buffer.
72086db1e29SJens Axboe  */
721448da4d2SHarvey Harrison int blk_queue_dma_drain(struct request_queue *q,
7222fb98e84STejun Heo 			       dma_drain_needed_fn *dma_drain_needed,
7232fb98e84STejun Heo 			       void *buf, unsigned int size)
72486db1e29SJens Axboe {
7258a78362cSMartin K. Petersen 	if (queue_max_segments(q) < 2)
72686db1e29SJens Axboe 		return -EINVAL;
72786db1e29SJens Axboe 	/* make room for appending the drain */
7288a78362cSMartin K. Petersen 	blk_queue_max_segments(q, queue_max_segments(q) - 1);
7292fb98e84STejun Heo 	q->dma_drain_needed = dma_drain_needed;
73086db1e29SJens Axboe 	q->dma_drain_buffer = buf;
73186db1e29SJens Axboe 	q->dma_drain_size = size;
73286db1e29SJens Axboe 
73386db1e29SJens Axboe 	return 0;
73486db1e29SJens Axboe }
73586db1e29SJens Axboe EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
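
/*
 * Example (editor's sketch, loosely modelled on the ATAPI use case):
 * the drain callback and the size constant are hypothetical.
 *
 *	buf = kmalloc(EXAMPLE_DRAIN_SIZE, q->bounce_gfp | GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	if (blk_queue_dma_drain(q, example_drain_needed, buf,
 *				EXAMPLE_DRAIN_SIZE))
 *		kfree(buf);	// queue supports fewer than two segments
 */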
73686db1e29SJens Axboe 
73786db1e29SJens Axboe /**
73886db1e29SJens Axboe  * blk_queue_segment_boundary - set boundary rules for segment merging
73986db1e29SJens Axboe  * @q:  the request queue for the device
74086db1e29SJens Axboe  * @mask:  the memory boundary mask
74186db1e29SJens Axboe  **/
74286db1e29SJens Axboe void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
74386db1e29SJens Axboe {
74486db1e29SJens Axboe 	if (mask < PAGE_CACHE_SIZE - 1) {
74586db1e29SJens Axboe 		mask = PAGE_CACHE_SIZE - 1;
74624c03d47SHarvey Harrison 		printk(KERN_INFO "%s: set to minimum %lx\n",
74724c03d47SHarvey Harrison 		       __func__, mask);
74886db1e29SJens Axboe 	}
74986db1e29SJens Axboe 
750025146e1SMartin K. Petersen 	q->limits.seg_boundary_mask = mask;
75186db1e29SJens Axboe }
75286db1e29SJens Axboe EXPORT_SYMBOL(blk_queue_segment_boundary);
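
/*
 * Example (editor's sketch): a DMA engine whose segments must not cross
 * a 64KB boundary would set:
 *
 *	blk_queue_segment_boundary(q, 0xffff);
 */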
75386db1e29SJens Axboe 
75486db1e29SJens Axboe /**
75586db1e29SJens Axboe  * blk_queue_dma_alignment - set dma length and memory alignment
75686db1e29SJens Axboe  * @q:     the request queue for the device
75786db1e29SJens Axboe  * @mask:  alignment mask
75886db1e29SJens Axboe  *
75886db1e29SJens Axboe  * Description:
759710027a4SRandy Dunlap  *    Set required memory and length alignment for direct DMA transactions.
7608feb4d20SAlan Cox  *    This is used when building direct I/O requests for the queue.
76286db1e29SJens Axboe  *
76386db1e29SJens Axboe  **/
76486db1e29SJens Axboe void blk_queue_dma_alignment(struct request_queue *q, int mask)
76586db1e29SJens Axboe {
76686db1e29SJens Axboe 	q->dma_alignment = mask;
76786db1e29SJens Axboe }
76886db1e29SJens Axboe EXPORT_SYMBOL(blk_queue_dma_alignment);
76986db1e29SJens Axboe 
77086db1e29SJens Axboe /**
77186db1e29SJens Axboe  * blk_queue_update_dma_alignment - update dma length and memory alignment
77286db1e29SJens Axboe  * @q:     the request queue for the device
77386db1e29SJens Axboe  * @mask:  alignment mask
77486db1e29SJens Axboe  *
77586db1e29SJens Axboe  * Description:
776710027a4SRandy Dunlap  *    Update required memory and length alignment for direct DMA transactions.
77786db1e29SJens Axboe  *    If the requested alignment is larger than the current alignment, then
77886db1e29SJens Axboe  *    the current queue alignment is updated to the new value, otherwise it
77986db1e29SJens Axboe  *    is left alone.  The design of this is to allow multiple objects
78086db1e29SJens Axboe  *    (driver, device, transport etc) to set their respective
78186db1e29SJens Axboe  *    alignments without having them interfere.
78286db1e29SJens Axboe  *
78386db1e29SJens Axboe  **/
78486db1e29SJens Axboe void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
78586db1e29SJens Axboe {
78686db1e29SJens Axboe 	BUG_ON(mask > PAGE_SIZE);
78786db1e29SJens Axboe 
78886db1e29SJens Axboe 	if (mask > q->dma_alignment)
78986db1e29SJens Axboe 		q->dma_alignment = mask;
79086db1e29SJens Axboe }
79186db1e29SJens Axboe EXPORT_SYMBOL(blk_queue_update_dma_alignment);
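
/*
 * Example (editor's sketch): a disk driver that needs direct-I/O
 * buffers aligned to the device's sector size could use:
 *
 *	blk_queue_update_dma_alignment(q, sector_size - 1);
 */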
79286db1e29SJens Axboe 
793aeb3d3a8SHarvey Harrison static int __init blk_settings_init(void)
79486db1e29SJens Axboe {
79586db1e29SJens Axboe 	blk_max_low_pfn = max_low_pfn - 1;
79686db1e29SJens Axboe 	blk_max_pfn = max_pfn - 1;
79786db1e29SJens Axboe 	return 0;
79886db1e29SJens Axboe }
79986db1e29SJens Axboe subsys_initcall(blk_settings_init);
800