/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>

#include "blk.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q:		queue
 * @pfn:	prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback which
 * is invoked before the request is handed to the request_fn. The goal of
 * the function is to prepare a request for I/O; it can be used, for
 * instance, to build a CDB from the request data.
 *
 */
void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
	q->prep_rq_fn = pfn;
}
EXPORT_SYMBOL(blk_queue_prep_rq);
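
/*
 * Illustrative sketch only (not part of this file): a hypothetical driver
 * could register a prepare_request callback along these lines. The driver
 * and its my_build_cdb() helper are assumptions for illustration.
 *
 *	static int my_prep_rq(struct request_queue *q, struct request *rq)
 *	{
 *		if (!my_build_cdb(rq))
 *			return BLKPREP_KILL;
 *		return BLKPREP_OK;
 *	}
 *
 *	blk_queue_prep_rq(q, my_prep_rq);
 */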

/**
 * blk_queue_unprep_rq - set an unprepare_request function for queue
 * @q:		queue
 * @ufn:	unprepare_request function
 *
 * It's possible for a queue to register an unprepare_request callback
 * which is invoked before the request is finally completed. The goal
 * of the function is to deallocate any data that was allocated in the
 * prepare_request callback.
 *
 */
void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
{
	q->unprep_rq_fn = ufn;
}
EXPORT_SYMBOL(blk_queue_unprep_rq);

/**
 * blk_queue_merge_bvec - set a merge_bvec function for queue
 * @q:		queue
 * @mbfn:	merge_bvec_fn
 *
 * Usually queues have static limitations on the max sectors or segments that
 * we can put in a request. Stacking drivers may have some settings that
 * are dynamic, and thus we have to query the queue whether it is ok to
 * add a new bio_vec to a bio at a given offset or not. If the block device
 * has such limitations, it needs to register a merge_bvec_fn to control
 * the size of bios sent to it. Note that a block device *must* allow a
 * single page to be added to an empty bio. The block device driver may want
 * to use the bio_split() function to deal with these bios. By default
 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
 * honored.
 */
void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
{
	q->merge_bvec_fn = mbfn;
}
EXPORT_SYMBOL(blk_queue_merge_bvec);

void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
	q->softirq_done_fn = fn;
}
EXPORT_SYMBOL(blk_queue_softirq_done);

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
{
	q->rq_timed_out_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);

void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
{
	q->lld_busy_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);

/**
 * blk_set_default_limits - reset limits to default values
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limits structure to its default state.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
	lim->max_segments = BLK_MAX_SEGMENTS;
	lim->max_integrity_segments = 0;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
	lim->chunk_sectors = 0;
	lim->max_write_same_sectors = 0;
	lim->max_discard_sectors = 0;
	lim->discard_granularity = 0;
	lim->discard_alignment = 0;
	lim->discard_misaligned = 0;
	lim->discard_zeroes_data = 0;
	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
	lim->alignment_offset = 0;
	lim->io_opt = 0;
	lim->misaligned = 0;
	lim->cluster = 1;
}
EXPORT_SYMBOL(blk_set_default_limits);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limits structure to its default state. Should be used
 *   by stacking drivers like DM that have no internal limits.
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
	blk_set_default_limits(lim);

	/* Inherit limits from component devices */
	lim->discard_zeroes_data = 1;
	lim->max_segments = USHRT_MAX;
	lim->max_hw_sectors = UINT_MAX;
	lim->max_segment_size = UINT_MAX;
	lim->max_sectors = UINT_MAX;
	lim->max_write_same_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);

/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q:  the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *    The normal way for &struct bios to be passed to a device
 *    driver is for them to be collected into requests on a request
 *    queue, and then to allow the device driver to select requests
 *    off that queue when it is ready.  This works well for many block
 *    devices. However, some block devices (typically virtual devices
 *    such as md or lvm) do not benefit from the processing on the
 *    request queue, and are served best by having the requests passed
 *    directly to them.  This can be achieved by providing a function
 *    to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately
 *    with buffers in high memory. This can be accomplished by either calling
 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 *    blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
	/*
	 * set defaults
	 */
	q->nr_requests = BLKDEV_MAX_RQ;

	q->make_request_fn = mfn;
	blk_queue_dma_alignment(q, 511);
	blk_queue_congestion_threshold(q);
	q->nr_batching = BLK_BATCH_REQ;

	blk_set_default_limits(&q->limits);

	/*
	 * by default assume old behaviour and bounce for any highmem page
	 */
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
EXPORT_SYMBOL(blk_queue_make_request);
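
/*
 * Illustrative sketch only (not part of this file): a hypothetical bio-based
 * driver could bypass the request queue as follows. The function name and
 * the trivial completion are assumptions for illustration.
 *
 *	static void my_make_request(struct request_queue *q, struct bio *bio)
 *	{
 *		(remap bio->bi_bdev and bio->bi_iter.bi_sector here and
 *		 resubmit with generic_make_request(), or complete directly)
 *		bio_endio(bio, 0);
 *	}
 *
 *	q = blk_alloc_queue(GFP_KERNEL);
 *	blk_queue_make_request(q, my_make_request);
 */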

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @max_addr: the maximum address the device can handle
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @max_addr.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
{
	unsigned long b_pfn = max_addr >> PAGE_SHIFT;
	int dma = 0;

	q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
	/*
	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
	 * some IOMMUs can handle everything, but I don't know of a
	 * way to test this here.
	 */
	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
		dma = 1;
	q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
#else
	if (b_pfn < blk_max_low_pfn)
		dma = 1;
	q->limits.bounce_pfn = b_pfn;
#endif
	if (dma) {
		init_emergency_isa_pool();
		q->bounce_gfp = GFP_NOIO | GFP_DMA;
		q->limits.bounce_pfn = b_pfn;
	}
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
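
/*
 * Illustrative sketch only (not part of this file): typical limits a driver
 * might pass, using the existing BLK_BOUNCE_* constants.
 *
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);	device can DMA anywhere
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);	bounce all highmem pages
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);	24-bit ISA DMA only
 */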

/**
 * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request
 * @limits: the queue limits
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
 *    the device driver based upon the combined capabilities of I/O
 *    controller and storage device.
 *
 *    max_sectors is a soft limit imposed by the block layer for
 *    filesystem type requests.  This value can be overridden on a
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit cannot exceed max_hw_sectors.
 **/
void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors)
{
	if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
		max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_hw_sectors);
	}

	limits->max_sectors = limits->max_hw_sectors = max_hw_sectors;
}
EXPORT_SYMBOL(blk_limits_max_hw_sectors);

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    See description for blk_limits_max_hw_sectors().
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
	blk_limits_max_hw_sectors(&q->limits, max_hw_sectors);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
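
/*
 * Illustrative sketch only (not part of this file): a controller limited to
 * 1 MiB per transfer would advertise 2048 512-byte sectors.
 *
 *	blk_queue_max_hw_sectors(q, 2048);
 */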

/**
 * blk_queue_chunk_sectors - set size of the chunk for this queue
 * @q:  the request queue for the device
 * @chunk_sectors:  chunk sectors in the usual 512b unit
 *
 * Description:
 *    If a driver doesn't want IOs to cross a given chunk size, it can set
 *    this limit and prevent merging across chunks. Note that the chunk size
 *    must currently be a power-of-2 in sectors. Also note that the block
 *    layer must accept a page worth of data at any offset. So if the
 *    crossing of chunks is a hard limitation in the driver, it must still be
 *    prepared to split single page bios.
 **/
void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
{
	BUG_ON(!is_power_of_2(chunk_sectors));
	q->limits.chunk_sectors = chunk_sectors;
}
EXPORT_SYMBOL(blk_queue_chunk_sectors);
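
/*
 * Illustrative sketch only (not part of this file): a device with a 128 KiB
 * internal boundary would set 256 sectors (a power of two, as required).
 *
 *	blk_queue_chunk_sectors(q, 256);
 */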

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q:  the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors)
{
	q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_write_same_sectors - set max sectors for a single write same
 * @q:  the request queue for the device
 * @max_write_same_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_same_sectors(struct request_queue *q,
				      unsigned int max_write_same_sectors)
{
	q->limits.max_write_same_sectors = max_write_same_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_same_sectors);

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment.
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_CACHE_SIZE) {
		max_size = PAGE_CACHE_SIZE;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_size);
	}

	q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);
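
/*
 * Illustrative sketch only (not part of this file): a hypothetical HBA whose
 * scatter/gather table holds 128 entries of at most 64 KiB each.
 *
 *	blk_queue_max_segments(q, 128);
 *	blk_queue_max_segment_size(q, 64 * 1024);
 */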

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
{
	q->limits.logical_block_size = size;

	if (q->limits.physical_block_size < size)
		q->limits.physical_block_size = size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
	q->limits.physical_block_size = size;

	if (q->limits.physical_block_size < q->limits.logical_block_size)
		q->limits.physical_block_size = q->limits.logical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);
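
/*
 * Illustrative sketch only (not part of this file): a 512e drive (512-byte
 * logical sectors stored on 4 KiB physical sectors) would report:
 *
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_physical_block_size(q, 4096);
 */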

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:	the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
	q->limits.alignment_offset =
		offset & (q->limits.physical_block_size - 1);
	q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:	the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
	blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
	limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:	the request queue for the device
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
	blk_limits_io_opt(&q->limits, opt);
}
EXPORT_SYMBOL(blk_queue_io_opt);
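
/*
 * Illustrative sketch only (not part of this file): a RAID5 set with a
 * 64 KiB chunk and four data disks might advertise its geometry as:
 *
 *	blk_queue_io_min(q, 64 * 1024);		one chunk
 *	blk_queue_io_opt(q, 4 * 64 * 1024);	one full stripe
 */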

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t:	the stacking driver (top)
 * @b:  the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
	blk_stack_limits(&t->limits, &b->limits, 0);
}
EXPORT_SYMBOL(blk_queue_stack_limits);

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		     sector_t start)
{
	unsigned int top, bottom, alignment, ret = 0;

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->max_write_same_sectors = min(t->max_write_same_sectors,
					b->max_write_same_sectors);
	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
						 b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	t->misaligned |= b->misaligned;

	alignment = queue_limit_alignment_offset(b, start);

	/* Bottom device has different alignment.  Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) % min(top, bottom)) {
			t->misaligned = 1;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm(t->io_opt, b->io_opt);

	t->cluster &= b->cluster;
	t->discard_zeroes_data &= b->discard_zeroes_data;

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->misaligned = 1;
		ret = -1;
	}

	t->raid_partial_stripes_expensive =
		max(t->raid_partial_stripes_expensive,
		    b->raid_partial_stripes_expensive);

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm(t->alignment_offset, alignment)
		% max(t->physical_block_size, t->io_min);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->misaligned = 1;
		ret = -1;
	}

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		if (t->discard_granularity != 0 &&
		    t->discard_alignment != alignment) {
			top = t->discard_granularity + t->discard_alignment;
			bottom = b->discard_granularity + alignment;

			/* Verify that top and bottom intervals line up */
			if ((max(top, bottom) % min(top, bottom)) != 0)
				t->discard_misaligned = 1;
		}

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm(t->discard_alignment, alignment) %
			t->discard_granularity;
	}

	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);

/**
 * bdev_stack_limits - adjust queue limits for stacked drivers
 * @t:	the stacking driver limits (top device)
 * @bdev:  the component block_device (bottom)
 * @start:  first data sector within component device
 *
 * Description:
 *    Merges queue limits for a top device and a block_device.  Returns
 *    0 if alignment didn't change.  Returns -1 if adding the bottom
 *    device caused misalignment.
 */
int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
		      sector_t start)
{
	struct request_queue *bq = bdev_get_queue(bdev);

	start += get_start_sect(bdev);

	return blk_stack_limits(t, &bq->limits, start);
}
EXPORT_SYMBOL(bdev_stack_limits);

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges the limits for a top level gendisk and a bottom level
 *    block_device.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
		       sector_t offset)
{
	struct request_queue *t = disk->queue;

	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

		disk_name(disk, 0, top);
		bdevname(bdev, bottom);

		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
		       top, bottom);
	}
}
EXPORT_SYMBOL(disk_stack_limits);
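
/*
 * Illustrative sketch only (not part of this file): a stacking driver would
 * typically start from blk_set_stacking_limits() and fold in each component
 * device. The component list ("c", "components", c->bdev, c->start) is an
 * assumption for illustration.
 *
 *	struct queue_limits lim;
 *	char b[BDEVNAME_SIZE];
 *
 *	blk_set_stacking_limits(&lim);
 *	list_for_each_entry(c, &components, list)
 *		if (bdev_stack_limits(&lim, c->bdev, c->start >> 9) < 0)
 *			pr_warn("%s: device %s is misaligned\n",
 *				disk->disk_name, bdevname(c->bdev, b));
 */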

/**
 * blk_queue_dma_pad - set pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Set dma pad mask.
 *
 * Appending a pad buffer to a request modifies the last entry of the
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
	q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_dma_pad);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending a pad buffer to a request modifies the last entry of the
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q:  the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf:	physically contiguous buffer
 * @size:	size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer.  They have to have a
 * real area of memory to transfer it into.  The use case for this is
 * ATAPI devices in DMA mode.  If the packet command causes a transfer
 * bigger than the transfer size some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer.  What this API
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
 * Note: This routine adjusts max_segments to make room for appending
 * the drain buffer.  If you call blk_queue_max_segments() after calling
 * this routine, you must set the limit to one fewer than your device
 * can support, otherwise there won't be room for the drain buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size)
{
	if (queue_max_segments(q) < 2)
		return -EINVAL;
	/* make room for appending the drain */
	blk_queue_max_segments(q, queue_max_segments(q) - 1);
	q->dma_drain_needed = dma_drain_needed;
	q->dma_drain_buffer = buf;
	q->dma_drain_size = size;

	return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
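
/*
 * Illustrative sketch only (not part of this file): an ATAPI-style driver
 * could hook up a drain buffer as below. MY_DRAIN_SIZE, my_drain_needed()
 * and the allocation strategy are assumptions for illustration.
 *
 *	static int my_drain_needed(struct request *rq)
 *	{
 *		return rq->cmd_type == REQ_TYPE_BLOCK_PC;
 *	}
 *
 *	buf = kmalloc(MY_DRAIN_SIZE, GFP_KERNEL);
 *	if (buf)
 *		blk_queue_dma_drain(q, my_drain_needed, buf, MY_DRAIN_SIZE);
 */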

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_CACHE_SIZE - 1) {
		mask = PAGE_CACHE_SIZE - 1;
		printk(KERN_INFO "%s: set to minimum %lx\n",
		       __func__, mask);
	}

	q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Set required memory and length alignment for direct dma transactions.
 *    This is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->dma_alignment)
		q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);
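
/*
 * Illustrative sketch only (not part of this file): a controller that can
 * only DMA to/from 4-byte aligned buffers in 4-byte multiples would use a
 * mask of 3.
 *
 *	blk_queue_update_dma_alignment(q, 3);
 */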

/**
 * blk_queue_flush - configure queue's cache flush capability
 * @q:		the request queue for the device
 * @flush:	0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
 *
 * Tell the block layer about the cache flush capability of @q.  If it
 * supports flushing, REQ_FLUSH should be set.  If it supports bypassing
 * the write cache for individual writes, REQ_FUA should be set.
 */
void blk_queue_flush(struct request_queue *q, unsigned int flush)
{
	WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));

	if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
		flush &= ~REQ_FUA;

	q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
}
EXPORT_SYMBOL_GPL(blk_queue_flush);
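
/*
 * Illustrative sketch only (not part of this file): a disk with a volatile
 * write cache and working FUA would declare both capabilities.
 *
 *	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
 */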

void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
{
	q->flush_not_queueable = !queueable;
}
EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);

static int __init blk_settings_init(void)
{
	blk_max_low_pfn = max_low_pfn - 1;
	blk_max_pfn = max_pfn - 1;
	return 0;
}
subsys_initcall(blk_settings_init);