xref: /linux/block/blk-settings.c (revision 4913efe456c987057e5d36a3f0a55422a9072cae)
186db1e29SJens Axboe /*
286db1e29SJens Axboe  * Functions related to setting various queue properties from drivers
386db1e29SJens Axboe  */
486db1e29SJens Axboe #include <linux/kernel.h>
586db1e29SJens Axboe #include <linux/module.h>
686db1e29SJens Axboe #include <linux/init.h>
786db1e29SJens Axboe #include <linux/bio.h>
886db1e29SJens Axboe #include <linux/blkdev.h>
986db1e29SJens Axboe #include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
1070dd5bf3SMartin K. Petersen #include <linux/gcd.h>
112cda2728SMartin K. Petersen #include <linux/lcm.h>
12ad5ebd2fSRandy Dunlap #include <linux/jiffies.h>
135a0e3ad6STejun Heo #include <linux/gfp.h>
1486db1e29SJens Axboe 
1586db1e29SJens Axboe #include "blk.h"
1686db1e29SJens Axboe 
176728cb0eSJens Axboe unsigned long blk_max_low_pfn;
1886db1e29SJens Axboe EXPORT_SYMBOL(blk_max_low_pfn);
196728cb0eSJens Axboe 
206728cb0eSJens Axboe unsigned long blk_max_pfn;
2186db1e29SJens Axboe 
2286db1e29SJens Axboe /**
2386db1e29SJens Axboe  * blk_queue_prep_rq - set a prepare_request function for queue
2486db1e29SJens Axboe  * @q:		queue
2586db1e29SJens Axboe  * @pfn:	prepare_request function
2686db1e29SJens Axboe  *
2786db1e29SJens Axboe  * It's possible for a queue to register a prepare_request callback which
2886db1e29SJens Axboe  * is invoked before the request is handed to the request_fn. The goal of
2986db1e29SJens Axboe  * the function is to prepare a request for I/O, it can be used to build a
3086db1e29SJens Axboe  * the function is to prepare a request for I/O; it can be used, for
3186db1e29SJens Axboe  * instance, to build a cdb from the request data.
3286db1e29SJens Axboe  */
3386db1e29SJens Axboe void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
3486db1e29SJens Axboe {
3586db1e29SJens Axboe 	q->prep_rq_fn = pfn;
3686db1e29SJens Axboe }
3786db1e29SJens Axboe EXPORT_SYMBOL(blk_queue_prep_rq);
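
/*
 * Usage sketch (hypothetical "mydrv" driver, not taken from this file):
 * a prepare_request hook that builds a command block before the request
 * reaches the request_fn.  mydrv_build_cdb() is an invented helper;
 * BLKPREP_OK and BLKPREP_KILL are the standard prep return codes from
 * <linux/blkdev.h>.
 *
 *	static int mydrv_prep_rq(struct request_queue *q, struct request *rq)
 *	{
 *		if (!mydrv_build_cdb(rq))
 *			return BLKPREP_KILL;
 *		return BLKPREP_OK;
 *	}
 *
 *	blk_queue_prep_rq(q, mydrv_prep_rq);
 */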
3886db1e29SJens Axboe 
3986db1e29SJens Axboe /**
4028018c24SJames Bottomley  * blk_queue_unprep_rq - set an unprepare_request function for queue
4128018c24SJames Bottomley  * @q:		queue
4228018c24SJames Bottomley  * @ufn:	unprepare_request function
4328018c24SJames Bottomley  *
4428018c24SJames Bottomley  * It's possible for a queue to register an unprepare_request callback
4528018c24SJames Bottomley  * which is invoked before the request is finally completed. The goal
4628018c24SJames Bottomley  * of the function is to deallocate any data that was allocated in the
4728018c24SJames Bottomley  * prepare_request callback.
4828018c24SJames Bottomley  *
4928018c24SJames Bottomley  */
5028018c24SJames Bottomley void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
5128018c24SJames Bottomley {
5228018c24SJames Bottomley 	q->unprep_rq_fn = ufn;
5328018c24SJames Bottomley }
5428018c24SJames Bottomley EXPORT_SYMBOL(blk_queue_unprep_rq);
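
/*
 * Continuing the mydrv sketch above: the matching unprepare_request hook
 * releases whatever mydrv_prep_rq() allocated.  mydrv_free_cdb() is an
 * invented helper.
 *
 *	static void mydrv_unprep_rq(struct request_queue *q, struct request *rq)
 *	{
 *		mydrv_free_cdb(rq);
 *	}
 *
 *	blk_queue_unprep_rq(q, mydrv_unprep_rq);
 */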
5528018c24SJames Bottomley 
5628018c24SJames Bottomley /**
5786db1e29SJens Axboe  * blk_queue_merge_bvec - set a merge_bvec function for queue
5886db1e29SJens Axboe  * @q:		queue
5986db1e29SJens Axboe  * @mbfn:	merge_bvec_fn
6086db1e29SJens Axboe  *
6186db1e29SJens Axboe  * Usually queues have static limitations on the max sectors or segments that
6286db1e29SJens Axboe  * we can put in a request. Stacking drivers may have some settings that
6386db1e29SJens Axboe  * are dynamic, and thus we have to query the queue whether it is ok to
6486db1e29SJens Axboe  * add a new bio_vec to a bio at a given offset or not. If the block device
6586db1e29SJens Axboe  * has such limitations, it needs to register a merge_bvec_fn to control
6686db1e29SJens Axboe  * the size of bios sent to it. Note that a block device *must* allow a
6786db1e29SJens Axboe  * single page to be added to an empty bio. The block device driver may want
6886db1e29SJens Axboe  * to use the bio_split() function to deal with these bios. By default
6986db1e29SJens Axboe  * no merge_bvec_fn is defined for a queue, and only the fixed limits are
7086db1e29SJens Axboe  * honored.
7186db1e29SJens Axboe  */
7286db1e29SJens Axboe void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
7386db1e29SJens Axboe {
7486db1e29SJens Axboe 	q->merge_bvec_fn = mbfn;
7586db1e29SJens Axboe }
7686db1e29SJens Axboe EXPORT_SYMBOL(blk_queue_merge_bvec);
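
/*
 * Usage sketch (hypothetical stacking driver): a merge_bvec_fn that caps
 * the total size of a bio at an invented MYDRV_MAX_BIO_BYTES.  The return
 * value is the number of bytes of @biovec the driver will accept; the
 * first page of an empty bio is always accepted, as required above.
 *
 *	static int mydrv_merge_bvec(struct request_queue *q,
 *				    struct bvec_merge_data *bvm,
 *				    struct bio_vec *biovec)
 *	{
 *		if (!bvm->bi_size)
 *			return biovec->bv_len;
 *		if (bvm->bi_size + biovec->bv_len > MYDRV_MAX_BIO_BYTES)
 *			return 0;
 *		return biovec->bv_len;
 *	}
 *
 *	blk_queue_merge_bvec(q, mydrv_merge_bvec);
 */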
7786db1e29SJens Axboe 
7886db1e29SJens Axboe void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
7986db1e29SJens Axboe {
8086db1e29SJens Axboe 	q->softirq_done_fn = fn;
8186db1e29SJens Axboe }
8286db1e29SJens Axboe EXPORT_SYMBOL(blk_queue_softirq_done);
8386db1e29SJens Axboe 
84242f9dcbSJens Axboe void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
85242f9dcbSJens Axboe {
86242f9dcbSJens Axboe 	q->rq_timeout = timeout;
87242f9dcbSJens Axboe }
88242f9dcbSJens Axboe EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
89242f9dcbSJens Axboe 
90242f9dcbSJens Axboe void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
91242f9dcbSJens Axboe {
92242f9dcbSJens Axboe 	q->rq_timed_out_fn = fn;
93242f9dcbSJens Axboe }
94242f9dcbSJens Axboe EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);
95242f9dcbSJens Axboe 
96ef9e3facSKiyoshi Ueda void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
97ef9e3facSKiyoshi Ueda {
98ef9e3facSKiyoshi Ueda 	q->lld_busy_fn = fn;
99ef9e3facSKiyoshi Ueda }
100ef9e3facSKiyoshi Ueda EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
101ef9e3facSKiyoshi Ueda 
10286db1e29SJens Axboe /**
103e475bba2SMartin K. Petersen  * blk_set_default_limits - reset limits to default values
104f740f5caSRandy Dunlap  * @lim:  the queue_limits structure to reset
105e475bba2SMartin K. Petersen  *
106e475bba2SMartin K. Petersen  * Description:
107e475bba2SMartin K. Petersen  *   Returns a queue_limits struct to its default state.  Can be used by
108e475bba2SMartin K. Petersen  *   stacking drivers like DM that stage table swaps and reuse an
109e475bba2SMartin K. Petersen  *   existing device queue.
110e475bba2SMartin K. Petersen  */
111e475bba2SMartin K. Petersen void blk_set_default_limits(struct queue_limits *lim)
112e475bba2SMartin K. Petersen {
1138a78362cSMartin K. Petersen 	lim->max_segments = BLK_MAX_SEGMENTS;
114e475bba2SMartin K. Petersen 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
115eb28d31bSMartin K. Petersen 	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
1165dee2477SMartin K. Petersen 	lim->max_sectors = BLK_DEF_MAX_SECTORS;
1175dee2477SMartin K. Petersen 	lim->max_hw_sectors = INT_MAX;
11886b37281SMartin K. Petersen 	lim->max_discard_sectors = 0;
11986b37281SMartin K. Petersen 	lim->discard_granularity = 0;
12086b37281SMartin K. Petersen 	lim->discard_alignment = 0;
12186b37281SMartin K. Petersen 	lim->discard_misaligned = 0;
12298262f27SMartin K. Petersen 	lim->discard_zeroes_data = -1;
123e475bba2SMartin K. Petersen 	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
1243a02c8e8SMartin K. Petersen 	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
125e475bba2SMartin K. Petersen 	lim->alignment_offset = 0;
126e475bba2SMartin K. Petersen 	lim->io_opt = 0;
127e475bba2SMartin K. Petersen 	lim->misaligned = 0;
128e475bba2SMartin K. Petersen 	lim->no_cluster = 0;
129e475bba2SMartin K. Petersen }
130e475bba2SMartin K. Petersen EXPORT_SYMBOL(blk_set_default_limits);
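
/*
 * Usage sketch (hypothetical stacking driver): reset a scratch
 * queue_limits structure before re-stacking component devices, e.g. on a
 * table swap.  component_bdev_a/b are invented block_device pointers.
 *
 *	struct queue_limits limits;
 *
 *	blk_set_default_limits(&limits);
 *	bdev_stack_limits(&limits, component_bdev_a, 0);
 *	bdev_stack_limits(&limits, component_bdev_b, 0);
 */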
131e475bba2SMartin K. Petersen 
132e475bba2SMartin K. Petersen /**
13386db1e29SJens Axboe  * blk_queue_make_request - define an alternate make_request function for a device
13486db1e29SJens Axboe  * @q:  the request queue for the device to be affected
13586db1e29SJens Axboe  * @mfn: the alternate make_request function
13686db1e29SJens Axboe  *
13786db1e29SJens Axboe  * Description:
13886db1e29SJens Axboe  *    The normal way for &struct bios to be passed to a device
13986db1e29SJens Axboe  *    driver is for them to be collected into requests on a request
14086db1e29SJens Axboe  *    queue, and then to allow the device driver to select requests
14186db1e29SJens Axboe  *    off that queue when it is ready.  This works well for many block
14286db1e29SJens Axboe  *    devices. However some block devices (typically virtual devices
14386db1e29SJens Axboe  *    such as md or lvm) do not benefit from the processing on the
14486db1e29SJens Axboe  *    request queue, and are served best by having the requests passed
14586db1e29SJens Axboe  *    directly to them.  This can be achieved by providing a function
14686db1e29SJens Axboe  *    to blk_queue_make_request().
14786db1e29SJens Axboe  *
14886db1e29SJens Axboe  * Caveat:
14986db1e29SJens Axboe  *    The driver that does this *must* be able to deal appropriately
15086db1e29SJens Axboe  *    with buffers in "highmemory". This can be accomplished either by calling
15186db1e29SJens Axboe  *    __bio_kmap_atomic() to get a temporary kernel mapping or by calling
15286db1e29SJens Axboe  *    blk_queue_bounce() to create a buffer in normal memory.
15386db1e29SJens Axboe  **/
15486db1e29SJens Axboe void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
15586db1e29SJens Axboe {
15686db1e29SJens Axboe 	/*
15786db1e29SJens Axboe 	 * set defaults
15886db1e29SJens Axboe 	 */
15986db1e29SJens Axboe 	q->nr_requests = BLKDEV_MAX_RQ;
1600e435ac2SMilan Broz 
16186db1e29SJens Axboe 	q->make_request_fn = mfn;
16286db1e29SJens Axboe 	blk_queue_dma_alignment(q, 511);
16386db1e29SJens Axboe 	blk_queue_congestion_threshold(q);
16486db1e29SJens Axboe 	q->nr_batching = BLK_BATCH_REQ;
16586db1e29SJens Axboe 
16686db1e29SJens Axboe 	q->unplug_thresh = 4;		/* hmm */
167ad5ebd2fSRandy Dunlap 	q->unplug_delay = msecs_to_jiffies(3);	/* 3 milliseconds */
16886db1e29SJens Axboe 	if (q->unplug_delay == 0)
16986db1e29SJens Axboe 		q->unplug_delay = 1;
17086db1e29SJens Axboe 
17186db1e29SJens Axboe 	q->unplug_timer.function = blk_unplug_timeout;
17286db1e29SJens Axboe 	q->unplug_timer.data = (unsigned long)q;
17386db1e29SJens Axboe 
174e475bba2SMartin K. Petersen 	blk_set_default_limits(&q->limits);
175086fa5ffSMartin K. Petersen 	blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);
176e475bba2SMartin K. Petersen 
17786db1e29SJens Axboe 	/*
178a4e7d464SJens Axboe 	 * If the caller didn't supply a lock, fall back to our embedded
179a4e7d464SJens Axboe 	 * per-queue locks
180a4e7d464SJens Axboe 	 */
181a4e7d464SJens Axboe 	if (!q->queue_lock)
182a4e7d464SJens Axboe 		q->queue_lock = &q->__queue_lock;
183a4e7d464SJens Axboe 
184a4e7d464SJens Axboe 	/*
18586db1e29SJens Axboe 	 * by default assume old behaviour and bounce for any highmem page
18686db1e29SJens Axboe 	 */
18786db1e29SJens Axboe 	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
18886db1e29SJens Axboe }
18986db1e29SJens Axboe EXPORT_SYMBOL(blk_queue_make_request);
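
/*
 * Usage sketch (hypothetical bio-based driver such as a simple ramdisk):
 * allocate a queue and install a make_request function so bios bypass the
 * request queue entirely.  mydrv_handle_bio() is an invented helper that
 * performs the actual transfer and returns 0 or a negative errno.
 *
 *	static int mydrv_make_request(struct request_queue *q, struct bio *bio)
 *	{
 *		int err = mydrv_handle_bio(bio);
 *
 *		bio_endio(bio, err);
 *		return 0;
 *	}
 *
 *	q = blk_alloc_queue(GFP_KERNEL);
 *	blk_queue_make_request(q, mydrv_make_request);
 */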
19086db1e29SJens Axboe 
19186db1e29SJens Axboe /**
19286db1e29SJens Axboe  * blk_queue_bounce_limit - set bounce buffer limit for queue
19386db1e29SJens Axboe  * @q: the request queue for the device
194cd0aca2dSTejun Heo  * @dma_mask: the maximum address the device can handle
19586db1e29SJens Axboe  *
19686db1e29SJens Axboe  * Description:
19786db1e29SJens Axboe  *    Different hardware can have different requirements as to what pages
19886db1e29SJens Axboe  *    it can do I/O directly to. A low level driver can call
19986db1e29SJens Axboe  *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
200cd0aca2dSTejun Heo  *    buffers for doing I/O to pages residing above @dma_mask.
20186db1e29SJens Axboe  **/
202cd0aca2dSTejun Heo void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
20386db1e29SJens Axboe {
204cd0aca2dSTejun Heo 	unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
20586db1e29SJens Axboe 	int dma = 0;
20686db1e29SJens Axboe 
20786db1e29SJens Axboe 	q->bounce_gfp = GFP_NOIO;
20886db1e29SJens Axboe #if BITS_PER_LONG == 64
209cd0aca2dSTejun Heo 	/*
210cd0aca2dSTejun Heo 	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
211cd0aca2dSTejun Heo 	 * some IOMMUs can handle everything, but I don't know of a
212cd0aca2dSTejun Heo 	 * way to test this here.
213cd0aca2dSTejun Heo 	 */
214cd0aca2dSTejun Heo 	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
21586db1e29SJens Axboe 		dma = 1;
216025146e1SMartin K. Petersen 	q->limits.bounce_pfn = max_low_pfn;
21786db1e29SJens Axboe #else
2186728cb0eSJens Axboe 	if (b_pfn < blk_max_low_pfn)
21986db1e29SJens Axboe 		dma = 1;
220025146e1SMartin K. Petersen 	q->limits.bounce_pfn = b_pfn;
22186db1e29SJens Axboe #endif
22286db1e29SJens Axboe 	if (dma) {
22386db1e29SJens Axboe 		init_emergency_isa_pool();
22486db1e29SJens Axboe 		q->bounce_gfp = GFP_NOIO | GFP_DMA;
225025146e1SMartin K. Petersen 		q->limits.bounce_pfn = b_pfn;
22686db1e29SJens Axboe 	}
22786db1e29SJens Axboe }
22886db1e29SJens Axboe EXPORT_SYMBOL(blk_queue_bounce_limit);
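
/*
 * Usage sketch: a hypothetical ISA-era controller that can only DMA into
 * low memory bounces everything above BLK_BOUNCE_ISA, while a device with
 * a full 32-bit DMA mask would pass that mask instead (DMA_BIT_MASK() is
 * from <linux/dma-mapping.h>).
 *
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
 *
 *	blk_queue_bounce_limit(q, DMA_BIT_MASK(32));
 */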
22986db1e29SJens Axboe 
23086db1e29SJens Axboe /**
231086fa5ffSMartin K. Petersen  * blk_queue_max_hw_sectors - set max sectors for a request for this queue
23286db1e29SJens Axboe  * @q:  the request queue for the device
2332800aac1SMartin K. Petersen  * @max_hw_sectors:  max hardware sectors in the usual 512b unit
23486db1e29SJens Axboe  *
23586db1e29SJens Axboe  * Description:
2362800aac1SMartin K. Petersen  *    Enables a low level driver to set a hard upper limit,
2372800aac1SMartin K. Petersen  *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
2382800aac1SMartin K. Petersen  *    the device driver based upon the combined capabilities of I/O
2392800aac1SMartin K. Petersen  *    controller and storage device.
2402800aac1SMartin K. Petersen  *
2412800aac1SMartin K. Petersen  *    max_sectors is a soft limit imposed by the block layer for
2422800aac1SMartin K. Petersen  *    filesystem type requests.  This value can be overridden on a
2432800aac1SMartin K. Petersen  *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
2442800aac1SMartin K. Petersen  *    The soft limit cannot exceed max_hw_sectors.
24586db1e29SJens Axboe  **/
246086fa5ffSMartin K. Petersen void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
24786db1e29SJens Axboe {
2482800aac1SMartin K. Petersen 	if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
2492800aac1SMartin K. Petersen 		max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
25024c03d47SHarvey Harrison 		printk(KERN_INFO "%s: set to minimum %d\n",
2512800aac1SMartin K. Petersen 		       __func__, max_hw_sectors);
25286db1e29SJens Axboe 	}
25386db1e29SJens Axboe 
2542800aac1SMartin K. Petersen 	q->limits.max_hw_sectors = max_hw_sectors;
2552800aac1SMartin K. Petersen 	q->limits.max_sectors = min_t(unsigned int, max_hw_sectors,
2562800aac1SMartin K. Petersen 				      BLK_DEF_MAX_SECTORS);
25786db1e29SJens Axboe }
258086fa5ffSMartin K. Petersen EXPORT_SYMBOL(blk_queue_max_hw_sectors);
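
/*
 * Usage sketch: a hypothetical HBA whose DMA engine moves at most 1024
 * sectors (512KB) per command.  max_sectors stays at the lower
 * BLK_DEF_MAX_SECTORS soft limit unless raised via sysfs.
 *
 *	blk_queue_max_hw_sectors(q, 1024);
 */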
25986db1e29SJens Axboe 
26086db1e29SJens Axboe /**
26167efc925SChristoph Hellwig  * blk_queue_max_discard_sectors - set max sectors for a single discard
26267efc925SChristoph Hellwig  * @q:  the request queue for the device
263c7ebf065SRandy Dunlap  * @max_discard_sectors: maximum number of sectors to discard
26467efc925SChristoph Hellwig  **/
26567efc925SChristoph Hellwig void blk_queue_max_discard_sectors(struct request_queue *q,
26667efc925SChristoph Hellwig 		unsigned int max_discard_sectors)
26767efc925SChristoph Hellwig {
26867efc925SChristoph Hellwig 	q->limits.max_discard_sectors = max_discard_sectors;
26967efc925SChristoph Hellwig }
27067efc925SChristoph Hellwig EXPORT_SYMBOL(blk_queue_max_discard_sectors);
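
/*
 * Usage sketch: a hypothetical SSD that accepts discards of up to 8MB
 * (16384 sectors) per command; such a driver would typically also flag
 * the queue as discard-capable.
 *
 *	blk_queue_max_discard_sectors(q, 16384);
 *	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 */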
27167efc925SChristoph Hellwig 
27267efc925SChristoph Hellwig /**
2738a78362cSMartin K. Petersen  * blk_queue_max_segments - set max hw segments for a request for this queue
27486db1e29SJens Axboe  * @q:  the request queue for the device
27586db1e29SJens Axboe  * @max_segments:  max number of segments
27686db1e29SJens Axboe  *
27786db1e29SJens Axboe  * Description:
27886db1e29SJens Axboe  *    Enables a low level driver to set an upper limit on the number of
2798a78362cSMartin K. Petersen  *    hw data segments in a request.
28086db1e29SJens Axboe  **/
2818a78362cSMartin K. Petersen void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
28286db1e29SJens Axboe {
28386db1e29SJens Axboe 	if (!max_segments) {
28486db1e29SJens Axboe 		max_segments = 1;
28524c03d47SHarvey Harrison 		printk(KERN_INFO "%s: set to minimum %d\n",
28624c03d47SHarvey Harrison 		       __func__, max_segments);
28786db1e29SJens Axboe 	}
28886db1e29SJens Axboe 
2898a78362cSMartin K. Petersen 	q->limits.max_segments = max_segments;
29086db1e29SJens Axboe }
2918a78362cSMartin K. Petersen EXPORT_SYMBOL(blk_queue_max_segments);
29286db1e29SJens Axboe 
29386db1e29SJens Axboe /**
29486db1e29SJens Axboe  * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
29586db1e29SJens Axboe  * @q:  the request queue for the device
29686db1e29SJens Axboe  * @max_size:  max size of segment in bytes
29786db1e29SJens Axboe  *
29886db1e29SJens Axboe  * Description:
29986db1e29SJens Axboe  *    Enables a low level driver to set an upper limit on the size of a
30086db1e29SJens Axboe  *    coalesced segment
30186db1e29SJens Axboe  **/
30286db1e29SJens Axboe void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
30386db1e29SJens Axboe {
30486db1e29SJens Axboe 	if (max_size < PAGE_CACHE_SIZE) {
30586db1e29SJens Axboe 		max_size = PAGE_CACHE_SIZE;
30624c03d47SHarvey Harrison 		printk(KERN_INFO "%s: set to minimum %d\n",
30724c03d47SHarvey Harrison 		       __func__, max_size);
30886db1e29SJens Axboe 	}
30986db1e29SJens Axboe 
310025146e1SMartin K. Petersen 	q->limits.max_segment_size = max_size;
31186db1e29SJens Axboe }
31286db1e29SJens Axboe EXPORT_SYMBOL(blk_queue_max_segment_size);
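
/*
 * Usage sketch covering this helper and blk_queue_max_segments() above:
 * a hypothetical controller with a 128-entry scatter-gather table whose
 * entries can each address at most 64KB.
 *
 *	blk_queue_max_segments(q, 128);
 *	blk_queue_max_segment_size(q, 64 * 1024);
 */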
31386db1e29SJens Axboe 
31486db1e29SJens Axboe /**
315e1defc4fSMartin K. Petersen  * blk_queue_logical_block_size - set logical block size for the queue
31686db1e29SJens Axboe  * @q:  the request queue for the device
317e1defc4fSMartin K. Petersen  * @size:  the logical block size, in bytes
31886db1e29SJens Axboe  *
31986db1e29SJens Axboe  * Description:
320e1defc4fSMartin K. Petersen  *   This should be set to the lowest possible block size that the
321e1defc4fSMartin K. Petersen  *   storage device can address.  The default of 512 covers most
322e1defc4fSMartin K. Petersen  *   hardware.
32386db1e29SJens Axboe  **/
324e1defc4fSMartin K. Petersen void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
32586db1e29SJens Axboe {
326025146e1SMartin K. Petersen 	q->limits.logical_block_size = size;
327c72758f3SMartin K. Petersen 
328c72758f3SMartin K. Petersen 	if (q->limits.physical_block_size < size)
329c72758f3SMartin K. Petersen 		q->limits.physical_block_size = size;
330c72758f3SMartin K. Petersen 
331c72758f3SMartin K. Petersen 	if (q->limits.io_min < q->limits.physical_block_size)
332c72758f3SMartin K. Petersen 		q->limits.io_min = q->limits.physical_block_size;
33386db1e29SJens Axboe }
334e1defc4fSMartin K. Petersen EXPORT_SYMBOL(blk_queue_logical_block_size);
33586db1e29SJens Axboe 
336c72758f3SMartin K. Petersen /**
337c72758f3SMartin K. Petersen  * blk_queue_physical_block_size - set physical block size for the queue
338c72758f3SMartin K. Petersen  * @q:  the request queue for the device
339c72758f3SMartin K. Petersen  * @size:  the physical block size, in bytes
340c72758f3SMartin K. Petersen  *
341c72758f3SMartin K. Petersen  * Description:
342c72758f3SMartin K. Petersen  *   This should be set to the lowest possible sector size that the
343c72758f3SMartin K. Petersen  *   hardware can operate on without reverting to read-modify-write
344c72758f3SMartin K. Petersen  *   operations.
345c72758f3SMartin K. Petersen  */
346c72758f3SMartin K. Petersen void blk_queue_physical_block_size(struct request_queue *q, unsigned short size)
347c72758f3SMartin K. Petersen {
348c72758f3SMartin K. Petersen 	q->limits.physical_block_size = size;
349c72758f3SMartin K. Petersen 
350c72758f3SMartin K. Petersen 	if (q->limits.physical_block_size < q->limits.logical_block_size)
351c72758f3SMartin K. Petersen 		q->limits.physical_block_size = q->limits.logical_block_size;
352c72758f3SMartin K. Petersen 
353c72758f3SMartin K. Petersen 	if (q->limits.io_min < q->limits.physical_block_size)
354c72758f3SMartin K. Petersen 		q->limits.io_min = q->limits.physical_block_size;
355c72758f3SMartin K. Petersen }
356c72758f3SMartin K. Petersen EXPORT_SYMBOL(blk_queue_physical_block_size);
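
/*
 * Usage sketch covering this helper and blk_queue_logical_block_size()
 * above: a hypothetical 512-byte-emulation drive that addresses 512b
 * logical sectors but reads and writes 4KB physical blocks internally.
 *
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_physical_block_size(q, 4096);
 */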
357c72758f3SMartin K. Petersen 
358c72758f3SMartin K. Petersen /**
359c72758f3SMartin K. Petersen  * blk_queue_alignment_offset - set physical block alignment offset
360c72758f3SMartin K. Petersen  * @q:	the request queue for the device
3618ebf9756SRandy Dunlap  * @offset: alignment offset in bytes
362c72758f3SMartin K. Petersen  *
363c72758f3SMartin K. Petersen  * Description:
364c72758f3SMartin K. Petersen  *   Some devices are naturally misaligned to compensate for things like
365c72758f3SMartin K. Petersen  *   the legacy DOS partition table 63-sector offset.  Low-level drivers
366c72758f3SMartin K. Petersen  *   should call this function for devices whose first sector is not
367c72758f3SMartin K. Petersen  *   naturally aligned.
368c72758f3SMartin K. Petersen  */
369c72758f3SMartin K. Petersen void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
370c72758f3SMartin K. Petersen {
371c72758f3SMartin K. Petersen 	q->limits.alignment_offset =
372c72758f3SMartin K. Petersen 		offset & (q->limits.physical_block_size - 1);
373c72758f3SMartin K. Petersen 	q->limits.misaligned = 0;
374c72758f3SMartin K. Petersen }
375c72758f3SMartin K. Petersen EXPORT_SYMBOL(blk_queue_alignment_offset);
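
/*
 * Usage sketch: the hypothetical 4KB-physical drive above, factory
 * formatted so that LBA 0 sits 3584 bytes (7 x 512b) into a physical
 * block to line up with a legacy 63-sector partition start.
 *
 *	blk_queue_alignment_offset(q, 3584);
 */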
376c72758f3SMartin K. Petersen 
377c72758f3SMartin K. Petersen /**
3787c958e32SMartin K. Petersen  * blk_limits_io_min - set minimum request size for a device
3797c958e32SMartin K. Petersen  * @limits: the queue limits
3807c958e32SMartin K. Petersen  * @min:  smallest I/O size in bytes
3817c958e32SMartin K. Petersen  *
3827c958e32SMartin K. Petersen  * Description:
3837c958e32SMartin K. Petersen  *   Some devices have an internal block size bigger than the reported
3847c958e32SMartin K. Petersen  *   hardware sector size.  This function can be used to signal the
3857c958e32SMartin K. Petersen  *   smallest I/O the device can perform without incurring a performance
3867c958e32SMartin K. Petersen  *   penalty.
3877c958e32SMartin K. Petersen  */
3887c958e32SMartin K. Petersen void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
3897c958e32SMartin K. Petersen {
3907c958e32SMartin K. Petersen 	limits->io_min = min;
3917c958e32SMartin K. Petersen 
3927c958e32SMartin K. Petersen 	if (limits->io_min < limits->logical_block_size)
3937c958e32SMartin K. Petersen 		limits->io_min = limits->logical_block_size;
3947c958e32SMartin K. Petersen 
3957c958e32SMartin K. Petersen 	if (limits->io_min < limits->physical_block_size)
3967c958e32SMartin K. Petersen 		limits->io_min = limits->physical_block_size;
3977c958e32SMartin K. Petersen }
3987c958e32SMartin K. Petersen EXPORT_SYMBOL(blk_limits_io_min);
3997c958e32SMartin K. Petersen 
4007c958e32SMartin K. Petersen /**
401c72758f3SMartin K. Petersen  * blk_queue_io_min - set minimum request size for the queue
402c72758f3SMartin K. Petersen  * @q:	the request queue for the device
4038ebf9756SRandy Dunlap  * @min:  smallest I/O size in bytes
404c72758f3SMartin K. Petersen  *
405c72758f3SMartin K. Petersen  * Description:
4067e5f5fb0SMartin K. Petersen  *   Storage devices may report a granularity or preferred minimum I/O
4077e5f5fb0SMartin K. Petersen  *   size which is the smallest request the device can perform without
4087e5f5fb0SMartin K. Petersen  *   incurring a performance penalty.  For disk drives this is often the
4097e5f5fb0SMartin K. Petersen  *   physical block size.  For RAID arrays it is often the stripe chunk
4107e5f5fb0SMartin K. Petersen  *   size.  A properly aligned multiple of minimum_io_size is the
4117e5f5fb0SMartin K. Petersen  *   preferred request size for workloads where a high number of I/O
4127e5f5fb0SMartin K. Petersen  *   operations is desired.
413c72758f3SMartin K. Petersen  */
414c72758f3SMartin K. Petersen void blk_queue_io_min(struct request_queue *q, unsigned int min)
415c72758f3SMartin K. Petersen {
4167c958e32SMartin K. Petersen 	blk_limits_io_min(&q->limits, min);
417c72758f3SMartin K. Petersen }
418c72758f3SMartin K. Petersen EXPORT_SYMBOL(blk_queue_io_min);
419c72758f3SMartin K. Petersen 
420c72758f3SMartin K. Petersen /**
4213c5820c7SMartin K. Petersen  * blk_limits_io_opt - set optimal request size for a device
4223c5820c7SMartin K. Petersen  * @limits: the queue limits
4233c5820c7SMartin K. Petersen  * @opt:  optimal request size in bytes
4243c5820c7SMartin K. Petersen  *
4253c5820c7SMartin K. Petersen  * Description:
4263c5820c7SMartin K. Petersen  *   Storage devices may report an optimal I/O size, which is the
4273c5820c7SMartin K. Petersen  *   device's preferred unit for sustained I/O.  This is rarely reported
4283c5820c7SMartin K. Petersen  *   for disk drives.  For RAID arrays it is usually the stripe width or
4293c5820c7SMartin K. Petersen  *   the internal track size.  A properly aligned multiple of
4303c5820c7SMartin K. Petersen  *   optimal_io_size is the preferred request size for workloads where
4313c5820c7SMartin K. Petersen  *   sustained throughput is desired.
4323c5820c7SMartin K. Petersen  */
4333c5820c7SMartin K. Petersen void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
4343c5820c7SMartin K. Petersen {
4353c5820c7SMartin K. Petersen 	limits->io_opt = opt;
4363c5820c7SMartin K. Petersen }
4373c5820c7SMartin K. Petersen EXPORT_SYMBOL(blk_limits_io_opt);
4383c5820c7SMartin K. Petersen 
4393c5820c7SMartin K. Petersen /**
440c72758f3SMartin K. Petersen  * blk_queue_io_opt - set optimal request size for the queue
441c72758f3SMartin K. Petersen  * @q:	the request queue for the device
4428ebf9756SRandy Dunlap  * @opt:  optimal request size in bytes
443c72758f3SMartin K. Petersen  *
444c72758f3SMartin K. Petersen  * Description:
4457e5f5fb0SMartin K. Petersen  *   Storage devices may report an optimal I/O size, which is the
4467e5f5fb0SMartin K. Petersen  *   device's preferred unit for sustained I/O.  This is rarely reported
4477e5f5fb0SMartin K. Petersen  *   for disk drives.  For RAID arrays it is usually the stripe width or
4487e5f5fb0SMartin K. Petersen  *   the internal track size.  A properly aligned multiple of
4497e5f5fb0SMartin K. Petersen  *   optimal_io_size is the preferred request size for workloads where
4507e5f5fb0SMartin K. Petersen  *   sustained throughput is desired.
451c72758f3SMartin K. Petersen  */
452c72758f3SMartin K. Petersen void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
453c72758f3SMartin K. Petersen {
4543c5820c7SMartin K. Petersen 	blk_limits_io_opt(&q->limits, opt);
455c72758f3SMartin K. Petersen }
456c72758f3SMartin K. Petersen EXPORT_SYMBOL(blk_queue_io_opt);
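
/*
 * Usage sketch covering blk_queue_io_min() and blk_queue_io_opt(): a
 * hypothetical RAID5 array with a 64KB chunk across four data disks
 * reports the chunk as its minimum I/O and the full stripe width as its
 * optimal I/O.
 *
 *	blk_queue_io_min(q, 64 * 1024);
 *	blk_queue_io_opt(q, 4 * 64 * 1024);
 */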
457c72758f3SMartin K. Petersen 
45886db1e29SJens Axboe /*
45986db1e29SJens Axboe  * Returns the minimum that is _not_ zero, unless both are zero.
46086db1e29SJens Axboe  */
46186db1e29SJens Axboe #define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min((l), (r))))
46286db1e29SJens Axboe 
46386db1e29SJens Axboe /**
46486db1e29SJens Axboe  * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
46586db1e29SJens Axboe  * @t:	the stacking driver (top)
46686db1e29SJens Axboe  * @b:  the underlying device (bottom)
46786db1e29SJens Axboe  **/
46886db1e29SJens Axboe void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
46986db1e29SJens Axboe {
470fef24667SMartin K. Petersen 	blk_stack_limits(&t->limits, &b->limits, 0);
471025146e1SMartin K. Petersen 
472e7e72bf6SNeil Brown 	if (!t->queue_lock)
473e7e72bf6SNeil Brown 		WARN_ON_ONCE(1);
474e7e72bf6SNeil Brown 	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
475e7e72bf6SNeil Brown 		unsigned long flags;
476e7e72bf6SNeil Brown 		spin_lock_irqsave(t->queue_lock, flags);
47775ad23bcSNick Piggin 		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
478e7e72bf6SNeil Brown 		spin_unlock_irqrestore(t->queue_lock, flags);
479e7e72bf6SNeil Brown 	}
48086db1e29SJens Axboe }
48186db1e29SJens Axboe EXPORT_SYMBOL(blk_queue_stack_limits);
48286db1e29SJens Axboe 
48386db1e29SJens Axboe /**
484c72758f3SMartin K. Petersen  * blk_stack_limits - adjust queue_limits for stacked devices
48581744ee4SMartin K. Petersen  * @t:	the stacking driver limits (top device)
48681744ee4SMartin K. Petersen  * @b:  the underlying queue limits (bottom, component device)
487e03a72e1SMartin K. Petersen  * @start:  first data sector within component device
488c72758f3SMartin K. Petersen  *
489c72758f3SMartin K. Petersen  * Description:
49081744ee4SMartin K. Petersen  *    This function is used by stacking drivers like MD and DM to ensure
49181744ee4SMartin K. Petersen  *    that all component devices have compatible block sizes and
49281744ee4SMartin K. Petersen  *    alignments.  The stacking driver must provide a queue_limits
49381744ee4SMartin K. Petersen  *    struct (top) and then iteratively call the stacking function for
49481744ee4SMartin K. Petersen  *    all component (bottom) devices.  The stacking function will
49581744ee4SMartin K. Petersen  *    attempt to combine the values and ensure proper alignment.
49681744ee4SMartin K. Petersen  *
49781744ee4SMartin K. Petersen  *    Returns 0 if the top and bottom queue_limits are compatible.  The
49881744ee4SMartin K. Petersen  *    top device's block sizes and alignment offsets may be adjusted to
49981744ee4SMartin K. Petersen  *    ensure alignment with the bottom device. If no compatible sizes
50081744ee4SMartin K. Petersen  *    and alignments exist, -1 is returned and the resulting top
50181744ee4SMartin K. Petersen  *    queue_limits will have the misaligned flag set to indicate that
50281744ee4SMartin K. Petersen  *    the alignment_offset is undefined.
503c72758f3SMartin K. Petersen  */
504c72758f3SMartin K. Petersen int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
505e03a72e1SMartin K. Petersen 		     sector_t start)
506c72758f3SMartin K. Petersen {
507e03a72e1SMartin K. Petersen 	unsigned int top, bottom, alignment, ret = 0;
50886b37281SMartin K. Petersen 
509c72758f3SMartin K. Petersen 	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
510c72758f3SMartin K. Petersen 	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
51177634f33SMartin K. Petersen 	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
512c72758f3SMartin K. Petersen 
513c72758f3SMartin K. Petersen 	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
514c72758f3SMartin K. Petersen 					    b->seg_boundary_mask);
515c72758f3SMartin K. Petersen 
5168a78362cSMartin K. Petersen 	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
517c72758f3SMartin K. Petersen 
518c72758f3SMartin K. Petersen 	t->max_segment_size = min_not_zero(t->max_segment_size,
519c72758f3SMartin K. Petersen 					   b->max_segment_size);
520c72758f3SMartin K. Petersen 
521fe0b393fSMartin K. Petersen 	t->misaligned |= b->misaligned;
522fe0b393fSMartin K. Petersen 
523e03a72e1SMartin K. Petersen 	alignment = queue_limit_alignment_offset(b, start);
5249504e086SMartin K. Petersen 
52581744ee4SMartin K. Petersen 	/* Bottom device has different alignment.  Check that it is
52681744ee4SMartin K. Petersen 	 * compatible with the current top alignment.
52781744ee4SMartin K. Petersen 	 */
5289504e086SMartin K. Petersen 	if (t->alignment_offset != alignment) {
5299504e086SMartin K. Petersen 
5309504e086SMartin K. Petersen 		top = max(t->physical_block_size, t->io_min)
5319504e086SMartin K. Petersen 			+ t->alignment_offset;
53281744ee4SMartin K. Petersen 		bottom = max(b->physical_block_size, b->io_min) + alignment;
5339504e086SMartin K. Petersen 
53481744ee4SMartin K. Petersen 		/* Verify that top and bottom intervals line up */
535fe0b393fSMartin K. Petersen 		if (max(top, bottom) & (min(top, bottom) - 1)) {
5369504e086SMartin K. Petersen 			t->misaligned = 1;
537fe0b393fSMartin K. Petersen 			ret = -1;
538fe0b393fSMartin K. Petersen 		}
5399504e086SMartin K. Petersen 	}
5409504e086SMartin K. Petersen 
541c72758f3SMartin K. Petersen 	t->logical_block_size = max(t->logical_block_size,
542c72758f3SMartin K. Petersen 				    b->logical_block_size);
543c72758f3SMartin K. Petersen 
544c72758f3SMartin K. Petersen 	t->physical_block_size = max(t->physical_block_size,
545c72758f3SMartin K. Petersen 				     b->physical_block_size);
546c72758f3SMartin K. Petersen 
547c72758f3SMartin K. Petersen 	t->io_min = max(t->io_min, b->io_min);
5489504e086SMartin K. Petersen 	t->io_opt = lcm(t->io_opt, b->io_opt);
5499504e086SMartin K. Petersen 
550c72758f3SMartin K. Petersen 	t->no_cluster |= b->no_cluster;
55198262f27SMartin K. Petersen 	t->discard_zeroes_data &= b->discard_zeroes_data;
552c72758f3SMartin K. Petersen 
55381744ee4SMartin K. Petersen 	/* Physical block size a multiple of the logical block size? */
5549504e086SMartin K. Petersen 	if (t->physical_block_size & (t->logical_block_size - 1)) {
5559504e086SMartin K. Petersen 		t->physical_block_size = t->logical_block_size;
556c72758f3SMartin K. Petersen 		t->misaligned = 1;
557fe0b393fSMartin K. Petersen 		ret = -1;
55886b37281SMartin K. Petersen 	}
55986b37281SMartin K. Petersen 
56081744ee4SMartin K. Petersen 	/* Minimum I/O a multiple of the physical block size? */
5619504e086SMartin K. Petersen 	if (t->io_min & (t->physical_block_size - 1)) {
5629504e086SMartin K. Petersen 		t->io_min = t->physical_block_size;
5639504e086SMartin K. Petersen 		t->misaligned = 1;
564fe0b393fSMartin K. Petersen 		ret = -1;
5659504e086SMartin K. Petersen 	}
5669504e086SMartin K. Petersen 
56781744ee4SMartin K. Petersen 	/* Optimal I/O a multiple of the physical block size? */
5689504e086SMartin K. Petersen 	if (t->io_opt & (t->physical_block_size - 1)) {
5699504e086SMartin K. Petersen 		t->io_opt = 0;
5709504e086SMartin K. Petersen 		t->misaligned = 1;
571fe0b393fSMartin K. Petersen 		ret = -1;
5729504e086SMartin K. Petersen 	}
5739504e086SMartin K. Petersen 
57481744ee4SMartin K. Petersen 	/* Find lowest common alignment_offset */
5759504e086SMartin K. Petersen 	t->alignment_offset = lcm(t->alignment_offset, alignment)
5769504e086SMartin K. Petersen 		& (max(t->physical_block_size, t->io_min) - 1);
5779504e086SMartin K. Petersen 
57881744ee4SMartin K. Petersen 	/* Verify that new alignment_offset is on a logical block boundary */
579fe0b393fSMartin K. Petersen 	if (t->alignment_offset & (t->logical_block_size - 1)) {
5809504e086SMartin K. Petersen 		t->misaligned = 1;
581fe0b393fSMartin K. Petersen 		ret = -1;
582fe0b393fSMartin K. Petersen 	}
5839504e086SMartin K. Petersen 
5849504e086SMartin K. Petersen 	/* Discard alignment and granularity */
5859504e086SMartin K. Petersen 	if (b->discard_granularity) {
586e03a72e1SMartin K. Petersen 		alignment = queue_limit_discard_alignment(b, start);
5879504e086SMartin K. Petersen 
5889504e086SMartin K. Petersen 		if (t->discard_granularity != 0 &&
5899504e086SMartin K. Petersen 		    t->discard_alignment != alignment) {
5909504e086SMartin K. Petersen 			top = t->discard_granularity + t->discard_alignment;
5919504e086SMartin K. Petersen 			bottom = b->discard_granularity + alignment;
5929504e086SMartin K. Petersen 
5939504e086SMartin K. Petersen 			/* Verify that top and bottom intervals line up */
5949504e086SMartin K. Petersen 			if (max(top, bottom) & (min(top, bottom) - 1))
59586b37281SMartin K. Petersen 				t->discard_misaligned = 1;
596c72758f3SMartin K. Petersen 		}
597c72758f3SMartin K. Petersen 
59881744ee4SMartin K. Petersen 		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
59981744ee4SMartin K. Petersen 						      b->max_discard_sectors);
6009504e086SMartin K. Petersen 		t->discard_granularity = max(t->discard_granularity,
60186b37281SMartin K. Petersen 					     b->discard_granularity);
6029504e086SMartin K. Petersen 		t->discard_alignment = lcm(t->discard_alignment, alignment) &
6039504e086SMartin K. Petersen 			(t->discard_granularity - 1);
6049504e086SMartin K. Petersen 	}
60570dd5bf3SMartin K. Petersen 
606fe0b393fSMartin K. Petersen 	return ret;
607c72758f3SMartin K. Petersen }
6085d85d324SMike Snitzer EXPORT_SYMBOL(blk_stack_limits);
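
/*
 * Usage sketch (hypothetical stacking driver): fold two component queues
 * into a fresh set of top-level limits and warn if no compatible
 * alignment exists.  bdev0/bdev1 and the 2048-sector start offset are
 * invented.
 *
 *	struct queue_limits t;
 *
 *	blk_set_default_limits(&t);
 *	if (blk_stack_limits(&t, &bdev_get_queue(bdev0)->limits, 0) < 0 ||
 *	    blk_stack_limits(&t, &bdev_get_queue(bdev1)->limits, 2048) < 0)
 *		printk(KERN_WARNING "mydrv: component devices misaligned\n");
 */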
609c72758f3SMartin K. Petersen 
610c72758f3SMartin K. Petersen /**
61117be8c24SMartin K. Petersen  * bdev_stack_limits - adjust queue limits for stacked drivers
61217be8c24SMartin K. Petersen  * @t:	the stacking driver limits (top device)
61317be8c24SMartin K. Petersen  * @bdev:  the component block_device (bottom)
61417be8c24SMartin K. Petersen  * @start:  first data sector within component device
61517be8c24SMartin K. Petersen  *
61617be8c24SMartin K. Petersen  * Description:
61717be8c24SMartin K. Petersen  *    Merges queue limits for a top device and a block_device.  Returns
61817be8c24SMartin K. Petersen  *    0 if alignment didn't change.  Returns -1 if adding the bottom
61917be8c24SMartin K. Petersen  *    device caused misalignment.
62017be8c24SMartin K. Petersen  */
62117be8c24SMartin K. Petersen int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
62217be8c24SMartin K. Petersen 		      sector_t start)
62317be8c24SMartin K. Petersen {
62417be8c24SMartin K. Petersen 	struct request_queue *bq = bdev_get_queue(bdev);
62517be8c24SMartin K. Petersen 
62617be8c24SMartin K. Petersen 	start += get_start_sect(bdev);
62717be8c24SMartin K. Petersen 
628e03a72e1SMartin K. Petersen 	return blk_stack_limits(t, &bq->limits, start);
62917be8c24SMartin K. Petersen }
63017be8c24SMartin K. Petersen EXPORT_SYMBOL(bdev_stack_limits);
63117be8c24SMartin K. Petersen 
63217be8c24SMartin K. Petersen /**
633c72758f3SMartin K. Petersen  * disk_stack_limits - adjust queue limits for stacked drivers
63477634f33SMartin K. Petersen  * @disk:  MD/DM gendisk (top)
635c72758f3SMartin K. Petersen  * @bdev:  the underlying block device (bottom)
636c72758f3SMartin K. Petersen  * @offset:  offset to beginning of data within component device
637c72758f3SMartin K. Petersen  *
638c72758f3SMartin K. Petersen  * Description:
639e03a72e1SMartin K. Petersen  *    Merges the limits for a top level gendisk and a bottom level
640e03a72e1SMartin K. Petersen  *    block_device.
641c72758f3SMartin K. Petersen  */
642c72758f3SMartin K. Petersen void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
643c72758f3SMartin K. Petersen 		       sector_t offset)
644c72758f3SMartin K. Petersen {
645c72758f3SMartin K. Petersen 	struct request_queue *t = disk->queue;
646c72758f3SMartin K. Petersen 	struct request_queue *b = bdev_get_queue(bdev);
647c72758f3SMartin K. Petersen 
648e03a72e1SMartin K. Petersen 	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
649c72758f3SMartin K. Petersen 		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
650c72758f3SMartin K. Petersen 
651c72758f3SMartin K. Petersen 		disk_name(disk, 0, top);
652c72758f3SMartin K. Petersen 		bdevname(bdev, bottom);
653c72758f3SMartin K. Petersen 
654c72758f3SMartin K. Petersen 		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
655c72758f3SMartin K. Petersen 		       top, bottom);
656c72758f3SMartin K. Petersen 	}
657c72758f3SMartin K. Petersen 
658c72758f3SMartin K. Petersen 	if (!t->queue_lock)
659c72758f3SMartin K. Petersen 		WARN_ON_ONCE(1);
660c72758f3SMartin K. Petersen 	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
661c72758f3SMartin K. Petersen 		unsigned long flags;
662c72758f3SMartin K. Petersen 
663c72758f3SMartin K. Petersen 		spin_lock_irqsave(t->queue_lock, flags);
664c72758f3SMartin K. Petersen 		if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
665c72758f3SMartin K. Petersen 			queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
666c72758f3SMartin K. Petersen 		spin_unlock_irqrestore(t->queue_lock, flags);
667c72758f3SMartin K. Petersen 	}
668c72758f3SMartin K. Petersen }
669c72758f3SMartin K. Petersen EXPORT_SYMBOL(disk_stack_limits);
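
/*
 * Usage sketch (hypothetical MD/DM-style driver): after adding a
 * component device, fold its limits into the top-level gendisk, passing
 * the byte offset at which the component's data area starts.
 * my_gendisk, component_bdev and data_start_sectors are invented.
 *
 *	disk_stack_limits(my_gendisk, component_bdev,
 *			  data_start_sectors << 9);
 */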
670c72758f3SMartin K. Petersen 
671c72758f3SMartin K. Petersen /**
672e3790c7dSTejun Heo  * blk_queue_dma_pad - set pad mask
673e3790c7dSTejun Heo  * @q:     the request queue for the device
674e3790c7dSTejun Heo  * @mask:  pad mask
675e3790c7dSTejun Heo  *
67627f8221aSFUJITA Tomonori  * Set dma pad mask.
677e3790c7dSTejun Heo  *
67827f8221aSFUJITA Tomonori  * Appending a pad buffer to a request modifies the last entry of the
67927f8221aSFUJITA Tomonori  * scatter list so that it includes the pad buffer.
680e3790c7dSTejun Heo  **/
681e3790c7dSTejun Heo void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
682e3790c7dSTejun Heo {
683e3790c7dSTejun Heo 	q->dma_pad_mask = mask;
684e3790c7dSTejun Heo }
685e3790c7dSTejun Heo EXPORT_SYMBOL(blk_queue_dma_pad);
686e3790c7dSTejun Heo 
687e3790c7dSTejun Heo /**
68827f8221aSFUJITA Tomonori  * blk_queue_update_dma_pad - update pad mask
68927f8221aSFUJITA Tomonori  * @q:     the request queue for the device
69027f8221aSFUJITA Tomonori  * @mask:  pad mask
69127f8221aSFUJITA Tomonori  *
69227f8221aSFUJITA Tomonori  * Update dma pad mask.
69327f8221aSFUJITA Tomonori  *
69427f8221aSFUJITA Tomonori  * Appending a pad buffer to a request modifies the last entry of the
69527f8221aSFUJITA Tomonori  * scatter list so that it includes the pad buffer.
69627f8221aSFUJITA Tomonori  **/
69727f8221aSFUJITA Tomonori void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
69827f8221aSFUJITA Tomonori {
69927f8221aSFUJITA Tomonori 	if (mask > q->dma_pad_mask)
70027f8221aSFUJITA Tomonori 		q->dma_pad_mask = mask;
70127f8221aSFUJITA Tomonori }
70227f8221aSFUJITA Tomonori EXPORT_SYMBOL(blk_queue_update_dma_pad);
70327f8221aSFUJITA Tomonori 
70427f8221aSFUJITA Tomonori /**
70586db1e29SJens Axboe  * blk_queue_dma_drain - Set up a drain buffer for excess dma.
70686db1e29SJens Axboe  * @q:  the request queue for the device
7072fb98e84STejun Heo  * @dma_drain_needed: fn which returns non-zero if drain is necessary
70886db1e29SJens Axboe  * @buf:	physically contiguous buffer
70986db1e29SJens Axboe  * @size:	size of the buffer in bytes
71086db1e29SJens Axboe  *
71186db1e29SJens Axboe  * Some devices have excess DMA problems and can't simply discard (or
71286db1e29SJens Axboe  * zero fill) the unwanted piece of the transfer.  They have to have a
71386db1e29SJens Axboe  * real area of memory to transfer it into.  The use case for this is
71486db1e29SJens Axboe  * ATAPI devices in DMA mode.  If the packet command causes a transfer
71586db1e29SJens Axboe  * bigger than the transfer size some HBAs will lock up if there
71686db1e29SJens Axboe  * aren't DMA elements to contain the excess transfer.  What this API
71786db1e29SJens Axboe  * does is adjust the queue so that the buf is always appended
71886db1e29SJens Axboe  * silently to the scatterlist.
71986db1e29SJens Axboe  *
7208a78362cSMartin K. Petersen  * Note: This routine adjusts max_segments to make room for appending
7218a78362cSMartin K. Petersen  * the drain buffer.  If you call blk_queue_max_segments() after calling
7228a78362cSMartin K. Petersen  * this routine, you must set the limit to one fewer than your device
7238a78362cSMartin K. Petersen  * can support otherwise there won't be room for the drain buffer.
72486db1e29SJens Axboe  */
725448da4d2SHarvey Harrison int blk_queue_dma_drain(struct request_queue *q,
7262fb98e84STejun Heo 			       dma_drain_needed_fn *dma_drain_needed,
7272fb98e84STejun Heo 			       void *buf, unsigned int size)
72886db1e29SJens Axboe {
7298a78362cSMartin K. Petersen 	if (queue_max_segments(q) < 2)
73086db1e29SJens Axboe 		return -EINVAL;
73186db1e29SJens Axboe 	/* make room for appending the drain */
7328a78362cSMartin K. Petersen 	blk_queue_max_segments(q, queue_max_segments(q) - 1);
7332fb98e84STejun Heo 	q->dma_drain_needed = dma_drain_needed;
73486db1e29SJens Axboe 	q->dma_drain_buffer = buf;
73586db1e29SJens Axboe 	q->dma_drain_size = size;
73686db1e29SJens Axboe 
73786db1e29SJens Axboe 	return 0;
73886db1e29SJens Axboe }
73986db1e29SJens Axboe EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
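
/*
 * Usage sketch modelled on the ATAPI case described above: register a
 * drain callback that fires for packet commands and hand the block layer
 * a small drain page.  mydrv_drain_needed() and the 512-byte size are
 * invented; REQ_TYPE_BLOCK_PC is the standard packet-command type.
 *
 *	static int mydrv_drain_needed(struct request *rq)
 *	{
 *		return rq->cmd_type == REQ_TYPE_BLOCK_PC;
 *	}
 *
 *	buf = kmalloc(512, GFP_KERNEL);
 *	blk_queue_dma_drain(q, mydrv_drain_needed, buf, 512);
 */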
74086db1e29SJens Axboe 
74186db1e29SJens Axboe /**
74286db1e29SJens Axboe  * blk_queue_segment_boundary - set boundary rules for segment merging
74386db1e29SJens Axboe  * @q:  the request queue for the device
74486db1e29SJens Axboe  * @mask:  the memory boundary mask
74586db1e29SJens Axboe  **/
74686db1e29SJens Axboe void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
74786db1e29SJens Axboe {
74886db1e29SJens Axboe 	if (mask < PAGE_CACHE_SIZE - 1) {
74986db1e29SJens Axboe 		mask = PAGE_CACHE_SIZE - 1;
75024c03d47SHarvey Harrison 		printk(KERN_INFO "%s: set to minimum %lx\n",
75124c03d47SHarvey Harrison 		       __func__, mask);
75286db1e29SJens Axboe 	}
75386db1e29SJens Axboe 
754025146e1SMartin K. Petersen 	q->limits.seg_boundary_mask = mask;
75586db1e29SJens Axboe }
75686db1e29SJens Axboe EXPORT_SYMBOL(blk_queue_segment_boundary);
75786db1e29SJens Axboe 
75886db1e29SJens Axboe /**
75986db1e29SJens Axboe  * blk_queue_dma_alignment - set dma length and memory alignment
76086db1e29SJens Axboe  * @q:     the request queue for the device
76186db1e29SJens Axboe  * @mask:  alignment mask
76286db1e29SJens Axboe  *
76386db1e29SJens Axboe  * Description:
764710027a4SRandy Dunlap  *    Set required memory and length alignment for direct dma transactions.
7658feb4d20SAlan Cox  *    This is used when building direct io requests for the queue.
76686db1e29SJens Axboe  *
76786db1e29SJens Axboe  **/
76886db1e29SJens Axboe void blk_queue_dma_alignment(struct request_queue *q, int mask)
76986db1e29SJens Axboe {
77086db1e29SJens Axboe 	q->dma_alignment = mask;
77186db1e29SJens Axboe }
77286db1e29SJens Axboe EXPORT_SYMBOL(blk_queue_dma_alignment);
77386db1e29SJens Axboe 
77486db1e29SJens Axboe /**
77586db1e29SJens Axboe  * blk_queue_update_dma_alignment - update dma length and memory alignment
77686db1e29SJens Axboe  * @q:     the request queue for the device
77786db1e29SJens Axboe  * @mask:  alignment mask
77886db1e29SJens Axboe  *
77986db1e29SJens Axboe  * Description:
780710027a4SRandy Dunlap  *    Update required memory and length alignment for direct dma transactions.
78186db1e29SJens Axboe  *    If the requested alignment is larger than the current alignment, then
78286db1e29SJens Axboe  *    the current queue alignment is updated to the new value, otherwise it
78386db1e29SJens Axboe  *    is left alone.  The design of this is to allow multiple objects
78486db1e29SJens Axboe  *    (driver, device, transport etc) to set their respective
78586db1e29SJens Axboe  *    alignments without having them interfere.
78686db1e29SJens Axboe  *
78786db1e29SJens Axboe  **/
78886db1e29SJens Axboe void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
78986db1e29SJens Axboe {
79086db1e29SJens Axboe 	BUG_ON(mask > PAGE_SIZE);
79186db1e29SJens Axboe 
79286db1e29SJens Axboe 	if (mask > q->dma_alignment)
79386db1e29SJens Axboe 		q->dma_alignment = mask;
79486db1e29SJens Axboe }
79586db1e29SJens Axboe EXPORT_SYMBOL(blk_queue_update_dma_alignment);
79686db1e29SJens Axboe 
797*4913efe4STejun Heo /**
798*4913efe4STejun Heo  * blk_queue_flush - configure queue's cache flush capability
799*4913efe4STejun Heo  * @q:		the request queue for the device
800*4913efe4STejun Heo  * @flush:	0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
801*4913efe4STejun Heo  *
802*4913efe4STejun Heo  * Tell the block layer the cache flush capability of @q.  If it supports
803*4913efe4STejun Heo  * flushing, REQ_FLUSH should be set.  If it supports bypassing the
804*4913efe4STejun Heo  * write cache for individual writes, REQ_FUA should be set.
805*4913efe4STejun Heo  */
806*4913efe4STejun Heo void blk_queue_flush(struct request_queue *q, unsigned int flush)
807*4913efe4STejun Heo {
808*4913efe4STejun Heo 	WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));
809*4913efe4STejun Heo 
810*4913efe4STejun Heo 	if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
811*4913efe4STejun Heo 		flush &= ~REQ_FUA;
812*4913efe4STejun Heo 
813*4913efe4STejun Heo 	q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
814*4913efe4STejun Heo }
815*4913efe4STejun Heo EXPORT_SYMBOL_GPL(blk_queue_flush);
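
/*
 * Usage sketch: a hypothetical driver for a device with a volatile write
 * cache that also honours FUA writes; a write-through device would simply
 * pass 0.
 *
 *	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
 */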
816*4913efe4STejun Heo 
817aeb3d3a8SHarvey Harrison static int __init blk_settings_init(void)
81886db1e29SJens Axboe {
81986db1e29SJens Axboe 	blk_max_low_pfn = max_low_pfn - 1;
82086db1e29SJens Axboe 	blk_max_pfn = max_pfn - 1;
82186db1e29SJens Axboe 	return 0;
82286db1e29SJens Axboe }
82386db1e29SJens Axboe subsys_initcall(blk_settings_init);
824