xref: /linux/block/blk-settings.c (revision e3790c7d42a545e8fe8b38b513613ca96687b670)
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */

#include "blk.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;
EXPORT_SYMBOL(blk_max_pfn);

/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q:		queue
 * @pfn:	prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback which
 * is invoked before the request is handed to the request_fn. The goal of
 * the function is to prepare a request for I/O; it can be used, for
 * instance, to build a cdb from the request data.
 *
 */
void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
	q->prep_rq_fn = pfn;
}
EXPORT_SYMBOL(blk_queue_prep_rq);
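
/*
 * Example (illustrative sketch, not part of this file): a driver that
 * builds a SCSI-style CDB before its request_fn sees the request might
 * register a prepare_request callback like the hypothetical one below.
 *
 *	static int mydrv_prep_rq(struct request_queue *q, struct request *rq)
 *	{
 *		if (!blk_fs_request(rq))
 *			return BLKPREP_KILL;
 *		mydrv_build_cdb(rq);		(hypothetical helper)
 *		return BLKPREP_OK;
 *	}
 *
 *	blk_queue_prep_rq(q, mydrv_prep_rq);
 */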

/**
 * blk_queue_merge_bvec - set a merge_bvec function for queue
 * @q:		queue
 * @mbfn:	merge_bvec_fn
 *
 * Usually queues have static limitations on the max sectors or segments that
 * we can put in a request. Stacking drivers may have some settings that
 * are dynamic, and thus we have to query the queue whether it is ok to
 * add a new bio_vec to a bio at a given offset or not. If the block device
 * has such limitations, it needs to register a merge_bvec_fn to control
 * the size of bios sent to it. Note that a block device *must* allow a
 * single page to be added to an empty bio. The block device driver may want
 * to use the bio_split() function to deal with these bios. By default
 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
 * honored.
 */
void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
{
	q->merge_bvec_fn = mbfn;
}
EXPORT_SYMBOL(blk_queue_merge_bvec);
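
/*
 * Example (illustrative sketch, with hypothetical names): a stacking
 * driver whose maximum I/O size depends on the starting sector could
 * bound each bio like this.  The callback returns how many bytes of
 * @bvec it accepts at the given offset; the prototype is assumed to
 * match merge_bvec_fn in this kernel version.
 *
 *	static int mydrv_merge_bvec(struct request_queue *q, struct bio *bio,
 *				    struct bio_vec *bvec)
 *	{
 *		unsigned int max = mydrv_max_io_bytes(q->queuedata,
 *						      bio->bi_sector);
 *
 *		if (!bio->bi_size)
 *			return bvec->bv_len;	(empty bio must take one page)
 *		if (bio->bi_size + bvec->bv_len > max)
 *			return 0;
 *		return bvec->bv_len;
 *	}
 *
 *	blk_queue_merge_bvec(q, mydrv_merge_bvec);
 */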

void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
	q->softirq_done_fn = fn;
}
EXPORT_SYMBOL(blk_queue_softirq_done);

/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q:  the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *    The normal way for &struct bios to be passed to a device
 *    driver is for them to be collected into requests on a request
 *    queue, and then to allow the device driver to select requests
 *    off that queue when it is ready.  This works well for many block
 *    devices. However some block devices (typically virtual devices
 *    such as md or lvm) do not benefit from the processing on the
 *    request queue, and are served best by having the requests passed
 *    directly to them.  This can be achieved by providing a function
 *    to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately
 *    with buffers in "high memory". This can be accomplished by either calling
 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 *    blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
	/*
	 * set defaults
	 */
	q->nr_requests = BLKDEV_MAX_RQ;
	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
	q->make_request_fn = mfn;
	q->backing_dev_info.ra_pages =
			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
	q->backing_dev_info.state = 0;
	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
	blk_queue_hardsect_size(q, 512);
	blk_queue_dma_alignment(q, 511);
	blk_queue_congestion_threshold(q);
	q->nr_batching = BLK_BATCH_REQ;

	q->unplug_thresh = 4;		/* hmm */
	q->unplug_delay = (3 * HZ) / 1000;	/* 3 milliseconds */
	if (q->unplug_delay == 0)
		q->unplug_delay = 1;

	INIT_WORK(&q->unplug_work, blk_unplug_work);

	q->unplug_timer.function = blk_unplug_timeout;
	q->unplug_timer.data = (unsigned long)q;

	/*
	 * by default assume old behaviour and bounce for any highmem page
	 */
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
EXPORT_SYMBOL(blk_queue_make_request);
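
/*
 * Example (illustrative sketch of a bio-based driver, with hypothetical
 * names): a virtual device that handles bios directly, in the style of
 * md or loop, allocates a queue and installs its make_request function
 * instead of a request_fn.  Returning 0 signals that the bio has been
 * consumed.
 *
 *	static int mydrv_make_request(struct request_queue *q, struct bio *bio)
 *	{
 *		struct mydrv_dev *dev = q->queuedata;	(hypothetical type)
 *
 *		mydrv_handle_bio(dev, bio);		(hypothetical helper)
 *		return 0;
 *	}
 *
 *	q = blk_alloc_queue(GFP_KERNEL);
 *	blk_queue_make_request(q, mydrv_make_request);
 *	q->queuedata = dev;
 */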

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q:  the request queue for the device
 * @dma_addr:   bus address limit
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @dma_addr.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
{
	unsigned long b_pfn = dma_addr >> PAGE_SHIFT;
	int dma = 0;

	q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
	/* Assume anything <= 4GB can be handled by IOMMU.
	   Actually some IOMMUs can handle everything, but I don't
	   know of a way to test this here. */
	if (b_pfn < (min_t(u64, 0xffffffff, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
		dma = 1;
	q->bounce_pfn = max_low_pfn;
#else
	if (b_pfn < blk_max_low_pfn)
		dma = 1;
	q->bounce_pfn = b_pfn;
#endif
	if (dma) {
		init_emergency_isa_pool();
		q->bounce_gfp = GFP_NOIO | GFP_DMA;
		q->bounce_pfn = b_pfn;
	}
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
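
/*
 * Example (illustrative sketch): the limit is simply the highest bus
 * address the device can DMA to.  A controller restricted to 32-bit bus
 * addresses bounces anything above 4GB, while an unrestricted device can
 * effectively disable bouncing.
 *
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);	(24-bit ISA DMA)
 *	blk_queue_bounce_limit(q, 0xffffffffULL);	(32-bit limit)
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);	(no bouncing needed)
 */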

/**
 * blk_queue_max_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_sectors:  max sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of
 *    received requests.
 **/
void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
{
	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
							max_sectors);
	}

	if (BLK_DEF_MAX_SECTORS > max_sectors)
		q->max_hw_sectors = q->max_sectors = max_sectors;
	else {
		q->max_sectors = BLK_DEF_MAX_SECTORS;
		q->max_hw_sectors = max_sectors;
	}
}
EXPORT_SYMBOL(blk_queue_max_sectors);
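
/*
 * Example (illustrative sketch): a driver whose hardware can transfer at
 * most 64KB per command caps requests at 128 sectors.  Note that the soft
 * limit in q->max_sectors is additionally clamped to BLK_DEF_MAX_SECTORS;
 * only q->max_hw_sectors keeps the full hardware value.
 *
 *	blk_queue_max_sectors(q, 128);		(128 * 512 bytes = 64KB)
 */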

/**
 * blk_queue_max_phys_segments - set max phys segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    physical data segments in a request.  This would be the largest sized
 *    scatter list the driver could handle.
 **/
void blk_queue_max_phys_segments(struct request_queue *q,
				 unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
							max_segments);
	}

	q->max_phys_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_phys_segments);

/**
 * blk_queue_max_hw_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.  This would be the largest number of
 *    address/length pairs the host adapter can actually hand to the
 *    device at once.
 **/
void blk_queue_max_hw_segments(struct request_queue *q,
			       unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
							max_segments);
	}

	q->max_hw_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_hw_segments);
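
/*
 * Example (illustrative sketch): a controller with a 32-entry
 * scatter/gather table advertises 32 hardware segments; the physical
 * segment limit is usually set to the same value unless the driver does
 * its own coalescing.
 *
 *	blk_queue_max_hw_segments(q, 32);
 *	blk_queue_max_phys_segments(q, 32);
 */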

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment.
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_CACHE_SIZE) {
		max_size = PAGE_CACHE_SIZE;
		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
							max_size);
	}

	q->max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_hardsect_size - set hardware sector size for the queue
 * @q:  the request queue for the device
 * @size:  the hardware sector size, in bytes
 *
 * Description:
 *   This should typically be set to the lowest possible sector size
 *   that the hardware can operate on (possibly without resorting to
 *   even internal read-modify-write operations). Usually the default
 *   of 512 covers most hardware.
 **/
void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
{
	q->hardsect_size = size;
}
EXPORT_SYMBOL(blk_queue_hardsect_size);
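
/*
 * Example (illustrative sketch): a device with 4KB physical sectors whose
 * controller cannot handle scatter/gather elements larger than 64KB would
 * report both limits up front.
 *
 *	blk_queue_hardsect_size(q, 4096);
 *	blk_queue_max_segment_size(q, 64 * 1024);
 */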

/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t:	the stacking driver (top)
 * @b:  the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
	/* zero is "infinity" */
	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);

	t->max_phys_segments = min(t->max_phys_segments, b->max_phys_segments);
	t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments);
	t->max_segment_size = min(t->max_segment_size, b->max_segment_size);
	t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
	if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
		clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
}
EXPORT_SYMBOL(blk_queue_stack_limits);
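
/*
 * Example (illustrative sketch): a stacking driver such as md or a
 * device-mapper target folds in the limits of every underlying device
 * while assembling the stack, e.g. for each component block device bdev:
 *
 *	blk_queue_stack_limits(t, bdev_get_queue(bdev));
 *
 * After all components have been merged, the top queue t will not issue
 * requests that any bottom device would find too large or too fragmented,
 * and it inherits the largest hardware sector size in the stack.
 */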

/**
 * blk_queue_dma_pad - set pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Set the pad mask.  Direct I/O requests are padded to the mask specified.
 *
 * Appending a pad buffer to a request modifies ->data_len such that it
 * includes the pad buffer.  The original requested data length can be
 * obtained using blk_rq_raw_data_len().
 **/
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
	q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_dma_pad);
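
/*
 * Example (illustrative sketch): a host controller that always transfers
 * in 4-byte units would ask for every direct I/O request to be padded to
 * a multiple of 4 bytes, as libata does for ATAPI devices.
 *
 *	blk_queue_dma_pad(q, 3);	(pad ->data_len to a 4-byte multiple)
 */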

/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q:  the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf:	physically contiguous buffer
 * @size:	size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer.  They have to have a
 * real area of memory to transfer it into.  The use case for this is
 * ATAPI devices in DMA mode.  If the packet command causes a transfer
 * bigger than the transfer size, some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer.  What this API
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
 * Note: This routine adjusts max_hw_segments to make room for
 * appending the drain buffer.  If you call
 * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
 * calling this routine, you must set the limit to one fewer than your
 * device can support, otherwise there won't be room for the drain
 * buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
			dma_drain_needed_fn *dma_drain_needed,
			void *buf, unsigned int size)
{
	if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
		return -EINVAL;
	/* make room for appending the drain */
	--q->max_hw_segments;
	--q->max_phys_segments;
	q->dma_drain_needed = dma_drain_needed;
	q->dma_drain_buffer = buf;
	q->dma_drain_size = size;

	return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
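
/*
 * Example (illustrative sketch, with hypothetical names): an ATAPI-style
 * driver allocates a small physically contiguous drain buffer and
 * registers a callback that says which requests need draining.
 *
 *	static int mydrv_drain_needed(struct request *rq)
 *	{
 *		return blk_pc_request(rq);	(drain raw packet commands)
 *	}
 *
 *	drain_buf = kmalloc(MYDRV_DRAIN_SIZE, GFP_KERNEL);  (hypothetical size)
 *	if (blk_queue_dma_drain(q, mydrv_drain_needed,
 *				drain_buf, MYDRV_DRAIN_SIZE))
 *		goto fail;	(queue cannot spare a segment)
 */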

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_CACHE_SIZE - 1) {
		mask = PAGE_CACHE_SIZE - 1;
		printk(KERN_INFO "%s: set to minimum %lx\n", __FUNCTION__,
							mask);
	}

	q->seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Set required memory and length alignment for direct dma transactions.
 *    This is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->dma_alignment)
		q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);
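
/*
 * Example (illustrative sketch): a host adapter that cannot cross 64KB
 * boundaries within one scatter/gather element and requires word-aligned
 * direct I/O buffers and lengths would set:
 *
 *	blk_queue_segment_boundary(q, 0xffff);	(no segment crosses 64KB)
 *	blk_queue_dma_alignment(q, 3);		(4-byte alignment and length)
 *
 * A transport layered on top that needs at least 512-byte alignment can
 * later raise the requirement without clobbering a stricter one:
 *
 *	blk_queue_update_dma_alignment(q, 511);
 */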

static int __init blk_settings_init(void)
{
	blk_max_low_pfn = max_low_pfn - 1;
	blk_max_pfn = max_pfn - 1;
	return 0;
}
subsys_initcall(blk_settings_init);