Lines Matching +full:layer +full:- +full:depth

1 // SPDX-License-Identifier: GPL-2.0
11 #include <linux/backing-dev-defs.h>
16 #include <linux/dma-mapping.h>
19 #include "blk-rq-qos.h"
20 #include "blk-wbt.h"
24 q->rq_timeout = timeout; in blk_queue_rq_timeout()
29 * blk_set_default_limits - reset limits to default values
37 lim->max_segments = BLK_MAX_SEGMENTS; in blk_set_default_limits()
38 lim->max_discard_segments = 1; in blk_set_default_limits()
39 lim->max_integrity_segments = 0; in blk_set_default_limits()
40 lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; in blk_set_default_limits()
41 lim->virt_boundary_mask = 0; in blk_set_default_limits()
42 lim->max_segment_size = BLK_MAX_SEGMENT_SIZE; in blk_set_default_limits()
43 lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS; in blk_set_default_limits()
44 lim->max_user_sectors = lim->max_dev_sectors = 0; in blk_set_default_limits()
45 lim->chunk_sectors = 0; in blk_set_default_limits()
46 lim->max_write_zeroes_sectors = 0; in blk_set_default_limits()
47 lim->max_zone_append_sectors = 0; in blk_set_default_limits()
48 lim->max_discard_sectors = 0; in blk_set_default_limits()
49 lim->max_hw_discard_sectors = 0; in blk_set_default_limits()
50 lim->max_secure_erase_sectors = 0; in blk_set_default_limits()
51 lim->discard_granularity = 512; in blk_set_default_limits()
52 lim->discard_alignment = 0; in blk_set_default_limits()
53 lim->discard_misaligned = 0; in blk_set_default_limits()
54 lim->logical_block_size = lim->physical_block_size = lim->io_min = 512; in blk_set_default_limits()
55 lim->bounce = BLK_BOUNCE_NONE; in blk_set_default_limits()
56 lim->alignment_offset = 0; in blk_set_default_limits()
57 lim->io_opt = 0; in blk_set_default_limits()
58 lim->misaligned = 0; in blk_set_default_limits()
59 lim->zoned = false; in blk_set_default_limits()
60 lim->zone_write_granularity = 0; in blk_set_default_limits()
61 lim->dma_alignment = 511; in blk_set_default_limits()
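
The values above are what a freshly initialized queue reports before a driver overrides anything. A minimal sketch, assuming only the accessor helpers declared in <linux/blkdev.h>, of how those defaults surface to the rest of the kernel; the dump helper itself is hypothetical, not part of this file:

	#include <linux/blkdev.h>
	#include <linux/printk.h>

	/* Hypothetical helper: dump the effective limits of a queue that has
	 * only been through blk_set_default_limits(); the values noted in
	 * the comments are the defaults set above. */
	static void example_dump_limits(struct request_queue *q)
	{
		pr_info("logical block size: %u\n", queue_logical_block_size(q));   /* 512 */
		pr_info("physical block size: %u\n", queue_physical_block_size(q)); /* 512 */
		pr_info("max sectors: %u\n", queue_max_sectors(q));   /* BLK_SAFE_MAX_SECTORS */
		pr_info("dma alignment: %d\n", queue_dma_alignment(q));              /* 511 */
	}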
65 * blk_set_stacking_limits - set default limits for stacking devices
77 lim->max_segments = USHRT_MAX; in blk_set_stacking_limits()
78 lim->max_discard_segments = USHRT_MAX; in blk_set_stacking_limits()
79 lim->max_hw_sectors = UINT_MAX; in blk_set_stacking_limits()
80 lim->max_segment_size = UINT_MAX; in blk_set_stacking_limits()
81 lim->max_sectors = UINT_MAX; in blk_set_stacking_limits()
82 lim->max_dev_sectors = UINT_MAX; in blk_set_stacking_limits()
83 lim->max_write_zeroes_sectors = UINT_MAX; in blk_set_stacking_limits()
84 lim->max_zone_append_sectors = UINT_MAX; in blk_set_stacking_limits()
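
A minimal sketch of how a stacking driver (device-mapper style) would use this: the limits start wide open, so folding in the bottom devices with blk_stack_limits() (shown further down in this file) can only narrow them, never silently widen them. The wrapper function is illustrative:

	#include <linux/blkdev.h>

	/* Hypothetical stacking-driver init for a top-level queue_limits. */
	static void example_init_top_limits(struct queue_limits *lim)
	{
		blk_set_stacking_limits(lim);
		/* lim->max_sectors == UINT_MAX, lim->max_segments == USHRT_MAX, ... */
	}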
89 * blk_queue_bounce_limit - set bounce buffer limit for queue
100 q->limits.bounce = bounce; in blk_queue_bounce_limit()
105 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
118 * max_sectors is a soft limit imposed by the block layer for
120 * per-device basis in /sys/block/<device>/queue/max_sectors_kb.
125 struct queue_limits *limits = &q->limits; in blk_queue_max_hw_sectors()
129 max_hw_sectors = 1 << (PAGE_SHIFT - 9); in blk_queue_max_hw_sectors()
134 limits->logical_block_size >> SECTOR_SHIFT); in blk_queue_max_hw_sectors()
135 limits->max_hw_sectors = max_hw_sectors; in blk_queue_max_hw_sectors()
137 max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors); in blk_queue_max_hw_sectors()
139 if (limits->max_user_sectors) in blk_queue_max_hw_sectors()
140 max_sectors = min(max_sectors, limits->max_user_sectors); in blk_queue_max_hw_sectors()
145 limits->logical_block_size >> SECTOR_SHIFT); in blk_queue_max_hw_sectors()
146 limits->max_sectors = max_sectors; in blk_queue_max_hw_sectors()
148 if (!q->disk) in blk_queue_max_hw_sectors()
150 q->disk->bdi->io_pages = max_sectors >> (PAGE_SHIFT - 9); in blk_queue_max_hw_sectors()
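
A minimal sketch for a hypothetical controller that can transfer at most 1 MiB per request. The derived soft limit (max_sectors) is what can later be lowered from user space via /sys/block/<device>/queue/max_sectors_kb:

	#include <linux/blkdev.h>

	static void example_set_transfer_limits(struct request_queue *q)
	{
		/* 1 MiB per request: 2048 sectors of 512 bytes (hypothetical HW cap) */
		blk_queue_max_hw_sectors(q, 2048);

		/*
		 * blk_queue_max_hw_sectors() also derives the soft max_sectors
		 * limit (capped at a block-layer default unless the user has
		 * set max_sectors_kb), so the driver normally does not touch
		 * q->limits.max_sectors directly.
		 */
	}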
155 * blk_queue_chunk_sectors - set size of the chunk for this queue
161 * this limit and prevent merging across chunks. Note that the block layer
168 q->limits.chunk_sectors = chunk_sectors; in blk_queue_chunk_sectors()
173 * blk_queue_max_discard_sectors - set max sectors for a single discard
180 q->limits.max_hw_discard_sectors = max_discard_sectors; in blk_queue_max_discard_sectors()
181 q->limits.max_discard_sectors = max_discard_sectors; in blk_queue_max_discard_sectors()
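
A minimal sketch of discard provisioning for a hypothetical device that can trim up to 2 GiB per command with 4 KiB granularity; setting discard_granularity directly in the limits is how drivers typically pair it with this call:

	#include <linux/blkdev.h>

	static void example_enable_discard(struct request_queue *q)
	{
		/* up to 2 GiB per discard command: 4194304 sectors of 512 bytes */
		blk_queue_max_discard_sectors(q, 4194304);

		/* hypothetical 4 KiB trim granularity, no extra alignment */
		q->limits.discard_granularity = 4096;
		q->limits.discard_alignment = 0;
	}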
186 * blk_queue_max_secure_erase_sectors - set max sectors for a secure erase
193 q->limits.max_secure_erase_sectors = max_sectors; in blk_queue_max_secure_erase_sectors()
198 * blk_queue_max_write_zeroes_sectors - set max sectors for a single
206 q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors; in blk_queue_max_write_zeroes_sectors()
211 * blk_queue_max_zone_append_sectors - set max sectors for a single zone append
223 max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors); in blk_queue_max_zone_append_sectors()
224 max_sectors = min(q->limits.chunk_sectors, max_sectors); in blk_queue_max_zone_append_sectors()
233 q->limits.max_zone_append_sectors = max_sectors; in blk_queue_max_zone_append_sectors()
238 * blk_queue_max_segments - set max hw segments for a request for this queue
253 q->limits.max_segments = max_segments; in blk_queue_max_segments()
258 * blk_queue_max_discard_segments - set max segments for discard requests
269 q->limits.max_discard_segments = max_segments; in blk_queue_max_discard_segments()
274 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
290 WARN_ON_ONCE(q->limits.virt_boundary_mask); in blk_queue_max_segment_size()
292 q->limits.max_segment_size = max_size; in blk_queue_max_segment_size()
297 * blk_queue_logical_block_size - set logical block size for the queue
308 struct queue_limits *limits = &q->limits; in blk_queue_logical_block_size()
310 limits->logical_block_size = size; in blk_queue_logical_block_size()
312 if (limits->discard_granularity < limits->logical_block_size) in blk_queue_logical_block_size()
313 limits->discard_granularity = limits->logical_block_size; in blk_queue_logical_block_size()
315 if (limits->physical_block_size < size) in blk_queue_logical_block_size()
316 limits->physical_block_size = size; in blk_queue_logical_block_size()
318 if (limits->io_min < limits->physical_block_size) in blk_queue_logical_block_size()
319 limits->io_min = limits->physical_block_size; in blk_queue_logical_block_size()
321 limits->max_hw_sectors = in blk_queue_logical_block_size()
322 round_down(limits->max_hw_sectors, size >> SECTOR_SHIFT); in blk_queue_logical_block_size()
323 limits->max_sectors = in blk_queue_logical_block_size()
324 round_down(limits->max_sectors, size >> SECTOR_SHIFT); in blk_queue_logical_block_size()
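
A minimal sketch for a hypothetical 512e drive (4 KiB physical sectors exposed through 512-byte logical blocks); note how the helpers above keep discard_granularity, io_min and the sector limits consistent with whatever block size is set:

	#include <linux/blkdev.h>

	static void example_set_block_sizes(struct request_queue *q)
	{
		/* 512e layout: 512-byte LBAs backed by 4 KiB physical sectors */
		blk_queue_logical_block_size(q, 512);
		blk_queue_physical_block_size(q, 4096);

		/* a native 4Kn device would instead set both to 4096 */
	}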
329 * blk_queue_physical_block_size - set physical block size for the queue
335 * hardware can operate on without reverting to read-modify-write
340 q->limits.physical_block_size = size; in blk_queue_physical_block_size()
342 if (q->limits.physical_block_size < q->limits.logical_block_size) in blk_queue_physical_block_size()
343 q->limits.physical_block_size = q->limits.logical_block_size; in blk_queue_physical_block_size()
345 if (q->limits.discard_granularity < q->limits.physical_block_size) in blk_queue_physical_block_size()
346 q->limits.discard_granularity = q->limits.physical_block_size; in blk_queue_physical_block_size()
348 if (q->limits.io_min < q->limits.physical_block_size) in blk_queue_physical_block_size()
349 q->limits.io_min = q->limits.physical_block_size; in blk_queue_physical_block_size()
354 * blk_queue_zone_write_granularity - set zone write granularity for the queue
368 q->limits.zone_write_granularity = size; in blk_queue_zone_write_granularity()
370 if (q->limits.zone_write_granularity < q->limits.logical_block_size) in blk_queue_zone_write_granularity()
371 q->limits.zone_write_granularity = q->limits.logical_block_size; in blk_queue_zone_write_granularity()
376 * blk_queue_alignment_offset - set physical block alignment offset
382 * the legacy DOS partition table 63-sector offset. Low-level drivers
388 q->limits.alignment_offset = in blk_queue_alignment_offset()
389 offset & (q->limits.physical_block_size - 1); in blk_queue_alignment_offset()
390 q->limits.misaligned = 0; in blk_queue_alignment_offset()
396 struct request_queue *q = disk->queue; in disk_update_readahead()
399 * For read-ahead of large files to be effective, we need to read ahead in disk_update_readahead()
402 disk->bdi->ra_pages = in disk_update_readahead()
404 disk->bdi->io_pages = queue_max_sectors(q) >> (PAGE_SHIFT - 9); in disk_update_readahead()
409 * blk_limits_io_min - set minimum request size for a device
421 limits->io_min = min; in blk_limits_io_min()
423 if (limits->io_min < limits->logical_block_size) in blk_limits_io_min()
424 limits->io_min = limits->logical_block_size; in blk_limits_io_min()
426 if (limits->io_min < limits->physical_block_size) in blk_limits_io_min()
427 limits->io_min = limits->physical_block_size; in blk_limits_io_min()
432 * blk_queue_io_min - set minimum request size for the queue
447 blk_limits_io_min(&q->limits, min); in blk_queue_io_min()
452 * blk_limits_io_opt - set optimal request size for a device
466 limits->io_opt = opt; in blk_limits_io_opt()
471 * blk_queue_io_opt - set optimal request size for the queue
485 blk_limits_io_opt(&q->limits, opt); in blk_queue_io_opt()
486 if (!q->disk) in blk_queue_io_opt()
488 q->disk->bdi->ra_pages = in blk_queue_io_opt()
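
io_min and io_opt are the classic way for RAID-style devices to advertise their stripe geometry; a minimal sketch with a hypothetical 64 KiB chunk across 4 data disks (both values are assumptions):

	#include <linux/blkdev.h>

	static void example_set_raid_hints(struct request_queue *q)
	{
		unsigned int chunk_bytes = 64 * 1024;	/* hypothetical chunk size */
		unsigned int data_disks  = 4;		/* hypothetical stripe width */

		blk_queue_io_min(q, chunk_bytes);		/* preferred minimum I/O */
		blk_queue_io_opt(q, chunk_bytes * data_disks);	/* full-stripe write */
	}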
496 unsigned int granularity = max(lim->physical_block_size, lim->io_min); in queue_limit_alignment_offset()
500 return (granularity + lim->alignment_offset - alignment) % granularity; in queue_limit_alignment_offset()
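
A worked example of the arithmetic above, with assumed values (a 512e drive that declares a 3584-byte alignment offset, probed for a partition starting at sector 63):

	/*
	 * Assumed limits: physical_block_size = 4096, io_min = 512,
	 * alignment_offset = 3584; partition start = sector 63.
	 *
	 *   granularity = max(4096, 512)                = 4096
	 *   alignment   = (63 % (4096 >> 9)) << 9       = 7 * 512 = 3584
	 *   offset      = (4096 + 3584 - 3584) % 4096   = 0
	 *
	 * i.e. a partition starting at LBA 63 on such a drive reports no
	 * extra alignment offset: it lines up with the 3584-byte offset the
	 * device itself declares.
	 */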
508 if (!lim->max_discard_sectors) in queue_limit_discard_alignment()
512 alignment = lim->discard_alignment >> SECTOR_SHIFT; in queue_limit_discard_alignment()
513 granularity = lim->discard_granularity >> SECTOR_SHIFT; in queue_limit_discard_alignment()
521 offset = (granularity + alignment - offset) % granularity; in queue_limit_discard_alignment()
536 * blk_stack_limits - adjust queue_limits for stacked devices
552 * and alignments exist, -1 is returned and the resulting top
561 t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); in blk_stack_limits()
562 t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors); in blk_stack_limits()
563 t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors); in blk_stack_limits()
564 t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors, in blk_stack_limits()
565 b->max_write_zeroes_sectors); in blk_stack_limits()
566 t->max_zone_append_sectors = min(t->max_zone_append_sectors, in blk_stack_limits()
567 b->max_zone_append_sectors); in blk_stack_limits()
568 t->bounce = max(t->bounce, b->bounce); in blk_stack_limits()
570 t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, in blk_stack_limits()
571 b->seg_boundary_mask); in blk_stack_limits()
572 t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask, in blk_stack_limits()
573 b->virt_boundary_mask); in blk_stack_limits()
575 t->max_segments = min_not_zero(t->max_segments, b->max_segments); in blk_stack_limits()
576 t->max_discard_segments = min_not_zero(t->max_discard_segments, in blk_stack_limits()
577 b->max_discard_segments); in blk_stack_limits()
578 t->max_integrity_segments = min_not_zero(t->max_integrity_segments, in blk_stack_limits()
579 b->max_integrity_segments); in blk_stack_limits()
581 t->max_segment_size = min_not_zero(t->max_segment_size, in blk_stack_limits()
582 b->max_segment_size); in blk_stack_limits()
584 t->misaligned |= b->misaligned; in blk_stack_limits()
591 if (t->alignment_offset != alignment) { in blk_stack_limits()
593 top = max(t->physical_block_size, t->io_min) in blk_stack_limits()
594 + t->alignment_offset; in blk_stack_limits()
595 bottom = max(b->physical_block_size, b->io_min) + alignment; in blk_stack_limits()
599 t->misaligned = 1; in blk_stack_limits()
600 ret = -1; in blk_stack_limits()
604 t->logical_block_size = max(t->logical_block_size, in blk_stack_limits()
605 b->logical_block_size); in blk_stack_limits()
607 t->physical_block_size = max(t->physical_block_size, in blk_stack_limits()
608 b->physical_block_size); in blk_stack_limits()
610 t->io_min = max(t->io_min, b->io_min); in blk_stack_limits()
611 t->io_opt = lcm_not_zero(t->io_opt, b->io_opt); in blk_stack_limits()
612 t->dma_alignment = max(t->dma_alignment, b->dma_alignment); in blk_stack_limits()
614 /* Set non-power-of-2 compatible chunk_sectors boundary */ in blk_stack_limits()
615 if (b->chunk_sectors) in blk_stack_limits()
616 t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors); in blk_stack_limits()
619 if (t->physical_block_size & (t->logical_block_size - 1)) { in blk_stack_limits()
620 t->physical_block_size = t->logical_block_size; in blk_stack_limits()
621 t->misaligned = 1; in blk_stack_limits()
622 ret = -1; in blk_stack_limits()
626 if (t->io_min & (t->physical_block_size - 1)) { in blk_stack_limits()
627 t->io_min = t->physical_block_size; in blk_stack_limits()
628 t->misaligned = 1; in blk_stack_limits()
629 ret = -1; in blk_stack_limits()
633 if (t->io_opt & (t->physical_block_size - 1)) { in blk_stack_limits()
634 t->io_opt = 0; in blk_stack_limits()
635 t->misaligned = 1; in blk_stack_limits()
636 ret = -1; in blk_stack_limits()
640 if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) { in blk_stack_limits()
641 t->chunk_sectors = 0; in blk_stack_limits()
642 t->misaligned = 1; in blk_stack_limits()
643 ret = -1; in blk_stack_limits()
646 t->raid_partial_stripes_expensive = in blk_stack_limits()
647 max(t->raid_partial_stripes_expensive, in blk_stack_limits()
648 b->raid_partial_stripes_expensive); in blk_stack_limits()
651 t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment) in blk_stack_limits()
652 % max(t->physical_block_size, t->io_min); in blk_stack_limits()
655 if (t->alignment_offset & (t->logical_block_size - 1)) { in blk_stack_limits()
656 t->misaligned = 1; in blk_stack_limits()
657 ret = -1; in blk_stack_limits()
660 t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size); in blk_stack_limits()
661 t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size); in blk_stack_limits()
662 t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size); in blk_stack_limits()
665 if (b->discard_granularity) { in blk_stack_limits()
668 if (t->discard_granularity != 0 && in blk_stack_limits()
669 t->discard_alignment != alignment) { in blk_stack_limits()
670 top = t->discard_granularity + t->discard_alignment; in blk_stack_limits()
671 bottom = b->discard_granularity + alignment; in blk_stack_limits()
675 t->discard_misaligned = 1; in blk_stack_limits()
678 t->max_discard_sectors = min_not_zero(t->max_discard_sectors, in blk_stack_limits()
679 b->max_discard_sectors); in blk_stack_limits()
680 t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors, in blk_stack_limits()
681 b->max_hw_discard_sectors); in blk_stack_limits()
682 t->discard_granularity = max(t->discard_granularity, in blk_stack_limits()
683 b->discard_granularity); in blk_stack_limits()
684 t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) % in blk_stack_limits()
685 t->discard_granularity; in blk_stack_limits()
687 t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors, in blk_stack_limits()
688 b->max_secure_erase_sectors); in blk_stack_limits()
689 t->zone_write_granularity = max(t->zone_write_granularity, in blk_stack_limits()
690 b->zone_write_granularity); in blk_stack_limits()
691 t->zoned = max(t->zoned, b->zoned); in blk_stack_limits()
697 * disk_stack_limits - adjust queue limits for stacked drivers
709 struct request_queue *t = disk->queue; in disk_stack_limits()
711 if (blk_stack_limits(&t->limits, &bdev_get_queue(bdev)->limits, in disk_stack_limits()
714 disk->disk_name, bdev); in disk_stack_limits()
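
A minimal sketch of MD/DM-style limit propagation onto the top-level disk; the member array is illustrative, not part of this file:

	#include <linux/blkdev.h>

	static void example_stack_members(struct gendisk *disk,
					  struct block_device **members,
					  int nr_members)
	{
		int i;

		/*
		 * Fold each member's limits into disk->queue->limits.  The
		 * third argument is a byte offset of the data area inside the
		 * member (0 here); the member's partition start is added
		 * internally.  On irreconcilable alignment the "misaligned"
		 * warning above is printed, but stacking still proceeds with
		 * safe fallback values.
		 */
		for (i = 0; i < nr_members; i++)
			disk_stack_limits(disk, members[i], 0);
	}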
721 * blk_queue_update_dma_pad - update pad mask
732 if (mask > q->dma_pad_mask) in blk_queue_update_dma_pad()
733 q->dma_pad_mask = mask; in blk_queue_update_dma_pad()
738 * blk_queue_segment_boundary - set boundary rules for segment merging
744 if (mask < PAGE_SIZE - 1) { in blk_queue_segment_boundary()
745 mask = PAGE_SIZE - 1; in blk_queue_segment_boundary()
749 q->limits.seg_boundary_mask = mask; in blk_queue_segment_boundary()
754 * blk_queue_virt_boundary - set boundary rules for bio merging
760 q->limits.virt_boundary_mask = mask; in blk_queue_virt_boundary()
769 q->limits.max_segment_size = UINT_MAX; in blk_queue_virt_boundary()
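
virt_boundary is used by drivers whose scatter/gather elements must not straddle an address boundary (NVMe-style PRP lists are the common case). A minimal sketch, with the page-sized boundary as an assumption:

	#include <linux/blkdev.h>

	static void example_set_virt_boundary(struct request_queue *q)
	{
		/*
		 * Hypothetical device: each SG element must stay within one
		 * CPU page.  As the code above shows, setting a virt boundary
		 * lifts max_segment_size to UINT_MAX, since the boundary
		 * itself now bounds every segment.
		 */
		blk_queue_virt_boundary(q, PAGE_SIZE - 1);
	}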
774 * blk_queue_dma_alignment - set dma length and memory alignment
785 q->limits.dma_alignment = mask; in blk_queue_dma_alignment()
790 * blk_queue_update_dma_alignment - update dma length and memory alignment
807 if (mask > q->limits.dma_alignment) in blk_queue_update_dma_alignment()
808 q->limits.dma_alignment = mask; in blk_queue_update_dma_alignment()
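
A minimal sketch of the DMA alignment setters; the masks are (alignment - 1), and the update variant above only ever tightens the requirement. The controller capabilities assumed here are hypothetical:

	#include <linux/blkdev.h>

	static void example_set_dma_alignment(struct request_queue *q)
	{
		/* Hypothetical controller that can DMA from 4-byte-aligned buffers */
		blk_queue_dma_alignment(q, 4 - 1);

		/*
		 * A component added later that needs 512-byte alignment only
		 * raises the mask; blk_queue_update_dma_alignment() keeps the
		 * larger (stricter) of the two.
		 */
		blk_queue_update_dma_alignment(q, 512 - 1);
	}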
813 * blk_set_queue_depth - tell the block layer about the device queue depth
815 * @depth: queue depth
818 void blk_set_queue_depth(struct request_queue *q, unsigned int depth) in blk_set_queue_depth() argument
820 q->queue_depth = depth; in blk_set_queue_depth()
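
A minimal sketch; the depth is hypothetical. Besides storing the value, the full function also lets rq-qos policies (e.g. writeback throttling, hence the blk-rq-qos.h/blk-wbt.h includes at the top) react to the change:

	#include <linux/blkdev.h>

	static void example_set_depth(struct request_queue *q)
	{
		/* Hypothetical HBA that keeps up to 64 commands in flight per device */
		blk_set_queue_depth(q, 64);
	}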
826 * blk_queue_write_cache - configure queue's write cache
831 * Tell the block layer about the write cache of @q.
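
A minimal sketch of advertising a volatile write cache, assuming the (write cache present, FUA supported) bool pair taken by blk_queue_write_cache() in this kernel:

	#include <linux/blkdev.h>

	static void example_set_cache(struct request_queue *q, bool volatile_cache,
				      bool supports_fua)
	{
		/*
		 * With a volatile cache the block layer issues flushes for
		 * REQ_PREFLUSH and, if FUA is not supported, emulates REQ_FUA
		 * with a post-flush.
		 */
		blk_queue_write_cache(q, volatile_cache, supports_fua);
	}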
850 * blk_queue_required_elevator_features - set the required elevator features for a queue
854 * Tell the block layer that for the device controlled through @q, only the
861 q->required_elevator_features = features; in blk_queue_required_elevator_features()
866 * blk_queue_can_use_dma_map_merging - configure queue for merging segments.
870 * Tell the block layer that segments of @q may be merged according to the DMA merge boundary of @dev.
888 * disk_set_zoned - indicate a zoned device
893 struct request_queue *q = disk->queue; in disk_set_zoned()
901 q->limits.zoned = true; in disk_set_zoned()
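
A minimal sketch of a zoned-device configuration (requires CONFIG_BLK_DEV_ZONED); the 256 MiB zone size and the append/write-granularity choices are assumptions:

	#include <linux/blkdev.h>

	static void example_setup_zoned(struct gendisk *disk)
	{
		struct request_queue *q = disk->queue;
		unsigned int zone_sectors = 256 * 2048;	/* 256 MiB in 512-byte sectors */

		disk_set_zoned(disk);			/* mark the queue as zoned */
		blk_queue_chunk_sectors(q, zone_sectors);	/* no I/O crosses a zone */
		blk_queue_max_zone_append_sectors(q, zone_sectors);
		blk_queue_zone_write_granularity(q, queue_logical_block_size(q));
	}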
910 if (q->limits.misaligned) in bdev_alignment_offset()
911 return -1; in bdev_alignment_offset()
913 return queue_limit_alignment_offset(&q->limits, in bdev_alignment_offset()
914 bdev->bd_start_sect); in bdev_alignment_offset()
915 return q->limits.alignment_offset; in bdev_alignment_offset()
924 return queue_limit_discard_alignment(&q->limits, in bdev_discard_alignment()
925 bdev->bd_start_sect); in bdev_discard_alignment()
926 return q->limits.discard_alignment; in bdev_discard_alignment()