Lines Matching full:limits
346 * Flags automatically inherited when stacking limits.
394 /* atomic write limits */
508 struct queue_limits limits; member
585 * Protects queue limits and also sysfs attribute read_ahead_kb.
660 #define blk_queue_nonrot(q) (!((q)->limits.features & BLK_FEAT_ROTATIONAL))
661 #define blk_queue_io_stat(q) ((q)->limits.features & BLK_FEAT_IO_STAT)
663 ((q)->limits.flags & BLK_FLAG_IOSTATS_PASSTHROUGH)
664 #define blk_queue_dax(q) ((q)->limits.features & BLK_FEAT_DAX)
665 #define blk_queue_pci_p2pdma(q) ((q)->limits.features & BLK_FEAT_PCI_P2PDMA)
681 ((q)->limits.features & BLK_FEAT_SKIP_TAGSET_QUIESCE)
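As a minimal sketch (the helper name is hypothetical, not kernel API), the feature-test macros above are typically consumed as simple predicates, e.g. skipping rotational-media heuristics on SSDs:

#include <linux/blkdev.h>

/* Hypothetical helper: seek-ordering heuristics only pay off on
 * rotational media, which blk_queue_nonrot() rules out for SSDs. */
static bool example_wants_seek_sorting(struct request_queue *q)
{
	return !blk_queue_nonrot(q);
}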
712 (q->limits.features & BLK_FEAT_ZONED); in blk_queue_is_zoned()
719 return sector >> ilog2(disk->queue->limits.chunk_sectors); in disk_zone_no()
724 return bdev->bd_disk->queue->limits.max_open_zones; in bdev_max_open_zones()
729 return bdev->bd_disk->queue->limits.max_active_zones; in bdev_max_active_zones()
846 sector_t zone_sectors = disk->queue->limits.chunk_sectors; in disk_zone_capacity()
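A sketch of how the zoned-device accessors above fit together; the reporting helper and its message format are illustrative, not in-tree code:

#include <linux/blkdev.h>
#include <linux/printk.h>

/* Hypothetical helper: map a sector to its zone number and report the
 * zone resource limits exposed by the accessors listed above. */
static void example_report_zone(struct block_device *bdev, sector_t sector)
{
	struct gendisk *disk = bdev->bd_disk;

	if (!blk_queue_is_zoned(disk->queue))
		return;

	pr_info("%s: sector %llu is in zone %u (max_open=%u max_active=%u)\n",
		disk->disk_name, (unsigned long long)sector,
		disk_zone_no(disk, sector),
		bdev_max_open_zones(bdev), bdev_max_active_zones(bdev));
}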
881 * @lim: queue limits to be used for this disk.
982 * queue_limits_start_update - start an atomic update of queue limits
985 * This function starts an atomic update of the queue limits. It takes a lock
986 * to prevent other updates and returns a snapshot of the current limits that
996 return q->limits; in queue_limits_start_update()
1006 * queue_limits_cancel_update - cancel an atomic update of queue limits
1009 * This function cancels an atomic update of the queue limits started by
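queue_limits_start_update() and queue_limits_cancel_update() bracket the atomic-update pattern together with queue_limits_commit_update(); a minimal sketch, with the helper name and the particular limit chosen purely for illustration:

#include <linux/blkdev.h>

/* Sketch of the atomic update pattern: take a snapshot under the
 * limits lock, modify it, then validate and publish it atomically. */
static int example_set_max_hw_sectors(struct request_queue *q,
				      unsigned int max_hw_sectors)
{
	struct queue_limits lim = queue_limits_start_update(q);

	lim.max_hw_sectors = max_hw_sectors;
	return queue_limits_commit_update(q, &lim);
}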
1027 q->limits.max_discard_sectors = 0; in blk_queue_disable_discard()
1032 q->limits.max_secure_erase_sectors = 0; in blk_queue_disable_secure_erase()
1037 q->limits.max_write_zeroes_sectors = 0; in blk_queue_disable_write_zeroes()
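A plausible (illustrative, not in-tree) caller of the disable helpers above: when hardware rejects an offload command, clear the limit so the block layer stops issuing such requests queue-wide:

#include <linux/blkdev.h>

/* Illustrative error path: the device failed a WRITE ZEROES command,
 * so zero max_write_zeroes_sectors via the helper listed above. */
static void example_on_write_zeroes_error(struct request_queue *q)
{
	blk_queue_disable_write_zeroes(q);
}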
1221 * controlled by the driver, usually based on hardware limits.
1227 return &bdev_get_queue(bdev)->limits; in bdev_limits()
1232 return q->limits.seg_boundary_mask; in queue_segment_boundary()
1237 return q->limits.virt_boundary_mask; in queue_virt_boundary()
1242 return q->limits.max_sectors; in queue_max_sectors()
1252 return q->limits.max_hw_sectors; in queue_max_hw_sectors()
1257 return q->limits.max_segments; in queue_max_segments()
1262 return q->limits.max_discard_segments; in queue_max_discard_segments()
1267 return q->limits.max_segment_size; in queue_max_segment_size()
1272 return blk_queue_is_zoned(q) && !q->limits.max_hw_zone_append_sectors; in queue_emulates_zone_append()
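As an illustrative sketch, the max_* accessors above are what callers use to size transfers; the clamp helper here is hypothetical, but it mirrors how the block layer sizes and splits requests:

#include <linux/blkdev.h>
#include <linux/minmax.h>

/* Sketch: clamp a transfer to the soft max_sectors limit; max_hw_sectors
 * is the hard ceiling and max_sectors the tunable soft one. */
static unsigned int example_clamp_io(struct request_queue *q,
				     unsigned int nr_sectors)
{
	return min(nr_sectors, queue_max_sectors(q));
}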
1293 return q->limits.logical_block_size; in queue_logical_block_size()
1303 return q->limits.physical_block_size; in queue_physical_block_size()
1313 return q->limits.io_min; in queue_io_min()
1323 return q->limits.io_opt; in queue_io_opt()
1334 return q->limits.zone_write_granularity; in queue_zone_write_granularity()
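A sketch built on the block-size accessors above (helper name assumed): direct I/O must align to the logical block size, while io_min and io_opt are performance hints rather than correctness limits:

#include <linux/blkdev.h>
#include <linux/align.h>

/* Sketch: check direct-I/O alignment against the logical block size. */
static bool example_dio_aligned(struct block_device *bdev,
				loff_t pos, size_t len)
{
	unsigned int lbs = bdev_logical_block_size(bdev);

	return IS_ALIGNED(pos, lbs) && IS_ALIGNED(len, lbs);
}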
1374 return bdev->bd_disk->queue->limits.features & BLK_FEAT_SYNCHRONOUS; in bdev_synchronous()
1382 q->limits.integrity.csum_type != BLK_INTEGRITY_CSUM_NONE) in bdev_stable_writes()
1384 return q->limits.features & BLK_FEAT_STABLE_WRITES; in bdev_stable_writes()
1389 return (q->limits.features & BLK_FEAT_WRITE_CACHE) && in blk_queue_write_cache()
1390 !(q->limits.flags & BLK_FLAG_WRITE_CACHE_DISABLED); in blk_queue_write_cache()
1405 return bdev->bd_disk->queue->limits.features & BLK_FEAT_NOWAIT; in bdev_nowait()
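An illustrative (not in-tree) use of bdev_nowait() from the line above: strip REQ_NOWAIT before submission when the device cannot honour it, rather than letting the bio fail with -EAGAIN:

#include <linux/blkdev.h>
#include <linux/bio.h>

/* Sketch: fall back to a blocking submission on non-nowait devices. */
static void example_submit_bio(struct block_device *bdev, struct bio *bio)
{
	if ((bio->bi_opf & REQ_NOWAIT) && !bdev_nowait(bdev))
		bio->bi_opf &= ~REQ_NOWAIT;
	submit_bio(bio);
}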
1424 return q->limits.chunk_sectors; in bdev_zone_sectors()
1477 return q->limits.dma_alignment; in queue_dma_alignment()
1483 return q->limits.atomic_write_unit_max; in queue_atomic_write_unit_max_bytes()
1489 return q->limits.atomic_write_unit_min; in queue_atomic_write_unit_min_bytes()
1495 return q->limits.atomic_write_boundary_sectors << SECTOR_SHIFT; in queue_atomic_write_boundary_bytes()
1501 return q->limits.atomic_write_max_sectors << SECTOR_SHIFT; in queue_atomic_write_max_bytes()
1525 unsigned int alignment = blk_lim_dma_alignment_and_pad(&q->limits); in blk_rq_aligned()
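queue_dma_alignment() returns a bit mask, as the blk_rq_aligned() line above suggests; a hedged sketch of the same check applied to a driver-private buffer (helper name hypothetical):

#include <linux/blkdev.h>

/* Sketch: an address/length pair is acceptable for passthrough I/O
 * when neither has any bits set inside the DMA alignment mask. */
static bool example_buf_dma_ok(struct request_queue *q,
			       unsigned long addr, unsigned long len)
{
	unsigned long mask = queue_dma_alignment(q);

	return !((addr | len) & mask);
}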
1750 struct queue_limits *limits) in blk_atomic_write_start_sect_aligned() argument
1752 unsigned int alignment = max(limits->atomic_write_hw_unit_min, in blk_atomic_write_start_sect_aligned()
1753 limits->atomic_write_hw_boundary); in blk_atomic_write_start_sect_aligned()
1761 struct queue_limits *limits = &bd_queue->limits; in bdev_can_atomic_write() local
1763 if (!limits->atomic_write_unit_min) in bdev_can_atomic_write()
1768 limits); in bdev_can_atomic_write()
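Putting the atomic-write accessors together, a sketch of a pre-submission validity check; the helper name and the exact power-of-two/alignment rules are assumptions here, with the real gate being bdev_can_atomic_write() plus the unit_min/unit_max accessors listed above:

#include <linux/blkdev.h>
#include <linux/log2.h>
#include <linux/align.h>

/* Sketch: accept an atomic write only if the device supports the
 * feature and the length falls within the advertised unit bounds. */
static bool example_atomic_write_ok(struct block_device *bdev,
				    loff_t pos, size_t len)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (!bdev_can_atomic_write(bdev))
		return false;
	if (len < queue_atomic_write_unit_min_bytes(q) ||
	    len > queue_atomic_write_unit_max_bytes(q))
		return false;
	return is_power_of_2(len) && IS_ALIGNED(pos, len);
}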