Search results for refs:lim (results 1 – 25 of 202), sorted by relevance

/linux/block/
blk-settings.c
   37  void blk_set_stacking_limits(struct queue_limits *lim)    (lim: argument)
   39  memset(lim, 0, sizeof(*lim));
   40  lim->logical_block_size = SECTOR_SIZE;
   41  lim->physical_block_size = SECTOR_SIZE;
   42  lim->io_min = SECTOR_SIZE;
   43  lim->discard_granularity = SECTOR_SIZE;
   44  lim->dma_alignment = SECTOR_SIZE - 1;
   45  lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
   48  lim->max_segments = USHRT_MAX;
   49  lim->max_discard_segments = USHRT_MAX;
   [all …]
blk-merge.c
  102  static unsigned int bio_allowed_max_sectors(const struct queue_limits *lim)    (lim: argument)
  104  return round_down(UINT_MAX, lim->logical_block_size) >> SECTOR_SHIFT;
  161  struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,    (lim: argument)
  170  granularity = max(lim->discard_granularity >> 9, 1U);
  173  min(lim->max_discard_sectors, bio_allowed_max_sectors(lim));
  188  ((lim->discard_alignment >> 9) % granularity);
  197  static inline unsigned int blk_boundary_sectors(const struct queue_limits *lim,    (lim: argument)
  204  if (is_atomic && lim->atomic_write_boundary_sectors)
  205  return lim->atomic_write_boundary_sectors;
  207  return lim->chunk_sectors;
  [all …]
blk.h
  143  static inline bool __bvec_gap_to_prev(const struct queue_limits *lim,    (lim: argument)
  146  return (offset & lim->virt_boundary_mask) ||
  147  ((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask);
  154  static inline bool bvec_gap_to_prev(const struct queue_limits *lim,    (lim: argument)
  157  if (!lim->virt_boundary_mask)
  159  return __bvec_gap_to_prev(lim, bprv, offset);
  355  struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
  358  const struct queue_limits *lim, unsigned *nsegs);
  359  struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
  362  const struct queue_limits *lim, unsigned *nr_segs);
  [all …]
blk-sysfs.c
   30  size_t count, struct queue_limits *lim);
  229  const char *page, size_t count, struct queue_limits *lim)
  244  lim->max_user_discard_sectors = max_discard_bytes >> SECTOR_SHIFT;
  249  const char *page, size_t count, struct queue_limits *lim)    (lim: argument of queue_max_wzeroes_unmap_sectors_store())
  258  max_hw_zeroes_bytes = lim->max_hw_wzeroes_unmap_sectors << SECTOR_SHIFT;
  262  lim->max_user_wzeroes_unmap_sectors = max_zeroes_bytes >> SECTOR_SHIFT;
  268  struct queue_limits *lim)    (lim: argument of queue_max_sectors_store())
  277  lim->max_user_sectors = max_sectors_kb << 1;
  282  size_t count, struct queue_limits *lim, blk_features_t feature)    (lim: argument of queue_feature_store())
  292  lim->features |= feature;
  [all …]
/linux/drivers/md/
dm-zone.c
  266  struct queue_limits *lim;    (lim: struct member)
  319  zlim->lim->max_active_zones =    (in device_get_zone_resource_limits())
  320  min_not_zero(max_active_zones, zlim->lim->max_active_zones);
  325  zlim->lim->max_open_zones =
  326  min_not_zero(max_open_zones, zlim->lim->max_open_zones);
  339  struct queue_limits *lim)    (lim: argument of dm_set_zones_restrictions())
  345  .lim = lim,
  356  lim->max_hw_zone_append_sectors = 0;
  357  else if (lim->max_hw_zone_append_sectors == 0)
  358  lim->max_hw_zone_append_sectors = lim->max_zone_append_sectors;
  [all …]
md-linear.c
   70  struct queue_limits lim;    (lim: local variable of linear_set_limits())
   73  md_init_stacking_limits(&lim);
   74  lim.max_hw_sectors = mddev->chunk_sectors;
   75  lim.max_write_zeroes_sectors = mddev->chunk_sectors;
   76  lim.max_hw_wzeroes_unmap_sectors = mddev->chunk_sectors;
   77  lim.io_min = mddev->chunk_sectors << 9;
   78  err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
   82  return queue_limits_set(mddev->gendisk->queue, &lim);
raid0.c
  379  struct queue_limits lim;    (lim: local variable of raid0_set_limits())
  382  md_init_stacking_limits(&lim);
  383  lim.max_hw_sectors = mddev->chunk_sectors;
  384  lim.max_write_zeroes_sectors = mddev->chunk_sectors;
  385  lim.max_hw_wzeroes_unmap_sectors = mddev->chunk_sectors;
  386  lim.io_min = mddev->chunk_sectors << 9;
  387  lim.io_opt = lim.io_min * mddev->raid_disks;
  388  lim.chunk_sectors = mddev->chunk_sectors;
  389  lim.features |= BLK_FEAT_ATOMIC_WRITES;
  390  err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
  [all …]
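
The md-linear and raid0 hits above follow the same shape: initialise a queue_limits on the stack, fill in the fields that describe the device, then apply the result to the gendisk's queue. A minimal sketch of that pattern for a hypothetical stacking driver (my_set_limits and my_disk are invented names; blk_set_stacking_limits() and queue_limits_set() are the calls visible in the blk-settings.c and md-linear.c hits, and md's own md_init_stacking_limits() wrapper is deliberately not used here):

    #include <linux/blkdev.h>

    /* Hypothetical helper for a stacking block driver. */
    static int my_set_limits(struct gendisk *my_disk, unsigned int chunk_sectors)
    {
            struct queue_limits lim;

            blk_set_stacking_limits(&lim);        /* zero the struct, then set permissive stacking defaults */
            lim.max_hw_sectors = chunk_sectors;   /* cap a single request at one chunk */
            lim.io_min = chunk_sectors << 9;      /* preferred minimum I/O size, in bytes */
            return queue_limits_set(my_disk->queue, &lim);
    }
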
/linux/drivers/media/i2c/
ccs-pll.c
  157  const struct ccs_pll_limits *lim,    (lim: argument of check_fr_bounds())
  166  lim_fr = &lim->op_fr;
  169  lim_fr = &lim->vt_fr;
  197  const struct ccs_pll_limits *lim,    (lim: argument of check_bk_bounds())
  209  lim_bk = &lim->op_bk;
  212  lim_bk = &lim->vt_bk;
  256  ccs_pll_find_vt_sys_div(struct device *dev, const struct ccs_pll_limits *lim,    (lim: argument)
  265  *min_sys_div = lim->vt_bk.min_sys_clk_div;
  269  lim->vt_bk.max_pix_clk_div));
  273  / lim->vt_bk.max_sys_clk_freq_hz);
  [all …]
/linux/drivers/mmc/core/
queue.c
  178  struct queue_limits *lim)    (lim: argument of mmc_queue_setup_discard())
  186  lim->max_hw_discard_sectors = max_discard;
  188  lim->max_secure_erase_sectors = max_discard;
  190  lim->max_write_zeroes_sectors = max_discard;
  194  lim->discard_granularity = SECTOR_SIZE;
  196  lim->discard_granularity = card->pref_erase << 9;
  350  struct queue_limits lim = {    (lim: local variable of mmc_alloc_disk())
  356  mmc_queue_setup_discard(card, &lim);
  358  lim.max_hw_sectors = min(host->max_blk_count, host->max_req_size / 512);
  361  lim.logical_block_size = card->ext_csd.data_sector_size;
  [all …]
/linux/lib/
bitmap.c
   40  unsigned int k, lim = bits/BITS_PER_LONG;    (lim: local variable of __bitmap_equal())
   41  for (k = 0; k < lim; ++k)
   58  unsigned int k, lim = bits / BITS_PER_LONG;    (lim: local variable of __bitmap_or_equal())
   61  for (k = 0; k < lim; ++k) {
   75  unsigned int k, lim = BITS_TO_LONGS(bits);    (lim: local variable of __bitmap_complement())
   76  for (k = 0; k < lim; ++k)
   95  unsigned k, lim = BITS_TO_LONGS(nbits);    (lim: local variable of __bitmap_shift_right())
   98  for (k = 0; off + k < lim; ++k) {
  105  if (!rem || off + k + 1 >= lim)
  109  if (off + k + 1 == lim - 1)
  [all …]
/linux/tools/lib/
bitmap.c
   10  unsigned int k, w = 0, lim = bits/BITS_PER_LONG;    (lim: local variable of __bitmap_weight())
   12  for (k = 0; k < lim; k++)
   64  unsigned int lim = bits/BITS_PER_LONG;    (lim: local variable of __bitmap_and())
   67  for (k = 0; k < lim; k++)
   78  unsigned int k, lim = bits/BITS_PER_LONG;    (lim: local variable of __bitmap_equal())
   79  for (k = 0; k < lim; ++k)
   93  unsigned int k, lim = bits/BITS_PER_LONG;    (lim: local variable of __bitmap_intersects())
   94  for (k = 0; k < lim; ++k)
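
In all of these bitmap helpers, lim is the number of whole unsigned longs covered by the bitmap (bits / BITS_PER_LONG); the loop handles full words and any partial tail word is masked separately. A standalone userspace sketch of the same idiom (my_bitmap_weight is an invented name, not the kernel implementation, and it relies on the GCC/Clang __builtin_popcountl builtin):

    #include <limits.h>    /* CHAR_BIT */

    #define MY_BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

    /* Count the set bits in a bitmap of 'bits' bits; 'map' must contain
     * enough unsigned longs to cover that many bits. */
    static unsigned int my_bitmap_weight(const unsigned long *map, unsigned int bits)
    {
            unsigned int k, w = 0, lim = bits / MY_BITS_PER_LONG;

            for (k = 0; k < lim; k++)                     /* full words */
                    w += __builtin_popcountl(map[k]);

            if (bits % MY_BITS_PER_LONG)                  /* partial tail word */
                    w += __builtin_popcountl(map[k] &
                                    ((1UL << (bits % MY_BITS_PER_LONG)) - 1));
            return w;
    }
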
/linux/drivers/block/
virtio_blk.c
  718  struct queue_limits *lim)    (lim: argument of virtblk_read_zoned_limits())
  725  lim->features |= BLK_FEAT_ZONED;
  729  lim->max_open_zones = v;
  734  lim->max_active_zones = v;
  743  lim->physical_block_size = wg;
  744  lim->io_min = wg;
  760  lim->chunk_sectors = vblk->zone_sectors;
  766  lim->max_hw_discard_sectors = 0;
  781  lim->max_hw_zone_append_sectors = v;
  793  struct queue_limits *lim)    (lim: argument of virtblk_read_zoned_limits())
  [all …]
loop.c
  226  struct queue_limits lim = queue_limits_start_update(lo->lo_queue);    (lim: local variable of loop_clear_limits())
  229  lim.max_write_zeroes_sectors = 0;
  232  lim.max_hw_discard_sectors = 0;
  233  lim.discard_granularity = 0;
  243  queue_limits_commit_update(lo->lo_queue, &lim);
  949  static void loop_update_limits(struct loop_device *lo, struct queue_limits *lim,    (lim: argument)
  967  lim->logical_block_size = bsize;
  968  lim->physical_block_size = bsize;
  969  lim->io_min = bsize;
  970  lim->features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_ROTATIONAL);
  [all …]
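
loop_clear_limits() above, like the sysfs and SCSI hits elsewhere in this list, shows the transactional way to change the limits of a live queue: snapshot them with queue_limits_start_update(), edit the copy, then publish it with queue_limits_commit_update(). A hedged sketch of that sequence (my_disable_discard is an invented name; the two queue_limits_* calls are exactly the ones visible in the hits):

    #include <linux/blkdev.h>

    /* Hypothetical example: turn off discard support on a live queue. */
    static int my_disable_discard(struct request_queue *q)
    {
            struct queue_limits lim = queue_limits_start_update(q);

            lim.max_hw_discard_sectors = 0;
            lim.discard_granularity = 0;
            return queue_limits_commit_update(q, &lim);
    }
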
/linux/tools/include/nolibc/
stdlib.h
  240  unsigned long lim;    (lim: local variable of utoa_r())
  246  for (dig = 0, lim = 1; dig < pos; dig++)
  247  lim *= 10;
  249  if (digits || in >= lim || !pos) {
  250  for (dig = 0; in >= lim; dig++)
  251  in -= lim;
  376  unsigned long long lim;    (lim: local variable of u64toa_r())
  382  for (dig = 0, lim = 1; dig < pos; dig++)
  383  lim *= 10;
  385  if (digits || in >= lim || !pos) {
  [all …]
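
In utoa_r() and u64toa_r(), lim walks down through the powers of ten: for each digit position the value is reduced by repeated subtraction of lim, which yields the digit without a divide. A self-contained userspace sketch of the same technique (my_utoa is an invented name; this is not the nolibc code itself):

    #include <stdio.h>

    /* Convert an unsigned 64-bit value to decimal by repeated subtraction
     * against a descending power-of-ten limit. Returns the string length. */
    static int my_utoa(unsigned long long in, char *buf)
    {
            unsigned long long lim;
            int pos, dig, len = 0;

            for (pos = 19; pos >= 0; pos--) {        /* 10^19 is the largest power of ten in 64 bits */
                    for (dig = 0, lim = 1; dig < pos; dig++)
                            lim *= 10;
                    if (len || in >= lim || !pos) {  /* skip leading zeros, always emit the last digit */
                            for (dig = 0; in >= lim; dig++)
                                    in -= lim;
                            buf[len++] = '0' + dig;
                    }
            }
            buf[len] = '\0';
            return len;
    }

    int main(void)
    {
            char buf[21];

            my_utoa(1234567ULL, buf);
            printf("%s\n", buf);    /* prints 1234567 */
            return 0;
    }
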
/linux/drivers/usb/storage/
scsiglue.c
   91  static int sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)    (lim: argument)
  106  lim->max_hw_sectors = min(lim->max_hw_sectors, max_sectors);
  113  lim->max_hw_sectors = 0x7FFFFF;
  119  lim->max_hw_sectors = 2048;
  126  lim->max_hw_sectors = min_t(size_t,
  127  lim->max_hw_sectors, dma_max_mapping_size(dev) >> SECTOR_SHIFT);
  588  struct queue_limits lim;    (lim: local variable of max_sectors_store())
  595  lim = queue_limits_start_update(sdev->request_queue);
  596  lim.max_hw_sectors = ms;
  597  ret = queue_limits_commit_update_frozen(sdev->request_queue, &lim);
/linux/arch/um/os-Linux/
main.c
   28  struct rlimit lim;    (lim: local variable of set_stklim())
   30  if (getrlimit(RLIMIT_STACK, &lim) < 0) {
   34  if ((lim.rlim_cur == RLIM_INFINITY) || (lim.rlim_cur > STACKSIZE)) {
   35  lim.rlim_cur = STACKSIZE;
   36  if (setrlimit(RLIMIT_STACK, &lim) < 0) {
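
set_stklim() clamps the UML host process stack to STACKSIZE using getrlimit()/setrlimit(). The same POSIX calls work in any userspace program; a standalone sketch (MY_STACK_LIMIT is an arbitrary cap chosen for the example):

    #include <stdio.h>
    #include <sys/resource.h>

    #define MY_STACK_LIMIT (8UL * 1024 * 1024)   /* arbitrary 8 MiB cap for the example */

    int main(void)
    {
            struct rlimit lim;

            if (getrlimit(RLIMIT_STACK, &lim) < 0) {
                    perror("getrlimit");
                    return 1;
            }
            if (lim.rlim_cur == RLIM_INFINITY || lim.rlim_cur > MY_STACK_LIMIT) {
                    lim.rlim_cur = MY_STACK_LIMIT;        /* lower only the soft limit */
                    if (setrlimit(RLIMIT_STACK, &lim) < 0) {
                            perror("setrlimit");
                            return 1;
                    }
            }
            printf("stack soft limit: %llu bytes\n", (unsigned long long)lim.rlim_cur);
            return 0;
    }
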
/linux/drivers/pps/generators/
pps_gen_parport.c
   60  long lim, delta;    (lim: local variable of hrtimer_event())
   78  lim = NSEC_PER_SEC - send_delay - dev->port_write_time;
   81  if (expire_time.tv_sec != ts1.tv_sec || ts1.tv_nsec > lim) {
   91  } while (expire_time.tv_sec == ts2.tv_sec && ts2.tv_nsec < lim);
   98  lim = NSEC_PER_SEC - dev->port_write_time;
  101  } while (expire_time.tv_sec == ts2.tv_sec && ts2.tv_nsec < lim);
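
Here lim is a deadline inside the current second: the handler busy-waits until tv_nsec reaches NSEC_PER_SEC minus the measured port write time, so the edge lands as close to the second boundary as possible. A rough userspace illustration of that comparison using clock_gettime() (my_wait_near_second is an invented name; a real generator arms a timer first rather than spinning the whole second):

    #include <stdio.h>
    #include <time.h>

    #define MY_NSEC_PER_SEC 1000000000L

    /* Spin until we are within 'slack_ns' of the next full second. */
    static void my_wait_near_second(long slack_ns)
    {
            struct timespec ts;
            long lim = MY_NSEC_PER_SEC - slack_ns;

            do {
                    clock_gettime(CLOCK_REALTIME, &ts);
            } while (ts.tv_nsec < lim);
    }

    int main(void)
    {
            my_wait_near_second(100000);    /* wake up roughly 100 us before the second rolls over */
            printf("close to the second boundary now\n");
            return 0;
    }
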
/linux/drivers/scsi/
sd.c
  105  static void sd_config_discard(struct scsi_disk *sdkp, struct queue_limits *lim,
  108  struct queue_limits *lim);
  125  struct queue_limits *lim)    (lim: argument of sd_set_flush_flag())
  128  lim->features |= BLK_FEAT_WRITE_CACHE;
  130  lim->features |= BLK_FEAT_FUA;
  132  lim->features &= ~BLK_FEAT_FUA;
  134  lim->features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA);
  173  struct queue_limits lim;    (lim: local variable of cache_type_store())
  178  lim = queue_limits_start_update(sdkp->disk->queue);
  179  sd_set_flush_flag(sdkp, &lim);
  [all …]
sun3x_esp.c
   87  int lim;    (lim: local variable of sun3x_esp_dma_drain())
   95  lim = 1000;
   97  if (--lim == 0) {
  109  int lim;    (lim: local variable of sun3x_esp_dma_invalidate())
  111  lim = 1000;
  113  if (--lim == 0) {
sun_esp.c
  225  int can_do_sbus64, lim;    (lim: local variable of sbus_esp_reset_dma())
  265  lim = 1000;
  267  if (--lim == 0) {
  324  int lim;    (lim: local variable of sbus_esp_dma_drain())
  336  lim = 1000;
  338  if (--lim == 0) {
  366  int lim;    (lim: local variable of sbus_esp_dma_invalidate())
  368  lim = 1000;
  370  if (--lim == 0) {
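
In the ESP drivers lim is a plain watchdog counter: poll the DMA status up to 1000 times and bail out instead of hanging if it never settles. A generic sketch of that bounded-poll idiom (my_read_status() is a stand-in for the MMIO read the real drivers do; here it is stubbed so the example runs on its own):

    #include <stdbool.h>
    #include <stdio.h>

    #define MY_DMA_BUSY 0x01

    /* Stand-in for the hardware status read; pretend the controller
     * stays busy for a handful of polls, then goes idle. */
    static unsigned int my_read_status(void)
    {
            static int polls;

            return (++polls < 5) ? MY_DMA_BUSY : 0;
    }

    /* Bounded-poll idiom: try at most 1000 times, then give up. */
    static bool my_wait_for_dma_idle(void)
    {
            int lim = 1000;

            while (my_read_status() & MY_DMA_BUSY) {
                    if (--lim == 0) {
                            fprintf(stderr, "DMA drain timed out\n");
                            return false;
                    }
            }
            return true;
    }

    int main(void)
    {
            printf("idle: %d\n", my_wait_for_dma_idle());
            return 0;
    }
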
/linux/rust/kernel/block/mq/
gen_disk.rs
  111  let mut lim: bindings::queue_limits = unsafe { core::mem::zeroed() };    (lim: local variable of build())
  113  lim.logical_block_size = self.logical_block_size;
  114  lim.physical_block_size = self.physical_block_size;
  116  lim.features = bindings::BLK_FEAT_ROTATIONAL;
  123  &mut lim,
/linux/drivers/edac/
amd64_edac.h
  136  #define dram_intlv_sel(pvt, i) ((u8)((pvt->ranges[i].lim.lo >> 8) & 0x7))
  137  #define dram_dst_node(pvt, i) ((u8)(pvt->ranges[i].lim.lo & 0x7))
  289  struct reg_pair lim;    (lim: struct member)
  422  u64 lim = (((u64)pvt->ranges[i].lim.lo & 0xffff0000) << 8) | 0x00ffffff;    (lim: local variable of get_dram_limit())
  425  return lim;
  427  return (((u64)pvt->ranges[i].lim.hi & 0x000000ff) << 40) | lim;
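
get_dram_limit() rebuilds a DRAM range limit from a register pair: bits 31:16 of lim.lo become physical address bits 39:24 (with the low 24 bits forced to ones), and bits 7:0 of lim.hi, when present, supply address bits 47:40. A small standalone check of that arithmetic (the register values are made up for the example):

    #include <stdio.h>
    #include <stdint.h>

    /* Mirror the bit manipulation from get_dram_limit() outside the kernel. */
    static uint64_t my_dram_limit(uint32_t lo, uint32_t hi)
    {
            uint64_t lim = (((uint64_t)lo & 0xffff0000) << 8) | 0x00ffffff;

            return (((uint64_t)hi & 0x000000ff) << 40) | lim;
    }

    int main(void)
    {
            /* Made-up register contents, just to show the reconstruction. */
            printf("limit = 0x%llx\n",
                   (unsigned long long)my_dram_limit(0x00ff0000, 0x00000003));
            return 0;
    }
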
/linux/drivers/nvme/host/
core.c
  1820  struct queue_limits *lim, struct nvme_ns_info *info)    (lim: argument of nvme_init_integrity())
  1822  struct blk_integrity *bi = &lim->integrity;
  1885  static void nvme_config_discard(struct nvme_ns *ns, struct queue_limits *lim)    (lim: argument)
  1890  lim->max_hw_discard_sectors =
  1893  lim->max_hw_discard_sectors = UINT_MAX;
  1895  lim->max_hw_discard_sectors = 0;
  1897  lim->discard_granularity = lim->logical_block_size;
  1900  lim->max_discard_segments = ctrl->dmrl;
  1902  lim->max_discard_segments = NVME_DSM_MAX_RANGES;
  2030  struct nvme_id_ns *id, struct queue_limits *lim, u32 bs)    (lim: argument of nvme_configure_atomic_write())
  [all …]
zns.c
  108  void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim,    (lim: argument)
  111  lim->features |= BLK_FEAT_ZONED;
  112  lim->max_open_zones = zi->max_open_zones;
  113  lim->max_active_zones = zi->max_active_zones;
  114  lim->max_hw_zone_append_sectors = ns->ctrl->max_zone_append;
  115  lim->chunk_sectors = ns->head->zsze =
/linux/drivers/regulator/
qcom-labibb-regulator.c
  310  static int qcom_labibb_set_ocp(struct regulator_dev *rdev, int lim,    (lim: argument)
  322  if (lim || severity != REGULATOR_SEVERITY_PROT || !enable)
  565  struct labibb_current_limits *lim = &vreg->uA_limits;    (lim: local variable of qcom_labibb_set_current_limit())
  569  if (min_uA < lim->uA_min || max_uA < lim->uA_min)
  573  int uA_limit = (lim->uA_step * i) + lim->uA_min;
  587  mask = desc->csel_mask | lim->ovr_val;
  589  val = (u32)sel | lim->ovr_val;
  599  struct labibb_current_limits *lim = &vreg->uA_limits;    (lim: local variable of qcom_labibb_get_current_limit())
  608  return (cur_step * lim->uA_step) + lim->uA_min;
