Home
last modified time | relevance | path

Searched refs:sectors (Results 1 – 25 of 250) sorted by relevance

12345678910

/linux/block/
H A Dbadblocks.c549 * (from bad table) indexed by 'prev'. The return value is sectors
554 sector_t sectors = bad->len; in front_merge() local
562 merged = min_t(sector_t, sectors, BB_END(p[prev]) - s); in front_merge()
564 merged = min_t(sector_t, sectors, BB_MAX_LEN - BB_LEN(p[prev])); in front_merge()
839 static bool _badblocks_set(struct badblocks *bb, sector_t s, sector_t sectors, in _badblocks_set() argument
852 if (sectors == 0) in _badblocks_set()
853 /* Invalid sectors number */ in _badblocks_set()
858 sector_t next = s + sectors; in _badblocks_set()
862 sectors = next - s; in _badblocks_set()
872 bad.len = sectors; in _badblocks_set()
991 sector_t sectors = bad->len; front_clear() local
1037 sector_t sectors = bad->len; front_splitting_clear() local
1049 _badblocks_clear(struct badblocks * bb,sector_t s,sector_t sectors) _badblocks_clear() argument
1186 _badblocks_check(struct badblocks * bb,sector_t s,sector_t sectors,sector_t * first_bad,sector_t * bad_sectors) _badblocks_check() argument
1298 badblocks_check(struct badblocks * bb,sector_t s,sector_t sectors,sector_t * first_bad,sector_t * bad_sectors) badblocks_check() argument
1341 badblocks_set(struct badblocks * bb,sector_t s,sector_t sectors,int acknowledged) badblocks_set() argument
1362 badblocks_clear(struct badblocks * bb,sector_t s,sector_t sectors) badblocks_clear() argument
[all...]
/linux/fs/bcachefs/
H A Dfs-io.h58 u64 sectors; member
67 BUG_ON(res->sectors > inode->ei_quota_reserved); in __bch2_quota_reservation_put()
70 -((s64) res->sectors), KEY_TYPE_QUOTA_PREALLOC); in __bch2_quota_reservation_put()
71 inode->ei_quota_reserved -= res->sectors; in __bch2_quota_reservation_put()
72 res->sectors = 0; in __bch2_quota_reservation_put()
79 if (res->sectors) { in bch2_quota_reservation_put()
89 u64 sectors, in bch2_quota_reservation_add() argument
98 ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, in bch2_quota_reservation_add()
101 inode->ei_quota_reserved += sectors; in bch2_quota_reservation_add()
102 res->sectors in bch2_quota_reservation_add()
122 bch2_quota_reservation_add(struct bch_fs * c,struct bch_inode_info * inode,struct quota_res * res,unsigned sectors,bool check_enospc) bch2_quota_reservation_add() argument
134 bch2_i_sectors_acct(struct bch_fs * c,struct bch_inode_info * inode,struct quota_res * quota_res,s64 sectors) bch2_i_sectors_acct() argument
[all...]
H A Dbuckets.h125 static inline s64 ptr_disk_sectors(s64 sectors, struct extent_ptr_decoded p) in ptr_disk_sectors() argument
127 EBUG_ON(sectors < 0); in ptr_disk_sectors()
130 ? DIV_ROUND_UP_ULL(sectors * p.crc.compressed_size, in ptr_disk_sectors()
132 : sectors; in ptr_disk_sectors()
296 if (res->sectors) { in bch2_disk_reservation_put()
297 this_cpu_sub(*c->online_reserved, res->sectors); in bch2_disk_reservation_put()
298 res->sectors = 0; in bch2_disk_reservation_put()
311 u64 sectors, enum bch_reservation_flags flags) in bch2_disk_reservation_add() argument
318 if (sectors > old) in bch2_disk_reservation_add()
319 return __bch2_disk_reservation_add(c, res, sectors, flag in bch2_disk_reservation_add()
347 bch2_disk_reservation_get(struct bch_fs * c,struct disk_reservation * res,u64 sectors,unsigned nr_replicas,int flags) bch2_disk_reservation_get() argument
[all...]
H A Dbuckets.c102 usage->d[i].sectors, in bch2_dev_usage_to_text()
362 m->sectors)) { in bch2_check_fix_ptrs()
457 s64 sectors, enum bch_data_type ptr_data_type, in bch2_bucket_ref_update() argument
464 bool inserting = sectors > 0; in bch2_bucket_ref_update()
467 BUG_ON(!sectors); in bch2_bucket_ref_update()
526 if (unlikely((u64) *bucket_sectors + sectors > U32_MAX)) { in bch2_bucket_ref_update()
532 *bucket_sectors, sectors); in bch2_bucket_ref_update()
536 sectors = -*bucket_sectors; in bch2_bucket_ref_update()
540 *bucket_sectors += sectors; in bch2_bucket_ref_update()
549 u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors in bch2_trans_account_disk_usage_change()
598 __mark_pointer(struct btree_trans * trans,struct bch_dev * ca,struct bkey_s_c k,const struct extent_ptr_decoded * p,s64 sectors,enum bch_data_type ptr_data_type,struct bch_alloc_v4 * a,bool insert) __mark_pointer() argument
619 bch2_trigger_pointer(struct btree_trans * trans,enum btree_id btree_id,unsigned level,struct bkey_s_c k,struct extent_ptr_decoded p,const union bch_extent_entry * entry,s64 * sectors,enum btree_iter_update_trigger_flags flags) bch2_trigger_pointer() argument
688 bch2_trigger_stripe_ptr(struct btree_trans * trans,struct bkey_s_c k,struct extent_ptr_decoded p,enum bch_data_type data_type,s64 sectors,enum btree_iter_update_trigger_flags flags) bch2_trigger_stripe_ptr() argument
962 s64 sectors[1] = { k.k->size }; __trigger_reservation() local
987 __bch2_trans_mark_metadata_bucket(struct btree_trans * trans,struct bch_dev * ca,u64 b,enum bch_data_type type,unsigned sectors) __bch2_trans_mark_metadata_bucket() argument
1033 bch2_mark_metadata_bucket(struct btree_trans * trans,struct bch_dev * ca,u64 b,enum bch_data_type data_type,unsigned sectors,enum btree_iter_update_trigger_flags flags) bch2_mark_metadata_bucket() argument
1075 bch2_trans_mark_metadata_bucket(struct btree_trans * trans,struct bch_dev * ca,u64 b,enum bch_data_type type,unsigned sectors,enum btree_iter_update_trigger_flags flags) bch2_trans_mark_metadata_bucket() argument
1104 unsigned sectors = bch2_trans_mark_metadata_sectors() local
1231 __bch2_disk_reservation_add(struct bch_fs * c,struct disk_reservation * res,u64 sectors,enum bch_reservation_flags flags) __bch2_disk_reservation_add() argument
[all...]
H A Dfs-io-pagecache.c160 unsigned i, sectors = folio_sectors(folio); in __bch2_folio_set() local
162 BUG_ON(pg_offset >= sectors); in __bch2_folio_set()
163 BUG_ON(pg_offset + pg_len > sectors); in __bch2_folio_set()
172 if (i == sectors) in __bch2_folio_set()
367 unsigned i, sectors = folio_sectors(folio), disk_res_sectors = 0; in bch2_get_folio_disk_reservation() local
373 for (i = 0; i < sectors; i++) in bch2_get_folio_disk_reservation()
387 for (i = 0; i < sectors; i++) in bch2_get_folio_disk_reservation()
433 if (unlikely(disk_res.sectors != disk_sectors)) { in __bch2_folio_reservation_get()
440 if (disk_sectors > disk_res.sectors) { in __bch2_folio_reservation_get()
467 res->disk.sectors in __bch2_folio_reservation_get()
495 int i, sectors = folio_sectors(folio), dirty_sectors = 0; bch2_clear_folio_bits() local
537 unsigned sectors = sectors_to_reserve(&s->s[i], bch2_set_folio_dirty() local
682 unsigned i, sectors = folio_sectors(folio); folio_data_offset() local
753 unsigned i, sectors; folio_hole_offset() local
[all...]
H A Dmovinggc.c35 size_t sectors; member
56 list->sectors += b->sectors; in move_bucket_in_flight_add()
88 b->sectors = bch2_bucket_sectors_dirty(*a); in bch2_bucket_is_movable()
125 list->sectors -= i->sectors; in move_buckets_wait()
145 size_t saw = 0, in_flight = 0, not_movable = 0, sectors = 0; in bch2_copygc_get_buckets() local
192 sectors += b.sectors; in bch2_copygc_get_buckets()
201 buckets_in_flight->nr, buckets_in_flight->sectors, in bch2_copygc_get_buckets()
[all...]
H A Dfs-io.c148 struct quota_res *quota_res, s64 sectors) in __bch2_i_sectors_acct() argument
150 if (unlikely((s64) inode->v.i_blocks + sectors < 0)) { in __bch2_i_sectors_acct()
154 inode->v.i_ino, (u64) inode->v.i_blocks, sectors, in __bch2_i_sectors_acct()
162 if (sectors < 0) in __bch2_i_sectors_acct()
163 sectors = -inode->v.i_blocks; in __bch2_i_sectors_acct()
165 sectors = 0; in __bch2_i_sectors_acct()
168 inode->v.i_blocks += sectors; in __bch2_i_sectors_acct()
173 sectors > 0) { in __bch2_i_sectors_acct()
174 BUG_ON(sectors > quota_res->sectors); in __bch2_i_sectors_acct()
650 unsigned sectors; __bchfs_fallocate() local
862 u64 sectors = end - start; quota_reserve_range() local
[all...]
H A Dalloc_foreground.h254 * Append pointers to the space we just allocated to @k, and mark @sectors space
259 struct bkey_i *k, unsigned sectors, in bch2_alloc_sectors_append_ptrs_inlined() argument
265 BUG_ON(sectors > wp->sectors_free); in bch2_alloc_sectors_append_ptrs_inlined()
266 wp->sectors_free -= sectors; in bch2_alloc_sectors_append_ptrs_inlined()
267 wp->sectors_allocated += sectors; in bch2_alloc_sectors_append_ptrs_inlined()
279 BUG_ON(sectors > ob->sectors_free); in bch2_alloc_sectors_append_ptrs_inlined()
280 ob->sectors_free -= sectors; in bch2_alloc_sectors_append_ptrs_inlined()
H A Dec.c152 prt_printf(out, "algo %u sectors %u blocks %u:%u csum ", in bch2_stripe_to_text()
154 le16_to_cpu(s.sectors), in bch2_stripe_to_text()
199 s64 sectors = parity ? le16_to_cpu(s.v->sectors) : 0; in __mark_stripe_bucket() local
205 sectors = -sectors; in __mark_stripe_bucket()
252 (a->dirty_sectors != -sectors || in __mark_stripe_bucket()
254 "bucket %llu:%llu gen %u dirty_sectors %u cached_sectors %u: wrong sectors when deleting parity block of stripe\n%s", in __mark_stripe_bucket()
264 if (sectors) { in __mark_stripe_bucket()
265 ret = bch2_bucket_ref_update(trans, ca, s.s_c, ptr, sectors, data_typ in __mark_stripe_bucket()
456 s64 sectors = (u64) le16_to_cpu(new_s->sectors) * new_s->nr_redundant; bch2_trigger_stripe() local
472 s64 sectors = -((s64) le16_to_cpu(old_s->sectors)) * old_s->nr_redundant; bch2_trigger_stripe() local
1063 unsigned sectors = stripe_blockcount_get(v, i); ec_stripe_key_update() local
2128 s64 sectors = 0; bch2_invalidate_stripe_to_dev() local
[all...]
/linux/Documentation/admin-guide/device-mapper/
H A Ddm-ebs.rst8 size. Its main purpose is to provide emulation of 512 byte sectors on
18 <dev path> <offset> <emulated sectors> [<underlying sectors>]
27 has to be a multiple of <emulated sectors>.
28 <emulated sectors>:
29 Number of sectors defining the logical block size to be emulated;
30 1, 2, 4, 8 sectors of 512 bytes supported.
34 <underlying sectors>:
35 Number of sectors defining the logical block size of <dev path>.
36 2^N supported, e.g. 8 = emulate 8 sectors o
[all...]
H A Ddm-integrity.rst68 dm-integrity won't read or write these sectors
77 not used and data sectors and integrity tags are written
96 data directly in the underlying device sectors.
111 The number of interleaved sectors. This value is rounded down to
120 The number of sectors in one metadata buffer. The value is rounded
183 512-byte sectors that corresponds to one bitmap bit.
202 copy sectors from one journal section to another journal section
206 key and also to disallow the attacker to move sectors from one
224 256 sectors of metadata per data area. With the default buffer_sectors of
231 2. provided data sectors
[all...]
/linux/drivers/target/
H A Dtarget_core_sbc.c216 static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors) in sbc_get_size() argument
218 return cmd->se_dev->dev_attrib.block_size * sectors; in sbc_get_size()
279 unsigned int sectors = sbc_get_write_same_sectors(cmd); in sbc_setup_write_same() local
288 if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) { in sbc_setup_write_same()
289 pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n", in sbc_setup_write_same()
290 sectors, cmd->se_dev->dev_attrib.max_write_same_len); in sbc_setup_write_same()
296 if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) || in sbc_setup_write_same()
297 ((cmd->t_task_lba + sectors) > end_lba)) { in sbc_setup_write_same()
298 pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n", in sbc_setup_write_same()
299 (unsigned long long)end_lba, cmd->t_task_lba, sectors); in sbc_setup_write_same()
668 sbc_check_prot(struct se_device * dev,struct se_cmd * cmd,unsigned char protect,u32 sectors,bool is_write) sbc_check_prot() argument
773 u32 sectors = 0; sbc_parse_cdb() local
1270 sbc_dif_copy_prot(struct se_cmd * cmd,unsigned int sectors,bool read,struct scatterlist * sg,int sg_off) sbc_dif_copy_prot() argument
1316 sbc_dif_verify(struct se_cmd * cmd,sector_t start,unsigned int sectors,unsigned int ei_lba,struct scatterlist * psg,int psg_off) sbc_dif_verify() argument
[all...]
/linux/drivers/md/
H A Draid0.c66 sector_t curr_zone_end, sectors; in create_strip_zones() local
83 sectors = rdev1->sectors; in create_strip_zones()
84 sector_div(sectors, mddev->chunk_sectors); in create_strip_zones()
85 rdev1->sectors = sectors * mddev->chunk_sectors; in create_strip_zones()
95 (unsigned long long)rdev1->sectors, in create_strip_zones()
97 (unsigned long long)rdev2->sectors); in create_strip_zones()
103 if (rdev2->sectors == rdev1->sectors) { in create_strip_zones()
353 raid0_size(struct mddev * mddev,sector_t sectors,int raid_disks) raid0_size() argument
595 unsigned sectors; raid0_make_request() local
[all...]
H A Dmd-linear.c55 static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks) in linear_size() argument
61 WARN_ONCE(sectors || raid_disks, in linear_size()
115 sector_t sectors; in linear_conf() local
125 sectors = rdev->sectors; in linear_conf()
126 sector_div(sectors, mddev->chunk_sectors); in linear_conf()
127 rdev->sectors = sectors * mddev->chunk_sectors; in linear_conf()
130 conf->array_sectors += rdev->sectors; in linear_conf()
142 conf->disks[0].end_sector = conf->disks[0].rdev->sectors; in linear_conf()
[all...]
H A Draid1.c65 sector_t hi = lo + r1_bio->sectors; in check_and_add_serial()
317 pr_debug("raid1: sync end %s on sectors %llu-%llu\n", in raid_end_bio_io()
341 r1_bio->sector + (r1_bio->sectors); in update_head_pos()
456 sector_t hi = r1_bio->sector + r1_bio->sectors; in raid1_end_write_request()
513 if (rdev_has_badblock(rdev, r1_bio->sector, r1_bio->sectors) && in raid1_end_write_request()
538 pr_debug("raid1: behind end write sectors" in raid1_end_write_request()
561 sector_t sectors) in align_to_barrier_unit_end() argument
565 WARN_ON(sectors == 0); in align_to_barrier_unit_end()
567 * len is the number of sectors from start_sector to end of the in align_to_barrier_unit_end()
573 if (len > sectors) in align_to_barrier_unit_end()
1693 sector_t sectors; raid1_make_request() local
2107 r1_sync_page_io(struct md_rdev * rdev,sector_t sector,int sectors,struct page * page,blk_opf_t rw) r1_sync_page_io() argument
2143 int sectors = r1_bio->sectors; fix_sync_read_error() local
2403 int sectors = r1_bio->sectors; fix_read_error() local
2510 int sectors; narrow_write_error() local
3072 raid1_size(struct mddev * mddev,sector_t sectors,int raid_disks) raid1_size() argument
3317 raid1_resize(struct mddev * mddev,sector_t sectors) raid1_resize() argument
[all...]
H A Draid10.c346 r10_bio->devs[slot].addr + (r10_bio->sectors); in update_head_pos()
530 r10_bio->sectors) && in raid10_end_write_request()
727 int sectors = r10_bio->sectors; in read_balance() local
747 if (raid1_should_read_first(conf->mddev, this_sector, sectors)) in read_balance()
762 r10_bio->devs[slot].addr + sectors > in read_balance()
769 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
773 if (is_badblock(rdev, dev_sector, sectors, in read_balance()
784 if (!do_balance && sectors > bad_sectors) in read_balance()
785 sectors in read_balance()
1121 regular_request_wait(struct mddev * mddev,struct r10conf * conf,struct bio * bio,sector_t sectors) regular_request_wait() argument
1352 sector_t sectors; raid10_write_request() local
1539 __make_request(struct mddev * mddev,struct bio * bio,int sectors) __make_request() argument
1876 int sectors = bio_sectors(bio); raid10_make_request() local
2397 int sectors = r10_bio->sectors; sync_request_write() local
2489 int sectors = r10_bio->sectors; fix_recovery_read_error() local
2598 r10_sync_page_io(struct md_rdev * rdev,sector_t sector,int sectors,struct page * page,enum req_op op) r10_sync_page_io() argument
2629 int sectors = r10_bio->sectors, slot = r10_bio->read_slot; fix_read_error() local
2801 int sectors; narrow_write_error() local
3787 raid10_size(struct mddev * mddev,sector_t sectors,int raid_disks) raid10_size() argument
4212 raid10_resize(struct mddev * mddev,sector_t sectors) raid10_resize() argument
4983 int sectors = r10_bio->sectors; handle_reshape_read_error() local
[all...]
H A Dmd.h88 * the same. The number of mismatch sectors will be exported to user
133 sector_t sectors; /* Device size (in 512bytes sectors) */ member
151 sector_t sb_start; /* offset of the super block (in 512byte sectors) */
219 unsigned int size; /* Size in sectors of the PPL space */
295 static inline int is_badblock(struct md_rdev *rdev, sector_t s, sector_t sectors, in is_badblock() argument
300 sectors, in is_badblock()
310 int sectors) in rdev_has_badblock() argument
315 return is_badblock(rdev, s, sectors, &first_bad, &bad_sectors); in rdev_has_badblock()
318 extern bool rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
875 unsigned long sectors; global() member
[all...]
/linux/Documentation/block/
H A Dstat.rst31 read sectors sectors number of sectors read
35 write sectors sectors number of sectors written
42 discard sectors sectors number of sectors discarded
67 read sectors, writ
[all...]
/linux/drivers/scsi/
H A Dscsicam.c50 * scsi_partsize - Parse cylinders/heads/sectors from PC partition table
52 * @capacity: size of the disk in sectors
53 * @geom: output in form of [hds, cylinders, sectors]
170 * minimizes the number of sectors that will be unused at the end
179 unsigned long heads, sectors, cylinders, temp; in setsize() local
182 sectors = 62L; /* Maximize sectors per track */ in setsize()
184 temp = cylinders * sectors; /* Compute divisor for heads */ in setsize()
188 temp = cylinders * heads; /* Compute divisor for sectors */ in setsize()
189 sectors in setsize()
[all...]
/linux/include/linux/
H A Dblk-integrity.h63 * @sectors: Size of the bio in 512-byte sectors
66 * sectors but integrity metadata is done in terms of the data integrity
67 * interval size of the storage device. Convert the block layer sectors
71 unsigned int sectors) in bio_integrity_intervals() argument
73 return sectors >> (bi->interval_exp - 9); in bio_integrity_intervals()
77 unsigned int sectors) in bio_integrity_bytes() argument
79 return bio_integrity_intervals(bi, sectors) * bi->metadata_size; in bio_integrity_bytes()
138 unsigned int sectors) in bio_integrity_intervals() argument
144 unsigned int sectors) in bio_integrity_bytes() argument
[all...]
H A Dbadblocks.h35 int shift; /* shift from sectors to block size
42 sector_t size; /* in sectors */
51 int badblocks_check(struct badblocks *bb, sector_t s, sector_t sectors,
53 bool badblocks_set(struct badblocks *bb, sector_t s, sector_t sectors,
55 bool badblocks_clear(struct badblocks *bb, sector_t s, sector_t sectors);
/linux/drivers/mtd/
H A Dssfdc.c22 unsigned char sectors; member
317 ssfdc->sectors = 32; in ssfdcr_add_mtd()
318 get_chs(mtd->size, NULL, &ssfdc->heads, &ssfdc->sectors); in ssfdcr_add_mtd()
320 ((long)ssfdc->sectors * (long)ssfdc->heads)); in ssfdcr_add_mtd()
323 ssfdc->cylinders, ssfdc->heads , ssfdc->sectors, in ssfdcr_add_mtd()
325 (long)ssfdc->sectors); in ssfdcr_add_mtd()
328 (long)ssfdc->sectors; in ssfdcr_add_mtd()
411 ssfdc->cylinders, ssfdc->heads, ssfdc->sectors); in ssfdcr_getgeo()
414 geo->sectors = ssfdc->sectors; in ssfdcr_getgeo()
[all...]
/linux/drivers/usb/storage/
H A Ddatafab.c60 unsigned long sectors; /* total sector count */ member
138 u32 sectors) in datafab_read_data() argument
153 if (sectors > 0x0FFFFFFF) in datafab_read_data()
162 totallen = sectors * info->ssize; in datafab_read_data()
221 u32 sectors) in datafab_write_data() argument
237 if (sectors > 0x0FFFFFFF) in datafab_write_data()
246 totallen = sectors * info->ssize; in datafab_write_data()
420 info->sectors = ((u32)(reply[117]) << 24) | in datafab_id_device()
576 info->ssize = 0x200; // hard coded 512 byte sectors as per ATA spec in datafab_transport()
581 usb_stor_dbg(us, "READ_CAPACITY: %ld sectors, in datafab_transport()
[all...]
/linux/Documentation/ABI/testing/
H A Dprocfs-diskstats15 6 sectors read
19 10 sectors written
32 17 sectors discarded
/linux/drivers/md/bcache/
H A Dalloc.c86 void bch_rescale_priorities(struct cache_set *c, int sectors) in bch_rescale_priorities() argument
93 atomic_sub(sectors, &c->rescale); in bch_rescale_priorities()
160 * first: we also take into account the number of sectors of live data in that
544 * For example, dirty sectors of flash only volume is not reclaimable, if their
545 * dirty sectors mixed with dirty sectors of cached device, such buckets will
594 * May allocate fewer sectors than @sectors, KEY_SIZE(k) indicates how many
595 * sectors were actually allocated.
601 unsigned int sectors, in bch_alloc_sectors() argument
[all...]

12345678910