Lines Matching +full:mi +full:- +full:v

Each match below reads: line number within the source file, the matched line, then the enclosing function ("in foo()"); a trailing "local" marks a local declaration. All functions named in this excerpt are from fs/bcachefs/buckets.c in the Linux kernel.

1 // SPDX-License-Identifier: GPL-2.0
34 usage->buckets[i] = percpu_u64_get(&ca->usage->d[i].buckets); in bch2_dev_usage_read_fast()
40 acc_u64s_percpu((u64 *) usage, (u64 __percpu *) ca->usage, in bch2_dev_usage_full_read_fast()
55 ret.capacity = c->capacity - in __bch2_fs_usage_read_short()
56 percpu_u64_get(&c->usage->hidden); in __bch2_fs_usage_read_short()
58 data = percpu_u64_get(&c->usage->data) + in __bch2_fs_usage_read_short()
59 percpu_u64_get(&c->usage->btree); in __bch2_fs_usage_read_short()
60 reserved = percpu_u64_get(&c->usage->reserved) + in __bch2_fs_usage_read_short()
61 percpu_u64_get(c->online_reserved); in __bch2_fs_usage_read_short()
64 ret.free = ret.capacity - ret.used; in __bch2_fs_usage_read_short()
66 ret.nr_inodes = percpu_u64_get(&c->usage->nr_inodes); in __bch2_fs_usage_read_short()
76 percpu_down_read(&c->mark_lock); in bch2_fs_usage_read_short()
78 percpu_up_read(&c->mark_lock); in bch2_fs_usage_read_short()
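
The reads above are lockless sums of per-cpu counters: percpu_u64_get() totals one u64 across all CPUs, and acc_u64s_percpu() does the same for a whole array; bch2_fs_usage_read_short() then brackets the computation with mark_lock held for read. A minimal userspace analogue of the summation, with illustrative names (NR_THREADS, counters), not the kernel's API:

#include <stdatomic.h>
#include <stdint.h>

#define NR_THREADS 8

/* One counter copy per thread; writers touch only their own slot. */
static _Atomic uint64_t counters[NR_THREADS];

static uint64_t percpu_sum(void)
{
	uint64_t sum = 0;

	for (int i = 0; i < NR_THREADS; i++)
		sum += atomic_load_explicit(&counters[i],
					    memory_order_relaxed);
	return sum;	/* an approximate snapshot; tearing is tolerated */
}
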
87 if (out->nr_tabstops < 5) { in bch2_dev_usage_to_text()
101 usage->d[i].buckets, in bch2_dev_usage_to_text()
102 usage->d[i].sectors, in bch2_dev_usage_to_text()
103 usage->d[i].fragmented); in bch2_dev_usage_to_text()
106 prt_printf(out, "capacity\t%llu\r\n", ca->mi.nbuckets); in bch2_dev_usage_to_text()
115 struct bch_fs *c = trans->c; in bch2_check_fix_ptr()
146 if (fsck_err_on(!g->gen_valid, in bch2_check_fix_ptr()
156 g->gen_valid = true; in bch2_check_fix_ptr()
157 g->gen = p.ptr.gen; in bch2_check_fix_ptr()
163 if (fsck_err_on(gen_cmp(p.ptr.gen, g->gen) > 0, in bch2_check_fix_ptr()
169 p.ptr.gen, g->gen, in bch2_check_fix_ptr()
173 (g->data_type != BCH_DATA_btree || in bch2_check_fix_ptr()
175 g->gen_valid = true; in bch2_check_fix_ptr()
176 g->gen = p.ptr.gen; in bch2_check_fix_ptr()
177 g->data_type = 0; in bch2_check_fix_ptr()
178 g->stripe_sectors = 0; in bch2_check_fix_ptr()
179 g->dirty_sectors = 0; in bch2_check_fix_ptr()
180 g->cached_sectors = 0; in bch2_check_fix_ptr()
186 if (fsck_err_on(gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX, in bch2_check_fix_ptr()
190 p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen, in bch2_check_fix_ptr()
197 if (fsck_err_on(!p.ptr.cached && gen_cmp(p.ptr.gen, g->gen) < 0, in bch2_check_fix_ptr()
203 p.ptr.gen, g->gen, in bch2_check_fix_ptr()
208 if (data_type != BCH_DATA_btree && p.ptr.gen != g->gen) in bch2_check_fix_ptr()
211 if (fsck_err_on(bucket_data_type_mismatch(g->data_type, data_type), in bch2_check_fix_ptr()
215 p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen, in bch2_check_fix_ptr()
216 bch2_data_type_str(g->data_type), in bch2_check_fix_ptr()
221 g->gen_valid = true; in bch2_check_fix_ptr()
222 g->gen = p.ptr.gen; in bch2_check_fix_ptr()
223 g->data_type = data_type; in bch2_check_fix_ptr()
224 g->stripe_sectors = 0; in bch2_check_fix_ptr()
225 g->dirty_sectors = 0; in bch2_check_fix_ptr()
226 g->cached_sectors = 0; in bch2_check_fix_ptr()
233 struct gc_stripe *m = genradix_ptr(&c->gc_stripes, p.ec.idx); in bch2_check_fix_ptr()
235 if (fsck_err_on(!m || !m->alive, in bch2_check_fix_ptr()
244 if (fsck_err_on(m && m->alive && !bch2_ptr_matches_stripe_m(m, p), in bch2_check_fix_ptr()
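
bch2_check_fix_ptr() above repeatedly compares 8-bit bucket generation numbers with gen_cmp(), and BUCKET_GC_GEN_MAX bounds how far two live generations may drift apart. Generation numbers wrap modulo 256, so the comparison must be wrapping too. A sketch of the standard technique (the tree defines the real gen_cmp in buckets.h; treat this as an illustration, not a verified copy):

#include <stdint.h>
#include <stdio.h>

/* Wrapping comparison: subtract modulo 256, reinterpret as signed, so a
 * generation that recently wrapped still compares as newer. */
static int gen_cmp(uint8_t a, uint8_t b)
{
	return (int8_t)(a - b);
}

int main(void)
{
	printf("%d\n", gen_cmp(2, 250) > 0);	/* 1: gen 2 is newer */
	printf("%d\n", gen_cmp(250, 2) > 0);	/* 0: gen 250 is older */
	return 0;
}
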
264 struct bch_fs *c = trans->c; in bch2_check_fix_ptrs()
281 ret = -EINVAL; in bch2_check_fix_ptrs()
291 bch2_bkey_drop_ptrs(bkey_i_to_s(new), ptr, !bch2_dev_exists(c, ptr->dev)); in bch2_check_fix_ptrs()
296 * We don't want to drop btree node pointers - if the in bch2_check_fix_ptrs()
303 struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev); in bch2_check_fix_ptrs()
306 ptr->gen = g->gen; in bch2_check_fix_ptrs()
322 (!g->gen_valid || gen_cmp(p.ptr.gen, g->gen) > 0)) || in bch2_check_fix_ptrs()
324 gen_cmp(p.ptr.gen, g->gen) < 0) || in bch2_check_fix_ptrs()
325 gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX || in bch2_check_fix_ptrs()
326 (g->data_type && in bch2_check_fix_ptrs()
327 g->data_type != data_type)) { in bch2_check_fix_ptrs()
328 bch2_bkey_drop_ptr(bkey_i_to_s(new), &entry->ptr); in bch2_check_fix_ptrs()
337 struct gc_stripe *m = genradix_ptr(&c->gc_stripes, in bch2_check_fix_ptrs()
338 entry->stripe_ptr.idx); in bch2_check_fix_ptrs()
351 if (!m || !m->alive || in bch2_check_fix_ptrs()
352 !__bch2_ptr_matches_stripe(&m->ptrs[entry->stripe_ptr.block], in bch2_check_fix_ptrs()
353 &next_ptr->ptr, in bch2_check_fix_ptrs()
354 m->sectors)) { in bch2_check_fix_ptrs()
373 bch2_trans_node_iter_init(trans, &iter, btree, new->k.p, 0, level, in bch2_check_fix_ptrs()
384 bch2_btree_node_update_key_early(trans, btree, level - 1, k, new); in bch2_check_fix_ptrs()
394 struct bch_fs *c = trans->c; in bucket_ref_update_err()
401 __bch2_count_fsck_err(c, id, buf->buf, &repeat, &print, &suppress); in bucket_ref_update_err()
411 ret = -BCH_ERR_bucket_ref_update; in bucket_ref_update_err()
417 bch2_print_string_as_lines(KERN_ERR, buf->buf); in bucket_ref_update_err()
428 struct bch_fs *c = trans->c; in bch2_bucket_ref_update()
436 if (unlikely(gen_after(ptr->gen, b_gen))) { in bch2_bucket_ref_update()
440 ptr->dev, bucket_nr, b_gen, in bch2_bucket_ref_update()
442 ptr->gen); in bch2_bucket_ref_update()
449 if (unlikely(gen_cmp(b_gen, ptr->gen) > BUCKET_GC_GEN_MAX)) { in bch2_bucket_ref_update()
453 ptr->dev, bucket_nr, b_gen, in bch2_bucket_ref_update()
455 ptr->gen); in bch2_bucket_ref_update()
462 if (b_gen != ptr->gen && ptr->cached) { in bch2_bucket_ref_update()
467 if (unlikely(b_gen != ptr->gen)) { in bch2_bucket_ref_update()
471 ptr->dev, bucket_nr, b_gen, in bch2_bucket_ref_update()
474 ptr->gen); in bch2_bucket_ref_update()
484 ptr->dev, bucket_nr, b_gen, in bch2_bucket_ref_update()
497 ptr->dev, bucket_nr, b_gen, in bch2_bucket_ref_update()
503 sectors = -*bucket_sectors; in bch2_bucket_ref_update()
515 struct bch_fs *c = trans->c; in bch2_trans_account_disk_usage_change()
516 u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0; in bch2_trans_account_disk_usage_change()
520 percpu_down_read(&c->mark_lock); in bch2_trans_account_disk_usage_change()
521 struct bch_fs_usage_base *src = &trans->fs_usage_delta; in bch2_trans_account_disk_usage_change()
523 s64 added = src->btree + src->data + src->reserved; in bch2_trans_account_disk_usage_change()
529 s64 should_not_have_added = added - (s64) disk_res_sectors; in bch2_trans_account_disk_usage_change()
533 old = atomic64_read(&c->sectors_available); in bch2_trans_account_disk_usage_change()
535 new = max_t(s64, 0, old - should_not_have_added); in bch2_trans_account_disk_usage_change()
536 } while (!atomic64_try_cmpxchg(&c->sectors_available, in bch2_trans_account_disk_usage_change()
539 added -= should_not_have_added; in bch2_trans_account_disk_usage_change()
544 trans->disk_res->sectors -= added; in bch2_trans_account_disk_usage_change()
545 this_cpu_sub(*c->online_reserved, added); in bch2_trans_account_disk_usage_change()
549 struct bch_fs_usage_base *dst = this_cpu_ptr(c->usage); in bch2_trans_account_disk_usage_change()
552 percpu_up_read(&c->mark_lock); in bch2_trans_account_disk_usage_change()
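
The loop at file lines 533-536 above takes the overshoot back out of c->sectors_available while clamping at zero, so a miscounted reservation can never drive the global counter negative. The same pattern in portable C11 (function and variable names are illustrative):

#include <stdatomic.h>
#include <stdint.h>

/* Clamped atomic subtract: remove up to `want` from *avail without going
 * negative; returns how much was actually removed. Mirrors the
 * atomic64_try_cmpxchg() loop above. */
static int64_t take_clamped(_Atomic int64_t *avail, int64_t want)
{
	int64_t old = atomic_load(avail);
	int64_t new;

	do {
		new = old - want;
		if (new < 0)
			new = 0;
	} while (!atomic_compare_exchange_weak(avail, &old, new));

	return old - new;
}
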
569 u32 *dst_sectors = p->has_ec ? &a->stripe_sectors : in __mark_pointer()
570 !p->ptr.cached ? &a->dirty_sectors : in __mark_pointer()
571 &a->cached_sectors; in __mark_pointer()
572 int ret = bch2_bucket_ref_update(trans, ca, k, &p->ptr, sectors, ptr_data_type, in __mark_pointer()
573 a->gen, a->data_type, dst_sectors); in __mark_pointer()
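
__mark_pointer() routes the sector delta to exactly one of the bucket's three counters via the nested conditional above. The same selection written out as if/else, as a self-contained toy (the struct mirrors the alloc fields visible in the fragments; names are illustrative):

#include <stdbool.h>
#include <stdint.h>

struct alloc_counters {
	uint32_t stripe_sectors;	/* erasure-coded data */
	uint32_t dirty_sectors;		/* ordinary dirty data */
	uint32_t cached_sectors;	/* cached, evictable copies */
};

static uint32_t *dst_counter(struct alloc_counters *a, bool has_ec, bool cached)
{
	if (has_ec)
		return &a->stripe_sectors;
	if (!cached)
		return &a->dirty_sectors;
	return &a->cached_sectors;
}
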
589 struct bch_fs *c = trans->c; in bch2_trigger_pointer()
597 *sectors = insert ? bp.v.bucket_len : -(s64) bp.v.bucket_len; in bch2_trigger_pointer()
602 ret = -BCH_ERR_trigger_pointer; in bch2_trigger_pointer()
610 ret = -BCH_ERR_trigger_pointer; in bch2_trigger_pointer()
618 __mark_pointer(trans, ca, k, &p, *sectors, bp.v.data_type, &a->v, insert); in bch2_trigger_pointer()
632 ret = -BCH_ERR_trigger_pointer; in bch2_trigger_pointer()
638 ret = __mark_pointer(trans, ca, k, &p, *sectors, bp.v.data_type, &new, insert); in bch2_trigger_pointer()
671 if (!bch2_ptr_matches_stripe(&s->v, p)) { in bch2_trigger_stripe_ptr()
675 ret = -BCH_ERR_trigger_stripe_pointer; in bch2_trigger_stripe_ptr()
679 stripe_blockcount_set(&s->v, p.ec.block, in bch2_trigger_stripe_ptr()
680 stripe_blockcount_get(&s->v, p.ec.block) + in bch2_trigger_stripe_ptr()
686 bch2_bkey_to_replicas(&acc.replicas, bkey_i_to_s_c(&s->k_i)); in bch2_trigger_stripe_ptr()
695 struct bch_fs *c = trans->c; in bch2_trigger_stripe_ptr()
697 struct gc_stripe *m = genradix_ptr_alloc(&c->gc_stripes, p.ec.idx, GFP_KERNEL); in bch2_trigger_stripe_ptr()
701 return -BCH_ERR_ENOMEM_mark_stripe_ptr; in bch2_trigger_stripe_ptr()
706 if (!m || !m->alive) { in bch2_trigger_stripe_ptr()
716 return -BCH_ERR_trigger_stripe_pointer; in bch2_trigger_stripe_ptr()
719 m->block_sectors[p.ec.block] += sectors; in bch2_trigger_stripe_ptr()
724 unsafe_memcpy(&acc.replicas, &m->r.e, replicas_entry_bytes(&m->r.e), "VLA"); in bch2_trigger_stripe_ptr()
820 if (acc_replicas_key.replicas.nr_devs && !level && k.k->p.snapshot) { in __trigger_extent()
821 ret = bch2_disk_accounting_mod2_nr(trans, gc, replicas_sectors, 1, snapshot, k.k->p.snapshot); in __trigger_extent()
843 s64 v[3] = { in __trigger_extent() local
844 insert ? 1 : -1, in __trigger_extent()
845 insert ? k.k->size : -((s64) k.k->size), in __trigger_extent()
848 ret = bch2_disk_accounting_mod2(trans, gc, v, inum, k.k->p.inode); in __trigger_extent()
861 struct bch_fs *c = trans->c; in bch2_trigger_extent()
864 unsigned new_ptrs_bytes = (void *) new_ptrs.end - (void *) new_ptrs.start; in bch2_trigger_extent()
865 unsigned old_ptrs_bytes = (void *) old_ptrs.end - (void *) old_ptrs.start; in bch2_trigger_extent()
870 /* if pointers aren't changing - nothing to do: */ in bch2_trigger_extent()
880 if (old.k->type) { in bch2_trigger_extent()
888 if (new.k->type) { in bch2_trigger_extent()
900 need_rebalance_delta -= s != 0; in bch2_trigger_extent()
901 need_rebalance_sectors_delta[0] -= s; in bch2_trigger_extent()
909 new.k->p, need_rebalance_delta > 0); in bch2_trigger_extent()
932 s64 sectors[1] = { k.k->size }; in __trigger_reservation()
935 sectors[0] = -sectors[0]; in __trigger_reservation()
938 persistent_reserved, bkey_s_c_to_reservation(k).v->nr_replicas); in __trigger_reservation()
959 struct bch_fs *c = trans->c; in __bch2_trans_mark_metadata_bucket()
964 bch2_trans_start_alloc_update_noupdate(trans, &iter, POS(ca->dev_idx, b)); in __bch2_trans_mark_metadata_bucket()
968 if (a->v.data_type && type && a->v.data_type != type) { in __bch2_trans_mark_metadata_bucket()
973 iter.pos.inode, iter.pos.offset, a->v.gen, in __bch2_trans_mark_metadata_bucket()
974 bch2_data_type_str(a->v.data_type), in __bch2_trans_mark_metadata_bucket()
977 ret = -BCH_ERR_metadata_bucket_inconsistency; in __bch2_trans_mark_metadata_bucket()
981 if (a->v.data_type != type || in __bch2_trans_mark_metadata_bucket()
982 a->v.dirty_sectors != sectors) { in __bch2_trans_mark_metadata_bucket()
983 a->v.data_type = type; in __bch2_trans_mark_metadata_bucket()
984 a->v.dirty_sectors = sectors; in __bch2_trans_mark_metadata_bucket()
985 ret = bch2_trans_update(trans, &iter, &a->k_i, 0); in __bch2_trans_mark_metadata_bucket()
997 struct bch_fs *c = trans->c; in bch2_mark_metadata_bucket()
1002 ca->dev_idx, bch2_data_type_str(data_type))) in bch2_mark_metadata_bucket()
1008 if (bch2_fs_inconsistent_on(g->data_type && in bch2_mark_metadata_bucket()
1009 g->data_type != data_type, c, in bch2_mark_metadata_bucket()
1011 bch2_data_type_str(g->data_type), in bch2_mark_metadata_bucket()
1015 if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c, in bch2_mark_metadata_bucket()
1017 ca->dev_idx, b, g->gen, in bch2_mark_metadata_bucket()
1018 bch2_data_type_str(g->data_type ?: data_type), in bch2_mark_metadata_bucket()
1019 g->dirty_sectors, sectors)) in bch2_mark_metadata_bucket()
1022 g->data_type = data_type; in bch2_mark_metadata_bucket()
1023 g->dirty_sectors += sectors; in bch2_mark_metadata_bucket()
1031 return -BCH_ERR_metadata_bucket_inconsistency; in bch2_mark_metadata_bucket()
1046 if (b >= ca->mi.nbuckets) in bch2_trans_mark_metadata_bucket()
1066 min_t(u64, bucket_to_sector(ca, b + 1), end) - start; in bch2_trans_mark_metadata_sectors()
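
The min_t() expression above clamps each step of a sector-range walk to the end of the current bucket, so bch2_trans_mark_metadata_sectors() can mark [start, end) one bucket at a time. A standalone sketch of that iteration (bucket_size and the printf stand in for bucket_to_sector() and the real marking call):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static void mark_range(uint64_t start, uint64_t end, uint64_t bucket_size)
{
	while (start < end) {
		uint64_t b = start / bucket_size;
		uint64_t bucket_end = (b + 1) * bucket_size;
		uint64_t this_end = bucket_end < end ? bucket_end : end;

		/* mark (this_end - start) sectors in bucket b */
		printf("bucket %" PRIu64 ": %" PRIu64 " sectors\n",
		       b, this_end - start);
		start = this_end;
	}
}
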
1088 struct bch_fs *c = trans->c; in __bch2_trans_mark_dev_sb()
1090 mutex_lock(&c->sb_lock); in __bch2_trans_mark_dev_sb()
1091 struct bch_sb_layout layout = ca->disk_sb.sb->layout; in __bch2_trans_mark_dev_sb()
1092 mutex_unlock(&c->sb_lock); in __bch2_trans_mark_dev_sb()
1123 for (i = 0; i < ca->journal.nr; i++) { in __bch2_trans_mark_dev_sb()
1125 ca->journal.buckets[i], in __bch2_trans_mark_dev_sb()
1126 BCH_DATA_journal, ca->mi.bucket_size, flags); in __bch2_trans_mark_dev_sb()
1149 percpu_ref_put(&ca->io_ref[READ]); in bch2_trans_mark_dev_sbs_flags()
1164 struct bch_sb_layout *layout = &ca->disk_sb.sb->layout; in bch2_is_superblock_bucket()
1172 for (i = 0; i < layout->nr_superblocks; i++) { in bch2_is_superblock_bucket()
1173 u64 offset = le64_to_cpu(layout->sb_offset[i]); in bch2_is_superblock_bucket()
1174 u64 end = offset + (1 << layout->sb_max_size_bits); in bch2_is_superblock_bucket()
1180 for (i = 0; i < ca->journal.nr; i++) in bch2_is_superblock_bucket()
1181 if (b == ca->journal.buckets[i]) in bch2_is_superblock_bucket()
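
bch2_is_superblock_bucket() above walks the superblock layout (each copy spans sectors [sb_offset[i], sb_offset[i] + (1 << sb_max_size_bits))) and then the journal bucket list. A hedged sketch of the membership test; the overlap check against the bucket's sector range is my reading of the elided lines, not a verified copy, and every parameter name is illustrative:

#include <stdbool.h>
#include <stdint.h>

static bool is_special_bucket(uint64_t b, uint64_t bucket_sectors,
			      const uint64_t *sb_offset, unsigned nr_sb,
			      uint64_t sb_size_sectors,
			      const uint64_t *journal_buckets, unsigned nr_j)
{
	for (unsigned i = 0; i < nr_sb; i++) {
		uint64_t start = sb_offset[i];
		uint64_t end = start + sb_size_sectors;

		/* does [start, end) overlap bucket b's sector range? */
		if (start < (b + 1) * bucket_sectors &&
		    end > b * bucket_sectors)
			return true;
	}

	for (unsigned i = 0; i < nr_j; i++)
		if (b == journal_buckets[i])
			return true;

	return false;
}
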
1199 percpu_down_read(&c->mark_lock); in __bch2_disk_reservation_add()
1201 pcpu = this_cpu_ptr(c->pcpu); in __bch2_disk_reservation_add()
1203 if (sectors <= pcpu->sectors_available) in __bch2_disk_reservation_add()
1206 old = atomic64_read(&c->sectors_available); in __bch2_disk_reservation_add()
1214 } while (!atomic64_try_cmpxchg(&c->sectors_available, in __bch2_disk_reservation_add()
1215 &old, old - get)); in __bch2_disk_reservation_add()
1217 pcpu->sectors_available += get; in __bch2_disk_reservation_add()
1220 pcpu->sectors_available -= sectors; in __bch2_disk_reservation_add()
1221 this_cpu_add(*c->online_reserved, sectors); in __bch2_disk_reservation_add()
1222 res->sectors += sectors; in __bch2_disk_reservation_add()
1225 percpu_up_read(&c->mark_lock); in __bch2_disk_reservation_add()
1229 mutex_lock(&c->sectors_available_lock); in __bch2_disk_reservation_add()
1231 percpu_u64_set(&c->pcpu->sectors_available, 0); in __bch2_disk_reservation_add()
1239 atomic64_set(&c->sectors_available, in __bch2_disk_reservation_add()
1240 max_t(s64, 0, sectors_available - sectors)); in __bch2_disk_reservation_add()
1241 this_cpu_add(*c->online_reserved, sectors); in __bch2_disk_reservation_add()
1242 res->sectors += sectors; in __bch2_disk_reservation_add()
1245 atomic64_set(&c->sectors_available, sectors_available); in __bch2_disk_reservation_add()
1246 ret = -BCH_ERR_ENOSPC_disk_reservation; in __bch2_disk_reservation_add()
1249 mutex_unlock(&c->sectors_available_lock); in __bch2_disk_reservation_add()
1250 percpu_up_read(&c->mark_lock); in __bch2_disk_reservation_add()
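
__bch2_disk_reservation_add() is a two-level reservation: a per-cpu cache (pcpu->sectors_available) satisfies small requests locklessly, a miss pulls a batch from the global atomic via compare-exchange, and only when the global pool looks exhausted does the code fall back to the mutex-protected recount that may return ENOSPC. A userspace sketch of the fast path; the batch size REFILL stands in for the kernel's cache constant, and the refill computation is inferred from the visible uses of `get`, so treat it as an assumption:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define REFILL 128	/* illustrative batch size */

static _Atomic int64_t global_avail;
static _Thread_local int64_t local_avail;	/* per-thread cache */

/* Returns false when the caller must take the slow (recount) path. */
static bool try_reserve(int64_t sectors)
{
	int64_t old, get;

	if (sectors <= local_avail)
		goto out;

	old = atomic_load(&global_avail);
	do {
		get = sectors + REFILL < old ? sectors + REFILL : old;
		if (get < sectors)
			return false;	/* global pool nearly empty */
	} while (!atomic_compare_exchange_weak(&global_avail, &old,
					       old - get));
	local_avail += get;
out:
	local_avail -= sectors;
	return true;
}
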
1260 kvfree_rcu_mightsleep(ca->buckets_nouse); in bch2_buckets_nouse_free()
1261 ca->buckets_nouse = NULL; in bch2_buckets_nouse_free()
1268 BUG_ON(ca->buckets_nouse); in bch2_buckets_nouse_alloc()
1270 ca->buckets_nouse = bch2_kvmalloc(BITS_TO_LONGS(ca->mi.nbuckets) * in bch2_buckets_nouse_alloc()
1273 if (!ca->buckets_nouse) { in bch2_buckets_nouse_alloc()
1275 return -BCH_ERR_ENOMEM_buckets_nouse; in bch2_buckets_nouse_alloc()
1293 bool resize = ca->bucket_gens != NULL; in bch2_dev_buckets_resize()
1297 lockdep_assert_held(&c->state_lock); in bch2_dev_buckets_resize()
1299 if (resize && ca->buckets_nouse) in bch2_dev_buckets_resize()
1300 return -BCH_ERR_no_resize_with_buckets_nouse; in bch2_dev_buckets_resize()
1305 ret = -BCH_ERR_ENOMEM_bucket_gens; in bch2_dev_buckets_resize()
1309 bucket_gens->first_bucket = ca->mi.first_bucket; in bch2_dev_buckets_resize()
1310 bucket_gens->nbuckets = nbuckets; in bch2_dev_buckets_resize()
1311 bucket_gens->nbuckets_minus_first = in bch2_dev_buckets_resize()
1312 bucket_gens->nbuckets - bucket_gens->first_bucket; in bch2_dev_buckets_resize()
1314 old_bucket_gens = rcu_dereference_protected(ca->bucket_gens, 1); in bch2_dev_buckets_resize()
1317 u64 copy = min(bucket_gens->nbuckets, in bch2_dev_buckets_resize()
1318 old_bucket_gens->nbuckets); in bch2_dev_buckets_resize()
1319 memcpy(bucket_gens->b, in bch2_dev_buckets_resize()
1320 old_bucket_gens->b, in bch2_dev_buckets_resize()
1321 sizeof(bucket_gens->b[0]) * copy); in bch2_dev_buckets_resize()
1324 rcu_assign_pointer(ca->bucket_gens, bucket_gens); in bch2_dev_buckets_resize()
1327 nbuckets = ca->mi.nbuckets; in bch2_dev_buckets_resize()
1332 call_rcu(&bucket_gens->rcu, bucket_gens_free_rcu); in bch2_dev_buckets_resize()
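
The resize path above allocates a fresh bucket_gens array, copies the overlapping min(new, old) prefix, publishes it with rcu_assign_pointer(), and frees the old array after a grace period via call_rcu(). A single-threaded userspace analogue of the copy-and-swap; plain assignment and free() stand in for the RCU publish and deferred free:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct bucket_gens {
	uint64_t nbuckets;
	uint8_t b[];		/* one generation byte per bucket */
};

static int resize_gens(struct bucket_gens **gens, uint64_t nbuckets)
{
	struct bucket_gens *new = malloc(sizeof(*new) + nbuckets);
	struct bucket_gens *old = *gens;

	if (!new)
		return -1;

	new->nbuckets = nbuckets;
	memset(new->b, 0, nbuckets);

	if (old) {
		uint64_t copy = old->nbuckets < nbuckets ?
				old->nbuckets : nbuckets;
		memcpy(new->b, old->b, copy);
	}

	*gens = new;	/* kernel: rcu_assign_pointer() */
	free(old);	/* kernel: call_rcu() after a grace period */
	return 0;
}
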
1339 kvfree(ca->buckets_nouse); in bch2_dev_buckets_free()
1340 kvfree(rcu_dereference_protected(ca->bucket_gens, 1)); in bch2_dev_buckets_free()
1341 free_percpu(ca->usage); in bch2_dev_buckets_free()
1346 ca->usage = alloc_percpu(struct bch_dev_usage_full); in bch2_dev_buckets_alloc()
1347 if (!ca->usage) in bch2_dev_buckets_alloc()
1348 return -BCH_ERR_ENOMEM_usage_init; in bch2_dev_buckets_alloc()
1350 return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets); in bch2_dev_buckets_alloc()