Lines matching +full:mi +full:- +full:v (fs/bcachefs/alloc_background.c)
1 // SPDX-License-Identifier: GPL-2.0
61 u64 v; in alloc_field_v1_get() local
63 if (!(a->fields & (1 << field))) in alloc_field_v1_get()
68 v = *((const u8 *) *p); in alloc_field_v1_get()
71 v = le16_to_cpup(*p); in alloc_field_v1_get()
74 v = le32_to_cpup(*p); in alloc_field_v1_get()
77 v = le64_to_cpup(*p); in alloc_field_v1_get()
84 return v; in alloc_field_v1_get()
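
alloc_field_v1_get() above reads each present v1 field at a fixed per-field width and advances the cursor; fields absent from the bitmap decode as zero. A minimal standalone sketch of that shape follows; the demo_* names and the width table are illustrative (the real widths come from the BCH_ALLOC_FIELDS_V1() x-macro list), and it assumes a little-endian host where the kernel uses le16_to_cpup() and friends:

#include <stdint.h>
#include <string.h>

static const unsigned demo_field_bytes[] = { 1, 2, 4, 8 };

static uint64_t demo_field_get(uint32_t fields, unsigned field,
                               const uint8_t **p)
{
        if (!(fields & (1U << field)))
                return 0;                       /* absent field: reads as 0 */

        uint64_t v = 0;
        memcpy(&v, *p, demo_field_bytes[field]); /* LE host assumed */
        *p += demo_field_bytes[field];
        return v;
}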
90 const struct bch_alloc *in = bkey_s_c_to_alloc(k).v; in bch2_alloc_unpack_v1()
91 const void *d = in->data; in bch2_alloc_unpack_v1()
94 out->gen = in->gen; in bch2_alloc_unpack_v1()
96 #define x(_name, _bits) out->_name = alloc_field_v1_get(in, &d, idx++); in bch2_alloc_unpack_v1()
105 const u8 *in = a.v->data; in bch2_alloc_unpack_v2()
109 u64 v; in bch2_alloc_unpack_v2() local
111 out->gen = a.v->gen; in bch2_alloc_unpack_v2()
112 out->oldest_gen = a.v->oldest_gen; in bch2_alloc_unpack_v2()
113 out->data_type = a.v->data_type; in bch2_alloc_unpack_v2()
116 if (fieldnr < a.v->nr_fields) { \ in bch2_alloc_unpack_v2()
117 ret = bch2_varint_decode_fast(in, end, &v); \ in bch2_alloc_unpack_v2()
122 v = 0; \ in bch2_alloc_unpack_v2()
124 out->_name = v; \ in bch2_alloc_unpack_v2()
125 if (v != out->_name) \ in bch2_alloc_unpack_v2()
126 return -1; \ in bch2_alloc_unpack_v2()
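
The assign-and-compare pair in the macro (out->_name = v; then checking v != out->_name) is a narrowing check: the decoded u64 is stored into the possibly smaller destination field and read back, so silent truncation turns into an error. The same idiom in isolation, with a hypothetical 32-bit field:

#include <stdint.h>

struct demo_out { uint32_t sectors; };  /* hypothetical narrow field */

static int demo_store(struct demo_out *out, uint64_t v)
{
        out->sectors = v;               /* may truncate */
        if (v != out->sectors)          /* read-back differs: it did */
                return -1;
        return 0;
}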
138 const u8 *in = a.v->data; in bch2_alloc_unpack_v3()
142 u64 v; in bch2_alloc_unpack_v3() local
144 out->gen = a.v->gen; in bch2_alloc_unpack_v3()
145 out->oldest_gen = a.v->oldest_gen; in bch2_alloc_unpack_v3()
146 out->data_type = a.v->data_type; in bch2_alloc_unpack_v3()
147 out->need_discard = BCH_ALLOC_V3_NEED_DISCARD(a.v); in bch2_alloc_unpack_v3()
148 out->need_inc_gen = BCH_ALLOC_V3_NEED_INC_GEN(a.v); in bch2_alloc_unpack_v3()
149 out->journal_seq = le64_to_cpu(a.v->journal_seq); in bch2_alloc_unpack_v3()
152 if (fieldnr < a.v->nr_fields) { \ in bch2_alloc_unpack_v3()
153 ret = bch2_varint_decode_fast(in, end, &v); \ in bch2_alloc_unpack_v3()
158 v = 0; \ in bch2_alloc_unpack_v3()
160 out->_name = v; \ in bch2_alloc_unpack_v3()
161 if (v != out->_name) \ in bch2_alloc_unpack_v3()
162 return -1; \ in bch2_alloc_unpack_v3()
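
bch2_varint_decode_fast(), used by both the v2 and v3 unpack loops, returns the number of bytes consumed or a negative value on bad input. Its exact encoding is not shown in this listing, so as a stand-in with the same loop shape, here is a plain unsigned LEB128 decoder (explicitly not bcachefs's varint format):

#include <stdint.h>

/* Returns bytes consumed, or -1 if the input ends mid-varint. */
static int demo_varint_decode(const uint8_t *in, const uint8_t *end,
                              uint64_t *v)
{
        uint64_t r = 0;
        unsigned shift = 0;
        const uint8_t *p = in;

        while (p < end && shift < 64) {
                r |= (uint64_t)(*p & 0x7f) << shift;
                shift += 7;
                if (!(*p++ & 0x80)) {
                        *v = r;
                        return p - in;
                }
        }
        return -1;
}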
174 switch (k.k->type) { in bch2_alloc_unpack()
194 if (a->fields & (1 << i)) in bch_alloc_v1_val_u64s()
207 bkey_fsck_err_on(bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v), in bch2_alloc_v1_validate()
210 bkey_val_u64s(a.k), bch_alloc_v1_val_u64s(a.v)); in bch2_alloc_v1_validate()
311 c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_to_lru_refs, in bch2_alloc_v4_validate()
324 struct bch_alloc_v4 *a = bkey_s_to_alloc_v4(k).v; in bch2_alloc_v4_swab()
326 a->journal_seq_nonempty = swab64(a->journal_seq_nonempty); in bch2_alloc_v4_swab()
327 a->journal_seq_empty = swab64(a->journal_seq_empty); in bch2_alloc_v4_swab()
328 a->flags = swab32(a->flags); in bch2_alloc_v4_swab()
329 a->dirty_sectors = swab32(a->dirty_sectors); in bch2_alloc_v4_swab()
330 a->cached_sectors = swab32(a->cached_sectors); in bch2_alloc_v4_swab()
331 a->io_time[0] = swab64(a->io_time[0]); in bch2_alloc_v4_swab()
332 a->io_time[1] = swab64(a->io_time[1]); in bch2_alloc_v4_swab()
333 a->stripe = swab32(a->stripe); in bch2_alloc_v4_swab()
334 a->nr_external_backpointers = swab32(a->nr_external_backpointers); in bch2_alloc_v4_swab()
335 a->stripe_sectors = swab32(a->stripe_sectors); in bch2_alloc_v4_swab()
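
bch2_alloc_v4_swab() fixes byte order field by field when a key written with the opposite endianness is read; single-byte fields (gen, oldest_gen, data_type) need no swapping and are skipped. The same pattern on a toy struct (demo_rec is hypothetical; __builtin_bswapNN is the gcc/clang primitive underlying swab32()/swab64()):

#include <stdint.h>

struct demo_rec {
        uint64_t journal_seq;
        uint32_t flags;
        uint32_t dirty_sectors;
        uint8_t  gen;                   /* single byte: no swap needed */
};

static void demo_rec_swab(struct demo_rec *r)
{
        r->journal_seq   = __builtin_bswap64(r->journal_seq);
        r->flags         = __builtin_bswap32(r->flags);
        r->dirty_sectors = __builtin_bswap32(r->dirty_sectors);
}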
342 struct bch_dev *ca = c ? bch2_dev_bucket_tryget_noerror(c, k.k->p) : NULL; in bch2_alloc_to_text()
347 prt_printf(out, "gen %u oldest_gen %u data_type ", a->gen, a->oldest_gen); in bch2_alloc_to_text()
348 bch2_prt_data_type(out, a->data_type); in bch2_alloc_to_text()
350 prt_printf(out, "journal_seq_nonempty %llu\n", a->journal_seq_nonempty); in bch2_alloc_to_text()
351 prt_printf(out, "journal_seq_empty %llu\n", a->journal_seq_empty); in bch2_alloc_to_text()
354 prt_printf(out, "dirty_sectors %u\n", a->dirty_sectors); in bch2_alloc_to_text()
355 prt_printf(out, "stripe_sectors %u\n", a->stripe_sectors); in bch2_alloc_to_text()
356 prt_printf(out, "cached_sectors %u\n", a->cached_sectors); in bch2_alloc_to_text()
357 prt_printf(out, "stripe %u\n", a->stripe); in bch2_alloc_to_text()
358 prt_printf(out, "stripe_redundancy %u\n", a->stripe_redundancy); in bch2_alloc_to_text()
359 prt_printf(out, "io_time[READ] %llu\n", a->io_time[READ]); in bch2_alloc_to_text()
360 prt_printf(out, "io_time[WRITE] %llu\n", a->io_time[WRITE]); in bch2_alloc_to_text()
372 if (k.k->type == KEY_TYPE_alloc_v4) { in __bch2_alloc_to_v4()
375 *out = *bkey_s_c_to_alloc_v4(k).v; in __bch2_alloc_to_v4()
382 memset(src, 0, dst - src); in __bch2_alloc_to_v4()
415 if (k.k->type == KEY_TYPE_alloc_v4) { in __bch2_alloc_to_v4_mut()
418 bkey_reassemble(&ret->k_i, k); in __bch2_alloc_to_v4_mut()
420 src = alloc_v4_backpointers(&ret->v); in __bch2_alloc_to_v4_mut()
421 SET_BCH_ALLOC_V4_BACKPOINTERS_START(&ret->v, BCH_ALLOC_V4_U64s); in __bch2_alloc_to_v4_mut()
422 dst = alloc_v4_backpointers(&ret->v); in __bch2_alloc_to_v4_mut()
425 memset(src, 0, dst - src); in __bch2_alloc_to_v4_mut()
427 SET_BCH_ALLOC_V4_NR_BACKPOINTERS(&ret->v, 0); in __bch2_alloc_to_v4_mut()
430 bkey_alloc_v4_init(&ret->k_i); in __bch2_alloc_to_v4_mut()
431 ret->k.p = k.k->p; in __bch2_alloc_to_v4_mut()
432 bch2_alloc_to_v4(k, &ret->v); in __bch2_alloc_to_v4_mut()
441 if (likely(k.k->type == KEY_TYPE_alloc_v4) && in bch2_alloc_to_v4_mut_inlined()
443 BCH_ALLOC_V4_NR_BACKPOINTERS(a.v) == 0)) in bch2_alloc_to_v4_mut_inlined()
486 ret = bch2_trans_update(trans, &iter, &a->k_i, flags); in bch2_trans_start_alloc_update()
508 return k.k->type == KEY_TYPE_bucket_gens in alloc_gen()
509 ? bkey_s_c_to_bucket_gens(k).v->gens[offset] in alloc_gen()
531 for (i = 0; i < ARRAY_SIZE(g.v->gens); i++) { in bch2_bucket_gens_to_text()
534 prt_printf(out, "%u", g.v->gens[i]); in bch2_bucket_gens_to_text()
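
Both alloc_gen() and bch2_bucket_gens_to_text() index gens[offset]: a KEY_TYPE_bucket_gens value packs one generation byte per bucket, so a single key covers a run of buckets and the low bits of the bucket number select the slot. A sketch of that layout; the 256-gens-per-key figure is an assumption here, as the listing only shows the KEY_TYPE_BUCKET_GENS_MASK masking:

#include <stdint.h>

#define DEMO_GENS_BITS  8                       /* assumed: 256 gens/key */
#define DEMO_GENS_MASK  ((1U << DEMO_GENS_BITS) - 1)

struct demo_bucket_gens { uint8_t gens[1U << DEMO_GENS_BITS]; };

static inline uint64_t demo_gens_key(uint64_t bucket)
{
        return bucket >> DEMO_GENS_BITS;        /* which key holds it */
}

static inline uint8_t demo_gen(const struct demo_bucket_gens *g,
                               uint64_t bucket)
{
        return g->gens[bucket & DEMO_GENS_MASK]; /* slot within the key */
}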
551 if (!bch2_dev_bucket_exists(c, k.k->p)) in bch2_bucket_gens_init()
555 u8 gen = bch2_alloc_to_v4(k, &a)->gen; in bch2_bucket_gens_init()
574 g.v.gens[offset] = gen; in bch2_bucket_gens_init()
592 down_read(&c->state_lock); in bch2_alloc_read()
598 if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_bucket_gens) { in bch2_alloc_read()
601 u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset; in bch2_alloc_read()
602 u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset; in bch2_alloc_read()
604 if (k.k->type != KEY_TYPE_bucket_gens) in bch2_alloc_read()
607 ca = bch2_dev_iterate(c, ca, k.k->p.inode); in bch2_alloc_read()
613 bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode + 1, 0)); in bch2_alloc_read()
617 const struct bch_bucket_gens *g = bkey_s_c_to_bucket_gens(k).v; in bch2_alloc_read()
619 for (u64 b = max_t(u64, ca->mi.first_bucket, start); in bch2_alloc_read()
620 b < min_t(u64, ca->mi.nbuckets, end); in bch2_alloc_read()
622 *bucket_gen(ca, b) = g->gens[b & KEY_TYPE_BUCKET_GENS_MASK]; in bch2_alloc_read()
628 ca = bch2_dev_iterate(c, ca, k.k->p.inode); in bch2_alloc_read()
634 bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode + 1, 0)); in bch2_alloc_read()
638 if (k.k->p.offset < ca->mi.first_bucket) { in bch2_alloc_read()
639 bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode, ca->mi.first_bucket)); in bch2_alloc_read()
643 if (k.k->p.offset >= ca->mi.nbuckets) { in bch2_alloc_read()
644 bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode + 1, 0)); in bch2_alloc_read()
649 *bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen; in bch2_alloc_read()
657 up_read(&c->state_lock); in bch2_alloc_read()
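
The read loop above clamps each key's span to [first_bucket, nbuckets) with max_t()/min_t() before storing through bucket_gen(), since only that range is backed by the in-memory gens array. The same intersection in isolation (demo names are illustrative):

#include <stdint.h>

static void demo_fill_gens(uint8_t *gens, uint64_t first_bucket,
                           uint64_t nbuckets, uint64_t start, uint64_t end,
                           const uint8_t *src)
{
        uint64_t b = start > first_bucket ? start : first_bucket;
        uint64_t e = end < nbuckets ? end : nbuckets;

        for (; b < e; b++)
                gens[b] = src[b - start];
}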
668 struct bch_fs *c = trans->c; in __need_discard_or_freespace_err()
683 if (ret == -BCH_ERR_fsck_ignore || in __need_discard_or_freespace_err()
684 ret == -BCH_ERR_fsck_errors_not_fixed) in __need_discard_or_freespace_err()
706 if (a->data_type != BCH_DATA_free && in bch2_bucket_do_index()
707 a->data_type != BCH_DATA_need_discard) in bch2_bucket_do_index()
710 switch (a->data_type) { in bch2_bucket_do_index()
713 pos = alloc_freespace_pos(alloc_k.k->p, *a); in bch2_bucket_do_index()
717 pos = alloc_k.k->p; in bch2_bucket_do_index()
729 need_discard_or_freespace_err_on(ca->mi.freespace_initialized && in bch2_bucket_do_index()
730 !old.k->type != set, in bch2_bucket_do_index()
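
The condition !old.k->type != set reads oddly but is deliberate: !old.k->type is 1 exactly when the old key is a hole, and that must match the state being applied, since setting a bit requires a hole and clearing one requires an existing key. Restated as a standalone predicate:

#include <stdbool.h>

/* True when the need_discard/freespace btree disagrees with the update:
 * setting a bit that is already set, or clearing one that never was. */
static bool demo_index_inconsistent(unsigned old_key_type, bool set)
{
        return !old_key_type != set;
}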
762 if (k.k->type != KEY_TYPE_bucket_gens) { in bch2_bucket_gen_update()
763 bkey_bucket_gens_init(&g->k_i); in bch2_bucket_gen_update()
764 g->k.p = iter.pos; in bch2_bucket_gen_update()
766 bkey_reassemble(&g->k_i, k); in bch2_bucket_gen_update()
769 g->v.gens[offset] = gen; in bch2_bucket_gen_update()
771 ret = bch2_trans_update(trans, &iter, &g->k_i, 0); in bch2_bucket_gen_update()
786 .dev = ca->dev_idx, in bch2_dev_data_type_accounting_mod()
797 if (old->data_type != new->data_type) { in bch2_alloc_key_to_dev_counters()
798 int ret = bch2_dev_data_type_accounting_mod(trans, ca, new->data_type, in bch2_alloc_key_to_dev_counters()
800 bch2_dev_data_type_accounting_mod(trans, ca, old->data_type, in bch2_alloc_key_to_dev_counters()
801 -1, -old_sectors, -bch2_bucket_sectors_fragmented(ca, *old), flags); in bch2_alloc_key_to_dev_counters()
805 int ret = bch2_dev_data_type_accounting_mod(trans, ca, new->data_type, in bch2_alloc_key_to_dev_counters()
807 new_sectors - old_sectors, in bch2_alloc_key_to_dev_counters()
808 bch2_bucket_sectors_fragmented(ca, *new) - in bch2_alloc_key_to_dev_counters()
818 !!new_unstriped - !!old_unstriped, in bch2_alloc_key_to_dev_counters()
819 new_unstriped - old_unstriped, in bch2_alloc_key_to_dev_counters()
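
bch2_alloc_key_to_dev_counters() never writes absolute usage values: it emits signed deltas (new minus old) per data type, including the !!new - !!old trick for 0/1 bucket counts, so concurrent accounting updates compose by summation. A reduced sketch under hypothetical names:

#include <stdint.h>
#include <stdbool.h>

struct demo_dev_usage { int64_t buckets, sectors; };

static void demo_counters_mod(struct demo_dev_usage *u,
                              bool old_present, uint32_t old_sectors,
                              bool new_present, uint32_t new_sectors)
{
        u->buckets += (int)new_present - (int)old_present;
        u->sectors += (int64_t)new_sectors - old_sectors;
}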
834 struct bch_fs *c = trans->c; in bch2_trigger_alloc()
838 struct bch_dev *ca = bch2_dev_bucket_tryget(c, new.k->p); in bch2_trigger_alloc()
840 return -BCH_ERR_trigger_alloc; in bch2_trigger_alloc()
846 if (likely(new.k->type == KEY_TYPE_alloc_v4)) { in bch2_trigger_alloc()
847 new_a = bkey_s_to_alloc_v4(new).v; in bch2_trigger_alloc()
855 new_a = &new_ka->v; in bch2_trigger_alloc()
859 alloc_data_type_set(new_a, new_a->data_type); in bch2_trigger_alloc()
861 int is_empty_delta = (int) data_type_is_empty(new_a->data_type) - in bch2_trigger_alloc()
862 (int) data_type_is_empty(old_a->data_type); in bch2_trigger_alloc()
865 new_a->io_time[READ] = bch2_current_io_time(c, READ); in bch2_trigger_alloc()
866  new_a->io_time[WRITE] = bch2_current_io_time(c, WRITE); in bch2_trigger_alloc()
871 if (data_type_is_empty(new_a->data_type) && in bch2_trigger_alloc()
873 !bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset)) { in bch2_trigger_alloc()
874 if (new_a->oldest_gen == new_a->gen && in bch2_trigger_alloc()
876 new_a->oldest_gen++; in bch2_trigger_alloc()
877 new_a->gen++; in bch2_trigger_alloc()
879 alloc_data_type_set(new_a, new_a->data_type); in bch2_trigger_alloc()
882 if (old_a->data_type != new_a->data_type || in bch2_trigger_alloc()
883 (new_a->data_type == BCH_DATA_free && in bch2_trigger_alloc()
891 if (new_a->data_type == BCH_DATA_cached && in bch2_trigger_alloc()
892 !new_a->io_time[READ]) in bch2_trigger_alloc()
893 new_a->io_time[READ] = bch2_current_io_time(c, READ); in bch2_trigger_alloc()
895 ret = bch2_lru_change(trans, new.k->p.inode, in bch2_trigger_alloc()
896 bucket_to_u64(new.k->p), in bch2_trigger_alloc()
904 bucket_to_u64(new.k->p), in bch2_trigger_alloc()
910 if (old_a->gen != new_a->gen) { in bch2_trigger_alloc()
911 ret = bch2_bucket_gen_update(trans, new.k->p, new_a->gen); in bch2_trigger_alloc()
917 old_a->cached_sectors) { in bch2_trigger_alloc()
918 ret = bch2_mod_dev_cached_sectors(trans, ca->dev_idx, in bch2_trigger_alloc()
919 -((s64) old_a->cached_sectors), in bch2_trigger_alloc()
931 u64 transaction_seq = trans->journal_res.seq; in bch2_trigger_alloc()
934 if (log_fsck_err_on(transaction_seq && new_a->journal_seq_nonempty > transaction_seq, in bch2_trigger_alloc()
937 journal_cur_seq(&c->journal), in bch2_trigger_alloc()
939 new_a->journal_seq_nonempty = transaction_seq; in bch2_trigger_alloc()
941 int is_empty_delta = (int) data_type_is_empty(new_a->data_type) - in bch2_trigger_alloc()
942 (int) data_type_is_empty(old_a->data_type); in bch2_trigger_alloc()
945 * Record journal sequence number of empty -> nonempty transition: in bch2_trigger_alloc()
946 * Note that there may be multiple empty -> nonempty in bch2_trigger_alloc()
948 * still writing to it - so be careful to only record the first: in bch2_trigger_alloc()
951 new_a->journal_seq_empty <= c->journal.flushed_seq_ondisk) { in bch2_trigger_alloc()
952 new_a->journal_seq_nonempty = transaction_seq; in bch2_trigger_alloc()
953 new_a->journal_seq_empty = 0; in bch2_trigger_alloc()
958 * unless updates since empty -> nonempty transition were never in bch2_trigger_alloc()
959 * flushed - we may need to ask the journal not to flush in bch2_trigger_alloc()
963 if (new_a->journal_seq_nonempty == transaction_seq || in bch2_trigger_alloc()
964 bch2_journal_noflush_seq(&c->journal, in bch2_trigger_alloc()
965 new_a->journal_seq_nonempty, in bch2_trigger_alloc()
967 new_a->journal_seq_nonempty = new_a->journal_seq_empty = 0; in bch2_trigger_alloc()
969 new_a->journal_seq_empty = transaction_seq; in bch2_trigger_alloc()
971 ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal, in bch2_trigger_alloc()
972 c->journal.flushed_seq_ondisk, in bch2_trigger_alloc()
973 new.k->p.inode, new.k->p.offset, in bch2_trigger_alloc()
982 if (new_a->gen != old_a->gen) { in bch2_trigger_alloc()
984 u8 *gen = bucket_gen(ca, new.k->p.offset); in bch2_trigger_alloc()
989 *gen = new_a->gen; in bch2_trigger_alloc()
995 #define bucket_flushed(a) (a->journal_seq_empty <= c->journal.flushed_seq_ondisk) in bch2_trigger_alloc()
997 if (statechange(a->data_type == BCH_DATA_free) && in bch2_trigger_alloc()
999 closure_wake_up(&c->freelist_wait); in bch2_trigger_alloc()
1001 if (statechange(a->data_type == BCH_DATA_need_discard) && in bch2_trigger_alloc()
1002 !bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset) && in bch2_trigger_alloc()
1004 bch2_discard_one_bucket_fast(ca, new.k->p.offset); in bch2_trigger_alloc()
1006 if (statechange(a->data_type == BCH_DATA_cached) && in bch2_trigger_alloc()
1007 !bch2_bucket_is_open(c, new.k->p.inode, new.k->p.offset) && in bch2_trigger_alloc()
1011 if (statechange(a->data_type == BCH_DATA_need_gc_gens)) in bch2_trigger_alloc()
1017 struct bucket *g = gc_bucket(ca, new.k->p.offset); in bch2_trigger_alloc()
1022 g->gen_valid = 1; in bch2_trigger_alloc()
1023 g->gen = new_a->gen; in bch2_trigger_alloc()
1034 ret = -BCH_ERR_trigger_alloc; in bch2_trigger_alloc()
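
Two small idioms carry most of bch2_trigger_alloc()'s transition logic: is_empty_delta subtracts two booleans cast to int, yielding -1/0/+1 in one expression, and the statechange() checks near the end fire only when a predicate flips between the old and new value. Both in isolation:

#include <stdbool.h>

/* -1: bucket became nonempty, 0: no transition, +1: became empty */
static int demo_empty_delta(bool old_empty, bool new_empty)
{
        return (int) new_empty - (int) old_empty;
}

/* Fires once per flip in either direction, like the statechange() uses. */
static bool demo_statechange(bool old_state, bool new_state)
{
        return old_state != new_state;
}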
1040 * extents style btrees, but works on non-extents btrees:
1050 if (k.k->type) { in bch2_get_key_or_hole()
1059 if (!bpos_eq(path->l[0].b->key.k.p, SPOS_MAX)) in bch2_get_key_or_hole()
1060 end = bkey_min(end, bpos_nosnap_successor(path->l[0].b->key.k.p)); in bch2_get_key_or_hole()
1062 end = bkey_min(end, POS(iter->pos.inode, iter->pos.offset + U32_MAX - 1)); in bch2_get_key_or_hole()
1072 BUG_ON(next.offset >= iter->pos.offset + U32_MAX); in bch2_get_key_or_hole()
1078 hole->p = iter->pos; in bch2_get_key_or_hole()
1080 bch2_key_resize(hole, next.offset - iter->pos.offset); in bch2_get_key_or_hole()
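
bch2_get_key_or_hole() extends the usual iterator contract: when the next real key lies past the requested position, it synthesizes a zero-type hole key sized to the gap, clamped so it never crosses a btree node boundary or exceeds the U32_MAX size limit. Simplified to flat integers:

#include <stdint.h>

struct demo_hole { uint64_t start, size; };

/* next_key: position of the next real key at or after pos; limit caps
 * how far a hole may extend (device end, size limit, ...). */
static int demo_key_or_hole(uint64_t pos, uint64_t next_key, uint64_t limit,
                            struct demo_hole *hole)
{
        if (next_key == pos)
                return 1;               /* a real key sits here */

        uint64_t end = next_key < limit ? next_key : limit;

        hole->start = pos;
        hole->size  = end - pos;        /* cf. bch2_key_resize() */
        return 0;
}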
1088 if (bucket->offset < (*ca)->mi.first_bucket) in next_bucket()
1089 bucket->offset = (*ca)->mi.first_bucket; in next_bucket()
1091 if (bucket->offset < (*ca)->mi.nbuckets) in next_bucket()
1096 bucket->inode++; in next_bucket()
1097 bucket->offset = 0; in next_bucket()
1101 *ca = __bch2_next_dev_idx(c, bucket->inode, NULL); in next_bucket()
1103 *bucket = POS((*ca)->dev_idx, (*ca)->mi.first_bucket); in next_bucket()
1115 struct bch_fs *c = trans->c; in bch2_get_key_or_real_bucket_hole()
1122 *ca = bch2_dev_iterate_noerror(c, *ca, k.k->p.inode); in bch2_get_key_or_real_bucket_hole()
1124 if (!k.k->type) { in bch2_get_key_or_real_bucket_hole()
1135 if (k.k->p.offset > (*ca)->mi.nbuckets) in bch2_get_key_or_real_bucket_hole()
1136 bch2_key_resize(hole, (*ca)->mi.nbuckets - hole_start.offset); in bch2_get_key_or_real_bucket_hole()
1150 struct bch_fs *c = trans->c; in bch2_check_alloc_key()
1158 struct bch_dev *ca = bch2_dev_bucket_tryget_noerror(c, alloc_k.k->p); in bch2_check_alloc_key()
1162 alloc_k.k->p.inode, alloc_k.k->p.offset)) in bch2_check_alloc_key()
1167 if (!ca->mi.freespace_initialized) in bch2_check_alloc_key()
1172 bch2_btree_iter_set_pos(trans, discard_iter, alloc_k.k->p); in bch2_check_alloc_key()
1178 bool is_discarded = a->data_type == BCH_DATA_need_discard; in bch2_check_alloc_key()
1179 if (need_discard_or_freespace_err_on(!!k.k->type != is_discarded, in bch2_check_alloc_key()
1186 bch2_btree_iter_set_pos(trans, freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a)); in bch2_check_alloc_key()
1192 bool is_free = a->data_type == BCH_DATA_free; in bch2_check_alloc_key()
1193 if (need_discard_or_freespace_err_on(!!k.k->type != is_free, in bch2_check_alloc_key()
1200 bch2_btree_iter_set_pos(trans, bucket_gens_iter, alloc_gens_pos(alloc_k.k->p, &gens_offset)); in bch2_check_alloc_key()
1206 if (fsck_err_on(a->gen != alloc_gen(k, gens_offset), in bch2_check_alloc_key()
1209 alloc_gen(k, gens_offset), a->gen, in bch2_check_alloc_key()
1219 if (k.k->type == KEY_TYPE_bucket_gens) { in bch2_check_alloc_key()
1220 bkey_reassemble(&g->k_i, k); in bch2_check_alloc_key()
1222 bkey_bucket_gens_init(&g->k_i); in bch2_check_alloc_key()
1223 g->k.p = alloc_gens_pos(alloc_k.k->p, &gens_offset); in bch2_check_alloc_key()
1226 g->v.gens[gens_offset] = a->gen; in bch2_check_alloc_key()
1228 ret = bch2_trans_update(trans, bucket_gens_iter, &g->k_i, 0); in bch2_check_alloc_key()
1251 if (!ca->mi.freespace_initialized) in bch2_check_alloc_hole_freespace()
1261 *end = bkey_min(k.k->p, *end); in bch2_check_alloc_hole_freespace()
1263 if (fsck_err_on(k.k->type != KEY_TYPE_set, in bch2_check_alloc_hole_freespace()
1266 "device %llu buckets %llu-%llu", in bch2_check_alloc_hole_freespace()
1267 freespace_iter->pos.inode, in bch2_check_alloc_hole_freespace()
1268 freespace_iter->pos.offset, in bch2_check_alloc_hole_freespace()
1269 end->offset)) { in bch2_check_alloc_hole_freespace()
1277 bkey_init(&update->k); in bch2_check_alloc_hole_freespace()
1278 update->k.type = KEY_TYPE_set; in bch2_check_alloc_hole_freespace()
1279 update->k.p = freespace_iter->pos; in bch2_check_alloc_hole_freespace()
1280 bch2_key_resize(&update->k, in bch2_check_alloc_hole_freespace()
1281 min_t(u64, U32_MAX, end->offset - in bch2_check_alloc_hole_freespace()
1282 freespace_iter->pos.offset)); in bch2_check_alloc_hole_freespace()
1316 if (k.k->type == KEY_TYPE_bucket_gens) { in bch2_check_alloc_hole_bucket_gens()
1323 if (fsck_err_on(g.v.gens[i], trans, in bch2_check_alloc_hole_bucket_gens()
1326 bucket_gens_pos_to_alloc(k.k->p, i).inode, in bch2_check_alloc_hole_bucket_gens()
1327 bucket_gens_pos_to_alloc(k.k->p, i).offset, in bch2_check_alloc_hole_bucket_gens()
1328 g.v.gens[i])) { in bch2_check_alloc_hole_bucket_gens()
1329 g.v.gens[i] = 0; in bch2_check_alloc_hole_bucket_gens()
1349 *end = bkey_min(*end, bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0)); in bch2_check_alloc_hole_bucket_gens()
1371 ret = k.k->type != KEY_TYPE_set in bch2_recheck_discard_freespace_key()
1383 bch2_trans_do(w->c, bch2_recheck_discard_freespace_key(trans, w->pos)); in check_discard_freespace_key_work()
1384 bch2_write_ref_put(w->c, BCH_WRITE_REF_check_discard_freespace_key); in check_discard_freespace_key_work()
1391 struct bch_fs *c = trans->c; in bch2_check_discard_freespace_key()
1392 enum bch_data_type state = iter->btree_id == BTREE_ID_need_discard in bch2_check_discard_freespace_key()
1397 struct bpos bucket = iter->pos; in bch2_check_discard_freespace_key()
1399 u64 genbits = iter->pos.offset & (~0ULL << 56); in bch2_check_discard_freespace_key()
1412 bch2_btree_id_str(iter->btree_id), bucket.inode, bucket.offset)) in bch2_check_discard_freespace_key()
1421 if (a->data_type != state || in bch2_check_discard_freespace_key()
1427 bch2_btree_id_str(iter->btree_id), in bch2_check_discard_freespace_key()
1428 iter->pos.inode, in bch2_check_discard_freespace_key()
1429 iter->pos.offset, in bch2_check_discard_freespace_key()
1430 a->data_type == state, in bch2_check_discard_freespace_key()
1437 *gen = a->gen; in bch2_check_discard_freespace_key()
1449 -BCH_ERR_transaction_restart_commit; in bch2_check_discard_freespace_key()
1466 INIT_WORK(&w->work, check_discard_freespace_key_work); in bch2_check_discard_freespace_key()
1467 w->c = c; in bch2_check_discard_freespace_key()
1468 w->pos = BBPOS(iter->btree_id, iter->pos); in bch2_check_discard_freespace_key()
1469 queue_work(c->write_ref_wq, &w->work); in bch2_check_discard_freespace_key()
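
The genbits extraction above, iter->pos.offset & (~0ULL << 56), shows how freespace keys encode their position: the low 56 bits are the bucket offset and the top 8 carry generation bits, so stale freespace entries can be detected against the alloc key. Pack/unpack helpers for that split (demo names assumed):

#include <stdint.h>

#define DEMO_GEN_SHIFT  56

static inline uint64_t demo_freespace_offset(uint64_t bucket, uint64_t genbits)
{
        return bucket | (genbits << DEMO_GEN_SHIFT);
}

static inline uint64_t demo_bucket_of(uint64_t offset)
{
        return offset & ~(~0ULL << DEMO_GEN_SHIFT);
}

static inline uint64_t demo_genbits_of(uint64_t offset)
{
        return offset & (~0ULL << DEMO_GEN_SHIFT);
}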
1491 struct bch_fs *c = trans->c; in bch2_check_bucket_gens_key()
1493 u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset; in bch2_check_bucket_gens_key()
1494 u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset; in bch2_check_bucket_gens_key()
1500 BUG_ON(k.k->type != KEY_TYPE_bucket_gens); in bch2_check_bucket_gens_key()
1503 struct bch_dev *ca = bch2_dev_tryget_noerror(c, k.k->p.inode); in bch2_check_bucket_gens_key()
1512 if (fsck_err_on(end <= ca->mi.first_bucket || in bch2_check_bucket_gens_key()
1513 start >= ca->mi.nbuckets, in bch2_check_bucket_gens_key()
1521 for (b = start; b < ca->mi.first_bucket; b++) in bch2_check_bucket_gens_key()
1522 if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK], in bch2_check_bucket_gens_key()
1525 g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0; in bch2_check_bucket_gens_key()
1529 for (b = ca->mi.nbuckets; b < end; b++) in bch2_check_bucket_gens_key()
1530 if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK], in bch2_check_bucket_gens_key()
1533 g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0; in bch2_check_bucket_gens_key()
1585 if (k.k->type) { in bch2_check_alloc_info()
1586 next = bpos_nosnap_successor(k.k->p); in bch2_check_alloc_info()
1596 next = k.k->p; in bch2_check_alloc_info()
1683 struct bch_fs *c = trans->c; in bch2_check_alloc_to_lru_ref()
1698 struct bch_dev *ca = bch2_dev_tryget_noerror(c, alloc_k.k->p.inode); in bch2_check_alloc_to_lru_ref()
1707 bucket_to_u64(alloc_k.k->p), in bch2_check_alloc_to_lru_ref()
1713 if (a->data_type != BCH_DATA_cached) in bch2_check_alloc_to_lru_ref()
1716 if (fsck_err_on(!a->io_time[READ], in bch2_check_alloc_to_lru_ref()
1727 a_mut->v.io_time[READ] = bch2_current_io_time(c, READ); in bch2_check_alloc_to_lru_ref()
1729 &a_mut->k_i, BTREE_TRIGGER_norun); in bch2_check_alloc_to_lru_ref()
1733 a = &a_mut->v; in bch2_check_alloc_to_lru_ref()
1736 ret = bch2_lru_check_set(trans, alloc_k.k->p.inode, in bch2_check_alloc_to_lru_ref()
1737 bucket_to_u64(alloc_k.k->p), in bch2_check_alloc_to_lru_ref()
1738 a->io_time[READ], in bch2_check_alloc_to_lru_ref()
1754 bkey_init(&last_flushed.k->k); in bch2_check_alloc_to_lru_refs()
1772 mutex_lock(&ca->discard_buckets_in_flight_lock); in discard_in_flight_add()
1773 darray_for_each(ca->discard_buckets_in_flight, i) in discard_in_flight_add()
1774 if (i->bucket == bucket) { in discard_in_flight_add()
1775 ret = -BCH_ERR_EEXIST_discard_in_flight_add; in discard_in_flight_add()
1779 ret = darray_push(&ca->discard_buckets_in_flight, ((struct discard_in_flight) { in discard_in_flight_add()
1784 mutex_unlock(&ca->discard_buckets_in_flight_lock); in discard_in_flight_add()
1790 mutex_lock(&ca->discard_buckets_in_flight_lock); in discard_in_flight_remove()
1791 darray_for_each(ca->discard_buckets_in_flight, i) in discard_in_flight_remove()
1792 if (i->bucket == bucket) { in discard_in_flight_remove()
1793 BUG_ON(!i->in_progress); in discard_in_flight_remove()
1794 darray_remove_item(&ca->discard_buckets_in_flight, i); in discard_in_flight_remove()
1799 mutex_unlock(&ca->discard_buckets_in_flight_lock); in discard_in_flight_remove()
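
discard_in_flight_add()/remove() treat a darray as a mutex-protected dedup set: adding a bucket already present fails with EEXIST, so the same bucket is never discarded twice concurrently. The shape of that structure with pthreads and a fixed array standing in for the darray (all demo_* names are illustrative):

#include <pthread.h>
#include <errno.h>
#include <stdint.h>
#include <stdbool.h>

#define DEMO_MAX_IN_FLIGHT 64

struct demo_in_flight {
        pthread_mutex_t lock;
        unsigned nr;
        struct { uint64_t bucket; bool in_progress; } d[DEMO_MAX_IN_FLIGHT];
};

static int demo_in_flight_add(struct demo_in_flight *f, uint64_t bucket)
{
        int ret = 0;

        pthread_mutex_lock(&f->lock);
        for (unsigned i = 0; i < f->nr; i++)
                if (f->d[i].bucket == bucket) {
                        ret = -EEXIST;          /* already queued */
                        goto out;
                }

        if (f->nr == DEMO_MAX_IN_FLIGHT) {
                ret = -ENOMEM;                  /* fixed array, unlike darray */
                goto out;
        }

        f->d[f->nr].bucket = bucket;
        f->d[f->nr].in_progress = false;
        f->nr++;
out:
        pthread_mutex_unlock(&f->lock);
        return ret;
}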
1817 return test_bit(BCH_FS_discard_mount_opt_set, &c->flags) in discard_opt_enabled()
1818 ? c->opts.discard in discard_opt_enabled()
1819 : ca->mi.discard; in discard_opt_enabled()
1829 struct bch_fs *c = trans->c; in bch2_discard_one_bucket()
1830 struct bpos pos = need_discard_iter->pos; in bch2_discard_one_bucket()
1839 s->open++; in bch2_discard_one_bucket()
1843 u64 seq_ready = bch2_bucket_journal_seq_ready(&c->buckets_waiting_for_journal, in bch2_discard_one_bucket()
1845 if (seq_ready > c->journal.flushed_seq_ondisk) { in bch2_discard_one_bucket()
1846 if (seq_ready > c->journal.flushing_seq) in bch2_discard_one_bucket()
1847 s->need_journal_commit++; in bch2_discard_one_bucket()
1852 need_discard_iter->pos, in bch2_discard_one_bucket()
1863 if (a->v.data_type != BCH_DATA_need_discard) { in bch2_discard_one_bucket()
1882 s->discarded++; in bch2_discard_one_bucket()
1885 if (discard_opt_enabled(c, ca) && !c->opts.nochanges) { in bch2_discard_one_bucket()
1891 blkdev_issue_discard(ca->disk_sb.bdev, in bch2_discard_one_bucket()
1892 k.k->p.offset * ca->mi.bucket_size, in bch2_discard_one_bucket()
1893 ca->mi.bucket_size, in bch2_discard_one_bucket()
1901 SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false); in bch2_discard_one_bucket()
1902 alloc_data_type_set(&a->v, a->v.data_type); in bch2_discard_one_bucket()
1904 ret = bch2_trans_update(trans, &iter, &a->k_i, 0); in bch2_discard_one_bucket()
1923 s->seen++; in bch2_discard_one_bucket()
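
The blkdev_issue_discard() call works in 512-byte sectors: buckets are fixed size, so bucket N starts at sector N * bucket_size and the discard spans one bucket_size. That arithmetic, for reference:

#include <stdint.h>

/* Start sector, in 512-byte sectors, of a bucket's discard range; the
 * length is simply bucket_size. */
static inline uint64_t demo_bucket_start_sector(uint64_t bucket,
                                                uint32_t bucket_size)
{
        return bucket * bucket_size;
}

For example, with 1024-sector (512 KiB) buckets, bucket 1000 discards sectors [1024000, 1025024).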
1932 struct bch_fs *c = ca->fs; in bch2_do_discards_work()
1945 POS(ca->dev_idx, 0), in bch2_do_discards_work()
1946 POS(ca->dev_idx, U64_MAX), 0, k, in bch2_do_discards_work()
1950 bch2_journal_flush_async(&c->journal, NULL); in bch2_do_discards_work()
1955 percpu_ref_put(&ca->io_ref[WRITE]); in bch2_do_discards_work()
1961 struct bch_fs *c = ca->fs; in bch2_dev_do_discards()
1966 if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE)) in bch2_dev_do_discards()
1969 if (queue_work(c->write_ref_wq, &ca->discard_work)) in bch2_dev_do_discards()
1972 percpu_ref_put(&ca->io_ref[WRITE]); in bch2_dev_do_discards()
1991 BTREE_ID_need_discard, POS(ca->dev_idx, bucket), 0); in bch2_do_discards_fast_one()
1996 if (log_fsck_err_on(discard_k.k->type != KEY_TYPE_set, in bch2_do_discards_fast_one()
1999 ca->dev_idx, bucket)) in bch2_do_discards_fast_one()
2012 struct bch_fs *c = ca->fs; in bch2_do_discards_fast_work()
2022 mutex_lock(&ca->discard_buckets_in_flight_lock); in bch2_do_discards_fast_work()
2023 darray_for_each(ca->discard_buckets_in_flight, i) { in bch2_do_discards_fast_work()
2024 if (i->in_progress) in bch2_do_discards_fast_work()
2028 bucket = i->bucket; in bch2_do_discards_fast_work()
2029 i->in_progress = true; in bch2_do_discards_fast_work()
2032 mutex_unlock(&ca->discard_buckets_in_flight_lock); in bch2_do_discards_fast_work()
2050 percpu_ref_put(&ca->io_ref[WRITE]); in bch2_do_discards_fast_work()
2056 struct bch_fs *c = ca->fs; in bch2_discard_one_bucket_fast()
2064 if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE)) in bch2_discard_one_bucket_fast()
2067 if (queue_work(c->write_ref_wq, &ca->discard_fast_work)) in bch2_discard_one_bucket_fast()
2070 percpu_ref_put(&ca->io_ref[WRITE]); in bch2_discard_one_bucket_fast()
2097 bch2_bkey_drop_device(bkey_i_to_s(n), ca->dev_idx); in invalidate_one_bp()
2117 if (k.k->type != KEY_TYPE_backpointer) in invalidate_one_bucket_by_bps()
2122 if (bp.v->bucket_gen != gen) in invalidate_one_bucket_by_bps()
2139 struct bch_fs *c = trans->c; in invalidate_one_bucket()
2141 struct bpos bucket = u64_to_bucket(lru_k.k->p.offset); in invalidate_one_bucket()
2152 return bch2_btree_bit_mod_buffered(trans, BTREE_ID_lru, lru_iter->pos, false); in invalidate_one_bucket()
2170 if (lru_pos_time(lru_iter->pos) != alloc_lru_idx_read(*a)) in invalidate_one_bucket()
2180 BUG_ON(a->data_type != BCH_DATA_cached); in invalidate_one_bucket()
2181 BUG_ON(a->dirty_sectors); in invalidate_one_bucket()
2183 if (!a->cached_sectors) in invalidate_one_bucket()
2186 unsigned cached_sectors = a->cached_sectors; in invalidate_one_bucket()
2187 u8 gen = a->gen; in invalidate_one_bucket()
2194 --*nr_to_invalidate; in invalidate_one_bucket()
2207 k = bch2_btree_iter_peek_max(trans, iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX)); in next_lru_key()
2209 bch2_btree_iter_set_pos(trans, iter, lru_pos(ca->dev_idx, 0, 0)); in next_lru_key()
2220 struct bch_fs *c = ca->fs; in bch2_do_invalidates_work()
2226 bkey_init(&last_flushed.k->k); in bch2_do_invalidates_work()
2238 lru_pos(ca->dev_idx, 0, in bch2_do_invalidates_work()
2264 percpu_ref_put(&ca->io_ref[WRITE]); in bch2_do_invalidates_work()
2271 struct bch_fs *c = ca->fs; in bch2_dev_do_invalidates()
2276 if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE)) in bch2_dev_do_invalidates()
2279 if (queue_work(c->write_ref_wq, &ca->invalidate_work)) in bch2_dev_do_invalidates()
2282 percpu_ref_put(&ca->io_ref[WRITE]); in bch2_dev_do_invalidates()
2300 struct bpos end = POS(ca->dev_idx, bucket_end); in bch2_dev_freespace_init()
2306 BUG_ON(bucket_end > ca->mi.nbuckets); in bch2_dev_freespace_init()
2309 POS(ca->dev_idx, max_t(u64, ca->mi.first_bucket, bucket_start)), in bch2_dev_freespace_init()
2318 __func__, iter.pos.offset, ca->mi.nbuckets); in bch2_dev_freespace_init()
2334 if (k.k->type) { in bch2_dev_freespace_init()
2357 bkey_init(&freespace->k); in bch2_dev_freespace_init()
2358 freespace->k.type = KEY_TYPE_set; in bch2_dev_freespace_init()
2359 freespace->k.p = k.k->p; in bch2_dev_freespace_init()
2360 freespace->k.size = k.k->size; in bch2_dev_freespace_init()
2368 bch2_btree_iter_set_pos(trans, &iter, k.k->p); in bch2_dev_freespace_init()
2385 mutex_lock(&c->sb_lock); in bch2_dev_freespace_init()
2386 m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx); in bch2_dev_freespace_init()
2388 mutex_unlock(&c->sb_lock); in bch2_dev_freespace_init()
2404 if (ca->mi.freespace_initialized) in bch2_fs_freespace_init()
2412 ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets); in bch2_fs_freespace_init()
2421 mutex_lock(&c->sb_lock); in bch2_fs_freespace_init()
2423 mutex_unlock(&c->sb_lock); in bch2_fs_freespace_init()
2434 struct bpos start = POS(ca->dev_idx, 0); in bch2_dev_remove_alloc()
2435 struct bpos end = POS(ca->dev_idx, U64_MAX); in bch2_dev_remove_alloc()
2442 ret = bch2_dev_remove_stripes(c, ca->dev_idx) ?: in bch2_dev_remove_alloc()
2455 bch2_dev_usage_remove(c, ca->dev_idx); in bch2_dev_remove_alloc()
2465 struct bch_fs *c = trans->c; in __bch2_bucket_io_time_reset()
2475 if (a->v.io_time[rw] == now) in __bch2_bucket_io_time_reset()
2478 a->v.io_time[rw] = now; in __bch2_bucket_io_time_reset()
2480 ret = bch2_trans_update(trans, &iter, &a->k_i, 0) ?: in __bch2_bucket_io_time_reset()
2504 lockdep_assert_held(&c->state_lock); in bch2_recalc_capacity()
2507 struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi; in bch2_recalc_capacity()
2509 ra_pages += bdi->ra_pages; in bch2_recalc_capacity()
2524 * from scratch - copygc will use its entire in bch2_recalc_capacity()
2530 * allocations for foreground writes must wait - in bch2_recalc_capacity()
2531 * not -ENOSPC calculations. in bch2_recalc_capacity()
2534 dev_reserve += ca->nr_btree_reserve * 2; in bch2_recalc_capacity()
2535 dev_reserve += ca->mi.nbuckets >> 6; /* copygc reserve */ in bch2_recalc_capacity()
2541 dev_reserve *= ca->mi.bucket_size; in bch2_recalc_capacity()
2543 capacity += bucket_to_sector(ca, ca->mi.nbuckets - in bch2_recalc_capacity()
2544 ca->mi.first_bucket); in bch2_recalc_capacity()
2549 ca->mi.bucket_size); in bch2_recalc_capacity()
2552 gc_reserve = c->opts.gc_reserve_bytes in bch2_recalc_capacity()
2553 ? c->opts.gc_reserve_bytes >> 9 in bch2_recalc_capacity()
2554 : div64_u64(capacity * c->opts.gc_reserve_percent, 100); in bch2_recalc_capacity()
2560 c->reserved = reserved_sectors; in bch2_recalc_capacity()
2561 c->capacity = capacity - reserved_sectors; in bch2_recalc_capacity()
2563 c->bucket_size_max = bucket_size_max; in bch2_recalc_capacity()
2566 closure_wake_up(&c->freelist_wait); in bch2_recalc_capacity()
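
The gc_reserve computation prefers an explicit byte count, converted to 512-byte sectors with >> 9, and otherwise falls back to a percentage of capacity (the kernel's div64_u64() only matters for 32-bit targets). The same arithmetic as a plain function:

#include <stdint.h>

static uint64_t demo_gc_reserve(uint64_t capacity,      /* sectors */
                                uint64_t reserve_bytes,
                                unsigned reserve_percent)
{
        return reserve_bytes
                ? reserve_bytes >> 9                    /* bytes -> sectors */
                : capacity * reserve_percent / 100;
}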
2574 ret = min(ret, ca->mi.nbuckets * ca->mi.bucket_size); in bch2_min_rw_member_capacity()
2583 for (ob = c->open_buckets; in bch2_dev_has_open_write_point()
2584 ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); in bch2_dev_has_open_write_point()
2586 spin_lock(&ob->lock); in bch2_dev_has_open_write_point()
2587 if (ob->valid && !ob->on_partial_list && in bch2_dev_has_open_write_point()
2588 ob->dev == ca->dev_idx) in bch2_dev_has_open_write_point()
2590 spin_unlock(&ob->lock); in bch2_dev_has_open_write_point()
2599 lockdep_assert_held(&c->state_lock); in bch2_dev_allocator_remove()
2603 for (unsigned i = 0; i < ARRAY_SIZE(c->rw_devs); i++) in bch2_dev_allocator_remove()
2604 clear_bit(ca->dev_idx, c->rw_devs[i].d); in bch2_dev_allocator_remove()
2606 c->rw_devs_change_count++; in bch2_dev_allocator_remove()
2619 closure_wake_up(&c->freelist_wait); in bch2_dev_allocator_remove()
2622 * journal_res_get() can block waiting for free space in the journal - in bch2_dev_allocator_remove()
2625 wake_up(&c->journal.wait); in bch2_dev_allocator_remove()
2629 closure_wait_event(&c->open_buckets_wait, in bch2_dev_allocator_remove()
2636 lockdep_assert_held(&c->state_lock); in bch2_dev_allocator_add()
2638 for (unsigned i = 0; i < ARRAY_SIZE(c->rw_devs); i++) in bch2_dev_allocator_add()
2639 if (ca->mi.data_allowed & (1 << i)) in bch2_dev_allocator_add()
2640 set_bit(ca->dev_idx, c->rw_devs[i].d); in bch2_dev_allocator_add()
2642 c->rw_devs_change_count++; in bch2_dev_allocator_add()
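
bch2_dev_allocator_add()/remove() keep one device bitmap per data type, so allocation can scan candidate devices per type, and bump rw_devs_change_count to invalidate cached device choices. A fixed-size sketch, with uint64_t masks standing in for the kernel's set_bit()/clear_bit() bitmaps and an assumed type count:

#include <stdint.h>

#define DEMO_NR_DATA_TYPES 8            /* assumed, for illustration */

struct demo_fs {
        uint64_t rw_devs[DEMO_NR_DATA_TYPES];   /* one bit per device */
        unsigned rw_devs_change_count;
};

static void demo_allocator_add(struct demo_fs *c, unsigned dev_idx,
                               unsigned data_allowed)
{
        for (unsigned i = 0; i < DEMO_NR_DATA_TYPES; i++)
                if (data_allowed & (1U << i))
                        c->rw_devs[i] |= 1ULL << dev_idx;

        c->rw_devs_change_count++;
}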
2647 darray_exit(&ca->discard_buckets_in_flight); in bch2_dev_allocator_background_exit()
2652 mutex_init(&ca->discard_buckets_in_flight_lock); in bch2_dev_allocator_background_init()
2653 INIT_WORK(&ca->discard_work, bch2_do_discards_work); in bch2_dev_allocator_background_init()
2654 INIT_WORK(&ca->discard_fast_work, bch2_do_discards_fast_work); in bch2_dev_allocator_background_init()
2655 INIT_WORK(&ca->invalidate_work, bch2_do_invalidates_work); in bch2_dev_allocator_background_init()
2660 spin_lock_init(&c->freelist_lock); in bch2_fs_allocator_background_init()