Lines Matching +full:mi +full:- +full:v in fs/bcachefs/extents.c
1 // SPDX-License-Identifier: GPL-2.0
27 #include "super-io.h"
32 #define x(n, v) [BCH_EXTENT_FLAG_##n] = #n, argument
53 for (i = f->devs; i < f->devs + f->nr; i++) in bch2_dev_io_failures()
54 if (i->dev == dev) in bch2_dev_io_failures()
64 struct bch_dev_io_failures *f = bch2_dev_io_failures(failed, p->ptr.dev); in bch2_mark_io_failure()
67 BUG_ON(failed->nr >= ARRAY_SIZE(failed->devs)); in bch2_mark_io_failure()
69 f = &failed->devs[failed->nr++]; in bch2_mark_io_failure()
71 f->dev = p->ptr.dev; in bch2_mark_io_failure()
74 if (p->do_ec_reconstruct) in bch2_mark_io_failure()
75 f->failed_ec = true; in bch2_mark_io_failure()
77 f->failed_io = true; in bch2_mark_io_failure()
79 f->failed_csum_nr++; in bch2_mark_io_failure()
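
The two fragments above are a find-or-append pattern over a small fixed-size array of per-device failure records: bch2_dev_io_failures() scans linearly for an existing entry, and bch2_mark_io_failure() appends a zeroed one when the scan misses, then bumps the relevant counter or flag. A minimal standalone sketch of the same pattern, using hypothetical simplified types (io_failures, dev_io_failures) in place of the bcachefs structs:

#include <assert.h>
#include <stdbool.h>
#include <string.h>

#define MAX_DEVS 32

struct dev_io_failures {		/* hypothetical analogue of bch_dev_io_failures */
	unsigned	dev;
	unsigned	failed_csum_nr;
	bool		failed_io;
};

struct io_failures {			/* hypothetical analogue of bch_io_failures */
	unsigned			nr;
	struct dev_io_failures		devs[MAX_DEVS];
};

/* Linear scan, as in bch2_dev_io_failures() */
static struct dev_io_failures *dev_io_failures(struct io_failures *f, unsigned dev)
{
	for (struct dev_io_failures *i = f->devs; i < f->devs + f->nr; i++)
		if (i->dev == dev)
			return i;
	return NULL;
}

/* Find-or-append, as in bch2_mark_io_failure() */
static void mark_io_failure(struct io_failures *failed, unsigned dev, bool csum_err)
{
	struct dev_io_failures *f = dev_io_failures(failed, dev);

	if (!f) {
		assert(failed->nr < MAX_DEVS);	/* stands in for the BUG_ON() */
		f = &failed->devs[failed->nr++];
		memset(f, 0, sizeof(*f));
		f->dev = dev;
	}

	if (csum_err)
		f->failed_csum_nr++;
	else
		f->failed_io = true;
}

int main(void)
{
	struct io_failures fs = { 0 };

	mark_io_failure(&fs, 3, true);
	mark_io_failure(&fs, 3, true);
	assert(dev_io_failures(&fs, 3)->failed_csum_nr == 2);
	return 0;
}
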
84 return ca ? atomic64_read(&ca->cur_latency[READ]) : S64_MAX; in dev_latency()
89 return !ca || ca->mi.state == BCH_MEMBER_STATE_failed; in dev_failed()
104 int failed_delta = dev_failed(ca1) - dev_failed(ca2); in ptr_better()
114 int crc_retry_delta = (int) p1.crc_retry_nr - (int) p2.crc_retry_nr; in ptr_better()
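
ptr_better() ranks two read candidates by computing a signed delta per criterion (failed device state first, then checksum-retry count) and deciding at the first nonzero delta; latency acts as a later tiebreak, deliberately fuzzed so slower devices still see reads (see the comment near source line 203). A hedged sketch of the tiered-comparison idiom with invented field names, minus the randomization:

#include <assert.h>
#include <stdbool.h>

struct candidate {		/* hypothetical flattened view of an extent pointer */
	int		failed;
	unsigned	crc_retry_nr;
	long		latency;
};

static bool ptr_better(struct candidate p1, struct candidate p2)
{
	int failed_delta = p1.failed - p2.failed;
	if (failed_delta)
		return failed_delta < 0;

	int crc_retry_delta = (int) p1.crc_retry_nr - (int) p2.crc_retry_nr;
	if (crc_retry_delta)
		return crc_retry_delta < 0;

	return p1.latency < p2.latency;		/* final tiebreak */
}

int main(void)
{
	struct candidate a = { 0, 1, 100 }, b = { 0, 0, 900 };

	assert(!ptr_better(a, b));	/* b wins on fewer checksum retries despite latency */
	return 0;
}
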
124 * This picks a non-stale pointer, preferably from a device other than @avoid.
125 * Avoid can be NULL, meaning pick any. If there are no non-stale pointers to
136 if (k.k->type == KEY_TYPE_error) in bch2_bkey_pick_read_device()
137 return -BCH_ERR_key_type_error; in bch2_bkey_pick_read_device()
142 return -BCH_ERR_extent_poisoned; in bch2_bkey_pick_read_device()
173 p.crc_retry_nr = f->failed_csum_nr; in bch2_bkey_pick_read_device()
174 p.has_ec &= ~f->failed_ec; in bch2_bkey_pick_read_device()
176 if (ca && ca->mi.state != BCH_MEMBER_STATE_failed) { in bch2_bkey_pick_read_device()
177 have_io_errors |= f->failed_io; in bch2_bkey_pick_read_device()
178 have_io_errors |= f->failed_ec; in bch2_bkey_pick_read_device()
180 have_csum_errors |= !!f->failed_csum_nr; in bch2_bkey_pick_read_device()
182 if (p.has_ec && (f->failed_io || f->failed_csum_nr)) in bch2_bkey_pick_read_device()
184 else if (f->failed_io || in bch2_bkey_pick_read_device()
185 f->failed_csum_nr > c->opts.checksum_err_retry_nr) in bch2_bkey_pick_read_device()
203 * device - we never want to stop issuing reads to the slower in bch2_bkey_pick_read_device()
224 return -BCH_ERR_no_device_to_read_from; in bch2_bkey_pick_read_device()
226 return -BCH_ERR_data_read_csum_err; in bch2_bkey_pick_read_device()
228 return -BCH_ERR_data_read_io_err; in bch2_bkey_pick_read_device()
234 return -BCH_ERR_no_devices_valid; in bch2_bkey_pick_read_device()
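
When every pointer has been rejected, the tail of bch2_bkey_pick_read_device() reports why, as a negative error code, distinguishing "no device at all" from accumulated checksum or I/O errors. A minimal sketch of that classification, with placeholder constants standing in for the real BCH_ERR_* values:

#include <assert.h>
#include <stdbool.h>

enum { ERR_NO_DEVICE = 1, ERR_CSUM = 2, ERR_IO = 3 };	/* placeholders, not BCH_ERR_* */

static int classify_read_failure(bool no_devices, bool have_csum_errors, bool have_io_errors)
{
	if (no_devices)
		return -ERR_NO_DEVICE;
	if (have_csum_errors)
		return -ERR_CSUM;	/* retries past checksum_err_retry_nr exhausted */
	return have_io_errors ? -ERR_IO : 0;
}

int main(void)
{
	assert(classify_read_failure(false, true, true) == -ERR_CSUM);
	return 0;
}
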
270 bkey_fsck_err_on(bpos_ge(bp.v->min_key, bp.k->p), in bch2_btree_ptr_v2_validate()
275 c->sb.version_min >= bcachefs_metadata_version_btree_ptr_sectors_written) in bch2_btree_ptr_v2_validate()
276 bkey_fsck_err_on(!bp.v->sectors_written, in bch2_btree_ptr_v2_validate()
291 le64_to_cpu(bp.v->seq), in bch2_btree_ptr_v2_to_text()
292 le16_to_cpu(bp.v->sectors_written), in bch2_btree_ptr_v2_to_text()
293 BTREE_PTR_RANGE_UPDATED(bp.v) ? "R " : ""); in bch2_btree_ptr_v2_to_text()
295 bch2_bpos_to_text(out, bp.v->min_key); in bch2_btree_ptr_v2_to_text()
306 compat_bpos(0, btree_id, version, big_endian, write, &bp.v->min_key); in bch2_btree_ptr_v2_compat()
310 !bkey_eq(bp.v->min_key, POS_MIN)) in bch2_btree_ptr_v2_compat()
311 bp.v->min_key = write in bch2_btree_ptr_v2_compat()
312 ? bpos_nosnap_predecessor(bp.v->min_key) in bch2_btree_ptr_v2_compat()
313 : bpos_nosnap_successor(bp.v->min_key); in bch2_btree_ptr_v2_compat()
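
The compat hook above applies bpos_nosnap_predecessor() when writing the old on-disk format and bpos_nosnap_successor() when reading it back, i.e. a transform and its exact inverse selected by direction. A toy round-trip check of that property, with hypothetical encode/decode helpers in place of the bpos functions:

#include <assert.h>
#include <stdint.h>

/* Hypothetical transform pair; the invariant is decode(encode(x)) == x. */
static uint64_t encode_min_key(uint64_t x) { return x - 1; }	/* like bpos_nosnap_predecessor */
static uint64_t decode_min_key(uint64_t x) { return x + 1; }	/* like bpos_nosnap_successor */

static uint64_t compat(uint64_t key, int write)
{
	return write ? encode_min_key(key) : decode_min_key(key);
}

int main(void)
{
	uint64_t k = 42;

	assert(compat(compat(k, 1), 0) == k);	/* write then read round-trips */
	return 0;
}
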
394 rp.crc.uncompressed_size > (c->opts.encoded_extent_max >> 9)) in bch2_extent_merge()
424 en_l->ptr = en_r->ptr; in bch2_extent_merge()
439 crc_r.offset -= crc_l.live_size; in bch2_extent_merge()
460 bch2_key_resize(l.k, l.k->size + r.k->size); in bch2_extent_merge()
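
bch2_extent_merge() only glues two keys whose data is physically contiguous; note how crc_r.offset is rebased by crc_l.live_size before the checksum regions are compared, and how a successful merge is just a key resize. A simplified sketch of the contiguity check for plain uncompressed pointers, with a hypothetical extent type (sizes in sectors):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct ext {			/* hypothetical: `size` sectors at disk `offset` on `dev` */
	unsigned	dev;
	uint64_t	offset;
	uint32_t	size;
};

/* Merge r into l iff r starts exactly where l ends, on the same device. */
static bool extent_merge(struct ext *l, const struct ext *r)
{
	if (l->dev != r->dev || l->offset + l->size != r->offset)
		return false;

	l->size += r->size;	/* like bch2_key_resize(l.k, l.k->size + r.k->size) */
	return true;
}

int main(void)
{
	struct ext l = { .dev = 0, .offset = 1000, .size = 8 };
	struct ext r = { .dev = 0, .offset = 1008, .size = 8 };

	assert(extent_merge(&l, &r) && l.size == 16);
	return 0;
}
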
472 bkey_fsck_err_on(!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX, in bch2_reservation_validate()
474 "invalid nr_replicas (%u)", r.v->nr_replicas); in bch2_reservation_validate()
485 le32_to_cpu(r.v->generation), in bch2_reservation_to_text()
486 r.v->nr_replicas); in bch2_reservation_to_text()
494 if (l.v->generation != r.v->generation || in bch2_reservation_merge()
495 l.v->nr_replicas != r.v->nr_replicas) in bch2_reservation_merge()
498 bch2_key_resize(l.k, l.k->size + r.k->size); in bch2_reservation_merge()
564 bkey_for_each_crc(&k->k, ptrs, u, i) in bch2_bkey_narrow_crcs()
576 BUG_ON(n.live_size != k->k.size); in bch2_bkey_narrow_crcs()
581 bkey_for_each_ptr_decode(&k->k, ptrs, p, i) in bch2_bkey_narrow_crcs()
583 bch2_bkey_drop_ptr_noerror(bkey_i_to_s(k), &i->ptr); in bch2_bkey_narrow_crcs()
602 ._compressed_size = _src.compressed_size - 1, \ in bch2_extent_crc_pack()
603 ._uncompressed_size = _src.uncompressed_size - 1, \ in bch2_extent_crc_pack()
608 dst->crc32 = (struct bch_extent_crc32) { in bch2_extent_crc_pack()
614 dst->crc64 = (struct bch_extent_crc64) { in bch2_extent_crc_pack()
622 dst->crc128 = (struct bch_extent_crc128) { in bch2_extent_crc_pack()
658 k->k.u64s += extent_entry_u64s(ptrs.end); in bch2_extent_crc_append()
660 EBUG_ON(bkey_val_u64s(&k->k) > BKEY_EXTENT_VAL_U64s_MAX); in bch2_extent_crc_append()
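
bch2_extent_crc_pack() stores compressed_size - 1 and uncompressed_size - 1, so an n-bit field represents sizes 1..2^n rather than 0..2^n - 1 (a zero size never occurs, so the bias loses nothing). A standalone sketch of the same trick with an invented 7-bit layout, not the real bch_extent_crc32 one:

#include <assert.h>
#include <stdint.h>

struct crc32_packed {			/* hypothetical, loosely modeled on bch_extent_crc32 */
	uint32_t compressed_size_minus_1   : 7;
	uint32_t uncompressed_size_minus_1 : 7;
	uint32_t csum_type                 : 4;
};

static struct crc32_packed pack(uint32_t c, uint32_t u, uint32_t type)
{
	assert(c >= 1 && c <= 128 && u >= 1 && u <= 128);	/* 7 bits hold 1..128 */
	return (struct crc32_packed) {
		.compressed_size_minus_1   = c - 1,
		.uncompressed_size_minus_1 = u - 1,
		.csum_type                 = type,
	};
}

static uint32_t unpack_uncompressed(struct crc32_packed p)
{
	return p.uncompressed_size_minus_1 + 1;		/* undo the bias */
}

int main(void)
{
	assert(unpack_uncompressed(pack(64, 128, 1)) == 128);
	return 0;
}
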
672 return k.k->type == KEY_TYPE_reservation in bch2_bkey_nr_ptrs_allocated()
673 ? bkey_s_c_to_reservation(k).v->nr_replicas in bch2_bkey_nr_ptrs_allocated()
681 if (k.k->type == KEY_TYPE_reservation) { in bch2_bkey_nr_ptrs_fully_allocated()
682 ret = bkey_s_c_to_reservation(k).v->nr_replicas; in bch2_bkey_nr_ptrs_fully_allocated()
744 if (p->ptr.cached) in __extent_ptr_durability()
747 return p->has_ec in __extent_ptr_durability()
748 ? p->ec.redundancy + 1 in __extent_ptr_durability()
749 : ca->mi.durability; in __extent_ptr_durability()
754 struct bch_dev *ca = bch2_dev_rcu(c, p->ptr.dev); in bch2_extent_ptr_desired_durability()
761 struct bch_dev *ca = bch2_dev_rcu(c, p->ptr.dev); in bch2_extent_ptr_durability()
763 if (!ca || ca->mi.state == BCH_MEMBER_STATE_failed) in bch2_extent_ptr_durability()
793 if (p.ptr.dev < c->sb.nr_devices && c->devs[p.ptr.dev]) in bch2_bkey_durability_safe()
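
The durability logic above: a cached pointer contributes nothing, a pointer on a failed or missing device contributes nothing, an erasure-coded pointer counts as redundancy + 1, and otherwise the member device's configured durability applies. A hedged sketch with the inputs flattened into one hypothetical struct (the real code decodes them from the pointer and member info under RCU):

#include <assert.h>
#include <stdbool.h>

struct ptr_info {		/* hypothetical flattened inputs */
	bool		cached, has_ec, dev_failed;
	unsigned	ec_redundancy, dev_durability;
};

static unsigned ptr_durability(const struct ptr_info *p)
{
	if (p->cached)
		return 0;		/* cached copies add no durability */
	if (p->dev_failed)
		return 0;		/* failed devices don't count either */
	return p->has_ec
		? p->ec_redundancy + 1	/* a stripe survives `redundancy` losses */
		: p->dev_durability;
}

int main(void)
{
	struct ptr_info p = { .has_ec = true, .ec_redundancy = 2 };

	assert(ptr_durability(&p) == 3);
	return 0;
}
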
805 memmove_u64s(entry, next, (u64 *) end - (u64 *) next); in bch2_bkey_extent_entry_drop()
806 k->k.u64s -= extent_entry_u64s(entry); in bch2_bkey_extent_entry_drop()
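
bch2_bkey_extent_entry_drop() is the classic delete-from-packed-array move: slide the tail down over the hole with a memmove, then shrink the recorded length in u64 units. A self-contained sketch over a plain uint64_t array:

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Remove entry_u64s words starting at index pos from a packed array of *nr words. */
static void entry_drop(uint64_t *v, unsigned *nr, unsigned pos, unsigned entry_u64s)
{
	assert(pos + entry_u64s <= *nr);
	memmove(&v[pos], &v[pos + entry_u64s],
		(*nr - pos - entry_u64s) * sizeof(uint64_t));	/* slide tail down */
	*nr -= entry_u64s;					/* like k->k.u64s -= ... */
}

int main(void)
{
	uint64_t v[] = { 1, 2, 3, 4, 5 };
	unsigned nr = 5;

	entry_drop(v, &nr, 1, 2);	/* drop { 2, 3 } */
	assert(nr == 3 && v[1] == 4 && v[2] == 5);
	return 0;
}
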
814 bch2_extent_crc_unpack(&k->k, NULL); in bch2_extent_ptr_decoded_append()
817 if (!bch2_crc_unpacked_cmp(crc, p->crc)) { in bch2_extent_ptr_decoded_append()
822 bkey_for_each_crc(&k->k, ptrs, crc, pos) in bch2_extent_ptr_decoded_append()
823 if (!bch2_crc_unpacked_cmp(crc, p->crc)) { in bch2_extent_ptr_decoded_append()
828 bch2_extent_crc_append(k, p->crc); in bch2_extent_ptr_decoded_append()
831 p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr; in bch2_extent_ptr_decoded_append()
832 __extent_entry_insert(k, pos, to_entry(&p->ptr)); in bch2_extent_ptr_decoded_append()
834 if (p->has_ec) { in bch2_extent_ptr_decoded_append()
835 p->ec.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr; in bch2_extent_ptr_decoded_append()
836 __extent_entry_insert(k, pos, to_entry(&p->ec)); in bch2_extent_ptr_decoded_append()
862 if (k.k->type == KEY_TYPE_stripe) { in bch2_bkey_drop_ptr_noerror()
863 ptr->dev = BCH_SB_MEMBER_INVALID; in bch2_bkey_drop_ptr_noerror()
867 EBUG_ON(ptr < &ptrs.start->ptr || in bch2_bkey_drop_ptr_noerror()
868 ptr >= &ptrs.end->ptr); in bch2_bkey_drop_ptr_noerror()
869 EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr); in bch2_bkey_drop_ptr_noerror()
896 if (k.k->type != KEY_TYPE_stripe) { in bch2_bkey_drop_ptr()
902 if (p.ptr.dev == ptr->dev && p.has_ec) { in bch2_bkey_drop_ptr()
903 ptr->dev = BCH_SB_MEMBER_INVALID; in bch2_bkey_drop_ptr()
915 * stale - but to do that correctly we'd need to grab an open_bucket in bch2_bkey_drop_ptr()
920 k.k->type = KEY_TYPE_error; in bch2_bkey_drop_ptr()
923 k.k->type = KEY_TYPE_deleted; in bch2_bkey_drop_ptr()
930 bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev); in bch2_bkey_drop_device()
935 bch2_bkey_drop_ptrs_noerror(k, ptr, ptr->dev == dev); in bch2_bkey_drop_device_noerror()
943 if (ptr->dev == dev) in bch2_bkey_has_device_c()
957 if (bch2_dev_in_target(c, ptr->dev, target) && in bch2_bkey_has_target()
958 (ca = bch2_dev_rcu(c, ptr->dev)) && in bch2_bkey_has_target()
959 (!ptr->cached || in bch2_bkey_has_target()
979 (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(k.k) == in bch2_bkey_matches_ptr()
980 (s64) m.offset - offset) in bch2_bkey_matches_ptr()
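
One way to read the identity in bch2_bkey_matches_ptr(): each side normalizes its pointer to a common origin, the disk sector that would back logical sector 0 of the extent, so two pointers match exactly when those origins agree. A worked check with made-up numbers (this is my reading of the arithmetic, not wording from the source):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Key: logical sectors [96, 128) with data at disk sector 1000 and
	 * a crc region starting 8 sectors before the live data (crc.offset = 8). */
	int64_t ptr_offset = 1000, crc_offset = 8, bkey_start = 96;

	/* Candidate pointer: disk sector 1016 backing logical sector 104. */
	int64_t m_offset = 1016, logical = 104;

	/* Both sides reduce to the same origin: 912. */
	assert(ptr_offset + crc_offset - bkey_start == m_offset - logical);
	return 0;
}
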
991 if (k1.k->type != k2.k->type) in bch2_extents_match()
1010 * to the same region on disk - adjusting in bch2_extents_match()
1014 (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) == in bch2_extents_match()
1015 (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k) && in bch2_extents_match()
1049 (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) == in bch2_extent_has_ptr()
1050 (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k)) in bch2_extent_has_ptr()
1051 return &entry2->ptr; in bch2_extent_has_ptr()
1059 unsigned target = opts->promote_target ?: opts->foreground_target; in want_cached_ptr()
1061 if (target && !bch2_dev_in_target(c, ptr->dev, target)) in want_cached_ptr()
1064 struct bch_dev *ca = bch2_dev_rcu_noerror(c, ptr->dev); in want_cached_ptr()
1085 * Stripes can't contain cached data, for - reasons. in bch2_extent_ptr_set_cached()
1090 if (&entry->ptr == ptr) { in bch2_extent_ptr_set_cached()
1094 ptr->cached = true; in bch2_extent_ptr_set_cached()
1104 * bch2_extent_normalize - clean up an extent, dropping stale pointers etc.
1117 ptr->cached && in bch2_extent_normalize()
1118 (!(ca = bch2_dev_rcu(c, ptr->dev)) || in bch2_extent_normalize()
1126 * bch2_extent_normalize_by_opts - clean up an extent, dropping stale pointers etc.
1144 if (ptr->cached) { in bch2_extent_normalize_by_opts()
1158 out->atomic++; in bch2_extent_ptr_to_text()
1160 struct bch_dev *ca = bch2_dev_rcu_noerror(c, ptr->dev); in bch2_extent_ptr_to_text()
1162 prt_printf(out, "ptr: %u:%llu gen %u%s", ptr->dev, in bch2_extent_ptr_to_text()
1163 (u64) ptr->offset, ptr->gen, in bch2_extent_ptr_to_text()
1164 ptr->cached ? " cached" : ""); in bch2_extent_ptr_to_text()
1167 u64 b = sector_to_bucket_and_offset(ca, ptr->offset, &offset); in bch2_extent_ptr_to_text()
1170 ptr->dev, b, offset, ptr->gen); in bch2_extent_ptr_to_text()
1171 if (ca->mi.durability != 1) in bch2_extent_ptr_to_text()
1172 prt_printf(out, " d=%u", ca->mi.durability); in bch2_extent_ptr_to_text()
1173 if (ptr->cached) in bch2_extent_ptr_to_text()
1175 if (ptr->unwritten) in bch2_extent_ptr_to_text()
1184 --out->atomic; in bch2_extent_ptr_to_text()
1190 crc->compressed_size, in bch2_extent_crc_unpacked_to_text()
1191 crc->uncompressed_size, in bch2_extent_crc_unpacked_to_text()
1192 crc->offset, crc->nonce); in bch2_extent_crc_unpacked_to_text()
1193 bch2_prt_csum_type(out, crc->csum_type); in bch2_extent_crc_unpacked_to_text()
1194 prt_printf(out, " %0llx:%0llx ", crc->csum.hi, crc->csum.lo); in bch2_extent_crc_unpacked_to_text()
1196 bch2_prt_compression_type(out, crc->compression_type); in bch2_extent_crc_unpacked_to_text()
1204 prt_printf(out, " replicas=%u", r->data_replicas); in bch2_extent_rebalance_to_text()
1205 if (r->data_replicas_from_inode) in bch2_extent_rebalance_to_text()
1209 bch2_prt_csum_opt(out, r->data_checksum); in bch2_extent_rebalance_to_text()
1210 if (r->data_checksum_from_inode) in bch2_extent_rebalance_to_text()
1213 if (r->background_compression || r->background_compression_from_inode) { in bch2_extent_rebalance_to_text()
1215 bch2_compression_opt_to_text(out, r->background_compression); in bch2_extent_rebalance_to_text()
1217 if (r->background_compression_from_inode) in bch2_extent_rebalance_to_text()
1221 if (r->background_target || r->background_target_from_inode) { in bch2_extent_rebalance_to_text()
1224 bch2_target_to_text(out, c, r->background_target); in bch2_extent_rebalance_to_text()
1226 prt_printf(out, "%u", r->background_target); in bch2_extent_rebalance_to_text()
1228 if (r->background_target_from_inode) in bch2_extent_rebalance_to_text()
1232 if (r->promote_target || r->promote_target_from_inode) { in bch2_extent_rebalance_to_text()
1235 bch2_target_to_text(out, c, r->promote_target); in bch2_extent_rebalance_to_text()
1237 prt_printf(out, "%u", r->promote_target); in bch2_extent_rebalance_to_text()
1239 if (r->promote_target_from_inode) in bch2_extent_rebalance_to_text()
1243 if (r->erasure_code || r->erasure_code_from_inode) { in bch2_extent_rebalance_to_text()
1244 prt_printf(out, " ec=%u", r->erasure_code); in bch2_extent_rebalance_to_text()
1245 if (r->erasure_code_from_inode) in bch2_extent_rebalance_to_text()
1279 const struct bch_extent_stripe_ptr *ec = &entry->stripe_ptr; in bch2_bkey_ptrs_to_text()
1282 (u64) ec->idx, ec->block); in bch2_bkey_ptrs_to_text()
1286 bch2_extent_rebalance_to_text(out, c, &entry->rebalance); in bch2_bkey_ptrs_to_text()
1290 prt_bitflags(out, bch2_extent_flags_strs, entry->flags.flags); in bch2_bkey_ptrs_to_text()
1313 bkey_fsck_err_on(ptr != ptr2 && ptr->dev == ptr2->dev, in extent_ptr_validate()
1315 "multiple pointers to same device (%u)", ptr->dev); in extent_ptr_validate()
1319 struct bch_dev *ca = bch2_dev_rcu_noerror(c, ptr->dev); in extent_ptr_validate()
1325 u64 bucket = sector_to_bucket_and_offset(ca, ptr->offset, &bucket_offset); in extent_ptr_validate()
1326 unsigned first_bucket = ca->mi.first_bucket; in extent_ptr_validate()
1327 u64 nbuckets = ca->mi.nbuckets; in extent_ptr_validate()
1328 unsigned bucket_size = ca->mi.bucket_size; in extent_ptr_validate()
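
extent_ptr_validate() decomposes the pointer's sector offset into a (bucket, offset-within-bucket) pair and range-checks it against the member's first_bucket and nbuckets; an extent must also fit inside a single bucket. A simplified standalone sketch of that decomposition (the real sector_to_bucket_and_offset() takes the device, and bucket sizes need not be powers of two, but the arithmetic is the same shape):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static uint64_t sector_to_bucket_and_offset(uint64_t sector, unsigned bucket_size,
					    unsigned *offset)
{
	*offset = sector % bucket_size;
	return sector / bucket_size;
}

static bool ptr_in_range(uint64_t sector, unsigned size_ondisk, unsigned first_bucket,
			 uint64_t nbuckets, unsigned bucket_size)
{
	unsigned offset;
	uint64_t bucket = sector_to_bucket_and_offset(sector, bucket_size, &offset);

	return bucket >= first_bucket &&
	       bucket <  nbuckets &&
	       offset + size_ondisk <= bucket_size;	/* must not spill into the next bucket */
}

int main(void)
{
	/* bucket_size 128: sector 1000 is bucket 7, offset 104; 104 + 16 fits. */
	assert(ptr_in_range(1000, 16, 1, 64, 128));
	return 0;
}
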
1351 unsigned size_ondisk = k.k->size; in bch2_bkey_ptrs_validate()
1373 ret = extent_ptr_validate(c, k, from, &entry->ptr, size_ondisk, false); in bch2_bkey_ptrs_validate()
1377 bkey_fsck_err_on(entry->ptr.cached && have_ec, in bch2_bkey_ptrs_validate()
1381 if (!entry->ptr.unwritten) in bch2_bkey_ptrs_validate()
1406 (crc.uncompressed_size > c->opts.encoded_extent_max >> 9) && in bch2_bkey_ptrs_validate()
1443 const struct bch_extent_rebalance *r = &entry->rebalance; in bch2_bkey_ptrs_validate()
1445 if (!bch2_compression_opt_valid(r->compression)) { in bch2_bkey_ptrs_validate()
1446 struct bch_compression_opt opt = __bch2_compression_decode(r->compression); in bch2_bkey_ptrs_validate()
1449 return -BCH_ERR_invalid_bkey; in bch2_bkey_ptrs_validate()
1471 bkey_fsck_err_on(k.k->type != KEY_TYPE_extent && have_unwritten, in bch2_bkey_ptrs_validate()
1502 entry->crc32.csum = swab32(entry->crc32.csum); in bch2_ptr_swab()
1505 entry->crc64.csum_hi = swab16(entry->crc64.csum_hi); in bch2_ptr_swab()
1506 entry->crc64.csum_lo = swab64(entry->crc64.csum_lo); in bch2_ptr_swab()
1509 entry->crc128.csum.hi = (__force __le64) in bch2_ptr_swab()
1510 swab64((__force u64) entry->crc128.csum.hi); in bch2_ptr_swab()
1511 entry->crc128.csum.lo = (__force __le64) in bch2_ptr_swab()
1512 swab64((__force u64) entry->crc128.csum.lo); in bch2_ptr_swab()
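
bch2_ptr_swab() byte-swaps each checksum in place with a width matching the CRC entry variant; note the real crc64 case swabs a 16-bit high part and a 64-bit low part because of its bitfield layout. A standalone sketch of width-dispatched swabbing using GCC/Clang builtins and a hypothetical tagged struct in place of the on-disk union:

#include <assert.h>
#include <stdint.h>

enum crc_kind { CRC32, CRC64, CRC128 };

struct crc_entry {			/* hypothetical stand-in for the extent entry union */
	enum crc_kind kind;
	union {
		uint32_t csum32;
		uint64_t csum64;
		struct { uint64_t hi, lo; } csum128;
	};
};

static void crc_swab(struct crc_entry *e)
{
	switch (e->kind) {
	case CRC32:
		e->csum32 = __builtin_bswap32(e->csum32);
		break;
	case CRC64:
		e->csum64 = __builtin_bswap64(e->csum64);
		break;
	case CRC128:
		e->csum128.hi = __builtin_bswap64(e->csum128.hi);
		e->csum128.lo = __builtin_bswap64(e->csum128.lo);
		break;
	}
}

int main(void)
{
	struct crc_entry e = { .kind = CRC32, .csum32 = 0x11223344 };

	crc_swab(&e);
	assert(e.csum32 == 0x44332211);
	return 0;
}
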
1535 ptrs.start->flags.flags = flags; in bch2_bkey_extent_flags_set()
1558 EBUG_ON(bkey_gt(where, k.k->p)); in bch2_cut_front_s()
1560 sub = where.offset - bkey_start_offset(k.k); in bch2_cut_front_s()
1562 k.k->size -= sub; in bch2_cut_front_s()
1564 if (!k.k->size) { in bch2_cut_front_s()
1565 k.k->type = KEY_TYPE_deleted; in bch2_cut_front_s()
1569 switch (k.k->type) { in bch2_cut_front_s()
1580 entry->ptr.offset += sub; in bch2_cut_front_s()
1583 entry->crc32.offset += sub; in bch2_cut_front_s()
1586 entry->crc64.offset += sub; in bch2_cut_front_s()
1589 entry->crc128.offset += sub; in bch2_cut_front_s()
1606 SET_REFLINK_P_IDX(p.v, REFLINK_P_IDX(p.v) + sub); in bch2_cut_front_s()
1616 memmove(p, p + sub, bytes - sub); in bch2_cut_front_s()
1618 new_val_u64s -= sub >> 3; in bch2_cut_front_s()
1623 val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s; in bch2_cut_front_s()
1628 return -val_u64s_delta; in bch2_cut_front_s()
1637 if (bkey_ge(where, k.k->p)) in bch2_cut_back_s()
1642 len = where.offset - bkey_start_offset(k.k); in bch2_cut_back_s()
1644 k.k->p.offset = where.offset; in bch2_cut_back_s()
1645 k.k->size = len; in bch2_cut_back_s()
1648 k.k->type = KEY_TYPE_deleted; in bch2_cut_back_s()
1652 switch (k.k->type) { in bch2_cut_back_s()
1656 min(bkey_inline_data_bytes(k.k), k.k->size << 9)) >> 3; in bch2_cut_back_s()
1660 val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s; in bch2_cut_back_s()
1665 return -val_u64s_delta; in bch2_cut_back_s()
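
bch2_cut_front_s() and bch2_cut_back_s() trim an extent from either end: cutting the front advances the on-disk offsets (the raw pointer offset for plain pointers, the crc region offset for checksummed ones) and memmoves inline data down, while cutting the back only shortens the size and end position, since nothing before the cut moves. A simplified sketch for a plain uncompressed extent, with a hypothetical struct:

#include <assert.h>
#include <stdint.h>

struct ext {		/* hypothetical: logical sectors [start, start + size) at disk `offset` */
	uint64_t	start, offset;
	uint32_t	size;
};

static void cut_front(struct ext *e, uint64_t where)
{
	uint64_t sub = where - e->start;	/* like where.offset - bkey_start_offset(k.k) */

	assert(sub <= e->size);
	e->start  += sub;
	e->offset += sub;	/* a front cut shifts where the remaining data lives */
	e->size   -= sub;
}

static void cut_back(struct ext *e, uint64_t where)
{
	assert(where >= e->start && where <= e->start + e->size);
	e->size = where - e->start;	/* a back cut only shortens; offset is untouched */
}

int main(void)
{
	struct ext e = { .start = 96, .offset = 1000, .size = 32 };

	cut_front(&e, 100);
	cut_back(&e, 120);
	assert(e.start == 100 && e.offset == 1004 && e.size == 20);
	return 0;
}
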