// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_background.h"
#include "bkey_buf.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "ec.h"
#include "error.h"
#include "lru.h"
#include "recovery.h"

/* KEY_TYPE_lru is obsolete: */
int bch2_lru_validate(struct bch_fs *c, struct bkey_s_c k,
		      struct bkey_validate_context from)
{
	int ret = 0;

	bkey_fsck_err_on(!lru_pos_time(k.k->p),
			 c, lru_entry_at_time_0,
			 "lru entry at time=0");
fsck_err:
	return ret;
}

void bch2_lru_to_text(struct printbuf *out, struct bch_fs *c,
		      struct bkey_s_c k)
{
	const struct bch_lru *lru = bkey_s_c_to_lru(k).v;

	prt_printf(out, "idx %llu", le64_to_cpu(lru->idx));
}

void bch2_lru_pos_to_text(struct printbuf *out, struct bpos lru)
{
	prt_printf(out, "%llu:%llu -> %llu:%llu",
		   lru_pos_id(lru),
		   lru_pos_time(lru),
		   u64_to_bucket(lru.offset).inode,
		   u64_to_bucket(lru.offset).offset);
}

/*
 * lru entries are presence-only: set or clear a bit in the lru btree via the
 * btree write buffer. A time of 0 means "not on an lru", so nothing is written.
 */
static int __bch2_lru_set(struct btree_trans *trans, u16 lru_id,
			  u64 dev_bucket, u64 time, bool set)
{
	return time
		? bch2_btree_bit_mod_buffered(trans, BTREE_ID_lru,
					      lru_pos(lru_id, dev_bucket, time), set)
		: 0;
}

int bch2_lru_del(struct btree_trans *trans, u16 lru_id, u64 dev_bucket, u64 time)
{
	return __bch2_lru_set(trans, lru_id, dev_bucket, time, KEY_TYPE_deleted);
}

int bch2_lru_set(struct btree_trans *trans, u16 lru_id, u64 dev_bucket, u64 time)
{
	return __bch2_lru_set(trans, lru_id, dev_bucket, time, KEY_TYPE_set);
}

int __bch2_lru_change(struct btree_trans *trans,
		      u16 lru_id, u64 dev_bucket,
		      u64 old_time, u64 new_time)
{
	if (old_time == new_time)
		return 0;

	return  bch2_lru_del(trans, lru_id, dev_bucket, old_time) ?:
		bch2_lru_set(trans, lru_id, dev_bucket, new_time);
}

static const char * const bch2_lru_types[] = {
#define x(n) #n,
	BCH_LRU_TYPES()
#undef x
	NULL
};

/*
 * Check, from the alloc/stripe side, that the lru entry referring_k expects at
 * lru_pos(lru_id, dev_bucket, time) exists; flush the btree write buffer first
 * so we don't act on a stale read, then recreate the entry if it's really
 * missing.
 */
int bch2_lru_check_set(struct btree_trans *trans,
		       u16 lru_id,
		       u64 dev_bucket,
		       u64 time,
		       struct bkey_s_c referring_k,
		       struct bkey_buf *last_flushed)
{
	struct bch_fs *c = trans->c;
	struct printbuf buf = PRINTBUF;
	struct btree_iter lru_iter;
	struct bkey_s_c lru_k =
		bch2_bkey_get_iter(trans, &lru_iter, BTREE_ID_lru,
				   lru_pos(lru_id, dev_bucket, time), 0);
	int ret = bkey_err(lru_k);
	if (ret)
		return ret;

	if (lru_k.k->type != KEY_TYPE_set) {
		ret = bch2_btree_write_buffer_maybe_flush(trans, referring_k, last_flushed);
		if (ret)
			goto err;

		if (fsck_err(trans, alloc_key_to_missing_lru_entry,
			     "missing %s lru entry\n%s",
			     bch2_lru_types[lru_type(lru_k)],
			     (bch2_bkey_val_to_text(&buf, c, referring_k), buf.buf))) {
			ret = bch2_lru_set(trans, lru_id, dev_bucket, time);
			if (ret)
				goto err;
		}
	}
err:
fsck_err:
	bch2_trans_iter_exit(trans, &lru_iter);
	printbuf_exit(&buf);
	return ret;
}

/* Map an lru key back to the alloc or stripes key it indexes: */
static struct bbpos lru_pos_to_bp(struct bkey_s_c lru_k)
{
	enum bch_lru_type type = lru_type(lru_k);

	switch (type) {
	case BCH_LRU_read:
	case BCH_LRU_fragmentation:
		return BBPOS(BTREE_ID_alloc, u64_to_bucket(lru_k.k->p.offset));
	case BCH_LRU_stripes:
		return BBPOS(BTREE_ID_stripes, POS(0, lru_k.k->p.offset));
	default:
		BUG();
	}
}

/* Compute the lru position (time field) that @k says it should be indexed at: */
static u64 bkey_lru_type_idx(struct bch_fs *c,
			     enum bch_lru_type type,
			     struct bkey_s_c k)
{
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a;

	switch (type) {
	case BCH_LRU_read:
		a = bch2_alloc_to_v4(k, &a_convert);
		return alloc_lru_idx_read(*a);
	case BCH_LRU_fragmentation: {
		a = bch2_alloc_to_v4(k, &a_convert);

		rcu_read_lock();
		struct bch_dev *ca = bch2_dev_rcu_noerror(c, k.k->p.inode);
		u64 idx = ca
			? alloc_lru_idx_fragmentation(*a, ca)
			: 0;
		rcu_read_unlock();
		return idx;
	}
	case BCH_LRU_stripes:
		return k.k->type == KEY_TYPE_stripe
			? stripe_lru_pos(bkey_s_c_to_stripe(k).v)
			: 0;
	default:
		BUG();
	}
}

/*
 * Check a single lru entry: its time field must match the index computed from
 * the key it points to; if the mismatch survives a write buffer flush, delete
 * the entry.
 */
static int bch2_check_lru_key(struct btree_trans *trans,
			      struct btree_iter *lru_iter,
			      struct bkey_s_c lru_k,
			      struct bkey_buf *last_flushed)
{
	struct bch_fs *c = trans->c;
	struct printbuf buf1 = PRINTBUF;
	struct printbuf buf2 = PRINTBUF;

	struct bbpos bp = lru_pos_to_bp(lru_k);

	struct btree_iter iter;
	struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, bp.btree, bp.pos, 0);
	int ret = bkey_err(k);
	if (ret)
		goto err;

	enum bch_lru_type type = lru_type(lru_k);
	u64 idx = bkey_lru_type_idx(c, type, k);

	if (lru_pos_time(lru_k.k->p) != idx) {
		ret = bch2_btree_write_buffer_maybe_flush(trans, lru_k, last_flushed);
		if (ret)
			goto err;

		if (fsck_err(trans, lru_entry_bad,
			     "incorrect lru entry: lru %s time %llu\n"
			     "%s\n"
			     "for %s",
			     bch2_lru_types[type],
			     lru_pos_time(lru_k.k->p),
			     (bch2_bkey_val_to_text(&buf1, c, lru_k), buf1.buf),
			     (bch2_bkey_val_to_text(&buf2, c, k), buf2.buf)))
			ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_lru, lru_iter->pos, false);
	}
err:
fsck_err:
	bch2_trans_iter_exit(trans, &iter);
	printbuf_exit(&buf2);
	printbuf_exit(&buf1);
	return ret;
}

/* fsck: check every entry in the lru btree: */
int bch2_check_lrus(struct bch_fs *c)
{
	struct bkey_buf last_flushed;

	bch2_bkey_buf_init(&last_flushed);
	bkey_init(&last_flushed.k->k);

	int ret = bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter,
				BTREE_ID_lru, POS_MIN, BTREE_ITER_prefetch, k,
				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
			bch2_check_lru_key(trans, &iter, k, &last_flushed)));

	bch2_bkey_buf_exit(&last_flushed, c);
	bch_err_fn(c, ret);
	return ret;
}