/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_SB_MEMBERS_H
#define _BCACHEFS_SB_MEMBERS_H

#include "darray.h"
#include "bkey_types.h"

extern char * const bch2_member_error_strs[];

static inline struct bch_member *
__bch2_members_v2_get_mut(struct bch_sb_field_members_v2 *mi, unsigned i)
{
	return (void *) mi->_members + (i * le16_to_cpu(mi->member_bytes));
}

int bch2_sb_members_v2_init(struct bch_fs *c);
int bch2_sb_members_cpy_v2_v1(struct bch_sb_handle *disk_sb);
struct bch_member *bch2_members_v2_get_mut(struct bch_sb *sb, int i);
struct bch_member bch2_sb_member_get(struct bch_sb *sb, int i);

static inline bool bch2_dev_is_online(struct bch_dev *ca)
{
	return !percpu_ref_is_zero(&ca->io_ref[READ]);
}

static inline struct bch_dev *bch2_dev_rcu(struct bch_fs *, unsigned);

static inline bool bch2_dev_idx_is_online(struct bch_fs *c, unsigned dev)
{
	rcu_read_lock();
	struct bch_dev *ca = bch2_dev_rcu(c, dev);
	bool ret = ca && bch2_dev_is_online(ca);
	rcu_read_unlock();

	return ret;
}

static inline bool bch2_dev_is_healthy(struct bch_dev *ca)
{
	return bch2_dev_is_online(ca) &&
		ca->mi.state != BCH_MEMBER_STATE_failed;
}

static inline unsigned dev_mask_nr(const struct bch_devs_mask *devs)
{
	return bitmap_weight(devs->d, BCH_SB_MEMBERS_MAX);
}

static inline bool bch2_dev_list_has_dev(struct bch_devs_list devs,
					 unsigned dev)
{
	darray_for_each(devs, i)
		if (*i == dev)
			return true;
	return false;
}

static inline void bch2_dev_list_drop_dev(struct bch_devs_list *devs,
					  unsigned dev)
{
	darray_for_each(*devs, i)
		if (*i == dev) {
			darray_remove_item(devs, i);
			return;
		}
}

static inline void bch2_dev_list_add_dev(struct bch_devs_list *devs,
					 unsigned dev)
{
	if (!bch2_dev_list_has_dev(*devs, dev)) {
		BUG_ON(devs->nr >= ARRAY_SIZE(devs->data));
		devs->data[devs->nr++] = dev;
	}
}

static inline struct bch_devs_list bch2_dev_list_single(unsigned dev)
{
	return (struct bch_devs_list) { .nr = 1, .data[0] = dev };
}

static inline struct bch_dev *__bch2_next_dev_idx(struct bch_fs *c, unsigned idx,
						  const struct bch_devs_mask *mask)
{
	struct bch_dev *ca = NULL;

	while ((idx = mask
		? find_next_bit(mask->d, c->sb.nr_devices, idx)
		: idx) < c->sb.nr_devices &&
	       !(ca = rcu_dereference_check(c->devs[idx],
					    lockdep_is_held(&c->state_lock))))
		idx++;

	return ca;
}

static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, struct bch_dev *ca,
					      const struct bch_devs_mask *mask)
{
	return __bch2_next_dev_idx(c, ca ? ca->dev_idx + 1 : 0, mask);
}

#define for_each_member_device_rcu(_c, _ca, _mask)			\
	for (struct bch_dev *_ca = NULL;				\
	     (_ca = __bch2_next_dev((_c), _ca, (_mask)));)

static inline void bch2_dev_get(struct bch_dev *ca)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	BUG_ON(atomic_long_inc_return(&ca->ref) <= 1L);
#else
	percpu_ref_get(&ca->ref);
#endif
}

static inline void __bch2_dev_put(struct bch_dev *ca)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	long r = atomic_long_dec_return(&ca->ref);
	if (r < (long) !ca->dying)
		panic("bch_dev->ref underflow, last put: %pS\n", (void *) ca->last_put);
	ca->last_put = _THIS_IP_;
	if (!r)
		complete(&ca->ref_completion);
#else
	percpu_ref_put(&ca->ref);
#endif
}

static inline void bch2_dev_put(struct bch_dev *ca)
{
	if (ca)
		__bch2_dev_put(ca);
}

static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, struct bch_dev *ca)
{
	rcu_read_lock();
	bch2_dev_put(ca);
	if ((ca = __bch2_next_dev(c, ca, NULL)))
		bch2_dev_get(ca);
	rcu_read_unlock();

	return ca;
}

/*
 * If you break early, you must drop your ref on the current device
 */
#define __for_each_member_device(_c, _ca)				\
	for (; (_ca = bch2_get_next_dev(_c, _ca));)

#define for_each_member_device(_c, _ca)					\
	for (struct bch_dev *_ca = NULL;				\
	     (_ca = bch2_get_next_dev(_c, _ca));)
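
/*
 * Usage sketch (illustrative, not part of the original header): iterating all
 * member devices with for_each_member_device(), which takes and drops device
 * refs as it advances.  The process_dev() helper is a hypothetical stand-in;
 * the bch2_dev_put() on early exit follows the rule documented above
 * __for_each_member_device().
 *
 *	for_each_member_device(c, ca) {
 *		int ret = process_dev(c, ca);
 *		if (ret) {
 *			bch2_dev_put(ca);	// leaving the loop early: drop our ref
 *			return ret;
 *		}
 *	}
 */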

static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
						       struct bch_dev *ca,
						       unsigned state_mask,
						       int rw)
{
	rcu_read_lock();
	if (ca)
		percpu_ref_put(&ca->io_ref[rw]);

	while ((ca = __bch2_next_dev(c, ca, NULL)) &&
	       (!((1 << ca->mi.state) & state_mask) ||
		!percpu_ref_tryget(&ca->io_ref[rw])))
		;
	rcu_read_unlock();

	return ca;
}

#define __for_each_online_member(_c, _ca, state_mask, rw)		\
	for (struct bch_dev *_ca = NULL;				\
	     (_ca = bch2_get_next_online_dev(_c, _ca, state_mask, rw));)

#define for_each_online_member(c, ca)					\
	__for_each_online_member(c, ca, ~0, READ)

#define for_each_rw_member(c, ca)					\
	__for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw), WRITE)

#define for_each_readable_member(c, ca)					\
	__for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw)|BIT(BCH_MEMBER_STATE_ro), READ)

static inline bool bch2_dev_exists(const struct bch_fs *c, unsigned dev)
{
	return dev < c->sb.nr_devices && c->devs[dev];
}

static inline bool bucket_valid(const struct bch_dev *ca, u64 b)
{
	return b - ca->mi.first_bucket < ca->mi.nbuckets_minus_first;
}

static inline struct bch_dev *bch2_dev_have_ref(const struct bch_fs *c, unsigned dev)
{
	EBUG_ON(!bch2_dev_exists(c, dev));

	return rcu_dereference_check(c->devs[dev], 1);
}

static inline struct bch_dev *bch2_dev_locked(struct bch_fs *c, unsigned dev)
{
	EBUG_ON(!bch2_dev_exists(c, dev));

	return rcu_dereference_protected(c->devs[dev],
					 lockdep_is_held(&c->sb_lock) ||
					 lockdep_is_held(&c->state_lock));
}

static inline struct bch_dev *bch2_dev_rcu_noerror(struct bch_fs *c, unsigned dev)
{
	return c && dev < c->sb.nr_devices
		? rcu_dereference(c->devs[dev])
		: NULL;
}

void bch2_dev_missing(struct bch_fs *, unsigned);

static inline struct bch_dev *bch2_dev_rcu(struct bch_fs *c, unsigned dev)
{
	struct bch_dev *ca = bch2_dev_rcu_noerror(c, dev);
	if (unlikely(!ca))
		bch2_dev_missing(c, dev);
	return ca;
}

static inline struct bch_dev *bch2_dev_tryget_noerror(struct bch_fs *c, unsigned dev)
{
	rcu_read_lock();
	struct bch_dev *ca = bch2_dev_rcu_noerror(c, dev);
	if (ca)
		bch2_dev_get(ca);
	rcu_read_unlock();
	return ca;
}

static inline struct bch_dev *bch2_dev_tryget(struct bch_fs *c, unsigned dev)
{
	struct bch_dev *ca = bch2_dev_tryget_noerror(c, dev);
	if (unlikely(!ca))
		bch2_dev_missing(c, dev);
	return ca;
}
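
/*
 * Usage sketch (an assumption, not part of the original header): looking up a
 * single device by index.  bch2_dev_tryget() takes a device ref that must be
 * released with bch2_dev_put(); the error value and the work done on ca are
 * purely illustrative.
 *
 *	struct bch_dev *ca = bch2_dev_tryget(c, dev_idx);
 *	if (!ca)
 *		return -ENOENT;		// assumed error handling
 *	... use ca->mi, ca->dev_idx, etc. ...
 *	bch2_dev_put(ca);
 */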

static inline struct bch_dev *bch2_dev_bucket_tryget_noerror(struct bch_fs *c, struct bpos bucket)
{
	struct bch_dev *ca = bch2_dev_tryget_noerror(c, bucket.inode);
	if (ca && unlikely(!bucket_valid(ca, bucket.offset))) {
		bch2_dev_put(ca);
		ca = NULL;
	}
	return ca;
}

void bch2_dev_bucket_missing(struct bch_dev *, u64);

static inline struct bch_dev *bch2_dev_bucket_tryget(struct bch_fs *c, struct bpos bucket)
{
	struct bch_dev *ca = bch2_dev_tryget(c, bucket.inode);
	if (ca && unlikely(!bucket_valid(ca, bucket.offset))) {
		bch2_dev_bucket_missing(ca, bucket.offset);
		bch2_dev_put(ca);
		ca = NULL;
	}
	return ca;
}

static inline struct bch_dev *bch2_dev_iterate_noerror(struct bch_fs *c, struct bch_dev *ca, unsigned dev_idx)
{
	if (ca && ca->dev_idx == dev_idx)
		return ca;
	bch2_dev_put(ca);
	return bch2_dev_tryget_noerror(c, dev_idx);
}

static inline struct bch_dev *bch2_dev_iterate(struct bch_fs *c, struct bch_dev *ca, unsigned dev_idx)
{
	if (ca && ca->dev_idx == dev_idx)
		return ca;
	bch2_dev_put(ca);
	return bch2_dev_tryget(c, dev_idx);
}

static inline struct bch_dev *bch2_dev_get_ioref(struct bch_fs *c, unsigned dev, int rw)
{
	might_sleep();

	rcu_read_lock();
	struct bch_dev *ca = bch2_dev_rcu(c, dev);
	if (ca && !percpu_ref_tryget(&ca->io_ref[rw]))
		ca = NULL;
	rcu_read_unlock();

	if (ca &&
	    (ca->mi.state == BCH_MEMBER_STATE_rw ||
	     (ca->mi.state == BCH_MEMBER_STATE_ro && rw == READ)))
		return ca;

	if (ca)
		percpu_ref_put(&ca->io_ref[rw]);
	return NULL;
}
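
/*
 * Usage sketch (illustrative, not part of the original header): taking an io
 * ref for a read.  On success the caller owns one ref on ca->io_ref[READ] and
 * must drop it with percpu_ref_put() once the I/O is done; the submit step and
 * error value are assumed placeholders.
 *
 *	struct bch_dev *ca = bch2_dev_get_ioref(c, dev_idx, READ);
 *	if (!ca)
 *		return -EIO;		// device offline or not readable
 *	... submit the read ...
 *	percpu_ref_put(&ca->io_ref[READ]);
 */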

/* XXX kill, move to struct bch_fs */
static inline struct bch_devs_mask bch2_online_devs(struct bch_fs *c)
{
	struct bch_devs_mask devs;

	memset(&devs, 0, sizeof(devs));
	for_each_online_member(c, ca)
		__set_bit(ca->dev_idx, devs.d);
	return devs;
}

extern const struct bch_sb_field_ops bch_sb_field_ops_members_v1;
extern const struct bch_sb_field_ops bch_sb_field_ops_members_v2;

static inline bool bch2_member_alive(struct bch_member *m)
{
	return !bch2_is_zero(&m->uuid, sizeof(m->uuid));
}

static inline bool bch2_member_exists(struct bch_sb *sb, unsigned dev)
{
	if (dev < sb->nr_devices) {
		struct bch_member m = bch2_sb_member_get(sb, dev);
		return bch2_member_alive(&m);
	}
	return false;
}

unsigned bch2_sb_nr_devices(const struct bch_sb *);

static inline struct bch_member_cpu bch2_mi_to_cpu(struct bch_member *mi)
{
	return (struct bch_member_cpu) {
		.nbuckets	= le64_to_cpu(mi->nbuckets),
		.nbuckets_minus_first = le64_to_cpu(mi->nbuckets) -
			le16_to_cpu(mi->first_bucket),
		.first_bucket	= le16_to_cpu(mi->first_bucket),
		.bucket_size	= le16_to_cpu(mi->bucket_size),
		.group		= BCH_MEMBER_GROUP(mi),
		.state		= BCH_MEMBER_STATE(mi),
		.discard	= BCH_MEMBER_DISCARD(mi),
		.data_allowed	= BCH_MEMBER_DATA_ALLOWED(mi),
		.durability	= BCH_MEMBER_DURABILITY(mi)
			? BCH_MEMBER_DURABILITY(mi) - 1
			: 1,
		.freespace_initialized = BCH_MEMBER_FREESPACE_INITIALIZED(mi),
		.valid		= bch2_member_alive(mi),
		.btree_bitmap_shift	= mi->btree_bitmap_shift,
		.btree_allocated_bitmap = le64_to_cpu(mi->btree_allocated_bitmap),
	};
}

void bch2_sb_members_from_cpu(struct bch_fs *);

void bch2_dev_io_errors_to_text(struct printbuf *, struct bch_dev *);
void bch2_dev_errors_reset(struct bch_dev *);

static inline bool bch2_dev_btree_bitmap_marked_sectors(struct bch_dev *ca, u64 start, unsigned sectors)
{
	u64 end = start + sectors;

	if (end > 64ULL << ca->mi.btree_bitmap_shift)
		return false;

	for (unsigned bit = start >> ca->mi.btree_bitmap_shift;
	     (u64) bit << ca->mi.btree_bitmap_shift < end;
	     bit++)
		if (!(ca->mi.btree_allocated_bitmap & BIT_ULL(bit)))
			return false;
	return true;
}

bool bch2_dev_btree_bitmap_marked(struct bch_fs *, struct bkey_s_c);
void bch2_dev_btree_bitmap_mark(struct bch_fs *, struct bkey_s_c);

int bch2_sb_member_alloc(struct bch_fs *);

#endif /* _BCACHEFS_SB_MEMBERS_H */