/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#ifndef _BUCKETS_H
#define _BUCKETS_H

#include "buckets_types.h"
#include "extents.h"
#include "sb-members.h"

static inline size_t sector_to_bucket(const struct bch_dev *ca, sector_t s)
{
	return div_u64(s, ca->mi.bucket_size);
}

static inline sector_t bucket_to_sector(const struct bch_dev *ca, size_t b)
{
	return ((sector_t) b) * ca->mi.bucket_size;
}

static inline sector_t bucket_remainder(const struct bch_dev *ca, sector_t s)
{
	u32 remainder;

	div_u64_rem(s, ca->mi.bucket_size, &remainder);
	return remainder;
}

static inline size_t sector_to_bucket_and_offset(const struct bch_dev *ca, sector_t s,
						 u32 *offset)
{
	return div_u64_rem(s, ca->mi.bucket_size, offset);
}

#define for_each_bucket(_b, _buckets)				\
	for (_b = (_buckets)->b + (_buckets)->first_bucket;	\
	     _b < (_buckets)->b + (_buckets)->nbuckets; _b++)
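
/*
 * Usage sketch (illustrative only): for_each_bucket() walks every valid
 * in-memory bucket in a bucket_array, e.g.:
 *
 *	struct bucket_array *buckets = gc_bucket_array(ca);
 *	struct bucket *b;
 *
 *	for_each_bucket(b, buckets)
 *		pr_info("bucket %zu\n", (size_t) (b - buckets->b));
 */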

/*
 * Ugly hack alert:
 *
 * We need to cram a spinlock in a single byte, because that's what we have left
 * in struct bucket, and we care about the size of these - during fsck, we need
 * in-memory state for every single bucket on every device.
 *
 * We used to do
 *	while (xchg(&b->lock, 1)) cpu_relax();
 * but it turns out not all architectures support xchg on a single byte.
 *
 * So now we use bit_spin_lock(), with fun games since we can't burn a whole
 * ulong for this - we just need to make sure the lock bit always ends up in the
 * first byte.
 */

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define BUCKET_LOCK_BITNR	0
#else
#define BUCKET_LOCK_BITNR	(BITS_PER_LONG - 1)
#endif
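
/*
 * Worked example (illustrative): with BITS_PER_LONG == 64, bit 0 of a ulong
 * lives in the lowest-addressed byte on little endian, while bit 63 (the most
 * significant bit) lives in the lowest-addressed byte on big endian. Either
 * way, 1UL << BUCKET_LOCK_BITNR sets a bit in the first byte of the ulong -
 * which is exactly what the BUILD_BUG_ON() in bucket_unlock() verifies via
 * union ulong_byte_assert.
 */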

union ulong_byte_assert {
	ulong	ulong;
	u8	byte;
};

static inline void bucket_unlock(struct bucket *b)
{
	BUILD_BUG_ON(!((union ulong_byte_assert) { .ulong = 1UL << BUCKET_LOCK_BITNR }).byte);

	clear_bit_unlock(BUCKET_LOCK_BITNR, (void *) &b->lock);
	wake_up_bit((void *) &b->lock, BUCKET_LOCK_BITNR);
}

static inline void bucket_lock(struct bucket *b)
{
	wait_on_bit_lock((void *) &b->lock, BUCKET_LOCK_BITNR,
			 TASK_UNINTERRUPTIBLE);
}
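
/*
 * Usage sketch (illustrative only - the update in the middle is hypothetical):
 *
 *	bucket_lock(b);
 *	... update in-memory bucket state ...
 *	bucket_unlock(b);
 *
 * Note that bucket_lock() waits in TASK_UNINTERRUPTIBLE rather than spinning,
 * so it must not be called from atomic context.
 */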

static inline struct bucket_array *gc_bucket_array(struct bch_dev *ca)
{
	return rcu_dereference_check(ca->buckets_gc,
				     !ca->fs ||
				     percpu_rwsem_is_held(&ca->fs->mark_lock) ||
				     lockdep_is_held(&ca->fs->gc_lock) ||
				     lockdep_is_held(&ca->bucket_lock));
}

static inline struct bucket *gc_bucket(struct bch_dev *ca, size_t b)
{
	struct bucket_array *buckets = gc_bucket_array(ca);

	BUG_ON(b < buckets->first_bucket || b >= buckets->nbuckets);
	return buckets->b + b;
}

static inline struct bucket_gens *bucket_gens(struct bch_dev *ca)
{
	return rcu_dereference_check(ca->bucket_gens,
				     !ca->fs ||
				     percpu_rwsem_is_held(&ca->fs->mark_lock) ||
				     lockdep_is_held(&ca->fs->gc_lock) ||
				     lockdep_is_held(&ca->bucket_lock));
}

static inline u8 *bucket_gen(struct bch_dev *ca, size_t b)
{
	struct bucket_gens *gens = bucket_gens(ca);

	BUG_ON(b < gens->first_bucket || b >= gens->nbuckets);
	return gens->b + b;
}

static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
				   const struct bch_extent_ptr *ptr)
{
	return sector_to_bucket(ca, ptr->offset);
}

static inline struct bpos PTR_BUCKET_POS(const struct bch_fs *c,
					 const struct bch_extent_ptr *ptr)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);

	return POS(ptr->dev, PTR_BUCKET_NR(ca, ptr));
}

static inline struct bpos PTR_BUCKET_POS_OFFSET(const struct bch_fs *c,
						const struct bch_extent_ptr *ptr,
						u32 *bucket_offset)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);

	return POS(ptr->dev, sector_to_bucket_and_offset(ca, ptr->offset, bucket_offset));
}

static inline struct bucket *PTR_GC_BUCKET(struct bch_dev *ca,
					   const struct bch_extent_ptr *ptr)
{
	return gc_bucket(ca, PTR_BUCKET_NR(ca, ptr));
}

static inline enum bch_data_type ptr_data_type(const struct bkey *k,
					       const struct bch_extent_ptr *ptr)
{
	if (bkey_is_btree_ptr(k))
		return BCH_DATA_btree;

	return ptr->cached ? BCH_DATA_cached : BCH_DATA_user;
}

static inline s64 ptr_disk_sectors(s64 sectors, struct extent_ptr_decoded p)
{
	EBUG_ON(sectors < 0);

	return crc_is_compressed(p.crc)
		? DIV_ROUND_UP_ULL(sectors * p.crc.compressed_size,
				   p.crc.uncompressed_size)
		: sectors;
}

static inline int gen_cmp(u8 a, u8 b)
{
	return (s8) (a - b);
}

static inline int gen_after(u8 a, u8 b)
{
	int r = gen_cmp(a, b);

	return r > 0 ? r : 0;
}
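
/*
 * Worked example (illustrative): bucket generations are 8 bits and wrap, so
 * comparisons go through a signed 8-bit subtraction. With a == 2 and b == 254:
 *
 *	gen_cmp(2, 254) == (s8) (2 - 254) == (s8) 4 == 4
 *
 * so generation 2 compares as 4 generations after 254 despite being
 * numerically smaller. This holds as long as the two generations are within
 * 127 of each other.
 */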

/**
 * ptr_stale() - check if a pointer points into a bucket that has been
 * invalidated.
 * @ca:		device the pointer points to
 * @ptr:	extent pointer to check
 *
 * Return: number of generations the bucket has advanced past the pointer's
 * generation, or 0 if the pointer is not stale.
 */
static inline u8 ptr_stale(struct bch_dev *ca,
			   const struct bch_extent_ptr *ptr)
{
	u8 ret;

	rcu_read_lock();
	ret = gen_after(*bucket_gen(ca, PTR_BUCKET_NR(ca, ptr)), ptr->gen);
	rcu_read_unlock();

	return ret;
}
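
/*
 * Usage sketch (illustrative only): reusing a bucket bumps its generation, so
 * a read path might bail out on pointers into reused buckets like:
 *
 *	if (ptr_stale(ca, ptr))
 *		return -ENOENT;	(hypothetical caller policy)
 */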

/* Device usage: */

void bch2_dev_usage_read_fast(struct bch_dev *, struct bch_dev_usage *);
static inline struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
{
	struct bch_dev_usage ret;

	bch2_dev_usage_read_fast(ca, &ret);
	return ret;
}

void bch2_dev_usage_init(struct bch_dev *);
void bch2_dev_usage_to_text(struct printbuf *, struct bch_dev_usage *);

static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum bch_watermark watermark)
{
	s64 reserved = 0;

	switch (watermark) {
	case BCH_WATERMARK_NR:
		BUG();
	case BCH_WATERMARK_stripe:
		reserved += ca->mi.nbuckets >> 6;
		fallthrough;
	case BCH_WATERMARK_normal:
		reserved += ca->mi.nbuckets >> 6;
		fallthrough;
	case BCH_WATERMARK_copygc:
		reserved += ca->nr_btree_reserve;
		fallthrough;
	case BCH_WATERMARK_btree:
		reserved += ca->nr_btree_reserve;
		fallthrough;
	case BCH_WATERMARK_btree_copygc:
	case BCH_WATERMARK_reclaim:
		break;
	}

	return reserved;
}
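
/*
 * Worked example (illustrative): the fallthroughs accumulate, so each
 * watermark reserves everything the stricter watermarks below it do. For
 * BCH_WATERMARK_stripe on a device with 65536 buckets and a hypothetical
 * nr_btree_reserve of 512:
 *
 *	reserved = 65536/64 + 65536/64 + 512 + 512 = 3072 buckets
 *
 * while BCH_WATERMARK_reclaim reserves nothing, so reclaim can always make
 * progress.
 */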

static inline u64 dev_buckets_free(struct bch_dev *ca,
				   struct bch_dev_usage usage,
				   enum bch_watermark watermark)
{
	return max_t(s64, 0,
		     usage.d[BCH_DATA_free].buckets -
		     ca->nr_open_buckets -
		     bch2_dev_buckets_reserved(ca, watermark));
}

static inline u64 __dev_buckets_available(struct bch_dev *ca,
					  struct bch_dev_usage usage,
					  enum bch_watermark watermark)
{
	return max_t(s64, 0,
		       usage.d[BCH_DATA_free].buckets
		     + usage.d[BCH_DATA_cached].buckets
		     + usage.d[BCH_DATA_need_gc_gens].buckets
		     + usage.d[BCH_DATA_need_discard].buckets
		     - ca->nr_open_buckets
		     - bch2_dev_buckets_reserved(ca, watermark));
}

static inline u64 dev_buckets_available(struct bch_dev *ca,
					enum bch_watermark watermark)
{
	return __dev_buckets_available(ca, bch2_dev_usage_read(ca), watermark);
}

/* Filesystem usage: */

static inline unsigned __fs_usage_u64s(unsigned nr_replicas)
{
	return sizeof(struct bch_fs_usage) / sizeof(u64) + nr_replicas;
}

static inline unsigned fs_usage_u64s(struct bch_fs *c)
{
	return __fs_usage_u64s(READ_ONCE(c->replicas.nr));
}

static inline unsigned __fs_usage_online_u64s(unsigned nr_replicas)
{
	return sizeof(struct bch_fs_usage_online) / sizeof(u64) + nr_replicas;
}

static inline unsigned fs_usage_online_u64s(struct bch_fs *c)
{
	return __fs_usage_online_u64s(READ_ONCE(c->replicas.nr));
}

static inline unsigned dev_usage_u64s(void)
{
	return sizeof(struct bch_dev_usage) / sizeof(u64);
}

u64 bch2_fs_usage_read_one(struct bch_fs *, u64 *);

struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *);

void bch2_fs_usage_acc_to_base(struct bch_fs *, unsigned);

void bch2_fs_usage_to_text(struct printbuf *,
			   struct bch_fs *, struct bch_fs_usage_online *);

u64 bch2_fs_sectors_used(struct bch_fs *, struct bch_fs_usage_online *);

struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *);

void bch2_dev_usage_update(struct bch_fs *, struct bch_dev *,
			   const struct bch_alloc_v4 *,
			   const struct bch_alloc_v4 *, u64, bool);
void bch2_dev_usage_update_m(struct bch_fs *, struct bch_dev *,
			     struct bucket *, struct bucket *);

/* key/bucket marking: */

static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c,
						unsigned journal_seq,
						bool gc)
{
	percpu_rwsem_assert_held(&c->mark_lock);
	BUG_ON(!gc && !journal_seq);

	return this_cpu_ptr(gc
			    ? c->usage_gc
			    : c->usage[journal_seq & JOURNAL_BUF_MASK]);
}

int bch2_update_replicas(struct bch_fs *, struct bkey_s_c,
			 struct bch_replicas_entry_v1 *, s64,
			 unsigned, bool);
int bch2_update_replicas_list(struct btree_trans *,
			      struct bch_replicas_entry_v1 *, s64);
int bch2_update_cached_sectors_list(struct btree_trans *, unsigned, s64);
int bch2_replicas_deltas_realloc(struct btree_trans *, unsigned);

void bch2_fs_usage_initialize(struct bch_fs *);

int bch2_check_bucket_ref(struct btree_trans *, struct bkey_s_c,
			  const struct bch_extent_ptr *,
			  s64, enum bch_data_type, u8, u8, u32);

int bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
			      size_t, enum bch_data_type, unsigned,
			      struct gc_pos, unsigned);

int bch2_trigger_extent(struct btree_trans *, enum btree_id, unsigned,
			struct bkey_s_c, struct bkey_s, unsigned);
int bch2_trigger_reservation(struct btree_trans *, enum btree_id, unsigned,
			     struct bkey_s_c, struct bkey_s, unsigned);

#define trigger_run_overwrite_then_insert(_fn, _trans, _btree_id, _level, _old, _new, _flags)	\
({												\
	int ret = 0;										\
												\
	if (_old.k->type)									\
		ret = _fn(_trans, _btree_id, _level, _old, _flags & ~BTREE_TRIGGER_INSERT);	\
	if (!ret && _new.k->type)								\
		ret = _fn(_trans, _btree_id, _level, _new.s_c, _flags & ~BTREE_TRIGGER_OVERWRITE);\
	ret;											\
})
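
/*
 * Illustrative note: a single-key trigger helper (hypothetical
 * __my_trigger(trans, btree_id, level, k, flags)) is run once for the key
 * being overwritten, with BTREE_TRIGGER_INSERT masked off, and once for the
 * key being inserted, with BTREE_TRIGGER_OVERWRITE masked off - so each call
 * sees only the flag describing its role. A key with type 0 is skipped, and
 * the insert side is skipped if the overwrite side returned an error.
 */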

void bch2_trans_account_disk_usage_change(struct btree_trans *);

void bch2_trans_fs_usage_revert(struct btree_trans *, struct replicas_delta_list *);
int bch2_trans_fs_usage_apply(struct btree_trans *, struct replicas_delta_list *);

int bch2_trans_mark_metadata_bucket(struct btree_trans *, struct bch_dev *,
				    size_t, enum bch_data_type, unsigned);
int bch2_trans_mark_dev_sb(struct bch_fs *, struct bch_dev *);
int bch2_trans_mark_dev_sbs(struct bch_fs *);

static inline bool is_superblock_bucket(struct bch_dev *ca, u64 b)
{
	struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
	u64 b_offset	= bucket_to_sector(ca, b);
	u64 b_end	= bucket_to_sector(ca, b + 1);
	unsigned i;

	if (!b)
		return true;

	for (i = 0; i < layout->nr_superblocks; i++) {
		u64 offset = le64_to_cpu(layout->sb_offset[i]);
		u64 end = offset + (1 << layout->sb_max_size_bits);

		if (!(offset >= b_end || end <= b_offset))
			return true;
	}

	return false;
}
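
/*
 * Note (illustrative): !(offset >= b_end || end <= b_offset) is the standard
 * half-open interval overlap test - superblock [offset, end) overlaps bucket
 * [b_offset, b_end) unless it lies entirely before or entirely after it.
 * Bucket 0 is always treated as a superblock bucket.
 */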

static inline const char *bch2_data_type_str(enum bch_data_type type)
{
	return type < BCH_DATA_NR
		? __bch2_data_types[type]
		: "(invalid data type)";
}

static inline void bch2_prt_data_type(struct printbuf *out, enum bch_data_type type)
{
	if (type < BCH_DATA_NR)
		prt_str(out, __bch2_data_types[type]);
	else
		prt_printf(out, "(invalid data type %u)", type);
}

/* disk reservations: */

static inline void bch2_disk_reservation_put(struct bch_fs *c,
					     struct disk_reservation *res)
{
	if (res->sectors) {
		this_cpu_sub(*c->online_reserved, res->sectors);
		res->sectors = 0;
	}
}

#define BCH_DISK_RESERVATION_NOFAIL	(1 << 0)

int __bch2_disk_reservation_add(struct bch_fs *,
				struct disk_reservation *,
				u64, int);

static inline int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
					    u64 sectors, int flags)
{
#ifdef __KERNEL__
	u64 old, new;

	do {
		old = this_cpu_read(c->pcpu->sectors_available);
		if (sectors > old)
			return __bch2_disk_reservation_add(c, res, sectors, flags);

		new = old - sectors;
	} while (this_cpu_cmpxchg(c->pcpu->sectors_available, old, new) != old);

	this_cpu_add(*c->online_reserved, sectors);
	res->sectors += sectors;
	return 0;
#else
	return __bch2_disk_reservation_add(c, res, sectors, flags);
#endif
}

static inline struct disk_reservation
bch2_disk_reservation_init(struct bch_fs *c, unsigned nr_replicas)
{
	return (struct disk_reservation) {
		.sectors	= 0,
#if 0
		/* not used yet: */
		.gen		= c->capacity_gen,
#endif
		.nr_replicas	= nr_replicas,
	};
}

static inline int bch2_disk_reservation_get(struct bch_fs *c,
					    struct disk_reservation *res,
					    u64 sectors, unsigned nr_replicas,
					    int flags)
{
	*res = bch2_disk_reservation_init(c, nr_replicas);

	return bch2_disk_reservation_add(c, res, sectors * nr_replicas, flags);
}
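
/*
 * Usage sketch (illustrative only - error handling elided): a reservation is
 * taken before committing to a write and released once the space is actually
 * allocated or the write is abandoned:
 *
 *	struct disk_reservation res;
 *
 *	if (bch2_disk_reservation_get(c, &res, sectors, nr_replicas, 0))
 *		return -ENOSPC;	(hypothetical caller policy)
 *	...
 *	bch2_disk_reservation_put(c, &res);
 *
 * The fast path in bch2_disk_reservation_add() claims sectors from a per-cpu
 * pool with a cmpxchg loop; only when the local pool runs dry does it fall
 * back to __bch2_disk_reservation_add().
 */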

#define RESERVE_FACTOR	6

static inline u64 avail_factor(u64 r)
{
	return div_u64(r << RESERVE_FACTOR, (1 << RESERVE_FACTOR) + 1);
}
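
/*
 * Worked example (illustrative): with RESERVE_FACTOR == 6, avail_factor(r)
 * computes r * 64 / 65, i.e. it reports roughly 98.5% of the raw capacity,
 * holding back about 1/65th as slack. For r == 1000000 sectors:
 *
 *	avail_factor(1000000) == 64000000 / 65 == 984615
 */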

int bch2_dev_buckets_resize(struct bch_fs *, struct bch_dev *, u64);
void bch2_dev_buckets_free(struct bch_dev *);
int bch2_dev_buckets_alloc(struct bch_fs *, struct bch_dev *);

#endif /* _BUCKETS_H */