/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#ifndef _BUCKETS_H
#define _BUCKETS_H

#include "buckets_types.h"
#include "extents.h"
#include "sb-members.h"

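/*
 * Sector <-> bucket conversion helpers. Buckets are fixed-size runs of
 * sectors; ca->mi.bucket_size is the bucket size in sectors.
 *
 * Illustrative example (values assumed, not from this file): with
 * bucket_size == 1024, sector 3000 maps to bucket 2 with offset 952, and
 * bucket 2 starts back at sector 2048.
 */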
static inline u64 sector_to_bucket(const struct bch_dev *ca, sector_t s)
{
	return div_u64(s, ca->mi.bucket_size);
}

static inline sector_t bucket_to_sector(const struct bch_dev *ca, size_t b)
{
	return ((sector_t) b) * ca->mi.bucket_size;
}

static inline sector_t bucket_remainder(const struct bch_dev *ca, sector_t s)
{
	u32 remainder;

	div_u64_rem(s, ca->mi.bucket_size, &remainder);
	return remainder;
}

static inline u64 sector_to_bucket_and_offset(const struct bch_dev *ca, sector_t s, u32 *offset)
{
	return div_u64_rem(s, ca->mi.bucket_size, offset);
}

#define for_each_bucket(_b, _buckets)				\
	for (_b = (_buckets)->b + (_buckets)->first_bucket;	\
	     _b < (_buckets)->b + (_buckets)->nbuckets; _b++)

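/*
 * Per-bucket lock, implemented as a bit lock on bit BUCKET_LOCK_BITNR of
 * b->lock: bucket_lock() sleeps uninterruptibly until it wins the bit,
 * bucket_unlock() releases it and wakes any waiters.
 *
 * Sketch of the usage pattern (illustrative only):
 *
 *	bucket_lock(b);
 *	... modify bucket fields ...
 *	bucket_unlock(b);
 */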
static inline void bucket_unlock(struct bucket *b)
{
	BUILD_BUG_ON(!((union ulong_byte_assert) { .ulong = 1UL << BUCKET_LOCK_BITNR }).byte);

	clear_bit_unlock(BUCKET_LOCK_BITNR, (void *) &b->lock);
	smp_mb__after_atomic();
	wake_up_bit((void *) &b->lock, BUCKET_LOCK_BITNR);
}

static inline void bucket_lock(struct bucket *b)
{
	wait_on_bit_lock((void *) &b->lock, BUCKET_LOCK_BITNR,
			 TASK_UNINTERRUPTIBLE);
}

static inline struct bucket *gc_bucket(struct bch_dev *ca, size_t b)
{
	return bucket_valid(ca, b)
		? genradix_ptr(&ca->buckets_gc, b)
		: NULL;
}

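/*
 * The bucket generations array is RCU protected: callers must hold the RCU
 * read lock (or ca->fs->state_lock) across bucket_gens()/bucket_gen().
 * Out-of-range bucket numbers yield NULL from bucket_gen(), and -1 from
 * bucket_gen_get_rcu().
 *
 * Sketch of a caller-side lookup (illustrative only):
 *
 *	rcu_read_lock();
 *	int gen = bucket_gen_get_rcu(ca, b);
 *	rcu_read_unlock();
 *	if (gen < 0)
 *		... bucket number was out of range ...
 */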
static inline struct bucket_gens *bucket_gens(struct bch_dev *ca)
{
	return rcu_dereference_check(ca->bucket_gens,
				     lockdep_is_held(&ca->fs->state_lock));
}

static inline u8 *bucket_gen(struct bch_dev *ca, size_t b)
{
	struct bucket_gens *gens = bucket_gens(ca);

	if (b - gens->first_bucket >= gens->nbuckets_minus_first)
		return NULL;
	return gens->b + b;
}

static inline int bucket_gen_get_rcu(struct bch_dev *ca, size_t b)
{
	u8 *gen = bucket_gen(ca, b);
	return gen ? *gen : -1;
}

static inline int bucket_gen_get(struct bch_dev *ca, size_t b)
{
	rcu_read_lock();
	int ret = bucket_gen_get_rcu(ca, b);
	rcu_read_unlock();
	return ret;
}

static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
				   const struct bch_extent_ptr *ptr)
{
	return sector_to_bucket(ca, ptr->offset);
}

static inline struct bpos PTR_BUCKET_POS(const struct bch_dev *ca,
					 const struct bch_extent_ptr *ptr)
{
	return POS(ptr->dev, PTR_BUCKET_NR(ca, ptr));
}

static inline struct bpos PTR_BUCKET_POS_OFFSET(const struct bch_dev *ca,
						const struct bch_extent_ptr *ptr,
						u32 *bucket_offset)
{
	return POS(ptr->dev, sector_to_bucket_and_offset(ca, ptr->offset, bucket_offset));
}

static inline struct bucket *PTR_GC_BUCKET(struct bch_dev *ca,
					   const struct bch_extent_ptr *ptr)
{
	return gc_bucket(ca, PTR_BUCKET_NR(ca, ptr));
}

static inline enum bch_data_type ptr_data_type(const struct bkey *k,
					       const struct bch_extent_ptr *ptr)
{
	if (bkey_is_btree_ptr(k))
		return BCH_DATA_btree;

	return ptr->cached ? BCH_DATA_cached : BCH_DATA_user;
}

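/*
 * For compressed extents, disk usage is the live sector count scaled by the
 * compression ratio, rounded up. Illustrative example (values assumed): 8
 * live sectors of an extent with compressed_size 4 and uncompressed_size 16
 * occupy DIV_ROUND_UP(8 * 4, 16) == 2 sectors on disk.
 */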
static inline s64 ptr_disk_sectors(s64 sectors, struct extent_ptr_decoded p)
{
	EBUG_ON(sectors < 0);

	return crc_is_compressed(p.crc)
		? DIV_ROUND_UP_ULL(sectors * p.crc.compressed_size,
				   p.crc.uncompressed_size)
		: sectors;
}

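/*
 * Bucket generations are 8 bits and wrap: gen_cmp() subtracts in s8 so
 * comparisons stay correct across wraparound, e.g. gen_cmp(1, 255) == 2.
 * gen_after() clamps to zero, returning how many generations a is ahead of b.
 */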
static inline int gen_cmp(u8 a, u8 b)
{
	return (s8) (a - b);
}

static inline int gen_after(u8 a, u8 b)
{
	return max(0, gen_cmp(a, b));
}

static inline int dev_ptr_stale_rcu(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
{
	int gen = bucket_gen_get_rcu(ca, PTR_BUCKET_NR(ca, ptr));
	return gen < 0 ? gen : gen_after(gen, ptr->gen);
}

/**
 * dev_ptr_stale() - check if a pointer points into a bucket that has been
 * invalidated.
 * @ca:		device the pointer refers to
 * @ptr:	extent pointer to check
 *
 * Return: positive (the number of generations the bucket is ahead of the
 * pointer) if the pointer is stale, 0 if not, negative if the bucket number
 * was out of range.
 */
static inline int dev_ptr_stale(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
{
	rcu_read_lock();
	int ret = dev_ptr_stale_rcu(ca, ptr);
	rcu_read_unlock();
	return ret;
}

/* Device usage: */

void bch2_dev_usage_read_fast(struct bch_dev *, struct bch_dev_usage *);
static inline struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
{
	struct bch_dev_usage ret;

	bch2_dev_usage_read_fast(ca, &ret);
	return ret;
}

void bch2_dev_usage_full_read_fast(struct bch_dev *, struct bch_dev_usage_full *);
static inline struct bch_dev_usage_full bch2_dev_usage_full_read(struct bch_dev *ca)
{
	struct bch_dev_usage_full ret;

	bch2_dev_usage_full_read_fast(ca, &ret);
	return ret;
}

void bch2_dev_usage_to_text(struct printbuf *, struct bch_dev *, struct bch_dev_usage_full *);

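/*
 * Each watermark falls through into the reserves of every stricter watermark
 * below it in the switch, so lower-priority allocations see a larger reserve:
 * e.g. BCH_WATERMARK_stripe reserves nbuckets/64 * 2 + nr_btree_reserve * 2
 * buckets, while BCH_WATERMARK_interior_updates reserves nothing.
 */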
static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum bch_watermark watermark)
{
	s64 reserved = 0;

	switch (watermark) {
	case BCH_WATERMARK_NR:
		BUG();
	case BCH_WATERMARK_stripe:
		reserved += ca->mi.nbuckets >> 6;
		fallthrough;
	case BCH_WATERMARK_normal:
		reserved += ca->mi.nbuckets >> 6;
		fallthrough;
	case BCH_WATERMARK_copygc:
		reserved += ca->nr_btree_reserve;
		fallthrough;
	case BCH_WATERMARK_btree:
		reserved += ca->nr_btree_reserve;
		fallthrough;
	case BCH_WATERMARK_btree_copygc:
	case BCH_WATERMARK_reclaim:
	case BCH_WATERMARK_interior_updates:
		break;
	}

	return reserved;
}

static inline u64 dev_buckets_free(struct bch_dev *ca,
				   struct bch_dev_usage usage,
				   enum bch_watermark watermark)
{
	return max_t(s64, 0,
		     usage.buckets[BCH_DATA_free] -
		     ca->nr_open_buckets -
		     bch2_dev_buckets_reserved(ca, watermark));
}

static inline u64 __dev_buckets_available(struct bch_dev *ca,
					  struct bch_dev_usage usage,
					  enum bch_watermark watermark)
{
	return max_t(s64, 0,
		       usage.buckets[BCH_DATA_free]
		     + usage.buckets[BCH_DATA_cached]
		     + usage.buckets[BCH_DATA_need_gc_gens]
		     + usage.buckets[BCH_DATA_need_discard]
		     - ca->nr_open_buckets
		     - bch2_dev_buckets_reserved(ca, watermark));
}

static inline u64 dev_buckets_available(struct bch_dev *ca,
					enum bch_watermark watermark)
{
	return __dev_buckets_available(ca, bch2_dev_usage_read(ca), watermark);
}

/* Filesystem usage: */

struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *);

int bch2_bucket_ref_update(struct btree_trans *, struct bch_dev *,
			   struct bkey_s_c, const struct bch_extent_ptr *,
			   s64, enum bch_data_type, u8, u8, u32 *);

int bch2_check_fix_ptrs(struct btree_trans *,
			enum btree_id, unsigned, struct bkey_s_c,
			enum btree_iter_update_trigger_flags);

int bch2_trigger_extent(struct btree_trans *, enum btree_id, unsigned,
			struct bkey_s_c, struct bkey_s,
			enum btree_iter_update_trigger_flags);
int bch2_trigger_reservation(struct btree_trans *, enum btree_id, unsigned,
			     struct bkey_s_c, struct bkey_s,
			     enum btree_iter_update_trigger_flags);

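/*
 * Run a trigger as separate overwrite and insert passes: first on the old key
 * with BTREE_TRIGGER_insert masked off, then on the new key with
 * BTREE_TRIGGER_overwrite masked off, stopping if the first pass fails.
 */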
#define trigger_run_overwrite_then_insert(_fn, _trans, _btree_id, _level, _old, _new, _flags)\
({												\
	int ret = 0;										\
												\
	if (_old.k->type)									\
		ret = _fn(_trans, _btree_id, _level, _old, _flags & ~BTREE_TRIGGER_insert);	\
	if (!ret && _new.k->type)								\
		ret = _fn(_trans, _btree_id, _level, _new.s_c, _flags & ~BTREE_TRIGGER_overwrite);\
	ret;											\
})

void bch2_trans_account_disk_usage_change(struct btree_trans *);

int bch2_trans_mark_metadata_bucket(struct btree_trans *, struct bch_dev *, u64,
				    enum bch_data_type, unsigned,
				    enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sb(struct bch_fs *, struct bch_dev *,
			   enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sbs_flags(struct bch_fs *,
				  enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sbs(struct bch_fs *);

bool bch2_is_superblock_bucket(struct bch_dev *, u64);

static inline const char *bch2_data_type_str(enum bch_data_type type)
{
	return type < BCH_DATA_NR
		? __bch2_data_types[type]
		: "(invalid data type)";
}

/* disk reservations: */

static inline void bch2_disk_reservation_put(struct bch_fs *c,
					     struct disk_reservation *res)
{
	if (res->sectors) {
		this_cpu_sub(*c->online_reserved, res->sectors);
		res->sectors = 0;
	}
}

enum bch_reservation_flags {
	BCH_DISK_RESERVATION_NOFAIL	= 1 << 0,
	BCH_DISK_RESERVATION_PARTIAL	= 1 << 1,
};

int __bch2_disk_reservation_add(struct bch_fs *, struct disk_reservation *,
				u64, enum bch_reservation_flags);

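/*
 * Fast path for taking a disk reservation: try to carve the sectors out of
 * this CPU's percpu sectors_available pool with a cmpxchg loop, falling back
 * to __bch2_disk_reservation_add() (the slow, shared path) whenever the
 * local pool is too small.
 */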
static inline int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
					    u64 sectors, enum bch_reservation_flags flags)
{
#ifdef __KERNEL__
	u64 old, new;

	old = this_cpu_read(c->pcpu->sectors_available);
	do {
		if (sectors > old)
			return __bch2_disk_reservation_add(c, res, sectors, flags);

		new = old - sectors;
	} while (!this_cpu_try_cmpxchg(c->pcpu->sectors_available, &old, new));

	this_cpu_add(*c->online_reserved, sectors);
	res->sectors += sectors;
	return 0;
#else
	return __bch2_disk_reservation_add(c, res, sectors, flags);
#endif
}

static inline struct disk_reservation
bch2_disk_reservation_init(struct bch_fs *c, unsigned nr_replicas)
{
	return (struct disk_reservation) {
		.sectors	= 0,
#if 0
		/* not used yet: */
		.gen		= c->capacity_gen,
#endif
		.nr_replicas	= nr_replicas,
	};
}

static inline int bch2_disk_reservation_get(struct bch_fs *c,
					    struct disk_reservation *res,
					    u64 sectors, unsigned nr_replicas,
					    int flags)
{
	*res = bch2_disk_reservation_init(c, nr_replicas);

	return bch2_disk_reservation_add(c, res, sectors * nr_replicas, flags);
}

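/*
 * avail_factor() scales capacity by 2^RESERVE_FACTOR / (2^RESERVE_FACTOR + 1)
 * == 64/65, holding back ~1.5% of space; e.g. avail_factor(65) == 64.
 */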
#define RESERVE_FACTOR	6

static inline u64 avail_factor(u64 r)
{
	return div_u64(r << RESERVE_FACTOR, (1 << RESERVE_FACTOR) + 1);
}

void bch2_buckets_nouse_free(struct bch_fs *);
int bch2_buckets_nouse_alloc(struct bch_fs *);

int bch2_dev_buckets_resize(struct bch_fs *, struct bch_dev *, u64);
void bch2_dev_buckets_free(struct bch_dev *);
int bch2_dev_buckets_alloc(struct bch_fs *, struct bch_dev *);

#endif /* _BUCKETS_H */