/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_ALLOC_FOREGROUND_H
#define _BCACHEFS_ALLOC_FOREGROUND_H

#include "bcachefs.h"
#include "alloc_types.h"
#include "extents.h"
#include "sb-members.h"

#include <linux/hash.h>

struct bkey;
struct bch_dev;
struct bch_fs;
struct bch_devs_list;

extern const char * const bch2_watermarks[];

void bch2_reset_alloc_cursors(struct bch_fs *);

struct dev_alloc_list {
	unsigned	nr;
	u8		data[BCH_SB_MEMBERS_MAX];
};

struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *,
					  struct dev_stripe_state *,
					  struct bch_devs_mask *);
void bch2_dev_stripe_increment(struct bch_dev *, struct dev_stripe_state *);

static inline struct bch_dev *ob_dev(struct bch_fs *c, struct open_bucket *ob)
{
	return bch2_dev_have_ref(c, ob->dev);
}

static inline unsigned bch2_open_buckets_reserved(enum bch_watermark watermark)
{
	switch (watermark) {
	case BCH_WATERMARK_interior_updates:
		return 0;
	case BCH_WATERMARK_reclaim:
		return OPEN_BUCKETS_COUNT / 6;
	case BCH_WATERMARK_btree:
	case BCH_WATERMARK_btree_copygc:
		return OPEN_BUCKETS_COUNT / 4;
	case BCH_WATERMARK_copygc:
		return OPEN_BUCKETS_COUNT / 3;
	default:
		return OPEN_BUCKETS_COUNT / 2;
	}
}
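
/*
 * Illustrative numbers, assuming OPEN_BUCKETS_COUNT is 1024 (its value in
 * alloc_types.h at the time of writing): reclaim keeps ~170 open buckets in
 * reserve, btree/btree_copygc 256, copygc ~341, and ordinary writes 512 -
 * the intent being that higher-priority internal watermarks can still get an
 * open bucket after foreground writes have been throttled.
 */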

struct open_bucket *bch2_bucket_alloc(struct bch_fs *, struct bch_dev *,
				      enum bch_watermark, enum bch_data_type,
				      struct closure *);

static inline void ob_push(struct bch_fs *c, struct open_buckets *obs,
			   struct open_bucket *ob)
{
	BUG_ON(obs->nr >= ARRAY_SIZE(obs->v));

	obs->v[obs->nr++] = ob - c->open_buckets;
}

#define open_bucket_for_each(_c, _obs, _ob, _i)			\
	for ((_i) = 0;							\
	     (_i) < (_obs)->nr &&					\
	     ((_ob) = (_c)->open_buckets + (_obs)->v[_i], true);	\
	     (_i)++)
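
/*
 * Example (illustrative only) - counting the open buckets currently attached
 * to a write point, using the same iteration pattern as the helpers below:
 *
 *	struct open_bucket *ob;
 *	unsigned i, nr = 0;
 *
 *	open_bucket_for_each(c, &wp->ptrs, ob, i)
 *		nr++;
 */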

static inline struct open_bucket *ec_open_bucket(struct bch_fs *c,
						 struct open_buckets *obs)
{
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, obs, ob, i)
		if (ob->ec)
			return ob;

	return NULL;
}

void bch2_open_bucket_write_error(struct bch_fs *,
				  struct open_buckets *, unsigned, int);

void __bch2_open_bucket_put(struct bch_fs *, struct open_bucket *);

static inline void bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
{
	if (atomic_dec_and_test(&ob->pin))
		__bch2_open_bucket_put(c, ob);
}

static inline void bch2_open_buckets_put(struct bch_fs *c,
					 struct open_buckets *ptrs)
{
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, ptrs, ob, i)
		bch2_open_bucket_put(c, ob);
	ptrs->nr = 0;
}

static inline void bch2_alloc_sectors_done_inlined(struct bch_fs *c, struct write_point *wp)
{
	struct open_buckets ptrs = { .nr = 0 }, keep = { .nr = 0 };
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, &wp->ptrs, ob, i)
		ob_push(c, ob->sectors_free < block_sectors(c)
			? &ptrs
			: &keep, ob);
	wp->ptrs = keep;

	mutex_unlock(&wp->lock);

	bch2_open_buckets_put(c, &ptrs);
}

static inline void bch2_open_bucket_get(struct bch_fs *c,
					struct write_point *wp,
					struct open_buckets *ptrs)
{
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, &wp->ptrs, ob, i) {
		ob->data_type = wp->data_type;
		atomic_inc(&ob->pin);
		ob_push(c, ptrs, ob);
	}
}

static inline open_bucket_idx_t *open_bucket_hashslot(struct bch_fs *c,
						       unsigned dev, u64 bucket)
{
	return c->open_buckets_hash +
		(jhash_3words(dev, bucket, bucket >> 32, 0) &
		 (OPEN_BUCKETS_COUNT - 1));
}

static inline bool bch2_bucket_is_open(struct bch_fs *c, unsigned dev, u64 bucket)
{
	open_bucket_idx_t slot = *open_bucket_hashslot(c, dev, bucket);

	while (slot) {
		struct open_bucket *ob = &c->open_buckets[slot];

		if (ob->dev == dev && ob->bucket == bucket)
			return true;

		slot = ob->hash;
	}

	return false;
}

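/*
 * Lockless-then-locked check: try the open bucket hash without any lock
 * first; only if that misses, recheck under freelist_lock (which appears to
 * serialize updates to the hash) so that a negative answer isn't just a race
 * against a concurrent insertion.
 */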
static inline bool bch2_bucket_is_open_safe(struct bch_fs *c, unsigned dev, u64 bucket)
{
	bool ret;

	if (bch2_bucket_is_open(c, dev, bucket))
		return true;

	spin_lock(&c->freelist_lock);
	ret = bch2_bucket_is_open(c, dev, bucket);
	spin_unlock(&c->freelist_lock);

	return ret;
}

enum bch_write_flags;
int bch2_bucket_alloc_set_trans(struct btree_trans *, struct open_buckets *,
				struct dev_stripe_state *, struct bch_devs_mask *,
				unsigned, unsigned *, bool *, enum bch_write_flags,
				enum bch_data_type, enum bch_watermark,
				struct closure *);

int bch2_alloc_sectors_start_trans(struct btree_trans *,
				   unsigned, unsigned,
				   struct write_point_specifier,
				   struct bch_devs_list *,
				   unsigned, unsigned,
				   enum bch_watermark,
				   enum bch_write_flags,
				   struct closure *,
				   struct write_point **);
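
/*
 * Sketch of how a writer typically strings the pieces in this header
 * together (illustrative only - error handling and the real argument values
 * are elided, and the parameter names are only meant to suggest what each
 * position is for):
 *
 *	struct write_point *wp;
 *
 *	ret = bch2_alloc_sectors_start_trans(trans, target, erasure_code,
 *					     write_point, devs_have,
 *					     nr_replicas, nr_replicas_required,
 *					     watermark, flags, cl, &wp);
 *	if (!ret) {
 *		bch2_alloc_sectors_append_ptrs(c, wp, k, sectors, cached);
 *		bch2_alloc_sectors_done(c, wp);
 *	}
 */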

struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *, struct open_bucket *);

/*
 * Append pointers to @k for the space we just allocated, and mark @sectors of
 * that space as allocated out of the open buckets attached to @wp
 */
static inline void
bch2_alloc_sectors_append_ptrs_inlined(struct bch_fs *c, struct write_point *wp,
				       struct bkey_i *k, unsigned sectors,
				       bool cached)
{
	struct open_bucket *ob;
	unsigned i;

	BUG_ON(sectors > wp->sectors_free);
	wp->sectors_free -= sectors;
	wp->sectors_allocated += sectors;

	open_bucket_for_each(c, &wp->ptrs, ob, i) {
		struct bch_dev *ca = ob_dev(c, ob);
		struct bch_extent_ptr ptr = bch2_ob_ptr(c, ob);

		ptr.cached = cached ||
			(!ca->mi.durability &&
			 wp->data_type == BCH_DATA_user);

		bch2_bkey_append_ptr(k, ptr);

		BUG_ON(sectors > ob->sectors_free);
		ob->sectors_free -= sectors;
	}
}

void bch2_alloc_sectors_append_ptrs(struct bch_fs *, struct write_point *,
				    struct bkey_i *, unsigned, bool);
void bch2_alloc_sectors_done(struct bch_fs *, struct write_point *);

void bch2_open_buckets_stop(struct bch_fs *c, struct bch_dev *, bool);

static inline struct write_point_specifier writepoint_hashed(unsigned long v)
{
	return (struct write_point_specifier) { .v = v | 1 };
}

static inline struct write_point_specifier writepoint_ptr(struct write_point *wp)
{
	return (struct write_point_specifier) { .v = (unsigned long) wp };
}
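
/*
 * Illustrative note: callers that own a dedicated struct write_point (e.g.
 * internal tasks such as copygc or the btree) pass writepoint_ptr(&wp), while
 * other callers hash some stable value and pass
 * writepoint_hashed((unsigned long) key) to pick one of the shared hashed
 * write points. Since a struct write_point pointer is aligned, the low bit
 * set by writepoint_hashed() is presumably what distinguishes the two forms.
 */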

void bch2_fs_allocator_foreground_init(struct bch_fs *);

void bch2_open_bucket_to_text(struct printbuf *, struct bch_fs *, struct open_bucket *);
void bch2_open_buckets_to_text(struct printbuf *, struct bch_fs *, struct bch_dev *);
void bch2_open_buckets_partial_to_text(struct printbuf *, struct bch_fs *);

void bch2_write_points_to_text(struct printbuf *, struct bch_fs *);

void bch2_fs_alloc_debug_to_text(struct printbuf *, struct bch_fs *);
void bch2_dev_alloc_debug_to_text(struct printbuf *, struct bch_dev *);

void __bch2_wait_on_allocator(struct bch_fs *, struct closure *);
static inline void bch2_wait_on_allocator(struct bch_fs *c, struct closure *cl)
{
	if (cl->closure_get_happened)
		__bch2_wait_on_allocator(c, cl);
}

#endif /* _BCACHEFS_ALLOC_FOREGROUND_H */