// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2012 Google, Inc.
 *
 * Foreground allocator code: allocate buckets from freelist, and allocate in
 * sector granularity from writepoints.
 *
 * bch2_bucket_alloc() allocates a single bucket from a specific device.
 *
 * bch2_bucket_alloc_set_trans() allocates one or more buckets from different
 * devices in a given filesystem.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "btree_gc.h"
#include "buckets.h"
#include "buckets_waiting_for_journal.h"
#include "clock.h"
#include "debug.h"
#include "disk_groups.h"
#include "ec.h"
#include "error.h"
#include "io_write.h"
#include "journal.h"
#include "movinggc.h"
#include "nocow_locking.h"
#include "trace.h"

#include <linux/math64.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

static void bch2_trans_mutex_lock_norelock(struct btree_trans *trans,
					   struct mutex *lock)
{
	if (!mutex_trylock(lock)) {
		bch2_trans_unlock(trans);
		mutex_lock(lock);
	}
}

const char * const bch2_watermarks[] = {
#define x(t) #t,
	BCH_WATERMARKS()
#undef x
	NULL
};
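
/*
 * Note: BCH_WATERMARKS() is an x-macro; with x(t) defined as #t above it
 * expands to the watermark names as strings, indexed by enum bch_watermark
 * (e.g. bch2_watermarks[BCH_WATERMARK_normal] == "normal").
 */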

/*
 * Open buckets represent a bucket that's currently being allocated from.  They
 * serve two purposes:
 *
 *  - They track buckets that have been partially allocated, allowing for
 *    sub-bucket sized allocations - they're used by the sector allocator below
 *
 *  - They provide a reference to the buckets they own that mark and sweep GC
 *    can find, until the new allocation has a pointer to it inserted into the
 *    btree
 *
 * When allocating some space with the sector allocator, the allocation comes
 * with a reference to an open bucket - the caller is required to put that
 * reference _after_ doing the index update that makes its allocation reachable.
 */
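
/*
 * Illustrative sketch of that ordering (not code from this file): a caller
 * must do the index update before dropping its open_bucket reference:
 *
 *	struct open_bucket *ob =
 *		bch2_bucket_alloc(c, ca, watermark, data_type, cl);
 *	struct bch_extent_ptr ptr = bch2_ob_ptr(c, ob);
 *	// ...insert a key containing ptr into the btree...
 *	bch2_open_bucket_put(c, ob);
 */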

void bch2_reset_alloc_cursors(struct bch_fs *c)
{
	rcu_read_lock();
	for_each_member_device_rcu(c, ca, NULL)
		memset(ca->alloc_cursor, 0, sizeof(ca->alloc_cursor));
	rcu_read_unlock();
}

static void bch2_open_bucket_hash_add(struct bch_fs *c, struct open_bucket *ob)
{
	open_bucket_idx_t idx = ob - c->open_buckets;
	open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);

	ob->hash = *slot;
	*slot = idx;
}

static void bch2_open_bucket_hash_remove(struct bch_fs *c, struct open_bucket *ob)
{
	open_bucket_idx_t idx = ob - c->open_buckets;
	open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);

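	/* Walk the singly linked hash chain to find the slot pointing at @ob: */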
	while (*slot != idx) {
		BUG_ON(!*slot);
		slot = &c->open_buckets[*slot].hash;
	}

	*slot = ob->hash;
	ob->hash = 0;
}

void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
{
	struct bch_dev *ca = ob_dev(c, ob);

	if (ob->ec) {
		ec_stripe_new_put(c, ob->ec, STRIPE_REF_io);
		return;
	}

	spin_lock(&ob->lock);
	ob->valid = false;
	ob->data_type = 0;
	spin_unlock(&ob->lock);

	spin_lock(&c->freelist_lock);
	bch2_open_bucket_hash_remove(c, ob);

	ob->freelist = c->open_buckets_freelist;
	c->open_buckets_freelist = ob - c->open_buckets;

	c->open_buckets_nr_free++;
	ca->nr_open_buckets--;
	spin_unlock(&c->freelist_lock);

	closure_wake_up(&c->open_buckets_wait);
}

void bch2_open_bucket_write_error(struct bch_fs *c,
				  struct open_buckets *obs,
				  unsigned dev, int err)
{
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, obs, ob, i)
		if (ob->dev == dev && ob->ec)
			bch2_ec_bucket_cancel(c, ob, err);
}

static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
{
	struct open_bucket *ob;

	BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);

	ob = c->open_buckets + c->open_buckets_freelist;
	c->open_buckets_freelist = ob->freelist;
	atomic_set(&ob->pin, 1);
	ob->data_type = 0;

	c->open_buckets_nr_free--;
	return ob;
}

static inline bool is_superblock_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b)
{
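	/*
	 * After BCH_RECOVERY_PASS_trans_mark_dev_sbs has run, superblock
	 * buckets are marked in the alloc btree and should never appear free,
	 * so the explicit check is only needed up to that point:
	 */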
	if (c->curr_recovery_pass > BCH_RECOVERY_PASS_trans_mark_dev_sbs)
		return false;

	return bch2_is_superblock_bucket(ca, b);
}

static void open_bucket_free_unused(struct bch_fs *c, struct open_bucket *ob)
{
	BUG_ON(c->open_buckets_partial_nr >=
	       ARRAY_SIZE(c->open_buckets_partial));

	spin_lock(&c->freelist_lock);
	rcu_read_lock();
	bch2_dev_rcu(c, ob->dev)->nr_partial_buckets++;
	rcu_read_unlock();

	ob->on_partial_list = true;
	c->open_buckets_partial[c->open_buckets_partial_nr++] =
		ob - c->open_buckets;
	spin_unlock(&c->freelist_lock);

	closure_wake_up(&c->open_buckets_wait);
	closure_wake_up(&c->freelist_wait);
}

static inline bool may_alloc_bucket(struct bch_fs *c,
				    struct bpos bucket,
				    struct bucket_alloc_state *s)
{
	if (bch2_bucket_is_open(c, bucket.inode, bucket.offset)) {
		s->skipped_open++;
		return false;
	}

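	/*
	 * Don't reuse a bucket until the journal entry that freed it is on
	 * disk; otherwise, after a crash, stale pointers could resolve to the
	 * bucket's new contents:
	 */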
	u64 journal_seq_ready =
		bch2_bucket_journal_seq_ready(&c->buckets_waiting_for_journal,
					      bucket.inode, bucket.offset);
	if (journal_seq_ready > c->journal.flushed_seq_ondisk) {
		if (journal_seq_ready > c->journal.flushing_seq)
			s->need_journal_commit++;
		s->skipped_need_journal_commit++;
		return false;
	}

	if (bch2_bucket_nocow_is_locked(&c->nocow_locks, bucket)) {
		s->skipped_nocow++;
		return false;
	}

	return true;
}

static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
					      u64 bucket, u8 gen,
					      enum bch_watermark watermark,
					      struct bucket_alloc_state *s,
					      struct closure *cl)
{
	if (unlikely(is_superblock_bucket(c, ca, bucket)))
		return NULL;

	if (unlikely(ca->buckets_nouse && test_bit(bucket, ca->buckets_nouse))) {
		s->skipped_nouse++;
		return NULL;
	}

	spin_lock(&c->freelist_lock);

	if (unlikely(c->open_buckets_nr_free <= bch2_open_buckets_reserved(watermark))) {
		if (cl)
			closure_wait(&c->open_buckets_wait, cl);

		track_event_change(&c->times[BCH_TIME_blocked_allocate_open_bucket], true);
		spin_unlock(&c->freelist_lock);
		return ERR_PTR(-BCH_ERR_open_buckets_empty);
	}

	/* Recheck under lock: */
	if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
		spin_unlock(&c->freelist_lock);
		s->skipped_open++;
		return NULL;
	}

	struct open_bucket *ob = bch2_open_bucket_alloc(c);

	spin_lock(&ob->lock);
	ob->valid	= true;
	ob->sectors_free = ca->mi.bucket_size;
	ob->dev		= ca->dev_idx;
	ob->gen		= gen;
	ob->bucket	= bucket;
	spin_unlock(&ob->lock);

	ca->nr_open_buckets++;
	bch2_open_bucket_hash_add(c, ob);

	track_event_change(&c->times[BCH_TIME_blocked_allocate_open_bucket], false);
	track_event_change(&c->times[BCH_TIME_blocked_allocate], false);

	spin_unlock(&c->freelist_lock);
	return ob;
}

static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca,
					    enum bch_watermark watermark,
					    struct bucket_alloc_state *s,
					    struct btree_iter *freespace_iter,
					    struct closure *cl)
{
	struct bch_fs *c = trans->c;
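	/*
	 * Freespace btree keys pack the bucket number into the low 56 bits of
	 * the key's offset; the high bits hold generation bits (see
	 * alloc_freespace_genbits()), so mask them off here:
	 */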
	u64 b = freespace_iter->pos.offset & ~(~0ULL << 56);

	if (!may_alloc_bucket(c, POS(ca->dev_idx, b), s))
		return NULL;

	u8 gen;
	int ret = bch2_check_discard_freespace_key(trans, freespace_iter, &gen, true);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret)
		return NULL;

	return __try_alloc_bucket(c, ca, b, gen, watermark, s, cl);
}

/*
 * This path is only used before the freespace btree is initialized:
 */
static noinline struct open_bucket *
bch2_bucket_alloc_early(struct btree_trans *trans,
			struct bch_dev *ca,
			enum bch_watermark watermark,
			struct bucket_alloc_state *s,
			struct closure *cl)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter, citer;
	struct bkey_s_c k, ck;
	struct open_bucket *ob = NULL;
	u64 first_bucket = ca->mi.first_bucket;
	u64 *dev_alloc_cursor = &ca->alloc_cursor[s->btree_bitmap];
	u64 alloc_start = max(first_bucket, *dev_alloc_cursor);
	u64 alloc_cursor = alloc_start;
	int ret;

	/*
	 * Scan with an uncached iterator to avoid polluting the key cache. An
	 * uncached iter will return a cached key if one exists, but if not
	 * there is no other underlying protection for the associated key cache
	 * slot. To avoid racing bucket allocations, look up the cached key slot
	 * of any likely allocation candidate before attempting to proceed with
	 * the allocation. This provides proper exclusion on the associated
	 * bucket.
	 */
again:
	for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, alloc_cursor),
			   BTREE_ITER_slots, k, ret) {
		u64 bucket = k.k->p.offset;

		if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)))
			break;

		if (s->btree_bitmap != BTREE_BITMAP_ANY &&
		    s->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca,
				bucket_to_sector(ca, bucket), ca->mi.bucket_size)) {
			if (s->btree_bitmap == BTREE_BITMAP_YES &&
			    bucket_to_sector(ca, bucket) > 64ULL << ca->mi.btree_bitmap_shift)
				break;

			bucket = sector_to_bucket(ca,
					round_up(bucket_to_sector(ca, bucket) + 1,
						 1ULL << ca->mi.btree_bitmap_shift));
			bch2_btree_iter_set_pos(trans, &iter, POS(ca->dev_idx, bucket));
			s->buckets_seen++;
			s->skipped_mi_btree_bitmap++;
			continue;
		}

		struct bch_alloc_v4 a_convert;
		const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
		if (a->data_type != BCH_DATA_free)
			continue;

		/* now check the cached key to serialize concurrent allocs of the bucket */
		ck = bch2_bkey_get_iter(trans, &citer, BTREE_ID_alloc, k.k->p, BTREE_ITER_cached);
		ret = bkey_err(ck);
		if (ret)
			break;

		a = bch2_alloc_to_v4(ck, &a_convert);
		if (a->data_type != BCH_DATA_free)
			goto next;

		s->buckets_seen++;

		ob = may_alloc_bucket(c, k.k->p, s)
			? __try_alloc_bucket(c, ca, k.k->p.offset, a->gen,
					     watermark, s, cl)
			: NULL;
next:
		bch2_set_btree_iter_dontneed(trans, &citer);
		bch2_trans_iter_exit(trans, &citer);
		if (ob)
			break;
	}
	bch2_trans_iter_exit(trans, &iter);

	alloc_cursor = iter.pos.offset;

	if (!ob && ret)
		ob = ERR_PTR(ret);

	if (!ob && alloc_start > first_bucket) {
		alloc_cursor = alloc_start = first_bucket;
		goto again;
	}

	*dev_alloc_cursor = alloc_cursor;

	return ob;
}

static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
						   struct bch_dev *ca,
						   enum bch_watermark watermark,
						   struct bucket_alloc_state *s,
						   struct closure *cl)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct open_bucket *ob = NULL;
	u64 *dev_alloc_cursor = &ca->alloc_cursor[s->btree_bitmap];
	u64 alloc_start = max_t(u64, ca->mi.first_bucket, READ_ONCE(*dev_alloc_cursor));
	u64 alloc_cursor = alloc_start;
	int ret;
again:
	for_each_btree_key_max_norestart(trans, iter, BTREE_ID_freespace,
					 POS(ca->dev_idx, alloc_cursor),
					 POS(ca->dev_idx, U64_MAX),
					 0, k, ret) {
		/*
		 * peek normally doesn't trim extents - they can span iter.pos,
		 * which is not what we want here:
		 */
		iter.k.size = iter.k.p.offset - iter.pos.offset;

		while (iter.k.size) {
			s->buckets_seen++;

			u64 bucket = iter.pos.offset & ~(~0ULL << 56);
			if (s->btree_bitmap != BTREE_BITMAP_ANY &&
			    s->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca,
					bucket_to_sector(ca, bucket), ca->mi.bucket_size)) {
				if (s->btree_bitmap == BTREE_BITMAP_YES &&
				    bucket_to_sector(ca, bucket) > 64ULL << ca->mi.btree_bitmap_shift)
					goto fail;

				bucket = sector_to_bucket(ca,
						round_up(bucket_to_sector(ca, bucket + 1),
							 1ULL << ca->mi.btree_bitmap_shift));
				alloc_cursor = bucket|(iter.pos.offset & (~0ULL << 56));

				bch2_btree_iter_set_pos(trans, &iter, POS(ca->dev_idx, alloc_cursor));
				s->skipped_mi_btree_bitmap++;
				goto next;
			}

			ob = try_alloc_bucket(trans, ca, watermark, s, &iter, cl);
			if (ob) {
				if (!IS_ERR(ob))
					*dev_alloc_cursor = iter.pos.offset;
				bch2_set_btree_iter_dontneed(trans, &iter);
				break;
			}

			iter.k.size--;
			iter.pos.offset++;
		}
next:
		if (ob || ret)
			break;
	}
fail:
	bch2_trans_iter_exit(trans, &iter);

	BUG_ON(ob && ret);

	if (ret)
		ob = ERR_PTR(ret);

	if (!ob && alloc_start > ca->mi.first_bucket) {
		alloc_cursor = alloc_start = ca->mi.first_bucket;
		goto again;
	}

	return ob;
}

static noinline void trace_bucket_alloc2(struct bch_fs *c, struct bch_dev *ca,
					 enum bch_watermark watermark,
					 enum bch_data_type data_type,
					 struct closure *cl,
					 struct bch_dev_usage *usage,
					 struct bucket_alloc_state *s,
					 struct open_bucket *ob)
{
	struct printbuf buf = PRINTBUF;

	printbuf_tabstop_push(&buf, 24);

	prt_printf(&buf, "dev\t%s (%u)\n",	ca->name, ca->dev_idx);
	prt_printf(&buf, "watermark\t%s\n",	bch2_watermarks[watermark]);
	prt_printf(&buf, "data type\t%s\n",	__bch2_data_types[data_type]);
	prt_printf(&buf, "blocking\t%u\n",	cl != NULL);
	prt_printf(&buf, "free\t%llu\n",	usage->buckets[BCH_DATA_free]);
	prt_printf(&buf, "avail\t%llu\n",	dev_buckets_free(ca, *usage, watermark));
	prt_printf(&buf, "copygc_wait\t%lu/%lli\n",
		   bch2_copygc_wait_amount(c),
		   c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now));
	prt_printf(&buf, "seen\t%llu\n",	s->buckets_seen);
	prt_printf(&buf, "open\t%llu\n",	s->skipped_open);
	prt_printf(&buf, "need journal commit\t%llu\n", s->skipped_need_journal_commit);
	prt_printf(&buf, "nocow\t%llu\n",	s->skipped_nocow);
	prt_printf(&buf, "nouse\t%llu\n",	s->skipped_nouse);
	prt_printf(&buf, "mi_btree_bitmap\t%llu\n", s->skipped_mi_btree_bitmap);

	if (!IS_ERR(ob)) {
		prt_printf(&buf, "allocated\t%llu\n", ob->bucket);
		trace_bucket_alloc(c, buf.buf);
	} else {
		prt_printf(&buf, "err\t%s\n", bch2_err_str(PTR_ERR(ob)));
		trace_bucket_alloc_fail(c, buf.buf);
	}

	printbuf_exit(&buf);
}

/**
 * bch2_bucket_alloc_trans - allocate a single bucket from a specific device
 * @trans:	transaction object
 * @ca:		device to allocate from
 * @watermark:	how important is this allocation?
 * @data_type:	BCH_DATA_journal, btree, user...
 * @cl:		if not NULL, closure to be used to wait if buckets not available
 * @nowait:	if true, do not wait for buckets to become available
 * @usage:	also returns the current device usage
 *
 * Returns:	an open_bucket on success, or an ERR_PTR() on failure.
 */
static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
				      struct bch_dev *ca,
				      enum bch_watermark watermark,
				      enum bch_data_type data_type,
				      struct closure *cl,
				      bool nowait,
				      struct bch_dev_usage *usage)
{
	struct bch_fs *c = trans->c;
	struct open_bucket *ob = NULL;
	bool freespace = READ_ONCE(ca->mi.freespace_initialized);
	u64 avail;
	struct bucket_alloc_state s = {
		.btree_bitmap = data_type == BCH_DATA_btree,
	};
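	/*
	 * Initializing waiting to nowait means a nowait allocation never
	 * registers itself on the freelist waitlist below:
	 */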
	bool waiting = nowait;
again:
	bch2_dev_usage_read_fast(ca, usage);
	avail = dev_buckets_free(ca, *usage, watermark);

	if (usage->buckets[BCH_DATA_need_discard] > avail)
		bch2_dev_do_discards(ca);

	if (usage->buckets[BCH_DATA_need_gc_gens] > avail)
		bch2_gc_gens_async(c);

	if (should_invalidate_buckets(ca, *usage))
		bch2_dev_do_invalidates(ca);

	if (!avail) {
		if (watermark > BCH_WATERMARK_normal &&
		    c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_allocations)
			goto alloc;

		if (cl && !waiting) {
			closure_wait(&c->freelist_wait, cl);
			waiting = true;
			goto again;
		}

		track_event_change(&c->times[BCH_TIME_blocked_allocate], true);

		ob = ERR_PTR(-BCH_ERR_freelist_empty);
		goto err;
	}

	if (waiting)
		closure_wake_up(&c->freelist_wait);
alloc:
	ob = likely(freespace)
		? bch2_bucket_alloc_freelist(trans, ca, watermark, &s, cl)
		: bch2_bucket_alloc_early(trans, ca, watermark, &s, cl);

	if (s.need_journal_commit * 2 > avail)
		bch2_journal_flush_async(&c->journal, NULL);

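	/* Nothing found in the preferred btree bitmap region? Retry anywhere: */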
	if (!ob && s.btree_bitmap != BTREE_BITMAP_ANY) {
		s.btree_bitmap = BTREE_BITMAP_ANY;
		goto alloc;
	}

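	/*
	 * The freespace btree may not be trustworthy until check_alloc_info
	 * has verified it; fall back to scanning the alloc btree directly:
	 */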
	if (!ob && freespace && c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_alloc_info) {
		freespace = false;
		goto alloc;
	}
err:
	if (!ob)
		ob = ERR_PTR(-BCH_ERR_no_buckets_found);

	if (!IS_ERR(ob))
		ob->data_type = data_type;

	if (!IS_ERR(ob))
		count_event(c, bucket_alloc);
	else if (!bch2_err_matches(PTR_ERR(ob), BCH_ERR_transaction_restart))
		count_event(c, bucket_alloc_fail);

	if (!IS_ERR(ob)
	    ? trace_bucket_alloc_enabled()
	    : trace_bucket_alloc_fail_enabled())
		trace_bucket_alloc2(c, ca, watermark, data_type, cl, usage, &s, ob);

	return ob;
}

struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
				      enum bch_watermark watermark,
				      enum bch_data_type data_type,
				      struct closure *cl)
{
	struct bch_dev_usage usage;
	struct open_bucket *ob;

	bch2_trans_do(c,
		      PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(trans, ca, watermark,
							data_type, cl, false, &usage)));
	return ob;
}

static int __dev_stripe_cmp(struct dev_stripe_state *stripe,
			    unsigned l, unsigned r)
{
	return cmp_int(stripe->next_alloc[l], stripe->next_alloc[r]);
}

#define dev_stripe_cmp(l, r) __dev_stripe_cmp(stripe, l, r)

struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
					  struct dev_stripe_state *stripe,
					  struct bch_devs_mask *devs)
{
	struct dev_alloc_list ret = { .nr = 0 };
	unsigned i;

	for_each_set_bit(i, devs->d, BCH_SB_MEMBERS_MAX)
		ret.data[ret.nr++] = i;

	bubble_sort(ret.data, ret.nr, dev_stripe_cmp);
	return ret;
}

static const u64 stripe_clock_hand_rescale	= 1ULL << 62; /* trigger rescale at */
static const u64 stripe_clock_hand_max		= 1ULL << 56; /* max after rescale */
static const u64 stripe_clock_hand_inv		= 1ULL << 52; /* max increment, if a device is empty */

static noinline void bch2_stripe_state_rescale(struct dev_stripe_state *stripe)
{
	/*
	 * Avoid underflowing clock hands if at all possible: if clock hands go
	 * to 0 then we lose information. Clock hands can span a wide range if
	 * we have devices we rarely try to allocate from, e.g. if we generally
	 * allocate from a specified target but only sometimes fall back to the
	 * whole filesystem.
	 */
	u64 scale_max = U64_MAX;	/* maximum we can subtract without underflow */
	u64 scale_min = 0;		/* minimum we must subtract to avoid overflow */

	for (u64 *v = stripe->next_alloc;
	     v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++) {
		if (*v)
			scale_max = min(scale_max, *v);
		if (*v > stripe_clock_hand_max)
			scale_min = max(scale_min, *v - stripe_clock_hand_max);
	}

	u64 scale = max(scale_min, scale_max);

	for (u64 *v = stripe->next_alloc;
	     v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++)
		*v = *v < scale ? 0 : *v - scale;
}

static inline void bch2_dev_stripe_increment_inlined(struct bch_dev *ca,
			       struct dev_stripe_state *stripe,
			       struct bch_dev_usage *usage)
{
	/*
	 * Stripe state has a per device clock hand: we allocate from the device
	 * with the smallest clock hand.
	 *
	 * When we allocate, we don't do a simple increment; we add the inverse
	 * of the device's free space. This results in round robin behavior that
	 * biases in favor of the device(s) with more free space.
	 */

	u64 *v = stripe->next_alloc + ca->dev_idx;
	u64 free_space = __dev_buckets_available(ca, *usage, BCH_WATERMARK_normal);
	u64 free_space_inv = free_space
		? div64_u64(stripe_clock_hand_inv, free_space)
		: stripe_clock_hand_inv;

	/* Saturating add, avoid overflow: */
	u64 sum = *v + free_space_inv;
	*v = sum >= *v ? sum : U64_MAX;

	if (unlikely(*v > stripe_clock_hand_rescale))
		bch2_stripe_state_rescale(stripe);
}
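
/*
 * Worked example: with stripe_clock_hand_inv = 2^52, a device with 1024 free
 * buckets advances its hand by 2^42 per allocation while one with 512 free
 * buckets advances by 2^43, so in steady state the first device is chosen
 * for twice as many allocations.
 */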

void bch2_dev_stripe_increment(struct bch_dev *ca,
			       struct dev_stripe_state *stripe)
{
	struct bch_dev_usage usage;

	bch2_dev_usage_read_fast(ca, &usage);
	bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
}

static int add_new_bucket(struct bch_fs *c,
			   struct open_buckets *ptrs,
			   struct bch_devs_mask *devs_may_alloc,
			   unsigned nr_replicas,
			   unsigned *nr_effective,
			   bool *have_cache,
			   struct open_bucket *ob)
{
	unsigned durability = ob_dev(c, ob)->mi.durability;

	BUG_ON(*nr_effective >= nr_replicas);

	__clear_bit(ob->dev, devs_may_alloc->d);
	*nr_effective	+= durability;
	*have_cache	|= !durability;

	ob_push(c, ptrs, ob);

	if (*nr_effective >= nr_replicas)
		return 1;
	if (ob->ec)
		return 1;
	return 0;
}

int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
		      struct open_buckets *ptrs,
		      struct dev_stripe_state *stripe,
		      struct bch_devs_mask *devs_may_alloc,
		      unsigned nr_replicas,
		      unsigned *nr_effective,
		      bool *have_cache,
		      enum bch_write_flags flags,
		      enum bch_data_type data_type,
		      enum bch_watermark watermark,
		      struct closure *cl)
{
	struct bch_fs *c = trans->c;
	int ret = -BCH_ERR_insufficient_devices;

	BUG_ON(*nr_effective >= nr_replicas);

	struct dev_alloc_list devs_sorted = bch2_dev_alloc_list(c, stripe, devs_may_alloc);
	darray_for_each(devs_sorted, i) {
		struct bch_dev *ca = bch2_dev_tryget_noerror(c, *i);
		if (!ca)
			continue;

		if (!ca->mi.durability && *have_cache) {
			bch2_dev_put(ca);
			continue;
		}

		struct bch_dev_usage usage;
		struct open_bucket *ob = bch2_bucket_alloc_trans(trans, ca, watermark, data_type,
						     cl, flags & BCH_WRITE_alloc_nowait, &usage);
		if (!IS_ERR(ob))
			bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
		bch2_dev_put(ca);

		if (IS_ERR(ob)) {
			ret = PTR_ERR(ob);
			if (bch2_err_matches(ret, BCH_ERR_transaction_restart) || cl)
				break;
			continue;
		}

		if (add_new_bucket(c, ptrs, devs_may_alloc,
				   nr_replicas, nr_effective,
				   have_cache, ob)) {
			ret = 0;
			break;
		}
	}

	return ret;
}

/* Allocate from stripes: */

/*
 * if we can't allocate a new stripe because there are already too many
 * partially filled stripes, force allocating from an existing stripe even when
 * it's to a device we don't want:
 */

static int bucket_alloc_from_stripe(struct btree_trans *trans,
			 struct open_buckets *ptrs,
			 struct write_point *wp,
			 struct bch_devs_mask *devs_may_alloc,
			 u16 target,
			 unsigned nr_replicas,
			 unsigned *nr_effective,
			 bool *have_cache,
			 enum bch_watermark watermark,
			 enum bch_write_flags flags,
			 struct closure *cl)
{
	struct bch_fs *c = trans->c;
	int ret = 0;

	if (nr_replicas < 2)
		return 0;

	if (ec_open_bucket(c, ptrs))
		return 0;

	struct ec_stripe_head *h =
		bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1, watermark, cl);
	if (IS_ERR(h))
		return PTR_ERR(h);
	if (!h)
		return 0;

	struct dev_alloc_list devs_sorted = bch2_dev_alloc_list(c, &wp->stripe, devs_may_alloc);
	darray_for_each(devs_sorted, i)
		for (unsigned ec_idx = 0; ec_idx < h->s->nr_data; ec_idx++) {
			if (!h->s->blocks[ec_idx])
				continue;

			struct open_bucket *ob = c->open_buckets + h->s->blocks[ec_idx];
			if (ob->dev == *i && !test_and_set_bit(ec_idx, h->s->blocks_allocated)) {
				ob->ec_idx	= ec_idx;
				ob->ec		= h->s;
				ec_stripe_new_get(h->s, STRIPE_REF_io);

				ret = add_new_bucket(c, ptrs, devs_may_alloc,
						     nr_replicas, nr_effective,
						     have_cache, ob);
				goto out;
			}
		}
out:
	bch2_ec_stripe_head_put(c, h);
	return ret;
}

/* Sector allocator */

static bool want_bucket(struct bch_fs *c,
			struct write_point *wp,
			struct bch_devs_mask *devs_may_alloc,
			bool *have_cache, bool ec,
			struct open_bucket *ob)
{
	struct bch_dev *ca = ob_dev(c, ob);

	if (!test_bit(ob->dev, devs_may_alloc->d))
		return false;

	if (ob->data_type != wp->data_type)
		return false;

	if (!ca->mi.durability &&
	    (wp->data_type == BCH_DATA_btree || ec || *have_cache))
		return false;

	if (ec != (ob->ec != NULL))
		return false;

	return true;
}

static int bucket_alloc_set_writepoint(struct bch_fs *c,
				       struct open_buckets *ptrs,
				       struct write_point *wp,
				       struct bch_devs_mask *devs_may_alloc,
				       unsigned nr_replicas,
				       unsigned *nr_effective,
				       bool *have_cache,
				       bool ec)
{
	struct open_buckets ptrs_skip = { .nr = 0 };
	struct open_bucket *ob;
	unsigned i;
	int ret = 0;

	open_bucket_for_each(c, &wp->ptrs, ob, i) {
		if (!ret && want_bucket(c, wp, devs_may_alloc,
					have_cache, ec, ob))
			ret = add_new_bucket(c, ptrs, devs_may_alloc,
				       nr_replicas, nr_effective,
				       have_cache, ob);
		else
			ob_push(c, &ptrs_skip, ob);
	}
	wp->ptrs = ptrs_skip;

	return ret;
}

static int bucket_alloc_set_partial(struct bch_fs *c,
				    struct open_buckets *ptrs,
				    struct write_point *wp,
				    struct bch_devs_mask *devs_may_alloc,
				    unsigned nr_replicas,
				    unsigned *nr_effective,
				    bool *have_cache, bool ec,
				    enum bch_watermark watermark)
{
	int i, ret = 0;

	if (!c->open_buckets_partial_nr)
		return 0;

	spin_lock(&c->freelist_lock);

	if (!c->open_buckets_partial_nr)
		goto unlock;

	for (i = c->open_buckets_partial_nr - 1; i >= 0; --i) {
		struct open_bucket *ob = c->open_buckets + c->open_buckets_partial[i];

		if (want_bucket(c, wp, devs_may_alloc, have_cache, ec, ob)) {
			struct bch_dev *ca = ob_dev(c, ob);
			struct bch_dev_usage usage;
			u64 avail;

			bch2_dev_usage_read_fast(ca, &usage);
			avail = dev_buckets_free(ca, usage, watermark) + ca->nr_partial_buckets;
			if (!avail)
				continue;

			array_remove_item(c->open_buckets_partial,
					  c->open_buckets_partial_nr,
					  i);
			ob->on_partial_list = false;

			rcu_read_lock();
			bch2_dev_rcu(c, ob->dev)->nr_partial_buckets--;
			rcu_read_unlock();

			ret = add_new_bucket(c, ptrs, devs_may_alloc,
					     nr_replicas, nr_effective,
					     have_cache, ob);
			if (ret)
				break;
		}
	}
unlock:
	spin_unlock(&c->freelist_lock);
	return ret;
}
static int __open_bucket_add_buckets(struct btree_trans *trans,
			struct open_buckets *ptrs,
			struct write_point *wp,
			struct bch_devs_list *devs_have,
			u16 target,
			bool erasure_code,
			unsigned nr_replicas,
			unsigned *nr_effective,
			bool *have_cache,
			enum bch_watermark watermark,
			enum bch_write_flags flags,
			struct closure *_cl)
{
	struct bch_fs *c = trans->c;
	struct bch_devs_mask devs;
	struct open_bucket *ob;
	struct closure *cl = NULL;
	unsigned i;
	int ret;

	devs = target_rw_devs(c, wp->data_type, target);

	/* Don't allocate from devices we already have pointers to: */
	darray_for_each(*devs_have, i)
		__clear_bit(*i, devs.d);

	open_bucket_for_each(c, ptrs, ob, i)
		__clear_bit(ob->dev, devs.d);

	ret = bucket_alloc_set_writepoint(c, ptrs, wp, &devs,
				 nr_replicas, nr_effective,
				 have_cache, erasure_code);
	if (ret)
		return ret;

	ret = bucket_alloc_set_partial(c, ptrs, wp, &devs,
				 nr_replicas, nr_effective,
				 have_cache, erasure_code, watermark);
	if (ret)
		return ret;

	if (erasure_code) {
		ret = bucket_alloc_from_stripe(trans, ptrs, wp, &devs,
					 target,
					 nr_replicas, nr_effective,
					 have_cache,
					 watermark, flags, _cl);
	} else {
retry_blocking:
		/*
		 * Try nonblocking first, so that if one device is full we'll try from
		 * other devices:
		 */
		ret = bch2_bucket_alloc_set_trans(trans, ptrs, &wp->stripe, &devs,
					nr_replicas, nr_effective, have_cache,
					flags, wp->data_type, watermark, cl);
		if (ret &&
		    !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
		    !bch2_err_matches(ret, BCH_ERR_insufficient_devices) &&
		    !cl && _cl) {
			cl = _cl;
			goto retry_blocking;
		}
	}

	return ret;
}

static int open_bucket_add_buckets(struct btree_trans *trans,
			struct open_buckets *ptrs,
			struct write_point *wp,
			struct bch_devs_list *devs_have,
			u16 target,
			unsigned erasure_code,
			unsigned nr_replicas,
			unsigned *nr_effective,
			bool *have_cache,
			enum bch_watermark watermark,
			enum bch_write_flags flags,
			struct closure *cl)
{
	int ret;

	if (erasure_code && !ec_open_bucket(trans->c, ptrs)) {
		ret = __open_bucket_add_buckets(trans, ptrs, wp,
				devs_have, target, erasure_code,
				nr_replicas, nr_effective, have_cache,
				watermark, flags, cl);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
		    bch2_err_matches(ret, BCH_ERR_operation_blocked) ||
		    bch2_err_matches(ret, BCH_ERR_freelist_empty) ||
		    bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
			return ret;
		if (*nr_effective >= nr_replicas)
			return 0;
	}

	ret = __open_bucket_add_buckets(trans, ptrs, wp,
			devs_have, target, false,
			nr_replicas, nr_effective, have_cache,
			watermark, flags, cl);
	return ret < 0 ? ret : 0;
}
/**
 * should_drop_bucket - check if this open_bucket should go away
 * @ob:		open_bucket to predicate on
 * @c:		filesystem handle
 * @ca:		if set, we're killing buckets for a particular device
 * @ec:		if true, we're shutting down erasure coding and killing all ec
 *		open_buckets
 *
 * Returns: true if we should kill this open_bucket
 *
 * We're killing open_buckets because we're shutting down a device, erasure
 * coding, or the entire filesystem - check if this open_bucket matches; if
 * neither @ca nor @ec is given, every open_bucket matches:
 */
static bool should_drop_bucket(struct open_bucket *ob, struct bch_fs *c,
			       struct bch_dev *ca, bool ec)
{
	if (ec) {
		return ob->ec != NULL;
	} else if (ca) {
		bool drop = ob->dev == ca->dev_idx;
		struct open_bucket *ob2;
		unsigned i;

		if (!drop && ob->ec) {
			unsigned nr_blocks;

			mutex_lock(&ob->ec->lock);
			nr_blocks = bkey_i_to_stripe(&ob->ec->new_stripe.key)->v.nr_blocks;

			for (i = 0; i < nr_blocks; i++) {
				if (!ob->ec->blocks[i])
					continue;

				ob2 = c->open_buckets + ob->ec->blocks[i];
				drop |= ob2->dev == ca->dev_idx;
			}
			mutex_unlock(&ob->ec->lock);
		}

		return drop;
	} else {
		return true;
	}
}

static void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
				 bool ec, struct write_point *wp)
{
	struct open_buckets ptrs = { .nr = 0 };
	struct open_bucket *ob;
	unsigned i;

	mutex_lock(&wp->lock);
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		if (should_drop_bucket(ob, c, ca, ec))
			bch2_open_bucket_put(c, ob);
		else
			ob_push(c, &ptrs, ob);
	wp->ptrs = ptrs;
	mutex_unlock(&wp->lock);
}

void bch2_open_buckets_stop(struct bch_fs *c, struct bch_dev *ca,
			    bool ec)
{
	unsigned i;

	/* Close write points that point to this device: */
	for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
		bch2_writepoint_stop(c, ca, ec, &c->write_points[i]);

	bch2_writepoint_stop(c, ca, ec, &c->copygc_write_point);
	bch2_writepoint_stop(c, ca, ec, &c->rebalance_write_point);
	bch2_writepoint_stop(c, ca, ec, &c->btree_write_point);

	mutex_lock(&c->btree_reserve_cache_lock);
	while (c->btree_reserve_cache_nr) {
		struct btree_alloc *a =
			&c->btree_reserve_cache[--c->btree_reserve_cache_nr];

		bch2_open_buckets_put(c, &a->ob);
	}
	mutex_unlock(&c->btree_reserve_cache_lock);

	spin_lock(&c->freelist_lock);
	i = 0;
	while (i < c->open_buckets_partial_nr) {
		struct open_bucket *ob =
			c->open_buckets + c->open_buckets_partial[i];

		if (should_drop_bucket(ob, c, ca, ec)) {
			--c->open_buckets_partial_nr;
			swap(c->open_buckets_partial[i],
			     c->open_buckets_partial[c->open_buckets_partial_nr]);

			ob->on_partial_list = false;

			rcu_read_lock();
			bch2_dev_rcu(c, ob->dev)->nr_partial_buckets--;
			rcu_read_unlock();

			spin_unlock(&c->freelist_lock);
			bch2_open_bucket_put(c, ob);
			spin_lock(&c->freelist_lock);
		} else {
			i++;
		}
	}
	spin_unlock(&c->freelist_lock);

	bch2_ec_stop_dev(c, ca);
}

static inline struct hlist_head *writepoint_hash(struct bch_fs *c,
						 unsigned long write_point)
{
	unsigned hash =
		hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));

	return &c->write_points_hash[hash];
}

static struct write_point *__writepoint_find(struct hlist_head *head,
					     unsigned long write_point)
{
	struct write_point *wp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(wp, head, node)
		if (wp->write_point == write_point)
			goto out;
	wp = NULL;
out:
	rcu_read_unlock();
	return wp;
}

static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor)
{
	u64 stranded	= c->write_points_nr * c->bucket_size_max;
	u64 free	= bch2_fs_usage_read_short(c).free;

	return stranded * factor > free;
}
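
/*
 * Note the hysteresis: we add write points only while stranded space stays
 * below 1/32nd of free space (factor 32 below), but don't reclaim them until
 * it exceeds 1/8th (factor 8 in try_decrease_writepoints()), so the count
 * doesn't thrash.
 */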

static bool try_increase_writepoints(struct bch_fs *c)
{
	struct write_point *wp;

	if (c->write_points_nr == ARRAY_SIZE(c->write_points) ||
	    too_many_writepoints(c, 32))
		return false;

	wp = c->write_points + c->write_points_nr++;
	hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point));
	return true;
}

static bool try_decrease_writepoints(struct btree_trans *trans, unsigned old_nr)
{
	struct bch_fs *c = trans->c;
	struct write_point *wp;
	struct open_bucket *ob;
	unsigned i;

	mutex_lock(&c->write_points_hash_lock);
	if (c->write_points_nr < old_nr) {
		mutex_unlock(&c->write_points_hash_lock);
		return true;
	}

	if (c->write_points_nr == 1 ||
	    !too_many_writepoints(c, 8)) {
		mutex_unlock(&c->write_points_hash_lock);
		return false;
	}

	wp = c->write_points + --c->write_points_nr;

	hlist_del_rcu(&wp->node);
	mutex_unlock(&c->write_points_hash_lock);

	bch2_trans_mutex_lock_norelock(trans, &wp->lock);
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		open_bucket_free_unused(c, ob);
	wp->ptrs.nr = 0;
	mutex_unlock(&wp->lock);
	return true;
}

static struct write_point *writepoint_find(struct btree_trans *trans,
					   unsigned long write_point)
{
	struct bch_fs *c = trans->c;
	struct write_point *wp, *oldest;
	struct hlist_head *head;

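	/*
	 * An even specifier is a direct pointer to a write_point (see
	 * writepoint_ptr()); hashed specifiers have the low bit set:
	 */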
	if (!(write_point & 1UL)) {
		wp = (struct write_point *) write_point;
		bch2_trans_mutex_lock_norelock(trans, &wp->lock);
		return wp;
	}

	head = writepoint_hash(c, write_point);
restart_find:
	wp = __writepoint_find(head, write_point);
	if (wp) {
lock_wp:
		bch2_trans_mutex_lock_norelock(trans, &wp->lock);
		if (wp->write_point == write_point)
			goto out;
		mutex_unlock(&wp->lock);
		goto restart_find;
	}
restart_find_oldest:
	oldest = NULL;
	for (wp = c->write_points;
	     wp < c->write_points + c->write_points_nr; wp++)
		if (!oldest || time_before64(wp->last_used, oldest->last_used))
			oldest = wp;

	bch2_trans_mutex_lock_norelock(trans, &oldest->lock);
	bch2_trans_mutex_lock_norelock(trans, &c->write_points_hash_lock);
	if (oldest >= c->write_points + c->write_points_nr ||
	    try_increase_writepoints(c)) {
		mutex_unlock(&c->write_points_hash_lock);
		mutex_unlock(&oldest->lock);
		goto restart_find_oldest;
	}

	wp = __writepoint_find(head, write_point);
	if (wp && wp != oldest) {
		mutex_unlock(&c->write_points_hash_lock);
		mutex_unlock(&oldest->lock);
		goto lock_wp;
	}

	wp = oldest;
	hlist_del_rcu(&wp->node);
	wp->write_point = write_point;
	hlist_add_head_rcu(&wp->node, head);
	mutex_unlock(&c->write_points_hash_lock);
out:
	wp->last_used = local_clock();
	return wp;
}

static noinline void
deallocate_extra_replicas(struct bch_fs *c,
			  struct open_buckets *ptrs,
			  struct open_buckets *ptrs_no_use,
			  unsigned extra_replicas)
{
	struct open_buckets ptrs2 = { 0 };
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, ptrs, ob, i) {
		unsigned d = ob_dev(c, ob)->mi.durability;

		if (d && d <= extra_replicas) {
			extra_replicas -= d;
			ob_push(c, ptrs_no_use, ob);
		} else {
			ob_push(c, &ptrs2, ob);
		}
	}

	*ptrs = ptrs2;
}

/*
 * Get us an open_bucket we can allocate from, return with it locked:
 */
int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
			     unsigned target,
			     unsigned erasure_code,
			     struct write_point_specifier write_point,
			     struct bch_devs_list *devs_have,
			     unsigned nr_replicas,
			     unsigned nr_replicas_required,
			     enum bch_watermark watermark,
			     enum bch_write_flags flags,
			     struct closure *cl,
			     struct write_point **wp_ret)
{
	struct bch_fs *c = trans->c;
	struct write_point *wp;
	struct open_bucket *ob;
	struct open_buckets ptrs;
	unsigned nr_effective, write_points_nr;
	bool have_cache;
	int ret;
	int i;

	if (!IS_ENABLED(CONFIG_BCACHEFS_ERASURE_CODING))
		erasure_code = false;

	BUG_ON(!nr_replicas || !nr_replicas_required);
retry:
	ptrs.nr		= 0;
	nr_effective	= 0;
	write_points_nr = c->write_points_nr;
	have_cache	= false;

	*wp_ret = wp = writepoint_find(trans, write_point.v);

	ret = bch2_trans_relock(trans);
	if (ret)
		goto err;

	/* metadata may not allocate on cache devices: */
	if (wp->data_type != BCH_DATA_user)
		have_cache = true;

	if (target && !(flags & BCH_WRITE_only_specified_devs)) {
		ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
					      target, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, watermark,
					      flags, NULL);
		if (!ret ||
		    bch2_err_matches(ret, BCH_ERR_transaction_restart))
			goto alloc_done;

		/* Don't retry from all devices if we're out of open buckets: */
		if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty)) {
			int ret2 = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
					      target, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, watermark,
					      flags, cl);
			if (!ret2 ||
			    bch2_err_matches(ret2, BCH_ERR_transaction_restart) ||
			    bch2_err_matches(ret2, BCH_ERR_open_buckets_empty)) {
				ret = ret2;
				goto alloc_done;
			}
		}

		/*
		 * Only try to allocate cache (durability = 0 devices) from the
		 * specified target:
		 */
		have_cache = true;

		ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
					      0, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, watermark,
					      flags, cl);
	} else {
		ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
					      target, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, watermark,
					      flags, cl);
	}
alloc_done:
	BUG_ON(!ret && nr_effective < nr_replicas);

	if (erasure_code && !ec_open_bucket(c, &ptrs))
		pr_debug("failed to get ec bucket: ret %i", ret);

	if (ret == -BCH_ERR_insufficient_devices &&
	    nr_effective >= nr_replicas_required)
		ret = 0;

	if (ret)
		goto err;

	if (nr_effective > nr_replicas)
		deallocate_extra_replicas(c, &ptrs, &wp->ptrs, nr_effective - nr_replicas);

	/* Free buckets we didn't use: */
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		open_bucket_free_unused(c, ob);

	wp->ptrs = ptrs;

	wp->sectors_free = UINT_MAX;

	open_bucket_for_each(c, &wp->ptrs, ob, i) {
		/*
		 * Ensure proper write alignment - either due to misaligned
		 * bucket sizes (from buggy bcachefs-tools), or writes that mix
		 * logical/physical alignment:
		 */
		struct bch_dev *ca = ob_dev(c, ob);
		u64 offset = bucket_to_sector(ca, ob->bucket) +
			ca->mi.bucket_size -
			ob->sectors_free;
		unsigned align = round_up(offset, block_sectors(c)) - offset;

		ob->sectors_free = max_t(int, 0, ob->sectors_free - align);

		wp->sectors_free = min(wp->sectors_free, ob->sectors_free);
	}

	wp->sectors_free = rounddown(wp->sectors_free, block_sectors(c));

	/* Did alignment use up space in an open_bucket? */
	if (unlikely(!wp->sectors_free)) {
		bch2_alloc_sectors_done(c, wp);
		goto retry;
	}

	BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);

	return 0;
err:
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		if (ptrs.nr < ARRAY_SIZE(ptrs.v))
			ob_push(c, &ptrs, ob);
		else
			open_bucket_free_unused(c, ob);
	wp->ptrs = ptrs;

	mutex_unlock(&wp->lock);

	if (bch2_err_matches(ret, BCH_ERR_freelist_empty) &&
	    try_decrease_writepoints(trans, write_points_nr))
		goto retry;

	if (cl && bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
		ret = -BCH_ERR_bucket_alloc_blocked;

	if (cl && !(flags & BCH_WRITE_alloc_nowait) &&
	    bch2_err_matches(ret, BCH_ERR_freelist_empty))
		ret = -BCH_ERR_bucket_alloc_blocked;

	return ret;
}

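/*
 * Illustrative usage (sketch, not code from this file): a write path pairs
 * bch2_alloc_sectors_start_trans() with append_ptrs()/done():
 *
 *	struct write_point *wp;
 *	int ret = bch2_alloc_sectors_start_trans(trans, target, false,
 *			writepoint_hashed((unsigned long) current),
 *			&devs_have, nr_replicas, nr_replicas_required,
 *			watermark, flags, cl, &wp);
 *	if (!ret) {
 *		sectors = min(sectors, wp->sectors_free);
 *		bch2_alloc_sectors_append_ptrs(c, wp, &k->k_i, sectors, false);
 *		bch2_alloc_sectors_done(c, wp);
 *	}
 */
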
struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob)
{
	struct bch_dev *ca = ob_dev(c, ob);

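	/*
	 * The pointer's offset is the first sector of the bucket that hasn't
	 * been handed out yet; sectors_free counts down from bucket_size as
	 * space is allocated:
	 */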
	return (struct bch_extent_ptr) {
		.type	= 1 << BCH_EXTENT_ENTRY_ptr,
		.gen	= ob->gen,
		.dev	= ob->dev,
		.offset	= bucket_to_sector(ca, ob->bucket) +
			ca->mi.bucket_size -
			ob->sectors_free,
	};
}

/*
 * Append pointers to the space we just allocated to @k, and mark @sectors space
 * as allocated out of @ob:
 */
void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
				    struct bkey_i *k, unsigned sectors,
				    bool cached)
{
	bch2_alloc_sectors_append_ptrs_inlined(c, wp, k, sectors, cached);
}

/*
 * Finished allocating sectors: release the write point, keeping any
 * open_buckets that still have space on it:
 */
void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
{
	bch2_alloc_sectors_done_inlined(c, wp);
}

static inline void writepoint_init(struct write_point *wp,
				   enum bch_data_type type)
{
	mutex_init(&wp->lock);
	wp->data_type = type;

	INIT_WORK(&wp->index_update_work, bch2_write_point_do_index_updates);
	INIT_LIST_HEAD(&wp->writes);
	spin_lock_init(&wp->writes_lock);
}

void bch2_fs_allocator_foreground_init(struct bch_fs *c)
{
	struct open_bucket *ob;
	struct write_point *wp;

	mutex_init(&c->write_points_hash_lock);
	c->write_points_nr = ARRAY_SIZE(c->write_points);

	/* open bucket 0 is a sentinel NULL: */
	spin_lock_init(&c->open_buckets[0].lock);

	for (ob = c->open_buckets + 1;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
		spin_lock_init(&ob->lock);
		c->open_buckets_nr_free++;

		ob->freelist = c->open_buckets_freelist;
		c->open_buckets_freelist = ob - c->open_buckets;
	}

	writepoint_init(&c->btree_write_point,		BCH_DATA_btree);
	writepoint_init(&c->rebalance_write_point,	BCH_DATA_user);
	writepoint_init(&c->copygc_write_point,		BCH_DATA_user);

	for (wp = c->write_points;
	     wp < c->write_points + c->write_points_nr; wp++) {
		writepoint_init(wp, BCH_DATA_user);

		wp->last_used	= local_clock();
		wp->write_point	= (unsigned long) wp;
		hlist_add_head_rcu(&wp->node,
				   writepoint_hash(c, wp->write_point));
	}
}

void bch2_open_bucket_to_text(struct printbuf *out, struct bch_fs *c, struct open_bucket *ob)
{
	struct bch_dev *ca = ob_dev(c, ob);
	unsigned data_type = ob->data_type;
	barrier(); /* READ_ONCE() doesn't work on bitfields */

	prt_printf(out, "%zu ref %u ",
		   ob - c->open_buckets,
		   atomic_read(&ob->pin));
	bch2_prt_data_type(out, data_type);
	prt_printf(out, " %u:%llu gen %u allocated %u/%u",
		   ob->dev, ob->bucket, ob->gen,
		   ca->mi.bucket_size - ob->sectors_free, ca->mi.bucket_size);
	if (ob->ec)
		prt_printf(out, " ec idx %llu", ob->ec->idx);
	if (ob->on_partial_list)
		prt_str(out, " partial");
	prt_newline(out);
}

void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c,
			       struct bch_dev *ca)
{
	struct open_bucket *ob;

	out->atomic++;

	for (ob = c->open_buckets;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
	     ob++) {
		spin_lock(&ob->lock);
		if (ob->valid && (!ca || ob->dev == ca->dev_idx))
			bch2_open_bucket_to_text(out, c, ob);
		spin_unlock(&ob->lock);
	}

	--out->atomic;
}

void bch2_open_buckets_partial_to_text(struct printbuf *out, struct bch_fs *c)
{
	unsigned i;

	out->atomic++;
	spin_lock(&c->freelist_lock);

	for (i = 0; i < c->open_buckets_partial_nr; i++)
		bch2_open_bucket_to_text(out, c,
				c->open_buckets + c->open_buckets_partial[i]);

	spin_unlock(&c->freelist_lock);
	--out->atomic;
}

static const char * const bch2_write_point_states[] = {
#define x(n)	#n,
	WRITE_POINT_STATES()
#undef x
	NULL
};

static void bch2_write_point_to_text(struct printbuf *out, struct bch_fs *c,
				     struct write_point *wp)
{
	struct open_bucket *ob;
	unsigned i;

	prt_printf(out, "%lu: ", wp->write_point);
	prt_human_readable_u64(out, wp->sectors_allocated << 9);

	prt_printf(out, " last wrote: ");
	bch2_pr_time_units(out, sched_clock() - wp->last_used);

	for (i = 0; i < WRITE_POINT_STATE_NR; i++) {
		prt_printf(out, " %s: ", bch2_write_point_states[i]);
		bch2_pr_time_units(out, wp->time[i]);
	}

	prt_newline(out);

	printbuf_indent_add(out, 2);
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		bch2_open_bucket_to_text(out, c, ob);
	printbuf_indent_sub(out, 2);
}

void bch2_write_points_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct write_point *wp;

	prt_str(out, "Foreground write points\n");
	for (wp = c->write_points;
	     wp < c->write_points + ARRAY_SIZE(c->write_points);
	     wp++)
		bch2_write_point_to_text(out, c, wp);

	prt_str(out, "Copygc write point\n");
	bch2_write_point_to_text(out, c, &c->copygc_write_point);

	prt_str(out, "Rebalance write point\n");
	bch2_write_point_to_text(out, c, &c->rebalance_write_point);

	prt_str(out, "Btree write point\n");
	bch2_write_point_to_text(out, c, &c->btree_write_point);
}

void bch2_fs_alloc_debug_to_text(struct printbuf *out, struct bch_fs *c)
{
	unsigned nr[BCH_DATA_NR];

	memset(nr, 0, sizeof(nr));

	for (unsigned i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
		nr[c->open_buckets[i].data_type]++;

	printbuf_tabstops_reset(out);
	printbuf_tabstop_push(out, 24);

	prt_printf(out, "capacity\t%llu\n",		c->capacity);
	prt_printf(out, "reserved\t%llu\n",		c->reserved);
	prt_printf(out, "hidden\t%llu\n",		percpu_u64_get(&c->usage->hidden));
	prt_printf(out, "btree\t%llu\n",		percpu_u64_get(&c->usage->btree));
	prt_printf(out, "data\t%llu\n",			percpu_u64_get(&c->usage->data));
	prt_printf(out, "cached\t%llu\n",		percpu_u64_get(&c->usage->cached));
	prt_printf(out, "reserved\t%llu\n",		percpu_u64_get(&c->usage->reserved));
	prt_printf(out, "online_reserved\t%llu\n",	percpu_u64_get(c->online_reserved));
	prt_printf(out, "nr_inodes\t%llu\n",		percpu_u64_get(&c->usage->nr_inodes));

	prt_newline(out);
	prt_printf(out, "freelist_wait\t%s\n",			c->freelist_wait.list.first ? "waiting" : "empty");
	prt_printf(out, "open buckets allocated\t%i\n",		OPEN_BUCKETS_COUNT - c->open_buckets_nr_free);
	prt_printf(out, "open buckets total\t%u\n",		OPEN_BUCKETS_COUNT);
	prt_printf(out, "open_buckets_wait\t%s\n",		c->open_buckets_wait.list.first ? "waiting" : "empty");
	prt_printf(out, "open_buckets_btree\t%u\n",		nr[BCH_DATA_btree]);
	prt_printf(out, "open_buckets_user\t%u\n",		nr[BCH_DATA_user]);
	prt_printf(out, "btree reserve cache\t%u\n",		c->btree_reserve_cache_nr);
}

void bch2_dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
{
	struct bch_fs *c = ca->fs;
	struct bch_dev_usage_full stats = bch2_dev_usage_full_read(ca);
	unsigned nr[BCH_DATA_NR];

	memset(nr, 0, sizeof(nr));

	for (unsigned i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
		nr[c->open_buckets[i].data_type]++;

	bch2_dev_usage_to_text(out, ca, &stats);

	prt_newline(out);

	prt_printf(out, "reserves:\n");
	for (unsigned i = 0; i < BCH_WATERMARK_NR; i++)
		prt_printf(out, "%s\t%llu\r\n", bch2_watermarks[i], bch2_dev_buckets_reserved(ca, i));

	prt_newline(out);

	printbuf_tabstops_reset(out);
	printbuf_tabstop_push(out, 12);
	printbuf_tabstop_push(out, 16);

	prt_printf(out, "open buckets\t%i\r\n",	ca->nr_open_buckets);
	prt_printf(out, "buckets to invalidate\t%llu\r\n",
		   should_invalidate_buckets(ca, bch2_dev_usage_read(ca)));
}

static noinline void bch2_print_allocator_stuck(struct bch_fs *c)
{
	struct printbuf buf = PRINTBUF;

	prt_printf(&buf, "Allocator stuck? Waited for %u seconds\n",
		   c->opts.allocator_stuck_timeout);

	prt_printf(&buf, "Allocator debug:\n");
	printbuf_indent_add(&buf, 2);
	bch2_fs_alloc_debug_to_text(&buf, c);
	printbuf_indent_sub(&buf, 2);
	prt_newline(&buf);

	for_each_online_member(c, ca) {
		prt_printf(&buf, "Dev %u:\n", ca->dev_idx);
		printbuf_indent_add(&buf, 2);
		bch2_dev_alloc_debug_to_text(&buf, ca);
		printbuf_indent_sub(&buf, 2);
		prt_newline(&buf);
	}

	prt_printf(&buf, "Copygc debug:\n");
	printbuf_indent_add(&buf, 2);
	bch2_copygc_wait_to_text(&buf, c);
	printbuf_indent_sub(&buf, 2);
	prt_newline(&buf);

	prt_printf(&buf, "Journal debug:\n");
	printbuf_indent_add(&buf, 2);
	bch2_journal_debug_to_text(&buf, &c->journal);
	printbuf_indent_sub(&buf, 2);

	bch2_print_string_as_lines(KERN_ERR, buf.buf);
	printbuf_exit(&buf);
}

static inline unsigned allocator_wait_timeout(struct bch_fs *c)
{
	if (c->allocator_last_stuck &&
	    time_after(c->allocator_last_stuck + HZ * 60 * 2, jiffies))
		return 0;

	return c->opts.allocator_stuck_timeout * HZ;
}

void __bch2_wait_on_allocator(struct bch_fs *c, struct closure *cl)
{
	unsigned t = allocator_wait_timeout(c);

	if (t && closure_sync_timeout(cl, t)) {
		c->allocator_last_stuck = jiffies;
		bch2_print_allocator_stuck(c);
	}

	closure_sync(cl);
}