// SPDX-License-Identifier: GPL-2.0
/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "backpointers.h"
#include "bset.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "buckets.h"
#include "buckets_waiting_for_journal.h"
#include "disk_accounting.h"
#include "ec.h"
#include "error.h"
#include "inode.h"
#include "movinggc.h"
#include "rebalance.h"
#include "recovery.h"
#include "recovery_passes.h"
#include "reflink.h"
#include "replicas.h"
#include "subvolume.h"
#include "trace.h"

#include <linux/preempt.h>

void bch2_dev_usage_read_fast(struct bch_dev *ca, struct bch_dev_usage *usage)
{
	for (unsigned i = 0; i < BCH_DATA_NR; i++)
		usage->buckets[i] = percpu_u64_get(&ca->usage->d[i].buckets);
}

void bch2_dev_usage_full_read_fast(struct bch_dev *ca, struct bch_dev_usage_full *usage)
{
	memset(usage, 0, sizeof(*usage));
	acc_u64s_percpu((u64 *) usage, (u64 __percpu *) ca->usage,
			sizeof(struct bch_dev_usage_full) / sizeof(u64));
}

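/*
 * Pad a reservation by roughly 1/2^RESERVE_FACTOR, so that reserved space is
 * counted as slightly more than its nominal size when computing usage:
 */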
static u64 reserve_factor(u64 r)
{
	return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
}

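/*
 * Unlocked summary of filesystem capacity/used/free; callers needing a
 * consistent view use bch2_fs_usage_read_short() below, which takes
 * mark_lock:
 */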
static struct bch_fs_usage_short
__bch2_fs_usage_read_short(struct bch_fs *c)
{
	struct bch_fs_usage_short ret;
	u64 data, reserved;

	ret.capacity = c->capacity -
		percpu_u64_get(&c->usage->hidden);

	data		= percpu_u64_get(&c->usage->data) +
		percpu_u64_get(&c->usage->btree);
	reserved	= percpu_u64_get(&c->usage->reserved) +
		percpu_u64_get(c->online_reserved);

	ret.used	= min(ret.capacity, data + reserve_factor(reserved));
	ret.free	= ret.capacity - ret.used;

	ret.nr_inodes	= percpu_u64_get(&c->usage->nr_inodes);

	return ret;
}

struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *c)
{
	struct bch_fs_usage_short ret;

	percpu_down_read(&c->mark_lock);
	ret = __bch2_fs_usage_read_short(c);
	percpu_up_read(&c->mark_lock);

	return ret;
}

void bch2_dev_usage_to_text(struct printbuf *out,
			    struct bch_dev *ca,
			    struct bch_dev_usage_full *usage)
{
	if (out->nr_tabstops < 5) {
		printbuf_tabstops_reset(out);
		printbuf_tabstop_push(out, 12);
		printbuf_tabstop_push(out, 16);
		printbuf_tabstop_push(out, 16);
		printbuf_tabstop_push(out, 16);
		printbuf_tabstop_push(out, 16);
	}

	prt_printf(out, "\tbuckets\rsectors\rfragmented\r\n");

	for (unsigned i = 0; i < BCH_DATA_NR; i++) {
		bch2_prt_data_type(out, i);
		prt_printf(out, "\t%llu\r%llu\r%llu\r\n",
			   usage->d[i].buckets,
			   usage->d[i].sectors,
			   usage->d[i].fragmented);
	}

	prt_printf(out, "capacity\t%llu\r\n", ca->mi.nbuckets);
}

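/*
 * Check a single extent pointer against the in-memory (GC) state of the
 * bucket it points to: missing devices/buckets, bad generation numbers and
 * stripe mismatches. Inconsistencies in the bucket are repaired directly;
 * when the key itself needs rewriting, *do_update is set for the caller:
 */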
static int bch2_check_fix_ptr(struct btree_trans *trans,
			      struct bkey_s_c k,
			      struct extent_ptr_decoded p,
			      const union bch_extent_entry *entry,
			      bool *do_update)
{
	struct bch_fs *c = trans->c;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev);
	if (!ca) {
		if (fsck_err_on(p.ptr.dev != BCH_SB_MEMBER_INVALID,
				trans, ptr_to_invalid_device,
				"pointer to missing device %u\n"
				"while marking %s",
				p.ptr.dev,
				(printbuf_reset(&buf),
				 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			*do_update = true;
		return 0;
	}

	struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
	if (!g) {
		if (fsck_err(trans, ptr_to_invalid_device,
			     "pointer to invalid bucket on device %u\n"
			     "while marking %s",
			     p.ptr.dev,
			     (printbuf_reset(&buf),
			      bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			*do_update = true;
		goto out;
	}

	enum bch_data_type data_type = bch2_bkey_ptr_data_type(k, p, entry);

	if (fsck_err_on(!g->gen_valid,
			trans, ptr_to_missing_alloc_key,
			"bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
			bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
			p.ptr.gen,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		if (!p.ptr.cached) {
			g->gen_valid		= true;
			g->gen			= p.ptr.gen;
		} else {
			*do_update = true;
		}
	}

	if (fsck_err_on(gen_cmp(p.ptr.gen, g->gen) > 0,
			trans, ptr_gen_newer_than_bucket_gen,
			"bucket %u:%zu data type %s ptr gen in the future: %u > %u\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
			bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
			p.ptr.gen, g->gen,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		if (!p.ptr.cached &&
		    (g->data_type != BCH_DATA_btree ||
		     data_type == BCH_DATA_btree)) {
			g->gen_valid		= true;
			g->gen			= p.ptr.gen;
			g->data_type		= 0;
			g->stripe_sectors	= 0;
			g->dirty_sectors	= 0;
			g->cached_sectors	= 0;
		} else {
			*do_update = true;
		}
	}

	if (fsck_err_on(gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX,
			trans, ptr_gen_newer_than_bucket_gen,
			"bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
			bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
			p.ptr.gen,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
		*do_update = true;

	if (fsck_err_on(!p.ptr.cached && gen_cmp(p.ptr.gen, g->gen) < 0,
			trans, stale_dirty_ptr,
			"bucket %u:%zu data type %s stale dirty ptr: %u < %u\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
			bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
			p.ptr.gen, g->gen,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
		*do_update = true;

	if (data_type != BCH_DATA_btree && p.ptr.gen != g->gen)
		goto out;

	if (fsck_err_on(bucket_data_type_mismatch(g->data_type, data_type),
			trans, ptr_bucket_data_type_mismatch,
			"bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
			bch2_data_type_str(g->data_type),
			bch2_data_type_str(data_type),
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		if (data_type == BCH_DATA_btree) {
			g->gen_valid		= true;
			g->gen			= p.ptr.gen;
			g->data_type		= data_type;
			g->stripe_sectors	= 0;
			g->dirty_sectors	= 0;
			g->cached_sectors	= 0;
		} else {
			*do_update = true;
		}
	}

	if (p.has_ec) {
		struct gc_stripe *m = genradix_ptr(&c->gc_stripes, p.ec.idx);

		if (fsck_err_on(!m || !m->alive,
				trans, ptr_to_missing_stripe,
				"pointer to nonexistent stripe %llu\n"
				"while marking %s",
				(u64) p.ec.idx,
				(printbuf_reset(&buf),
				 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			*do_update = true;

		if (fsck_err_on(m && m->alive && !bch2_ptr_matches_stripe_m(m, p),
				trans, ptr_to_incorrect_stripe,
				"pointer does not match stripe %llu\n"
				"while marking %s",
				(u64) p.ec.idx,
				(printbuf_reset(&buf),
				 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			*do_update = true;
	}
out:
fsck_err:
	bch2_dev_put(ca);
	printbuf_exit(&buf);
	return ret;
}

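/*
 * Check every pointer in @k and, if any were flagged for repair, emit an
 * updated key with bad pointers dropped (or, for btree node pointers, their
 * generations corrected):
 */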
int bch2_check_fix_ptrs(struct btree_trans *trans,
			enum btree_id btree, unsigned level, struct bkey_s_c k,
			enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c ptrs_c = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry_c;
	struct extent_ptr_decoded p = { 0 };
	bool do_update = false;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	bkey_for_each_ptr_decode(k.k, ptrs_c, p, entry_c) {
		ret = bch2_check_fix_ptr(trans, k, p, entry_c, &do_update);
		if (ret)
			goto err;
	}

	if (do_update) {
		if (flags & BTREE_TRIGGER_is_root) {
			bch_err(c, "cannot update btree roots yet");
			ret = -EINVAL;
			goto err;
		}

		struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
		ret = PTR_ERR_OR_ZERO(new);
		if (ret)
			goto err;

		rcu_read_lock();
		bch2_bkey_drop_ptrs(bkey_i_to_s(new), ptr, !bch2_dev_exists(c, ptr->dev));
		rcu_read_unlock();

		if (level) {
			/*
			 * We don't want to drop btree node pointers - if the
			 * btree node isn't there anymore, the read path will
			 * sort it out:
			 */
			struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
			rcu_read_lock();
			bkey_for_each_ptr(ptrs, ptr) {
				struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
				struct bucket *g = PTR_GC_BUCKET(ca, ptr);

				ptr->gen = g->gen;
			}
			rcu_read_unlock();
		} else {
			struct bkey_ptrs ptrs;
			union bch_extent_entry *entry;

			rcu_read_lock();
restart_drop_ptrs:
			ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
			bkey_for_each_ptr_decode(bkey_i_to_s(new).k, ptrs, p, entry) {
				struct bch_dev *ca = bch2_dev_rcu(c, p.ptr.dev);
				struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
				enum bch_data_type data_type = bch2_bkey_ptr_data_type(bkey_i_to_s_c(new), p, entry);

				if ((p.ptr.cached &&
				     (!g->gen_valid || gen_cmp(p.ptr.gen, g->gen) > 0)) ||
				    (!p.ptr.cached &&
				     gen_cmp(p.ptr.gen, g->gen) < 0) ||
				    gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX ||
				    (g->data_type &&
				     g->data_type != data_type)) {
					bch2_bkey_drop_ptr(bkey_i_to_s(new), &entry->ptr);
					goto restart_drop_ptrs;
				}
			}
			rcu_read_unlock();
again:
			ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
			bkey_extent_entry_for_each(ptrs, entry) {
				if (extent_entry_type(entry) == BCH_EXTENT_ENTRY_stripe_ptr) {
					struct gc_stripe *m = genradix_ptr(&c->gc_stripes,
									entry->stripe_ptr.idx);
					union bch_extent_entry *next_ptr;

					bkey_extent_entry_for_each_from(ptrs, next_ptr, entry)
						if (extent_entry_type(next_ptr) == BCH_EXTENT_ENTRY_ptr)
							goto found;
					next_ptr = NULL;
found:
					if (!next_ptr) {
						bch_err(c, "aieee, found stripe ptr with no data ptr");
						continue;
					}

					if (!m || !m->alive ||
					    !__bch2_ptr_matches_stripe(&m->ptrs[entry->stripe_ptr.block],
								       &next_ptr->ptr,
								       m->sectors)) {
						bch2_bkey_extent_entry_drop(new, entry);
						goto again;
					}
				}
			}
		}

		if (0) {
			printbuf_reset(&buf);
			bch2_bkey_val_to_text(&buf, c, k);
			bch_info(c, "updated %s", buf.buf);

			printbuf_reset(&buf);
			bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(new));
			bch_info(c, "new key %s", buf.buf);
		}

		struct btree_iter iter;
		bch2_trans_node_iter_init(trans, &iter, btree, new->k.p, 0, level,
					  BTREE_ITER_intent|BTREE_ITER_all_snapshots);
		ret =   bch2_btree_iter_traverse(trans, &iter) ?:
			bch2_trans_update(trans, &iter, new,
					  BTREE_UPDATE_internal_snapshot_node|
					  BTREE_TRIGGER_norun);
		bch2_trans_iter_exit(trans, &iter);
		if (ret)
			goto err;

		if (level)
			bch2_btree_node_update_key_early(trans, btree, level - 1, k, new);
	}
err:
	printbuf_exit(&buf);
	return ret;
}

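/*
 * Common error path for bch2_bucket_ref_update(): count the fsck error and
 * schedule the check_allocations recovery pass; if this was an insert - a
 * reference we can't simply ignore - also flag the filesystem inconsistent:
 */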
static int bucket_ref_update_err(struct btree_trans *trans, struct printbuf *buf,
				 struct bkey_s_c k, bool insert, enum bch_sb_error_id id)
{
	struct bch_fs *c = trans->c;
	bool repeat = false, print = true, suppress = false;

	prt_printf(buf, "\nwhile marking ");
	bch2_bkey_val_to_text(buf, c, k);
	prt_newline(buf);

	__bch2_count_fsck_err(c, id, buf->buf, &repeat, &print, &suppress);

	int ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_allocations);

	if (insert) {
		print = true;
		suppress = false;

		bch2_trans_updates_to_text(buf, trans);
		__bch2_inconsistent_error(c, buf);
		ret = -BCH_ERR_bucket_ref_update;
	}

	if (suppress)
		prt_printf(buf, "Ratelimiting new instances of previous error\n");
	if (print)
		bch2_print_string_as_lines(KERN_ERR, buf->buf);
	return ret;
}

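/*
 * Apply a sector count change from an extent pointer to the bucket it points
 * to, after validating the pointer against the bucket's generation and data
 * type. Returns 1 if the pointer was a stale cached pointer (reference can
 * be ignored), 0 on success, or a negative error code:
 */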
int bch2_bucket_ref_update(struct btree_trans *trans, struct bch_dev *ca,
			   struct bkey_s_c k,
			   const struct bch_extent_ptr *ptr,
			   s64 sectors, enum bch_data_type ptr_data_type,
			   u8 b_gen, u8 bucket_data_type,
			   u32 *bucket_sectors)
{
	struct bch_fs *c = trans->c;
	size_t bucket_nr = PTR_BUCKET_NR(ca, ptr);
	struct printbuf buf = PRINTBUF;
	bool inserting = sectors > 0;
	int ret = 0;

	BUG_ON(!sectors);

	if (unlikely(gen_after(ptr->gen, b_gen))) {
		bch2_log_msg_start(c, &buf);
		prt_printf(&buf,
			"bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen",
			ptr->dev, bucket_nr, b_gen,
			bch2_data_type_str(bucket_data_type ?: ptr_data_type),
			ptr->gen);

		ret = bucket_ref_update_err(trans, &buf, k, inserting,
					    BCH_FSCK_ERR_ptr_gen_newer_than_bucket_gen);
		goto out;
	}

	if (unlikely(gen_cmp(b_gen, ptr->gen) > BUCKET_GC_GEN_MAX)) {
		bch2_log_msg_start(c, &buf);
		prt_printf(&buf,
			"bucket %u:%zu gen %u data type %s: ptr gen %u too stale",
			ptr->dev, bucket_nr, b_gen,
			bch2_data_type_str(bucket_data_type ?: ptr_data_type),
			ptr->gen);

		ret = bucket_ref_update_err(trans, &buf, k, inserting,
					    BCH_FSCK_ERR_ptr_too_stale);
		goto out;
	}

	if (b_gen != ptr->gen && ptr->cached) {
		ret = 1;
		goto out;
	}

	if (unlikely(b_gen != ptr->gen)) {
		bch2_log_msg_start(c, &buf);
		prt_printf(&buf,
			"bucket %u:%zu gen %u (mem gen %u) data type %s: stale dirty ptr (gen %u)",
			ptr->dev, bucket_nr, b_gen,
			bucket_gen_get(ca, bucket_nr),
			bch2_data_type_str(bucket_data_type ?: ptr_data_type),
			ptr->gen);

		ret = bucket_ref_update_err(trans, &buf, k, inserting,
					    BCH_FSCK_ERR_stale_dirty_ptr);
		goto out;
	}

	if (unlikely(bucket_data_type_mismatch(bucket_data_type, ptr_data_type))) {
		bch2_log_msg_start(c, &buf);
		prt_printf(&buf, "bucket %u:%zu gen %u different types of data in same bucket: %s, %s",
			   ptr->dev, bucket_nr, b_gen,
			   bch2_data_type_str(bucket_data_type),
			   bch2_data_type_str(ptr_data_type));

		ret = bucket_ref_update_err(trans, &buf, k, inserting,
					    BCH_FSCK_ERR_ptr_bucket_data_type_mismatch);
		goto out;
	}

	if (unlikely((u64) *bucket_sectors + sectors > U32_MAX)) {
		bch2_log_msg_start(c, &buf);
		prt_printf(&buf,
			"bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U32_MAX",
			ptr->dev, bucket_nr, b_gen,
			bch2_data_type_str(bucket_data_type ?: ptr_data_type),
			*bucket_sectors, sectors);

		ret = bucket_ref_update_err(trans, &buf, k, inserting,
					    BCH_FSCK_ERR_bucket_sector_count_overflow);
		sectors = -*bucket_sectors;
		goto out;
	}

	*bucket_sectors += sectors;
out:
	printbuf_exit(&buf);
	return ret;
}

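/*
 * Fold the usage delta accumulated by this transaction into the filesystem
 * usage counters, consuming the transaction's disk reservation. If usage
 * grew by more than was reserved, warn (once) and clamp sectors_available:
 */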
void bch2_trans_account_disk_usage_change(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
	static int warned_disk_usage = 0;
	bool warn = false;

	percpu_down_read(&c->mark_lock);
	struct bch_fs_usage_base *src = &trans->fs_usage_delta;

	s64 added = src->btree + src->data + src->reserved;

	/*
	 * Not allowed to reduce sectors_available except by getting a
	 * reservation:
	 */
	s64 should_not_have_added = added - (s64) disk_res_sectors;
	if (unlikely(should_not_have_added > 0)) {
		u64 old, new;

		old = atomic64_read(&c->sectors_available);
		do {
			new = max_t(s64, 0, old - should_not_have_added);
		} while (!atomic64_try_cmpxchg(&c->sectors_available,
					       &old, new));

		added -= should_not_have_added;
		warn = true;
	}

	if (added > 0) {
		trans->disk_res->sectors -= added;
		this_cpu_sub(*c->online_reserved, added);
	}

	preempt_disable();
	struct bch_fs_usage_base *dst = this_cpu_ptr(c->usage);
	acc_u64s((u64 *) dst, (u64 *) src, sizeof(*src) / sizeof(u64));
	preempt_enable();
	percpu_up_read(&c->mark_lock);

	if (unlikely(warn) && !xchg(&warned_disk_usage, 1))
		bch2_trans_inconsistent(trans,
					"disk usage increased %lli more than %llu sectors reserved",
					should_not_have_added, disk_res_sectors);
}

/* KEY_TYPE_extent: */

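/*
 * Update the appropriate sector count in the bucket's alloc info - stripe,
 * dirty or cached, depending on the pointer - and, on insert, the bucket's
 * data type:
 */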
static int __mark_pointer(struct btree_trans *trans, struct bch_dev *ca,
			  struct bkey_s_c k,
			  const struct extent_ptr_decoded *p,
			  s64 sectors, enum bch_data_type ptr_data_type,
			  struct bch_alloc_v4 *a,
			  bool insert)
{
	u32 *dst_sectors = p->has_ec	? &a->stripe_sectors :
		!p->ptr.cached		? &a->dirty_sectors :
					  &a->cached_sectors;
	int ret = bch2_bucket_ref_update(trans, ca, k, &p->ptr, sectors, ptr_data_type,
					 a->gen, a->data_type, dst_sectors);

	if (ret)
		return ret;
	if (insert)
		alloc_data_type_set(a, ptr_data_type);
	return 0;
}

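/*
 * Trigger for a single extent pointer: updates the bucket's alloc key
 * (transactional) or in-memory GC bucket (gc), plus the backpointer.
 * *sectors is set to the signed change in disk sectors for this pointer:
 */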
static int bch2_trigger_pointer(struct btree_trans *trans,
			enum btree_id btree_id, unsigned level,
			struct bkey_s_c k, struct extent_ptr_decoded p,
			const union bch_extent_entry *entry,
			s64 *sectors,
			enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	bool insert = !(flags & BTREE_TRIGGER_overwrite);
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	struct bkey_i_backpointer bp;
	bch2_extent_ptr_to_bp(c, btree_id, level, k, p, entry, &bp);

	*sectors = insert ? bp.v.bucket_len : -(s64) bp.v.bucket_len;

	struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev);
	if (unlikely(!ca)) {
		if (insert && p.ptr.dev != BCH_SB_MEMBER_INVALID)
			ret = -BCH_ERR_trigger_pointer;
		goto err;
	}

	struct bpos bucket = PTR_BUCKET_POS(ca, &p.ptr);
	if (!bucket_valid(ca, bucket.offset)) {
		if (insert) {
			bch2_dev_bucket_missing(ca, bucket.offset);
			ret = -BCH_ERR_trigger_pointer;
		}
		goto err;
	}

	if (flags & BTREE_TRIGGER_transactional) {
		struct bkey_i_alloc_v4 *a = bch2_trans_start_alloc_update(trans, bucket, 0);
		ret = PTR_ERR_OR_ZERO(a) ?:
			__mark_pointer(trans, ca, k, &p, *sectors, bp.v.data_type, &a->v, insert);
		if (ret)
			goto err;

		ret = bch2_bucket_backpointer_mod(trans, k, &bp, insert);
		if (ret)
			goto err;
	}

	if (flags & BTREE_TRIGGER_gc) {
		struct bucket *g = gc_bucket(ca, bucket.offset);
		if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u\n  %s",
					    p.ptr.dev,
					    (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
			ret = -BCH_ERR_trigger_pointer;
			goto err;
		}

		bucket_lock(g);
		struct bch_alloc_v4 old = bucket_m_to_alloc(*g), new = old;
		ret = __mark_pointer(trans, ca, k, &p, *sectors, bp.v.data_type, &new, insert);
		alloc_to_bucket(g, new);
		bucket_unlock(g);

		if (!ret)
			ret = bch2_alloc_key_to_dev_counters(trans, ca, &old, &new, flags);
	}
err:
	bch2_dev_put(ca);
	printbuf_exit(&buf);
	return ret;
}

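/*
 * Account sectors referenced via an erasure coded pointer against the stripe
 * it points to: update the stripe's per-block sector count and the replicas
 * accounting derived from the stripe, in either the stripes btree
 * (transactional) or the in-memory GC stripe (gc):
 */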
static int bch2_trigger_stripe_ptr(struct btree_trans *trans,
				struct bkey_s_c k,
				struct extent_ptr_decoded p,
				enum bch_data_type data_type,
				s64 sectors,
				enum btree_iter_update_trigger_flags flags)
{
	if (flags & BTREE_TRIGGER_transactional) {
		struct btree_iter iter;
		struct bkey_i_stripe *s = bch2_bkey_get_mut_typed(trans, &iter,
				BTREE_ID_stripes, POS(0, p.ec.idx),
				BTREE_ITER_with_updates, stripe);
		int ret = PTR_ERR_OR_ZERO(s);
		if (unlikely(ret)) {
			bch2_trans_inconsistent_on(bch2_err_matches(ret, ENOENT), trans,
				"pointer to nonexistent stripe %llu",
				(u64) p.ec.idx);
			goto err;
		}

		if (!bch2_ptr_matches_stripe(&s->v, p)) {
			bch2_trans_inconsistent(trans,
				"stripe pointer doesn't match stripe %llu",
				(u64) p.ec.idx);
			ret = -BCH_ERR_trigger_stripe_pointer;
			goto err;
		}

		stripe_blockcount_set(&s->v, p.ec.block,
			stripe_blockcount_get(&s->v, p.ec.block) +
			sectors);

		struct disk_accounting_pos acc;
		memset(&acc, 0, sizeof(acc));
		acc.type = BCH_DISK_ACCOUNTING_replicas;
		bch2_bkey_to_replicas(&acc.replicas, bkey_i_to_s_c(&s->k_i));
		acc.replicas.data_type = data_type;
		ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, false);
err:
		bch2_trans_iter_exit(trans, &iter);
		return ret;
	}

	if (flags & BTREE_TRIGGER_gc) {
		struct bch_fs *c = trans->c;

		struct gc_stripe *m = genradix_ptr_alloc(&c->gc_stripes, p.ec.idx, GFP_KERNEL);
		if (!m) {
			bch_err(c, "error allocating memory for gc_stripes, idx %llu",
				(u64) p.ec.idx);
			return -BCH_ERR_ENOMEM_mark_stripe_ptr;
		}

		gc_stripe_lock(m);

		if (!m || !m->alive) {
			gc_stripe_unlock(m);
			struct printbuf buf = PRINTBUF;
			bch2_log_msg_start(c, &buf);
			prt_printf(&buf, "pointer to nonexistent stripe %llu\n  while marking ",
				   (u64) p.ec.idx);
			bch2_bkey_val_to_text(&buf, c, k);
			__bch2_inconsistent_error(c, &buf);
			bch2_print_string_as_lines(KERN_ERR, buf.buf);
			printbuf_exit(&buf);
			return -BCH_ERR_trigger_stripe_pointer;
		}

		m->block_sectors[p.ec.block] += sectors;

		struct disk_accounting_pos acc;
		memset(&acc, 0, sizeof(acc));
		acc.type = BCH_DISK_ACCOUNTING_replicas;
		unsafe_memcpy(&acc.replicas, &m->r.e, replicas_entry_bytes(&m->r.e), "VLA");
		gc_stripe_unlock(m);

		acc.replicas.data_type = data_type;
		int ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, true);
		if (ret)
			return ret;
	}

	return 0;
}

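/*
 * Run accounting for one version of an extent: per-pointer bucket and stripe
 * updates, plus replicas, compression, per-snapshot, per-btree and per-inode
 * sector counts:
 */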
static int __trigger_extent(struct btree_trans *trans,
			    enum btree_id btree_id, unsigned level,
			    struct bkey_s_c k,
			    enum btree_iter_update_trigger_flags flags,
			    s64 *replicas_sectors)
{
	bool gc = flags & BTREE_TRIGGER_gc;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	enum bch_data_type data_type = bkey_is_btree_ptr(k.k)
		? BCH_DATA_btree
		: BCH_DATA_user;
	int ret = 0;

	struct disk_accounting_pos acc_replicas_key;
	memset(&acc_replicas_key, 0, sizeof(acc_replicas_key));
	acc_replicas_key.type = BCH_DISK_ACCOUNTING_replicas;
	acc_replicas_key.replicas.data_type	= data_type;
	acc_replicas_key.replicas.nr_devs	= 0;
	acc_replicas_key.replicas.nr_required	= 1;

	unsigned cur_compression_type = 0;
	u64 compression_acct[3] = { 1, 0, 0 };

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		s64 disk_sectors = 0;
		ret = bch2_trigger_pointer(trans, btree_id, level, k, p, entry, &disk_sectors, flags);
		if (ret < 0)
			return ret;

		bool stale = ret > 0;

		if (p.ptr.cached && stale)
			continue;

		if (p.ptr.cached) {
			ret = bch2_mod_dev_cached_sectors(trans, p.ptr.dev, disk_sectors, gc);
			if (ret)
				return ret;
		} else if (!p.has_ec) {
			*replicas_sectors       += disk_sectors;
			replicas_entry_add_dev(&acc_replicas_key.replicas, p.ptr.dev);
		} else {
			ret = bch2_trigger_stripe_ptr(trans, k, p, data_type, disk_sectors, flags);
			if (ret)
				return ret;

			/*
			 * There may be other dirty pointers in this extent, but
			 * if so they're not required for mounting if we have an
			 * erasure coded pointer in this extent:
			 */
			acc_replicas_key.replicas.nr_required = 0;
		}

		if (cur_compression_type &&
		    cur_compression_type != p.crc.compression_type) {
			if (flags & BTREE_TRIGGER_overwrite)
				bch2_u64s_neg(compression_acct, ARRAY_SIZE(compression_acct));

			ret = bch2_disk_accounting_mod2(trans, gc, compression_acct,
							compression, cur_compression_type);
			if (ret)
				return ret;

			compression_acct[0] = 1;
			compression_acct[1] = 0;
			compression_acct[2] = 0;
		}

		cur_compression_type = p.crc.compression_type;
		if (p.crc.compression_type) {
			compression_acct[1] += p.crc.uncompressed_size;
			compression_acct[2] += p.crc.compressed_size;
		}
	}

	if (acc_replicas_key.replicas.nr_devs) {
		ret = bch2_disk_accounting_mod(trans, &acc_replicas_key, replicas_sectors, 1, gc);
		if (ret)
			return ret;
	}

	if (acc_replicas_key.replicas.nr_devs && !level && k.k->p.snapshot) {
		ret = bch2_disk_accounting_mod2_nr(trans, gc, replicas_sectors, 1, snapshot, k.k->p.snapshot);
		if (ret)
			return ret;
	}

	if (cur_compression_type) {
		if (flags & BTREE_TRIGGER_overwrite)
			bch2_u64s_neg(compression_acct, ARRAY_SIZE(compression_acct));

		ret = bch2_disk_accounting_mod2(trans, gc, compression_acct,
						compression, cur_compression_type);
		if (ret)
			return ret;
	}

	if (level) {
		ret = bch2_disk_accounting_mod2_nr(trans, gc, replicas_sectors, 1, btree, btree_id);
		if (ret)
			return ret;
	} else {
		bool insert = !(flags & BTREE_TRIGGER_overwrite);

		s64 v[3] = {
			insert ? 1 : -1,
			insert ? k.k->size : -((s64) k.k->size),
			*replicas_sectors,
		};
		ret = bch2_disk_accounting_mod2(trans, gc, v, inum, k.k->p.inode);
		if (ret)
			return ret;
	}

	return 0;
}

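/*
 * Extent trigger: runs accounting for the old and new versions of the
 * extent, then updates rebalance_work tracking if the amount of rebalance
 * work the key needs has changed:
 */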
int bch2_trigger_extent(struct btree_trans *trans,
			enum btree_id btree, unsigned level,
			struct bkey_s_c old, struct bkey_s new,
			enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c new_ptrs = bch2_bkey_ptrs_c(new.s_c);
	struct bkey_ptrs_c old_ptrs = bch2_bkey_ptrs_c(old);
	unsigned new_ptrs_bytes = (void *) new_ptrs.end - (void *) new_ptrs.start;
	unsigned old_ptrs_bytes = (void *) old_ptrs.end - (void *) old_ptrs.start;

	if (unlikely(flags & BTREE_TRIGGER_check_repair))
		return bch2_check_fix_ptrs(trans, btree, level, new.s_c, flags);

	/* if pointers aren't changing - nothing to do: */
	if (new_ptrs_bytes == old_ptrs_bytes &&
	    !memcmp(new_ptrs.start,
		    old_ptrs.start,
		    new_ptrs_bytes))
		return 0;

	if (flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc)) {
		s64 old_replicas_sectors = 0, new_replicas_sectors = 0;

		if (old.k->type) {
			int ret = __trigger_extent(trans, btree, level, old,
						   flags & ~BTREE_TRIGGER_insert,
						   &old_replicas_sectors);
			if (ret)
				return ret;
		}

		if (new.k->type) {
			int ret = __trigger_extent(trans, btree, level, new.s_c,
						   flags & ~BTREE_TRIGGER_overwrite,
						   &new_replicas_sectors);
			if (ret)
				return ret;
		}

		int need_rebalance_delta = 0;
		s64 need_rebalance_sectors_delta[1] = { 0 };

		s64 s = bch2_bkey_sectors_need_rebalance(c, old);
		need_rebalance_delta -= s != 0;
		need_rebalance_sectors_delta[0] -= s;

		s = bch2_bkey_sectors_need_rebalance(c, new.s_c);
		need_rebalance_delta += s != 0;
		need_rebalance_sectors_delta[0] += s;

		if ((flags & BTREE_TRIGGER_transactional) && need_rebalance_delta) {
			int ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_rebalance_work,
							  new.k->p, need_rebalance_delta > 0);
			if (ret)
				return ret;
		}

		if (need_rebalance_sectors_delta[0]) {
			int ret = bch2_disk_accounting_mod2(trans, flags & BTREE_TRIGGER_gc,
							    need_rebalance_sectors_delta, rebalance_work);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* KEY_TYPE_reservation */

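/*
 * Reservation keys reserve space for nr_replicas copies without referencing
 * specific devices; they're accounted under persistent_reserved:
 */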
static int __trigger_reservation(struct btree_trans *trans,
			enum btree_id btree_id, unsigned level, struct bkey_s_c k,
			enum btree_iter_update_trigger_flags flags)
{
	if (flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc)) {
		s64 sectors[1] = { k.k->size };

		if (flags & BTREE_TRIGGER_overwrite)
			sectors[0] = -sectors[0];

		return bch2_disk_accounting_mod2(trans, flags & BTREE_TRIGGER_gc, sectors,
				persistent_reserved, bkey_s_c_to_reservation(k).v->nr_replicas);
	}

	return 0;
}

int bch2_trigger_reservation(struct btree_trans *trans,
			  enum btree_id btree_id, unsigned level,
			  struct bkey_s_c old, struct bkey_s new,
			  enum btree_iter_update_trigger_flags flags)
{
	return trigger_run_overwrite_then_insert(__trigger_reservation, trans, btree_id, level, old, new, flags);
}

/* Mark superblocks: */

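/*
 * Superblock and journal buckets aren't referenced by keys in the btree, so
 * the transactional version updates their alloc keys directly:
 */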
static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
				    struct bch_dev *ca, u64 b,
				    enum bch_data_type type,
				    unsigned sectors)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	int ret = 0;

	struct bkey_i_alloc_v4 *a =
		bch2_trans_start_alloc_update_noupdate(trans, &iter, POS(ca->dev_idx, b));
	if (IS_ERR(a))
		return PTR_ERR(a);

	if (a->v.data_type && type && a->v.data_type != type) {
		bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_allocations);
		log_fsck_err(trans, bucket_metadata_type_mismatch,
			"bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
			"while marking %s",
			iter.pos.inode, iter.pos.offset, a->v.gen,
			bch2_data_type_str(a->v.data_type),
			bch2_data_type_str(type),
			bch2_data_type_str(type));
		ret = -BCH_ERR_metadata_bucket_inconsistency;
		goto err;
	}

	if (a->v.data_type	!= type ||
	    a->v.dirty_sectors	!= sectors) {
		a->v.data_type		= type;
		a->v.dirty_sectors	= sectors;
		ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
	}
err:
fsck_err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

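/*
 * GC version of the above: apply the same marking to the in-memory GC
 * bucket:
 */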
static int bch2_mark_metadata_bucket(struct btree_trans *trans, struct bch_dev *ca,
			u64 b, enum bch_data_type data_type, unsigned sectors,
			enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	int ret = 0;

	struct bucket *g = gc_bucket(ca, b);
	if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u when marking metadata type %s",
				    ca->dev_idx, bch2_data_type_str(data_type)))
		goto err;

	bucket_lock(g);
	struct bch_alloc_v4 old = bucket_m_to_alloc(*g);

	if (bch2_fs_inconsistent_on(g->data_type &&
			g->data_type != data_type, c,
			"different types of data in same bucket: %s, %s",
			bch2_data_type_str(g->data_type),
			bch2_data_type_str(data_type)))
		goto err_unlock;

	if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c,
			"bucket %u:%llu gen %u data type %s sector count overflow: %u + %u > bucket size",
			ca->dev_idx, b, g->gen,
			bch2_data_type_str(g->data_type ?: data_type),
			g->dirty_sectors, sectors))
		goto err_unlock;

	g->data_type = data_type;
	g->dirty_sectors += sectors;
	struct bch_alloc_v4 new = bucket_m_to_alloc(*g);
	bucket_unlock(g);
	ret = bch2_alloc_key_to_dev_counters(trans, ca, &old, &new, flags);
	return ret;
err_unlock:
	bucket_unlock(g);
err:
	return -BCH_ERR_metadata_bucket_inconsistency;
}

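/*
 * Mark a metadata (superblock/journal) bucket, dispatching to the GC or
 * transactional implementation based on @flags:
 */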
int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
			struct bch_dev *ca, u64 b,
			enum bch_data_type type, unsigned sectors,
			enum btree_iter_update_trigger_flags flags)
{
	BUG_ON(type != BCH_DATA_free &&
	       type != BCH_DATA_sb &&
	       type != BCH_DATA_journal);

	/*
	 * Backup superblock might be past the end of our normal usable space:
	 */
	if (b >= ca->mi.nbuckets)
		return 0;

	if (flags & BTREE_TRIGGER_gc)
		return bch2_mark_metadata_bucket(trans, ca, b, type, sectors, flags);
	else if (flags & BTREE_TRIGGER_transactional)
		return commit_do(trans, NULL, NULL, 0,
				 __bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors));
	else
		BUG();
}

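/*
 * Mark a range of sectors, accumulating into *bucket/*bucket_sectors so that
 * each bucket is updated with a single call:
 */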
static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
			struct bch_dev *ca, u64 start, u64 end,
			enum bch_data_type type, u64 *bucket, unsigned *bucket_sectors,
			enum btree_iter_update_trigger_flags flags)
{
	do {
		u64 b = sector_to_bucket(ca, start);
		unsigned sectors =
			min_t(u64, bucket_to_sector(ca, b + 1), end) - start;

		if (b != *bucket && *bucket_sectors) {
			int ret = bch2_trans_mark_metadata_bucket(trans, ca, *bucket,
							type, *bucket_sectors, flags);
			if (ret)
				return ret;

			*bucket_sectors = 0;
		}

		*bucket		= b;
		*bucket_sectors	+= sectors;
		start += sectors;
	} while (start < end);

	return 0;
}

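/*
 * Mark all superblock and journal buckets for @ca from its superblock
 * layout; when the primary superblock at BCH_SB_SECTOR is present, the
 * sectors in front of it are marked as superblock as well:
 */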
static int __bch2_trans_mark_dev_sb(struct btree_trans *trans, struct bch_dev *ca,
			enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;

	mutex_lock(&c->sb_lock);
	struct bch_sb_layout layout = ca->disk_sb.sb->layout;
	mutex_unlock(&c->sb_lock);

	u64 bucket = 0;
	unsigned i, bucket_sectors = 0;
	int ret;

	for (i = 0; i < layout.nr_superblocks; i++) {
		u64 offset = le64_to_cpu(layout.sb_offset[i]);

		if (offset == BCH_SB_SECTOR) {
			ret = bch2_trans_mark_metadata_sectors(trans, ca,
						0, BCH_SB_SECTOR,
						BCH_DATA_sb, &bucket, &bucket_sectors, flags);
			if (ret)
				return ret;
		}

		ret = bch2_trans_mark_metadata_sectors(trans, ca, offset,
				      offset + (1 << layout.sb_max_size_bits),
				      BCH_DATA_sb, &bucket, &bucket_sectors, flags);
		if (ret)
			return ret;
	}

	if (bucket_sectors) {
		ret = bch2_trans_mark_metadata_bucket(trans, ca,
				bucket, BCH_DATA_sb, bucket_sectors, flags);
		if (ret)
			return ret;
	}

	for (i = 0; i < ca->journal.nr; i++) {
		ret = bch2_trans_mark_metadata_bucket(trans, ca,
				ca->journal.buckets[i],
				BCH_DATA_journal, ca->mi.bucket_size, flags);
		if (ret)
			return ret;
	}

	return 0;
}

int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca,
			enum btree_iter_update_trigger_flags flags)
{
	int ret = bch2_trans_run(c,
		__bch2_trans_mark_dev_sb(trans, ca, flags));
	bch_err_fn(c, ret);
	return ret;
}

int bch2_trans_mark_dev_sbs_flags(struct bch_fs *c,
			enum btree_iter_update_trigger_flags flags)
{
	for_each_online_member(c, ca) {
		int ret = bch2_trans_mark_dev_sb(c, ca, flags);
		if (ret) {
			percpu_ref_put(&ca->io_ref[READ]);
			return ret;
		}
	}

	return 0;
}

int bch2_trans_mark_dev_sbs(struct bch_fs *c)
{
	return bch2_trans_mark_dev_sbs_flags(c, BTREE_TRIGGER_transactional);
}

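/*
 * Returns true if bucket @b overlaps the superblock layout or journal, i.e.
 * it isn't available for data:
 */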
bool bch2_is_superblock_bucket(struct bch_dev *ca, u64 b)
{
	struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
	u64 b_offset	= bucket_to_sector(ca, b);
	u64 b_end	= bucket_to_sector(ca, b + 1);
	unsigned i;

	if (!b)
		return true;

	for (i = 0; i < layout->nr_superblocks; i++) {
		u64 offset = le64_to_cpu(layout->sb_offset[i]);
		u64 end = offset + (1 << layout->sb_max_size_bits);

		if (!(offset >= b_end || end <= b_offset))
			return true;
	}

	for (i = 0; i < ca->journal.nr; i++)
		if (b == ca->journal.buckets[i])
			return true;

	return false;
}

/* Disk reservations: */

#define SECTORS_CACHE	1024

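/*
 * Take a disk reservation for @sectors. Fast path: carve it out of this
 * CPU's cache of sectors_available, refilling from the global atomic counter
 * in batches of SECTORS_CACHE. Slow path: drain the percpu caches and
 * recompute sectors_available from filesystem usage, honouring the PARTIAL
 * and NOFAIL flags:
 */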
int __bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
				u64 sectors, enum bch_reservation_flags flags)
{
	struct bch_fs_pcpu *pcpu;
	u64 old, get;
	u64 sectors_available;
	int ret;

	percpu_down_read(&c->mark_lock);
	preempt_disable();
	pcpu = this_cpu_ptr(c->pcpu);

	if (sectors <= pcpu->sectors_available)
		goto out;

	old = atomic64_read(&c->sectors_available);
	do {
		get = min((u64) sectors + SECTORS_CACHE, old);

		if (get < sectors) {
			preempt_enable();
			goto recalculate;
		}
	} while (!atomic64_try_cmpxchg(&c->sectors_available,
				       &old, old - get));

	pcpu->sectors_available		+= get;

out:
	pcpu->sectors_available		-= sectors;
	this_cpu_add(*c->online_reserved, sectors);
	res->sectors			+= sectors;

	preempt_enable();
	percpu_up_read(&c->mark_lock);
	return 0;

recalculate:
	mutex_lock(&c->sectors_available_lock);

	percpu_u64_set(&c->pcpu->sectors_available, 0);
	sectors_available = avail_factor(__bch2_fs_usage_read_short(c).free);

	if (sectors_available && (flags & BCH_DISK_RESERVATION_PARTIAL))
		sectors = min(sectors, sectors_available);

	if (sectors <= sectors_available ||
	    (flags & BCH_DISK_RESERVATION_NOFAIL)) {
		atomic64_set(&c->sectors_available,
			     max_t(s64, 0, sectors_available - sectors));
		this_cpu_add(*c->online_reserved, sectors);
		res->sectors			+= sectors;
		ret = 0;
	} else {
		atomic64_set(&c->sectors_available, sectors_available);
		ret = -BCH_ERR_ENOSPC_disk_reservation;
	}

	mutex_unlock(&c->sectors_available_lock);
	percpu_up_read(&c->mark_lock);

	return ret;
}

/* Startup/shutdown: */

void bch2_buckets_nouse_free(struct bch_fs *c)
{
	for_each_member_device(c, ca) {
		kvfree_rcu_mightsleep(ca->buckets_nouse);
		ca->buckets_nouse = NULL;
	}
}

int bch2_buckets_nouse_alloc(struct bch_fs *c)
{
	for_each_member_device(c, ca) {
		BUG_ON(ca->buckets_nouse);

		ca->buckets_nouse = bch2_kvmalloc(BITS_TO_LONGS(ca->mi.nbuckets) *
					    sizeof(unsigned long),
					    GFP_KERNEL|__GFP_ZERO);
		if (!ca->buckets_nouse) {
			bch2_dev_put(ca);
			return -BCH_ERR_ENOMEM_buckets_nouse;
		}
	}

	return 0;
}

static void bucket_gens_free_rcu(struct rcu_head *rcu)
{
	struct bucket_gens *buckets =
		container_of(rcu, struct bucket_gens, rcu);

	kvfree(buckets);
}

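/*
 * Allocate or resize the bucket_gens array for @ca, copying existing
 * generation numbers across and publishing the new array via RCU; the old
 * array is freed after a grace period:
 */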
int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
{
	struct bucket_gens *bucket_gens = NULL, *old_bucket_gens = NULL;
	bool resize = ca->bucket_gens != NULL;
	int ret;

	if (resize)
		lockdep_assert_held(&c->state_lock);

	if (resize && ca->buckets_nouse)
		return -BCH_ERR_no_resize_with_buckets_nouse;

	bucket_gens = bch2_kvmalloc(struct_size(bucket_gens, b, nbuckets),
				    GFP_KERNEL|__GFP_ZERO);
	if (!bucket_gens) {
		ret = -BCH_ERR_ENOMEM_bucket_gens;
		goto err;
	}

	bucket_gens->first_bucket = ca->mi.first_bucket;
	bucket_gens->nbuckets	= nbuckets;
	bucket_gens->nbuckets_minus_first =
		bucket_gens->nbuckets - bucket_gens->first_bucket;

	old_bucket_gens = rcu_dereference_protected(ca->bucket_gens, 1);

	if (resize) {
		u64 copy = min(bucket_gens->nbuckets,
			       old_bucket_gens->nbuckets);
		memcpy(bucket_gens->b,
		       old_bucket_gens->b,
		       sizeof(bucket_gens->b[0]) * copy);
	}

	rcu_assign_pointer(ca->bucket_gens, bucket_gens);
	bucket_gens	= old_bucket_gens;

	nbuckets = ca->mi.nbuckets;

	ret = 0;
err:
	if (bucket_gens)
		call_rcu(&bucket_gens->rcu, bucket_gens_free_rcu);

	return ret;
}

void bch2_dev_buckets_free(struct bch_dev *ca)
{
	kvfree(ca->buckets_nouse);
	kvfree(rcu_dereference_protected(ca->bucket_gens, 1));
	free_percpu(ca->usage);
}

int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
{
	ca->usage = alloc_percpu(struct bch_dev_usage_full);
	if (!ca->usage)
		return -BCH_ERR_ENOMEM_usage_init;

	return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
}