// SPDX-License-Identifier: GPL-2.0

/* erasure coding */

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "bkey_buf.h"
#include "bset.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "checksum.h"
#include "disk_accounting.h"
#include "disk_groups.h"
#include "ec.h"
#include "error.h"
#include "io_read.h"
#include "io_write.h"
#include "keylist.h"
#include "lru.h"
#include "recovery.h"
#include "replicas.h"
#include "super-io.h"
#include "util.h"

#include <linux/sort.h>
#include <linux/string_choices.h>

#ifdef __KERNEL__

#include <linux/raid/pq.h>
#include <linux/raid/xor.h>

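/*
 * Recover a single failed block with xor parity, in place: since
 * P = D0 ^ D1 ^ ... ^ Dn-1, any one missing block is the xor of all the
 * others. The failed block is temporarily swapped into slot 0 so the
 * xor_blocks() loop can accumulate into its buffer.
 */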
static void raid5_recov(unsigned disks, unsigned failed_idx,
			size_t size, void **data)
{
	unsigned i = 2, nr;

	BUG_ON(failed_idx >= disks);

	swap(data[0], data[failed_idx]);
	memcpy(data[0], data[1], size);

	while (i < disks) {
		nr = min_t(unsigned, disks - i, MAX_XOR_BLOCKS);
		xor_blocks(nr, size, data[0], data + i);
		i += nr;
	}

	swap(data[0], data[failed_idx]);
}

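/*
 * Generate redundancy: with a single parity block, compute P (block nd)
 * as the xor of the data blocks by "recovering" it; with two, the raid6
 * code computes both P and the Q syndrome. More than two redundant
 * blocks is unsupported.
 */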
static void raid_gen(int nd, int np, size_t size, void **v)
{
	if (np >= 1)
		raid5_recov(nd + np, nd, size, v);
	if (np >= 2)
		raid6_call.gen_syndrome(nd + np, size, v);
	BUG_ON(np > 2);
}

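/*
 * Reconstruct up to two failed blocks: ir[] holds the sorted indices of
 * the failed blocks. Failed P/Q blocks are simply regenerated; failed
 * data blocks use xor or the raid6 recovery paths depending on which
 * other blocks survived.
 */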
static void raid_rec(int nr, int *ir, int nd, int np, size_t size, void **v)
{
	switch (nr) {
	case 0:
		break;
	case 1:
		if (ir[0] < nd + 1)
			raid5_recov(nd + 1, ir[0], size, v);
		else
			raid6_call.gen_syndrome(nd + np, size, v);
		break;
	case 2:
		if (ir[1] < nd) {
			/* data+data failure. */
			raid6_2data_recov(nd + np, size, ir[0], ir[1], v);
		} else if (ir[0] < nd) {
			/* data + p/q failure */

			if (ir[1] == nd) /* data + p failure */
				raid6_datap_recov(nd + np, size, ir[0], v);
			else { /* data + q failure */
				raid5_recov(nd + 1, ir[0], size, v);
				raid6_call.gen_syndrome(nd + np, size, v);
			}
		} else {
			raid_gen(nd, np, size, v);
		}
		break;
	default:
		BUG();
	}
}

#else

#include <raid/raid.h>

#endif

struct ec_bio {
	struct bch_dev		*ca;
	struct ec_stripe_buf	*buf;
	size_t			idx;
	int			rw;
	u64			submit_time;
	struct bio		bio;
};

/* Stripes btree keys: */

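/*
 * Sanity check a stripe key: position within the allowed range, value
 * big enough for the block counts it claims, sane checksum granularity,
 * and valid member pointers.
 */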
int bch2_stripe_validate(struct bch_fs *c, struct bkey_s_c k,
			 struct bkey_validate_context from)
{
	const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
	int ret = 0;

	bkey_fsck_err_on(bkey_eq(k.k->p, POS_MIN) ||
			 bpos_gt(k.k->p, POS(0, U32_MAX)),
			 c, stripe_pos_bad,
			 "stripe at bad pos");

	bkey_fsck_err_on(bkey_val_u64s(k.k) < stripe_val_u64s(s),
			 c, stripe_val_size_bad,
			 "incorrect value size (%zu < %u)",
			 bkey_val_u64s(k.k), stripe_val_u64s(s));

	bkey_fsck_err_on(s->csum_granularity_bits >= 64,
			 c, stripe_csum_granularity_bad,
			 "invalid csum granularity (%u >= 64)",
			 s->csum_granularity_bits);

	ret = bch2_bkey_ptrs_validate(c, k, from);
fsck_err:
	return ret;
}

void bch2_stripe_to_text(struct printbuf *out, struct bch_fs *c,
			 struct bkey_s_c k)
{
	const struct bch_stripe *sp = bkey_s_c_to_stripe(k).v;
	struct bch_stripe s = {};

	memcpy(&s, sp, min(sizeof(s), bkey_val_bytes(k.k)));

	unsigned nr_data = s.nr_blocks - s.nr_redundant;

	prt_printf(out, "algo %u sectors %u blocks %u:%u csum ",
		   s.algorithm,
		   le16_to_cpu(s.sectors),
		   nr_data,
		   s.nr_redundant);
	bch2_prt_csum_type(out, s.csum_type);
	prt_str(out, " gran ");
	if (s.csum_granularity_bits < 64)
		prt_printf(out, "%llu", 1ULL << s.csum_granularity_bits);
	else
		prt_printf(out, "(invalid shift %u)", s.csum_granularity_bits);

	if (s.disk_label) {
		prt_str(out, " label");
		bch2_disk_path_to_text(out, c, s.disk_label - 1);
	}

	for (unsigned i = 0; i < s.nr_blocks; i++) {
		const struct bch_extent_ptr *ptr = sp->ptrs + i;

		if ((void *) ptr >= bkey_val_end(k))
			break;

		prt_char(out, ' ');
		bch2_extent_ptr_to_text(out, c, ptr);

		if (s.csum_type < BCH_CSUM_NR &&
		    i < nr_data &&
		    stripe_blockcount_offset(&s, i) < bkey_val_bytes(k.k))
			prt_printf(out,  "#%u", stripe_blockcount_get(sp, i));
	}
}

/* Triggers: */

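/*
 * Update one bucket's alloc info for a stripe create/delete: verify the
 * bucket isn't already in another stripe (or, on deletion, that it
 * matches this stripe), adjust parity sector counts, and set or clear
 * the bucket's stripe backreference.
 */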
static int __mark_stripe_bucket(struct btree_trans *trans,
				struct bch_dev *ca,
				struct bkey_s_c_stripe s,
				unsigned ptr_idx, bool deleting,
				struct bpos bucket,
				struct bch_alloc_v4 *a,
				enum btree_iter_update_trigger_flags flags)
{
	const struct bch_extent_ptr *ptr = s.v->ptrs + ptr_idx;
	unsigned nr_data = s.v->nr_blocks - s.v->nr_redundant;
	bool parity = ptr_idx >= nr_data;
	enum bch_data_type data_type = parity ? BCH_DATA_parity : BCH_DATA_stripe;
	s64 sectors = parity ? le16_to_cpu(s.v->sectors) : 0;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	struct bch_fs *c = trans->c;
	if (deleting)
		sectors = -sectors;

	if (!deleting) {
		if (bch2_trans_inconsistent_on(a->stripe ||
					       a->stripe_redundancy, trans,
				"bucket %llu:%llu gen %u data type %s dirty_sectors %u: multiple stripes using same bucket (%u, %llu)\n%s",
				bucket.inode, bucket.offset, a->gen,
				bch2_data_type_str(a->data_type),
				a->dirty_sectors,
				a->stripe, s.k->p.offset,
				(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
			ret = -BCH_ERR_mark_stripe;
			goto err;
		}

		if (bch2_trans_inconsistent_on(parity && bch2_bucket_sectors_total(*a), trans,
				"bucket %llu:%llu gen %u data type %s dirty_sectors %u cached_sectors %u: data already in parity bucket\n%s",
				bucket.inode, bucket.offset, a->gen,
				bch2_data_type_str(a->data_type),
				a->dirty_sectors,
				a->cached_sectors,
				(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
			ret = -BCH_ERR_mark_stripe;
			goto err;
		}
	} else {
		if (bch2_trans_inconsistent_on(a->stripe != s.k->p.offset ||
					       a->stripe_redundancy != s.v->nr_redundant, trans,
				"bucket %llu:%llu gen %u: not marked as stripe when deleting stripe (got %u)\n%s",
				bucket.inode, bucket.offset, a->gen,
				a->stripe,
				(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
			ret = -BCH_ERR_mark_stripe;
			goto err;
		}

		if (bch2_trans_inconsistent_on(a->data_type != data_type, trans,
				"bucket %llu:%llu gen %u data type %s: wrong data type when stripe, should be %s\n%s",
				bucket.inode, bucket.offset, a->gen,
				bch2_data_type_str(a->data_type),
				bch2_data_type_str(data_type),
				(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
			ret = -BCH_ERR_mark_stripe;
			goto err;
		}

		if (bch2_trans_inconsistent_on(parity &&
					       (a->dirty_sectors != -sectors ||
						a->cached_sectors), trans,
				"bucket %llu:%llu gen %u dirty_sectors %u cached_sectors %u: wrong sectors when deleting parity block of stripe\n%s",
				bucket.inode, bucket.offset, a->gen,
				a->dirty_sectors,
				a->cached_sectors,
				(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
			ret = -BCH_ERR_mark_stripe;
			goto err;
		}
	}

	if (sectors) {
		ret = bch2_bucket_ref_update(trans, ca, s.s_c, ptr, sectors, data_type,
					     a->gen, a->data_type, &a->dirty_sectors);
		if (ret)
			goto err;
	}

	if (!deleting) {
		a->stripe		= s.k->p.offset;
		a->stripe_redundancy	= s.v->nr_redundant;
		alloc_data_type_set(a, data_type);
	} else {
		a->stripe		= 0;
		a->stripe_redundancy	= 0;
		alloc_data_type_set(a, BCH_DATA_user);
	}
err:
	printbuf_exit(&buf);
	return ret;
}

static int mark_stripe_bucket(struct btree_trans *trans,
			      struct bkey_s_c_stripe s,
			      unsigned ptr_idx, bool deleting,
			      enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	const struct bch_extent_ptr *ptr = s.v->ptrs + ptr_idx;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	struct bch_dev *ca = bch2_dev_tryget(c, ptr->dev);
	if (unlikely(!ca)) {
		if (ptr->dev != BCH_SB_MEMBER_INVALID && !(flags & BTREE_TRIGGER_overwrite))
			ret = -BCH_ERR_mark_stripe;
		goto err;
	}

	struct bpos bucket = PTR_BUCKET_POS(ca, ptr);

	if (flags & BTREE_TRIGGER_transactional) {
		struct extent_ptr_decoded p = {
			.ptr = *ptr,
			.crc = bch2_extent_crc_unpack(s.k, NULL),
		};
		struct bkey_i_backpointer bp;
		bch2_extent_ptr_to_bp(c, BTREE_ID_stripes, 0, s.s_c, p,
				      (const union bch_extent_entry *) ptr, &bp);

		struct bkey_i_alloc_v4 *a =
			bch2_trans_start_alloc_update(trans, bucket, 0);
		ret   = PTR_ERR_OR_ZERO(a) ?:
			__mark_stripe_bucket(trans, ca, s, ptr_idx, deleting, bucket, &a->v, flags) ?:
			bch2_bucket_backpointer_mod(trans, s.s_c, &bp,
						    !(flags & BTREE_TRIGGER_overwrite));
		if (ret)
			goto err;
	}

	if (flags & BTREE_TRIGGER_gc) {
		struct bucket *g = gc_bucket(ca, bucket.offset);
		if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u\n%s",
					    ptr->dev,
					    (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
			ret = -BCH_ERR_mark_stripe;
			goto err;
		}

		bucket_lock(g);
		struct bch_alloc_v4 old = bucket_m_to_alloc(*g), new = old;
		ret = __mark_stripe_bucket(trans, ca, s, ptr_idx, deleting, bucket, &new, flags);
		alloc_to_bucket(g, new);
		bucket_unlock(g);

		if (!ret)
			ret = bch2_alloc_key_to_dev_counters(trans, ca, &old, &new, flags);
	}
err:
	bch2_dev_put(ca);
	printbuf_exit(&buf);
	return ret;
}

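/*
 * Walk the old and new stripe keys together, (un)marking only the
 * buckets whose pointers actually changed.
 */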
static int mark_stripe_buckets(struct btree_trans *trans,
			       struct bkey_s_c old, struct bkey_s_c new,
			       enum btree_iter_update_trigger_flags flags)
{
	const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
		? bkey_s_c_to_stripe(old).v : NULL;
	const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
		? bkey_s_c_to_stripe(new).v : NULL;

	BUG_ON(old_s && new_s && old_s->nr_blocks != new_s->nr_blocks);

	unsigned nr_blocks = new_s ? new_s->nr_blocks : old_s->nr_blocks;

	for (unsigned i = 0; i < nr_blocks; i++) {
		if (new_s && old_s &&
		    !memcmp(&new_s->ptrs[i],
			    &old_s->ptrs[i],
			    sizeof(new_s->ptrs[i])))
			continue;

		if (new_s) {
			int ret = mark_stripe_bucket(trans,
					bkey_s_c_to_stripe(new), i, false, flags);
			if (ret)
				return ret;
		}

		if (old_s) {
			int ret = mark_stripe_bucket(trans,
					bkey_s_c_to_stripe(old), i, true, flags);
			if (ret)
				return ret;
		}
	}

	return 0;
}

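/*
 * Stripe trigger: keeps the fragmentation LRU, replicas accounting and
 * per-bucket alloc info in sync with stripe key updates.
 */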
int bch2_trigger_stripe(struct btree_trans *trans,
			enum btree_id btree, unsigned level,
			struct bkey_s_c old, struct bkey_s _new,
			enum btree_iter_update_trigger_flags flags)
{
	struct bkey_s_c new = _new.s_c;
	struct bch_fs *c = trans->c;
	u64 idx = new.k->p.offset;
	const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
		? bkey_s_c_to_stripe(old).v : NULL;
	const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
		? bkey_s_c_to_stripe(new).v : NULL;

	if (unlikely(flags & BTREE_TRIGGER_check_repair))
		return bch2_check_fix_ptrs(trans, btree, level, _new.s_c, flags);

	BUG_ON(new_s && old_s &&
	       (new_s->nr_blocks	!= old_s->nr_blocks ||
		new_s->nr_redundant	!= old_s->nr_redundant));

	if (flags & BTREE_TRIGGER_transactional) {
		int ret = bch2_lru_change(trans,
					  BCH_LRU_STRIPE_FRAGMENTATION,
					  idx,
					  stripe_lru_pos(old_s),
					  stripe_lru_pos(new_s));
		if (ret)
			return ret;
	}

	if (flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc)) {
		/*
		 * If the pointers aren't changing, we don't need to do anything:
		 */
		if (new_s && old_s &&
		    new_s->nr_blocks	== old_s->nr_blocks &&
		    new_s->nr_redundant	== old_s->nr_redundant &&
		    !memcmp(old_s->ptrs, new_s->ptrs,
			    new_s->nr_blocks * sizeof(struct bch_extent_ptr)))
			return 0;

		struct gc_stripe *gc = NULL;
		if (flags & BTREE_TRIGGER_gc) {
			gc = genradix_ptr_alloc(&c->gc_stripes, idx, GFP_KERNEL);
			if (!gc) {
				bch_err(c, "error allocating memory for gc_stripes, idx %llu", idx);
				return -BCH_ERR_ENOMEM_mark_stripe;
			}

			/*
			 * This will be wrong when we bring back runtime gc: we should
			 * be unmarking the old key and then marking the new key
			 *
			 * Also: when we bring back runtime gc, locking
			 */
			gc->alive	= true;
			gc->sectors	= le16_to_cpu(new_s->sectors);
			gc->nr_blocks	= new_s->nr_blocks;
			gc->nr_redundant	= new_s->nr_redundant;

			for (unsigned i = 0; i < new_s->nr_blocks; i++)
				gc->ptrs[i] = new_s->ptrs[i];

			/*
			 * gc recalculates this field from stripe ptr
			 * references:
			 */
			memset(gc->block_sectors, 0, sizeof(gc->block_sectors));
		}

		if (new_s) {
			s64 sectors = (u64) le16_to_cpu(new_s->sectors) * new_s->nr_redundant;

			struct disk_accounting_pos acc;
			memset(&acc, 0, sizeof(acc));
			acc.type = BCH_DISK_ACCOUNTING_replicas;
			bch2_bkey_to_replicas(&acc.replicas, new);
			int ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, gc);
			if (ret)
				return ret;

			if (gc)
				unsafe_memcpy(&gc->r.e, &acc.replicas,
					      replicas_entry_bytes(&acc.replicas), "VLA");
		}

		if (old_s) {
			s64 sectors = -((s64) le16_to_cpu(old_s->sectors)) * old_s->nr_redundant;

			struct disk_accounting_pos acc;
			memset(&acc, 0, sizeof(acc));
			acc.type = BCH_DISK_ACCOUNTING_replicas;
			bch2_bkey_to_replicas(&acc.replicas, old);
			int ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, gc);
			if (ret)
				return ret;
		}

		int ret = mark_stripe_buckets(trans, old, new, flags);
		if (ret)
			return ret;
	}

	return 0;
}

/* returns the pointer that matched the stripe, setting *block to the block index: */
static const struct bch_extent_ptr *bkey_matches_stripe(struct bch_stripe *s,
						struct bkey_s_c k, unsigned *block)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	unsigned i, nr_data = s->nr_blocks - s->nr_redundant;

	bkey_for_each_ptr(ptrs, ptr)
		for (i = 0; i < nr_data; i++)
			if (__bch2_ptr_matches_stripe(&s->ptrs[i], ptr,
						      le16_to_cpu(s->sectors))) {
				*block = i;
				return ptr;
			}

	return NULL;
}

static bool extent_has_stripe_ptr(struct bkey_s_c k, u64 idx)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;

	bkey_extent_entry_for_each(ptrs, entry)
		if (extent_entry_type(entry) ==
		    BCH_EXTENT_ENTRY_stripe_ptr &&
		    entry->stripe_ptr.idx == idx)
			return true;

	return false;
}

/* Stripe bufs: */

static void ec_stripe_buf_exit(struct ec_stripe_buf *buf)
{
	if (buf->key.k.type == KEY_TYPE_stripe) {
		struct bkey_i_stripe *s = bkey_i_to_stripe(&buf->key);
		unsigned i;

		for (i = 0; i < s->v.nr_blocks; i++) {
			kvfree(buf->data[i]);
			buf->data[i] = NULL;
		}
	}
}

/* XXX: this is a non-mempoolified memory allocation: */
static int ec_stripe_buf_init(struct ec_stripe_buf *buf,
			      unsigned offset, unsigned size)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned csum_granularity = 1U << v->csum_granularity_bits;
	unsigned end = offset + size;
	unsigned i;

	BUG_ON(end > le16_to_cpu(v->sectors));

	offset	= round_down(offset, csum_granularity);
	end	= min_t(unsigned, le16_to_cpu(v->sectors),
			round_up(end, csum_granularity));

	buf->offset	= offset;
	buf->size	= end - offset;

	memset(buf->valid, 0xFF, sizeof(buf->valid));

	for (i = 0; i < v->nr_blocks; i++) {
		buf->data[i] = kvmalloc(buf->size << 9, GFP_KERNEL);
		if (!buf->data[i])
			goto err;
	}

	return 0;
err:
	ec_stripe_buf_exit(buf);
	return -BCH_ERR_ENOMEM_stripe_buf;
}

/* Checksumming: */

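/*
 * Checksum one csum_granularity-sized chunk of a stripe block; offset
 * is in sectors, relative to the start of the stripe.
 */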
static struct bch_csum ec_block_checksum(struct ec_stripe_buf *buf,
					 unsigned block, unsigned offset)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned csum_granularity = 1 << v->csum_granularity_bits;
	unsigned end = buf->offset + buf->size;
	unsigned len = min(csum_granularity, end - offset);

	BUG_ON(offset >= end);
	BUG_ON(offset <  buf->offset);
	BUG_ON(offset & (csum_granularity - 1));
	BUG_ON(offset + len != le16_to_cpu(v->sectors) &&
	       (len & (csum_granularity - 1)));

	return bch2_checksum(NULL, v->csum_type,
			     null_nonce(),
			     buf->data[block] + ((offset - buf->offset) << 9),
			     len << 9);
}

static void ec_generate_checksums(struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned i, j, csums_per_device = stripe_csums_per_device(v);

	if (!v->csum_type)
		return;

	BUG_ON(buf->offset);
	BUG_ON(buf->size != le16_to_cpu(v->sectors));

	for (i = 0; i < v->nr_blocks; i++)
		for (j = 0; j < csums_per_device; j++)
			stripe_csum_set(v, i, j,
				ec_block_checksum(buf, i, j << v->csum_granularity_bits));
}

static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned csum_granularity = 1 << v->csum_granularity_bits;
	unsigned i;

	if (!v->csum_type)
		return;

	for (i = 0; i < v->nr_blocks; i++) {
		unsigned offset = buf->offset;
		unsigned end = buf->offset + buf->size;

		if (!test_bit(i, buf->valid))
			continue;

		while (offset < end) {
			unsigned j = offset >> v->csum_granularity_bits;
			unsigned len = min(csum_granularity, end - offset);
			struct bch_csum want = stripe_csum_get(v, i, j);
			struct bch_csum got = ec_block_checksum(buf, i, offset);

			if (bch2_crc_cmp(want, got)) {
				struct bch_dev *ca = bch2_dev_tryget(c, v->ptrs[i].dev);
				if (ca) {
					struct printbuf err = PRINTBUF;

					prt_str(&err, "stripe ");
					bch2_csum_err_msg(&err, v->csum_type, want, got);
					prt_printf(&err, "  for %ps at %u of\n  ", (void *) _RET_IP_, i);
					bch2_bkey_val_to_text(&err, c, bkey_i_to_s_c(&buf->key));
					bch_err_ratelimited(ca, "%s", err.buf);
					printbuf_exit(&err);

					bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
				}

				clear_bit(i, buf->valid);
				break;
			}

			offset += len;
		}
	}
}

/* Erasure coding: */

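/*
 * Compute the parity blocks for a complete stripe: e.g. for a 2+1
 * stripe data[2] = data[0] ^ data[1], and for 2+2 the raid6 Q syndrome
 * is generated as well.
 */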
static void ec_generate_ec(struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned nr_data = v->nr_blocks - v->nr_redundant;
	unsigned bytes = le16_to_cpu(v->sectors) << 9;

	raid_gen(nr_data, v->nr_redundant, bytes, buf->data);
}

static unsigned ec_nr_failed(struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;

	return v->nr_blocks - bitmap_weight(buf->valid, v->nr_blocks);
}

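/*
 * Reconstruct the contents of failed blocks (those not marked valid),
 * provided no more than nr_redundant blocks failed.
 */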
static int ec_do_recov(struct bch_fs *c, struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned i, failed[BCH_BKEY_PTRS_MAX], nr_failed = 0;
	unsigned nr_data = v->nr_blocks - v->nr_redundant;
	unsigned bytes = buf->size << 9;

	if (ec_nr_failed(buf) > v->nr_redundant) {
		bch_err_ratelimited(c,
			"error doing reconstruct read: unable to read enough blocks");
		return -1;
	}

	for (i = 0; i < nr_data; i++)
		if (!test_bit(i, buf->valid))
			failed[nr_failed++] = i;

	raid_rec(nr_failed, failed, nr_data, v->nr_redundant, bytes, buf->data);
	return 0;
}

/* IO: */

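/*
 * Completion handler for stripe block IO: account the result, mark the
 * block invalid on error or if the pointer went stale while the IO was
 * in flight, and drop the refs taken at submit time.
 */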
static void ec_block_endio(struct bio *bio)
{
	struct ec_bio *ec_bio = container_of(bio, struct ec_bio, bio);
	struct bch_stripe *v = &bkey_i_to_stripe(&ec_bio->buf->key)->v;
	struct bch_extent_ptr *ptr = &v->ptrs[ec_bio->idx];
	struct bch_dev *ca = ec_bio->ca;
	struct closure *cl = bio->bi_private;
	int rw = ec_bio->rw;

	bch2_account_io_completion(ca, bio_data_dir(bio),
				   ec_bio->submit_time, !bio->bi_status);

	if (bio->bi_status) {
		bch_err_dev_ratelimited(ca, "erasure coding %s error: %s",
			       str_write_read(bio_data_dir(bio)),
			       bch2_blk_status_to_str(bio->bi_status));
		clear_bit(ec_bio->idx, ec_bio->buf->valid);
	}

	int stale = dev_ptr_stale(ca, ptr);
	if (stale) {
		bch_err_ratelimited(ca->fs,
				    "error %s stripe: stale/invalid pointer (%i) after io",
				    bio_data_dir(bio) == READ ? "reading from" : "writing to",
				    stale);
		clear_bit(ec_bio->idx, ec_bio->buf->valid);
	}

	bio_put(&ec_bio->bio);
	percpu_ref_put(&ca->io_ref[rw]);
	closure_put(cl);
}

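/*
 * Read or write one block of a stripe, splitting it into multiple bios
 * when it exceeds BIO_MAX_VECS pages; completions are tracked via @cl,
 * and failures clear the block's valid bit.
 */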
static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
			blk_opf_t opf, unsigned idx, struct closure *cl)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned offset = 0, bytes = buf->size << 9;
	struct bch_extent_ptr *ptr = &v->ptrs[idx];
	enum bch_data_type data_type = idx < v->nr_blocks - v->nr_redundant
		? BCH_DATA_user
		: BCH_DATA_parity;
	int rw = op_is_write(opf);

	struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, rw);
	if (!ca) {
		clear_bit(idx, buf->valid);
		return;
	}

	int stale = dev_ptr_stale(ca, ptr);
	if (stale) {
		bch_err_ratelimited(c,
				    "error %s stripe: stale pointer (%i)",
				    rw == READ ? "reading from" : "writing to",
				    stale);
		clear_bit(idx, buf->valid);
		return;
	}

	this_cpu_add(ca->io_done->sectors[rw][data_type], buf->size);

	while (offset < bytes) {
		unsigned nr_iovecs = min_t(size_t, BIO_MAX_VECS,
					   DIV_ROUND_UP(bytes, PAGE_SIZE));
		unsigned b = min_t(size_t, bytes - offset,
				   nr_iovecs << PAGE_SHIFT);
		struct ec_bio *ec_bio;

		ec_bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev,
						       nr_iovecs,
						       opf,
						       GFP_KERNEL,
						       &c->ec_bioset),
				      struct ec_bio, bio);

		ec_bio->ca			= ca;
		ec_bio->buf			= buf;
		ec_bio->idx			= idx;
		ec_bio->rw			= rw;
		ec_bio->submit_time		= local_clock();

		ec_bio->bio.bi_iter.bi_sector	= ptr->offset + buf->offset + (offset >> 9);
		ec_bio->bio.bi_end_io		= ec_block_endio;
		ec_bio->bio.bi_private		= cl;

		bch2_bio_map(&ec_bio->bio, buf->data[idx] + offset, b);

		closure_get(cl);
		percpu_ref_get(&ca->io_ref[rw]);

		submit_bio(&ec_bio->bio);

		offset += b;
	}

	percpu_ref_put(&ca->io_ref[rw]);
}

static int get_stripe_key_trans(struct btree_trans *trans, u64 idx,
				struct ec_stripe_buf *stripe)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes,
			       POS(0, idx), BTREE_ITER_slots);
	ret = bkey_err(k);
	if (ret)
		goto err;
	if (k.k->type != KEY_TYPE_stripe) {
		ret = -ENOENT;
		goto err;
	}
	bkey_reassemble(&stripe->key, k);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

/* recovery read path: */
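/*
 * Reconstruct the data a failed extent read wanted: look up the stripe
 * it belonged to, read the surviving blocks, verify checksums, recover
 * the missing block and copy the requested range into the original bio.
 */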
int bch2_ec_read_extent(struct btree_trans *trans, struct bch_read_bio *rbio,
			struct bkey_s_c orig_k)
{
	struct bch_fs *c = trans->c;
	struct ec_stripe_buf *buf = NULL;
	struct closure cl;
	struct bch_stripe *v;
	unsigned i, offset;
	const char *msg = NULL;
	struct printbuf msgbuf = PRINTBUF;
	int ret = 0;

	closure_init_stack(&cl);

	BUG_ON(!rbio->pick.has_ec);

	buf = kzalloc(sizeof(*buf), GFP_NOFS);
	if (!buf)
		return -BCH_ERR_ENOMEM_ec_read_extent;

	ret = lockrestart_do(trans, get_stripe_key_trans(trans, rbio->pick.ec.idx, buf));
	if (ret) {
		msg = "stripe not found";
		goto err;
	}

	v = &bkey_i_to_stripe(&buf->key)->v;

	if (!bch2_ptr_matches_stripe(v, rbio->pick)) {
		msg = "pointer doesn't match stripe";
		goto err;
	}

	offset = rbio->bio.bi_iter.bi_sector - v->ptrs[rbio->pick.ec.block].offset;
	if (offset + bio_sectors(&rbio->bio) > le16_to_cpu(v->sectors)) {
		msg = "read is bigger than stripe";
		goto err;
	}

	ret = ec_stripe_buf_init(buf, offset, bio_sectors(&rbio->bio));
	if (ret) {
		msg = "-ENOMEM";
		goto err;
	}

	for (i = 0; i < v->nr_blocks; i++)
		ec_block_io(c, buf, REQ_OP_READ, i, &cl);

	closure_sync(&cl);

	if (ec_nr_failed(buf) > v->nr_redundant) {
		msg = "unable to read enough blocks";
		goto err;
	}

	ec_validate_checksums(c, buf);

	ret = ec_do_recov(c, buf);
	if (ret)
		goto err;

	memcpy_to_bio(&rbio->bio, rbio->bio.bi_iter,
		      buf->data[rbio->pick.ec.block] + ((offset - buf->offset) << 9));
out:
	ec_stripe_buf_exit(buf);
	kfree(buf);
	return ret;
err:
	bch2_bkey_val_to_text(&msgbuf, c, orig_k);
	bch_err_ratelimited(c,
			    "error doing reconstruct read: %s\n  %s", msg, msgbuf.buf);
	printbuf_exit(&msgbuf);
	ret = -BCH_ERR_stripe_reconstruct;
	goto out;
}

/* stripe bucket accounting: */

static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp)
{
	if (c->gc_pos.phase != GC_PHASE_not_running &&
	    !genradix_ptr_alloc(&c->gc_stripes, idx, gfp))
		return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;

	return 0;
}

static int ec_stripe_mem_alloc(struct btree_trans *trans,
			       struct btree_iter *iter)
{
	return allocate_dropping_locks_errcode(trans,
			__ec_stripe_mem_alloc(trans->c, iter->pos.offset, _gfp));
}

/*
 * Hash table of open stripes:
 * Stripes that are being created or modified are kept in a hash table, so that
 * stripe deletion can skip them.
 */

static bool __bch2_stripe_is_open(struct bch_fs *c, u64 idx)
{
	unsigned hash = hash_64(idx, ilog2(ARRAY_SIZE(c->ec_stripes_new)));
	struct ec_stripe_new *s;

	hlist_for_each_entry(s, &c->ec_stripes_new[hash], hash)
		if (s->idx == idx)
			return true;
	return false;
}

static bool bch2_stripe_is_open(struct bch_fs *c, u64 idx)
{
	bool ret = false;

	spin_lock(&c->ec_stripes_new_lock);
	ret = __bch2_stripe_is_open(c, idx);
	spin_unlock(&c->ec_stripes_new_lock);

	return ret;
}

static bool bch2_try_open_stripe(struct bch_fs *c,
				 struct ec_stripe_new *s,
				 u64 idx)
{
	bool ret;

	spin_lock(&c->ec_stripes_new_lock);
	ret = !__bch2_stripe_is_open(c, idx);
	if (ret) {
		unsigned hash = hash_64(idx, ilog2(ARRAY_SIZE(c->ec_stripes_new)));

		s->idx = idx;
		hlist_add_head(&s->hash, &c->ec_stripes_new[hash]);
	}
	spin_unlock(&c->ec_stripes_new_lock);

	return ret;
}

static void bch2_stripe_close(struct bch_fs *c, struct ec_stripe_new *s)
{
	BUG_ON(!s->idx);

	spin_lock(&c->ec_stripes_new_lock);
	hlist_del_init(&s->hash);
	spin_unlock(&c->ec_stripes_new_lock);

	s->idx = 0;
}

/* stripe deletion */

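/*
 * Delete an empty stripe (fragmentation lru position 1 appears to mean
 * no blocks have live data), rechecking under the btree key lock that
 * it isn't open.
 */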
static int ec_stripe_delete(struct btree_trans *trans, u64 idx)
{
	struct btree_iter iter;
	struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter,
					       BTREE_ID_stripes, POS(0, idx),
					       BTREE_ITER_intent);
	int ret = bkey_err(k);
	if (ret)
		goto err;

	/*
	 * We expect write buffer races here
	 * Important: check stripe_is_open with stripe key locked:
	 */
	if (k.k->type == KEY_TYPE_stripe &&
	    !bch2_stripe_is_open(trans->c, idx) &&
	    stripe_lru_pos(bkey_s_c_to_stripe(k).v) == 1)
		ret = bch2_btree_delete_at(trans, &iter, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

/*
 * XXX
 * can we kill this and delete stripes from the trigger?
 */
static void ec_stripe_delete_work(struct work_struct *work)
{
	struct bch_fs *c =
		container_of(work, struct bch_fs, ec_stripe_delete_work);

	bch2_trans_run(c,
		bch2_btree_write_buffer_tryflush(trans) ?:
		for_each_btree_key_max_commit(trans, lru_iter, BTREE_ID_lru,
				lru_pos(BCH_LRU_STRIPE_FRAGMENTATION, 1, 0),
				lru_pos(BCH_LRU_STRIPE_FRAGMENTATION, 1, LRU_TIME_MAX),
				0, lru_k,
				NULL, NULL,
				BCH_TRANS_COMMIT_no_enospc, ({
			ec_stripe_delete(trans, lru_k.k->p.offset);
		})));
	bch2_write_ref_put(c, BCH_WRITE_REF_stripe_delete);
}

void bch2_do_stripe_deletes(struct bch_fs *c)
{
	if (bch2_write_ref_tryget(c, BCH_WRITE_REF_stripe_delete) &&
	    !queue_work(c->write_ref_wq, &c->ec_stripe_delete_work))
		bch2_write_ref_put(c, BCH_WRITE_REF_stripe_delete);
}

/* stripe creation: */

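/*
 * Create or update a stripe key, preserving the existing on disk block
 * counts and refusing to change pointers for blocks that still have
 * live data.
 */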
static int ec_stripe_key_update(struct btree_trans *trans,
				struct bkey_i_stripe *old,
				struct bkey_i_stripe *new)
{
	struct bch_fs *c = trans->c;
	bool create = !old;

	struct btree_iter iter;
	struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes,
					       new->k.p, BTREE_ITER_intent);
	int ret = bkey_err(k);
	if (ret)
		goto err;

	if (bch2_fs_inconsistent_on(k.k->type != (create ? KEY_TYPE_deleted : KEY_TYPE_stripe),
				    c, "error %s stripe: got existing key type %s",
				    create ? "creating" : "updating",
				    bch2_bkey_types[k.k->type])) {
		ret = -EINVAL;
		goto err;
	}

	if (k.k->type == KEY_TYPE_stripe) {
		const struct bch_stripe *v = bkey_s_c_to_stripe(k).v;

		BUG_ON(old->v.nr_blocks != new->v.nr_blocks);
		BUG_ON(old->v.nr_blocks != v->nr_blocks);

		for (unsigned i = 0; i < new->v.nr_blocks; i++) {
			unsigned sectors = stripe_blockcount_get(v, i);

			if (!bch2_extent_ptr_eq(old->v.ptrs[i], new->v.ptrs[i]) && sectors) {
				struct printbuf buf = PRINTBUF;

				prt_printf(&buf, "stripe changed nonempty block %u", i);
				prt_str(&buf, "\nold: ");
				bch2_bkey_val_to_text(&buf, c, k);
				prt_str(&buf, "\nnew: ");
				bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&new->k_i));
				bch2_fs_inconsistent(c, "%s", buf.buf);
				printbuf_exit(&buf);
				ret = -EINVAL;
				goto err;
			}

			/*
			 * If the stripe ptr changed underneath us, it must have
			 * been dev_remove_stripes() -> invalidate_stripe_to_dev()
			 */
			if (!bch2_extent_ptr_eq(old->v.ptrs[i], v->ptrs[i])) {
				BUG_ON(v->ptrs[i].dev != BCH_SB_MEMBER_INVALID);

				if (bch2_extent_ptr_eq(old->v.ptrs[i], new->v.ptrs[i]))
					new->v.ptrs[i].dev = BCH_SB_MEMBER_INVALID;
			}

			stripe_blockcount_set(&new->v, i, sectors);
		}
	}

	ret = bch2_trans_update(trans, &iter, &new->k_i, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

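/*
 * Given a backpointer into a bucket now owned by a stripe, rewrite the
 * extent it points to so the matching pointer carries a stripe_ptr
 * entry referencing the new stripe.
 */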
static int ec_stripe_update_extent(struct btree_trans *trans,
				   struct bch_dev *ca,
				   struct bpos bucket, u8 gen,
				   struct ec_stripe_buf *s,
				   struct bkey_s_c_backpointer bp,
				   struct bkey_buf *last_flushed)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	const struct bch_extent_ptr *ptr_c;
	struct bch_extent_ptr *ec_ptr = NULL;
	struct bch_extent_stripe_ptr stripe_ptr;
	struct bkey_i *n;
	int ret, dev, block;

	if (bp.v->level) {
		struct printbuf buf = PRINTBUF;
		struct btree_iter node_iter;
		struct btree *b;

		b = bch2_backpointer_get_node(trans, bp, &node_iter, last_flushed);
		bch2_trans_iter_exit(trans, &node_iter);

		if (!b)
			return 0;

		prt_printf(&buf, "found btree node in erasure coded bucket: b=%px\n", b);
		bch2_bkey_val_to_text(&buf, c, bp.s_c);

		bch2_fs_inconsistent(c, "%s", buf.buf);
		printbuf_exit(&buf);
		return -BCH_ERR_erasure_coding_found_btree_node;
	}

	k = bch2_backpointer_get_key(trans, bp, &iter, BTREE_ITER_intent, last_flushed);
	ret = bkey_err(k);
	if (ret)
		return ret;
	if (!k.k) {
		/*
		 * extent no longer exists - we could flush the btree
		 * write buffer and retry to verify, but no need:
		 */
		return 0;
	}

	if (extent_has_stripe_ptr(k, s->key.k.p.offset))
		goto out;

	ptr_c = bkey_matches_stripe(v, k, &block);
	/*
	 * It doesn't generally make sense to erasure code cached ptrs:
	 * XXX: should we be incrementing a counter?
	 */
	if (!ptr_c || ptr_c->cached)
		goto out;

	dev = v->ptrs[block].dev;

	n = bch2_trans_kmalloc(trans, bkey_bytes(k.k) + sizeof(stripe_ptr));
	ret = PTR_ERR_OR_ZERO(n);
	if (ret)
		goto out;

	bkey_reassemble(n, k);

	bch2_bkey_drop_ptrs_noerror(bkey_i_to_s(n), ptr, ptr->dev != dev);
	ec_ptr = bch2_bkey_has_device(bkey_i_to_s(n), dev);
	BUG_ON(!ec_ptr);

	stripe_ptr = (struct bch_extent_stripe_ptr) {
		.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr,
		.block		= block,
		.redundancy	= v->nr_redundant,
		.idx		= s->key.k.p.offset,
	};

	__extent_entry_insert(n,
			(union bch_extent_entry *) ec_ptr,
			(union bch_extent_entry *) &stripe_ptr);

	ret = bch2_trans_update(trans, &iter, n, 0);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static int ec_stripe_update_bucket(struct btree_trans *trans, struct ec_stripe_buf *s,
				   unsigned block)
{
	struct bch_fs *c = trans->c;
	struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
	struct bch_extent_ptr ptr = v->ptrs[block];
	int ret = 0;

	struct bch_dev *ca = bch2_dev_tryget(c, ptr.dev);
	if (!ca)
		return -BCH_ERR_ENOENT_dev_not_found;

	struct bpos bucket_pos = PTR_BUCKET_POS(ca, &ptr);

	struct bkey_buf last_flushed;
	bch2_bkey_buf_init(&last_flushed);
	bkey_init(&last_flushed.k->k);

	ret = for_each_btree_key_max_commit(trans, bp_iter, BTREE_ID_backpointers,
			bucket_pos_to_bp_start(ca, bucket_pos),
			bucket_pos_to_bp_end(ca, bucket_pos), 0, bp_k,
			NULL, NULL,
			BCH_TRANS_COMMIT_no_check_rw|
			BCH_TRANS_COMMIT_no_enospc, ({
		if (bkey_ge(bp_k.k->p, bucket_pos_to_bp(ca, bpos_nosnap_successor(bucket_pos), 0)))
			break;

		if (bp_k.k->type != KEY_TYPE_backpointer)
			continue;

		struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(bp_k);
		if (bp.v->btree_id == BTREE_ID_stripes)
			continue;

		ec_stripe_update_extent(trans, ca, bucket_pos, ptr.gen, s,
					bp, &last_flushed);
	}));

	bch2_bkey_buf_exit(&last_flushed, c);
	bch2_dev_put(ca);
	return ret;
}

static int ec_stripe_update_extents(struct bch_fs *c, struct ec_stripe_buf *s)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
	unsigned nr_data = v->nr_blocks - v->nr_redundant;

	int ret = bch2_btree_write_buffer_flush_sync(trans);
	if (ret)
		goto err;

	for (unsigned i = 0; i < nr_data; i++) {
		ret = ec_stripe_update_bucket(trans, s, i);
		if (ret)
			break;
	}
err:
	bch2_trans_put(trans);
	return ret;
}

static void zero_out_rest_of_ec_bucket(struct bch_fs *c,
				       struct ec_stripe_new *s,
				       unsigned block,
				       struct open_bucket *ob)
{
	struct bch_dev *ca = bch2_dev_get_ioref(c, ob->dev, WRITE);
	if (!ca) {
		s->err = -BCH_ERR_erofs_no_writes;
		return;
	}

	unsigned offset = ca->mi.bucket_size - ob->sectors_free;
	memset(s->new_stripe.data[block] + (offset << 9),
	       0,
	       ob->sectors_free << 9);

	int ret = blkdev_issue_zeroout(ca->disk_sb.bdev,
			ob->bucket * ca->mi.bucket_size + offset,
			ob->sectors_free,
			GFP_KERNEL, 0);

	percpu_ref_put(&ca->io_ref[WRITE]);

	if (ret)
		s->err = ret;
}

void bch2_ec_stripe_new_free(struct bch_fs *c, struct ec_stripe_new *s)
{
	if (s->idx)
		bch2_stripe_close(c, s);
	kfree(s);
}

/*
 * data buckets of new stripe all written: create the stripe
 */
static void ec_stripe_create(struct ec_stripe_new *s)
{
	struct bch_fs *c = s->c;
	struct open_bucket *ob;
	struct bch_stripe *v = &bkey_i_to_stripe(&s->new_stripe.key)->v;
	unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
	int ret;

	BUG_ON(s->h->s == s);

	closure_sync(&s->iodone);

	if (!s->err) {
		for (i = 0; i < nr_data; i++)
			if (s->blocks[i]) {
				ob = c->open_buckets + s->blocks[i];

				if (ob->sectors_free)
					zero_out_rest_of_ec_bucket(c, s, i, ob);
			}
	}

	if (s->err) {
		if (!bch2_err_matches(s->err, EROFS))
			bch_err(c, "error creating stripe: error writing data buckets");
		ret = s->err;
		goto err;
	}

	if (s->have_existing_stripe) {
		ec_validate_checksums(c, &s->existing_stripe);

		if (ec_do_recov(c, &s->existing_stripe)) {
			bch_err(c, "error creating stripe: error reading existing stripe");
			ret = -BCH_ERR_ec_block_read;
			goto err;
		}

		for (i = 0; i < nr_data; i++)
			if (stripe_blockcount_get(&bkey_i_to_stripe(&s->existing_stripe.key)->v, i))
				swap(s->new_stripe.data[i],
				     s->existing_stripe.data[i]);

		ec_stripe_buf_exit(&s->existing_stripe);
	}

	BUG_ON(!s->allocated);
	BUG_ON(!s->idx);

	ec_generate_ec(&s->new_stripe);

	ec_generate_checksums(&s->new_stripe);

	/* write p/q: */
	for (i = nr_data; i < v->nr_blocks; i++)
		ec_block_io(c, &s->new_stripe, REQ_OP_WRITE, i, &s->iodone);
	closure_sync(&s->iodone);

	if (ec_nr_failed(&s->new_stripe)) {
		bch_err(c, "error creating stripe: error writing redundancy buckets");
		ret = -BCH_ERR_ec_block_write;
		goto err;
	}

	ret = bch2_trans_commit_do(c, &s->res, NULL,
		BCH_TRANS_COMMIT_no_check_rw|
		BCH_TRANS_COMMIT_no_enospc,
		ec_stripe_key_update(trans,
				     s->have_existing_stripe
				     ? bkey_i_to_stripe(&s->existing_stripe.key)
				     : NULL,
				     bkey_i_to_stripe(&s->new_stripe.key)));
	bch_err_msg(c, ret, "creating stripe key");
	if (ret)
		goto err;

	ret = ec_stripe_update_extents(c, &s->new_stripe);
	bch_err_msg(c, ret, "error updating extents");
	if (ret)
		goto err;
err:
	trace_stripe_create(c, s->idx, ret);

	bch2_disk_reservation_put(c, &s->res);

	for (i = 0; i < v->nr_blocks; i++)
		if (s->blocks[i]) {
			ob = c->open_buckets + s->blocks[i];

			if (i < nr_data) {
				ob->ec = NULL;
				__bch2_open_bucket_put(c, ob);
			} else {
				bch2_open_bucket_put(c, ob);
			}
		}

	mutex_lock(&c->ec_stripe_new_lock);
	list_del(&s->list);
	mutex_unlock(&c->ec_stripe_new_lock);
	wake_up(&c->ec_stripe_new_wait);

	ec_stripe_buf_exit(&s->existing_stripe);
	ec_stripe_buf_exit(&s->new_stripe);
	closure_debug_destroy(&s->iodone);

	ec_stripe_new_put(c, s, STRIPE_REF_stripe);
}

static struct ec_stripe_new *get_pending_stripe(struct bch_fs *c)
{
	struct ec_stripe_new *s;

	mutex_lock(&c->ec_stripe_new_lock);
	list_for_each_entry(s, &c->ec_stripe_new_list, list)
		if (!atomic_read(&s->ref[STRIPE_REF_io]))
			goto out;
	s = NULL;
out:
	mutex_unlock(&c->ec_stripe_new_lock);

	return s;
}

static void ec_stripe_create_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work,
		struct bch_fs, ec_stripe_create_work);
	struct ec_stripe_new *s;

	while ((s = get_pending_stripe(c)))
		ec_stripe_create(s);

	bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create);
}

void bch2_ec_do_stripe_creates(struct bch_fs *c)
{
	bch2_write_ref_get(c, BCH_WRITE_REF_stripe_create);

	if (!queue_work(system_long_wq, &c->ec_stripe_create_work))
		bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create);
}

static void ec_stripe_new_set_pending(struct bch_fs *c, struct ec_stripe_head *h)
{
	struct ec_stripe_new *s = h->s;

	lockdep_assert_held(&h->lock);

	BUG_ON(!s->allocated && !s->err);

	h->s		= NULL;
	s->pending	= true;

	mutex_lock(&c->ec_stripe_new_lock);
	list_add(&s->list, &c->ec_stripe_new_list);
	mutex_unlock(&c->ec_stripe_new_lock);

	ec_stripe_new_put(c, s, STRIPE_REF_io);
}

static void ec_stripe_new_cancel(struct bch_fs *c, struct ec_stripe_head *h, int err)
{
	h->s->err = err;
	ec_stripe_new_set_pending(c, h);
}

void bch2_ec_bucket_cancel(struct bch_fs *c, struct open_bucket *ob, int err)
{
	struct ec_stripe_new *s = ob->ec;

	s->err = err;
}

void *bch2_writepoint_ec_buf(struct bch_fs *c, struct write_point *wp)
{
	struct open_bucket *ob = ec_open_bucket(c, &wp->ptrs);
	if (!ob)
		return NULL;

	BUG_ON(!ob->ec->new_stripe.data[ob->ec_idx]);

	struct bch_dev *ca	= ob_dev(c, ob);
	unsigned offset		= ca->mi.bucket_size - ob->sectors_free;

	return ob->ec->new_stripe.data[ob->ec_idx] + (offset << 9);
}

static int unsigned_cmp(const void *_l, const void *_r)
{
	unsigned l = *((const unsigned *) _l);
	unsigned r = *((const unsigned *) _r);

	return cmp_int(l, r);
}

/* pick most common bucket size: */
static unsigned pick_blocksize(struct bch_fs *c,
			       struct bch_devs_mask *devs)
{
	unsigned nr = 0, sizes[BCH_SB_MEMBERS_MAX];
	struct {
		unsigned nr, size;
	} cur = { 0, 0 }, best = { 0, 0 };

	for_each_member_device_rcu(c, ca, devs)
		sizes[nr++] = ca->mi.bucket_size;

	sort(sizes, nr, sizeof(unsigned), unsigned_cmp, NULL);

	for (unsigned i = 0; i < nr; i++) {
		if (sizes[i] != cur.size) {
			if (cur.nr > best.nr)
				best = cur;

			cur.nr = 0;
			cur.size = sizes[i];
		}

		cur.nr++;
	}

	if (cur.nr > best.nr)
		best = cur;

	return best.size;
}

static bool may_create_new_stripe(struct bch_fs *c)
{
	return false;
}

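/*
 * Initialize a new stripe key. If the value would exceed the maximum
 * bkey value size, the checksum granularity is widened until the
 * per-block checksum arrays fit: each doubling of the granularity
 * halves the number of stored checksums.
 */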
static void ec_stripe_key_init(struct bch_fs *c,
			       struct bkey_i *k,
			       unsigned nr_data,
			       unsigned nr_parity,
			       unsigned stripe_size,
			       unsigned disk_label)
{
	struct bkey_i_stripe *s = bkey_stripe_init(k);
	unsigned u64s;

	s->v.sectors			= cpu_to_le16(stripe_size);
	s->v.algorithm			= 0;
	s->v.nr_blocks			= nr_data + nr_parity;
	s->v.nr_redundant		= nr_parity;
	s->v.csum_granularity_bits	= ilog2(c->opts.encoded_extent_max >> 9);
	s->v.csum_type			= BCH_CSUM_crc32c;
	s->v.disk_label			= disk_label;

	while ((u64s = stripe_val_u64s(&s->v)) > BKEY_VAL_U64s_MAX) {
		BUG_ON(1 << s->v.csum_granularity_bits >=
		       le16_to_cpu(s->v.sectors) ||
		       s->v.csum_granularity_bits == U8_MAX);
		s->v.csum_granularity_bits++;
	}

	set_bkey_val_u64s(&s->k, u64s);
}

static struct ec_stripe_new *ec_new_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
{
	struct ec_stripe_new *s;

	lockdep_assert_held(&h->lock);

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return NULL;

	mutex_init(&s->lock);
	closure_init(&s->iodone, NULL);
	atomic_set(&s->ref[STRIPE_REF_stripe], 1);
	atomic_set(&s->ref[STRIPE_REF_io], 1);
	s->c		= c;
	s->h		= h;
	s->nr_data	= min_t(unsigned, h->nr_active_devs,
				BCH_BKEY_PTRS_MAX) - h->redundancy;
	s->nr_parity	= h->redundancy;

	ec_stripe_key_init(c, &s->new_stripe.key,
			   s->nr_data, s->nr_parity,
			   h->blocksize, h->disk_label);
	return s;
}

static void ec_stripe_head_devs_update(struct bch_fs *c, struct ec_stripe_head *h)
{
	struct bch_devs_mask devs = h->devs;

	rcu_read_lock();
	h->devs = target_rw_devs(c, BCH_DATA_user, h->disk_label
				 ? group_to_target(h->disk_label - 1)
				 : 0);
	unsigned nr_devs = dev_mask_nr(&h->devs);

	for_each_member_device_rcu(c, ca, &h->devs)
		if (!ca->mi.durability)
			__clear_bit(ca->dev_idx, h->devs.d);
	unsigned nr_devs_with_durability = dev_mask_nr(&h->devs);

	h->blocksize = pick_blocksize(c, &h->devs);

	h->nr_active_devs = 0;
	for_each_member_device_rcu(c, ca, &h->devs)
		if (ca->mi.bucket_size == h->blocksize)
			h->nr_active_devs++;

	rcu_read_unlock();

	/*
	 * If we only have redundancy + 1 devices, we're better off with just
	 * replication:
	 */
	h->insufficient_devs = h->nr_active_devs < h->redundancy + 2;

	if (h->insufficient_devs) {
		const char *err;

		if (nr_devs < h->redundancy + 2)
			err = NULL;
		else if (nr_devs_with_durability < h->redundancy + 2)
			err = "cannot use durability=0 devices";
		else
			err = "mismatched bucket sizes";

		if (err)
			bch_err(c, "insufficient devices available to create stripe (have %u, need %u): %s",
				h->nr_active_devs, h->redundancy + 2, err);
	}

	struct bch_devs_mask devs_leaving;
	bitmap_andnot(devs_leaving.d, devs.d, h->devs.d, BCH_SB_MEMBERS_MAX);

	if (h->s && !h->s->allocated && dev_mask_nr(&devs_leaving))
		ec_stripe_new_cancel(c, h, -EINTR);

	h->rw_devs_change_count = c->rw_devs_change_count;
}

static struct ec_stripe_head *
ec_new_stripe_head_alloc(struct bch_fs *c, unsigned disk_label,
			 unsigned algo, unsigned redundancy,
			 enum bch_watermark watermark)
{
	struct ec_stripe_head *h;

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return NULL;

	mutex_init(&h->lock);
	BUG_ON(!mutex_trylock(&h->lock));

	h->disk_label	= disk_label;
	h->algo		= algo;
	h->redundancy	= redundancy;
	h->watermark	= watermark;

	list_add(&h->list, &c->ec_stripe_head_list);
	return h;
}

void bch2_ec_stripe_head_put(struct bch_fs *c, struct ec_stripe_head *h)
{
	if (h->s &&
	    h->s->allocated &&
	    bitmap_weight(h->s->blocks_allocated,
			  h->s->nr_data) == h->s->nr_data)
		ec_stripe_new_set_pending(c, h);

	mutex_unlock(&h->lock);
}

static struct ec_stripe_head *
__bch2_ec_stripe_head_get(struct btree_trans *trans,
			  unsigned disk_label,
			  unsigned algo,
			  unsigned redundancy,
			  enum bch_watermark watermark)
{
	struct bch_fs *c = trans->c;
	struct ec_stripe_head *h;
	int ret;

	if (!redundancy)
		return NULL;

	ret = bch2_trans_mutex_lock(trans, &c->ec_stripe_head_lock);
	if (ret)
		return ERR_PTR(ret);

	if (test_bit(BCH_FS_going_ro, &c->flags)) {
		h = ERR_PTR(-BCH_ERR_erofs_no_writes);
		goto err;
	}

	list_for_each_entry(h, &c->ec_stripe_head_list, list)
		if (h->disk_label	== disk_label &&
		    h->algo		== algo &&
		    h->redundancy	== redundancy &&
		    h->watermark	== watermark) {
			ret = bch2_trans_mutex_lock(trans, &h->lock);
			if (ret) {
				h = ERR_PTR(ret);
				goto err;
			}
			goto found;
		}

	h = ec_new_stripe_head_alloc(c, disk_label, algo, redundancy, watermark);
	if (!h) {
		h = ERR_PTR(-BCH_ERR_ENOMEM_stripe_head_alloc);
		goto err;
	}
found:
	if (h->rw_devs_change_count != c->rw_devs_change_count)
		ec_stripe_head_devs_update(c, h);

	if (h->insufficient_devs) {
		mutex_unlock(&h->lock);
		h = NULL;
	}
err:
	mutex_unlock(&c->ec_stripe_head_lock);
	return h;
}

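/*
 * Allocate the remaining buckets for a new stripe, parity blocks first
 * and then data blocks, skipping devices that already back one of the
 * stripe's existing blocks.
 */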
static int new_stripe_alloc_buckets(struct btree_trans *trans,
				    struct ec_stripe_head *h, struct ec_stripe_new *s,
				    enum bch_watermark watermark, struct closure *cl)
{
	struct bch_fs *c = trans->c;
	struct bch_devs_mask devs = h->devs;
	struct open_bucket *ob;
	struct open_buckets buckets;
	struct bch_stripe *v = &bkey_i_to_stripe(&s->new_stripe.key)->v;
	unsigned i, j, nr_have_parity = 0, nr_have_data = 0;
	bool have_cache = true;
	int ret = 0;

	BUG_ON(v->nr_blocks	!= s->nr_data + s->nr_parity);
	BUG_ON(v->nr_redundant	!= s->nr_parity);

	/* We bypass the sector allocator which normally does this: */
	bitmap_and(devs.d, devs.d, c->rw_devs[BCH_DATA_user].d, BCH_SB_MEMBERS_MAX);

	for_each_set_bit(i, s->blocks_gotten, v->nr_blocks) {
		/*
		 * Note: we don't yet repair invalid blocks (failed/removed
		 * devices) when reusing stripes - we still need a codepath to
		 * walk backpointers and update all extents that point to that
		 * block when updating the stripe.
		 */
		if (v->ptrs[i].dev != BCH_SB_MEMBER_INVALID)
			__clear_bit(v->ptrs[i].dev, devs.d);

		if (i < s->nr_data)
			nr_have_data++;
		else
			nr_have_parity++;
	}

	BUG_ON(nr_have_data	> s->nr_data);
	BUG_ON(nr_have_parity	> s->nr_parity);

	buckets.nr = 0;
	if (nr_have_parity < s->nr_parity) {
		ret = bch2_bucket_alloc_set_trans(trans, &buckets,
					    &h->parity_stripe,
					    &devs,
					    s->nr_parity,
					    &nr_have_parity,
					    &have_cache, 0,
					    BCH_DATA_parity,
					    watermark,
					    cl);

		open_bucket_for_each(c, &buckets, ob, i) {
			j = find_next_zero_bit(s->blocks_gotten,
					       s->nr_data + s->nr_parity,
					       s->nr_data);
			BUG_ON(j >= s->nr_data + s->nr_parity);

			s->blocks[j] = buckets.v[i];
			v->ptrs[j] = bch2_ob_ptr(c, ob);
			__set_bit(j, s->blocks_gotten);
		}

		if (ret)
			return ret;
	}

	buckets.nr = 0;
	if (nr_have_data < s->nr_data) {
		ret = bch2_bucket_alloc_set_trans(trans, &buckets,
					    &h->block_stripe,
					    &devs,
					    s->nr_data,
					    &nr_have_data,
					    &have_cache, 0,
					    BCH_DATA_user,
					    watermark,
					    cl);

		open_bucket_for_each(c, &buckets, ob, i) {
			j = find_next_zero_bit(s->blocks_gotten,
					       s->nr_data, 0);
			BUG_ON(j >= s->nr_data);

			s->blocks[j] = buckets.v[i];
			v->ptrs[j] = bch2_ob_ptr(c, ob);
			__set_bit(j, s->blocks_gotten);
		}

		if (ret)
			return ret;
	}

	return 0;
}

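/*
 * Try to open the stripe at @idx for reuse: returns 1 and copies its key
 * into @stripe if it matches the head's geometry and can be opened, 0 if it
 * raced with the write buffer or doesn't match, or a negative error.
 */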
static int __get_existing_stripe(struct btree_trans *trans,
				 struct ec_stripe_head *head,
				 struct ec_stripe_buf *stripe,
				 u64 idx)
{
	struct bch_fs *c = trans->c;

	struct btree_iter iter;
	struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter,
					  BTREE_ID_stripes, POS(0, idx), 0);
	int ret = bkey_err(k);
	if (ret)
		goto err;

	/* We expect write buffer races here */
	if (k.k->type != KEY_TYPE_stripe)
		goto out;

	struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
	if (stripe_lru_pos(s.v) <= 1)
		goto out;

	if (s.v->disk_label		== head->disk_label &&
	    s.v->algorithm		== head->algo &&
	    s.v->nr_redundant		== head->redundancy &&
	    le16_to_cpu(s.v->sectors)	== head->blocksize &&
	    bch2_try_open_stripe(c, head->s, idx)) {
		bkey_reassemble(&stripe->key, k);
		ret = 1;
	}
out:
	bch2_set_btree_iter_dontneed(trans, &iter);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

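/*
 * Switch the new stripe over to the existing one we just opened: give back
 * the buckets we'd already allocated (they may conflict with the existing
 * blocks), mark blocks that still hold data as allocated, and start reads
 * of the existing contents.
 */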
static int init_new_stripe_from_existing(struct bch_fs *c, struct ec_stripe_new *s)
{
	struct bch_stripe *new_v = &bkey_i_to_stripe(&s->new_stripe.key)->v;
	struct bch_stripe *existing_v = &bkey_i_to_stripe(&s->existing_stripe.key)->v;
	unsigned i;

	BUG_ON(existing_v->nr_redundant != s->nr_parity);
	s->nr_data = existing_v->nr_blocks -
		existing_v->nr_redundant;

	int ret = ec_stripe_buf_init(&s->existing_stripe, 0, le16_to_cpu(existing_v->sectors));
	if (ret) {
		bch2_stripe_close(c, s);
		return ret;
	}

	BUG_ON(s->existing_stripe.size != le16_to_cpu(existing_v->sectors));

	/*
	 * Free buckets we initially allocated - they might conflict with
	 * blocks from the stripe we're reusing:
	 */
	for_each_set_bit(i, s->blocks_gotten, new_v->nr_blocks) {
		bch2_open_bucket_put(c, c->open_buckets + s->blocks[i]);
		s->blocks[i] = 0;
	}
	memset(s->blocks_gotten, 0, sizeof(s->blocks_gotten));
	memset(s->blocks_allocated, 0, sizeof(s->blocks_allocated));

	for (unsigned i = 0; i < existing_v->nr_blocks; i++) {
		if (stripe_blockcount_get(existing_v, i)) {
			__set_bit(i, s->blocks_gotten);
			__set_bit(i, s->blocks_allocated);
		}

		ec_block_io(c, &s->existing_stripe, READ, i, &s->iodone);
	}

	bkey_copy(&s->new_stripe.key, &s->existing_stripe.key);
	s->have_existing_stripe = true;

	return 0;
}

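/*
 * Look for an existing stripe with empty blocks we can refill, by scanning
 * the stripe fragmentation LRU; returns -BCH_ERR_stripe_alloc_blocked if
 * nothing suitable exists yet.
 */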
static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stripe_head *h,
				       struct ec_stripe_new *s)
{
	struct bch_fs *c = trans->c;

	/*
	 * If we can't allocate a new stripe, and there are no stripes with
	 * empty blocks for us to reuse, that means we have to wait on copygc:
	 */
	if (may_create_new_stripe(c))
		return -1;

	struct btree_iter lru_iter;
	struct bkey_s_c lru_k;
	int ret = 0;

	for_each_btree_key_max_norestart(trans, lru_iter, BTREE_ID_lru,
			lru_pos(BCH_LRU_STRIPE_FRAGMENTATION, 2, 0),
			lru_pos(BCH_LRU_STRIPE_FRAGMENTATION, 2, LRU_TIME_MAX),
			0, lru_k, ret) {
		ret = __get_existing_stripe(trans, h, &s->existing_stripe, lru_k.k->p.offset);
		if (ret)
			break;
	}
	bch2_trans_iter_exit(trans, &lru_iter);
	if (!ret)
		ret = -BCH_ERR_stripe_alloc_blocked;
	if (ret == 1)
		ret = 0;
	if (ret)
		return ret;

	return init_new_stripe_from_existing(c, s);
}

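/*
 * Reserve a slot for a brand new stripe: take a disk reservation for the
 * parity blocks, then scan the stripes btree from the last allocation hint
 * for an empty slot we can open.
 */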
static int __bch2_ec_stripe_head_reserve(struct btree_trans *trans, struct ec_stripe_head *h,
					 struct ec_stripe_new *s)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bpos min_pos = POS(0, 1);
	struct bpos start_pos = bpos_max(min_pos, POS(0, c->ec_stripe_hint));
	int ret;

	if (!s->res.sectors) {
		ret = bch2_disk_reservation_get(c, &s->res,
					h->blocksize,
					s->nr_parity,
					BCH_DISK_RESERVATION_NOFAIL);
		if (ret)
			return ret;
	}

	/*
	 * Allocate stripe slot
	 * XXX: we're going to need a bitrange btree of free stripes
	 */
	for_each_btree_key_norestart(trans, iter, BTREE_ID_stripes, start_pos,
			   BTREE_ITER_slots|BTREE_ITER_intent, k, ret) {
		if (bkey_gt(k.k->p, POS(0, U32_MAX))) {
			if (start_pos.offset) {
				start_pos = min_pos;
				bch2_btree_iter_set_pos(trans, &iter, start_pos);
				continue;
			}

			ret = -BCH_ERR_ENOSPC_stripe_create;
			break;
		}

		if (bkey_deleted(k.k) &&
		    bch2_try_open_stripe(c, s, k.k->p.offset))
			break;
	}

	c->ec_stripe_hint = iter.pos.offset;

	if (ret)
		goto err;

	ret = ec_stripe_mem_alloc(trans, &iter);
	if (ret) {
		bch2_stripe_close(c, s);
		goto err;
	}

	s->new_stripe.key.k.p = iter.pos;
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
err:
	bch2_disk_reservation_put(c, &s->res);
	goto out;
}

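/*
 * Main entry point for stripe allocation: returns a stripe head with a fully
 * allocated new stripe attached, creating a fresh stripe, reusing a
 * fragmented existing one, or waiting on @cl, as needed.
 *
 * Illustrative call sequence only - a sketch, not code from this file; the
 * target, algorithm, redundancy, watermark and error handling a real caller
 * needs depend on context:
 *
 *	struct ec_stripe_head *h =
 *		bch2_ec_stripe_head_get(trans, target, 0, 1,
 *					BCH_WATERMARK_normal, cl);
 *	if (IS_ERR_OR_NULL(h))
 *		return PTR_ERR_OR_ZERO(h);
 *
 *	// ... stripe the write across the blocks of h->s ...
 *
 *	bch2_ec_stripe_head_put(c, h);
 */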
struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
					       unsigned target,
					       unsigned algo,
					       unsigned redundancy,
					       enum bch_watermark watermark,
					       struct closure *cl)
{
	struct bch_fs *c = trans->c;
	struct ec_stripe_head *h;
	bool waiting = false;
	unsigned disk_label = 0;
	struct target t = target_decode(target);
	int ret;

	if (t.type == TARGET_GROUP) {
		if (t.group > U8_MAX) {
			bch_err(c, "cannot create a stripe when disk_label > U8_MAX");
			return NULL;
		}
		disk_label = t.group + 1; /* 0 == no label */
	}

	h = __bch2_ec_stripe_head_get(trans, disk_label, algo, redundancy, watermark);
	if (IS_ERR_OR_NULL(h))
		return h;

	if (!h->s) {
		h->s = ec_new_stripe_alloc(c, h);
		if (!h->s) {
			ret = -BCH_ERR_ENOMEM_ec_new_stripe_alloc;
			bch_err(c, "failed to allocate new stripe");
			goto err;
		}

		h->nr_created++;
	}

	struct ec_stripe_new *s = h->s;

	if (s->allocated)
		goto allocated;

	if (s->have_existing_stripe)
		goto alloc_existing;

	/* First, try to allocate a full stripe: */
	ret =   new_stripe_alloc_buckets(trans, h, s, BCH_WATERMARK_stripe, NULL) ?:
		__bch2_ec_stripe_head_reserve(trans, h, s);
	if (!ret)
		goto allocate_buf;
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
	    bch2_err_matches(ret, ENOMEM))
		goto err;

	/*
	 * Not enough buckets available for a full stripe: we must reuse an
	 * existing stripe:
	 */
	while (1) {
		ret = __bch2_ec_stripe_head_reuse(trans, h, s);
		if (!ret)
			break;
		if (waiting || !cl || ret != -BCH_ERR_stripe_alloc_blocked)
			goto err;

		if (watermark == BCH_WATERMARK_copygc) {
			ret =   new_stripe_alloc_buckets(trans, h, s, watermark, NULL) ?:
				__bch2_ec_stripe_head_reserve(trans, h, s);
			if (ret)
				goto err;
			goto allocate_buf;
		}

		/* XXX freelist_wait? */
		closure_wait(&c->freelist_wait, cl);
		waiting = true;
	}

	if (waiting)
		closure_wake_up(&c->freelist_wait);
alloc_existing:
	/*
	 * Retry allocating buckets, with the watermark for this
	 * particular write:
	 */
	ret = new_stripe_alloc_buckets(trans, h, s, watermark, cl);
	if (ret)
		goto err;

allocate_buf:
	ret = ec_stripe_buf_init(&s->new_stripe, 0, h->blocksize);
	if (ret)
		goto err;

	s->allocated = true;
allocated:
	BUG_ON(!s->idx);
	BUG_ON(!s->new_stripe.data[0]);
	BUG_ON(trans->restarted);
	return h;
err:
	bch2_ec_stripe_head_put(c, h);
	return ERR_PTR(ret);
}

/* device removal */

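/*
 * Drop a device from the stripe backing this alloc key's bucket. Replicas
 * accounting is updated in two steps, since marking the pointer invalid
 * changes which replicas entry the stripe's sectors belong to:
 *
 *	old replicas entry -= sectors;
 *	ptr->dev = BCH_SB_MEMBER_INVALID;
 *	new replicas entry += sectors;
 */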
static int bch2_invalidate_stripe_to_dev(struct btree_trans *trans, struct bkey_s_c k_a)
{
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k_a, &a_convert);

	if (!a->stripe)
		return 0;

	if (a->stripe_sectors) {
		bch_err(trans->c, "trying to invalidate device in stripe when bucket has stripe data");
		return -BCH_ERR_invalidate_stripe_to_dev;
	}

	struct btree_iter iter;
	struct bkey_i_stripe *s =
		bch2_bkey_get_mut_typed(trans, &iter, BTREE_ID_stripes, POS(0, a->stripe),
					BTREE_ITER_slots, stripe);
	int ret = PTR_ERR_OR_ZERO(s);
	if (ret)
		return ret;

	struct disk_accounting_pos acc;

	s64 sectors = 0;
	for (unsigned i = 0; i < s->v.nr_blocks; i++)
		sectors -= stripe_blockcount_get(&s->v, i);

	memset(&acc, 0, sizeof(acc));
	acc.type = BCH_DISK_ACCOUNTING_replicas;
	bch2_bkey_to_replicas(&acc.replicas, bkey_i_to_s_c(&s->k_i));
	acc.replicas.data_type = BCH_DATA_user;
	ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, false);
	if (ret)
		goto err;

	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(&s->k_i));
	bkey_for_each_ptr(ptrs, ptr)
		if (ptr->dev == k_a.k->p.inode)
			ptr->dev = BCH_SB_MEMBER_INVALID;

	sectors = -sectors;

	memset(&acc, 0, sizeof(acc));
	acc.type = BCH_DISK_ACCOUNTING_replicas;
	bch2_bkey_to_replicas(&acc.replicas, bkey_i_to_s_c(&s->k_i));
	acc.replicas.data_type = BCH_DATA_user;
	ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, false);
	if (ret)
		goto err;
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

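/* Walk @dev_idx's alloc keys, dropping the device from every stripe it backs. */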
int bch2_dev_remove_stripes(struct bch_fs *c, unsigned dev_idx)
{
	return bch2_trans_run(c,
		for_each_btree_key_max_commit(trans, iter,
				  BTREE_ID_alloc, POS(dev_idx, 0), POS(dev_idx, U64_MAX),
				  BTREE_ITER_intent, k,
				  NULL, NULL, 0, ({
			bch2_invalidate_stripe_to_dev(trans, k);
	})));
}

/* startup/shutdown */

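/*
 * Cancel the pending new stripe on every stripe head that has an open
 * bucket on @ca, or on all heads if @ca is NULL.
 */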
static void __bch2_ec_stop(struct bch_fs *c, struct bch_dev *ca)
{
	struct ec_stripe_head *h;
	struct open_bucket *ob;
	unsigned i;

	mutex_lock(&c->ec_stripe_head_lock);
	list_for_each_entry(h, &c->ec_stripe_head_list, list) {
		mutex_lock(&h->lock);
		if (!h->s)
			goto unlock;

		if (!ca)
			goto found;

		for (i = 0; i < bkey_i_to_stripe(&h->s->new_stripe.key)->v.nr_blocks; i++) {
			if (!h->s->blocks[i])
				continue;

			ob = c->open_buckets + h->s->blocks[i];
			if (ob->dev == ca->dev_idx)
				goto found;
		}
		goto unlock;
found:
		ec_stripe_new_cancel(c, h, -BCH_ERR_erofs_no_writes);
unlock:
		mutex_unlock(&h->lock);
	}
	mutex_unlock(&c->ec_stripe_head_lock);
}

void bch2_ec_stop_dev(struct bch_fs *c, struct bch_dev *ca)
{
	__bch2_ec_stop(c, ca);
}

void bch2_fs_ec_stop(struct bch_fs *c)
{
	__bch2_ec_stop(c, NULL);
}

static bool bch2_fs_ec_flush_done(struct bch_fs *c)
{
	sched_annotate_sleep();

	mutex_lock(&c->ec_stripe_new_lock);
	bool ret = list_empty(&c->ec_stripe_new_list);
	mutex_unlock(&c->ec_stripe_new_lock);

	return ret;
}

void bch2_fs_ec_flush(struct bch_fs *c)
{
	wait_event(c->ec_stripe_new_wait, bch2_fs_ec_flush_done(c));
}

int bch2_stripes_read(struct bch_fs *c)
{
	return 0;
}

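/*
 * Debug output: dump one in-progress stripe - index, data+parity geometry,
 * how many blocks are allocated, refcounts, watermark, the open buckets
 * backing each block we have so far, and the stripe key itself.
 */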
static void bch2_new_stripe_to_text(struct printbuf *out, struct bch_fs *c,
				    struct ec_stripe_new *s)
{
	prt_printf(out, "\tidx %llu blocks %u+%u allocated %u ref %u %u %s obs",
		   s->idx, s->nr_data, s->nr_parity,
		   bitmap_weight(s->blocks_allocated, s->nr_data),
		   atomic_read(&s->ref[STRIPE_REF_io]),
		   atomic_read(&s->ref[STRIPE_REF_stripe]),
		   bch2_watermarks[s->h->watermark]);

	struct bch_stripe *v = &bkey_i_to_stripe(&s->new_stripe.key)->v;
	unsigned i;
	for_each_set_bit(i, s->blocks_gotten, v->nr_blocks)
		prt_printf(out, " %u", s->blocks[i]);
	prt_newline(out);
	bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&s->new_stripe.key));
	prt_newline(out);
}

void bch2_new_stripes_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct ec_stripe_head *h;
	struct ec_stripe_new *s;

	mutex_lock(&c->ec_stripe_head_lock);
	list_for_each_entry(h, &c->ec_stripe_head_list, list) {
		prt_printf(out, "disk label %u algo %u redundancy %u %s nr created %llu:\n",
		       h->disk_label, h->algo, h->redundancy,
		       bch2_watermarks[h->watermark],
		       h->nr_created);

		if (h->s)
			bch2_new_stripe_to_text(out, c, h->s);
	}
	mutex_unlock(&c->ec_stripe_head_lock);

	prt_printf(out, "in flight:\n");

	mutex_lock(&c->ec_stripe_new_lock);
	list_for_each_entry(s, &c->ec_stripe_new_list, list)
		bch2_new_stripe_to_text(out, c, s);
	mutex_unlock(&c->ec_stripe_new_lock);
}

void bch2_fs_ec_exit(struct bch_fs *c)
{
	struct ec_stripe_head *h;
	unsigned i;

	while (1) {
		mutex_lock(&c->ec_stripe_head_lock);
		h = list_pop_entry(&c->ec_stripe_head_list, struct ec_stripe_head, list);
		mutex_unlock(&c->ec_stripe_head_lock);

		if (!h)
			break;

		if (h->s) {
			for (i = 0; i < bkey_i_to_stripe(&h->s->new_stripe.key)->v.nr_blocks; i++)
				BUG_ON(h->s->blocks[i]);

			kfree(h->s);
		}
		kfree(h);
	}

	BUG_ON(!list_empty(&c->ec_stripe_new_list));

	bioset_exit(&c->ec_bioset);
}

void bch2_fs_ec_init_early(struct bch_fs *c)
{
	spin_lock_init(&c->ec_stripes_new_lock);

	INIT_LIST_HEAD(&c->ec_stripe_head_list);
	mutex_init(&c->ec_stripe_head_lock);

	INIT_LIST_HEAD(&c->ec_stripe_new_list);
	mutex_init(&c->ec_stripe_new_lock);
	init_waitqueue_head(&c->ec_stripe_new_wait);

	INIT_WORK(&c->ec_stripe_create_work, ec_stripe_create_work);
	INIT_WORK(&c->ec_stripe_delete_work, ec_stripe_delete_work);
}

int bch2_fs_ec_init(struct bch_fs *c)
{
	return bioset_init(&c->ec_bioset, 1, offsetof(struct ec_bio, bio),
			   BIOSET_NEED_BVECS);
}

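/*
 * fsck: every stripe with empty blocks (a nonzero fragmentation LRU
 * position) must have a matching entry in the LRU btree, or the reuse path
 * above could never find it.
 */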
static int bch2_check_stripe_to_lru_ref(struct btree_trans *trans,
					struct bkey_s_c k,
					struct bkey_buf *last_flushed)
{
	if (k.k->type != KEY_TYPE_stripe)
		return 0;

	struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);

	u64 lru_idx = stripe_lru_pos(s.v);
	if (lru_idx) {
		int ret = bch2_lru_check_set(trans, BCH_LRU_STRIPE_FRAGMENTATION,
					     k.k->p.offset, lru_idx, k, last_flushed);
		if (ret)
			return ret;
	}
	return 0;
}

int bch2_check_stripe_to_lru_refs(struct bch_fs *c)
{
	struct bkey_buf last_flushed;

	bch2_bkey_buf_init(&last_flushed);
	bkey_init(&last_flushed.k->k);

	int ret = bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter, BTREE_ID_stripes,
				POS_MIN, BTREE_ITER_prefetch, k,
				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
			bch2_check_stripe_to_lru_ref(trans, k, &last_flushed)));

	bch2_bkey_buf_exit(&last_flushed, c);
	bch_err_fn(c, ret);
	return ret;
}