1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
4 * Copyright 2012 Google, Inc.
5 */
6
7 #include "bcachefs.h"
8 #include "alloc_foreground.h"
9 #include "bkey_buf.h"
10 #include "bset.h"
11 #include "btree_update.h"
12 #include "buckets.h"
13 #include "checksum.h"
14 #include "clock.h"
15 #include "compress.h"
16 #include "debug.h"
17 #include "ec.h"
18 #include "error.h"
19 #include "extent_update.h"
20 #include "inode.h"
21 #include "io_write.h"
22 #include "journal.h"
23 #include "keylist.h"
24 #include "move.h"
25 #include "nocow_locking.h"
26 #include "rebalance.h"
27 #include "subvolume.h"
28 #include "super.h"
29 #include "super-io.h"
30 #include "trace.h"
31
32 #include <linux/blkdev.h>
33 #include <linux/prefetch.h>
34 #include <linux/random.h>
35 #include <linux/sched/mm.h>
36
37 #ifdef CONFIG_BCACHEFS_DEBUG
38 static unsigned bch2_write_corrupt_ratio;
39 module_param_named(write_corrupt_ratio, bch2_write_corrupt_ratio, uint, 0644);
40 MODULE_PARM_DESC(write_corrupt_ratio, "");
41 #endif
42
43 #ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
44
45 static inline void bch2_congested_acct(struct bch_dev *ca, u64 io_latency,
46 u64 now, int rw)
47 {
48 u64 latency_capable =
49 ca->io_latency[rw].quantiles.entries[QUANTILE_IDX(1)].m;
50 /* ideally we'd be taking into account the device's variance here: */
51 u64 latency_threshold = latency_capable << (rw == READ ? 2 : 3);
52 s64 latency_over = io_latency - latency_threshold;
53
54 if (latency_threshold && latency_over > 0) {
55 /*
56 * bump up congested by approximately latency_over * 4 /
57 * latency_threshold - we don't need much accuracy here so don't
58 * bother with the divide:
59 */
60 if (atomic_read(&ca->congested) < CONGESTED_MAX)
61 atomic_add(latency_over >>
62 max_t(int, ilog2(latency_threshold) - 2, 0),
63 &ca->congested);
64
65 ca->congested_last = now;
66 } else if (atomic_read(&ca->congested) > 0) {
67 atomic_dec(&ca->congested);
68 }
69 }
70
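/*
 * Track per-device IO latency: fold each completion time into an
 * exponentially weighted moving average and feed it into the congestion
 * accounting above.
 */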
71 void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw)
72 {
73 atomic64_t *latency = &ca->cur_latency[rw];
74 u64 now = local_clock();
75 u64 io_latency = time_after64(now, submit_time)
76 ? now - submit_time
77 : 0;
78 u64 old, new;
79
80 old = atomic64_read(latency);
81 do {
82 /*
83 * If the io latency was reasonably close to the current
84 * latency, skip doing the update and atomic operation - most of
85 * the time:
86 */
87 if (abs((int) (old - io_latency)) < (old >> 1) &&
88 now & ~(~0U << 5))
89 break;
90
91 new = ewma_add(old, io_latency, 5);
92 } while (!atomic64_try_cmpxchg(latency, &old, new));
93
94 bch2_congested_acct(ca, io_latency, now, rw);
95
96 __bch2_time_stats_update(&ca->io_latency[rw].stats, submit_time, now);
97 }
98
99 #endif
100
101 /* Allocate, free from mempool: */
102
103 void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
104 {
105 struct bvec_iter_all iter;
106 struct bio_vec *bv;
107
108 bio_for_each_segment_all(bv, bio, iter)
109 if (bv->bv_page != ZERO_PAGE(0))
110 mempool_free(bv->bv_page, &c->bio_bounce_pages);
111 bio->bi_vcnt = 0;
112 }
113
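/*
 * Allocate a bounce page, falling back to the bio_bounce_pages mempool if a
 * plain allocation fails; once we've fallen back, bio_bounce_pages_lock stays
 * held until bch2_bio_alloc_pages_pool() is done.
 */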
114 static struct page *__bio_alloc_page_pool(struct bch_fs *c, bool *using_mempool)
115 {
116 struct page *page;
117
118 if (likely(!*using_mempool)) {
119 page = alloc_page(GFP_NOFS);
120 if (unlikely(!page)) {
121 mutex_lock(&c->bio_bounce_pages_lock);
122 *using_mempool = true;
123 goto pool_alloc;
124
125 }
126 } else {
127 pool_alloc:
128 page = mempool_alloc(&c->bio_bounce_pages, GFP_NOFS);
129 }
130
131 return page;
132 }
133
134 void bch2_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio,
135 size_t size)
136 {
137 bool using_mempool = false;
138
139 while (size) {
140 struct page *page = __bio_alloc_page_pool(c, &using_mempool);
141 unsigned len = min_t(size_t, PAGE_SIZE, size);
142
143 BUG_ON(!bio_add_page(bio, page, len, 0));
144 size -= len;
145 }
146
147 if (using_mempool)
148 mutex_unlock(&c->bio_bounce_pages_lock);
149 }
150
151 /* Extent update path: */
152
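/*
 * Walk the existing extents overlapped by @new and compute how inserting it
 * will change accounting: @i_sectors_delta is the change in allocated
 * (i_sectors) sectors, @disk_sectors_delta the change in on-disk sectors, and
 * @usage_increasing is set if the new key may use more space than what it
 * overwrites.
 */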
153 int bch2_sum_sector_overwrites(struct btree_trans *trans,
154 struct btree_iter *extent_iter,
155 struct bkey_i *new,
156 bool *usage_increasing,
157 s64 *i_sectors_delta,
158 s64 *disk_sectors_delta)
159 {
160 struct bch_fs *c = trans->c;
161 struct btree_iter iter;
162 struct bkey_s_c old;
163 unsigned new_replicas = bch2_bkey_replicas(c, bkey_i_to_s_c(new));
164 bool new_compressed = bch2_bkey_sectors_compressed(bkey_i_to_s_c(new));
165 int ret = 0;
166
167 *usage_increasing = false;
168 *i_sectors_delta = 0;
169 *disk_sectors_delta = 0;
170
171 bch2_trans_copy_iter(trans, &iter, extent_iter);
172
173 for_each_btree_key_max_continue_norestart(trans, iter,
174 new->k.p, BTREE_ITER_slots, old, ret) {
175 s64 sectors = min(new->k.p.offset, old.k->p.offset) -
176 max(bkey_start_offset(&new->k),
177 bkey_start_offset(old.k));
178
179 *i_sectors_delta += sectors *
180 (bkey_extent_is_allocation(&new->k) -
181 bkey_extent_is_allocation(old.k));
182
183 *disk_sectors_delta += sectors * bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(new));
184 *disk_sectors_delta -= new->k.p.snapshot == old.k->p.snapshot
185 ? sectors * bch2_bkey_nr_ptrs_fully_allocated(old)
186 : 0;
187
188 if (!*usage_increasing &&
189 (new->k.p.snapshot != old.k->p.snapshot ||
190 new_replicas > bch2_bkey_replicas(c, old) ||
191 (!new_compressed && bch2_bkey_sectors_compressed(old))))
192 *usage_increasing = true;
193
194 if (bkey_ge(old.k->p, new->k.p))
195 break;
196 }
197
198 bch2_trans_iter_exit(trans, &iter);
199 return ret;
200 }
201
202 static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
203 struct btree_iter *extent_iter,
204 u64 new_i_size,
205 s64 i_sectors_delta)
206 {
207 /*
208 * Crazy performance optimization:
209 * Every extent update needs to also update the inode: the inode trigger
210 * will set bi->journal_seq to the journal sequence number of this
211 * transaction - for fsync.
212 *
213 * But if that's the only reason we're updating the inode (we're not
214 * updating bi_size or bi_sectors), then we don't need the inode update
215 * to be journalled - if we crash, the bi_journal_seq update will be
216 * lost, but that's fine.
217 */
218 unsigned inode_update_flags = BTREE_UPDATE_nojournal;
219
220 struct btree_iter iter;
221 struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
222 SPOS(0,
223 extent_iter->pos.inode,
224 extent_iter->snapshot),
225 BTREE_ITER_intent|
226 BTREE_ITER_cached);
227 int ret = bkey_err(k);
228 if (unlikely(ret))
229 return ret;
230
231 /*
232 * varint_decode_fast(), in the inode .invalid method, reads up to 7
233 * bytes past the end of the buffer:
234 */
235 struct bkey_i *k_mut = bch2_trans_kmalloc_nomemzero(trans, bkey_bytes(k.k) + 8);
236 ret = PTR_ERR_OR_ZERO(k_mut);
237 if (unlikely(ret))
238 goto err;
239
240 bkey_reassemble(k_mut, k);
241
242 if (unlikely(k_mut->k.type != KEY_TYPE_inode_v3)) {
243 k_mut = bch2_inode_to_v3(trans, k_mut);
244 ret = PTR_ERR_OR_ZERO(k_mut);
245 if (unlikely(ret))
246 goto err;
247 }
248
249 struct bkey_i_inode_v3 *inode = bkey_i_to_inode_v3(k_mut);
250
251 if (!(le64_to_cpu(inode->v.bi_flags) & BCH_INODE_i_size_dirty) &&
252 new_i_size > le64_to_cpu(inode->v.bi_size)) {
253 inode->v.bi_size = cpu_to_le64(new_i_size);
254 inode_update_flags = 0;
255 }
256
257 if (i_sectors_delta) {
258 s64 bi_sectors = le64_to_cpu(inode->v.bi_sectors);
259 if (unlikely(bi_sectors + i_sectors_delta < 0)) {
260 struct bch_fs *c = trans->c;
261 struct printbuf buf = PRINTBUF;
262 bch2_log_msg_start(c, &buf);
263 prt_printf(&buf, "inode %llu i_sectors underflow: %lli + %lli < 0",
264 extent_iter->pos.inode, bi_sectors, i_sectors_delta);
265
266 bool repeat = false, print = false, suppress = false;
267 bch2_count_fsck_err(c, inode_i_sectors_underflow, buf.buf,
268 &repeat, &print, &suppress);
269 if (print)
270 bch2_print_str(c, buf.buf);
271 printbuf_exit(&buf);
272
273 if (i_sectors_delta < 0)
274 i_sectors_delta = -bi_sectors;
275 else
276 i_sectors_delta = 0;
277 }
278
279 le64_add_cpu(&inode->v.bi_sectors, i_sectors_delta);
280 inode_update_flags = 0;
281 }
282
283 if (inode->k.p.snapshot != iter.snapshot) {
284 inode->k.p.snapshot = iter.snapshot;
285 inode_update_flags = 0;
286 }
287
288 ret = bch2_trans_update(trans, &iter, &inode->k_i,
289 BTREE_UPDATE_internal_snapshot_node|
290 inode_update_flags);
291 err:
292 bch2_trans_iter_exit(trans, &iter);
293 return ret;
294 }
295
296 int bch2_extent_update(struct btree_trans *trans,
297 subvol_inum inum,
298 struct btree_iter *iter,
299 struct bkey_i *k,
300 struct disk_reservation *disk_res,
301 u64 new_i_size,
302 s64 *i_sectors_delta_total,
303 bool check_enospc)
304 {
305 struct bpos next_pos;
306 bool usage_increasing;
307 s64 i_sectors_delta = 0, disk_sectors_delta = 0;
308 int ret;
309
310 /*
311 * This traverses the iterator for us without changing iter->path->pos to
312 * search_key() (which is pos + 1 for extents): we want there to be a
313 * path already traversed at iter->pos because
314 * bch2_trans_extent_update() will use it to attempt extent merging
315 */
316 ret = __bch2_btree_iter_traverse(trans, iter);
317 if (ret)
318 return ret;
319
320 ret = bch2_extent_trim_atomic(trans, iter, k);
321 if (ret)
322 return ret;
323
324 next_pos = k->k.p;
325
326 ret = bch2_sum_sector_overwrites(trans, iter, k,
327 &usage_increasing,
328 &i_sectors_delta,
329 &disk_sectors_delta);
330 if (ret)
331 return ret;
332
333 if (disk_res &&
334 disk_sectors_delta > (s64) disk_res->sectors) {
335 ret = bch2_disk_reservation_add(trans->c, disk_res,
336 disk_sectors_delta - disk_res->sectors,
337 !check_enospc || !usage_increasing
338 ? BCH_DISK_RESERVATION_NOFAIL : 0);
339 if (ret)
340 return ret;
341 }
342
343 /*
344 * Note:
345 * We always have to do an inode update - even when i_size/i_sectors
346 * aren't changing - for fsync to work properly; fsync relies on
347 * inode->bi_journal_seq which is updated by the trigger code:
348 */
349 ret = bch2_extent_update_i_size_sectors(trans, iter,
350 min(k->k.p.offset << 9, new_i_size),
351 i_sectors_delta) ?:
352 bch2_trans_update(trans, iter, k, 0) ?:
353 bch2_trans_commit(trans, disk_res, NULL,
354 BCH_TRANS_COMMIT_no_check_rw|
355 BCH_TRANS_COMMIT_no_enospc);
356 if (unlikely(ret))
357 return ret;
358
359 if (i_sectors_delta_total)
360 *i_sectors_delta_total += i_sectors_delta;
361 bch2_btree_iter_set_pos(trans, iter, next_pos);
362 return 0;
363 }
364
365 static int bch2_write_index_default(struct bch_write_op *op)
366 {
367 struct bch_fs *c = op->c;
368 struct bkey_buf sk;
369 struct keylist *keys = &op->insert_keys;
370 struct bkey_i *k = bch2_keylist_front(keys);
371 struct btree_trans *trans = bch2_trans_get(c);
372 struct btree_iter iter;
373 subvol_inum inum = {
374 .subvol = op->subvol,
375 .inum = k->k.p.inode,
376 };
377 int ret;
378
379 BUG_ON(!inum.subvol);
380
381 bch2_bkey_buf_init(&sk);
382
383 do {
384 bch2_trans_begin(trans);
385
386 k = bch2_keylist_front(keys);
387 bch2_bkey_buf_copy(&sk, c, k);
388
389 ret = bch2_subvolume_get_snapshot(trans, inum.subvol,
390 &sk.k->k.p.snapshot);
391 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
392 continue;
393 if (ret)
394 break;
395
396 bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
397 bkey_start_pos(&sk.k->k),
398 BTREE_ITER_slots|BTREE_ITER_intent);
399
400 ret = bch2_bkey_set_needs_rebalance(c, &op->opts, sk.k) ?:
401 bch2_extent_update(trans, inum, &iter, sk.k,
402 &op->res,
403 op->new_i_size, &op->i_sectors_delta,
404 op->flags & BCH_WRITE_check_enospc);
405 bch2_trans_iter_exit(trans, &iter);
406
407 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
408 continue;
409 if (ret)
410 break;
411
412 if (bkey_ge(iter.pos, k->k.p))
413 bch2_keylist_pop_front(&op->insert_keys);
414 else
415 bch2_cut_front(iter.pos, k);
416 } while (!bch2_keylist_empty(keys));
417
418 bch2_trans_put(trans);
419 bch2_bkey_buf_exit(&sk, c);
420
421 return ret;
422 }
423
424 /* Writes */
425
426 void bch2_write_op_error(struct bch_write_op *op, u64 offset, const char *fmt, ...)
427 {
428 struct printbuf buf = PRINTBUF;
429
430 if (op->subvol) {
431 bch2_inum_offset_err_msg(op->c, &buf,
432 (subvol_inum) { op->subvol, op->pos.inode, },
433 offset << 9);
434 } else {
435 struct bpos pos = op->pos;
436 pos.offset = offset;
437 bch2_inum_snap_offset_err_msg(op->c, &buf, pos);
438 }
439
440 prt_str(&buf, "write error: ");
441
442 va_list args;
443 va_start(args, fmt);
444 prt_vprintf(&buf, fmt, args);
445 va_end(args);
446
447 if (op->flags & BCH_WRITE_move) {
448 struct data_update *u = container_of(op, struct data_update, op);
449
450 prt_printf(&buf, "\n from internal move ");
451 bch2_bkey_val_to_text(&buf, op->c, bkey_i_to_s_c(u->k.k));
452 }
453
454 bch_err_ratelimited(op->c, "%s", buf.buf);
455 printbuf_exit(&buf);
456 }
457
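/*
 * Submit one write bio per pointer in @k: the last pointer reuses @wbio
 * itself, earlier ones get clones that complete into the parent.
 */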
458 void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
459 enum bch_data_type type,
460 const struct bkey_i *k,
461 bool nocow)
462 {
463 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));
464 struct bch_write_bio *n;
465
466 BUG_ON(c->opts.nochanges);
467
468 bkey_for_each_ptr(ptrs, ptr) {
469 /*
470 * XXX: btree writes should be using io_ref[WRITE], but we
471 * aren't retrying failed btree writes yet (due to device
472 * removal/ro):
473 */
474 struct bch_dev *ca = nocow
475 ? bch2_dev_have_ref(c, ptr->dev)
476 : bch2_dev_get_ioref(c, ptr->dev, type == BCH_DATA_btree ? READ : WRITE);
477
478 if (to_entry(ptr + 1) < ptrs.end) {
479 n = to_wbio(bio_alloc_clone(NULL, &wbio->bio, GFP_NOFS, &c->replica_set));
480
481 n->bio.bi_end_io = wbio->bio.bi_end_io;
482 n->bio.bi_private = wbio->bio.bi_private;
483 n->parent = wbio;
484 n->split = true;
485 n->bounce = false;
486 n->put_bio = true;
487 n->bio.bi_opf = wbio->bio.bi_opf;
488 bio_inc_remaining(&wbio->bio);
489 } else {
490 n = wbio;
491 n->split = false;
492 }
493
494 n->c = c;
495 n->dev = ptr->dev;
496 n->have_ioref = ca != NULL;
497 n->nocow = nocow;
498 n->submit_time = local_clock();
499 n->inode_offset = bkey_start_offset(&k->k);
500 if (nocow)
501 n->nocow_bucket = PTR_BUCKET_NR(ca, ptr);
502 n->bio.bi_iter.bi_sector = ptr->offset;
503
504 if (likely(n->have_ioref)) {
505 this_cpu_add(ca->io_done->sectors[WRITE][type],
506 bio_sectors(&n->bio));
507
508 bio_set_dev(&n->bio, ca->disk_sb.bdev);
509
510 if (type != BCH_DATA_btree && unlikely(c->opts.no_data_io)) {
511 bio_endio(&n->bio);
512 continue;
513 }
514
515 submit_bio(&n->bio);
516 } else {
517 n->bio.bi_status = BLK_STS_REMOVED;
518 bio_endio(&n->bio);
519 }
520 }
521 }
522
523 static void __bch2_write(struct bch_write_op *);
524
525 static void bch2_write_done(struct closure *cl)
526 {
527 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
528 struct bch_fs *c = op->c;
529
530 EBUG_ON(op->open_buckets.nr);
531
532 bch2_time_stats_update(&c->times[BCH_TIME_data_write], op->start_time);
533 bch2_disk_reservation_put(c, &op->res);
534
535 if (!(op->flags & BCH_WRITE_move))
536 bch2_write_ref_put(c, BCH_WRITE_REF_write);
537 bch2_keylist_free(&op->insert_keys, op->inline_keys);
538
539 EBUG_ON(cl->parent);
540 closure_debug_destroy(cl);
541 if (op->end_io)
542 op->end_io(op);
543 }
544
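/*
 * Drop pointers to devices that failed the write from the keys we're about to
 * insert; if a key loses all of its pointers the whole write fails.
 */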
545 static noinline int bch2_write_drop_io_error_ptrs(struct bch_write_op *op)
546 {
547 struct keylist *keys = &op->insert_keys;
548 struct bkey_i *src, *dst = keys->keys, *n;
549
550 for (src = keys->keys; src != keys->top; src = n) {
551 n = bkey_next(src);
552
553 if (bkey_extent_is_direct_data(&src->k)) {
554 bch2_bkey_drop_ptrs(bkey_i_to_s(src), ptr,
555 test_bit(ptr->dev, op->failed.d));
556
557 if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(src)))
558 return -BCH_ERR_data_write_io;
559 }
560
561 if (dst != src)
562 memmove_u64s_down(dst, src, src->k.u64s);
563 dst = bkey_next(dst);
564 }
565
566 keys->top = dst;
567 return 0;
568 }
569
570 /**
571 * __bch2_write_index - after a write, update index to point to new data
572 * @op: bch_write_op to process
573 */
574 static void __bch2_write_index(struct bch_write_op *op)
575 {
576 struct bch_fs *c = op->c;
577 struct keylist *keys = &op->insert_keys;
578 unsigned dev;
579 int ret = 0;
580
581 if (unlikely(op->flags & BCH_WRITE_io_error)) {
582 ret = bch2_write_drop_io_error_ptrs(op);
583 if (ret)
584 goto err;
585 }
586
587 if (!bch2_keylist_empty(keys)) {
588 u64 sectors_start = keylist_sectors(keys);
589
590 ret = !(op->flags & BCH_WRITE_move)
591 ? bch2_write_index_default(op)
592 : bch2_data_update_index_update(op);
593
594 BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
595 BUG_ON(keylist_sectors(keys) && !ret);
596
597 op->written += sectors_start - keylist_sectors(keys);
598
599 if (unlikely(ret && !bch2_err_matches(ret, EROFS))) {
600 struct bkey_i *insert = bch2_keylist_front(&op->insert_keys);
601
602 bch2_write_op_error(op, bkey_start_offset(&insert->k),
603 "btree update error: %s", bch2_err_str(ret));
604 }
605
606 if (ret)
607 goto err;
608 }
609 out:
610 /* If a bucket wasn't written, we can't erasure code it: */
611 for_each_set_bit(dev, op->failed.d, BCH_SB_MEMBERS_MAX)
612 bch2_open_bucket_write_error(c, &op->open_buckets, dev, -BCH_ERR_data_write_io);
613
614 bch2_open_buckets_put(c, &op->open_buckets);
615 return;
616 err:
617 keys->top = keys->keys;
618 op->error = ret;
619 op->flags |= BCH_WRITE_submitted;
620 goto out;
621 }
622
623 static inline void __wp_update_state(struct write_point *wp, enum write_point_state state)
624 {
625 if (state != wp->state) {
626 struct task_struct *p = current;
627 u64 now = ktime_get_ns();
628 u64 runtime = p->se.sum_exec_runtime +
629 (now - p->se.exec_start);
630
631 if (state == WRITE_POINT_runnable)
632 wp->last_runtime = runtime;
633 else if (wp->state == WRITE_POINT_runnable)
634 wp->time[WRITE_POINT_running] += runtime - wp->last_runtime;
635
636 if (wp->last_state_change &&
637 time_after64(now, wp->last_state_change))
638 wp->time[wp->state] += now - wp->last_state_change;
639 wp->state = state;
640 wp->last_state_change = now;
641 }
642 }
643
644 static inline void wp_update_state(struct write_point *wp, bool running)
645 {
646 enum write_point_state state;
647
648 state = running ? WRITE_POINT_runnable:
649 !list_empty(&wp->writes) ? WRITE_POINT_waiting_io
650 : WRITE_POINT_stopped;
651
652 __wp_update_state(wp, state);
653 }
654
655 static CLOSURE_CALLBACK(bch2_write_index)
656 {
657 closure_type(op, struct bch_write_op, cl);
658 struct write_point *wp = op->wp;
659 struct workqueue_struct *wq = index_update_wq(op);
660 unsigned long flags;
661
662 if ((op->flags & BCH_WRITE_submitted) &&
663 (op->flags & BCH_WRITE_move))
664 bch2_bio_free_pages_pool(op->c, &op->wbio.bio);
665
666 spin_lock_irqsave(&wp->writes_lock, flags);
667 if (wp->state == WRITE_POINT_waiting_io)
668 __wp_update_state(wp, WRITE_POINT_waiting_work);
669 list_add_tail(&op->wp_list, &wp->writes);
670 spin_unlock_irqrestore(&wp->writes_lock, flags);
671
672 queue_work(wq, &wp->index_update_work);
673 }
674
675 static inline void bch2_write_queue(struct bch_write_op *op, struct write_point *wp)
676 {
677 op->wp = wp;
678
679 if (wp->state == WRITE_POINT_stopped) {
680 spin_lock_irq(&wp->writes_lock);
681 __wp_update_state(wp, WRITE_POINT_waiting_io);
682 spin_unlock_irq(&wp->writes_lock);
683 }
684 }
685
686 void bch2_write_point_do_index_updates(struct work_struct *work)
687 {
688 struct write_point *wp =
689 container_of(work, struct write_point, index_update_work);
690 struct bch_write_op *op;
691
692 while (1) {
693 spin_lock_irq(&wp->writes_lock);
694 op = list_pop_entry(&wp->writes, struct bch_write_op, wp_list);
695 wp_update_state(wp, op != NULL);
696 spin_unlock_irq(&wp->writes_lock);
697
698 if (!op)
699 break;
700
701 op->flags |= BCH_WRITE_in_worker;
702
703 __bch2_write_index(op);
704
705 if (!(op->flags & BCH_WRITE_submitted))
706 __bch2_write(op);
707 else
708 bch2_write_done(&op->cl);
709 }
710 }
711
712 static void bch2_write_endio(struct bio *bio)
713 {
714 struct closure *cl = bio->bi_private;
715 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
716 struct bch_write_bio *wbio = to_wbio(bio);
717 struct bch_write_bio *parent = wbio->split ? wbio->parent : NULL;
718 struct bch_fs *c = wbio->c;
719 struct bch_dev *ca = wbio->have_ioref
720 ? bch2_dev_have_ref(c, wbio->dev)
721 : NULL;
722
723 bch2_account_io_completion(ca, BCH_MEMBER_ERROR_write,
724 wbio->submit_time, !bio->bi_status);
725
726 if (unlikely(bio->bi_status)) {
727 if (ca)
728 bch_err_inum_offset_ratelimited(ca,
729 op->pos.inode,
730 wbio->inode_offset << 9,
731 "data write error: %s",
732 bch2_blk_status_to_str(bio->bi_status));
733 else
734 bch_err_inum_offset_ratelimited(c,
735 op->pos.inode,
736 wbio->inode_offset << 9,
737 "data write error: %s",
738 bch2_blk_status_to_str(bio->bi_status));
739 set_bit(wbio->dev, op->failed.d);
740 op->flags |= BCH_WRITE_io_error;
741 }
742
743 if (wbio->nocow) {
744 bch2_bucket_nocow_unlock(&c->nocow_locks,
745 POS(ca->dev_idx, wbio->nocow_bucket),
746 BUCKET_NOCOW_LOCK_UPDATE);
747 set_bit(wbio->dev, op->devs_need_flush->d);
748 }
749
750 if (wbio->have_ioref)
751 percpu_ref_put(&ca->io_ref[WRITE]);
752
753 if (wbio->bounce)
754 bch2_bio_free_pages_pool(c, bio);
755
756 if (wbio->put_bio)
757 bio_put(bio);
758
759 if (parent)
760 bio_endio(&parent->bio);
761 else
762 closure_put(cl);
763 }
764
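/*
 * Append a new extent key for the sectors just allocated from @wp to
 * op->insert_keys, recording the crc/compression info for the data written.
 */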
765 static void init_append_extent(struct bch_write_op *op,
766 struct write_point *wp,
767 struct bversion version,
768 struct bch_extent_crc_unpacked crc)
769 {
770 struct bkey_i_extent *e;
771
772 op->pos.offset += crc.uncompressed_size;
773
774 e = bkey_extent_init(op->insert_keys.top);
775 e->k.p = op->pos;
776 e->k.size = crc.uncompressed_size;
777 e->k.bversion = version;
778
779 if (crc.csum_type ||
780 crc.compression_type ||
781 crc.nonce)
782 bch2_extent_crc_append(&e->k_i, crc);
783
784 bch2_alloc_sectors_append_ptrs_inlined(op->c, wp, &e->k_i, crc.compressed_size,
785 op->flags & BCH_WRITE_cached);
786
787 bch2_keylist_push(&op->insert_keys);
788 }
789
790 static struct bio *bch2_write_bio_alloc(struct bch_fs *c,
791 struct write_point *wp,
792 struct bio *src,
793 bool *page_alloc_failed,
794 void *buf)
795 {
796 struct bch_write_bio *wbio;
797 struct bio *bio;
798 unsigned output_available =
799 min(wp->sectors_free << 9, src->bi_iter.bi_size);
800 unsigned pages = DIV_ROUND_UP(output_available +
801 (buf
802 ? ((unsigned long) buf & (PAGE_SIZE - 1))
803 : 0), PAGE_SIZE);
804
805 pages = min(pages, BIO_MAX_VECS);
806
807 bio = bio_alloc_bioset(NULL, pages, 0,
808 GFP_NOFS, &c->bio_write);
809 wbio = wbio_init(bio);
810 wbio->put_bio = true;
811 /* copy WRITE_SYNC flag */
812 wbio->bio.bi_opf = src->bi_opf;
813
814 if (buf) {
815 bch2_bio_map(bio, buf, output_available);
816 return bio;
817 }
818
819 wbio->bounce = true;
820
821 /*
822 * We can't use mempool for more than c->sb.encoded_extent_max
823 * worth of pages, but we'd like to allocate more if we can:
824 */
825 bch2_bio_alloc_pages_pool(c, bio,
826 min_t(unsigned, output_available,
827 c->opts.encoded_extent_max));
828
829 if (bio->bi_iter.bi_size < output_available)
830 *page_alloc_failed =
831 bch2_bio_alloc_pages(bio,
832 output_available -
833 bio->bi_iter.bi_size,
834 GFP_NOFS) != 0;
835
836 return bio;
837 }
838
839 static int bch2_write_rechecksum(struct bch_fs *c,
840 struct bch_write_op *op,
841 unsigned new_csum_type)
842 {
843 struct bio *bio = &op->wbio.bio;
844 struct bch_extent_crc_unpacked new_crc;
845
846 /* bch2_rechecksum_bio() can't encrypt or decrypt data: */
847
848 if (bch2_csum_type_is_encryption(op->crc.csum_type) !=
849 bch2_csum_type_is_encryption(new_csum_type))
850 new_csum_type = op->crc.csum_type;
851
852 int ret = bch2_rechecksum_bio(c, bio, op->version, op->crc,
853 NULL, &new_crc,
854 op->crc.offset, op->crc.live_size,
855 new_csum_type);
856 if (ret)
857 return ret;
858
859 bio_advance(bio, op->crc.offset << 9);
860 bio->bi_iter.bi_size = op->crc.live_size << 9;
861 op->crc = new_crc;
862 return 0;
863 }
864
865 static noinline int bch2_write_prep_encoded_data(struct bch_write_op *op, struct write_point *wp)
866 {
867 struct bch_fs *c = op->c;
868 struct bio *bio = &op->wbio.bio;
869 struct bch_csum csum;
870 int ret = 0;
871
872 BUG_ON(bio_sectors(bio) != op->crc.compressed_size);
873
874 /* Can we just write the entire extent as is? */
875 if (op->crc.uncompressed_size == op->crc.live_size &&
876 op->crc.uncompressed_size <= c->opts.encoded_extent_max >> 9 &&
877 op->crc.compressed_size <= wp->sectors_free &&
878 (op->crc.compression_type == bch2_compression_opt_to_type(op->compression_opt) ||
879 op->incompressible)) {
880 if (!crc_is_compressed(op->crc) &&
881 op->csum_type != op->crc.csum_type) {
882 ret = bch2_write_rechecksum(c, op, op->csum_type);
883 if (ret)
884 return ret;
885 }
886
887 return 1;
888 }
889
890 /*
891 * If the data is compressed and we couldn't write the entire extent as
892 * is, we have to decompress it:
893 */
894 if (crc_is_compressed(op->crc)) {
895 /* Last point we can still verify checksum: */
896 struct nonce nonce = extent_nonce(op->version, op->crc);
897 csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, bio);
898 if (bch2_crc_cmp(op->crc.csum, csum) && !c->opts.no_data_io)
899 goto csum_err;
900
901 if (bch2_csum_type_is_encryption(op->crc.csum_type)) {
902 ret = bch2_encrypt_bio(c, op->crc.csum_type, nonce, bio);
903 if (ret)
904 return ret;
905
906 op->crc.csum_type = 0;
907 op->crc.csum = (struct bch_csum) { 0, 0 };
908 }
909
910 ret = bch2_bio_uncompress_inplace(op, bio);
911 if (ret)
912 return ret;
913 }
914
915 /*
916 * No longer have compressed data after this point - data might be
917 * encrypted:
918 */
919
920 /*
921 * If the data is checksummed and we're only writing a subset,
922 * rechecksum and adjust bio to point to currently live data:
923 */
924 if (op->crc.live_size != op->crc.uncompressed_size ||
925 op->crc.csum_type != op->csum_type) {
926 ret = bch2_write_rechecksum(c, op, op->csum_type);
927 if (ret)
928 return ret;
929 }
930
931 /*
932 * If we want to compress the data, it has to be decrypted:
933 */
934 if (bch2_csum_type_is_encryption(op->crc.csum_type) &&
935 (op->compression_opt || op->crc.csum_type != op->csum_type)) {
936 struct nonce nonce = extent_nonce(op->version, op->crc);
937 csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, bio);
938 if (bch2_crc_cmp(op->crc.csum, csum) && !c->opts.no_data_io)
939 goto csum_err;
940
941 ret = bch2_encrypt_bio(c, op->crc.csum_type, nonce, bio);
942 if (ret)
943 return ret;
944
945 op->crc.csum_type = 0;
946 op->crc.csum = (struct bch_csum) { 0, 0 };
947 }
948
949 return 0;
950 csum_err:
951 bch2_write_op_error(op, op->pos.offset,
952 "error verifying existing checksum while moving existing data (memory corruption?)\n"
953 " expected %0llx:%0llx got %0llx:%0llx type %s",
954 op->crc.csum.hi,
955 op->crc.csum.lo,
956 csum.hi,
957 csum.lo,
958 op->crc.csum_type < BCH_CSUM_NR
959 ? __bch2_csum_types[op->crc.csum_type]
960 : "(unknown)");
961 return -BCH_ERR_data_write_csum;
962 }
963
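/*
 * Take data off op->wbio.bio, optionally bouncing/compressing/encrypting and
 * checksumming it, and generate one extent key per checksummed unit; returns
 * nonzero if there's still input left to write (i.e. we need to be called
 * again with a new write point).
 */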
964 static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
965 struct bio **_dst)
966 {
967 struct bch_fs *c = op->c;
968 struct bio *src = &op->wbio.bio, *dst = src;
969 struct bvec_iter saved_iter;
970 void *ec_buf;
971 unsigned total_output = 0, total_input = 0;
972 bool bounce = false;
973 bool page_alloc_failed = false;
974 int ret, more = 0;
975
976 if (op->incompressible)
977 op->compression_opt = 0;
978
979 BUG_ON(!bio_sectors(src));
980
981 ec_buf = bch2_writepoint_ec_buf(c, wp);
982
983 if (unlikely(op->flags & BCH_WRITE_data_encoded)) {
984 ret = bch2_write_prep_encoded_data(op, wp);
985 if (ret < 0)
986 goto err;
987 if (ret) {
988 if (ec_buf) {
989 dst = bch2_write_bio_alloc(c, wp, src,
990 &page_alloc_failed,
991 ec_buf);
992 bio_copy_data(dst, src);
993 bounce = true;
994 }
995 init_append_extent(op, wp, op->version, op->crc);
996 goto do_write;
997 }
998 }
999
1000 if (ec_buf ||
1001 op->compression_opt ||
1002 (op->csum_type &&
1003 !(op->flags & BCH_WRITE_pages_stable)) ||
1004 (bch2_csum_type_is_encryption(op->csum_type) &&
1005 !(op->flags & BCH_WRITE_pages_owned))) {
1006 dst = bch2_write_bio_alloc(c, wp, src,
1007 &page_alloc_failed,
1008 ec_buf);
1009 bounce = true;
1010 }
1011
1012 #ifdef CONFIG_BCACHEFS_DEBUG
1013 unsigned write_corrupt_ratio = READ_ONCE(bch2_write_corrupt_ratio);
1014 if (!bounce && write_corrupt_ratio) {
1015 dst = bch2_write_bio_alloc(c, wp, src,
1016 &page_alloc_failed,
1017 ec_buf);
1018 bounce = true;
1019 }
1020 #endif
1021 saved_iter = dst->bi_iter;
1022
1023 do {
1024 struct bch_extent_crc_unpacked crc = { 0 };
1025 struct bversion version = op->version;
1026 size_t dst_len = 0, src_len = 0;
1027
1028 if (page_alloc_failed &&
1029 dst->bi_iter.bi_size < (wp->sectors_free << 9) &&
1030 dst->bi_iter.bi_size < c->opts.encoded_extent_max)
1031 break;
1032
1033 BUG_ON(op->compression_opt &&
1034 (op->flags & BCH_WRITE_data_encoded) &&
1035 bch2_csum_type_is_encryption(op->crc.csum_type));
1036 BUG_ON(op->compression_opt && !bounce);
1037
1038 crc.compression_type = op->incompressible
1039 ? BCH_COMPRESSION_TYPE_incompressible
1040 : op->compression_opt
1041 ? bch2_bio_compress(c, dst, &dst_len, src, &src_len,
1042 op->compression_opt)
1043 : 0;
1044 if (!crc_is_compressed(crc)) {
1045 dst_len = min(dst->bi_iter.bi_size, src->bi_iter.bi_size);
1046 dst_len = min_t(unsigned, dst_len, wp->sectors_free << 9);
1047
1048 if (op->csum_type)
1049 dst_len = min_t(unsigned, dst_len,
1050 c->opts.encoded_extent_max);
1051
1052 if (bounce) {
1053 swap(dst->bi_iter.bi_size, dst_len);
1054 bio_copy_data(dst, src);
1055 swap(dst->bi_iter.bi_size, dst_len);
1056 }
1057
1058 src_len = dst_len;
1059 }
1060
1061 BUG_ON(!src_len || !dst_len);
1062
1063 if (bch2_csum_type_is_encryption(op->csum_type)) {
1064 if (bversion_zero(version)) {
1065 version.lo = atomic64_inc_return(&c->key_version);
1066 } else {
1067 crc.nonce = op->nonce;
1068 op->nonce += src_len >> 9;
1069 }
1070 }
1071
1072 if ((op->flags & BCH_WRITE_data_encoded) &&
1073 !crc_is_compressed(crc) &&
1074 bch2_csum_type_is_encryption(op->crc.csum_type) ==
1075 bch2_csum_type_is_encryption(op->csum_type)) {
1076 u8 compression_type = crc.compression_type;
1077 u16 nonce = crc.nonce;
1078 /*
1079 * Note: when we're using rechecksum(), we need to be
1080 * checksumming @src because it has all the data our
1081 * existing checksum covers - if we bounced (because we
1082 * were trying to compress), @dst will only have the
1083 * part of the data the new checksum will cover.
1084 *
1085 * But normally we want to be checksumming post bounce,
1086 * because part of the reason for bouncing is so the
1087 * data can't be modified (by userspace) while it's in
1088 * flight.
1089 */
1090 ret = bch2_rechecksum_bio(c, src, version, op->crc,
1091 &crc, &op->crc,
1092 src_len >> 9,
1093 bio_sectors(src) - (src_len >> 9),
1094 op->csum_type);
1095 if (ret)
1096 goto err;
1097 /*
1098 * bch2_rechecksum_bio() sets compression_type on crc from op->crc;
1099 * this isn't always correct, as sometimes we're changing
1100 * an extent from uncompressed to incompressible.
1101 */
1102 crc.compression_type = compression_type;
1103 crc.nonce = nonce;
1104 } else {
1105 if ((op->flags & BCH_WRITE_data_encoded) &&
1106 (ret = bch2_rechecksum_bio(c, src, version, op->crc,
1107 NULL, &op->crc,
1108 src_len >> 9,
1109 bio_sectors(src) - (src_len >> 9),
1110 op->crc.csum_type)))
1111 goto err;
1112
1113 crc.compressed_size = dst_len >> 9;
1114 crc.uncompressed_size = src_len >> 9;
1115 crc.live_size = src_len >> 9;
1116
1117 swap(dst->bi_iter.bi_size, dst_len);
1118 ret = bch2_encrypt_bio(c, op->csum_type,
1119 extent_nonce(version, crc), dst);
1120 if (ret)
1121 goto err;
1122
1123 crc.csum = bch2_checksum_bio(c, op->csum_type,
1124 extent_nonce(version, crc), dst);
1125 crc.csum_type = op->csum_type;
1126 swap(dst->bi_iter.bi_size, dst_len);
1127 }
1128
1129 init_append_extent(op, wp, version, crc);
1130
1131 #ifdef CONFIG_BCACHEFS_DEBUG
1132 if (write_corrupt_ratio) {
1133 swap(dst->bi_iter.bi_size, dst_len);
1134 bch2_maybe_corrupt_bio(dst, write_corrupt_ratio);
1135 swap(dst->bi_iter.bi_size, dst_len);
1136 }
1137 #endif
1138
1139 if (dst != src)
1140 bio_advance(dst, dst_len);
1141 bio_advance(src, src_len);
1142 total_output += dst_len;
1143 total_input += src_len;
1144 } while (dst->bi_iter.bi_size &&
1145 src->bi_iter.bi_size &&
1146 wp->sectors_free &&
1147 !bch2_keylist_realloc(&op->insert_keys,
1148 op->inline_keys,
1149 ARRAY_SIZE(op->inline_keys),
1150 BKEY_EXTENT_U64s_MAX));
1151
1152 more = src->bi_iter.bi_size != 0;
1153
1154 dst->bi_iter = saved_iter;
1155
1156 if (dst == src && more) {
1157 BUG_ON(total_output != total_input);
1158
1159 dst = bio_split(src, total_input >> 9,
1160 GFP_NOFS, &c->bio_write);
1161 wbio_init(dst)->put_bio = true;
1162 /* copy WRITE_SYNC flag */
1163 dst->bi_opf = src->bi_opf;
1164 }
1165
1166 dst->bi_iter.bi_size = total_output;
1167 do_write:
1168 *_dst = dst;
1169 return more;
1170 err:
1171 if (to_wbio(dst)->bounce)
1172 bch2_bio_free_pages_pool(c, dst);
1173 if (to_wbio(dst)->put_bio)
1174 bio_put(dst);
1175
1176 return ret;
1177 }
1178
1179 static bool bch2_extent_is_writeable(struct bch_write_op *op,
1180 struct bkey_s_c k)
1181 {
1182 struct bch_fs *c = op->c;
1183 struct bkey_s_c_extent e;
1184 struct extent_ptr_decoded p;
1185 const union bch_extent_entry *entry;
1186 unsigned replicas = 0;
1187
1188 if (k.k->type != KEY_TYPE_extent)
1189 return false;
1190
1191 e = bkey_s_c_to_extent(k);
1192
1193 rcu_read_lock();
1194 extent_for_each_ptr_decode(e, p, entry) {
1195 if (crc_is_encoded(p.crc) || p.has_ec) {
1196 rcu_read_unlock();
1197 return false;
1198 }
1199
1200 replicas += bch2_extent_ptr_durability(c, &p);
1201 }
1202 rcu_read_unlock();
1203
1204 return replicas >= op->opts.data_replicas;
1205 }
1206
1207 static int bch2_nocow_write_convert_one_unwritten(struct btree_trans *trans,
1208 struct btree_iter *iter,
1209 struct bkey_i *orig,
1210 struct bkey_s_c k,
1211 u64 new_i_size)
1212 {
1213 if (!bch2_extents_match(bkey_i_to_s_c(orig), k)) {
1214 /* trace this */
1215 return 0;
1216 }
1217
1218 struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
1219 int ret = PTR_ERR_OR_ZERO(new);
1220 if (ret)
1221 return ret;
1222
1223 bch2_cut_front(bkey_start_pos(&orig->k), new);
1224 bch2_cut_back(orig->k.p, new);
1225
1226 struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
1227 bkey_for_each_ptr(ptrs, ptr)
1228 ptr->unwritten = 0;
1229
1230 /*
1231 * Note that we're not calling bch2_subvol_get_snapshot() in this path -
1232 * that was done when we kicked off the write, and here it's important
1233 * that we update the extent that we wrote to - even if a snapshot has
1234 * since been created. The write is still outstanding, so we're ok
1235 * w.r.t. snapshot atomicity:
1236 */
1237 return bch2_extent_update_i_size_sectors(trans, iter,
1238 min(new->k.p.offset << 9, new_i_size), 0) ?:
1239 bch2_trans_update(trans, iter, new,
1240 BTREE_UPDATE_internal_snapshot_node);
1241 }
1242
1243 static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op)
1244 {
1245 struct bch_fs *c = op->c;
1246 struct btree_trans *trans = bch2_trans_get(c);
1247 int ret = 0;
1248
1249 for_each_keylist_key(&op->insert_keys, orig) {
1250 ret = for_each_btree_key_max_commit(trans, iter, BTREE_ID_extents,
1251 bkey_start_pos(&orig->k), orig->k.p,
1252 BTREE_ITER_intent, k,
1253 NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
1254 bch2_nocow_write_convert_one_unwritten(trans, &iter, orig, k, op->new_i_size);
1255 }));
1256 if (ret)
1257 break;
1258 }
1259
1260 bch2_trans_put(trans);
1261
1262 if (ret && !bch2_err_matches(ret, EROFS)) {
1263 struct bkey_i *insert = bch2_keylist_front(&op->insert_keys);
1264 bch2_write_op_error(op, bkey_start_offset(&insert->k),
1265 "btree update error: %s", bch2_err_str(ret));
1266 }
1267
1268 if (ret)
1269 op->error = ret;
1270 }
1271
1272 static void __bch2_nocow_write_done(struct bch_write_op *op)
1273 {
1274 if (unlikely(op->flags & BCH_WRITE_io_error)) {
1275 op->error = -BCH_ERR_data_write_io;
1276 } else if (unlikely(op->flags & BCH_WRITE_convert_unwritten))
1277 bch2_nocow_write_convert_unwritten(op);
1278 }
1279
1280 static CLOSURE_CALLBACK(bch2_nocow_write_done)
1281 {
1282 closure_type(op, struct bch_write_op, cl);
1283
1284 __bch2_nocow_write_done(op);
1285 bch2_write_done(cl);
1286 }
1287
1288 struct bucket_to_lock {
1289 struct bpos b;
1290 unsigned gen;
1291 struct nocow_lock_bucket *l;
1292 };
1293
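/*
 * Nocow (in place) write path: look up the existing extent, take nocow locks
 * on the buckets it points to, and write directly to the existing pointers.
 * If anything doesn't match up (snapshot mismatch, stale pointers, extent not
 * writeable) we fall back to the normal COW path.
 */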
1294 static void bch2_nocow_write(struct bch_write_op *op)
1295 {
1296 struct bch_fs *c = op->c;
1297 struct btree_trans *trans;
1298 struct btree_iter iter;
1299 struct bkey_s_c k;
1300 DARRAY_PREALLOCATED(struct bucket_to_lock, 3) buckets;
1301 u32 snapshot;
1302 struct bucket_to_lock *stale_at;
1303 int stale, ret;
1304
1305 if (op->flags & BCH_WRITE_move)
1306 return;
1307
1308 darray_init(&buckets);
1309 trans = bch2_trans_get(c);
1310 retry:
1311 bch2_trans_begin(trans);
1312
1313 ret = bch2_subvolume_get_snapshot(trans, op->subvol, &snapshot);
1314 if (unlikely(ret))
1315 goto err;
1316
1317 bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
1318 SPOS(op->pos.inode, op->pos.offset, snapshot),
1319 BTREE_ITER_slots);
1320 while (1) {
1321 struct bio *bio = &op->wbio.bio;
1322
1323 buckets.nr = 0;
1324
1325 ret = bch2_trans_relock(trans);
1326 if (ret)
1327 break;
1328
1329 k = bch2_btree_iter_peek_slot(trans, &iter);
1330 ret = bkey_err(k);
1331 if (ret)
1332 break;
1333
1334 /* fall back to normal cow write path? */
1335 if (unlikely(k.k->p.snapshot != snapshot ||
1336 !bch2_extent_is_writeable(op, k)))
1337 break;
1338
1339 if (bch2_keylist_realloc(&op->insert_keys,
1340 op->inline_keys,
1341 ARRAY_SIZE(op->inline_keys),
1342 k.k->u64s))
1343 break;
1344
1345 /* Get iorefs before dropping btree locks: */
1346 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1347 bkey_for_each_ptr(ptrs, ptr) {
1348 struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, WRITE);
1349 if (unlikely(!ca))
1350 goto err_get_ioref;
1351
1352 struct bpos b = PTR_BUCKET_POS(ca, ptr);
1353 struct nocow_lock_bucket *l =
1354 bucket_nocow_lock(&c->nocow_locks, bucket_to_u64(b));
1355 prefetch(l);
1356
1357 /* XXX allocating memory with btree locks held - rare */
1358 darray_push_gfp(&buckets, ((struct bucket_to_lock) {
1359 .b = b, .gen = ptr->gen, .l = l,
1360 }), GFP_KERNEL|__GFP_NOFAIL);
1361
1362 if (ptr->unwritten)
1363 op->flags |= BCH_WRITE_convert_unwritten;
1364 }
1365
1366 /* Unlock before taking nocow locks, doing IO: */
1367 bkey_reassemble(op->insert_keys.top, k);
1368 bch2_trans_unlock(trans);
1369
1370 bch2_cut_front(op->pos, op->insert_keys.top);
1371 if (op->flags & BCH_WRITE_convert_unwritten)
1372 bch2_cut_back(POS(op->pos.inode, op->pos.offset + bio_sectors(bio)), op->insert_keys.top);
1373
1374 darray_for_each(buckets, i) {
1375 struct bch_dev *ca = bch2_dev_have_ref(c, i->b.inode);
1376
1377 __bch2_bucket_nocow_lock(&c->nocow_locks, i->l,
1378 bucket_to_u64(i->b),
1379 BUCKET_NOCOW_LOCK_UPDATE);
1380
1381 int gen = bucket_gen_get(ca, i->b.offset);
1382 stale = gen < 0 ? gen : gen_after(gen, i->gen);
1383 if (unlikely(stale)) {
1384 stale_at = i;
1385 goto err_bucket_stale;
1386 }
1387 }
1388
1389 bio = &op->wbio.bio;
1390 if (k.k->p.offset < op->pos.offset + bio_sectors(bio)) {
1391 bio = bio_split(bio, k.k->p.offset - op->pos.offset,
1392 GFP_KERNEL, &c->bio_write);
1393 wbio_init(bio)->put_bio = true;
1394 bio->bi_opf = op->wbio.bio.bi_opf;
1395 } else {
1396 op->flags |= BCH_WRITE_submitted;
1397 }
1398
1399 op->pos.offset += bio_sectors(bio);
1400 op->written += bio_sectors(bio);
1401
1402 bio->bi_end_io = bch2_write_endio;
1403 bio->bi_private = &op->cl;
1404 bio->bi_opf |= REQ_OP_WRITE;
1405 closure_get(&op->cl);
1406
1407 bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
1408 op->insert_keys.top, true);
1409
1410 bch2_keylist_push(&op->insert_keys);
1411 if (op->flags & BCH_WRITE_submitted)
1412 break;
1413 bch2_btree_iter_advance(trans, &iter);
1414 }
1415 out:
1416 bch2_trans_iter_exit(trans, &iter);
1417 err:
1418 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
1419 goto retry;
1420
1421 bch2_trans_put(trans);
1422 darray_exit(&buckets);
1423
1424 if (ret) {
1425 bch2_write_op_error(op, op->pos.offset,
1426 "%s(): btree lookup error: %s", __func__, bch2_err_str(ret));
1427 op->error = ret;
1428 op->flags |= BCH_WRITE_submitted;
1429 }
1430
1431 /* fallback to cow write path? */
1432 if (!(op->flags & BCH_WRITE_submitted)) {
1433 closure_sync(&op->cl);
1434 __bch2_nocow_write_done(op);
1435 op->insert_keys.top = op->insert_keys.keys;
1436 } else if (op->flags & BCH_WRITE_sync) {
1437 closure_sync(&op->cl);
1438 bch2_nocow_write_done(&op->cl.work);
1439 } else {
1440 /*
1441 * XXX
1442 * needs to run out of process context because ei_quota_lock is
1443 * a mutex
1444 */
1445 continue_at(&op->cl, bch2_nocow_write_done, index_update_wq(op));
1446 }
1447 return;
1448 err_get_ioref:
1449 darray_for_each(buckets, i)
1450 percpu_ref_put(&bch2_dev_have_ref(c, i->b.inode)->io_ref[WRITE]);
1451
1452 /* Fall back to COW path: */
1453 goto out;
1454 err_bucket_stale:
1455 darray_for_each(buckets, i) {
1456 bch2_bucket_nocow_unlock(&c->nocow_locks, i->b, BUCKET_NOCOW_LOCK_UPDATE);
1457 if (i == stale_at)
1458 break;
1459 }
1460
1461 struct printbuf buf = PRINTBUF;
1462 if (bch2_fs_inconsistent_on(stale < 0, c,
1463 "pointer to invalid bucket in nocow path on device %llu\n %s",
1464 stale_at->b.inode,
1465 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
1466 ret = -BCH_ERR_data_write_invalid_ptr;
1467 } else {
1468 /* We can retry this: */
1469 ret = -BCH_ERR_transaction_restart;
1470 }
1471 printbuf_exit(&buf);
1472
1473 goto err_get_ioref;
1474 }
1475
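/*
 * Core of the write path: allocate space from a write point, generate keys
 * with bch2_write_extent(), submit the IO, and loop until all of
 * op->wbio.bio has been written or an error occurs.
 */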
1476 static void __bch2_write(struct bch_write_op *op)
1477 {
1478 struct bch_fs *c = op->c;
1479 struct write_point *wp = NULL;
1480 struct bio *bio = NULL;
1481 unsigned nofs_flags;
1482 int ret;
1483
1484 nofs_flags = memalloc_nofs_save();
1485
1486 if (unlikely(op->opts.nocow && c->opts.nocow_enabled)) {
1487 bch2_nocow_write(op);
1488 if (op->flags & BCH_WRITE_submitted)
1489 goto out_nofs_restore;
1490 }
1491 again:
1492 memset(&op->failed, 0, sizeof(op->failed));
1493
1494 do {
1495 struct bkey_i *key_to_write;
1496 unsigned key_to_write_offset = op->insert_keys.top_p -
1497 op->insert_keys.keys_p;
1498
1499 /* +1 for possible cache device: */
1500 if (op->open_buckets.nr + op->nr_replicas + 1 >
1501 ARRAY_SIZE(op->open_buckets.v))
1502 break;
1503
1504 if (bch2_keylist_realloc(&op->insert_keys,
1505 op->inline_keys,
1506 ARRAY_SIZE(op->inline_keys),
1507 BKEY_EXTENT_U64s_MAX))
1508 break;
1509
1510 /*
1511 * The copygc thread is now global, which means it's no longer
1512 * freeing up space on specific disks, which means that
1513 * allocations for specific disks may hang arbitrarily long:
1514 */
1515 ret = bch2_trans_run(c, lockrestart_do(trans,
1516 bch2_alloc_sectors_start_trans(trans,
1517 op->target,
1518 op->opts.erasure_code && !(op->flags & BCH_WRITE_cached),
1519 op->write_point,
1520 &op->devs_have,
1521 op->nr_replicas,
1522 op->nr_replicas_required,
1523 op->watermark,
1524 op->flags,
1525 &op->cl, &wp)));
1526 if (unlikely(ret)) {
1527 if (bch2_err_matches(ret, BCH_ERR_operation_blocked))
1528 break;
1529
1530 goto err;
1531 }
1532
1533 EBUG_ON(!wp);
1534
1535 bch2_open_bucket_get(c, wp, &op->open_buckets);
1536 ret = bch2_write_extent(op, wp, &bio);
1537
1538 bch2_alloc_sectors_done_inlined(c, wp);
1539 err:
1540 if (ret <= 0) {
1541 op->flags |= BCH_WRITE_submitted;
1542
1543 if (unlikely(ret < 0)) {
1544 if (!(op->flags & BCH_WRITE_alloc_nowait))
1545 bch2_write_op_error(op, op->pos.offset,
1546 "%s(): %s", __func__, bch2_err_str(ret));
1547 op->error = ret;
1548 break;
1549 }
1550 }
1551
1552 bio->bi_end_io = bch2_write_endio;
1553 bio->bi_private = &op->cl;
1554 bio->bi_opf |= REQ_OP_WRITE;
1555
1556 closure_get(bio->bi_private);
1557
1558 key_to_write = (void *) (op->insert_keys.keys_p +
1559 key_to_write_offset);
1560
1561 bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
1562 key_to_write, false);
1563 } while (ret);
1564
1565 /*
1566 * Sync or no?
1567 *
1568 * If we're running asynchronously, we may still want to block
1569 * synchronously here if we weren't able to submit all of the IO at
1570 * once, as that signals backpressure to the caller.
1571 */
1572 if ((op->flags & BCH_WRITE_sync) ||
1573 (!(op->flags & BCH_WRITE_submitted) &&
1574 !(op->flags & BCH_WRITE_in_worker))) {
1575 bch2_wait_on_allocator(c, &op->cl);
1576
1577 __bch2_write_index(op);
1578
1579 if (!(op->flags & BCH_WRITE_submitted))
1580 goto again;
1581 bch2_write_done(&op->cl);
1582 } else {
1583 bch2_write_queue(op, wp);
1584 continue_at(&op->cl, bch2_write_index, NULL);
1585 }
1586 out_nofs_restore:
1587 memalloc_nofs_restore(nofs_flags);
1588 }
1589
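/*
 * Small writes are stored directly in the btree as KEY_TYPE_inline_data
 * instead of being written out to a bucket.
 */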
1590 static void bch2_write_data_inline(struct bch_write_op *op, unsigned data_len)
1591 {
1592 struct bio *bio = &op->wbio.bio;
1593 struct bvec_iter iter;
1594 struct bkey_i_inline_data *id;
1595 unsigned sectors;
1596 int ret;
1597
1598 memset(&op->failed, 0, sizeof(op->failed));
1599
1600 op->flags |= BCH_WRITE_wrote_data_inline;
1601 op->flags |= BCH_WRITE_submitted;
1602
1603 bch2_check_set_feature(op->c, BCH_FEATURE_inline_data);
1604
1605 ret = bch2_keylist_realloc(&op->insert_keys, op->inline_keys,
1606 ARRAY_SIZE(op->inline_keys),
1607 BKEY_U64s + DIV_ROUND_UP(data_len, 8));
1608 if (ret) {
1609 op->error = ret;
1610 goto err;
1611 }
1612
1613 sectors = bio_sectors(bio);
1614 op->pos.offset += sectors;
1615
1616 id = bkey_inline_data_init(op->insert_keys.top);
1617 id->k.p = op->pos;
1618 id->k.bversion = op->version;
1619 id->k.size = sectors;
1620
1621 iter = bio->bi_iter;
1622 iter.bi_size = data_len;
1623 memcpy_from_bio(id->v.data, bio, iter);
1624
1625 while (data_len & 7)
1626 id->v.data[data_len++] = '\0';
1627 set_bkey_val_bytes(&id->k, data_len);
1628 bch2_keylist_push(&op->insert_keys);
1629
1630 __bch2_write_index(op);
1631 err:
1632 bch2_write_done(&op->cl);
1633 }
1634
1635 /**
1636 * bch2_write() - handle a write to a cache device or flash only volume
1637 * @cl: &bch_write_op->cl
1638 *
1639 * This is the starting point for any data to end up in a cache device; it could
1640 * be from a normal write, or a writeback write, or a write to a flash only
1641 * volume - it's also used by the moving garbage collector to compact data in
1642 * mostly empty buckets.
1643 *
1644 * It first writes the data to the cache, creating a list of keys to be inserted
1645 * (if the data won't fit in a single open bucket, there will be multiple keys);
1646 * after the data is written it calls bch_journal, and after the keys have been
1647 * added to the next journal write they're inserted into the btree.
1648 *
1649 * If op->discard is true, instead of inserting the data it invalidates the
1650 * region of the cache represented by op->bio and op->inode.
1651 */
1652 CLOSURE_CALLBACK(bch2_write)
1653 {
1654 closure_type(op, struct bch_write_op, cl);
1655 struct bio *bio = &op->wbio.bio;
1656 struct bch_fs *c = op->c;
1657 unsigned data_len;
1658
1659 EBUG_ON(op->cl.parent);
1660 BUG_ON(!op->nr_replicas);
1661 BUG_ON(!op->write_point.v);
1662 BUG_ON(bkey_eq(op->pos, POS_MAX));
1663
1664 if (op->flags & BCH_WRITE_only_specified_devs)
1665 op->flags |= BCH_WRITE_alloc_nowait;
1666
1667 op->nr_replicas_required = min_t(unsigned, op->nr_replicas_required, op->nr_replicas);
1668 op->start_time = local_clock();
1669 bch2_keylist_init(&op->insert_keys, op->inline_keys);
1670 wbio_init(bio)->put_bio = false;
1671
1672 if (unlikely(bio->bi_iter.bi_size & (c->opts.block_size - 1))) {
1673 bch2_write_op_error(op, op->pos.offset, "misaligned write");
1674 op->error = -BCH_ERR_data_write_misaligned;
1675 goto err;
1676 }
1677
1678 if (c->opts.nochanges) {
1679 op->error = -BCH_ERR_erofs_no_writes;
1680 goto err;
1681 }
1682
1683 if (!(op->flags & BCH_WRITE_move) &&
1684 !bch2_write_ref_tryget(c, BCH_WRITE_REF_write)) {
1685 op->error = -BCH_ERR_erofs_no_writes;
1686 goto err;
1687 }
1688
1689 if (!(op->flags & BCH_WRITE_move))
1690 this_cpu_add(c->counters[BCH_COUNTER_io_write], bio_sectors(bio));
1691 bch2_increment_clock(c, bio_sectors(bio), WRITE);
1692
1693 data_len = min_t(u64, bio->bi_iter.bi_size,
1694 op->new_i_size - (op->pos.offset << 9));
1695
1696 if (c->opts.inline_data &&
1697 data_len <= min(block_bytes(c) / 2, 1024U)) {
1698 bch2_write_data_inline(op, data_len);
1699 return;
1700 }
1701
1702 __bch2_write(op);
1703 return;
1704 err:
1705 bch2_disk_reservation_put(c, &op->res);
1706
1707 closure_debug_destroy(&op->cl);
1708 if (op->end_io)
1709 op->end_io(op);
1710 }
1711
1712 static const char * const bch2_write_flags[] = {
1713 #define x(f) #f,
1714 BCH_WRITE_FLAGS()
1715 #undef x
1716 NULL
1717 };
1718
1719 void bch2_write_op_to_text(struct printbuf *out, struct bch_write_op *op)
1720 {
1721 if (!out->nr_tabstops)
1722 printbuf_tabstop_push(out, 32);
1723
1724 prt_printf(out, "pos:\t");
1725 bch2_bpos_to_text(out, op->pos);
1726 prt_newline(out);
1727 printbuf_indent_add(out, 2);
1728
1729 prt_printf(out, "started:\t");
1730 bch2_pr_time_units(out, local_clock() - op->start_time);
1731 prt_newline(out);
1732
1733 prt_printf(out, "flags:\t");
1734 prt_bitflags(out, bch2_write_flags, op->flags);
1735 prt_newline(out);
1736
1737 prt_printf(out, "nr_replicas:\t%u\n", op->nr_replicas);
1738 prt_printf(out, "nr_replicas_required:\t%u\n", op->nr_replicas_required);
1739
1740 prt_printf(out, "ref:\t%u\n", closure_nr_remaining(&op->cl));
1741
1742 printbuf_indent_sub(out, 2);
1743 }
1744
1745 void bch2_fs_io_write_exit(struct bch_fs *c)
1746 {
1747 mempool_exit(&c->bio_bounce_pages);
1748 bioset_exit(&c->replica_set);
1749 bioset_exit(&c->bio_write);
1750 }
1751
1752 int bch2_fs_io_write_init(struct bch_fs *c)
1753 {
1754 if (bioset_init(&c->bio_write, 1, offsetof(struct bch_write_bio, bio), BIOSET_NEED_BVECS) ||
1755 bioset_init(&c->replica_set, 4, offsetof(struct bch_write_bio, bio), 0))
1756 return -BCH_ERR_ENOMEM_bio_write_init;
1757
1758 if (mempool_init_page_pool(&c->bio_bounce_pages,
1759 max_t(unsigned,
1760 c->opts.btree_node_size,
1761 c->opts.encoded_extent_max) /
1762 PAGE_SIZE, 0))
1763 return -BCH_ERR_ENOMEM_bio_bounce_pages_init;
1764
1765 return 0;
1766 }
1767