1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2009 Oracle. All rights reserved.
4 */
5
6 #include <linux/sched.h>
7 #include <linux/slab.h>
8 #include <linux/sort.h>
9 #include "messages.h"
10 #include "ctree.h"
11 #include "delayed-ref.h"
12 #include "extent-tree.h"
13 #include "transaction.h"
14 #include "qgroup.h"
15 #include "space-info.h"
16 #include "tree-mod-log.h"
17 #include "fs.h"
18
19 struct kmem_cache *btrfs_delayed_ref_head_cachep;
20 struct kmem_cache *btrfs_delayed_ref_node_cachep;
21 struct kmem_cache *btrfs_delayed_extent_op_cachep;
22 /*
23 * delayed back reference update tracking. For subvolume trees
24 * we queue up extent allocations and backref maintenance for
25 * delayed processing. This avoids deep call chains where we
26 * add extents in the middle of btrfs_search_slot, and it allows
27 * us to buffer up frequently modified backrefs in an rb tree instead
28 * of hammering updates on the extent allocation tree.
29 */
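/*
 * A short map of the data structures used below (defined in delayed-ref.h):
 *
 * - Every modified extent gets a btrfs_delayed_ref_head, stored in the
 *   per-transaction delayed_refs->head_refs xarray and indexed by
 *   (bytenr >> sectorsize_bits).
 *
 * - Each head keeps its individual btrfs_delayed_ref_node entries in an
 *   rbtree (head->ref_tree) ordered by comp_refs(), plus a list of the
 *   pending ADD refs (head->ref_add_list).
 *
 * - The delayed_refs_rsv block reserve tracks the metadata space needed to
 *   eventually run all of the queued heads and refs; the helpers at the top
 *   of this file adjust that reserve.
 */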
30
31 bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
32 {
33 struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
34 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
35 bool ret = false;
36 u64 reserved;
37
38 spin_lock(&global_rsv->lock);
39 reserved = global_rsv->reserved;
40 spin_unlock(&global_rsv->lock);
41
42 /*
43 * Since the global reserve is just kind of magic we don't really want
44 * to rely on it to save our bacon, so if our size is more than the
45 * delayed_refs_rsv and the global rsv then it's time to think about
46 * bailing.
47 */
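/*
 * For example: if delayed_refs_rsv->size is 10M while only 4M is reserved
 * there and another 5M sits in the global rsv, then 10M >= 9M and we return
 * true, telling the caller that space for delayed refs is getting tight.
 */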
48 spin_lock(&delayed_refs_rsv->lock);
49 reserved += delayed_refs_rsv->reserved;
50 if (delayed_refs_rsv->size >= reserved)
51 ret = true;
52 spin_unlock(&delayed_refs_rsv->lock);
53 return ret;
54 }
55
56 /*
57 * Release a ref head's reservation.
58 *
59 * @fs_info: the filesystem
60 * @nr_refs: number of delayed refs to drop
61 * @nr_csums: number of csum items to drop
62 *
63 * Drops the delayed ref head's count from the delayed refs rsv and frees any
64 * excess reservation we had.
65 */
66 void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr_refs, int nr_csums)
67 {
68 struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
69 u64 num_bytes;
70 u64 released;
71
72 num_bytes = btrfs_calc_delayed_ref_bytes(fs_info, nr_refs);
73 num_bytes += btrfs_calc_delayed_ref_csum_bytes(fs_info, nr_csums);
74
75 released = btrfs_block_rsv_release(fs_info, block_rsv, num_bytes, NULL);
76 if (released)
77 trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
78 0, released, 0);
79 }
80
81 /*
82 * Adjust the size of the delayed refs rsv.
83 *
84 * This is to be called anytime we may have adjusted trans->delayed_ref_updates
85 * or trans->delayed_ref_csum_deletions. It calculates the additional size and
86 * adds it to the delayed_refs_rsv.
87 */
88 void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
89 {
90 struct btrfs_fs_info *fs_info = trans->fs_info;
91 struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
92 struct btrfs_block_rsv *local_rsv = &trans->delayed_rsv;
93 u64 num_bytes;
94 u64 reserved_bytes;
95
96 if (btrfs_is_testing(fs_info))
97 return;
98
99 num_bytes = btrfs_calc_delayed_ref_bytes(fs_info, trans->delayed_ref_updates);
100 num_bytes += btrfs_calc_delayed_ref_csum_bytes(fs_info,
101 trans->delayed_ref_csum_deletions);
102
103 if (num_bytes == 0)
104 return;
105
106 /*
107 * Try to take num_bytes from the transaction's local delayed reserve.
108 * If not possible, try to take as much as is available. If the local
109 * reserve doesn't have enough reserved space, the delayed refs reserve
110 * will be refilled next time btrfs_delayed_refs_rsv_refill() is called
111 * by someone or if a transaction commit is triggered before that, the
112 * global block reserve will be used. We want to minimize using the
113 * global block reserve for cases we can account for in advance, to
114 * avoid exhausting it and reach -ENOSPC during a transaction commit.
115 */
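/*
 * For example, if num_bytes corresponds to 3 insertions but the local
 * reserve only has 1 insertion worth of space left, we move that one to
 * delayed_rsv->reserved while delayed_rsv->size still grows by the full
 * amount; the remaining 2 must come from a later refill (or, as noted
 * above, from the global reserve at commit time).
 */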
116 spin_lock(&local_rsv->lock);
117 reserved_bytes = min(num_bytes, local_rsv->reserved);
118 local_rsv->reserved -= reserved_bytes;
119 local_rsv->full = (local_rsv->reserved >= local_rsv->size);
120 spin_unlock(&local_rsv->lock);
121
122 spin_lock(&delayed_rsv->lock);
123 delayed_rsv->size += num_bytes;
124 delayed_rsv->reserved += reserved_bytes;
125 delayed_rsv->full = (delayed_rsv->reserved >= delayed_rsv->size);
126 spin_unlock(&delayed_rsv->lock);
127 trans->delayed_ref_updates = 0;
128 trans->delayed_ref_csum_deletions = 0;
129 }
130
131 /*
132 * Adjust the size of the delayed refs block reserve for 1 block group item
133 * insertion, used after allocating a block group.
134 */
135 void btrfs_inc_delayed_refs_rsv_bg_inserts(struct btrfs_fs_info *fs_info)
136 {
137 struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
138
139 spin_lock(&delayed_rsv->lock);
140 /*
141 * Inserting a block group item does not require changing the free space
142 * tree, only the extent tree or the block group tree, so this is all we
143 * need.
144 */
145 delayed_rsv->size += btrfs_calc_insert_metadata_size(fs_info, 1);
146 delayed_rsv->full = false;
147 spin_unlock(&delayed_rsv->lock);
148 }
149
150 /*
151 * Adjust the size of the delayed refs block reserve to release space for 1
152 * block group item insertion.
153 */
154 void btrfs_dec_delayed_refs_rsv_bg_inserts(struct btrfs_fs_info *fs_info)
155 {
156 struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
157 const u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
158 u64 released;
159
160 released = btrfs_block_rsv_release(fs_info, delayed_rsv, num_bytes, NULL);
161 if (released > 0)
162 trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
163 0, released, 0);
164 }
165
166 /*
167 * Adjust the size of the delayed refs block reserve for 1 block group item
168 * update.
169 */
170 void btrfs_inc_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info)
171 {
172 struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
173
174 spin_lock(&delayed_rsv->lock);
175 /*
176 * Updating a block group item does not result in new nodes/leaves and
177 * does not require changing the free space tree, only the extent tree
178 * or the block group tree, so this is all we need.
179 */
180 delayed_rsv->size += btrfs_calc_metadata_size(fs_info, 1);
181 delayed_rsv->full = false;
182 spin_unlock(&delayed_rsv->lock);
183 }
184
185 /*
186 * Adjust the size of the delayed refs block reserve to release space for 1
187 * block group item update.
188 */
189 void btrfs_dec_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info)
190 {
191 struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
192 const u64 num_bytes = btrfs_calc_metadata_size(fs_info, 1);
193 u64 released;
194
195 released = btrfs_block_rsv_release(fs_info, delayed_rsv, num_bytes, NULL);
196 if (released > 0)
197 trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
198 0, released, 0);
199 }
200
201 /*
202 * Refill based on our delayed refs usage.
203 *
204 * @fs_info: the filesystem
205 * @flush: control how we can flush for this reservation.
206 *
207 * This will refill the delayed block_rsv up to 1 item's size worth of space and
208 * will return -ENOSPC if we can't make the reservation.
209 */
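/*
 * On zoned filesystems, bail out of a refill with -EAGAIN once the delayed
 * refs rsv size exceeds half of the currently usable metadata space (total
 * bytes minus zone unusable bytes). This is checked before attempting the
 * actual reservation in btrfs_delayed_refs_rsv_refill(). On non-zoned
 * filesystems this is a no-op.
 */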
210 static int btrfs_zoned_cap_metadata_reservation(struct btrfs_space_info *space_info)
211 {
212 struct btrfs_fs_info *fs_info = space_info->fs_info;
213 struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
214 u64 usable;
215 u64 cap;
216 int ret = 0;
217
218 if (!btrfs_is_zoned(fs_info))
219 return 0;
220
221 spin_lock(&space_info->lock);
222 usable = space_info->total_bytes - space_info->bytes_zone_unusable;
223 spin_unlock(&space_info->lock);
224 cap = usable >> 1;
225
226 spin_lock(&block_rsv->lock);
227 if (block_rsv->size > cap)
228 ret = -EAGAIN;
229 spin_unlock(&block_rsv->lock);
230
231 return ret;
232 }
233
234 int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
235 enum btrfs_reserve_flush_enum flush)
236 {
237 struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
238 struct btrfs_space_info *space_info = block_rsv->space_info;
239 u64 limit = btrfs_calc_delayed_ref_bytes(fs_info, 1);
240 u64 num_bytes = 0;
241 u64 refilled_bytes;
242 u64 to_free;
243 int ret = -ENOSPC;
244
245 spin_lock(&block_rsv->lock);
246 if (block_rsv->reserved < block_rsv->size) {
247 num_bytes = block_rsv->size - block_rsv->reserved;
248 num_bytes = min(num_bytes, limit);
249 }
250 spin_unlock(&block_rsv->lock);
251
252 if (!num_bytes)
253 return 0;
254
255 ret = btrfs_zoned_cap_metadata_reservation(space_info);
256 if (ret)
257 return ret;
258
259 ret = btrfs_reserve_metadata_bytes(space_info, num_bytes, flush);
260 if (ret)
261 return ret;
262
263 /*
264 * We may have raced with someone else, so check again if the block
265 * reserve is still not full and release any excess space.
266 */
267 spin_lock(&block_rsv->lock);
268 if (block_rsv->reserved < block_rsv->size) {
269 u64 needed = block_rsv->size - block_rsv->reserved;
270
271 if (num_bytes >= needed) {
272 block_rsv->reserved += needed;
273 block_rsv->full = true;
274 to_free = num_bytes - needed;
275 refilled_bytes = needed;
276 } else {
277 block_rsv->reserved += num_bytes;
278 to_free = 0;
279 refilled_bytes = num_bytes;
280 }
281 } else {
282 to_free = num_bytes;
283 refilled_bytes = 0;
284 }
285 spin_unlock(&block_rsv->lock);
286
287 if (to_free > 0)
288 btrfs_space_info_free_bytes_may_use(space_info, to_free);
289
290 if (refilled_bytes > 0)
291 trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv", 0,
292 refilled_bytes, 1);
293 return 0;
294 }
295
296 /*
297 * Compare two delayed data backrefs with the same bytenr and type.
298 */
299 static int comp_data_refs(const struct btrfs_delayed_ref_node *ref1,
300 const struct btrfs_delayed_ref_node *ref2)
301 {
302 if (ref1->data_ref.objectid < ref2->data_ref.objectid)
303 return -1;
304 if (ref1->data_ref.objectid > ref2->data_ref.objectid)
305 return 1;
306 if (ref1->data_ref.offset < ref2->data_ref.offset)
307 return -1;
308 if (ref1->data_ref.offset > ref2->data_ref.offset)
309 return 1;
310 return 0;
311 }
312
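/*
 * Full ordering for the per-head ref tree: compare by ref type first, then
 * by parent (for shared back refs) or ref_root (for keyed back refs), with
 * data refs further ordered by objectid/offset via comp_data_refs(), and
 * finally by seq when check_seq is set.
 */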
313 static int comp_refs(const struct btrfs_delayed_ref_node *ref1,
314 const struct btrfs_delayed_ref_node *ref2,
315 bool check_seq)
316 {
317 int ret = 0;
318
319 if (ref1->type < ref2->type)
320 return -1;
321 if (ref1->type > ref2->type)
322 return 1;
323 if (ref1->type == BTRFS_SHARED_BLOCK_REF_KEY ||
324 ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
325 if (ref1->parent < ref2->parent)
326 return -1;
327 if (ref1->parent > ref2->parent)
328 return 1;
329 } else {
330 if (ref1->ref_root < ref2->ref_root)
331 return -1;
332 if (ref1->ref_root > ref2->ref_root)
333 return 1;
334 if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY)
335 ret = comp_data_refs(ref1, ref2);
336 }
337 if (ret)
338 return ret;
339 if (check_seq) {
340 if (ref1->seq < ref2->seq)
341 return -1;
342 if (ref1->seq > ref2->seq)
343 return 1;
344 }
345 return 0;
346 }
347
348 static int cmp_refs_node(const struct rb_node *new, const struct rb_node *exist)
349 {
350 const struct btrfs_delayed_ref_node *new_node =
351 rb_entry(new, struct btrfs_delayed_ref_node, ref_node);
352 const struct btrfs_delayed_ref_node *exist_node =
353 rb_entry(exist, struct btrfs_delayed_ref_node, ref_node);
354
355 return comp_refs(new_node, exist_node, true);
356 }
357
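/*
 * Insert the ref node into the head's rbtree. rb_find_add_cached() adds the
 * node only if no equal node (according to cmp_refs_node()) is present;
 * otherwise nothing is inserted and the existing node is returned so that
 * the caller can merge into it. Returns NULL when the node was inserted.
 */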
358 static struct btrfs_delayed_ref_node *tree_insert(struct rb_root_cached *root,
359 struct btrfs_delayed_ref_node *ins)
360 {
361 struct rb_node *node = &ins->ref_node;
362 struct rb_node *exist = rb_find_add_cached(node, root, cmp_refs_node);
363
364 return rb_entry_safe(exist, struct btrfs_delayed_ref_node, ref_node);
365 }
366
367 static struct btrfs_delayed_ref_head *find_first_ref_head(
368 struct btrfs_delayed_ref_root *dr)
369 {
370 unsigned long from = 0;
371
372 lockdep_assert_held(&dr->lock);
373
374 return xa_find(&dr->head_refs, &from, ULONG_MAX, XA_PRESENT);
375 }
376
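/*
 * Lock a ref head's mutex while holding delayed_refs->lock. If the mutex is
 * contended we take a reference on the head, drop the spinlock, block on the
 * mutex and then reacquire the spinlock. Returns true with the mutex held,
 * or false if the head stopped being tracked (it was removed) while we were
 * sleeping; either way delayed_refs->lock is held again on return.
 */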
377 static bool btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
378 struct btrfs_delayed_ref_head *head)
379 {
380 lockdep_assert_held(&delayed_refs->lock);
381 if (mutex_trylock(&head->mutex))
382 return true;
383
384 refcount_inc(&head->refs);
385 spin_unlock(&delayed_refs->lock);
386
387 mutex_lock(&head->mutex);
388 spin_lock(&delayed_refs->lock);
389 if (!head->tracked) {
390 mutex_unlock(&head->mutex);
391 btrfs_put_delayed_ref_head(head);
392 return false;
393 }
394 btrfs_put_delayed_ref_head(head);
395 return true;
396 }
397
398 static inline void drop_delayed_ref(struct btrfs_fs_info *fs_info,
399 struct btrfs_delayed_ref_root *delayed_refs,
400 struct btrfs_delayed_ref_head *head,
401 struct btrfs_delayed_ref_node *ref)
402 {
403 lockdep_assert_held(&head->lock);
404 rb_erase_cached(&ref->ref_node, &head->ref_tree);
405 RB_CLEAR_NODE(&ref->ref_node);
406 if (!list_empty(&ref->add_list))
407 list_del(&ref->add_list);
408 btrfs_put_delayed_ref(ref);
409 btrfs_delayed_refs_rsv_release(fs_info, 1, 0);
410 }
411
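/*
 * Try to fold the refs that follow @ref in the head's tree into @ref, as long
 * as they compare equal when ignoring seq. Matching actions accumulate into
 * ref->ref_mod, opposite actions subtract; for example an ADD with ref_mod 1
 * followed by an equal DROP with ref_mod 1 cancels out and both nodes are
 * dropped. Returns true when the caller has to restart the scan from the
 * first node of the tree.
 */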
412 static bool merge_ref(struct btrfs_fs_info *fs_info,
413 struct btrfs_delayed_ref_root *delayed_refs,
414 struct btrfs_delayed_ref_head *head,
415 struct btrfs_delayed_ref_node *ref,
416 u64 seq)
417 {
418 struct btrfs_delayed_ref_node *next;
419 struct rb_node *node = rb_next(&ref->ref_node);
420 bool done = false;
421
422 while (!done && node) {
423 int mod;
424
425 next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
426 node = rb_next(node);
427 if (seq && next->seq >= seq)
428 break;
429 if (comp_refs(ref, next, false))
430 break;
431
432 if (ref->action == next->action) {
433 mod = next->ref_mod;
434 } else {
435 if (ref->ref_mod < next->ref_mod) {
436 swap(ref, next);
437 done = true;
438 }
439 mod = -next->ref_mod;
440 }
441
442 drop_delayed_ref(fs_info, delayed_refs, head, next);
443 ref->ref_mod += mod;
444 if (ref->ref_mod == 0) {
445 drop_delayed_ref(fs_info, delayed_refs, head, ref);
446 done = true;
447 } else {
448 /*
449 * Can't have multiples of the same ref on a tree block.
450 */
451 WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
452 ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
453 }
454 }
455
456 return done;
457 }
458
459 void btrfs_merge_delayed_refs(struct btrfs_fs_info *fs_info,
460 struct btrfs_delayed_ref_root *delayed_refs,
461 struct btrfs_delayed_ref_head *head)
462 {
463 struct btrfs_delayed_ref_node *ref;
464 struct rb_node *node;
465 u64 seq = 0;
466
467 lockdep_assert_held(&head->lock);
468
469 if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
470 return;
471
472 /* We don't have too many refs to merge for data. */
473 if (head->is_data)
474 return;
475
476 seq = btrfs_tree_mod_log_lowest_seq(fs_info);
477 again:
478 for (node = rb_first_cached(&head->ref_tree); node;
479 node = rb_next(node)) {
480 ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
481 if (seq && ref->seq >= seq)
482 continue;
483 if (merge_ref(fs_info, delayed_refs, head, ref, seq))
484 goto again;
485 }
486 }
487
488 int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
489 {
490 int ret = 0;
491 u64 min_seq = btrfs_tree_mod_log_lowest_seq(fs_info);
492
493 if (min_seq != 0 && seq >= min_seq) {
494 btrfs_debug(fs_info,
495 "holding back delayed_ref %llu, lowest is %llu",
496 seq, min_seq);
497 ret = 1;
498 }
499
500 return ret;
501 }
502
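/*
 * Pick the next ref head to run: scan the head_refs xarray starting at
 * run_delayed_start (wrapping around to 0 once) for a head that is not
 * already being processed, mark it as processing and lock its mutex.
 * Returns NULL when there is nothing left to do, or ERR_PTR(-EAGAIN) if the
 * chosen head was freed while we waited for its mutex.
 */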
503 struct btrfs_delayed_ref_head *btrfs_select_ref_head(
504 const struct btrfs_fs_info *fs_info,
505 struct btrfs_delayed_ref_root *delayed_refs)
506 {
507 struct btrfs_delayed_ref_head *head;
508 unsigned long start_index;
509 unsigned long found_index;
510 bool found_head = false;
511 bool locked;
512
513 spin_lock(&delayed_refs->lock);
514 again:
515 start_index = (delayed_refs->run_delayed_start >> fs_info->sectorsize_bits);
516 xa_for_each_start(&delayed_refs->head_refs, found_index, head, start_index) {
517 if (!head->processing) {
518 found_head = true;
519 break;
520 }
521 }
522 if (!found_head) {
523 if (delayed_refs->run_delayed_start == 0) {
524 spin_unlock(&delayed_refs->lock);
525 return NULL;
526 }
527 delayed_refs->run_delayed_start = 0;
528 goto again;
529 }
530
531 head->processing = true;
532 WARN_ON(delayed_refs->num_heads_ready == 0);
533 delayed_refs->num_heads_ready--;
534 delayed_refs->run_delayed_start = head->bytenr +
535 head->num_bytes;
536
537 locked = btrfs_delayed_ref_lock(delayed_refs, head);
538 spin_unlock(&delayed_refs->lock);
539
540 /*
541 * We may have dropped the spin lock to get the head mutex lock, and
542 * that might have given someone else time to free the head. If that's
543 * true, it has been removed from our list and we can move on.
544 */
545 if (!locked)
546 return ERR_PTR(-EAGAIN);
547
548 return head;
549 }
550
551 void btrfs_unselect_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
552 struct btrfs_delayed_ref_head *head)
553 {
554 spin_lock(&delayed_refs->lock);
555 head->processing = false;
556 delayed_refs->num_heads_ready++;
557 spin_unlock(&delayed_refs->lock);
558 btrfs_delayed_ref_unlock(head);
559 }
560
561 void btrfs_delete_ref_head(const struct btrfs_fs_info *fs_info,
562 struct btrfs_delayed_ref_root *delayed_refs,
563 struct btrfs_delayed_ref_head *head)
564 {
565 const unsigned long index = (head->bytenr >> fs_info->sectorsize_bits);
566
567 lockdep_assert_held(&delayed_refs->lock);
568 lockdep_assert_held(&head->lock);
569
570 xa_erase(&delayed_refs->head_refs, index);
571 head->tracked = false;
572 delayed_refs->num_heads--;
573 if (!head->processing)
574 delayed_refs->num_heads_ready--;
575 }
576
577 struct btrfs_delayed_ref_node *btrfs_select_delayed_ref(struct btrfs_delayed_ref_head *head)
578 {
579 struct btrfs_delayed_ref_node *ref;
580
581 lockdep_assert_held(&head->mutex);
582 lockdep_assert_held(&head->lock);
583
584 if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
585 return NULL;
586
587 /*
588 * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
589 * This is to prevent a ref count from going down to zero, which deletes
590 * the extent item from the extent tree, when there still are references
591 * to add, which would fail because they would not find the extent item.
592 */
593 if (!list_empty(&head->ref_add_list))
594 return list_first_entry(&head->ref_add_list,
595 struct btrfs_delayed_ref_node, add_list);
596
597 ref = rb_entry(rb_first_cached(&head->ref_tree),
598 struct btrfs_delayed_ref_node, ref_node);
599 ASSERT(list_empty(&ref->add_list));
600 return ref;
601 }
602
603 /*
604 * Helper to insert the ref node into the head's ref tree or merge it with an existing one.
605 *
606 * Return false if the ref was inserted.
607 * Return true if the ref was merged into an existing one (and therefore can be
608 * freed by the caller).
609 */
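/*
 * The merge math below mirrors merge_ref(): e.g. an existing DROP ref with
 * ref_mod 1 that receives a matching ADD with ref_mod 2 flips to ADD and ends
 * up with ref_mod 1 (2 - 1), while a matching DROP with ref_mod 1 simply
 * accumulates to ref_mod 2. A resulting ref_mod of 0 removes the existing
 * ref entirely.
 */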
610 static bool insert_delayed_ref(struct btrfs_trans_handle *trans,
611 struct btrfs_delayed_ref_head *href,
612 struct btrfs_delayed_ref_node *ref)
613 {
614 struct btrfs_delayed_ref_root *root = &trans->transaction->delayed_refs;
615 struct btrfs_delayed_ref_node *exist;
616 int mod;
617
618 spin_lock(&href->lock);
619 exist = tree_insert(&href->ref_tree, ref);
620 if (!exist) {
621 if (ref->action == BTRFS_ADD_DELAYED_REF)
622 list_add_tail(&ref->add_list, &href->ref_add_list);
623 spin_unlock(&href->lock);
624 trans->delayed_ref_updates++;
625 return false;
626 }
627
628 /* Now we are sure we can merge */
629 if (exist->action == ref->action) {
630 mod = ref->ref_mod;
631 } else {
632 /* Need to change action */
633 if (exist->ref_mod < ref->ref_mod) {
634 exist->action = ref->action;
635 mod = -exist->ref_mod;
636 exist->ref_mod = ref->ref_mod;
637 if (ref->action == BTRFS_ADD_DELAYED_REF)
638 list_add_tail(&exist->add_list,
639 &href->ref_add_list);
640 else if (ref->action == BTRFS_DROP_DELAYED_REF) {
641 ASSERT(!list_empty(&exist->add_list));
642 list_del_init(&exist->add_list);
643 } else {
644 ASSERT(0);
645 }
646 } else
647 mod = -ref->ref_mod;
648 }
649 exist->ref_mod += mod;
650
651 /* Remove the existing ref if its ref_mod is zero. */
652 if (exist->ref_mod == 0)
653 drop_delayed_ref(trans->fs_info, root, href, exist);
654 spin_unlock(&href->lock);
655 return true;
656 }
657
658 /*
659 * Helper function to update the accounting in the head ref.
660 * The existing and update heads must have the same bytenr.
661 */
662 static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
663 struct btrfs_delayed_ref_head *existing,
664 struct btrfs_delayed_ref_head *update)
665 {
666 struct btrfs_delayed_ref_root *delayed_refs =
667 &trans->transaction->delayed_refs;
668 struct btrfs_fs_info *fs_info = trans->fs_info;
669 int old_ref_mod;
670
671 BUG_ON(existing->is_data != update->is_data);
672
673 spin_lock(&existing->lock);
674
675 /*
676 * When freeing an extent, we may not know the owning root when we
677 * first create the head_ref. However, some deref before the last deref
678 * will know it, so we just need to update the head_ref accordingly.
679 */
680 if (!existing->owning_root)
681 existing->owning_root = update->owning_root;
682
683 if (update->must_insert_reserved) {
684 /*
685 * If the extent was freed and then reallocated before the
686 * delayed ref entries were processed, we can end up with an
687 * existing head ref without the must_insert_reserved flag
688 * set.
689 * Set it again here.
690 */
691 existing->must_insert_reserved = update->must_insert_reserved;
692 existing->owning_root = update->owning_root;
693
694 /*
695 * update the num_bytes so we make sure the accounting
696 * is done correctly
697 */
698 existing->num_bytes = update->num_bytes;
699
700 }
701
702 if (update->extent_op) {
703 if (!existing->extent_op) {
704 existing->extent_op = update->extent_op;
705 } else {
706 if (update->extent_op->update_key) {
707 memcpy(&existing->extent_op->key,
708 &update->extent_op->key,
709 sizeof(update->extent_op->key));
710 existing->extent_op->update_key = true;
711 }
712 if (update->extent_op->update_flags) {
713 existing->extent_op->flags_to_set |=
714 update->extent_op->flags_to_set;
715 existing->extent_op->update_flags = true;
716 }
717 btrfs_free_delayed_extent_op(update->extent_op);
718 }
719 }
720 /*
721 * Update the reference mod on the head to reflect this new operation. We
722 * only need the lock for this case because we could be processing it
723 * currently; for refs we just added we know we're a-ok.
724 */
725 old_ref_mod = existing->total_ref_mod;
726 existing->ref_mod += update->ref_mod;
727 existing->total_ref_mod += update->ref_mod;
728
729 /*
730 * If we are going from a positive ref mod to a negative one, or vice
731 * versa, we need to make sure to adjust pending_csums accordingly.
732 * We reserve bytes for csum deletion when adding or updating a ref head,
733 * see add_delayed_ref_head() for more details.
734 */
735 if (existing->is_data) {
736 u64 csum_leaves =
737 btrfs_csum_bytes_to_leaves(fs_info,
738 existing->num_bytes);
739
740 if (existing->total_ref_mod >= 0 && old_ref_mod < 0) {
741 delayed_refs->pending_csums -= existing->num_bytes;
742 btrfs_delayed_refs_rsv_release(fs_info, 0, csum_leaves);
743 }
744 if (existing->total_ref_mod < 0 && old_ref_mod >= 0) {
745 delayed_refs->pending_csums += existing->num_bytes;
746 trans->delayed_ref_csum_deletions += csum_leaves;
747 }
748 }
749
750 spin_unlock(&existing->lock);
751 }
752
753 static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
754 struct btrfs_ref *generic_ref,
755 struct btrfs_qgroup_extent_record *qrecord,
756 u64 reserved)
757 {
758 int count_mod = 1;
759 bool must_insert_reserved = false;
760
761 /* If reserved is provided, it must be a data extent. */
762 BUG_ON(generic_ref->type != BTRFS_REF_DATA && reserved);
763
764 switch (generic_ref->action) {
765 case BTRFS_ADD_DELAYED_REF:
766 /* count_mod is already set to 1. */
767 break;
768 case BTRFS_UPDATE_DELAYED_HEAD:
769 count_mod = 0;
770 break;
771 case BTRFS_DROP_DELAYED_REF:
772 /*
773 * The head node stores the sum of all the mods, so dropping a ref
774 * should drop the sum in the head node by one.
775 */
776 count_mod = -1;
777 break;
778 case BTRFS_ADD_DELAYED_EXTENT:
779 /*
780 * BTRFS_ADD_DELAYED_EXTENT means that we need to update the
781 * reserved accounting when the extent is finally added, or if a
782 * later modification deletes the delayed ref without ever
783 * inserting the extent into the extent allocation tree.
784 * ref->must_insert_reserved is the flag used to record that
785 * accounting mods are required.
786 *
787 * Once we record must_insert_reserved, switch the action to
788 * BTRFS_ADD_DELAYED_REF because other special casing is not
789 * required.
790 */
791 must_insert_reserved = true;
792 break;
793 }
794
795 refcount_set(&head_ref->refs, 1);
796 head_ref->bytenr = generic_ref->bytenr;
797 head_ref->num_bytes = generic_ref->num_bytes;
798 head_ref->ref_mod = count_mod;
799 head_ref->reserved_bytes = reserved;
800 head_ref->must_insert_reserved = must_insert_reserved;
801 head_ref->owning_root = generic_ref->owning_root;
802 head_ref->is_data = (generic_ref->type == BTRFS_REF_DATA);
803 head_ref->is_system = (generic_ref->ref_root == BTRFS_CHUNK_TREE_OBJECTID);
804 head_ref->ref_tree = RB_ROOT_CACHED;
805 INIT_LIST_HEAD(&head_ref->ref_add_list);
806 head_ref->tracked = false;
807 head_ref->processing = false;
808 head_ref->total_ref_mod = count_mod;
809 spin_lock_init(&head_ref->lock);
810 mutex_init(&head_ref->mutex);
811
812 /* If not metadata set an impossible level to help debugging. */
813 if (generic_ref->type == BTRFS_REF_METADATA)
814 head_ref->level = generic_ref->tree_ref.level;
815 else
816 head_ref->level = U8_MAX;
817
818 if (qrecord) {
819 if (generic_ref->ref_root && reserved) {
820 qrecord->data_rsv = reserved;
821 qrecord->data_rsv_refroot = generic_ref->ref_root;
822 }
823 qrecord->num_bytes = generic_ref->num_bytes;
824 qrecord->old_roots = NULL;
825 }
826 }
827
828 /*
829 * Helper function to actually insert a head node into the xarray. This does all
830 * the dirty work in terms of maintaining the correct overall modification
831 * count.
832 *
833 * The caller is responsible for freeing @qrecord: if this function reports,
834 * via @qrecord_inserted_ret, that it did not insert the record, then it is
835 * safe to call kfree() on it.
836 *
837 * Returns an error pointer in case of an error.
838 */
839 static noinline struct btrfs_delayed_ref_head *
840 add_delayed_ref_head(struct btrfs_trans_handle *trans,
841 struct btrfs_delayed_ref_head *head_ref,
842 struct btrfs_qgroup_extent_record *qrecord,
843 int action, bool *qrecord_inserted_ret)
844 {
845 struct btrfs_fs_info *fs_info = trans->fs_info;
846 struct btrfs_delayed_ref_head *existing;
847 struct btrfs_delayed_ref_root *delayed_refs;
848 const unsigned long index = (head_ref->bytenr >> fs_info->sectorsize_bits);
849
850 /*
851 * If 'qrecord_inserted_ret' is provided, then the first thing we need
852 * to do is to initialize it to false just in case we have an exit
853 * before trying to insert the record.
854 */
855 if (qrecord_inserted_ret)
856 *qrecord_inserted_ret = false;
857
858 delayed_refs = &trans->transaction->delayed_refs;
859 lockdep_assert_held(&delayed_refs->lock);
860
861 #if BITS_PER_LONG == 32
862 if (head_ref->bytenr >= MAX_LFS_FILESIZE) {
863 if (qrecord)
864 xa_release(&delayed_refs->dirty_extents, index);
865 btrfs_err_rl(fs_info,
866 "delayed ref head %llu is beyond 32bit page cache and xarray index limit",
867 head_ref->bytenr);
868 btrfs_err_32bit_limit(fs_info);
869 return ERR_PTR(-EOVERFLOW);
870 }
871 #endif
872
873 /* Record qgroup extent info if provided */
874 if (qrecord) {
875 /*
876 * Setting 'qrecord' but not 'qrecord_inserted_ret' will likely
877 * result in a memory leak.
878 */
879 ASSERT(qrecord_inserted_ret != NULL);
880
881 int ret;
882
883 ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, qrecord,
884 head_ref->bytenr);
885 if (ret) {
886 /* Clean up if insertion fails or item exists. */
887 xa_release(&delayed_refs->dirty_extents, index);
888 if (ret < 0)
889 return ERR_PTR(ret);
890 } else if (qrecord_inserted_ret) {
891 *qrecord_inserted_ret = true;
892 }
893 }
894
895 trace_add_delayed_ref_head(fs_info, head_ref, action);
896
897 existing = xa_load(&delayed_refs->head_refs, index);
898 if (existing) {
899 update_existing_head_ref(trans, existing, head_ref);
900 /*
901 * we've updated the existing ref, free the newly
902 * allocated ref
903 */
904 kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
905 head_ref = existing;
906 } else {
907 existing = xa_store(&delayed_refs->head_refs, index, head_ref, GFP_ATOMIC);
908 if (xa_is_err(existing)) {
909 /* Memory was preallocated by the caller. */
910 ASSERT(xa_err(existing) != -ENOMEM);
911 return ERR_PTR(xa_err(existing));
912 } else if (WARN_ON(existing)) {
913 /*
914 * Shouldn't happen, we just did a lookup before under
915 * delayed_refs->lock.
916 */
917 return ERR_PTR(-EEXIST);
918 }
919 head_ref->tracked = true;
920 /*
921 * We reserve the amount of bytes needed to delete csums when
922 * adding the ref head and not when adding individual drop refs
923 * since the csum items are deleted only after running the last
924 * delayed drop ref (the data extent's ref count drops to 0).
925 */
926 if (head_ref->is_data && head_ref->ref_mod < 0) {
927 delayed_refs->pending_csums += head_ref->num_bytes;
928 trans->delayed_ref_csum_deletions +=
929 btrfs_csum_bytes_to_leaves(fs_info, head_ref->num_bytes);
930 }
931 delayed_refs->num_heads++;
932 delayed_refs->num_heads_ready++;
933 }
934
935 return head_ref;
936 }
937
938 /*
939 * Initialize the structure which represents a modification to an extent.
940 *
941 * @fs_info: Internal to the mounted filesystem mount structure.
942 *
943 * @ref: The structure which is going to be initialized.
944 *
945 * @generic_ref: Source of the remaining values that describe the modification:
946 *
947 * bytenr: The logical address of the extent that is being modified.
948 *
949 * num_bytes: Size of the extent whose modification is being recorded.
950 *
951 * ref_root: The id of the root where this modification has originated,
952 * either one of the well-known metadata trees or the subvolume id
953 * which references this extent.
954 *
955 * action: One of BTRFS_ADD_DELAYED_REF, BTRFS_DROP_DELAYED_REF or
956 * BTRFS_ADD_DELAYED_EXTENT.
957 *
958 * type: The backref type, one of BTRFS_SHARED_BLOCK_REF_KEY/
959 * BTRFS_TREE_BLOCK_REF_KEY for metadata or BTRFS_SHARED_DATA_REF_KEY/
960 * BTRFS_EXTENT_DATA_REF_KEY for data.
961 */
962 static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
963 struct btrfs_delayed_ref_node *ref,
964 struct btrfs_ref *generic_ref)
965 {
966 int action = generic_ref->action;
967 u64 seq = 0;
968
969 if (action == BTRFS_ADD_DELAYED_EXTENT)
970 action = BTRFS_ADD_DELAYED_REF;
971
972 if (btrfs_is_fstree(generic_ref->ref_root))
973 seq = atomic64_read(&fs_info->tree_mod_seq);
974
975 refcount_set(&ref->refs, 1);
976 ref->bytenr = generic_ref->bytenr;
977 ref->num_bytes = generic_ref->num_bytes;
978 ref->ref_mod = 1;
979 ref->action = action;
980 ref->seq = seq;
981 ref->type = btrfs_ref_type(generic_ref);
982 ref->ref_root = generic_ref->ref_root;
983 ref->parent = generic_ref->parent;
984 RB_CLEAR_NODE(&ref->ref_node);
985 INIT_LIST_HEAD(&ref->add_list);
986
987 if (generic_ref->type == BTRFS_REF_DATA)
988 ref->data_ref = generic_ref->data_ref;
989 else
990 ref->tree_ref = generic_ref->tree_ref;
991 }
992
993 void btrfs_init_tree_ref(struct btrfs_ref *generic_ref, int level, u64 mod_root,
994 bool skip_qgroup)
995 {
996 #ifdef CONFIG_BTRFS_DEBUG
997 /* If @real_root not set, use @root as fallback */
998 generic_ref->real_root = mod_root ?: generic_ref->ref_root;
999 #endif
1000 generic_ref->tree_ref.level = level;
1001 generic_ref->type = BTRFS_REF_METADATA;
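/*
 * Qgroup accounting is only interesting when the extent is owned by a
 * subvolume (fs) tree and, if the root doing the modification is known,
 * that root is an fs tree too; everything else sets skip_qgroup. The same
 * check is repeated in btrfs_init_data_ref() below.
 */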
1002 if (skip_qgroup || !(btrfs_is_fstree(generic_ref->ref_root) &&
1003 (!mod_root || btrfs_is_fstree(mod_root))))
1004 generic_ref->skip_qgroup = true;
1005 else
1006 generic_ref->skip_qgroup = false;
1007
1008 }
1009
1010 void btrfs_init_data_ref(struct btrfs_ref *generic_ref, u64 ino, u64 offset,
1011 u64 mod_root, bool skip_qgroup)
1012 {
1013 #ifdef CONFIG_BTRFS_DEBUG
1014 /* If @real_root not set, use @root as fallback */
1015 generic_ref->real_root = mod_root ?: generic_ref->ref_root;
1016 #endif
1017 generic_ref->data_ref.objectid = ino;
1018 generic_ref->data_ref.offset = offset;
1019 generic_ref->type = BTRFS_REF_DATA;
1020 if (skip_qgroup || !(btrfs_is_fstree(generic_ref->ref_root) &&
1021 (!mod_root || btrfs_is_fstree(mod_root))))
1022 generic_ref->skip_qgroup = true;
1023 else
1024 generic_ref->skip_qgroup = false;
1025 }
1026
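/*
 * Common path for both tree and data refs. The rough sequence is:
 *
 * 1) Allocate the ref node and ref head up front, plus a qgroup extent record
 *    and the xarray slots when qgroup accounting applies to this ref.
 * 2) Under delayed_refs->lock, insert or merge the head via
 *    add_delayed_ref_head() and then the node via insert_delayed_ref().
 * 3) Update the delayed refs block reserve, emit tracepoints, and hand the
 *    qgroup record to btrfs_qgroup_trace_extent_post() if it was inserted
 *    (otherwise free it).
 */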
1027 static int add_delayed_ref(struct btrfs_trans_handle *trans,
1028 struct btrfs_ref *generic_ref,
1029 struct btrfs_delayed_extent_op *extent_op,
1030 u64 reserved)
1031 {
1032 struct btrfs_fs_info *fs_info = trans->fs_info;
1033 struct btrfs_delayed_ref_node *node;
1034 struct btrfs_delayed_ref_head *head_ref;
1035 struct btrfs_delayed_ref_head *new_head_ref;
1036 struct btrfs_delayed_ref_root *delayed_refs;
1037 struct btrfs_qgroup_extent_record *record = NULL;
1038 const unsigned long index = (generic_ref->bytenr >> fs_info->sectorsize_bits);
1039 bool qrecord_reserved = false;
1040 bool qrecord_inserted;
1041 int action = generic_ref->action;
1042 bool merged;
1043 int ret;
1044
1045 node = kmem_cache_alloc(btrfs_delayed_ref_node_cachep, GFP_NOFS);
1046 if (!node)
1047 return -ENOMEM;
1048
1049 head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
1050 if (!head_ref) {
1051 ret = -ENOMEM;
1052 goto free_node;
1053 }
1054
1055 delayed_refs = &trans->transaction->delayed_refs;
1056
1057 if (btrfs_qgroup_full_accounting(fs_info) && !generic_ref->skip_qgroup) {
1058 record = kzalloc_obj(*record, GFP_NOFS);
1059 if (!record) {
1060 ret = -ENOMEM;
1061 goto free_head_ref;
1062 }
1063 if (xa_reserve(&delayed_refs->dirty_extents, index, GFP_NOFS)) {
1064 ret = -ENOMEM;
1065 goto free_record;
1066 }
1067 qrecord_reserved = true;
1068 }
1069
1070 ret = xa_reserve(&delayed_refs->head_refs, index, GFP_NOFS);
1071 if (ret) {
1072 if (qrecord_reserved)
1073 xa_release(&delayed_refs->dirty_extents, index);
1074 goto free_record;
1075 }
1076
1077 init_delayed_ref_common(fs_info, node, generic_ref);
1078 init_delayed_ref_head(head_ref, generic_ref, record, reserved);
1079 head_ref->extent_op = extent_op;
1080
1081 spin_lock(&delayed_refs->lock);
1082
1083 /*
1084 * insert both the head node and the new ref without dropping
1085 * the spin lock
1086 */
1087 new_head_ref = add_delayed_ref_head(trans, head_ref, record,
1088 action, &qrecord_inserted);
1089 if (IS_ERR(new_head_ref)) {
1090 xa_release(&delayed_refs->head_refs, index);
1091 spin_unlock(&delayed_refs->lock);
1092 ret = PTR_ERR(new_head_ref);
1093
1094 /*
1095 * It's only safe to call kfree() on 'qrecord' if
1096 * add_delayed_ref_head() has _not_ inserted it for
1097 * tracing. Otherwise we need to handle this here.
1098 */
1099 if (!qrecord_reserved || qrecord_inserted)
1100 goto free_head_ref;
1101 goto free_record;
1102 }
1103 head_ref = new_head_ref;
1104
1105 merged = insert_delayed_ref(trans, head_ref, node);
1106 spin_unlock(&delayed_refs->lock);
1107
1108 /*
1109 * Need to update the delayed_refs_rsv with any changes we may have
1110 * made.
1111 */
1112 btrfs_update_delayed_refs_rsv(trans);
1113
1114 if (generic_ref->type == BTRFS_REF_DATA)
1115 trace_add_delayed_data_ref(trans->fs_info, node);
1116 else
1117 trace_add_delayed_tree_ref(trans->fs_info, node);
1118 if (merged)
1119 kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
1120
1121 if (qrecord_inserted)
1122 return btrfs_qgroup_trace_extent_post(trans, record, generic_ref->bytenr);
1123
1124 kfree(record);
1125 return 0;
1126
1127 free_record:
1128 kfree(record);
1129 free_head_ref:
1130 kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
1131 free_node:
1132 kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
1133 return ret;
1134 }
1135
1136 /*
1137 * Add a delayed tree ref. This does all of the accounting required to make sure
1138 * the delayed ref is eventually processed before this transaction commits.
1139 */
1140 int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
1141 struct btrfs_ref *generic_ref,
1142 struct btrfs_delayed_extent_op *extent_op)
1143 {
1144 ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
1145 return add_delayed_ref(trans, generic_ref, extent_op, 0);
1146 }
1147
1148 /*
1149 * Add a delayed data ref. It's similar to btrfs_add_delayed_tree_ref.
1150 */
1151 int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
1152 struct btrfs_ref *generic_ref,
1153 u64 reserved)
1154 {
1155 ASSERT(generic_ref->type == BTRFS_REF_DATA && generic_ref->action);
1156 return add_delayed_ref(trans, generic_ref, NULL, reserved);
1157 }
1158
1159 int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
1160 u64 bytenr, u64 num_bytes, u8 level,
1161 struct btrfs_delayed_extent_op *extent_op)
1162 {
1163 const unsigned long index = (bytenr >> trans->fs_info->sectorsize_bits);
1164 struct btrfs_delayed_ref_head *head_ref;
1165 struct btrfs_delayed_ref_head *head_ref_ret;
1166 struct btrfs_delayed_ref_root *delayed_refs;
1167 struct btrfs_ref generic_ref = {
1168 .type = BTRFS_REF_METADATA,
1169 .action = BTRFS_UPDATE_DELAYED_HEAD,
1170 .bytenr = bytenr,
1171 .num_bytes = num_bytes,
1172 .tree_ref.level = level,
1173 };
1174 int ret;
1175
1176 head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
1177 if (!head_ref)
1178 return -ENOMEM;
1179
1180 init_delayed_ref_head(head_ref, &generic_ref, NULL, 0);
1181 head_ref->extent_op = extent_op;
1182
1183 delayed_refs = &trans->transaction->delayed_refs;
1184
1185 ret = xa_reserve(&delayed_refs->head_refs, index, GFP_NOFS);
1186 if (ret) {
1187 kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
1188 return ret;
1189 }
1190
1191 spin_lock(&delayed_refs->lock);
1192 head_ref_ret = add_delayed_ref_head(trans, head_ref, NULL,
1193 BTRFS_UPDATE_DELAYED_HEAD, NULL);
1194 if (IS_ERR(head_ref_ret)) {
1195 xa_release(&delayed_refs->head_refs, index);
1196 spin_unlock(&delayed_refs->lock);
1197 kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
1198 return PTR_ERR(head_ref_ret);
1199 }
1200 spin_unlock(&delayed_refs->lock);
1201
1202 /*
1203 * Need to update the delayed_refs_rsv with any changes we may have
1204 * made.
1205 */
1206 btrfs_update_delayed_refs_rsv(trans);
1207 return 0;
1208 }
1209
1210 void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
1211 {
1212 if (refcount_dec_and_test(&ref->refs)) {
1213 WARN_ON(!RB_EMPTY_NODE(&ref->ref_node));
1214 kmem_cache_free(btrfs_delayed_ref_node_cachep, ref);
1215 }
1216 }
1217
1218 /*
1219 * This does a simple search for the head node for a given extent. Returns the
1220 * head node if found, or NULL if not.
1221 */
1222 struct btrfs_delayed_ref_head *
1223 btrfs_find_delayed_ref_head(const struct btrfs_fs_info *fs_info,
1224 struct btrfs_delayed_ref_root *delayed_refs,
1225 u64 bytenr)
1226 {
1227 const unsigned long index = (bytenr >> fs_info->sectorsize_bits);
1228
1229 lockdep_assert_held(&delayed_refs->lock);
1230
1231 return xa_load(&delayed_refs->head_refs, index);
1232 }
1233
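/*
 * Compare a (root, parent) query against a delayed ref node using the same
 * ordering as comp_refs(), so it can drive the rbtree search in
 * btrfs_find_delayed_tree_ref(). Only tree block ref types are considered:
 * a non-zero @parent means a shared block ref, otherwise a keyed tree block
 * ref owned by @root.
 */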
1234 static int find_comp(struct btrfs_delayed_ref_node *entry, u64 root, u64 parent)
1235 {
1236 int type = parent ? BTRFS_SHARED_BLOCK_REF_KEY : BTRFS_TREE_BLOCK_REF_KEY;
1237
1238 if (type < entry->type)
1239 return -1;
1240 if (type > entry->type)
1241 return 1;
1242
1243 if (type == BTRFS_TREE_BLOCK_REF_KEY) {
1244 if (root < entry->ref_root)
1245 return -1;
1246 if (root > entry->ref_root)
1247 return 1;
1248 } else {
1249 if (parent < entry->parent)
1250 return -1;
1251 if (parent > entry->parent)
1252 return 1;
1253 }
1254 return 0;
1255 }
1256
1257 /*
1258 * Check to see if a given root/parent reference is attached to the head. This
1259 * only checks for BTRFS_ADD_DELAYED_REF references that match, as that
1260 * indicates the reference exists for the given root or parent. This is for
1261 * tree blocks only.
1262 *
1263 * @head: the head of the bytenr we're searching.
1264 * @root: the root objectid of the reference if it is a normal reference.
1265 * @parent: the parent if this is a shared backref.
1266 */
1267 bool btrfs_find_delayed_tree_ref(struct btrfs_delayed_ref_head *head,
1268 u64 root, u64 parent)
1269 {
1270 struct rb_node *node;
1271 bool found = false;
1272
1273 lockdep_assert_held(&head->mutex);
1274
1275 spin_lock(&head->lock);
1276 node = head->ref_tree.rb_root.rb_node;
1277 while (node) {
1278 struct btrfs_delayed_ref_node *entry;
1279 int ret;
1280
1281 entry = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
1282 ret = find_comp(entry, root, parent);
1283 if (ret < 0) {
1284 node = node->rb_left;
1285 } else if (ret > 0) {
1286 node = node->rb_right;
1287 } else {
1288 /*
1289 * We only want to count ADD actions, as drops mean the
1290 * ref doesn't exist.
1291 */
1292 if (entry->action == BTRFS_ADD_DELAYED_REF)
1293 found = true;
1294 break;
1295 }
1296 }
1297 spin_unlock(&head->lock);
1298 return found;
1299 }
1300
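/*
 * Throw away all remaining delayed refs; used when a transaction is aborted.
 * Every queued ref is dropped together with its reservation. For heads that
 * still had must_insert_reserved set, the block group's reserved bytes are
 * moved to pinned and then released through btrfs_error_unpin_extent_range()
 * so the space accounting stays consistent on the error path.
 */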
1301 void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans)
1302 {
1303 struct btrfs_delayed_ref_root *delayed_refs = &trans->delayed_refs;
1304 struct btrfs_fs_info *fs_info = trans->fs_info;
1305
1306 spin_lock(&delayed_refs->lock);
1307 while (true) {
1308 struct btrfs_delayed_ref_head *head;
1309 struct rb_node *n;
1310 bool pin_bytes = false;
1311
1312 head = find_first_ref_head(delayed_refs);
1313 if (!head)
1314 break;
1315
1316 if (!btrfs_delayed_ref_lock(delayed_refs, head))
1317 continue;
1318
1319 spin_lock(&head->lock);
1320 while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
1321 struct btrfs_delayed_ref_node *ref;
1322
1323 ref = rb_entry(n, struct btrfs_delayed_ref_node, ref_node);
1324 drop_delayed_ref(fs_info, delayed_refs, head, ref);
1325 }
1326 if (head->must_insert_reserved)
1327 pin_bytes = true;
1328 btrfs_free_delayed_extent_op(head->extent_op);
1329 btrfs_delete_ref_head(fs_info, delayed_refs, head);
1330 spin_unlock(&head->lock);
1331 spin_unlock(&delayed_refs->lock);
1332 mutex_unlock(&head->mutex);
1333
1334 if (!btrfs_is_testing(fs_info) && pin_bytes) {
1335 struct btrfs_block_group *bg;
1336
1337 bg = btrfs_lookup_block_group(fs_info, head->bytenr);
1338 if (WARN_ON_ONCE(bg == NULL)) {
1339 /*
1340 * Unexpected and there's nothing we can do here
1341 * because we are in a transaction abort path,
1342 * so any errors can only be ignored or reported
1343 * while attempting to cleanup all resources.
1344 */
1345 btrfs_err(fs_info,
1346 "block group for delayed ref at %llu was not found while destroying ref head",
1347 head->bytenr);
1348 } else {
1349 spin_lock(&bg->space_info->lock);
1350 spin_lock(&bg->lock);
1351 bg->pinned += head->num_bytes;
1352 btrfs_space_info_update_bytes_pinned(bg->space_info,
1353 head->num_bytes);
1354 bg->reserved -= head->num_bytes;
1355 bg->space_info->bytes_reserved -= head->num_bytes;
1356 spin_unlock(&bg->lock);
1357 spin_unlock(&bg->space_info->lock);
1358
1359 btrfs_put_block_group(bg);
1360 }
1361
1362 btrfs_error_unpin_extent_range(fs_info, head->bytenr,
1363 head->bytenr + head->num_bytes - 1);
1364 }
1365 if (!btrfs_is_testing(fs_info))
1366 btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
1367 btrfs_put_delayed_ref_head(head);
1368 cond_resched();
1369 spin_lock(&delayed_refs->lock);
1370 }
1371
1372 if (!btrfs_is_testing(fs_info))
1373 btrfs_qgroup_destroy_extent_records(trans);
1374
1375 spin_unlock(&delayed_refs->lock);
1376 }
1377
1378 void __cold btrfs_delayed_ref_exit(void)
1379 {
1380 kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
1381 kmem_cache_destroy(btrfs_delayed_ref_node_cachep);
1382 kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
1383 }
1384
1385 int __init btrfs_delayed_ref_init(void)
1386 {
1387 btrfs_delayed_ref_head_cachep = KMEM_CACHE(btrfs_delayed_ref_head, 0);
1388 if (!btrfs_delayed_ref_head_cachep)
1389 return -ENOMEM;
1390
1391 btrfs_delayed_ref_node_cachep = KMEM_CACHE(btrfs_delayed_ref_node, 0);
1392 if (!btrfs_delayed_ref_node_cachep)
1393 goto fail;
1394
1395 btrfs_delayed_extent_op_cachep = KMEM_CACHE(btrfs_delayed_extent_op, 0);
1396 if (!btrfs_delayed_extent_op_cachep)
1397 goto fail;
1398
1399 return 0;
1400 fail:
1401 btrfs_delayed_ref_exit();
1402 return -ENOMEM;
1403 }
1404