xref: /linux/fs/btrfs/qgroup.c (revision 0e39a731820ad26533eb988cef27ad2506063b5b)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2011 STRATO.  All rights reserved.
4  */
5 
6 #include <linux/sched.h>
7 #include <linux/pagemap.h>
8 #include <linux/writeback.h>
9 #include <linux/blkdev.h>
10 #include <linux/rbtree.h>
11 #include <linux/slab.h>
12 #include <linux/workqueue.h>
13 #include <linux/btrfs.h>
14 #include <linux/sched/mm.h>
15 
16 #include "ctree.h"
17 #include "transaction.h"
18 #include "disk-io.h"
19 #include "locking.h"
20 #include "ulist.h"
21 #include "backref.h"
22 #include "extent_io.h"
23 #include "qgroup.h"
24 #include "block-group.h"
25 #include "sysfs.h"
26 #include "tree-mod-log.h"
27 #include "fs.h"
28 #include "accessors.h"
29 #include "extent-tree.h"
30 #include "root-tree.h"
31 #include "tree-checker.h"
32 
33 enum btrfs_qgroup_mode btrfs_qgroup_mode(const struct btrfs_fs_info *fs_info)
34 {
35 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
36 		return BTRFS_QGROUP_MODE_DISABLED;
37 	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE)
38 		return BTRFS_QGROUP_MODE_SIMPLE;
39 	return BTRFS_QGROUP_MODE_FULL;
40 }
41 
42 bool btrfs_qgroup_enabled(const struct btrfs_fs_info *fs_info)
43 {
44 	return btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_DISABLED;
45 }
46 
47 bool btrfs_qgroup_full_accounting(const struct btrfs_fs_info *fs_info)
48 {
49 	return btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL;
50 }
51 
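/*
 * A minimal caller-side sketch (hypothetical, not part of this file):
 * accounting paths typically gate expensive work on the predicates above,
 * e.g. skipping backref walks when full accounting is not active:
 *
 *	if (!btrfs_qgroup_full_accounting(fs_info))
 *		return 0;
 */
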
52 /*
53  * Helpers to access qgroup reservation
54  *
55  * Callers should ensure the lock context and type are valid
56  */
57 
58 static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup)
59 {
60 	u64 ret = 0;
61 	int i;
62 
63 	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
64 		ret += qgroup->rsv.values[i];
65 
66 	return ret;
67 }
68 
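/*
 * Illustrative arithmetic (assumed values): with rsv.values[] holding 1M
 * for DATA, 16K for META_PERTRANS and 48K for META_PREALLOC,
 * qgroup_rsv_total() returns 1M + 16K + 48K, i.e. the sum over all
 * reservation types.
 */
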
69 #ifdef CONFIG_BTRFS_DEBUG
70 static const char *qgroup_rsv_type_str(enum btrfs_qgroup_rsv_type type)
71 {
72 	if (type == BTRFS_QGROUP_RSV_DATA)
73 		return "data";
74 	if (type == BTRFS_QGROUP_RSV_META_PERTRANS)
75 		return "meta_pertrans";
76 	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
77 		return "meta_prealloc";
78 	return NULL;
79 }
80 #endif
81 
82 static void qgroup_rsv_add(struct btrfs_fs_info *fs_info,
83 			   struct btrfs_qgroup *qgroup, u64 num_bytes,
84 			   enum btrfs_qgroup_rsv_type type)
85 {
86 	trace_btrfs_qgroup_update_reserve(fs_info, qgroup, num_bytes, type);
87 	qgroup->rsv.values[type] += num_bytes;
88 }
89 
90 static void qgroup_rsv_release(struct btrfs_fs_info *fs_info,
91 			       struct btrfs_qgroup *qgroup, u64 num_bytes,
92 			       enum btrfs_qgroup_rsv_type type)
93 {
94 	trace_btrfs_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type);
95 	if (qgroup->rsv.values[type] >= num_bytes) {
96 		qgroup->rsv.values[type] -= num_bytes;
97 		return;
98 	}
99 #ifdef CONFIG_BTRFS_DEBUG
100 	WARN_RATELIMIT(1,
101 		"qgroup %llu %s reserved space underflow, have %llu to free %llu",
102 		qgroup->qgroupid, qgroup_rsv_type_str(type),
103 		qgroup->rsv.values[type], num_bytes);
104 #endif
105 	qgroup->rsv.values[type] = 0;
106 }
107 
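/*
 * Worked example of the clamping above (assumed values): releasing
 * num_bytes = 8K while rsv.values[type] = 4K must not wrap the u64
 * counter around; instead it warns (on debug builds) and resets the
 * counter to 0 so later qgroup_rsv_add() calls start from a sane value.
 */
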
108 static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info,
109 				     struct btrfs_qgroup *dest,
110 				     const struct btrfs_qgroup *src)
111 {
112 	int i;
113 
114 	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
115 		qgroup_rsv_add(fs_info, dest, src->rsv.values[i], i);
116 }
117 
118 static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info,
119 					 struct btrfs_qgroup *dest,
120 					 const struct btrfs_qgroup *src)
121 {
122 	int i;
123 
124 	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
125 		qgroup_rsv_release(fs_info, dest, src->rsv.values[i], i);
126 }
127 
128 static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
129 					   int mod)
130 {
131 	if (qg->old_refcnt < seq)
132 		qg->old_refcnt = seq;
133 	qg->old_refcnt += mod;
134 }
135 
136 static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
137 					   int mod)
138 {
139 	if (qg->new_refcnt < seq)
140 		qg->new_refcnt = seq;
141 	qg->new_refcnt += mod;
142 }
143 
144 static inline u64 btrfs_qgroup_get_old_refcnt(const struct btrfs_qgroup *qg, u64 seq)
145 {
146 	if (qg->old_refcnt < seq)
147 		return 0;
148 	return qg->old_refcnt - seq;
149 }
150 
151 static inline u64 btrfs_qgroup_get_new_refcnt(const struct btrfs_qgroup *qg, u64 seq)
152 {
153 	if (qg->new_refcnt < seq)
154 		return 0;
155 	return qg->new_refcnt - seq;
156 }
157 
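/*
 * Worked example of the seq-based refcounts (assumed values): with a
 * sequence number of 100, btrfs_qgroup_update_old_refcnt(qg, 100, 1)
 * first snaps old_refcnt up to 100 and then adds 1, so
 * btrfs_qgroup_get_old_refcnt(qg, 100) returns 1.  A qgroup untouched
 * in this round keeps old_refcnt < 100 and reads back as 0, which is
 * how a new sequence number starts a fresh count without clearing
 * every qgroup.
 */
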
158 static int
159 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
160 		   int init_flags);
161 static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);
162 
163 static int btrfs_qgroup_qgroupid_key_cmp(const void *key, const struct rb_node *node)
164 {
165 	const u64 *qgroupid = key;
166 	const struct btrfs_qgroup *qgroup = rb_entry(node, struct btrfs_qgroup, node);
167 
168 	if (qgroup->qgroupid < *qgroupid)
169 		return -1;
170 	else if (qgroup->qgroupid > *qgroupid)
171 		return 1;
172 
173 	return 0;
174 }
175 
176 /* must be called with qgroup_ioctl_lock held */
177 static struct btrfs_qgroup *find_qgroup_rb(const struct btrfs_fs_info *fs_info,
178 					   u64 qgroupid)
179 {
180 	struct rb_node *node;
181 
182 	node = rb_find(&qgroupid, &fs_info->qgroup_tree, btrfs_qgroup_qgroupid_key_cmp);
183 	return rb_entry_safe(node, struct btrfs_qgroup, node);
184 }
185 
186 static int btrfs_qgroup_qgroupid_cmp(struct rb_node *new, const struct rb_node *existing)
187 {
188 	const struct btrfs_qgroup *new_qgroup = rb_entry(new, struct btrfs_qgroup, node);
189 
190 	return btrfs_qgroup_qgroupid_key_cmp(&new_qgroup->qgroupid, existing);
191 }
192 
193 /*
194  * Add qgroup to the filesystem's qgroup tree.
195  *
196  * Must be called with qgroup_lock held and @prealloc preallocated.
197  *
198  * Ownership of @prealloc is transferred to this function, so the
199  * caller must no longer touch @prealloc.
200  */
201 static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
202 					  struct btrfs_qgroup *prealloc,
203 					  u64 qgroupid)
204 {
205 	struct rb_node *node;
206 
207 	/* Caller must have pre-allocated @prealloc. */
208 	ASSERT(prealloc);
209 
210 	prealloc->qgroupid = qgroupid;
211 	node = rb_find_add(&prealloc->node, &fs_info->qgroup_tree, btrfs_qgroup_qgroupid_cmp);
212 	if (node) {
213 		kfree(prealloc);
214 		return rb_entry(node, struct btrfs_qgroup, node);
215 	}
216 
217 	INIT_LIST_HEAD(&prealloc->groups);
218 	INIT_LIST_HEAD(&prealloc->members);
219 	INIT_LIST_HEAD(&prealloc->dirty);
220 	INIT_LIST_HEAD(&prealloc->iterator);
221 	INIT_LIST_HEAD(&prealloc->nested_iterator);
222 
223 	return prealloc;
224 }
225 
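/*
 * A minimal usage sketch of the ownership transfer above (hypothetical
 * caller, locking elided):
 *
 *	struct btrfs_qgroup *prealloc;
 *	struct btrfs_qgroup *qgroup;
 *
 *	prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
 *	if (!prealloc)
 *		return -ENOMEM;
 *	qgroup = add_qgroup_rb(fs_info, prealloc, qgroupid);
 *
 * From this point on @prealloc must not be touched: it was either
 * inserted (qgroup == prealloc) or freed because an entry existed.
 */
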
226 static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
227 {
228 	struct btrfs_qgroup_list *list;
229 
230 	list_del(&qgroup->dirty);
231 	while (!list_empty(&qgroup->groups)) {
232 		list = list_first_entry(&qgroup->groups,
233 					struct btrfs_qgroup_list, next_group);
234 		list_del(&list->next_group);
235 		list_del(&list->next_member);
236 		kfree(list);
237 	}
238 
239 	while (!list_empty(&qgroup->members)) {
240 		list = list_first_entry(&qgroup->members,
241 					struct btrfs_qgroup_list, next_member);
242 		list_del(&list->next_group);
243 		list_del(&list->next_member);
244 		kfree(list);
245 	}
246 }
247 
248 /* must be called with qgroup_lock held */
249 static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
250 {
251 	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);
252 
253 	if (!qgroup)
254 		return -ENOENT;
255 
256 	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
257 	__del_qgroup_rb(qgroup);
258 	return 0;
259 }
260 
261 /*
262  * Add relation specified by two qgroups.
263  *
264  * Must be called with qgroup_lock held, the ownership of @prealloc is
265  * transferred to this function and caller should not touch it anymore.
266  *
267  * Return: 0        on success
268  *         -ENOENT  if one of the qgroups is NULL
269  *         <0       other errors
270  */
271 static int __add_relation_rb(struct btrfs_qgroup_list *prealloc,
272 			     struct btrfs_qgroup *member,
273 			     struct btrfs_qgroup *parent)
274 {
275 	if (!member || !parent) {
276 		kfree(prealloc);
277 		return -ENOENT;
278 	}
279 
280 	prealloc->group = parent;
281 	prealloc->member = member;
282 	list_add_tail(&prealloc->next_group, &member->groups);
283 	list_add_tail(&prealloc->next_member, &parent->members);
284 
285 	return 0;
286 }
287 
288 /*
289  * Add relation specified by two qgroup ids.
290  *
291  * Must be called with qgroup_lock held.
292  *
293  * Return: 0        on success
294  *         -ENOENT  if one of the ids does not exist
295  *         <0       other errors
296  */
297 static int add_relation_rb(struct btrfs_fs_info *fs_info,
298 			   struct btrfs_qgroup_list *prealloc,
299 			   u64 memberid, u64 parentid)
300 {
301 	struct btrfs_qgroup *member;
302 	struct btrfs_qgroup *parent;
303 
304 	member = find_qgroup_rb(fs_info, memberid);
305 	parent = find_qgroup_rb(fs_info, parentid);
306 
307 	return __add_relation_rb(prealloc, member, parent);
308 }
309 
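/*
 * Sketch of the in-memory relation representation built above: a single
 * struct btrfs_qgroup_list links one (member, parent) pair into two
 * lists at once, so the relation can be walked from either side:
 *
 *	member->groups  --(next_group)-->  list  <--(next_member)--  parent->members
 *	                                   list->member == member
 *	                                   list->group  == parent
 */
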
310 /* Must be called with qgroup_lock held */
311 static int del_relation_rb(struct btrfs_fs_info *fs_info,
312 			   u64 memberid, u64 parentid)
313 {
314 	struct btrfs_qgroup *member;
315 	struct btrfs_qgroup *parent;
316 	struct btrfs_qgroup_list *list;
317 
318 	member = find_qgroup_rb(fs_info, memberid);
319 	parent = find_qgroup_rb(fs_info, parentid);
320 	if (!member || !parent)
321 		return -ENOENT;
322 
323 	list_for_each_entry(list, &member->groups, next_group) {
324 		if (list->group == parent) {
325 			list_del(&list->next_group);
326 			list_del(&list->next_member);
327 			kfree(list);
328 			return 0;
329 		}
330 	}
331 	return -ENOENT;
332 }
333 
334 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
335 int btrfs_verify_qgroup_counts(const struct btrfs_fs_info *fs_info, u64 qgroupid,
336 			       u64 rfer, u64 excl)
337 {
338 	struct btrfs_qgroup *qgroup;
339 
340 	qgroup = find_qgroup_rb(fs_info, qgroupid);
341 	if (!qgroup)
342 		return -EINVAL;
343 	if (qgroup->rfer != rfer || qgroup->excl != excl)
344 		return -EINVAL;
345 	return 0;
346 }
347 #endif
348 
349 __printf(2, 3)
350 static void qgroup_mark_inconsistent(struct btrfs_fs_info *fs_info, const char *fmt, ...)
351 {
352 	const u64 old_flags = fs_info->qgroup_flags;
353 
354 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
355 		return;
356 	fs_info->qgroup_flags |= (BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT |
357 				  BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN |
358 				  BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING);
359 	if (!(old_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT)) {
360 		struct va_format vaf;
361 		va_list args;
362 
363 		va_start(args, fmt);
364 		vaf.fmt = fmt;
365 		vaf.va = &args;
366 
367 		btrfs_warn_rl(fs_info, "qgroup marked inconsistent, %pV", &vaf);
368 		va_end(args);
369 	}
370 }
371 
372 static void qgroup_read_enable_gen(struct btrfs_fs_info *fs_info,
373 				   struct extent_buffer *leaf, int slot,
374 				   struct btrfs_qgroup_status_item *ptr)
375 {
376 	ASSERT(btrfs_fs_incompat(fs_info, SIMPLE_QUOTA));
377 	ASSERT(btrfs_item_size(leaf, slot) >= sizeof(*ptr));
378 	fs_info->qgroup_enable_gen = btrfs_qgroup_status_enable_gen(leaf, ptr);
379 }
380 
381 /*
382  * The full config is read in one go, only called from open_ctree()
383  * It doesn't use any locking, as at this point we're still single-threaded
384  */
385 int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
386 {
387 	struct btrfs_key key;
388 	struct btrfs_key found_key;
389 	struct btrfs_root *quota_root = fs_info->quota_root;
390 	struct btrfs_path *path = NULL;
391 	struct extent_buffer *l;
392 	int slot;
393 	int ret = 0;
394 	u64 flags = 0;
395 	u64 rescan_progress = 0;
396 
397 	if (!fs_info->quota_root)
398 		return 0;
399 
400 	path = btrfs_alloc_path();
401 	if (!path) {
402 		ret = -ENOMEM;
403 		goto out;
404 	}
405 
406 	ret = btrfs_sysfs_add_qgroups(fs_info);
407 	if (ret < 0)
408 		goto out;
409 	/* default this to quota off, in case no status key is found */
410 	fs_info->qgroup_flags = 0;
411 
412 	/*
413 	 * pass 1: read status, all qgroup infos and limits
414 	 */
415 	key.objectid = 0;
416 	key.type = 0;
417 	key.offset = 0;
418 	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
419 	if (ret)
420 		goto out;
421 
422 	while (1) {
423 		struct btrfs_qgroup *qgroup;
424 
425 		slot = path->slots[0];
426 		l = path->nodes[0];
427 		btrfs_item_key_to_cpu(l, &found_key, slot);
428 
429 		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
430 			struct btrfs_qgroup_status_item *ptr;
431 
432 			ptr = btrfs_item_ptr(l, slot,
433 					     struct btrfs_qgroup_status_item);
434 
435 			if (btrfs_qgroup_status_version(l, ptr) !=
436 			    BTRFS_QGROUP_STATUS_VERSION) {
437 				btrfs_err(fs_info,
438 				 "old qgroup version, quota disabled");
439 				goto out;
440 			}
441 			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l, ptr);
442 			if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE)
443 				qgroup_read_enable_gen(fs_info, l, slot, ptr);
444 			else if (btrfs_qgroup_status_generation(l, ptr) != fs_info->generation)
445 				qgroup_mark_inconsistent(fs_info, "qgroup generation mismatch");
446 			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
447 			goto next1;
448 		}
449 
450 		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
451 		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
452 			goto next1;
453 
454 		qgroup = find_qgroup_rb(fs_info, found_key.offset);
455 		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
456 		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY))
457 			qgroup_mark_inconsistent(fs_info, "inconsistent qgroup config");
458 		if (!qgroup) {
459 			struct btrfs_qgroup *prealloc;
460 			struct btrfs_root *tree_root = fs_info->tree_root;
461 
462 			prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
463 			if (!prealloc) {
464 				ret = -ENOMEM;
465 				goto out;
466 			}
467 			qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset);
468 			/*
469 			 * If a qgroup exists for a subvolume ID, it is possible
470 			 * that the subvolume has been deleted, in which case
471 			 * reusing that ID would lead to incorrect accounting.
472 			 *
473 			 * Ensure that we skip any such subvol ids.
474 			 *
475 			 * We don't need to lock because this is only called
476 			 * during mount before we start doing things like creating
477 			 * subvolumes.
478 			 */
479 			if (btrfs_is_fstree(qgroup->qgroupid) &&
480 			    qgroup->qgroupid > tree_root->free_objectid)
481 				/*
482 				 * Don't need to check against BTRFS_LAST_FREE_OBJECTID,
483 				 * as it will get checked on the next call to
484 				 * btrfs_get_free_objectid.
485 				 */
486 				tree_root->free_objectid = qgroup->qgroupid + 1;
487 		}
488 		ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
489 		if (ret < 0)
490 			goto out;
491 
492 		switch (found_key.type) {
493 		case BTRFS_QGROUP_INFO_KEY: {
494 			struct btrfs_qgroup_info_item *ptr;
495 
496 			ptr = btrfs_item_ptr(l, slot,
497 					     struct btrfs_qgroup_info_item);
498 			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
499 			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
500 			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
501 			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
502 			/* generation currently unused */
503 			break;
504 		}
505 		case BTRFS_QGROUP_LIMIT_KEY: {
506 			struct btrfs_qgroup_limit_item *ptr;
507 
508 			ptr = btrfs_item_ptr(l, slot,
509 					     struct btrfs_qgroup_limit_item);
510 			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
511 			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
512 			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
513 			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
514 			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
515 			break;
516 		}
517 		}
518 next1:
519 		ret = btrfs_next_item(quota_root, path);
520 		if (ret < 0)
521 			goto out;
522 		if (ret)
523 			break;
524 	}
525 	btrfs_release_path(path);
526 
527 	/*
528 	 * pass 2: read all qgroup relations
529 	 */
530 	key.objectid = 0;
531 	key.type = BTRFS_QGROUP_RELATION_KEY;
532 	key.offset = 0;
533 	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
534 	if (ret)
535 		goto out;
536 	while (1) {
537 		struct btrfs_qgroup_list *list = NULL;
538 
539 		slot = path->slots[0];
540 		l = path->nodes[0];
541 		btrfs_item_key_to_cpu(l, &found_key, slot);
542 
543 		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
544 			goto next2;
545 
546 		if (found_key.objectid > found_key.offset) {
547 			/* parent <- member, not needed to build config */
548 			/* FIXME should we omit the key completely? */
549 			goto next2;
550 		}
551 
552 		list = kzalloc(sizeof(*list), GFP_KERNEL);
553 		if (!list) {
554 			ret = -ENOMEM;
555 			goto out;
556 		}
557 		ret = add_relation_rb(fs_info, list, found_key.objectid,
558 				      found_key.offset);
559 		list = NULL;
560 		if (ret == -ENOENT) {
561 			btrfs_warn(fs_info,
562 				"orphan qgroup relation 0x%llx->0x%llx",
563 				found_key.objectid, found_key.offset);
564 			ret = 0;	/* ignore the error */
565 		}
566 		if (ret)
567 			goto out;
568 next2:
569 		ret = btrfs_next_item(quota_root, path);
570 		if (ret < 0)
571 			goto out;
572 		if (ret)
573 			break;
574 	}
575 out:
576 	btrfs_free_path(path);
577 	fs_info->qgroup_flags |= flags;
578 	if (ret >= 0) {
579 		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)
580 			set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
581 		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
582 			ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
583 	} else {
584 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
585 		btrfs_sysfs_del_qgroups(fs_info);
586 	}
587 
588 	return ret < 0 ? ret : 0;
589 }
590 
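/*
 * For reference, the quota tree keys consumed by the two passes above:
 *
 *	(0,   BTRFS_QGROUP_STATUS_KEY,   0)         global status item
 *	(0,   BTRFS_QGROUP_INFO_KEY,     qgroupid)  rfer/excl numbers
 *	(0,   BTRFS_QGROUP_LIMIT_KEY,    qgroupid)  limits and reservations
 *	(src, BTRFS_QGROUP_RELATION_KEY, dst)       one item per direction
 */
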
591 /*
592  * Called in close_ctree() when quota is still enabled.  This verifies we don't
593  * leak some reserved space.
594  *
595  * Return false if no reserved space is left.
596  * Return true if some reserved space is leaked.
597  */
598 bool btrfs_check_quota_leak(const struct btrfs_fs_info *fs_info)
599 {
600 	struct rb_node *node;
601 	bool ret = false;
602 
603 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED)
604 		return ret;
605 	/*
606 	 * Since we're unmounting, there is no race and no need to grab the
607 	 * qgroup lock.  And we don't walk the tree in post-order, to provide
608 	 * a more user friendly sorted result.
609 	 */
610 	for (node = rb_first(&fs_info->qgroup_tree); node; node = rb_next(node)) {
611 		struct btrfs_qgroup *qgroup;
612 		int i;
613 
614 		qgroup = rb_entry(node, struct btrfs_qgroup, node);
615 		for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) {
616 			if (qgroup->rsv.values[i]) {
617 				ret = true;
618 				btrfs_warn(fs_info,
619 		"qgroup %hu/%llu has unreleased space, type %d rsv %llu",
620 				   btrfs_qgroup_level(qgroup->qgroupid),
621 				   btrfs_qgroup_subvolid(qgroup->qgroupid),
622 				   i, qgroup->rsv.values[i]);
623 			}
624 		}
625 	}
626 	return ret;
627 }
628 
629 /*
630  * This is called from close_ctree() or open_ctree() or btrfs_quota_disable(),
631  * first two are in single-threaded paths.
632  */
633 void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
634 {
635 	struct rb_node *n;
636 	struct btrfs_qgroup *qgroup;
637 
638 	/*
639 	 * btrfs_quota_disable() can be called concurrently with
640 	 * btrfs_qgroup_rescan() -> qgroup_rescan_zero_tracking(), so take the
641 	 * lock.
642 	 */
643 	spin_lock(&fs_info->qgroup_lock);
644 	while ((n = rb_first(&fs_info->qgroup_tree))) {
645 		qgroup = rb_entry(n, struct btrfs_qgroup, node);
646 		rb_erase(n, &fs_info->qgroup_tree);
647 		__del_qgroup_rb(qgroup);
648 		spin_unlock(&fs_info->qgroup_lock);
649 		btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
650 		kfree(qgroup);
651 		spin_lock(&fs_info->qgroup_lock);
652 	}
653 	spin_unlock(&fs_info->qgroup_lock);
654 
655 	btrfs_sysfs_del_qgroups(fs_info);
656 }
657 
658 static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
659 				    u64 dst)
660 {
661 	int ret;
662 	struct btrfs_root *quota_root = trans->fs_info->quota_root;
663 	struct btrfs_path *path;
664 	struct btrfs_key key;
665 
666 	path = btrfs_alloc_path();
667 	if (!path)
668 		return -ENOMEM;
669 
670 	key.objectid = src;
671 	key.type = BTRFS_QGROUP_RELATION_KEY;
672 	key.offset = dst;
673 
674 	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);
675 	btrfs_free_path(path);
676 	return ret;
677 }
678 
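/*
 * Note on usage (see btrfs_add_qgroup_relation() below): every relation
 * is inserted twice, once as (src, BTRFS_QGROUP_RELATION_KEY, dst) and
 * once mirrored as (dst, BTRFS_QGROUP_RELATION_KEY, src).  This is why
 * the config reader above skips items whose objectid > offset.
 */
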
679 static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
680 				    u64 dst)
681 {
682 	int ret;
683 	struct btrfs_root *quota_root = trans->fs_info->quota_root;
684 	struct btrfs_path *path;
685 	struct btrfs_key key;
686 
687 	path = btrfs_alloc_path();
688 	if (!path)
689 		return -ENOMEM;
690 
691 	key.objectid = src;
692 	key.type = BTRFS_QGROUP_RELATION_KEY;
693 	key.offset = dst;
694 
695 	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
696 	if (ret < 0)
697 		goto out;
698 
699 	if (ret > 0) {
700 		ret = -ENOENT;
701 		goto out;
702 	}
703 
704 	ret = btrfs_del_item(trans, quota_root, path);
705 out:
706 	btrfs_free_path(path);
707 	return ret;
708 }
709 
710 static int add_qgroup_item(struct btrfs_trans_handle *trans,
711 			   struct btrfs_root *quota_root, u64 qgroupid)
712 {
713 	int ret;
714 	struct btrfs_path *path;
715 	struct btrfs_qgroup_info_item *qgroup_info;
716 	struct btrfs_qgroup_limit_item *qgroup_limit;
717 	struct extent_buffer *leaf;
718 	struct btrfs_key key;
719 
720 	if (btrfs_is_testing(quota_root->fs_info))
721 		return 0;
722 
723 	path = btrfs_alloc_path();
724 	if (!path)
725 		return -ENOMEM;
726 
727 	key.objectid = 0;
728 	key.type = BTRFS_QGROUP_INFO_KEY;
729 	key.offset = qgroupid;
730 
731 	/*
732 	 * Avoid a transaction abort by catching -EEXIST here. In that
733 	 * case, we proceed by re-initializing the existing structure
734 	 * on disk.
735 	 */
736 
737 	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
738 				      sizeof(*qgroup_info));
739 	if (ret && ret != -EEXIST)
740 		goto out;
741 
742 	leaf = path->nodes[0];
743 	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
744 				 struct btrfs_qgroup_info_item);
745 	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
746 	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
747 	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
748 	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
749 	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);
750 
751 	btrfs_release_path(path);
752 
753 	key.type = BTRFS_QGROUP_LIMIT_KEY;
754 	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
755 				      sizeof(*qgroup_limit));
756 	if (ret && ret != -EEXIST)
757 		goto out;
758 
759 	leaf = path->nodes[0];
760 	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
761 				  struct btrfs_qgroup_limit_item);
762 	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
763 	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
764 	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
765 	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
766 	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);
767 
768 	ret = 0;
769 out:
770 	btrfs_free_path(path);
771 	return ret;
772 }
773 
774 static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid)
775 {
776 	int ret;
777 	struct btrfs_root *quota_root = trans->fs_info->quota_root;
778 	struct btrfs_path *path;
779 	struct btrfs_key key;
780 
781 	path = btrfs_alloc_path();
782 	if (!path)
783 		return -ENOMEM;
784 
785 	key.objectid = 0;
786 	key.type = BTRFS_QGROUP_INFO_KEY;
787 	key.offset = qgroupid;
788 	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
789 	if (ret < 0)
790 		goto out;
791 
792 	if (ret > 0) {
793 		ret = -ENOENT;
794 		goto out;
795 	}
796 
797 	ret = btrfs_del_item(trans, quota_root, path);
798 	if (ret)
799 		goto out;
800 
801 	btrfs_release_path(path);
802 
803 	key.type = BTRFS_QGROUP_LIMIT_KEY;
804 	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
805 	if (ret < 0)
806 		goto out;
807 
808 	if (ret > 0) {
809 		ret = -ENOENT;
810 		goto out;
811 	}
812 
813 	ret = btrfs_del_item(trans, quota_root, path);
814 
815 out:
816 	btrfs_free_path(path);
817 	return ret;
818 }
819 
820 static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
821 				    struct btrfs_qgroup *qgroup)
822 {
823 	struct btrfs_root *quota_root = trans->fs_info->quota_root;
824 	struct btrfs_path *path;
825 	struct btrfs_key key;
826 	struct extent_buffer *l;
827 	struct btrfs_qgroup_limit_item *qgroup_limit;
828 	int ret;
829 	int slot;
830 
831 	key.objectid = 0;
832 	key.type = BTRFS_QGROUP_LIMIT_KEY;
833 	key.offset = qgroup->qgroupid;
834 
835 	path = btrfs_alloc_path();
836 	if (!path)
837 		return -ENOMEM;
838 
839 	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
840 	if (ret > 0)
841 		ret = -ENOENT;
842 
843 	if (ret)
844 		goto out;
845 
846 	l = path->nodes[0];
847 	slot = path->slots[0];
848 	qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
849 	btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
850 	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
851 	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
852 	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
853 	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);
854 out:
855 	btrfs_free_path(path);
856 	return ret;
857 }
858 
859 static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
860 				   struct btrfs_qgroup *qgroup)
861 {
862 	struct btrfs_fs_info *fs_info = trans->fs_info;
863 	struct btrfs_root *quota_root = fs_info->quota_root;
864 	struct btrfs_path *path;
865 	struct btrfs_key key;
866 	struct extent_buffer *l;
867 	struct btrfs_qgroup_info_item *qgroup_info;
868 	int ret;
869 	int slot;
870 
871 	if (btrfs_is_testing(fs_info))
872 		return 0;
873 
874 	key.objectid = 0;
875 	key.type = BTRFS_QGROUP_INFO_KEY;
876 	key.offset = qgroup->qgroupid;
877 
878 	path = btrfs_alloc_path();
879 	if (!path)
880 		return -ENOMEM;
881 
882 	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
883 	if (ret > 0)
884 		ret = -ENOENT;
885 
886 	if (ret)
887 		goto out;
888 
889 	l = path->nodes[0];
890 	slot = path->slots[0];
891 	qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
892 	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
893 	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
894 	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
895 	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
896 	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);
897 out:
898 	btrfs_free_path(path);
899 	return ret;
900 }
901 
902 static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
903 {
904 	struct btrfs_fs_info *fs_info = trans->fs_info;
905 	struct btrfs_root *quota_root = fs_info->quota_root;
906 	struct btrfs_path *path;
907 	struct btrfs_key key;
908 	struct extent_buffer *l;
909 	struct btrfs_qgroup_status_item *ptr;
910 	int ret;
911 	int slot;
912 
913 	key.objectid = 0;
914 	key.type = BTRFS_QGROUP_STATUS_KEY;
915 	key.offset = 0;
916 
917 	path = btrfs_alloc_path();
918 	if (!path)
919 		return -ENOMEM;
920 
921 	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
922 	if (ret > 0)
923 		ret = -ENOENT;
924 
925 	if (ret)
926 		goto out;
927 
928 	l = path->nodes[0];
929 	slot = path->slots[0];
930 	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
931 	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags &
932 				      BTRFS_QGROUP_STATUS_FLAGS_MASK);
933 	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
934 	btrfs_set_qgroup_status_rescan(l, ptr,
935 				fs_info->qgroup_rescan_progress.objectid);
936 out:
937 	btrfs_free_path(path);
938 	return ret;
939 }
940 
941 /*
942  * called with qgroup_lock held
943  */
944 static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
945 				  struct btrfs_root *root)
946 {
947 	struct btrfs_path *path;
948 	struct btrfs_key key;
949 	struct extent_buffer *leaf = NULL;
950 	int ret;
951 	int nr = 0;
952 
953 	path = btrfs_alloc_path();
954 	if (!path)
955 		return -ENOMEM;
956 
957 	key.objectid = 0;
958 	key.type = 0;
959 	key.offset = 0;
960 
961 	while (1) {
962 		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
963 		if (ret < 0)
964 			goto out;
965 		leaf = path->nodes[0];
966 		nr = btrfs_header_nritems(leaf);
967 		if (!nr)
968 			break;
969 		/*
970 		 * Delete the items one whole leaf at a time,
971 		 * since the entire tree is going
972 		 * to be deleted anyway.
973 		 */
974 		path->slots[0] = 0;
975 		ret = btrfs_del_items(trans, root, path, 0, nr);
976 		if (ret)
977 			goto out;
978 
979 		btrfs_release_path(path);
980 	}
981 	ret = 0;
982 out:
983 	btrfs_free_path(path);
984 	return ret;
985 }
986 
987 int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
988 		       struct btrfs_ioctl_quota_ctl_args *quota_ctl_args)
989 {
990 	struct btrfs_root *quota_root;
991 	struct btrfs_root *tree_root = fs_info->tree_root;
992 	struct btrfs_path *path = NULL;
993 	struct btrfs_qgroup_status_item *ptr;
994 	struct extent_buffer *leaf;
995 	struct btrfs_key key;
996 	struct btrfs_key found_key;
997 	struct btrfs_qgroup *qgroup = NULL;
998 	struct btrfs_qgroup *prealloc = NULL;
999 	struct btrfs_trans_handle *trans = NULL;
1000 	const bool simple = (quota_ctl_args->cmd == BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA);
1001 	int ret = 0;
1002 	int slot;
1003 
1004 	/*
1005 	 * We need to have subvol_sem write locked, to prevent races between
1006 	 * concurrent tasks trying to enable quotas, because we will unlock
1007 	 * and relock qgroup_ioctl_lock before setting fs_info->quota_root
1008 	 * and before setting BTRFS_FS_QUOTA_ENABLED.
1009 	 */
1010 	lockdep_assert_held_write(&fs_info->subvol_sem);
1011 
1012 	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
1013 		btrfs_err(fs_info,
1014 			  "qgroups are currently unsupported in extent tree v2");
1015 		return -EINVAL;
1016 	}
1017 
1018 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1019 	if (fs_info->quota_root)
1020 		goto out;
1021 
1022 	ret = btrfs_sysfs_add_qgroups(fs_info);
1023 	if (ret < 0)
1024 		goto out;
1025 
1026 	/*
1027 	 * Unlock qgroup_ioctl_lock before starting the transaction. This is to
1028 	 * avoid lock acquisition inversion problems (reported by lockdep) between
1029 	 * qgroup_ioctl_lock and the vfs freeze semaphores, acquired when we
1030 	 * start a transaction.
1031 	 * After we started the transaction lock qgroup_ioctl_lock again and
1032 	 * check if someone else created the quota root in the meanwhile. If so,
1033 	 * just return success and release the transaction handle.
1034 	 *
1035 	 * Also we don't need to worry about someone else calling
1036 	 * btrfs_sysfs_add_qgroups() after we unlock and getting an error because
1037 	 * that function returns 0 (success) when the sysfs entries already exist.
1038 	 */
1039 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1040 
1041 	/*
1042 	 * 1 for quota root item
1043 	 * 1 for BTRFS_QGROUP_STATUS item
1044 	 *
1045 	 * We also need 2*n items for the QGROUP_INFO/QGROUP_LIMIT items
1046 	 * of the n subvolumes.  However those are not currently reserved,
1047 	 * since reserving them all up front would be overkill.
1048 	 */
1049 	trans = btrfs_start_transaction(tree_root, 2);
1050 
1051 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1052 	if (IS_ERR(trans)) {
1053 		ret = PTR_ERR(trans);
1054 		trans = NULL;
1055 		goto out;
1056 	}
1057 
1058 	if (fs_info->quota_root)
1059 		goto out;
1060 
1061 	/*
1062 	 * initially create the quota tree
1063 	 */
1064 	quota_root = btrfs_create_tree(trans, BTRFS_QUOTA_TREE_OBJECTID);
1065 	if (IS_ERR(quota_root)) {
1066 		ret = PTR_ERR(quota_root);
1067 		btrfs_abort_transaction(trans, ret);
1068 		goto out;
1069 	}
1070 
1071 	path = btrfs_alloc_path();
1072 	if (!path) {
1073 		ret = -ENOMEM;
1074 		btrfs_abort_transaction(trans, ret);
1075 		goto out_free_root;
1076 	}
1077 
1078 	key.objectid = 0;
1079 	key.type = BTRFS_QGROUP_STATUS_KEY;
1080 	key.offset = 0;
1081 
1082 	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
1083 				      sizeof(*ptr));
1084 	if (ret) {
1085 		btrfs_abort_transaction(trans, ret);
1086 		goto out_free_path;
1087 	}
1088 
1089 	leaf = path->nodes[0];
1090 	ptr = btrfs_item_ptr(leaf, path->slots[0],
1091 				 struct btrfs_qgroup_status_item);
1092 	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
1093 	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
1094 	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON;
1095 	if (simple) {
1096 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE;
1097 		btrfs_set_fs_incompat(fs_info, SIMPLE_QUOTA);
1098 		btrfs_set_qgroup_status_enable_gen(leaf, ptr, trans->transid);
1099 	} else {
1100 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1101 	}
1102 	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags &
1103 				      BTRFS_QGROUP_STATUS_FLAGS_MASK);
1104 	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);
1105 
1106 	key.objectid = 0;
1107 	key.type = BTRFS_ROOT_REF_KEY;
1108 	key.offset = 0;
1109 
1110 	btrfs_release_path(path);
1111 	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
1112 	if (ret > 0)
1113 		goto out_add_root;
1114 	if (ret < 0) {
1115 		btrfs_abort_transaction(trans, ret);
1116 		goto out_free_path;
1117 	}
1118 
1119 	while (1) {
1120 		slot = path->slots[0];
1121 		leaf = path->nodes[0];
1122 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
1123 
1124 		if (found_key.type == BTRFS_ROOT_REF_KEY) {
1125 
1126 			/* Release locks on tree_root before we access quota_root */
1127 			btrfs_release_path(path);
1128 
1129 			/* We should not have a stray @prealloc pointer. */
1130 			ASSERT(prealloc == NULL);
1131 			prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
1132 			if (!prealloc) {
1133 				ret = -ENOMEM;
1134 				btrfs_abort_transaction(trans, ret);
1135 				goto out_free_path;
1136 			}
1137 
1138 			ret = add_qgroup_item(trans, quota_root,
1139 					      found_key.offset);
1140 			if (ret) {
1141 				btrfs_abort_transaction(trans, ret);
1142 				goto out_free_path;
1143 			}
1144 
1145 			qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset);
1146 			prealloc = NULL;
1147 			ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
1148 			if (ret < 0) {
1149 				btrfs_abort_transaction(trans, ret);
1150 				goto out_free_path;
1151 			}
1152 			ret = btrfs_search_slot_for_read(tree_root, &found_key,
1153 							 path, 1, 0);
1154 			if (ret < 0) {
1155 				btrfs_abort_transaction(trans, ret);
1156 				goto out_free_path;
1157 			}
1158 			if (ret > 0) {
1159 				/*
1160 				 * Shouldn't happen, but in case it does we
1161 				 * don't need to do the btrfs_next_item, just
1162 				 * continue.
1163 				 */
1164 				continue;
1165 			}
1166 		}
1167 		ret = btrfs_next_item(tree_root, path);
1168 		if (ret < 0) {
1169 			btrfs_abort_transaction(trans, ret);
1170 			goto out_free_path;
1171 		}
1172 		if (ret)
1173 			break;
1174 	}
1175 
1176 out_add_root:
1177 	btrfs_release_path(path);
1178 	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
1179 	if (ret) {
1180 		btrfs_abort_transaction(trans, ret);
1181 		goto out_free_path;
1182 	}
1183 
1184 	ASSERT(prealloc == NULL);
1185 	prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
1186 	if (!prealloc) {
1187 		ret = -ENOMEM;
1188 		goto out_free_path;
1189 	}
1190 	qgroup = add_qgroup_rb(fs_info, prealloc, BTRFS_FS_TREE_OBJECTID);
1191 	prealloc = NULL;
1192 	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
1193 	if (ret < 0) {
1194 		btrfs_abort_transaction(trans, ret);
1195 		goto out_free_path;
1196 	}
1197 
1198 	fs_info->qgroup_enable_gen = trans->transid;
1199 
1200 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1201 	/*
1202 	 * Commit the transaction while not holding qgroup_ioctl_lock, to avoid
1203 	 * a deadlock with tasks concurrently doing other qgroup operations, such
1204 	 * adding/removing qgroups or adding/deleting qgroup relations for example,
1205 	 * because all qgroup operations first start or join a transaction and then
1206 	 * lock the qgroup_ioctl_lock mutex.
1207 	 * We are safe from a concurrent task trying to enable quotas, by calling
1208 	 * this function, since we are serialized by fs_info->subvol_sem.
1209 	 */
1210 	ret = btrfs_commit_transaction(trans);
1211 	trans = NULL;
1212 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1213 	if (ret)
1214 		goto out_free_path;
1215 
1216 	/*
1217 	 * Set quota enabled flag after committing the transaction, to avoid
1218 	 * deadlocks on fs_info->qgroup_ioctl_lock with concurrent snapshot
1219 	 * creation.
1220 	 */
1221 	spin_lock(&fs_info->qgroup_lock);
1222 	fs_info->quota_root = quota_root;
1223 	set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1224 	spin_unlock(&fs_info->qgroup_lock);
1225 
1226 	/* Skip rescan for simple qgroups. */
1227 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
1228 		goto out_free_path;
1229 
1230 	ret = qgroup_rescan_init(fs_info, 0, 1);
1231 	if (!ret) {
1232 		qgroup_rescan_zero_tracking(fs_info);
1233 		fs_info->qgroup_rescan_running = true;
1234 		btrfs_queue_work(fs_info->qgroup_rescan_workers,
1235 				 &fs_info->qgroup_rescan_work);
1236 	} else {
1237 		/*
1238 		 * We have set both BTRFS_FS_QUOTA_ENABLED and
1239 		 * BTRFS_QGROUP_STATUS_FLAG_ON, so we can only fail with
1240 		 * -EINPROGRESS. That can happen because someone started the
1241 		 * rescan worker by calling quota rescan ioctl before we
1242 		 * attempted to initialize the rescan worker. Failure due to
1243 		 * quotas disabled in the meanwhile is not possible, because
1244 		 * we are holding a write lock on fs_info->subvol_sem, which
1245 		 * is also acquired when disabling quotas.
1246 		 * Ignore such error, and any other error would need to undo
1247 		 * everything we did in the transaction we just committed.
1248 		 */
1249 		ASSERT(ret == -EINPROGRESS);
1250 		ret = 0;
1251 	}
1252 
1253 out_free_path:
1254 	btrfs_free_path(path);
1255 out_free_root:
1256 	if (ret)
1257 		btrfs_put_root(quota_root);
1258 out:
1259 	if (ret)
1260 		btrfs_sysfs_del_qgroups(fs_info);
1261 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1262 	if (ret && trans)
1263 		btrfs_end_transaction(trans);
1264 	else if (trans)
1265 		ret = btrfs_end_transaction(trans);
1266 	kfree(prealloc);
1267 	return ret;
1268 }
1269 
1270 /*
1271  * It is possible to have outstanding ordered extents which reserved bytes
1272  * before we disabled. We need to fully flush delalloc and ordered extents,
1273  * and then commit, to ensure that we don't leak such reservations, only to
1274  * have them come back if we re-enable.
1275  *
1276  * - enable simple quotas
1277  * - reserve space
1278  * - release it, store rsv_bytes in OE
1279  * - disable quotas
1280  * - enable simple quotas (qgroup rsv are all 0)
1281  * - OE finishes
1282  * - run delayed refs
1283  * - free rsv_bytes, resulting in miscounting or even underflow
1284  */
1285 static int flush_reservations(struct btrfs_fs_info *fs_info)
1286 {
1287 	int ret;
1288 
1289 	ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false);
1290 	if (ret)
1291 		return ret;
1292 	btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
1293 
1294 	return btrfs_commit_current_transaction(fs_info->tree_root);
1295 }
1296 
1297 int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
1298 {
1299 	struct btrfs_root *quota_root = NULL;
1300 	struct btrfs_trans_handle *trans = NULL;
1301 	int ret = 0;
1302 
1303 	/*
1304 	 * We need to have subvol_sem write locked to prevent races with
1305 	 * snapshot creation.
1306 	 */
1307 	lockdep_assert_held_write(&fs_info->subvol_sem);
1308 
1309 	/*
1310 	 * Relocation will mess with backrefs, so make sure we have the
1311 	 * cleaner_mutex held to protect us from relocate.
1312 	 */
1313 	lockdep_assert_held(&fs_info->cleaner_mutex);
1314 
1315 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1316 	if (!fs_info->quota_root)
1317 		goto out;
1318 
1319 	/*
1320 	 * Unlock the qgroup_ioctl_lock mutex before waiting for the rescan worker to
1321 	 * complete. Otherwise we can deadlock because btrfs_remove_qgroup() needs
1322 	 * to lock that mutex while holding a transaction handle and the rescan
1323 	 * worker needs to commit a transaction.
1324 	 */
1325 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1326 
1327 	/*
1328 	 * Request qgroup rescan worker to complete and wait for it. This wait
1329 	 * must be done before transaction start for quota disable since it may
1330 	 * deadlock with transaction by the qgroup rescan worker.
1331 	 */
1332 	clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1333 	btrfs_qgroup_wait_for_completion(fs_info, false);
1334 
1335 	/*
1336 	 * We have nothing held here and no trans handle, just return the error
1337 	 * if there is one and set back the quota enabled bit since we didn't
1338 	 * actually disable quotas.
1339 	 */
1340 	ret = flush_reservations(fs_info);
1341 	if (ret) {
1342 		set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1343 		return ret;
1344 	}
1345 
1346 	/*
1347 	 * 1 For the root item
1348 	 *
1349 	 * We should also reserve enough items for the quota tree deletion in
1350 	 * btrfs_clean_quota_tree but this is not done.
1351 	 *
1352 	 * Also, we must always start a transaction without holding the mutex
1353 	 * qgroup_ioctl_lock, see btrfs_quota_enable().
1354 	 */
1355 	trans = btrfs_start_transaction(fs_info->tree_root, 1);
1356 
1357 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1358 	if (IS_ERR(trans)) {
1359 		ret = PTR_ERR(trans);
1360 		trans = NULL;
1361 		set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1362 		goto out;
1363 	}
1364 
1365 	if (!fs_info->quota_root)
1366 		goto out;
1367 
1368 	spin_lock(&fs_info->qgroup_lock);
1369 	quota_root = fs_info->quota_root;
1370 	fs_info->quota_root = NULL;
1371 	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
1372 	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE;
1373 	fs_info->qgroup_drop_subtree_thres = BTRFS_QGROUP_DROP_SUBTREE_THRES_DEFAULT;
1374 	spin_unlock(&fs_info->qgroup_lock);
1375 
1376 	btrfs_free_qgroup_config(fs_info);
1377 
1378 	ret = btrfs_clean_quota_tree(trans, quota_root);
1379 	if (ret) {
1380 		btrfs_abort_transaction(trans, ret);
1381 		goto out;
1382 	}
1383 
1384 	ret = btrfs_del_root(trans, &quota_root->root_key);
1385 	if (ret) {
1386 		btrfs_abort_transaction(trans, ret);
1387 		goto out;
1388 	}
1389 
1390 	spin_lock(&fs_info->trans_lock);
1391 	list_del(&quota_root->dirty_list);
1392 	spin_unlock(&fs_info->trans_lock);
1393 
1394 	btrfs_tree_lock(quota_root->node);
1395 	btrfs_clear_buffer_dirty(trans, quota_root->node);
1396 	btrfs_tree_unlock(quota_root->node);
1397 	ret = btrfs_free_tree_block(trans, btrfs_root_id(quota_root),
1398 				    quota_root->node, 0, 1);
1399 
1400 	if (ret < 0)
1401 		btrfs_abort_transaction(trans, ret);
1402 
1403 out:
1404 	btrfs_put_root(quota_root);
1405 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1406 	if (ret && trans)
1407 		btrfs_end_transaction(trans);
1408 	else if (trans)
1409 		ret = btrfs_commit_transaction(trans);
1410 	return ret;
1411 }
1412 
1413 static void qgroup_dirty(struct btrfs_fs_info *fs_info,
1414 			 struct btrfs_qgroup *qgroup)
1415 {
1416 	if (list_empty(&qgroup->dirty))
1417 		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
1418 }
1419 
1420 static void qgroup_iterator_add(struct list_head *head, struct btrfs_qgroup *qgroup)
1421 {
1422 	if (!list_empty(&qgroup->iterator))
1423 		return;
1424 
1425 	list_add_tail(&qgroup->iterator, head);
1426 }
1427 
1428 static void qgroup_iterator_clean(struct list_head *head)
1429 {
1430 	while (!list_empty(head)) {
1431 		struct btrfs_qgroup *qgroup;
1432 
1433 		qgroup = list_first_entry(head, struct btrfs_qgroup, iterator);
1434 		list_del_init(&qgroup->iterator);
1435 	}
1436 }
1437 
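/*
 * A minimal sketch of the iterator pattern built on the two helpers
 * above: walk a qgroup and all of its ancestors without recursion by
 * appending parents while the list is being iterated
 * (list_for_each_entry() picks up entries appended behind the cursor):
 *
 *	LIST_HEAD(head);
 *	struct btrfs_qgroup_list *glist;
 *
 *	qgroup_iterator_add(&head, qgroup);
 *	list_for_each_entry(qgroup, &head, iterator) {
 *		(account @qgroup here)
 *		list_for_each_entry(glist, &qgroup->groups, next_group)
 *			qgroup_iterator_add(&head, glist->group);
 *	}
 *	qgroup_iterator_clean(&head);
 */
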
1438 /*
1439  * The easy accounting, we're updating qgroup relationship whose child qgroup
1440  * only has exclusive extents.
1441  *
1442  * In this case, all exclusive extents will also be exclusive for parent, so
1443  * excl/rfer just get added/removed.
1444  *
1445  * The same applies to qgroup reservation space, which must also be
1446  * added to/removed from the parent.
1447  * Otherwise, when the child later releases its reservation, the parent
1448  * would underflow its own reservation (in the relationship-adding case).
1449  *
1450  * Caller should hold fs_info->qgroup_lock.
1451  */
1452 static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info, u64 ref_root,
1453 				    struct btrfs_qgroup *src, int sign)
1454 {
1455 	struct btrfs_qgroup *qgroup;
1456 	LIST_HEAD(qgroup_list);
1457 	u64 num_bytes = src->excl;
1458 	int ret = 0;
1459 
1460 	qgroup = find_qgroup_rb(fs_info, ref_root);
1461 	if (!qgroup)
1462 		goto out;
1463 
1464 	qgroup_iterator_add(&qgroup_list, qgroup);
1465 	list_for_each_entry(qgroup, &qgroup_list, iterator) {
1466 		struct btrfs_qgroup_list *glist;
1467 
1468 		qgroup->rfer += sign * num_bytes;
1469 		qgroup->rfer_cmpr += sign * num_bytes;
1470 
1471 		WARN_ON(sign < 0 && qgroup->excl < num_bytes);
1472 		qgroup->excl += sign * num_bytes;
1473 		qgroup->excl_cmpr += sign * num_bytes;
1474 
1475 		if (sign > 0)
1476 			qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
1477 		else
1478 			qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
1479 		qgroup_dirty(fs_info, qgroup);
1480 
1481 		/* Append parent qgroups to @qgroup_list. */
1482 		list_for_each_entry(glist, &qgroup->groups, next_group)
1483 			qgroup_iterator_add(&qgroup_list, glist->group);
1484 	}
1485 	ret = 0;
1486 out:
1487 	qgroup_iterator_clean(&qgroup_list);
1488 	return ret;
1489 }
1490 
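/*
 * Worked example for the loop above (assumed values): a child qgroup
 * with excl == rfer == 1M is assigned to a parent (sign == 1).  The
 * parent and all of its ancestors get rfer += 1M and excl += 1M, take
 * over the child's reserved space, and are marked dirty.  Removing the
 * relation (sign == -1) subtracts exactly the same amounts.
 */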
1491 
1492 /*
1493  * Quick path for updating qgroup with only excl refs.
1494  *
1495  * In that case, updating all the parents is enough.
1496  * Otherwise we need to do a full rescan.
1497  * Caller should also hold fs_info->qgroup_lock.
1498  *
1499  * Return 0 for a quick update, >0 when a full rescan is needed
1500  * (the INCONSISTENT flag is then set).
1501  * Return <0 for other errors.
1502  */
1503 static int quick_update_accounting(struct btrfs_fs_info *fs_info,
1504 				   u64 src, u64 dst, int sign)
1505 {
1506 	struct btrfs_qgroup *qgroup;
1507 	int ret = 1;
1508 
1509 	qgroup = find_qgroup_rb(fs_info, src);
1510 	if (!qgroup)
1511 		goto out;
1512 	if (qgroup->excl == qgroup->rfer) {
1513 		ret = __qgroup_excl_accounting(fs_info, dst, qgroup, sign);
1514 		if (ret < 0)
1515 			goto out;
1516 		ret = 0;
1517 	}
1518 out:
1519 	if (ret)
1520 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1521 	return ret;
1522 }
1523 
1524 /*
1525  * Add relation between @src and @dst qgroup. The @prealloc is allocated by the
1526  * callers and transferred here (either used or freed on error).
1527  */
1528 int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst,
1529 			      struct btrfs_qgroup_list *prealloc)
1530 {
1531 	struct btrfs_fs_info *fs_info = trans->fs_info;
1532 	struct btrfs_qgroup *parent;
1533 	struct btrfs_qgroup *member;
1534 	struct btrfs_qgroup_list *list;
1535 	int ret = 0;
1536 
1537 	ASSERT(prealloc);
1538 
1539 	/* Check the level of src and dst first */
1540 	if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
1541 		return -EINVAL;
1542 
1543 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1544 	if (!fs_info->quota_root) {
1545 		ret = -ENOTCONN;
1546 		goto out;
1547 	}
1548 	member = find_qgroup_rb(fs_info, src);
1549 	parent = find_qgroup_rb(fs_info, dst);
1550 	if (!member || !parent) {
1551 		ret = -EINVAL;
1552 		goto out;
1553 	}
1554 
1555 	/* First check if such a qgroup relation exists. */
1556 	list_for_each_entry(list, &member->groups, next_group) {
1557 		if (list->group == parent) {
1558 			ret = -EEXIST;
1559 			goto out;
1560 		}
1561 	}
1562 
1563 	ret = add_qgroup_relation_item(trans, src, dst);
1564 	if (ret)
1565 		goto out;
1566 
1567 	ret = add_qgroup_relation_item(trans, dst, src);
1568 	if (ret) {
1569 		del_qgroup_relation_item(trans, src, dst);
1570 		goto out;
1571 	}
1572 
1573 	spin_lock(&fs_info->qgroup_lock);
1574 	ret = __add_relation_rb(prealloc, member, parent);
1575 	prealloc = NULL;
1576 	if (ret < 0) {
1577 		spin_unlock(&fs_info->qgroup_lock);
1578 		goto out;
1579 	}
1580 	ret = quick_update_accounting(fs_info, src, dst, 1);
1581 	spin_unlock(&fs_info->qgroup_lock);
1582 out:
1583 	kfree(prealloc);
1584 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1585 	return ret;
1586 }
1587 
1588 static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
1589 				 u64 dst)
1590 {
1591 	struct btrfs_fs_info *fs_info = trans->fs_info;
1592 	struct btrfs_qgroup *parent;
1593 	struct btrfs_qgroup *member;
1594 	struct btrfs_qgroup_list *list;
1595 	bool found = false;
1596 	int ret = 0;
1597 	int ret2;
1598 
1599 	if (!fs_info->quota_root) {
1600 		ret = -ENOTCONN;
1601 		goto out;
1602 	}
1603 
1604 	member = find_qgroup_rb(fs_info, src);
1605 	parent = find_qgroup_rb(fs_info, dst);
1606 	/*
1607 	 * If the parent/member pair doesn't exist, just try to delete the
1608 	 * dead relation items.
1609 	 */
1610 	if (!member || !parent)
1611 		goto delete_item;
1612 
1613 	/* First check if such a qgroup relation exists. */
1614 	list_for_each_entry(list, &member->groups, next_group) {
1615 		if (list->group == parent) {
1616 			found = true;
1617 			break;
1618 		}
1619 	}
1620 
1621 delete_item:
1622 	ret = del_qgroup_relation_item(trans, src, dst);
1623 	if (ret < 0 && ret != -ENOENT)
1624 		goto out;
1625 	ret2 = del_qgroup_relation_item(trans, dst, src);
1626 	if (ret2 < 0 && ret2 != -ENOENT)
1627 		goto out;
1628 
1629 	/* At least one deletion succeeded, return 0 */
1630 	if (!ret || !ret2)
1631 		ret = 0;
1632 
1633 	if (found) {
1634 		spin_lock(&fs_info->qgroup_lock);
1635 		del_relation_rb(fs_info, src, dst);
1636 		ret = quick_update_accounting(fs_info, src, dst, -1);
1637 		spin_unlock(&fs_info->qgroup_lock);
1638 	}
1639 out:
1640 	return ret;
1641 }
1642 
1643 int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
1644 			      u64 dst)
1645 {
1646 	struct btrfs_fs_info *fs_info = trans->fs_info;
1647 	int ret = 0;
1648 
1649 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1650 	ret = __del_qgroup_relation(trans, src, dst);
1651 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1652 
1653 	return ret;
1654 }
1655 
1656 int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
1657 {
1658 	struct btrfs_fs_info *fs_info = trans->fs_info;
1659 	struct btrfs_root *quota_root;
1660 	struct btrfs_qgroup *qgroup;
1661 	struct btrfs_qgroup *prealloc = NULL;
1662 	int ret = 0;
1663 
1664 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1665 	if (!fs_info->quota_root) {
1666 		ret = -ENOTCONN;
1667 		goto out;
1668 	}
1669 	quota_root = fs_info->quota_root;
1670 	qgroup = find_qgroup_rb(fs_info, qgroupid);
1671 	if (qgroup) {
1672 		ret = -EEXIST;
1673 		goto out;
1674 	}
1675 
1676 	prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
1677 	if (!prealloc) {
1678 		ret = -ENOMEM;
1679 		goto out;
1680 	}
1681 
1682 	ret = add_qgroup_item(trans, quota_root, qgroupid);
1683 	if (ret)
1684 		goto out;
1685 
1686 	spin_lock(&fs_info->qgroup_lock);
1687 	qgroup = add_qgroup_rb(fs_info, prealloc, qgroupid);
1688 	spin_unlock(&fs_info->qgroup_lock);
1689 	prealloc = NULL;
1690 
1691 	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
1692 out:
1693 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1694 	kfree(prealloc);
1695 	return ret;
1696 }
1697 
1698 /*
1699  * Return 0 if we cannot delete the qgroup (not empty or has children etc).
1700  * Return >0 if we can delete the qgroup.
1701  * Return <0 for other errors during tree search.
1702  */
1703 static int can_delete_qgroup(struct btrfs_fs_info *fs_info, struct btrfs_qgroup *qgroup)
1704 {
1705 	struct btrfs_key key;
1706 	struct btrfs_path *path;
1707 	int ret;
1708 
1709 	/*
1710 	 * Squota would never be inconsistent, but there can still be cases
1711 	 * where a dropped subvolume still has qgroup numbers, and squota
1712 	 * relies on such qgroup for future accounting.
1713 	 *
1714 	 * So for squota, do not allow dropping any non-zero qgroup.
1715 	 */
1716 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE &&
1717 	    (qgroup->rfer || qgroup->excl || qgroup->excl_cmpr || qgroup->rfer_cmpr))
1718 		return 0;
1719 
1720 	/* For higher level qgroup, we can only delete it if it has no child. */
1721 	if (btrfs_qgroup_level(qgroup->qgroupid)) {
1722 		if (!list_empty(&qgroup->members))
1723 			return 0;
1724 		return 1;
1725 	}
1726 
1727 	/*
1728 	 * For level-0 qgroups, we can only delete one if there is no
1729 	 * subvolume for it.
1730 	 * This means that even if a subvolume is unlinked but not yet fully
1731 	 * dropped, we cannot delete the qgroup.
1732 	 */
1733 	key.objectid = qgroup->qgroupid;
1734 	key.type = BTRFS_ROOT_ITEM_KEY;
1735 	key.offset = -1ULL;
1736 	path = btrfs_alloc_path();
1737 	if (!path)
1738 		return -ENOMEM;
1739 
1740 	ret = btrfs_find_root(fs_info->tree_root, &key, path, NULL, NULL);
1741 	btrfs_free_path(path);
1742 	/*
1743 	 * The @ret from btrfs_find_root() exactly matches our definition for
1744 	 * the return value, thus it can be returned directly.
1745 	 */
1746 	return ret;
1747 }
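/*
 * Editor's note (interpretation, not from the original source): per the
 * comment above, btrfs_find_root() is expected to return 0 when a root item
 * for @qgroupid still exists (deletion refused), >0 when none is found
 * (deletion allowed), and <0 on search errors, which is why its return
 * value can be passed through unchanged.
 */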
1748 
1749 int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
1750 {
1751 	struct btrfs_fs_info *fs_info = trans->fs_info;
1752 	struct btrfs_qgroup *qgroup;
1753 	struct btrfs_qgroup_list *list;
1754 	int ret = 0;
1755 
1756 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1757 	if (!fs_info->quota_root) {
1758 		ret = -ENOTCONN;
1759 		goto out;
1760 	}
1761 
1762 	qgroup = find_qgroup_rb(fs_info, qgroupid);
1763 	if (!qgroup) {
1764 		ret = -ENOENT;
1765 		goto out;
1766 	}
1767 
1768 	ret = can_delete_qgroup(fs_info, qgroup);
1769 	if (ret < 0)
1770 		goto out;
1771 	if (ret == 0) {
1772 		ret = -EBUSY;
1773 		goto out;
1774 	}
1775 
1776 	/* Check if there are no children of this qgroup */
1777 	if (!list_empty(&qgroup->members)) {
1778 		ret = -EBUSY;
1779 		goto out;
1780 	}
1781 
1782 	ret = del_qgroup_item(trans, qgroupid);
1783 	if (ret && ret != -ENOENT)
1784 		goto out;
1785 
1786 	while (!list_empty(&qgroup->groups)) {
1787 		list = list_first_entry(&qgroup->groups,
1788 					struct btrfs_qgroup_list, next_group);
1789 		ret = __del_qgroup_relation(trans, qgroupid,
1790 					    list->group->qgroupid);
1791 		if (ret)
1792 			goto out;
1793 	}
1794 
1795 	spin_lock(&fs_info->qgroup_lock);
1796 	/*
1797 	 * Warn on reserved space. The qgroup should have no child qgroup
1798 	 * nor a corresponding subvolume at this point.
1799 	 * Thus all its reserved space should be zero, no matter whether the
1800 	 * qgroup is consistent or which mode it is in.
1801 	 */
1802 	if (qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA] ||
1803 	    qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC] ||
1804 	    qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]) {
1805 		DEBUG_WARN();
1806 		btrfs_warn_rl(fs_info,
1807 "to be deleted qgroup %u/%llu has non-zero numbers, data %llu meta prealloc %llu meta pertrans %llu",
1808 			      btrfs_qgroup_level(qgroup->qgroupid),
1809 			      btrfs_qgroup_subvolid(qgroup->qgroupid),
1810 			      qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA],
1811 			      qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC],
1812 			      qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]);
1813 
1814 	}
1815 	/*
1816 	 * The same goes for the rfer/excl numbers, but only if our qgroup is
1817 	 * consistent and in regular qgroup mode.
1818 	 * For simple mode it's not as accurate, thus we can hit non-zero values
1819 	 * very frequently.
1820 	 */
1821 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL &&
1822 	    !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT)) {
1823 		if (qgroup->rfer || qgroup->excl ||
1824 		    qgroup->rfer_cmpr || qgroup->excl_cmpr) {
1825 			DEBUG_WARN();
1826 			qgroup_mark_inconsistent(fs_info,
1827 				"to be deleted qgroup %u/%llu has non-zero numbers, rfer %llu rfer_cmpr %llu excl %llu excl_cmpr %llu",
1828 				btrfs_qgroup_level(qgroup->qgroupid),
1829 				btrfs_qgroup_subvolid(qgroup->qgroupid),
1830 				qgroup->rfer, qgroup->rfer_cmpr,
1831 				qgroup->excl, qgroup->excl_cmpr);
1832 		}
1833 	}
1834 	del_qgroup_rb(fs_info, qgroupid);
1835 	spin_unlock(&fs_info->qgroup_lock);
1836 
1837 	/*
1838 	 * Remove the qgroup from sysfs now without holding the qgroup_lock
1839 	 * spinlock, since the sysfs_remove_group() function needs to take
1840 	 * the mutex kernfs_mutex through kernfs_remove_by_name_ns().
1841 	 */
1842 	btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
1843 	kfree(qgroup);
1844 out:
1845 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1846 	return ret;
1847 }
1848 
1849 int btrfs_qgroup_cleanup_dropped_subvolume(struct btrfs_fs_info *fs_info, u64 subvolid)
1850 {
1851 	struct btrfs_trans_handle *trans;
1852 	int ret;
1853 
1854 	if (!btrfs_is_fstree(subvolid) || !btrfs_qgroup_enabled(fs_info) ||
1855 	    !fs_info->quota_root)
1856 		return 0;
1857 
1858 	/*
1859 	 * Commit current transaction to make sure all the rfer/excl numbers
1860 	 * get updated.
1861 	 */
1862 	ret = btrfs_commit_current_transaction(fs_info->quota_root);
1863 	if (ret < 0)
1864 		return ret;
1865 
1866 	/* Start a new trans to delete the qgroup info and limit items. */
1867 	trans = btrfs_start_transaction(fs_info->quota_root, 2);
1868 	if (IS_ERR(trans))
1869 		return PTR_ERR(trans);
1870 	ret = btrfs_remove_qgroup(trans, subvolid);
1871 	btrfs_end_transaction(trans);
1872 	/*
1873 	 * Either it's squota and the subvolume still has numbers needed for
1874 	 * future accounting, in which case we cannot delete it and just skip it.
1875 	 *
1876 	 * Or the qgroup has already been removed by a qgroup rescan. In both
1877 	 * cases it is safe to ignore the error.
1878 	 */
1879 	if (ret == -EBUSY || ret == -ENOENT)
1880 		ret = 0;
1881 	return ret;
1882 }
1883 
1884 int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
1885 		       struct btrfs_qgroup_limit *limit)
1886 {
1887 	struct btrfs_fs_info *fs_info = trans->fs_info;
1888 	struct btrfs_qgroup *qgroup;
1889 	int ret = 0;
1890 	/* Sometimes we want to clear the limit on this qgroup.  To do that,
1891 	 * we treat -1 as a special value which tells the kernel to clear the
1892 	 * limit on this qgroup.
1893 	 */
1894 	const u64 CLEAR_VALUE = -1;
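	/*
	 * Illustration (editor's note, hypothetical request): to clear an
	 * existing max_rfer limit, userspace passes a limit with
	 * BTRFS_QGROUP_LIMIT_MAX_RFER set in ->flags and
	 * ->max_rfer == CLEAR_VALUE; the branch below then drops the flag
	 * and resets qgroup->max_rfer to 0.
	 */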
1895 
1896 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1897 	if (!fs_info->quota_root) {
1898 		ret = -ENOTCONN;
1899 		goto out;
1900 	}
1901 
1902 	qgroup = find_qgroup_rb(fs_info, qgroupid);
1903 	if (!qgroup) {
1904 		ret = -ENOENT;
1905 		goto out;
1906 	}
1907 
1908 	spin_lock(&fs_info->qgroup_lock);
1909 	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
1910 		if (limit->max_rfer == CLEAR_VALUE) {
1911 			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
1912 			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
1913 			qgroup->max_rfer = 0;
1914 		} else {
1915 			qgroup->max_rfer = limit->max_rfer;
1916 		}
1917 	}
1918 	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
1919 		if (limit->max_excl == CLEAR_VALUE) {
1920 			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
1921 			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
1922 			qgroup->max_excl = 0;
1923 		} else {
1924 			qgroup->max_excl = limit->max_excl;
1925 		}
1926 	}
1927 	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
1928 		if (limit->rsv_rfer == CLEAR_VALUE) {
1929 			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
1930 			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
1931 			qgroup->rsv_rfer = 0;
1932 		} else {
1933 			qgroup->rsv_rfer = limit->rsv_rfer;
1934 		}
1935 	}
1936 	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
1937 		if (limit->rsv_excl == CLEAR_VALUE) {
1938 			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
1939 			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
1940 			qgroup->rsv_excl = 0;
1941 		} else {
1942 			qgroup->rsv_excl = limit->rsv_excl;
1943 		}
1944 	}
1945 	qgroup->lim_flags |= limit->flags;
1946 
1947 	spin_unlock(&fs_info->qgroup_lock);
1948 
1949 	ret = update_qgroup_limit_item(trans, qgroup);
1950 	if (ret)
1951 		qgroup_mark_inconsistent(fs_info, "qgroup item update error %d", ret);
1952 
1953 out:
1954 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1955 	return ret;
1956 }
1957 
1958 /*
1959  * Inform qgroup to trace one dirty extent, whose info is recorded in @record,
1960  * so qgroup can account it at transaction commit time.
1961  *
1962  * No-lock version: the caller must hold the delayed ref lock and have allocated
1963  * the memory, then call btrfs_qgroup_trace_extent_post() after exiting the lock context.
1964  *
1965  * Return 0 for a successful insert.
1966  * Return >0 for an existing record; the caller can free @record safely.
1967  * Return <0 for insertion failure; the caller can free @record safely.
1968  */
1969 int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
1970 				     struct btrfs_delayed_ref_root *delayed_refs,
1971 				     struct btrfs_qgroup_extent_record *record,
1972 				     u64 bytenr)
1973 {
1974 	struct btrfs_qgroup_extent_record *existing, *ret;
1975 	const unsigned long index = (bytenr >> fs_info->sectorsize_bits);
1976 
1977 	if (!btrfs_qgroup_full_accounting(fs_info))
1978 		return 1;
1979 
1980 #if BITS_PER_LONG == 32
1981 	if (bytenr >= MAX_LFS_FILESIZE) {
1982 		btrfs_err_rl(fs_info,
1983 "qgroup record for extent at %llu is beyond 32bit page cache and xarray index limit",
1984 			     bytenr);
1985 		btrfs_err_32bit_limit(fs_info);
1986 		return -EOVERFLOW;
1987 	}
1988 #endif
1989 
1990 	trace_btrfs_qgroup_trace_extent(fs_info, record, bytenr);
1991 
1992 	xa_lock(&delayed_refs->dirty_extents);
1993 	existing = xa_load(&delayed_refs->dirty_extents, index);
1994 	if (existing) {
1995 		if (record->data_rsv && !existing->data_rsv) {
1996 			existing->data_rsv = record->data_rsv;
1997 			existing->data_rsv_refroot = record->data_rsv_refroot;
1998 		}
1999 		xa_unlock(&delayed_refs->dirty_extents);
2000 		return 1;
2001 	}
2002 
2003 	ret = __xa_store(&delayed_refs->dirty_extents, index, record, GFP_ATOMIC);
2004 	xa_unlock(&delayed_refs->dirty_extents);
2005 	if (xa_is_err(ret)) {
2006 		qgroup_mark_inconsistent(fs_info, "xarray insert error: %d", xa_err(ret));
2007 		return xa_err(ret);
2008 	}
2009 
2010 	return 0;
2011 }
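/*
 * Editor's sketch of the expected calling pattern (illustrative only,
 * simplified from btrfs_qgroup_trace_extent() below):
 *
 *	record = kzalloc(sizeof(*record), GFP_NOFS);
 *	if (!record)
 *		return -ENOMEM;
 *	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs,
 *					       record, bytenr);
 *	if (ret)		// >0: duplicate record, <0: insert failure
 *		kfree(record);	// safe to free per the contract above
 */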
2012 
2013 /*
2014  * Post handler after qgroup_trace_extent_nolock().
2015  *
2016  * NOTE: Current qgroup does the expensive backref walk at transaction
2017  * committing time with TRANS_STATE_COMMIT_DOING, which blocks incoming
2018  * new transactions.
2019  * This is designed to allow btrfs_find_all_roots() to get correct new_roots
2020  * result.
2021  *
2022  * However, for old_roots there is no need to do the backref walk at that
2023  * time, since we search commit roots for the backref walk and the result
2024  * will always be correct.
2025  *
2026  * Due to the nature of the no-lock version, we can't do the backref walk
2027  * there.  So we must call btrfs_qgroup_trace_extent_post() after exiting
2028  * the spinlock context.
2029  *
2030  * TODO: If we can fix and prove that btrfs_find_all_roots() gets correct
2031  * results using the current root, then we can move the expensive backref walk
2032  * out of transaction commit, but not now as qgroup accounting would be wrong again.
2033  */
2034 int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
2035 				   struct btrfs_qgroup_extent_record *qrecord,
2036 				   u64 bytenr)
2037 {
2038 	struct btrfs_fs_info *fs_info = trans->fs_info;
2039 	struct btrfs_backref_walk_ctx ctx = {
2040 		.bytenr = bytenr,
2041 		.fs_info = fs_info,
2042 	};
2043 	int ret;
2044 
2045 	if (!btrfs_qgroup_full_accounting(fs_info))
2046 		return 0;
2047 	/*
2048 	 * We are always called in a context where we are already holding a
2049 	 * transaction handle. Often we are called when adding a data delayed
2050 	 * reference from btrfs_truncate_inode_items() (truncating or unlinking),
2051 	 * in which case we will be holding a write lock on extent buffer from a
2052 	 * subvolume tree. In this case we can't allow btrfs_find_all_roots() to
2053 	 * acquire fs_info->commit_root_sem, because that is a higher level lock
2054 	 * that must be acquired before locking any extent buffers.
2055 	 *
2056 	 * So we want btrfs_find_all_roots() to not acquire the commit_root_sem
2057 	 * but we can't pass it a non-NULL transaction handle, because otherwise
2058 	 * it would not use commit roots and would lock extent buffers, causing
2059 	 * a deadlock if it ends up trying to read lock the same extent buffer
2060 	 * that was previously write locked at btrfs_truncate_inode_items().
2061 	 *
2062 	 * So pass a NULL transaction handle to btrfs_find_all_roots() and
2063 	 * explicitly tell it to not acquire the commit_root_sem - if we are
2064 	 * holding a transaction handle we don't need its protection.
2065 	 */
2066 	ASSERT(trans != NULL);
2067 
2068 	if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)
2069 		return 0;
2070 
2071 	ret = btrfs_find_all_roots(&ctx, true);
2072 	if (ret < 0) {
2073 		qgroup_mark_inconsistent(fs_info,
2074 				"error accounting new delayed refs extent: %d", ret);
2075 		return 0;
2076 	}
2077 
2078 	/*
2079 	 * Here we don't need to get the lock of
2080 	 * trans->transaction->delayed_refs, since inserted qrecord won't
2081 	 * be deleted, only qrecord->node may be modified (new qrecord insert)
2082 	 *
2083 	 * So modifying qrecord->old_roots is safe here
2084 	 */
2085 	qrecord->old_roots = ctx.roots;
2086 	return 0;
2087 }
2088 
2089 /*
2090  * Inform qgroup to trace one dirty extent, specified by @bytenr and
2091  * @num_bytes, so qgroup can account it at transaction commit time.
2092  *
2093  * This is the better-encapsulated version: it handles the memory
2094  * allocation and does the backref walk on commit roots.
2095  * Since it allocates memory and reads metadata, this function can
2096  * sleep.
2097  *
2098  * Return 0 if the operation is done.
2099  * Return <0 for errors, like memory allocation failure or an invalid
2100  * parameter (NULL trans).
2101  */
2102 int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
2103 			      u64 num_bytes)
2104 {
2105 	struct btrfs_fs_info *fs_info = trans->fs_info;
2106 	struct btrfs_qgroup_extent_record *record;
2107 	struct btrfs_delayed_ref_root *delayed_refs = &trans->transaction->delayed_refs;
2108 	const unsigned long index = (bytenr >> fs_info->sectorsize_bits);
2109 	int ret;
2110 
2111 	if (!btrfs_qgroup_full_accounting(fs_info) || bytenr == 0 || num_bytes == 0)
2112 		return 0;
2113 	record = kzalloc(sizeof(*record), GFP_NOFS);
2114 	if (!record)
2115 		return -ENOMEM;
2116 
2117 	if (xa_reserve(&delayed_refs->dirty_extents, index, GFP_NOFS)) {
2118 		kfree(record);
2119 		return -ENOMEM;
2120 	}
2121 
2122 	record->num_bytes = num_bytes;
2123 
2124 	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record, bytenr);
2125 	if (ret) {
2126 		/* Clean up if insertion fails or item exists. */
2127 		xa_release(&delayed_refs->dirty_extents, index);
2128 		kfree(record);
2129 		return 0;
2130 	}
2131 	return btrfs_qgroup_trace_extent_post(trans, record, bytenr);
2132 }
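/*
 * Editor's note (illustrative): dirty_extents is keyed by
 * bytenr >> sectorsize_bits, so with a 4K sector size an extent at
 * bytenr 1M lands at xarray index 256; a second record for the same
 * bytenr always collides with the first and is reported as existing.
 */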
2133 
2134 /*
2135  * Inform qgroup to trace all the data extents referenced by a leaf
2136  *
2137  * Return 0 for success.
2138  * Return <0 for errors (e.g. -ENOMEM).
2139  */
2140 int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
2141 				  struct extent_buffer *eb)
2142 {
2143 	struct btrfs_fs_info *fs_info = trans->fs_info;
2144 	int nr = btrfs_header_nritems(eb);
2145 	int i, extent_type, ret;
2146 	struct btrfs_key key;
2147 	struct btrfs_file_extent_item *fi;
2148 	u64 bytenr, num_bytes;
2149 
2150 	/* We can be called directly from walk_up_proc() */
2151 	if (!btrfs_qgroup_full_accounting(fs_info))
2152 		return 0;
2153 
2154 	for (i = 0; i < nr; i++) {
2155 		btrfs_item_key_to_cpu(eb, &key, i);
2156 
2157 		if (key.type != BTRFS_EXTENT_DATA_KEY)
2158 			continue;
2159 
2160 		fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
2161 		/* Filter out non-qgroup-accountable extents. */
2162 		extent_type = btrfs_file_extent_type(eb, fi);
2163 
2164 		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
2165 			continue;
2166 
2167 		bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
2168 		if (!bytenr)
2169 			continue;
2170 
2171 		num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
2172 
2173 		ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes);
2174 		if (ret)
2175 			return ret;
2176 	}
2177 	cond_resched();
2178 	return 0;
2179 }
2180 
2181 /*
2182  * Walk up the tree from the bottom, freeing leaves and any interior
2183  * nodes which have had all slots visited. If a node (leaf or
2184  * interior) is freed, the node above it will have its slot
2185  * incremented. The root node will never be freed.
2186  *
2187  * At the end of this function, we should have a path which has all
2188  * slots incremented to the next position for a search. If we need to
2189  * read a new node it will be NULL and the node above it will have the
2190  * correct slot selected for a later read.
2191  *
2192  * If we increment the root node's slot counter past the number of
2193  * elements, 1 is returned to signal completion of the search.
2194  */
2195 static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
2196 {
2197 	int level = 0;
2198 	int nr, slot;
2199 	struct extent_buffer *eb;
2200 
2201 	if (root_level == 0)
2202 		return 1;
2203 
2204 	while (level <= root_level) {
2205 		eb = path->nodes[level];
2206 		nr = btrfs_header_nritems(eb);
2207 		path->slots[level]++;
2208 		slot = path->slots[level];
2209 		if (slot >= nr || level == 0) {
2210 			/*
2211 			 * Don't free the root - we will detect this
2212 			 * condition after our loop and return a
2213 			 * positive value for caller to stop walking the tree.
2214 			 */
2215 			if (level != root_level) {
2216 				btrfs_tree_unlock_rw(eb, path->locks[level]);
2217 				path->locks[level] = 0;
2218 
2219 				free_extent_buffer(eb);
2220 				path->nodes[level] = NULL;
2221 				path->slots[level] = 0;
2222 			}
2223 		} else {
2224 			/*
2225 			 * We have a valid slot to walk back down
2226 			 * from. Stop here so caller can process these
2227 			 * new nodes.
2228 			 */
2229 			break;
2230 		}
2231 
2232 		level++;
2233 	}
2234 
2235 	eb = path->nodes[root_level];
2236 	if (path->slots[root_level] >= btrfs_header_nritems(eb))
2237 		return 1;
2238 
2239 	return 0;
2240 }
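/*
 * Editor's worked example (hypothetical 2-level tree, root_level == 1 with
 * 2 items in the root): after the first leaf is processed, one call frees
 * that leaf, sets path->nodes[0] to NULL and bumps path->slots[1] to 1,
 * returning 0 so the caller walks down the second child.  After that
 * child's leaf is processed, the next call bumps path->slots[1] to
 * 2 == nritems and returns 1, ending the search.
 */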
2241 
2242 /*
2243  * Helper function to trace a subtree tree block swap.
2244  *
2245  * The swap will happen in highest tree block, but there may be a lot of
2246  * tree blocks involved.
2247  *
2248  * For example:
2249  *  OO = Old tree blocks
2250  *  NN = New tree blocks allocated during balance
2251  *
2252  *           File tree (257)                  Reloc tree for 257
2253  * L2              OO                                NN
2254  *               /    \                            /    \
2255  * L1          OO      OO (a)                    OO      NN (a)
2256  *            / \     / \                       / \     / \
2257  * L0       OO   OO OO   OO                   OO   OO NN   NN
2258  *                  (b)  (c)                          (b)  (c)
2259  *
2260  * When calling qgroup_trace_extent_swap(), we will pass:
2261  * @src_eb = OO(a)
2262  * @dst_path = [ nodes[1] = NN(a), nodes[0] = NN(c) ]
2263  * @dst_level = 0
2264  * @root_level = 1
2265  *
2266  * In that case, qgroup_trace_extent_swap() will search from OO(a) to
2267  * reach OO(c), then mark both OO(c) and NN(c) as qgroup dirty.
2268  *
2269  * The main work of qgroup_trace_extent_swap() can be split into 3 parts:
2270  *
2271  * 1) Tree search from @src_eb
2272  *    It should act as a simplified btrfs_search_slot().
2273  *    The key for search can be extracted from @dst_path->nodes[dst_level]
2274  *    (first key).
2275  *
2276  * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty
2277  *    NOTE: In the above case, OO(a) and NN(a) won't be marked qgroup dirty.
2278  *    They should be marked during the previous (@dst_level = 1) iteration.
2279  *
2280  * 3) Mark file extents in leaves dirty
2281  *    We don't have a good way to pick out new file extents only.
2282  *    So we still follow the old method of scanning all file extents in
2283  *    the leaf.
2284  *
2285  * This function frees us from keeping two paths, thus later we only need
2286  * to care about how to iterate over all new tree blocks in the reloc tree.
2287  */
2288 static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
2289 				    struct extent_buffer *src_eb,
2290 				    struct btrfs_path *dst_path,
2291 				    int dst_level, int root_level,
2292 				    bool trace_leaf)
2293 {
2294 	struct btrfs_key key;
2295 	struct btrfs_path *src_path;
2296 	struct btrfs_fs_info *fs_info = trans->fs_info;
2297 	u32 nodesize = fs_info->nodesize;
2298 	int cur_level = root_level;
2299 	int ret;
2300 
2301 	BUG_ON(dst_level > root_level);
2302 	/* Level mismatch */
2303 	if (btrfs_header_level(src_eb) != root_level)
2304 		return -EINVAL;
2305 
2306 	src_path = btrfs_alloc_path();
2307 	if (!src_path) {
2308 		ret = -ENOMEM;
2309 		goto out;
2310 	}
2311 
2312 	if (dst_level)
2313 		btrfs_node_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
2314 	else
2315 		btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
2316 
2317 	/* For src_path */
2318 	refcount_inc(&src_eb->refs);
2319 	src_path->nodes[root_level] = src_eb;
2320 	src_path->slots[root_level] = dst_path->slots[root_level];
2321 	src_path->locks[root_level] = 0;
2322 
2323 	/* A simplified version of btrfs_search_slot() */
2324 	while (cur_level >= dst_level) {
2325 		struct btrfs_key src_key;
2326 		struct btrfs_key dst_key;
2327 
2328 		if (src_path->nodes[cur_level] == NULL) {
2329 			struct extent_buffer *eb;
2330 			int parent_slot;
2331 
2332 			eb = src_path->nodes[cur_level + 1];
2333 			parent_slot = src_path->slots[cur_level + 1];
2334 
2335 			eb = btrfs_read_node_slot(eb, parent_slot);
2336 			if (IS_ERR(eb)) {
2337 				ret = PTR_ERR(eb);
2338 				goto out;
2339 			}
2340 
2341 			src_path->nodes[cur_level] = eb;
2342 
2343 			btrfs_tree_read_lock(eb);
2344 			src_path->locks[cur_level] = BTRFS_READ_LOCK;
2345 		}
2346 
2347 		src_path->slots[cur_level] = dst_path->slots[cur_level];
2348 		if (cur_level) {
2349 			btrfs_node_key_to_cpu(dst_path->nodes[cur_level],
2350 					&dst_key, dst_path->slots[cur_level]);
2351 			btrfs_node_key_to_cpu(src_path->nodes[cur_level],
2352 					&src_key, src_path->slots[cur_level]);
2353 		} else {
2354 			btrfs_item_key_to_cpu(dst_path->nodes[cur_level],
2355 					&dst_key, dst_path->slots[cur_level]);
2356 			btrfs_item_key_to_cpu(src_path->nodes[cur_level],
2357 					&src_key, src_path->slots[cur_level]);
2358 		}
2359 		/* Content mismatch, something went wrong */
2360 		if (btrfs_comp_cpu_keys(&dst_key, &src_key)) {
2361 			ret = -ENOENT;
2362 			goto out;
2363 		}
2364 		cur_level--;
2365 	}
2366 
2367 	/*
2368 	 * Now both @dst_path and @src_path have been populated, record the tree
2369 	 * blocks for qgroup accounting.
2370 	 */
2371 	ret = btrfs_qgroup_trace_extent(trans, src_path->nodes[dst_level]->start,
2372 					nodesize);
2373 	if (ret < 0)
2374 		goto out;
2375 	ret = btrfs_qgroup_trace_extent(trans, dst_path->nodes[dst_level]->start,
2376 					nodesize);
2377 	if (ret < 0)
2378 		goto out;
2379 
2380 	/* Record leaf file extents */
2381 	if (dst_level == 0 && trace_leaf) {
2382 		ret = btrfs_qgroup_trace_leaf_items(trans, src_path->nodes[0]);
2383 		if (ret < 0)
2384 			goto out;
2385 		ret = btrfs_qgroup_trace_leaf_items(trans, dst_path->nodes[0]);
2386 	}
2387 out:
2388 	btrfs_free_path(src_path);
2389 	return ret;
2390 }
2391 
2392 /*
2393  * Helper function to do a recursive generation-aware depth-first search, to
2394  * locate all new tree blocks in a subtree of a reloc tree.
2395  *
2396  * E.g. (OO = Old tree blocks, NN = New tree blocks, whose gen == last_snapshot)
2397  *         reloc tree
2398  * L2         NN (a)
2399  *          /    \
2400  * L1    OO        NN (b)
2401  *      /  \      /  \
2402  * L0  OO  OO    OO  NN
2403  *               (c) (d)
2404  * If we pass:
2405  * @dst_path = [ nodes[1] = NN(b), nodes[0] = NULL ],
2406  * @cur_level = 1
2407  * @root_level = 1
2408  *
2409  * We will iterate through tree blocks NN(b) and NN(d) and inform qgroup to
2410  * trace the above tree blocks along with their counterparts in the file tree.
2411  * During the search, old tree blocks like OO(c) will be skipped as the tree
2412  * block swap won't affect OO(c).
2413  */
2414 static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans,
2415 					   struct extent_buffer *src_eb,
2416 					   struct btrfs_path *dst_path,
2417 					   int cur_level, int root_level,
2418 					   u64 last_snapshot, bool trace_leaf)
2419 {
2420 	struct btrfs_fs_info *fs_info = trans->fs_info;
2421 	struct extent_buffer *eb;
2422 	bool need_cleanup = false;
2423 	int ret = 0;
2424 	int i;
2425 
2426 	/* Level sanity check */
2427 	if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 ||
2428 	    root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 ||
2429 	    root_level < cur_level) {
2430 		btrfs_err_rl(fs_info,
2431 			"%s: bad levels, cur_level=%d root_level=%d",
2432 			__func__, cur_level, root_level);
2433 		return -EUCLEAN;
2434 	}
2435 
2436 	/* Read the tree block if needed */
2437 	if (dst_path->nodes[cur_level] == NULL) {
2438 		int parent_slot;
2439 		u64 child_gen;
2440 
2441 		/*
2442 		 * dst_path->nodes[root_level] must be initialized before
2443 		 * calling this function.
2444 		 */
2445 		if (cur_level == root_level) {
2446 			btrfs_err_rl(fs_info,
2447 	"%s: dst_path->nodes[%d] not initialized, root_level=%d cur_level=%d",
2448 				__func__, root_level, root_level, cur_level);
2449 			return -EUCLEAN;
2450 		}
2451 
2452 		/*
2453 		 * We need to get child blockptr/gen from parent before we can
2454 		 * read it.
2455 		 */
2456 		eb = dst_path->nodes[cur_level + 1];
2457 		parent_slot = dst_path->slots[cur_level + 1];
2458 		child_gen = btrfs_node_ptr_generation(eb, parent_slot);
2459 
2460 		/* This node is old, no need to trace */
2461 		if (child_gen < last_snapshot)
2462 			goto out;
2463 
2464 		eb = btrfs_read_node_slot(eb, parent_slot);
2465 		if (IS_ERR(eb)) {
2466 			ret = PTR_ERR(eb);
2467 			goto out;
2468 		}
2469 
2470 		dst_path->nodes[cur_level] = eb;
2471 		dst_path->slots[cur_level] = 0;
2472 
2473 		btrfs_tree_read_lock(eb);
2474 		dst_path->locks[cur_level] = BTRFS_READ_LOCK;
2475 		need_cleanup = true;
2476 	}
2477 
2478 	/* Now record this tree block and its counterpart for qgroups */
2479 	ret = qgroup_trace_extent_swap(trans, src_eb, dst_path, cur_level,
2480 				       root_level, trace_leaf);
2481 	if (ret < 0)
2482 		goto cleanup;
2483 
2484 	eb = dst_path->nodes[cur_level];
2485 
2486 	if (cur_level > 0) {
2487 		/* Iterate all child tree blocks */
2488 		for (i = 0; i < btrfs_header_nritems(eb); i++) {
2489 			/* Skip old tree blocks as they won't be swapped */
2490 			if (btrfs_node_ptr_generation(eb, i) < last_snapshot)
2491 				continue;
2492 			dst_path->slots[cur_level] = i;
2493 
2494 			/* Recursive call (at most 7 times) */
2495 			ret = qgroup_trace_new_subtree_blocks(trans, src_eb,
2496 					dst_path, cur_level - 1, root_level,
2497 					last_snapshot, trace_leaf);
2498 			if (ret < 0)
2499 				goto cleanup;
2500 		}
2501 	}
2502 
2503 cleanup:
2504 	if (need_cleanup) {
2505 		/* Clean up */
2506 		btrfs_tree_unlock_rw(dst_path->nodes[cur_level],
2507 				     dst_path->locks[cur_level]);
2508 		free_extent_buffer(dst_path->nodes[cur_level]);
2509 		dst_path->nodes[cur_level] = NULL;
2510 		dst_path->slots[cur_level] = 0;
2511 		dst_path->locks[cur_level] = 0;
2512 	}
2513 out:
2514 	return ret;
2515 }
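/*
 * Editor's note: the recursion above is bounded by the tree height; with
 * BTRFS_MAX_LEVEL == 8 that matches the "at most 7 times" remark, since
 * each nested call decreases cur_level by one until it reaches level 0.
 */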
2516 
2517 static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
2518 				struct extent_buffer *src_eb,
2519 				struct extent_buffer *dst_eb,
2520 				u64 last_snapshot, bool trace_leaf)
2521 {
2522 	struct btrfs_fs_info *fs_info = trans->fs_info;
2523 	struct btrfs_path *dst_path = NULL;
2524 	int level;
2525 	int ret;
2526 
2527 	if (!btrfs_qgroup_full_accounting(fs_info))
2528 		return 0;
2529 
2530 	/* Wrong parameter order */
2531 	if (btrfs_header_generation(src_eb) > btrfs_header_generation(dst_eb)) {
2532 		btrfs_err_rl(fs_info,
2533 		"%s: bad parameter order, src_gen=%llu dst_gen=%llu", __func__,
2534 			     btrfs_header_generation(src_eb),
2535 			     btrfs_header_generation(dst_eb));
2536 		return -EUCLEAN;
2537 	}
2538 
2539 	if (!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb)) {
2540 		ret = -EIO;
2541 		goto out;
2542 	}
2543 
2544 	level = btrfs_header_level(dst_eb);
2545 	dst_path = btrfs_alloc_path();
2546 	if (!dst_path) {
2547 		ret = -ENOMEM;
2548 		goto out;
2549 	}
2550 	/* For dst_path */
2551 	refcount_inc(&dst_eb->refs);
2552 	dst_path->nodes[level] = dst_eb;
2553 	dst_path->slots[level] = 0;
2554 	dst_path->locks[level] = 0;
2555 
2556 	/* Do the generation-aware depth-first search */
2557 	ret = qgroup_trace_new_subtree_blocks(trans, src_eb, dst_path, level,
2558 					      level, last_snapshot, trace_leaf);
2559 	if (ret < 0)
2560 		goto out;
2561 	ret = 0;
2562 
2563 out:
2564 	btrfs_free_path(dst_path);
2565 	if (ret < 0)
2566 		qgroup_mark_inconsistent(fs_info, "%s error: %d", __func__, ret);
2567 	return ret;
2568 }
2569 
2570 /*
2571  * Inform qgroup to trace a whole subtree, including all its child tree
2572  * blocks and data.
2573  * The root tree block is specified by @root_eb.
2574  *
2575  * Normally used by relocation (tree block swap) and subvolume deletion.
2576  *
2577  * Return 0 for success.
2578  * Return <0 for errors (e.g. -ENOMEM or tree search errors).
2579  */
2580 int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
2581 			       struct extent_buffer *root_eb,
2582 			       u64 root_gen, int root_level)
2583 {
2584 	struct btrfs_fs_info *fs_info = trans->fs_info;
2585 	int ret = 0;
2586 	int level;
2587 	u8 drop_subptree_thres;
2588 	struct extent_buffer *eb = root_eb;
2589 	struct btrfs_path *path = NULL;
2590 
2591 	ASSERT(0 <= root_level && root_level < BTRFS_MAX_LEVEL);
2592 	ASSERT(root_eb != NULL);
2593 
2594 	if (!btrfs_qgroup_full_accounting(fs_info))
2595 		return 0;
2596 
2597 	spin_lock(&fs_info->qgroup_lock);
2598 	drop_subptree_thres = fs_info->qgroup_drop_subtree_thres;
2599 	spin_unlock(&fs_info->qgroup_lock);
2600 
2601 	/*
2602 	 * This function only gets called for snapshot drop.  If we hit a high
2603 	 * level node here, it means we are going to change ownership for quite
2604 	 * a lot of extents, which will greatly slow down btrfs_commit_transaction().
2605 	 *
2606 	 * So if we find a high tree here, we just skip the accounting and
2607 	 * mark the qgroup inconsistent.
2608 	 */
2609 	if (root_level >= drop_subptree_thres) {
2610 		qgroup_mark_inconsistent(fs_info, "subtree level reached threshold");
2611 		return 0;
2612 	}
2613 
2614 	if (!extent_buffer_uptodate(root_eb)) {
2615 		struct btrfs_tree_parent_check check = {
2616 			.transid = root_gen,
2617 			.level = root_level
2618 		};
2619 
2620 		ret = btrfs_read_extent_buffer(root_eb, &check);
2621 		if (ret)
2622 			goto out;
2623 	}
2624 
2625 	if (root_level == 0) {
2626 		ret = btrfs_qgroup_trace_leaf_items(trans, root_eb);
2627 		goto out;
2628 	}
2629 
2630 	path = btrfs_alloc_path();
2631 	if (!path)
2632 		return -ENOMEM;
2633 
2634 	/*
2635 	 * Walk down the tree.  Missing extent blocks are filled in as
2636 	 * we go. Metadata is accounted every time we read a new
2637 	 * extent block.
2638 	 *
2639 	 * When we reach a leaf, we account for file extent items in it,
2640 	 * walk back up the tree (adjusting slot pointers as we go)
2641 	 * and restart the search process.
2642 	 */
2643 	refcount_inc(&root_eb->refs);	/* For path */
2644 	path->nodes[root_level] = root_eb;
2645 	path->slots[root_level] = 0;
2646 	path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
2647 walk_down:
2648 	level = root_level;
2649 	while (level >= 0) {
2650 		if (path->nodes[level] == NULL) {
2651 			int parent_slot;
2652 			u64 child_bytenr;
2653 
2654 			/*
2655 			 * We need to get child blockptr from parent before we
2656 			 * can read it.
2657 			 */
2658 			eb = path->nodes[level + 1];
2659 			parent_slot = path->slots[level + 1];
2660 			child_bytenr = btrfs_node_blockptr(eb, parent_slot);
2661 
2662 			eb = btrfs_read_node_slot(eb, parent_slot);
2663 			if (IS_ERR(eb)) {
2664 				ret = PTR_ERR(eb);
2665 				goto out;
2666 			}
2667 
2668 			path->nodes[level] = eb;
2669 			path->slots[level] = 0;
2670 
2671 			btrfs_tree_read_lock(eb);
2672 			path->locks[level] = BTRFS_READ_LOCK;
2673 
2674 			ret = btrfs_qgroup_trace_extent(trans, child_bytenr,
2675 							fs_info->nodesize);
2676 			if (ret)
2677 				goto out;
2678 		}
2679 
2680 		if (level == 0) {
2681 			ret = btrfs_qgroup_trace_leaf_items(trans,
2682 							    path->nodes[level]);
2683 			if (ret)
2684 				goto out;
2685 
2686 			/* Nonzero return here means we completed our search */
2687 			ret = adjust_slots_upwards(path, root_level);
2688 			if (ret)
2689 				break;
2690 
2691 			/* Restart search with new slots */
2692 			goto walk_down;
2693 		}
2694 
2695 		level--;
2696 	}
2697 
2698 	ret = 0;
2699 out:
2700 	btrfs_free_path(path);
2701 
2702 	return ret;
2703 }
2704 
2705 static void qgroup_iterator_nested_add(struct list_head *head, struct btrfs_qgroup *qgroup)
2706 {
2707 	if (!list_empty(&qgroup->nested_iterator))
2708 		return;
2709 
2710 	list_add_tail(&qgroup->nested_iterator, head);
2711 }
2712 
2713 static void qgroup_iterator_nested_clean(struct list_head *head)
2714 {
2715 	while (!list_empty(head)) {
2716 		struct btrfs_qgroup *qgroup;
2717 
2718 		qgroup = list_first_entry(head, struct btrfs_qgroup, nested_iterator);
2719 		list_del_init(&qgroup->nested_iterator);
2720 	}
2721 }
2722 
2723 #define UPDATE_NEW	0
2724 #define UPDATE_OLD	1
2725 /*
2726  * Walk all of the roots that point to the bytenr and adjust their refcnts.
2727  */
2728 static void qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
2729 				 struct ulist *roots, struct list_head *qgroups,
2730 				 u64 seq, int update_old)
2731 {
2732 	struct ulist_node *unode;
2733 	struct ulist_iterator uiter;
2734 	struct btrfs_qgroup *qg;
2735 
2736 	if (!roots)
2737 		return;
2738 	ULIST_ITER_INIT(&uiter);
2739 	while ((unode = ulist_next(roots, &uiter))) {
2740 		LIST_HEAD(tmp);
2741 
2742 		qg = find_qgroup_rb(fs_info, unode->val);
2743 		if (!qg)
2744 			continue;
2745 
2746 		qgroup_iterator_nested_add(qgroups, qg);
2747 		qgroup_iterator_add(&tmp, qg);
2748 		list_for_each_entry(qg, &tmp, iterator) {
2749 			struct btrfs_qgroup_list *glist;
2750 
2751 			if (update_old)
2752 				btrfs_qgroup_update_old_refcnt(qg, seq, 1);
2753 			else
2754 				btrfs_qgroup_update_new_refcnt(qg, seq, 1);
2755 
2756 			list_for_each_entry(glist, &qg->groups, next_group) {
2757 				qgroup_iterator_nested_add(qgroups, glist->group);
2758 				qgroup_iterator_add(&tmp, glist->group);
2759 			}
2760 		}
2761 		qgroup_iterator_clean(&tmp);
2762 	}
2763 }
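/*
 * Editor's illustration (hypothetical hierarchy): for roots = {257} where
 * qgroup 0/257 is a member of 1/100, a single UPDATE_OLD pass bumps the
 * old refcnt at @seq of both 0/257 and 1/100 by one, and both qgroups end
 * up on the @qgroups nested-iterator list for qgroup_update_counters().
 */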
2764 
2765 /*
2766  * Update qgroup rfer/excl counters.
2767  * Rfer update is easy, the code explains itself.
2768  *
2769  * Excl update is tricky, the update is split into 2 parts.
2770  * Part 1: Possible exclusive <-> sharing detect:
2771  *	|	A	|	!A	|
2772  *  -------------------------------------
2773  *  B	|	*	|	-	|
2774  *  -------------------------------------
2775  *  !B	|	+	|	**	|
2776  *  -------------------------------------
2777  *
2778  * Conditions:
2779  * A:	cur_old_roots < nr_old_roots	(not exclusive before)
2780  * !A:	cur_old_roots == nr_old_roots	(possible exclusive before)
2781  * B:	cur_new_roots < nr_new_roots	(not exclusive now)
2782  * !B:	cur_new_roots == nr_new_roots	(possible exclusive now)
2783  *
2784  * Results:
2785  * +: Possible sharing -> exclusive	-: Possible exclusive -> sharing
2786  * *: Definitely not changed.		**: Possible unchanged.
2787  *
2788  * For the !A and !B conditions, the exception is the cur_old/new_roots == 0 case.
2789  *
2790  * To make the logic clear, we first use conditions A and B to split the
2791  * combinations into 4 results.
2792  *
2793  * Then, for results "+" and "-", check the old/new_roots == 0 case, as in
2794  * them only one variant may be 0.
2795  *
2796  * Lastly, check result **; since there are 2 variants that may be 0, split
2797  * them again (2x2).
2798  * But this time we don't need to consider other things, the code and logic
2799  * are easy to understand now.
2800  */
2801 static void qgroup_update_counters(struct btrfs_fs_info *fs_info,
2802 				   struct list_head *qgroups, u64 nr_old_roots,
2803 				   u64 nr_new_roots, u64 num_bytes, u64 seq)
2804 {
2805 	struct btrfs_qgroup *qg;
2806 
2807 	list_for_each_entry(qg, qgroups, nested_iterator) {
2808 		u64 cur_new_count, cur_old_count;
2809 		bool dirty = false;
2810 
2811 		cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
2812 		cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
2813 
2814 		trace_btrfs_qgroup_update_counters(fs_info, qg, cur_old_count,
2815 						   cur_new_count);
2816 
2817 		/* Rfer update part */
2818 		if (cur_old_count == 0 && cur_new_count > 0) {
2819 			qg->rfer += num_bytes;
2820 			qg->rfer_cmpr += num_bytes;
2821 			dirty = true;
2822 		}
2823 		if (cur_old_count > 0 && cur_new_count == 0) {
2824 			qg->rfer -= num_bytes;
2825 			qg->rfer_cmpr -= num_bytes;
2826 			dirty = true;
2827 		}
2828 
2829 		/* Excl update part */
2830 		/* Exclusive/none -> shared case */
2831 		if (cur_old_count == nr_old_roots &&
2832 		    cur_new_count < nr_new_roots) {
2833 			/* Exclusive -> shared */
2834 			if (cur_old_count != 0) {
2835 				qg->excl -= num_bytes;
2836 				qg->excl_cmpr -= num_bytes;
2837 				dirty = true;
2838 			}
2839 		}
2840 
2841 		/* Shared -> exclusive/none case */
2842 		if (cur_old_count < nr_old_roots &&
2843 		    cur_new_count == nr_new_roots) {
2844 			/* Shared->exclusive */
2845 			if (cur_new_count != 0) {
2846 				qg->excl += num_bytes;
2847 				qg->excl_cmpr += num_bytes;
2848 				dirty = true;
2849 			}
2850 		}
2851 
2852 		/* Exclusive/none -> exclusive/none case */
2853 		if (cur_old_count == nr_old_roots &&
2854 		    cur_new_count == nr_new_roots) {
2855 			if (cur_old_count == 0) {
2856 				/* None -> exclusive/none */
2857 
2858 				if (cur_new_count != 0) {
2859 					/* None -> exclusive */
2860 					qg->excl += num_bytes;
2861 					qg->excl_cmpr += num_bytes;
2862 					dirty = true;
2863 				}
2864 				/* None -> none, nothing changed */
2865 			} else {
2866 				/* Exclusive -> exclusive/none */
2867 
2868 				if (cur_new_count == 0) {
2869 					/* Exclusive -> none */
2870 					qg->excl -= num_bytes;
2871 					qg->excl_cmpr -= num_bytes;
2872 					dirty = true;
2873 				}
2874 				/* Exclusive -> exclusive, nothing changed */
2875 			}
2876 		}
2877 
2878 		if (dirty)
2879 			qgroup_dirty(fs_info, qg);
2880 	}
2881 }
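/*
 * Editor's worked example for the table above (hypothetical numbers):
 * with num_bytes = 16K, nr_old_roots = 1 and nr_new_roots = 2, a qgroup
 * seeing cur_old_count == 1 and cur_new_count == 1 hits the "-" cell
 * (!A && B): possibly exclusive before, shared now, so 16K leaves
 * excl/excl_cmpr while rfer stays put.  A qgroup seeing
 * cur_old_count == 0 and cur_new_count == 1 only gains 16K of rfer,
 * since cur_new_count < nr_new_roots means the extent is still shared.
 */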
2882 
2883 /*
2884  * Check if @roots is potentially a list of fs tree roots
2885  *
2886  * Return 0 if it is definitely not an fs/subvol tree roots ulist.
2887  * Return 1 for possible fs/subvol tree roots in the list (considering an
2888  *          empty one as well).
2889  */
2890 static int maybe_fs_roots(struct ulist *roots)
2891 {
2892 	struct ulist_node *unode;
2893 	struct ulist_iterator uiter;
2894 
2895 	/* Empty one, still possible for fs roots */
2896 	if (!roots || roots->nnodes == 0)
2897 		return 1;
2898 
2899 	ULIST_ITER_INIT(&uiter);
2900 	unode = ulist_next(roots, &uiter);
2901 	if (!unode)
2902 		return 1;
2903 
2904 	/*
2905 	 * If it contains fs tree roots, then it must belong to fs/subvol
2906 	 * trees.
2907 	 * If it contains a non-fs tree, it won't be shared with fs/subvol trees.
2908 	 */
2909 	return btrfs_is_fstree(unode->val);
2910 }
2911 
2912 int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
2913 				u64 num_bytes, struct ulist *old_roots,
2914 				struct ulist *new_roots)
2915 {
2916 	struct btrfs_fs_info *fs_info = trans->fs_info;
2917 	LIST_HEAD(qgroups);
2918 	u64 seq;
2919 	u64 nr_new_roots = 0;
2920 	u64 nr_old_roots = 0;
2921 	int ret = 0;
2922 
2923 	/*
2924 	 * If quotas get disabled meanwhile, the resources need to be freed and
2925 	 * we can't just exit here.
2926 	 */
2927 	if (!btrfs_qgroup_full_accounting(fs_info) ||
2928 	    fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)
2929 		goto out_free;
2930 
2931 	if (new_roots) {
2932 		if (!maybe_fs_roots(new_roots))
2933 			goto out_free;
2934 		nr_new_roots = new_roots->nnodes;
2935 	}
2936 	if (old_roots) {
2937 		if (!maybe_fs_roots(old_roots))
2938 			goto out_free;
2939 		nr_old_roots = old_roots->nnodes;
2940 	}
2941 
2942 	/* Quick exit, either not fs tree roots, or won't affect any qgroup */
2943 	if (nr_old_roots == 0 && nr_new_roots == 0)
2944 		goto out_free;
2945 
2946 	trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr,
2947 					num_bytes, nr_old_roots, nr_new_roots);
2948 
2949 	mutex_lock(&fs_info->qgroup_rescan_lock);
2950 	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
2951 		if (fs_info->qgroup_rescan_progress.objectid <= bytenr) {
2952 			mutex_unlock(&fs_info->qgroup_rescan_lock);
2953 			ret = 0;
2954 			goto out_free;
2955 		}
2956 	}
2957 	mutex_unlock(&fs_info->qgroup_rescan_lock);
2958 
2959 	spin_lock(&fs_info->qgroup_lock);
2960 	seq = fs_info->qgroup_seq;
2961 
2962 	/* Update old refcnts using old_roots */
2963 	qgroup_update_refcnt(fs_info, old_roots, &qgroups, seq, UPDATE_OLD);
2964 
2965 	/* Update new refcnts using new_roots */
2966 	qgroup_update_refcnt(fs_info, new_roots, &qgroups, seq, UPDATE_NEW);
2967 
2968 	qgroup_update_counters(fs_info, &qgroups, nr_old_roots, nr_new_roots,
2969 			       num_bytes, seq);
2970 
2971 	/*
2972 	 * We're done using the iterator, release all its qgroups while holding
2973 	 * fs_info->qgroup_lock so that we don't race with btrfs_remove_qgroup()
2974 	 * and trigger use-after-free accesses to qgroups.
2975 	 */
2976 	qgroup_iterator_nested_clean(&qgroups);
2977 
2978 	/*
2979 	 * Bump qgroup_seq to avoid seq overlap
2980 	 */
2981 	fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1;
2982 	spin_unlock(&fs_info->qgroup_lock);
2983 out_free:
2984 	ulist_free(old_roots);
2985 	ulist_free(new_roots);
2986 	return ret;
2987 }
2988 
2989 int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
2990 {
2991 	struct btrfs_fs_info *fs_info = trans->fs_info;
2992 	struct btrfs_qgroup_extent_record *record;
2993 	struct btrfs_delayed_ref_root *delayed_refs;
2994 	struct ulist *new_roots = NULL;
2995 	unsigned long index;
2996 	u64 num_dirty_extents = 0;
2997 	u64 qgroup_to_skip;
2998 	int ret = 0;
2999 
3000 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
3001 		return 0;
3002 
3003 	delayed_refs = &trans->transaction->delayed_refs;
3004 	qgroup_to_skip = delayed_refs->qgroup_to_skip;
3005 	xa_for_each(&delayed_refs->dirty_extents, index, record) {
3006 		const u64 bytenr = (((u64)index) << fs_info->sectorsize_bits);
3007 
3008 		num_dirty_extents++;
3009 		trace_btrfs_qgroup_account_extents(fs_info, record, bytenr);
3010 
3011 		if (!ret && !(fs_info->qgroup_flags &
3012 			      BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)) {
3013 			struct btrfs_backref_walk_ctx ctx = { 0 };
3014 
3015 			ctx.bytenr = bytenr;
3016 			ctx.fs_info = fs_info;
3017 
3018 			/*
3019 			 * Old roots should be searched when inserting qgroup
3020 			 * Old roots should be searched when inserting the
3021 			 * qgroup extent record.
3022 			 *
3023 			 * But for the INCONSISTENT (NO_ACCOUNTING) -> rescan
3024 			 * case, we may have some records inserted during
3025 			 * NO_ACCOUNTING (thus no old_roots populated), but
3026 			 * later we start a rescan, which clears NO_ACCOUNTING,
3027 			 * leaving some inserted records without old_roots
3028 			 * populated.
3029 			 * Those cases are rare and should not cause too much
3030 			 * time spent during commit_transaction().
3031 			 */
3032 			if (!record->old_roots) {
3033 				/* Search commit root to find old_roots */
3034 				ret = btrfs_find_all_roots(&ctx, false);
3035 				if (ret < 0)
3036 					goto cleanup;
3037 				record->old_roots = ctx.roots;
3038 				ctx.roots = NULL;
3039 			}
3040 
3041 			/*
3042 			 * Use BTRFS_SEQ_LAST as time_seq to do a special search
3043 			 * which doesn't lock the tree or delayed_refs and searches
3044 			 * the current root. It's safe inside commit_transaction().
3045 			 */
3046 			ctx.trans = trans;
3047 			ctx.time_seq = BTRFS_SEQ_LAST;
3048 			ret = btrfs_find_all_roots(&ctx, false);
3049 			if (ret < 0)
3050 				goto cleanup;
3051 			new_roots = ctx.roots;
3052 			if (qgroup_to_skip) {
3053 				ulist_del(new_roots, qgroup_to_skip, 0);
3054 				ulist_del(record->old_roots, qgroup_to_skip,
3055 					  0);
3056 			}
3057 			ret = btrfs_qgroup_account_extent(trans, bytenr,
3058 							  record->num_bytes,
3059 							  record->old_roots,
3060 							  new_roots);
3061 			record->old_roots = NULL;
3062 			new_roots = NULL;
3063 		}
3064 		/* Free the reserved data space */
3065 		btrfs_qgroup_free_refroot(fs_info,
3066 				record->data_rsv_refroot,
3067 				record->data_rsv,
3068 				BTRFS_QGROUP_RSV_DATA);
3069 cleanup:
3070 		ulist_free(record->old_roots);
3071 		ulist_free(new_roots);
3072 		new_roots = NULL;
3073 		xa_erase(&delayed_refs->dirty_extents, index);
3074 		kfree(record);
3075 
3076 	}
3077 	trace_btrfs_qgroup_num_dirty_extents(fs_info, trans->transid, num_dirty_extents);
3078 	return ret;
3079 }
3080 
3081 /*
3082  * Writes all changed qgroups to disk.
3083  * Called by the transaction commit path and the qgroup assign ioctl.
3084  */
3085 int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
3086 {
3087 	struct btrfs_fs_info *fs_info = trans->fs_info;
3088 	int ret = 0;
3089 
3090 	/*
3091 	 * In case we are called from the qgroup assign ioctl, assert that we
3092 	 * are holding the qgroup_ioctl_lock, otherwise we can race with a quota
3093 	 * disable operation (ioctl) and access a freed quota root.
3094 	 */
3095 	if (trans->transaction->state != TRANS_STATE_COMMIT_DOING)
3096 		lockdep_assert_held(&fs_info->qgroup_ioctl_lock);
3097 
3098 	if (!fs_info->quota_root)
3099 		return ret;
3100 
3101 	spin_lock(&fs_info->qgroup_lock);
3102 	while (!list_empty(&fs_info->dirty_qgroups)) {
3103 		struct btrfs_qgroup *qgroup;
3104 		qgroup = list_first_entry(&fs_info->dirty_qgroups,
3105 					  struct btrfs_qgroup, dirty);
3106 		list_del_init(&qgroup->dirty);
3107 		spin_unlock(&fs_info->qgroup_lock);
3108 		ret = update_qgroup_info_item(trans, qgroup);
3109 		if (ret)
3110 			qgroup_mark_inconsistent(fs_info,
3111 						 "qgroup info item update error %d", ret);
3112 		ret = update_qgroup_limit_item(trans, qgroup);
3113 		if (ret)
3114 			qgroup_mark_inconsistent(fs_info,
3115 						 "qgroup limit item update error %d", ret);
3116 		spin_lock(&fs_info->qgroup_lock);
3117 	}
3118 	if (btrfs_qgroup_enabled(fs_info))
3119 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
3120 	else
3121 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
3122 	spin_unlock(&fs_info->qgroup_lock);
3123 
3124 	ret = update_qgroup_status_item(trans);
3125 	if (ret)
3126 		qgroup_mark_inconsistent(fs_info,
3127 					 "qgroup status item update error %d", ret);
3128 
3129 	return ret;
3130 }
3131 
3132 int btrfs_qgroup_check_inherit(struct btrfs_fs_info *fs_info,
3133 			       struct btrfs_qgroup_inherit *inherit,
3134 			       size_t size)
3135 {
3136 	if (inherit->flags & ~BTRFS_QGROUP_INHERIT_FLAGS_SUPP)
3137 		return -EOPNOTSUPP;
3138 	if (size < sizeof(*inherit) || size > PAGE_SIZE)
3139 		return -EINVAL;
3140 
3141 	/*
3142 	 * In the past we allowed btrfs_qgroup_inherit to specify copying
3143 	 * rfer/excl numbers directly from other qgroups.  This behavior has
3144 	 * been disabled in userspace for a very long time, but here we should
3145 	 * also disable it in the kernel, as this behavior is known to mark
3146 	 * qgroups inconsistent, and a rescan would wipe out the changes anyway.
3147 	 *
3148 	 * Reject any btrfs_qgroup_inherit with num_ref_copies or num_excl_copies.
3149 	 */
3150 	if (inherit->num_ref_copies > 0 || inherit->num_excl_copies > 0)
3151 		return -EINVAL;
3152 
3153 	if (size != struct_size(inherit, qgroups, inherit->num_qgroups))
3154 		return -EINVAL;
3155 
3156 	/*
3157 	 * Skip the inherit source qgroups check if qgroup is not enabled.
3158 	 * Qgroups can still be enabled later, causing problems, but in that case
3159 	 * btrfs_qgroup_inherit() would just ignore those invalid ones.
3160 	 */
3161 	if (!btrfs_qgroup_enabled(fs_info))
3162 		return 0;
3163 
3164 	/*
3165 	 * Now check all the remaining qgroups, they should all:
3166 	 *
3167 	 * - Exist
3168 	 * - Be higher level qgroups.
3169 	 */
3170 	for (int i = 0; i < inherit->num_qgroups; i++) {
3171 		struct btrfs_qgroup *qgroup;
3172 		u64 qgroupid = inherit->qgroups[i];
3173 
3174 		if (btrfs_qgroup_level(qgroupid) == 0)
3175 			return -EINVAL;
3176 
3177 		spin_lock(&fs_info->qgroup_lock);
3178 		qgroup = find_qgroup_rb(fs_info, qgroupid);
3179 		if (!qgroup) {
3180 			spin_unlock(&fs_info->qgroup_lock);
3181 			return -ENOENT;
3182 		}
3183 		spin_unlock(&fs_info->qgroup_lock);
3184 	}
3185 	return 0;
3186 }
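/*
 * Editor's sketch (hypothetical values): a minimal argument passing the
 * checks above would set num_qgroups = 1, num_ref_copies and
 * num_excl_copies to 0, qgroups[0] to an existing level-1 qgroup such as
 * (1ULL << 48) | 100, and size == struct_size(inherit, qgroups, 1).
 */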
3187 
3188 static int qgroup_auto_inherit(struct btrfs_fs_info *fs_info,
3189 			       u64 inode_rootid,
3190 			       struct btrfs_qgroup_inherit **inherit)
3191 {
3192 	int i = 0;
3193 	u64 num_qgroups = 0;
3194 	struct btrfs_qgroup *inode_qg;
3195 	struct btrfs_qgroup_list *qg_list;
3196 	struct btrfs_qgroup_inherit *res;
3197 	size_t struct_sz;
3198 	u64 *qgids;
3199 
3200 	if (*inherit)
3201 		return -EEXIST;
3202 
3203 	inode_qg = find_qgroup_rb(fs_info, inode_rootid);
3204 	if (!inode_qg)
3205 		return -ENOENT;
3206 
3207 	num_qgroups = list_count_nodes(&inode_qg->groups);
3208 
3209 	if (!num_qgroups)
3210 		return 0;
3211 
3212 	struct_sz = struct_size(res, qgroups, num_qgroups);
3213 	if (struct_sz == SIZE_MAX)
3214 		return -ERANGE;
3215 
3216 	res = kzalloc(struct_sz, GFP_NOFS);
3217 	if (!res)
3218 		return -ENOMEM;
3219 	res->num_qgroups = num_qgroups;
3220 	qgids = res->qgroups;
3221 
3222 	list_for_each_entry(qg_list, &inode_qg->groups, next_group)
3223 		qgids[i++] = qg_list->group->qgroupid;
3224 
3225 	*inherit = res;
3226 	return 0;
3227 }
3228 
3229 /*
3230  * Check if we can skip rescan when inheriting qgroups.  If @src has a single
3231  * @parent, and that @parent owns all its bytes exclusively, we can skip
3232  * the full rescan, by just adding nodesize to the @parent's excl/rfer.
3233  *
3234  * Return <0 for fatal errors (like srcid/parentid has no qgroup).
3235  * Return 0 if a quick inherit is done.
3236  * Return >0 if a quick inherit is not possible, and a full rescan is needed.
3237  */
3238 static int qgroup_snapshot_quick_inherit(struct btrfs_fs_info *fs_info,
3239 					 u64 srcid, u64 parentid)
3240 {
3241 	struct btrfs_qgroup *src;
3242 	struct btrfs_qgroup *parent;
3243 	struct btrfs_qgroup_list *list;
3244 	int nr_parents = 0;
3245 
3246 	src = find_qgroup_rb(fs_info, srcid);
3247 	if (!src)
3248 		return -ENOENT;
3249 	parent = find_qgroup_rb(fs_info, parentid);
3250 	if (!parent)
3251 		return -ENOENT;
3252 
3253 	/*
3254 	 * Source has no parent qgroup, but our new qgroup would have one.
3255 	 * Qgroup numbers would become inconsistent.
3256 	 */
3257 	if (list_empty(&src->groups))
3258 		return 1;
3259 
3260 	list_for_each_entry(list, &src->groups, next_group) {
3261 		/* The parent is not the same, quick update is not possible. */
3262 		if (list->group->qgroupid != parentid)
3263 			return 1;
3264 		nr_parents++;
3265 		/*
3266 		 * More than one parent qgroup, we can't be sure about accounting
3267 		 * consistency.
3268 		 */
3269 		if (nr_parents > 1)
3270 			return 1;
3271 	}
3272 
3273 	/*
3274 	 * The parent does not own all its bytes exclusively.  We're not sure
3275 	 * if the source has any bytes not fully owned by the parent.
3276 	 */
3277 	if (parent->excl != parent->rfer)
3278 		return 1;
3279 
3280 	parent->excl += fs_info->nodesize;
3281 	parent->rfer += fs_info->nodesize;
3282 	return 0;
3283 }
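/*
 * Editor's illustration (hypothetical ids): snapshotting subvolume 257,
 * whose only parent is 1/100 and where 1/100 has excl == rfer, only adds
 * the new root node to the accounting: both excl and rfer of 1/100 grow
 * by nodesize and no full rescan is needed (return 0 above).
 */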
3284 
3285 /*
3286  * Copy the accounting information between qgroups. This is necessary
3287  * when a snapshot or a subvolume is created. Throwing an error will
3288  * cause a transaction abort, so we take extra care here to only error
3289  * out when a read-only fs is a reasonable outcome.
3290  */
3291 int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
3292 			 u64 objectid, u64 inode_rootid,
3293 			 struct btrfs_qgroup_inherit *inherit)
3294 {
3295 	int ret = 0;
3296 	u64 *i_qgroups;
3297 	bool committing = false;
3298 	struct btrfs_fs_info *fs_info = trans->fs_info;
3299 	struct btrfs_root *quota_root;
3300 	struct btrfs_qgroup *srcgroup;
3301 	struct btrfs_qgroup *dstgroup;
3302 	struct btrfs_qgroup *prealloc;
3303 	struct btrfs_qgroup_list **qlist_prealloc = NULL;
3304 	bool free_inherit = false;
3305 	bool need_rescan = false;
3306 	u32 level_size = 0;
3307 	u64 nums;
3308 
3309 	if (!btrfs_qgroup_enabled(fs_info))
3310 		return 0;
3311 
3312 	prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
3313 	if (!prealloc)
3314 		return -ENOMEM;
3315 
3316 	/*
3317 	 * There are only two callers of this function.
3318 	 *
3319 	 * One is in create_subvol() in the ioctl context, which needs to hold
3320 	 * the qgroup_ioctl_lock.
3321 	 *
3322 	 * The other is in create_pending_snapshot(), where no other qgroup
3323 	 * code can modify the fs as they all need to either start a new trans
3324 	 * or hold a trans handle, thus we don't need to hold the
3325 	 * qgroup_ioctl_lock.
3326 	 * This avoids a long and complex lock chain and makes lockdep happy.
3327 	 */
3328 	spin_lock(&fs_info->trans_lock);
3329 	if (trans->transaction->state == TRANS_STATE_COMMIT_DOING)
3330 		committing = true;
3331 	spin_unlock(&fs_info->trans_lock);
3332 
3333 	if (!committing)
3334 		mutex_lock(&fs_info->qgroup_ioctl_lock);
3335 
3336 	quota_root = fs_info->quota_root;
3337 	if (!quota_root) {
3338 		ret = -EINVAL;
3339 		goto out;
3340 	}
3341 
3342 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE && !inherit) {
3343 		ret = qgroup_auto_inherit(fs_info, inode_rootid, &inherit);
3344 		if (ret)
3345 			goto out;
3346 		free_inherit = true;
3347 	}
3348 
3349 	if (inherit) {
3350 		i_qgroups = (u64 *)(inherit + 1);
3351 		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
3352 		       2 * inherit->num_excl_copies;
3353 		for (int i = 0; i < nums; i++) {
3354 			srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
3355 
3356 			/*
3357 			 * Zero out invalid groups so we can ignore
3358 			 * them later.
3359 			 */
3360 			if (!srcgroup ||
3361 			    ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
3362 				*i_qgroups = 0ULL;
3363 
3364 			++i_qgroups;
3365 		}
3366 	}
3367 
3368 	/*
3369 	 * create a tracking group for the subvol itself
3370 	 */
3371 	ret = add_qgroup_item(trans, quota_root, objectid);
3372 	if (ret)
3373 		goto out;
3374 
3375 	/*
3376 	 * add qgroup to all inherited groups
3377 	 */
3378 	if (inherit) {
3379 		i_qgroups = (u64 *)(inherit + 1);
3380 		for (int i = 0; i < inherit->num_qgroups; i++, i_qgroups++) {
3381 			if (*i_qgroups == 0)
3382 				continue;
3383 			ret = add_qgroup_relation_item(trans, objectid,
3384 						       *i_qgroups);
3385 			if (ret && ret != -EEXIST)
3386 				goto out;
3387 			ret = add_qgroup_relation_item(trans, *i_qgroups,
3388 						       objectid);
3389 			if (ret && ret != -EEXIST)
3390 				goto out;
3391 		}
3392 		ret = 0;
3393 
3394 		qlist_prealloc = kcalloc(inherit->num_qgroups,
3395 					 sizeof(struct btrfs_qgroup_list *),
3396 					 GFP_NOFS);
3397 		if (!qlist_prealloc) {
3398 			ret = -ENOMEM;
3399 			goto out;
3400 		}
3401 		for (int i = 0; i < inherit->num_qgroups; i++) {
3402 			qlist_prealloc[i] = kzalloc(sizeof(struct btrfs_qgroup_list),
3403 						    GFP_NOFS);
3404 			if (!qlist_prealloc[i]) {
3405 				ret = -ENOMEM;
3406 				goto out;
3407 			}
3408 		}
3409 	}
3410 
3411 	spin_lock(&fs_info->qgroup_lock);
3412 
3413 	dstgroup = add_qgroup_rb(fs_info, prealloc, objectid);
3414 	prealloc = NULL;
3415 
3416 	if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
3417 		dstgroup->lim_flags = inherit->lim.flags;
3418 		dstgroup->max_rfer = inherit->lim.max_rfer;
3419 		dstgroup->max_excl = inherit->lim.max_excl;
3420 		dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
3421 		dstgroup->rsv_excl = inherit->lim.rsv_excl;
3422 
3423 		qgroup_dirty(fs_info, dstgroup);
3424 	}
3425 
3426 	if (srcid && btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL) {
3427 		srcgroup = find_qgroup_rb(fs_info, srcid);
3428 		if (!srcgroup)
3429 			goto unlock;
3430 
3431 		/*
3432 		 * We call inherit after we clone the root in order to make sure
3433 		 * our counts don't go crazy, so at this point the only
3434 		 * difference between the two roots should be the root node.
3435 		 */
3436 		level_size = fs_info->nodesize;
3437 		dstgroup->rfer = srcgroup->rfer;
3438 		dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
3439 		dstgroup->excl = level_size;
3440 		dstgroup->excl_cmpr = level_size;
3441 		srcgroup->excl = level_size;
3442 		srcgroup->excl_cmpr = level_size;
3443 
3444 		/* inherit the limit info */
3445 		dstgroup->lim_flags = srcgroup->lim_flags;
3446 		dstgroup->max_rfer = srcgroup->max_rfer;
3447 		dstgroup->max_excl = srcgroup->max_excl;
3448 		dstgroup->rsv_rfer = srcgroup->rsv_rfer;
3449 		dstgroup->rsv_excl = srcgroup->rsv_excl;
3450 
3451 		qgroup_dirty(fs_info, dstgroup);
3452 		qgroup_dirty(fs_info, srcgroup);
3453 
3454 		/*
3455 		 * If the source qgroup has a parent but the new one doesn't,
3456 		 * we need a full rescan.
3457 		 */
3458 		if (!inherit && !list_empty(&srcgroup->groups))
3459 			need_rescan = true;
3460 	}
3461 
3462 	if (!inherit)
3463 		goto unlock;
3464 
3465 	i_qgroups = (u64 *)(inherit + 1);
3466 	for (int i = 0; i < inherit->num_qgroups; i++) {
3467 		if (*i_qgroups) {
3468 			ret = add_relation_rb(fs_info, qlist_prealloc[i], objectid,
3469 					      *i_qgroups);
3470 			qlist_prealloc[i] = NULL;
3471 			if (ret)
3472 				goto unlock;
3473 		}
3474 		if (srcid) {
3475 			/* Check if we can do a quick inherit. */
3476 			ret = qgroup_snapshot_quick_inherit(fs_info, srcid, *i_qgroups);
3477 			if (ret < 0)
3478 				goto unlock;
3479 			if (ret > 0)
3480 				need_rescan = true;
3481 			ret = 0;
3482 		}
3483 		++i_qgroups;
3484 	}
3485 
3486 	for (int i = 0; i < inherit->num_ref_copies; i++, i_qgroups += 2) {
3487 		struct btrfs_qgroup *src;
3488 		struct btrfs_qgroup *dst;
3489 
3490 		if (!i_qgroups[0] || !i_qgroups[1])
3491 			continue;
3492 
3493 		src = find_qgroup_rb(fs_info, i_qgroups[0]);
3494 		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
3495 
3496 		if (!src || !dst) {
3497 			ret = -EINVAL;
3498 			goto unlock;
3499 		}
3500 
3501 		dst->rfer = src->rfer - level_size;
3502 		dst->rfer_cmpr = src->rfer_cmpr - level_size;
3503 
3504 		/* Manually tweaking numbers certainly needs a rescan */
3505 		need_rescan = true;
3506 	}
3507 	for (int i = 0; i < inherit->num_excl_copies; i++, i_qgroups += 2) {
3508 		struct btrfs_qgroup *src;
3509 		struct btrfs_qgroup *dst;
3510 
3511 		if (!i_qgroups[0] || !i_qgroups[1])
3512 			continue;
3513 
3514 		src = find_qgroup_rb(fs_info, i_qgroups[0]);
3515 		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
3516 
3517 		if (!src || !dst) {
3518 			ret = -EINVAL;
3519 			goto unlock;
3520 		}
3521 
3522 		dst->excl = src->excl + level_size;
3523 		dst->excl_cmpr = src->excl_cmpr + level_size;
3524 		need_rescan = true;
3525 	}
3526 
3527 unlock:
3528 	spin_unlock(&fs_info->qgroup_lock);
3529 	if (!ret)
3530 		ret = btrfs_sysfs_add_one_qgroup(fs_info, dstgroup);
3531 out:
3532 	if (!committing)
3533 		mutex_unlock(&fs_info->qgroup_ioctl_lock);
3534 	if (need_rescan)
3535 		qgroup_mark_inconsistent(fs_info, "qgroup inherit needs a rescan");
3536 	if (qlist_prealloc) {
3537 		for (int i = 0; i < inherit->num_qgroups; i++)
3538 			kfree(qlist_prealloc[i]);
3539 		kfree(qlist_prealloc);
3540 	}
3541 	if (free_inherit)
3542 		kfree(inherit);
3543 	kfree(prealloc);
3544 	return ret;
3545 }
3546 
qgroup_check_limits(const struct btrfs_qgroup * qg,u64 num_bytes)3547 static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes)
3548 {
3549 	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
3550 	    qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer)
3551 		return false;
3552 
3553 	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
3554 	    qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl)
3555 		return false;
3556 
3557 	return true;
3558 }
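
/*
 * Illustrative sketch (not part of the upstream file): how the limit check
 * above plays out in practice.  With max_rfer = 1 MiB, rfer = 768 KiB and
 * 128 KiB already reserved, a further 256 KiB request is rejected because
 * 128K + 768K + 256K > 1M.  The helper below is hypothetical and only
 * mirrors the arithmetic of qgroup_check_limits().
 */
static inline bool example_would_exceed_rfer(u64 reserved, u64 rfer,
					     u64 request, u64 max_rfer)
{
	/* Reserved-but-unwritten space counts against the limit too. */
	return reserved + rfer + request > max_rfer;
}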
3559 
qgroup_reserve(struct btrfs_root * root,u64 num_bytes,bool enforce,enum btrfs_qgroup_rsv_type type)3560 static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
3561 			  enum btrfs_qgroup_rsv_type type)
3562 {
3563 	struct btrfs_qgroup *qgroup;
3564 	struct btrfs_fs_info *fs_info = root->fs_info;
3565 	u64 ref_root = btrfs_root_id(root);
3566 	int ret = 0;
3567 	LIST_HEAD(qgroup_list);
3568 
3569 	if (!btrfs_is_fstree(ref_root))
3570 		return 0;
3571 
3572 	if (num_bytes == 0)
3573 		return 0;
3574 
3575 	if (test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags) &&
3576 	    capable(CAP_SYS_RESOURCE))
3577 		enforce = false;
3578 
3579 	spin_lock(&fs_info->qgroup_lock);
3580 	if (!fs_info->quota_root)
3581 		goto out;
3582 
3583 	qgroup = find_qgroup_rb(fs_info, ref_root);
3584 	if (!qgroup)
3585 		goto out;
3586 
3587 	qgroup_iterator_add(&qgroup_list, qgroup);
3588 	list_for_each_entry(qgroup, &qgroup_list, iterator) {
3589 		struct btrfs_qgroup_list *glist;
3590 
3591 		if (enforce && !qgroup_check_limits(qgroup, num_bytes)) {
3592 			ret = -EDQUOT;
3593 			goto out;
3594 		}
3595 
3596 		list_for_each_entry(glist, &qgroup->groups, next_group)
3597 			qgroup_iterator_add(&qgroup_list, glist->group);
3598 	}
3599 
3600 	ret = 0;
3601 	/*
3602 	 * no limits exceeded, now record the reservation into all qgroups
3603 	 */
3604 	list_for_each_entry(qgroup, &qgroup_list, iterator)
3605 		qgroup_rsv_add(fs_info, qgroup, num_bytes, type);
3606 
3607 out:
3608 	qgroup_iterator_clean(&qgroup_list);
3609 	spin_unlock(&fs_info->qgroup_lock);
3610 	return ret;
3611 }
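
/*
 * Illustrative sketch (not part of the upstream file): the iterator
 * pattern used above.  The list grows while list_for_each_entry() walks
 * it, so the level 0 qgroup and every ancestor reachable through ->groups
 * gets visited; qgroup_iterator_add() is expected to skip entries already
 * queued, so each qgroup is processed once.  Like the callers in this
 * file, this would run with fs_info->qgroup_lock held.
 */
static void example_walk_qgroup_ancestors(struct btrfs_qgroup *qgroup)
{
	struct btrfs_qgroup *qg;
	LIST_HEAD(walk);

	qgroup_iterator_add(&walk, qgroup);
	list_for_each_entry(qg, &walk, iterator) {
		struct btrfs_qgroup_list *glist;

		/* ... per-qgroup work goes here ... */
		list_for_each_entry(glist, &qg->groups, next_group)
			qgroup_iterator_add(&walk, glist->group);
	}
	qgroup_iterator_clean(&walk);
}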
3612 
3613 /*
3614  * Free @num_bytes of reserved space with @type for qgroup.  (Normally level 0
3615  * qgroup).
3616  *
3617  * Will handle all higher level qgroups too.
3618  *
3619  * NOTE: If @num_bytes is (u64)-1, this means to free all bytes of this qgroup.
3620  * This special case is only used for META_PERTRANS type.
3621  */
btrfs_qgroup_free_refroot(struct btrfs_fs_info * fs_info,u64 ref_root,u64 num_bytes,enum btrfs_qgroup_rsv_type type)3622 void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
3623 			       u64 ref_root, u64 num_bytes,
3624 			       enum btrfs_qgroup_rsv_type type)
3625 {
3626 	struct btrfs_qgroup *qgroup;
3627 	LIST_HEAD(qgroup_list);
3628 
3629 	if (!btrfs_is_fstree(ref_root))
3630 		return;
3631 
3632 	if (num_bytes == 0)
3633 		return;
3634 
3635 	if (num_bytes == (u64)-1 && type != BTRFS_QGROUP_RSV_META_PERTRANS) {
3636 		WARN(1, "%s: Invalid type to free", __func__);
3637 		return;
3638 	}
3639 	spin_lock(&fs_info->qgroup_lock);
3640 
3641 	if (!fs_info->quota_root)
3642 		goto out;
3643 
3644 	qgroup = find_qgroup_rb(fs_info, ref_root);
3645 	if (!qgroup)
3646 		goto out;
3647 
3648 	if (num_bytes == (u64)-1)
3649 		/*
3650 		 * We're freeing all pertrans rsv, get reserved value from
3651 		 * We're freeing all pertrans rsv, get the reserved value from
3652 		 * the level 0 qgroup as the real num_bytes to free.
3653 		num_bytes = qgroup->rsv.values[type];
3654 
3655 	qgroup_iterator_add(&qgroup_list, qgroup);
3656 	list_for_each_entry(qgroup, &qgroup_list, iterator) {
3657 		struct btrfs_qgroup_list *glist;
3658 
3659 		qgroup_rsv_release(fs_info, qgroup, num_bytes, type);
3660 		list_for_each_entry(glist, &qgroup->groups, next_group) {
3661 			qgroup_iterator_add(&qgroup_list, glist->group);
3662 		}
3663 	}
3664 out:
3665 	qgroup_iterator_clean(&qgroup_list);
3666 	spin_unlock(&fs_info->qgroup_lock);
3667 }
3668 
3669 /*
3670  * Check if the leaf is the last leaf, which means all node pointers
3671  * are at their last position.
3672  */
is_last_leaf(struct btrfs_path * path)3673 static bool is_last_leaf(struct btrfs_path *path)
3674 {
3675 	int i;
3676 
3677 	for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
3678 		if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1)
3679 			return false;
3680 	}
3681 	return true;
3682 }
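
/*
 * Illustrative note (not part of the upstream file): if every
 * path->slots[i] indexes the final pointer of path->nodes[i], then no node
 * on the path has a right sibling left to descend into, so the leaf at
 * level 0 is the last leaf of the tree.
 */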
3683 
3684 /*
3685  * Returns < 0 on error, 0 when more leaves are to be scanned.
3686  * Returns 1 when done.
3687  */
qgroup_rescan_leaf(struct btrfs_trans_handle * trans,struct btrfs_path * path)3688 static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
3689 			      struct btrfs_path *path)
3690 {
3691 	struct btrfs_fs_info *fs_info = trans->fs_info;
3692 	struct btrfs_root *extent_root;
3693 	struct btrfs_key found;
3694 	struct extent_buffer *scratch_leaf = NULL;
3695 	u64 num_bytes;
3696 	bool done;
3697 	int slot;
3698 	int ret;
3699 
3700 	if (!btrfs_qgroup_full_accounting(fs_info))
3701 		return 1;
3702 
3703 	mutex_lock(&fs_info->qgroup_rescan_lock);
3704 	extent_root = btrfs_extent_root(fs_info,
3705 				fs_info->qgroup_rescan_progress.objectid);
3706 	ret = btrfs_search_slot_for_read(extent_root,
3707 					 &fs_info->qgroup_rescan_progress,
3708 					 path, 1, 0);
3709 
3710 	btrfs_debug(fs_info,
3711 		"current progress key (%llu %u %llu), search_slot ret %d",
3712 		fs_info->qgroup_rescan_progress.objectid,
3713 		fs_info->qgroup_rescan_progress.type,
3714 		fs_info->qgroup_rescan_progress.offset, ret);
3715 
3716 	if (ret) {
3717 		/*
3718 		 * The rescan is about to end, we will not be scanning any
3719 		 * further blocks. We cannot unset the RESCAN flag here, because
3720 		 * we want to commit the transaction if everything went well.
3721 		 * To make the live accounting work in this phase, we set our
3722 		 * scan progress pointer such that every real extent objectid
3723 		 * will be smaller.
3724 		 */
3725 		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
3726 		btrfs_release_path(path);
3727 		mutex_unlock(&fs_info->qgroup_rescan_lock);
3728 		return ret;
3729 	}
3730 	done = is_last_leaf(path);
3731 
3732 	btrfs_item_key_to_cpu(path->nodes[0], &found,
3733 			      btrfs_header_nritems(path->nodes[0]) - 1);
3734 	fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;
3735 
3736 	scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]);
3737 	if (!scratch_leaf) {
3738 		ret = -ENOMEM;
3739 		mutex_unlock(&fs_info->qgroup_rescan_lock);
3740 		goto out;
3741 	}
3742 	slot = path->slots[0];
3743 	btrfs_release_path(path);
3744 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3745 
3746 	for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
3747 		struct btrfs_backref_walk_ctx ctx = { 0 };
3748 
3749 		btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
3750 		if (found.type != BTRFS_EXTENT_ITEM_KEY &&
3751 		    found.type != BTRFS_METADATA_ITEM_KEY)
3752 			continue;
3753 		if (found.type == BTRFS_METADATA_ITEM_KEY)
3754 			num_bytes = fs_info->nodesize;
3755 		else
3756 			num_bytes = found.offset;
3757 
3758 		ctx.bytenr = found.objectid;
3759 		ctx.fs_info = fs_info;
3760 
3761 		ret = btrfs_find_all_roots(&ctx, false);
3762 		if (ret < 0)
3763 			goto out;
3764 		/* For rescan, just pass old_roots as NULL */
3765 		ret = btrfs_qgroup_account_extent(trans, found.objectid,
3766 						  num_bytes, NULL, ctx.roots);
3767 		if (ret < 0)
3768 			goto out;
3769 	}
3770 out:
3771 	if (scratch_leaf)
3772 		free_extent_buffer(scratch_leaf);
3773 
3774 	if (done && !ret) {
3775 		ret = 1;
3776 		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
3777 	}
3778 	return ret;
3779 }
3780 
rescan_should_stop(struct btrfs_fs_info * fs_info)3781 static bool rescan_should_stop(struct btrfs_fs_info *fs_info)
3782 {
3783 	if (btrfs_fs_closing(fs_info))
3784 		return true;
3785 	if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
3786 		return true;
3787 	if (!btrfs_qgroup_enabled(fs_info))
3788 		return true;
3789 	if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN)
3790 		return true;
3791 	return false;
3792 }
3793 
btrfs_qgroup_rescan_worker(struct btrfs_work * work)3794 static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
3795 {
3796 	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
3797 						     qgroup_rescan_work);
3798 	struct btrfs_path *path;
3799 	struct btrfs_trans_handle *trans = NULL;
3800 	int ret = 0;
3801 	bool stopped = false;
3802 	bool did_leaf_rescans = false;
3803 
3804 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
3805 		return;
3806 
3807 	path = btrfs_alloc_path();
3808 	if (!path) {
3809 		ret = -ENOMEM;
3810 		goto out;
3811 	}
3812 	/*
3813 	 * Rescan should only search the commit root, and any later difference
3814 	 * will be recorded by the qgroup accounting code.
3815 	 */
3816 	path->search_commit_root = 1;
3817 	path->skip_locking = 1;
3818 
3819 	while (!ret && !(stopped = rescan_should_stop(fs_info))) {
3820 		trans = btrfs_start_transaction(fs_info->fs_root, 0);
3821 		if (IS_ERR(trans)) {
3822 			ret = PTR_ERR(trans);
3823 			break;
3824 		}
3825 
3826 		ret = qgroup_rescan_leaf(trans, path);
3827 		did_leaf_rescans = true;
3828 
3829 		if (ret > 0)
3830 			btrfs_commit_transaction(trans);
3831 		else
3832 			btrfs_end_transaction(trans);
3833 	}
3834 
3835 out:
3836 	btrfs_free_path(path);
3837 
3838 	mutex_lock(&fs_info->qgroup_rescan_lock);
3839 	if (ret > 0 &&
3840 	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
3841 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
3842 	} else if (ret < 0 || stopped) {
3843 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
3844 	}
3845 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3846 
3847 	/*
3848 	 * Only update status, since the previous part has already updated the
3849 	 * qgroup info, and only if we did any actual work. This also prevents
3850 	 * race with a concurrent quota disable, which has already set
3851 	 * fs_info->quota_root to NULL and cleared BTRFS_FS_QUOTA_ENABLED at
3852 	 * btrfs_quota_disable().
3853 	 */
3854 	if (did_leaf_rescans) {
3855 		trans = btrfs_start_transaction(fs_info->quota_root, 1);
3856 		if (IS_ERR(trans)) {
3857 			ret = PTR_ERR(trans);
3858 			trans = NULL;
3859 			btrfs_err(fs_info,
3860 				  "fail to start transaction for status update: %d",
3861 				  ret);
3862 		}
3863 	} else {
3864 		trans = NULL;
3865 	}
3866 
3867 	mutex_lock(&fs_info->qgroup_rescan_lock);
3868 	if (!stopped ||
3869 	    fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN)
3870 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3871 	if (trans) {
3872 		int ret2 = update_qgroup_status_item(trans);
3873 
3874 		if (ret2 < 0) {
3875 			ret = ret2;
3876 			btrfs_err(fs_info, "fail to update qgroup status: %d", ret);
3877 		}
3878 	}
3879 	fs_info->qgroup_rescan_running = false;
3880 	fs_info->qgroup_flags &= ~BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN;
3881 	complete_all(&fs_info->qgroup_rescan_completion);
3882 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3883 
3884 	if (!trans)
3885 		return;
3886 
3887 	btrfs_end_transaction(trans);
3888 
3889 	if (stopped) {
3890 		btrfs_info(fs_info, "qgroup scan paused");
3891 	} else if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN) {
3892 		btrfs_info(fs_info, "qgroup scan cancelled");
3893 	} else if (ret >= 0) {
3894 		btrfs_info(fs_info, "qgroup scan completed%s",
3895 			ret > 0 ? " (inconsistency flag cleared)" : "");
3896 	} else {
3897 		btrfs_err(fs_info, "qgroup scan failed with %d", ret);
3898 	}
3899 }
3900 
3901 /*
3902  * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
3903  * memory required for the rescan context.
3904  */
3905 static int
qgroup_rescan_init(struct btrfs_fs_info * fs_info,u64 progress_objectid,int init_flags)3906 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
3907 		   int init_flags)
3908 {
3909 	int ret = 0;
3910 
3911 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) {
3912 		btrfs_warn(fs_info, "qgroup rescan init failed, running in simple mode");
3913 		return -EINVAL;
3914 	}
3915 
3916 	if (!init_flags) {
3917 		/* we're resuming qgroup rescan at mount time */
3918 		if (!(fs_info->qgroup_flags &
3919 		      BTRFS_QGROUP_STATUS_FLAG_RESCAN)) {
3920 			btrfs_debug(fs_info,
3921 			"qgroup rescan init failed, qgroup rescan is not queued");
3922 			ret = -EINVAL;
3923 		} else if (!(fs_info->qgroup_flags &
3924 			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
3925 			btrfs_debug(fs_info,
3926 			"qgroup rescan init failed, qgroup is not enabled");
3927 			ret = -ENOTCONN;
3928 		}
3929 
3930 		if (ret)
3931 			return ret;
3932 	}
3933 
3934 	mutex_lock(&fs_info->qgroup_rescan_lock);
3935 
3936 	if (init_flags) {
3937 		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
3938 			ret = -EINPROGRESS;
3939 		} else if (!(fs_info->qgroup_flags &
3940 			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
3941 			btrfs_debug(fs_info,
3942 			"qgroup rescan init failed, qgroup is not enabled");
3943 			ret = -ENOTCONN;
3944 		} else if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED) {
3945 			/* Quota disable is in progress */
3946 			ret = -EBUSY;
3947 		}
3948 
3949 		if (ret) {
3950 			mutex_unlock(&fs_info->qgroup_rescan_lock);
3951 			return ret;
3952 		}
3953 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3954 	}
3955 
3956 	memset(&fs_info->qgroup_rescan_progress, 0,
3957 		sizeof(fs_info->qgroup_rescan_progress));
3958 	fs_info->qgroup_flags &= ~(BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN |
3959 				   BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING);
3960 	fs_info->qgroup_rescan_progress.objectid = progress_objectid;
3961 	init_completion(&fs_info->qgroup_rescan_completion);
3962 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3963 
3964 	btrfs_init_work(&fs_info->qgroup_rescan_work,
3965 			btrfs_qgroup_rescan_worker, NULL);
3966 	return 0;
3967 }
3968 
3969 static void
qgroup_rescan_zero_tracking(struct btrfs_fs_info * fs_info)3970 qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
3971 {
3972 	struct rb_node *n;
3973 	struct btrfs_qgroup *qgroup;
3974 
3975 	spin_lock(&fs_info->qgroup_lock);
3976 	/* clear all current qgroup tracking information */
3977 	for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
3978 		qgroup = rb_entry(n, struct btrfs_qgroup, node);
3979 		qgroup->rfer = 0;
3980 		qgroup->rfer_cmpr = 0;
3981 		qgroup->excl = 0;
3982 		qgroup->excl_cmpr = 0;
3983 		qgroup_dirty(fs_info, qgroup);
3984 	}
3985 	spin_unlock(&fs_info->qgroup_lock);
3986 }
3987 
3988 int
btrfs_qgroup_rescan(struct btrfs_fs_info * fs_info)3989 btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
3990 {
3991 	int ret = 0;
3992 
3993 	ret = qgroup_rescan_init(fs_info, 0, 1);
3994 	if (ret)
3995 		return ret;
3996 
3997 	/*
3998 	 * We have set the rescan_progress to 0, which means no more
3999 	 * delayed refs will be accounted by btrfs_qgroup_account_ref.
4000 	 * However, a btrfs_qgroup_account_ref call may already be past its
4001 	 * btrfs_find_all_roots() lookup, in which case it would still do the
4002 	 * accounting.
4003 	 * To solve this, we're committing the transaction, which will
4004 	 * ensure we run all delayed refs and only after that, we are
4005 	 * going to clear all tracking information for a clean start.
4006 	 */
4007 
4008 	ret = btrfs_commit_current_transaction(fs_info->fs_root);
4009 	if (ret) {
4010 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
4011 		return ret;
4012 	}
4013 
4014 	qgroup_rescan_zero_tracking(fs_info);
4015 
4016 	mutex_lock(&fs_info->qgroup_rescan_lock);
4017 	/*
4018 	 * The rescan worker is only for full accounting qgroups, check if it's
4019 	 * enabled as it is pointless to queue it otherwise. A concurrent quota
4020 	 * disable may also have just cleared BTRFS_FS_QUOTA_ENABLED.
4021 	 */
4022 	if (btrfs_qgroup_full_accounting(fs_info)) {
4023 		fs_info->qgroup_rescan_running = true;
4024 		btrfs_queue_work(fs_info->qgroup_rescan_workers,
4025 				 &fs_info->qgroup_rescan_work);
4026 	} else {
4027 		ret = -ENOTCONN;
4028 	}
4029 	mutex_unlock(&fs_info->qgroup_rescan_lock);
4030 
4031 	return ret;
4032 }
4033 
btrfs_qgroup_wait_for_completion(struct btrfs_fs_info * fs_info,bool interruptible)4034 int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
4035 				     bool interruptible)
4036 {
4037 	int running;
4038 	int ret = 0;
4039 
4040 	mutex_lock(&fs_info->qgroup_rescan_lock);
4041 	running = fs_info->qgroup_rescan_running;
4042 	mutex_unlock(&fs_info->qgroup_rescan_lock);
4043 
4044 	if (!running)
4045 		return 0;
4046 
4047 	if (interruptible)
4048 		ret = wait_for_completion_interruptible(
4049 					&fs_info->qgroup_rescan_completion);
4050 	else
4051 		wait_for_completion(&fs_info->qgroup_rescan_completion);
4052 
4053 	return ret;
4054 }
4055 
4056 /*
4057  * This is only called from open_ctree() where we're still single threaded,
4058  * thus locking is omitted here.
4059  */
4060 void
btrfs_qgroup_rescan_resume(struct btrfs_fs_info * fs_info)4061 btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
4062 {
4063 	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
4064 		mutex_lock(&fs_info->qgroup_rescan_lock);
4065 		fs_info->qgroup_rescan_running = true;
4066 		btrfs_queue_work(fs_info->qgroup_rescan_workers,
4067 				 &fs_info->qgroup_rescan_work);
4068 		mutex_unlock(&fs_info->qgroup_rescan_lock);
4069 	}
4070 }
4071 
4072 #define rbtree_iterate_from_safe(node, next, start)				\
4073        for (node = start; node && ({ next = rb_next(node); 1;}); node = next)
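
/*
 * Illustrative sketch (not part of the upstream file): the macro above
 * stays safe even if the loop body erases @node from the tree, because
 * @next is sampled before the body runs.  The helper below is
 * hypothetical.
 */
static void example_erase_from(struct rb_root *root, struct rb_node *start)
{
	struct rb_node *node;
	struct rb_node *next;

	rbtree_iterate_from_safe(node, next, start)
		rb_erase(node, root);	/* @next is already saved. */
}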
4074 
qgroup_unreserve_range(struct btrfs_inode * inode,struct extent_changeset * reserved,u64 start,u64 len)4075 static int qgroup_unreserve_range(struct btrfs_inode *inode,
4076 				  struct extent_changeset *reserved, u64 start,
4077 				  u64 len)
4078 {
4079 	struct rb_node *node;
4080 	struct rb_node *next;
4081 	struct ulist_node *entry;
4082 	int ret = 0;
4083 
4084 	node = reserved->range_changed.root.rb_node;
4085 	if (!node)
4086 		return 0;
4087 	while (node) {
4088 		entry = rb_entry(node, struct ulist_node, rb_node);
4089 		if (entry->val < start)
4090 			node = node->rb_right;
4091 		else
4092 			node = node->rb_left;
4093 	}
4094 
4095 	if (entry->val > start && rb_prev(&entry->rb_node))
4096 		entry = rb_entry(rb_prev(&entry->rb_node), struct ulist_node,
4097 				 rb_node);
4098 
4099 	rbtree_iterate_from_safe(node, next, &entry->rb_node) {
4100 		u64 entry_start;
4101 		u64 entry_end;
4102 		u64 entry_len;
4103 		int clear_ret;
4104 
4105 		entry = rb_entry(node, struct ulist_node, rb_node);
4106 		entry_start = entry->val;
4107 		entry_end = entry->aux;
4108 		entry_len = entry_end - entry_start + 1;
4109 
4110 		if (entry_start >= start + len)
4111 			break;
4112 		if (entry_start + entry_len <= start)
4113 			continue;
4114 		/*
4115 		 * Now the entry is in [start, start + len), revert the
4116 		 * EXTENT_QGROUP_RESERVED bit.
4117 		 */
4118 		clear_ret = btrfs_clear_extent_bit(&inode->io_tree, entry_start, entry_end,
4119 						   EXTENT_QGROUP_RESERVED, NULL);
4120 		if (!ret && clear_ret < 0)
4121 			ret = clear_ret;
4122 
4123 		ulist_del(&reserved->range_changed, entry->val, entry->aux);
4124 		if (likely(reserved->bytes_changed >= entry_len)) {
4125 			reserved->bytes_changed -= entry_len;
4126 		} else {
4127 			WARN_ON(1);
4128 			reserved->bytes_changed = 0;
4129 		}
4130 	}
4131 
4132 	return ret;
4133 }
4134 
4135 /*
4136  * Try to free some space for qgroup.
4137  *
4138  * For qgroup, there are only 3 ways to free qgroup space:
4139  * - Flush nodatacow write
4140  *   Any nodatacow write will free its reserved data space at run_delalloc_range().
4141  *   In theory, we should only flush nodatacow inodes, but it's not yet
4142  *   possible, so we need to flush the whole root.
4143  *
4144  * - Wait for ordered extents
4145  *   When ordered extents are finished, their reserved metadata is finally
4146  *   converted to per_trans status, which can be freed by later commit
4147  *   transaction.
4148  *
4149  * - Commit transaction
4150  *   This would free the meta_per_trans space.
4151  *   In theory this shouldn't provide much space, but any more qgroup space
4152  *   In theory this shouldn't provide much space, but every extra bit of
4153  *   qgroup space helps.
try_flush_qgroup(struct btrfs_root * root)4154 static int try_flush_qgroup(struct btrfs_root *root)
4155 {
4156 	int ret;
4157 
4158 	/* Can't hold an open transaction or we run the risk of deadlocking. */
4159 	ASSERT(current->journal_info == NULL);
4160 	if (WARN_ON(current->journal_info))
4161 		return 0;
4162 
4163 	/*
4164 	 * We don't want to run flush again and again, so if there is a running
4165 	 * one, don't start a new flush; wait for it to finish and then return.
4166 	 */
4167 	if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
4168 		wait_event(root->qgroup_flush_wait,
4169 			!test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
4170 		return 0;
4171 	}
4172 
4173 	ret = btrfs_start_delalloc_snapshot(root, true);
4174 	if (ret < 0)
4175 		goto out;
4176 	btrfs_wait_ordered_extents(root, U64_MAX, NULL);
4177 
4178 	/*
4179 	 * After waiting for ordered extents run delayed iputs in order to free
4180 	 * space from unlinked files before committing the current transaction,
4181 	 * as ordered extents may have been holding the last reference of an
4182 	 * inode and they add a delayed iput when they complete.
4183 	 */
4184 	btrfs_run_delayed_iputs(root->fs_info);
4185 	btrfs_wait_on_delayed_iputs(root->fs_info);
4186 
4187 	ret = btrfs_commit_current_transaction(root);
4188 out:
4189 	clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state);
4190 	wake_up(&root->qgroup_flush_wait);
4191 	return ret;
4192 }
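
/*
 * Illustrative sketch (not part of the upstream file): the single-flusher
 * pattern used above, reduced to its generic shape.  The caller that wins
 * test_and_set_bit() does the work; all others sleep on the waitqueue
 * until the bit is cleared.  Names below are hypothetical.
 */
static void example_single_flusher(unsigned long *state, int bit,
				   wait_queue_head_t *wq)
{
	if (test_and_set_bit(bit, state)) {
		/* A flush is already running, piggyback on its result. */
		wait_event(*wq, !test_bit(bit, state));
		return;
	}
	/* ... perform the actual flush work here ... */
	clear_bit(bit, state);
	wake_up(wq);
}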
4193 
qgroup_reserve_data(struct btrfs_inode * inode,struct extent_changeset ** reserved_ret,u64 start,u64 len)4194 static int qgroup_reserve_data(struct btrfs_inode *inode,
4195 			struct extent_changeset **reserved_ret, u64 start,
4196 			u64 len)
4197 {
4198 	struct btrfs_root *root = inode->root;
4199 	struct extent_changeset *reserved;
4200 	bool new_reserved = false;
4201 	u64 orig_reserved;
4202 	u64 to_reserve;
4203 	int ret;
4204 
4205 	if (btrfs_qgroup_mode(root->fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4206 	    !btrfs_is_fstree(btrfs_root_id(root)) || len == 0)
4207 		return 0;
4208 
4209 	/* @reserved parameter is mandatory for qgroup */
4210 	if (WARN_ON(!reserved_ret))
4211 		return -EINVAL;
4212 	if (!*reserved_ret) {
4213 		new_reserved = true;
4214 		*reserved_ret = extent_changeset_alloc();
4215 		if (!*reserved_ret)
4216 			return -ENOMEM;
4217 	}
4218 	reserved = *reserved_ret;
4219 	/* Record already reserved space */
4220 	orig_reserved = reserved->bytes_changed;
4221 	ret = btrfs_set_record_extent_bits(&inode->io_tree, start,
4222 					   start + len - 1, EXTENT_QGROUP_RESERVED,
4223 					   reserved);
4224 
4225 	/* Newly reserved space */
4226 	to_reserve = reserved->bytes_changed - orig_reserved;
4227 	trace_btrfs_qgroup_reserve_data(&inode->vfs_inode, start, len,
4228 					to_reserve, QGROUP_RESERVE);
4229 	if (ret < 0)
4230 		goto out;
4231 	ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA);
4232 	if (ret < 0)
4233 		goto cleanup;
4234 
4235 	return ret;
4236 
4237 cleanup:
4238 	qgroup_unreserve_range(inode, reserved, start, len);
4239 out:
4240 	if (new_reserved) {
4241 		extent_changeset_free(reserved);
4242 		*reserved_ret = NULL;
4243 	}
4244 	return ret;
4245 }
4246 
4247 /*
4248  * Reserve qgroup space for range [start, start + len).
4249  *
4250  * This function will either reserve space from related qgroups or do nothing
4251  * if the range is already reserved.
4252  *
4253  * Return 0 for successful reservation
4254  * Return <0 for error (including -EDQUOT)
4255  *
4256  * NOTE: This function may sleep for memory allocation, dirty page flushing
4257  *	 and transaction commit, so the caller should not hold any locked dirty pages.
4258  */
btrfs_qgroup_reserve_data(struct btrfs_inode * inode,struct extent_changeset ** reserved_ret,u64 start,u64 len)4259 int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
4260 			struct extent_changeset **reserved_ret, u64 start,
4261 			u64 len)
4262 {
4263 	int ret;
4264 
4265 	ret = qgroup_reserve_data(inode, reserved_ret, start, len);
4266 	if (ret <= 0 && ret != -EDQUOT)
4267 		return ret;
4268 
4269 	ret = try_flush_qgroup(inode->root);
4270 	if (ret < 0)
4271 		return ret;
4272 	return qgroup_reserve_data(inode, reserved_ret, start, len);
4273 }
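
/*
 * Illustrative sketch (not part of the upstream file): the expected data
 * reservation lifecycle, with hypothetical @inode/@start/@len/@write_ok.
 * On a successful write the range is released (the accounting happens at
 * commit time); on failure it is freed back to the qgroups.
 */
static int example_data_rsv_lifecycle(struct btrfs_inode *inode,
				      u64 start, u64 len, bool write_ok)
{
	struct extent_changeset *reserved = NULL;
	int ret;

	ret = btrfs_qgroup_reserve_data(inode, &reserved, start, len);
	if (ret < 0)
		return ret;

	if (write_ok)
		ret = btrfs_qgroup_release_data(inode, start, len, NULL);
	else
		ret = btrfs_qgroup_free_data(inode, reserved, start, len, NULL);

	extent_changeset_free(reserved);
	return ret;
}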
4274 
4275 /* Free ranges specified by @reserved, normally in error path */
qgroup_free_reserved_data(struct btrfs_inode * inode,struct extent_changeset * reserved,u64 start,u64 len,u64 * freed_ret)4276 static int qgroup_free_reserved_data(struct btrfs_inode *inode,
4277 				     struct extent_changeset *reserved,
4278 				     u64 start, u64 len, u64 *freed_ret)
4279 {
4280 	struct btrfs_root *root = inode->root;
4281 	struct ulist_node *unode;
4282 	struct ulist_iterator uiter;
4283 	struct extent_changeset changeset;
4284 	u64 freed = 0;
4285 	int ret;
4286 
4287 	extent_changeset_init(&changeset);
4288 	len = round_up(start + len, root->fs_info->sectorsize);
4289 	start = round_down(start, root->fs_info->sectorsize);
4290 
4291 	ULIST_ITER_INIT(&uiter);
4292 	while ((unode = ulist_next(&reserved->range_changed, &uiter))) {
4293 		u64 range_start = unode->val;
4294 		/* unode->aux is the inclusive end */
4295 		u64 range_len = unode->aux - range_start + 1;
4296 		u64 free_start;
4297 		u64 free_len;
4298 
4299 		extent_changeset_release(&changeset);
4300 
4301 		/* Only free ranges within [start, start + len) */
4302 		if (range_start >= start + len ||
4303 		    range_start + range_len <= start)
4304 			continue;
4305 		free_start = max(range_start, start);
4306 		free_len = min(start + len, range_start + range_len) -
4307 			   free_start;
4308 		/*
4309 		 * TODO: Also modify reserved->ranges_reserved to reflect
4310 		 * the modification.
4311 		 *
4312 		 * However, as long as we free qgroup reserved space according to
4313 		 * EXTENT_QGROUP_RESERVED, we won't double free.
4314 		 * So there is no need to rush.
4315 		 */
4316 		ret = btrfs_clear_record_extent_bits(&inode->io_tree, free_start,
4317 						     free_start + free_len - 1,
4318 						     EXTENT_QGROUP_RESERVED,
4319 						     &changeset);
4320 		if (ret < 0)
4321 			goto out;
4322 		freed += changeset.bytes_changed;
4323 	}
4324 	btrfs_qgroup_free_refroot(root->fs_info, btrfs_root_id(root), freed,
4325 				  BTRFS_QGROUP_RSV_DATA);
4326 	if (freed_ret)
4327 		*freed_ret = freed;
4328 	ret = 0;
4329 out:
4330 	extent_changeset_release(&changeset);
4331 	return ret;
4332 }
4333 
__btrfs_qgroup_release_data(struct btrfs_inode * inode,struct extent_changeset * reserved,u64 start,u64 len,u64 * released,int free)4334 static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
4335 			struct extent_changeset *reserved, u64 start, u64 len,
4336 			u64 *released, int free)
4337 {
4338 	struct extent_changeset changeset;
4339 	int trace_op = QGROUP_RELEASE;
4340 	int ret;
4341 
4342 	if (btrfs_qgroup_mode(inode->root->fs_info) == BTRFS_QGROUP_MODE_DISABLED) {
4343 		return btrfs_clear_record_extent_bits(&inode->io_tree, start,
4344 						      start + len - 1,
4345 						      EXTENT_QGROUP_RESERVED, NULL);
4346 	}
4347 
4348 	/* In release case, we shouldn't have @reserved */
4349 	WARN_ON(!free && reserved);
4350 	if (free && reserved)
4351 		return qgroup_free_reserved_data(inode, reserved, start, len, released);
4352 	extent_changeset_init(&changeset);
4353 	ret = btrfs_clear_record_extent_bits(&inode->io_tree, start, start + len - 1,
4354 					     EXTENT_QGROUP_RESERVED, &changeset);
4355 	if (ret < 0)
4356 		goto out;
4357 
4358 	if (free)
4359 		trace_op = QGROUP_FREE;
4360 	trace_btrfs_qgroup_release_data(&inode->vfs_inode, start, len,
4361 					changeset.bytes_changed, trace_op);
4362 	if (free)
4363 		btrfs_qgroup_free_refroot(inode->root->fs_info,
4364 				btrfs_root_id(inode->root),
4365 				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
4366 	if (released)
4367 		*released = changeset.bytes_changed;
4368 out:
4369 	extent_changeset_release(&changeset);
4370 	return ret;
4371 }
4372 
4373 /*
4374  * Free a reserved space range from io_tree and related qgroups
4375  *
4376  * Should be called when a range of pages gets invalidated before reaching
4377  * disk, or for the error cleanup case.
4378  * If @reserved is given, only the reserved ranges in [@start, @start + @len)
4379  * will be freed.
4380  *
4381  * For data written to disk, use btrfs_qgroup_release_data().
4382  *
4383  * NOTE: This function may sleep for memory allocation.
4384  */
btrfs_qgroup_free_data(struct btrfs_inode * inode,struct extent_changeset * reserved,u64 start,u64 len,u64 * freed)4385 int btrfs_qgroup_free_data(struct btrfs_inode *inode,
4386 			   struct extent_changeset *reserved,
4387 			   u64 start, u64 len, u64 *freed)
4388 {
4389 	return __btrfs_qgroup_release_data(inode, reserved, start, len, freed, 1);
4390 }
4391 
4392 /*
4393  * Release a reserved space range from io_tree only.
4394  *
4395  * Should be called when a range of pages gets written to disk and the
4396  * corresponding FILE_EXTENT item is inserted into the corresponding root.
4397  *
4398  * Since the qgroup accounting framework only updates qgroup numbers at
4399  * commit_transaction() time, the reserved space shouldn't be freed from the
4400  * related qgroups.
4401  *
4402  * But we should release the range from the io_tree, to allow further writes
4403  * to be COWed.
4404  *
4405  * NOTE: This function may sleep for memory allocation.
4406  */
btrfs_qgroup_release_data(struct btrfs_inode * inode,u64 start,u64 len,u64 * released)4407 int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len, u64 *released)
4408 {
4409 	return __btrfs_qgroup_release_data(inode, NULL, start, len, released, 0);
4410 }
4411 
add_root_meta_rsv(struct btrfs_root * root,int num_bytes,enum btrfs_qgroup_rsv_type type)4412 static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes,
4413 			      enum btrfs_qgroup_rsv_type type)
4414 {
4415 	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
4416 	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
4417 		return;
4418 	if (num_bytes == 0)
4419 		return;
4420 
4421 	spin_lock(&root->qgroup_meta_rsv_lock);
4422 	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
4423 		root->qgroup_meta_rsv_prealloc += num_bytes;
4424 	else
4425 		root->qgroup_meta_rsv_pertrans += num_bytes;
4426 	spin_unlock(&root->qgroup_meta_rsv_lock);
4427 }
4428 
sub_root_meta_rsv(struct btrfs_root * root,int num_bytes,enum btrfs_qgroup_rsv_type type)4429 static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes,
4430 			     enum btrfs_qgroup_rsv_type type)
4431 {
4432 	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
4433 	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
4434 		return 0;
4435 	if (num_bytes == 0)
4436 		return 0;
4437 
4438 	spin_lock(&root->qgroup_meta_rsv_lock);
4439 	if (type == BTRFS_QGROUP_RSV_META_PREALLOC) {
4440 		num_bytes = min_t(u64, root->qgroup_meta_rsv_prealloc,
4441 				  num_bytes);
4442 		root->qgroup_meta_rsv_prealloc -= num_bytes;
4443 	} else {
4444 		num_bytes = min_t(u64, root->qgroup_meta_rsv_pertrans,
4445 				  num_bytes);
4446 		root->qgroup_meta_rsv_pertrans -= num_bytes;
4447 	}
4448 	spin_unlock(&root->qgroup_meta_rsv_lock);
4449 	return num_bytes;
4450 }
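
/*
 * Illustrative sketch (not part of the upstream file): the min_t() clamp
 * above means a release larger than what was recorded only drains the
 * counter to zero.  E.g. with 16K recorded, a 64K release yields 16K to
 * actually free rather than an underflow.  The helper is hypothetical.
 */
static u64 example_clamped_sub(u64 *counter, u64 num_bytes)
{
	num_bytes = min_t(u64, *counter, num_bytes);
	*counter -= num_bytes;
	return num_bytes;	/* The amount that may really be freed. */
}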
4451 
btrfs_qgroup_reserve_meta(struct btrfs_root * root,int num_bytes,enum btrfs_qgroup_rsv_type type,bool enforce)4452 int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
4453 			      enum btrfs_qgroup_rsv_type type, bool enforce)
4454 {
4455 	struct btrfs_fs_info *fs_info = root->fs_info;
4456 	int ret;
4457 
4458 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4459 	    !btrfs_is_fstree(btrfs_root_id(root)) || num_bytes == 0)
4460 		return 0;
4461 
4462 	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
4463 	trace_btrfs_qgroup_meta_reserve(root, (s64)num_bytes, type);
4464 	ret = qgroup_reserve(root, num_bytes, enforce, type);
4465 	if (ret < 0)
4466 		return ret;
4467 	/*
4468 	 * Record what we have reserved into root, to avoid an underflow
4469 	 * across a quota disable->enable cycle.
4470 	 *
4471 	 * In that case, we may try to free space we haven't reserved
4472 	 * (since quota was disabled), so record what we reserved into the
4473 	 * root and ensure a later release won't underflow this number.
4474 	 */
4475 	add_root_meta_rsv(root, num_bytes, type);
4476 	return ret;
4477 }
4478 
__btrfs_qgroup_reserve_meta(struct btrfs_root * root,int num_bytes,enum btrfs_qgroup_rsv_type type,bool enforce,bool noflush)4479 int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
4480 				enum btrfs_qgroup_rsv_type type, bool enforce,
4481 				bool noflush)
4482 {
4483 	int ret;
4484 
4485 	ret = btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
4486 	if ((ret <= 0 && ret != -EDQUOT) || noflush)
4487 		return ret;
4488 
4489 	ret = try_flush_qgroup(root);
4490 	if (ret < 0)
4491 		return ret;
4492 	return btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
4493 }
4494 
4495 /*
4496  * Per-transaction meta reservations should all be freed at transaction
4497  * commit time.
4498  */
btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root * root)4499 void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
4500 {
4501 	struct btrfs_fs_info *fs_info = root->fs_info;
4502 
4503 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4504 	    !btrfs_is_fstree(btrfs_root_id(root)))
4505 		return;
4506 
4507 	/* TODO: Update trace point to handle such free */
4508 	trace_btrfs_qgroup_meta_free_all_pertrans(root);
4509 	/* Special value -1 means to free all reserved space */
4510 	btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(root), (u64)-1,
4511 				  BTRFS_QGROUP_RSV_META_PERTRANS);
4512 }
4513 
__btrfs_qgroup_free_meta(struct btrfs_root * root,int num_bytes,enum btrfs_qgroup_rsv_type type)4514 void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
4515 			      enum btrfs_qgroup_rsv_type type)
4516 {
4517 	struct btrfs_fs_info *fs_info = root->fs_info;
4518 
4519 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4520 	    !btrfs_is_fstree(btrfs_root_id(root)))
4521 		return;
4522 
4523 	/*
4524 	 * Reservations for META_PREALLOC can happen before quota is enabled,
4525 	 * which can lead to underflow.
4526 	 * Ensure we only free what we have actually reserved.
4527 	 */
4528 	num_bytes = sub_root_meta_rsv(root, num_bytes, type);
4529 	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
4530 	trace_btrfs_qgroup_meta_reserve(root, -(s64)num_bytes, type);
4531 	btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(root), num_bytes, type);
4532 }
4533 
qgroup_convert_meta(struct btrfs_fs_info * fs_info,u64 ref_root,int num_bytes)4534 static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
4535 				int num_bytes)
4536 {
4537 	struct btrfs_qgroup *qgroup;
4538 	LIST_HEAD(qgroup_list);
4539 
4540 	if (num_bytes == 0)
4541 		return;
4542 	if (!fs_info->quota_root)
4543 		return;
4544 
4545 	spin_lock(&fs_info->qgroup_lock);
4546 	qgroup = find_qgroup_rb(fs_info, ref_root);
4547 	if (!qgroup)
4548 		goto out;
4549 
4550 	qgroup_iterator_add(&qgroup_list, qgroup);
4551 	list_for_each_entry(qgroup, &qgroup_list, iterator) {
4552 		struct btrfs_qgroup_list *glist;
4553 
4554 		qgroup_rsv_release(fs_info, qgroup, num_bytes,
4555 				BTRFS_QGROUP_RSV_META_PREALLOC);
4556 		if (!sb_rdonly(fs_info->sb))
4557 			qgroup_rsv_add(fs_info, qgroup, num_bytes,
4558 				       BTRFS_QGROUP_RSV_META_PERTRANS);
4559 
4560 		list_for_each_entry(glist, &qgroup->groups, next_group)
4561 			qgroup_iterator_add(&qgroup_list, glist->group);
4562 	}
4563 out:
4564 	qgroup_iterator_clean(&qgroup_list);
4565 	spin_unlock(&fs_info->qgroup_lock);
4566 }
4567 
4568 /*
4569  * Convert @num_bytes of META_PREALLOCATED reservation to META_PERTRANS.
4570  *
4571  * This is called when a preallocated meta reservation needs to be used,
4572  * normally after a btrfs_join_transaction() call.
4573  */
btrfs_qgroup_convert_reserved_meta(struct btrfs_root * root,int num_bytes)4574 void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
4575 {
4576 	struct btrfs_fs_info *fs_info = root->fs_info;
4577 
4578 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4579 	    !btrfs_is_fstree(btrfs_root_id(root)))
4580 		return;
4581 	/* Same as btrfs_qgroup_free_meta_prealloc() */
4582 	num_bytes = sub_root_meta_rsv(root, num_bytes,
4583 				      BTRFS_QGROUP_RSV_META_PREALLOC);
4584 	trace_btrfs_qgroup_meta_convert(root, num_bytes);
4585 	qgroup_convert_meta(fs_info, btrfs_root_id(root), num_bytes);
4586 	if (!sb_rdonly(fs_info->sb))
4587 		add_root_meta_rsv(root, num_bytes, BTRFS_QGROUP_RSV_META_PERTRANS);
4588 }
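
/*
 * Illustrative sketch (not part of the upstream file): a typical caller
 * reserves META_PREALLOC up front and converts it once the reservation is
 * actually consumed inside a transaction.  @nr_bytes is hypothetical and
 * assumed to be nodesize aligned.
 */
static int example_prealloc_then_convert(struct btrfs_root *root, int nr_bytes)
{
	int ret;

	ret = btrfs_qgroup_reserve_meta(root, nr_bytes,
					BTRFS_QGROUP_RSV_META_PREALLOC, true);
	if (ret < 0)
		return ret;
	/* ... join a transaction and consume the reservation ... */
	btrfs_qgroup_convert_reserved_meta(root, nr_bytes);
	return 0;
}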
4589 
4590 /*
4591  * Check for leaked qgroup reserved space, normally at inode destruction
4592  * time.
4593  */
btrfs_qgroup_check_reserved_leak(struct btrfs_inode * inode)4594 void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode)
4595 {
4596 	struct extent_changeset changeset;
4597 	struct ulist_node *unode;
4598 	struct ulist_iterator iter;
4599 	int ret;
4600 
4601 	extent_changeset_init(&changeset);
4602 	ret = btrfs_clear_record_extent_bits(&inode->io_tree, 0, (u64)-1,
4603 					     EXTENT_QGROUP_RESERVED, &changeset);
4604 
4605 	WARN_ON(ret < 0);
4606 	if (WARN_ON(changeset.bytes_changed)) {
4607 		ULIST_ITER_INIT(&iter);
4608 		while ((unode = ulist_next(&changeset.range_changed, &iter))) {
4609 			btrfs_warn(inode->root->fs_info,
4610 		"leaking qgroup reserved space, ino: %llu, start: %llu, end: %llu",
4611 				btrfs_ino(inode), unode->val, unode->aux);
4612 		}
4613 		btrfs_qgroup_free_refroot(inode->root->fs_info,
4614 				btrfs_root_id(inode->root),
4615 				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
4616 
4617 	}
4618 	extent_changeset_release(&changeset);
4619 }
4620 
btrfs_qgroup_init_swapped_blocks(struct btrfs_qgroup_swapped_blocks * swapped_blocks)4621 void btrfs_qgroup_init_swapped_blocks(
4622 	struct btrfs_qgroup_swapped_blocks *swapped_blocks)
4623 {
4624 	int i;
4625 
4626 	spin_lock_init(&swapped_blocks->lock);
4627 	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
4628 		swapped_blocks->blocks[i] = RB_ROOT;
4629 	swapped_blocks->swapped = false;
4630 }
4631 
4632 /*
4633  * Delete all swapped block records of @root.
4634  * Every record here means we skipped a full subtree scan for qgroup.
4635  *
4636  * Called when committing a transaction.
4637  */
btrfs_qgroup_clean_swapped_blocks(struct btrfs_root * root)4638 void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root)
4639 {
4640 	struct btrfs_qgroup_swapped_blocks *swapped_blocks;
4641 	int i;
4642 
4643 	swapped_blocks = &root->swapped_blocks;
4644 
4645 	spin_lock(&swapped_blocks->lock);
4646 	if (!swapped_blocks->swapped)
4647 		goto out;
4648 	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
4649 		struct rb_root *cur_root = &swapped_blocks->blocks[i];
4650 		struct btrfs_qgroup_swapped_block *entry;
4651 		struct btrfs_qgroup_swapped_block *next;
4652 
4653 		rbtree_postorder_for_each_entry_safe(entry, next, cur_root,
4654 						     node)
4655 			kfree(entry);
4656 		swapped_blocks->blocks[i] = RB_ROOT;
4657 	}
4658 	swapped_blocks->swapped = false;
4659 out:
4660 	spin_unlock(&swapped_blocks->lock);
4661 }
4662 
qgroup_swapped_block_bytenr_key_cmp(const void * key,const struct rb_node * node)4663 static int qgroup_swapped_block_bytenr_key_cmp(const void *key, const struct rb_node *node)
4664 {
4665 	const u64 *bytenr = key;
4666 	const struct btrfs_qgroup_swapped_block *block = rb_entry(node,
4667 					  struct btrfs_qgroup_swapped_block, node);
4668 
4669 	if (block->subvol_bytenr < *bytenr)
4670 		return -1;
4671 	else if (block->subvol_bytenr > *bytenr)
4672 		return 1;
4673 
4674 	return 0;
4675 }
4676 
qgroup_swapped_block_bytenr_cmp(struct rb_node * new,const struct rb_node * existing)4677 static int qgroup_swapped_block_bytenr_cmp(struct rb_node *new, const struct rb_node *existing)
4678 {
4679 	const struct btrfs_qgroup_swapped_block *new_block = rb_entry(new,
4680 					      struct btrfs_qgroup_swapped_block, node);
4681 
4682 	return qgroup_swapped_block_bytenr_key_cmp(&new_block->subvol_bytenr, existing);
4683 }
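
/*
 * Illustrative sketch (not part of the upstream file): how the two
 * comparators above pair with the rb-tree helpers.  Lookups take the key
 * comparator, insertions take the node comparator built on top of it (see
 * the rb_find_add() call in btrfs_qgroup_add_swapped_blocks() below).
 * The helper is hypothetical; callers would hold blocks->lock.
 */
static struct btrfs_qgroup_swapped_block *
example_lookup_swapped_block(struct rb_root *root, u64 bytenr)
{
	struct rb_node *node;

	node = rb_find(&bytenr, root, qgroup_swapped_block_bytenr_key_cmp);
	if (!node)
		return NULL;
	return rb_entry(node, struct btrfs_qgroup_swapped_block, node);
}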
4684 
4685 /*
4686  * Add subtree roots record into @subvol_root.
4687  *
4688  * @subvol_root:	tree root of the subvolume tree that gets swapped
4689  * @bg:			block group under balance
4690  * @subvol_parent/slot:	pointer to the subtree root in subvolume tree
4691  * @reloc_parent/slot:	pointer to the subtree root in reloc tree
4692  *			BOTH POINTERS ARE BEFORE TREE SWAP
4693  * @last_snapshot:	last snapshot generation of the subvolume tree
4694  */
btrfs_qgroup_add_swapped_blocks(struct btrfs_root * subvol_root,struct btrfs_block_group * bg,struct extent_buffer * subvol_parent,int subvol_slot,struct extent_buffer * reloc_parent,int reloc_slot,u64 last_snapshot)4695 int btrfs_qgroup_add_swapped_blocks(struct btrfs_root *subvol_root,
4696 		struct btrfs_block_group *bg,
4697 		struct extent_buffer *subvol_parent, int subvol_slot,
4698 		struct extent_buffer *reloc_parent, int reloc_slot,
4699 		u64 last_snapshot)
4700 {
4701 	struct btrfs_fs_info *fs_info = subvol_root->fs_info;
4702 	struct btrfs_qgroup_swapped_blocks *blocks = &subvol_root->swapped_blocks;
4703 	struct btrfs_qgroup_swapped_block *block;
4704 	struct rb_node *node;
4705 	int level = btrfs_header_level(subvol_parent) - 1;
4706 	int ret = 0;
4707 
4708 	if (!btrfs_qgroup_full_accounting(fs_info))
4709 		return 0;
4710 
4711 	if (btrfs_node_ptr_generation(subvol_parent, subvol_slot) >
4712 	    btrfs_node_ptr_generation(reloc_parent, reloc_slot)) {
4713 		btrfs_err_rl(fs_info,
4714 		"%s: bad parameter order, subvol_gen=%llu reloc_gen=%llu",
4715 			__func__,
4716 			btrfs_node_ptr_generation(subvol_parent, subvol_slot),
4717 			btrfs_node_ptr_generation(reloc_parent, reloc_slot));
4718 		return -EUCLEAN;
4719 	}
4720 
4721 	block = kmalloc(sizeof(*block), GFP_NOFS);
4722 	if (!block) {
4723 		ret = -ENOMEM;
4724 		goto out;
4725 	}
4726 
4727 	/*
4728 	 * @reloc_parent/slot is still before swap, while @block is going to
4729 	 * record the bytenr after swap, so we do the swap here.
4730 	 */
4731 	block->subvol_bytenr = btrfs_node_blockptr(reloc_parent, reloc_slot);
4732 	block->subvol_generation = btrfs_node_ptr_generation(reloc_parent,
4733 							     reloc_slot);
4734 	block->reloc_bytenr = btrfs_node_blockptr(subvol_parent, subvol_slot);
4735 	block->reloc_generation = btrfs_node_ptr_generation(subvol_parent,
4736 							    subvol_slot);
4737 	block->last_snapshot = last_snapshot;
4738 	block->level = level;
4739 
4740 	/*
4741 	 * If we have bg == NULL, we're called from btrfs_recover_relocation(),
4742 	 * so no one else can modify tree blocks, thus the qgroup numbers will
4743 	 * not change no matter the value of trace_leaf.
4744 	 */
4745 	if (bg && bg->flags & BTRFS_BLOCK_GROUP_DATA)
4746 		block->trace_leaf = true;
4747 	else
4748 		block->trace_leaf = false;
4749 	btrfs_node_key_to_cpu(reloc_parent, &block->first_key, reloc_slot);
4750 
4751 	/* Insert @block into @blocks */
4752 	spin_lock(&blocks->lock);
4753 	node = rb_find_add(&block->node, &blocks->blocks[level], qgroup_swapped_block_bytenr_cmp);
4754 	if (node) {
4755 		struct btrfs_qgroup_swapped_block *entry;
4756 
4757 		entry = rb_entry(node, struct btrfs_qgroup_swapped_block, node);
4758 
4759 		if (entry->subvol_generation != block->subvol_generation ||
4760 		    entry->reloc_bytenr != block->reloc_bytenr ||
4761 		    entry->reloc_generation != block->reloc_generation) {
4762 			/*
4763 			 * Duplicated but mismatched entry found.  Shouldn't happen.
4764 			 * Marking qgroup inconsistent should be enough for end
4765 			 * users.
4766 			 */
4767 			DEBUG_WARN("duplicated but mismatched entry found");
4768 			ret = -EEXIST;
4769 		}
4770 		kfree(block);
4771 		goto out_unlock;
4772 	}
4773 	blocks->swapped = true;
4774 out_unlock:
4775 	spin_unlock(&blocks->lock);
4776 out:
4777 	if (ret < 0)
4778 		qgroup_mark_inconsistent(fs_info, "%s error: %d", __func__, ret);
4779 	return ret;
4780 }
4781 
4782 /*
4783  * Check if the tree block is a subtree root, and if so do the needed
4784  * delayed subtree trace for qgroup.
4785  *
4786  * This is called during btrfs_cow_block().
4787  */
btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle * trans,struct btrfs_root * root,struct extent_buffer * subvol_eb)4788 int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
4789 					 struct btrfs_root *root,
4790 					 struct extent_buffer *subvol_eb)
4791 {
4792 	struct btrfs_fs_info *fs_info = root->fs_info;
4793 	struct btrfs_tree_parent_check check = { 0 };
4794 	struct btrfs_qgroup_swapped_blocks *blocks = &root->swapped_blocks;
4795 	struct btrfs_qgroup_swapped_block *block;
4796 	struct extent_buffer *reloc_eb = NULL;
4797 	struct rb_node *node;
4798 	bool swapped = false;
4799 	int level = btrfs_header_level(subvol_eb);
4800 	int ret = 0;
4801 	int i;
4802 
4803 	if (!btrfs_qgroup_full_accounting(fs_info))
4804 		return 0;
4805 	if (!btrfs_is_fstree(btrfs_root_id(root)) || !root->reloc_root)
4806 		return 0;
4807 
4808 	spin_lock(&blocks->lock);
4809 	if (!blocks->swapped) {
4810 		spin_unlock(&blocks->lock);
4811 		return 0;
4812 	}
4813 	node = rb_find(&subvol_eb->start, &blocks->blocks[level],
4814 			qgroup_swapped_block_bytenr_key_cmp);
4815 	if (!node) {
4816 		spin_unlock(&blocks->lock);
4817 		goto out;
4818 	}
4819 	block = rb_entry(node, struct btrfs_qgroup_swapped_block, node);
4820 
4821 	/* Found one, remove it from @blocks first and update blocks->swapped */
4822 	rb_erase(&block->node, &blocks->blocks[level]);
4823 	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
4824 		if (RB_EMPTY_ROOT(&blocks->blocks[i])) {
4825 			swapped = true;
4826 			break;
4827 		}
4828 	}
4829 	blocks->swapped = swapped;
4830 	spin_unlock(&blocks->lock);
4831 
4832 	check.level = block->level;
4833 	check.transid = block->reloc_generation;
4834 	check.has_first_key = true;
4835 	memcpy(&check.first_key, &block->first_key, sizeof(check.first_key));
4836 
4837 	/* Read out reloc subtree root */
4838 	reloc_eb = read_tree_block(fs_info, block->reloc_bytenr, &check);
4839 	if (IS_ERR(reloc_eb)) {
4840 		ret = PTR_ERR(reloc_eb);
4841 		reloc_eb = NULL;
4842 		goto free_out;
4843 	}
4844 	if (!extent_buffer_uptodate(reloc_eb)) {
4845 		ret = -EIO;
4846 		goto free_out;
4847 	}
4848 
4849 	ret = qgroup_trace_subtree_swap(trans, reloc_eb, subvol_eb,
4850 			block->last_snapshot, block->trace_leaf);
4851 free_out:
4852 	kfree(block);
4853 	free_extent_buffer(reloc_eb);
4854 out:
4855 	if (ret < 0) {
4856 		qgroup_mark_inconsistent(fs_info,
4857 				"failed to account subtree at bytenr %llu: %d",
4858 				subvol_eb->start, ret);
4859 	}
4860 	return ret;
4861 }
4862 
btrfs_qgroup_destroy_extent_records(struct btrfs_transaction * trans)4863 void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
4864 {
4865 	struct btrfs_qgroup_extent_record *entry;
4866 	unsigned long index;
4867 
4868 	xa_for_each(&trans->delayed_refs.dirty_extents, index, entry) {
4869 		ulist_free(entry->old_roots);
4870 		kfree(entry);
4871 	}
4872 	xa_destroy(&trans->delayed_refs.dirty_extents);
4873 }
4874 
btrfs_record_squota_delta(struct btrfs_fs_info * fs_info,const struct btrfs_squota_delta * delta)4875 int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info,
4876 			      const struct btrfs_squota_delta *delta)
4877 {
4878 	int ret;
4879 	struct btrfs_qgroup *qgroup;
4880 	struct btrfs_qgroup *qg;
4881 	LIST_HEAD(qgroup_list);
4882 	u64 root = delta->root;
4883 	u64 num_bytes = delta->num_bytes;
4884 	const int sign = (delta->is_inc ? 1 : -1);
4885 
4886 	if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_SIMPLE)
4887 		return 0;
4888 
4889 	if (!btrfs_is_fstree(root))
4890 		return 0;
4891 
4892 	/* If the extent predates enabling quotas, don't count it. */
4893 	if (delta->generation < fs_info->qgroup_enable_gen)
4894 		return 0;
4895 
4896 	spin_lock(&fs_info->qgroup_lock);
4897 	qgroup = find_qgroup_rb(fs_info, root);
4898 	if (!qgroup) {
4899 		ret = -ENOENT;
4900 		goto out;
4901 	}
4902 
4903 	ret = 0;
4904 	qgroup_iterator_add(&qgroup_list, qgroup);
4905 	list_for_each_entry(qg, &qgroup_list, iterator) {
4906 		struct btrfs_qgroup_list *glist;
4907 
4908 		qg->excl += num_bytes * sign;
4909 		qg->rfer += num_bytes * sign;
4910 		qgroup_dirty(fs_info, qg);
4911 
4912 		list_for_each_entry(glist, &qg->groups, next_group)
4913 			qgroup_iterator_add(&qgroup_list, glist->group);
4914 	}
4915 	qgroup_iterator_clean(&qgroup_list);
4916 
4917 out:
4918 	spin_unlock(&fs_info->qgroup_lock);
4919 	return ret;
4920 }
4921