/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_BLOCK_RSV_H
#define BTRFS_BLOCK_RSV_H

#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>

struct btrfs_trans_handle;
struct btrfs_root;
struct btrfs_space_info;
struct btrfs_block_rsv;
struct btrfs_fs_info;
enum btrfs_reserve_flush_enum;

/*
 * Types of block reserves
 */
enum btrfs_rsv_type {
	BTRFS_BLOCK_RSV_GLOBAL,
	BTRFS_BLOCK_RSV_DELALLOC,
	BTRFS_BLOCK_RSV_TRANS,
	BTRFS_BLOCK_RSV_CHUNK,
	BTRFS_BLOCK_RSV_DELOPS,
	BTRFS_BLOCK_RSV_DELREFS,
	BTRFS_BLOCK_RSV_TREELOG,
	BTRFS_BLOCK_RSV_EMPTY,
	BTRFS_BLOCK_RSV_TEMP,
};

struct btrfs_block_rsv {
	u64 size;
	u64 reserved;
	struct btrfs_space_info *space_info;
	spinlock_t lock;
	bool full;
	bool failfast;
	/* Block reserve type, one of BTRFS_BLOCK_RSV_* */
	enum btrfs_rsv_type type:8;
	/*
	 * Qgroup equivalents of @size and @reserved.
	 *
	 * Unlike the normal @size/@reserved of an inode rsv, qgroup doesn't
	 * care about things like csum size or how many tree blocks it will
	 * need to reserve.
	 *
	 * Qgroup cares more about the net change of extent usage.
	 *
	 * So for one newly inserted file extent, the worst case is a leaf
	 * split and a level increase, thus reserving nodesize for each file
	 * extent is already more than enough.
	 *
	 * In short, qgroup_rsv_size/qgroup_rsv_reserved is the upper limit of
	 * the qgroup metadata reservation that may be needed.
	 */
	u64 qgroup_rsv_size;
	u64 qgroup_rsv_reserved;
};

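/*
 * A minimal lifecycle sketch using the declarations below.  Illustrative
 * only, not lifted from a real caller; @flush is a caller chosen value of
 * enum btrfs_reserve_flush_enum, and the assumption that passing (u64)-1 as
 * num_bytes releases the whole reservation belongs to the sketch, not to
 * this header:
 *
 *	struct btrfs_block_rsv *rsv;
 *	int ret;
 *
 *	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
 *	if (!rsv)
 *		return -ENOMEM;
 *	ret = btrfs_block_rsv_add(fs_info, rsv, num_bytes, flush);
 *	if (ret == 0) {
 *		(consume the reserved space, e.g. allocate tree blocks)
 *		btrfs_block_rsv_release(fs_info, rsv, (u64)-1, NULL);
 *	}
 *	btrfs_free_block_rsv(fs_info, rsv);
 */
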
void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, enum btrfs_rsv_type type);
void btrfs_init_root_block_rsv(struct btrfs_root *root);
struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
					      enum btrfs_rsv_type type);
void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
				   struct btrfs_block_rsv *rsv,
				   enum btrfs_rsv_type type);
void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_rsv *rsv);
int btrfs_block_rsv_add(struct btrfs_fs_info *fs_info,
			struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			enum btrfs_reserve_flush_enum flush);
int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_percent);
int btrfs_block_rsv_refill(struct btrfs_fs_info *fs_info,
			   struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			   enum btrfs_reserve_flush_enum flush);
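/*
 * btrfs_block_rsv_migrate() moves @num_bytes of already reserved space from
 * @src_rsv to @dst_rsv.  A hedged usage sketch (the fallback path is an
 * assumption about a typical caller, not a quote from one):
 *
 *	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
 *	if (ret) {
 *		(src_rsv did not hold num_bytes, reserve from scratch)
 *		ret = btrfs_block_rsv_add(fs_info, dst_rsv, num_bytes, flush);
 *	}
 */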
int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
			    struct btrfs_block_rsv *dst_rsv, u64 num_bytes,
			    bool update_size);
int btrfs_block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv, u64 num_bytes);
void btrfs_block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes, bool update_size);
u64 btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
			    struct btrfs_block_rsv *block_rsv,
			    u64 num_bytes, u64 *qgroup_to_release);
void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info);
void btrfs_init_global_block_rsv(struct btrfs_fs_info *fs_info);
void btrfs_release_global_block_rsv(struct btrfs_fs_info *fs_info);
struct btrfs_block_rsv *btrfs_use_block_rsv(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    u32 blocksize);
int btrfs_check_trunc_cache_free_space(const struct btrfs_fs_info *fs_info,
				       struct btrfs_block_rsv *rsv);
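
/*
 * Give @blocksize back to @block_rsv, typically after unwinding a tree block
 * allocation that charged it via btrfs_use_block_rsv(), then release with 0
 * bytes so the reserve can shed anything above its target size.  This
 * description of the pairing and of the release side effect is inferred from
 * the helpers called below, not quoted from their implementation.
 */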
static inline void btrfs_unuse_block_rsv(struct btrfs_fs_info *fs_info,
					 struct btrfs_block_rsv *block_rsv,
					 u32 blocksize)
{
	btrfs_block_rsv_add_bytes(block_rsv, blocksize, false);
	btrfs_block_rsv_release(fs_info, block_rsv, 0, NULL);
}

/*
 * Fast path to check if the reserve is full.  May be used outside of locks,
 * with care, as the result can be stale (the unlocked read is annotated with
 * data_race()).
 */
static inline bool btrfs_block_rsv_full(const struct btrfs_block_rsv *rsv)
{
	return data_race(rsv->full);
}
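
/*
 * A sketch of how the unlocked hint is meant to be consumed (illustrative
 * only, not a caller from the kernel sources): treat the answer as a hint to
 * skip work, and take rsv->lock when the exact state matters.
 *
 *	if (btrfs_block_rsv_full(rsv))
 *		return;		(a stale "full" answer only skips a top-up)
 *	spin_lock(&rsv->lock);
 *	(read rsv->reserved / rsv->size and decide how much to add)
 *	spin_unlock(&rsv->lock);
 */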

/*
 * Get the reserved amount of a block reserve in a context where getting a
 * stale value is acceptable, instead of accessing it directly and triggering
 * a data race warning from KCSAN.
 */
static inline u64 btrfs_block_rsv_reserved(struct btrfs_block_rsv *rsv)
{
	u64 ret;

	spin_lock(&rsv->lock);
	ret = rsv->reserved;
	spin_unlock(&rsv->lock);

	return ret;
}

/*
 * Get the size of a block reserve in a context where getting a stale value is
 * acceptable, instead of accessing it directly and triggering a data race
 * warning from KCSAN.
 */
static inline u64 btrfs_block_rsv_size(struct btrfs_block_rsv *rsv)
{
	u64 ret;

	spin_lock(&rsv->lock);
	ret = rsv->size;
	spin_unlock(&rsv->lock);

	return ret;
}

#endif /* BTRFS_BLOCK_RSV_H */