/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_MISC_H
#define BTRFS_MISC_H

#include <linux/types.h>
#include <linux/bitmap.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/math64.h>
#include <linux/rbtree.h>
#include <linux/bio.h>

/*
 * Convenience macros to define a pointer with the __free(kfree) or
 * __free(kvfree) cleanup attribute, initialized to NULL.
 */
#define AUTO_KFREE(name) *name __free(kfree) = NULL
#define AUTO_KVFREE(name) *name __free(kvfree) = NULL
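
/*
 * Example usage (an illustrative sketch, not a snippet from the tree; the
 * type and variable names are hypothetical):
 *
 *	struct foo AUTO_KFREE(ptr);
 *
 *	ptr = kmalloc(sizeof(*ptr), GFP_NOFS);
 *	if (!ptr)
 *		return -ENOMEM;
 *
 * The pointer is passed to kfree() automatically once it goes out of scope,
 * including on early returns.
 */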

/*
 * Enumerate bits using enum autoincrement. Define @name as the n-th bit.
 */
#define ENUM_BIT(name)					\
	__ ## name ## _BIT,				\
	name = (1U << __ ## name ## _BIT),		\
	__ ## name ## _SEQ = __ ## name ## _BIT
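
/*
 * Example (illustrative; the enumerator names are hypothetical):
 *
 *	enum {
 *		ENUM_BIT(FOO_FLAG),
 *		ENUM_BIT(BAR_FLAG),
 *	};
 *
 * defines FOO_FLAG == (1U << 0) and BAR_FLAG == (1U << 1); the __*_SEQ
 * enumerator resets the autoincrement so that the next bit number follows
 * on from the previous one.
 */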

/* Return the physical address @iter currently points at inside @bio. */
static inline phys_addr_t bio_iter_phys(const struct bio *bio,
					const struct bvec_iter *iter)
{
	struct bio_vec bv = bio_iter_iovec(bio, *iter);

	return bvec_phys(&bv);
}

/*
 * Iterate a bio using the btrfs block size.
 *
 * This handles large folios and highmem.
 *
 * @paddr:	Physical memory address of each iteration
 * @bio:	The bio to iterate
 * @iter:	The bvec_iter (pointer) to use
 * @blocksize:	The block size to advance by on each iteration
 *
 * This requires all folios in the bio to cover at least one block.
 */
#define btrfs_bio_for_each_block(paddr, bio, iter, blocksize)		\
	for (; (iter)->bi_size &&					\
	     (paddr = bio_iter_phys((bio), (iter)), 1);			\
	     bio_advance_iter_single((bio), (iter), (blocksize)))
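
/*
 * Example usage (an illustrative sketch; the bbio/fs_info names follow btrfs
 * conventions but the snippet itself is hypothetical):
 *
 *	struct bvec_iter iter = bbio->bio.bi_iter;
 *	phys_addr_t paddr;
 *
 *	btrfs_bio_for_each_block(paddr, &bbio->bio, &iter, fs_info->sectorsize)
 *		process_one_block(paddr);
 *
 * A copy of the iterator is used so the bio's own bi_iter is left untouched.
 */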

/* Can only be called on a non-cloned bio. */
static inline u32 bio_get_size(struct bio *bio)
{
	struct bio_vec *bvec;
	u32 ret = 0;
	int i;

	bio_for_each_bvec_all(bvec, bio, i)
		ret += bvec->bv_len;
	return ret;
}

/* Initialize a bvec_iter to the size of the specified bio. */
static inline struct bvec_iter init_bvec_iter_for_bio(struct bio *bio)
{
	const u32 bio_size = bio_get_size(bio);

	return (struct bvec_iter) {
		.bi_sector = 0,
		.bi_size = bio_size,
		.bi_idx = 0,
		.bi_bvec_done = 0,
	};
}

/* Same as btrfs_bio_for_each_block() but iterates the whole (non-cloned) bio. */
#define btrfs_bio_for_each_block_all(paddr, bio, blocksize)		\
	for (struct bvec_iter iter = init_bvec_iter_for_bio(bio);	\
	     (iter).bi_size &&						\
	     (paddr = bio_iter_phys((bio), &(iter)), 1);		\
	     bio_advance_iter_single((bio), &(iter), (blocksize)))
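
/*
 * Example (illustrative; the checksum helper is hypothetical):
 *
 *	phys_addr_t paddr;
 *
 *	btrfs_bio_for_each_block_all(paddr, bio, fs_info->sectorsize)
 *		csum_one_block(fs_info, paddr);
 *
 * Unlike btrfs_bio_for_each_block(), no external iterator is needed; one is
 * declared inside the loop.
 */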

static inline void cond_wake_up(struct wait_queue_head *wq)
{
	/*
	 * This implies a full smp_mb() barrier; see the comments for
	 * waitqueue_active() for why.
	 */
	if (wq_has_sleeper(wq))
		wake_up(wq);
}

static inline void cond_wake_up_nomb(struct wait_queue_head *wq)
{
	/*
	 * Special case for conditional wakeup where the barrier required for
	 * waitqueue_active is implied by some of the preceding code, e.g. one
	 * of the atomic operations (atomic_dec_return(), ...), or an
	 * unlock/lock sequence, etc.
	 */
	if (waitqueue_active(wq))
		wake_up(wq);
}
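
/*
 * Example pairing (an illustrative sketch; the counter and waitqueue are
 * hypothetical). atomic_dec_return() is fully ordered, so the barrier
 * required by waitqueue_active() is already implied:
 *
 *	if (atomic_dec_return(&counter) == 0)
 *		cond_wake_up_nomb(&wq);
 */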

static inline u64 mult_perc(u64 num, u32 percent)
{
	return div_u64(num * percent, 100);
}
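
/*
 * Example: mult_perc(total, 95) computes a 95% threshold of @total without
 * the truncation that (total / 100) * 95 would introduce.
 */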

/* Copy of is_power_of_2() that is 64bit safe. */
static inline bool is_power_of_two_u64(u64 n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

static inline bool has_single_bit_set(u64 n)
{
	return is_power_of_two_u64(n);
}

/*
 * Simple bytenr-based rb_tree related structures.
 *
 * Any structure that wants to use bytenr as its single search index should
 * start with these members.
 */
struct rb_simple_node {
	struct rb_node rb_node;
	u64 bytenr;
};
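
/*
 * Example embedding (illustrative; the containing structure is hypothetical):
 *
 *	struct my_extent {
 *		struct rb_simple_node simple_node;
 *		u64 len;
 *	};
 *
 * simple_node must be the first member; a node found by rb_simple_search()
 * can then be converted back with
 * rb_entry(node, struct my_extent, simple_node.rb_node).
 */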

/* Search @root for the entry that matches @bytenr exactly, or return NULL. */
static inline struct rb_node *rb_simple_search(const struct rb_root *root, u64 bytenr)
{
	struct rb_node *node = root->rb_node;
	struct rb_simple_node *entry;

	while (node) {
		entry = rb_entry(node, struct rb_simple_node, rb_node);

		if (bytenr < entry->bytenr)
			node = node->rb_left;
		else if (bytenr > entry->bytenr)
			node = node->rb_right;
		else
			return node;
	}
	return NULL;
}

/*
 * Search @root for the first entry that starts at or comes after @bytenr.
 *
 * @root:	the root to search.
 * @bytenr:	bytenr to search from.
 *
 * Return the rb_node that starts at or after @bytenr. If there is no entry
 * at or after @bytenr, return NULL.
 */
static inline struct rb_node *rb_simple_search_first(const struct rb_root *root,
						     u64 bytenr)
{
	struct rb_node *node = root->rb_node, *ret = NULL;
	struct rb_simple_node *entry, *ret_entry = NULL;

	while (node) {
		entry = rb_entry(node, struct rb_simple_node, rb_node);

		if (bytenr < entry->bytenr) {
			if (!ret || entry->bytenr < ret_entry->bytenr) {
				ret = node;
				ret_entry = entry;
			}

			node = node->rb_left;
		} else if (bytenr > entry->bytenr) {
			node = node->rb_right;
		} else {
			return node;
		}
	}

	return ret;
}
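
/*
 * Example (illustrative): with entries at bytenr 4096 and 8192 in the tree,
 * rb_simple_search_first(root, 4096) returns the 4096 node, while
 * rb_simple_search_first(root, 5000) returns the 8192 node (the first entry
 * at or after 5000).
 */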

/* Comparator for rb_find_add(), ordering rb_simple_node entries by bytenr. */
static int rb_simple_node_bytenr_cmp(struct rb_node *new, const struct rb_node *existing)
{
	struct rb_simple_node *new_entry = rb_entry(new, struct rb_simple_node, rb_node);
	struct rb_simple_node *existing_entry = rb_entry(existing, struct rb_simple_node, rb_node);

	if (new_entry->bytenr < existing_entry->bytenr)
		return -1;
	else if (new_entry->bytenr > existing_entry->bytenr)
		return 1;

	return 0;
}

/*
 * Insert @simple_node into @root. Return the conflicting node if one with
 * the same bytenr already exists, or NULL on successful insertion.
 */
static inline struct rb_node *rb_simple_insert(struct rb_root *root,
					       struct rb_simple_node *simple_node)
{
	return rb_find_add(&simple_node->rb_node, root, rb_simple_node_bytenr_cmp);
}
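
/*
 * Example (illustrative; continues the hypothetical struct my_extent above):
 *
 *	struct rb_node *existing;
 *
 *	existing = rb_simple_insert(&tree, &extent->simple_node);
 *	if (existing)
 *		return -EEXIST;
 */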

/* Return true if all bits in the range [start, start + nbits) are set. */
static inline bool bitmap_test_range_all_set(const unsigned long *addr,
					     unsigned long start,
					     unsigned long nbits)
{
	unsigned long found_zero;

	found_zero = find_next_zero_bit(addr, start + nbits, start);
	return (found_zero == start + nbits);
}

/* Return true if all bits in the range [start, start + nbits) are zero. */
static inline bool bitmap_test_range_all_zero(const unsigned long *addr,
					      unsigned long start,
					      unsigned long nbits)
{
	unsigned long found_set;

	found_set = find_next_bit(addr, start + nbits, start);
	return (found_set == start + nbits);
}
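
/*
 * Example (illustrative): for a bitmap whose first word is 0b0111,
 * bitmap_test_range_all_set(addr, 0, 3) and
 * bitmap_test_range_all_zero(addr, 3, 1) both return true.
 */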

#endif