1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright (C) 2018-2023 Oracle. All Rights Reserved.
4 * Author: Darrick J. Wong <djwong@kernel.org>
5 */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_bit.h"
10 #include "xfs_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_mount.h"
13 #include "xfs_btree.h"
14 #include "scrub/scrub.h"
15 #include "scrub/bitmap.h"
16
17 #include <linux/interval_tree_generic.h>
18
19 /* u64 bitmap */
20
/*
 * Interval-tree node describing one contiguous run of set bits,
 * [bn_start, bn_last] inclusive.  Nodes are kept disjoint and
 * non-adjacent by xbitmap64_set/xbitmap64_clear.
 */
struct xbitmap64_node {
	struct rb_node	bn_rbnode;

	/* First set bit of this interval and subtree. */
	uint64_t	bn_start;

	/* Last set bit of this interval. */
	uint64_t	bn_last;

	/* Last set bit of this subtree.  Maintained by the interval tree
	 * macros; do not touch this. */
	uint64_t	__bn_subtree_last;
};
33
/* Define our own interval tree type with uint64_t parameters. */

/* Interval endpoint accessors required by INTERVAL_TREE_DEFINE; shared
 * with the uint32_t tree below since the field names are identical. */
#define START(node) ((node)->bn_start)
#define LAST(node)  ((node)->bn_last)

/*
 * These functions are defined by the INTERVAL_TREE_DEFINE macro, but we'll
 * forward-declare them anyway for clarity.
 */
static inline void
xbitmap64_tree_insert(struct xbitmap64_node *node, struct rb_root_cached *root);

static inline void
xbitmap64_tree_remove(struct xbitmap64_node *node, struct rb_root_cached *root);

static inline struct xbitmap64_node *
xbitmap64_tree_iter_first(struct rb_root_cached *root, uint64_t start,
			  uint64_t last);

static inline struct xbitmap64_node *
xbitmap64_tree_iter_next(struct xbitmap64_node *node, uint64_t start,
			 uint64_t last);

/* Instantiate the uint64_t interval tree implementation. */
INTERVAL_TREE_DEFINE(struct xbitmap64_node, bn_rbnode, uint64_t,
		__bn_subtree_last, START, LAST, static inline, xbitmap64_tree)
59
/*
 * Iterate each interval (run of set bits) of a bitmap in ascending order.
 * Do not change the bitmap while iterating; rb_next() walks the live tree.
 */
#define for_each_xbitmap64_extent(bn, bitmap) \
	for ((bn) = rb_entry_safe(rb_first(&(bitmap)->xb_root.rb_root), \
				   struct xbitmap64_node, bn_rbnode); \
	     (bn) != NULL; \
	     (bn) = rb_entry_safe(rb_next(&(bn)->bn_rbnode), \
				   struct xbitmap64_node, bn_rbnode))
67
/*
 * Clear a range of this bitmap.
 *
 * Removes all set bits in [start, start + len - 1] by trimming, splitting,
 * or deleting every extent that overlaps the range.  Returns 0 or -ENOMEM
 * (a split requires allocating a new node).
 *
 * NOTE(review): if the allocation in the split case fails, the overlapping
 * extent has already been trimmed down to [bn_start, start - 1], so the
 * bits in [last + 1, old_last] are lost rather than restored — callers
 * treat -ENOMEM as fatal to the repair, so this appears intentional.
 */
int
xbitmap64_clear(
	struct xbitmap64	*bitmap,
	uint64_t		start,
	uint64_t		len)
{
	struct xbitmap64_node	*bn;
	struct xbitmap64_node	*new_bn;
	uint64_t		last = start + len - 1;

	/* Each pass removes or shrinks one overlapping extent. */
	while ((bn = xbitmap64_tree_iter_first(&bitmap->xb_root, start, last))) {
		if (bn->bn_start < start && bn->bn_last > last) {
			uint64_t	old_last = bn->bn_last;

			/* overlaps with the entire clearing range */
			xbitmap64_tree_remove(bn, &bitmap->xb_root);
			bn->bn_last = start - 1;
			xbitmap64_tree_insert(bn, &bitmap->xb_root);

			/* add an extent */
			new_bn = kmalloc(sizeof(struct xbitmap64_node),
					XCHK_GFP_FLAGS);
			if (!new_bn)
				return -ENOMEM;
			new_bn->bn_start = last + 1;
			new_bn->bn_last = old_last;
			xbitmap64_tree_insert(new_bn, &bitmap->xb_root);
		} else if (bn->bn_start < start) {
			/* overlaps with the left side of the clearing range */
			xbitmap64_tree_remove(bn, &bitmap->xb_root);
			bn->bn_last = start - 1;
			xbitmap64_tree_insert(bn, &bitmap->xb_root);
		} else if (bn->bn_last > last) {
			/* overlaps with the right side of the clearing range */
			xbitmap64_tree_remove(bn, &bitmap->xb_root);
			bn->bn_start = last + 1;
			xbitmap64_tree_insert(bn, &bitmap->xb_root);
			/* nothing past this extent can overlap; stop early */
			break;
		} else {
			/* in the middle of the clearing range */
			xbitmap64_tree_remove(bn, &bitmap->xb_root);
			kfree(bn);
		}
	}

	return 0;
}
116
117 /* Set a range of this bitmap. */
118 int
xbitmap64_set(struct xbitmap64 * bitmap,uint64_t start,uint64_t len)119 xbitmap64_set(
120 struct xbitmap64 *bitmap,
121 uint64_t start,
122 uint64_t len)
123 {
124 struct xbitmap64_node *left;
125 struct xbitmap64_node *right;
126 uint64_t last = start + len - 1;
127 int error;
128
129 /* Is this whole range already set? */
130 left = xbitmap64_tree_iter_first(&bitmap->xb_root, start, last);
131 if (left && left->bn_start <= start && left->bn_last >= last)
132 return 0;
133
134 /* Clear out everything in the range we want to set. */
135 error = xbitmap64_clear(bitmap, start, len);
136 if (error)
137 return error;
138
139 /* Do we have a left-adjacent extent? */
140 left = xbitmap64_tree_iter_first(&bitmap->xb_root, start - 1, start - 1);
141 ASSERT(!left || left->bn_last + 1 == start);
142
143 /* Do we have a right-adjacent extent? */
144 right = xbitmap64_tree_iter_first(&bitmap->xb_root, last + 1, last + 1);
145 ASSERT(!right || right->bn_start == last + 1);
146
147 if (left && right) {
148 /* combine left and right adjacent extent */
149 xbitmap64_tree_remove(left, &bitmap->xb_root);
150 xbitmap64_tree_remove(right, &bitmap->xb_root);
151 left->bn_last = right->bn_last;
152 xbitmap64_tree_insert(left, &bitmap->xb_root);
153 kfree(right);
154 } else if (left) {
155 /* combine with left extent */
156 xbitmap64_tree_remove(left, &bitmap->xb_root);
157 left->bn_last = last;
158 xbitmap64_tree_insert(left, &bitmap->xb_root);
159 } else if (right) {
160 /* combine with right extent */
161 xbitmap64_tree_remove(right, &bitmap->xb_root);
162 right->bn_start = start;
163 xbitmap64_tree_insert(right, &bitmap->xb_root);
164 } else {
165 /* add an extent */
166 left = kmalloc(sizeof(struct xbitmap64_node), XCHK_GFP_FLAGS);
167 if (!left)
168 return -ENOMEM;
169 left->bn_start = start;
170 left->bn_last = last;
171 xbitmap64_tree_insert(left, &bitmap->xb_root);
172 }
173
174 return 0;
175 }
176
177 /* Free everything related to this bitmap. */
178 void
xbitmap64_destroy(struct xbitmap64 * bitmap)179 xbitmap64_destroy(
180 struct xbitmap64 *bitmap)
181 {
182 struct xbitmap64_node *bn;
183
184 while ((bn = xbitmap64_tree_iter_first(&bitmap->xb_root, 0, -1ULL))) {
185 xbitmap64_tree_remove(bn, &bitmap->xb_root);
186 kfree(bn);
187 }
188 }
189
/* Initialize a u64 bitmap to the empty state (no bits set). */
void
xbitmap64_init(
	struct xbitmap64	*bitmap)
{
	bitmap->xb_root = RB_ROOT_CACHED;
}
197
198 /*
199 * Remove all the blocks mentioned in @sub from the extents in @bitmap.
200 *
201 * The intent is that callers will iterate the rmapbt for all of its records
202 * for a given owner to generate @bitmap; and iterate all the blocks of the
203 * metadata structures that are not being rebuilt and have the same rmapbt
204 * owner to generate @sub. This routine subtracts all the extents
205 * mentioned in sub from all the extents linked in @bitmap, which leaves
206 * @bitmap as the list of blocks that are not accounted for, which we assume
207 * are the dead blocks of the old metadata structure. The blocks mentioned in
208 * @bitmap can be reaped.
209 *
210 * This is the logical equivalent of bitmap &= ~sub.
211 */
212 int
xbitmap64_disunion(struct xbitmap64 * bitmap,struct xbitmap64 * sub)213 xbitmap64_disunion(
214 struct xbitmap64 *bitmap,
215 struct xbitmap64 *sub)
216 {
217 struct xbitmap64_node *bn;
218 int error;
219
220 if (xbitmap64_empty(bitmap) || xbitmap64_empty(sub))
221 return 0;
222
223 for_each_xbitmap64_extent(bn, sub) {
224 error = xbitmap64_clear(bitmap, bn->bn_start,
225 bn->bn_last - bn->bn_start + 1);
226 if (error)
227 return error;
228 }
229
230 return 0;
231 }
232
233 /* How many bits are set in this bitmap? */
234 uint64_t
xbitmap64_hweight(struct xbitmap64 * bitmap)235 xbitmap64_hweight(
236 struct xbitmap64 *bitmap)
237 {
238 struct xbitmap64_node *bn;
239 uint64_t ret = 0;
240
241 for_each_xbitmap64_extent(bn, bitmap)
242 ret += bn->bn_last - bn->bn_start + 1;
243
244 return ret;
245 }
246
247 /* Call a function for every run of set bits in this bitmap. */
248 int
xbitmap64_walk(struct xbitmap64 * bitmap,xbitmap64_walk_fn fn,void * priv)249 xbitmap64_walk(
250 struct xbitmap64 *bitmap,
251 xbitmap64_walk_fn fn,
252 void *priv)
253 {
254 struct xbitmap64_node *bn;
255 int error = 0;
256
257 for_each_xbitmap64_extent(bn, bitmap) {
258 error = fn(bn->bn_start, bn->bn_last - bn->bn_start + 1, priv);
259 if (error)
260 break;
261 }
262
263 return error;
264 }
265
/* Does this bitmap have no bits set at all?  (True iff the tree is empty.) */
bool
xbitmap64_empty(
	struct xbitmap64	*bitmap)
{
	return bitmap->xb_root.rb_root.rb_node == NULL;
}
273
274 /* Is the start of the range set or clear? And for how long? */
275 bool
xbitmap64_test(struct xbitmap64 * bitmap,uint64_t start,uint64_t * len)276 xbitmap64_test(
277 struct xbitmap64 *bitmap,
278 uint64_t start,
279 uint64_t *len)
280 {
281 struct xbitmap64_node *bn;
282 uint64_t last = start + *len - 1;
283
284 bn = xbitmap64_tree_iter_first(&bitmap->xb_root, start, last);
285 if (!bn)
286 return false;
287 if (bn->bn_start <= start) {
288 if (bn->bn_last < last)
289 *len = bn->bn_last - start + 1;
290 return true;
291 }
292 *len = bn->bn_start - start;
293 return false;
294 }
295
296 /* u32 bitmap */
297
/*
 * Interval-tree node describing one contiguous run of set bits,
 * [bn_start, bn_last] inclusive — uint32_t variant of xbitmap64_node.
 */
struct xbitmap32_node {
	struct rb_node	bn_rbnode;

	/* First set bit of this interval and subtree. */
	uint32_t	bn_start;

	/* Last set bit of this interval. */
	uint32_t	bn_last;

	/* Last set bit of this subtree.  Maintained by the interval tree
	 * macros; do not touch this. */
	uint32_t	__bn_subtree_last;
};
310
/* Define our own interval tree type with uint32_t parameters.  The START
 * and LAST accessors defined for the u64 tree work here too. */

/*
 * These functions are defined by the INTERVAL_TREE_DEFINE macro, but we'll
 * forward-declare them anyway for clarity.
 */
static inline void
xbitmap32_tree_insert(struct xbitmap32_node *node, struct rb_root_cached *root);

static inline void
xbitmap32_tree_remove(struct xbitmap32_node *node, struct rb_root_cached *root);

static inline struct xbitmap32_node *
xbitmap32_tree_iter_first(struct rb_root_cached *root, uint32_t start,
			  uint32_t last);

static inline struct xbitmap32_node *
xbitmap32_tree_iter_next(struct xbitmap32_node *node, uint32_t start,
			 uint32_t last);

/* Instantiate the uint32_t interval tree implementation. */
INTERVAL_TREE_DEFINE(struct xbitmap32_node, bn_rbnode, uint32_t,
		__bn_subtree_last, START, LAST, static inline, xbitmap32_tree)
333
/*
 * Iterate each interval (run of set bits) of a bitmap in ascending order.
 * Do not change the bitmap while iterating; rb_next() walks the live tree.
 */
#define for_each_xbitmap32_extent(bn, bitmap) \
	for ((bn) = rb_entry_safe(rb_first(&(bitmap)->xb_root.rb_root), \
				   struct xbitmap32_node, bn_rbnode); \
	     (bn) != NULL; \
	     (bn) = rb_entry_safe(rb_next(&(bn)->bn_rbnode), \
				   struct xbitmap32_node, bn_rbnode))
341
/*
 * Clear a range of this bitmap.
 *
 * Removes all set bits in [start, start + len - 1] by trimming, splitting,
 * or deleting every extent that overlaps the range.  Returns 0 or -ENOMEM
 * (a split requires allocating a new node).
 *
 * NOTE(review): if the allocation in the split case fails, the overlapping
 * extent has already been trimmed down to [bn_start, start - 1], so the
 * bits in [last + 1, old_last] are lost rather than restored — callers
 * treat -ENOMEM as fatal to the repair, so this appears intentional.
 */
int
xbitmap32_clear(
	struct xbitmap32	*bitmap,
	uint32_t		start,
	uint32_t		len)
{
	struct xbitmap32_node	*bn;
	struct xbitmap32_node	*new_bn;
	uint32_t		last = start + len - 1;

	/* Each pass removes or shrinks one overlapping extent. */
	while ((bn = xbitmap32_tree_iter_first(&bitmap->xb_root, start, last))) {
		if (bn->bn_start < start && bn->bn_last > last) {
			uint32_t	old_last = bn->bn_last;

			/* overlaps with the entire clearing range */
			xbitmap32_tree_remove(bn, &bitmap->xb_root);
			bn->bn_last = start - 1;
			xbitmap32_tree_insert(bn, &bitmap->xb_root);

			/* add an extent */
			new_bn = kmalloc(sizeof(struct xbitmap32_node),
					XCHK_GFP_FLAGS);
			if (!new_bn)
				return -ENOMEM;
			new_bn->bn_start = last + 1;
			new_bn->bn_last = old_last;
			xbitmap32_tree_insert(new_bn, &bitmap->xb_root);
		} else if (bn->bn_start < start) {
			/* overlaps with the left side of the clearing range */
			xbitmap32_tree_remove(bn, &bitmap->xb_root);
			bn->bn_last = start - 1;
			xbitmap32_tree_insert(bn, &bitmap->xb_root);
		} else if (bn->bn_last > last) {
			/* overlaps with the right side of the clearing range */
			xbitmap32_tree_remove(bn, &bitmap->xb_root);
			bn->bn_start = last + 1;
			xbitmap32_tree_insert(bn, &bitmap->xb_root);
			/* nothing past this extent can overlap; stop early */
			break;
		} else {
			/* in the middle of the clearing range */
			xbitmap32_tree_remove(bn, &bitmap->xb_root);
			kfree(bn);
		}
	}

	return 0;
}
390
391 /* Set a range of this bitmap. */
392 int
xbitmap32_set(struct xbitmap32 * bitmap,uint32_t start,uint32_t len)393 xbitmap32_set(
394 struct xbitmap32 *bitmap,
395 uint32_t start,
396 uint32_t len)
397 {
398 struct xbitmap32_node *left;
399 struct xbitmap32_node *right;
400 uint32_t last = start + len - 1;
401 int error;
402
403 /* Is this whole range already set? */
404 left = xbitmap32_tree_iter_first(&bitmap->xb_root, start, last);
405 if (left && left->bn_start <= start && left->bn_last >= last)
406 return 0;
407
408 /* Clear out everything in the range we want to set. */
409 error = xbitmap32_clear(bitmap, start, len);
410 if (error)
411 return error;
412
413 /* Do we have a left-adjacent extent? */
414 left = xbitmap32_tree_iter_first(&bitmap->xb_root, start - 1, start - 1);
415 ASSERT(!left || left->bn_last + 1 == start);
416
417 /* Do we have a right-adjacent extent? */
418 right = xbitmap32_tree_iter_first(&bitmap->xb_root, last + 1, last + 1);
419 ASSERT(!right || right->bn_start == last + 1);
420
421 if (left && right) {
422 /* combine left and right adjacent extent */
423 xbitmap32_tree_remove(left, &bitmap->xb_root);
424 xbitmap32_tree_remove(right, &bitmap->xb_root);
425 left->bn_last = right->bn_last;
426 xbitmap32_tree_insert(left, &bitmap->xb_root);
427 kfree(right);
428 } else if (left) {
429 /* combine with left extent */
430 xbitmap32_tree_remove(left, &bitmap->xb_root);
431 left->bn_last = last;
432 xbitmap32_tree_insert(left, &bitmap->xb_root);
433 } else if (right) {
434 /* combine with right extent */
435 xbitmap32_tree_remove(right, &bitmap->xb_root);
436 right->bn_start = start;
437 xbitmap32_tree_insert(right, &bitmap->xb_root);
438 } else {
439 /* add an extent */
440 left = kmalloc(sizeof(struct xbitmap32_node), XCHK_GFP_FLAGS);
441 if (!left)
442 return -ENOMEM;
443 left->bn_start = start;
444 left->bn_last = last;
445 xbitmap32_tree_insert(left, &bitmap->xb_root);
446 }
447
448 return 0;
449 }
450
451 /* Free everything related to this bitmap. */
452 void
xbitmap32_destroy(struct xbitmap32 * bitmap)453 xbitmap32_destroy(
454 struct xbitmap32 *bitmap)
455 {
456 struct xbitmap32_node *bn;
457
458 while ((bn = xbitmap32_tree_iter_first(&bitmap->xb_root, 0, -1U))) {
459 xbitmap32_tree_remove(bn, &bitmap->xb_root);
460 kfree(bn);
461 }
462 }
463
/* Initialize a u32 bitmap to the empty state (no bits set). */
void
xbitmap32_init(
	struct xbitmap32	*bitmap)
{
	bitmap->xb_root = RB_ROOT_CACHED;
}
471
472 /*
473 * Remove all the blocks mentioned in @sub from the extents in @bitmap.
474 *
475 * The intent is that callers will iterate the rmapbt for all of its records
476 * for a given owner to generate @bitmap; and iterate all the blocks of the
477 * metadata structures that are not being rebuilt and have the same rmapbt
478 * owner to generate @sub. This routine subtracts all the extents
479 * mentioned in sub from all the extents linked in @bitmap, which leaves
480 * @bitmap as the list of blocks that are not accounted for, which we assume
481 * are the dead blocks of the old metadata structure. The blocks mentioned in
482 * @bitmap can be reaped.
483 *
484 * This is the logical equivalent of bitmap &= ~sub.
485 */
486 int
xbitmap32_disunion(struct xbitmap32 * bitmap,struct xbitmap32 * sub)487 xbitmap32_disunion(
488 struct xbitmap32 *bitmap,
489 struct xbitmap32 *sub)
490 {
491 struct xbitmap32_node *bn;
492 int error;
493
494 if (xbitmap32_empty(bitmap) || xbitmap32_empty(sub))
495 return 0;
496
497 for_each_xbitmap32_extent(bn, sub) {
498 error = xbitmap32_clear(bitmap, bn->bn_start,
499 bn->bn_last - bn->bn_start + 1);
500 if (error)
501 return error;
502 }
503
504 return 0;
505 }
506
507 /* How many bits are set in this bitmap? */
508 uint32_t
xbitmap32_hweight(struct xbitmap32 * bitmap)509 xbitmap32_hweight(
510 struct xbitmap32 *bitmap)
511 {
512 struct xbitmap32_node *bn;
513 uint32_t ret = 0;
514
515 for_each_xbitmap32_extent(bn, bitmap)
516 ret += bn->bn_last - bn->bn_start + 1;
517
518 return ret;
519 }
520
521 /* Call a function for every run of set bits in this bitmap. */
522 int
xbitmap32_walk(struct xbitmap32 * bitmap,xbitmap32_walk_fn fn,void * priv)523 xbitmap32_walk(
524 struct xbitmap32 *bitmap,
525 xbitmap32_walk_fn fn,
526 void *priv)
527 {
528 struct xbitmap32_node *bn;
529 int error = 0;
530
531 for_each_xbitmap32_extent(bn, bitmap) {
532 error = fn(bn->bn_start, bn->bn_last - bn->bn_start + 1, priv);
533 if (error)
534 break;
535 }
536
537 return error;
538 }
539
/* Does this bitmap have no bits set at all?  (True iff the tree is empty.) */
bool
xbitmap32_empty(
	struct xbitmap32	*bitmap)
{
	return bitmap->xb_root.rb_root.rb_node == NULL;
}
547
548 /* Is the start of the range set or clear? And for how long? */
549 bool
xbitmap32_test(struct xbitmap32 * bitmap,uint32_t start,uint32_t * len)550 xbitmap32_test(
551 struct xbitmap32 *bitmap,
552 uint32_t start,
553 uint32_t *len)
554 {
555 struct xbitmap32_node *bn;
556 uint32_t last = start + *len - 1;
557
558 bn = xbitmap32_tree_iter_first(&bitmap->xb_root, start, last);
559 if (!bn)
560 return false;
561 if (bn->bn_start <= start) {
562 if (bn->bn_last < last)
563 *len = bn->bn_last - start + 1;
564 return true;
565 }
566 *len = bn->bn_start - start;
567 return false;
568 }
569