// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013 Fusion IO. All rights reserved.
 */

#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sizes.h>
#include "btrfs-tests.h"
#include "../ctree.h"
#include "../extent_io.h"
#include "../disk-io.h"
#include "../btrfs_inode.h"

#define PROCESS_UNLOCK		(1U << 0)
#define PROCESS_RELEASE		(1U << 1)
#define PROCESS_TEST_LOCKED	(1U << 2)

/*
 * Walk the page range in batches and apply the PROCESS_* actions to each
 * folio.  Returns the number of folios that failed the PROCESS_TEST_LOCKED
 * check, i.e. that were unexpectedly unlocked.
 */
static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
				       unsigned long flags)
{
	int ret;
	struct folio_batch fbatch;
	unsigned long index = start >> PAGE_SHIFT;
	unsigned long end_index = end >> PAGE_SHIFT;
	int i;
	int count = 0;
	int loops = 0;

	folio_batch_init(&fbatch);

	while (index <= end_index) {
		ret = filemap_get_folios_contig(inode->i_mapping, &index,
						end_index, &fbatch);
		for (i = 0; i < ret; i++) {
			struct folio *folio = fbatch.folios[i];

			if (flags & PROCESS_TEST_LOCKED &&
			    !folio_test_locked(folio))
				count++;
			if (flags & PROCESS_UNLOCK && folio_test_locked(folio))
				folio_unlock(folio);
			if (flags & PROCESS_RELEASE)
				folio_put(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
		loops++;
		if (loops > 100000) {
			printk(KERN_ERR
			       "stuck in a loop, start %llu, end %llu, ret %d\n",
			       start, end, ret);
			break;
		}
	}

	return count;
}

#define STATE_FLAG_STR_LEN			256

#define PRINT_ONE_FLAG(state, dest, cur, name)				\
({									\
	if (state->state & EXTENT_##name)				\
		cur += scnprintf(dest + cur, STATE_FLAG_STR_LEN - cur,	\
				 "%s" #name, cur == 0 ? "" : "|");	\
})

/* Render the flags of @state into @dest as a '|'-separated string, e.g. "DIRTY|DELALLOC". */
static void extent_flag_to_str(const struct extent_state *state, char *dest)
{
	int cur = 0;

	dest[0] = 0;
	PRINT_ONE_FLAG(state, dest, cur, DIRTY);
	PRINT_ONE_FLAG(state, dest, cur, LOCKED);
	PRINT_ONE_FLAG(state, dest, cur, NEW);
	PRINT_ONE_FLAG(state, dest, cur, DELALLOC);
	PRINT_ONE_FLAG(state, dest, cur, DEFRAG);
	PRINT_ONE_FLAG(state, dest, cur, BOUNDARY);
	PRINT_ONE_FLAG(state, dest, cur, NODATASUM);
	PRINT_ONE_FLAG(state, dest, cur, CLEAR_META_RESV);
	PRINT_ONE_FLAG(state, dest, cur, NEED_WAIT);
	PRINT_ONE_FLAG(state, dest, cur, NORESERVE);
	PRINT_ONE_FLAG(state, dest, cur, QGROUP_RESERVED);
	PRINT_ONE_FLAG(state, dest, cur, CLEAR_DATA_RESV);
}

static void dump_extent_io_tree(const struct extent_io_tree *tree)
{
	struct rb_node *node;
	char flags_str[STATE_FLAG_STR_LEN];

	node = rb_first(&tree->state);
	test_msg("io tree content:");
	while (node) {
		struct extent_state *state;

		state = rb_entry(node, struct extent_state, rb_node);
		extent_flag_to_str(state, flags_str);
		test_msg("  start=%llu len=%llu flags=%s", state->start,
			 state->end + 1 - state->start, flags_str);
		node = rb_next(node);
	}
}

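/*
 * Exercise find_lock_delalloc_range() against several delalloc layouts:
 * a search inside a small extent, searches starting in the middle of and
 * past a large extent, and a range with a page that is no longer dirty.
 */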
static int test_find_delalloc(u32 sectorsize, u32 nodesize)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_root *root = NULL;
	struct inode *inode = NULL;
	struct extent_io_tree *tmp;
	struct page *page;
	struct page *locked_page = NULL;
	unsigned long index = 0;
	/* In this test we need at least 2 file extents at their maximum size */
	u64 max_bytes = BTRFS_MAX_EXTENT_SIZE;
	u64 total_dirty = 2 * max_bytes;
	u64 start, end, test_start;
	bool found;
	int ret = -EINVAL;

	test_msg("running find delalloc tests");

	fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
	if (!fs_info) {
		test_std_err(TEST_ALLOC_FS_INFO);
		return -ENOMEM;
	}

	root = btrfs_alloc_dummy_root(fs_info);
	if (IS_ERR(root)) {
		test_std_err(TEST_ALLOC_ROOT);
		ret = PTR_ERR(root);
		goto out;
	}

	inode = btrfs_new_test_inode();
	if (!inode) {
		test_std_err(TEST_ALLOC_INODE);
		ret = -ENOMEM;
		goto out;
	}
	tmp = &BTRFS_I(inode)->io_tree;
	BTRFS_I(inode)->root = root;

	/*
	 * Passing NULL as we don't have fs_info; tracepoints are not used at
	 * this point anyway.
	 */
	btrfs_extent_io_tree_init(NULL, tmp, IO_TREE_SELFTEST);

	/*
	 * First go through and create and mark all of our pages dirty, we pin
	 * everything to make sure our pages don't get evicted and screw up our
	 * test.
	 */
	for (index = 0; index < (total_dirty >> PAGE_SHIFT); index++) {
		page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
		if (!page) {
			test_err("failed to allocate test page");
			ret = -ENOMEM;
			goto out;
		}
		SetPageDirty(page);
		if (index) {
			unlock_page(page);
		} else {
			get_page(page);
			locked_page = page;
		}
	}

	/*
	 * Test this scenario
	 * |--- delalloc ---|
	 * |--- search ---|
	 */
	btrfs_set_extent_bit(tmp, 0, sectorsize - 1, EXTENT_DELALLOC, NULL);
	start = 0;
	end = start + PAGE_SIZE - 1;
	found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,
					 &end);
	if (!found) {
		test_err("should have found at least one delalloc");
		goto out_bits;
	}
	if (start != 0 || end != (sectorsize - 1)) {
		test_err("expected start 0 end %u, got start %llu end %llu",
			 sectorsize - 1, start, end);
		goto out_bits;
	}
	btrfs_unlock_extent(tmp, start, end, NULL);
	unlock_page(locked_page);
	put_page(locked_page);

	/*
	 * Test this scenario
	 *
	 * |--- delalloc ---|
	 *           |--- search ---|
	 */
	test_start = SZ_64M;
	locked_page = find_lock_page(inode->i_mapping,
				     test_start >> PAGE_SHIFT);
	if (!locked_page) {
		test_err("couldn't find the locked page");
		goto out_bits;
	}
	btrfs_set_extent_bit(tmp, sectorsize, max_bytes - 1, EXTENT_DELALLOC, NULL);
	start = test_start;
	end = start + PAGE_SIZE - 1;
	found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,
					 &end);
	if (!found) {
		test_err("couldn't find delalloc in our range");
		goto out_bits;
	}
	if (start != test_start || end != max_bytes - 1) {
		test_err("expected start %llu end %llu, got start %llu, end %llu",
			 test_start, max_bytes - 1, start, end);
		goto out_bits;
	}
	if (process_page_range(inode, start, end,
			       PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) {
		test_err("there were unlocked pages in the range");
		goto out_bits;
	}
	btrfs_unlock_extent(tmp, start, end, NULL);
	/* locked_page was unlocked above */
	put_page(locked_page);

	/*
	 * Test this scenario
	 * |--- delalloc ---|
	 *                    |--- search ---|
	 */
	test_start = max_bytes + sectorsize;
	locked_page = find_lock_page(inode->i_mapping, test_start >>
				     PAGE_SHIFT);
	if (!locked_page) {
		test_err("couldn't find the locked page");
		goto out_bits;
	}
	start = test_start;
	end = start + PAGE_SIZE - 1;
	found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,
					 &end);
	if (found) {
		test_err("found range when we shouldn't have");
		goto out_bits;
	}
	if (end != test_start + PAGE_SIZE - 1) {
		test_err("did not return the proper end offset");
		goto out_bits;
	}

	/*
	 * Test this scenario
	 * [------- delalloc -------|
	 * [max_bytes]|-- search--|
	 *
	 * We are re-using our test_start from above since it works out well.
	 */
	btrfs_set_extent_bit(tmp, max_bytes, total_dirty - 1, EXTENT_DELALLOC, NULL);
	start = test_start;
	end = start + PAGE_SIZE - 1;
	found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,
					 &end);
	if (!found) {
		test_err("didn't find our range");
		goto out_bits;
	}
	if (start != test_start || end != total_dirty - 1) {
		test_err("expected start %llu end %llu, got start %llu end %llu",
			 test_start, total_dirty - 1, start, end);
		goto out_bits;
	}
	if (process_page_range(inode, start, end,
			       PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) {
		test_err("pages in range were not all locked");
		goto out_bits;
	}
	btrfs_unlock_extent(tmp, start, end, NULL);

	/*
	 * Now to test where we run into a page that is no longer dirty in the
	 * range we want to find.
	 */
	page = find_get_page(inode->i_mapping,
			     (max_bytes + SZ_1M) >> PAGE_SHIFT);
	if (!page) {
		test_err("couldn't find our page");
		goto out_bits;
	}
	ClearPageDirty(page);
	put_page(page);

	/* We unlocked it in the previous test */
	lock_page(locked_page);
	start = test_start;
	end = start + PAGE_SIZE - 1;
	/*
	 * Currently if we fail to find dirty pages in the delalloc range we
	 * will adjust max_bytes down to PAGE_SIZE and then re-search.  If
	 * this changes at any point in the future we will need to fix this
	 * test's expected behavior.
	 */
	found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,
					 &end);
	if (!found) {
		test_err("didn't find our range");
		goto out_bits;
	}
	if (start != test_start || end != test_start + PAGE_SIZE - 1) {
		test_err("expected start %llu end %llu, got start %llu end %llu",
			 test_start, test_start + PAGE_SIZE - 1, start, end);
		goto out_bits;
	}
	if (process_page_range(inode, start, end, PROCESS_TEST_LOCKED |
			       PROCESS_UNLOCK)) {
		test_err("pages in range were not all locked");
		goto out_bits;
	}
	ret = 0;
out_bits:
	if (ret)
		dump_extent_io_tree(tmp);
	btrfs_clear_extent_bits(tmp, 0, total_dirty - 1, (unsigned)-1);
out:
	if (locked_page)
		put_page(locked_page);
	process_page_range(inode, 0, total_dirty - 1,
			   PROCESS_UNLOCK | PROCESS_RELEASE);
	iput(inode);
	btrfs_free_dummy_root(root);
	btrfs_free_dummy_fs_info(fs_info);
	return ret;
}

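/*
 * Compare every bit of @eb against the reference @bitmap.  Each bit is read
 * twice, once relative to byte 0 and once relative to its own byte, so both
 * addressing modes of extent_buffer_test_bit() get exercised.
 */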
static int check_eb_bitmap(unsigned long *bitmap, struct extent_buffer *eb)
{
	unsigned long i;

	for (i = 0; i < eb->len * BITS_PER_BYTE; i++) {
		int bit, bit1;

		bit = !!test_bit(i, bitmap);
		bit1 = !!extent_buffer_test_bit(eb, 0, i);
		if (bit1 != bit) {
			u8 has;
			u8 expect;

			read_extent_buffer(eb, &has, i / BITS_PER_BYTE, 1);
			expect = bitmap_get_value8(bitmap, round_down(i, BITS_PER_BYTE));

			test_err(
	"bits do not match, start byte 0 bit %lu, byte %lu has 0x%02x expect 0x%02x",
				 i, i / BITS_PER_BYTE, has, expect);
			return -EINVAL;
		}

		bit1 = !!extent_buffer_test_bit(eb, i / BITS_PER_BYTE,
						i % BITS_PER_BYTE);
		if (bit1 != bit) {
			u8 has;
			u8 expect;

			read_extent_buffer(eb, &has, i / BITS_PER_BYTE, 1);
			expect = bitmap_get_value8(bitmap, round_down(i, BITS_PER_BYTE));

			test_err(
	"bits do not match, start byte %lu bit %lu, byte %lu has 0x%02x expect 0x%02x",
				 i / BITS_PER_BYTE, i % BITS_PER_BYTE,
				 i / BITS_PER_BYTE, has, expect);
			return -EINVAL;
		}
	}
	return 0;
}

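/*
 * Apply the same set operation to the reference bitmap and to the extent
 * buffer, then cross-check the two.  test_bitmap_clear() below is the
 * clearing counterpart.
 */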
static int test_bitmap_set(const char *name, unsigned long *bitmap,
			   struct extent_buffer *eb,
			   unsigned long byte_start, unsigned long bit_start,
			   unsigned long bit_len)
{
	int ret;

	bitmap_set(bitmap, byte_start * BITS_PER_BYTE + bit_start, bit_len);
	extent_buffer_bitmap_set(eb, byte_start, bit_start, bit_len);
	ret = check_eb_bitmap(bitmap, eb);
	if (ret < 0)
		test_err("%s test failed", name);
	return ret;
}

static int test_bitmap_clear(const char *name, unsigned long *bitmap,
			     struct extent_buffer *eb,
			     unsigned long byte_start, unsigned long bit_start,
			     unsigned long bit_len)
{
	int ret;

	bitmap_clear(bitmap, byte_start * BITS_PER_BYTE + bit_start, bit_len);
	extent_buffer_bitmap_clear(eb, byte_start, bit_start, bit_len);
	ret = check_eb_bitmap(bitmap, eb);
	if (ret < 0)
		test_err("%s test failed", name);
	return ret;
}

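/*
 * Run set/clear patterns within a single byte, across byte boundaries,
 * across page boundaries (when the buffer spans multiple pages), and
 * finally a pseudo-random pattern.
 */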
static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb)
{
	unsigned long i, j;
	unsigned long byte_len = eb->len;
	u32 x;
	int ret;

	ret = test_bitmap_clear("clear all run 1", bitmap, eb, 0, 0,
				byte_len * BITS_PER_BYTE);
	if (ret < 0)
		return ret;

	ret = test_bitmap_set("set all", bitmap, eb, 0, 0, byte_len * BITS_PER_BYTE);
	if (ret < 0)
		return ret;

	ret = test_bitmap_clear("clear all run 2", bitmap, eb, 0, 0,
				byte_len * BITS_PER_BYTE);
	if (ret < 0)
		return ret;

	ret = test_bitmap_set("same byte set", bitmap, eb, 0, 2, 4);
	if (ret < 0)
		return ret;

	ret = test_bitmap_clear("same byte partial clear", bitmap, eb, 0, 4, 1);
	if (ret < 0)
		return ret;

	ret = test_bitmap_set("cross byte set", bitmap, eb, 2, 4, 8);
	if (ret < 0)
		return ret;

	ret = test_bitmap_set("cross multi byte set", bitmap, eb, 4, 4, 24);
	if (ret < 0)
		return ret;

	ret = test_bitmap_clear("cross byte clear", bitmap, eb, 2, 6, 4);
	if (ret < 0)
		return ret;

	ret = test_bitmap_clear("cross multi byte clear", bitmap, eb, 4, 6, 20);
	if (ret < 0)
		return ret;

	/* Straddling pages test */
	if (byte_len > PAGE_SIZE) {
		ret = test_bitmap_set("cross page set", bitmap, eb,
				      PAGE_SIZE - sizeof(long) / 2, 0,
				      sizeof(long) * BITS_PER_BYTE);
		if (ret < 0)
			return ret;

		ret = test_bitmap_set("cross page set all", bitmap, eb, 0, 0,
				      byte_len * BITS_PER_BYTE);
		if (ret < 0)
			return ret;

		ret = test_bitmap_clear("cross page clear", bitmap, eb,
					PAGE_SIZE - sizeof(long) / 2, 0,
					sizeof(long) * BITS_PER_BYTE);
		if (ret < 0)
			return ret;
	}

	/*
	 * Generate a wonky pseudo-random bit pattern for the sake of not using
	 * something repetitive that could miss some hypothetical off-by-n bug.
	 */
	x = 0;
	ret = test_bitmap_clear("clear all run 3", bitmap, eb, 0, 0,
				byte_len * BITS_PER_BYTE);
	if (ret < 0)
		return ret;

	for (i = 0; i < byte_len * BITS_PER_BYTE / 32; i++) {
		/* 32-bit LCG with the Numerical Recipes constants */
		x = (0x19660dULL * (u64)x + 0x3c6ef35fULL) & 0xffffffffU;
		for (j = 0; j < 32; j++) {
			if (x & (1U << j)) {
				bitmap_set(bitmap, i * 32 + j, 1);
				extent_buffer_bitmap_set(eb, 0, i * 32 + j, 1);
			}
		}
	}

	ret = check_eb_bitmap(bitmap, eb);
	if (ret) {
		test_err("random bit pattern failed");
		return ret;
	}

	return 0;
}

static int test_eb_bitmaps(u32 sectorsize, u32 nodesize)
{
	struct btrfs_fs_info *fs_info;
	unsigned long *bitmap = NULL;
	struct extent_buffer *eb = NULL;
	int ret;

	test_msg("running extent buffer bitmap tests");

	fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
	if (!fs_info) {
		test_std_err(TEST_ALLOC_FS_INFO);
		return -ENOMEM;
	}

	bitmap = kmalloc(nodesize, GFP_KERNEL);
	if (!bitmap) {
		test_err("couldn't allocate test bitmap");
		ret = -ENOMEM;
		goto out;
	}

	eb = alloc_dummy_extent_buffer(fs_info, 0);
	if (!eb) {
		test_std_err(TEST_ALLOC_EXTENT_BUFFER);
		ret = -ENOMEM;
		goto out;
	}

	ret = __test_eb_bitmaps(bitmap, eb);
	if (ret)
		goto out;

	free_extent_buffer(eb);

	/*
	 * Test again for case where the tree block is sectorsize aligned but
	 * not nodesize aligned.
	 */
	eb = alloc_dummy_extent_buffer(fs_info, sectorsize);
	if (!eb) {
		test_std_err(TEST_ALLOC_EXTENT_BUFFER);
		ret = -ENOMEM;
		goto out;
	}

	ret = __test_eb_bitmaps(bitmap, eb);
out:
	free_extent_buffer(eb);
	kfree(bitmap);
	btrfs_free_dummy_fs_info(fs_info);
	return ret;
}

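/*
 * Check that btrfs_find_first_clear_extent_bit() correctly reports holes:
 * in an empty tree, before, between and after set ranges, and past the
 * last set range.
 */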
static int test_find_first_clear_extent_bit(void)
{
	struct extent_io_tree tree;
	u64 start, end;
	int ret = -EINVAL;

	test_msg("running find_first_clear_extent_bit test");

	btrfs_extent_io_tree_init(NULL, &tree, IO_TREE_SELFTEST);

	/* Test correct handling of empty tree */
	btrfs_find_first_clear_extent_bit(&tree, 0, &start, &end, CHUNK_TRIMMED);
	if (start != 0 || end != -1) {
		test_err(
	"error getting a range from completely empty tree: start %llu end %llu",
			 start, end);
		goto out;
	}

	/* Set 1M-4M alloc/discard; for now everything else is a hole */
	btrfs_set_extent_bit(&tree, SZ_1M, SZ_4M - 1,
			     CHUNK_TRIMMED | CHUNK_ALLOCATED, NULL);

	btrfs_find_first_clear_extent_bit(&tree, SZ_512K, &start, &end,
					  CHUNK_TRIMMED | CHUNK_ALLOCATED);

	if (start != 0 || end != SZ_1M - 1) {
		test_err("error finding beginning range: start %llu end %llu",
			 start, end);
		goto out;
	}

	/* Now add 32M-64M so that we have a hole between 4M-32M */
	btrfs_set_extent_bit(&tree, SZ_32M, SZ_64M - 1,
			     CHUNK_TRIMMED | CHUNK_ALLOCATED, NULL);

	/*
	 * Request the first hole starting at 12M; we should get 4M-32M.
	 */
	btrfs_find_first_clear_extent_bit(&tree, 12 * SZ_1M, &start, &end,
					  CHUNK_TRIMMED | CHUNK_ALLOCATED);

	if (start != SZ_4M || end != SZ_32M - 1) {
		test_err("error finding trimmed range: start %llu end %llu",
			 start, end);
		goto out;
	}

	/*
	 * Search in the middle of allocated range, should get the next one
	 * available, which happens to be unallocated -> 4M-32M
	 */
	btrfs_find_first_clear_extent_bit(&tree, SZ_2M, &start, &end,
					  CHUNK_TRIMMED | CHUNK_ALLOCATED);

	if (start != SZ_4M || end != SZ_32M - 1) {
		test_err("error finding next unalloc range: start %llu end %llu",
			 start, end);
		goto out;
	}

	/*
	 * Set 64M-72M with the CHUNK_ALLOCATED flag, then search for
	 * CHUNK_TRIMMED being unset in this range; we should get the
	 * 64M-72M entry.
	 */
	btrfs_set_extent_bit(&tree, SZ_64M, SZ_64M + SZ_8M - 1, CHUNK_ALLOCATED, NULL);
	btrfs_find_first_clear_extent_bit(&tree, SZ_64M + SZ_1M, &start, &end,
					  CHUNK_TRIMMED);

	if (start != SZ_64M || end != SZ_64M + SZ_8M - 1) {
		test_err("error finding exact range: start %llu end %llu",
			 start, end);
		goto out;
	}

	/*
	 * Search in the middle of a set range whose immediate neighbour
	 * doesn't have the bits set, so that neighbour must be returned.
	 */
	btrfs_find_first_clear_extent_bit(&tree, SZ_64M - SZ_8M, &start, &end,
					  CHUNK_TRIMMED);

	if (start != SZ_64M || end != SZ_64M + SZ_8M - 1) {
		test_err("error finding next alloc range: start %llu end %llu",
			 start, end);
		goto out;
	}

	/*
	 * Search beyond any known range; it shall return the area after the
	 * last known range, with end == -1.
	 */
	btrfs_find_first_clear_extent_bit(&tree, -1, &start, &end, CHUNK_TRIMMED);
	if (start != SZ_64M + SZ_8M || end != -1) {
		test_err(
	"error handling beyond end of range search: start %llu end %llu",
			 start, end);
		goto out;
	}

	ret = 0;
out:
	if (ret)
		dump_extent_io_tree(&tree);
	btrfs_clear_extent_bits(&tree, 0, (u64)-1, CHUNK_TRIMMED | CHUNK_ALLOCATED);

	return ret;
}

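/*
 * On a mismatch, walk both buffers byte by byte and report the first
 * differing offset.  Used by verify_eb_and_memory() below, which does the
 * actual page-by-page comparison.
 */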
static void dump_eb_and_memory_contents(struct extent_buffer *eb, void *memory,
					const char *test_name)
{
	for (int i = 0; i < eb->len; i++) {
		struct page *page = folio_page(eb->folios[i >> PAGE_SHIFT], 0);
		void *addr = page_address(page) + offset_in_page(i);

		if (memcmp(addr, memory + i, 1) != 0) {
			test_err("%s failed", test_name);
			test_err("eb and memory diffs at byte %u, eb has 0x%02x memory has 0x%02x",
				 i, *(u8 *)addr, *(u8 *)(memory + i));
			return;
		}
	}
}

static int verify_eb_and_memory(struct extent_buffer *eb, void *memory,
				const char *test_name)
{
	for (int i = 0; i < (eb->len >> PAGE_SHIFT); i++) {
		void *eb_addr = folio_address(eb->folios[i]);

		if (memcmp(memory + (i << PAGE_SHIFT), eb_addr, PAGE_SIZE) != 0) {
			dump_eb_and_memory_contents(eb, memory, test_name);
			return -EUCLEAN;
		}
	}
	return 0;
}

/*
 * Init both memory and extent buffer contents to the same randomly generated
 * contents.
 */
static void init_eb_and_memory(struct extent_buffer *eb, void *memory)
{
	get_random_bytes(memory, eb->len);
	write_extent_buffer(eb, memory, 0, eb->len);
}

static int test_eb_mem_ops(u32 sectorsize, u32 nodesize)
{
	struct btrfs_fs_info *fs_info;
	struct extent_buffer *eb = NULL;
	void *memory = NULL;
	int ret;

	test_msg("running extent buffer memory operation tests");

	fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
	if (!fs_info) {
		test_std_err(TEST_ALLOC_FS_INFO);
		return -ENOMEM;
	}

	memory = kvzalloc(nodesize, GFP_KERNEL);
	if (!memory) {
		test_err("failed to allocate memory");
		ret = -ENOMEM;
		goto out;
	}

	eb = alloc_dummy_extent_buffer(fs_info, SZ_1M);
	if (!eb) {
		test_std_err(TEST_ALLOC_EXTENT_BUFFER);
		ret = -ENOMEM;
		goto out;
	}

	init_eb_and_memory(eb, memory);
	ret = verify_eb_and_memory(eb, memory, "full eb write");
	if (ret < 0)
		goto out;

	memcpy(memory, memory + 16, 16);
	memcpy_extent_buffer(eb, 0, 16, 16);
	ret = verify_eb_and_memory(eb, memory, "same page non-overlapping memcpy 1");
	if (ret < 0)
		goto out;

	memcpy(memory, memory + 2048, 16);
	memcpy_extent_buffer(eb, 0, 2048, 16);
	ret = verify_eb_and_memory(eb, memory, "same page non-overlapping memcpy 2");
	if (ret < 0)
		goto out;

	memcpy(memory, memory + 2048, 2048);
	memcpy_extent_buffer(eb, 0, 2048, 2048);
	ret = verify_eb_and_memory(eb, memory, "same page non-overlapping memcpy 3");
	if (ret < 0)
		goto out;

	memmove(memory + 512, memory + 256, 512);
	memmove_extent_buffer(eb, 512, 256, 512);
	ret = verify_eb_and_memory(eb, memory, "same page overlapping memcpy 1");
	if (ret < 0)
		goto out;

	memmove(memory + 2048, memory + 512, 2048);
	memmove_extent_buffer(eb, 2048, 512, 2048);
	ret = verify_eb_and_memory(eb, memory, "same page overlapping memcpy 2");
	if (ret < 0)
		goto out;

	memmove(memory + 512, memory + 2048, 2048);
	memmove_extent_buffer(eb, 512, 2048, 2048);
	ret = verify_eb_and_memory(eb, memory, "same page overlapping memcpy 3");
	if (ret < 0)
		goto out;

	if (nodesize > PAGE_SIZE) {
		memcpy(memory, memory + 4096 - 128, 256);
		memcpy_extent_buffer(eb, 0, 4096 - 128, 256);
		ret = verify_eb_and_memory(eb, memory, "cross page non-overlapping memcpy 1");
		if (ret < 0)
			goto out;

		memcpy(memory + 4096 - 128, memory + 4096 + 128, 256);
		memcpy_extent_buffer(eb, 4096 - 128, 4096 + 128, 256);
		ret = verify_eb_and_memory(eb, memory, "cross page non-overlapping memcpy 2");
		if (ret < 0)
			goto out;

		memmove(memory + 4096 - 128, memory + 4096 - 64, 256);
		memmove_extent_buffer(eb, 4096 - 128, 4096 - 64, 256);
		ret = verify_eb_and_memory(eb, memory, "cross page overlapping memcpy 1");
		if (ret < 0)
			goto out;

		memmove(memory + 4096 - 64, memory + 4096 - 128, 256);
		memmove_extent_buffer(eb, 4096 - 64, 4096 - 128, 256);
		ret = verify_eb_and_memory(eb, memory, "cross page overlapping memcpy 2");
		if (ret < 0)
			goto out;
	}
out:
	free_extent_buffer(eb);
	kvfree(memory);
	btrfs_free_dummy_fs_info(fs_info);
	return ret;
}

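/* Entry point: run each extent I/O selftest in turn, stopping on the first failure. */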
int btrfs_test_extent_io(u32 sectorsize, u32 nodesize)
{
	int ret;

	test_msg("running extent I/O tests");

	ret = test_find_delalloc(sectorsize, nodesize);
	if (ret)
		goto out;

	ret = test_find_first_clear_extent_bit();
	if (ret)
		goto out;

	ret = test_eb_bitmaps(sectorsize, nodesize);
	if (ret)
		goto out;

	ret = test_eb_mem_ops(sectorsize, nodesize);
out:
	return ret;
}