Lines matching: scrubber - done
1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2012 Fusion-io All rights reserved.
18 #include "disk-io.h"
21 #include "async-thread.h"
22 #include "file-item.h"
51 bioc->logical, bioc->full_stripe_logical, bioc->size, in dump_bioc()
52 bioc->map_type, bioc->mirror_num, bioc->replace_nr_stripes, in dump_bioc()
53 bioc->replace_stripe_src, bioc->num_stripes); in dump_bioc()
54 for (int i = 0; i < bioc->num_stripes; i++) { in dump_bioc()
56 i, bioc->stripes[i].dev->devid, in dump_bioc()
57 bioc->stripes[i].physical); in dump_bioc()
67 dump_bioc(fs_info, rbio->bioc); in btrfs_dump_rbio()
70 rbio->flags, rbio->nr_sectors, rbio->nr_data, in btrfs_dump_rbio()
71 rbio->real_stripes, rbio->stripe_nsectors, in btrfs_dump_rbio()
72 rbio->scrubp, rbio->dbitmap); in btrfs_dump_rbio()
78 const struct btrfs_fs_info *__fs_info = (rbio)->bioc ? \
79 (rbio)->bioc->fs_info : NULL; \
89 const struct btrfs_fs_info *__fs_info = (rbio)->bioc ? \
90 (rbio)->bioc->fs_info : NULL; \
101 const struct btrfs_fs_info *__fs_info = (rbio)->bioc ? \
102 (rbio)->bioc->fs_info : NULL; \
113 const struct btrfs_fs_info *__fs_info = (rbio)->bioc ? \
114 (rbio)->bioc->fs_info : NULL; \
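The two lines above appear four times because each of the rbio assertion macros (ASSERT_RBIO and its stripe, sector and logical variants) begins the same way: resolve fs_info from the bioc, dump the rbio, then trip the ordinary assert. A minimal sketch of that shape, assuming the btrfs_dump_rbio() helper and the CONFIG_BTRFS_ASSERT guard; this is not the verbatim kernel definition:

    #define ASSERT_RBIO(expr, rbio)                                        \
    ({                                                                     \
            if (IS_ENABLED(CONFIG_BTRFS_ASSERT) && unlikely(!(expr))) {    \
                    const struct btrfs_fs_info *__fs_info = (rbio)->bioc ? \
                            (rbio)->bioc->fs_info : NULL;                  \
                                                                           \
                    /* Dump the rbio state before triggering the assert. */\
                    btrfs_dump_rbio(__fs_info, (rbio));                    \
                    ASSERT(expr);                                          \
            }                                                              \
    })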
157 bitmap_free(rbio->error_bitmap); in free_raid_bio_pointers()
158 kfree(rbio->stripe_pages); in free_raid_bio_pointers()
159 kfree(rbio->bio_sectors); in free_raid_bio_pointers()
160 kfree(rbio->stripe_sectors); in free_raid_bio_pointers()
161 kfree(rbio->finish_pointers); in free_raid_bio_pointers()
168 if (!refcount_dec_and_test(&rbio->refs)) in free_raid_bio()
171 WARN_ON(!list_empty(&rbio->stripe_cache)); in free_raid_bio()
172 WARN_ON(!list_empty(&rbio->hash_list)); in free_raid_bio()
173 WARN_ON(!bio_list_empty(&rbio->bio_list)); in free_raid_bio()
175 for (i = 0; i < rbio->nr_pages; i++) { in free_raid_bio()
176 if (rbio->stripe_pages[i]) { in free_raid_bio()
177 __free_page(rbio->stripe_pages[i]); in free_raid_bio()
178 rbio->stripe_pages[i] = NULL; in free_raid_bio()
182 btrfs_put_bioc(rbio->bioc); in free_raid_bio()
189 INIT_WORK(&rbio->work, work_func); in start_async_work()
190 queue_work(rbio->bioc->fs_info->rmw_workers, &rbio->work); in start_async_work()
206 if (info->stripe_hash_table) in btrfs_alloc_stripe_hash_table()
218 return -ENOMEM; in btrfs_alloc_stripe_hash_table()
220 spin_lock_init(&table->cache_lock); in btrfs_alloc_stripe_hash_table()
221 INIT_LIST_HEAD(&table->stripe_cache); in btrfs_alloc_stripe_hash_table()
223 h = table->table; in btrfs_alloc_stripe_hash_table()
227 INIT_LIST_HEAD(&cur->hash_list); in btrfs_alloc_stripe_hash_table()
228 spin_lock_init(&cur->lock); in btrfs_alloc_stripe_hash_table()
231 x = cmpxchg(&info->stripe_hash_table, NULL, table); in btrfs_alloc_stripe_hash_table()
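The kvzalloc/cmpxchg sequence above is a race-free lazy initialization: each caller allocates and fully initializes a candidate table, but only the first cmpxchg() against the NULL pointer publishes one; losers free their copy. A self-contained userspace analogue using C11 atomics (the type and function names here are illustrative, not btrfs symbols):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct table { int dummy; /* hash buckets would live here */ };

    static _Atomic(struct table *) global_table;

    static struct table *get_table(void)
    {
            struct table *cur = atomic_load(&global_table);
            struct table *mine;

            if (cur)                        /* already published */
                    return cur;

            mine = calloc(1, sizeof(*mine));
            if (!mine)
                    return NULL;

            /* Publish only if the pointer is still NULL; otherwise another
             * thread won the race and 'cur' now holds its table. */
            if (atomic_compare_exchange_strong(&global_table, &cur, mine))
                    return mine;

            free(mine);
            return cur;
    }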
242 * once the caching is done, we set the cache ready
254 for (i = 0; i < rbio->nr_sectors; i++) { in cache_rbio_pages()
256 if (!rbio->bio_sectors[i].page) { in cache_rbio_pages()
262 if (i < rbio->nr_data * rbio->stripe_nsectors) in cache_rbio_pages()
263 ASSERT(rbio->stripe_sectors[i].uptodate); in cache_rbio_pages()
267 ASSERT(rbio->stripe_sectors[i].page); in cache_rbio_pages()
268 memcpy_page(rbio->stripe_sectors[i].page, in cache_rbio_pages()
269 rbio->stripe_sectors[i].pgoff, in cache_rbio_pages()
270 rbio->bio_sectors[i].page, in cache_rbio_pages()
271 rbio->bio_sectors[i].pgoff, in cache_rbio_pages()
272 rbio->bioc->fs_info->sectorsize); in cache_rbio_pages()
273 rbio->stripe_sectors[i].uptodate = 1; in cache_rbio_pages()
275 set_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in cache_rbio_pages()
283 u64 num = rbio->bioc->full_stripe_logical; in rbio_bucket()
299 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in full_page_sectors_uptodate()
303 ASSERT(page_nr < rbio->nr_pages); in full_page_sectors_uptodate()
308 if (!rbio->stripe_sectors[i].uptodate) in full_page_sectors_uptodate()
321 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in index_stripe_sectors()
325 for (i = 0, offset = 0; i < rbio->nr_sectors; i++, offset += sectorsize) { in index_stripe_sectors()
328 ASSERT(page_index < rbio->nr_pages); in index_stripe_sectors()
329 rbio->stripe_sectors[i].page = rbio->stripe_pages[page_index]; in index_stripe_sectors()
330 rbio->stripe_sectors[i].pgoff = offset_in_page(offset); in index_stripe_sectors()
337 const u32 sectorsize = src->bioc->fs_info->sectorsize; in steal_rbio_page()
341 if (dest->stripe_pages[page_nr]) in steal_rbio_page()
342 __free_page(dest->stripe_pages[page_nr]); in steal_rbio_page()
343 dest->stripe_pages[page_nr] = src->stripe_pages[page_nr]; in steal_rbio_page()
344 src->stripe_pages[page_nr] = NULL; in steal_rbio_page()
346 /* Also update the sector->uptodate bits. */ in steal_rbio_page()
349 dest->stripe_sectors[i].uptodate = true; in steal_rbio_page()
355 rbio->bioc->fs_info->sectorsize_bits; in is_data_stripe_page()
364 return (sector_nr < rbio->nr_data * rbio->stripe_nsectors); in is_data_stripe_page()
378 if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags)) in steal_rbio()
381 for (i = 0; i < dest->nr_pages; i++) { in steal_rbio()
382 struct page *p = src->stripe_pages[i]; in steal_rbio()
408 * must be called with dest->rbio_list_lock held
413 bio_list_merge_init(&dest->bio_list, &victim->bio_list); in merge_rbio()
414 dest->bio_list_bytes += victim->bio_list_bytes; in merge_rbio()
416 bitmap_or(&dest->dbitmap, &victim->dbitmap, &dest->dbitmap, in merge_rbio()
417 dest->stripe_nsectors); in merge_rbio()
434 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags)) in __remove_rbio_from_cache()
437 table = rbio->bioc->fs_info->stripe_hash_table; in __remove_rbio_from_cache()
438 h = table->table + bucket; in __remove_rbio_from_cache()
443 spin_lock(&h->lock); in __remove_rbio_from_cache()
449 spin_lock(&rbio->bio_list_lock); in __remove_rbio_from_cache()
451 if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) { in __remove_rbio_from_cache()
452 list_del_init(&rbio->stripe_cache); in __remove_rbio_from_cache()
453 table->cache_size -= 1; in __remove_rbio_from_cache()
465 if (bio_list_empty(&rbio->bio_list)) { in __remove_rbio_from_cache()
466 if (!list_empty(&rbio->hash_list)) { in __remove_rbio_from_cache()
467 list_del_init(&rbio->hash_list); in __remove_rbio_from_cache()
468 refcount_dec(&rbio->refs); in __remove_rbio_from_cache()
469 BUG_ON(!list_empty(&rbio->plug_list)); in __remove_rbio_from_cache()
474 spin_unlock(&rbio->bio_list_lock); in __remove_rbio_from_cache()
475 spin_unlock(&h->lock); in __remove_rbio_from_cache()
488 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags)) in remove_rbio_from_cache()
491 table = rbio->bioc->fs_info->stripe_hash_table; in remove_rbio_from_cache()
493 spin_lock(&table->cache_lock); in remove_rbio_from_cache()
495 spin_unlock(&table->cache_lock); in remove_rbio_from_cache()
506 table = info->stripe_hash_table; in btrfs_clear_rbio_cache()
508 spin_lock(&table->cache_lock); in btrfs_clear_rbio_cache()
509 while (!list_empty(&table->stripe_cache)) { in btrfs_clear_rbio_cache()
510 rbio = list_entry(table->stripe_cache.next, in btrfs_clear_rbio_cache()
515 spin_unlock(&table->cache_lock); in btrfs_clear_rbio_cache()
524 if (!info->stripe_hash_table) in btrfs_free_stripe_hash_table()
527 kvfree(info->stripe_hash_table); in btrfs_free_stripe_hash_table()
528 info->stripe_hash_table = NULL; in btrfs_free_stripe_hash_table()
546 if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags)) in cache_rbio()
549 table = rbio->bioc->fs_info->stripe_hash_table; in cache_rbio()
551 spin_lock(&table->cache_lock); in cache_rbio()
552 spin_lock(&rbio->bio_list_lock); in cache_rbio()
555 if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags)) in cache_rbio()
556 refcount_inc(&rbio->refs); in cache_rbio()
558 if (!list_empty(&rbio->stripe_cache)){ in cache_rbio()
559 list_move(&rbio->stripe_cache, &table->stripe_cache); in cache_rbio()
561 list_add(&rbio->stripe_cache, &table->stripe_cache); in cache_rbio()
562 table->cache_size += 1; in cache_rbio()
565 spin_unlock(&rbio->bio_list_lock); in cache_rbio()
567 if (table->cache_size > RBIO_CACHE_SIZE) { in cache_rbio()
570 found = list_entry(table->stripe_cache.prev, in cache_rbio()
578 spin_unlock(&table->cache_lock); in cache_rbio()
596 src_cnt -= xor_src_cnt; in run_xor()
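The `src_cnt -= xor_src_cnt` line is the tail of a loop that XORs the source pages into the parity buffer in batches, because the kernel's xor_blocks() helper takes a bounded number of sources per call (MAX_XOR_BLOCKS). A hedged sketch of that loop shape, with the surrounding variable names assumed:

    /* dest is pages[src_cnt]; hand at most MAX_XOR_BLOCKS sources
     * to xor_blocks() per iteration until all sources are folded in. */
    while (src_cnt > 0) {
            int xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);

            xor_blocks(xor_src_cnt, len, dest, pages + src_off);
            src_cnt -= xor_src_cnt;
            src_off += xor_src_cnt;
    }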
607 unsigned long size = rbio->bio_list_bytes; in rbio_is_full()
610 spin_lock(&rbio->bio_list_lock); in rbio_is_full()
611 if (size != rbio->nr_data * BTRFS_STRIPE_LEN) in rbio_is_full()
613 BUG_ON(size > rbio->nr_data * BTRFS_STRIPE_LEN); in rbio_is_full()
614 spin_unlock(&rbio->bio_list_lock); in rbio_is_full()
632 if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) || in rbio_can_merge()
633 test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) in rbio_can_merge()
643 if (test_bit(RBIO_CACHE_BIT, &last->flags) || in rbio_can_merge()
644 test_bit(RBIO_CACHE_BIT, &cur->flags)) in rbio_can_merge()
647 if (last->bioc->full_stripe_logical != cur->bioc->full_stripe_logical) in rbio_can_merge()
651 if (last->operation != cur->operation) in rbio_can_merge()
661 if (last->operation == BTRFS_RBIO_PARITY_SCRUB) in rbio_can_merge()
664 if (last->operation == BTRFS_RBIO_READ_REBUILD) in rbio_can_merge()
674 ASSERT_RBIO_STRIPE(stripe_nr < rbio->real_stripes, rbio, stripe_nr); in rbio_stripe_sector_index()
675 ASSERT_RBIO_SECTOR(sector_nr < rbio->stripe_nsectors, rbio, sector_nr); in rbio_stripe_sector_index()
677 return stripe_nr * rbio->stripe_nsectors + sector_nr; in rbio_stripe_sector_index()
680 /* Return a sector from rbio->stripe_sectors, not from the bio list */
685 return &rbio->stripe_sectors[rbio_stripe_sector_index(rbio, stripe_nr, in rbio_stripe_sector()
693 return rbio_stripe_sector(rbio, rbio->nr_data, sector_nr); in rbio_pstripe_sector()
700 if (rbio->nr_data + 1 == rbio->real_stripes) in rbio_qstripe_sector()
702 return rbio_stripe_sector(rbio, rbio->nr_data + 1, sector_nr); in rbio_qstripe_sector()
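The helpers above flatten a (stripe, sector) pair into a single index: data stripes come first, then the P stripe at position nr_data, and, only when real_stripes == nr_data + 2 (RAID6), the Q stripe right after it. A small standalone example of that arithmetic (the geometry values are illustrative, not taken from the file):

    #include <stdio.h>

    /* Flat index into the per-rbio sector array. */
    static int stripe_sector_index(int stripe_nr, int sector_nr, int stripe_nsectors)
    {
            return stripe_nr * stripe_nsectors + sector_nr;
    }

    int main(void)
    {
            int nr_data = 3, stripe_nsectors = 16;  /* e.g. 64K stripe of 4K sectors */

            /* P lives at stripe nr_data, Q (RAID6 only) at nr_data + 1. */
            printf("P[5] -> index %d\n", stripe_sector_index(nr_data, 5, stripe_nsectors));
            printf("Q[5] -> index %d\n", stripe_sector_index(nr_data + 1, 5, stripe_nsectors));
            return 0;
    }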
736 h = rbio->bioc->fs_info->stripe_hash_table->table + rbio_bucket(rbio); in lock_stripe_add()
738 spin_lock(&h->lock); in lock_stripe_add()
739 list_for_each_entry(cur, &h->hash_list, hash_list) { in lock_stripe_add()
740 if (cur->bioc->full_stripe_logical != rbio->bioc->full_stripe_logical) in lock_stripe_add()
743 spin_lock(&cur->bio_list_lock); in lock_stripe_add()
746 if (bio_list_empty(&cur->bio_list) && in lock_stripe_add()
747 list_empty(&cur->plug_list) && in lock_stripe_add()
748 test_bit(RBIO_CACHE_BIT, &cur->flags) && in lock_stripe_add()
749 !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) { in lock_stripe_add()
750 list_del_init(&cur->hash_list); in lock_stripe_add()
751 refcount_dec(&cur->refs); in lock_stripe_add()
755 spin_unlock(&cur->bio_list_lock); in lock_stripe_add()
763 spin_unlock(&cur->bio_list_lock); in lock_stripe_add()
775 list_for_each_entry(pending, &cur->plug_list, plug_list) { in lock_stripe_add()
778 spin_unlock(&cur->bio_list_lock); in lock_stripe_add()
789 list_add_tail(&rbio->plug_list, &cur->plug_list); in lock_stripe_add()
790 spin_unlock(&cur->bio_list_lock); in lock_stripe_add()
795 refcount_inc(&rbio->refs); in lock_stripe_add()
796 list_add(&rbio->hash_list, &h->hash_list); in lock_stripe_add()
798 spin_unlock(&h->lock); in lock_stripe_add()
819 h = rbio->bioc->fs_info->stripe_hash_table->table + bucket; in unlock_stripe()
821 if (list_empty(&rbio->plug_list)) in unlock_stripe()
824 spin_lock(&h->lock); in unlock_stripe()
825 spin_lock(&rbio->bio_list_lock); in unlock_stripe()
827 if (!list_empty(&rbio->hash_list)) { in unlock_stripe()
833 if (list_empty(&rbio->plug_list) && in unlock_stripe()
834 test_bit(RBIO_CACHE_BIT, &rbio->flags)) { in unlock_stripe()
836 clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); in unlock_stripe()
837 BUG_ON(!bio_list_empty(&rbio->bio_list)); in unlock_stripe()
838 goto done; in unlock_stripe()
841 list_del_init(&rbio->hash_list); in unlock_stripe()
842 refcount_dec(&rbio->refs); in unlock_stripe()
849 if (!list_empty(&rbio->plug_list)) { in unlock_stripe()
851 struct list_head *head = rbio->plug_list.next; in unlock_stripe()
856 list_del_init(&rbio->plug_list); in unlock_stripe()
858 list_add(&next->hash_list, &h->hash_list); in unlock_stripe()
859 refcount_inc(&next->refs); in unlock_stripe()
860 spin_unlock(&rbio->bio_list_lock); in unlock_stripe()
861 spin_unlock(&h->lock); in unlock_stripe()
863 if (next->operation == BTRFS_RBIO_READ_REBUILD) { in unlock_stripe()
865 } else if (next->operation == BTRFS_RBIO_WRITE) { in unlock_stripe()
868 } else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) { in unlock_stripe()
876 done: in unlock_stripe()
877 spin_unlock(&rbio->bio_list_lock); in unlock_stripe()
878 spin_unlock(&h->lock); in unlock_stripe()
890 next = cur->bi_next; in rbio_endio_bio_list()
891 cur->bi_next = NULL; in rbio_endio_bio_list()
892 cur->bi_status = err; in rbio_endio_bio_list()
904 struct bio *cur = bio_list_get(&rbio->bio_list); in rbio_orig_end_io()
907 kfree(rbio->csum_buf); in rbio_orig_end_io()
908 bitmap_free(rbio->csum_bitmap); in rbio_orig_end_io()
909 rbio->csum_buf = NULL; in rbio_orig_end_io()
910 rbio->csum_bitmap = NULL; in rbio_orig_end_io()
917 bitmap_clear(&rbio->dbitmap, 0, rbio->stripe_nsectors); in rbio_orig_end_io()
920 * At this moment, rbio->bio_list is empty, however since rbio does not in rbio_orig_end_io()
922 * hash list, rbio may be merged with others so that rbio->bio_list in rbio_orig_end_io()
923 * becomes non-empty. in rbio_orig_end_io()
924 * Once unlock_stripe() is done, rbio->bio_list will not be updated any in rbio_orig_end_io()
928 extra = bio_list_get(&rbio->bio_list); in rbio_orig_end_io()
955 ASSERT_RBIO_STRIPE(stripe_nr >= 0 && stripe_nr < rbio->real_stripes, in sector_in_rbio()
957 ASSERT_RBIO_SECTOR(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors, in sector_in_rbio()
960 index = stripe_nr * rbio->stripe_nsectors + sector_nr; in sector_in_rbio()
961 ASSERT(index >= 0 && index < rbio->nr_sectors); in sector_in_rbio()
963 spin_lock(&rbio->bio_list_lock); in sector_in_rbio()
964 sector = &rbio->bio_sectors[index]; in sector_in_rbio()
965 if (sector->page || bio_list_only) { in sector_in_rbio()
967 if (!sector->page) in sector_in_rbio()
969 spin_unlock(&rbio->bio_list_lock); in sector_in_rbio()
972 spin_unlock(&rbio->bio_list_lock); in sector_in_rbio()
974 return &rbio->stripe_sectors[index]; in sector_in_rbio()
979 * this does not allocate any pages for rbio->pages.
984 const unsigned int real_stripes = bioc->num_stripes - bioc->replace_nr_stripes; in alloc_rbio()
988 BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits; in alloc_rbio()
993 ASSERT(IS_ALIGNED(PAGE_SIZE, fs_info->sectorsize)); in alloc_rbio()
1009 return ERR_PTR(-ENOMEM); in alloc_rbio()
1010 rbio->stripe_pages = kcalloc(num_pages, sizeof(struct page *), in alloc_rbio()
1012 rbio->bio_sectors = kcalloc(num_sectors, sizeof(struct sector_ptr), in alloc_rbio()
1014 rbio->stripe_sectors = kcalloc(num_sectors, sizeof(struct sector_ptr), in alloc_rbio()
1016 rbio->finish_pointers = kcalloc(real_stripes, sizeof(void *), GFP_NOFS); in alloc_rbio()
1017 rbio->error_bitmap = bitmap_zalloc(num_sectors, GFP_NOFS); in alloc_rbio()
1019 if (!rbio->stripe_pages || !rbio->bio_sectors || !rbio->stripe_sectors || in alloc_rbio()
1020 !rbio->finish_pointers || !rbio->error_bitmap) { in alloc_rbio()
1023 return ERR_PTR(-ENOMEM); in alloc_rbio()
1026 bio_list_init(&rbio->bio_list); in alloc_rbio()
1027 init_waitqueue_head(&rbio->io_wait); in alloc_rbio()
1028 INIT_LIST_HEAD(&rbio->plug_list); in alloc_rbio()
1029 spin_lock_init(&rbio->bio_list_lock); in alloc_rbio()
1030 INIT_LIST_HEAD(&rbio->stripe_cache); in alloc_rbio()
1031 INIT_LIST_HEAD(&rbio->hash_list); in alloc_rbio()
1033 rbio->bioc = bioc; in alloc_rbio()
1034 rbio->nr_pages = num_pages; in alloc_rbio()
1035 rbio->nr_sectors = num_sectors; in alloc_rbio()
1036 rbio->real_stripes = real_stripes; in alloc_rbio()
1037 rbio->stripe_npages = stripe_npages; in alloc_rbio()
1038 rbio->stripe_nsectors = stripe_nsectors; in alloc_rbio()
1039 refcount_set(&rbio->refs, 1); in alloc_rbio()
1040 atomic_set(&rbio->stripes_pending, 0); in alloc_rbio()
1042 ASSERT(btrfs_nr_parity_stripes(bioc->map_type)); in alloc_rbio()
1043 rbio->nr_data = real_stripes - btrfs_nr_parity_stripes(bioc->map_type); in alloc_rbio()
1044 ASSERT(rbio->nr_data > 0); in alloc_rbio()
1054 ret = btrfs_alloc_page_array(rbio->nr_pages, rbio->stripe_pages, false); in alloc_rbio_pages()
1065 const int data_pages = rbio->nr_data * rbio->stripe_npages; in alloc_rbio_parity_pages()
1068 ret = btrfs_alloc_page_array(rbio->nr_pages - data_pages, in alloc_rbio_parity_pages()
1069 rbio->stripe_pages + data_pages, false); in alloc_rbio_parity_pages()
1095 *faila = -1; in get_rbio_veritical_errors()
1096 *failb = -1; in get_rbio_veritical_errors()
1099 for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) { in get_rbio_veritical_errors()
1100 int total_sector_nr = stripe_nr * rbio->stripe_nsectors + sector_nr; in get_rbio_veritical_errors()
1102 if (test_bit(total_sector_nr, rbio->error_bitmap)) { in get_rbio_veritical_errors()
1129 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in rbio_add_io_sector()
1130 struct bio *last = bio_list->tail; in rbio_add_io_sector()
1138 * thus it can be larger than rbio->real_stripes. in rbio_add_io_sector()
1139 * So here we check against bioc->num_stripes, not rbio->real_stripes. in rbio_add_io_sector()
1141 ASSERT_RBIO_STRIPE(stripe_nr >= 0 && stripe_nr < rbio->bioc->num_stripes, in rbio_add_io_sector()
1143 ASSERT_RBIO_SECTOR(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors, in rbio_add_io_sector()
1145 ASSERT(sector->page); in rbio_add_io_sector()
1147 stripe = &rbio->bioc->stripes[stripe_nr]; in rbio_add_io_sector()
1148 disk_start = stripe->physical + sector_nr * sectorsize; in rbio_add_io_sector()
1151 if (!stripe->dev->bdev) { in rbio_add_io_sector()
1154 set_bit(stripe_nr * rbio->stripe_nsectors + sector_nr, in rbio_add_io_sector()
1155 rbio->error_bitmap); in rbio_add_io_sector()
1160 if (found_errors > rbio->bioc->max_errors) in rbio_add_io_sector()
1161 return -EIO; in rbio_add_io_sector()
1167 u64 last_end = last->bi_iter.bi_sector << SECTOR_SHIFT; in rbio_add_io_sector()
1168 last_end += last->bi_iter.bi_size; in rbio_add_io_sector()
1174 if (last_end == disk_start && !last->bi_status && in rbio_add_io_sector()
1175 last->bi_bdev == stripe->dev->bdev) { in rbio_add_io_sector()
1176 ret = bio_add_page(last, sector->page, sectorsize, in rbio_add_io_sector()
1177 sector->pgoff); in rbio_add_io_sector()
1184 bio = bio_alloc(stripe->dev->bdev, in rbio_add_io_sector()
1187 bio->bi_iter.bi_sector = disk_start >> SECTOR_SHIFT; in rbio_add_io_sector()
1188 bio->bi_private = rbio; in rbio_add_io_sector()
1190 __bio_add_page(bio, sector->page, sectorsize, sector->pgoff); in rbio_add_io_sector()
1197 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in index_one_bio()
1200 u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) - in index_one_bio()
1201 rbio->bioc->full_stripe_logical; in index_one_bio()
1209 struct sector_ptr *sector = &rbio->bio_sectors[index]; in index_one_bio()
1211 sector->page = bvec.bv_page; in index_one_bio()
1212 sector->pgoff = bvec.bv_offset + bvec_offset; in index_one_bio()
1213 ASSERT(sector->pgoff < PAGE_SIZE); in index_one_bio()
1230 spin_lock(&rbio->bio_list_lock); in index_rbio_pages()
1231 bio_list_for_each(bio, &rbio->bio_list) in index_rbio_pages()
1234 spin_unlock(&rbio->bio_list_lock); in index_rbio_pages()
1240 const struct btrfs_io_context *bioc = rbio->bioc; in bio_get_trace_info()
1245 /* We rely on bio->bi_bdev to find the stripe number. */ in bio_get_trace_info()
1246 if (!bio->bi_bdev) in bio_get_trace_info()
1249 for (i = 0; i < bioc->num_stripes; i++) { in bio_get_trace_info()
1250 if (bio->bi_bdev != bioc->stripes[i].dev->bdev) in bio_get_trace_info()
1252 trace_info->stripe_nr = i; in bio_get_trace_info()
1253 trace_info->devid = bioc->stripes[i].dev->devid; in bio_get_trace_info()
1254 trace_info->offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) - in bio_get_trace_info()
1255 bioc->stripes[i].physical; in bio_get_trace_info()
1260 trace_info->devid = -1; in bio_get_trace_info()
1261 trace_info->offset = -1; in bio_get_trace_info()
1262 trace_info->stripe_nr = -1; in bio_get_trace_info()
1282 ASSERT_RBIO(rbio->real_stripes >= 2, rbio); in assert_rbio()
1283 ASSERT_RBIO(rbio->nr_data > 0, rbio); in assert_rbio()
1289 ASSERT_RBIO(rbio->nr_data < rbio->real_stripes, rbio); in assert_rbio()
1295 void **pointers = rbio->finish_pointers; in generate_pq_vertical()
1296 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in generate_pq_vertical()
1299 const bool has_qstripe = rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6; in generate_pq_vertical()
1302 for (stripe = 0; stripe < rbio->nr_data; stripe++) { in generate_pq_vertical()
1304 pointers[stripe] = kmap_local_page(sector->page) + in generate_pq_vertical()
1305 sector->pgoff; in generate_pq_vertical()
1310 sector->uptodate = 1; in generate_pq_vertical()
1311 pointers[stripe++] = kmap_local_page(sector->page) + sector->pgoff; in generate_pq_vertical()
1319 sector->uptodate = 1; in generate_pq_vertical()
1320 pointers[stripe++] = kmap_local_page(sector->page) + in generate_pq_vertical()
1321 sector->pgoff; in generate_pq_vertical()
1324 raid6_call.gen_syndrome(rbio->real_stripes, sectorsize, in generate_pq_vertical()
1328 memcpy(pointers[rbio->nr_data], pointers[0], sectorsize); in generate_pq_vertical()
1329 run_xor(pointers + 1, rbio->nr_data - 1, sectorsize); in generate_pq_vertical()
1331 for (stripe = stripe - 1; stripe >= 0; stripe--) in generate_pq_vertical()
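At each vertical position generate_pq_vertical() maps one sector from every stripe and computes parity in place: the RAID5 path copies the first data sector into P and XORs the remaining nr_data - 1 sectors in, while the RAID6 path hands all real_stripes pointers to raid6_call.gen_syndrome() to fill P and Q together. A tiny self-contained illustration of the RAID5 (P only) math, with made-up sector contents:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define NR_DATA    3
    #define SECTORSIZE 8    /* toy sector size for illustration */

    int main(void)
    {
            uint8_t data[NR_DATA][SECTORSIZE] = { "abcdefg", "hijklmn", "opqrstu" };
            uint8_t p[SECTORSIZE];

            /* P starts as a copy of the first data sector ... */
            memcpy(p, data[0], SECTORSIZE);
            /* ... then the remaining nr_data - 1 sectors are XORed in. */
            for (int i = 1; i < NR_DATA; i++)
                    for (int off = 0; off < SECTORSIZE; off++)
                            p[off] ^= data[i][off];

            for (int off = 0; off < SECTORSIZE; off++)
                    printf("%02x", p[off]);
            printf("\n");
            return 0;
    }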
1347 ASSERT(bitmap_weight(&rbio->dbitmap, rbio->stripe_nsectors)); in rmw_assemble_write_bios()
1353 bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors); in rmw_assemble_write_bios()
1359 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; in rmw_assemble_write_bios()
1363 stripe = total_sector_nr / rbio->stripe_nsectors; in rmw_assemble_write_bios()
1364 sectornr = total_sector_nr % rbio->stripe_nsectors; in rmw_assemble_write_bios()
1367 if (!test_bit(sectornr, &rbio->dbitmap)) in rmw_assemble_write_bios()
1370 if (stripe < rbio->nr_data) { in rmw_assemble_write_bios()
1384 if (likely(!rbio->bioc->replace_nr_stripes)) in rmw_assemble_write_bios()
1392 ASSERT(rbio->bioc->replace_stripe_src >= 0); in rmw_assemble_write_bios()
1394 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; in rmw_assemble_write_bios()
1398 stripe = total_sector_nr / rbio->stripe_nsectors; in rmw_assemble_write_bios()
1399 sectornr = total_sector_nr % rbio->stripe_nsectors; in rmw_assemble_write_bios()
1406 if (stripe != rbio->bioc->replace_stripe_src) { in rmw_assemble_write_bios()
1412 total_sector_nr += rbio->stripe_nsectors - 1; in rmw_assemble_write_bios()
1417 if (!test_bit(sectornr, &rbio->dbitmap)) in rmw_assemble_write_bios()
1420 if (stripe < rbio->nr_data) { in rmw_assemble_write_bios()
1429 rbio->real_stripes, in rmw_assemble_write_bios()
1438 return -EIO; in rmw_assemble_write_bios()
1443 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; in set_rbio_range_error()
1444 u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) - in set_rbio_range_error()
1445 rbio->bioc->full_stripe_logical; in set_rbio_range_error()
1446 int total_nr_sector = offset >> fs_info->sectorsize_bits; in set_rbio_range_error()
1448 ASSERT(total_nr_sector < rbio->nr_data * rbio->stripe_nsectors); in set_rbio_range_error()
1450 bitmap_set(rbio->error_bitmap, total_nr_sector, in set_rbio_range_error()
1451 bio->bi_iter.bi_size >> fs_info->sectorsize_bits); in set_rbio_range_error()
1459 if (bio->bi_iter.bi_size == 0) { in set_rbio_range_error()
1463 for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) { in set_rbio_range_error()
1464 if (!rbio->bioc->stripes[stripe_nr].dev->bdev) { in set_rbio_range_error()
1466 bitmap_set(rbio->error_bitmap, in set_rbio_range_error()
1467 stripe_nr * rbio->stripe_nsectors, in set_rbio_range_error()
1468 rbio->stripe_nsectors); in set_rbio_range_error()
1476 * For subpage case, we can no longer set page Up-to-date directly for
1485 for (i = 0; i < rbio->nr_sectors; i++) { in find_stripe_sector()
1486 struct sector_ptr *sector = &rbio->stripe_sectors[i]; in find_stripe_sector()
1488 if (sector->page == page && sector->pgoff == pgoff) in find_stripe_sector()
1500 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in set_bio_pages_uptodate()
1510 for (pgoff = bvec->bv_offset; pgoff - bvec->bv_offset < bvec->bv_len; in set_bio_pages_uptodate()
1512 sector = find_stripe_sector(rbio, bvec->bv_page, pgoff); in set_bio_pages_uptodate()
1515 sector->uptodate = 1; in set_bio_pages_uptodate()
1525 for (i = 0; i < rbio->nr_sectors; i++) { in get_bio_sector_nr()
1528 sector = &rbio->stripe_sectors[i]; in get_bio_sector_nr()
1529 if (sector->page == bv->bv_page && sector->pgoff == bv->bv_offset) in get_bio_sector_nr()
1531 sector = &rbio->bio_sectors[i]; in get_bio_sector_nr()
1532 if (sector->page == bv->bv_page && sector->pgoff == bv->bv_offset) in get_bio_sector_nr()
1535 ASSERT(i < rbio->nr_sectors); in get_bio_sector_nr()
1547 bio_size += bvec->bv_len; in rbio_update_error_bitmap()
1556 (bio_size >> rbio->bioc->fs_info->sectorsize_bits); i++) in rbio_update_error_bitmap()
1557 set_bit(i, rbio->error_bitmap); in rbio_update_error_bitmap()
1564 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; in verify_bio_data_sectors()
1570 if (!rbio->csum_bitmap || !rbio->csum_buf) in verify_bio_data_sectors()
1574 if (total_sector_nr >= rbio->nr_data * rbio->stripe_nsectors) in verify_bio_data_sectors()
1580 for (bv_offset = bvec->bv_offset; in verify_bio_data_sectors()
1581 bv_offset < bvec->bv_offset + bvec->bv_len; in verify_bio_data_sectors()
1582 bv_offset += fs_info->sectorsize, total_sector_nr++) { in verify_bio_data_sectors()
1584 u8 *expected_csum = rbio->csum_buf + in verify_bio_data_sectors()
1585 total_sector_nr * fs_info->csum_size; in verify_bio_data_sectors()
1589 if (!test_bit(total_sector_nr, rbio->csum_bitmap)) in verify_bio_data_sectors()
1592 ret = btrfs_check_sector_csum(fs_info, bvec->bv_page, in verify_bio_data_sectors()
1595 set_bit(total_sector_nr, rbio->error_bitmap); in verify_bio_data_sectors()
1602 struct btrfs_raid_bio *rbio = bio->bi_private; in raid_wait_read_end_io()
1604 if (bio->bi_status) { in raid_wait_read_end_io()
1612 if (atomic_dec_and_test(&rbio->stripes_pending)) in raid_wait_read_end_io()
1613 wake_up(&rbio->io_wait); in raid_wait_read_end_io()
1621 atomic_set(&rbio->stripes_pending, bio_list_size(bio_list)); in submit_read_wait_bio_list()
1623 bio->bi_end_io = raid_wait_read_end_io; in submit_read_wait_bio_list()
1634 wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0); in submit_read_wait_bio_list()
1639 const int data_pages = rbio->nr_data * rbio->stripe_npages; in alloc_rbio_data_pages()
1642 ret = btrfs_alloc_page_array(data_pages, rbio->stripe_pages, false); in alloc_rbio_data_pages()
1673 u64 a_sector = ra->bio_list.head->bi_iter.bi_sector; in plug_cmp()
1674 u64 b_sector = rb->bio_list.head->bi_iter.bi_sector; in plug_cmp()
1677 return -1; in plug_cmp()
1689 list_sort(NULL, &plug->rbio_list, plug_cmp); in raid_unplug()
1691 while (!list_empty(&plug->rbio_list)) { in raid_unplug()
1692 cur = list_entry(plug->rbio_list.next, in raid_unplug()
1694 list_del_init(&cur->plug_list); in raid_unplug()
1716 /* Add the original bio into rbio->bio_list, and update rbio::dbitmap. */
1719 const struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; in rbio_add_bio()
1720 const u64 orig_logical = orig_bio->bi_iter.bi_sector << SECTOR_SHIFT; in rbio_add_bio()
1721 const u64 full_stripe_start = rbio->bioc->full_stripe_logical; in rbio_add_bio()
1722 const u32 orig_len = orig_bio->bi_iter.bi_size; in rbio_add_bio()
1723 const u32 sectorsize = fs_info->sectorsize; in rbio_add_bio()
1728 rbio->nr_data * BTRFS_STRIPE_LEN, in rbio_add_bio()
1731 bio_list_add(&rbio->bio_list, orig_bio); in rbio_add_bio()
1732 rbio->bio_list_bytes += orig_bio->bi_iter.bi_size; in rbio_add_bio()
1737 int bit = ((u32)(cur_logical - full_stripe_start) >> in rbio_add_bio()
1738 fs_info->sectorsize_bits) % rbio->stripe_nsectors; in rbio_add_bio()
1740 set_bit(bit, &rbio->dbitmap); in rbio_add_bio()
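The bit computed above folds the bio's logical offset within the full stripe onto a single stripe's worth of sectors, so dbitmap needs only stripe_nsectors bits no matter how many data stripes the bio spans. A short standalone check of that arithmetic (all numbers below are illustrative):

    #include <stdio.h>

    int main(void)
    {
            unsigned long long full_stripe_start = 1048576;         /* illustrative */
            unsigned long long cur_logical = full_stripe_start + 3 * 65536 + 20480;
            unsigned int sectorsize_bits = 12;                      /* 4K sectors */
            unsigned int stripe_nsectors = 16;                      /* 64K / 4K */

            /* Sector offset from the full stripe start, folded onto one stripe. */
            int bit = (unsigned int)((cur_logical - full_stripe_start) >> sectorsize_bits)
                      % stripe_nsectors;

            printf("dbitmap bit = %d\n", bit);      /* 53 % 16 -> bit 5 */
            return 0;
    }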
1749 struct btrfs_fs_info *fs_info = bioc->fs_info; in raid56_parity_write()
1756 bio->bi_status = errno_to_blk_status(PTR_ERR(rbio)); in raid56_parity_write()
1760 rbio->operation = BTRFS_RBIO_WRITE; in raid56_parity_write()
1771 if (!plug->info) { in raid56_parity_write()
1772 plug->info = fs_info; in raid56_parity_write()
1773 INIT_LIST_HEAD(&plug->rbio_list); in raid56_parity_write()
1775 list_add_tail(&rbio->plug_list, &plug->rbio_list); in raid56_parity_write()
1790 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; in verify_one_sector()
1796 if (!rbio->csum_bitmap || !rbio->csum_buf) in verify_one_sector()
1800 if (stripe_nr >= rbio->nr_data) in verify_one_sector()
1806 if (rbio->operation == BTRFS_RBIO_READ_REBUILD) { in verify_one_sector()
1812 ASSERT(sector->page); in verify_one_sector()
1814 csum_expected = rbio->csum_buf + in verify_one_sector()
1815 (stripe_nr * rbio->stripe_nsectors + sector_nr) * in verify_one_sector()
1816 fs_info->csum_size; in verify_one_sector()
1817 ret = btrfs_check_sector_csum(fs_info, sector->page, sector->pgoff, in verify_one_sector()
1824 * @*pointers are the pre-allocated pointers by the caller, so we don't
1830 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; in recover_vertical()
1832 const u32 sectorsize = fs_info->sectorsize; in recover_vertical()
1843 if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB && in recover_vertical()
1844 !test_bit(sector_nr, &rbio->dbitmap)) in recover_vertical()
1856 if (found_errors > rbio->bioc->max_errors) in recover_vertical()
1857 return -EIO; in recover_vertical()
1865 for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) { in recover_vertical()
1870 if (rbio->operation == BTRFS_RBIO_READ_REBUILD) { in recover_vertical()
1875 ASSERT(sector->page); in recover_vertical()
1876 pointers[stripe_nr] = kmap_local_page(sector->page) + in recover_vertical()
1877 sector->pgoff; in recover_vertical()
1882 if (rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6) { in recover_vertical()
1885 if (faila == rbio->nr_data) in recover_vertical()
1907 if (failb == rbio->real_stripes - 1) { in recover_vertical()
1908 if (faila == rbio->real_stripes - 2) in recover_vertical()
1922 if (failb == rbio->real_stripes - 2) { in recover_vertical()
1923 raid6_datap_recov(rbio->real_stripes, sectorsize, in recover_vertical()
1926 raid6_2data_recov(rbio->real_stripes, sectorsize, in recover_vertical()
1933 ASSERT(failb == -1); in recover_vertical()
1936 memcpy(pointers[faila], pointers[rbio->nr_data], sectorsize); in recover_vertical()
1940 for (stripe_nr = faila; stripe_nr < rbio->nr_data - 1; in recover_vertical()
1943 pointers[rbio->nr_data - 1] = p; in recover_vertical()
1946 run_xor(pointers, rbio->nr_data - 1, sectorsize); in recover_vertical()
1966 sector->uptodate = 1; in recover_vertical()
1974 sector->uptodate = 1; in recover_vertical()
1978 for (stripe_nr = rbio->real_stripes - 1; stripe_nr >= 0; stripe_nr--) in recover_vertical()
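recover_vertical() picks the repair strategy from the failure pattern: a single lost data stripe on RAID5 is rebuilt by XOR of P and the surviving data, while RAID6 double failures go through the lib/raid6 helpers raid6_datap_recov() (one data stripe plus P lost) and raid6_2data_recov() (two data stripes lost). A self-contained sketch of the RAID5 single-failure rebuild, with made-up data:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define NR_DATA    3
    #define SECTORSIZE 8

    int main(void)
    {
            uint8_t data[NR_DATA][SECTORSIZE] = { "abcdefg", "hijklmn", "opqrstu" };
            uint8_t p[SECTORSIZE] = { 0 };
            uint8_t rebuilt[SECTORSIZE];
            int faila = 1;                  /* pretend data stripe 1 was lost */

            /* P parity as the write path would have produced it. */
            for (int i = 0; i < NR_DATA; i++)
                    for (int off = 0; off < SECTORSIZE; off++)
                            p[off] ^= data[i][off];

            /* The lost sector is the XOR of P and every surviving data sector
             * at the same vertical position. */
            memcpy(rebuilt, p, SECTORSIZE);
            for (int i = 0; i < NR_DATA; i++) {
                    if (i == faila)
                            continue;
                    for (int off = 0; off < SECTORSIZE; off++)
                            rebuilt[off] ^= data[i][off];
            }

            puts(memcmp(rebuilt, data[faila], SECTORSIZE) ? "mismatch" : "recovered");
            return 0;
    }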
1996 pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS); in recover_sectors()
1997 unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS); in recover_sectors()
1999 ret = -ENOMEM; in recover_sectors()
2003 if (rbio->operation == BTRFS_RBIO_READ_REBUILD) { in recover_sectors()
2004 spin_lock(&rbio->bio_list_lock); in recover_sectors()
2005 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); in recover_sectors()
2006 spin_unlock(&rbio->bio_list_lock); in recover_sectors()
2011 for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) { in recover_sectors()
2033 ASSERT(bitmap_weight(rbio->error_bitmap, rbio->nr_sectors)); in recover_rbio()
2048 * So here we always re-read everything in recovery path. in recover_rbio()
2050 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; in recover_rbio()
2052 int stripe = total_sector_nr / rbio->stripe_nsectors; in recover_rbio()
2053 int sectornr = total_sector_nr % rbio->stripe_nsectors; in recover_rbio()
2061 if (!rbio->bioc->stripes[stripe].dev->bdev || in recover_rbio()
2062 test_bit(total_sector_nr, rbio->error_bitmap)) { in recover_rbio()
2067 set_bit(total_sector_nr, rbio->error_bitmap); in recover_rbio()
2112 for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) { in set_rbio_raid6_extra_error()
2131 failb = rbio->real_stripes - (mirror_num - 1); in set_rbio_raid6_extra_error()
2133 failb--; in set_rbio_raid6_extra_error()
2137 set_bit(failb * rbio->stripe_nsectors + sector_nr, in set_rbio_raid6_extra_error()
2138 rbio->error_bitmap); in set_rbio_raid6_extra_error()
2154 struct btrfs_fs_info *fs_info = bioc->fs_info; in raid56_parity_recover()
2159 bio->bi_status = errno_to_blk_status(PTR_ERR(rbio)); in raid56_parity_recover()
2164 rbio->operation = BTRFS_RBIO_READ_REBUILD; in raid56_parity_recover()
2182 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; in fill_data_csums()
2184 rbio->bioc->full_stripe_logical); in fill_data_csums()
2185 const u64 start = rbio->bioc->full_stripe_logical; in fill_data_csums()
2186 const u32 len = (rbio->nr_data * rbio->stripe_nsectors) << in fill_data_csums()
2187 fs_info->sectorsize_bits; in fill_data_csums()
2191 ASSERT(!rbio->csum_buf && !rbio->csum_bitmap); in fill_data_csums()
2196 * - The rbio doesn't belong to data block groups in fill_data_csums()
2199 * - The rbio belongs to mixed block groups in fill_data_csums()
2204 if (!(rbio->bioc->map_type & BTRFS_BLOCK_GROUP_DATA) || in fill_data_csums()
2205 rbio->bioc->map_type & BTRFS_BLOCK_GROUP_METADATA) in fill_data_csums()
2208 rbio->csum_buf = kzalloc(rbio->nr_data * rbio->stripe_nsectors * in fill_data_csums()
2209 fs_info->csum_size, GFP_NOFS); in fill_data_csums()
2210 rbio->csum_bitmap = bitmap_zalloc(rbio->nr_data * rbio->stripe_nsectors, in fill_data_csums()
2212 if (!rbio->csum_buf || !rbio->csum_bitmap) { in fill_data_csums()
2213 ret = -ENOMEM; in fill_data_csums()
2217 ret = btrfs_lookup_csums_bitmap(csum_root, NULL, start, start + len - 1, in fill_data_csums()
2218 rbio->csum_buf, rbio->csum_bitmap); in fill_data_csums()
2221 if (bitmap_empty(rbio->csum_bitmap, len >> fs_info->sectorsize_bits)) in fill_data_csums()
2229 * longer safe for this particular sub-stripe write. in fill_data_csums()
2232 "sub-stripe write for full stripe %llu is not safe, failed to get csum: %d", in fill_data_csums()
2233 rbio->bioc->full_stripe_logical, ret); in fill_data_csums()
2235 kfree(rbio->csum_buf); in fill_data_csums()
2236 bitmap_free(rbio->csum_bitmap); in fill_data_csums()
2237 rbio->csum_buf = NULL; in fill_data_csums()
2238 rbio->csum_bitmap = NULL; in fill_data_csums()
2259 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; in rmw_read_wait_recover()
2262 int stripe = total_sector_nr / rbio->stripe_nsectors; in rmw_read_wait_recover()
2263 int sectornr = total_sector_nr % rbio->stripe_nsectors; in rmw_read_wait_recover()
2284 struct btrfs_raid_bio *rbio = bio->bi_private; in raid_wait_write_end_io()
2285 blk_status_t err = bio->bi_status; in raid_wait_write_end_io()
2290 if (atomic_dec_and_test(&rbio->stripes_pending)) in raid_wait_write_end_io()
2291 wake_up(&rbio->io_wait); in raid_wait_write_end_io()
2299 atomic_set(&rbio->stripes_pending, bio_list_size(bio_list)); in submit_write_bios()
2301 bio->bi_end_io = raid_wait_write_end_io; in submit_write_bios()
2321 for (i = 0; i < rbio->nr_data * rbio->stripe_nsectors; i++) { in need_read_stripe_sectors()
2322 struct sector_ptr *sector = &rbio->stripe_sectors[i]; in need_read_stripe_sectors()
2329 if (!sector->page || !sector->uptodate) in need_read_stripe_sectors()
2343 * needed for both full-stripe and sub-stripe writes. in rmw_rbio()
2355 * Now we're doing sub-stripe write, also need all data stripes in rmw_rbio()
2374 spin_lock(&rbio->bio_list_lock); in rmw_rbio()
2375 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); in rmw_rbio()
2376 spin_unlock(&rbio->bio_list_lock); in rmw_rbio()
2378 bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors); in rmw_rbio()
2391 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in rmw_rbio()
2393 for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) in rmw_rbio()
2404 wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0); in rmw_rbio()
2407 for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) { in rmw_rbio()
2411 if (found_errors > rbio->bioc->max_errors) { in rmw_rbio()
2412 ret = -EIO; in rmw_rbio()
2449 struct btrfs_fs_info *fs_info = bioc->fs_info; in raid56_parity_alloc_scrub_rbio()
2456 bio_list_add(&rbio->bio_list, bio); in raid56_parity_alloc_scrub_rbio()
2461 ASSERT(!bio->bi_iter.bi_size); in raid56_parity_alloc_scrub_rbio()
2462 rbio->operation = BTRFS_RBIO_PARITY_SCRUB; in raid56_parity_alloc_scrub_rbio()
2469 for (i = rbio->nr_data; i < rbio->real_stripes; i++) { in raid56_parity_alloc_scrub_rbio()
2470 if (bioc->stripes[i].dev == scrub_dev) { in raid56_parity_alloc_scrub_rbio()
2471 rbio->scrubp = i; in raid56_parity_alloc_scrub_rbio()
2475 ASSERT_RBIO_STRIPE(i < rbio->real_stripes, rbio, i); in raid56_parity_alloc_scrub_rbio()
2477 bitmap_copy(&rbio->dbitmap, dbitmap, stripe_nsectors); in raid56_parity_alloc_scrub_rbio()
2487 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in alloc_rbio_essential_pages()
2490 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; in alloc_rbio_essential_pages()
2493 int sectornr = total_sector_nr % rbio->stripe_nsectors; in alloc_rbio_essential_pages()
2496 if (!test_bit(sectornr, &rbio->dbitmap)) in alloc_rbio_essential_pages()
2498 if (rbio->stripe_pages[index]) in alloc_rbio_essential_pages()
2502 return -ENOMEM; in alloc_rbio_essential_pages()
2503 rbio->stripe_pages[index] = page; in alloc_rbio_essential_pages()
2511 struct btrfs_io_context *bioc = rbio->bioc; in finish_parity_scrub()
2512 const u32 sectorsize = bioc->fs_info->sectorsize; in finish_parity_scrub()
2513 void **pointers = rbio->finish_pointers; in finish_parity_scrub()
2514 unsigned long *pbitmap = &rbio->finish_pbitmap; in finish_parity_scrub()
2515 int nr_data = rbio->nr_data; in finish_parity_scrub()
2527 if (rbio->real_stripes - rbio->nr_data == 1) in finish_parity_scrub()
2529 else if (rbio->real_stripes - rbio->nr_data == 2) in finish_parity_scrub()
2538 if (bioc->replace_nr_stripes && bioc->replace_stripe_src == rbio->scrubp) { in finish_parity_scrub()
2540 bitmap_copy(pbitmap, &rbio->dbitmap, rbio->stripe_nsectors); in finish_parity_scrub()
2544 * Because the higher layers (scrubber) are unlikely to in finish_parity_scrub()
2548 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in finish_parity_scrub()
2552 return -ENOMEM; in finish_parity_scrub()
2562 return -ENOMEM; in finish_parity_scrub()
2566 pointers[rbio->real_stripes - 1] = kmap_local_page(q_sector.page); in finish_parity_scrub()
2569 bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors); in finish_parity_scrub()
2574 for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) { in finish_parity_scrub()
2581 pointers[stripe] = kmap_local_page(sector->page) + in finish_parity_scrub()
2582 sector->pgoff; in finish_parity_scrub()
2588 raid6_call.gen_syndrome(rbio->real_stripes, sectorsize, in finish_parity_scrub()
2593 run_xor(pointers + 1, nr_data - 1, sectorsize); in finish_parity_scrub()
2597 sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr); in finish_parity_scrub()
2598 parity = kmap_local_page(sector->page) + sector->pgoff; in finish_parity_scrub()
2599 if (memcmp(parity, pointers[rbio->scrubp], sectorsize) != 0) in finish_parity_scrub()
2600 memcpy(parity, pointers[rbio->scrubp], sectorsize); in finish_parity_scrub()
2603 bitmap_clear(&rbio->dbitmap, sectornr, 1); in finish_parity_scrub()
2606 for (stripe = nr_data - 1; stripe >= 0; stripe--) in finish_parity_scrub()
2614 kunmap_local(pointers[rbio->real_stripes - 1]); in finish_parity_scrub()
2624 for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) { in finish_parity_scrub()
2627 sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr); in finish_parity_scrub()
2628 ret = rbio_add_io_sector(rbio, &bio_list, sector, rbio->scrubp, in finish_parity_scrub()
2641 ASSERT_RBIO(rbio->bioc->replace_stripe_src >= 0, rbio); in finish_parity_scrub()
2642 for_each_set_bit(sectornr, pbitmap, rbio->stripe_nsectors) { in finish_parity_scrub()
2645 sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr); in finish_parity_scrub()
2647 rbio->real_stripes, in finish_parity_scrub()
2664 if (stripe >= 0 && stripe < rbio->nr_data) in is_data_stripe()
2682 pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS); in recover_scrub_rbio()
2683 unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS); in recover_scrub_rbio()
2685 ret = -ENOMEM; in recover_scrub_rbio()
2689 for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) { in recover_scrub_rbio()
2690 int dfail = 0, failp = -1; in recover_scrub_rbio()
2697 if (found_errors > rbio->bioc->max_errors) { in recover_scrub_rbio()
2698 ret = -EIO; in recover_scrub_rbio()
2721 if (dfail > rbio->bioc->max_errors - 1) { in recover_scrub_rbio()
2722 ret = -EIO; in recover_scrub_rbio()
2738 if (failp != rbio->scrubp) { in recover_scrub_rbio()
2739 ret = -EIO; in recover_scrub_rbio()
2760 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; in scrub_assemble_read_bios()
2762 int sectornr = total_sector_nr % rbio->stripe_nsectors; in scrub_assemble_read_bios()
2763 int stripe = total_sector_nr / rbio->stripe_nsectors; in scrub_assemble_read_bios()
2767 if (!test_bit(sectornr, &rbio->dbitmap)) in scrub_assemble_read_bios()
2784 if (sector->uptodate) in scrub_assemble_read_bios()
2808 bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors); in scrub_rbio()
2824 wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0); in scrub_rbio()
2825 for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) { in scrub_rbio()
2829 if (found_errors > rbio->bioc->max_errors) { in scrub_rbio()
2830 ret = -EIO; in scrub_rbio()
2859 const u64 offset_in_full_stripe = data_logical - in raid56_parity_cache_data_pages()
2860 rbio->bioc->full_stripe_logical; in raid56_parity_cache_data_pages()
2862 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in raid56_parity_cache_data_pages()
2880 ASSERT(offset_in_full_stripe < (rbio->nr_data << BTRFS_STRIPE_LEN_SHIFT)); in raid56_parity_cache_data_pages()
2883 struct page *dst = rbio->stripe_pages[page_nr + page_index]; in raid56_parity_cache_data_pages()
2890 rbio->stripe_sectors[sector_nr].uptodate = true; in raid56_parity_cache_data_pages()