Lines matching "scrubber", "-", "done" in fs/btrfs/raid56.c

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2012 Fusion-io All rights reserved.
19 #include "disk-io.h"
22 #include "async-thread.h"
23 #include "file-item.h"
79 bitmap_free(rbio->error_bitmap); in free_raid_bio_pointers()
80 kfree(rbio->stripe_pages); in free_raid_bio_pointers()
81 kfree(rbio->bio_sectors); in free_raid_bio_pointers()
82 kfree(rbio->stripe_sectors); in free_raid_bio_pointers()
83 kfree(rbio->finish_pointers); in free_raid_bio_pointers()
90 if (!refcount_dec_and_test(&rbio->refs)) in free_raid_bio()
93 WARN_ON(!list_empty(&rbio->stripe_cache)); in free_raid_bio()
94 WARN_ON(!list_empty(&rbio->hash_list)); in free_raid_bio()
95 WARN_ON(!bio_list_empty(&rbio->bio_list)); in free_raid_bio()
97 for (i = 0; i < rbio->nr_pages; i++) { in free_raid_bio()
98 if (rbio->stripe_pages[i]) { in free_raid_bio()
99 __free_page(rbio->stripe_pages[i]); in free_raid_bio()
100 rbio->stripe_pages[i] = NULL; in free_raid_bio()
104 btrfs_put_bioc(rbio->bioc); in free_raid_bio()
111 INIT_WORK(&rbio->work, work_func); in start_async_work()
112 queue_work(rbio->bioc->fs_info->rmw_workers, &rbio->work); in start_async_work()
128 if (info->stripe_hash_table) in btrfs_alloc_stripe_hash_table()
140 return -ENOMEM; in btrfs_alloc_stripe_hash_table()
142 spin_lock_init(&table->cache_lock); in btrfs_alloc_stripe_hash_table()
143 INIT_LIST_HEAD(&table->stripe_cache); in btrfs_alloc_stripe_hash_table()
145 h = table->table; in btrfs_alloc_stripe_hash_table()
149 INIT_LIST_HEAD(&cur->hash_list); in btrfs_alloc_stripe_hash_table()
150 spin_lock_init(&cur->lock); in btrfs_alloc_stripe_hash_table()
153 x = cmpxchg(&info->stripe_hash_table, NULL, table); in btrfs_alloc_stripe_hash_table()
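The cmpxchg() above publishes the freshly built table only if no other thread beat us to it. A minimal userspace sketch of the same publish-once pattern, using C11 atomics in place of the kernel primitive (all names here are illustrative, not kernel code):

#include <stdatomic.h>
#include <stdlib.h>

struct table { int nbuckets; };
static _Atomic(struct table *) global_table;

static int install_table_once(void)
{
        struct table *expected = NULL;
        struct table *mine;

        if (atomic_load(&global_table))
                return 0;                       /* already installed */

        mine = calloc(1, sizeof(*mine));
        if (!mine)
                return -1;

        /* Only one thread swings NULL -> mine; losers free their copy. */
        if (!atomic_compare_exchange_strong(&global_table, &expected, mine))
                free(mine);
        return 0;
}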
164 * once the caching is done, we set the cache ready
176 for (i = 0; i < rbio->nr_sectors; i++) { in cache_rbio_pages()
178 if (!rbio->bio_sectors[i].page) { in cache_rbio_pages()
184 if (i < rbio->nr_data * rbio->stripe_nsectors) in cache_rbio_pages()
185 ASSERT(rbio->stripe_sectors[i].uptodate); in cache_rbio_pages()
189 ASSERT(rbio->stripe_sectors[i].page); in cache_rbio_pages()
190 memcpy_page(rbio->stripe_sectors[i].page, in cache_rbio_pages()
191 rbio->stripe_sectors[i].pgoff, in cache_rbio_pages()
192 rbio->bio_sectors[i].page, in cache_rbio_pages()
193 rbio->bio_sectors[i].pgoff, in cache_rbio_pages()
194 rbio->bioc->fs_info->sectorsize); in cache_rbio_pages()
195 rbio->stripe_sectors[i].uptodate = 1; in cache_rbio_pages()
197 set_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in cache_rbio_pages()
205 u64 num = rbio->bioc->full_stripe_logical; in rbio_bucket()
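rbio_bucket() hashes the full stripe's logical start down to a table index. A sketch of the idea using a multiplicative hash, similar in spirit to the kernel's hash_64(); the 11-bit table size and the low-bit shift are assumptions for illustration:

#include <stdint.h>

#define HASH_TABLE_BITS 11      /* assumed: 2^11 buckets */

static unsigned int bucket_for(uint64_t full_stripe_logical)
{
        const uint64_t golden64 = 0x61C8864680B583EBull;

        /*
         * Drop the low bits (full stripes are large, those bits carry no
         * entropy), then keep the top HASH_TABLE_BITS of the product.
         */
        return (unsigned int)(((full_stripe_logical >> 16) * golden64)
                              >> (64 - HASH_TABLE_BITS));
}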
221 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in full_page_sectors_uptodate()
225 ASSERT(page_nr < rbio->nr_pages); in full_page_sectors_uptodate()
230 if (!rbio->stripe_sectors[i].uptodate) in full_page_sectors_uptodate()
243 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in index_stripe_sectors()
247 for (i = 0, offset = 0; i < rbio->nr_sectors; i++, offset += sectorsize) { in index_stripe_sectors()
250 ASSERT(page_index < rbio->nr_pages); in index_stripe_sectors()
251 rbio->stripe_sectors[i].page = rbio->stripe_pages[page_index]; in index_stripe_sectors()
252 rbio->stripe_sectors[i].pgoff = offset_in_page(offset); in index_stripe_sectors()
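index_stripe_sectors() walks the stripe byte offset and splits it into a page index plus an in-page offset. A worked check under assumed geometry (4K sectors, a hypothetical 16K page size to make the split visible):

#include <assert.h>

static void index_example(void)
{
        unsigned int sectorsize = 4096;        /* assumed */
        unsigned int page_size  = 16384;       /* hypothetical subpage setup */
        unsigned int offset = 5 * sectorsize;  /* byte offset of sector 5 */

        assert(offset / page_size == 1);       /* lands in stripe_pages[1] */
        assert(offset % page_size == 4096);    /* pgoff within that page   */
}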
259 const u32 sectorsize = src->bioc->fs_info->sectorsize; in steal_rbio_page()
263 if (dest->stripe_pages[page_nr]) in steal_rbio_page()
264 __free_page(dest->stripe_pages[page_nr]); in steal_rbio_page()
265 dest->stripe_pages[page_nr] = src->stripe_pages[page_nr]; in steal_rbio_page()
266 src->stripe_pages[page_nr] = NULL; in steal_rbio_page()
268 /* Also update the sector->uptodate bits. */ in steal_rbio_page()
271 dest->stripe_sectors[i].uptodate = true; in steal_rbio_page()
277 rbio->bioc->fs_info->sectorsize_bits; in is_data_stripe_page()
286 return (sector_nr < rbio->nr_data * rbio->stripe_nsectors); in is_data_stripe_page()
300 if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags)) in steal_rbio()
303 for (i = 0; i < dest->nr_pages; i++) { in steal_rbio()
304 struct page *p = src->stripe_pages[i]; in steal_rbio()
330 * must be called with dest->bio_list_lock held
335 bio_list_merge(&dest->bio_list, &victim->bio_list); in merge_rbio()
336 dest->bio_list_bytes += victim->bio_list_bytes; in merge_rbio()
338 bitmap_or(&dest->dbitmap, &victim->dbitmap, &dest->dbitmap, in merge_rbio()
339 dest->stripe_nsectors); in merge_rbio()
340 bio_list_init(&victim->bio_list); in merge_rbio()
357 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags)) in __remove_rbio_from_cache()
360 table = rbio->bioc->fs_info->stripe_hash_table; in __remove_rbio_from_cache()
361 h = table->table + bucket; in __remove_rbio_from_cache()
366 spin_lock(&h->lock); in __remove_rbio_from_cache()
372 spin_lock(&rbio->bio_list_lock); in __remove_rbio_from_cache()
374 if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) { in __remove_rbio_from_cache()
375 list_del_init(&rbio->stripe_cache); in __remove_rbio_from_cache()
376 table->cache_size -= 1; in __remove_rbio_from_cache()
388 if (bio_list_empty(&rbio->bio_list)) { in __remove_rbio_from_cache()
389 if (!list_empty(&rbio->hash_list)) { in __remove_rbio_from_cache()
390 list_del_init(&rbio->hash_list); in __remove_rbio_from_cache()
391 refcount_dec(&rbio->refs); in __remove_rbio_from_cache()
392 BUG_ON(!list_empty(&rbio->plug_list)); in __remove_rbio_from_cache()
397 spin_unlock(&rbio->bio_list_lock); in __remove_rbio_from_cache()
398 spin_unlock(&h->lock); in __remove_rbio_from_cache()
411 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags)) in remove_rbio_from_cache()
414 table = rbio->bioc->fs_info->stripe_hash_table; in remove_rbio_from_cache()
416 spin_lock(&table->cache_lock); in remove_rbio_from_cache()
418 spin_unlock(&table->cache_lock); in remove_rbio_from_cache()
429 table = info->stripe_hash_table; in btrfs_clear_rbio_cache()
431 spin_lock(&table->cache_lock); in btrfs_clear_rbio_cache()
432 while (!list_empty(&table->stripe_cache)) { in btrfs_clear_rbio_cache()
433 rbio = list_entry(table->stripe_cache.next, in btrfs_clear_rbio_cache()
438 spin_unlock(&table->cache_lock); in btrfs_clear_rbio_cache()
447 if (!info->stripe_hash_table) in btrfs_free_stripe_hash_table()
450 kvfree(info->stripe_hash_table); in btrfs_free_stripe_hash_table()
451 info->stripe_hash_table = NULL; in btrfs_free_stripe_hash_table()
469 if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags)) in cache_rbio()
472 table = rbio->bioc->fs_info->stripe_hash_table; in cache_rbio()
474 spin_lock(&table->cache_lock); in cache_rbio()
475 spin_lock(&rbio->bio_list_lock); in cache_rbio()
478 if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags)) in cache_rbio()
479 refcount_inc(&rbio->refs); in cache_rbio()
481 if (!list_empty(&rbio->stripe_cache)) { in cache_rbio()
482 list_move(&rbio->stripe_cache, &table->stripe_cache); in cache_rbio()
484 list_add(&rbio->stripe_cache, &table->stripe_cache); in cache_rbio()
485 table->cache_size += 1; in cache_rbio()
488 spin_unlock(&rbio->bio_list_lock); in cache_rbio()
490 if (table->cache_size > RBIO_CACHE_SIZE) { in cache_rbio()
493 found = list_entry(table->stripe_cache.prev, in cache_rbio()
501 spin_unlock(&table->cache_lock); in cache_rbio()
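cache_rbio() keeps the stripe cache in LRU order: a re-cached rbio is moved to the head, a new one is added at the head, and once cache_size exceeds RBIO_CACHE_SIZE the tail entry is evicted. A minimal sketch of that discipline on a plain doubly linked list (the sentinel must be initialized to point at itself; names are stand-ins, not kernel types):

#include <stddef.h>

#define CACHE_MAX 1024                 /* stands in for RBIO_CACHE_SIZE */

struct node { struct node *prev, *next; };
struct lru  { struct node head; int size; };   /* head is a sentinel */

static void lru_del(struct lru *c, struct node *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        c->size--;
}

static void lru_add_head(struct lru *c, struct node *n)
{
        n->next = c->head.next;
        n->prev = &c->head;
        n->next->prev = n;
        c->head.next = n;
        c->size++;
}

/* Add or refresh an entry; returns the evicted tail node, if any. */
static struct node *lru_touch(struct lru *c, struct node *n, int cached)
{
        if (cached)
                lru_del(c, n);         /* like list_move(): unlink, re-add */
        lru_add_head(c, n);
        if (c->size > CACHE_MAX) {
                struct node *victim = c->head.prev;   /* oldest entry */
                lru_del(c, victim);
                return victim;         /* caller drops it from the cache */
        }
        return NULL;
}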
519 src_cnt -= xor_src_cnt; in run_xor()
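run_xor() folds a set of source buffers into a destination buffer with XOR, chunking through the kernel's xor_blocks(). The net effect, as a plain-C sketch; the call sites above pass the sources first and keep the destination in the slot just past them:

#include <stddef.h>

/* XOR pointers[0 .. src_cnt-1] into pointers[src_cnt] (the destination). */
static void run_xor_sketch(void **pointers, int src_cnt, size_t len)
{
        unsigned char *dest = pointers[src_cnt];

        for (int s = 0; s < src_cnt; s++) {
                const unsigned char *src = pointers[s];

                for (size_t i = 0; i < len; i++)
                        dest[i] ^= src[i];
        }
}

Seeding the destination with a copy of the first source beforehand, as generate_pq_vertical() does further down, makes the result the XOR of all data stripes, i.e. the P parity.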
530 unsigned long size = rbio->bio_list_bytes; in rbio_is_full()
533 spin_lock(&rbio->bio_list_lock); in rbio_is_full()
534 if (size != rbio->nr_data * BTRFS_STRIPE_LEN) in rbio_is_full()
536 BUG_ON(size > rbio->nr_data * BTRFS_STRIPE_LEN); in rbio_is_full()
537 spin_unlock(&rbio->bio_list_lock); in rbio_is_full()
555 if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) || in rbio_can_merge()
556 test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) in rbio_can_merge()
566 if (test_bit(RBIO_CACHE_BIT, &last->flags) || in rbio_can_merge()
567 test_bit(RBIO_CACHE_BIT, &cur->flags)) in rbio_can_merge()
570 if (last->bioc->full_stripe_logical != cur->bioc->full_stripe_logical) in rbio_can_merge()
574 if (last->operation != cur->operation) in rbio_can_merge()
584 if (last->operation == BTRFS_RBIO_PARITY_SCRUB) in rbio_can_merge()
587 if (last->operation == BTRFS_RBIO_READ_REBUILD) in rbio_can_merge()
597 ASSERT(stripe_nr < rbio->real_stripes); in rbio_stripe_sector_index()
598 ASSERT(sector_nr < rbio->stripe_nsectors); in rbio_stripe_sector_index()
600 return stripe_nr * rbio->stripe_nsectors + sector_nr; in rbio_stripe_sector_index()
603 /* Return a sector from rbio->stripe_sectors, not from the bio list */
608 return &rbio->stripe_sectors[rbio_stripe_sector_index(rbio, stripe_nr, in rbio_stripe_sector()
616 return rbio_stripe_sector(rbio, rbio->nr_data, sector_nr); in rbio_pstripe_sector()
623 if (rbio->nr_data + 1 == rbio->real_stripes) in rbio_qstripe_sector()
625 return rbio_stripe_sector(rbio, rbio->nr_data + 1, sector_nr); in rbio_qstripe_sector()
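The three helpers above flatten (stripe, sector) pairs into one array index, with P stored at stripe nr_data and Q (when present) right after it. A worked check under an assumed 2 data + P + Q layout with 16 sectors per stripe:

#include <assert.h>

static int flat_index(int stripe_nr, int sector_nr, int stripe_nsectors)
{
        return stripe_nr * stripe_nsectors + sector_nr;
}

static void layout_example(void)
{
        int nr_data = 2, nsectors = 16;

        assert(flat_index(1, 3, nsectors) == 19);            /* data stripe 1 */
        assert(flat_index(nr_data, 3, nsectors) == 35);      /* P             */
        assert(flat_index(nr_data + 1, 3, nsectors) == 51);  /* Q             */
}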
659 h = rbio->bioc->fs_info->stripe_hash_table->table + rbio_bucket(rbio); in lock_stripe_add()
661 spin_lock(&h->lock); in lock_stripe_add()
662 list_for_each_entry(cur, &h->hash_list, hash_list) { in lock_stripe_add()
663 if (cur->bioc->full_stripe_logical != rbio->bioc->full_stripe_logical) in lock_stripe_add()
666 spin_lock(&cur->bio_list_lock); in lock_stripe_add()
669 if (bio_list_empty(&cur->bio_list) && in lock_stripe_add()
670 list_empty(&cur->plug_list) && in lock_stripe_add()
671 test_bit(RBIO_CACHE_BIT, &cur->flags) && in lock_stripe_add()
672 !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) { in lock_stripe_add()
673 list_del_init(&cur->hash_list); in lock_stripe_add()
674 refcount_dec(&cur->refs); in lock_stripe_add()
678 spin_unlock(&cur->bio_list_lock); in lock_stripe_add()
686 spin_unlock(&cur->bio_list_lock); in lock_stripe_add()
698 list_for_each_entry(pending, &cur->plug_list, plug_list) { in lock_stripe_add()
701 spin_unlock(&cur->bio_list_lock); in lock_stripe_add()
712 list_add_tail(&rbio->plug_list, &cur->plug_list); in lock_stripe_add()
713 spin_unlock(&cur->bio_list_lock); in lock_stripe_add()
718 refcount_inc(&rbio->refs); in lock_stripe_add()
719 list_add(&rbio->hash_list, &h->hash_list); in lock_stripe_add()
721 spin_unlock(&h->lock); in lock_stripe_add()
742 h = rbio->bioc->fs_info->stripe_hash_table->table + bucket; in unlock_stripe()
744 if (list_empty(&rbio->plug_list)) in unlock_stripe()
747 spin_lock(&h->lock); in unlock_stripe()
748 spin_lock(&rbio->bio_list_lock); in unlock_stripe()
750 if (!list_empty(&rbio->hash_list)) { in unlock_stripe()
756 if (list_empty(&rbio->plug_list) && in unlock_stripe()
757 test_bit(RBIO_CACHE_BIT, &rbio->flags)) { in unlock_stripe()
759 clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); in unlock_stripe()
760 BUG_ON(!bio_list_empty(&rbio->bio_list)); in unlock_stripe()
761 goto done; in unlock_stripe()
764 list_del_init(&rbio->hash_list); in unlock_stripe()
765 refcount_dec(&rbio->refs); in unlock_stripe()
772 if (!list_empty(&rbio->plug_list)) { in unlock_stripe()
774 struct list_head *head = rbio->plug_list.next; in unlock_stripe()
779 list_del_init(&rbio->plug_list); in unlock_stripe()
781 list_add(&next->hash_list, &h->hash_list); in unlock_stripe()
782 refcount_inc(&next->refs); in unlock_stripe()
783 spin_unlock(&rbio->bio_list_lock); in unlock_stripe()
784 spin_unlock(&h->lock); in unlock_stripe()
786 if (next->operation == BTRFS_RBIO_READ_REBUILD) { in unlock_stripe()
788 } else if (next->operation == BTRFS_RBIO_WRITE) { in unlock_stripe()
791 } else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) { in unlock_stripe()
799 done: in unlock_stripe()
800 spin_unlock(&rbio->bio_list_lock); in unlock_stripe()
801 spin_unlock(&h->lock); in unlock_stripe()
813 next = cur->bi_next; in rbio_endio_bio_list()
814 cur->bi_next = NULL; in rbio_endio_bio_list()
815 cur->bi_status = err; in rbio_endio_bio_list()
827 struct bio *cur = bio_list_get(&rbio->bio_list); in rbio_orig_end_io()
830 kfree(rbio->csum_buf); in rbio_orig_end_io()
831 bitmap_free(rbio->csum_bitmap); in rbio_orig_end_io()
832 rbio->csum_buf = NULL; in rbio_orig_end_io()
833 rbio->csum_bitmap = NULL; in rbio_orig_end_io()
840 bitmap_clear(&rbio->dbitmap, 0, rbio->stripe_nsectors); in rbio_orig_end_io()
843 * At this moment, rbio->bio_list is empty, however since rbio does not in rbio_orig_end_io()
845 * hash list, rbio may be merged with others so that rbio->bio_list in rbio_orig_end_io()
846 * becomes non-empty. in rbio_orig_end_io()
847 * Once unlock_stripe() is done, rbio->bio_list will not be updated any in rbio_orig_end_io()
851 extra = bio_list_get(&rbio->bio_list); in rbio_orig_end_io()
878 ASSERT(stripe_nr >= 0 && stripe_nr < rbio->real_stripes); in sector_in_rbio()
879 ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors); in sector_in_rbio()
881 index = stripe_nr * rbio->stripe_nsectors + sector_nr; in sector_in_rbio()
882 ASSERT(index >= 0 && index < rbio->nr_sectors); in sector_in_rbio()
884 spin_lock(&rbio->bio_list_lock); in sector_in_rbio()
885 sector = &rbio->bio_sectors[index]; in sector_in_rbio()
886 if (sector->page || bio_list_only) { in sector_in_rbio()
888 if (!sector->page) in sector_in_rbio()
890 spin_unlock(&rbio->bio_list_lock); in sector_in_rbio()
893 spin_unlock(&rbio->bio_list_lock); in sector_in_rbio()
895 return &rbio->stripe_sectors[index]; in sector_in_rbio()
900 * this does not allocate any pages for rbio->stripe_pages.
905 const unsigned int real_stripes = bioc->num_stripes - bioc->replace_nr_stripes; in alloc_rbio()
909 BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits; in alloc_rbio()
914 ASSERT(IS_ALIGNED(PAGE_SIZE, fs_info->sectorsize)); in alloc_rbio()
923 return ERR_PTR(-ENOMEM); in alloc_rbio()
924 rbio->stripe_pages = kcalloc(num_pages, sizeof(struct page *), in alloc_rbio()
926 rbio->bio_sectors = kcalloc(num_sectors, sizeof(struct sector_ptr), in alloc_rbio()
928 rbio->stripe_sectors = kcalloc(num_sectors, sizeof(struct sector_ptr), in alloc_rbio()
930 rbio->finish_pointers = kcalloc(real_stripes, sizeof(void *), GFP_NOFS); in alloc_rbio()
931 rbio->error_bitmap = bitmap_zalloc(num_sectors, GFP_NOFS); in alloc_rbio()
933 if (!rbio->stripe_pages || !rbio->bio_sectors || !rbio->stripe_sectors || in alloc_rbio()
934 !rbio->finish_pointers || !rbio->error_bitmap) { in alloc_rbio()
937 return ERR_PTR(-ENOMEM); in alloc_rbio()
940 bio_list_init(&rbio->bio_list); in alloc_rbio()
941 init_waitqueue_head(&rbio->io_wait); in alloc_rbio()
942 INIT_LIST_HEAD(&rbio->plug_list); in alloc_rbio()
943 spin_lock_init(&rbio->bio_list_lock); in alloc_rbio()
944 INIT_LIST_HEAD(&rbio->stripe_cache); in alloc_rbio()
945 INIT_LIST_HEAD(&rbio->hash_list); in alloc_rbio()
947 rbio->bioc = bioc; in alloc_rbio()
948 rbio->nr_pages = num_pages; in alloc_rbio()
949 rbio->nr_sectors = num_sectors; in alloc_rbio()
950 rbio->real_stripes = real_stripes; in alloc_rbio()
951 rbio->stripe_npages = stripe_npages; in alloc_rbio()
952 rbio->stripe_nsectors = stripe_nsectors; in alloc_rbio()
953 refcount_set(&rbio->refs, 1); in alloc_rbio()
954 atomic_set(&rbio->stripes_pending, 0); in alloc_rbio()
956 ASSERT(btrfs_nr_parity_stripes(bioc->map_type)); in alloc_rbio()
957 rbio->nr_data = real_stripes - btrfs_nr_parity_stripes(bioc->map_type); in alloc_rbio()
967 ret = btrfs_alloc_page_array(rbio->nr_pages, rbio->stripe_pages, 0); in alloc_rbio_pages()
978 const int data_pages = rbio->nr_data * rbio->stripe_npages; in alloc_rbio_parity_pages()
981 ret = btrfs_alloc_page_array(rbio->nr_pages - data_pages, in alloc_rbio_parity_pages()
982 rbio->stripe_pages + data_pages, 0); in alloc_rbio_parity_pages()
1008 *faila = -1; in get_rbio_veritical_errors()
1009 *failb = -1; in get_rbio_veritical_errors()
1012 for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) { in get_rbio_veritical_errors()
1013 int total_sector_nr = stripe_nr * rbio->stripe_nsectors + sector_nr; in get_rbio_veritical_errors()
1015 if (test_bit(total_sector_nr, rbio->error_bitmap)) { in get_rbio_veritical_errors()
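get_rbio_veritical_errors() scans one column (sector_nr) across every stripe and records the first two failing stripes as faila/failb. The same walk, sketched with a plain bool array standing in for the kernel bitmap:

#include <stdbool.h>

static int count_vertical_errors(const bool *error_bitmap, int real_stripes,
                                 int stripe_nsectors, int sector_nr,
                                 int *faila, int *failb)
{
        int found = 0;

        *faila = -1;
        *failb = -1;
        for (int stripe = 0; stripe < real_stripes; stripe++) {
                if (error_bitmap[stripe * stripe_nsectors + sector_nr]) {
                        if (found == 0)
                                *faila = stripe;
                        else if (found == 1)
                                *failb = stripe;
                        found++;
                }
        }
        return found;
}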
1042 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in rbio_add_io_sector()
1043 struct bio *last = bio_list->tail; in rbio_add_io_sector()
1051 * thus it can be larger than rbio->real_stripes. in rbio_add_io_sector()
1052 * So here we check against bioc->num_stripes, not rbio->real_stripes. in rbio_add_io_sector()
1054 ASSERT(stripe_nr >= 0 && stripe_nr < rbio->bioc->num_stripes); in rbio_add_io_sector()
1055 ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors); in rbio_add_io_sector()
1056 ASSERT(sector->page); in rbio_add_io_sector()
1058 stripe = &rbio->bioc->stripes[stripe_nr]; in rbio_add_io_sector()
1059 disk_start = stripe->physical + sector_nr * sectorsize; in rbio_add_io_sector()
1062 if (!stripe->dev->bdev) { in rbio_add_io_sector()
1065 set_bit(stripe_nr * rbio->stripe_nsectors + sector_nr, in rbio_add_io_sector()
1066 rbio->error_bitmap); in rbio_add_io_sector()
1071 if (found_errors > rbio->bioc->max_errors) in rbio_add_io_sector()
1072 return -EIO; in rbio_add_io_sector()
1078 u64 last_end = last->bi_iter.bi_sector << SECTOR_SHIFT; in rbio_add_io_sector()
1079 last_end += last->bi_iter.bi_size; in rbio_add_io_sector()
1085 if (last_end == disk_start && !last->bi_status && in rbio_add_io_sector()
1086 last->bi_bdev == stripe->dev->bdev) { in rbio_add_io_sector()
1087 ret = bio_add_page(last, sector->page, sectorsize, in rbio_add_io_sector()
1088 sector->pgoff); in rbio_add_io_sector()
1095 bio = bio_alloc(stripe->dev->bdev, in rbio_add_io_sector()
1098 bio->bi_iter.bi_sector = disk_start >> SECTOR_SHIFT; in rbio_add_io_sector()
1099 bio->bi_private = rbio; in rbio_add_io_sector()
1101 __bio_add_page(bio, sector->page, sectorsize, sector->pgoff); in rbio_add_io_sector()
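rbio_add_io_sector() first tries to extend the tail bio when the new sector is physically contiguous on the same device, and only allocates a fresh bio otherwise. The merge test, reduced to plain struct stand-ins (not block-layer types):

#include <stdbool.h>
#include <stdint.h>

struct io {
        int dev;                 /* stand-in for bi_bdev   */
        uint64_t start, len;     /* device offset in bytes */
};

/* Returns true if the sector could be appended to the tail I/O. */
static bool try_merge(struct io *last, int dev, uint64_t disk_start,
                      uint32_t sectorsize)
{
        if (!last || last->dev != dev)
                return false;
        if (last->start + last->len != disk_start)
                return false;
        last->len += sectorsize;
        return true;
}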
1108 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in index_one_bio()
1111 u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) - in index_one_bio()
1112 rbio->bioc->full_stripe_logical; in index_one_bio()
1120 struct sector_ptr *sector = &rbio->bio_sectors[index]; in index_one_bio()
1122 sector->page = bvec.bv_page; in index_one_bio()
1123 sector->pgoff = bvec.bv_offset + bvec_offset; in index_one_bio()
1124 ASSERT(sector->pgoff < PAGE_SIZE); in index_one_bio()
1141 spin_lock(&rbio->bio_list_lock); in index_rbio_pages()
1142 bio_list_for_each(bio, &rbio->bio_list) in index_rbio_pages()
1145 spin_unlock(&rbio->bio_list_lock); in index_rbio_pages()
1151 const struct btrfs_io_context *bioc = rbio->bioc; in bio_get_trace_info()
1156 /* We rely on bio->bi_bdev to find the stripe number. */ in bio_get_trace_info()
1157 if (!bio->bi_bdev) in bio_get_trace_info()
1160 for (i = 0; i < bioc->num_stripes; i++) { in bio_get_trace_info()
1161 if (bio->bi_bdev != bioc->stripes[i].dev->bdev) in bio_get_trace_info()
1163 trace_info->stripe_nr = i; in bio_get_trace_info()
1164 trace_info->devid = bioc->stripes[i].dev->devid; in bio_get_trace_info()
1165 trace_info->offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) - in bio_get_trace_info()
1166 bioc->stripes[i].physical; in bio_get_trace_info()
1171 trace_info->devid = -1; in bio_get_trace_info()
1172 trace_info->offset = -1; in bio_get_trace_info()
1173 trace_info->stripe_nr = -1; in bio_get_trace_info()
1187 void **pointers = rbio->finish_pointers; in generate_pq_vertical()
1188 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in generate_pq_vertical()
1191 const bool has_qstripe = rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6; in generate_pq_vertical()
1194 for (stripe = 0; stripe < rbio->nr_data; stripe++) { in generate_pq_vertical()
1196 pointers[stripe] = kmap_local_page(sector->page) + in generate_pq_vertical()
1197 sector->pgoff; in generate_pq_vertical()
1202 sector->uptodate = 1; in generate_pq_vertical()
1203 pointers[stripe++] = kmap_local_page(sector->page) + sector->pgoff; in generate_pq_vertical()
1211 sector->uptodate = 1; in generate_pq_vertical()
1212 pointers[stripe++] = kmap_local_page(sector->page) + in generate_pq_vertical()
1213 sector->pgoff; in generate_pq_vertical()
1215 raid6_call.gen_syndrome(rbio->real_stripes, sectorsize, in generate_pq_vertical()
1219 memcpy(pointers[rbio->nr_data], pointers[0], sectorsize); in generate_pq_vertical()
1220 run_xor(pointers + 1, rbio->nr_data - 1, sectorsize); in generate_pq_vertical()
1222 for (stripe = stripe - 1; stripe >= 0; stripe--) in generate_pq_vertical()
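generate_pq_vertical() computes P as the XOR of the data sectors and, for RAID6, Q as the GF(2^8) syndrome produced by raid6_call.gen_syndrome(). A plain-C rendition of the underlying math (generator g = 2 over the 0x11d polynomial), not the kernel's accelerated implementation:

#include <stddef.h>
#include <stdint.h>

static uint8_t gf_mul2(uint8_t v)
{
        /* multiply by x in GF(2^8), reduction polynomial 0x11d */
        return (uint8_t)((v << 1) ^ ((v & 0x80) ? 0x1d : 0));
}

/* P = XOR of all data blocks; Q = sum of g^i * D_i, via Horner's rule. */
static void gen_pq(int ndata, size_t len, uint8_t **data,
                   uint8_t *p, uint8_t *q)
{
        for (size_t i = 0; i < len; i++) {
                uint8_t pv = 0, qv = 0;

                for (int d = ndata - 1; d >= 0; d--) {
                        pv ^= data[d][i];
                        qv = gf_mul2(qv) ^ data[d][i];
                }
                p[i] = pv;
                q[i] = qv;
        }
}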
1238 ASSERT(bitmap_weight(&rbio->dbitmap, rbio->stripe_nsectors)); in rmw_assemble_write_bios()
1244 bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors); in rmw_assemble_write_bios()
1250 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; in rmw_assemble_write_bios()
1254 stripe = total_sector_nr / rbio->stripe_nsectors; in rmw_assemble_write_bios()
1255 sectornr = total_sector_nr % rbio->stripe_nsectors; in rmw_assemble_write_bios()
1258 if (!test_bit(sectornr, &rbio->dbitmap)) in rmw_assemble_write_bios()
1261 if (stripe < rbio->nr_data) { in rmw_assemble_write_bios()
1275 if (likely(!rbio->bioc->replace_nr_stripes)) in rmw_assemble_write_bios()
1283 ASSERT(rbio->bioc->replace_stripe_src >= 0); in rmw_assemble_write_bios()
1285 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; in rmw_assemble_write_bios()
1289 stripe = total_sector_nr / rbio->stripe_nsectors; in rmw_assemble_write_bios()
1290 sectornr = total_sector_nr % rbio->stripe_nsectors; in rmw_assemble_write_bios()
1297 if (stripe != rbio->bioc->replace_stripe_src) { in rmw_assemble_write_bios()
1303 total_sector_nr += rbio->stripe_nsectors - 1; in rmw_assemble_write_bios()
1308 if (!test_bit(sectornr, &rbio->dbitmap)) in rmw_assemble_write_bios()
1311 if (stripe < rbio->nr_data) { in rmw_assemble_write_bios()
1320 rbio->real_stripes, in rmw_assemble_write_bios()
1329 return -EIO; in rmw_assemble_write_bios()
1334 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; in set_rbio_range_error()
1335 u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) - in set_rbio_range_error()
1336 rbio->bioc->full_stripe_logical; in set_rbio_range_error()
1337 int total_nr_sector = offset >> fs_info->sectorsize_bits; in set_rbio_range_error()
1339 ASSERT(total_nr_sector < rbio->nr_data * rbio->stripe_nsectors); in set_rbio_range_error()
1341 bitmap_set(rbio->error_bitmap, total_nr_sector, in set_rbio_range_error()
1342 bio->bi_iter.bi_size >> fs_info->sectorsize_bits); in set_rbio_range_error()
1350 if (bio->bi_iter.bi_size == 0) { in set_rbio_range_error()
1354 for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) { in set_rbio_range_error()
1355 if (!rbio->bioc->stripes[stripe_nr].dev->bdev) { in set_rbio_range_error()
1357 bitmap_set(rbio->error_bitmap, in set_rbio_range_error()
1358 stripe_nr * rbio->stripe_nsectors, in set_rbio_range_error()
1359 rbio->stripe_nsectors); in set_rbio_range_error()
1367 * For subpage case, we can no longer set page Up-to-date directly for
1376 for (i = 0; i < rbio->nr_sectors; i++) { in find_stripe_sector()
1377 struct sector_ptr *sector = &rbio->stripe_sectors[i]; in find_stripe_sector()
1379 if (sector->page == page && sector->pgoff == pgoff) in find_stripe_sector()
1391 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in set_bio_pages_uptodate()
1401 for (pgoff = bvec->bv_offset; pgoff - bvec->bv_offset < bvec->bv_len; in set_bio_pages_uptodate()
1403 sector = find_stripe_sector(rbio, bvec->bv_page, pgoff); in set_bio_pages_uptodate()
1406 sector->uptodate = 1; in set_bio_pages_uptodate()
1416 for (i = 0; i < rbio->nr_sectors; i++) { in get_bio_sector_nr()
1419 sector = &rbio->stripe_sectors[i]; in get_bio_sector_nr()
1420 if (sector->page == bv->bv_page && sector->pgoff == bv->bv_offset) in get_bio_sector_nr()
1422 sector = &rbio->bio_sectors[i]; in get_bio_sector_nr()
1423 if (sector->page == bv->bv_page && sector->pgoff == bv->bv_offset) in get_bio_sector_nr()
1426 ASSERT(i < rbio->nr_sectors); in get_bio_sector_nr()
1438 bio_size += bvec->bv_len; in rbio_update_error_bitmap()
1447 (bio_size >> rbio->bioc->fs_info->sectorsize_bits); i++) in rbio_update_error_bitmap()
1448 set_bit(i, rbio->error_bitmap); in rbio_update_error_bitmap()
1455 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; in verify_bio_data_sectors()
1461 if (!rbio->csum_bitmap || !rbio->csum_buf) in verify_bio_data_sectors()
1465 if (total_sector_nr >= rbio->nr_data * rbio->stripe_nsectors) in verify_bio_data_sectors()
1471 for (bv_offset = bvec->bv_offset; in verify_bio_data_sectors()
1472 bv_offset < bvec->bv_offset + bvec->bv_len; in verify_bio_data_sectors()
1473 bv_offset += fs_info->sectorsize, total_sector_nr++) { in verify_bio_data_sectors()
1475 u8 *expected_csum = rbio->csum_buf + in verify_bio_data_sectors()
1476 total_sector_nr * fs_info->csum_size; in verify_bio_data_sectors()
1480 if (!test_bit(total_sector_nr, rbio->csum_bitmap)) in verify_bio_data_sectors()
1483 ret = btrfs_check_sector_csum(fs_info, bvec->bv_page, in verify_bio_data_sectors()
1486 set_bit(total_sector_nr, rbio->error_bitmap); in verify_bio_data_sectors()
1493 struct btrfs_raid_bio *rbio = bio->bi_private; in raid_wait_read_end_io()
1495 if (bio->bi_status) { in raid_wait_read_end_io()
1503 if (atomic_dec_and_test(&rbio->stripes_pending)) in raid_wait_read_end_io()
1504 wake_up(&rbio->io_wait); in raid_wait_read_end_io()
1512 atomic_set(&rbio->stripes_pending, bio_list_size(bio_list)); in submit_read_wait_bio_list()
1514 bio->bi_end_io = raid_wait_read_end_io; in submit_read_wait_bio_list()
1525 wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0); in submit_read_wait_bio_list()
1530 const int data_pages = rbio->nr_data * rbio->stripe_npages; in alloc_rbio_data_pages()
1533 ret = btrfs_alloc_page_array(data_pages, rbio->stripe_pages, 0); in alloc_rbio_data_pages()
1564 u64 a_sector = ra->bio_list.head->bi_iter.bi_sector; in plug_cmp()
1565 u64 b_sector = rb->bio_list.head->bi_iter.bi_sector; in plug_cmp()
1568 return -1; in plug_cmp()
1580 list_sort(NULL, &plug->rbio_list, plug_cmp); in raid_unplug()
1582 while (!list_empty(&plug->rbio_list)) { in raid_unplug()
1583 cur = list_entry(plug->rbio_list.next, in raid_unplug()
1585 list_del_init(&cur->plug_list); in raid_unplug()
1607 /* Add the original bio into rbio->bio_list, and update rbio::dbitmap. */
1610 const struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; in rbio_add_bio()
1611 const u64 orig_logical = orig_bio->bi_iter.bi_sector << SECTOR_SHIFT; in rbio_add_bio()
1612 const u64 full_stripe_start = rbio->bioc->full_stripe_logical; in rbio_add_bio()
1613 const u32 orig_len = orig_bio->bi_iter.bi_size; in rbio_add_bio()
1614 const u32 sectorsize = fs_info->sectorsize; in rbio_add_bio()
1619 rbio->nr_data * BTRFS_STRIPE_LEN); in rbio_add_bio()
1621 bio_list_add(&rbio->bio_list, orig_bio); in rbio_add_bio()
1622 rbio->bio_list_bytes += orig_bio->bi_iter.bi_size; in rbio_add_bio()
1627 int bit = ((u32)(cur_logical - full_stripe_start) >> in rbio_add_bio()
1628 fs_info->sectorsize_bits) % rbio->stripe_nsectors; in rbio_add_bio()
1630 set_bit(bit, &rbio->dbitmap); in rbio_add_bio()
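The dbitmap bit computed above is the sector's column within one stripe: offset into the full stripe, converted to sectors, modulo stripe_nsectors. A worked example assuming 4K sectors and the 64K BTRFS_STRIPE_LEN:

#include <assert.h>

static void dbitmap_example(void)
{
        unsigned int sectorsize_bits = 12;   /* 4K sectors (assumed)   */
        unsigned int stripe_nsectors = 16;   /* 64K stripe / 4K sector */
        unsigned int offset = 72 * 1024;     /* into the full stripe   */

        /* sector 18 overall, column 2 within its stripe */
        assert(((offset >> sectorsize_bits) % stripe_nsectors) == 2);
}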
1639 struct btrfs_fs_info *fs_info = bioc->fs_info; in raid56_parity_write()
1646 bio->bi_status = errno_to_blk_status(PTR_ERR(rbio)); in raid56_parity_write()
1650 rbio->operation = BTRFS_RBIO_WRITE; in raid56_parity_write()
1661 if (!plug->info) { in raid56_parity_write()
1662 plug->info = fs_info; in raid56_parity_write()
1663 INIT_LIST_HEAD(&plug->rbio_list); in raid56_parity_write()
1665 list_add_tail(&rbio->plug_list, &plug->rbio_list); in raid56_parity_write()
1680 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; in verify_one_sector()
1686 if (!rbio->csum_bitmap || !rbio->csum_buf) in verify_one_sector()
1690 if (stripe_nr >= rbio->nr_data) in verify_one_sector()
1696 if (rbio->operation == BTRFS_RBIO_READ_REBUILD) { in verify_one_sector()
1702 ASSERT(sector->page); in verify_one_sector()
1704 csum_expected = rbio->csum_buf + in verify_one_sector()
1705 (stripe_nr * rbio->stripe_nsectors + sector_nr) * in verify_one_sector()
1706 fs_info->csum_size; in verify_one_sector()
1707 ret = btrfs_check_sector_csum(fs_info, sector->page, sector->pgoff, in verify_one_sector()
1714 * @*pointers are the pre-allocated pointers by the caller, so we don't
1720 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; in recover_vertical()
1722 const u32 sectorsize = fs_info->sectorsize; in recover_vertical()
1733 if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB && in recover_vertical()
1734 !test_bit(sector_nr, &rbio->dbitmap)) in recover_vertical()
1746 if (found_errors > rbio->bioc->max_errors) in recover_vertical()
1747 return -EIO; in recover_vertical()
1755 for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) { in recover_vertical()
1760 if (rbio->operation == BTRFS_RBIO_READ_REBUILD) { in recover_vertical()
1765 ASSERT(sector->page); in recover_vertical()
1766 pointers[stripe_nr] = kmap_local_page(sector->page) + in recover_vertical()
1767 sector->pgoff; in recover_vertical()
1772 if (rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6) { in recover_vertical()
1775 if (faila == rbio->nr_data) in recover_vertical()
1797 if (failb == rbio->real_stripes - 1) { in recover_vertical()
1798 if (faila == rbio->real_stripes - 2) in recover_vertical()
1812 if (failb == rbio->real_stripes - 2) { in recover_vertical()
1813 raid6_datap_recov(rbio->real_stripes, sectorsize, in recover_vertical()
1816 raid6_2data_recov(rbio->real_stripes, sectorsize, in recover_vertical()
1823 ASSERT(failb == -1); in recover_vertical()
1826 memcpy(pointers[faila], pointers[rbio->nr_data], sectorsize); in recover_vertical()
1830 for (stripe_nr = faila; stripe_nr < rbio->nr_data - 1; in recover_vertical()
1833 pointers[rbio->nr_data - 1] = p; in recover_vertical()
1836 run_xor(pointers, rbio->nr_data - 1, sectorsize); in recover_vertical()
1856 sector->uptodate = 1; in recover_vertical()
1864 sector->uptodate = 1; in recover_vertical()
1868 for (stripe_nr = rbio->real_stripes - 1; stripe_nr >= 0; stripe_nr--) in recover_vertical()
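For a single failure, the code above rebuilds the missing data stripe from P by XOR: it copies P into the dead slot, rotates the pointers, and runs the XOR over the rest. The arithmetic written out directly as a sketch; the RAID6 double-failure cases go through raid6_datap_recov()/raid6_2data_recov() instead:

#include <stddef.h>
#include <stdint.h>

/* D[faila] = P ^ (XOR of all surviving data blocks). */
static void rebuild_from_p(int ndata, int faila, size_t len,
                           uint8_t **data, const uint8_t *p)
{
        for (size_t i = 0; i < len; i++) {
                uint8_t v = p[i];

                for (int d = 0; d < ndata; d++)
                        if (d != faila)
                                v ^= data[d][i];
                data[faila][i] = v;    /* recovered block */
        }
}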
1886 pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS); in recover_sectors()
1887 unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS); in recover_sectors()
1889 ret = -ENOMEM; in recover_sectors()
1893 if (rbio->operation == BTRFS_RBIO_READ_REBUILD) { in recover_sectors()
1894 spin_lock(&rbio->bio_list_lock); in recover_sectors()
1895 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); in recover_sectors()
1896 spin_unlock(&rbio->bio_list_lock); in recover_sectors()
1901 for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) { in recover_sectors()
1923 ASSERT(bitmap_weight(rbio->error_bitmap, rbio->nr_sectors)); in recover_rbio()
1938 * So here we always re-read everything in the recovery path. in recover_rbio()
1940 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; in recover_rbio()
1942 int stripe = total_sector_nr / rbio->stripe_nsectors; in recover_rbio()
1943 int sectornr = total_sector_nr % rbio->stripe_nsectors; in recover_rbio()
1951 if (!rbio->bioc->stripes[stripe].dev->bdev || in recover_rbio()
1952 test_bit(total_sector_nr, rbio->error_bitmap)) { in recover_rbio()
1957 set_bit(total_sector_nr, rbio->error_bitmap); in recover_rbio()
2002 for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) { in set_rbio_raid6_extra_error()
2021 failb = rbio->real_stripes - (mirror_num - 1); in set_rbio_raid6_extra_error()
2023 failb--; in set_rbio_raid6_extra_error()
2027 set_bit(failb * rbio->stripe_nsectors + sector_nr, in set_rbio_raid6_extra_error()
2028 rbio->error_bitmap); in set_rbio_raid6_extra_error()
2044 struct btrfs_fs_info *fs_info = bioc->fs_info; in raid56_parity_recover()
2049 bio->bi_status = errno_to_blk_status(PTR_ERR(rbio)); in raid56_parity_recover()
2054 rbio->operation = BTRFS_RBIO_READ_REBUILD; in raid56_parity_recover()
2072 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; in fill_data_csums()
2074 rbio->bioc->full_stripe_logical); in fill_data_csums()
2075 const u64 start = rbio->bioc->full_stripe_logical; in fill_data_csums()
2076 const u32 len = (rbio->nr_data * rbio->stripe_nsectors) << in fill_data_csums()
2077 fs_info->sectorsize_bits; in fill_data_csums()
2081 ASSERT(!rbio->csum_buf && !rbio->csum_bitmap); in fill_data_csums()
2086 * - The rbio doesn't belong to data block groups in fill_data_csums()
2089 * - The rbio belongs to mixed block groups in fill_data_csums()
2094 if (!(rbio->bioc->map_type & BTRFS_BLOCK_GROUP_DATA) || in fill_data_csums()
2095 rbio->bioc->map_type & BTRFS_BLOCK_GROUP_METADATA) in fill_data_csums()
2098 rbio->csum_buf = kzalloc(rbio->nr_data * rbio->stripe_nsectors * in fill_data_csums()
2099 fs_info->csum_size, GFP_NOFS); in fill_data_csums()
2100 rbio->csum_bitmap = bitmap_zalloc(rbio->nr_data * rbio->stripe_nsectors, in fill_data_csums()
2102 if (!rbio->csum_buf || !rbio->csum_bitmap) { in fill_data_csums()
2103 ret = -ENOMEM; in fill_data_csums()
2107 ret = btrfs_lookup_csums_bitmap(csum_root, NULL, start, start + len - 1, in fill_data_csums()
2108 rbio->csum_buf, rbio->csum_bitmap); in fill_data_csums()
2111 if (bitmap_empty(rbio->csum_bitmap, len >> fs_info->sectorsize_bits)) in fill_data_csums()
2119 * longer safe for this particular sub-stripe write. in fill_data_csums()
2122 "sub-stripe write for full stripe %llu is not safe, failed to get csum: %d", in fill_data_csums()
2123 rbio->bioc->full_stripe_logical, ret); in fill_data_csums()
2125 kfree(rbio->csum_buf); in fill_data_csums()
2126 bitmap_free(rbio->csum_bitmap); in fill_data_csums()
2127 rbio->csum_buf = NULL; in fill_data_csums()
2128 rbio->csum_bitmap = NULL; in fill_data_csums()
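The two skip conditions near the top of fill_data_csums() gate checksum verification: anything that is not a pure data block group is skipped. As a predicate, with stand-in flag bits (the real BTRFS_BLOCK_GROUP_* values live in the on-disk format headers):

#include <stdbool.h>
#include <stdint.h>

#define BG_DATA     (1ULL << 0)   /* stand-in flag bits */
#define BG_METADATA (1ULL << 1)

static bool want_csum_verify(uint64_t map_type)
{
        /* skip non-data groups and mixed (data + metadata) groups */
        if (!(map_type & BG_DATA) || (map_type & BG_METADATA))
                return false;
        return true;
}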
2149 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; in rmw_read_wait_recover()
2152 int stripe = total_sector_nr / rbio->stripe_nsectors; in rmw_read_wait_recover()
2153 int sectornr = total_sector_nr % rbio->stripe_nsectors; in rmw_read_wait_recover()
2174 struct btrfs_raid_bio *rbio = bio->bi_private; in raid_wait_write_end_io()
2175 blk_status_t err = bio->bi_status; in raid_wait_write_end_io()
2180 if (atomic_dec_and_test(&rbio->stripes_pending)) in raid_wait_write_end_io()
2181 wake_up(&rbio->io_wait); in raid_wait_write_end_io()
2189 atomic_set(&rbio->stripes_pending, bio_list_size(bio_list)); in submit_write_bios()
2191 bio->bi_end_io = raid_wait_write_end_io; in submit_write_bios()
2211 for (i = 0; i < rbio->nr_data * rbio->stripe_nsectors; i++) { in need_read_stripe_sectors()
2212 struct sector_ptr *sector = &rbio->stripe_sectors[i]; in need_read_stripe_sectors()
2219 if (!sector->page || !sector->uptodate) in need_read_stripe_sectors()
2233 * needed for both full-stripe and sub-stripe writes. in rmw_rbio()
2245 * Now we're doing sub-stripe write, also need all data stripes in rmw_rbio()
2264 spin_lock(&rbio->bio_list_lock); in rmw_rbio()
2265 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); in rmw_rbio()
2266 spin_unlock(&rbio->bio_list_lock); in rmw_rbio()
2268 bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors); in rmw_rbio()
2281 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in rmw_rbio()
2283 for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) in rmw_rbio()
2294 wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0); in rmw_rbio()
2297 for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) { in rmw_rbio()
2301 if (found_errors > rbio->bioc->max_errors) { in rmw_rbio()
2302 ret = -EIO; in rmw_rbio()
2339 struct btrfs_fs_info *fs_info = bioc->fs_info; in raid56_parity_alloc_scrub_rbio()
2346 bio_list_add(&rbio->bio_list, bio); in raid56_parity_alloc_scrub_rbio()
2351 ASSERT(!bio->bi_iter.bi_size); in raid56_parity_alloc_scrub_rbio()
2352 rbio->operation = BTRFS_RBIO_PARITY_SCRUB; in raid56_parity_alloc_scrub_rbio()
2359 for (i = rbio->nr_data; i < rbio->real_stripes; i++) { in raid56_parity_alloc_scrub_rbio()
2360 if (bioc->stripes[i].dev == scrub_dev) { in raid56_parity_alloc_scrub_rbio()
2361 rbio->scrubp = i; in raid56_parity_alloc_scrub_rbio()
2365 ASSERT(i < rbio->real_stripes); in raid56_parity_alloc_scrub_rbio()
2367 bitmap_copy(&rbio->dbitmap, dbitmap, stripe_nsectors); in raid56_parity_alloc_scrub_rbio()
2377 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in alloc_rbio_essential_pages()
2380 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; in alloc_rbio_essential_pages()
2383 int sectornr = total_sector_nr % rbio->stripe_nsectors; in alloc_rbio_essential_pages()
2386 if (!test_bit(sectornr, &rbio->dbitmap)) in alloc_rbio_essential_pages()
2388 if (rbio->stripe_pages[index]) in alloc_rbio_essential_pages()
2392 return -ENOMEM; in alloc_rbio_essential_pages()
2393 rbio->stripe_pages[index] = page; in alloc_rbio_essential_pages()
2401 struct btrfs_io_context *bioc = rbio->bioc; in finish_parity_scrub()
2402 const u32 sectorsize = bioc->fs_info->sectorsize; in finish_parity_scrub()
2403 void **pointers = rbio->finish_pointers; in finish_parity_scrub()
2404 unsigned long *pbitmap = &rbio->finish_pbitmap; in finish_parity_scrub()
2405 int nr_data = rbio->nr_data; in finish_parity_scrub()
2417 if (rbio->real_stripes - rbio->nr_data == 1) in finish_parity_scrub()
2419 else if (rbio->real_stripes - rbio->nr_data == 2) in finish_parity_scrub()
2428 if (bioc->replace_nr_stripes && bioc->replace_stripe_src == rbio->scrubp) { in finish_parity_scrub()
2430 bitmap_copy(pbitmap, &rbio->dbitmap, rbio->stripe_nsectors); in finish_parity_scrub()
2434 * Because the higher layers (scrubber) are unlikely to in finish_parity_scrub()
2438 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in finish_parity_scrub()
2442 return -ENOMEM; in finish_parity_scrub()
2452 return -ENOMEM; in finish_parity_scrub()
2456 pointers[rbio->real_stripes - 1] = kmap_local_page(q_sector.page); in finish_parity_scrub()
2459 bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors); in finish_parity_scrub()
2464 for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) { in finish_parity_scrub()
2471 pointers[stripe] = kmap_local_page(sector->page) + in finish_parity_scrub()
2472 sector->pgoff; in finish_parity_scrub()
2477 raid6_call.gen_syndrome(rbio->real_stripes, sectorsize, in finish_parity_scrub()
2482 run_xor(pointers + 1, nr_data - 1, sectorsize); in finish_parity_scrub()
2486 sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr); in finish_parity_scrub()
2487 parity = kmap_local_page(sector->page) + sector->pgoff; in finish_parity_scrub()
2488 if (memcmp(parity, pointers[rbio->scrubp], sectorsize) != 0) in finish_parity_scrub()
2489 memcpy(parity, pointers[rbio->scrubp], sectorsize); in finish_parity_scrub()
2492 bitmap_clear(&rbio->dbitmap, sectornr, 1); in finish_parity_scrub()
2495 for (stripe = nr_data - 1; stripe >= 0; stripe--) in finish_parity_scrub()
2503 kunmap_local(pointers[rbio->real_stripes - 1]); in finish_parity_scrub()
2513 for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) { in finish_parity_scrub()
2516 sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr); in finish_parity_scrub()
2517 ret = rbio_add_io_sector(rbio, &bio_list, sector, rbio->scrubp, in finish_parity_scrub()
2530 ASSERT(rbio->bioc->replace_stripe_src >= 0); in finish_parity_scrub()
2531 for_each_set_bit(sectornr, pbitmap, rbio->stripe_nsectors) { in finish_parity_scrub()
2534 sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr); in finish_parity_scrub()
2536 rbio->real_stripes, in finish_parity_scrub()
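The scrub loop in finish_parity_scrub() recomputes parity into private pages, memcmp()s the result against the parity the rbio holds, rewrites a sector only when the two differ, and clears its dbitmap bit when the on-disk copy was already good. The core check, isolated as a sketch:

#include <stdbool.h>
#include <string.h>

/* Returns true if the cached parity sector had to be corrected. */
static bool scrub_parity_sector(unsigned char *cached,
                                const unsigned char *computed, size_t len)
{
        if (memcmp(cached, computed, len) == 0)
                return false;          /* parity already good, skip write */
        memcpy(cached, computed, len); /* correct it, keep it queued      */
        return true;
}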
2553 if (stripe >= 0 && stripe < rbio->nr_data) in is_data_stripe()
2571 pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS); in recover_scrub_rbio()
2572 unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS); in recover_scrub_rbio()
2574 ret = -ENOMEM; in recover_scrub_rbio()
2578 for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) { in recover_scrub_rbio()
2579 int dfail = 0, failp = -1; in recover_scrub_rbio()
2586 if (found_errors > rbio->bioc->max_errors) { in recover_scrub_rbio()
2587 ret = -EIO; in recover_scrub_rbio()
2610 if (dfail > rbio->bioc->max_errors - 1) { in recover_scrub_rbio()
2611 ret = -EIO; in recover_scrub_rbio()
2627 if (failp != rbio->scrubp) { in recover_scrub_rbio()
2628 ret = -EIO; in recover_scrub_rbio()
2649 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; in scrub_assemble_read_bios()
2651 int sectornr = total_sector_nr % rbio->stripe_nsectors; in scrub_assemble_read_bios()
2652 int stripe = total_sector_nr / rbio->stripe_nsectors; in scrub_assemble_read_bios()
2656 if (!test_bit(sectornr, &rbio->dbitmap)) in scrub_assemble_read_bios()
2673 if (sector->uptodate) in scrub_assemble_read_bios()
2697 bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors); in scrub_rbio()
2713 wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0); in scrub_rbio()
2714 for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) { in scrub_rbio()
2718 if (found_errors > rbio->bioc->max_errors) { in scrub_rbio()
2719 ret = -EIO; in scrub_rbio()
2748 const u64 offset_in_full_stripe = data_logical - in raid56_parity_cache_data_pages()
2749 rbio->bioc->full_stripe_logical; in raid56_parity_cache_data_pages()
2751 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in raid56_parity_cache_data_pages()
2769 ASSERT(offset_in_full_stripe < (rbio->nr_data << BTRFS_STRIPE_LEN_SHIFT)); in raid56_parity_cache_data_pages()
2772 struct page *dst = rbio->stripe_pages[page_nr + page_index]; in raid56_parity_cache_data_pages()
2779 rbio->stripe_sectors[sector_nr].uptodate = true; in raid56_parity_cache_data_pages()