Lines matching refs:rbio — each entry gives the source line number, the matched code, and the enclosing function; on definition lines the trailing tag notes whether rbio is declared as an argument or a local.

62 			    const struct btrfs_raid_bio *rbio)  in btrfs_dump_rbio()  argument
67 dump_bioc(fs_info, rbio->bioc); in btrfs_dump_rbio()
70 rbio->flags, rbio->nr_sectors, rbio->nr_data, in btrfs_dump_rbio()
71 rbio->real_stripes, rbio->stripe_nsectors, in btrfs_dump_rbio()
72 rbio->sector_nsteps, rbio->scrubp, rbio->dbitmap); in btrfs_dump_rbio()
75 #define ASSERT_RBIO(expr, rbio) \ argument
78 const struct btrfs_fs_info *__fs_info = (rbio)->bioc ? \
79 (rbio)->bioc->fs_info : NULL; \
81 btrfs_dump_rbio(__fs_info, (rbio)); \
86 #define ASSERT_RBIO_STRIPE(expr, rbio, stripe_nr) \ argument
89 const struct btrfs_fs_info *__fs_info = (rbio)->bioc ? \
90 (rbio)->bioc->fs_info : NULL; \
92 btrfs_dump_rbio(__fs_info, (rbio)); \
98 #define ASSERT_RBIO_SECTOR(expr, rbio, sector_nr) \ argument
101 const struct btrfs_fs_info *__fs_info = (rbio)->bioc ? \
102 (rbio)->bioc->fs_info : NULL; \
104 btrfs_dump_rbio(__fs_info, (rbio)); \
110 #define ASSERT_RBIO_LOGICAL(expr, rbio, logical) \ argument
113 const struct btrfs_fs_info *__fs_info = (rbio)->bioc ? \
114 (rbio)->bioc->fs_info : NULL; \
116 btrfs_dump_rbio(__fs_info, (rbio)); \
144 static void index_rbio_pages(struct btrfs_raid_bio *rbio);
145 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
147 static int finish_parity_scrub(struct btrfs_raid_bio *rbio);
150 static void free_raid_bio_pointers(struct btrfs_raid_bio *rbio) in free_raid_bio_pointers() argument
152 bitmap_free(rbio->error_bitmap); in free_raid_bio_pointers()
153 bitmap_free(rbio->stripe_uptodate_bitmap); in free_raid_bio_pointers()
154 kfree(rbio->stripe_pages); in free_raid_bio_pointers()
155 kfree(rbio->bio_paddrs); in free_raid_bio_pointers()
156 kfree(rbio->stripe_paddrs); in free_raid_bio_pointers()
157 kfree(rbio->finish_pointers); in free_raid_bio_pointers()
160 static void free_raid_bio(struct btrfs_raid_bio *rbio) in free_raid_bio() argument
164 if (!refcount_dec_and_test(&rbio->refs)) in free_raid_bio()
167 WARN_ON(!list_empty(&rbio->stripe_cache)); in free_raid_bio()
168 WARN_ON(!list_empty(&rbio->hash_list)); in free_raid_bio()
169 WARN_ON(!bio_list_empty(&rbio->bio_list)); in free_raid_bio()
171 for (i = 0; i < rbio->nr_pages; i++) { in free_raid_bio()
172 if (rbio->stripe_pages[i]) { in free_raid_bio()
173 __free_page(rbio->stripe_pages[i]); in free_raid_bio()
174 rbio->stripe_pages[i] = NULL; in free_raid_bio()
178 btrfs_put_bioc(rbio->bioc); in free_raid_bio()
179 free_raid_bio_pointers(rbio); in free_raid_bio()
180 kfree(rbio); in free_raid_bio()
183 static void start_async_work(struct btrfs_raid_bio *rbio, work_func_t work_func) in start_async_work() argument
185 INIT_WORK(&rbio->work, work_func); in start_async_work()
186 queue_work(rbio->bioc->fs_info->rmw_workers, &rbio->work); in start_async_work()
231 static void memcpy_from_bio_to_stripe(struct btrfs_raid_bio *rbio, unsigned int sector_nr) in memcpy_from_bio_to_stripe() argument
233 const u32 step = min(rbio->bioc->fs_info->sectorsize, PAGE_SIZE); in memcpy_from_bio_to_stripe()
235 ASSERT(sector_nr < rbio->nr_sectors); in memcpy_from_bio_to_stripe()
236 for (int i = 0; i < rbio->sector_nsteps; i++) { in memcpy_from_bio_to_stripe()
237 unsigned int index = sector_nr * rbio->sector_nsteps + i; in memcpy_from_bio_to_stripe()
238 phys_addr_t dst = rbio->stripe_paddrs[index]; in memcpy_from_bio_to_stripe()
239 phys_addr_t src = rbio->bio_paddrs[index]; in memcpy_from_bio_to_stripe()
258 static void cache_rbio_pages(struct btrfs_raid_bio *rbio) in cache_rbio_pages() argument
263 ret = alloc_rbio_pages(rbio); in cache_rbio_pages()
267 for (i = 0; i < rbio->nr_sectors; i++) { in cache_rbio_pages()
269 if (rbio->bio_paddrs[i * rbio->sector_nsteps] == INVALID_PADDR) { in cache_rbio_pages()
275 if (i < rbio->nr_data * rbio->stripe_nsectors) in cache_rbio_pages()
276 ASSERT(test_bit(i, rbio->stripe_uptodate_bitmap)); in cache_rbio_pages()
280 memcpy_from_bio_to_stripe(rbio, i); in cache_rbio_pages()
281 set_bit(i, rbio->stripe_uptodate_bitmap); in cache_rbio_pages()
283 set_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in cache_rbio_pages()
289 static int rbio_bucket(struct btrfs_raid_bio *rbio) in rbio_bucket() argument
291 u64 num = rbio->bioc->full_stripe_logical; in rbio_bucket()
305 static u32 page_nr_to_sector_nr(struct btrfs_raid_bio *rbio, unsigned int page_nr) in page_nr_to_sector_nr() argument
309 ASSERT(page_nr < rbio->nr_pages); in page_nr_to_sector_nr()
311 sector_nr = (page_nr << PAGE_SHIFT) >> rbio->bioc->fs_info->sectorsize_bits; in page_nr_to_sector_nr()
312 ASSERT(sector_nr < rbio->nr_sectors); in page_nr_to_sector_nr()
322 static u32 page_nr_to_num_sectors(struct btrfs_raid_bio *rbio, unsigned int page_nr) in page_nr_to_num_sectors() argument
324 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; in page_nr_to_num_sectors()
327 ASSERT(page_nr < rbio->nr_pages); in page_nr_to_num_sectors()
334 static __maybe_unused bool full_page_sectors_uptodate(struct btrfs_raid_bio *rbio, in full_page_sectors_uptodate() argument
337 const u32 sector_nr = page_nr_to_sector_nr(rbio, page_nr); in full_page_sectors_uptodate()
338 const u32 nr_bits = page_nr_to_num_sectors(rbio, page_nr); in full_page_sectors_uptodate()
341 ASSERT(page_nr < rbio->nr_pages); in full_page_sectors_uptodate()
342 ASSERT(sector_nr + nr_bits < rbio->nr_sectors); in full_page_sectors_uptodate()
345 if (!test_bit(i, rbio->stripe_uptodate_bitmap)) in full_page_sectors_uptodate()
356 static void index_stripe_sectors(struct btrfs_raid_bio *rbio) in index_stripe_sectors() argument
358 const u32 step = min(rbio->bioc->fs_info->sectorsize, PAGE_SIZE); in index_stripe_sectors()
362 for (i = 0, offset = 0; i < rbio->nr_sectors * rbio->sector_nsteps; in index_stripe_sectors()
366 ASSERT(page_index < rbio->nr_pages); in index_stripe_sectors()
367 if (!rbio->stripe_pages[page_index]) in index_stripe_sectors()
370 rbio->stripe_paddrs[i] = page_to_phys(rbio->stripe_pages[page_index]) + in index_stripe_sectors()
393 static bool is_data_stripe_page(struct btrfs_raid_bio *rbio, int page_nr) in is_data_stripe_page() argument
395 const int sector_nr = page_nr_to_sector_nr(rbio, page_nr); in is_data_stripe_page()
404 return (sector_nr < rbio->nr_data * rbio->stripe_nsectors); in is_data_stripe_page()
464 static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio) in __remove_rbio_from_cache() argument
466 int bucket = rbio_bucket(rbio); in __remove_rbio_from_cache()
474 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags)) in __remove_rbio_from_cache()
477 table = rbio->bioc->fs_info->stripe_hash_table; in __remove_rbio_from_cache()
489 spin_lock(&rbio->bio_list_lock); in __remove_rbio_from_cache()
491 if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) { in __remove_rbio_from_cache()
492 list_del_init(&rbio->stripe_cache); in __remove_rbio_from_cache()
505 if (bio_list_empty(&rbio->bio_list)) { in __remove_rbio_from_cache()
506 if (!list_empty(&rbio->hash_list)) { in __remove_rbio_from_cache()
507 list_del_init(&rbio->hash_list); in __remove_rbio_from_cache()
508 refcount_dec(&rbio->refs); in __remove_rbio_from_cache()
509 BUG_ON(!list_empty(&rbio->plug_list)); in __remove_rbio_from_cache()
514 spin_unlock(&rbio->bio_list_lock); in __remove_rbio_from_cache()
518 free_raid_bio(rbio); in __remove_rbio_from_cache()
524 static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio) in remove_rbio_from_cache() argument
528 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags)) in remove_rbio_from_cache()
531 table = rbio->bioc->fs_info->stripe_hash_table; in remove_rbio_from_cache()
534 __remove_rbio_from_cache(rbio); in remove_rbio_from_cache()
544 struct btrfs_raid_bio *rbio; in btrfs_clear_rbio_cache() local
550 rbio = list_first_entry(&table->stripe_cache, in btrfs_clear_rbio_cache()
552 __remove_rbio_from_cache(rbio); in btrfs_clear_rbio_cache()
581 static void cache_rbio(struct btrfs_raid_bio *rbio) in cache_rbio() argument
585 if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags)) in cache_rbio()
588 table = rbio->bioc->fs_info->stripe_hash_table; in cache_rbio()
591 spin_lock(&rbio->bio_list_lock); in cache_rbio()
594 if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags)) in cache_rbio()
595 refcount_inc(&rbio->refs); in cache_rbio()
597 if (!list_empty(&rbio->stripe_cache)){ in cache_rbio()
598 list_move(&rbio->stripe_cache, &table->stripe_cache); in cache_rbio()
600 list_add(&rbio->stripe_cache, &table->stripe_cache); in cache_rbio()
604 spin_unlock(&rbio->bio_list_lock); in cache_rbio()
613 if (found != rbio) in cache_rbio()
644 static int rbio_is_full(struct btrfs_raid_bio *rbio) in rbio_is_full() argument
646 unsigned long size = rbio->bio_list_bytes; in rbio_is_full()
649 spin_lock(&rbio->bio_list_lock); in rbio_is_full()
650 if (size != rbio->nr_data * BTRFS_STRIPE_LEN) in rbio_is_full()
652 BUG_ON(size > rbio->nr_data * BTRFS_STRIPE_LEN); in rbio_is_full()
653 spin_unlock(&rbio->bio_list_lock); in rbio_is_full()
710 static unsigned int rbio_sector_index(const struct btrfs_raid_bio *rbio, in rbio_sector_index() argument
716 ASSERT_RBIO_STRIPE(stripe_nr < rbio->real_stripes, rbio, stripe_nr); in rbio_sector_index()
717 ASSERT_RBIO_SECTOR(sector_nr < rbio->stripe_nsectors, rbio, sector_nr); in rbio_sector_index()
719 ret = stripe_nr * rbio->stripe_nsectors + sector_nr; in rbio_sector_index()
720 ASSERT(ret < rbio->nr_sectors); in rbio_sector_index()
725 static unsigned int rbio_paddr_index(const struct btrfs_raid_bio *rbio, in rbio_paddr_index() argument
732 ASSERT_RBIO_SECTOR(step_nr < rbio->sector_nsteps, rbio, step_nr); in rbio_paddr_index()
734 ret = rbio_sector_index(rbio, stripe_nr, sector_nr) * rbio->sector_nsteps + step_nr; in rbio_paddr_index()
735 ASSERT(ret < rbio->nr_sectors * rbio->sector_nsteps); in rbio_paddr_index()
739 static phys_addr_t rbio_stripe_paddr(const struct btrfs_raid_bio *rbio, in rbio_stripe_paddr() argument
743 return rbio->stripe_paddrs[rbio_paddr_index(rbio, stripe_nr, sector_nr, step_nr)]; in rbio_stripe_paddr()
746 static phys_addr_t rbio_pstripe_paddr(const struct btrfs_raid_bio *rbio, in rbio_pstripe_paddr() argument
749 return rbio_stripe_paddr(rbio, rbio->nr_data, sector_nr, step_nr); in rbio_pstripe_paddr()
752 static phys_addr_t rbio_qstripe_paddr(const struct btrfs_raid_bio *rbio, in rbio_qstripe_paddr() argument
755 if (rbio->nr_data + 1 == rbio->real_stripes) in rbio_qstripe_paddr()
757 return rbio_stripe_paddr(rbio, rbio->nr_data + 1, sector_nr, step_nr); in rbio_qstripe_paddr()
761 static phys_addr_t *rbio_stripe_paddrs(const struct btrfs_raid_bio *rbio, in rbio_stripe_paddrs() argument
764 return &rbio->stripe_paddrs[rbio_paddr_index(rbio, stripe_nr, sector_nr, 0)]; in rbio_stripe_paddrs()
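The sector/paddr index helpers above flatten a (stripe, sector, step) coordinate into the rbio's flat arrays: the sector slot is stripe_nr * stripe_nsectors + sector_nr, and each sector expands into sector_nsteps physical-address slots. A minimal userspace sketch of that arithmetic follows; the geometry constants are illustrative assumptions, not values taken from the kernel.

#include <assert.h>
#include <stdio.h>

/* Hypothetical geometry: 4 real stripes, 16 sectors per stripe, 1 step per sector. */
#define REAL_STRIPES    4
#define STRIPE_NSECTORS 16
#define SECTOR_NSTEPS   1

/* Mirrors rbio_sector_index(): one slot per (stripe, sector) pair. */
static unsigned int sector_index(unsigned int stripe_nr, unsigned int sector_nr)
{
	assert(stripe_nr < REAL_STRIPES && sector_nr < STRIPE_NSECTORS);
	return stripe_nr * STRIPE_NSECTORS + sector_nr;
}

/* Mirrors rbio_paddr_index(): each sector expands to SECTOR_NSTEPS paddr slots. */
static unsigned int paddr_index(unsigned int stripe_nr, unsigned int sector_nr,
				unsigned int step_nr)
{
	assert(step_nr < SECTOR_NSTEPS);
	return sector_index(stripe_nr, sector_nr) * SECTOR_NSTEPS + step_nr;
}

int main(void)
{
	/* Sector 3 of stripe 2 lands at flat sector slot 2 * 16 + 3 = 35. */
	printf("%u %u\n", sector_index(2, 3), paddr_index(2, 3, 0));
	return 0;
}

With this layout the P stripe always starts at sector slot nr_data * stripe_nsectors, which is what rbio_pstripe_paddr() and rbio_qstripe_paddr() rely on.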
789 static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio) in lock_stripe_add() argument
798 h = rbio->bioc->fs_info->stripe_hash_table->table + rbio_bucket(rbio); in lock_stripe_add()
802 if (cur->bioc->full_stripe_logical != rbio->bioc->full_stripe_logical) in lock_stripe_add()
815 steal_rbio(cur, rbio); in lock_stripe_add()
823 if (rbio_can_merge(cur, rbio)) { in lock_stripe_add()
824 merge_rbio(cur, rbio); in lock_stripe_add()
826 freeit = rbio; in lock_stripe_add()
838 if (rbio_can_merge(pending, rbio)) { in lock_stripe_add()
839 merge_rbio(pending, rbio); in lock_stripe_add()
841 freeit = rbio; in lock_stripe_add()
851 list_add_tail(&rbio->plug_list, &cur->plug_list); in lock_stripe_add()
857 refcount_inc(&rbio->refs); in lock_stripe_add()
858 list_add(&rbio->hash_list, &h->hash_list); in lock_stripe_add()
874 static noinline void unlock_stripe(struct btrfs_raid_bio *rbio) in unlock_stripe() argument
880 bucket = rbio_bucket(rbio); in unlock_stripe()
881 h = rbio->bioc->fs_info->stripe_hash_table->table + bucket; in unlock_stripe()
883 if (list_empty(&rbio->plug_list)) in unlock_stripe()
884 cache_rbio(rbio); in unlock_stripe()
887 spin_lock(&rbio->bio_list_lock); in unlock_stripe()
889 if (!list_empty(&rbio->hash_list)) { in unlock_stripe()
895 if (list_empty(&rbio->plug_list) && in unlock_stripe()
896 test_bit(RBIO_CACHE_BIT, &rbio->flags)) { in unlock_stripe()
898 clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); in unlock_stripe()
899 BUG_ON(!bio_list_empty(&rbio->bio_list)); in unlock_stripe()
903 list_del_init(&rbio->hash_list); in unlock_stripe()
904 refcount_dec(&rbio->refs); in unlock_stripe()
911 if (!list_empty(&rbio->plug_list)) { in unlock_stripe()
913 struct list_head *head = rbio->plug_list.next; in unlock_stripe()
918 list_del_init(&rbio->plug_list); in unlock_stripe()
922 spin_unlock(&rbio->bio_list_lock); in unlock_stripe()
928 steal_rbio(rbio, next); in unlock_stripe()
931 steal_rbio(rbio, next); in unlock_stripe()
939 spin_unlock(&rbio->bio_list_lock); in unlock_stripe()
944 remove_rbio_from_cache(rbio); in unlock_stripe()
964 static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t status) in rbio_orig_end_io() argument
966 struct bio *cur = bio_list_get(&rbio->bio_list); in rbio_orig_end_io()
969 kfree(rbio->csum_buf); in rbio_orig_end_io()
970 bitmap_free(rbio->csum_bitmap); in rbio_orig_end_io()
971 rbio->csum_buf = NULL; in rbio_orig_end_io()
972 rbio->csum_bitmap = NULL; in rbio_orig_end_io()
979 bitmap_clear(&rbio->dbitmap, 0, rbio->stripe_nsectors); in rbio_orig_end_io()
989 unlock_stripe(rbio); in rbio_orig_end_io()
990 extra = bio_list_get(&rbio->bio_list); in rbio_orig_end_io()
991 free_raid_bio(rbio); in rbio_orig_end_io()
1013 static phys_addr_t *sector_paddrs_in_rbio(struct btrfs_raid_bio *rbio, in sector_paddrs_in_rbio() argument
1018 const int index = rbio_paddr_index(rbio, stripe_nr, sector_nr, 0); in sector_paddrs_in_rbio()
1020 ASSERT(index >= 0 && index < rbio->nr_sectors * rbio->sector_nsteps); in sector_paddrs_in_rbio()
1022 scoped_guard(spinlock, &rbio->bio_list_lock) { in sector_paddrs_in_rbio()
1023 if (rbio->bio_paddrs[index] != INVALID_PADDR || bio_list_only) { in sector_paddrs_in_rbio()
1025 if (rbio->bio_paddrs[index] != INVALID_PADDR) in sector_paddrs_in_rbio()
1026 ret = &rbio->bio_paddrs[index]; in sector_paddrs_in_rbio()
1030 return &rbio->stripe_paddrs[index]; in sector_paddrs_in_rbio()
1037 static phys_addr_t sector_paddr_in_rbio(struct btrfs_raid_bio *rbio, in sector_paddr_in_rbio() argument
1042 const int index = rbio_paddr_index(rbio, stripe_nr, sector_nr, step_nr); in sector_paddr_in_rbio()
1044 ASSERT(index >= 0 && index < rbio->nr_sectors * rbio->sector_nsteps); in sector_paddr_in_rbio()
1046 scoped_guard(spinlock, &rbio->bio_list_lock) { in sector_paddr_in_rbio()
1047 if (rbio->bio_paddrs[index] != INVALID_PADDR || bio_list_only) { in sector_paddr_in_rbio()
1049 if (rbio->bio_paddrs[index] != INVALID_PADDR) in sector_paddr_in_rbio()
1050 ret = rbio->bio_paddrs[index]; in sector_paddr_in_rbio()
1054 return rbio->stripe_paddrs[index]; in sector_paddr_in_rbio()
1072 struct btrfs_raid_bio *rbio; in alloc_rbio() local
1093 rbio = kzalloc_obj(*rbio, GFP_NOFS); in alloc_rbio()
1094 if (!rbio) in alloc_rbio()
1096 rbio->stripe_pages = kzalloc_objs(struct page *, num_pages, GFP_NOFS); in alloc_rbio()
1097 rbio->bio_paddrs = kzalloc_objs(phys_addr_t, in alloc_rbio()
1099 rbio->stripe_paddrs = kzalloc_objs(phys_addr_t, in alloc_rbio()
1102 rbio->finish_pointers = kcalloc(real_stripes, sizeof(void *), GFP_NOFS); in alloc_rbio()
1103 rbio->error_bitmap = bitmap_zalloc(num_sectors, GFP_NOFS); in alloc_rbio()
1104 rbio->stripe_uptodate_bitmap = bitmap_zalloc(num_sectors, GFP_NOFS); in alloc_rbio()
1106 if (!rbio->stripe_pages || !rbio->bio_paddrs || !rbio->stripe_paddrs || in alloc_rbio()
1107 !rbio->finish_pointers || !rbio->error_bitmap || !rbio->stripe_uptodate_bitmap) { in alloc_rbio()
1108 free_raid_bio_pointers(rbio); in alloc_rbio()
1109 kfree(rbio); in alloc_rbio()
1113 rbio->stripe_paddrs[i] = INVALID_PADDR; in alloc_rbio()
1114 rbio->bio_paddrs[i] = INVALID_PADDR; in alloc_rbio()
1117 bio_list_init(&rbio->bio_list); in alloc_rbio()
1118 init_waitqueue_head(&rbio->io_wait); in alloc_rbio()
1119 INIT_LIST_HEAD(&rbio->plug_list); in alloc_rbio()
1120 spin_lock_init(&rbio->bio_list_lock); in alloc_rbio()
1121 INIT_LIST_HEAD(&rbio->stripe_cache); in alloc_rbio()
1122 INIT_LIST_HEAD(&rbio->hash_list); in alloc_rbio()
1124 rbio->bioc = bioc; in alloc_rbio()
1125 rbio->nr_pages = num_pages; in alloc_rbio()
1126 rbio->nr_sectors = num_sectors; in alloc_rbio()
1127 rbio->real_stripes = real_stripes; in alloc_rbio()
1128 rbio->stripe_npages = stripe_npages; in alloc_rbio()
1129 rbio->stripe_nsectors = stripe_nsectors; in alloc_rbio()
1130 rbio->sector_nsteps = sector_nsteps; in alloc_rbio()
1131 refcount_set(&rbio->refs, 1); in alloc_rbio()
1132 atomic_set(&rbio->stripes_pending, 0); in alloc_rbio()
1135 rbio->nr_data = real_stripes - btrfs_nr_parity_stripes(bioc->map_type); in alloc_rbio()
1136 ASSERT(rbio->nr_data > 0); in alloc_rbio()
1138 return rbio; in alloc_rbio()
1142 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio) in alloc_rbio_pages() argument
1146 ret = btrfs_alloc_page_array(rbio->nr_pages, rbio->stripe_pages, false); in alloc_rbio_pages()
1150 index_stripe_sectors(rbio); in alloc_rbio_pages()
1155 static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio) in alloc_rbio_parity_pages() argument
1157 const int data_pages = rbio->nr_data * rbio->stripe_npages; in alloc_rbio_parity_pages()
1160 ret = btrfs_alloc_page_array(rbio->nr_pages - data_pages, in alloc_rbio_parity_pages()
1161 rbio->stripe_pages + data_pages, false); in alloc_rbio_parity_pages()
1165 index_stripe_sectors(rbio); in alloc_rbio_parity_pages()
1175 static int get_rbio_vertical_errors(struct btrfs_raid_bio *rbio, int sector_nr, in get_rbio_vertical_errors() argument
1191 for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) { in get_rbio_vertical_errors()
1192 int total_sector_nr = stripe_nr * rbio->stripe_nsectors + sector_nr; in get_rbio_vertical_errors()
1194 if (test_bit(total_sector_nr, rbio->error_bitmap)) { in get_rbio_vertical_errors()
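get_rbio_vertical_errors() walks one sector column down every stripe, counts how many stripes have that sector flagged in error_bitmap, and reports the first two failed stripe numbers through faila/failb. A standalone sketch of the same walk; the toy geometry and the single-word bitmap are assumptions for illustration only.

#include <stdio.h>

/* Assumed toy geometry: 4 stripes x 8 sectors, error bitmap packed into one word. */
#define REAL_STRIPES    4
#define STRIPE_NSECTORS 8

/* Count errors in one vertical sector column, reporting the first two failures. */
static int vertical_errors(unsigned long error_bitmap, int sector_nr,
			   int *faila, int *failb)
{
	int found = 0;

	*faila = *failb = -1;
	for (int stripe_nr = 0; stripe_nr < REAL_STRIPES; stripe_nr++) {
		int total = stripe_nr * STRIPE_NSECTORS + sector_nr;

		if (error_bitmap & (1UL << total)) {
			if (found == 0)
				*faila = stripe_nr;
			else if (found == 1)
				*failb = stripe_nr;
			found++;
		}
	}
	return found;
}

int main(void)
{
	int fa, fb;
	/* Errors at stripe 1 sector 3 (bit 11) and stripe 3 sector 3 (bit 27). */
	int n = vertical_errors((1UL << 11) | (1UL << 27), 3, &fa, &fb);

	printf("%d failures: stripes %d and %d\n", n, fa, fb);
	return 0;
}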
1237 static int rbio_add_io_paddrs(struct btrfs_raid_bio *rbio, struct bio_list *bio_list, in rbio_add_io_paddrs() argument
1241 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in rbio_add_io_paddrs()
1254 ASSERT_RBIO_STRIPE(stripe_nr >= 0 && stripe_nr < rbio->bioc->num_stripes, in rbio_add_io_paddrs()
1255 rbio, stripe_nr); in rbio_add_io_paddrs()
1256 ASSERT_RBIO_SECTOR(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors, in rbio_add_io_paddrs()
1257 rbio, sector_nr); in rbio_add_io_paddrs()
1260 stripe = &rbio->bioc->stripes[stripe_nr]; in rbio_add_io_paddrs()
1267 set_bit(stripe_nr * rbio->stripe_nsectors + sector_nr, in rbio_add_io_paddrs()
1268 rbio->error_bitmap); in rbio_add_io_paddrs()
1271 found_errors = get_rbio_vertical_errors(rbio, sector_nr, in rbio_add_io_paddrs()
1273 if (unlikely(found_errors > rbio->bioc->max_errors)) in rbio_add_io_paddrs()
1289 ret = bio_add_paddrs(last, paddrs, rbio->sector_nsteps, step); in rbio_add_io_paddrs()
1300 bio->bi_private = rbio; in rbio_add_io_paddrs()
1302 ret = bio_add_paddrs(bio, paddrs, rbio->sector_nsteps, step); in rbio_add_io_paddrs()
1308 static void index_one_bio(struct btrfs_raid_bio *rbio, struct bio *bio) in index_one_bio() argument
1310 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; in index_one_bio()
1316 rbio->bioc->full_stripe_logical; in index_one_bio()
1321 rbio->bio_paddrs[index] = paddr; in index_one_bio()
1334 static void index_rbio_pages(struct btrfs_raid_bio *rbio) in index_rbio_pages() argument
1338 spin_lock(&rbio->bio_list_lock); in index_rbio_pages()
1339 bio_list_for_each(bio, &rbio->bio_list) in index_rbio_pages()
1340 index_one_bio(rbio, bio); in index_rbio_pages()
1342 spin_unlock(&rbio->bio_list_lock); in index_rbio_pages()
1345 static void bio_get_trace_info(struct btrfs_raid_bio *rbio, struct bio *bio, in bio_get_trace_info() argument
1348 const struct btrfs_io_context *bioc = rbio->bioc; in bio_get_trace_info()
1381 static void assert_rbio(struct btrfs_raid_bio *rbio) in assert_rbio() argument
1390 ASSERT_RBIO(rbio->real_stripes >= 2, rbio); in assert_rbio()
1391 ASSERT_RBIO(rbio->nr_data > 0, rbio); in assert_rbio()
1397 ASSERT_RBIO(rbio->nr_data < rbio->real_stripes, rbio); in assert_rbio()
1408 static void generate_pq_vertical_step(struct btrfs_raid_bio *rbio, unsigned int sector_nr, in generate_pq_vertical_step() argument
1411 void **pointers = rbio->finish_pointers; in generate_pq_vertical_step()
1412 const u32 step = min(rbio->bioc->fs_info->sectorsize, PAGE_SIZE); in generate_pq_vertical_step()
1414 const bool has_qstripe = rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6; in generate_pq_vertical_step()
1417 for (stripe = 0; stripe < rbio->nr_data; stripe++) in generate_pq_vertical_step()
1419 sector_paddr_in_rbio(rbio, stripe, sector_nr, step_nr, 0)); in generate_pq_vertical_step()
1422 pointers[stripe++] = kmap_local_paddr(rbio_pstripe_paddr(rbio, sector_nr, step_nr)); in generate_pq_vertical_step()
1430 rbio_qstripe_paddr(rbio, sector_nr, step_nr)); in generate_pq_vertical_step()
1432 assert_rbio(rbio); in generate_pq_vertical_step()
1433 raid6_call.gen_syndrome(rbio->real_stripes, step, pointers); in generate_pq_vertical_step()
1436 memcpy(pointers[rbio->nr_data], pointers[0], step); in generate_pq_vertical_step()
1437 run_xor(pointers + 1, rbio->nr_data - 1, step); in generate_pq_vertical_step()
1444 static void generate_pq_vertical(struct btrfs_raid_bio *rbio, int sectornr) in generate_pq_vertical() argument
1446 const bool has_qstripe = (rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6); in generate_pq_vertical()
1448 for (int i = 0; i < rbio->sector_nsteps; i++) in generate_pq_vertical()
1449 generate_pq_vertical_step(rbio, sectornr, i); in generate_pq_vertical()
1451 set_bit(rbio_sector_index(rbio, rbio->nr_data, sectornr), in generate_pq_vertical()
1452 rbio->stripe_uptodate_bitmap); in generate_pq_vertical()
1454 set_bit(rbio_sector_index(rbio, rbio->nr_data + 1, sectornr), in generate_pq_vertical()
1455 rbio->stripe_uptodate_bitmap); in generate_pq_vertical()
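generate_pq_vertical_step() builds parity for one vertical step: for RAID5 it copies the first data buffer into P and folds the remaining data buffers in with run_xor(); for RAID6 it calls raid6_call.gen_syndrome() to produce both P and Q. A userspace sketch of the RAID5 half only, with an assumed 4 KiB step size (the Q syndrome math is left to the raid6 library and not reproduced here).

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define STEP 4096	/* assumed step size, i.e. min(sectorsize, PAGE_SIZE) */

/*
 * Simplified RAID5 parity for one vertical step: P = D0 ^ D1 ^ ... ^ Dn-1.
 * The kernel achieves the same result via memcpy() of the first data buffer
 * into the P buffer followed by run_xor() over the remaining data buffers.
 */
static void gen_p(uint8_t *p, uint8_t *data[], int nr_data)
{
	memcpy(p, data[0], STEP);
	for (int i = 1; i < nr_data; i++)
		for (int b = 0; b < STEP; b++)
			p[b] ^= data[i][b];
}

int main(void)
{
	static uint8_t d0[STEP], d1[STEP], p[STEP];
	uint8_t *data[] = { d0, d1 };

	memset(d0, 0xaa, STEP);
	memset(d1, 0x55, STEP);
	gen_p(p, data, 2);
	printf("p[0] = 0x%02x\n", p[0]);	/* 0xaa ^ 0x55 = 0xff */
	return 0;
}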
1458 static int rmw_assemble_write_bios(struct btrfs_raid_bio *rbio, in rmw_assemble_write_bios() argument
1470 ASSERT(bitmap_weight(&rbio->dbitmap, rbio->stripe_nsectors)); in rmw_assemble_write_bios()
1476 bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors); in rmw_assemble_write_bios()
1482 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; in rmw_assemble_write_bios()
1486 stripe = total_sector_nr / rbio->stripe_nsectors; in rmw_assemble_write_bios()
1487 sectornr = total_sector_nr % rbio->stripe_nsectors; in rmw_assemble_write_bios()
1490 if (!test_bit(sectornr, &rbio->dbitmap)) in rmw_assemble_write_bios()
1493 if (stripe < rbio->nr_data) { in rmw_assemble_write_bios()
1494 paddrs = sector_paddrs_in_rbio(rbio, stripe, sectornr, 1); in rmw_assemble_write_bios()
1498 paddrs = rbio_stripe_paddrs(rbio, stripe, sectornr); in rmw_assemble_write_bios()
1501 ret = rbio_add_io_paddrs(rbio, bio_list, paddrs, stripe, in rmw_assemble_write_bios()
1507 if (likely(!rbio->bioc->replace_nr_stripes)) in rmw_assemble_write_bios()
1515 ASSERT(rbio->bioc->replace_stripe_src >= 0); in rmw_assemble_write_bios()
1517 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; in rmw_assemble_write_bios()
1521 stripe = total_sector_nr / rbio->stripe_nsectors; in rmw_assemble_write_bios()
1522 sectornr = total_sector_nr % rbio->stripe_nsectors; in rmw_assemble_write_bios()
1529 if (stripe != rbio->bioc->replace_stripe_src) { in rmw_assemble_write_bios()
1535 total_sector_nr += rbio->stripe_nsectors - 1; in rmw_assemble_write_bios()
1540 if (!test_bit(sectornr, &rbio->dbitmap)) in rmw_assemble_write_bios()
1543 if (stripe < rbio->nr_data) { in rmw_assemble_write_bios()
1544 paddrs = sector_paddrs_in_rbio(rbio, stripe, sectornr, 1); in rmw_assemble_write_bios()
1548 paddrs = rbio_stripe_paddrs(rbio, stripe, sectornr); in rmw_assemble_write_bios()
1551 ret = rbio_add_io_paddrs(rbio, bio_list, paddrs, in rmw_assemble_write_bios()
1552 rbio->real_stripes, in rmw_assemble_write_bios()
1564 static void set_rbio_range_error(struct btrfs_raid_bio *rbio, struct bio *bio) in set_rbio_range_error() argument
1566 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; in set_rbio_range_error()
1568 rbio->bioc->full_stripe_logical; in set_rbio_range_error()
1571 ASSERT(total_nr_sector < rbio->nr_data * rbio->stripe_nsectors); in set_rbio_range_error()
1573 bitmap_set(rbio->error_bitmap, total_nr_sector, in set_rbio_range_error()
1586 for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) { in set_rbio_range_error()
1587 if (!rbio->bioc->stripes[stripe_nr].dev->bdev) { in set_rbio_range_error()
1589 bitmap_set(rbio->error_bitmap, in set_rbio_range_error()
1590 stripe_nr * rbio->stripe_nsectors, in set_rbio_range_error()
1591 rbio->stripe_nsectors); in set_rbio_range_error()
1603 static int find_stripe_sector_nr(struct btrfs_raid_bio *rbio, phys_addr_t paddr) in find_stripe_sector_nr() argument
1605 for (int i = 0; i < rbio->nr_sectors; i++) { in find_stripe_sector_nr()
1606 if (rbio->stripe_paddrs[i * rbio->sector_nsteps] == paddr) in find_stripe_sector_nr()
1616 static void set_bio_pages_uptodate(struct btrfs_raid_bio *rbio, struct bio *bio) in set_bio_pages_uptodate() argument
1618 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in set_bio_pages_uptodate()
1628 int sector_nr = find_stripe_sector_nr(rbio, paddr); in set_bio_pages_uptodate()
1632 set_bit(sector_nr, rbio->stripe_uptodate_bitmap); in set_bio_pages_uptodate()
1638 static int get_bio_sector_nr(struct btrfs_raid_bio *rbio, struct bio *bio) in get_bio_sector_nr() argument
1643 for (i = 0; i < rbio->nr_sectors; i++) { in get_bio_sector_nr()
1644 if (rbio->stripe_paddrs[i * rbio->sector_nsteps] == bvec_paddr) in get_bio_sector_nr()
1646 if (rbio->bio_paddrs[i * rbio->sector_nsteps] == bvec_paddr) in get_bio_sector_nr()
1649 ASSERT(i < rbio->nr_sectors); in get_bio_sector_nr()
1653 static void rbio_update_error_bitmap(struct btrfs_raid_bio *rbio, struct bio *bio) in rbio_update_error_bitmap() argument
1655 int total_sector_nr = get_bio_sector_nr(rbio, bio); in rbio_update_error_bitmap()
1665 (bio_size >> rbio->bioc->fs_info->sectorsize_bits); i++) in rbio_update_error_bitmap()
1666 set_bit(i, rbio->error_bitmap); in rbio_update_error_bitmap()
1670 static void verify_bio_data_sectors(struct btrfs_raid_bio *rbio, in verify_bio_data_sectors() argument
1673 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; in verify_bio_data_sectors()
1675 const u32 nr_steps = rbio->sector_nsteps; in verify_bio_data_sectors()
1676 int total_sector_nr = get_bio_sector_nr(rbio, bio); in verify_bio_data_sectors()
1682 if (!rbio->csum_bitmap || !rbio->csum_buf) in verify_bio_data_sectors()
1686 if (total_sector_nr >= rbio->nr_data * rbio->stripe_nsectors) in verify_bio_data_sectors()
1701 if (!test_bit(total_sector_nr, rbio->csum_bitmap)) in verify_bio_data_sectors()
1704 expected_csum = rbio->csum_buf + total_sector_nr * fs_info->csum_size; in verify_bio_data_sectors()
1707 set_bit(total_sector_nr, rbio->error_bitmap); in verify_bio_data_sectors()
1714 struct btrfs_raid_bio *rbio = bio->bi_private; in raid_wait_read_end_io() local
1717 rbio_update_error_bitmap(rbio, bio); in raid_wait_read_end_io()
1719 set_bio_pages_uptodate(rbio, bio); in raid_wait_read_end_io()
1720 verify_bio_data_sectors(rbio, bio); in raid_wait_read_end_io()
1724 if (atomic_dec_and_test(&rbio->stripes_pending)) in raid_wait_read_end_io()
1725 wake_up(&rbio->io_wait); in raid_wait_read_end_io()
1728 static void submit_read_wait_bio_list(struct btrfs_raid_bio *rbio, in submit_read_wait_bio_list() argument
1733 atomic_set(&rbio->stripes_pending, bio_list_size(bio_list)); in submit_read_wait_bio_list()
1740 bio_get_trace_info(rbio, bio, &trace_info); in submit_read_wait_bio_list()
1741 trace_raid56_read(rbio, bio, &trace_info); in submit_read_wait_bio_list()
1746 wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0); in submit_read_wait_bio_list()
1749 static int alloc_rbio_data_pages(struct btrfs_raid_bio *rbio) in alloc_rbio_data_pages() argument
1751 const int data_pages = rbio->nr_data * rbio->stripe_npages; in alloc_rbio_data_pages()
1754 ret = btrfs_alloc_page_array(data_pages, rbio->stripe_pages, false); in alloc_rbio_data_pages()
1758 index_stripe_sectors(rbio); in alloc_rbio_data_pages()
1829 static void rbio_add_bio(struct btrfs_raid_bio *rbio, struct bio *orig_bio) in rbio_add_bio() argument
1831 const struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; in rbio_add_bio()
1833 const u64 full_stripe_start = rbio->bioc->full_stripe_logical; in rbio_add_bio()
1840 rbio->nr_data * BTRFS_STRIPE_LEN, in rbio_add_bio()
1841 rbio, orig_logical); in rbio_add_bio()
1843 bio_list_add(&rbio->bio_list, orig_bio); in rbio_add_bio()
1844 rbio->bio_list_bytes += orig_bio->bi_iter.bi_size; in rbio_add_bio()
1850 fs_info->sectorsize_bits) % rbio->stripe_nsectors; in rbio_add_bio()
1852 set_bit(bit, &rbio->dbitmap); in rbio_add_bio()
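rbio_add_bio() records which columns of the full stripe an incoming bio dirties: for every sector the bio covers it sets bit ((logical - full_stripe_start) >> sectorsize_bits) % stripe_nsectors in dbitmap, so the RMW path later rewrites only those columns. A sketch of the same arithmetic with assumed 4 KiB sectors and 16 sectors per stripe.

#include <stdint.h>
#include <stdio.h>

/* Assumed geometry for illustration only. */
#define SECTORSIZE_BITS 12		/* 4 KiB sectors */
#define STRIPE_NSECTORS 16		/* 64 KiB stripe / 4 KiB */

/* Return the dbitmap produced by a write of @len bytes starting at @logical. */
static unsigned long dbitmap_for_range(uint64_t full_stripe_start,
				       uint64_t logical, uint32_t len)
{
	unsigned long dbitmap = 0;

	for (uint64_t cur = logical; cur < logical + len; cur += 1 << SECTORSIZE_BITS) {
		unsigned int bit = ((cur - full_stripe_start) >> SECTORSIZE_BITS) %
				   STRIPE_NSECTORS;

		dbitmap |= 1UL << bit;
	}
	return dbitmap;
}

int main(void)
{
	/* A 16 KiB write starting 8 KiB into the full stripe covers sectors 2..5. */
	printf("0x%lx\n", dbitmap_for_range(0, 8192, 16384));	/* prints 0x3c */
	return 0;
}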
1862 struct btrfs_raid_bio *rbio; in raid56_parity_write() local
1866 rbio = alloc_rbio(fs_info, bioc); in raid56_parity_write()
1867 if (IS_ERR(rbio)) { in raid56_parity_write()
1868 bio->bi_status = errno_to_blk_status(PTR_ERR(rbio)); in raid56_parity_write()
1872 rbio->operation = BTRFS_RBIO_WRITE; in raid56_parity_write()
1873 rbio_add_bio(rbio, bio); in raid56_parity_write()
1879 if (!rbio_is_full(rbio)) { in raid56_parity_write()
1887 list_add_tail(&rbio->plug_list, &plug->rbio_list); in raid56_parity_write()
1896 start_async_work(rbio, rmw_rbio_work); in raid56_parity_write()
1899 static int verify_one_sector(struct btrfs_raid_bio *rbio, in verify_one_sector() argument
1902 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; in verify_one_sector()
1907 if (!rbio->csum_bitmap || !rbio->csum_buf) in verify_one_sector()
1911 if (stripe_nr >= rbio->nr_data) in verify_one_sector()
1917 if (rbio->operation == BTRFS_RBIO_READ_REBUILD) { in verify_one_sector()
1918 paddrs = sector_paddrs_in_rbio(rbio, stripe_nr, sector_nr, 0); in verify_one_sector()
1920 paddrs = rbio_stripe_paddrs(rbio, stripe_nr, sector_nr); in verify_one_sector()
1923 csum_expected = rbio->csum_buf + in verify_one_sector()
1924 (stripe_nr * rbio->stripe_nsectors + sector_nr) * in verify_one_sector()
1932 static void recover_vertical_step(struct btrfs_raid_bio *rbio, in recover_vertical_step() argument
1938 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; in recover_vertical_step()
1942 ASSERT(step_nr < rbio->sector_nsteps); in recover_vertical_step()
1943 ASSERT(sector_nr < rbio->stripe_nsectors); in recover_vertical_step()
1951 for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) { in recover_vertical_step()
1958 if (rbio->operation == BTRFS_RBIO_READ_REBUILD) { in recover_vertical_step()
1959 paddr = sector_paddr_in_rbio(rbio, stripe_nr, sector_nr, step_nr, 0); in recover_vertical_step()
1961 paddr = rbio_stripe_paddr(rbio, stripe_nr, sector_nr, step_nr); in recover_vertical_step()
1968 if (rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6) { in recover_vertical_step()
1971 if (faila == rbio->nr_data) in recover_vertical_step()
1993 if (failb == rbio->real_stripes - 1) { in recover_vertical_step()
1994 if (faila == rbio->real_stripes - 2) in recover_vertical_step()
2008 if (failb == rbio->real_stripes - 2) { in recover_vertical_step()
2009 raid6_datap_recov(rbio->real_stripes, step, in recover_vertical_step()
2012 raid6_2data_recov(rbio->real_stripes, step, in recover_vertical_step()
2022 memcpy(pointers[faila], pointers[rbio->nr_data], step); in recover_vertical_step()
2026 for (stripe_nr = faila; stripe_nr < rbio->nr_data - 1; in recover_vertical_step()
2029 pointers[rbio->nr_data - 1] = p; in recover_vertical_step()
2032 run_xor(pointers, rbio->nr_data - 1, step); in recover_vertical_step()
2036 for (stripe_nr = rbio->real_stripes - 1; stripe_nr >= 0; stripe_nr--) in recover_vertical_step()
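For a RAID6 profile with two failures in one vertical step, recover_vertical_step() chooses a strategy from which stripes failed: when Q is one of the failures the data can still be rebuilt RAID5-style with plain XOR (or needs no rebuild at all if only P and Q failed), when P plus one data stripe failed it uses raid6_datap_recov(), and for two failed data stripes it uses raid6_2data_recov(). Below is a compilable sketch of just that dispatch, assuming faila < failb, both valid, and the usual layout of nr_data data stripes followed by P and Q; it is an illustration, not the kernel's control flow verbatim.

#include <stdio.h>

enum recov_path { RECOV_NONE, RECOV_RAID5_XOR, RECOV_RAID6_DATAP, RECOV_RAID6_2DATA };

/*
 * Assumed stripe layout: data stripes first, then P at real_stripes - 2
 * and Q at real_stripes - 1.
 */
static enum recov_path pick_recovery(int real_stripes, int faila, int failb)
{
	if (failb == real_stripes - 1) {
		/* Q is one of the failures. */
		if (faila == real_stripes - 2)
			return RECOV_NONE;	/* only P and Q failed: data stripes intact */
		return RECOV_RAID5_XOR;		/* one data stripe + Q: plain XOR rebuild */
	}
	if (failb == real_stripes - 2)
		return RECOV_RAID6_DATAP;	/* one data stripe + P: raid6_datap_recov() */
	return RECOV_RAID6_2DATA;		/* two data stripes: raid6_2data_recov() */
}

int main(void)
{
	/* 6 stripes total: 0-3 data, 4 = P, 5 = Q.  Stripe 1 and Q failed. */
	printf("%d\n", pick_recovery(6, 1, 5));	/* prints 1 (RECOV_RAID5_XOR) */
	return 0;
}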
2045 static int recover_vertical(struct btrfs_raid_bio *rbio, int sector_nr, in recover_vertical() argument
2057 if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB && in recover_vertical()
2058 !test_bit(sector_nr, &rbio->dbitmap)) in recover_vertical()
2061 found_errors = get_rbio_vertical_errors(rbio, sector_nr, &faila, in recover_vertical()
2070 if (unlikely(found_errors > rbio->bioc->max_errors)) in recover_vertical()
2073 for (int i = 0; i < rbio->sector_nsteps; i++) in recover_vertical()
2074 recover_vertical_step(rbio, sector_nr, i, faila, failb, in recover_vertical()
2077 ret = verify_one_sector(rbio, faila, sector_nr); in recover_vertical()
2081 set_bit(rbio_sector_index(rbio, faila, sector_nr), in recover_vertical()
2082 rbio->stripe_uptodate_bitmap); in recover_vertical()
2085 ret = verify_one_sector(rbio, failb, sector_nr); in recover_vertical()
2089 set_bit(rbio_sector_index(rbio, failb, sector_nr), in recover_vertical()
2090 rbio->stripe_uptodate_bitmap); in recover_vertical()
2095 static int recover_sectors(struct btrfs_raid_bio *rbio) in recover_sectors() argument
2108 pointers = kzalloc_objs(void *, rbio->real_stripes, GFP_NOFS); in recover_sectors()
2109 unmap_array = kzalloc_objs(void *, rbio->real_stripes, GFP_NOFS); in recover_sectors()
2115 if (rbio->operation == BTRFS_RBIO_READ_REBUILD) { in recover_sectors()
2116 spin_lock(&rbio->bio_list_lock); in recover_sectors()
2117 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); in recover_sectors()
2118 spin_unlock(&rbio->bio_list_lock); in recover_sectors()
2121 index_rbio_pages(rbio); in recover_sectors()
2123 for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) { in recover_sectors()
2124 ret = recover_vertical(rbio, sectornr, pointers, unmap_array); in recover_sectors()
2135 static void recover_rbio(struct btrfs_raid_bio *rbio) in recover_rbio() argument
2145 ASSERT(bitmap_weight(rbio->error_bitmap, rbio->nr_sectors)); in recover_rbio()
2148 ret = alloc_rbio_pages(rbio); in recover_rbio()
2152 index_rbio_pages(rbio); in recover_rbio()
2162 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; in recover_rbio()
2164 int stripe = total_sector_nr / rbio->stripe_nsectors; in recover_rbio()
2165 int sectornr = total_sector_nr % rbio->stripe_nsectors; in recover_rbio()
2173 if (!rbio->bioc->stripes[stripe].dev->bdev || in recover_rbio()
2174 test_bit(total_sector_nr, rbio->error_bitmap)) { in recover_rbio()
2179 set_bit(total_sector_nr, rbio->error_bitmap); in recover_rbio()
2183 paddrs = rbio_stripe_paddrs(rbio, stripe, sectornr); in recover_rbio()
2184 ret = rbio_add_io_paddrs(rbio, &bio_list, paddrs, stripe, in recover_rbio()
2192 submit_read_wait_bio_list(rbio, &bio_list); in recover_rbio()
2193 ret = recover_sectors(rbio); in recover_rbio()
2195 rbio_orig_end_io(rbio, errno_to_blk_status(ret)); in recover_rbio()
2200 struct btrfs_raid_bio *rbio; in recover_rbio_work() local
2202 rbio = container_of(work, struct btrfs_raid_bio, work); in recover_rbio_work()
2203 if (!lock_stripe_add(rbio)) in recover_rbio_work()
2204 recover_rbio(rbio); in recover_rbio_work()
2212 static void set_rbio_raid6_extra_error(struct btrfs_raid_bio *rbio, int mirror_num) in set_rbio_raid6_extra_error() argument
2224 for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) { in set_rbio_raid6_extra_error()
2229 found_errors = get_rbio_vertical_errors(rbio, sector_nr, in set_rbio_raid6_extra_error()
2243 failb = rbio->real_stripes - (mirror_num - 1); in set_rbio_raid6_extra_error()
2249 set_bit(failb * rbio->stripe_nsectors + sector_nr, in set_rbio_raid6_extra_error()
2250 rbio->error_bitmap); in set_rbio_raid6_extra_error()
2267 struct btrfs_raid_bio *rbio; in raid56_parity_recover() local
2269 rbio = alloc_rbio(fs_info, bioc); in raid56_parity_recover()
2270 if (IS_ERR(rbio)) { in raid56_parity_recover()
2271 bio->bi_status = errno_to_blk_status(PTR_ERR(rbio)); in raid56_parity_recover()
2276 rbio->operation = BTRFS_RBIO_READ_REBUILD; in raid56_parity_recover()
2277 rbio_add_bio(rbio, bio); in raid56_parity_recover()
2279 set_rbio_range_error(rbio, bio); in raid56_parity_recover()
2287 set_rbio_raid6_extra_error(rbio, mirror_num); in raid56_parity_recover()
2289 start_async_work(rbio, recover_rbio_work); in raid56_parity_recover()
2292 static void fill_data_csums(struct btrfs_raid_bio *rbio) in fill_data_csums() argument
2294 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; in fill_data_csums()
2296 const u64 start = rbio->bioc->full_stripe_logical; in fill_data_csums()
2297 const u32 len = (rbio->nr_data * rbio->stripe_nsectors) << in fill_data_csums()
2302 ASSERT(!rbio->csum_buf && !rbio->csum_bitmap); in fill_data_csums()
2315 if (!(rbio->bioc->map_type & BTRFS_BLOCK_GROUP_DATA) || in fill_data_csums()
2316 rbio->bioc->map_type & BTRFS_BLOCK_GROUP_METADATA) in fill_data_csums()
2319 rbio->csum_buf = kzalloc(rbio->nr_data * rbio->stripe_nsectors * in fill_data_csums()
2321 rbio->csum_bitmap = bitmap_zalloc(rbio->nr_data * rbio->stripe_nsectors, in fill_data_csums()
2323 if (!rbio->csum_buf || !rbio->csum_bitmap) { in fill_data_csums()
2328 csum_root = btrfs_csum_root(fs_info, rbio->bioc->full_stripe_logical); in fill_data_csums()
2332 rbio->bioc->full_stripe_logical); in fill_data_csums()
2338 rbio->csum_buf, rbio->csum_bitmap); in fill_data_csums()
2341 if (bitmap_empty(rbio->csum_bitmap, len >> fs_info->sectorsize_bits)) in fill_data_csums()
2353 rbio->bioc->full_stripe_logical, ret); in fill_data_csums()
2355 kfree(rbio->csum_buf); in fill_data_csums()
2356 bitmap_free(rbio->csum_bitmap); in fill_data_csums()
2357 rbio->csum_buf = NULL; in fill_data_csums()
2358 rbio->csum_bitmap = NULL; in fill_data_csums()
2361 static int rmw_read_wait_recover(struct btrfs_raid_bio *rbio) in rmw_read_wait_recover() argument
2372 fill_data_csums(rbio); in rmw_read_wait_recover()
2379 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; in rmw_read_wait_recover()
2381 int stripe = total_sector_nr / rbio->stripe_nsectors; in rmw_read_wait_recover()
2382 int sectornr = total_sector_nr % rbio->stripe_nsectors; in rmw_read_wait_recover()
2385 paddrs = rbio_stripe_paddrs(rbio, stripe, sectornr); in rmw_read_wait_recover()
2386 ret = rbio_add_io_paddrs(rbio, &bio_list, paddrs, stripe, in rmw_read_wait_recover()
2398 submit_read_wait_bio_list(rbio, &bio_list); in rmw_read_wait_recover()
2399 return recover_sectors(rbio); in rmw_read_wait_recover()
2404 struct btrfs_raid_bio *rbio = bio->bi_private; in raid_wait_write_end_io() local
2407 rbio_update_error_bitmap(rbio, bio); in raid_wait_write_end_io()
2409 if (atomic_dec_and_test(&rbio->stripes_pending)) in raid_wait_write_end_io()
2410 wake_up(&rbio->io_wait); in raid_wait_write_end_io()
2413 static void submit_write_bios(struct btrfs_raid_bio *rbio, in submit_write_bios() argument
2418 atomic_set(&rbio->stripes_pending, bio_list_size(bio_list)); in submit_write_bios()
2425 bio_get_trace_info(rbio, bio, &trace_info); in submit_write_bios()
2426 trace_raid56_write(rbio, bio, &trace_info); in submit_write_bios()
2436 static bool need_read_stripe_sectors(struct btrfs_raid_bio *rbio) in need_read_stripe_sectors() argument
2440 for (i = 0; i < rbio->nr_data * rbio->stripe_nsectors; i++) { in need_read_stripe_sectors()
2441 phys_addr_t paddr = rbio->stripe_paddrs[i * rbio->sector_nsteps]; in need_read_stripe_sectors()
2449 !test_bit(i, rbio->stripe_uptodate_bitmap)) in need_read_stripe_sectors()
2455 static void rmw_rbio(struct btrfs_raid_bio *rbio) in rmw_rbio() argument
2465 ret = alloc_rbio_parity_pages(rbio); in rmw_rbio()
2473 if (!rbio_is_full(rbio) && need_read_stripe_sectors(rbio)) { in rmw_rbio()
2478 ret = alloc_rbio_data_pages(rbio); in rmw_rbio()
2482 index_rbio_pages(rbio); in rmw_rbio()
2484 ret = rmw_read_wait_recover(rbio); in rmw_rbio()
2494 spin_lock(&rbio->bio_list_lock); in rmw_rbio()
2495 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); in rmw_rbio()
2496 spin_unlock(&rbio->bio_list_lock); in rmw_rbio()
2498 bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors); in rmw_rbio()
2500 index_rbio_pages(rbio); in rmw_rbio()
2508 if (!rbio_is_full(rbio)) in rmw_rbio()
2509 cache_rbio_pages(rbio); in rmw_rbio()
2511 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in rmw_rbio()
2513 for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) in rmw_rbio()
2514 generate_pq_vertical(rbio, sectornr); in rmw_rbio()
2517 ret = rmw_assemble_write_bios(rbio, &bio_list); in rmw_rbio()
2523 submit_write_bios(rbio, &bio_list); in rmw_rbio()
2524 wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0); in rmw_rbio()
2527 for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) { in rmw_rbio()
2530 found_errors = get_rbio_vertical_errors(rbio, sectornr, NULL, NULL); in rmw_rbio()
2531 if (unlikely(found_errors > rbio->bioc->max_errors)) { in rmw_rbio()
2537 rbio_orig_end_io(rbio, errno_to_blk_status(ret)); in rmw_rbio()
2542 struct btrfs_raid_bio *rbio; in rmw_rbio_work() local
2544 rbio = container_of(work, struct btrfs_raid_bio, work); in rmw_rbio_work()
2545 if (lock_stripe_add(rbio) == 0) in rmw_rbio_work()
2546 rmw_rbio(rbio); in rmw_rbio_work()
2570 struct btrfs_raid_bio *rbio; in raid56_parity_alloc_scrub_rbio() local
2573 rbio = alloc_rbio(fs_info, bioc); in raid56_parity_alloc_scrub_rbio()
2574 if (IS_ERR(rbio)) in raid56_parity_alloc_scrub_rbio()
2576 bio_list_add(&rbio->bio_list, bio); in raid56_parity_alloc_scrub_rbio()
2582 rbio->operation = BTRFS_RBIO_PARITY_SCRUB; in raid56_parity_alloc_scrub_rbio()
2589 for (i = rbio->nr_data; i < rbio->real_stripes; i++) { in raid56_parity_alloc_scrub_rbio()
2591 rbio->scrubp = i; in raid56_parity_alloc_scrub_rbio()
2595 ASSERT_RBIO_STRIPE(i < rbio->real_stripes, rbio, i); in raid56_parity_alloc_scrub_rbio()
2597 bitmap_copy(&rbio->dbitmap, dbitmap, stripe_nsectors); in raid56_parity_alloc_scrub_rbio()
2598 return rbio; in raid56_parity_alloc_scrub_rbio()
2601 static int alloc_rbio_sector_pages(struct btrfs_raid_bio *rbio, in alloc_rbio_sector_pages() argument
2604 const u32 step = min(PAGE_SIZE, rbio->bioc->fs_info->sectorsize); in alloc_rbio_sector_pages()
2605 const u32 base = sector_nr * rbio->sector_nsteps; in alloc_rbio_sector_pages()
2607 for (int i = base; i < base + rbio->sector_nsteps; i++) { in alloc_rbio_sector_pages()
2611 if (rbio->stripe_pages[page_index]) in alloc_rbio_sector_pages()
2616 rbio->stripe_pages[page_index] = page; in alloc_rbio_sector_pages()
2625 static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio) in alloc_rbio_essential_pages() argument
2629 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; in alloc_rbio_essential_pages()
2631 int sectornr = total_sector_nr % rbio->stripe_nsectors; in alloc_rbio_essential_pages()
2634 if (!test_bit(sectornr, &rbio->dbitmap)) in alloc_rbio_essential_pages()
2636 ret = alloc_rbio_sector_pages(rbio, total_sector_nr); in alloc_rbio_essential_pages()
2640 index_stripe_sectors(rbio); in alloc_rbio_essential_pages()
2645 static bool verify_one_parity_step(struct btrfs_raid_bio *rbio, in verify_one_parity_step() argument
2649 const unsigned int nr_data = rbio->nr_data; in verify_one_parity_step()
2650 const bool has_qstripe = (rbio->real_stripes - rbio->nr_data == 2); in verify_one_parity_step()
2651 const u32 step = min(rbio->bioc->fs_info->sectorsize, PAGE_SIZE); in verify_one_parity_step()
2655 ASSERT(step_nr < rbio->sector_nsteps); in verify_one_parity_step()
2660 sector_paddr_in_rbio(rbio, stripe, sector_nr, in verify_one_parity_step()
2664 assert_rbio(rbio); in verify_one_parity_step()
2666 raid6_call.gen_syndrome(rbio->real_stripes, step, pointers); in verify_one_parity_step()
2674 parity = kmap_local_paddr(rbio_stripe_paddr(rbio, rbio->scrubp, sector_nr, step_nr)); in verify_one_parity_step()
2675 if (memcmp(parity, pointers[rbio->scrubp], step) != 0) in verify_one_parity_step()
2676 memcpy(parity, pointers[rbio->scrubp], step); in verify_one_parity_step()
2689 static void verify_one_parity_sector(struct btrfs_raid_bio *rbio, in verify_one_parity_sector() argument
2694 for (int step_nr = 0; step_nr < rbio->sector_nsteps; step_nr++) { in verify_one_parity_sector()
2697 match = verify_one_parity_step(rbio, pointers, sector_nr, step_nr); in verify_one_parity_sector()
2702 bitmap_clear(&rbio->dbitmap, sector_nr, 1); in verify_one_parity_sector()
2705 static int finish_parity_scrub(struct btrfs_raid_bio *rbio) in finish_parity_scrub() argument
2707 struct btrfs_io_context *bioc = rbio->bioc; in finish_parity_scrub()
2708 void **pointers = rbio->finish_pointers; in finish_parity_scrub()
2709 unsigned long *pbitmap = &rbio->finish_pbitmap; in finish_parity_scrub()
2710 int nr_data = rbio->nr_data; in finish_parity_scrub()
2722 if (rbio->real_stripes - rbio->nr_data == 1) in finish_parity_scrub()
2724 else if (rbio->real_stripes - rbio->nr_data == 2) in finish_parity_scrub()
2733 if (bioc->replace_nr_stripes && bioc->replace_stripe_src == rbio->scrubp) { in finish_parity_scrub()
2735 bitmap_copy(pbitmap, &rbio->dbitmap, rbio->stripe_nsectors); in finish_parity_scrub()
2743 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in finish_parity_scrub()
2762 pointers[rbio->real_stripes - 1] = kmap_local_paddr(q_paddr); in finish_parity_scrub()
2765 bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors); in finish_parity_scrub()
2769 for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) in finish_parity_scrub()
2770 verify_one_parity_sector(rbio, pointers, sectornr); in finish_parity_scrub()
2785 for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) { in finish_parity_scrub()
2788 paddrs = rbio_stripe_paddrs(rbio, rbio->scrubp, sectornr); in finish_parity_scrub()
2789 ret = rbio_add_io_paddrs(rbio, &bio_list, paddrs, rbio->scrubp, in finish_parity_scrub()
2802 ASSERT_RBIO(rbio->bioc->replace_stripe_src >= 0, rbio); in finish_parity_scrub()
2803 for_each_set_bit(sectornr, pbitmap, rbio->stripe_nsectors) { in finish_parity_scrub()
2806 paddrs = rbio_stripe_paddrs(rbio, rbio->scrubp, sectornr); in finish_parity_scrub()
2807 ret = rbio_add_io_paddrs(rbio, &bio_list, paddrs, rbio->real_stripes, in finish_parity_scrub()
2814 submit_write_bios(rbio, &bio_list); in finish_parity_scrub()
2822 static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe) in is_data_stripe() argument
2824 if (stripe >= 0 && stripe < rbio->nr_data) in is_data_stripe()
2829 static int recover_scrub_rbio(struct btrfs_raid_bio *rbio) in recover_scrub_rbio() argument
2842 pointers = kzalloc_objs(void *, rbio->real_stripes, GFP_NOFS); in recover_scrub_rbio()
2843 unmap_array = kzalloc_objs(void *, rbio->real_stripes, GFP_NOFS); in recover_scrub_rbio()
2849 for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) { in recover_scrub_rbio()
2855 found_errors = get_rbio_vertical_errors(rbio, sector_nr, in recover_scrub_rbio()
2857 if (unlikely(found_errors > rbio->bioc->max_errors)) { in recover_scrub_rbio()
2867 if (is_data_stripe(rbio, faila)) in recover_scrub_rbio()
2872 if (is_data_stripe(rbio, failb)) in recover_scrub_rbio()
2881 if (unlikely(dfail > rbio->bioc->max_errors - 1)) { in recover_scrub_rbio()
2898 if (unlikely(failp != rbio->scrubp)) { in recover_scrub_rbio()
2903 ret = recover_vertical(rbio, sector_nr, pointers, unmap_array); in recover_scrub_rbio()
2913 static int scrub_assemble_read_bios(struct btrfs_raid_bio *rbio) in scrub_assemble_read_bios() argument
2920 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; in scrub_assemble_read_bios()
2922 int sectornr = total_sector_nr % rbio->stripe_nsectors; in scrub_assemble_read_bios()
2923 int stripe = total_sector_nr / rbio->stripe_nsectors; in scrub_assemble_read_bios()
2927 if (!test_bit(sectornr, &rbio->dbitmap)) in scrub_assemble_read_bios()
2935 paddrs = sector_paddrs_in_rbio(rbio, stripe, sectornr, 1); in scrub_assemble_read_bios()
2939 paddrs = rbio_stripe_paddrs(rbio, stripe, sectornr); in scrub_assemble_read_bios()
2944 if (test_bit(rbio_sector_index(rbio, stripe, sectornr), in scrub_assemble_read_bios()
2945 rbio->stripe_uptodate_bitmap)) in scrub_assemble_read_bios()
2948 ret = rbio_add_io_paddrs(rbio, &bio_list, paddrs, stripe, in scrub_assemble_read_bios()
2956 submit_read_wait_bio_list(rbio, &bio_list); in scrub_assemble_read_bios()
2960 static void scrub_rbio(struct btrfs_raid_bio *rbio) in scrub_rbio() argument
2965 ret = alloc_rbio_essential_pages(rbio); in scrub_rbio()
2969 bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors); in scrub_rbio()
2971 ret = scrub_assemble_read_bios(rbio); in scrub_rbio()
2976 ret = recover_scrub_rbio(rbio); in scrub_rbio()
2984 ret = finish_parity_scrub(rbio); in scrub_rbio()
2985 wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0); in scrub_rbio()
2986 for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) { in scrub_rbio()
2989 found_errors = get_rbio_vertical_errors(rbio, sector_nr, NULL, NULL); in scrub_rbio()
2990 if (unlikely(found_errors > rbio->bioc->max_errors)) { in scrub_rbio()
2996 rbio_orig_end_io(rbio, errno_to_blk_status(ret)); in scrub_rbio()
3004 void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio) in raid56_parity_submit_scrub_rbio() argument
3006 if (!lock_stripe_add(rbio)) in raid56_parity_submit_scrub_rbio()
3007 start_async_work(rbio, scrub_rbio_work_locked); in raid56_parity_submit_scrub_rbio()
3017 void raid56_parity_cache_data_folios(struct btrfs_raid_bio *rbio, in raid56_parity_cache_data_folios() argument
3020 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; in raid56_parity_cache_data_folios()
3022 rbio->bioc->full_stripe_logical; in raid56_parity_cache_data_folios()
3035 ret = alloc_rbio_data_pages(rbio); in raid56_parity_cache_data_folios()
3041 ASSERT(offset_in_full_stripe < (rbio->nr_data << BTRFS_STRIPE_LEN_SHIFT)); in raid56_parity_cache_data_folios()
3049 kaddr = kmap_local_page(rbio->stripe_pages[pindex]); in raid56_parity_cache_data_folios()
3060 bitmap_set(rbio->stripe_uptodate_bitmap, in raid56_parity_cache_data_folios()