Lines Matching refs:sctx
52 * How many groups we have for each sctx.
139 struct scrub_ctx *sctx;
351 stripe->sctx = NULL;
395 static void scrub_put_ctx(struct scrub_ctx *sctx);
429 static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
433 if (!sctx)
437 release_scrub_stripe(&sctx->stripes[i]);
439 kvfree(sctx);
442 static void scrub_put_ctx(struct scrub_ctx *sctx)
444 if (refcount_dec_and_test(&sctx->refs))
445 scrub_free_ctx(sctx);
451 struct scrub_ctx *sctx;
454 /* Since sctx has inline 128 stripes, it can go beyond 64K easily. Use
457 sctx = kvzalloc(sizeof(*sctx), GFP_KERNEL);
458 if (!sctx)
460 refcount_set(&sctx->refs, 1);
461 sctx->is_dev_replace = is_dev_replace;
462 sctx->fs_info = fs_info;
463 sctx->extent_path.search_commit_root = 1;
464 sctx->extent_path.skip_locking = 1;
465 sctx->csum_path.search_commit_root = 1;
466 sctx->csum_path.skip_locking = 1;
470 ret = init_scrub_stripe(fs_info, &sctx->stripes[i]);
473 sctx->stripes[i].sctx = sctx;
475 sctx->first_free = 0;
476 atomic_set(&sctx->cancel_req, 0);
478 spin_lock_init(&sctx->stat_lock);
479 sctx->throttle_deadline = 0;
481 mutex_init(&sctx->wr_lock);
484 sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
487 return sctx;
490 scrub_free_ctx(sctx);
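
The hits at lines 429-490 cover the context's lifetime: scrub_setup_ctx() kvzalloc()s the structure (the comment at line 454 notes it can easily exceed 64K, hence kvzalloc()), takes the initial reference with refcount_set(), and scrub_put_ctx() hands the context to scrub_free_ctx() once refcount_dec_and_test() drops the last reference; a failed setup (line 490) calls scrub_free_ctx() directly, since nobody else can hold a reference yet. A minimal userspace sketch of that create/put shape, with C11 atomics standing in for refcount_t, calloc() for kvzalloc(), and hypothetical demo_* names:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_ctx {
	atomic_int refs;                /* plays the role of sctx->refs */
	int is_dev_replace;
};

static void demo_free_ctx(struct demo_ctx *ctx)
{
	if (!ctx)
		return;
	/* releasing per-stripe resources would go here */
	free(ctx);
}

static struct demo_ctx *demo_setup_ctx(int is_dev_replace)
{
	struct demo_ctx *ctx = calloc(1, sizeof(*ctx));

	if (!ctx)
		return NULL;
	atomic_store(&ctx->refs, 1);    /* creator holds the first reference */
	ctx->is_dev_replace = is_dev_replace;
	return ctx;
}

static void demo_put_ctx(struct demo_ctx *ctx)
{
	/* Free only when the last reference is dropped. */
	if (atomic_fetch_sub(&ctx->refs, 1) == 1)
		demo_free_ctx(ctx);
}

int main(void)
{
	struct demo_ctx *ctx = demo_setup_ctx(0);

	if (!ctx)
		return 1;
	printf("is_dev_replace=%d\n", ctx->is_dev_replace);
	demo_put_ctx(ctx);              /* last put frees the context */
	return 0;
}
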
666 static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical)
671 if (!btrfs_is_zoned(sctx->fs_info))
674 if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical))
677 if (sctx->write_pointer < physical) {
678 length = physical - sctx->write_pointer;
680 ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev,
681 sctx->write_pointer, length);
683 sctx->write_pointer = physical;
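
fill_writer_pointer_gap() (lines 666-683) is a zoned-device detail: on a sequential-required zone the target only accepts writes at its write pointer, so if the physical position scrub wants to write lies beyond sctx->write_pointer, the gap is first padded with zeroes and the pointer advanced. A userspace sketch of that check/pad/advance step; demo_zeroout() is a hypothetical stand-in for btrfs_zoned_issue_zeroout():

#include <stdint.h>
#include <stdio.h>

struct demo_target {
	uint64_t write_pointer;         /* next position the zone will accept */
};

static int demo_zeroout(uint64_t start, uint64_t len)
{
	printf("zeroout [%llu, +%llu)\n",
	       (unsigned long long)start, (unsigned long long)len);
	return 0;
}

/* Pad the zone up to @physical so the next real write lands there. */
static int demo_fill_gap(struct demo_target *tgt, uint64_t physical)
{
	int ret = 0;

	if (tgt->write_pointer < physical) {
		ret = demo_zeroout(tgt->write_pointer,
				   physical - tgt->write_pointer);
		if (!ret)
			tgt->write_pointer = physical;
	}
	return ret;
}

int main(void)
{
	struct demo_target tgt = { .write_pointer = 4096 };

	demo_fill_gap(&tgt, 65536);     /* zero 4096..65536, then advance */
	printf("write_pointer=%llu\n", (unsigned long long)tgt.write_pointer);
	return 0;
}

The same pointer is kept in sync after each stripe by sync_write_pointer_for_zoned() (lines 1581-1599) under sctx->wr_lock.
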
972 static void scrub_stripe_report_errors(struct scrub_ctx *sctx,
978 struct btrfs_fs_info *fs_info = sctx->fs_info;
1100 spin_lock(&sctx->stat_lock);
1101 sctx->stat.data_extents_scrubbed += stripe->nr_data_extents;
1102 sctx->stat.tree_extents_scrubbed += stripe->nr_meta_extents;
1103 sctx->stat.data_bytes_scrubbed += nr_data_sectors << fs_info->sectorsize_bits;
1104 sctx->stat.tree_bytes_scrubbed += nr_meta_sectors << fs_info->sectorsize_bits;
1105 sctx->stat.no_csum += nr_nodatacsum_sectors;
1106 sctx->stat.read_errors += errors->nr_io_errors;
1107 sctx->stat.csum_errors += errors->nr_csum_errors;
1108 sctx->stat.verify_errors += errors->nr_meta_errors +
1110 sctx->stat.uncorrectable_errors +=
1112 sctx->stat.corrected_errors += nr_repaired_sectors;
1113 spin_unlock(&sctx->stat_lock);
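
Lines 1100-1113 show scrub_stripe_report_errors() folding one stripe's results into the shared counters in sctx->stat, always under sctx->stat_lock; every other stat update in the listing (malloc_errors, super_errors, last_physical) takes the same lock. The same pattern reduced to a pthread mutex and two of the counters, with made-up demo_* names:

#include <pthread.h>
#include <stdio.h>

struct demo_stat {
	unsigned long long data_bytes_scrubbed;
	unsigned long long csum_errors;
};

struct demo_ctx {
	pthread_mutex_t stat_lock;
	struct demo_stat stat;
};

static void demo_report(struct demo_ctx *ctx, unsigned long long bytes,
			unsigned long long csum_errors)
{
	/* Take the lock so concurrent stripe workers don't lose updates. */
	pthread_mutex_lock(&ctx->stat_lock);
	ctx->stat.data_bytes_scrubbed += bytes;
	ctx->stat.csum_errors += csum_errors;
	pthread_mutex_unlock(&ctx->stat_lock);
}

int main(void)
{
	struct demo_ctx ctx = { .stat_lock = PTHREAD_MUTEX_INITIALIZER };

	demo_report(&ctx, 1 << 16, 1);
	printf("bytes=%llu csum_errors=%llu\n",
	       ctx.stat.data_bytes_scrubbed, ctx.stat.csum_errors);
	return 0;
}
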
1116 static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *stripe,
1135 struct scrub_ctx *sctx = stripe->sctx;
1136 struct btrfs_fs_info *fs_info = sctx->fs_info;
1209 if (!sctx->readonly && !bitmap_empty(&repaired, stripe->nr_sectors)) {
1211 btrfs_repair_one_zone(fs_info, sctx->stripes[0].bg->start);
1213 scrub_write_sectors(sctx, stripe, repaired, false);
1218 scrub_stripe_report_errors(sctx, stripe, &errors);
1280 static void scrub_submit_write_bio(struct scrub_ctx *sctx,
1284 struct btrfs_fs_info *fs_info = sctx->fs_info;
1289 fill_writer_pointer_gap(sctx, stripe->physical + bio_off);
1306 sctx->write_pointer += bio_len;
1322 static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *stripe,
1335 scrub_submit_write_bio(sctx, stripe, bbio, dev_replace);
1348 scrub_submit_write_bio(sctx, stripe, bbio, dev_replace);
1355 static void scrub_throttle_dev_io(struct scrub_ctx *sctx, struct btrfs_device *device,
1377 if (sctx->throttle_deadline == 0) {
1378 sctx->throttle_deadline = ktime_add_ms(now, time_slice / div);
1379 sctx->throttle_sent = 0;
1383 if (ktime_before(now, sctx->throttle_deadline)) {
1385 sctx->throttle_sent += bio_size;
1386 if (sctx->throttle_sent <= div_u64(bwlimit, div))
1390 delta = ktime_ms_delta(sctx->throttle_deadline, now);
1404 sctx->throttle_deadline = 0;
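
scrub_throttle_dev_io() (lines 1355-1404) rate-limits device I/O by time slices: when no slice is active it records a deadline and clears throttle_sent (lines 1377-1379), each bio is then charged against a per-slice budget of bwlimit/div (lines 1385-1386), and once the budget is spent the caller sleeps out the remainder of the slice before the deadline is reset (lines 1390, 1404). The sketch below reproduces that slice/budget/sleep loop in plain POSIX C; the four-slices-per-second split, the blocking clock_nanosleep(), and all demo_* names are assumptions of the demo rather than kernel values.

#define _POSIX_C_SOURCE 200112L
#include <stdint.h>
#include <time.h>

struct demo_throttle {
	uint64_t bwlimit;               /* bytes per second allowed */
	uint64_t sent;                  /* bytes charged to the current slice */
	struct timespec deadline;       /* end of the current slice; 0 = none */
};

static void demo_throttle_io(struct demo_throttle *t, uint64_t bio_size)
{
	const unsigned int div = 4;     /* slices per second (demo value) */
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);

	/* No slice active yet: open one ending 1/div seconds from now. */
	if (t->deadline.tv_sec == 0 && t->deadline.tv_nsec == 0) {
		t->deadline = now;
		t->deadline.tv_nsec += 1000000000L / div;
		if (t->deadline.tv_nsec >= 1000000000L) {
			t->deadline.tv_sec++;
			t->deadline.tv_nsec -= 1000000000L;
		}
		t->sent = 0;
	}

	t->sent += bio_size;
	if (t->sent <= t->bwlimit / div)
		return;                 /* still within this slice's budget */

	/* Budget spent: sleep until the deadline, then start a fresh slice. */
	clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &t->deadline, NULL);
	t->deadline.tv_sec = 0;
	t->deadline.tv_nsec = 0;
}

int main(void)
{
	struct demo_throttle t = { .bwlimit = 1 << 20 };        /* 1 MiB/s */

	for (int i = 0; i < 8; i++)
		demo_throttle_io(&t, 128 << 10);                /* 128 KiB bios */
	return 0;
}

In the real function the deadline and byte count live in the context itself (throttle_deadline, throttle_sent), so the limit applies across everything the scrub issues, not per caller.
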
1581 static int sync_write_pointer_for_zoned(struct scrub_ctx *sctx, u64 logical,
1584 struct btrfs_fs_info *fs_info = sctx->fs_info;
1590 mutex_lock(&sctx->wr_lock);
1591 if (sctx->write_pointer < physical_end) {
1592 ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical,
1594 sctx->write_pointer);
1598 mutex_unlock(&sctx->wr_lock);
1599 btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical);
1857 static void scrub_submit_initial_read(struct scrub_ctx *sctx,
1860 struct btrfs_fs_info *fs_info = sctx->fs_info;
1887 if (sctx->is_dev_replace &&
1918 static void submit_initial_group_read(struct scrub_ctx *sctx,
1927 scrub_throttle_dev_io(sctx, sctx->stripes[0].dev,
1931 struct scrub_stripe *stripe = &sctx->stripes[first_slot + i];
1935 scrub_submit_initial_read(sctx, stripe);
1940 static int flush_scrub_stripes(struct scrub_ctx *sctx)
1942 struct btrfs_fs_info *fs_info = sctx->fs_info;
1944 const int nr_stripes = sctx->cur_stripe;
1950 ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &sctx->stripes[0].state));
1956 submit_initial_group_read(sctx, first_slot, nr_stripes - first_slot);
1960 stripe = &sctx->stripes[i];
1967 if (sctx->is_dev_replace) {
1973 if (stripe_has_metadata_error(&sctx->stripes[i])) {
1983 stripe = &sctx->stripes[i];
1990 scrub_write_sectors(sctx, stripe, good, true);
1996 stripe = &sctx->stripes[i];
1999 spin_lock(&sctx->stat_lock);
2000 sctx->stat.last_physical = stripe->physical + stripe_length(stripe);
2001 spin_unlock(&sctx->stat_lock);
2005 sctx->cur_stripe = 0;
2014 static int queue_scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *bg,
2026 ASSERT(sctx->cur_stripe < SCRUB_TOTAL_STRIPES);
2031 stripe = &sctx->stripes[sctx->cur_stripe];
2033 ret = scrub_find_fill_first_stripe(bg, &sctx->extent_path,
2034 &sctx->csum_path, dev, physical,
2040 sctx->cur_stripe++;
2043 if (sctx->cur_stripe % SCRUB_STRIPES_PER_GROUP == 0) {
2044 const int first_slot = sctx->cur_stripe - SCRUB_STRIPES_PER_GROUP;
2046 submit_initial_group_read(sctx, first_slot, SCRUB_STRIPES_PER_GROUP);
2050 if (sctx->cur_stripe == SCRUB_TOTAL_STRIPES)
2051 return flush_scrub_stripes(sctx);
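
queue_scrub_stripe() (lines 2014-2051) and flush_scrub_stripes() (lines 1940-2005) implement a fixed-size batch: stripes are filled into sctx->stripes[] one slot at a time, every time cur_stripe crosses a SCRUB_STRIPES_PER_GROUP boundary that group's reads are submitted (line 2046), and reaching SCRUB_TOTAL_STRIPES (or the end of the range) flushes the remainder and resets cur_stripe to 0 (line 2005). The skeleton of that bookkeeping, with hypothetical demo_* names, made-up group sizes, and the actual I/O replaced by a printf:

#include <stdio.h>

#define DEMO_STRIPES_PER_GROUP 8
#define DEMO_TOTAL_STRIPES     (DEMO_STRIPES_PER_GROUP * 16)

struct demo_ctx {
	int cur_stripe;                 /* next free slot in the array */
	long stripes[DEMO_TOTAL_STRIPES];
};

/* Stand-in for submit_initial_group_read(): kick off one group of reads. */
static void demo_submit_group(struct demo_ctx *ctx, int first_slot, int nr)
{
	(void)ctx;                      /* the real function reads ctx->stripes[] */
	printf("submit slots [%d, %d)\n", first_slot, first_slot + nr);
}

/* Stand-in for flush_scrub_stripes(): submit the partial tail group and reset. */
static void demo_flush(struct demo_ctx *ctx)
{
	int first_slot = ctx->cur_stripe - ctx->cur_stripe % DEMO_STRIPES_PER_GROUP;

	if (ctx->cur_stripe > first_slot)
		demo_submit_group(ctx, first_slot, ctx->cur_stripe - first_slot);
	/* ... wait for completions, repair, report ... */
	ctx->cur_stripe = 0;
}

/* Stand-in for queue_scrub_stripe(): fill one slot, submit full groups. */
static void demo_queue(struct demo_ctx *ctx, long stripe)
{
	ctx->stripes[ctx->cur_stripe++] = stripe;

	if (ctx->cur_stripe % DEMO_STRIPES_PER_GROUP == 0)
		demo_submit_group(ctx, ctx->cur_stripe - DEMO_STRIPES_PER_GROUP,
				  DEMO_STRIPES_PER_GROUP);
	if (ctx->cur_stripe == DEMO_TOTAL_STRIPES)
		demo_flush(ctx);
}

int main(void)
{
	struct demo_ctx ctx = { 0 };

	for (long i = 0; i < 20; i++)
		demo_queue(&ctx, i);
	demo_flush(&ctx);               /* flush the partial tail, as scrub does */
	return 0;
}

In the real flush the context then waits for completions, optionally writes repaired or still-good copies to the replace target (lines 1967-1990), and records last_physical (line 2000) before the counter is reset.
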
2055 static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
2062 struct btrfs_fs_info *fs_info = sctx->fs_info;
2075 ASSERT(sctx->raid56_data_stripes);
2092 stripe = &sctx->raid56_data_stripes[i];
2122 stripe = &sctx->raid56_data_stripes[i];
2134 stripe = &sctx->raid56_data_stripes[i];
2135 scrub_submit_initial_read(sctx, stripe);
2138 stripe = &sctx->raid56_data_stripes[i];
2144 ASSERT(!btrfs_is_zoned(sctx->fs_info));
2157 stripe = &sctx->raid56_data_stripes[i];
2203 stripe = &sctx->raid56_data_stripes[i];
2228 static int scrub_simple_mirror(struct scrub_ctx *sctx,
2234 struct btrfs_fs_info *fs_info = sctx->fs_info;
2249 atomic_read(&sctx->cancel_req)) {
2267 ret = queue_scrub_stripe(sctx, bg, device, mirror_num,
2272 spin_lock(&sctx->stat_lock);
2273 sctx->stat.last_physical = physical + logical_length;
2274 spin_unlock(&sctx->stat_lock);
2328 static int scrub_simple_stripe(struct scrub_ctx *sctx,
2348 ret = scrub_simple_mirror(sctx, bg, cur_logical,
2361 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2367 struct btrfs_fs_info *fs_info = sctx->fs_info;
2384 ASSERT(sctx->extent_path.nodes[0] == NULL);
2388 if (sctx->is_dev_replace &&
2389 btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) {
2390 mutex_lock(&sctx->wr_lock);
2391 sctx->write_pointer = physical;
2392 mutex_unlock(&sctx->wr_lock);
2397 ASSERT(sctx->raid56_data_stripes == NULL);
2399 sctx->raid56_data_stripes = kcalloc(nr_data_stripes(map),
2402 if (!sctx->raid56_data_stripes) {
2408 &sctx->raid56_data_stripes[i]);
2411 sctx->raid56_data_stripes[i].bg = bg;
2412 sctx->raid56_data_stripes[i].sctx = sctx;
2432 ret = scrub_simple_mirror(sctx, bg, bg->start, bg->length,
2439 ret = scrub_simple_stripe(sctx, bg, map, scrub_dev, stripe_index);
2468 ret = scrub_raid56_parity_stripe(sctx, scrub_dev, bg,
2470 spin_lock(&sctx->stat_lock);
2471 sctx->stat.last_physical = min(physical + BTRFS_STRIPE_LEN,
2473 spin_unlock(&sctx->stat_lock);
2487 ret = scrub_simple_mirror(sctx, bg, logical, BTRFS_STRIPE_LEN,
2494 spin_lock(&sctx->stat_lock);
2495 sctx->stat.last_physical = physical;
2496 spin_unlock(&sctx->stat_lock);
2499 ret2 = flush_scrub_stripes(sctx);
2502 btrfs_release_path(&sctx->extent_path);
2503 btrfs_release_path(&sctx->csum_path);
2505 if (sctx->raid56_data_stripes) {
2507 release_scrub_stripe(&sctx->raid56_data_stripes[i]);
2508 kfree(sctx->raid56_data_stripes);
2509 sctx->raid56_data_stripes = NULL;
2512 if (sctx->is_dev_replace && ret >= 0) {
2515 ret2 = sync_write_pointer_for_zoned(sctx,
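
For RAID56 chunks, scrub_stripe() (lines 2361-2515) additionally allocates sctx->raid56_data_stripes with one entry per data stripe (line 2399), points each entry back at its block group and owning context (lines 2411-2412), and on exit releases every entry, kfree()s the array, and NULLs the pointer (lines 2505-2509) so the next chunk starts clean. A sketch of that allocate/backlink/release shape with hypothetical demo_* types, the per-stripe resources reduced to a single malloc'd buffer:

#include <stdlib.h>

struct demo_ctx;

struct demo_stripe {
	struct demo_ctx *ctx;           /* back-pointer, like stripe->sctx */
	void *pages;                    /* per-stripe resources */
};

struct demo_ctx {
	struct demo_stripe *raid56_data_stripes;
};

static int demo_init_stripe(struct demo_stripe *stripe)
{
	stripe->pages = malloc(4096);
	return stripe->pages ? 0 : -1;
}

static void demo_release_stripe(struct demo_stripe *stripe)
{
	free(stripe->pages);
	stripe->pages = NULL;
}

static int demo_scrub_raid56_chunk(struct demo_ctx *ctx, int nr_data_stripes)
{
	int ret = 0;

	ctx->raid56_data_stripes = calloc(nr_data_stripes,
					  sizeof(*ctx->raid56_data_stripes));
	if (!ctx->raid56_data_stripes)
		return -1;

	for (int i = 0; i < nr_data_stripes; i++) {
		ret = demo_init_stripe(&ctx->raid56_data_stripes[i]);
		if (ret < 0)
			goto out;
		/* Each stripe keeps a back-pointer to its owning context. */
		ctx->raid56_data_stripes[i].ctx = ctx;
	}

	/* ... scrub each data stripe, then the parity stripe ... */

out:
	/* Tear the array down so the next chunk starts from scratch. */
	for (int i = 0; i < nr_data_stripes; i++)
		demo_release_stripe(&ctx->raid56_data_stripes[i]);
	free(ctx->raid56_data_stripes);
	ctx->raid56_data_stripes = NULL;
	return ret;
}

int main(void)
{
	struct demo_ctx ctx = { 0 };

	return demo_scrub_raid56_chunk(&ctx, 3) ? 1 : 0;
}
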
2526 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
2532 struct btrfs_fs_info *fs_info = sctx->fs_info;
2558 ret = scrub_stripe(sctx, bg, map, scrub_dev, i);
2585 int scrub_enumerate_chunks(struct scrub_ctx *sctx,
2590 struct btrfs_fs_info *fs_info = sctx->fs_info;
2696 if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) {
2760 ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
2761 if (!ret && sctx->is_dev_replace) {
2773 } else if (ret == -ENOSPC && !sctx->is_dev_replace &&
2810 if (sctx->is_dev_replace) {
2822 ret = scrub_chunk(sctx, cache, scrub_dev, found_key.offset,
2824 if (sctx->is_dev_replace &&
2861 if (sctx->is_dev_replace &&
2866 if (sctx->stat.malloc_errors > 0) {
2880 static int scrub_one_super(struct scrub_ctx *sctx, struct btrfs_device *dev,
2883 struct btrfs_fs_info *fs_info = sctx->fs_info;
2909 static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
2917 struct btrfs_fs_info *fs_info = sctx->fs_info;
2924 spin_lock(&sctx->stat_lock);
2925 sctx->stat.malloc_errors++;
2926 spin_unlock(&sctx->stat_lock);
2942 spin_lock(&sctx->stat_lock);
2943 sctx->stat.super_errors++;
2944 spin_unlock(&sctx->stat_lock);
2954 ret = scrub_one_super(sctx, scrub_dev, page, bytenr, gen);
2956 spin_lock(&sctx->stat_lock);
2957 sctx->stat.super_errors++;
2958 spin_unlock(&sctx->stat_lock);
3019 struct scrub_ctx *sctx;
3040 sctx = scrub_setup_ctx(fs_info, is_dev_replace);
3041 if (IS_ERR(sctx))
3042 return PTR_ERR(sctx);
3088 sctx->readonly = readonly;
3089 dev->scrub_ctx = sctx;
3113 spin_lock(&sctx->stat_lock);
3114 old_super_errors = sctx->stat.super_errors;
3115 spin_unlock(&sctx->stat_lock);
3123 ret = scrub_supers(sctx, dev);
3126 spin_lock(&sctx->stat_lock);
3132 if (sctx->stat.super_errors > old_super_errors && !sctx->readonly)
3134 spin_unlock(&sctx->stat_lock);
3138 ret = scrub_enumerate_chunks(sctx, dev, start, end);
3145 memcpy(progress, &sctx->stat, sizeof(*progress));
3156 scrub_put_ctx(sctx);
3181 scrub_free_ctx(sctx);
3231 struct scrub_ctx *sctx;
3234 sctx = dev->scrub_ctx;
3235 if (!sctx) {
3239 atomic_inc(&sctx->cancel_req);
3256 struct scrub_ctx *sctx = NULL;
3261 sctx = dev->scrub_ctx;
3262 if (sctx)
3263 memcpy(progress, &sctx->stat, sizeof(*progress));
3266 return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
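
The final two clusters are the control plane: the cancel path (lines 3231-3239) only bumps sctx->cancel_req, which the scrub loop polls via atomic_read() (line 2249) and stops on its own, and the progress query (lines 3256-3266) copies sctx->stat out, returning -ENODEV when the device is unknown and -ENOTCONN when no scrub is attached to it. A compact userspace sketch of that flag-plus-query protocol; the demo_* names and the error-handling details are inventions of the sketch:

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

struct demo_stat {
	unsigned long long data_bytes_scrubbed;
};

struct demo_ctx {
	atomic_int cancel_req;          /* polled by the scrub loop */
	struct demo_stat stat;
};

struct demo_device {
	struct demo_ctx *scrub_ctx;     /* NULL when no scrub is running */
};

/* Ask a running scrub to stop at its next cancellation check. */
static int demo_cancel(struct demo_device *dev)
{
	if (!dev)
		return -ENODEV;
	if (!dev->scrub_ctx)
		return -ENOTCONN;
	atomic_fetch_add(&dev->scrub_ctx->cancel_req, 1);
	return 0;
}

/* Copy out current progress, mirroring the three-way return at line 3266. */
static int demo_progress(struct demo_device *dev, struct demo_stat *progress)
{
	if (!dev)
		return -ENODEV;
	if (!dev->scrub_ctx)
		return -ENOTCONN;
	memcpy(progress, &dev->scrub_ctx->stat, sizeof(*progress));
	return 0;
}

int main(void)
{
	struct demo_ctx ctx = { .stat = { .data_bytes_scrubbed = 123 } };
	struct demo_device dev = { .scrub_ctx = &ctx };
	struct demo_stat p;

	demo_cancel(&dev);
	if (!demo_progress(&dev, &p))
		printf("cancel_req=%d bytes=%llu\n",
		       atomic_load(&ctx.cancel_req), p.data_bytes_scrubbed);
	return 0;
}
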