Lines Matching refs:log
18 #include "raid5-log.h"
28 * log->max_free_space is min(1/4 disk size, 10G reclaimable space).
30 * In write through mode, the reclaim runs every log->max_free_space.
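A minimal userspace sketch of this sizing rule (not kernel code). The constant values are assumed to match RECLAIM_MAX_FREE_SPACE and RECLAIM_MAX_FREE_SPACE_SHIFT used in r5l_load_log() near the end of this listing: 10GiB expressed in 512-byte sectors, and a shift of 2 for "1/4 disk size".

#include <stdio.h>
#include <stdint.h>

#define RECLAIM_MAX_FREE_SPACE       (10 * 1024 * 1024 * 2) /* 10GiB in sectors */
#define RECLAIM_MAX_FREE_SPACE_SHIFT 2                      /* device_size / 4 */

static uint64_t max_free_space(uint64_t device_size) /* both in sectors */
{
    uint64_t v = device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
    return v > RECLAIM_MAX_FREE_SPACE ? RECLAIM_MAX_FREE_SPACE : v;
}

int main(void)
{
    /* 20GiB journal: 1/4 is 5GiB, under the 10GiB cap */
    printf("%llu\n", (unsigned long long)max_free_space(20ULL * 1024 * 1024 * 2));
    /* 80GiB journal: 1/4 is 20GiB, so capped at 10GiB */
    printf("%llu\n", (unsigned long long)max_free_space(80ULL * 1024 * 1024 * 2));
    return 0;
}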
70 * writes are committed from the log device. Therefore, a stripe in
72 * - write to log device
87 sector_t device_size; /* log device size, round to
92 sector_t last_checkpoint; /* log tail. where recovery scan
94 u64 last_cp_seq; /* log tail sequence */
96 sector_t log_start; /* log head. where new data appends */
97 u64 seq; /* log head sequence */
107 * written to the log */
109 * written to the log but not yet written
111 struct list_head flushing_ios; /* io_units which are waiting for log
113 struct list_head finished_ios; /* io_units which settle down in log disk */
133 struct list_head no_space_stripes; /* pending stripes, log has no space */
205 * unit is written to log disk with normal write, as we always flush log disk
210 struct r5l_log *log;
221 struct list_head log_sibling; /* log->running_ios */
244 IO_UNIT_IO_START = 1, /* io_unit bio start writing to log,
246 IO_UNIT_IO_END = 2, /* io_unit bio finish writing to log */
250 bool r5c_is_writeback(struct r5l_log *log)
252 return (log != NULL &&
253 log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK);
256 static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
259 if (start >= log->device_size)
260 start = start - log->device_size;
264 static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
270 return end + log->device_size - start;
273 static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
277 used_size = r5l_ring_distance(log, log->last_checkpoint,
278 log->log_start);
280 return log->device_size > used_size + size;
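The ring arithmetic matched above (r5l_ring_add, r5l_ring_distance, r5l_has_free_space) can be modeled in a few lines of userspace C. This sketch assumes sector_t behaves as a 64-bit unsigned and that inc is always smaller than the device size, so a single wrap suffices:

#include <assert.h>
#include <stdint.h>

typedef uint64_t sector_t;

static sector_t ring_add(sector_t device_size, sector_t start, sector_t inc)
{
    start += inc;
    if (start >= device_size)  /* single wrap: assumes inc < device_size */
        start -= device_size;
    return start;
}

static sector_t ring_distance(sector_t device_size, sector_t start, sector_t end)
{
    return end >= start ? end - start : end + device_size - start;
}

int main(void)
{
    sector_t size = 1024;                         /* toy device of 1024 sectors */
    assert(ring_add(size, 1000, 48) == 24);       /* head wraps past the end */
    assert(ring_distance(size, 1000, 24) == 48);  /* tail-to-head across the wrap */
    /* free-space check mirrors r5l_has_free_space(): */
    sector_t used = ring_distance(size, /*last_checkpoint*/ 1000, /*log_start*/ 24);
    assert(size > used + 512);                    /* room for a 512-sector reservation */
    return 0;
}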
320 void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
326 struct r5l_log *log = READ_ONCE(conf->log);
328 if (!r5c_is_writeback(log))
344 r5l_wake_reclaim(log, 0);
353 struct r5l_log *log = READ_ONCE(conf->log);
355 if (!r5c_is_writeback(log))
365 r5l_wake_reclaim(log, 0);
369 * Total log space (in sectors) needed to flush all data in cache
371 * To avoid deadlock due to log space, it is necessary to reserve log
372 * space to flush critical stripes (stripes that occupy log space near

373 * last_checkpoint). This function helps check how much log space is
376 * To reduce log space requirements, two mechanisms are used to give cache
398 struct r5l_log *log = READ_ONCE(conf->log);
400 if (!r5c_is_writeback(log))
404 ((conf->max_degraded + 1) * atomic_read(&log->stripe_in_journal_count) +
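A worked instance of the reservation arithmetic. Only the first term of the expression is matched above; the second term, and the raid_disks/group_cnt names, are assumptions based on r5c_log_required_to_flush_cache() in raid5-cache.c. BLOCK_SECTORS is taken to be 8, i.e. one 4KiB block in 512-byte sectors:

#include <stdio.h>

#define BLOCK_SECTORS 8 /* one 4KiB block in 512B sectors */

int main(void)
{
    int max_degraded = 1;              /* raid5 */
    int raid_disks = 4;
    int group_cnt = 0;                 /* no worker groups configured */
    int stripe_in_journal_count = 100; /* stripes currently cached in the journal */

    long required = (long)BLOCK_SECTORS *
        ((max_degraded + 1) * stripe_in_journal_count +
         (raid_disks - max_degraded) * (group_cnt + 1));

    /* (2 * 100 + 3 * 1) * 8 = 1624 sectors (~812KiB) must stay reclaimable */
    printf("required: %ld sectors\n", required);
    return 0;
}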
409 * evaluate log space usage and update R5C_LOG_TIGHT and R5C_LOG_CRITICAL
411 * R5C_LOG_TIGHT is set when free space on the log device is less than 3x of
412 * reclaim_required_space. R5C_LOG_CRITICAL is set when free space on the log
415 static inline void r5c_update_log_state(struct r5l_log *log)
417 struct r5conf *conf = log->rdev->mddev->private;
422 if (!r5c_is_writeback(log))
425 free_space = r5l_ring_distance(log, log->log_start,
426 log->last_checkpoint);
441 r5l_wake_reclaim(log, 0);
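A sketch of the state classification performed by r5c_update_log_state(), with the thresholds taken from the comment above: TIGHT below 3x the required reclaim space, CRITICAL below 2x (so CRITICAL implies TIGHT). Userspace model; the kernel sets/clears bits in conf->cache_state instead:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct log_state { bool tight, critical; };

static struct log_state classify(uint64_t free_space, uint64_t reclaim_required)
{
    struct log_state s;
    s.critical = free_space < 2 * reclaim_required;
    s.tight    = free_space < 3 * reclaim_required;
    return s;
}

int main(void)
{
    struct log_state s = classify(/*free*/ 5000, /*required*/ 2000);
    /* 5000 < 6000 -> tight; 5000 >= 4000 -> not critical */
    printf("tight=%d critical=%d\n", s.tight, s.critical);
    return 0;
}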
451 struct r5l_log *log = READ_ONCE(conf->log);
453 BUG_ON(!r5c_is_writeback(log));
489 * log device. This is called from r5l_log_endio() or r5l_log_flush_endio().
493 struct r5l_log *log = READ_ONCE(sh->raid_conf->log);
495 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
526 static void r5l_log_run_stripes(struct r5l_log *log)
530 lockdep_assert_held(&log->io_list_lock);
532 list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
537 list_move_tail(&io->log_sibling, &log->finished_ios);
542 static void r5l_move_to_end_ios(struct r5l_log *log)
546 lockdep_assert_held(&log->io_list_lock);
548 list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
552 list_move_tail(&io->log_sibling, &log->io_end_ios);
561 struct r5l_log *log = io->log;
567 md_error(log->rdev->mddev, log->rdev);
570 mempool_free(io->meta_page, &log->meta_pool);
572 spin_lock_irqsave(&log->io_list_lock, flags);
584 if (log->need_cache_flush && !list_empty(&io->stripe_list))
585 r5l_move_to_end_ios(log);
587 r5l_log_run_stripes(log);
588 if (!list_empty(&log->running_ios)) {
593 io_deferred = list_first_entry(&log->running_ios,
596 schedule_work(&log->deferred_io_work);
599 spin_unlock_irqrestore(&log->io_list_lock, flags);
601 if (log->need_cache_flush)
602 md_wakeup_thread(log->rdev->mddev->thread);
623 static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
627 spin_lock_irqsave(&log->io_list_lock, flags);
629 spin_unlock_irqrestore(&log->io_list_lock, flags);
660 struct r5l_log *log = container_of(work, struct r5l_log,
665 spin_lock_irqsave(&log->io_list_lock, flags);
666 if (!list_empty(&log->running_ios)) {
667 io = list_first_entry(&log->running_ios, struct r5l_io_unit,
674 spin_unlock_irqrestore(&log->io_list_lock, flags);
676 r5l_do_submit_io(log, io);
681 struct r5l_log *log = container_of(work, struct r5l_log,
683 struct mddev *mddev = log->rdev->mddev;
686 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
693 !READ_ONCE(conf->log) ||
696 log = READ_ONCE(conf->log);
697 if (log) {
699 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
704 static void r5l_submit_current_io(struct r5l_log *log)
706 struct r5l_io_unit *io = log->current_io;
717 crc = crc32c(log->uuid_checksum, block, PAGE_SIZE);
720 log->current_io = NULL;
721 spin_lock_irqsave(&log->io_list_lock, flags);
723 if (io != list_first_entry(&log->running_ios,
729 spin_unlock_irqrestore(&log->io_list_lock, flags);
731 r5l_do_submit_io(log, io);
734 static struct bio *r5l_bio_alloc(struct r5l_log *log)
736 struct bio *bio = bio_alloc_bioset(log->rdev->bdev, BIO_MAX_VECS,
737 REQ_OP_WRITE, GFP_NOIO, &log->bs);
739 bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;
744 static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io)
746 log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);
748 r5c_update_log_state(log);
750 * If we filled up the log device, start from the beginning again,
753 * Note: for this to work properly the log size needs to be a multiple
756 if (log->log_start == 0)
759 io->log_end = log->log_start;
762 static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
767 io = mempool_alloc(&log->io_pool, GFP_ATOMIC);
772 io->log = log;
778 io->meta_page = mempool_alloc(&log->meta_pool, GFP_NOIO);
783 block->seq = cpu_to_le64(log->seq);
784 block->position = cpu_to_le64(log->log_start);
786 io->log_start = log->log_start;
788 io->seq = log->seq++;
790 io->current_bio = r5l_bio_alloc(log);
795 r5_reserve_log_entry(log, io);
797 spin_lock_irq(&log->io_list_lock);
798 list_add_tail(&io->log_sibling, &log->running_ios);
799 spin_unlock_irq(&log->io_list_lock);
804 static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size)
806 if (log->current_io &&
807 log->current_io->meta_offset + payload_size > PAGE_SIZE)
808 r5l_submit_current_io(log);
810 if (!log->current_io) {
811 log->current_io = r5l_new_meta(log);
812 if (!log->current_io)
819 static void r5l_append_payload_meta(struct r5l_log *log, u16 type,
824 struct r5l_io_unit *io = log->current_io;
841 static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
843 struct r5l_io_unit *io = log->current_io;
848 io->current_bio = r5l_bio_alloc(log);
856 r5_reserve_log_entry(log, io);
859 static void r5l_append_flush_payload(struct r5l_log *log, sector_t sect)
861 struct mddev *mddev = log->rdev->mddev;
875 mutex_lock(&log->io_mutex);
878 if (r5l_get_meta(log, meta_size)) {
879 mutex_unlock(&log->io_mutex);
884 io = log->current_io;
896 mutex_unlock(&log->io_mutex);
899 static int r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
913 ret = r5l_get_meta(log, meta_size);
917 io = log->current_io;
929 log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) {
937 r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
940 r5l_append_payload_page(log, sh->dev[i].page);
944 r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
947 r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
948 r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
950 r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
953 r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
961 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
967 spin_lock_irq(&log->stripe_in_journal_lock);
969 &log->stripe_in_journal_list);
970 spin_unlock_irq(&log->stripe_in_journal_lock);
971 atomic_inc(&log->stripe_in_journal_count);
977 static inline void r5l_add_no_space_stripe(struct r5l_log *log,
980 spin_lock(&log->no_space_stripes_lock);
981 list_add_tail(&sh->log_list, &log->no_space_stripes);
982 spin_unlock(&log->no_space_stripes_lock);
987 * data from log to raid disks), so we shouldn't wait for reclaim here
989 int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
999 if (!log)
1004 /* the stripe is written to the log; we start writing it to raid */
1023 sh->dev[i].log_checksum = crc32c(log->uuid_checksum,
1038 mutex_lock(&log->io_mutex);
1042 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
1043 if (!r5l_has_free_space(log, reserve)) {
1044 r5l_add_no_space_stripe(log, sh);
1047 ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
1049 spin_lock_irq(&log->io_list_lock);
1051 &log->no_mem_stripes);
1052 spin_unlock_irq(&log->io_list_lock);
1057 * log space critical, do not process stripes that are
1062 r5l_add_no_space_stripe(log, sh);
1065 } else if (!r5l_has_free_space(log, reserve)) {
1066 if (sh->log_start == log->last_checkpoint)
1069 r5l_add_no_space_stripe(log, sh);
1071 ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
1073 spin_lock_irq(&log->io_list_lock);
1075 &log->no_mem_stripes);
1076 spin_unlock_irq(&log->io_list_lock);
1081 mutex_unlock(&log->io_mutex);
1083 r5l_wake_reclaim(log, reserve);
1087 void r5l_write_stripe_run(struct r5l_log *log)
1089 if (!log)
1091 mutex_lock(&log->io_mutex);
1092 r5l_submit_current_io(log);
1093 mutex_unlock(&log->io_mutex);
1096 int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
1098 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
1101 * we flush log disk cache first, then write stripe data to
1102 * raid disks. So if bio is finished, the log disk cache is
1104 * the bio from log disk, so we don't need to flush again
1114 mutex_lock(&log->io_mutex);
1115 r5l_get_meta(log, 0);
1116 bio_list_add(&log->current_io->flush_barriers, bio);
1117 log->current_io->has_flush = 1;
1118 log->current_io->has_null_flush = 1;
1119 atomic_inc(&log->current_io->pending_stripe);
1120 r5l_submit_current_io(log);
1121 mutex_unlock(&log->io_mutex);
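A model of the two flush paths described above (userspace sketch; the real decision lives in r5l_handle_flush_request(), and the "empty bio" test here is a simplification). In write-through mode the log disk cache is flushed before data goes to the raid disks, so an empty PREFLUSH bio has nothing left to order; in write-back mode it becomes a "null flush" io_unit that completes only after the journal writes ahead of it:

#include <stdbool.h>

enum journal_mode { WRITE_THROUGH, WRITE_BACK };
enum flush_action { COMPLETE_NOW, QUEUE_NULL_FLUSH, PASS_THROUGH };

static enum flush_action handle_flush(enum journal_mode mode, bool empty_bio)
{
    if (empty_bio)
        return mode == WRITE_THROUGH ? COMPLETE_NOW : QUEUE_NULL_FLUSH;
    return PASS_THROUGH; /* data attached: the normal write path orders it */
}

int main(void)
{
    return handle_flush(WRITE_BACK, true) == QUEUE_NULL_FLUSH ? 0 : 1;
}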
1128 /* This will run after log space is reclaimed */
1129 static void r5l_run_no_space_stripes(struct r5l_log *log)
1133 spin_lock(&log->no_space_stripes_lock);
1134 while (!list_empty(&log->no_space_stripes)) {
1135 sh = list_first_entry(&log->no_space_stripes,
1141 spin_unlock(&log->no_space_stripes_lock);
1146 * for write through mode, returns log->next_checkpoint
1152 struct r5l_log *log = READ_ONCE(conf->log);
1156 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
1157 return log->next_checkpoint;
1159 spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
1160 if (list_empty(&log->stripe_in_journal_list)) {
1162 spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
1163 return log->next_checkpoint;
1165 sh = list_first_entry(&log->stripe_in_journal_list,
1168 spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
1172 static sector_t r5l_reclaimable_space(struct r5l_log *log)
1174 struct r5conf *conf = log->rdev->mddev->private;
1176 return r5l_ring_distance(log, log->last_checkpoint,
1180 static void r5l_run_no_mem_stripe(struct r5l_log *log)
1184 lockdep_assert_held(&log->io_list_lock);
1186 if (!list_empty(&log->no_mem_stripes)) {
1187 sh = list_first_entry(&log->no_mem_stripes,
1195 static bool r5l_complete_finished_ios(struct r5l_log *log)
1200 lockdep_assert_held(&log->io_list_lock);
1202 list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) {
1207 log->next_checkpoint = io->log_start;
1210 mempool_free(io, &log->io_pool);
1211 r5l_run_no_mem_stripe(log);
1221 struct r5l_log *log = io->log;
1222 struct r5conf *conf = log->rdev->mddev->private;
1225 spin_lock_irqsave(&log->io_list_lock, flags);
1228 if (!r5l_complete_finished_ios(log)) {
1229 spin_unlock_irqrestore(&log->io_list_lock, flags);
1233 if (r5l_reclaimable_space(log) > log->max_free_space ||
1235 r5l_wake_reclaim(log, 0);
1237 spin_unlock_irqrestore(&log->io_list_lock, flags);
1238 wake_up(&log->iounit_wait);
1254 struct r5l_log *log = container_of(bio, struct r5l_log,
1260 md_error(log->rdev->mddev, log->rdev);
1263 spin_lock_irqsave(&log->io_list_lock, flags);
1264 list_for_each_entry(io, &log->flushing_ios, log_sibling)
1266 list_splice_tail_init(&log->flushing_ios, &log->finished_ios);
1267 spin_unlock_irqrestore(&log->io_list_lock, flags);
1272 * A log consists of io_units, each led by a meta block. There is one situation we
1273 * want to avoid: a broken meta block in the middle of the log prevents recovery
1274 * from finding the meta at the head of the log. If an operation requires the meta
1275 * at the head to be persistent, the meta blocks before it must be persistent too. A case is:
1277 * stripe data/parity is in the log and we start writing the stripe to the raid
1278 * disks; the data/parity must be persistent in the log before the raid disks are written.
1282 * one whose data/parity is in log.
1284 void r5l_flush_stripe_to_raid(struct r5l_log *log)
1288 if (!log || !log->need_cache_flush)
1291 spin_lock_irq(&log->io_list_lock);
1293 if (!list_empty(&log->flushing_ios)) {
1294 spin_unlock_irq(&log->io_list_lock);
1297 list_splice_tail_init(&log->io_end_ios, &log->flushing_ios);
1298 do_flush = !list_empty(&log->flushing_ios);
1299 spin_unlock_irq(&log->io_list_lock);
1303 bio_init(&log->flush_bio, log->rdev->bdev, NULL, 0,
1305 log->flush_bio.bi_end_io = r5l_log_flush_endio;
1306 submit_bio(&log->flush_bio);
1309 static void r5l_write_super(struct r5l_log *log, sector_t cp);
1310 static void r5l_write_super_and_discard_space(struct r5l_log *log,
1313 struct block_device *bdev = log->rdev->bdev;
1316 r5l_write_super(log, end);
1321 mddev = log->rdev->mddev;
1324 * superblock is updated to the new log tail. Updating the superblock (either
1341 if (log->last_checkpoint < end) {
1343 log->last_checkpoint + log->rdev->data_offset,
1344 end - log->last_checkpoint, GFP_NOIO);
1347 log->last_checkpoint + log->rdev->data_offset,
1348 log->device_size - log->last_checkpoint,
1350 blkdev_issue_discard(bdev, log->rdev->data_offset, end,
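A sketch of the discard ranges computed above: when the reclaimed region [last_checkpoint, end) wraps past the end of the ring, it is discarded as two extents, tail-to-device-end and start-to-end. Userspace model; in the driver the offsets are additionally shifted by rdev->data_offset:

#include <assert.h>
#include <stdint.h>

struct extent { uint64_t start, len; };

/* Returns how many extents (1 or 2) cover [last_checkpoint, end) on the ring */
static int reclaim_extents(uint64_t device_size, uint64_t last_checkpoint,
                           uint64_t end, struct extent out[2])
{
    if (last_checkpoint < end) {
        out[0] = (struct extent){ last_checkpoint, end - last_checkpoint };
        return 1;
    }
    out[0] = (struct extent){ last_checkpoint, device_size - last_checkpoint };
    out[1] = (struct extent){ 0, end };
    return 2;
}

int main(void)
{
    struct extent e[2];
    /* wrapped case: checkpoint at 1000, new tail at 24, on a 1024-sector ring */
    assert(reclaim_extents(1024, 1000, 24, e) == 2);
    assert(e[0].start == 1000 && e[0].len == 24);
    assert(e[1].start == 0 && e[1].len == 24);
    return 0;
}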
1400 if (!READ_ONCE(conf->log))
1421 struct r5l_log *log = READ_ONCE(conf->log);
1429 if (!r5c_is_writeback(log))
1463 /* if log space is tight, flush stripes on stripe_in_journal_list */
1465 spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
1467 list_for_each_entry(sh, &log->stripe_in_journal_list, r5c) {
1485 spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
1489 r5l_run_no_space_stripes(log);
1494 static void r5l_do_reclaim(struct r5l_log *log)
1496 struct r5conf *conf = log->rdev->mddev->private;
1497 sector_t reclaim_target = xchg(&log->reclaim_target, 0);
1502 spin_lock_irq(&log->io_list_lock);
1503 write_super = r5l_reclaimable_space(log) > log->max_free_space ||
1504 reclaim_target != 0 || !list_empty(&log->no_space_stripes);
1511 reclaimable = r5l_reclaimable_space(log);
1513 (list_empty(&log->running_ios) &&
1514 list_empty(&log->io_end_ios) &&
1515 list_empty(&log->flushing_ios) &&
1516 list_empty(&log->finished_ios)))
1519 md_wakeup_thread(log->rdev->mddev->thread);
1520 wait_event_lock_irq(log->iounit_wait,
1521 r5l_reclaimable_space(log) > reclaimable,
1522 log->io_list_lock);
1526 spin_unlock_irq(&log->io_list_lock);
1533 * here, because the log area might be reused soon and we don't want to
1536 r5l_write_super_and_discard_space(log, next_checkpoint);
1538 mutex_lock(&log->io_mutex);
1539 log->last_checkpoint = next_checkpoint;
1540 r5c_update_log_state(log);
1541 mutex_unlock(&log->io_mutex);
1543 r5l_run_no_space_stripes(log);
1550 struct r5l_log *log = READ_ONCE(conf->log);
1552 if (!log)
1555 r5l_do_reclaim(log);
1558 void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
1563 if (!log)
1566 target = READ_ONCE(log->reclaim_target);
1570 } while (!try_cmpxchg(&log->reclaim_target, &target, new));
1571 md_wakeup_thread(log->reclaim_thread);
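A sketch of the lock-free "raise the reclaim target" pattern above, using C11 atomics in place of the kernel's try_cmpxchg(). Concurrent callers only ever move the target upward, and the reclaim thread consumes it with an exchange, matching the xchg(&log->reclaim_target, 0) in r5l_do_reclaim():

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t reclaim_target;

static void wake_reclaim(uint64_t space)
{
    uint64_t target = atomic_load(&reclaim_target);
    do {
        if (space < target)
            return; /* someone already asked for at least this much */
    } while (!atomic_compare_exchange_weak(&reclaim_target, &target, space));
    /* the kernel would call md_wakeup_thread(log->reclaim_thread) here */
}

static uint64_t take_reclaim_target(void)
{
    /* the reclaim thread's xchg(&log->reclaim_target, 0) */
    return atomic_exchange(&reclaim_target, 0);
}

int main(void)
{
    wake_reclaim(4096);
    wake_reclaim(1024); /* no-op: smaller than the pending target */
    return take_reclaim_target() == 4096 ? 0 : 1;
}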
1574 void r5l_quiesce(struct r5l_log *log, int quiesce)
1576 struct mddev *mddev = log->rdev->mddev;
1578 log->reclaim_thread, lockdep_is_held(&mddev->reconfig_mutex));
1584 r5l_wake_reclaim(log, MaxSector);
1585 r5l_do_reclaim(log);
1592 struct r5l_log *log = READ_ONCE(conf->log);
1595 if (!log)
1598 return test_bit(Faulty, &log->rdev->flags);
1614 * In recovery, the log is read sequentially. It is not efficient to
1616 * reads multiple pages with one IO, so further log read can
1626 static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
1649 static void r5l_recovery_free_ra_pool(struct r5l_log *log,
1664 static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
1671 bio_init(&bio, log->rdev->bdev, ctx->ra_bvec,
1673 bio.bi_iter.bi_sector = log->rdev->data_offset + offset;
1683 offset = r5l_ring_add(log, offset, BLOCK_SECTORS);
1698 static int r5l_recovery_read_page(struct r5l_log *log,
1707 ret = r5l_recovery_fetch_ra_pool(log, ctx, offset);
1722 static int r5l_recovery_read_meta_block(struct r5l_log *log,
1730 ret = r5l_recovery_read_page(log, ctx, page, ctx->pos);
1744 crc = crc32c(log->uuid_checksum, mb, PAGE_SIZE);
1757 r5l_recovery_create_empty_meta_block(struct r5l_log *log,
1772 static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
1781 r5l_recovery_create_empty_meta_block(log, page, pos, seq);
1783 mb->checksum = cpu_to_le32(crc32c(log->uuid_checksum, mb, PAGE_SIZE));
1784 if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE |
1800 static void r5l_recovery_load_data(struct r5l_log *log,
1806 struct mddev *mddev = log->rdev->mddev;
1813 r5l_recovery_read_page(log, ctx, sh->dev[dd_idx].page, log_offset);
1822 static void r5l_recovery_load_parity(struct r5l_log *log,
1828 struct mddev *mddev = log->rdev->mddev;
1832 r5l_recovery_read_page(log, ctx, sh->dev[sh->pd_idx].page, log_offset);
1839 log, ctx, sh->dev[sh->qd_idx].page,
1840 r5l_ring_add(log, log_offset, BLOCK_SECTORS));
1968 r5l_recovery_verify_data_checksum(struct r5l_log *log,
1976 r5l_recovery_read_page(log, ctx, page, log_offset);
1978 checksum = crc32c(log->uuid_checksum, addr, PAGE_SIZE);
1988 r5l_recovery_verify_data_checksum_for_mb(struct r5l_log *log,
1991 struct mddev *mddev = log->rdev->mddev;
1995 sector_t log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2010 log, ctx, page, log_offset,
2015 log, ctx, page, log_offset,
2020 log, ctx, page,
2021 r5l_ring_add(log, log_offset,
2035 log_offset = r5l_ring_add(log, log_offset,
2061 r5c_recovery_analyze_meta_block(struct r5l_log *log,
2065 struct mddev *mddev = log->rdev->mddev;
2081 ret = r5l_recovery_verify_data_checksum_for_mb(log, ctx);
2089 log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2172 r5l_recovery_load_data(log, sh, ctx, payload,
2175 r5l_recovery_load_parity(log, sh, ctx, payload,
2180 log_offset = r5l_ring_add(log, log_offset,
2195 static void r5c_recovery_load_one_stripe(struct r5l_log *log,
2211 * Scan through the log for all to-be-flushed data
2226 static int r5c_recovery_flush_log(struct r5l_log *log,
2232 /* scan through the log */
2234 if (r5l_recovery_read_meta_block(log, ctx))
2237 ret = r5c_recovery_analyze_meta_block(log, ctx,
2246 ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
2260 r5c_recovery_load_one_stripe(log, sh);
2269 * log will start here, but we can't let the superblock point to the last valid
2270 * meta block. The log might look like:
2282 * Before recovery, the log looks like the following
2285 * | valid log | invalid log |
2288 * |- log->last_checkpoint
2289 * |- log->last_cp_seq
2291 * Now we scan through the log until we see an invalid entry
2294 * | valid log | invalid log |
2297 * |- log->last_checkpoint |- ctx->pos
2298 * |- log->last_cp_seq |- ctx->seq
2304 * | valid log | invalid log |
2307 * |- log->last_checkpoint |- ctx->pos+1
2308 * |- log->last_cp_seq |- ctx->seq+10001
2315 * | valid log | data only stripes | invalid log |
2318 * |- log->last_checkpoint |- ctx->pos+n
2319 * |- log->last_cp_seq |- ctx->seq+10000+n
2322 * again from log->last_checkpoint.
2327 * | old log | data only stripes | invalid log |
2330 * |- log->last_checkpoint |- ctx->pos+n
2331 * |- log->last_cp_seq |- ctx->seq+10000+n
2334 * point on, the recovery will start from the new log->last_checkpoint.
2337 r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
2341 struct mddev *mddev = log->rdev->mddev;
2361 r5l_recovery_create_empty_meta_block(log, page,
2365 write_pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2381 crc32c(log->uuid_checksum, addr,
2384 sync_page_io(log->rdev, write_pos, PAGE_SIZE,
2386 write_pos = r5l_ring_add(log, write_pos,
2394 mb->checksum = cpu_to_le32(crc32c(log->uuid_checksum,
2396 sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
2399 list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
2400 atomic_inc(&log->stripe_in_journal_count);
2405 log->next_checkpoint = next_checkpoint;
2410 static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log,
2413 struct mddev *mddev = log->rdev->mddev;
2425 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_BACK;
2438 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
2443 static int r5l_recovery_log(struct r5l_log *log)
2445 struct mddev *mddev = log->rdev->mddev;
2454 ctx->pos = log->last_checkpoint;
2455 ctx->seq = log->last_cp_seq;
2464 if (r5l_recovery_allocate_ra_pool(log, ctx) != 0) {
2469 ret = r5c_recovery_flush_log(log, ctx);
2486 log->next_checkpoint = ctx->pos;
2487 r5l_log_write_empty_meta_block(log, ctx->pos, ctx->seq++);
2488 ctx->pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2489 } else if (r5c_recovery_rewrite_data_only_stripes(log, ctx)) {
2496 log->log_start = ctx->pos;
2497 log->seq = ctx->seq;
2498 log->last_checkpoint = pos;
2499 r5l_write_super(log, pos);
2501 r5c_recovery_flush_data_only_stripes(log, ctx);
2504 r5l_recovery_free_ra_pool(log, ctx);
2512 static void r5l_write_super(struct r5l_log *log, sector_t cp)
2514 struct mddev *mddev = log->rdev->mddev;
2516 log->rdev->journal_tail = cp;
2530 if (!conf || !conf->log)
2533 switch (conf->log->r5c_journal_mode) {
2570 if (!conf || !conf->log)
2577 conf->log->r5c_journal_mode = mode;
2627 struct r5l_log *log = READ_ONCE(conf->log);
2636 BUG_ON(!r5c_is_writeback(log));
2686 spin_lock(&log->tree_lock);
2687 pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
2691 pslot, &log->tree_lock) >>
2694 &log->big_stripe_tree, pslot,
2702 &log->big_stripe_tree, tree_index,
2705 spin_unlock(&log->tree_lock);
2710 spin_unlock(&log->tree_lock);
2794 struct r5l_log *log = READ_ONCE(conf->log);
2800 if (!log || !test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags))
2806 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
2825 spin_lock_irq(&log->stripe_in_journal_lock);
2827 spin_unlock_irq(&log->stripe_in_journal_lock);
2830 atomic_dec(&log->stripe_in_journal_count);
2831 r5c_update_log_state(log);
2837 spin_lock(&log->tree_lock);
2838 pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
2842 pslot, &log->tree_lock) >>
2845 radix_tree_delete(&log->big_stripe_tree, tree_index);
2848 &log->big_stripe_tree, pslot,
2850 spin_unlock(&log->tree_lock);
2865 r5l_append_flush_payload(log, sh->sector);
2871 int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh)
2879 BUG_ON(!log);
2887 sh->dev[i].log_checksum = crc32c(log->uuid_checksum,
2901 mutex_lock(&log->io_mutex);
2907 r5l_add_no_space_stripe(log, sh);
2908 else if (!r5l_has_free_space(log, reserve)) {
2909 if (sh->log_start == log->last_checkpoint)
2912 r5l_add_no_space_stripe(log, sh);
2914 ret = r5l_log_stripe(log, sh, pages, 0);
2916 spin_lock_irq(&log->io_list_lock);
2917 list_add_tail(&sh->log_list, &log->no_mem_stripes);
2918 spin_unlock_irq(&log->io_list_lock);
2922 mutex_unlock(&log->io_mutex);
2929 struct r5l_log *log = READ_ONCE(conf->log);
2933 if (!log)
2937 slot = radix_tree_lookup(&log->big_stripe_tree, tree_index);
2941 static int r5l_load_log(struct r5l_log *log)
2943 struct md_rdev *rdev = log->rdev;
2946 sector_t cp = log->rdev->journal_tail;
2971 expected_crc = crc32c(log->uuid_checksum, mb, PAGE_SIZE);
2982 log->last_cp_seq = get_random_u32();
2984 r5l_log_write_empty_meta_block(log, cp, log->last_cp_seq);
2987 * data very soon. If the superblock lacks the correct log tail address,
2988 * recovery can't find the log
2990 r5l_write_super(log, cp);
2992 log->last_cp_seq = le64_to_cpu(mb->seq);
2994 log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
2995 log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
2996 if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
2997 log->max_free_space = RECLAIM_MAX_FREE_SPACE;
2998 log->last_checkpoint = cp;
3003 log->log_start = r5l_ring_add(log, cp, BLOCK_SECTORS);
3004 log->seq = log->last_cp_seq + 1;
3005 log->next_checkpoint = cp;
3007 ret = r5l_recovery_log(log);
3009 r5c_update_log_state(log);
3016 int r5l_start(struct r5l_log *log)
3020 if (!log)
3023 ret = r5l_load_log(log);
3025 struct mddev *mddev = log->rdev->mddev;
3036 struct r5l_log *log = READ_ONCE(conf->log);
3038 if (!log)
3043 log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK)
3044 schedule_work(&log->disable_writeback_work);
3049 struct r5l_log *log;
3074 log = kzalloc(sizeof(*log), GFP_KERNEL);
3075 if (!log)
3077 log->rdev = rdev;
3078 log->need_cache_flush = bdev_write_cache(rdev->bdev);
3079 log->uuid_checksum = crc32c(~0, rdev->mddev->uuid,
3082 mutex_init(&log->io_mutex);
3084 spin_lock_init(&log->io_list_lock);
3085 INIT_LIST_HEAD(&log->running_ios);
3086 INIT_LIST_HEAD(&log->io_end_ios);
3087 INIT_LIST_HEAD(&log->flushing_ios);
3088 INIT_LIST_HEAD(&log->finished_ios);
3090 log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
3091 if (!log->io_kc)
3094 ret = mempool_init_slab_pool(&log->io_pool, R5L_POOL_SIZE, log->io_kc);
3098 ret = bioset_init(&log->bs, R5L_POOL_SIZE, 0, BIOSET_NEED_BVECS);
3102 ret = mempool_init_page_pool(&log->meta_pool, R5L_POOL_SIZE, 0);
3106 spin_lock_init(&log->tree_lock);
3107 INIT_RADIX_TREE(&log->big_stripe_tree, GFP_NOWAIT | __GFP_NOWARN);
3109 thread = md_register_thread(r5l_reclaim_thread, log->rdev->mddev,
3115 rcu_assign_pointer(log->reclaim_thread, thread);
3117 init_waitqueue_head(&log->iounit_wait);
3119 INIT_LIST_HEAD(&log->no_mem_stripes);
3121 INIT_LIST_HEAD(&log->no_space_stripes);
3122 spin_lock_init(&log->no_space_stripes_lock);
3124 INIT_WORK(&log->deferred_io_work, r5l_submit_io_async);
3125 INIT_WORK(&log->disable_writeback_work, r5c_disable_writeback_async);
3127 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
3128 INIT_LIST_HEAD(&log->stripe_in_journal_list);
3129 spin_lock_init(&log->stripe_in_journal_lock);
3130 atomic_set(&log->stripe_in_journal_count, 0);
3132 WRITE_ONCE(conf->log, log);
3138 mempool_exit(&log->meta_pool);
3140 bioset_exit(&log->bs);
3142 mempool_exit(&log->io_pool);
3144 kmem_cache_destroy(log->io_kc);
3146 kfree(log);
3152 struct r5l_log *log = conf->log;
3154 md_unregister_thread(conf->mddev, &log->reclaim_thread);
3157 * 'reconfig_mutex' is held by caller, set 'conf->log' to NULL to
3160 WRITE_ONCE(conf->log, NULL);
3162 flush_work(&log->disable_writeback_work);
3164 mempool_exit(&log->meta_pool);
3165 bioset_exit(&log->bs);
3166 mempool_exit(&log->io_pool);
3167 kmem_cache_destroy(log->io_kc);
3168 kfree(log);