Lines Matching full:ic
99 #define journal_entry_tag(ic, je) ((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block]) argument
288 struct dm_integrity_c *ic; member
306 struct dm_integrity_c *ic; member
318 struct dm_integrity_c *ic; member
376 static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err) in dm_integrity_io_error() argument
379 atomic64_inc(&ic->number_of_mismatches); in dm_integrity_io_error()
380 if (!cmpxchg(&ic->failed, 0, err)) in dm_integrity_io_error()
384 static int dm_integrity_failed(struct dm_integrity_c *ic) in dm_integrity_failed() argument
386 return READ_ONCE(ic->failed); in dm_integrity_failed()
389 static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i, in dm_integrity_commit_id() argument
396 return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j); in dm_integrity_commit_id()
399 static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector, in get_area_and_offset() argument
402 if (!ic->meta_dev) { in get_area_and_offset()
403 __u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors; in get_area_and_offset()
412 #define sector_to_block(ic, n) \ argument
414 BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1)); \
415 (n) >>= (ic)->sb->log2_sectors_per_block; \
418 static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area, in get_metadata_sector_and_offset() argument
424 ms = area << ic->sb->log2_interleave_sectors; in get_metadata_sector_and_offset()
425 if (likely(ic->log2_metadata_run >= 0)) in get_metadata_sector_and_offset()
426 ms += area << ic->log2_metadata_run; in get_metadata_sector_and_offset()
428 ms += area * ic->metadata_run; in get_metadata_sector_and_offset()
429 ms >>= ic->log2_buffer_sectors; in get_metadata_sector_and_offset()
431 sector_to_block(ic, offset); in get_metadata_sector_and_offset()
433 if (likely(ic->log2_tag_size >= 0)) { in get_metadata_sector_and_offset()
434 ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size); in get_metadata_sector_and_offset()
435 mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1); in get_metadata_sector_and_offset()
437 ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors); in get_metadata_sector_and_offset()
438 mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1); in get_metadata_sector_and_offset()
444 static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset) in get_data_sector() argument
448 if (ic->meta_dev) in get_data_sector()
451 result = area << ic->sb->log2_interleave_sectors; in get_data_sector()
452 if (likely(ic->log2_metadata_run >= 0)) in get_data_sector()
453 result += (area + 1) << ic->log2_metadata_run; in get_data_sector()
455 result += (area + 1) * ic->metadata_run; in get_data_sector()
457 result += (sector_t)ic->initial_sectors + offset; in get_data_sector()
458 result += ic->start; in get_data_sector()
463 static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr) in wraparound_section() argument
465 if (unlikely(*sec_ptr >= ic->journal_sections)) in wraparound_section()
466 *sec_ptr -= ic->journal_sections; in wraparound_section()
469 static void sb_set_version(struct dm_integrity_c *ic) in sb_set_version() argument
471 if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) in sb_set_version()
472 ic->sb->version = SB_VERSION_4; in sb_set_version()
473 else if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) in sb_set_version()
474 ic->sb->version = SB_VERSION_3; in sb_set_version()
475 else if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) in sb_set_version()
476 ic->sb->version = SB_VERSION_2; in sb_set_version()
478 ic->sb->version = SB_VERSION_1; in sb_set_version()
481 static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags) in sync_rw_sb() argument
489 io_req.mem.ptr.addr = ic->sb; in sync_rw_sb()
491 io_req.client = ic->io; in sync_rw_sb()
492 io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev; in sync_rw_sb()
493 io_loc.sector = ic->start; in sync_rw_sb()
497 sb_set_version(ic); in sync_rw_sb()
507 static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap, in block_bitmap_op() argument
513 if (unlikely(((sector | n_sectors) & ((1 << ic->sb->log2_sectors_per_block) - 1)) != 0)) { in block_bitmap_op()
517 ic->sb->log2_sectors_per_block, in block_bitmap_op()
518 ic->log2_blocks_per_bitmap_bit, in block_bitmap_op()
526 bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); in block_bitmap_op()
528 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); in block_bitmap_op()
612 static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *s… in block_bitmap_copy() argument
614 unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE); in block_bitmap_copy()
624 static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t secto… in sector_to_bitmap_block() argument
626 unsigned bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); in sector_to_bitmap_block()
629 BUG_ON(bitmap_block >= ic->n_bitmap_blocks); in sector_to_bitmap_block()
630 return &ic->bbs[bitmap_block]; in sector_to_bitmap_block()
633 static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset, in access_journal_check() argument
637 unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors; in access_journal_check()
639 if (unlikely(section >= ic->journal_sections) || in access_journal_check()
642 function, section, offset, ic->journal_sections, limit); in access_journal_check()
648 static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset, in page_list_location() argument
653 access_journal_check(ic, section, offset, false, "page_list_location"); in page_list_location()
655 sector = section * ic->journal_section_sectors + offset; in page_list_location()
661 static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl, in access_page_list() argument
667 page_list_location(ic, section, offset, &pl_index, &pl_offset); in access_page_list()
677 static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned … in access_journal() argument
679 return access_page_list(ic, ic->journal, section, offset, NULL); in access_journal()
682 static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsi… in access_journal_entry() argument
687 access_journal_check(ic, section, n, true, "access_journal_entry"); in access_journal_entry()
692 js = access_journal(ic, section, rel_sector); in access_journal_entry()
693 return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size); in access_journal_entry()
696 static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsi… in access_journal_data() argument
698 n <<= ic->sb->log2_sectors_per_block; in access_journal_data()
702 access_journal_check(ic, section, n, false, "access_journal_data"); in access_journal_data()
704 return access_journal(ic, section, n); in access_journal_data()
707 static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE]) in section_mac() argument
709 SHASH_DESC_ON_STACK(desc, ic->journal_mac); in section_mac()
713 desc->tfm = ic->journal_mac; in section_mac()
717 dm_integrity_io_error(ic, "crypto_shash_init", r); in section_mac()
721 for (j = 0; j < ic->journal_section_entries; j++) { in section_mac()
722 struct journal_entry *je = access_journal_entry(ic, section, j); in section_mac()
725 dm_integrity_io_error(ic, "crypto_shash_update", r); in section_mac()
730 size = crypto_shash_digestsize(ic->journal_mac); in section_mac()
735 dm_integrity_io_error(ic, "crypto_shash_final", r); in section_mac()
743 dm_integrity_io_error(ic, "digest_size", -EINVAL); in section_mac()
748 dm_integrity_io_error(ic, "crypto_shash_final", r); in section_mac()
759 static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr) in rw_section_mac() argument
764 if (!ic->journal_mac) in rw_section_mac()
767 section_mac(ic, section, result); in rw_section_mac()
770 struct journal_sector *js = access_journal(ic, section, j); in rw_section_mac()
776 dm_integrity_io_error(ic, "journal mac", -EILSEQ); in rw_section_mac()
789 static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section, in xor_journal() argument
793 size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT; in xor_journal()
798 source_pl = ic->journal; in xor_journal()
799 target_pl = ic->journal_io; in xor_journal()
801 source_pl = ic->journal_io; in xor_journal()
802 target_pl = ic->journal; in xor_journal()
805 page_list_location(ic, section, 0, &pl_index, &pl_offset); in xor_journal()
821 rw_section_mac(ic, section, true); in xor_journal()
826 page_list_location(ic, section, 0, &section_index, &dummy); in xor_journal()
832 src_pages[1] = ic->journal_xor[pl_index].page; in xor_journal()
851 complete(&comp->ic->crypto_backoff); in complete_journal_encrypt()
854 dm_integrity_io_error(comp->ic, "asynchronous encrypt", err); in complete_journal_encrypt()
873 wait_for_completion(&comp->ic->crypto_backoff); in do_crypt()
874 reinit_completion(&comp->ic->crypto_backoff); in do_crypt()
877 dm_integrity_io_error(comp->ic, "encrypt", r); in do_crypt()
881 static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section, in crypt_journal() argument
890 source_sg = ic->journal_scatterlist; in crypt_journal()
891 target_sg = ic->journal_io_scatterlist; in crypt_journal()
893 source_sg = ic->journal_io_scatterlist; in crypt_journal()
894 target_sg = ic->journal_scatterlist; in crypt_journal()
903 rw_section_mac(ic, section, true); in crypt_journal()
905 req = ic->sk_requests[section]; in crypt_journal()
906 ivsize = crypto_skcipher_ivsize(ic->journal_crypt); in crypt_journal()
925 static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section, in encrypt_journal() argument
928 if (ic->journal_xor) in encrypt_journal()
929 return xor_journal(ic, encrypt, section, n_sections, comp); in encrypt_journal()
931 return crypt_journal(ic, encrypt, section, n_sections, comp); in encrypt_journal()
938 dm_integrity_io_error(comp->ic, "writing journal", -EIO); in complete_journal_io()
942 static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags, in rw_journal_sectors() argument
950 if (unlikely(dm_integrity_failed(ic))) { in rw_journal_sectors()
962 if (ic->journal_io) in rw_journal_sectors()
963 io_req.mem.ptr.pl = &ic->journal_io[pl_index]; in rw_journal_sectors()
965 io_req.mem.ptr.pl = &ic->journal[pl_index]; in rw_journal_sectors()
973 io_req.client = ic->io; in rw_journal_sectors()
974 io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev; in rw_journal_sectors()
975 io_loc.sector = ic->start + SB_SECTORS + sector; in rw_journal_sectors()
980 dm_integrity_io_error(ic, op == REQ_OP_READ ? "reading journal" : "writing journal", r); in rw_journal_sectors()
988 static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section, in rw_journal() argument
993 sector = section * ic->journal_section_sectors; in rw_journal()
994 n_sectors = n_sections * ic->journal_section_sectors; in rw_journal()
996 rw_journal_sectors(ic, op, op_flags, sector, n_sectors, comp); in rw_journal()
999 static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_section… in write_journal() argument
1006 io_comp.ic = ic; in write_journal()
1009 if (commit_start + commit_sections <= ic->journal_sections) { in write_journal()
1011 if (ic->journal_io) { in write_journal()
1012 crypt_comp_1.ic = ic; in write_journal()
1015 encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1); in write_journal()
1019 rw_section_mac(ic, commit_start + i, true); in write_journal()
1021 rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start, in write_journal()
1026 to_end = ic->journal_sections - commit_start; in write_journal()
1027 if (ic->journal_io) { in write_journal()
1028 crypt_comp_1.ic = ic; in write_journal()
1031 encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1); in write_journal()
1033 rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp); in write_journal()
1036 encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1); in write_journal()
1039 crypt_comp_2.ic = ic; in write_journal()
1042 encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2); in write_journal()
1044 rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp); in write_journal()
1049 rw_section_mac(ic, commit_start + i, true); in write_journal()
1050 rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp); in write_journal()
1052 rw_section_mac(ic, i, true); in write_journal()
1054 rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp); in write_journal()
1060 static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset, in copy_from_journal() argument
1068 BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1)); in copy_from_journal()
1070 if (unlikely(dm_integrity_failed(ic))) { in copy_from_journal()
1075 sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset; in copy_from_journal()
1083 io_req.mem.ptr.pl = &ic->journal[pl_index]; in copy_from_journal()
1087 io_req.client = ic->io; in copy_from_journal()
1088 io_loc.bdev = ic->dev->bdev; in copy_from_journal()
1105 static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool che… in add_new_range() argument
1107 struct rb_node **n = &ic->in_progress.rb_node; in add_new_range()
1110 BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1)); in add_new_range()
1114 list_for_each_entry(range, &ic->wait_list, wait_entry) { in add_new_range()
1136 rb_insert_color(&new_range->node, &ic->in_progress); in add_new_range()
1141 static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range) in remove_range_unlocked() argument
1143 rb_erase(&range->node, &ic->in_progress); in remove_range_unlocked()
1144 while (unlikely(!list_empty(&ic->wait_list))) { in remove_range_unlocked()
1146 list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry); in remove_range_unlocked()
1150 if (!add_new_range(ic, last_range, false)) { in remove_range_unlocked()
1152 list_add(&last_range->wait_entry, &ic->wait_list); in remove_range_unlocked()
1160 static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range) in remove_range() argument
1164 spin_lock_irqsave(&ic->endio_wait.lock, flags); in remove_range()
1165 remove_range_unlocked(ic, range); in remove_range()
1166 spin_unlock_irqrestore(&ic->endio_wait.lock, flags); in remove_range()
1169 static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range) in wait_and_add_new_range() argument
1172 list_add_tail(&new_range->wait_entry, &ic->wait_list); in wait_and_add_new_range()
1176 spin_unlock_irq(&ic->endio_wait.lock); in wait_and_add_new_range()
1178 spin_lock_irq(&ic->endio_wait.lock); in wait_and_add_new_range()
1182 static void add_new_range_and_wait(struct dm_integrity_c *ic, struct dm_integrity_range *new_range) in add_new_range_and_wait() argument
1184 if (unlikely(!add_new_range(ic, new_range, true))) in add_new_range_and_wait()
1185 wait_and_add_new_range(ic, new_range); in add_new_range_and_wait()
1194 static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector) in add_journal_node() argument
1202 link = &ic->journal_tree_root.rb_node; in add_journal_node()
1216 rb_insert_color(&node->node, &ic->journal_tree_root); in add_journal_node()
1219 static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node) in remove_journal_node() argument
1222 rb_erase(&node->node, &ic->journal_tree_root); in remove_journal_node()
1228 static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector) in find_journal_node() argument
1230 struct rb_node *n = ic->journal_tree_root.rb_node; in find_journal_node()
1236 found = j - ic->journal_tree; in find_journal_node()
1249 static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector) in test_journal_node() argument
1254 if (unlikely(pos >= ic->journal_entries)) in test_journal_node()
1256 node = &ic->journal_tree[pos]; in test_journal_node()
1270 static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node) in find_newer_committed_node() argument
1287 next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries; in find_newer_committed_node()
1288 if (next_section >= ic->committed_section && in find_newer_committed_node()
1289 next_section < ic->committed_section + ic->n_committed_sections) in find_newer_committed_node()
1291 if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections) in find_newer_committed_node()
1301 static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_bl… in dm_integrity_rw_tag() argument
1307 unsigned may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0); in dm_integrity_rw_tag()
1315 r = dm_integrity_failed(ic); in dm_integrity_rw_tag()
1319 data = dm_bufio_read(ic->bufio, *metadata_block, &b); in dm_integrity_rw_tag()
1323 to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size); in dm_integrity_rw_tag()
1333 if (likely(is_power_of_2(ic->tag_size))) { in dm_integrity_rw_tag()
1335 if (unlikely(!ic->discard) || in dm_integrity_rw_tag()
1350 if (unlikely(hash_offset == ic->tag_size)) { in dm_integrity_rw_tag()
1356 may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0); in dm_integrity_rw_tag()
1365 if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) { in dm_integrity_rw_tag()
1370 if (unlikely(!is_power_of_2(ic->tag_size))) { in dm_integrity_rw_tag()
1371 hash_offset = (hash_offset + to_copy) % ic->tag_size; in dm_integrity_rw_tag()
1382 static void dm_integrity_flush_buffers(struct dm_integrity_c *ic) in dm_integrity_flush_buffers() argument
1385 r = dm_bufio_write_dirty_buffers(ic->bufio); in dm_integrity_flush_buffers()
1387 dm_integrity_io_error(ic, "writing tags", r); in dm_integrity_flush_buffers()
1390 static void sleep_on_endio_wait(struct dm_integrity_c *ic) in sleep_on_endio_wait() argument
1393 __add_wait_queue(&ic->endio_wait, &wait); in sleep_on_endio_wait()
1395 spin_unlock_irq(&ic->endio_wait.lock); in sleep_on_endio_wait()
1397 spin_lock_irq(&ic->endio_wait.lock); in sleep_on_endio_wait()
1398 __remove_wait_queue(&ic->endio_wait, &wait); in sleep_on_endio_wait()
1403 struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer); in autocommit_fn() local
1405 if (likely(!dm_integrity_failed(ic))) in autocommit_fn()
1406 queue_work(ic->commit_wq, &ic->commit_work); in autocommit_fn()
1409 static void schedule_autocommit(struct dm_integrity_c *ic) in schedule_autocommit() argument
1411 if (!timer_pending(&ic->autocommit_timer)) in schedule_autocommit()
1412 mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies); in schedule_autocommit()
1415 static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio) in submit_flush_bio() argument
1420 spin_lock_irqsave(&ic->endio_wait.lock, flags); in submit_flush_bio()
1422 bio_list_add(&ic->flush_bio_list, bio); in submit_flush_bio()
1423 spin_unlock_irqrestore(&ic->endio_wait.lock, flags); in submit_flush_bio()
1425 queue_work(ic->commit_wq, &ic->commit_work); in submit_flush_bio()
1428 static void do_endio(struct dm_integrity_c *ic, struct bio *bio) in do_endio() argument
1430 int r = dm_integrity_failed(ic); in do_endio()
1433 if (unlikely(ic->synchronous_mode) && bio_op(bio) == REQ_OP_WRITE) { in do_endio()
1435 spin_lock_irqsave(&ic->endio_wait.lock, flags); in do_endio()
1436 bio_list_add(&ic->synchronous_bios, bio); in do_endio()
1437 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0); in do_endio()
1438 spin_unlock_irqrestore(&ic->endio_wait.lock, flags); in do_endio()
1444 static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio) in do_endio_flush() argument
1448 if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic))) in do_endio_flush()
1449 submit_flush_bio(ic, dio); in do_endio_flush()
1451 do_endio(ic, bio); in do_endio_flush()
1457 struct dm_integrity_c *ic = dio->ic; in dec_in_flight() local
1460 remove_range(ic, &dio->range); in dec_in_flight()
1463 schedule_autocommit(ic); in dec_in_flight()
1473 queue_work(ic->offload_wq, &dio->work); in dec_in_flight()
1476 do_endio_flush(ic, dio); in dec_in_flight()
1494 static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector, in integrity_sector_checksum() argument
1498 SHASH_DESC_ON_STACK(req, ic->internal_hash); in integrity_sector_checksum()
1502 req->tfm = ic->internal_hash; in integrity_sector_checksum()
1506 dm_integrity_io_error(ic, "crypto_shash_init", r); in integrity_sector_checksum()
1512 dm_integrity_io_error(ic, "crypto_shash_update", r); in integrity_sector_checksum()
1516 r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT); in integrity_sector_checksum()
1518 dm_integrity_io_error(ic, "crypto_shash_update", r); in integrity_sector_checksum()
1524 dm_integrity_io_error(ic, "crypto_shash_final", r); in integrity_sector_checksum()
1528 digest_size = crypto_shash_digestsize(ic->internal_hash); in integrity_sector_checksum()
1529 if (unlikely(digest_size < ic->tag_size)) in integrity_sector_checksum()
1530 memset(result + digest_size, 0, ic->tag_size - digest_size); in integrity_sector_checksum()
1536 get_random_bytes(result, ic->tag_size); in integrity_sector_checksum()
1542 struct dm_integrity_c *ic = dio->ic; in integrity_metadata() local
1546 if (ic->internal_hash) { in integrity_metadata()
1549 unsigned digest_size = crypto_shash_digestsize(ic->internal_hash); in integrity_metadata()
1552 unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0; in integrity_metadata()
1557 if (unlikely(ic->mode == 'R')) in integrity_metadata()
1561 …checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size +… in integrity_metadata()
1578 unsigned max_blocks = max_size / ic->tag_size; in integrity_metadata()
1582 unsigned this_step_blocks = bi_size >> (SECTOR_SHIFT + ic->sb->log2_sectors_per_block); in integrity_metadata()
1584 r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset, in integrity_metadata()
1585 this_step_blocks * ic->tag_size, TAG_WRITE); in integrity_metadata()
1592 /*if (bi_size < this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block)) { in integrity_metadata()
1597 bi_size -= this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block); in integrity_metadata()
1598 bi_sector += this_step_blocks << ic->sb->log2_sectors_per_block; in integrity_metadata()
1618 integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr); in integrity_metadata()
1619 checksums_ptr += ic->tag_size; in integrity_metadata()
1620 sectors_to_process -= ic->sectors_per_block; in integrity_metadata()
1621 pos += ic->sectors_per_block << SECTOR_SHIFT; in integrity_metadata()
1622 sector += ic->sectors_per_block; in integrity_metadata()
1626 r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset, in integrity_metadata()
1632 (sector - ((r + ic->tag_size - 1) / ic->tag_size))); in integrity_metadata()
1634 atomic64_inc(&ic->number_of_mismatches); in integrity_metadata()
1660 sector_to_block(ic, data_to_process); in integrity_metadata()
1661 data_to_process *= ic->tag_size; in integrity_metadata()
1670 r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset, in integrity_metadata()
1690 struct dm_integrity_c *ic = ti->private; in dm_integrity_map() local
1696 dio->ic = ic; in dm_integrity_map()
1714 submit_flush_bio(ic, dio); in dm_integrity_map()
1727 if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) { in dm_integrity_map()
1730 ic->provided_data_sectors); in dm_integrity_map()
1733 …if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1… in dm_integrity_map()
1735 ic->sectors_per_block, in dm_integrity_map()
1740 if (ic->sectors_per_block > 1 && likely(dio->op != REQ_OP_DISCARD)) { in dm_integrity_map()
1744 if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) { in dm_integrity_map()
1746 bv.bv_offset, bv.bv_len, ic->sectors_per_block); in dm_integrity_map()
1753 if (!ic->internal_hash) { in dm_integrity_map()
1755 unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block; in dm_integrity_map()
1756 if (ic->log2_tag_size >= 0) in dm_integrity_map()
1757 wanted_tag_size <<= ic->log2_tag_size; in dm_integrity_map()
1759 wanted_tag_size *= ic->tag_size; in dm_integrity_map()
1773 if (unlikely(ic->mode == 'R') && unlikely(dio->op != REQ_OP_READ)) in dm_integrity_map()
1776 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset); in dm_integrity_map()
1777 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset); in dm_integrity_map()
1778 bio->bi_iter.bi_sector = get_data_sector(ic, area, offset); in dm_integrity_map()
1787 struct dm_integrity_c *ic = dio->ic; in __journal_read_write() local
1807 struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry); in __journal_read_write()
1818 __io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je)); in __journal_read_write()
1823 js = access_journal_data(ic, journal_section, journal_entry); in __journal_read_write()
1831 } while (++s < ic->sectors_per_block); in __journal_read_write()
1833 if (ic->internal_hash) { in __journal_read_write()
1836 integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack); in __journal_read_write()
1837 if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) { in __journal_read_write()
1845 if (!ic->internal_hash) { in __journal_read_write()
1847 unsigned tag_todo = ic->tag_size; in __journal_read_write()
1848 char *tag_ptr = journal_entry_tag(ic, je); in __journal_read_write()
1873 js = access_journal_data(ic, journal_section, journal_entry); in __journal_read_write()
1874 memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT); in __journal_read_write()
1879 } while (++s < ic->sectors_per_block); in __journal_read_write()
1881 if (ic->internal_hash) { in __journal_read_write()
1882 unsigned digest_size = crypto_shash_digestsize(ic->internal_hash); in __journal_read_write()
1883 if (unlikely(digest_size > ic->tag_size)) { in __journal_read_write()
1885 integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack); in __journal_read_write()
1886 memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size); in __journal_read_write()
1888 integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je)); in __journal_read_write()
1893 logical_sector += ic->sectors_per_block; in __journal_read_write()
1896 if (unlikely(journal_entry == ic->journal_section_entries)) { in __journal_read_write()
1899 wraparound_section(ic, &journal_section); in __journal_read_write()
1902 bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT; in __journal_read_write()
1903 } while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT); in __journal_read_write()
1912 if (unlikely(waitqueue_active(&ic->copy_to_journal_wait))) in __journal_read_write()
1913 wake_up(&ic->copy_to_journal_wait); in __journal_read_write()
1914 if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) { in __journal_read_write()
1915 queue_work(ic->commit_wq, &ic->commit_work); in __journal_read_write()
1917 schedule_autocommit(ic); in __journal_read_write()
1920 remove_range(ic, &dio->range); in __journal_read_write()
1927 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset); in __journal_read_write()
1928 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset); in __journal_read_write()
1937 struct dm_integrity_c *ic = dio->ic; in dm_integrity_map_continue() local
1943 bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ; in dm_integrity_map_continue()
1944 if (unlikely(dio->op == REQ_OP_DISCARD) && ic->mode != 'D') in dm_integrity_map_continue()
1949 queue_work(ic->offload_wq, &dio->work); in dm_integrity_map_continue()
1954 spin_lock_irq(&ic->endio_wait.lock); in dm_integrity_map_continue()
1956 if (unlikely(dm_integrity_failed(ic))) { in dm_integrity_map_continue()
1957 spin_unlock_irq(&ic->endio_wait.lock); in dm_integrity_map_continue()
1958 do_endio(ic, bio); in dm_integrity_map_continue()
1963 if (ic->mode == 'J' && likely(dio->op != REQ_OP_DISCARD)) { in dm_integrity_map_continue()
1969 (sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block); in dm_integrity_map_continue()
1973 sleep_on_endio_wait(ic); in dm_integrity_map_continue()
1976 range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block; in dm_integrity_map_continue()
1977 ic->free_sectors -= range_sectors; in dm_integrity_map_continue()
1978 journal_section = ic->free_section; in dm_integrity_map_continue()
1979 journal_entry = ic->free_section_entry; in dm_integrity_map_continue()
1981 next_entry = ic->free_section_entry + range_sectors; in dm_integrity_map_continue()
1982 ic->free_section_entry = next_entry % ic->journal_section_entries; in dm_integrity_map_continue()
1983 ic->free_section += next_entry / ic->journal_section_entries; in dm_integrity_map_continue()
1984 ic->n_uncommitted_sections += next_entry / ic->journal_section_entries; in dm_integrity_map_continue()
1985 wraparound_section(ic, &ic->free_section); in dm_integrity_map_continue()
1987 pos = journal_section * ic->journal_section_entries + journal_entry; in dm_integrity_map_continue()
1994 add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i); in dm_integrity_map_continue()
1996 if (unlikely(pos >= ic->journal_entries)) in dm_integrity_map_continue()
1999 je = access_journal_entry(ic, ws, we); in dm_integrity_map_continue()
2003 if (unlikely(we == ic->journal_section_entries)) { in dm_integrity_map_continue()
2006 wraparound_section(ic, &ws); in dm_integrity_map_continue()
2008 } while ((i += ic->sectors_per_block) < dio->range.n_sectors); in dm_integrity_map_continue()
2010 spin_unlock_irq(&ic->endio_wait.lock); in dm_integrity_map_continue()
2014 journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector); in dm_integrity_map_continue()
2021 for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) { in dm_integrity_map_continue()
2022 if (!test_journal_node(ic, jp, dio->range.logical_sector + i)) in dm_integrity_map_continue()
2029 if (unlikely(!add_new_range(ic, &dio->range, true))) { in dm_integrity_map_continue()
2037 spin_unlock_irq(&ic->endio_wait.lock); in dm_integrity_map_continue()
2039 queue_work(ic->wait_wq, &dio->work); in dm_integrity_map_continue()
2043 dio->range.n_sectors = ic->sectors_per_block; in dm_integrity_map_continue()
2044 wait_and_add_new_range(ic, &dio->range); in dm_integrity_map_continue()
2052 unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector); in dm_integrity_map_continue()
2054 remove_range_unlocked(ic, &dio->range); in dm_integrity_map_continue()
2059 if (ic->mode == 'J' && likely(dio->op == REQ_OP_DISCARD) && !discard_retried) { in dm_integrity_map_continue()
2061 unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector); in dm_integrity_map_continue()
2064 remove_range_unlocked(ic, &dio->range); in dm_integrity_map_continue()
2065 spin_unlock_irq(&ic->endio_wait.lock); in dm_integrity_map_continue()
2066 queue_work(ic->commit_wq, &ic->commit_work); in dm_integrity_map_continue()
2067 flush_workqueue(ic->commit_wq); in dm_integrity_map_continue()
2068 queue_work(ic->writer_wq, &ic->writer_work); in dm_integrity_map_continue()
2069 flush_workqueue(ic->writer_wq); in dm_integrity_map_continue()
2074 spin_unlock_irq(&ic->endio_wait.lock); in dm_integrity_map_continue()
2077 journal_section = journal_read_pos / ic->journal_section_entries; in dm_integrity_map_continue()
2078 journal_entry = journal_read_pos % ic->journal_section_entries; in dm_integrity_map_continue()
2082 if (ic->mode == 'B' && (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))) { in dm_integrity_map_continue()
2083 if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector, in dm_integrity_map_continue()
2087 bbs = sector_to_bitmap_block(ic, dio->range.logical_sector); in dm_integrity_map_continue()
2091 queue_work(ic->writer_wq, &bbs->work); in dm_integrity_map_continue()
2105 bio_set_dev(bio, ic->dev->bdev); in dm_integrity_map_continue()
2111 if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) { in dm_integrity_map_continue()
2113 dm_integrity_flush_buffers(ic); in dm_integrity_map_continue()
2127 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) && in dm_integrity_map_continue()
2128 dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector)) in dm_integrity_map_continue()
2130 if (ic->mode == 'B') { in dm_integrity_map_continue()
2131 if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector, in dm_integrity_map_continue()
2144 queue_work(ic->metadata_wq, &dio->work); in dm_integrity_map_continue()
2153 do_endio_flush(ic, dio); in dm_integrity_map_continue()
2164 static void pad_uncommitted(struct dm_integrity_c *ic) in pad_uncommitted() argument
2166 if (ic->free_section_entry) { in pad_uncommitted()
2167 ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry; in pad_uncommitted()
2168 ic->free_section_entry = 0; in pad_uncommitted()
2169 ic->free_section++; in pad_uncommitted()
2170 wraparound_section(ic, &ic->free_section); in pad_uncommitted()
2171 ic->n_uncommitted_sections++; in pad_uncommitted()
2173 if (WARN_ON(ic->journal_sections * ic->journal_section_entries != in pad_uncommitted()
2174 (ic->n_uncommitted_sections + ic->n_committed_sections) * in pad_uncommitted()
2175 ic->journal_section_entries + ic->free_sectors)) { in pad_uncommitted()
2179 ic->journal_sections, ic->journal_section_entries, in pad_uncommitted()
2180 ic->n_uncommitted_sections, ic->n_committed_sections, in pad_uncommitted()
2181 ic->journal_section_entries, ic->free_sectors); in pad_uncommitted()
2187 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work); in integrity_commit() local
2192 del_timer(&ic->autocommit_timer); in integrity_commit()
2194 spin_lock_irq(&ic->endio_wait.lock); in integrity_commit()
2195 flushes = bio_list_get(&ic->flush_bio_list); in integrity_commit()
2196 if (unlikely(ic->mode != 'J')) { in integrity_commit()
2197 spin_unlock_irq(&ic->endio_wait.lock); in integrity_commit()
2198 dm_integrity_flush_buffers(ic); in integrity_commit()
2202 pad_uncommitted(ic); in integrity_commit()
2203 commit_start = ic->uncommitted_section; in integrity_commit()
2204 commit_sections = ic->n_uncommitted_sections; in integrity_commit()
2205 spin_unlock_irq(&ic->endio_wait.lock); in integrity_commit()
2212 for (j = 0; j < ic->journal_section_entries; j++) { in integrity_commit()
2214 je = access_journal_entry(ic, i, j); in integrity_commit()
2215 io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je)); in integrity_commit()
2217 for (j = 0; j < ic->journal_section_sectors; j++) { in integrity_commit()
2219 js = access_journal(ic, i, j); in integrity_commit()
2220 js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq); in integrity_commit()
2223 if (unlikely(i >= ic->journal_sections)) in integrity_commit()
2224 ic->commit_seq = next_commit_seq(ic->commit_seq); in integrity_commit()
2225 wraparound_section(ic, &i); in integrity_commit()
2229 write_journal(ic, commit_start, commit_sections); in integrity_commit()
2231 spin_lock_irq(&ic->endio_wait.lock); in integrity_commit()
2232 ic->uncommitted_section += commit_sections; in integrity_commit()
2233 wraparound_section(ic, &ic->uncommitted_section); in integrity_commit()
2234 ic->n_uncommitted_sections -= commit_sections; in integrity_commit()
2235 ic->n_committed_sections += commit_sections; in integrity_commit()
2236 spin_unlock_irq(&ic->endio_wait.lock); in integrity_commit()
2238 if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) in integrity_commit()
2239 queue_work(ic->writer_wq, &ic->writer_work); in integrity_commit()
2245 do_endio(ic, flushes); in integrity_commit()
2254 struct dm_integrity_c *ic = comp->ic; in complete_copy_from_journal() local
2255 remove_range(ic, &io->range); in complete_copy_from_journal()
2256 mempool_free(io, &ic->journal_io_mempool); in complete_copy_from_journal()
2258 dm_integrity_io_error(ic, "copying from journal", -EIO); in complete_copy_from_journal()
2262 static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js, in restore_last_bytes() argument
2269 } while (++s < ic->sectors_per_block); in restore_last_bytes()
2272 static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start, in do_journal_write() argument
2281 comp.ic = ic; in do_journal_write()
2286 for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) { in do_journal_write()
2290 rw_section_mac(ic, i, false); in do_journal_write()
2291 for (j = 0; j < ic->journal_section_entries; j++) { in do_journal_write()
2292 struct journal_entry *je = access_journal_entry(ic, i, j); in do_journal_write()
2304 if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) { in do_journal_write()
2305 dm_integrity_io_error(ic, "invalid sector in journal", -EIO); in do_journal_write()
2306 sec &= ~(sector_t)(ic->sectors_per_block - 1); in do_journal_write()
2309 if (unlikely(sec >= ic->provided_data_sectors)) in do_journal_write()
2311 get_area_and_offset(ic, sec, &area, &offset); in do_journal_write()
2312 restore_last_bytes(ic, access_journal_data(ic, i, j), je); in do_journal_write()
2313 for (k = j + 1; k < ic->journal_section_entries; k++) { in do_journal_write()
2314 struct journal_entry *je2 = access_journal_entry(ic, i, k); in do_journal_write()
2320 if (unlikely(sec2 >= ic->provided_data_sectors)) in do_journal_write()
2322 get_area_and_offset(ic, sec2, &area2, &offset2); in do_journal_write()
2323 if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block)) in do_journal_write()
2325 restore_last_bytes(ic, access_journal_data(ic, i, k), je2); in do_journal_write()
2329 io = mempool_alloc(&ic->journal_io_mempool, GFP_NOIO); in do_journal_write()
2332 io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block; in do_journal_write()
2334 spin_lock_irq(&ic->endio_wait.lock); in do_journal_write()
2335 add_new_range_and_wait(ic, &io->range); in do_journal_write()
2338 struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries]; in do_journal_write()
2341 while (j < k && find_newer_committed_node(ic, &section_node[j])) { in do_journal_write()
2342 struct journal_entry *je2 = access_journal_entry(ic, i, j); in do_journal_write()
2345 remove_journal_node(ic, &section_node[j]); in do_journal_write()
2347 sec += ic->sectors_per_block; in do_journal_write()
2348 offset += ic->sectors_per_block; in do_journal_write()
2350 while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) { in do_journal_write()
2351 struct journal_entry *je2 = access_journal_entry(ic, i, k - 1); in do_journal_write()
2354 remove_journal_node(ic, &section_node[k - 1]); in do_journal_write()
2358 remove_range_unlocked(ic, &io->range); in do_journal_write()
2359 spin_unlock_irq(&ic->endio_wait.lock); in do_journal_write()
2360 mempool_free(io, &ic->journal_io_mempool); in do_journal_write()
2364 remove_journal_node(ic, &section_node[l]); in do_journal_write()
2367 spin_unlock_irq(&ic->endio_wait.lock); in do_journal_write()
2369 metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset); in do_journal_write()
2372 struct journal_entry *je2 = access_journal_entry(ic, i, l); in do_journal_write()
2378 ic->internal_hash) { in do_journal_write()
2381 integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block), in do_journal_write()
2382 (char *)access_journal_data(ic, i, l), test_tag); in do_journal_write()
2383 if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size))) in do_journal_write()
2384 dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ); in do_journal_write()
2388 r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset, in do_journal_write()
2389 ic->tag_size, TAG_WRITE); in do_journal_write()
2391 dm_integrity_io_error(ic, "reading tags", r); in do_journal_write()
2396 copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block, in do_journal_write()
2397 (k - j) << ic->sb->log2_sectors_per_block, in do_journal_write()
2398 get_data_sector(ic, area, offset), in do_journal_write()
2405 dm_bufio_write_dirty_buffers_async(ic->bufio); in do_journal_write()
2412 dm_integrity_flush_buffers(ic); in do_journal_write()
2417 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work); in integrity_writer() local
2423 if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev) in integrity_writer()
2426 spin_lock_irq(&ic->endio_wait.lock); in integrity_writer()
2427 write_start = ic->committed_section; in integrity_writer()
2428 write_sections = ic->n_committed_sections; in integrity_writer()
2429 spin_unlock_irq(&ic->endio_wait.lock); in integrity_writer()
2434 do_journal_write(ic, write_start, write_sections, false); in integrity_writer()
2436 spin_lock_irq(&ic->endio_wait.lock); in integrity_writer()
2438 ic->committed_section += write_sections; in integrity_writer()
2439 wraparound_section(ic, &ic->committed_section); in integrity_writer()
2440 ic->n_committed_sections -= write_sections; in integrity_writer()
2442 prev_free_sectors = ic->free_sectors; in integrity_writer()
2443 ic->free_sectors += write_sections * ic->journal_section_entries; in integrity_writer()
2445 wake_up_locked(&ic->endio_wait); in integrity_writer()
2447 spin_unlock_irq(&ic->endio_wait.lock); in integrity_writer()
2450 static void recalc_write_super(struct dm_integrity_c *ic) in recalc_write_super() argument
2454 dm_integrity_flush_buffers(ic); in recalc_write_super()
2455 if (dm_integrity_failed(ic)) in recalc_write_super()
2458 r = sync_rw_sb(ic, REQ_OP_WRITE, 0); in recalc_write_super()
2460 dm_integrity_io_error(ic, "writing superblock", r); in recalc_write_super()
2465 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work); in integrity_recalc() local
2478 DEBUG_print("start recalculation... (position %llx)\n", le64_to_cpu(ic->sb->recalc_sector)); in integrity_recalc()
2480 spin_lock_irq(&ic->endio_wait.lock); in integrity_recalc()
2484 if (unlikely(dm_post_suspending(ic->ti))) in integrity_recalc()
2487 range.logical_sector = le64_to_cpu(ic->sb->recalc_sector); in integrity_recalc()
2488 if (unlikely(range.logical_sector >= ic->provided_data_sectors)) { in integrity_recalc()
2489 if (ic->mode == 'B') { in integrity_recalc()
2490 block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR); in integrity_recalc()
2492 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0); in integrity_recalc()
2497 get_area_and_offset(ic, range.logical_sector, &area, &offset); in integrity_recalc()
2498 range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector); in integrity_recalc()
2499 if (!ic->meta_dev) in integrity_recalc()
2500 …range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsign… in integrity_recalc()
2502 add_new_range_and_wait(ic, &range); in integrity_recalc()
2503 spin_unlock_irq(&ic->endio_wait.lock); in integrity_recalc()
2507 if (ic->mode == 'B') { in integrity_recalc()
2508 if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR)) { in integrity_recalc()
2511 while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, in integrity_recalc()
2512 ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) { in integrity_recalc()
2513 logical_sector += ic->sectors_per_block; in integrity_recalc()
2514 n_sectors -= ic->sectors_per_block; in integrity_recalc()
2517 while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector + n_sectors - ic->sectors_per_block, in integrity_recalc()
2518 ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) { in integrity_recalc()
2519 n_sectors -= ic->sectors_per_block; in integrity_recalc()
2522 get_area_and_offset(ic, logical_sector, &area, &offset); in integrity_recalc()
2528 recalc_write_super(ic); in integrity_recalc()
2529 if (ic->mode == 'B') { in integrity_recalc()
2530 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval); in integrity_recalc()
2535 if (unlikely(dm_integrity_failed(ic))) in integrity_recalc()
2541 io_req.mem.ptr.addr = ic->recalc_buffer; in integrity_recalc()
2543 io_req.client = ic->io; in integrity_recalc()
2544 io_loc.bdev = ic->dev->bdev; in integrity_recalc()
2545 io_loc.sector = get_data_sector(ic, area, offset); in integrity_recalc()
2550 dm_integrity_io_error(ic, "reading data", r); in integrity_recalc()
2554 t = ic->recalc_tags; in integrity_recalc()
2555 for (i = 0; i < n_sectors; i += ic->sectors_per_block) { in integrity_recalc()
2556 integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t); in integrity_recalc()
2557 t += ic->tag_size; in integrity_recalc()
2560 metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset); in integrity_recalc()
2562 …r = dm_integrity_rw_tag(ic, ic->recalc_tags, &metadata_block, &metadata_offset, t - ic->recalc_tag… in integrity_recalc()
2564 dm_integrity_io_error(ic, "writing tags", r); in integrity_recalc()
2568 if (ic->mode == 'B') { in integrity_recalc()
2571 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) << in integrity_recalc()
2572 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); in integrity_recalc()
2574 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) << in integrity_recalc()
2575 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); in integrity_recalc()
2576 block_bitmap_op(ic, ic->recalc_bitmap, start, end - start, BITMAP_OP_CLEAR); in integrity_recalc()
2582 spin_lock_irq(&ic->endio_wait.lock); in integrity_recalc()
2583 remove_range_unlocked(ic, &range); in integrity_recalc()
2584 ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors); in integrity_recalc()
2588 remove_range(ic, &range); in integrity_recalc()
2592 spin_unlock_irq(&ic->endio_wait.lock); in integrity_recalc()
2594 recalc_write_super(ic); in integrity_recalc()
2600 struct dm_integrity_c *ic = bbs->ic; in bitmap_block_work() local
2617 if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector, in bitmap_block_work()
2619 remove_range(ic, &dio->range); in bitmap_block_work()
2621 queue_work(ic->offload_wq, &dio->work); in bitmap_block_work()
2623 block_bitmap_op(ic, ic->journal, dio->range.logical_sector, in bitmap_block_work()
2632 rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, in bitmap_block_work()
2639 block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector, in bitmap_block_work()
2642 remove_range(ic, &dio->range); in bitmap_block_work()
2644 queue_work(ic->offload_wq, &dio->work); in bitmap_block_work()
2647 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval); in bitmap_block_work()
2652 struct dm_integrity_c *ic = container_of(work, struct dm_integrity_c, bitmap_flush_work.work); in bitmap_flush_work() local
2657 dm_integrity_flush_buffers(ic); in bitmap_flush_work()
2660 range.n_sectors = ic->provided_data_sectors; in bitmap_flush_work()
2662 spin_lock_irq(&ic->endio_wait.lock); in bitmap_flush_work()
2663 add_new_range_and_wait(ic, &range); in bitmap_flush_work()
2664 spin_unlock_irq(&ic->endio_wait.lock); in bitmap_flush_work()
2666 dm_integrity_flush_buffers(ic); in bitmap_flush_work()
2667 if (ic->meta_dev) in bitmap_flush_work()
2668 blkdev_issue_flush(ic->dev->bdev, GFP_NOIO); in bitmap_flush_work()
2670 limit = ic->provided_data_sectors; in bitmap_flush_work()
2671 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) { in bitmap_flush_work()
2672 limit = le64_to_cpu(ic->sb->recalc_sector) in bitmap_flush_work()
2673 >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit) in bitmap_flush_work()
2674 << (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); in bitmap_flush_work()
2677 block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR); in bitmap_flush_work()
2678 block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR); in bitmap_flush_work()
2680 rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0, in bitmap_flush_work()
2681 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); in bitmap_flush_work()
2683 spin_lock_irq(&ic->endio_wait.lock); in bitmap_flush_work()
2684 remove_range_unlocked(ic, &range); in bitmap_flush_work()
2685 while (unlikely((bio = bio_list_pop(&ic->synchronous_bios)) != NULL)) { in bitmap_flush_work()
2687 spin_unlock_irq(&ic->endio_wait.lock); in bitmap_flush_work()
2688 spin_lock_irq(&ic->endio_wait.lock); in bitmap_flush_work()
2690 spin_unlock_irq(&ic->endio_wait.lock); in bitmap_flush_work()
2694 static void init_journal(struct dm_integrity_c *ic, unsigned start_section, in init_journal() argument
2704 wraparound_section(ic, &i); in init_journal()
2705 for (j = 0; j < ic->journal_section_sectors; j++) { in init_journal()
2706 struct journal_sector *js = access_journal(ic, i, j); in init_journal()
2708 js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq); in init_journal()
2710 for (j = 0; j < ic->journal_section_entries; j++) { in init_journal()
2711 struct journal_entry *je = access_journal_entry(ic, i, j); in init_journal()
2716 write_journal(ic, start_section, n_sections); in init_journal()
2719 static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id) in find_commit_seq() argument
2723 if (dm_integrity_commit_id(ic, i, j, k) == id) in find_commit_seq()
2726 dm_integrity_io_error(ic, "journal commit id", -EIO); in find_commit_seq()
2730 static void replay_journal(struct dm_integrity_c *ic) in replay_journal() argument
2740 if (ic->mode == 'R') in replay_journal()
2743 if (ic->journal_uptodate) in replay_journal()
2749 if (!ic->just_formatted) { in replay_journal()
2751 rw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL); in replay_journal()
2752 if (ic->journal_io) in replay_journal()
2753 DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal"); in replay_journal()
2754 if (ic->journal_io) { in replay_journal()
2756 crypt_comp.ic = ic; in replay_journal()
2759 encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp); in replay_journal()
2762 DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal"); in replay_journal()
2765 if (dm_integrity_failed(ic)) in replay_journal()
2771 for (i = 0; i < ic->journal_sections; i++) { in replay_journal()
2772 for (j = 0; j < ic->journal_section_sectors; j++) { in replay_journal()
2774 struct journal_sector *js = access_journal(ic, i, j); in replay_journal()
2775 k = find_commit_seq(ic, i, j, js->commit_id); in replay_journal()
2782 for (j = 0; j < ic->journal_section_entries; j++) { in replay_journal()
2783 struct journal_entry *je = access_journal_entry(ic, i, j); in replay_journal()
2801 dm_integrity_io_error(ic, "journal commit ids", -EIO); in replay_journal()
2816 if (unlikely(write_start >= ic->journal_sections)) in replay_journal()
2818 wraparound_section(ic, &write_start); in replay_journal()
2821 for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) { in replay_journal()
2822 for (j = 0; j < ic->journal_section_sectors; j++) { in replay_journal()
2823 struct journal_sector *js = access_journal(ic, i, j); in replay_journal()
2825 if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) { in replay_journal()
2832 i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq); in replay_journal()
2837 if (unlikely(i >= ic->journal_sections)) in replay_journal()
2839 wraparound_section(ic, &i); in replay_journal()
2846 do_journal_write(ic, write_start, write_sections, true); in replay_journal()
2849 if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) { in replay_journal()
2851 ic->commit_seq = want_commit_seq; in replay_journal()
2852 DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq); in replay_journal()
2861 init_journal(ic, s, 1, erase_seq); in replay_journal()
2863 wraparound_section(ic, &s); in replay_journal()
2864 if (ic->journal_sections >= 2) { in replay_journal()
2865 init_journal(ic, s, ic->journal_sections - 2, erase_seq); in replay_journal()
2866 s += ic->journal_sections - 2; in replay_journal()
2867 wraparound_section(ic, &s); in replay_journal()
2868 init_journal(ic, s, 1, erase_seq); in replay_journal()
2872 ic->commit_seq = next_commit_seq(erase_seq); in replay_journal()
2875 ic->committed_section = continue_section; in replay_journal()
2876 ic->n_committed_sections = 0; in replay_journal()
2878 ic->uncommitted_section = continue_section; in replay_journal()
2879 ic->n_uncommitted_sections = 0; in replay_journal()
2881 ic->free_section = continue_section; in replay_journal()
2882 ic->free_section_entry = 0; in replay_journal()
2883 ic->free_sectors = ic->journal_entries; in replay_journal()
2885 ic->journal_tree_root = RB_ROOT; in replay_journal()
2886 for (i = 0; i < ic->journal_entries; i++) in replay_journal()
2887 init_journal_node(&ic->journal_tree[i]); in replay_journal()
2890 static void dm_integrity_enter_synchronous_mode(struct dm_integrity_c *ic) in dm_integrity_enter_synchronous_mode() argument
2894 if (ic->mode == 'B') { in dm_integrity_enter_synchronous_mode()
2895 ic->bitmap_flush_interval = msecs_to_jiffies(10) + 1; in dm_integrity_enter_synchronous_mode()
2896 ic->synchronous_mode = 1; in dm_integrity_enter_synchronous_mode()
2898 cancel_delayed_work_sync(&ic->bitmap_flush_work); in dm_integrity_enter_synchronous_mode()
2899 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0); in dm_integrity_enter_synchronous_mode()
2900 flush_workqueue(ic->commit_wq); in dm_integrity_enter_synchronous_mode()
2906 struct dm_integrity_c *ic = container_of(n, struct dm_integrity_c, reboot_notifier); in dm_integrity_reboot() local
2910 dm_integrity_enter_synchronous_mode(ic); in dm_integrity_reboot()
2917 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private; in dm_integrity_postsuspend() local
2920 WARN_ON(unregister_reboot_notifier(&ic->reboot_notifier)); in dm_integrity_postsuspend()
2922 del_timer_sync(&ic->autocommit_timer); in dm_integrity_postsuspend()
2924 if (ic->recalc_wq) in dm_integrity_postsuspend()
2925 drain_workqueue(ic->recalc_wq); in dm_integrity_postsuspend()
2927 if (ic->mode == 'B') in dm_integrity_postsuspend()
2928 cancel_delayed_work_sync(&ic->bitmap_flush_work); in dm_integrity_postsuspend()
2930 queue_work(ic->commit_wq, &ic->commit_work); in dm_integrity_postsuspend()
2931 drain_workqueue(ic->commit_wq); in dm_integrity_postsuspend()
2933 if (ic->mode == 'J') { in dm_integrity_postsuspend()
2934 if (ic->meta_dev) in dm_integrity_postsuspend()
2935 queue_work(ic->writer_wq, &ic->writer_work); in dm_integrity_postsuspend()
2936 drain_workqueue(ic->writer_wq); in dm_integrity_postsuspend()
2937 dm_integrity_flush_buffers(ic); in dm_integrity_postsuspend()
2940 if (ic->mode == 'B') { in dm_integrity_postsuspend()
2941 dm_integrity_flush_buffers(ic); in dm_integrity_postsuspend()
2944 init_journal(ic, 0, ic->journal_sections, 0); in dm_integrity_postsuspend()
2945 ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP); in dm_integrity_postsuspend()
2946 r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA); in dm_integrity_postsuspend()
2948 dm_integrity_io_error(ic, "writing superblock", r); in dm_integrity_postsuspend()
2952 BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress)); in dm_integrity_postsuspend()
2954 ic->journal_uptodate = true; in dm_integrity_postsuspend()
2959 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private; in dm_integrity_resume() local
2960 __u64 old_provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors); in dm_integrity_resume()
2965 if (ic->provided_data_sectors != old_provided_data_sectors) { in dm_integrity_resume()
2966 if (ic->provided_data_sectors > old_provided_data_sectors && in dm_integrity_resume()
2967 ic->mode == 'B' && in dm_integrity_resume()
2968 ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) { in dm_integrity_resume()
2969 rw_journal_sectors(ic, REQ_OP_READ, 0, 0, in dm_integrity_resume()
2970 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); in dm_integrity_resume()
2971 block_bitmap_op(ic, ic->journal, old_provided_data_sectors, in dm_integrity_resume()
2972 ic->provided_data_sectors - old_provided_data_sectors, BITMAP_OP_SET); in dm_integrity_resume()
2973 rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0, in dm_integrity_resume()
2974 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); in dm_integrity_resume()
2977 ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors); in dm_integrity_resume()
2978 r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA); in dm_integrity_resume()
2980 dm_integrity_io_error(ic, "writing superblock", r); in dm_integrity_resume()
2983 if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) { in dm_integrity_resume()
2985 rw_journal_sectors(ic, REQ_OP_READ, 0, 0, in dm_integrity_resume()
2986 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); in dm_integrity_resume()
2987 if (ic->mode == 'B') { in dm_integrity_resume()
2988 if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) { in dm_integrity_resume()
2989 block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal); in dm_integrity_resume()
2990 block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal); in dm_integrity_resume()
2991 if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, in dm_integrity_resume()
2993 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING); in dm_integrity_resume()
2994 ic->sb->recalc_sector = cpu_to_le64(0); in dm_integrity_resume()
2998 ic->sb->log2_blocks_per_bitmap_bit, ic->log2_blocks_per_bitmap_bit); in dm_integrity_resume()
2999 ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit; in dm_integrity_resume()
3000 block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET); in dm_integrity_resume()
3001 block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET); in dm_integrity_resume()
3002 block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET); in dm_integrity_resume()
3003 rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0, in dm_integrity_resume()
3004 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); in dm_integrity_resume()
3005 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING); in dm_integrity_resume()
3006 ic->sb->recalc_sector = cpu_to_le64(0); in dm_integrity_resume()
3009 if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit && in dm_integrity_resume()
3010 … block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR))) { in dm_integrity_resume()
3011 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING); in dm_integrity_resume()
3012 ic->sb->recalc_sector = cpu_to_le64(0); in dm_integrity_resume()
3014 init_journal(ic, 0, ic->journal_sections, 0); in dm_integrity_resume()
3015 replay_journal(ic); in dm_integrity_resume()
3016 ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP); in dm_integrity_resume()
3018 r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA); in dm_integrity_resume()
3020 dm_integrity_io_error(ic, "writing superblock", r); in dm_integrity_resume()
3022 replay_journal(ic); in dm_integrity_resume()
3023 if (ic->mode == 'B') { in dm_integrity_resume()
3024 ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP); in dm_integrity_resume()
3025 ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit; in dm_integrity_resume()
3026 r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA); in dm_integrity_resume()
3028 dm_integrity_io_error(ic, "writing superblock", r); in dm_integrity_resume()
3030 block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR); in dm_integrity_resume()
3031 block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR); in dm_integrity_resume()
3032 block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR); in dm_integrity_resume()
3033 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) && in dm_integrity_resume()
3034 le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors) { in dm_integrity_resume()
3035 block_bitmap_op(ic, ic->journal, le64_to_cpu(ic->sb->recalc_sector), in dm_integrity_resume()
3036 ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET); in dm_integrity_resume()
3037 block_bitmap_op(ic, ic->recalc_bitmap, le64_to_cpu(ic->sb->recalc_sector), in dm_integrity_resume()
3038 ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET); in dm_integrity_resume()
3039 block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector), in dm_integrity_resume()
3040 ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET); in dm_integrity_resume()
3042 rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0, in dm_integrity_resume()
3043 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); in dm_integrity_resume()
3047 DEBUG_print("testing recalc: %x\n", ic->sb->flags); in dm_integrity_resume()
3048 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) { in dm_integrity_resume()
3049 __u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector); in dm_integrity_resume()
3050 DEBUG_print("recalc pos: %llx / %llx\n", recalc_pos, ic->provided_data_sectors); in dm_integrity_resume()
3051 if (recalc_pos < ic->provided_data_sectors) { in dm_integrity_resume()
3052 queue_work(ic->recalc_wq, &ic->recalc_work); in dm_integrity_resume()
3053 } else if (recalc_pos > ic->provided_data_sectors) { in dm_integrity_resume()
3054 ic->sb->recalc_sector = cpu_to_le64(ic->provided_data_sectors); in dm_integrity_resume()
3055 recalc_write_super(ic); in dm_integrity_resume()
3059 ic->reboot_notifier.notifier_call = dm_integrity_reboot; in dm_integrity_resume()
3060 ic->reboot_notifier.next = NULL; in dm_integrity_resume()
3061 ic->reboot_notifier.priority = INT_MAX - 1; /* be notified after md and before hardware drivers */ in dm_integrity_resume()
3062 WARN_ON(register_reboot_notifier(&ic->reboot_notifier)); in dm_integrity_resume()
3066 dm_integrity_enter_synchronous_mode(ic); in dm_integrity_resume()
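
Several block_bitmap_op() calls in dm_integrity_resume() mark whole sector ranges, typically from the recorded recalc_sector to the end of the provided data. With one bitmap bit covering sectors_per_block << log2_blocks_per_bitmap_bit sectors (the sectors_per_bit value reported by the status output further below), the range-to-bits mapping can be re-stated in userspace roughly as follows. This is a sketch of the arithmetic only, not of the driver's page-list bitmap code, and the geometry in main() is assumed.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Set every bitmap bit that covers any sector in [start, start + count).
     * One bit covers (1 << log2_sectors_per_block) << log2_blocks_per_bitmap_bit
     * sectors. */
    static void bitmap_set_range(uint8_t *bitmap, uint64_t start, uint64_t count,
                                 unsigned log2_sectors_per_block,
                                 unsigned log2_blocks_per_bitmap_bit)
    {
            unsigned shift = log2_sectors_per_block + log2_blocks_per_bitmap_bit;
            uint64_t first_bit = start >> shift;
            uint64_t last_bit = (start + count - 1) >> shift;
            uint64_t bit;

            for (bit = first_bit; bit <= last_bit; bit++)
                    bitmap[bit / 8] |= 1u << (bit % 8);
    }

    int main(void)
    {
            uint8_t bitmap[16];

            memset(bitmap, 0, sizeof(bitmap));
            /* Assumed geometry: 4KiB blocks (8 sectors), 16 blocks per bit,
             * i.e. sectors_per_bit = 128; mark sectors 4096..12287. */
            bitmap_set_range(bitmap, 4096, 8192, 3, 4);
            printf("%02x %02x\n", (unsigned)bitmap[4], (unsigned)bitmap[12]); /* ff 00 */
            return 0;
    }
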
3073 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private; in dm_integrity_status() local
3080 (unsigned long long)atomic64_read(&ic->number_of_mismatches), in dm_integrity_status()
3081 ic->provided_data_sectors); in dm_integrity_status()
3082 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) in dm_integrity_status()
3083 DMEMIT(" %llu", le64_to_cpu(ic->sb->recalc_sector)); in dm_integrity_status()
3089 __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100; in dm_integrity_status()
3090 watermark_percentage += ic->journal_entries / 2; in dm_integrity_status()
3091 do_div(watermark_percentage, ic->journal_entries); in dm_integrity_status()
3093 arg_count += !!ic->meta_dev; in dm_integrity_status()
3094 arg_count += ic->sectors_per_block != 1; in dm_integrity_status()
3095 arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)); in dm_integrity_status()
3096 arg_count += ic->discard; in dm_integrity_status()
3097 arg_count += ic->mode == 'J'; in dm_integrity_status()
3098 arg_count += ic->mode == 'J'; in dm_integrity_status()
3099 arg_count += ic->mode == 'B'; in dm_integrity_status()
3100 arg_count += ic->mode == 'B'; in dm_integrity_status()
3101 arg_count += !!ic->internal_hash_alg.alg_string; in dm_integrity_status()
3102 arg_count += !!ic->journal_crypt_alg.alg_string; in dm_integrity_status()
3103 arg_count += !!ic->journal_mac_alg.alg_string; in dm_integrity_status()
3104 arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0; in dm_integrity_status()
3105 DMEMIT("%s %llu %u %c %u", ic->dev->name, ic->start, in dm_integrity_status()
3106 ic->tag_size, ic->mode, arg_count); in dm_integrity_status()
3107 if (ic->meta_dev) in dm_integrity_status()
3108 DMEMIT(" meta_device:%s", ic->meta_dev->name); in dm_integrity_status()
3109 if (ic->sectors_per_block != 1) in dm_integrity_status()
3110 DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT); in dm_integrity_status()
3111 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) in dm_integrity_status()
3113 if (ic->discard) in dm_integrity_status()
3115 DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS); in dm_integrity_status()
3116 DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors); in dm_integrity_status()
3117 DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors); in dm_integrity_status()
3118 if (ic->mode == 'J') { in dm_integrity_status()
3120 DMEMIT(" commit_time:%u", ic->autocommit_msec); in dm_integrity_status()
3122 if (ic->mode == 'B') { in dm_integrity_status()
3123 …DMEMIT(" sectors_per_bit:%llu", (sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit); in dm_integrity_status()
3124 DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval)); in dm_integrity_status()
3126 if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0) in dm_integrity_status()
3131 if (ic->a.alg_string) { \ in dm_integrity_status()
3132 DMEMIT(" %s:%s", n, ic->a.alg_string); \ in dm_integrity_status()
3133 if (ic->a.key_string) \ in dm_integrity_status()
3134 DMEMIT(":%s", ic->a.key_string);\ in dm_integrity_status()
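
The journal_watermark argument echoed by dm_integrity_status() above is reconstructed from free_sectors_threshold with a round-to-nearest division (the do_div() sequence near the top of the function). A minimal userspace re-statement with assumed entry counts:

    #include <stdio.h>

    int main(void)
    {
            /* Assumed example values; the driver derives them from the journal
             * geometry and the journal_watermark table argument. */
            unsigned long long journal_entries = 1504;
            unsigned long long free_sectors_threshold = 752;

            /* Scale to percent, add half the divisor so the integer division
             * rounds to nearest (do_div() in the kernel), then divide. */
            unsigned long long watermark_percentage =
                    (journal_entries - free_sectors_threshold) * 100;
            watermark_percentage += journal_entries / 2;
            watermark_percentage /= journal_entries;

            printf("journal_watermark:%llu\n", watermark_percentage); /* 50 */
            return 0;
    }
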
3148 struct dm_integrity_c *ic = ti->private; in dm_integrity_iterate_devices() local
3150 if (!ic->meta_dev) in dm_integrity_iterate_devices()
3151 return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data); in dm_integrity_iterate_devices()
3153 return fn(ti, ic->dev, 0, ti->len, data); in dm_integrity_iterate_devices()
3158 struct dm_integrity_c *ic = ti->private; in dm_integrity_io_hints() local
3160 if (ic->sectors_per_block > 1) { in dm_integrity_io_hints()
3161 limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT; in dm_integrity_io_hints()
3162 limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT; in dm_integrity_io_hints()
3163 blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT); in dm_integrity_io_hints()
3167 static void calculate_journal_section_size(struct dm_integrity_c *ic) in calculate_journal_section_size() argument
3171 ic->journal_sections = le32_to_cpu(ic->sb->journal_sections); in calculate_journal_section_size()
3172 …ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block])… in calculate_journal_section_size()
3175 if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) in calculate_journal_section_size()
3177 ic->journal_entries_per_sector = sector_space / ic->journal_entry_size; in calculate_journal_section_size()
3178 ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS; in calculate_journal_section_size()
3179 …ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JO… in calculate_journal_section_size()
3180 ic->journal_entries = ic->journal_section_entries * ic->journal_sections; in calculate_journal_section_size()
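
calculate_journal_section_size() above packs fixed-size journal entries into each journal sector, and a section's size is the data sectors for all of its entries plus the JOURNAL_BLOCK_SECTORS entry sectors themselves. A standalone sketch of that arithmetic, with assumed values standing in for the constants that are cut off in the listing (usable bytes per journal sector, rounded-up entry size, JOURNAL_BLOCK_SECTORS):

    #include <stdio.h>

    int main(void)
    {
            /* Assumed stand-ins; the real values depend on tag_size, the data
             * block size, and whether a per-sector journal MAC is stored. */
            unsigned sector_space = 512 - 16;        /* usable bytes per journal sector */
            unsigned journal_entry_size = 32;        /* rounded-up struct journal_entry */
            unsigned journal_block_sectors = 8;      /* JOURNAL_BLOCK_SECTORS (assumed) */
            unsigned log2_sectors_per_block = 0;     /* 512-byte data blocks */
            unsigned journal_sections = 64;

            unsigned entries_per_sector = sector_space / journal_entry_size;
            unsigned section_entries = entries_per_sector * journal_block_sectors;
            unsigned section_sectors =
                    (section_entries << log2_sectors_per_block) + journal_block_sectors;
            unsigned journal_entries = section_entries * journal_sections;

            printf("%u entries/sector, %u entries/section, "
                   "%u sectors/section, %u journal entries\n",
                   entries_per_sector, section_entries,
                   section_sectors, journal_entries);
            return 0;
    }
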
3183 static int calculate_device_limits(struct dm_integrity_c *ic) in calculate_device_limits() argument
3187 calculate_journal_section_size(ic); in calculate_device_limits()
3188 initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections; in calculate_device_limits()
3189 …if (initial_sectors + METADATA_PADDING_SECTORS >= ic->meta_device_sectors || initial_sectors > UIN… in calculate_device_limits()
3191 ic->initial_sectors = initial_sectors; in calculate_device_limits()
3193 if (!ic->meta_dev) { in calculate_device_limits()
3198 ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING) ? in calculate_device_limits()
3202 …ic->metadata_run = round_up((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2… in calculate_device_limits()
3204 if (!(ic->metadata_run & (ic->metadata_run - 1))) in calculate_device_limits()
3205 ic->log2_metadata_run = __ffs(ic->metadata_run); in calculate_device_limits()
3207 ic->log2_metadata_run = -1; in calculate_device_limits()
3209 get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset); in calculate_device_limits()
3210 last_sector = get_data_sector(ic, last_area, last_offset); in calculate_device_limits()
3211 if (last_sector < ic->start || last_sector >= ic->meta_device_sectors) in calculate_device_limits()
3214 __u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size; in calculate_device_limits()
3215 meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1)) in calculate_device_limits()
3216 >> (ic->log2_buffer_sectors + SECTOR_SHIFT); in calculate_device_limits()
3217 meta_size <<= ic->log2_buffer_sectors; in calculate_device_limits()
3218 if (ic->initial_sectors + meta_size < ic->initial_sectors || in calculate_device_limits()
3219 ic->initial_sectors + meta_size > ic->meta_device_sectors) in calculate_device_limits()
3221 ic->metadata_run = 1; in calculate_device_limits()
3222 ic->log2_metadata_run = 0; in calculate_device_limits()
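
calculate_device_limits() keeps metadata_run both as a plain count and, when it happens to be a power of two, as a shift in log2_metadata_run (-1 otherwise), so the mapping helpers can shift instead of multiply; dm_integrity_ctr() applies the same test to tag_size further below. A minimal sketch of that power-of-two test:

    #include <stdio.h>
    #include <strings.h>        /* ffs() as a userspace stand-in for __ffs() */

    /* Return log2(x) when x is a non-zero power of two, -1 otherwise,
     * mirroring the metadata_run / log2_metadata_run handling above. */
    static int log2_if_power_of_two(unsigned x)
    {
            if (x && !(x & (x - 1)))
                    return ffs(x) - 1;  /* ffs() is 1-based, __ffs() is 0-based */
            return -1;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   log2_if_power_of_two(64),    /* 6  */
                   log2_if_power_of_two(96),    /* -1 */
                   log2_if_power_of_two(1));    /* 0  */
            return 0;
    }
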
3228 static void get_provided_data_sectors(struct dm_integrity_c *ic) in get_provided_data_sectors() argument
3230 if (!ic->meta_dev) { in get_provided_data_sectors()
3232 ic->provided_data_sectors = 0; in get_provided_data_sectors()
3233 for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) { in get_provided_data_sectors()
3234 __u64 prev_data_sectors = ic->provided_data_sectors; in get_provided_data_sectors()
3236 ic->provided_data_sectors |= (sector_t)1 << test_bit; in get_provided_data_sectors()
3237 if (calculate_device_limits(ic)) in get_provided_data_sectors()
3238 ic->provided_data_sectors = prev_data_sectors; in get_provided_data_sectors()
3241 ic->provided_data_sectors = ic->data_device_sectors; in get_provided_data_sectors()
3242 ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1); in get_provided_data_sectors()
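
get_provided_data_sectors() above maximizes provided_data_sectors greedily: it tentatively sets the next lower bit and keeps it only if calculate_device_limits() still succeeds. A userspace sketch of the same probe, with a made-up fits() predicate standing in for calculate_device_limits():

    #include <stdio.h>

    /* Assumed stand-in for calculate_device_limits(): the data plus roughly
     * 1/64 of its size in metadata must fit on the device. */
    static int fits(unsigned long long data_sectors,
                    unsigned long long device_sectors)
    {
            return data_sectors + (data_sectors >> 6) <= device_sectors;
    }

    static unsigned long long max_data_sectors(unsigned long long device_sectors)
    {
            unsigned long long result = 0;
            int bit;

            /* Probe from the top bit down to bit 3 (8 sectors); the driver
             * starts from the highest bit of the metadata device size. */
            for (bit = 63; bit >= 3; bit--) {
                    unsigned long long prev = result;

                    result |= 1ULL << bit;
                    if (!fits(result, device_sectors))
                            result = prev;
            }
            return result;
    }

    int main(void)
    {
            printf("0x%llx\n", max_data_sectors(1ULL << 30));
            return 0;
    }
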
3246 static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned inte… in initialize_superblock() argument
3251 memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT); in initialize_superblock()
3252 memcpy(ic->sb->magic, SB_MAGIC, 8); in initialize_superblock()
3253 ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size); in initialize_superblock()
3254 ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block); in initialize_superblock()
3255 if (ic->journal_mac_alg.alg_string) in initialize_superblock()
3256 ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC); in initialize_superblock()
3258 calculate_journal_section_size(ic); in initialize_superblock()
3259 journal_sections = journal_sectors / ic->journal_section_sectors; in initialize_superblock()
3263 if (!ic->meta_dev) { in initialize_superblock()
3264 if (ic->fix_padding) in initialize_superblock()
3265 ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_PADDING); in initialize_superblock()
3266 ic->sb->journal_sections = cpu_to_le32(journal_sections); in initialize_superblock()
3269 ic->sb->log2_interleave_sectors = __fls(interleave_sectors); in initialize_superblock()
3270 …ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_s… in initialize_superblock()
3271 …ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_s… in initialize_superblock()
3273 get_provided_data_sectors(ic); in initialize_superblock()
3274 if (!ic->provided_data_sectors) in initialize_superblock()
3277 ic->sb->log2_interleave_sectors = 0; in initialize_superblock()
3279 get_provided_data_sectors(ic); in initialize_superblock()
3280 if (!ic->provided_data_sectors) in initialize_superblock()
3284 ic->sb->journal_sections = cpu_to_le32(0); in initialize_superblock()
3286 __u32 prev_journal_sections = le32_to_cpu(ic->sb->journal_sections); in initialize_superblock()
3290 ic->sb->journal_sections = cpu_to_le32(test_journal_sections); in initialize_superblock()
3291 if (calculate_device_limits(ic)) in initialize_superblock()
3292 ic->sb->journal_sections = cpu_to_le32(prev_journal_sections); in initialize_superblock()
3295 if (!le32_to_cpu(ic->sb->journal_sections)) { in initialize_superblock()
3296 if (ic->log2_buffer_sectors > 3) { in initialize_superblock()
3297 ic->log2_buffer_sectors--; in initialize_superblock()
3304 ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors); in initialize_superblock()
3306 sb_set_version(ic); in initialize_superblock()
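
For the non-meta_dev case, initialize_superblock() above stores the interleave as a log2 and clamps it between MIN_LOG2_INTERLEAVE_SECTORS and MAX_LOG2_INTERLEAVE_SECTORS (the truncated max()/min() lines). A sketch of that clamping with assumed bounds; the real constants and the default interleave come from the driver and the table line:

    #include <stdio.h>

    /* Assumed bounds; the driver defines its own MIN_/MAX_LOG2_INTERLEAVE_SECTORS. */
    #define MIN_LOG2_INTERLEAVE_SECTORS 3
    #define MAX_LOG2_INTERLEAVE_SECTORS 31

    /* Index of the highest set bit, a userspace stand-in for the kernel's __fls(). */
    static unsigned fls_log2(unsigned x)
    {
            unsigned r = 0;

            while (x >>= 1)
                    r++;
            return r;
    }

    int main(void)
    {
            unsigned interleave_sectors = 32768;    /* assumed table argument */
            unsigned log2_interleave = fls_log2(interleave_sectors);

            if (log2_interleave < MIN_LOG2_INTERLEAVE_SECTORS)
                    log2_interleave = MIN_LOG2_INTERLEAVE_SECTORS;
            if (log2_interleave > MAX_LOG2_INTERLEAVE_SECTORS)
                    log2_interleave = MAX_LOG2_INTERLEAVE_SECTORS;
            printf("log2_interleave_sectors = %u\n", log2_interleave);  /* 15 */
            return 0;
    }
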
3311 static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic) in dm_integrity_set() argument
3318 bi.tuple_size = ic->tag_size; in dm_integrity_set()
3320 bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT; in dm_integrity_set()
3361 static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **s… in dm_integrity_free_journal_scatterlist() argument
3364 for (i = 0; i < ic->journal_sections; i++) in dm_integrity_free_journal_scatterlist()
3369 static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic, in dm_integrity_alloc_journal_scatterlist() argument
3375 sl = kvmalloc_array(ic->journal_sections, in dm_integrity_alloc_journal_scatterlist()
3381 for (i = 0; i < ic->journal_sections; i++) { in dm_integrity_alloc_journal_scatterlist()
3388 page_list_location(ic, i, 0, &start_index, &start_offset); in dm_integrity_alloc_journal_scatterlist()
3389 page_list_location(ic, i, ic->journal_section_sectors - 1, in dm_integrity_alloc_journal_scatterlist()
3397 dm_integrity_free_journal_scatterlist(ic, sl); in dm_integrity_alloc_journal_scatterlist()
3488 static int create_journal(struct dm_integrity_c *ic, char **error) in create_journal() argument
3496 ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL); in create_journal()
3497 ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL); in create_journal()
3498 ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL); in create_journal()
3499 ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL); in create_journal()
3501 journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors, in create_journal()
3509 ic->journal_pages = journal_pages; in create_journal()
3511 ic->journal = dm_integrity_alloc_page_list(ic->journal_pages); in create_journal()
3512 if (!ic->journal) { in create_journal()
3517 if (ic->journal_crypt_alg.alg_string) { in create_journal()
3521 comp.ic = ic; in create_journal()
3522 …ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, CRYPTO_ALG_ALLOCATE… in create_journal()
3523 if (IS_ERR(ic->journal_crypt)) { in create_journal()
3525 r = PTR_ERR(ic->journal_crypt); in create_journal()
3526 ic->journal_crypt = NULL; in create_journal()
3529 ivsize = crypto_skcipher_ivsize(ic->journal_crypt); in create_journal()
3530 blocksize = crypto_skcipher_blocksize(ic->journal_crypt); in create_journal()
3532 if (ic->journal_crypt_alg.key) { in create_journal()
3533 r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key, in create_journal()
3534 ic->journal_crypt_alg.key_size); in create_journal()
3541 ic->journal_crypt_alg.alg_string, blocksize, ivsize); in create_journal()
3543 ic->journal_io = dm_integrity_alloc_page_list(ic->journal_pages); in create_journal()
3544 if (!ic->journal_io) { in create_journal()
3553 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL); in create_journal()
3567 ic->journal_xor = dm_integrity_alloc_page_list(ic->journal_pages); in create_journal()
3568 if (!ic->journal_xor) { in create_journal()
3574 sg = kvmalloc_array(ic->journal_pages + 1, in create_journal()
3582 sg_init_table(sg, ic->journal_pages + 1); in create_journal()
3583 for (i = 0; i < ic->journal_pages; i++) { in create_journal()
3584 char *va = lowmem_page_address(ic->journal_xor[i].page); in create_journal()
3588 sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids); in create_journal()
3591 PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv); in create_journal()
3597 r = dm_integrity_failed(ic); in create_journal()
3602 DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data"); in create_journal()
3604 crypto_free_skcipher(ic->journal_crypt); in create_journal()
3605 ic->journal_crypt = NULL; in create_journal()
3609 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL); in create_journal()
3630 ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal); in create_journal()
3631 if (!ic->journal_scatterlist) { in create_journal()
3636 ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io); in create_journal()
3637 if (!ic->journal_io_scatterlist) { in create_journal()
3642 ic->sk_requests = kvmalloc_array(ic->journal_sections, in create_journal()
3645 if (!ic->sk_requests) { in create_journal()
3650 for (i = 0; i < ic->journal_sections; i++) { in create_journal()
3666 r = dm_integrity_failed(ic); in create_journal()
3672 section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL); in create_journal()
3687 section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT; in create_journal()
3688 ic->sk_requests[i] = section_req; in create_journal()
3698 if (ic->commit_ids[j] == ic->commit_ids[i]) { in create_journal()
3699 ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1); in create_journal()
3703 DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]); in create_journal()
3706 journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node); in create_journal()
3712 ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL); in create_journal()
3713 if (!ic->journal_tree) { in create_journal()
3751 struct dm_integrity_c *ic; in dm_integrity_ctr() local
3775 ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL); in dm_integrity_ctr()
3776 if (!ic) { in dm_integrity_ctr()
3780 ti->private = ic; in dm_integrity_ctr()
3782 ic->ti = ti; in dm_integrity_ctr()
3784 ic->in_progress = RB_ROOT; in dm_integrity_ctr()
3785 INIT_LIST_HEAD(&ic->wait_list); in dm_integrity_ctr()
3786 init_waitqueue_head(&ic->endio_wait); in dm_integrity_ctr()
3787 bio_list_init(&ic->flush_bio_list); in dm_integrity_ctr()
3788 init_waitqueue_head(&ic->copy_to_journal_wait); in dm_integrity_ctr()
3789 init_completion(&ic->crypto_backoff); in dm_integrity_ctr()
3790 atomic64_set(&ic->number_of_mismatches, 0); in dm_integrity_ctr()
3791 ic->bitmap_flush_interval = BITMAP_FLUSH_INTERVAL; in dm_integrity_ctr()
3793 r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev); in dm_integrity_ctr()
3804 ic->start = start; in dm_integrity_ctr()
3807 if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) { in dm_integrity_ctr()
3816 ic->mode = argv[3][0]; in dm_integrity_ctr()
3828 ic->sectors_per_block = 1; in dm_integrity_ctr()
3857 if (ic->meta_dev) { in dm_integrity_ctr()
3858 dm_put_device(ti, ic->meta_dev); in dm_integrity_ctr()
3859 ic->meta_dev = NULL; in dm_integrity_ctr()
3862 dm_table_get_mode(ti->table), &ic->meta_dev); in dm_integrity_ctr()
3875 ic->sectors_per_block = val >> SECTOR_SHIFT; in dm_integrity_ctr()
3883 ic->bitmap_flush_interval = msecs_to_jiffies(val); in dm_integrity_ctr()
3885 r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error, in dm_integrity_ctr()
3890 r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error, in dm_integrity_ctr()
3895 r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error, in dm_integrity_ctr()
3900 ic->recalculate_flag = true; in dm_integrity_ctr()
3902 ic->discard = true; in dm_integrity_ctr()
3904 ic->fix_padding = true; in dm_integrity_ctr()
3912 ic->data_device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT; in dm_integrity_ctr()
3913 if (!ic->meta_dev) in dm_integrity_ctr()
3914 ic->meta_device_sectors = ic->data_device_sectors; in dm_integrity_ctr()
3916 ic->meta_device_sectors = i_size_read(ic->meta_dev->bdev->bd_inode) >> SECTOR_SHIFT; in dm_integrity_ctr()
3920 ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR); in dm_integrity_ctr()
3925 ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT); in dm_integrity_ctr()
3927 r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error, in dm_integrity_ctr()
3932 r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error, in dm_integrity_ctr()
3937 if (!ic->tag_size) { in dm_integrity_ctr()
3938 if (!ic->internal_hash) { in dm_integrity_ctr()
3943 ic->tag_size = crypto_shash_digestsize(ic->internal_hash); in dm_integrity_ctr()
3945 if (ic->tag_size > MAX_TAG_SIZE) { in dm_integrity_ctr()
3950 if (!(ic->tag_size & (ic->tag_size - 1))) in dm_integrity_ctr()
3951 ic->log2_tag_size = __ffs(ic->tag_size); in dm_integrity_ctr()
3953 ic->log2_tag_size = -1; in dm_integrity_ctr()
3955 if (ic->mode == 'B' && !ic->internal_hash) { in dm_integrity_ctr()
3961 if (ic->discard && !ic->internal_hash) { in dm_integrity_ctr()
3967 ic->autocommit_jiffies = msecs_to_jiffies(sync_msec); in dm_integrity_ctr()
3968 ic->autocommit_msec = sync_msec; in dm_integrity_ctr()
3969 timer_setup(&ic->autocommit_timer, autocommit_fn, 0); in dm_integrity_ctr()
3971 ic->io = dm_io_client_create(); in dm_integrity_ctr()
3972 if (IS_ERR(ic->io)) { in dm_integrity_ctr()
3973 r = PTR_ERR(ic->io); in dm_integrity_ctr()
3974 ic->io = NULL; in dm_integrity_ctr()
3979 r = mempool_init_slab_pool(&ic->journal_io_mempool, JOURNAL_IO_MEMPOOL, journal_io_cache); in dm_integrity_ctr()
3985 ic->metadata_wq = alloc_workqueue("dm-integrity-metadata", in dm_integrity_ctr()
3987 if (!ic->metadata_wq) { in dm_integrity_ctr()
3997 ic->wait_wq = alloc_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM | WQ_UNBOUND, 1); in dm_integrity_ctr()
3998 if (!ic->wait_wq) { in dm_integrity_ctr()
4004 ic->offload_wq = alloc_workqueue("dm-integrity-offload", WQ_MEM_RECLAIM, in dm_integrity_ctr()
4006 if (!ic->offload_wq) { in dm_integrity_ctr()
4012 ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1); in dm_integrity_ctr()
4013 if (!ic->commit_wq) { in dm_integrity_ctr()
4018 INIT_WORK(&ic->commit_work, integrity_commit); in dm_integrity_ctr()
4020 if (ic->mode == 'J' || ic->mode == 'B') { in dm_integrity_ctr()
4021 ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1); in dm_integrity_ctr()
4022 if (!ic->writer_wq) { in dm_integrity_ctr()
4027 INIT_WORK(&ic->writer_work, integrity_writer); in dm_integrity_ctr()
4030 ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL); in dm_integrity_ctr()
4031 if (!ic->sb) { in dm_integrity_ctr()
4037 r = sync_rw_sb(ic, REQ_OP_READ, 0); in dm_integrity_ctr()
4043 if (memcmp(ic->sb->magic, SB_MAGIC, 8)) { in dm_integrity_ctr()
4044 if (ic->mode != 'R') { in dm_integrity_ctr()
4045 if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) { in dm_integrity_ctr()
4052 r = initialize_superblock(ic, journal_sectors, interleave_sectors); in dm_integrity_ctr()
4057 if (ic->mode != 'R') in dm_integrity_ctr()
4061 if (!ic->sb->version || ic->sb->version > SB_VERSION_4) { in dm_integrity_ctr()
4066 if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) { in dm_integrity_ctr()
4071 if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) { in dm_integrity_ctr()
4076 if (!le32_to_cpu(ic->sb->journal_sections)) { in dm_integrity_ctr()
4082 if (!ic->meta_dev) { in dm_integrity_ctr()
4083 if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS || in dm_integrity_ctr()
4084 ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) { in dm_integrity_ctr()
4090 if (ic->sb->log2_interleave_sectors) { in dm_integrity_ctr()
4096 …if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string)… in dm_integrity_ctr()
4102 get_provided_data_sectors(ic); in dm_integrity_ctr()
4103 if (!ic->provided_data_sectors) { in dm_integrity_ctr()
4110 r = calculate_device_limits(ic); in dm_integrity_ctr()
4112 if (ic->meta_dev) { in dm_integrity_ctr()
4113 if (ic->log2_buffer_sectors > 3) { in dm_integrity_ctr()
4114 ic->log2_buffer_sectors--; in dm_integrity_ctr()
4124 if (log2_sectors_per_bitmap_bit < ic->sb->log2_sectors_per_block) in dm_integrity_ctr()
4125 log2_sectors_per_bitmap_bit = ic->sb->log2_sectors_per_block; in dm_integrity_ctr()
4127 …bits_in_journal = ((__u64)ic->journal_section_sectors * ic->journal_sections) << (SECTOR_SHIFT + 3… in dm_integrity_ctr()
4130 …while (bits_in_journal < (ic->provided_data_sectors + ((sector_t)1 << log2_sectors_per_bitmap_bit)… in dm_integrity_ctr()
4133 log2_blocks_per_bitmap_bit = log2_sectors_per_bitmap_bit - ic->sb->log2_sectors_per_block; in dm_integrity_ctr()
4134 ic->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit; in dm_integrity_ctr()
4136 ic->sb->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit; in dm_integrity_ctr()
4138 n_bitmap_bits = ((ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) in dm_integrity_ctr()
4140 ic->n_bitmap_blocks = DIV_ROUND_UP(n_bitmap_bits, BITMAP_BLOCK_SIZE * 8); in dm_integrity_ctr()
4142 if (!ic->meta_dev) in dm_integrity_ctr()
4143 ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run)); in dm_integrity_ctr()
4145 if (ti->len > ic->provided_data_sectors) { in dm_integrity_ctr()
4152 threshold = (__u64)ic->journal_entries * (100 - journal_watermark); in dm_integrity_ctr()
4155 ic->free_sectors_threshold = threshold; in dm_integrity_ctr()
4158 DEBUG_print(" integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size)); in dm_integrity_ctr()
4159 DEBUG_print(" journal_entry_size %u\n", ic->journal_entry_size); in dm_integrity_ctr()
4160 DEBUG_print(" journal_entries_per_sector %u\n", ic->journal_entries_per_sector); in dm_integrity_ctr()
4161 DEBUG_print(" journal_section_entries %u\n", ic->journal_section_entries); in dm_integrity_ctr()
4162 DEBUG_print(" journal_section_sectors %u\n", ic->journal_section_sectors); in dm_integrity_ctr()
4163 DEBUG_print(" journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections)); in dm_integrity_ctr()
4164 DEBUG_print(" journal_entries %u\n", ic->journal_entries); in dm_integrity_ctr()
4165 DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors); in dm_integrity_ctr()
4166 DEBUG_print(" data_device_sectors 0x%llx\n", i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT); in dm_integrity_ctr()
4167 DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors); in dm_integrity_ctr()
4168 DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run); in dm_integrity_ctr()
4169 DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run); in dm_integrity_ctr()
4170 …DEBUG_print(" provided_data_sectors 0x%llx (%llu)\n", ic->provided_data_sectors, ic->provided_data… in dm_integrity_ctr()
4171 DEBUG_print(" log2_buffer_sectors %u\n", ic->log2_buffer_sectors); in dm_integrity_ctr()
4174 if (ic->recalculate_flag && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) { in dm_integrity_ctr()
4175 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING); in dm_integrity_ctr()
4176 ic->sb->recalc_sector = cpu_to_le64(0); in dm_integrity_ctr()
4179 if (ic->internal_hash) { in dm_integrity_ctr()
4180 ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1); in dm_integrity_ctr()
4181 if (!ic->recalc_wq ) { in dm_integrity_ctr()
4186 INIT_WORK(&ic->recalc_work, integrity_recalc); in dm_integrity_ctr()
4187 ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT); in dm_integrity_ctr()
4188 if (!ic->recalc_buffer) { in dm_integrity_ctr()
4193 ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block, in dm_integrity_ctr()
4194 ic->tag_size, GFP_KERNEL); in dm_integrity_ctr()
4195 if (!ic->recalc_tags) { in dm_integrity_ctr()
4202 ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev, in dm_integrity_ctr()
4203 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL); in dm_integrity_ctr()
4204 if (IS_ERR(ic->bufio)) { in dm_integrity_ctr()
4205 r = PTR_ERR(ic->bufio); in dm_integrity_ctr()
4207 ic->bufio = NULL; in dm_integrity_ctr()
4210 dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors); in dm_integrity_ctr()
4212 if (ic->mode != 'R') { in dm_integrity_ctr()
4213 r = create_journal(ic, &ti->error); in dm_integrity_ctr()
4219 if (ic->mode == 'B') { in dm_integrity_ctr()
4221 unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE); in dm_integrity_ctr()
4223 ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages); in dm_integrity_ctr()
4224 if (!ic->recalc_bitmap) { in dm_integrity_ctr()
4228 ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages); in dm_integrity_ctr()
4229 if (!ic->may_write_bitmap) { in dm_integrity_ctr()
4233 ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL); in dm_integrity_ctr()
4234 if (!ic->bbs) { in dm_integrity_ctr()
4238 INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work); in dm_integrity_ctr()
4239 for (i = 0; i < ic->n_bitmap_blocks; i++) { in dm_integrity_ctr()
4240 struct bitmap_block_status *bbs = &ic->bbs[i]; in dm_integrity_ctr()
4244 bbs->ic = ic; in dm_integrity_ctr()
4253 bbs->bitmap = lowmem_page_address(ic->journal[pl_index].page) + pl_offset; in dm_integrity_ctr()
4260 init_journal(ic, 0, ic->journal_sections, 0); in dm_integrity_ctr()
4261 r = dm_integrity_failed(ic); in dm_integrity_ctr()
4266 r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA); in dm_integrity_ctr()
4271 ic->just_formatted = true; in dm_integrity_ctr()
4274 if (!ic->meta_dev) { in dm_integrity_ctr()
4275 r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors); in dm_integrity_ctr()
4279 if (ic->mode == 'B') { in dm_integrity_ctr()
4280 …unsigned max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMA… in dm_integrity_ctr()
4291 if (!ic->internal_hash) in dm_integrity_ctr()
4292 dm_integrity_set(ti, ic); in dm_integrity_ctr()
4296 if (ic->discard) in dm_integrity_ctr()
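
Earlier in dm_integrity_ctr(), bitmap mode sizes its on-disk bitmap by counting one bit per group of data blocks and rounding up to whole bitmap blocks (n_bitmap_blocks = DIV_ROUND_UP(n_bitmap_bits, BITMAP_BLOCK_SIZE * 8)). A standalone sketch of that sizing; BITMAP_BLOCK_SIZE is taken as 4096 bytes here purely for illustration, and the geometry in main() is made up:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            /* Assumed example geometry: 2^30 sectors of 512 bytes (512 GiB),
             * 4 KiB data blocks, 16 blocks per bitmap bit. */
            unsigned long long provided_data_sectors = 1ULL << 30;
            unsigned log2_sectors_per_block = 3;
            unsigned log2_blocks_per_bitmap_bit = 4;
            unsigned long long bitmap_block_bits = 4096ULL * 8;   /* assumed BITMAP_BLOCK_SIZE * 8 */

            unsigned long long n_blocks = provided_data_sectors >> log2_sectors_per_block;
            unsigned long long n_bitmap_bits =
                    DIV_ROUND_UP(n_blocks, 1ULL << log2_blocks_per_bitmap_bit);
            unsigned long long n_bitmap_blocks =
                    DIV_ROUND_UP(n_bitmap_bits, bitmap_block_bits);

            printf("%llu bitmap bits in %llu bitmap blocks\n",
                   n_bitmap_bits, n_bitmap_blocks);
            return 0;
    }
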
4308 struct dm_integrity_c *ic = ti->private; in dm_integrity_dtr() local
4310 BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress)); in dm_integrity_dtr()
4311 BUG_ON(!list_empty(&ic->wait_list)); in dm_integrity_dtr()
4313 if (ic->metadata_wq) in dm_integrity_dtr()
4314 destroy_workqueue(ic->metadata_wq); in dm_integrity_dtr()
4315 if (ic->wait_wq) in dm_integrity_dtr()
4316 destroy_workqueue(ic->wait_wq); in dm_integrity_dtr()
4317 if (ic->offload_wq) in dm_integrity_dtr()
4318 destroy_workqueue(ic->offload_wq); in dm_integrity_dtr()
4319 if (ic->commit_wq) in dm_integrity_dtr()
4320 destroy_workqueue(ic->commit_wq); in dm_integrity_dtr()
4321 if (ic->writer_wq) in dm_integrity_dtr()
4322 destroy_workqueue(ic->writer_wq); in dm_integrity_dtr()
4323 if (ic->recalc_wq) in dm_integrity_dtr()
4324 destroy_workqueue(ic->recalc_wq); in dm_integrity_dtr()
4325 vfree(ic->recalc_buffer); in dm_integrity_dtr()
4326 kvfree(ic->recalc_tags); in dm_integrity_dtr()
4327 kvfree(ic->bbs); in dm_integrity_dtr()
4328 if (ic->bufio) in dm_integrity_dtr()
4329 dm_bufio_client_destroy(ic->bufio); in dm_integrity_dtr()
4330 mempool_exit(&ic->journal_io_mempool); in dm_integrity_dtr()
4331 if (ic->io) in dm_integrity_dtr()
4332 dm_io_client_destroy(ic->io); in dm_integrity_dtr()
4333 if (ic->dev) in dm_integrity_dtr()
4334 dm_put_device(ti, ic->dev); in dm_integrity_dtr()
4335 if (ic->meta_dev) in dm_integrity_dtr()
4336 dm_put_device(ti, ic->meta_dev); in dm_integrity_dtr()
4337 dm_integrity_free_page_list(ic->journal); in dm_integrity_dtr()
4338 dm_integrity_free_page_list(ic->journal_io); in dm_integrity_dtr()
4339 dm_integrity_free_page_list(ic->journal_xor); in dm_integrity_dtr()
4340 dm_integrity_free_page_list(ic->recalc_bitmap); in dm_integrity_dtr()
4341 dm_integrity_free_page_list(ic->may_write_bitmap); in dm_integrity_dtr()
4342 if (ic->journal_scatterlist) in dm_integrity_dtr()
4343 dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist); in dm_integrity_dtr()
4344 if (ic->journal_io_scatterlist) in dm_integrity_dtr()
4345 dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist); in dm_integrity_dtr()
4346 if (ic->sk_requests) { in dm_integrity_dtr()
4349 for (i = 0; i < ic->journal_sections; i++) { in dm_integrity_dtr()
4350 struct skcipher_request *req = ic->sk_requests[i]; in dm_integrity_dtr()
4356 kvfree(ic->sk_requests); in dm_integrity_dtr()
4358 kvfree(ic->journal_tree); in dm_integrity_dtr()
4359 if (ic->sb) in dm_integrity_dtr()
4360 free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT); in dm_integrity_dtr()
4362 if (ic->internal_hash) in dm_integrity_dtr()
4363 crypto_free_shash(ic->internal_hash); in dm_integrity_dtr()
4364 free_alg(&ic->internal_hash_alg); in dm_integrity_dtr()
4366 if (ic->journal_crypt) in dm_integrity_dtr()
4367 crypto_free_skcipher(ic->journal_crypt); in dm_integrity_dtr()
4368 free_alg(&ic->journal_crypt_alg); in dm_integrity_dtr()
4370 if (ic->journal_mac) in dm_integrity_dtr()
4371 crypto_free_shash(ic->journal_mac); in dm_integrity_dtr()
4372 free_alg(&ic->journal_mac_alg); in dm_integrity_dtr()
4374 kfree(ic); in dm_integrity_dtr()