Lines matching defs: dio (struct dm_integrity_io *, drivers/md/dm-integrity.c)
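
The matches below are uses of `dio`, the pointer to the per-bio state (`struct dm_integrity_io`) that dm-integrity keeps in each bio's per-bio data; the number at the start of each match is its line in the source file. The struct definition itself is not among the matches, so the sketch below is only a reconstruction: every field is inferred from the member accesses listed here, but the types, comments and ordering are assumptions rather than the authoritative definition.

    /*
     * Reconstructed sketch of struct dm_integrity_io.
     * Field names come from the accesses in this listing; types and order
     * are assumptions.
     */
    struct dm_integrity_io {
        struct work_struct work;            /* target of INIT_WORK()/queue_work() */

        struct dm_integrity_c *ic;          /* owning target context */
        enum req_op op;                     /* bio_op(bio), cached at map time */
        bool fua;                           /* write carrying REQ_FUA */

        struct dm_integrity_range range;    /* .logical_sector, .n_sectors */

        sector_t metadata_block;            /* where the tags for this range live */
        unsigned int metadata_offset;

        atomic_t in_flight;                 /* dropped by dec_in_flight() */
        blk_status_t bi_status;             /* deferred error, copied to the bio at completion */
        struct completion *completion;      /* set for synchronous reads */

        struct dm_bio_details bio_details;  /* dm_bio_record()/dm_bio_restore() state */

        /* inline-metadata path only */
        char *integrity_payload;
        unsigned int payload_len;
        bool integrity_payload_from_mempool;
        bool integrity_range_locked;
    };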

365 static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
366 static int dm_integrity_map_inline(struct dm_integrity_io *dio, bool from_map);
1556 static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
1562 bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1588 static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
1590 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1592 if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
1593 submit_flush_bio(ic, dio);
1598 static void dec_in_flight(struct dm_integrity_io *dio)
1600 if (atomic_dec_and_test(&dio->in_flight)) {
1601 struct dm_integrity_c *ic = dio->ic;
1604 remove_range(ic, &dio->range);
1606 if (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))
1609 bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1610 if (unlikely(dio->bi_status) && !bio->bi_status)
1611 bio->bi_status = dio->bi_status;
1612 if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
1613 dio->range.logical_sector += dio->range.n_sectors;
1614 bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
1615 INIT_WORK(&dio->work, integrity_bio_wait);
1616 queue_work(ic->offload_wq, &dio->work);
1619 do_endio_flush(ic, dio);
1625 struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
1627 dm_bio_restore(&dio->bio_details, bio);
1631 if (dio->completion)
1632 complete(dio->completion);
1634 dec_in_flight(dio);
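
The fragments from submit_flush_bio() through the endio block at 1625–1634 form the completion path. Two conventions make it work: the bio and its per-bio data are interconvertible via dm_per_bio_data()/dm_bio_from_per_bio_data() provided the same size is passed to both (here always sizeof(struct dm_integrity_io)), and dio->in_flight is a plain reference count released by dec_in_flight(). Below is a minimal sketch of that pattern; my_io, my_start and my_dec_in_flight are hypothetical names, not functions of the driver.

    #include <linux/device-mapper.h>
    #include <linux/bio.h>

    /* Hypothetical per-bio state mirroring how dio is used above. */
    struct my_io {
        atomic_t in_flight;
        blk_status_t bi_status;
    };

    /*
     * The size passed to dm_per_bio_data() must match ti->per_io_data_size and
     * every later dm_bio_from_per_bio_data() call, just as
     * sizeof(struct dm_integrity_io) is used consistently in the listing.
     */
    static void my_start(struct bio *bio)
    {
        struct my_io *io = dm_per_bio_data(bio, sizeof(struct my_io));

        /* one reference for the data I/O, one for the metadata work */
        atomic_set(&io->in_flight, 2);
        io->bi_status = BLK_STS_OK;
    }

    static void my_dec_in_flight(struct my_io *io)
    {
        if (atomic_dec_and_test(&io->in_flight)) {
            struct bio *bio = dm_bio_from_per_bio_data(io, sizeof(struct my_io));

            /* propagate a deferred error only if the bio has none yet */
            if (io->bi_status && !bio->bi_status)
                bio->bi_status = io->bi_status;
            bio_endio(bio);
        }
    }
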
1690 static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checksum)
1692 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1693 struct dm_integrity_c *ic = dio->ic;
1699 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
1700 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset,
1701 &dio->metadata_offset);
1703 logical_sector = dio->range.logical_sector;
1707 __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
1727 alignment = dio->range.logical_sector | bio_sectors(bio) | (PAGE_SIZE >> SECTOR_SHIFT);
1736 dio->bi_status = errno_to_blk_status(r);
1741 r = dm_integrity_rw_tag(ic, checksum, &dio->metadata_block,
1742 &dio->metadata_offset, ic->tag_size, TAG_CMP);
1752 dio->bi_status = errno_to_blk_status(r);
1771 struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
1772 struct dm_integrity_c *ic = dio->ic;
1780 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1790 if (likely(dio->op != REQ_OP_DISCARD))
1804 if (unlikely(dio->op == REQ_OP_DISCARD)) {
1805 unsigned int bi_size = dio->bio_details.bi_iter.bi_size;
1815 r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
1831 sector = dio->range.logical_sector;
1832 sectors_to_process = dio->range.n_sectors;
1834 __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
1852 r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
1853 checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
1858 integrity_recheck(dio, checksums_onstack);
1877 struct bio_integrity_payload *bip = dio->bio_details.bi_integrity;
1882 unsigned int data_to_process = dio->range.n_sectors;
1894 r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
1895 this_len, dio->op == REQ_OP_READ ? TAG_READ : TAG_WRITE);
1905 dec_in_flight(dio);
1908 dio->bi_status = errno_to_blk_status(r);
1909 dec_in_flight(dio);
1944 struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
1949 dio->ic = ic;
1950 dio->bi_status = 0;
1951 dio->op = bio_op(bio);
1955 dio->integrity_payload = NULL;
1956 dio->integrity_payload_from_mempool = false;
1957 dio->integrity_range_locked = false;
1958 return dm_integrity_map_inline(dio, true);
1961 if (unlikely(dio->op == REQ_OP_DISCARD)) {
1977 submit_flush_bio(ic, dio);
1981 dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
1982 dio->fua = dio->op == REQ_OP_WRITE && bio->bi_opf & REQ_FUA;
1983 if (unlikely(dio->fua)) {
1990 if (unlikely(!dm_integrity_check_limits(ic, dio->range.logical_sector, bio)))
2015 if (unlikely(ic->mode == 'R') && unlikely(dio->op != REQ_OP_READ))
2018 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
2019 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
2022 dm_integrity_map_continue(dio, true);
2026 static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
2029 struct dm_integrity_c *ic = dio->ic;
2033 logical_sector = dio->range.logical_sector;
2034 n_sectors = dio->range.n_sectors;
2045 if (likely(dio->op == REQ_OP_WRITE))
2051 if (unlikely(dio->op == REQ_OP_READ)) {
2102 if (likely(dio->op == REQ_OP_WRITE))
2110 } else if (likely(dio->op == REQ_OP_WRITE))
2114 if (likely(dio->op == REQ_OP_WRITE)) {
2152 if (unlikely(dio->op == REQ_OP_READ))
2157 if (likely(dio->op == REQ_OP_WRITE)) {
2166 remove_range(ic, &dio->range);
2171 dio->range.logical_sector = logical_sector;
2172 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
2173 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
2180 static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map)
2182 struct dm_integrity_c *ic = dio->ic;
2183 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
2189 bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ;
2191 if (unlikely(dio->op == REQ_OP_DISCARD) && ic->mode != 'D')
2195 INIT_WORK(&dio->work, integrity_bio_wait);
2196 queue_work(ic->offload_wq, &dio->work);
2208 dio->range.n_sectors = bio_sectors(bio);
2210 if (ic->mode == 'J' && likely(dio->op != REQ_OP_DISCARD)) {
2211 if (dio->op == REQ_OP_WRITE) {
2215 dio->range.n_sectors = min(dio->range.n_sectors,
2217 if (unlikely(!dio->range.n_sectors)) {
2223 range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
2241 add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
2255 } while ((i += ic->sectors_per_block) < dio->range.n_sectors);
2262 journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2264 if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
2265 dio->range.n_sectors = next_sector - dio->range.logical_sector;
2270 for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
2271 if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
2274 dio->range.n_sectors = i;
2278 if (unlikely(!add_new_range(ic, &dio->range, true))) {
2287 INIT_WORK(&dio->work, integrity_bio_wait);
2288 queue_work(ic->wait_wq, &dio->work);
2292 dio->range.n_sectors = ic->sectors_per_block;
2293 wait_and_add_new_range(ic, &dio->range);
2303 new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2305 remove_range_unlocked(ic, &dio->range);
2310 if (ic->mode == 'J' && likely(dio->op == REQ_OP_DISCARD) && !discard_retried) {
2314 new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2316 unlikely(next_sector < dio->range.logical_sector - dio->range.n_sectors)) {
2317 remove_range_unlocked(ic, &dio->range);
2336 if (ic->mode == 'B' && (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))) {
2337 if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2338 dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
2341 bbs = sector_to_bitmap_block(ic, dio->range.logical_sector);
2350 dio->in_flight = (atomic_t)ATOMIC_INIT(2);
2354 dio->completion = &read_comp;
2356 dio->completion = NULL;
2358 dm_bio_record(&dio->bio_details, bio);
2363 bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
2365 if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) {
2366 integrity_metadata(&dio->work);
2369 dio->in_flight = (atomic_t)ATOMIC_INIT(1);
2370 dio->completion = NULL;
2382 dio->range.logical_sector + dio->range.n_sectors > recalc_sector)
2385 if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
2386 dio->range.n_sectors, BITMAP_OP_TEST_ALL_CLEAR))
2391 integrity_metadata(&dio->work);
2394 dec_in_flight(dio);
2396 INIT_WORK(&dio->work, integrity_metadata);
2397 queue_work(ic->metadata_wq, &dio->work);
2403 if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry)))
2406 do_endio_flush(ic, dio);
2409 static int dm_integrity_map_inline(struct dm_integrity_io *dio, bool from_map)
2411 struct dm_integrity_c *ic = dio->ic;
2412 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
2428 if (!dio->integrity_payload) {
2430 dio->payload_len = ic->tuple_size * (bio_sectors(bio) >> ic->sb->log2_sectors_per_block);
2433 dio->payload_len += extra_size;
2434 dio->integrity_payload = kmalloc(dio->payload_len, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
2435 if (unlikely(!dio->integrity_payload)) {
2437 if (dio->payload_len > x_size) {
2450 dio->range.logical_sector = bio->bi_iter.bi_sector;
2451 dio->range.n_sectors = bio_sectors(bio);
2463 if (likely(dio->range.logical_sector + dio->range.n_sectors <= recalc_sector))
2468 if (dio->range.logical_sector + dio->range.n_sectors <= recalc_sector)
2470 if (unlikely(!add_new_range(ic, &dio->range, true))) {
2473 INIT_WORK(&dio->work, integrity_bio_wait);
2474 queue_work(ic->wait_wq, &dio->work);
2477 wait_and_add_new_range(ic, &dio->range);
2479 dio->integrity_range_locked = true;
2484 if (unlikely(!dio->integrity_payload)) {
2485 dio->integrity_payload = page_to_virt((struct page *)mempool_alloc(&ic->recheck_pool, GFP_NOIO));
2486 dio->integrity_payload_from_mempool = true;
2489 dio->bio_details.bi_iter = bio->bi_iter;
2504 if (dio->op == REQ_OP_WRITE) {
2506 while (dio->bio_details.bi_iter.bi_size) {
2507 struct bio_vec bv = bio_iter_iovec(bio, dio->bio_details.bi_iter);
2510 memset(dio->integrity_payload + pos + ic->tag_size, 0, ic->tuple_size - ic->tag_size);
2511 integrity_sector_checksum(ic, dio->bio_details.bi_iter.bi_sector, mem, dio->integrity_payload + pos);
2514 bio_advance_iter_single(bio, &dio->bio_details.bi_iter, ic->sectors_per_block << SECTOR_SHIFT);
2518 ret = bio_integrity_add_page(bio, virt_to_page(dio->integrity_payload),
2519 dio->payload_len, offset_in_page(dio->integrity_payload));
2520 if (unlikely(ret != dio->payload_len)) {
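
In the dm_integrity_map_inline() write path above (2504–2520), one tuple of ic->tuple_size bytes is produced per data block: the checksum goes at the start of the tuple, the tail beyond ic->tag_size is zeroed (the memset at 2510), and the flat buffer is then attached to the bio with bio_integrity_add_page(). A rough sketch of that per-block fill follows; fill_tuples() and compute_checksum() are hypothetical stand-ins for the driver's own loop and integrity_sector_checksum().

    #include <linux/string.h>

    /*
     * Sketch only: one tuple per data block, checksum in the leading bytes,
     * remainder of each tuple zero-padded. compute_checksum() is a placeholder.
     */
    static void fill_tuples(char *payload, unsigned int tuple_size,
                            unsigned int tag_size, unsigned int nr_blocks,
                            const char *data, unsigned int block_bytes,
                            void (*compute_checksum)(const char *mem, char *tag))
    {
        unsigned int i, pos = 0;

        for (i = 0; i < nr_blocks; i++) {
            /* zero the tail of the tuple so no stale bytes are written out */
            if (tag_size < tuple_size)
                memset(payload + pos + tag_size, 0, tuple_size - tag_size);
            compute_checksum(data + (size_t)i * block_bytes, payload + pos);
            pos += tuple_size;
        }
    }
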
2529 static inline void dm_integrity_free_payload(struct dm_integrity_io *dio)
2531 struct dm_integrity_c *ic = dio->ic;
2532 if (unlikely(dio->integrity_payload_from_mempool))
2533 mempool_free(virt_to_page(dio->integrity_payload), &ic->recheck_pool);
2535 kfree(dio->integrity_payload);
2536 dio->integrity_payload = NULL;
2537 dio->integrity_payload_from_mempool = false;
2542 struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
2543 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
2544 struct dm_integrity_c *ic = dio->ic;
2548 dio->integrity_payload = page_to_virt((struct page *)mempool_alloc(&ic->recheck_pool, GFP_NOIO));
2549 dio->integrity_payload_from_mempool = true;
2551 outgoing_data = dio->integrity_payload + PAGE_SIZE;
2553 while (dio->bio_details.bi_iter.bi_size) {
2572 r = bio_integrity_add_page(outgoing_bio, virt_to_page(dio->integrity_payload), ic->tuple_size, 0);
2580 outgoing_bio->bi_iter.bi_sector = dio->bio_details.bi_iter.bi_sector + ic->start + SB_SECTORS;
2591 integrity_sector_checksum(ic, dio->bio_details.bi_iter.bi_sector, outgoing_data, digest);
2592 if (unlikely(crypto_memneq(digest, dio->integrity_payload, min(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)))) {
2594 ic->dev->bdev, dio->bio_details.bi_iter.bi_sector);
2597 bio, dio->bio_details.bi_iter.bi_sector, 0);
2604 bv = bio_iter_iovec(bio, dio->bio_details.bi_iter);
2609 bio_advance_iter_single(bio, &dio->bio_details.bi_iter, ic->sectors_per_block << SECTOR_SHIFT);
2619 struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
2620 if (dio->op == REQ_OP_READ && likely(*status == BLK_STS_OK)) {
2623 unlikely(dio->integrity_range_locked))
2625 while (dio->bio_details.bi_iter.bi_size) {
2627 struct bio_vec bv = bio_iter_iovec(bio, dio->bio_details.bi_iter);
2630 integrity_sector_checksum(ic, dio->bio_details.bi_iter.bi_sector, mem, digest);
2631 if (unlikely(crypto_memneq(digest, dio->integrity_payload + pos,
2634 dm_integrity_free_payload(dio);
2635 INIT_WORK(&dio->work, dm_integrity_inline_recheck);
2636 queue_work(ic->offload_wq, &dio->work);
2641 bio_advance_iter_single(bio, &dio->bio_details.bi_iter, ic->sectors_per_block << SECTOR_SHIFT);
2645 dm_integrity_free_payload(dio);
2646 if (unlikely(dio->integrity_range_locked))
2647 remove_range(ic, &dio->range);
2654 struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
2655 struct dm_integrity_c *ic = dio->ic;
2658 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
2659 int r = dm_integrity_map_inline(dio, false);
2673 dm_integrity_map_continue(dio, false);
3291 struct dm_integrity_io *dio;
3293 dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
3295 if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
3296 dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
3297 remove_range(ic, &dio->range);
3298 INIT_WORK(&dio->work, integrity_bio_wait);
3299 queue_work(ic->offload_wq, &dio->work);
3301 block_bitmap_op(ic, ic->journal, dio->range.logical_sector,
3302 dio->range.n_sectors, BITMAP_OP_SET);
3315 struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
3317 block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
3318 dio->range.n_sectors, BITMAP_OP_SET);
3320 remove_range(ic, &dio->range);
3321 INIT_WORK(&dio->work, integrity_bio_wait);
3322 queue_work(ic->offload_wq, &dio->work);
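
A pattern that recurs throughout the listing (e.g. at 1615–1616, 2287–2288, 2396–2397, 2635–2636, 3298–3299 and 3321–3322) is deferring further processing to a workqueue: the submitter does INIT_WORK(&dio->work, fn) followed by queue_work(), and the worker recovers dio with container_of(w, struct dm_integrity_io, work), as at 1771, 2542 and 2654. A minimal sketch of that offload pattern, using hypothetical my_* names rather than the driver's own:

    #include <linux/workqueue.h>
    #include <linux/kernel.h>

    /* Hypothetical mirror of the dio->work offload pattern; not driver code. */
    struct my_io {
        struct work_struct work;
        /* ... remaining per-bio state ... */
    };

    static void my_worker(struct work_struct *w)
    {
        /* recover the containing per-bio state, as the workers above do */
        struct my_io *io = container_of(w, struct my_io, work);

        /* continue processing io outside the submission context */
        (void)io;
    }

    static void my_defer(struct my_io *io, struct workqueue_struct *wq)
    {
        INIT_WORK(&io->work, my_worker);
        queue_work(wq, &io->work);
    }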