/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

/*
 * Tracks a batch of bios sharing one completion: @done is biased by one
 * reference for the submitter, and BIO_UPTODATE is cleared in @flags if
 * any bio in the batch fails with an error other than -EOPNOTSUPP.
 */
struct bio_batch {
        atomic_t                done;
        unsigned long           flags;
        struct completion       *wait;
};

static void bio_batch_end_io(struct bio *bio, int err)
{
        struct bio_batch *bb = bio->bi_private;

        if (err && (err != -EOPNOTSUPP))
                clear_bit(BIO_UPTODATE, &bb->flags);
        if (atomic_dec_and_test(&bb->done))
                complete(bb->wait);
        bio_put(bio);
}
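/*
 * Illustrative sketch (not part of this file): the submit side of the
 * bio_batch pattern used by every helper below.  @done starts at 1 so
 * the batch cannot complete while bios are still being issued; the
 * submitter drops its own reference last.  The helper name and the
 * pre-built bios array are hypothetical; each bio is assumed to already
 * have bi_end_io = bio_batch_end_io and bi_private = bb.
 */
#if 0
static void example_batch_submit(struct bio_batch *bb, struct bio **bios,
                                 int nr, struct completion *wait)
{
        int i;

        atomic_set(&bb->done, 1);       /* submitter holds one reference */
        bb->flags = 1 << BIO_UPTODATE;
        bb->wait = wait;

        for (i = 0; i < nr; i++) {
                atomic_inc(&bb->done);  /* one reference per in-flight bio */
                submit_bio(WRITE, bios[i]);
        }

        /* Drop the submitter's reference; wait only if bios remain in flight. */
        if (!atomic_dec_and_test(&bb->done))
                wait_for_completion_io(wait);
}
#endif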
/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
        DECLARE_COMPLETION_ONSTACK(wait);
        struct request_queue *q = bdev_get_queue(bdev);
        int type = REQ_WRITE | REQ_DISCARD;
        unsigned int max_discard_sectors, granularity;
        int alignment;
        struct bio_batch bb;
        struct bio *bio;
        int ret = 0;
        struct blk_plug plug;

        if (!q)
                return -ENXIO;

        if (!blk_queue_discard(q))
                return -EOPNOTSUPP;

        /* Zero-sector (unknown) and one-sector granularities are the same. */
        granularity = max(q->limits.discard_granularity >> 9, 1U);
        alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

        /*
         * Ensure that max_discard_sectors is of the proper
         * granularity, so that requests stay aligned after a split.
         */
        max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
        max_discard_sectors -= max_discard_sectors % granularity;
        if (unlikely(!max_discard_sectors)) {
                /* Avoid infinite loop below. Being cautious never hurts. */
                return -EOPNOTSUPP;
        }

        if (flags & BLKDEV_DISCARD_SECURE) {
                if (!blk_queue_secdiscard(q))
                        return -EOPNOTSUPP;
                type |= REQ_SECURE;
        }

        atomic_set(&bb.done, 1);
        bb.flags = 1 << BIO_UPTODATE;
        bb.wait = &wait;

        blk_start_plug(&plug);
        while (nr_sects) {
                unsigned int req_sects;
                sector_t end_sect, tmp;

                bio = bio_alloc(gfp_mask, 1);
                if (!bio) {
                        ret = -ENOMEM;
                        break;
                }

                req_sects = min_t(sector_t, nr_sects, max_discard_sectors);

                /*
                 * If splitting a request, and the next starting sector would be
                 * misaligned, stop the discard at the previous aligned sector.
                 */
                end_sect = sector + req_sects;
                tmp = end_sect;
                if (req_sects < nr_sects &&
                    sector_div(tmp, granularity) != alignment) {
                        end_sect = end_sect - alignment;
                        sector_div(end_sect, granularity);
                        end_sect = end_sect * granularity + alignment;
                        req_sects = end_sect - sector;
                }

                bio->bi_iter.bi_sector = sector;
                bio->bi_end_io = bio_batch_end_io;
                bio->bi_bdev = bdev;
                bio->bi_private = &bb;

                bio->bi_iter.bi_size = req_sects << 9;
                nr_sects -= req_sects;
                sector = end_sect;

                atomic_inc(&bb.done);
                submit_bio(type, bio);

                /*
                 * We can loop for a long time in here, if someone does
                 * full device discards (like mkfs). Be nice and allow
                 * us to schedule out to avoid softlocking if preempt
                 * is disabled.
                 */
                cond_resched();
        }
        blk_finish_plug(&plug);

        /* Wait for bios in-flight */
        if (!atomic_dec_and_test(&bb.done))
                wait_for_completion_io(&wait);

        if (!test_bit(BIO_UPTODATE, &bb.flags))
                ret = -EIO;

        return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
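/*
 * Illustrative sketch (not part of this file): a minimal caller that
 * discards a whole device, preferring a secure discard when the queue
 * supports one.  The function name is hypothetical, and the size lookup
 * via bdev->bd_inode is an assumption about the caller's context.
 */
#if 0
static int example_discard_whole_device(struct block_device *bdev)
{
        sector_t nr_sects = i_size_read(bdev->bd_inode) >> 9;
        unsigned long flags = 0;

        if (blk_queue_secdiscard(bdev_get_queue(bdev)))
                flags |= BLKDEV_DISCARD_SECURE;

        /* GFP_KERNEL: this path may sleep for bio allocation. */
        return blkdev_issue_discard(bdev, 0, nr_sects, GFP_KERNEL, flags);
}
#endif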
/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                            sector_t nr_sects, gfp_t gfp_mask,
                            struct page *page)
{
        DECLARE_COMPLETION_ONSTACK(wait);
        struct request_queue *q = bdev_get_queue(bdev);
        unsigned int max_write_same_sectors;
        struct bio_batch bb;
        struct bio *bio;
        int ret = 0;

        if (!q)
                return -ENXIO;

        max_write_same_sectors = q->limits.max_write_same_sectors;

        if (max_write_same_sectors == 0)
                return -EOPNOTSUPP;

        atomic_set(&bb.done, 1);
        bb.flags = 1 << BIO_UPTODATE;
        bb.wait = &wait;

        while (nr_sects) {
                bio = bio_alloc(gfp_mask, 1);
                if (!bio) {
                        ret = -ENOMEM;
                        break;
                }

                bio->bi_iter.bi_sector = sector;
                bio->bi_end_io = bio_batch_end_io;
                bio->bi_bdev = bdev;
                bio->bi_private = &bb;
                bio->bi_vcnt = 1;
                bio->bi_io_vec->bv_page = page;
                bio->bi_io_vec->bv_offset = 0;
                bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);

                if (nr_sects > max_write_same_sectors) {
                        bio->bi_iter.bi_size = max_write_same_sectors << 9;
                        nr_sects -= max_write_same_sectors;
                        sector += max_write_same_sectors;
                } else {
                        bio->bi_iter.bi_size = nr_sects << 9;
                        nr_sects = 0;
                }

                atomic_inc(&bb.done);
                submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
        }

        /* Wait for bios in-flight */
        if (!atomic_dec_and_test(&bb.done))
                wait_for_completion_io(&wait);

        if (!test_bit(BIO_UPTODATE, &bb.flags))
                ret = -ENOTSUPP;

        return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
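/*
 * Illustrative sketch (not part of this file): replicating one page of
 * payload across a range with WRITE SAME.  The device repeats the first
 * logical block of @page across the whole range, so only a single page
 * travels down the stack.  The function name and parameters are
 * hypothetical.
 */
#if 0
static int example_stamp_pattern(struct block_device *bdev, struct page *page,
                                 sector_t start, sector_t nr_sects)
{
        /* bdev_write_same() is non-zero iff the device supports WRITE SAME. */
        if (!bdev_write_same(bdev))
                return -EOPNOTSUPP;

        return blkdev_issue_write_same(bdev, start, nr_sects, GFP_KERNEL, page);
}
#endif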
/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *  Generate and issue a number of bios with zero-filled pages.
 */
static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                                  sector_t nr_sects, gfp_t gfp_mask)
{
        int ret;
        struct bio *bio;
        struct bio_batch bb;
        unsigned int sz;
        DECLARE_COMPLETION_ONSTACK(wait);

        atomic_set(&bb.done, 1);
        bb.flags = 1 << BIO_UPTODATE;
        bb.wait = &wait;

        ret = 0;
        while (nr_sects != 0) {
                bio = bio_alloc(gfp_mask,
                                min(nr_sects, (sector_t)BIO_MAX_PAGES));
                if (!bio) {
                        ret = -ENOMEM;
                        break;
                }

                bio->bi_iter.bi_sector = sector;
                bio->bi_bdev = bdev;
                bio->bi_end_io = bio_batch_end_io;
                bio->bi_private = &bb;

                while (nr_sects != 0) {
                        sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
                        ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
                        nr_sects -= ret >> 9;
                        sector += ret >> 9;
                        if (ret < (sz << 9))
                                break;
                }
                ret = 0;
                atomic_inc(&bb.done);
                submit_bio(WRITE, bio);
        }

        /* Wait for bios in-flight */
        if (!atomic_dec_and_test(&bb.done))
                wait_for_completion_io(&wait);

        if (!test_bit(BIO_UPTODATE, &bb.flags))
                /* One of the bios in the batch completed with an error. */
                ret = -EIO;

        return ret;
}
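/*
 * Illustrative arithmetic (not part of this file): how the loop above
 * chunks a range.  Each bio_add_page() call covers PAGE_SIZE >> 9
 * sectors (8 with 4K pages and 512-byte sectors), and each bio holds at
 * most BIO_MAX_PAGES pages, so zeroing nr_sects sectors submits roughly
 * the number of bios computed below.  The helper name is hypothetical.
 */
#if 0
static sector_t example_bios_needed(sector_t nr_sects)
{
        sector_t sectors_per_page = PAGE_SIZE >> 9;
        sector_t pages = DIV_ROUND_UP(nr_sects, sectors_per_page);

        /* e.g. 1 GiB = 2097152 sectors -> 262144 pages -> 1024 bios */
        return DIV_ROUND_UP(pages, (sector_t)BIO_MAX_PAGES);
}
#endif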
/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *  Zero-fill a block range.  Uses a WRITE SAME request when the device
 *  supports one, and falls back to issuing zero-filled write bios
 *  otherwise.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                         sector_t nr_sects, gfp_t gfp_mask)
{
        if (bdev_write_same(bdev)) {
                unsigned char bdn[BDEVNAME_SIZE];

                if (!blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
                                             ZERO_PAGE(0)))
                        return 0;

                bdevname(bdev, bdn);
                pr_err("%s: WRITE SAME failed. Manually zeroing.\n", bdn);
        }

        return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
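/*
 * Illustrative sketch (not part of this file): zeroing the first 1 MiB
 * of a device, e.g. to clear stale on-disk metadata.  The helper
 * transparently prefers WRITE SAME and falls back to writing zero
 * pages; the caller only sees the final status.  The function name is
 * hypothetical.
 */
#if 0
static int example_wipe_header(struct block_device *bdev)
{
        sector_t nr_sects = (1024 * 1024) >> 9; /* 1 MiB = 2048 sectors */

        return blkdev_issue_zeroout(bdev, 0, nr_sects, GFP_KERNEL);
}
#endif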