/*
 * Functions related to generic block device helpers.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

struct bio_batch {
	atomic_t		done;
	unsigned long		flags;
	struct completion	*wait;
};

static void bio_batch_end_io(struct bio *bio, int err)
{
	struct bio_batch *bb = bio->bi_private;

	if (err && (err != -EOPNOTSUPP))
		clear_bit(BIO_UPTODATE, &bb->flags);
	if (atomic_dec_and_test(&bb->done))
		complete(bb->wait);
	bio_put(bio);
}

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = REQ_WRITE | REQ_DISCARD;
	sector_t max_discard_sectors;
	sector_t granularity, alignment;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = bdev_discard_alignment(bdev) >> 9;
	alignment = sector_div(alignment, granularity);
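	/*
	 * sector_div() divides its first argument in place and returns the
	 * remainder, so @alignment now holds the sector offset of aligned
	 * discard boundaries within each granularity unit.  Illustrative
	 * numbers only: with a granularity of 2048 sectors (1 MiB) and
	 * bdev_discard_alignment() yielding 63 sectors, the split logic
	 * below keeps request boundaries at sector n * 2048 + 63.
	 */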
	/*
	 * Ensure that max_discard_sectors is of the proper
	 * granularity, so that requests stay aligned after a split.
	 */
	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	sector_div(max_discard_sectors, granularity);
	max_discard_sectors *= granularity;
	if (unlikely(!max_discard_sectors)) {
		/* Avoid infinite loop below. Being cautious never hurts. */
		return -EOPNOTSUPP;
	}

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secdiscard(q))
			return -EOPNOTSUPP;
		type |= REQ_SECURE;
	}

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		req_sects = min_t(sector_t, nr_sects, max_discard_sectors);

		/*
		 * If splitting a request, and the next starting sector would
		 * be misaligned, stop the discard at the previous aligned
		 * sector.
		 */
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}

		bio->bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;

		bio->bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		atomic_inc(&bb.done);
		submit_bio(type, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		ret = -EIO;

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
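/*
 * Usage sketch (illustrative only, not part of this file): a filesystem's
 * FITRIM or unmap path might call this as
 *
 *	int err = blkdev_issue_discard(bdev, start, nr_sectors, GFP_NOFS, 0);
 *
 * treating -EOPNOTSUPP as "device cannot discard" rather than as a hard
 * failure, and passing BLKDEV_DISCARD_SECURE to request a secure discard.
 */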
/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
			    sector_t nr_sects, gfp_t gfp_mask,
			    struct page *page)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	max_write_same_sectors = q->limits.max_write_same_sectors;

	if (max_write_same_sectors == 0)
		return -EOPNOTSUPP;

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	while (nr_sects) {
		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;
		bio->bi_vcnt = 1;

		/*
		 * The payload is a single logical block; the device
		 * replicates it across the whole bi_size range.
		 */
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_size = nr_sects << 9;
			nr_sects = 0;
		}

		atomic_inc(&bb.done);
		submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		ret = -ENOTSUPP;

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
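/*
 * Usage sketch (illustrative only, not part of this file): replicate the
 * first logical block of @page across 32768 sectors (16 MiB):
 *
 *	int err = blkdev_issue_write_same(bdev, sector, 32768,
 *					  GFP_KERNEL, page);
 *
 * blkdev_issue_zeroout() below uses exactly this with ZERO_PAGE(0) to
 * zero a range while transferring only a single block of payload.
 */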
/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *  Generate and issue a number of bios with zero-filled pages.
 */

int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			sector_t nr_sects, gfp_t gfp_mask)
{
	int ret;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz;
	DECLARE_COMPLETION_ONSTACK(wait);

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	ret = 0;
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_bdev   = bdev;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_private = &bb;

		/*
		 * bio_add_page() returns the number of bytes it accepted;
		 * once the bio is full, submit it and start another.
		 */
		while (nr_sects != 0) {
			sz = min((sector_t)PAGE_SIZE >> 9, nr_sects);
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
		ret = 0;
		atomic_inc(&bb.done);
		submit_bio(WRITE, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		/* One of the bios in the batch completed with an error. */
		ret = -EIO;

	return ret;
}
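/*
 * This manual path is the fallback used by blkdev_issue_zeroout() below:
 * it pushes real zero pages (ZERO_PAGE(0)) through the queue, so it works
 * on any device but transfers the full range over the bus.
 */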
/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *  Zero-fill the block range, preferring a WRITE SAME of ZERO_PAGE(0)
 *  when the device supports it and falling back to zero-filled write
 *  bios otherwise.
 */

int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			 sector_t nr_sects, gfp_t gfp_mask)
{
	if (bdev_write_same(bdev)) {
		unsigned char bdn[BDEVNAME_SIZE];

		if (!blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
					     ZERO_PAGE(0)))
			return 0;

		bdevname(bdev, bdn);
		pr_err("%s: WRITE SAME failed. Manually zeroing.\n", bdn);
	}

	return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
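/*
 * Usage sketch (illustrative only, not part of this file): zero the first
 * 1 MiB of a device, i.e. 2048 512-byte sectors:
 *
 *	int err = blkdev_issue_zeroout(bdev, 0, 2048, GFP_KERNEL);
 *	if (err)
 *		pr_warn("zeroout failed: %d\n", err);
 */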