/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

static void blkdev_discard_end_io(struct bio *bio, int err)
{
	if (err) {
		if (err == -EOPNOTSUPP)
			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	}

	if (bio->bi_private)
		complete(bio->bi_private);
	__free_page(bio_page(bio));

	bio_put(bio);
}
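
/*
 * Completion convention used above: -EOPNOTSUPP is recorded as BIO_EOPNOTSUPP
 * so the submitter can tell "discard not supported" apart from a real I/O
 * error (BIO_UPTODATE cleared) once the bio has completed.
 */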

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = flags & BLKDEV_IFL_BARRIER ?
		DISCARD_BARRIER : DISCARD_NOBARRIER;
	struct bio *bio;
	struct page *page;
	int ret = 0;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	while (nr_sects && !ret) {
		unsigned int sector_size = q->limits.logical_block_size;
		unsigned int max_discard_sectors =
			min(q->limits.max_discard_sectors, UINT_MAX >> 9);

		bio = bio_alloc(gfp_mask, 1);
		if (!bio)
			goto out;
		bio->bi_sector = sector;
		bio->bi_end_io = blkdev_discard_end_io;
		bio->bi_bdev = bdev;
		if (flags & BLKDEV_IFL_WAIT)
			bio->bi_private = &wait;

		/*
		 * Add a zeroed one-sector payload as that's what our
		 * current implementations need. If we ever need more,
		 * the interface will need revisiting.
		 */
		page = alloc_page(gfp_mask | __GFP_ZERO);
		if (!page)
			goto out_free_bio;
		if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size)
			goto out_free_page;

		/*
		 * And override the bio size - the way discard works, we
		 * touch many more blocks on disk than the actual payload
		 * length.
		 */
		if (nr_sects > max_discard_sectors) {
			bio->bi_size = max_discard_sectors << 9;
			nr_sects -= max_discard_sectors;
			sector += max_discard_sectors;
		} else {
			bio->bi_size = nr_sects << 9;
			nr_sects = 0;
		}

		bio_get(bio);
		submit_bio(type, bio);

		if (flags & BLKDEV_IFL_WAIT)
			wait_for_completion(&wait);

		if (bio_flagged(bio, BIO_EOPNOTSUPP))
			ret = -EOPNOTSUPP;
		else if (!bio_flagged(bio, BIO_UPTODATE))
			ret = -EIO;
		bio_put(bio);
	}
	return ret;
out_free_page:
	__free_page(page);
out_free_bio:
	bio_put(bio);
out:
	return -ENOMEM;
}
EXPORT_SYMBOL(blkdev_issue_discard);
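
/*
 * Illustrative sketch of a caller: example_discard_range() is a hypothetical
 * helper (not defined elsewhere in the kernel) showing how the flags above
 * might be used to issue a synchronous discard without barrier semantics.
 */
#if 0
static int example_discard_range(struct block_device *bdev,
				 sector_t start, sector_t nr_sects)
{
	/* Block until the discard completes; no pre/post barrier requested. */
	return blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL,
				    BLKDEV_IFL_WAIT);
}
#endif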

struct bio_batch
{
	atomic_t		done;
	unsigned long		flags;
	struct completion	*wait;
	bio_end_io_t		*end_io;
};

static void bio_batch_end_io(struct bio *bio, int err)
{
	struct bio_batch *bb = bio->bi_private;

	if (err) {
		if (err == -EOPNOTSUPP)
			set_bit(BIO_EOPNOTSUPP, &bb->flags);
		else
			clear_bit(BIO_UPTODATE, &bb->flags);
	}
	if (bb) {
		if (bb->end_io)
			bb->end_io(bio, err);
		atomic_inc(&bb->done);
		complete(bb->wait);
	}
	bio_put(bio);
}
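
/*
 * Completion pattern for a batch of bios: each bio that finishes bumps
 * bb->done and completes bb->wait, so a submitter that remembers how many
 * bios it issued can wait until bb.done catches up and the whole batch has
 * drained (see blkdev_issue_zeroout() below).
 */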

/**
 * blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue the writes for
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Generate and issue a number of bios with zero-filled pages.
 *    Send a barrier at the beginning and at the end if requested; this
 *    guarantees correct request ordering. An empty barrier allows us to
 *    avoid a post-queue flush.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	int ret = 0;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz, issued = 0;
	DECLARE_COMPLETION_ONSTACK(wait);

	atomic_set(&bb.done, 0);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;
	bb.end_io = NULL;

	if (flags & BLKDEV_IFL_BARRIER) {
		/* issue an async barrier before the data */
		ret = blkdev_issue_flush(bdev, gfp_mask, NULL, 0);
		if (ret)
			return ret;
	}
submit:
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio)
			break;

		bio->bi_sector = sector;
		bio->bi_bdev   = bdev;
		bio->bi_end_io = bio_batch_end_io;
		if (flags & BLKDEV_IFL_WAIT)
			bio->bi_private = &bb;
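
		/*
		 * Fill the bio with the shared zero page: sz is in sectors
		 * (at most PAGE_SIZE >> 9 per page), bio_add_page() takes and
		 * returns byte counts, so sector/nr_sects advance by ret >> 9.
		 * A short add means the bio is full and gets submitted below.
		 */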
		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
			if (sz == 0)
				/* bio has maximum size possible */
				break;
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
		issued++;
		submit_bio(WRITE, bio);
	}
	/*
	 * All data bios are now in flight. Send the final barrier if
	 * requested.
	 */
	if (nr_sects == 0 && flags & BLKDEV_IFL_BARRIER)
		ret = blkdev_issue_flush(bdev, gfp_mask, NULL,
					flags & BLKDEV_IFL_WAIT);

	if (flags & BLKDEV_IFL_WAIT)
		/* Wait for bios in flight */
		while (issued != atomic_read(&bb.done))
			wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		/* One of the bios in the batch completed with an error. */
		ret = -EIO;

	if (ret)
		goto out;

	if (test_bit(BIO_EOPNOTSUPP, &bb.flags)) {
		ret = -EOPNOTSUPP;
		goto out;
	}
	if (nr_sects != 0)
		goto submit;
out:
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
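
/*
 * Illustrative sketch of a caller: example_zero_range() is a hypothetical
 * helper (not defined elsewhere in the kernel) showing how the flags above
 * might be combined to zero a range with barriers and wait for completion.
 */
#if 0
static int example_zero_range(struct block_device *bdev,
			      sector_t start, sector_t nr_sects)
{
	/* Barrier before and after the data, and wait for all bios. */
	return blkdev_issue_zeroout(bdev, start, nr_sects, GFP_KERNEL,
				    BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
}
#endif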