// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2024, Alibaba Cloud
 */
#include "internal.h"
#include <trace/events/erofs.h>

/*
 * A single read request against the backing file: the embedded bio collects
 * target folios while the kiocb is used to issue the read on that file.
 */
struct erofs_fileio_rq {
	struct bio_vec bvecs[16];
	struct bio bio;
	struct kiocb iocb;
	struct super_block *sb;
};

struct erofs_fileio {
	struct erofs_map_blocks map;
	struct erofs_map_dev dev;
	struct erofs_fileio_rq *rq;
};

static void erofs_fileio_ki_complete(struct kiocb *iocb, long ret)
{
	struct erofs_fileio_rq *rq =
			container_of(iocb, struct erofs_fileio_rq, iocb);
	struct folio_iter fi;

	if (ret > 0) {
		if (ret != rq->bio.bi_iter.bi_size) {
			/* short read: zero out the unread tail */
			bio_advance(&rq->bio, ret);
			zero_fill_bio(&rq->bio);
		}
		ret = 0;
	}
	if (rq->bio.bi_end_io) {
		/* a bio completion was installed by the caller; hand off to it */
		if (ret < 0 && !rq->bio.bi_status)
			rq->bio.bi_status = errno_to_blk_status(ret);
		rq->bio.bi_end_io(&rq->bio);
	} else {
		bio_for_each_folio_all(fi, &rq->bio) {
			DBG_BUGON(folio_test_uptodate(fi.folio));
			erofs_onlinefolio_end(fi.folio, ret);
		}
	}
	bio_uninit(&rq->bio);
	kfree(rq);
}

static void erofs_fileio_rq_submit(struct erofs_fileio_rq *rq)
{
	struct iov_iter iter;
	int ret;

	if (!rq)
		return;
	rq->iocb.ki_pos = rq->bio.bi_iter.bi_sector << SECTOR_SHIFT;
	rq->iocb.ki_ioprio = get_current_ioprio();
	rq->iocb.ki_complete = erofs_fileio_ki_complete;
	/* use direct I/O if requested by mount option and supported by the file */
	if (test_opt(&EROFS_SB(rq->sb)->opt, DIRECT_IO) &&
	    rq->iocb.ki_filp->f_mode & FMODE_CAN_ODIRECT)
		rq->iocb.ki_flags = IOCB_DIRECT;
	iov_iter_bvec(&iter, ITER_DEST, rq->bvecs, rq->bio.bi_vcnt,
		      rq->bio.bi_iter.bi_size);
	ret = vfs_iocb_iter_read(rq->iocb.ki_filp, &rq->iocb, &iter);
	if (ret != -EIOCBQUEUED)
		erofs_fileio_ki_complete(&rq->iocb, ret);
}

static struct erofs_fileio_rq *erofs_fileio_rq_alloc(struct erofs_map_dev *mdev)
{
	struct erofs_fileio_rq *rq = kzalloc(sizeof(*rq),
					     GFP_KERNEL | __GFP_NOFAIL);

	bio_init(&rq->bio, NULL, rq->bvecs, ARRAY_SIZE(rq->bvecs), REQ_OP_READ);
	rq->iocb.ki_filp = mdev->m_dif->file;
	rq->sb = mdev->m_sb;
	return rq;
}

struct bio *erofs_fileio_bio_alloc(struct erofs_map_dev *mdev)
{
	return &erofs_fileio_rq_alloc(mdev)->bio;
}

void erofs_fileio_submit_bio(struct bio *bio)
{
	return erofs_fileio_rq_submit(container_of(bio, struct erofs_fileio_rq,
						   bio));
}

/*
 * Walk the folio extent by extent: copy data that lives in metadata blocks,
 * zero unmapped holes, and queue mapped extents into file-backed read requests.
 */
static int erofs_fileio_scan_folio(struct erofs_fileio *io, struct folio *folio)
{
	struct inode *inode = folio_inode(folio);
	struct erofs_map_blocks *map = &io->map;
	unsigned int cur = 0, end = folio_size(folio), len, attached = 0;
	loff_t pos = folio_pos(folio), ofs;
	struct iov_iter iter;
	struct bio_vec bv;
	int err = 0;

	erofs_onlinefolio_init(folio);
	while (cur < end) {
		if (!in_range(pos + cur, map->m_la, map->m_llen)) {
			map->m_la = pos + cur;
			map->m_llen = end - cur;
			err = erofs_map_blocks(inode, map);
			if (err)
				break;
		}

		ofs = folio_pos(folio) + cur - map->m_la;
		len = min_t(loff_t, map->m_llen - ofs, end - cur);
		if (map->m_flags & EROFS_MAP_META) {
			/* inline data: copy it out of the metadata buffer */
			struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
			void *src;

			src = erofs_read_metabuf(&buf, inode->i_sb,
						 map->m_pa + ofs, true);
			if (IS_ERR(src)) {
				err = PTR_ERR(src);
				break;
			}
			bvec_set_folio(&bv, folio, len, cur);
			iov_iter_bvec(&iter, ITER_DEST, &bv, 1, len);
			if (copy_to_iter(src, len, &iter) != len) {
				erofs_put_metabuf(&buf);
				err = -EIO;
				break;
			}
			erofs_put_metabuf(&buf);
		} else if (!(map->m_flags & EROFS_MAP_MAPPED)) {
			/* unmapped hole: just zero it */
			folio_zero_segment(folio, cur, cur + len);
			attached = 0;
		} else {
			/* flush the pending request if this extent isn't contiguous with it */
			if (io->rq && (map->m_pa + ofs != io->dev.m_pa ||
				       map->m_deviceid != io->dev.m_deviceid)) {
io_retry:
				erofs_fileio_rq_submit(io->rq);
				io->rq = NULL;
			}

			if (!io->rq) {
				io->dev = (struct erofs_map_dev) {
					.m_pa = io->map.m_pa + ofs,
					.m_deviceid = io->map.m_deviceid,
				};
				err = erofs_map_dev(inode->i_sb, &io->dev);
				if (err)
					break;
				io->rq = erofs_fileio_rq_alloc(&io->dev);
				io->rq->bio.bi_iter.bi_sector =
					(io->dev.m_dif->fsoff + io->dev.m_pa) >> 9;
				attached = 0;
			}
			if (!bio_add_folio(&io->rq->bio, folio, len, cur))
				goto io_retry;
			if (!attached++)
				erofs_onlinefolio_split(folio);
			io->dev.m_pa += len;
		}
		cur += len;
	}
	erofs_onlinefolio_end(folio, err);
	return err;
}

static int erofs_fileio_read_folio(struct file *file, struct folio *folio)
{
	struct erofs_fileio io = {};
	int err;

	trace_erofs_read_folio(folio, true);
	err = erofs_fileio_scan_folio(&io, folio);
	erofs_fileio_rq_submit(io.rq);
	return err;
}

static void erofs_fileio_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct erofs_fileio io = {};
	struct folio *folio;
	int err;

	trace_erofs_readahead(inode, readahead_index(rac),
			      readahead_count(rac), true);
	while ((folio = readahead_folio(rac))) {
		err = erofs_fileio_scan_folio(&io, folio);
		if (err && err != -EINTR)
			erofs_err(inode->i_sb, "readahead error at folio %lu @ nid %llu",
				  folio->index, EROFS_I(inode)->nid);
	}
	erofs_fileio_rq_submit(io.rq);
}

const struct address_space_operations erofs_fileio_aops = {
	.read_folio = erofs_fileio_read_folio,
	.readahead = erofs_fileio_readahead,
};