Lines matching "no", "-", and "map" (fs/erofs/data.c)

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 */
In erofs_unmap_metabuf():
	if (buf->kmap_type == EROFS_KMAP)
		kunmap_local(buf->base);
	buf->base = NULL;
	buf->kmap_type = EROFS_NO_KMAP;
In erofs_put_metabuf():
	if (!buf->page)
		return;
	erofs_unmap_metabuf(buf);
	put_page(buf->page);
	buf->page = NULL;
/*
 * Derive the block size from inode->i_blkbits to make it compatible with
 * anonymous inode in fscache mode.
 */

In erofs_bread():
	struct inode *inode = buf->inode;
	erofs_off_t offset = (erofs_off_t)blkaddr << inode->i_blkbits;
	struct page *page = buf->page;
	...
	if (!page || page->index != index) {
		...
		folio = read_cache_folio(inode->i_mapping, index, NULL, NULL);
		...
		/* should already be PageUptodate, no need to lock page */
		buf->page = page;
	}
	if (buf->kmap_type == EROFS_NO_KMAP) {
		...
		buf->base = kmap_local_page(page);
		buf->kmap_type = type;
	} else if (buf->kmap_type != type) {
		...
		return ERR_PTR(-EFAULT);
	}
	...
	return buf->base + (offset & ~PAGE_MASK);
In erofs_init_metabuf():
	if (erofs_is_fscache_mode(sb))
		buf->inode = EROFS_SB(sb)->s_fscache->inode;
	else
		buf->inode = sb->s_bdev->bd_inode;
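
The helpers above form EROFS's metadata-buffer API. A minimal usage sketch follows (kernel context and the erofs-internal definitions such as __EROFS_BUF_INITIALIZER are assumed; the block number is made up):

	/* Illustrative only: read one metadata block, use it, release it. */
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	erofs_blk_t blkaddr = 0;			/* hypothetical block number */
	void *ptr;

	erofs_init_metabuf(&buf, sb);			/* bind buf to the meta inode */
	ptr = erofs_bread(&buf, blkaddr, EROFS_KMAP);	/* read + kmap the block */
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	/* ... parse at most one block of metadata through ptr ... */
	erofs_put_metabuf(&buf);			/* kunmap + drop the page */

Note that erofs_put_metabuf() is safe to call on an unused buffer: as shown above, it returns early when buf->page is NULL.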
static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map)
{
	u64 offset = map->m_la;
	struct super_block *sb = inode->i_sb;
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);
	...
	lastblk = nblocks - tailendpacking;

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;
	if (offset < erofs_pos(sb, lastblk)) {
		map->m_pa = erofs_pos(sb, vi->raw_blkaddr) + map->m_la;
		map->m_plen = erofs_pos(sb, lastblk) - offset;
	} else if (tailendpacking) {
		map->m_pa = erofs_iloc(inode) + vi->inode_isize +
			    vi->xattr_isize + erofs_blkoff(sb, offset);
		map->m_plen = inode->i_size - offset;

		if (erofs_blkoff(sb, map->m_pa) + map->m_plen > sb->s_blocksize) {
			erofs_err(sb, ...,
				  vi->nid);
			return -EFSCORRUPTED;
		}
		map->m_flags |= EROFS_MAP_META;
	} else {
		erofs_err(sb, ...,
			  vi->nid, inode->i_size, map->m_la);
		return -EIO;
	}
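
To make the two branches above concrete, here is a standalone sketch of the offset arithmetic with made-up numbers (4 KiB blocks, a 10000-byte tail-packed file); it only mirrors the math, not the real kernel structures:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t blksz = 4096;		/* hypothetical block size */
	const uint64_t i_size = 10000;		/* 2 full blocks + 1808-byte inline tail */
	const uint64_t raw_blkaddr = 1000;	/* hypothetical start block of the plain data */
	const uint64_t iloc = 40960, inode_isize = 64, xattr_isize = 12;	/* hypothetical */
	const uint64_t nblocks = (i_size + blksz - 1) / blksz;	/* 3 */
	const uint64_t lastblk = nblocks - 1;	/* tail-packed, so the last block is inline */

	/* Offset inside the plain blocks: maps straight after raw_blkaddr. */
	uint64_t la = 5000;
	printf("plain:  m_pa=%llu m_plen=%llu\n",
	       (unsigned long long)(raw_blkaddr * blksz + la),
	       (unsigned long long)(lastblk * blksz - la));

	/* Offset inside the inline tail: maps into the inode's metadata area. */
	la = 9000;
	printf("inline: m_pa=%llu m_plen=%llu\n",
	       (unsigned long long)(iloc + inode_isize + xattr_isize + (la & (blksz - 1))),
	       (unsigned long long)(i_size - la));
	return 0;
}

The boundary check in the tail branch rejects images where the inline data would cross a block, since such a tail can never be read with a single metadata block.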
int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
{
	struct super_block *sb = inode->i_sb;
	...
	trace_erofs_map_blocks_enter(inode, map, 0);
	map->m_deviceid = 0;
	if (map->m_la >= inode->i_size) {
		/* leave out-of-bound access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		...
	}

	if (vi->datalayout != EROFS_INODE_CHUNK_BASED) {
		err = erofs_map_blocks_flatmode(inode, map);
		...
	}

	if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		...
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;	/* block map */

	chunknr = map->m_la >> vi->chunkbits;
	pos = ALIGN(erofs_iloc(inode) + vi->inode_isize +
		    vi->xattr_isize, unit) + unit * chunknr;
	...
	map->m_la = chunknr << vi->chunkbits;
	map->m_plen = min_t(erofs_off_t, 1UL << vi->chunkbits,
			    round_up(inode->i_size - map->m_la, sb->s_blocksize));

	/* handle block map */
	if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
		...
		if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
			map->m_flags = 0;
		} else {
			map->m_pa = erofs_pos(sb, le32_to_cpu(*blkaddr));
			map->m_flags = EROFS_MAP_MAPPED;
		}
		...
	}
	...
	switch (le32_to_cpu(idx->blkaddr)) {
	case EROFS_NULL_ADDR:
		map->m_flags = 0;
		break;
	default:
		map->m_deviceid = le16_to_cpu(idx->device_id) &
				  EROFS_SB(sb)->device_id_mask;
		map->m_pa = erofs_pos(sb, le32_to_cpu(idx->blkaddr));
		map->m_flags = EROFS_MAP_MAPPED;
	}
	...
	map->m_llen = map->m_plen;
	trace_erofs_map_blocks_exit(inode, map, 0, err);
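
For the chunk-based path, the index lookup position "pos = ALIGN(...) + unit * chunknr" can be illustrated with a small standalone sketch; all numbers are hypothetical, and "unit" is assumed to be an 8-byte chunk-index entry:

#include <stdio.h>
#include <stdint.h>

#define ALIGN_UP(x, a)	((((x) + (a) - 1) / (a)) * (a))

int main(void)
{
	const uint64_t iloc = 40960;		/* hypothetical on-disk inode offset */
	const uint64_t inode_isize = 64, xattr_isize = 0;
	const uint64_t unit = 8;		/* assumed size of one chunk-index entry */
	const unsigned int chunkbits = 20;	/* 1 MiB chunks */
	const uint64_t m_la = (3ULL << chunkbits) + 12345;	/* somewhere in chunk 3 */

	uint64_t chunknr = m_la >> chunkbits;
	uint64_t pos = ALIGN_UP(iloc + inode_isize + xattr_isize, unit) + unit * chunknr;

	/* The returned extent starts at the chunk boundary and spans one chunk. */
	printf("chunknr=%llu, on-disk index position=%llu, m_la rounded to %llu\n",
	       (unsigned long long)chunknr, (unsigned long long)pos,
	       (unsigned long long)(chunknr << chunkbits));
	return 0;
}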
int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
{
	struct erofs_dev_context *devs = EROFS_SB(sb)->devs;
	...
	map->m_bdev = sb->s_bdev;
	map->m_daxdev = EROFS_SB(sb)->dax_dev;
	map->m_dax_part_off = EROFS_SB(sb)->dax_part_off;
	map->m_fscache = EROFS_SB(sb)->s_fscache;

	if (map->m_deviceid) {
		down_read(&devs->rwsem);
		dif = idr_find(&devs->tree, map->m_deviceid - 1);
		if (!dif) {
			up_read(&devs->rwsem);
			return -ENODEV;
		}
		if (devs->flatdev) {
			map->m_pa += erofs_pos(sb, dif->mapped_blkaddr);
			up_read(&devs->rwsem);
			return 0;
		}
		map->m_bdev = dif->bdev_handle ? dif->bdev_handle->bdev : NULL;
		map->m_daxdev = dif->dax_dev;
		map->m_dax_part_off = dif->dax_part_off;
		map->m_fscache = dif->fscache;
		up_read(&devs->rwsem);
	} else if (devs->extra_devices && !devs->flatdev) {
		down_read(&devs->rwsem);
		idr_for_each_entry(&devs->tree, dif, id) {
			if (!dif->mapped_blkaddr)
				continue;
			startoff = erofs_pos(sb, dif->mapped_blkaddr);
			length = erofs_pos(sb, dif->blocks);

			if (map->m_pa >= startoff &&
			    map->m_pa < startoff + length) {
				map->m_pa -= startoff;
				map->m_bdev = dif->bdev_handle ?
					      dif->bdev_handle->bdev : NULL;
				map->m_daxdev = dif->dax_dev;
				map->m_dax_part_off = dif->dax_part_off;
				map->m_fscache = dif->fscache;
				break;
			}
		}
		up_read(&devs->rwsem);
	}
In erofs_iomap_begin():
	struct super_block *sb = inode->i_sb;
	struct erofs_map_blocks map;
	...
	map.m_la = offset;
	map.m_llen = length;

	ret = erofs_map_blocks(inode, &map);
	...
	mdev = (struct erofs_map_dev) {
		.m_deviceid = map.m_deviceid,
		.m_pa = map.m_pa,
	};
	...
	iomap->offset = map.m_la;
	if (flags & IOMAP_DAX)
		iomap->dax_dev = mdev.m_daxdev;
	else
		iomap->bdev = mdev.m_bdev;
	iomap->length = map.m_llen;
	iomap->flags = 0;
	iomap->private = NULL;

	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		if (!iomap->length)
			iomap->length = length;
		...
	}

	if (map.m_flags & EROFS_MAP_META) {
		...
		iomap->type = IOMAP_INLINE;
		...
		iomap->inline_data = ptr + erofs_blkoff(sb, mdev.m_pa);
		iomap->private = buf.base;
	} else {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = mdev.m_pa;
		if (flags & IOMAP_DAX)
			iomap->addr += mdev.m_dax_part_off;
	}
In erofs_iomap_end():
	void *ptr = iomap->private;

	if (ptr) {
		DBG_BUGON(iomap->type != IOMAP_INLINE);
		...
	} else {
		DBG_BUGON(iomap->type == IOMAP_INLINE);
	}
In erofs_fiemap():
	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
		...
		return -EOPNOTSUPP;
	}
/*
 * Since we don't have write or truncate flows, no inode locking
 * needs to be held at the moment.
 */

In erofs_file_read_iter():
	struct inode *inode = file_inode(iocb->ki_filp);
	/* no need taking (shared) inode lock since it's a ro filesystem */
	...
	if (iocb->ki_flags & IOCB_DIRECT) {
		struct block_device *bdev = inode->i_sb->s_bdev;
		...
		if (bdev)
			blksize_mask = bdev_logical_block_size(bdev) - 1;
		else
			blksize_mask = i_blocksize(inode) - 1;

		if ((iocb->ki_pos | iov_iter_count(to) |
		     iov_iter_alignment(to)) & blksize_mask)
			return -EINVAL;
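
The direct-I/O check above folds position, length, and buffer alignment into one test: OR-ing the three values and masking with blksize_mask is non-zero as soon as any of them is not a multiple of the logical block size. A tiny standalone demonstration with hypothetical values and 512-byte blocks:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t blksize_mask = 512 - 1;		/* hypothetical logical block size */
	uint64_t pos = 4096, count = 1024, align = 512;	/* all aligned: accepted */

	printf("%s\n", ((pos | count | align) & blksize_mask) ? "-EINVAL" : "ok");

	count = 1000;					/* unaligned length: rejected */
	printf("%s\n", ((pos | count | align) & blksize_mask) ? "-EINVAL" : "ok");
	return 0;
}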
In erofs_file_mmap():
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;

	vma->vm_ops = &erofs_dax_vm_ops;