// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "internal.h"
#include <linux/filelock.h>
#include <linux/sched/mm.h>
#include <trace/events/erofs.h>

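/*
 * A struct erofs_buf caches a single metadata page: ->page pins the folio
 * and ->base holds its kmap_local() mapping, if any.  The two helpers
 * below tear that down; both are no-ops on an already-empty buffer.
 */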
void erofs_unmap_metabuf(struct erofs_buf *buf)
{
	if (!buf->base)
		return;
	kunmap_local(buf->base);
	buf->base = NULL;
}

void erofs_put_metabuf(struct erofs_buf *buf)
{
	if (!buf->page)
		return;
	erofs_unmap_metabuf(buf);
	folio_put(page_folio(buf->page));
	buf->page = NULL;
}

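/*
 * erofs_bread() returns a pointer to the metadata at @offset, or ERR_PTR()
 * on failure.  A page already cached in @buf is reused when it covers the
 * target index; otherwise the old page is dropped and a fresh one is read
 * in.  With need_kmap=false the page is only pinned (buf->page) and NULL
 * is returned instead of a mapped address.
 */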
void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset, bool need_kmap)
{
	pgoff_t index = (buf->off + offset) >> PAGE_SHIFT;
	struct folio *folio = NULL;
	loff_t fpos;
	int err;

	/*
	 * Metadata access for file-backed mounts reuses page cache of backing
	 * fs inodes (only folio data will be needed) to prevent double caching.
	 * However, the data access range must be verified here in advance.
	 */
	if (buf->file) {
		fpos = index << PAGE_SHIFT;
		err = rw_verify_area(READ, buf->file, &fpos, PAGE_SIZE);
		if (err < 0)
			return ERR_PTR(err);
	}

	if (buf->page) {
		folio = page_folio(buf->page);
		if (folio_file_page(folio, index) != buf->page)
			erofs_unmap_metabuf(buf);
	}
	if (!folio || !folio_contains(folio, index)) {
		erofs_put_metabuf(buf);
		folio = read_mapping_folio(buf->mapping, index, buf->file);
		if (IS_ERR(folio))
			return folio;
	}
	buf->page = folio_file_page(folio, index);
	if (!need_kmap)
		return NULL;
	if (!buf->base)
		buf->base = kmap_local_page(buf->page);
	return buf->base + (offset & ~PAGE_MASK);
}

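/*
 * Pick the address_space that backs metadata I/O for this mount: the
 * metabox inode if the object lives in the metadata box, the backing
 * file's mapping for file-backed (e.g. FUSE) mounts, the fscache pseudo
 * inode, or the raw block device otherwise.
 */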
int erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb,
		       bool in_metabox)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	buf->file = NULL;
	if (in_metabox) {
		if (unlikely(!sbi->metabox_inode))
			return -EFSCORRUPTED;
		buf->mapping = sbi->metabox_inode->i_mapping;
		return 0;
	}
	buf->off = sbi->dif0.fsoff;
	if (erofs_is_fileio_mode(sbi)) {
		buf->file = sbi->dif0.file;	/* some fs like FUSE needs it */
		buf->mapping = buf->file->f_mapping;
	} else if (erofs_is_fscache_mode(sb))
		buf->mapping = sbi->dif0.fscache->inode->i_mapping;
	else
		buf->mapping = sb->s_bdev->bd_mapping;
	return 0;
}

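/*
 * Convenience wrapper: bind @buf to @sb and read the metadata at @offset
 * in one call.  The usual pattern (cf. erofs_map_blocks() below, where
 * in_metabox comes from erofs_inode_in_metabox()) is:
 *
 *	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
 *	void *ptr = erofs_read_metabuf(&buf, sb, pos, in_metabox);
 *
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	...use ptr, then...
 *	erofs_put_metabuf(&buf);
 */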
void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
			 erofs_off_t offset, bool in_metabox)
{
	int err;

	err = erofs_init_metabuf(buf, sb, in_metabox);
	if (err)
		return ERR_PTR(err);
	return erofs_bread(buf, offset, true);
}

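/*
 * Translate the logical extent starting at map->m_la into a physical one
 * (m_pa/m_plen/m_deviceid).  Flat layouts map linearly from vi->startblk;
 * the tail of a FLAT_INLINE inode lives right after the on-disk inode and
 * is flagged EROFS_MAP_META; chunk-based inodes instead look up one block
 * map entry or chunk index per chunk.
 */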
int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
{
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	struct super_block *sb = inode->i_sb;
	unsigned int unit, blksz = sb->s_blocksize;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_inode_chunk_index *idx;
	erofs_blk_t startblk, addrmask;
	bool tailpacking;
	erofs_off_t pos;
	u64 chunknr;
	int err = 0;

	trace_erofs_map_blocks_enter(inode, map, 0);
	map->m_deviceid = 0;
	map->m_flags = 0;
	if (map->m_la >= inode->i_size)
		goto out;

	if (vi->datalayout != EROFS_INODE_CHUNK_BASED) {
		tailpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);
		if (!tailpacking && vi->startblk == EROFS_NULL_ADDR)
			goto out;
		pos = erofs_pos(sb, erofs_iblks(inode) - tailpacking);

		map->m_flags = EROFS_MAP_MAPPED;
		if (map->m_la < pos) {
			map->m_pa = erofs_pos(sb, vi->startblk) + map->m_la;
			map->m_llen = pos - map->m_la;
		} else {
			map->m_pa = erofs_iloc(inode) + vi->inode_isize +
				vi->xattr_isize + erofs_blkoff(sb, map->m_la);
			map->m_llen = inode->i_size - map->m_la;
			map->m_flags |= EROFS_MAP_META;
		}
		goto out;
	}

	if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(*idx);			/* chunk index */
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;	/* block map */

	chunknr = map->m_la >> vi->chunkbits;
	pos = ALIGN(erofs_iloc(inode) + vi->inode_isize +
		    vi->xattr_isize, unit) + unit * chunknr;

	idx = erofs_read_metabuf(&buf, sb, pos, erofs_inode_in_metabox(inode));
	if (IS_ERR(idx)) {
		err = PTR_ERR(idx);
		goto out;
	}
	map->m_la = chunknr << vi->chunkbits;
	map->m_llen = min_t(erofs_off_t, 1UL << vi->chunkbits,
			    round_up(inode->i_size - map->m_la, blksz));
	if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES) {
		addrmask = (vi->chunkformat & EROFS_CHUNK_FORMAT_48BIT) ?
			BIT_ULL(48) - 1 : BIT_ULL(32) - 1;
		startblk = (((u64)le16_to_cpu(idx->startblk_hi) << 32) |
			    le32_to_cpu(idx->startblk_lo)) & addrmask;
		if ((startblk ^ EROFS_NULL_ADDR) & addrmask) {
			map->m_deviceid = le16_to_cpu(idx->device_id) &
				EROFS_SB(sb)->device_id_mask;
			map->m_pa = erofs_pos(sb, startblk);
			map->m_flags = EROFS_MAP_MAPPED;
		}
	} else {
		startblk = le32_to_cpu(*(__le32 *)idx);
		if (startblk != (u32)EROFS_NULL_ADDR) {
			map->m_pa = erofs_pos(sb, startblk);
			map->m_flags = EROFS_MAP_MAPPED;
		}
	}
	erofs_put_metabuf(&buf);
out:
	if (!err) {
		map->m_plen = map->m_llen;
		/* inline data should be located in the same meta block */
		if ((map->m_flags & EROFS_MAP_META) &&
		    erofs_blkoff(sb, map->m_pa) + map->m_plen > blksz) {
			erofs_err(sb, "inline data across blocks @ nid %llu", vi->nid);
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
	}
	trace_erofs_map_blocks_exit(inode, map, 0, err);
	return err;
}

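/*
 * Multi-device support: a nonzero m_deviceid selects one of the extra
 * devices directly.  "flatdev" images instead concatenate all devices into
 * a single address space, so m_pa is rebased against the matching device's
 * uniaddr below.
 */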
static void erofs_fill_from_devinfo(struct erofs_map_dev *map,
		struct super_block *sb, struct erofs_device_info *dif)
{
	map->m_sb = sb;
	map->m_dif = dif;
	map->m_bdev = NULL;
	if (dif->file && S_ISBLK(file_inode(dif->file)->i_mode))
		map->m_bdev = file_bdev(dif->file);
}

int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
{
	struct erofs_dev_context *devs = EROFS_SB(sb)->devs;
	struct erofs_device_info *dif;
	erofs_off_t startoff;
	int id;

	erofs_fill_from_devinfo(map, sb, &EROFS_SB(sb)->dif0);
	map->m_bdev = sb->s_bdev;	/* use s_bdev for the primary device */
	if (map->m_deviceid) {
		down_read(&devs->rwsem);
		dif = idr_find(&devs->tree, map->m_deviceid - 1);
		if (!dif) {
			up_read(&devs->rwsem);
			return -ENODEV;
		}
		if (devs->flatdev) {
			map->m_pa += erofs_pos(sb, dif->uniaddr);
			up_read(&devs->rwsem);
			return 0;
		}
		erofs_fill_from_devinfo(map, sb, dif);
		up_read(&devs->rwsem);
	} else if (devs->extra_devices && !devs->flatdev) {
		down_read(&devs->rwsem);
		idr_for_each_entry(&devs->tree, dif, id) {
			if (!dif->uniaddr)
				continue;

			startoff = erofs_pos(sb, dif->uniaddr);
			if (map->m_pa >= startoff &&
			    map->m_pa < startoff + erofs_pos(sb, dif->blocks)) {
				map->m_pa -= startoff;
				erofs_fill_from_devinfo(map, sb, dif);
				break;
			}
		}
		up_read(&devs->rwsem);
	}
	return 0;
}

/*
 * bit 30: I/O error occurred on this folio
 * bit 29: CPU has dirty data in D-cache (needs aliasing handling);
 * bit 0 - 28: remaining parts to complete this folio
 */
#define EROFS_ONLINEFOLIO_EIO		30
#define EROFS_ONLINEFOLIO_DIRTY		29

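/*
 * Lifecycle of an "online" folio: init sets the private count to 1, each
 * subrequest takes an extra reference via erofs_onlinefolio_split(), and
 * erofs_onlinefolio_end() drops one.  Once the count in the low bits hits
 * zero, the EIO/DIRTY bits decide whether to flush the D-cache and whether
 * folio_end_read() marks the folio uptodate.
 */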
void erofs_onlinefolio_init(struct folio *folio)
{
	union {
		atomic_t o;
		void *v;
	} u = { .o = ATOMIC_INIT(1) };

	folio->private = u.v;	/* valid only if file-backed folio is locked */
}

void erofs_onlinefolio_split(struct folio *folio)
{
	atomic_inc((atomic_t *)&folio->private);
}

void erofs_onlinefolio_end(struct folio *folio, int err, bool dirty)
{
	int orig, v;

	do {
		orig = atomic_read((atomic_t *)&folio->private);
		DBG_BUGON(orig <= 0);
		v = dirty << EROFS_ONLINEFOLIO_DIRTY;
		v |= (orig - 1) | (!!err << EROFS_ONLINEFOLIO_EIO);
	} while (atomic_cmpxchg((atomic_t *)&folio->private, orig, v) != orig);

	if (v & (BIT(EROFS_ONLINEFOLIO_DIRTY) - 1))
		return;
	folio->private = 0;
	if (v & BIT(EROFS_ONLINEFOLIO_DIRTY))
		flush_dcache_folio(folio);
	folio_end_read(folio, !(v & BIT(EROFS_ONLINEFOLIO_EIO)));
}

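/*
 * Per-iteration context passed through iomap: it carries the "real" data
 * inode resolved by erofs_real_inode() and, for IOMAP_INLINE extents, the
 * metabuf page/base that back iomap->inline_data until ->iomap_end().
 */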
struct erofs_iomap_iter_ctx {
	struct page *page;
	void *base;
	struct inode *realinode;
};

static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
	struct erofs_iomap_iter_ctx *ctx = iter->private;
	struct inode *realinode = ctx ? ctx->realinode : inode;
	struct super_block *sb = realinode->i_sb;
	struct erofs_map_blocks map;
	struct erofs_map_dev mdev;
	int ret;

	map.m_la = offset;
	map.m_llen = length;
	ret = erofs_map_blocks(realinode, &map);
	if (ret < 0)
		return ret;

	iomap->offset = map.m_la;
	iomap->length = map.m_llen;
	iomap->flags = 0;
	iomap->addr = IOMAP_NULL_ADDR;
	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		iomap->type = IOMAP_HOLE;
		return 0;
	}

	if (!(map.m_flags & EROFS_MAP_META) || !erofs_inode_in_metabox(realinode)) {
		mdev = (struct erofs_map_dev) {
			.m_deviceid = map.m_deviceid,
			.m_pa = map.m_pa,
		};
		ret = erofs_map_dev(sb, &mdev);
		if (ret)
			return ret;

		if (flags & IOMAP_DAX)
			iomap->dax_dev = mdev.m_dif->dax_dev;
		else
			iomap->bdev = mdev.m_bdev;
		iomap->addr = mdev.m_dif->fsoff + mdev.m_pa;
		if (flags & IOMAP_DAX)
			iomap->addr += mdev.m_dif->dax_part_off;
	}

	if (map.m_flags & EROFS_MAP_META) {
		iomap->type = IOMAP_INLINE;
		/* read context should read the inlined data */
		if (ctx) {
			struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
			void *ptr;

			ptr = erofs_read_metabuf(&buf, sb, map.m_pa,
					erofs_inode_in_metabox(realinode));
			if (IS_ERR(ptr))
				return PTR_ERR(ptr);
			iomap->inline_data = ptr;
			ctx->page = buf.page;
			ctx->base = buf.base;
		}
	} else {
		iomap->type = IOMAP_MAPPED;
	}
	return 0;
}

static int erofs_iomap_end(struct inode *inode, loff_t pos, loff_t length,
		ssize_t written, unsigned int flags, struct iomap *iomap)
{
	struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
	struct erofs_iomap_iter_ctx *ctx = iter->private;

	if (ctx && ctx->base) {
		struct erofs_buf buf = {
			.page = ctx->page,
			.base = ctx->base,
		};

		DBG_BUGON(iomap->type != IOMAP_INLINE);
		erofs_put_metabuf(&buf);
		ctx->base = NULL;
	}
	return written;
}

static const struct iomap_ops erofs_iomap_ops = {
	.iomap_begin = erofs_iomap_begin,
	.iomap_end = erofs_iomap_end,
};

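/*
 * These ops back every uncompressed I/O path in this file: buffered reads
 * and readahead, direct I/O, DAX access, bmap and fiemap all go through
 * erofs_iomap_begin()/erofs_iomap_end() above.
 */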
int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 u64 start, u64 len)
{
	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
		if (!IS_ENABLED(CONFIG_EROFS_FS_ZIP))
			return -EOPNOTSUPP;
		return iomap_fiemap(inode, fieinfo, start, len,
				    &z_erofs_iomap_report_ops);
	}
	return iomap_fiemap(inode, fieinfo, start, len, &erofs_iomap_ops);
}

/*
 * Since we don't have write or truncate flows, no inode locking needs to
 * be held at the moment.
 */
static int erofs_read_folio(struct file *file, struct folio *folio)
{
	struct iomap_read_folio_ctx read_ctx = {
		.ops = &iomap_bio_read_ops,
		.cur_folio = folio,
	};
	bool need_iput;
	struct erofs_iomap_iter_ctx iter_ctx = {
		.realinode = erofs_real_inode(folio_inode(folio), &need_iput),
	};

	trace_erofs_read_folio(iter_ctx.realinode, folio, true);
	iomap_read_folio(&erofs_iomap_ops, &read_ctx, &iter_ctx);
	if (need_iput)
		iput(iter_ctx.realinode);
	return 0;
}

static void erofs_readahead(struct readahead_control *rac)
{
	struct iomap_read_folio_ctx read_ctx = {
		.ops = &iomap_bio_read_ops,
		.rac = rac,
	};
	bool need_iput;
	struct erofs_iomap_iter_ctx iter_ctx = {
		.realinode = erofs_real_inode(rac->mapping->host, &need_iput),
	};

	trace_erofs_readahead(iter_ctx.realinode, readahead_index(rac),
			      readahead_count(rac), true);
	iomap_readahead(&erofs_iomap_ops, &read_ctx, &iter_ctx);
	if (need_iput)
		iput(iter_ctx.realinode);
}

static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	return iomap_bmap(mapping, block, &erofs_iomap_ops);
}

static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	/* no need taking (shared) inode lock since it's a ro filesystem */
	if (!iov_iter_count(to))
		return 0;

	if (IS_ENABLED(CONFIG_FS_DAX) && IS_DAX(inode))
		return dax_iomap_rw(iocb, to, &erofs_iomap_ops);

	if ((iocb->ki_flags & IOCB_DIRECT) && inode->i_sb->s_bdev) {
		struct erofs_iomap_iter_ctx iter_ctx = {
			.realinode = inode,
		};

		return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
				    NULL, 0, &iter_ctx, 0);
	}
	return filemap_read(iocb, to, 0);
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_aops = {
	.read_folio = erofs_read_folio,
	.readahead = erofs_readahead,
	.bmap = erofs_bmap,
	.direct_IO = noop_direct_IO,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
};

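/*
 * DAX maps the backing device directly into userspace, bypassing the page
 * cache; since EROFS is read-only, shared writable mappings are refused at
 * mmap time in erofs_file_mmap_prepare() below.
 */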
#ifdef CONFIG_FS_DAX
static vm_fault_t erofs_dax_huge_fault(struct vm_fault *vmf,
		unsigned int order)
{
	return dax_iomap_fault(vmf, order, NULL, NULL, &erofs_iomap_ops);
}

static vm_fault_t erofs_dax_fault(struct vm_fault *vmf)
{
	return erofs_dax_huge_fault(vmf, 0);
}

static const struct vm_operations_struct erofs_dax_vm_ops = {
	.fault		= erofs_dax_fault,
	.huge_fault	= erofs_dax_huge_fault,
};

static int erofs_file_mmap_prepare(struct vm_area_desc *desc)
{
	if (!IS_DAX(file_inode(desc->file)))
		return generic_file_readonly_mmap_prepare(desc);

	if (vma_desc_test_all(desc, VMA_SHARED_BIT, VMA_MAYWRITE_BIT))
		return -EINVAL;

	desc->vm_ops = &erofs_dax_vm_ops;
	vma_desc_set_flags(desc, VMA_HUGEPAGE_BIT);
	return 0;
}
#else
#define erofs_file_mmap_prepare	generic_file_readonly_mmap_prepare
#endif

static loff_t erofs_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	const struct iomap_ops *ops = &erofs_iomap_ops;

	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
		if (!IS_ENABLED(CONFIG_EROFS_FS_ZIP))
			return generic_file_llseek(file, offset, whence);
		ops = &z_erofs_iomap_report_ops;
	}

	if (whence == SEEK_HOLE)
		offset = iomap_seek_hole(inode, offset, ops);
	else if (whence == SEEK_DATA)
		offset = iomap_seek_data(inode, offset, ops);
	else
		return generic_file_llseek(file, offset, whence);

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}

const struct file_operations erofs_file_fops = {
	.llseek		= erofs_file_llseek,
	.read_iter	= erofs_file_read_iter,
	.unlocked_ioctl	= erofs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= erofs_compat_ioctl,
#endif
	.mmap_prepare	= erofs_file_mmap_prepare,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= filemap_splice_read,
	.setlease	= generic_setlease,
};