// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "internal.h"
#include <linux/sched/mm.h>
#include <trace/events/erofs.h>

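/*
 * Metadata buffer helpers: an erofs_buf caches one page of metadata (plus
 * an optional kmap_local mapping) so consecutive reads within the same
 * page avoid repeated lookups.  erofs_unmap_metabuf() drops only the
 * mapping; erofs_put_metabuf() also releases the page reference.
 */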
void erofs_unmap_metabuf(struct erofs_buf *buf)
{
	if (!buf->base)
		return;
	kunmap_local(buf->base);
	buf->base = NULL;
}

void erofs_put_metabuf(struct erofs_buf *buf)
{
	if (!buf->page)
		return;
	erofs_unmap_metabuf(buf);
	folio_put(page_folio(buf->page));
	buf->page = NULL;
}

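/*
 * Read metadata at @offset (buf->off, the optional filesystem offset
 * within the backing store, is added on top) through the mapping set up
 * by erofs_init_metabuf().  The currently cached page is reused whenever
 * possible.  Returns a kmapped pointer to the requested byte, NULL if
 * @need_kmap is false (callers then use buf->page directly), or an
 * ERR_PTR() on I/O failure.
 */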
void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset, bool need_kmap)
{
	pgoff_t index = (buf->off + offset) >> PAGE_SHIFT;
	struct folio *folio = NULL;

	if (buf->page) {
		folio = page_folio(buf->page);
		/* drop the stale mapping if the wanted page differs */
		if (folio_file_page(folio, index) != buf->page)
			erofs_unmap_metabuf(buf);
	}
	if (!folio || !folio_contains(folio, index)) {
		erofs_put_metabuf(buf);
		folio = read_mapping_folio(buf->mapping, index, buf->file);
		if (IS_ERR(folio))
			return folio;
	}
	buf->page = folio_file_page(folio, index);
	if (!need_kmap)
		return NULL;
	if (!buf->base)
		buf->base = kmap_local_page(buf->page);
	return buf->base + (offset & ~PAGE_MASK);
}

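/*
 * Bind @buf to the backing store of @sb: the backing regular file for
 * file-backed mounts, the fscache pseudo inode in fscache mode, or the
 * block device mapping otherwise.
 */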
void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	buf->file = NULL;
	buf->off = sbi->dif0.fsoff;
	if (erofs_is_fileio_mode(sbi)) {
		buf->file = sbi->dif0.file;	/* some fs like FUSE needs it */
		buf->mapping = buf->file->f_mapping;
	} else if (erofs_is_fscache_mode(sb))
		buf->mapping = sbi->dif0.fscache->inode->i_mapping;
	else
		buf->mapping = sb->s_bdev->bd_mapping;
}

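/*
 * One-shot helper combining erofs_init_metabuf() and erofs_bread().
 * A minimal usage sketch, mirroring the callers below:
 *
 *	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
 *	void *ptr = erofs_read_metabuf(&buf, sb, pos, true);
 *
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	// ... use ptr ...
 *	erofs_put_metabuf(&buf);
 */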
void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
			 erofs_off_t offset, bool need_kmap)
{
	erofs_init_metabuf(buf, sb);
	return erofs_bread(buf, offset, need_kmap);
}

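/*
 * Translate a logical extent (map->m_la/m_llen) of @inode into a physical
 * one (map->m_pa/m_plen/m_deviceid).  Flat inodes map to a single
 * contiguous run starting at vi->startblk; with FLAT_INLINE the tail
 * block lives inside the inode metadata and is flagged EROFS_MAP_META.
 * Chunk-based inodes look up the per-chunk block map or chunk indexes
 * instead.
 */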
int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
{
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	struct super_block *sb = inode->i_sb;
	unsigned int unit, blksz = sb->s_blocksize;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_inode_chunk_index *idx;
	erofs_blk_t startblk, addrmask;
	bool tailpacking;
	erofs_off_t pos;
	u64 chunknr;
	int err = 0;

	trace_erofs_map_blocks_enter(inode, map, 0);
	map->m_deviceid = 0;
	map->m_flags = 0;
	if (map->m_la >= inode->i_size)	/* post-EOF stays unmapped */
		goto out;

	if (vi->datalayout != EROFS_INODE_CHUNK_BASED) {
		tailpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);
		if (!tailpacking && vi->startblk == EROFS_NULL_ADDR)
			goto out;
		/* the byte offset where the non-inline blocks end */
		pos = erofs_pos(sb, erofs_iblks(inode) - tailpacking);

		map->m_flags = EROFS_MAP_MAPPED;
		if (map->m_la < pos) {
			map->m_pa = erofs_pos(sb, vi->startblk) + map->m_la;
			map->m_llen = pos - map->m_la;
		} else {
			/* tail-packed data follows the inode metadata */
			map->m_pa = erofs_iloc(inode) + vi->inode_isize +
				vi->xattr_isize + erofs_blkoff(sb, map->m_la);
			map->m_llen = inode->i_size - map->m_la;
			map->m_flags |= EROFS_MAP_META;
		}
		goto out;
	}

	if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(*idx);			/* chunk index */
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;	/* block map */

	chunknr = map->m_la >> vi->chunkbits;
	pos = ALIGN(erofs_iloc(inode) + vi->inode_isize +
		    vi->xattr_isize, unit) + unit * chunknr;

	idx = erofs_read_metabuf(&buf, sb, pos, true);
	if (IS_ERR(idx)) {
		err = PTR_ERR(idx);
		goto out;
	}
	map->m_la = chunknr << vi->chunkbits;
	map->m_llen = min_t(erofs_off_t, 1UL << vi->chunkbits,
			    round_up(inode->i_size - map->m_la, blksz));
	if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES) {
		addrmask = (vi->chunkformat & EROFS_CHUNK_FORMAT_48BIT) ?
			BIT_ULL(48) - 1 : BIT_ULL(32) - 1;
		startblk = (((u64)le16_to_cpu(idx->startblk_hi) << 32) |
			    le32_to_cpu(idx->startblk_lo)) & addrmask;
		/* a chunk equal to EROFS_NULL_ADDR (under the mask) is a hole */
		if ((startblk ^ EROFS_NULL_ADDR) & addrmask) {
			map->m_deviceid = le16_to_cpu(idx->device_id) &
				EROFS_SB(sb)->device_id_mask;
			map->m_pa = erofs_pos(sb, startblk);
			map->m_flags = EROFS_MAP_MAPPED;
		}
	} else {
		startblk = le32_to_cpu(*(__le32 *)idx);
		if (startblk != (u32)EROFS_NULL_ADDR) {
			map->m_pa = erofs_pos(sb, startblk);
			map->m_flags = EROFS_MAP_MAPPED;
		}
	}
	erofs_put_metabuf(&buf);
out:
	if (!err) {
		map->m_plen = map->m_llen;
		/* inline data should be located in the same meta block */
		if ((map->m_flags & EROFS_MAP_META) &&
		    erofs_blkoff(sb, map->m_pa) + map->m_plen > blksz) {
			erofs_err(sb, "inline data across blocks @ nid %llu", vi->nid);
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
	}
	trace_erofs_map_blocks_exit(inode, map, 0, err);
	return err;
}

static void erofs_fill_from_devinfo(struct erofs_map_dev *map,
		struct super_block *sb, struct erofs_device_info *dif)
{
	map->m_sb = sb;
	map->m_dif = dif;
	map->m_bdev = NULL;
	if (dif->file && S_ISBLK(file_inode(dif->file)->i_mode))
		map->m_bdev = file_bdev(dif->file);
}

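/*
 * Resolve map->m_deviceid/m_pa to a concrete backing device.  Device id
 * 0 refers to the primary device; other ids are looked up in the device
 * IDR.  With "flatdev" layouts, all device blobs are concatenated into
 * the primary address space, so only m_pa needs rebasing.
 */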
int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
{
	struct erofs_dev_context *devs = EROFS_SB(sb)->devs;
	struct erofs_device_info *dif;
	erofs_off_t startoff;
	int id;

	erofs_fill_from_devinfo(map, sb, &EROFS_SB(sb)->dif0);
	map->m_bdev = sb->s_bdev;	/* use s_bdev for the primary device */
	if (map->m_deviceid) {
		down_read(&devs->rwsem);
		dif = idr_find(&devs->tree, map->m_deviceid - 1);
		if (!dif) {
			up_read(&devs->rwsem);
			return -ENODEV;
		}
		if (devs->flatdev) {
			/* rebase onto the concatenated (primary) address space */
			map->m_pa += erofs_pos(sb, dif->uniaddr);
			up_read(&devs->rwsem);
			return 0;
		}
		erofs_fill_from_devinfo(map, sb, dif);
		up_read(&devs->rwsem);
	} else if (devs->extra_devices && !devs->flatdev) {
		down_read(&devs->rwsem);
		idr_for_each_entry(&devs->tree, dif, id) {
			if (!dif->uniaddr)
				continue;

			/* find the device whose address range covers m_pa */
			startoff = erofs_pos(sb, dif->uniaddr);
			if (map->m_pa >= startoff &&
			    map->m_pa < startoff + erofs_pos(sb, dif->blocks)) {
				map->m_pa -= startoff;
				erofs_fill_from_devinfo(map, sb, dif);
				break;
			}
		}
		up_read(&devs->rwsem);
	}
	return 0;
}

/*
 * bit 30: I/O error occurred on this folio
 * bit 0 - 29: remaining parts to complete this folio
 */
#define EROFS_ONLINEFOLIO_EIO			(1 << 30)

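/*
 * "Online" folios are file-backed folios read in several parts.
 * folio->private holds the number of outstanding parts plus the error
 * bit above: _init() starts the count at 1 (the submitter's reference),
 * _split() adds one per extra part, and each _end() drops one; whoever
 * drops the last reference completes the folio read.
 */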
void erofs_onlinefolio_init(struct folio *folio)
{
	union {
		atomic_t o;
		void *v;
	} u = { .o = ATOMIC_INIT(1) };

	folio->private = u.v;	/* valid only if file-backed folio is locked */
}

void erofs_onlinefolio_split(struct folio *folio)
{
	atomic_inc((atomic_t *)&folio->private);
}

void erofs_onlinefolio_end(struct folio *folio, int err)
{
	int orig, v;

	/* drop one part and accumulate the error bit atomically */
	do {
		orig = atomic_read((atomic_t *)&folio->private);
		v = (orig - 1) | (err ? EROFS_ONLINEFOLIO_EIO : 0);
	} while (atomic_cmpxchg((atomic_t *)&folio->private, orig, v) != orig);

	if (v & ~EROFS_ONLINEFOLIO_EIO)	/* parts still pending */
		return;
	folio->private = 0;
	folio_end_read(folio, !(v & EROFS_ONLINEFOLIO_EIO));
}

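/*
 * iomap_begin for uncompressed data: convert the erofs mapping into an
 * iomap extent.  Inline (tail-packed) extents are exposed as
 * IOMAP_INLINE with the kmapped metabuf pointer stashed in
 * iomap->private so that erofs_iomap_end() can release it.
 */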
static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct super_block *sb = inode->i_sb;
	struct erofs_map_blocks map;
	struct erofs_map_dev mdev;

	map.m_la = offset;
	map.m_llen = length;

	ret = erofs_map_blocks(inode, &map);
	if (ret < 0)
		return ret;

	mdev = (struct erofs_map_dev) {
		.m_deviceid = map.m_deviceid,
		.m_pa = map.m_pa,
	};
	ret = erofs_map_dev(sb, &mdev);
	if (ret)
		return ret;

	iomap->offset = map.m_la;
	if (flags & IOMAP_DAX)
		iomap->dax_dev = mdev.m_dif->dax_dev;
	else
		iomap->bdev = mdev.m_bdev;
	iomap->length = map.m_llen;
	iomap->flags = 0;
	iomap->private = NULL;

	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		if (!iomap->length)
			iomap->length = length;
		return 0;
	}

	if (map.m_flags & EROFS_MAP_META) {
		void *ptr;
		struct erofs_buf buf = __EROFS_BUF_INITIALIZER;

		iomap->type = IOMAP_INLINE;
		ptr = erofs_read_metabuf(&buf, sb, mdev.m_pa, true);
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		iomap->inline_data = ptr;
		/* keep the metabuf mapped until erofs_iomap_end() */
		iomap->private = buf.base;
	} else {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = mdev.m_dif->fsoff + mdev.m_pa;
		if (flags & IOMAP_DAX)
			iomap->addr += mdev.m_dif->dax_part_off;
	}
	return 0;
}

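/* release the metabuf kept mapped by erofs_iomap_begin() for inline extents */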
static int erofs_iomap_end(struct inode *inode, loff_t pos, loff_t length,
		ssize_t written, unsigned int flags, struct iomap *iomap)
{
	void *ptr = iomap->private;

	if (ptr) {
		/* rebuild the erofs_buf from the kmapped pointer to drop it */
		struct erofs_buf buf = {
			.page = kmap_to_page(ptr),
			.base = ptr,
		};

		DBG_BUGON(iomap->type != IOMAP_INLINE);
		erofs_put_metabuf(&buf);
	} else {
		DBG_BUGON(iomap->type == IOMAP_INLINE);
	}
	return written;
}

static const struct iomap_ops erofs_iomap_ops = {
	.iomap_begin = erofs_iomap_begin,
	.iomap_end = erofs_iomap_end,
};

int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 u64 start, u64 len)
{
	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
#ifdef CONFIG_EROFS_FS_ZIP
		return iomap_fiemap(inode, fieinfo, start, len,
				    &z_erofs_iomap_report_ops);
#else
		return -EOPNOTSUPP;
#endif
	}
	return iomap_fiemap(inode, fieinfo, start, len, &erofs_iomap_ops);
}

/*
 * Since there are no write or truncate flows, no inode locking
 * needs to be held at the moment.
 */
static int erofs_read_folio(struct file *file, struct folio *folio)
{
	return iomap_read_folio(folio, &erofs_iomap_ops);
}

static void erofs_readahead(struct readahead_control *rac)
{
	return iomap_readahead(rac, &erofs_iomap_ops);
}

static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	return iomap_bmap(mapping, block, &erofs_iomap_ops);
}

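/*
 * Dispatch reads: DAX inodes go through dax_iomap_rw(), O_DIRECT uses
 * iomap_dio_rw() when a block device is present, and everything else
 * falls back to buffered reads via filemap_read().
 */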
static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	/* no need to take the (shared) inode lock since it's a ro filesystem */
	if (!iov_iter_count(to))
		return 0;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return dax_iomap_rw(iocb, to, &erofs_iomap_ops);
#endif
	/* direct I/O is only possible with a backing block device */
	if ((iocb->ki_flags & IOCB_DIRECT) && inode->i_sb->s_bdev)
		return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
				    NULL, 0, NULL, 0);
	return filemap_read(iocb, to, 0);
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_aops = {
	.read_folio = erofs_read_folio,
	.readahead = erofs_readahead,
	.bmap = erofs_bmap,
	.direct_IO = noop_direct_IO,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
};

#ifdef CONFIG_FS_DAX
static vm_fault_t erofs_dax_huge_fault(struct vm_fault *vmf,
		unsigned int order)
{
	return dax_iomap_fault(vmf, order, NULL, NULL, &erofs_iomap_ops);
}

static vm_fault_t erofs_dax_fault(struct vm_fault *vmf)
{
	return erofs_dax_huge_fault(vmf, 0);
}

static const struct vm_operations_struct erofs_dax_vm_ops = {
	.fault		= erofs_dax_fault,
	.huge_fault	= erofs_dax_huge_fault,
};

static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!IS_DAX(file_inode(file)))
		return generic_file_readonly_mmap(file, vma);

	/* the filesystem is read-only, so refuse writable shared mappings */
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;

	vma->vm_ops = &erofs_dax_vm_ops;
	vm_flags_set(vma, VM_HUGEPAGE);
	return 0;
}
#else
#define erofs_file_mmap	generic_file_readonly_mmap
#endif

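/*
 * SEEK_HOLE/SEEK_DATA are implemented through iomap; compressed inodes
 * use z_erofs_iomap_report_ops when CONFIG_EROFS_FS_ZIP is enabled and
 * otherwise fall back to generic_file_llseek().
 */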
static loff_t erofs_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	const struct iomap_ops *ops = &erofs_iomap_ops;

	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout))
#ifdef CONFIG_EROFS_FS_ZIP
		ops = &z_erofs_iomap_report_ops;
#else
		return generic_file_llseek(file, offset, whence);
#endif

	if (whence == SEEK_HOLE)
		offset = iomap_seek_hole(inode, offset, ops);
	else if (whence == SEEK_DATA)
		offset = iomap_seek_data(inode, offset, ops);
	else
		return generic_file_llseek(file, offset, whence);

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}

const struct file_operations erofs_file_fops = {
	.llseek		= erofs_file_llseek,
	.read_iter	= erofs_file_read_iter,
	.mmap		= erofs_file_mmap,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= filemap_splice_read,
};