// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018-2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include "internal.h"
#include <linux/unaligned.h>
#include <trace/events/erofs.h>

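/*
 * Per-lookup context: records the compression extent information gathered
 * while walking the on-disk (de)compression indexes of one inode.
 */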
struct z_erofs_maprecorder {
	struct inode *inode;
	struct erofs_map_blocks *map;
	unsigned long lcn;
	/* compression extent information gathered */
	u8 type, headtype;
	u16 clusterofs;
	u16 delta[2];
	erofs_blk_t pblk, compressedblks;
	erofs_off_t nextpackoff;
	bool partialref, in_mbox;
};

static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m,
				      unsigned long lcn)
{
	struct inode *const inode = m->inode;
	struct erofs_inode *const vi = EROFS_I(inode);
	const erofs_off_t pos = Z_EROFS_FULL_INDEX_START(erofs_iloc(inode) +
			vi->inode_isize + vi->xattr_isize) +
			lcn * sizeof(struct z_erofs_lcluster_index);
	struct z_erofs_lcluster_index *di;
	unsigned int advise;

	di = erofs_read_metabuf(&m->map->buf, inode->i_sb, pos, m->in_mbox);
	if (IS_ERR(di))
		return PTR_ERR(di);
	m->lcn = lcn;
	m->nextpackoff = pos + sizeof(struct z_erofs_lcluster_index);

	advise = le16_to_cpu(di->di_advise);
	m->type = advise & Z_EROFS_LI_LCLUSTER_TYPE_MASK;
	if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
		m->clusterofs = 1 << vi->z_lclusterbits;
		m->delta[0] = le16_to_cpu(di->di_u.delta[0]);
		if (m->delta[0] & Z_EROFS_LI_D0_CBLKCNT) {
			if (!(vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
					      Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
				DBG_BUGON(1);
				return -EFSCORRUPTED;
			}
			m->compressedblks = m->delta[0] & ~Z_EROFS_LI_D0_CBLKCNT;
			m->delta[0] = 1;
		}
		m->delta[1] = le16_to_cpu(di->di_u.delta[1]);
	} else {
		m->partialref = !!(advise & Z_EROFS_LI_PARTIAL_REF);
		m->clusterofs = le16_to_cpu(di->di_clusterofs);
		if (m->clusterofs >= 1 << vi->z_lclusterbits) {
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		m->pblk = le32_to_cpu(di->di_u.blkaddr);
	}
	return 0;
}

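/*
 * Decode one entry of a compacted index pack: each entry packs `lobits'
 * low bits (clusterofs for HEADs, delta for NONHEADs) followed by a 2-bit
 * lcluster type, so an unaligned 32-bit read always covers a whole entry.
 */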
static unsigned int decode_compactedbits(unsigned int lobits,
					 u8 *in, unsigned int pos, u8 *type)
{
	const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7);
	const unsigned int lo = v & ((1 << lobits) - 1);

	*type = (v >> lobits) & 3;
	return lo;
}

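/*
 * Count the lookahead distance (delta[1]) of the lcluster in slot `i':
 * the number of NONHEAD lclusters before the next HEAD one.
 */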
static int get_compacted_la_distance(unsigned int lobits,
				     unsigned int encodebits,
				     unsigned int vcnt, u8 *in, int i)
{
	unsigned int lo, d1 = 0;
	u8 type;

	DBG_BUGON(i >= vcnt);

	do {
		lo = decode_compactedbits(lobits, in, encodebits * i, &type);

		if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
			return d1;
		++d1;
	} while (++i < vcnt);

	/*
	 * The last lcluster (slot vcnt - 1) is NONHEAD here: unless it's
	 * a CBLKCNT entry, its lo field stores delta[1] of that lcluster.
	 */
	if (!(lo & Z_EROFS_LI_D0_CBLKCNT))
		d1 += lo - 1;
	return d1;
}

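/*
 * Compacted indexes amortize one base blkaddr over a pack of entries:
 * an initial run of 4-byte-amortized entries up to 32-byte alignment,
 * then (if COMPACTED_2B is advised) a 16-entry-aligned 2-byte-amortized
 * region, and 4-byte-amortized entries for the remainder.
 */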
static int z_erofs_load_compact_lcluster(struct z_erofs_maprecorder *m,
					 unsigned long lcn, bool lookahead)
{
	struct inode *const inode = m->inode;
	struct erofs_inode *const vi = EROFS_I(inode);
	const erofs_off_t ebase = Z_EROFS_MAP_HEADER_END(erofs_iloc(inode) +
			vi->inode_isize + vi->xattr_isize);
	const unsigned int lclusterbits = vi->z_lclusterbits;
	const unsigned int totalidx = erofs_iblks(inode);
	unsigned int compacted_4b_initial, compacted_2b, amortizedshift;
	unsigned int vcnt, lo, lobits, encodebits, nblk, bytes;
	bool big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
	erofs_off_t pos;
	u8 *in, type;
	int i;

	if (lcn >= totalidx || lclusterbits > 14)
		return -EINVAL;

	m->lcn = lcn;
	/* 4-byte entries used to reach 32-byte (compacted_2b) alignment */
	compacted_4b_initial = ((32 - ebase % 32) / 4) & 7;
	compacted_2b = 0;
	if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
	    compacted_4b_initial < totalidx)
		compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);

	pos = ebase;
	amortizedshift = 2;	/* compact_4b */
	if (lcn >= compacted_4b_initial) {
		pos += compacted_4b_initial * 4;
		lcn -= compacted_4b_initial;
		if (lcn < compacted_2b) {
			amortizedshift = 1;
		} else {
			pos += compacted_2b * 2;
			lcn -= compacted_2b;
		}
	}
	pos += lcn * (1 << amortizedshift);

	/* figure out the lcluster count in this pack */
	if (1 << amortizedshift == 4 && lclusterbits <= 14)
		vcnt = 2;
	else if (1 << amortizedshift == 2 && lclusterbits <= 12)
		vcnt = 16;
	else
		return -EOPNOTSUPP;

	in = erofs_read_metabuf(&m->map->buf, inode->i_sb, pos, m->in_mbox);
	if (IS_ERR(in))
		return PTR_ERR(in);

	/* not round_up(pos, ..): advance even if pos is already pack-aligned */
	m->nextpackoff = round_down(pos, vcnt << amortizedshift) +
			 (vcnt << amortizedshift);
	lobits = max(lclusterbits, ilog2(Z_EROFS_LI_D0_CBLKCNT) + 1U);
	encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
	bytes = pos & ((vcnt << amortizedshift) - 1);
	in -= bytes;
	i = bytes >> amortizedshift;

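	/* `in' now points to the pack base; `i' is our slot within the pack */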
	lo = decode_compactedbits(lobits, in, encodebits * i, &type);
	m->type = type;
	if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
		m->clusterofs = 1 << lclusterbits;

		/* figure out lookahead_distance: delta[1] if needed */
		if (lookahead)
			m->delta[1] = get_compacted_la_distance(lobits,
						encodebits, vcnt, in, i);
		if (lo & Z_EROFS_LI_D0_CBLKCNT) {
			if (!big_pcluster) {
				DBG_BUGON(1);
				return -EFSCORRUPTED;
			}
			m->compressedblks = lo & ~Z_EROFS_LI_D0_CBLKCNT;
			m->delta[0] = 1;
			return 0;
		} else if (i + 1 != (int)vcnt) {
			m->delta[0] = lo;
			return 0;
		}
		/*
		 * The last lcluster in the pack is special: its lo field
		 * stores delta[1] rather than delta[0], so derive delta[0]
		 * indirectly from the previous lcluster.
		 */
		lo = decode_compactedbits(lobits, in,
					  encodebits * (i - 1), &type);
		if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
			lo = 0;
		else if (lo & Z_EROFS_LI_D0_CBLKCNT)
			lo = 1;
		m->delta[0] = lo + 1;
		return 0;
	}
	m->clusterofs = lo;
	m->delta[0] = 0;
	/* figure out blkaddr (pblk) for HEAD lclusters */
	if (!big_pcluster) {
		nblk = 1;
		while (i > 0) {
			--i;
			lo = decode_compactedbits(lobits, in,
						  encodebits * i, &type);
			if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD)
				i -= lo;

			if (i >= 0)
				++nblk;
		}
	} else {
		nblk = 0;
		while (i > 0) {
			--i;
			lo = decode_compactedbits(lobits, in,
						  encodebits * i, &type);
			if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
				if (lo & Z_EROFS_LI_D0_CBLKCNT) {
					--i;
					nblk += lo & ~Z_EROFS_LI_D0_CBLKCNT;
					continue;
				}
				/* bigpcluster shouldn't have plain d0 == 1 */
				if (lo <= 1) {
					DBG_BUGON(1);
					return -EFSCORRUPTED;
				}
				i -= lo - 2;
				continue;
			}
			++nblk;
		}
	}
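	/* each pack stores its base blkaddr in the trailing __le32 */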
	in += (vcnt << amortizedshift) - sizeof(__le32);
	m->pblk = le32_to_cpu(*(__le32 *)in) + nblk;
	return 0;
}

static int z_erofs_load_lcluster_from_disk(struct z_erofs_maprecorder *m,
					   unsigned int lcn, bool lookahead)
{
	int err;

	switch (EROFS_I(m->inode)->datalayout) {
	case EROFS_INODE_COMPRESSED_FULL:
		err = z_erofs_load_full_lcluster(m, lcn);
		break;
	case EROFS_INODE_COMPRESSED_COMPACT:
		err = z_erofs_load_compact_lcluster(m, lcn, lookahead);
		break;
	default:
		err = -EINVAL;
	}

	/* validate the freshly loaded type, not a stale one from a prior call */
	if (!err && m->type >= Z_EROFS_LCLUSTER_TYPE_MAX) {
		erofs_err(m->inode->i_sb, "unknown type %u @ lcn %u of nid %llu",
			  m->type, lcn, EROFS_I(m->inode)->nid);
		DBG_BUGON(1);
		return -EOPNOTSUPP;
	}
	return err;
}

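/*
 * Walk backwards (following NONHEAD delta[0]s) to the HEAD lcluster that
 * starts the extent, recording its type and logical start in @m.
 */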
static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
				   unsigned int lookback_distance)
{
	struct super_block *sb = m->inode->i_sb;
	struct erofs_inode *const vi = EROFS_I(m->inode);
	const unsigned int lclusterbits = vi->z_lclusterbits;

	while (m->lcn >= lookback_distance) {
		unsigned long lcn = m->lcn - lookback_distance;
		int err;

		err = z_erofs_load_lcluster_from_disk(m, lcn, false);
		if (err)
			return err;

		if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
			lookback_distance = m->delta[0];
			if (!lookback_distance)
				break;
			continue;
		} else {
			m->headtype = m->type;
			m->map->m_la = (lcn << lclusterbits) | m->clusterofs;
			return 0;
		}
	}
	erofs_err(sb, "bogus lookback distance %u @ lcn %lu of nid %llu",
		  lookback_distance, m->lcn, vi->nid);
	DBG_BUGON(1);
	return -EFSCORRUPTED;
}

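/*
 * Work out the compressed extent length (m_plen): one block unless the
 * big pcluster feature applies, in which case the first NONHEAD lcluster
 * carries a CBLKCNT entry with the pcluster size in blocks.
 */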
static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
					    unsigned int initial_lcn)
{
	struct inode *inode = m->inode;
	struct super_block *sb = inode->i_sb;
	struct erofs_inode *vi = EROFS_I(inode);
	bool bigpcl1 = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
	bool bigpcl2 = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2;
	unsigned long lcn = m->lcn + 1;
	int err;

	DBG_BUGON(m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD);
	DBG_BUGON(m->type != m->headtype);

	if ((m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD1 && !bigpcl1) ||
	    ((m->headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
	      m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2) && !bigpcl2) ||
	    (lcn << vi->z_lclusterbits) >= inode->i_size)
		m->compressedblks = 1;

	if (m->compressedblks)
		goto out;

	err = z_erofs_load_lcluster_from_disk(m, lcn, false);
	if (err)
		return err;

	/*
	 * If the 1st NONHEAD lcluster was already handled initially without
	 * a valid compressedblks, it at least mustn't be a CBLKCNT entry, or
	 * an internal implementation error has been detected.
	 *
	 * The code below can handle that case properly anyway, but BUG_ON
	 * in debugging mode so that developers notice it.
	 */
	DBG_BUGON(lcn == initial_lcn &&
		  m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD);

	if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD && m->delta[0] != 1) {
		erofs_err(sb, "bogus CBLKCNT @ lcn %lu of nid %llu", lcn, vi->nid);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}

	/*
	 * If the 1st lcluster after the HEAD is actually PLAIN or HEAD type
	 * rather than a CBLKCNT NONHEAD, it's a one-block-sized pcluster.
	 */
	if (m->type != Z_EROFS_LCLUSTER_TYPE_NONHEAD || !m->compressedblks)
		m->compressedblks = 1;
out:
	m->map->m_plen = erofs_pos(sb, m->compressedblks);
	return 0;
}

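/*
 * Extend m_llen to the full decompressed extent length by walking forward
 * (following NONHEAD delta[1]s) until the next HEAD lcluster or EOF.
 */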
static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
{
	struct inode *inode = m->inode;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_map_blocks *map = m->map;
	unsigned int lclusterbits = vi->z_lclusterbits;
	u64 lcn = m->lcn, headlcn = map->m_la >> lclusterbits;
	int err;

	while (1) {
		/* handle the last EOF pcluster (no next HEAD lcluster) */
		if ((lcn << lclusterbits) >= inode->i_size) {
			map->m_llen = inode->i_size - map->m_la;
			return 0;
		}

		err = z_erofs_load_lcluster_from_disk(m, lcn, true);
		if (err)
			return err;

		if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
			/* work around invalid d1 generated by pre-1.0 mkfs */
			if (unlikely(!m->delta[1])) {
				m->delta[1] = 1;
				DBG_BUGON(1);
			}
		} else if (m->type < Z_EROFS_LCLUSTER_TYPE_MAX) {
			if (lcn != headlcn)
				break;	/* ends at the next HEAD lcluster */
			m->delta[1] = 1;
		}
		lcn += m->delta[1];
	}
	map->m_llen = (lcn << lclusterbits) + m->clusterofs - map->m_la;
	return 0;
}

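/*
 * Map a logical offset to its (compressed) physical extent for the
 * lcluster-index-based layouts (EROFS_INODE_COMPRESSED_{FULL,COMPACT}).
 */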
static int z_erofs_map_blocks_fo(struct inode *inode,
				 struct erofs_map_blocks *map, int flags)
{
	struct erofs_inode *vi = EROFS_I(inode);
	struct super_block *sb = inode->i_sb;
	bool fragment = vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
	bool ztailpacking = vi->z_idata_size;
	unsigned int lclusterbits = vi->z_lclusterbits;
	struct z_erofs_maprecorder m = {
		.inode = inode,
		.map = map,
		.in_mbox = erofs_inode_in_metabox(inode),
	};
	int err = 0;
	unsigned int endoff, afmt;
	unsigned long initial_lcn;
	unsigned long long ofs, end;

	ofs = flags & EROFS_GET_BLOCKS_FINDTAIL ? inode->i_size - 1 : map->m_la;
	if (fragment && !(flags & EROFS_GET_BLOCKS_FINDTAIL) &&
	    !vi->z_tailextent_headlcn) {
		map->m_la = 0;
		map->m_llen = inode->i_size;
		map->m_flags = EROFS_MAP_FRAGMENT;
		return 0;
	}
	initial_lcn = ofs >> lclusterbits;
	endoff = ofs & ((1 << lclusterbits) - 1);

	err = z_erofs_load_lcluster_from_disk(&m, initial_lcn, false);
	if (err)
		goto unmap_out;

	if ((flags & EROFS_GET_BLOCKS_FINDTAIL) && ztailpacking)
		vi->z_fragmentoff = m.nextpackoff;
	map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_ENCODED;
	end = (m.lcn + 1ULL) << lclusterbits;

	if (m.type != Z_EROFS_LCLUSTER_TYPE_NONHEAD && endoff >= m.clusterofs) {
		m.headtype = m.type;
		map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
		/*
		 * For ztailpacking files, special EOF lclusters (of at most
		 * three parts) are supported so that tail data can be
		 * inlined more effectively.
		 */
		if (ztailpacking && end > inode->i_size)
			end = inode->i_size;
	} else {
		if (m.type != Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
			/* m.lcn should be >= 1 if endoff < m.clusterofs */
			if (!m.lcn) {
				erofs_err(sb, "invalid logical cluster 0 at nid %llu",
					  vi->nid);
				err = -EFSCORRUPTED;
				goto unmap_out;
			}
			end = (m.lcn << lclusterbits) | m.clusterofs;
			map->m_flags |= EROFS_MAP_FULL_MAPPED;
			m.delta[0] = 1;
		}
		/* get the corresponding first chunk */
		err = z_erofs_extent_lookback(&m, m.delta[0]);
		if (err)
			goto unmap_out;
	}
	if (m.partialref)
		map->m_flags |= EROFS_MAP_PARTIAL_REF;
	map->m_llen = end - map->m_la;

	if (flags & EROFS_GET_BLOCKS_FINDTAIL) {
		vi->z_tailextent_headlcn = m.lcn;
		/* for non-compact indexes, fragmentoff is 64 bits */
		if (fragment && vi->datalayout == EROFS_INODE_COMPRESSED_FULL)
			vi->z_fragmentoff |= (u64)m.pblk << 32;
	}
	if (ztailpacking && m.lcn == vi->z_tailextent_headlcn) {
		map->m_flags |= EROFS_MAP_META;
		map->m_pa = vi->z_fragmentoff;
		map->m_plen = vi->z_idata_size;
		if (erofs_blkoff(sb, map->m_pa) + map->m_plen > sb->s_blocksize) {
			erofs_err(sb, "invalid tail-packing pclustersize %llu",
				  map->m_plen);
			err = -EFSCORRUPTED;
			goto unmap_out;
		}
	} else if (fragment && m.lcn == vi->z_tailextent_headlcn) {
		map->m_flags = EROFS_MAP_FRAGMENT;
	} else {
		map->m_pa = erofs_pos(sb, m.pblk);
		err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
		if (err)
			goto unmap_out;
	}

	if (m.headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN) {
		if (map->m_llen > map->m_plen) {
			DBG_BUGON(1);
			err = -EFSCORRUPTED;
			goto unmap_out;
		}
		afmt = vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER ?
			Z_EROFS_COMPRESSION_INTERLACED :
			Z_EROFS_COMPRESSION_SHIFTED;
	} else {
		afmt = m.headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2 ?
			vi->z_algorithmtype[1] : vi->z_algorithmtype[0];
		if (!(EROFS_I_SB(inode)->available_compr_algs & (1 << afmt))) {
			erofs_err(sb, "inconsistent algorithmtype %u for nid %llu",
				  afmt, vi->nid);
			err = -EFSCORRUPTED;
			goto unmap_out;
		}
	}
	map->m_algorithmformat = afmt;

	if ((flags & EROFS_GET_BLOCKS_FIEMAP) ||
	    ((flags & EROFS_GET_BLOCKS_READMORE) &&
	     (map->m_algorithmformat == Z_EROFS_COMPRESSION_LZMA ||
	      map->m_algorithmformat == Z_EROFS_COMPRESSION_DEFLATE ||
	      map->m_algorithmformat == Z_EROFS_COMPRESSION_ZSTD) &&
	     map->m_llen >= i_blocksize(inode))) {
		err = z_erofs_get_extent_decompressedlen(&m);
		if (!err)
			map->m_flags |= EROFS_MAP_FULL_MAPPED;
	}

unmap_out:
	erofs_unmap_metabuf(&m.map->buf);
	return err;
}

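/*
 * Map a logical offset for the extent-based layout (Z_EROFS_ADVISE_EXTENTS):
 * small records without pstart_hi are implicitly indexed by lcluster and
 * scanned linearly; larger records are binary-searched over z_extents.
 */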
static int z_erofs_map_blocks_ext(struct inode *inode,
				  struct erofs_map_blocks *map, int flags)
{
	struct erofs_inode *vi = EROFS_I(inode);
	struct super_block *sb = inode->i_sb;
	bool interlaced = vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER;
	unsigned int recsz = z_erofs_extent_recsize(vi->z_advise);
	erofs_off_t pos = round_up(Z_EROFS_MAP_HEADER_END(erofs_iloc(inode) +
			vi->inode_isize + vi->xattr_isize), recsz);
	bool in_mbox = erofs_inode_in_metabox(inode);
	erofs_off_t lend = inode->i_size;
	erofs_off_t l, r, mid, pa, la, lstart;
	struct z_erofs_extent *ext;
	unsigned int fmt;
	bool last;

	map->m_flags = 0;
	if (recsz <= offsetof(struct z_erofs_extent, pstart_hi)) {
		if (recsz <= offsetof(struct z_erofs_extent, pstart_lo)) {
			ext = erofs_read_metabuf(&map->buf, sb, pos, in_mbox);
			if (IS_ERR(ext))
				return PTR_ERR(ext);
			pa = le64_to_cpu(*(__le64 *)ext);
			pos += sizeof(__le64);
			lstart = 0;
		} else {
			lstart = round_down(map->m_la, 1 << vi->z_lclusterbits);
			pos += (lstart >> vi->z_lclusterbits) * recsz;
			pa = EROFS_NULL_ADDR;
		}

		for (; lstart <= map->m_la; lstart += 1 << vi->z_lclusterbits) {
			ext = erofs_read_metabuf(&map->buf, sb, pos, in_mbox);
			if (IS_ERR(ext))
				return PTR_ERR(ext);
			map->m_plen = le32_to_cpu(ext->plen);
			if (pa != EROFS_NULL_ADDR) {
				map->m_pa = pa;
				pa += map->m_plen & Z_EROFS_EXTENT_PLEN_MASK;
			} else {
				map->m_pa = le32_to_cpu(ext->pstart_lo);
			}
			pos += recsz;
		}
		last = (lstart >= round_up(lend, 1 << vi->z_lclusterbits));
		lend = min(lstart, lend);
		lstart -= 1 << vi->z_lclusterbits;
	} else {
		lstart = lend;
		for (l = 0, r = vi->z_extents; l < r; ) {
			mid = l + (r - l) / 2;
			ext = erofs_read_metabuf(&map->buf, sb,
						 pos + mid * recsz, in_mbox);
			if (IS_ERR(ext))
				return PTR_ERR(ext);

			la = le32_to_cpu(ext->lstart_lo);
			pa = le32_to_cpu(ext->pstart_lo) |
				(u64)le32_to_cpu(ext->pstart_hi) << 32;
			if (recsz > offsetof(struct z_erofs_extent, lstart_hi))
				la |= (u64)le32_to_cpu(ext->lstart_hi) << 32;

			if (la > map->m_la) {
				r = mid;
				if (la > lend) {
					DBG_BUGON(1);
					return -EFSCORRUPTED;
				}
				lend = la;
			} else {
				l = mid + 1;
				if (map->m_la == la)
					r = min(l + 1, r);
				lstart = la;
				map->m_plen = le32_to_cpu(ext->plen);
				map->m_pa = pa;
			}
		}
		last = (l >= vi->z_extents);
	}

	if (lstart < lend) {
		map->m_la = lstart;
		if (last && (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER)) {
			map->m_flags = EROFS_MAP_FRAGMENT;
			vi->z_fragmentoff = map->m_plen;
			if (recsz > offsetof(struct z_erofs_extent, pstart_lo))
				vi->z_fragmentoff |= map->m_pa << 32;
		} else if (map->m_plen) {
			map->m_flags |= EROFS_MAP_MAPPED |
				EROFS_MAP_FULL_MAPPED | EROFS_MAP_ENCODED;
			fmt = map->m_plen >> Z_EROFS_EXTENT_PLEN_FMT_BIT;
			if (fmt)
				map->m_algorithmformat = fmt - 1;
			else if (interlaced && !erofs_blkoff(sb, map->m_pa))
				map->m_algorithmformat =
					Z_EROFS_COMPRESSION_INTERLACED;
			else
				map->m_algorithmformat =
					Z_EROFS_COMPRESSION_SHIFTED;
			if (map->m_plen & Z_EROFS_EXTENT_PLEN_PARTIAL)
				map->m_flags |= EROFS_MAP_PARTIAL_REF;
			map->m_plen &= Z_EROFS_EXTENT_PLEN_MASK;
		}
	}
	map->m_llen = lend - map->m_la;
	return 0;
}

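/*
 * Lazily parse the per-inode z_erofs map header on first access,
 * serialized with the EROFS_I_BL_Z_BIT bit lock.
 */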
static int z_erofs_fill_inode(struct inode *inode, struct erofs_map_blocks *map)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct super_block *const sb = inode->i_sb;
	int err, headnr;
	erofs_off_t pos;
	struct z_erofs_map_header *h;

	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) {
		/*
		 * paired with smp_mb() at the end of the function to ensure
		 * fields will only be observed after the bit is set.
		 */
		smp_mb();
		return 0;
	}

	if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_Z_BIT, TASK_KILLABLE))
		return -ERESTARTSYS;

	err = 0;
	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
		goto out_unlock;

	pos = ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
	h = erofs_read_metabuf(&map->buf, sb, pos, erofs_inode_in_metabox(inode));
	if (IS_ERR(h)) {
		err = PTR_ERR(h);
		goto out_unlock;
	}

	/*
	 * if the highest bit of the 8-byte map header is set, the whole file
	 * is stored in the packed inode; the remaining bits keep z_fragmentoff.
	 */
	if (h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT) {
		vi->z_advise = Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
		vi->z_fragmentoff = le64_to_cpu(*(__le64 *)h) ^ (1ULL << 63);
		vi->z_tailextent_headlcn = 0;
		goto done;
	}
	vi->z_advise = le16_to_cpu(h->h_advise);
	vi->z_lclusterbits = sb->s_blocksize_bits + (h->h_clusterbits & 15);
	if (vi->datalayout == EROFS_INODE_COMPRESSED_FULL &&
	    (vi->z_advise & Z_EROFS_ADVISE_EXTENTS)) {
		vi->z_extents = le32_to_cpu(h->h_extents_lo) |
				((u64)le16_to_cpu(h->h_extents_hi) << 32);
		goto done;
	}

	vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
	vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;
	if (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER)
		vi->z_fragmentoff = le32_to_cpu(h->h_fragmentoff);
	else if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER)
		vi->z_idata_size = le16_to_cpu(h->h_idata_size);

	headnr = 0;
	if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX ||
	    vi->z_algorithmtype[++headnr] >= Z_EROFS_COMPRESSION_MAX) {
		erofs_err(sb, "unknown HEAD%u format %u for nid %llu, please upgrade kernel",
			  headnr + 1, vi->z_algorithmtype[headnr], vi->nid);
		err = -EOPNOTSUPP;
		goto out_unlock;
	}

	if (!erofs_sb_has_big_pcluster(EROFS_SB(sb)) &&
	    vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
			    Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
		erofs_err(sb, "per-inode big pcluster without sb feature for nid %llu",
			  vi->nid);
		err = -EFSCORRUPTED;
		goto out_unlock;
	}
	if (vi->datalayout == EROFS_INODE_COMPRESSED_COMPACT &&
	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^
	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
		erofs_err(sb, "big pcluster head1/2 of compact indexes should be consistent for nid %llu",
			  vi->nid);
		err = -EFSCORRUPTED;
		goto out_unlock;
	}

	if (vi->z_idata_size ||
	    (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER)) {
		struct erofs_map_blocks tm = {
			.buf = __EROFS_BUF_INITIALIZER
		};

		err = z_erofs_map_blocks_fo(inode, &tm,
					    EROFS_GET_BLOCKS_FINDTAIL);
		erofs_put_metabuf(&tm.buf);
		if (err < 0)
			goto out_unlock;
	}
done:
	/* paired with smp_mb() at the beginning of the function */
	smp_mb();
	set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
out_unlock:
	clear_and_wake_up_bit(EROFS_I_BL_Z_BIT, &vi->flags);
	return err;
}

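/*
 * The entry point of compressed mapping: convert map->m_la into the
 * extent described by map->m_{la,pa,llen,plen,flags}.
 */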
int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
			    int flags)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	int err = 0;

	trace_erofs_map_blocks_enter(inode, map, flags);
	if (map->m_la >= inode->i_size) {	/* post-EOF unmapped extent */
		map->m_llen = map->m_la + 1 - inode->i_size;
		map->m_la = inode->i_size;
		map->m_flags = 0;
	} else {
		err = z_erofs_fill_inode(inode, map);
		if (!err) {
			if (vi->datalayout == EROFS_INODE_COMPRESSED_FULL &&
			    (vi->z_advise & Z_EROFS_ADVISE_EXTENTS))
				err = z_erofs_map_blocks_ext(inode, map, flags);
			else
				err = z_erofs_map_blocks_fo(inode, map, flags);
		}
		if (!err && (map->m_flags & EROFS_MAP_ENCODED) &&
		    unlikely(map->m_plen > Z_EROFS_PCLUSTER_MAX_SIZE ||
			     map->m_llen > Z_EROFS_PCLUSTER_MAX_DSIZE))
			err = -EOPNOTSUPP;
		if (err)
			map->m_llen = 0;
	}
	trace_erofs_map_blocks_exit(inode, map, flags, err);
	return err;
}

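/* report-only iomap_begin() (e.g. for FIEMAP); not used for actual I/O */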
static int z_erofs_iomap_begin_report(struct inode *inode, loff_t offset,
				      loff_t length, unsigned int flags,
				      struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct erofs_map_blocks map = { .m_la = offset };

	ret = z_erofs_map_blocks_iter(inode, &map, EROFS_GET_BLOCKS_FIEMAP);
	erofs_put_metabuf(&map.buf);
	if (ret < 0)
		return ret;

	iomap->bdev = inode->i_sb->s_bdev;
	iomap->offset = map.m_la;
	iomap->length = map.m_llen;
	if (map.m_flags & EROFS_MAP_MAPPED) {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = map.m_flags & __EROFS_MAP_FRAGMENT ?
			      IOMAP_NULL_ADDR : map.m_pa;
	} else {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		/*
		 * There is no strict rule on how to describe post-EOF
		 * extents, but something like the below is needed here;
		 * otherwise, iomap itself will get into an endless loop
		 * past EOF.
		 *
		 * Compute the effective length by subtracting the extent
		 * start (map.m_la) from the requested offset and adding
		 * the result to the requested length (note that
		 * offset >= map.m_la always holds).
		 */
		if (iomap->offset >= inode->i_size)
			iomap->length = length + offset - map.m_la;
	}
	iomap->flags = 0;
	return 0;
}

const struct iomap_ops z_erofs_iomap_report_ops = {
	.iomap_begin = z_erofs_iomap_begin_report,
};