// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "xattr.h"
#include <trace/events/erofs.h>

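/*
 * A "fast" symlink keeps its target inline in the same block as the inode
 * (flat-inline layout, right after the xattr area), so the target can be
 * duplicated into i_link here instead of being read through the page cache
 * later on.
 */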
static int erofs_fill_symlink(struct inode *inode, void *kaddr,
			      unsigned int m_pofs)
{
	struct erofs_inode *vi = EROFS_I(inode);
	loff_t off;

	m_pofs += vi->xattr_isize;
	/* check if it cannot be handled with fast symlink scheme */
	if (vi->datalayout != EROFS_INODE_FLAT_INLINE ||
	    check_add_overflow(m_pofs, inode->i_size, &off) ||
	    off > i_blocksize(inode))
		return 0;

	inode->i_link = kmemdup_nul(kaddr + m_pofs, inode->i_size, GFP_KERNEL);
	return inode->i_link ? 0 : -ENOMEM;
}

static int erofs_read_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	erofs_blk_t blkaddr = erofs_blknr(sb, erofs_iloc(inode));
	unsigned int ofs = erofs_blkoff(sb, erofs_iloc(inode));
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	erofs_blk_t addrmask = BIT_ULL(48) - 1;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_inode_extended *die, copied;
	struct erofs_inode_compact *dic;
	unsigned int ifmt;
	void *ptr;
	int err = 0;

	ptr = erofs_read_metabuf(&buf, sb, erofs_pos(sb, blkaddr), true);
	if (IS_ERR(ptr)) {
		err = PTR_ERR(ptr);
		erofs_err(sb, "failed to get inode (nid: %llu) page, err %d",
			  vi->nid, err);
		goto err_out;
	}

	dic = ptr + ofs;
	ifmt = le16_to_cpu(dic->i_format);
	if (ifmt & ~EROFS_I_ALL) {
		erofs_err(sb, "unsupported i_format %u of nid %llu",
			  ifmt, vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	vi->datalayout = erofs_inode_datalayout(ifmt);
	if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
		erofs_err(sb, "unsupported datalayout %u of nid %llu",
			  vi->datalayout, vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	switch (erofs_inode_version(ifmt)) {
	case EROFS_INODE_LAYOUT_EXTENDED:
		vi->inode_isize = sizeof(struct erofs_inode_extended);
		/* check if the extended inode crosses the block boundary */
		if (ofs + vi->inode_isize <= sb->s_blocksize) {
			ofs += vi->inode_isize;
			die = (struct erofs_inode_extended *)dic;
			copied.i_u = die->i_u;
			copied.i_nb = die->i_nb;
		} else {
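			/*
			 * The extended inode straddles two blocks: copy the
			 * head from this block, then map the next block and
			 * copy the remaining tail into the local buffer.
			 */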
			const unsigned int gotten = sb->s_blocksize - ofs;

			memcpy(&copied, dic, gotten);
			ptr = erofs_read_metabuf(&buf, sb,
						 erofs_pos(sb, blkaddr + 1), true);
			if (IS_ERR(ptr)) {
				err = PTR_ERR(ptr);
				erofs_err(sb, "failed to get inode payload block (nid: %llu), err %d",
					  vi->nid, err);
				goto err_out;
			}
			ofs = vi->inode_isize - gotten;
			memcpy((u8 *)&copied + gotten, ptr, ofs);
			die = &copied;
		}
		vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount);

		inode->i_mode = le16_to_cpu(die->i_mode);
		i_uid_write(inode, le32_to_cpu(die->i_uid));
		i_gid_write(inode, le32_to_cpu(die->i_gid));
		set_nlink(inode, le32_to_cpu(die->i_nlink));
		inode_set_mtime(inode, le64_to_cpu(die->i_mtime),
				le32_to_cpu(die->i_mtime_nsec));

		inode->i_size = le64_to_cpu(die->i_size);
		break;
	case EROFS_INODE_LAYOUT_COMPACT:
		vi->inode_isize = sizeof(struct erofs_inode_compact);
		ofs += vi->inode_isize;
		vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount);

		inode->i_mode = le16_to_cpu(dic->i_mode);
		copied.i_u = dic->i_u;
		i_uid_write(inode, le16_to_cpu(dic->i_uid));
		i_gid_write(inode, le16_to_cpu(dic->i_gid));
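		/*
		 * For non-directories with the nlink-1 hint bit set, i_nb
		 * holds the high bits of the start block and the link count
		 * is 1; otherwise i_nb carries the link count and block
		 * addresses are limited to 32 bits.
		 */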
		if (!S_ISDIR(inode->i_mode) &&
		    ((ifmt >> EROFS_I_NLINK_1_BIT) & 1)) {
			set_nlink(inode, 1);
			copied.i_nb = dic->i_nb;
		} else {
			set_nlink(inode, le16_to_cpu(dic->i_nb.nlink));
			copied.i_nb.startblk_hi = 0;
			addrmask = BIT_ULL(32) - 1;
		}
		inode_set_mtime(inode, sbi->epoch + le32_to_cpu(dic->i_mtime),
				sbi->fixed_nsec);

		inode->i_size = le32_to_cpu(dic->i_size);
		break;
	default:
		erofs_err(sb, "unsupported on-disk inode version %u of nid %llu",
			  erofs_inode_version(ifmt), vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	if (unlikely(inode->i_size < 0)) {
		erofs_err(sb, "negative i_size @ nid %llu", vi->nid);
		err = -EFSCORRUPTED;
		goto err_out;
	}
	switch (inode->i_mode & S_IFMT) {
	case S_IFDIR:
		vi->dot_omitted = (ifmt >> EROFS_I_DOT_OMITTED_BIT) & 1;
		fallthrough;
	case S_IFREG:
	case S_IFLNK:
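		/*
		 * Assemble the 48-bit start block from its low and high
		 * parts; for flat-plain inodes, a value matching
		 * EROFS_NULL_ADDR under the effective address mask marks an
		 * unset start block.
		 */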
		vi->startblk = le32_to_cpu(copied.i_u.startblk_lo) |
			((u64)le16_to_cpu(copied.i_nb.startblk_hi) << 32);
		if (vi->datalayout == EROFS_INODE_FLAT_PLAIN &&
		    !((vi->startblk ^ EROFS_NULL_ADDR) & addrmask))
			vi->startblk = EROFS_NULL_ADDR;

		if (S_ISLNK(inode->i_mode)) {
			err = erofs_fill_symlink(inode, ptr, ofs);
			if (err)
				goto err_out;
		}
		break;
	case S_IFCHR:
	case S_IFBLK:
		inode->i_rdev = new_decode_dev(le32_to_cpu(copied.i_u.rdev));
		break;
	case S_IFIFO:
	case S_IFSOCK:
		inode->i_rdev = 0;
		break;
	default:
		erofs_err(sb, "bogus i_mode (%o) @ nid %llu", inode->i_mode,
			  vi->nid);
		err = -EFSCORRUPTED;
		goto err_out;
	}

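	/*
	 * Compressed inodes record their on-disk block count in
	 * i_u.blocks_lo; convert it to 512-byte sectors.  Uncompressed
	 * inodes simply derive i_blocks from i_size rounded up to the
	 * block size.
	 */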
	if (erofs_inode_is_data_compressed(vi->datalayout))
		inode->i_blocks = le32_to_cpu(copied.i_u.blocks_lo) <<
			(sb->s_blocksize_bits - 9);
	else
		inode->i_blocks = round_up(inode->i_size, sb->s_blocksize) >> 9;

	if (vi->datalayout == EROFS_INODE_CHUNK_BASED) {
		/* fill chunked inode summary info */
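		/*
		 * The chunk size is the block size shifted by the blkbits
		 * delta encoded in the low bits of the chunk format, so
		 * chunkbits below is log2 of the chunk size in bytes.
		 */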
		vi->chunkformat = le16_to_cpu(copied.i_u.c.format);
		if (vi->chunkformat & ~EROFS_CHUNK_FORMAT_ALL) {
			erofs_err(sb, "unsupported chunk format %x of nid %llu",
				  vi->chunkformat, vi->nid);
			err = -EOPNOTSUPP;
			goto err_out;
		}
		vi->chunkbits = sb->s_blocksize_bits +
			(vi->chunkformat & EROFS_CHUNK_FORMAT_BLKBITS_MASK);
	}
	inode_set_atime_to_ts(inode,
			      inode_set_ctime_to_ts(inode, inode_get_mtime(inode)));

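	/*
	 * DAX only makes sense for uncompressed regular files whose data can
	 * be mapped directly (flat plain or chunk-based layouts); clear the
	 * flag first and re-enable it only when DAX_ALWAYS is requested.
	 */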
	inode->i_flags &= ~S_DAX;
	if (test_opt(&sbi->opt, DAX_ALWAYS) && S_ISREG(inode->i_mode) &&
	    (vi->datalayout == EROFS_INODE_FLAT_PLAIN ||
	     vi->datalayout == EROFS_INODE_CHUNK_BASED))
		inode->i_flags |= S_DAX;
err_out:
	erofs_put_metabuf(&buf);
	return err;
}

static int erofs_fill_inode(struct inode *inode)
{
	struct erofs_inode *vi = EROFS_I(inode);
	int err;

	trace_erofs_fill_inode(inode);
	err = erofs_read_inode(inode);
	if (err)
		return err;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &erofs_generic_iops;
		if (erofs_inode_is_data_compressed(vi->datalayout))
			inode->i_fop = &generic_ro_fops;
		else
			inode->i_fop = &erofs_file_fops;
		break;
	case S_IFDIR:
		inode->i_op = &erofs_dir_iops;
		inode->i_fop = &erofs_dir_fops;
		inode_nohighmem(inode);
		break;
	case S_IFLNK:
		if (inode->i_link)
			inode->i_op = &erofs_fast_symlink_iops;
		else
			inode->i_op = &erofs_symlink_iops;
		inode_nohighmem(inode);
		break;
	default:
		inode->i_op = &erofs_generic_iops;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		return 0;
	}

	mapping_set_large_folios(inode->i_mapping);
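	/*
	 * Compressed inodes use the zdata address space operations;
	 * uncompressed inodes default to erofs_aops, which fscache
	 * (on-demand) and file-backed mounts override with their own ops.
	 */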
	if (erofs_inode_is_data_compressed(vi->datalayout)) {
#ifdef CONFIG_EROFS_FS_ZIP
		DO_ONCE_LITE_IF(inode->i_blkbits != PAGE_SHIFT,
				erofs_info, inode->i_sb,
				"EXPERIMENTAL EROFS subpage compressed block support in use. Use at your own risk!");
		inode->i_mapping->a_ops = &z_erofs_aops;
#else
		err = -EOPNOTSUPP;
#endif
	} else {
		inode->i_mapping->a_ops = &erofs_aops;
#ifdef CONFIG_EROFS_FS_ONDEMAND
		if (erofs_is_fscache_mode(inode->i_sb))
			inode->i_mapping->a_ops = &erofs_fscache_access_aops;
#endif
#ifdef CONFIG_EROFS_FS_BACKED_BY_FILE
		if (erofs_is_fileio_mode(EROFS_SB(inode->i_sb)))
			inode->i_mapping->a_ops = &erofs_fileio_aops;
#endif
	}

	return err;
}

/*
 * ino_t is 32 bits on a 32-bit arch. We have to squash the 64-bit value
 * down so that it will fit.
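 * e.g. with a 4-byte ino_t and an 8-byte nid, the upper 32 bits of the nid
 * are XORed into the lower 32 bits.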
 */
static ino_t erofs_squash_ino(erofs_nid_t nid)
{
	ino_t ino = (ino_t)nid;

	if (sizeof(ino_t) < sizeof(erofs_nid_t))
		ino ^= nid >> (sizeof(erofs_nid_t) - sizeof(ino_t)) * 8;
	return ino;
}

static int erofs_iget5_eq(struct inode *inode, void *opaque)
{
	return EROFS_I(inode)->nid == *(erofs_nid_t *)opaque;
}

static int erofs_iget5_set(struct inode *inode, void *opaque)
{
	const erofs_nid_t nid = *(erofs_nid_t *)opaque;

	inode->i_ino = erofs_squash_ino(nid);
	EROFS_I(inode)->nid = nid;
	return 0;
}

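/*
 * Look up (or allocate and read in) the inode for @nid.  The full nid is the
 * comparison key since squashed inode numbers may collide on 32-bit arches.
 */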
struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid)
{
	struct inode *inode;

	inode = iget5_locked(sb, erofs_squash_ino(nid), erofs_iget5_eq,
			     erofs_iget5_set, &nid);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (inode->i_state & I_NEW) {
		int err = erofs_fill_inode(inode);

		if (err) {
			iget_failed(inode);
			return ERR_PTR(err);
		}
		unlock_new_inode(inode);
	}
	return inode;
}

int erofs_getattr(struct mnt_idmap *idmap, const struct path *path,
		  struct kstat *stat, u32 request_mask,
		  unsigned int query_flags)
{
	struct inode *const inode = d_inode(path->dentry);
	struct block_device *bdev = inode->i_sb->s_bdev;
	bool compressed =
		erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout);

	if (compressed)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	stat->attributes |= STATX_ATTR_IMMUTABLE;
	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
				  STATX_ATTR_IMMUTABLE);

	/*
	 * Return the DIO alignment restrictions if requested.
	 *
	 * In EROFS, STATX_DIOALIGN is only supported for uncompressed inodes
	 * in bdev-based mode; otherwise the alignment fields stay zero,
	 * i.e. no DIO support is reported.
	 */
	if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->i_mode)) {
		stat->result_mask |= STATX_DIOALIGN;
		if (bdev && !compressed) {
			stat->dio_mem_align = bdev_dma_alignment(bdev) + 1;
			stat->dio_offset_align = bdev_logical_block_size(bdev);
		}
	}
	generic_fillattr(idmap, request_mask, inode, stat);
	return 0;
}

const struct inode_operations erofs_generic_iops = {
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_inode_acl = erofs_get_acl,
	.fiemap = erofs_fiemap,
};

const struct inode_operations erofs_symlink_iops = {
	.get_link = page_get_link,
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_inode_acl = erofs_get_acl,
};

const struct inode_operations erofs_fast_symlink_iops = {
	.get_link = simple_get_link,
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_inode_acl = erofs_get_acl,
};