1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Resizable virtual memory filesystem for Linux.
4 *
5 * Copyright (C) 2000 Linus Torvalds.
6 * 2000 Transmeta Corp.
7 * 2000-2001 Christoph Rohland
8 * 2000-2001 SAP AG
9 * 2002 Red Hat Inc.
10 * Copyright (C) 2002-2011 Hugh Dickins.
11 * Copyright (C) 2011 Google Inc.
12 * Copyright (C) 2002-2005 VERITAS Software Corporation.
13 * Copyright (C) 2004 Andi Kleen, SuSE Labs
14 *
15 * Extended attribute support for tmpfs:
16 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
17 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
18 *
19 * tiny-shmem:
20 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
21 */
22
23 #include <linux/fs.h>
24 #include <linux/init.h>
25 #include <linux/vfs.h>
26 #include <linux/mount.h>
27 #include <linux/ramfs.h>
28 #include <linux/pagemap.h>
29 #include <linux/file.h>
30 #include <linux/fileattr.h>
31 #include <linux/filelock.h>
32 #include <linux/mm.h>
33 #include <linux/random.h>
34 #include <linux/sched/signal.h>
35 #include <linux/export.h>
36 #include <linux/shmem_fs.h>
37 #include <linux/swap.h>
38 #include <linux/uio.h>
39 #include <linux/hugetlb.h>
40 #include <linux/fs_parser.h>
41 #include <linux/swapfile.h>
42 #include <linux/iversion.h>
43 #include <linux/unicode.h>
44 #include "swap.h"
45
46 static struct vfsmount *shm_mnt __ro_after_init;
47
48 #ifdef CONFIG_SHMEM
49 /*
50 * This virtual memory filesystem is heavily based on the ramfs. It
51 * extends ramfs by the ability to use swap and honor resource limits
52 * which makes it a completely usable filesystem.
53 */
54
55 #include <linux/xattr.h>
56 #include <linux/exportfs.h>
57 #include <linux/posix_acl.h>
58 #include <linux/posix_acl_xattr.h>
59 #include <linux/mman.h>
60 #include <linux/string.h>
61 #include <linux/slab.h>
62 #include <linux/backing-dev.h>
63 #include <linux/writeback.h>
64 #include <linux/folio_batch.h>
65 #include <linux/percpu_counter.h>
66 #include <linux/falloc.h>
67 #include <linux/splice.h>
68 #include <linux/security.h>
69 #include <linux/leafops.h>
70 #include <linux/mempolicy.h>
71 #include <linux/namei.h>
72 #include <linux/ctype.h>
73 #include <linux/migrate.h>
74 #include <linux/highmem.h>
75 #include <linux/seq_file.h>
76 #include <linux/magic.h>
77 #include <linux/syscalls.h>
78 #include <linux/fcntl.h>
79 #include <uapi/linux/memfd.h>
80 #include <linux/rmap.h>
81 #include <linux/uuid.h>
82 #include <linux/quotaops.h>
83 #include <linux/rcupdate_wait.h>
84
85 #include <linux/uaccess.h>
86
87 #include "internal.h"
88
89 #define VM_ACCT(size) (PAGE_ALIGN(size) >> PAGE_SHIFT)
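/*
 * Illustration (assuming 4KiB pages): VM_ACCT(1) == VM_ACCT(4096) == 1 and
 * VM_ACCT(4097) == 2, i.e. a byte size is rounded up to whole pages before
 * being charged against the memory-overcommit accounting.
 */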
90
91 /* Pretend that each entry is of this size in directory's i_size */
92 #define BOGO_DIRENT_SIZE 20
93
94 /* Pretend that one inode + its dentry occupy this much memory */
95 #define BOGO_INODE_SIZE 1024
96
97 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
98 #define SHORT_SYMLINK_LEN 128
99
100 /*
101 * shmem_fallocate communicates with shmem_fault or shmem_writeout via
102 * inode->i_private (with i_rwsem making sure that it has only one user at
103 * a time): we would prefer not to enlarge the shmem inode just for that.
104 */
105 struct shmem_falloc {
106 wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
107 pgoff_t start; /* start of range currently being fallocated */
108 pgoff_t next; /* the next page offset to be fallocated */
109 pgoff_t nr_falloced; /* how many new pages have been fallocated */
110 pgoff_t nr_unswapped; /* how often writeout refused to swap out */
111 };
112
113 struct shmem_options {
114 unsigned long long blocks;
115 unsigned long long inodes;
116 struct mempolicy *mpol;
117 kuid_t uid;
118 kgid_t gid;
119 umode_t mode;
120 bool full_inums;
121 int huge;
122 int seen;
123 bool noswap;
124 unsigned short quota_types;
125 struct shmem_quota_limits qlimits;
126 #if IS_ENABLED(CONFIG_UNICODE)
127 struct unicode_map *encoding;
128 bool strict_encoding;
129 #endif
130 #define SHMEM_SEEN_BLOCKS 1
131 #define SHMEM_SEEN_INODES 2
132 #define SHMEM_SEEN_HUGE 4
133 #define SHMEM_SEEN_INUMS 8
134 #define SHMEM_SEEN_QUOTA 16
135 };
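/*
 * The SHMEM_SEEN_* bits record which mount options were given explicitly,
 * so that a later remount only changes those values. For example, parsing
 * "size=1G" fills in blocks and sets SHMEM_SEEN_BLOCKS in seen; the
 * fs_context option parser later in this file sets these bits as it
 * consumes each option.
 */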
136
137 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
138 static unsigned long huge_shmem_orders_always __read_mostly;
139 static unsigned long huge_shmem_orders_madvise __read_mostly;
140 static unsigned long huge_shmem_orders_inherit __read_mostly;
141 static unsigned long huge_shmem_orders_within_size __read_mostly;
142 static bool shmem_orders_configured __initdata;
143 #endif
144
145 #ifdef CONFIG_TMPFS
146 static unsigned long shmem_default_max_blocks(void)
147 {
148 return totalram_pages() / 2;
149 }
150
151 static unsigned long shmem_default_max_inodes(void)
152 {
153 unsigned long nr_pages = totalram_pages();
154
155 return min3(nr_pages - totalhigh_pages(), nr_pages / 2,
156 ULONG_MAX / BOGO_INODE_SIZE);
157 }
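/*
 * Rough illustration: on a 64-bit machine with 4GiB of RAM and 4KiB pages
 * (about one million pages, no highmem), the defaults above come to roughly
 * 512K blocks (a 2GiB tmpfs) and 512K inodes; the ULONG_MAX / BOGO_INODE_SIZE
 * term mainly acts as an overflow guard on the inode-space accounting.
 */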
158 #endif
159
160 static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
161 struct folio **foliop, enum sgp_type sgp, gfp_t gfp,
162 struct vm_area_struct *vma, vm_fault_t *fault_type);
163
164 static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
165 {
166 return sb->s_fs_info;
167 }
168
169 /*
170 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
171 * for shared memory and for shared anonymous (/dev/zero) mappings
172 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
173 * consistent with the pre-accounting of private mappings ...
174 */
175 static inline int shmem_acct_size(unsigned long flags, loff_t size)
176 {
177 return (flags & SHMEM_F_NORESERVE) ?
178 0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
179 }
180
181 static inline void shmem_unacct_size(unsigned long flags, loff_t size)
182 {
183 if (!(flags & SHMEM_F_NORESERVE))
184 vm_unacct_memory(VM_ACCT(size));
185 }
186
187 static inline int shmem_reacct_size(unsigned long flags,
188 loff_t oldsize, loff_t newsize)
189 {
190 if (!(flags & SHMEM_F_NORESERVE)) {
191 if (VM_ACCT(newsize) > VM_ACCT(oldsize))
192 return security_vm_enough_memory_mm(current->mm,
193 VM_ACCT(newsize) - VM_ACCT(oldsize));
194 else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
195 vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
196 }
197 return 0;
198 }
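/*
 * E.g. growing a size-accounted (!SHMEM_F_NORESERVE) object from 3 pages to
 * 5 pages charges 2 more pages against the commit limit and may fail;
 * shrinking it back just returns those 2 pages and cannot fail.
 */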
199
200 /*
201 * ... whereas tmpfs objects are accounted incrementally as
202 * pages are allocated, in order to allow large sparse files.
203 * shmem_get_folio reports shmem_acct_blocks failure as -ENOSPC not -ENOMEM,
204 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
205 */
206 static inline int shmem_acct_blocks(unsigned long flags, long pages)
207 {
208 if (!(flags & SHMEM_F_NORESERVE))
209 return 0;
210
211 return security_vm_enough_memory_mm(current->mm,
212 pages * VM_ACCT(PAGE_SIZE));
213 }
214
215 static inline void shmem_unacct_blocks(unsigned long flags, long pages)
216 {
217 if (flags & SHMEM_F_NORESERVE)
218 vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
219 }
220
221 int shmem_inode_acct_blocks(struct inode *inode, long pages)
222 {
223 struct shmem_inode_info *info = SHMEM_I(inode);
224 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
225 int err = -ENOSPC;
226
227 if (shmem_acct_blocks(info->flags, pages))
228 return err;
229
230 might_sleep(); /* when quotas */
231 if (sbinfo->max_blocks) {
232 if (!percpu_counter_limited_add(&sbinfo->used_blocks,
233 sbinfo->max_blocks, pages))
234 goto unacct;
235
236 err = dquot_alloc_block_nodirty(inode, pages);
237 if (err) {
238 percpu_counter_sub(&sbinfo->used_blocks, pages);
239 goto unacct;
240 }
241 } else {
242 err = dquot_alloc_block_nodirty(inode, pages);
243 if (err)
244 goto unacct;
245 }
246
247 return 0;
248
249 unacct:
250 shmem_unacct_blocks(info->flags, pages);
251 return err;
252 }
253
254 static void shmem_inode_unacct_blocks(struct inode *inode, long pages)
255 {
256 struct shmem_inode_info *info = SHMEM_I(inode);
257 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
258
259 might_sleep(); /* when quotas */
260 dquot_free_block_nodirty(inode, pages);
261
262 if (sbinfo->max_blocks)
263 percpu_counter_sub(&sbinfo->used_blocks, pages);
264 shmem_unacct_blocks(info->flags, pages);
265 }
266
267 static const struct super_operations shmem_ops;
268 static const struct address_space_operations shmem_aops;
269 static const struct file_operations shmem_file_operations;
270 static const struct inode_operations shmem_inode_operations;
271 static const struct inode_operations shmem_dir_inode_operations;
272 static const struct inode_operations shmem_special_inode_operations;
273 static const struct vm_operations_struct shmem_vm_ops;
274 static const struct vm_operations_struct shmem_anon_vm_ops;
275 static struct file_system_type shmem_fs_type;
276
277 bool shmem_mapping(const struct address_space *mapping)
278 {
279 return mapping->a_ops == &shmem_aops;
280 }
281 EXPORT_SYMBOL_GPL(shmem_mapping);
282
283 bool vma_is_anon_shmem(const struct vm_area_struct *vma)
284 {
285 return vma->vm_ops == &shmem_anon_vm_ops;
286 }
287
288 bool vma_is_shmem(const struct vm_area_struct *vma)
289 {
290 return vma_is_anon_shmem(vma) || vma->vm_ops == &shmem_vm_ops;
291 }
292
293 static LIST_HEAD(shmem_swaplist);
294 static DEFINE_SPINLOCK(shmem_swaplist_lock);
295
296 #ifdef CONFIG_TMPFS_QUOTA
297
298 static int shmem_enable_quotas(struct super_block *sb,
299 unsigned short quota_types)
300 {
301 int type, err = 0;
302
303 sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
304 for (type = 0; type < SHMEM_MAXQUOTAS; type++) {
305 if (!(quota_types & (1 << type)))
306 continue;
307 err = dquot_load_quota_sb(sb, type, QFMT_SHMEM,
308 DQUOT_USAGE_ENABLED |
309 DQUOT_LIMITS_ENABLED);
310 if (err)
311 goto out_err;
312 }
313 return 0;
314
315 out_err:
316 pr_warn("tmpfs: failed to enable quota tracking (type=%d, err=%d)\n",
317 type, err);
318 for (type--; type >= 0; type--)
319 dquot_quota_off(sb, type);
320 return err;
321 }
322
323 static void shmem_disable_quotas(struct super_block *sb)
324 {
325 int type;
326
327 for (type = 0; type < SHMEM_MAXQUOTAS; type++)
328 dquot_quota_off(sb, type);
329 }
330
331 static struct dquot __rcu **shmem_get_dquots(struct inode *inode)
332 {
333 return SHMEM_I(inode)->i_dquot;
334 }
335 #endif /* CONFIG_TMPFS_QUOTA */
336
337 /*
338 * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
339 * produces a novel ino for the newly allocated inode.
340 *
341 * It may also be called when making a hard link, to account for the space needed by
342 * each dentry. However, in that case, no new inode number is needed since that
343 * internally draws from another pool of inode numbers (currently global
344 * get_next_ino()). This case is indicated by passing NULL as inop.
345 */
346 #define SHMEM_INO_BATCH 1024
347 static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
348 {
349 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
350 ino_t ino;
351
352 if (!(sb->s_flags & SB_KERNMOUNT)) {
353 raw_spin_lock(&sbinfo->stat_lock);
354 if (sbinfo->max_inodes) {
355 if (sbinfo->free_ispace < BOGO_INODE_SIZE) {
356 raw_spin_unlock(&sbinfo->stat_lock);
357 return -ENOSPC;
358 }
359 sbinfo->free_ispace -= BOGO_INODE_SIZE;
360 }
361 if (inop) {
362 ino = sbinfo->next_ino++;
363 if (unlikely(is_zero_ino(ino)))
364 ino = sbinfo->next_ino++;
365 if (unlikely(!sbinfo->full_inums &&
366 ino > UINT_MAX)) {
367 /*
368 * Emulate get_next_ino uint wraparound for
369 * compatibility
370 */
371 if (IS_ENABLED(CONFIG_64BIT))
372 pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
373 __func__, MINOR(sb->s_dev));
374 sbinfo->next_ino = 1;
375 ino = sbinfo->next_ino++;
376 }
377 *inop = ino;
378 }
379 raw_spin_unlock(&sbinfo->stat_lock);
380 } else if (inop) {
381 /*
382 * __shmem_file_setup, one of our callers, is lock-free: it
383 * doesn't hold stat_lock in shmem_reserve_inode since
384 * max_inodes is always 0, and is called from potentially
385 * unknown contexts. As such, use a per-cpu batched allocator
386 * which doesn't require the per-sb stat_lock unless we are at
387 * the batch boundary.
388 *
389 * We don't need to worry about inode{32,64} since SB_KERNMOUNT
390 * shmem mounts are not exposed to userspace, so we don't need
391 * to worry about things like glibc compatibility.
392 */
393 ino_t *next_ino;
394
395 next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
396 ino = *next_ino;
397 if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
398 raw_spin_lock(&sbinfo->stat_lock);
399 ino = sbinfo->next_ino;
400 sbinfo->next_ino += SHMEM_INO_BATCH;
401 raw_spin_unlock(&sbinfo->stat_lock);
402 if (unlikely(is_zero_ino(ino)))
403 ino++;
404 }
405 *inop = ino;
406 *next_ino = ++ino;
407 put_cpu();
408 }
409
410 return 0;
411 }
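/*
 * Illustration of the SB_KERNMOUNT path above: with SHMEM_INO_BATCH == 1024,
 * a CPU whose per-cpu cursor reaches a multiple of 1024 takes stat_lock once
 * to claim the next window of 1024 inode numbers, and then hands out the
 * remaining numbers of that window without touching the shared lock.
 */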
412
413 static void shmem_free_inode(struct super_block *sb, size_t freed_ispace)
414 {
415 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
416 if (sbinfo->max_inodes) {
417 raw_spin_lock(&sbinfo->stat_lock);
418 sbinfo->free_ispace += BOGO_INODE_SIZE + freed_ispace;
419 raw_spin_unlock(&sbinfo->stat_lock);
420 }
421 }
422
423 /**
424 * shmem_recalc_inode - recalculate the block usage of an inode
425 * @inode: inode to recalc
426 * @alloced: the change in number of pages allocated to inode
427 * @swapped: the change in number of pages swapped from inode
428 *
429 * We have to calculate the free blocks since the mm can drop
430 * undirtied hole pages behind our back.
431 *
432 * But normally info->alloced == inode->i_mapping->nrpages + info->swapped
433 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
434 *
435 * Return: true if swapped was incremented from 0, for shmem_writeout().
436 */
437 bool shmem_recalc_inode(struct inode *inode, long alloced, long swapped)
438 {
439 struct shmem_inode_info *info = SHMEM_I(inode);
440 bool first_swapped = false;
441 long freed;
442
443 spin_lock(&info->lock);
444 info->alloced += alloced;
445 info->swapped += swapped;
446 freed = info->alloced - info->swapped -
447 READ_ONCE(inode->i_mapping->nrpages);
448 /*
449 * Special case: whereas normally shmem_recalc_inode() is called
450 * after i_mapping->nrpages has already been adjusted (up or down),
451 * shmem_writeout() has to raise swapped before nrpages is lowered -
452 * to stop a racing shmem_recalc_inode() from thinking that a page has
453 * been freed. Compensate here, to avoid the need for a followup call.
454 */
455 if (swapped > 0) {
456 if (info->swapped == swapped)
457 first_swapped = true;
458 freed += swapped;
459 }
460 if (freed > 0)
461 info->alloced -= freed;
462 spin_unlock(&info->lock);
463
464 /* The quota case may block */
465 if (freed > 0)
466 shmem_inode_unacct_blocks(inode, freed);
467 return first_swapped;
468 }
469
470 bool shmem_charge(struct inode *inode, long pages)
471 {
472 struct address_space *mapping = inode->i_mapping;
473
474 if (shmem_inode_acct_blocks(inode, pages))
475 return false;
476
477 /* nrpages adjustment first, then shmem_recalc_inode() when balanced */
478 xa_lock_irq(&mapping->i_pages);
479 mapping->nrpages += pages;
480 xa_unlock_irq(&mapping->i_pages);
481
482 shmem_recalc_inode(inode, pages, 0);
483 return true;
484 }
485
486 void shmem_uncharge(struct inode *inode, long pages)
487 {
488 /* pages argument is currently unused: keep it to help debugging */
489 /* nrpages adjustment done by __filemap_remove_folio() or caller */
490
491 shmem_recalc_inode(inode, 0, 0);
492 }
493
494 /*
495 * Replace item expected in xarray by a new item, while holding xa_lock.
496 */
497 static int shmem_replace_entry(struct address_space *mapping,
498 pgoff_t index, void *expected, void *replacement)
499 {
500 XA_STATE(xas, &mapping->i_pages, index);
501 void *item;
502
503 VM_BUG_ON(!expected);
504 VM_BUG_ON(!replacement);
505 item = xas_load(&xas);
506 if (item != expected)
507 return -ENOENT;
508 xas_store(&xas, replacement);
509 return 0;
510 }
511
512 /*
513 * Sometimes, before we decide whether to proceed or to fail, we must check
514 * that an entry was not already brought back or split by a racing thread.
515 *
516 * Checking folio is not enough: by the time a swapcache folio is locked, it
517 * might be reused, and again be swapcache, using the same swap as before.
518 * Returns the swap entry's order if it is still present, else returns -1.
519 */
520 static int shmem_confirm_swap(struct address_space *mapping, pgoff_t index,
521 swp_entry_t swap)
522 {
523 XA_STATE(xas, &mapping->i_pages, index);
524 int ret = -1;
525 void *entry;
526
527 rcu_read_lock();
528 do {
529 entry = xas_load(&xas);
530 if (entry == swp_to_radix_entry(swap))
531 ret = xas_get_order(&xas);
532 } while (xas_retry(&xas, entry));
533 rcu_read_unlock();
534 return ret;
535 }
536
537 /*
538 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
539 *
540 * SHMEM_HUGE_NEVER:
541 * disables huge pages for the mount;
542 * SHMEM_HUGE_ALWAYS:
543 * enables huge pages for the mount;
544 * SHMEM_HUGE_WITHIN_SIZE:
545 * only allocate huge pages if the page will be fully within i_size,
546 * also respect madvise() hints;
547 * SHMEM_HUGE_ADVISE:
548 * only allocate huge pages if requested with madvise();
549 */
550
551 #define SHMEM_HUGE_NEVER 0
552 #define SHMEM_HUGE_ALWAYS 1
553 #define SHMEM_HUGE_WITHIN_SIZE 2
554 #define SHMEM_HUGE_ADVISE 3
555
556 /*
557 * Special values.
558 * Can only be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
559 *
560 * SHMEM_HUGE_DENY:
561 * disables huge on shm_mnt and all mounts, for emergency use;
562 * SHMEM_HUGE_FORCE:
563 * enables huge on shm_mnt and all mounts, w/o needing option, for testing;
564 *
565 */
566 #define SHMEM_HUGE_DENY (-1)
567 #define SHMEM_HUGE_FORCE (-2)
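/*
 * Example usage: "mount -t tmpfs -o huge=within_size tmpfs /mnt" selects
 * SHMEM_HUGE_WITHIN_SIZE for that mount, while writing "deny" or "force" to
 * /sys/kernel/mm/transparent_hugepage/shmem_enabled overrides the per-mount
 * setting for all mounts.
 */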
568
569 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
570 /* ifdef here to avoid bloating shmem.o when not necessary */
571
572 #if defined(CONFIG_TRANSPARENT_HUGEPAGE_SHMEM_HUGE_NEVER)
573 #define SHMEM_HUGE_DEFAULT SHMEM_HUGE_NEVER
574 #elif defined(CONFIG_TRANSPARENT_HUGEPAGE_SHMEM_HUGE_ALWAYS)
575 #define SHMEM_HUGE_DEFAULT SHMEM_HUGE_ALWAYS
576 #elif defined(CONFIG_TRANSPARENT_HUGEPAGE_SHMEM_HUGE_WITHIN_SIZE)
577 #define SHMEM_HUGE_DEFAULT SHMEM_HUGE_WITHIN_SIZE
578 #elif defined(CONFIG_TRANSPARENT_HUGEPAGE_SHMEM_HUGE_ADVISE)
579 #define SHMEM_HUGE_DEFAULT SHMEM_HUGE_ADVISE
580 #else
581 #define SHMEM_HUGE_DEFAULT SHMEM_HUGE_NEVER
582 #endif
583
584 static int shmem_huge __read_mostly = SHMEM_HUGE_DEFAULT;
585
586 #undef SHMEM_HUGE_DEFAULT
587
588 #if defined(CONFIG_TRANSPARENT_HUGEPAGE_TMPFS_HUGE_NEVER)
589 #define TMPFS_HUGE_DEFAULT SHMEM_HUGE_NEVER
590 #elif defined(CONFIG_TRANSPARENT_HUGEPAGE_TMPFS_HUGE_ALWAYS)
591 #define TMPFS_HUGE_DEFAULT SHMEM_HUGE_ALWAYS
592 #elif defined(CONFIG_TRANSPARENT_HUGEPAGE_TMPFS_HUGE_WITHIN_SIZE)
593 #define TMPFS_HUGE_DEFAULT SHMEM_HUGE_WITHIN_SIZE
594 #elif defined(CONFIG_TRANSPARENT_HUGEPAGE_TMPFS_HUGE_ADVISE)
595 #define TMPFS_HUGE_DEFAULT SHMEM_HUGE_ADVISE
596 #else
597 #define TMPFS_HUGE_DEFAULT SHMEM_HUGE_NEVER
598 #endif
599
600 static int tmpfs_huge __read_mostly = TMPFS_HUGE_DEFAULT;
601
602 #undef TMPFS_HUGE_DEFAULT
603
604 static unsigned int shmem_get_orders_within_size(struct inode *inode,
605 unsigned long within_size_orders, pgoff_t index,
606 loff_t write_end)
607 {
608 pgoff_t aligned_index;
609 unsigned long order;
610 loff_t i_size;
611
612 order = highest_order(within_size_orders);
613 while (within_size_orders) {
614 aligned_index = round_up(index + 1, 1 << order);
615 i_size = max(write_end, i_size_read(inode));
616 i_size = round_up(i_size, PAGE_SIZE);
617 if (i_size >> PAGE_SHIFT >= aligned_index)
618 return within_size_orders;
619
620 order = next_order(&within_size_orders, order);
621 }
622
623 return 0;
624 }
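/*
 * Worked example (4KiB pages): with index 0 and i_size of 1MiB (256 pages),
 * an order-9 (2MiB) folio does not fit, since round_up(1, 512) == 512 lies
 * beyond i_size, but an order-8 (1MiB) folio does, so the order-9 bit is
 * dropped and the remaining orders in the mask are returned.
 */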
625
626 static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
627 loff_t write_end, bool shmem_huge_force,
628 struct vm_area_struct *vma,
629 vm_flags_t vm_flags)
630 {
631 unsigned int maybe_pmd_order = HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER ?
632 0 : BIT(HPAGE_PMD_ORDER);
633 unsigned long within_size_orders;
634
635 if (!S_ISREG(inode->i_mode))
636 return 0;
637 if (shmem_huge == SHMEM_HUGE_DENY)
638 return 0;
639 if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
640 return maybe_pmd_order;
641
642 /*
643 * The huge order allocation for anon shmem is controlled through
644 * the mTHP interface, so we still use PMD-sized huge order to
645 * check whether global control is enabled.
646 *
647 * For tmpfs with 'huge=always' or 'huge=within_size' mount option,
648 * we will always try the PMD-sized order first. If that fails, it will
649 * fall back to smaller large folio orders.
650 */
651 switch (SHMEM_SB(inode->i_sb)->huge) {
652 case SHMEM_HUGE_ALWAYS:
653 return THP_ORDERS_ALL_FILE_DEFAULT;
654 case SHMEM_HUGE_WITHIN_SIZE:
655 within_size_orders = shmem_get_orders_within_size(inode,
656 THP_ORDERS_ALL_FILE_DEFAULT, index, write_end);
657 if (within_size_orders > 0)
658 return within_size_orders;
659
660 fallthrough;
661 case SHMEM_HUGE_ADVISE:
662 if (vm_flags & VM_HUGEPAGE)
663 return THP_ORDERS_ALL_FILE_DEFAULT;
664 fallthrough;
665 default:
666 return 0;
667 }
668 }
669
670 static int shmem_parse_huge(const char *str)
671 {
672 int huge;
673
674 if (!str)
675 return -EINVAL;
676
677 if (!strcmp(str, "never"))
678 huge = SHMEM_HUGE_NEVER;
679 else if (!strcmp(str, "always"))
680 huge = SHMEM_HUGE_ALWAYS;
681 else if (!strcmp(str, "within_size"))
682 huge = SHMEM_HUGE_WITHIN_SIZE;
683 else if (!strcmp(str, "advise"))
684 huge = SHMEM_HUGE_ADVISE;
685 else if (!strcmp(str, "deny"))
686 huge = SHMEM_HUGE_DENY;
687 else if (!strcmp(str, "force"))
688 huge = SHMEM_HUGE_FORCE;
689 else
690 return -EINVAL;
691
692 if (!has_transparent_hugepage() &&
693 huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
694 return -EINVAL;
695
696 /* Do not override huge allocation policy with non-PMD sized mTHP */
697 if (huge == SHMEM_HUGE_FORCE &&
698 huge_shmem_orders_inherit != BIT(HPAGE_PMD_ORDER))
699 return -EINVAL;
700
701 return huge;
702 }
703
704 #if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
705 static const char *shmem_format_huge(int huge)
706 {
707 switch (huge) {
708 case SHMEM_HUGE_NEVER:
709 return "never";
710 case SHMEM_HUGE_ALWAYS:
711 return "always";
712 case SHMEM_HUGE_WITHIN_SIZE:
713 return "within_size";
714 case SHMEM_HUGE_ADVISE:
715 return "advise";
716 case SHMEM_HUGE_DENY:
717 return "deny";
718 case SHMEM_HUGE_FORCE:
719 return "force";
720 default:
721 VM_BUG_ON(1);
722 return "bad_val";
723 }
724 }
725 #endif
726
727 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
728 struct shrink_control *sc, unsigned long nr_to_free)
729 {
730 LIST_HEAD(list), *pos, *next;
731 struct inode *inode;
732 struct shmem_inode_info *info;
733 struct folio *folio;
734 unsigned long batch = sc ? sc->nr_to_scan : 128;
735 unsigned long split = 0, freed = 0;
736
737 if (list_empty(&sbinfo->shrinklist))
738 return SHRINK_STOP;
739
740 spin_lock(&sbinfo->shrinklist_lock);
741 list_for_each_safe(pos, next, &sbinfo->shrinklist) {
742 info = list_entry(pos, struct shmem_inode_info, shrinklist);
743
744 /* pin the inode */
745 inode = igrab(&info->vfs_inode);
746
747 /* inode is about to be evicted */
748 if (!inode) {
749 list_del_init(&info->shrinklist);
750 goto next;
751 }
752
753 list_move(&info->shrinklist, &list);
754 next:
755 sbinfo->shrinklist_len--;
756 if (!--batch)
757 break;
758 }
759 spin_unlock(&sbinfo->shrinklist_lock);
760
761 list_for_each_safe(pos, next, &list) {
762 pgoff_t next, end;
763 loff_t i_size;
764 int ret;
765
766 info = list_entry(pos, struct shmem_inode_info, shrinklist);
767 inode = &info->vfs_inode;
768
769 if (nr_to_free && freed >= nr_to_free)
770 goto move_back;
771
772 i_size = i_size_read(inode);
773 folio = filemap_get_entry(inode->i_mapping, i_size / PAGE_SIZE);
774 if (!folio || xa_is_value(folio))
775 goto drop;
776
777 /* No large folio at the end of the file: nothing to split */
778 if (!folio_test_large(folio)) {
779 folio_put(folio);
780 goto drop;
781 }
782
783 /* Check if there is anything to gain from splitting */
784 next = folio_next_index(folio);
785 end = shmem_fallocend(inode, DIV_ROUND_UP(i_size, PAGE_SIZE));
786 if (end <= folio->index || end >= next) {
787 folio_put(folio);
788 goto drop;
789 }
790
791 /*
792 * Move the inode on the list back to shrinklist if we failed
793 * to lock the page at this time.
794 *
795 * Waiting for the lock may lead to deadlock in the
796 * reclaim path.
797 */
798 if (!folio_trylock(folio)) {
799 folio_put(folio);
800 goto move_back;
801 }
802
803 ret = split_folio(folio);
804 folio_unlock(folio);
805 folio_put(folio);
806
807 /* If split failed move the inode on the list back to shrinklist */
808 if (ret)
809 goto move_back;
810
811 freed += next - end;
812 split++;
813 drop:
814 list_del_init(&info->shrinklist);
815 goto put;
816 move_back:
817 /*
818 * Make sure the inode is either on the global list or deleted
819 * from any local list before iput() since it could be deleted
820 * in another thread once we put the inode (then the local list
821 * is corrupted).
822 */
823 spin_lock(&sbinfo->shrinklist_lock);
824 list_move(&info->shrinklist, &sbinfo->shrinklist);
825 sbinfo->shrinklist_len++;
826 spin_unlock(&sbinfo->shrinklist_lock);
827 put:
828 iput(inode);
829 }
830
831 return split;
832 }
833
834 static long shmem_unused_huge_scan(struct super_block *sb,
835 struct shrink_control *sc)
836 {
837 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
838
839 if (!READ_ONCE(sbinfo->shrinklist_len))
840 return SHRINK_STOP;
841
842 return shmem_unused_huge_shrink(sbinfo, sc, 0);
843 }
844
845 static long shmem_unused_huge_count(struct super_block *sb,
846 struct shrink_control *sc)
847 {
848 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
849 return READ_ONCE(sbinfo->shrinklist_len);
850 }
851 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */
852
853 #define shmem_huge SHMEM_HUGE_DENY
854
855 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
856 struct shrink_control *sc, unsigned long nr_to_free)
857 {
858 return 0;
859 }
860
861 static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
862 loff_t write_end, bool shmem_huge_force,
863 struct vm_area_struct *vma,
864 vm_flags_t vm_flags)
865 {
866 return 0;
867 }
868 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
869
870 static void shmem_update_stats(struct folio *folio, int nr_pages)
871 {
872 if (folio_test_pmd_mappable(folio))
873 lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr_pages);
874 lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr_pages);
875 lruvec_stat_mod_folio(folio, NR_SHMEM, nr_pages);
876 }
877
878 /*
879 * Somewhat like filemap_add_folio, but error if expected item has gone.
880 */
881 int shmem_add_to_page_cache(struct folio *folio,
882 struct address_space *mapping,
883 pgoff_t index, void *expected, gfp_t gfp)
884 {
885 XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
886 unsigned long nr = folio_nr_pages(folio);
887 swp_entry_t iter, swap;
888 void *entry;
889
890 VM_BUG_ON_FOLIO(index != round_down(index, nr), folio);
891 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
892 VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
893
894 folio_ref_add(folio, nr);
895 folio->mapping = mapping;
896 folio->index = index;
897
898 gfp &= GFP_RECLAIM_MASK;
899 folio_throttle_swaprate(folio, gfp);
900 swap = radix_to_swp_entry(expected);
901
902 do {
903 iter = swap;
904 xas_lock_irq(&xas);
905 xas_for_each_conflict(&xas, entry) {
906 /*
907 * The range must either be empty, or filled with
908 * expected swap entries. Shmem swap entries are never
909 * partially freed without split of both entry and
910 * folio, so there shouldn't be any holes.
911 */
912 if (!expected || entry != swp_to_radix_entry(iter)) {
913 xas_set_err(&xas, -EEXIST);
914 goto unlock;
915 }
916 iter.val += 1 << xas_get_order(&xas);
917 }
918 if (expected && iter.val - nr != swap.val) {
919 xas_set_err(&xas, -EEXIST);
920 goto unlock;
921 }
922 xas_store(&xas, folio);
923 if (xas_error(&xas))
924 goto unlock;
925 shmem_update_stats(folio, nr);
926 mapping->nrpages += nr;
927 unlock:
928 xas_unlock_irq(&xas);
929 } while (xas_nomem(&xas, gfp));
930
931 if (xas_error(&xas)) {
932 folio->mapping = NULL;
933 folio_ref_sub(folio, nr);
934 return xas_error(&xas);
935 }
936
937 return 0;
938 }
939
940 /*
941 * Somewhat like filemap_remove_folio, but substitutes swap for @folio.
942 */
943 static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
944 {
945 struct address_space *mapping = folio->mapping;
946 long nr = folio_nr_pages(folio);
947 int error;
948
949 xa_lock_irq(&mapping->i_pages);
950 error = shmem_replace_entry(mapping, folio->index, folio, radswap);
951 folio->mapping = NULL;
952 mapping->nrpages -= nr;
953 shmem_update_stats(folio, -nr);
954 xa_unlock_irq(&mapping->i_pages);
955 folio_put_refs(folio, nr);
956 BUG_ON(error);
957 }
958
959 /*
960 * Remove swap entry from page cache, free the swap and its page cache.
961 * Returns the number of pages freed; 0 means the entry was not found in
962 * the XArray, so nothing was freed.
963 */
964 static long shmem_free_swap(struct address_space *mapping,
965 pgoff_t index, pgoff_t end, void *radswap)
966 {
967 XA_STATE(xas, &mapping->i_pages, index);
968 unsigned int nr_pages = 0;
969 pgoff_t base;
970 void *entry;
971
972 xas_lock_irq(&xas);
973 entry = xas_load(&xas);
974 if (entry == radswap) {
975 nr_pages = 1 << xas_get_order(&xas);
976 base = round_down(xas.xa_index, nr_pages);
977 if (base < index || base + nr_pages - 1 > end)
978 nr_pages = 0;
979 else
980 xas_store(&xas, NULL);
981 }
982 xas_unlock_irq(&xas);
983
984 if (nr_pages)
985 swap_put_entries_direct(radix_to_swp_entry(radswap), nr_pages);
986
987 return nr_pages;
988 }
989
990 /*
991 * Determine (in bytes) how many of the shmem object's pages mapped by the
992 * given offsets are swapped out.
993 *
994 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
995 * as long as the inode doesn't go away and racy results are not a problem.
996 */
997 unsigned long shmem_partial_swap_usage(struct address_space *mapping,
998 pgoff_t start, pgoff_t end)
999 {
1000 XA_STATE(xas, &mapping->i_pages, start);
1001 struct folio *folio;
1002 unsigned long swapped = 0;
1003 unsigned long max = end - 1;
1004
1005 rcu_read_lock();
1006 xas_for_each(&xas, folio, max) {
1007 if (xas_retry(&xas, folio))
1008 continue;
1009 if (xa_is_value(folio))
1010 swapped += 1 << xas_get_order(&xas);
1011 if (xas.xa_index == max)
1012 break;
1013 if (need_resched()) {
1014 xas_pause(&xas);
1015 cond_resched_rcu();
1016 }
1017 }
1018 rcu_read_unlock();
1019
1020 return swapped << PAGE_SHIFT;
1021 }
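/*
 * E.g. for a request covering only file pages 16..31, the walk above counts
 * the swap entries found in that index range (a large entry counts its full
 * 1 << order pages), and the total is returned in bytes.
 */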
1022
1023 /*
1024 * Determine (in bytes) how many of the shmem object's pages mapped by the
1025 * given vma is swapped out.
1026 *
1027 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
1028 * as long as the inode doesn't go away and racy results are not a problem.
1029 */
1030 unsigned long shmem_swap_usage(struct vm_area_struct *vma)
1031 {
1032 struct inode *inode = file_inode(vma->vm_file);
1033 struct shmem_inode_info *info = SHMEM_I(inode);
1034 struct address_space *mapping = inode->i_mapping;
1035 unsigned long swapped;
1036
1037 /* Be careful as we don't hold info->lock */
1038 swapped = READ_ONCE(info->swapped);
1039
1040 /*
1041 * The easier cases are when the shmem object has nothing in swap, or
1042 * the vma maps it whole. Then we can simply use the stats that we
1043 * already track.
1044 */
1045 if (!swapped)
1046 return 0;
1047
1048 if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
1049 return swapped << PAGE_SHIFT;
1050
1051 /* Here comes the more involved part */
1052 return shmem_partial_swap_usage(mapping, vma->vm_pgoff,
1053 vma->vm_pgoff + vma_pages(vma));
1054 }
1055
1056 /*
1057 * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
1058 */
1059 void shmem_unlock_mapping(struct address_space *mapping)
1060 {
1061 struct folio_batch fbatch;
1062 pgoff_t index = 0;
1063
1064 folio_batch_init(&fbatch);
1065 /*
1066 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
1067 */
1068 while (!mapping_unevictable(mapping) &&
1069 filemap_get_folios(mapping, &index, ~0UL, &fbatch)) {
1070 check_move_unevictable_folios(&fbatch);
1071 folio_batch_release(&fbatch);
1072 cond_resched();
1073 }
1074 }
1075
1076 static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
1077 {
1078 struct folio *folio;
1079
1080 /*
1081 * At first avoid shmem_get_folio(,,,SGP_READ): that fails
1082 * beyond i_size, and reports fallocated folios as holes.
1083 */
1084 folio = filemap_get_entry(inode->i_mapping, index);
1085 if (!folio)
1086 return folio;
1087 if (!xa_is_value(folio)) {
1088 folio_lock(folio);
1089 if (folio->mapping == inode->i_mapping)
1090 return folio;
1091 /* The folio has been swapped out */
1092 folio_unlock(folio);
1093 folio_put(folio);
1094 }
1095 /*
1096 * But read a folio back from swap if any of it is within i_size
1097 * (although in some cases this is just a waste of time).
1098 */
1099 folio = NULL;
1100 shmem_get_folio(inode, index, 0, &folio, SGP_READ);
1101 return folio;
1102 }
1103
1104 /*
1105 * Remove range of pages and swap entries from page cache, and free them.
1106 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
1107 */
1108 static void shmem_undo_range(struct inode *inode, loff_t lstart, uoff_t lend,
1109 bool unfalloc)
1110 {
1111 struct address_space *mapping = inode->i_mapping;
1112 struct shmem_inode_info *info = SHMEM_I(inode);
1113 pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
1114 pgoff_t end = (lend + 1) >> PAGE_SHIFT;
1115 struct folio_batch fbatch;
1116 pgoff_t indices[FOLIO_BATCH_SIZE];
1117 struct folio *folio;
1118 bool same_folio;
1119 long nr_swaps_freed = 0;
1120 pgoff_t index;
1121 int i;
1122
1123 if (lend == -1)
1124 end = -1; /* unsigned, so actually very big */
1125
1126 if (info->fallocend > start && info->fallocend <= end && !unfalloc)
1127 info->fallocend = start;
1128
1129 folio_batch_init(&fbatch);
1130 index = start;
1131 while (index < end && find_lock_entries(mapping, &index, end - 1,
1132 &fbatch, indices)) {
1133 for (i = 0; i < folio_batch_count(&fbatch); i++) {
1134 folio = fbatch.folios[i];
1135
1136 if (xa_is_value(folio)) {
1137 if (unfalloc)
1138 continue;
1139 nr_swaps_freed += shmem_free_swap(mapping, indices[i],
1140 end - 1, folio);
1141 continue;
1142 }
1143
1144 if (!unfalloc || !folio_test_uptodate(folio))
1145 truncate_inode_folio(mapping, folio);
1146 folio_unlock(folio);
1147 }
1148 folio_batch_remove_exceptionals(&fbatch);
1149 folio_batch_release(&fbatch);
1150 cond_resched();
1151 }
1152
1153 /*
1154 * When undoing a failed fallocate, we want none of the partial folio
1155 * zeroing and splitting below, but shall want to truncate the whole
1156 * folio when !uptodate indicates that it was added by this fallocate,
1157 * even when [lstart, lend] covers only a part of the folio.
1158 */
1159 if (unfalloc)
1160 goto whole_folios;
1161
1162 same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
1163 folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
1164 if (folio) {
1165 same_folio = lend < folio_next_pos(folio);
1166 folio_mark_dirty(folio);
1167 if (!truncate_inode_partial_folio(folio, lstart, lend)) {
1168 start = folio_next_index(folio);
1169 if (same_folio)
1170 end = folio->index;
1171 }
1172 folio_unlock(folio);
1173 folio_put(folio);
1174 folio = NULL;
1175 }
1176
1177 if (!same_folio)
1178 folio = shmem_get_partial_folio(inode, lend >> PAGE_SHIFT);
1179 if (folio) {
1180 folio_mark_dirty(folio);
1181 if (!truncate_inode_partial_folio(folio, lstart, lend))
1182 end = folio->index;
1183 folio_unlock(folio);
1184 folio_put(folio);
1185 }
1186
1187 whole_folios:
1188
1189 index = start;
1190 while (index < end) {
1191 cond_resched();
1192
1193 if (!find_get_entries(mapping, &index, end - 1, &fbatch,
1194 indices)) {
1195 /* If all gone or hole-punch or unfalloc, we're done */
1196 if (index == start || end != -1)
1197 break;
1198 /* But if truncating, restart to make sure all gone */
1199 index = start;
1200 continue;
1201 }
1202 for (i = 0; i < folio_batch_count(&fbatch); i++) {
1203 folio = fbatch.folios[i];
1204
1205 if (xa_is_value(folio)) {
1206 int order;
1207 long swaps_freed;
1208
1209 if (unfalloc)
1210 continue;
1211 swaps_freed = shmem_free_swap(mapping, indices[i],
1212 end - 1, folio);
1213 if (!swaps_freed) {
1214 pgoff_t base = indices[i];
1215
1216 order = shmem_confirm_swap(mapping, indices[i],
1217 radix_to_swp_entry(folio));
1218 /*
1219 * If we found a large swap entry crossing the end or start
1220 * border, skip it, as truncate_inode_partial_folio()
1221 * above should have zeroed its content at least once.
1222 */
1223 if (order > 0) {
1224 base = round_down(base, 1 << order);
1225 if (base < start || base + (1 << order) > end)
1226 continue;
1227 }
1228 /* Swap was replaced by page or extended, retry */
1229 index = base;
1230 break;
1231 }
1232 nr_swaps_freed += swaps_freed;
1233 continue;
1234 }
1235
1236 folio_lock(folio);
1237
1238 if (!unfalloc || !folio_test_uptodate(folio)) {
1239 if (folio_mapping(folio) != mapping) {
1240 /* Page was replaced by swap: retry */
1241 folio_unlock(folio);
1242 index = indices[i];
1243 break;
1244 }
1245 VM_BUG_ON_FOLIO(folio_test_writeback(folio),
1246 folio);
1247
1248 if (!folio_test_large(folio)) {
1249 truncate_inode_folio(mapping, folio);
1250 } else if (truncate_inode_partial_folio(folio, lstart, lend)) {
1251 /*
1252 * If we split a page, reset the loop so
1253 * that we pick up the new sub pages.
1254 * Otherwise the THP was entirely
1255 * dropped or the target range was
1256 * zeroed, so just continue the loop as
1257 * is.
1258 */
1259 if (!folio_test_large(folio)) {
1260 folio_unlock(folio);
1261 index = start;
1262 break;
1263 }
1264 }
1265 }
1266 folio_unlock(folio);
1267 }
1268 folio_batch_remove_exceptionals(&fbatch);
1269 folio_batch_release(&fbatch);
1270 }
1271
1272 shmem_recalc_inode(inode, 0, -nr_swaps_freed);
1273 }
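/*
 * Illustration of the rounding in shmem_undo_range() (4KiB pages): punching
 * bytes 1000..9999 gives start == 1 and end == 2, so only page index 1 is
 * removed outright, while the tail of page 0 and the head of page 2 are
 * handled by the partial-folio zeroing/truncation paths above.
 */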
1274
1275 void shmem_truncate_range(struct inode *inode, loff_t lstart, uoff_t lend)
1276 {
1277 shmem_undo_range(inode, lstart, lend, false);
1278 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
1279 inode_inc_iversion(inode);
1280 }
1281 EXPORT_SYMBOL_GPL(shmem_truncate_range);
1282
1283 static int shmem_getattr(struct mnt_idmap *idmap,
1284 const struct path *path, struct kstat *stat,
1285 u32 request_mask, unsigned int query_flags)
1286 {
1287 struct inode *inode = path->dentry->d_inode;
1288 struct shmem_inode_info *info = SHMEM_I(inode);
1289
1290 if (info->alloced - info->swapped != inode->i_mapping->nrpages)
1291 shmem_recalc_inode(inode, 0, 0);
1292
1293 if (info->fsflags & FS_APPEND_FL)
1294 stat->attributes |= STATX_ATTR_APPEND;
1295 if (info->fsflags & FS_IMMUTABLE_FL)
1296 stat->attributes |= STATX_ATTR_IMMUTABLE;
1297 if (info->fsflags & FS_NODUMP_FL)
1298 stat->attributes |= STATX_ATTR_NODUMP;
1299 stat->attributes_mask |= (STATX_ATTR_APPEND |
1300 STATX_ATTR_IMMUTABLE |
1301 STATX_ATTR_NODUMP);
1302 generic_fillattr(idmap, request_mask, inode, stat);
1303
1304 if (shmem_huge_global_enabled(inode, 0, 0, false, NULL, 0))
1305 stat->blksize = HPAGE_PMD_SIZE;
1306
1307 if (request_mask & STATX_BTIME) {
1308 stat->result_mask |= STATX_BTIME;
1309 stat->btime.tv_sec = info->i_crtime.tv_sec;
1310 stat->btime.tv_nsec = info->i_crtime.tv_nsec;
1311 }
1312
1313 return 0;
1314 }
1315
1316 static int shmem_setattr(struct mnt_idmap *idmap,
1317 struct dentry *dentry, struct iattr *attr)
1318 {
1319 struct inode *inode = d_inode(dentry);
1320 struct shmem_inode_info *info = SHMEM_I(inode);
1321 int error;
1322 bool update_mtime = false;
1323 bool update_ctime = true;
1324
1325 error = setattr_prepare(idmap, dentry, attr);
1326 if (error)
1327 return error;
1328
1329 if ((info->seals & F_SEAL_EXEC) && (attr->ia_valid & ATTR_MODE)) {
1330 if ((inode->i_mode ^ attr->ia_mode) & 0111) {
1331 return -EPERM;
1332 }
1333 }
1334
1335 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
1336 loff_t oldsize = inode->i_size;
1337 loff_t newsize = attr->ia_size;
1338
1339 /* protected by i_rwsem */
1340 if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
1341 (newsize > oldsize && (info->seals & F_SEAL_GROW)))
1342 return -EPERM;
1343
1344 if (newsize != oldsize) {
1345 if (info->flags & SHMEM_F_MAPPING_FROZEN)
1346 return -EPERM;
1347 error = shmem_reacct_size(SHMEM_I(inode)->flags,
1348 oldsize, newsize);
1349 if (error)
1350 return error;
1351 i_size_write(inode, newsize);
1352 update_mtime = true;
1353 } else {
1354 update_ctime = false;
1355 }
1356 if (newsize <= oldsize) {
1357 loff_t holebegin = round_up(newsize, PAGE_SIZE);
1358 if (oldsize > holebegin)
1359 unmap_mapping_range(inode->i_mapping,
1360 holebegin, 0, 1);
1361 if (info->alloced)
1362 shmem_truncate_range(inode,
1363 newsize, (loff_t)-1);
1364 /* unmap again to remove racily COWed private pages */
1365 if (oldsize > holebegin)
1366 unmap_mapping_range(inode->i_mapping,
1367 holebegin, 0, 1);
1368 }
1369 }
1370
1371 if (is_quota_modification(idmap, inode, attr)) {
1372 error = dquot_initialize(inode);
1373 if (error)
1374 return error;
1375 }
1376
1377 /* Transfer quota accounting */
1378 if (i_uid_needs_update(idmap, attr, inode) ||
1379 i_gid_needs_update(idmap, attr, inode)) {
1380 error = dquot_transfer(idmap, inode, attr);
1381 if (error)
1382 return error;
1383 }
1384
1385 setattr_copy(idmap, inode, attr);
1386 if (attr->ia_valid & ATTR_MODE)
1387 error = posix_acl_chmod(idmap, dentry, inode->i_mode);
1388 if (!error && update_ctime) {
1389 inode_set_ctime_current(inode);
1390 if (update_mtime)
1391 inode_set_mtime_to_ts(inode, inode_get_ctime(inode));
1392 inode_inc_iversion(inode);
1393 }
1394 return error;
1395 }
1396
1397 static void shmem_evict_inode(struct inode *inode)
1398 {
1399 struct shmem_inode_info *info = SHMEM_I(inode);
1400 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1401 size_t freed = 0;
1402
1403 if (shmem_mapping(inode->i_mapping)) {
1404 shmem_unacct_size(info->flags, inode->i_size);
1405 inode->i_size = 0;
1406 mapping_set_exiting(inode->i_mapping);
1407 shmem_truncate_range(inode, 0, (loff_t)-1);
1408 if (!list_empty(&info->shrinklist)) {
1409 spin_lock(&sbinfo->shrinklist_lock);
1410 if (!list_empty(&info->shrinklist)) {
1411 list_del_init(&info->shrinklist);
1412 sbinfo->shrinklist_len--;
1413 }
1414 spin_unlock(&sbinfo->shrinklist_lock);
1415 }
1416 while (!list_empty(&info->swaplist)) {
1417 /* Wait while shmem_unuse() is scanning this inode... */
1418 wait_var_event(&info->stop_eviction,
1419 !atomic_read(&info->stop_eviction));
1420 spin_lock(&shmem_swaplist_lock);
1421 /* ...but beware of the race if we peeked too early */
1422 if (!atomic_read(&info->stop_eviction))
1423 list_del_init(&info->swaplist);
1424 spin_unlock(&shmem_swaplist_lock);
1425 }
1426 }
1427
1428 if (info->xattrs) {
1429 simple_xattrs_free(info->xattrs, sbinfo->max_inodes ? &freed : NULL);
1430 kfree(info->xattrs);
1431 }
1432 shmem_free_inode(inode->i_sb, freed);
1433 WARN_ON(inode->i_blocks);
1434 clear_inode(inode);
1435 #ifdef CONFIG_TMPFS_QUOTA
1436 dquot_free_inode(inode);
1437 dquot_drop(inode);
1438 #endif
1439 }
1440
1441 static unsigned int shmem_find_swap_entries(struct address_space *mapping,
1442 pgoff_t start, struct folio_batch *fbatch,
1443 pgoff_t *indices, unsigned int type)
1444 {
1445 XA_STATE(xas, &mapping->i_pages, start);
1446 struct folio *folio;
1447 swp_entry_t entry;
1448
1449 rcu_read_lock();
1450 xas_for_each(&xas, folio, ULONG_MAX) {
1451 if (xas_retry(&xas, folio))
1452 continue;
1453
1454 if (!xa_is_value(folio))
1455 continue;
1456
1457 entry = radix_to_swp_entry(folio);
1458 /*
1459 * swapin error entries can be found in the mapping. But they're
1460 * deliberately ignored here as we've done everything we can do.
1461 */
1462 if (swp_type(entry) != type)
1463 continue;
1464
1465 indices[folio_batch_count(fbatch)] = xas.xa_index;
1466 if (!folio_batch_add(fbatch, folio))
1467 break;
1468
1469 if (need_resched()) {
1470 xas_pause(&xas);
1471 cond_resched_rcu();
1472 }
1473 }
1474 rcu_read_unlock();
1475
1476 return folio_batch_count(fbatch);
1477 }
1478
1479 /*
1480 * Move the swapped pages for an inode to page cache. Returns the count
1481 * of pages swapped in, or the error in case of failure.
1482 */
1483 static int shmem_unuse_swap_entries(struct inode *inode,
1484 struct folio_batch *fbatch, pgoff_t *indices)
1485 {
1486 int i = 0;
1487 int ret = 0;
1488 int error = 0;
1489 struct address_space *mapping = inode->i_mapping;
1490
1491 for (i = 0; i < folio_batch_count(fbatch); i++) {
1492 struct folio *folio = fbatch->folios[i];
1493
1494 error = shmem_swapin_folio(inode, indices[i], &folio, SGP_CACHE,
1495 mapping_gfp_mask(mapping), NULL, NULL);
1496 if (error == 0) {
1497 folio_unlock(folio);
1498 folio_put(folio);
1499 ret++;
1500 }
1501 if (error == -ENOMEM)
1502 break;
1503 error = 0;
1504 }
1505 return error ? error : ret;
1506 }
1507
1508 /*
1509 * If swap found in inode, free it and move page from swapcache to filecache.
1510 */
1511 static int shmem_unuse_inode(struct inode *inode, unsigned int type)
1512 {
1513 struct address_space *mapping = inode->i_mapping;
1514 pgoff_t start = 0;
1515 struct folio_batch fbatch;
1516 pgoff_t indices[FOLIO_BATCH_SIZE];
1517 int ret = 0;
1518
1519 do {
1520 folio_batch_init(&fbatch);
1521 if (!shmem_find_swap_entries(mapping, start, &fbatch,
1522 indices, type)) {
1523 ret = 0;
1524 break;
1525 }
1526
1527 ret = shmem_unuse_swap_entries(inode, &fbatch, indices);
1528 if (ret < 0)
1529 break;
1530
1531 start = indices[folio_batch_count(&fbatch) - 1];
1532 } while (true);
1533
1534 return ret;
1535 }
1536
1537 /*
1538 * Read all the shared memory data that resides in the swap
1539 * device 'type' back into memory, so the swap device can be
1540 * unused.
1541 */
1542 int shmem_unuse(unsigned int type)
1543 {
1544 struct shmem_inode_info *info, *next;
1545 int error = 0;
1546
1547 if (list_empty(&shmem_swaplist))
1548 return 0;
1549
1550 spin_lock(&shmem_swaplist_lock);
1551 start_over:
1552 list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
1553 if (!info->swapped) {
1554 list_del_init(&info->swaplist);
1555 continue;
1556 }
1557 /*
1558 * Drop the swaplist lock while searching the inode for swap;
1559 * but before doing so, make sure shmem_evict_inode() will not
1560 * remove placeholder inode from swaplist, nor let it be freed
1561 * (igrab() would protect from unlink, but not from unmount).
1562 */
1563 atomic_inc(&info->stop_eviction);
1564 spin_unlock(&shmem_swaplist_lock);
1565
1566 error = shmem_unuse_inode(&info->vfs_inode, type);
1567 cond_resched();
1568
1569 spin_lock(&shmem_swaplist_lock);
1570 if (atomic_dec_and_test(&info->stop_eviction))
1571 wake_up_var(&info->stop_eviction);
1572 if (error)
1573 break;
1574 if (list_empty(&info->swaplist))
1575 goto start_over;
1576 next = list_next_entry(info, swaplist);
1577 if (!info->swapped)
1578 list_del_init(&info->swaplist);
1579 }
1580 spin_unlock(&shmem_swaplist_lock);
1581
1582 return error;
1583 }
1584
1585 /**
1586 * shmem_writeout - Write the folio to swap
1587 * @folio: The folio to write
1588 * @plug: swap plug
1589 * @folio_list: list to put back folios on split
1590 *
1591 * Move the folio from the page cache to the swap cache.
1592 */
1593 int shmem_writeout(struct folio *folio, struct swap_iocb **plug,
1594 struct list_head *folio_list)
1595 {
1596 struct address_space *mapping = folio->mapping;
1597 struct inode *inode = mapping->host;
1598 struct shmem_inode_info *info = SHMEM_I(inode);
1599 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1600 pgoff_t index;
1601 int nr_pages;
1602 bool split = false;
1603
1604 if ((info->flags & SHMEM_F_LOCKED) || sbinfo->noswap)
1605 goto redirty;
1606
1607 if (!total_swap_pages)
1608 goto redirty;
1609
1610 /*
1611 * If CONFIG_THP_SWAP is not enabled, the large folio should be
1612 * split when swapping.
1613 *
1614 * And shrinkage of pages beyond i_size does not split swap, so
1615 * swapout of a large folio crossing i_size needs to split too
1616 * (unless fallocate has been used to preallocate beyond EOF).
1617 */
1618 if (folio_test_large(folio)) {
1619 index = shmem_fallocend(inode,
1620 DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE));
1621 if ((index > folio->index && index < folio_next_index(folio)) ||
1622 !IS_ENABLED(CONFIG_THP_SWAP))
1623 split = true;
1624 }
1625
1626 if (split) {
1627 int order;
1628
1629 try_split:
1630 order = folio_order(folio);
1631 /* Ensure the subpages are still dirty */
1632 folio_test_set_dirty(folio);
1633 if (split_folio_to_list(folio, folio_list))
1634 goto redirty;
1635
1636 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1637 if (order >= HPAGE_PMD_ORDER) {
1638 count_memcg_folio_events(folio, THP_SWPOUT_FALLBACK, 1);
1639 count_vm_event(THP_SWPOUT_FALLBACK);
1640 }
1641 #endif
1642 count_mthp_stat(order, MTHP_STAT_SWPOUT_FALLBACK);
1643
1644 folio_clear_dirty(folio);
1645 }
1646
1647 index = folio->index;
1648 nr_pages = folio_nr_pages(folio);
1649
1650 /*
1651 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
1652 * value into swapfile.c, the only way we can correctly account for a
1653 * fallocated folio arriving here is now to initialize it and write it.
1654 *
1655 * That's okay for a folio already fallocated earlier, but if we have
1656 * not yet completed the fallocation, then (a) we want to keep track
1657 * of this folio in case we have to undo it, and (b) it may not be a
1658 * good idea to continue anyway, once we're pushing into swap. So
1659 * reactivate the folio, and let shmem_fallocate() quit when too many.
1660 */
1661 if (!folio_test_uptodate(folio)) {
1662 if (inode->i_private) {
1663 struct shmem_falloc *shmem_falloc;
1664 spin_lock(&inode->i_lock);
1665 shmem_falloc = inode->i_private;
1666 if (shmem_falloc &&
1667 !shmem_falloc->waitq &&
1668 index >= shmem_falloc->start &&
1669 index < shmem_falloc->next)
1670 shmem_falloc->nr_unswapped += nr_pages;
1671 else
1672 shmem_falloc = NULL;
1673 spin_unlock(&inode->i_lock);
1674 if (shmem_falloc)
1675 goto redirty;
1676 }
1677 folio_zero_range(folio, 0, folio_size(folio));
1678 flush_dcache_folio(folio);
1679 folio_mark_uptodate(folio);
1680 }
1681
1682 if (!folio_alloc_swap(folio)) {
1683 bool first_swapped = shmem_recalc_inode(inode, 0, nr_pages);
1684 int error;
1685
1686 /*
1687 * Add inode to shmem_unuse()'s list of swapped-out inodes,
1688 * if it's not already there. Do it now before the folio is
1689 * removed from page cache, when its pagelock no longer
1690 * protects the inode from eviction. And do it now, after
1691 * we've incremented swapped, because shmem_unuse() will
1692 * prune a !swapped inode from the swaplist.
1693 */
1694 if (first_swapped) {
1695 spin_lock(&shmem_swaplist_lock);
1696 if (list_empty(&info->swaplist))
1697 list_add(&info->swaplist, &shmem_swaplist);
1698 spin_unlock(&shmem_swaplist_lock);
1699 }
1700
1701 folio_dup_swap(folio, NULL);
1702 shmem_delete_from_page_cache(folio, swp_to_radix_entry(folio->swap));
1703
1704 BUG_ON(folio_mapped(folio));
1705 error = swap_writeout(folio, plug);
1706 if (error != AOP_WRITEPAGE_ACTIVATE) {
1707 /* folio has been unlocked */
1708 return error;
1709 }
1710
1711 /*
1712 * The intention here is to avoid holding on to the swap when
1713 * zswap was unable to compress and unable to writeback; but
1714 * it will be appropriate if other reactivate cases are added.
1715 */
1716 error = shmem_add_to_page_cache(folio, mapping, index,
1717 swp_to_radix_entry(folio->swap),
1718 __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
1719 /* Swap entry might be erased by racing shmem_free_swap() */
1720 if (!error) {
1721 shmem_recalc_inode(inode, 0, -nr_pages);
1722 folio_put_swap(folio, NULL);
1723 }
1724
1725 /*
1726 * The swap_cache_del_folio() below could be left for
1727 * shrink_folio_list()'s folio_free_swap() to dispose of;
1728 * but I'm a little nervous about letting this folio out of
1729 * shmem_writeout() in a hybrid half-tmpfs-half-swap state
1730 * e.g. folio_mapping(folio) might give an unexpected answer.
1731 */
1732 swap_cache_del_folio(folio);
1733 goto redirty;
1734 }
1735 if (nr_pages > 1)
1736 goto try_split;
1737 redirty:
1738 folio_mark_dirty(folio);
1739 return AOP_WRITEPAGE_ACTIVATE; /* Return with folio locked */
1740 }
1741 EXPORT_SYMBOL_GPL(shmem_writeout);
1742
1743 #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
1744 static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1745 {
1746 char buffer[64];
1747
1748 if (!mpol || mpol->mode == MPOL_DEFAULT)
1749 return; /* show nothing */
1750
1751 mpol_to_str(buffer, sizeof(buffer), mpol);
1752
1753 seq_printf(seq, ",mpol=%s", buffer);
1754 }
1755
1756 static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1757 {
1758 struct mempolicy *mpol = NULL;
1759 if (sbinfo->mpol) {
1760 raw_spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */
1761 mpol = sbinfo->mpol;
1762 mpol_get(mpol);
1763 raw_spin_unlock(&sbinfo->stat_lock);
1764 }
1765 return mpol;
1766 }
1767 #else /* !CONFIG_NUMA || !CONFIG_TMPFS */
1768 static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1769 {
1770 }
1771 static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1772 {
1773 return NULL;
1774 }
1775 #endif /* CONFIG_NUMA && CONFIG_TMPFS */
1776
1777 static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info,
1778 pgoff_t index, unsigned int order, pgoff_t *ilx);
1779
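/*
 * Helper summary (added annotation): swap in one entry, together with a
 * readahead cluster around it, applying the inode's mempolicy for the
 * faulting index.
 */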
1780 static struct folio *shmem_swapin_cluster(swp_entry_t swap, gfp_t gfp,
1781 struct shmem_inode_info *info, pgoff_t index)
1782 {
1783 struct mempolicy *mpol;
1784 pgoff_t ilx;
1785 struct folio *folio;
1786
1787 mpol = shmem_get_pgoff_policy(info, index, 0, &ilx);
1788 folio = swap_cluster_readahead(swap, gfp, mpol, ilx);
1789 mpol_cond_put(mpol);
1790
1791 return folio;
1792 }
1793
1794 /*
1795 * Make sure huge_gfp is always more limited than limit_gfp.
1796 * Some of the flags set permissions, while others set limitations.
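 *
 * For example: if limit_gfp lacks __GFP_FS, the combined mask will not
 * allow __GFP_FS even when huge_gfp requested it; __GFP_NOWARN or
 * __GFP_NORETRY set in limit_gfp are always propagated; and the zone
 * bits are taken from limit_gfp alone.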
1797 */
1798 static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
1799 {
1800 gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
1801 gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY;
1802 gfp_t zoneflags = limit_gfp & GFP_ZONEMASK;
1803 gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK);
1804
1805 /* Allow allocations only from the originally specified zones. */
1806 result |= zoneflags;
1807
1808 /*
1809 * Minimize the result gfp by taking the union with the deny flags,
1810 * and the intersection of the allow flags.
1811 */
1812 result |= (limit_gfp & denyflags);
1813 result |= (huge_gfp & limit_gfp) & allowflags;
1814
1815 return result;
1816 }
1817
1818 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1819 bool shmem_hpage_pmd_enabled(void)
1820 {
1821 if (shmem_huge == SHMEM_HUGE_DENY)
1822 return false;
1823 if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_always))
1824 return true;
1825 if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_madvise))
1826 return true;
1827 if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_within_size))
1828 return true;
1829 if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_inherit) &&
1830 shmem_huge != SHMEM_HUGE_NEVER)
1831 return true;
1832
1833 return false;
1834 }
1835
1836 unsigned long shmem_allowable_huge_orders(struct inode *inode,
1837 struct vm_area_struct *vma, pgoff_t index,
1838 loff_t write_end, bool shmem_huge_force)
1839 {
1840 unsigned long mask = READ_ONCE(huge_shmem_orders_always);
1841 unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
1842 vm_flags_t vm_flags = vma ? vma->vm_flags : 0;
1843 unsigned int global_orders;
1844
1845 if (thp_disabled_by_hw() || (vma && vma_thp_disabled(vma, vm_flags, shmem_huge_force)))
1846 return 0;
1847
1848 global_orders = shmem_huge_global_enabled(inode, index, write_end,
1849 shmem_huge_force, vma, vm_flags);
1850 /* Tmpfs huge pages allocation */
1851 if (!vma || !vma_is_anon_shmem(vma))
1852 return global_orders;
1853
1854 /*
1855 * Following the 'deny' semantics of the top level, force the huge
1856 * option off from all mounts.
1857 */
1858 if (shmem_huge == SHMEM_HUGE_DENY)
1859 return 0;
1860
1861 /*
1862 * Only allow inherit orders if the top-level value is 'force', which
1863 * means non-PMD sized THP can not override 'huge' mount option now.
1864 */
1865 if (shmem_huge == SHMEM_HUGE_FORCE)
1866 return READ_ONCE(huge_shmem_orders_inherit);
1867
1868 /* Allow mTHP that will be fully within i_size. */
1869 mask |= shmem_get_orders_within_size(inode, within_size_orders, index, 0);
1870
1871 if (vm_flags & VM_HUGEPAGE)
1872 mask |= READ_ONCE(huge_shmem_orders_madvise);
1873
1874 if (global_orders > 0)
1875 mask |= READ_ONCE(huge_shmem_orders_inherit);
1876
1877 return THP_ORDERS_ALL_FILE_DEFAULT & mask;
1878 }
1879
1880 static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf,
1881 struct address_space *mapping, pgoff_t index,
1882 unsigned long orders)
1883 {
1884 struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
1885 pgoff_t aligned_index;
1886 unsigned long pages;
1887 int order;
1888
1889 if (vma) {
1890 orders = thp_vma_suitable_orders(vma, vmf->address, orders);
1891 if (!orders)
1892 return 0;
1893 }
1894
1895 /* Find the highest order that can add into the page cache */
1896 order = highest_order(orders);
1897 while (orders) {
1898 pages = 1UL << order;
1899 aligned_index = round_down(index, pages);
1900 /*
1901 * Check for conflict before waiting on a huge allocation.
1902 * Conflict might be that a huge page has just been allocated
1903 * and added to page cache by a racing thread, or that there
1904 * is already at least one small page in the huge extent.
1905 * Be careful to retry when appropriate, but not forever!
1906 * Elsewhere -EEXIST would be the right code, but not here.
1907 */
1908 if (!xa_find(&mapping->i_pages, &aligned_index,
1909 aligned_index + pages - 1, XA_PRESENT))
1910 break;
1911 order = next_order(&orders, order);
1912 }
1913
1914 return orders;
1915 }
1916 #else
1917 static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf,
1918 struct address_space *mapping, pgoff_t index,
1919 unsigned long orders)
1920 {
1921 return 0;
1922 }
1923 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1924
1925 static struct folio *shmem_alloc_folio(gfp_t gfp, int order,
1926 struct shmem_inode_info *info, pgoff_t index)
1927 {
1928 struct mempolicy *mpol;
1929 pgoff_t ilx;
1930 struct folio *folio;
1931
1932 mpol = shmem_get_pgoff_policy(info, index, order, &ilx);
1933 folio = folio_alloc_mpol(gfp, order, mpol, ilx, numa_node_id());
1934 mpol_cond_put(mpol);
1935
1936 return folio;
1937 }
1938
1939 static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
1940 gfp_t gfp, struct inode *inode, pgoff_t index,
1941 struct mm_struct *fault_mm, unsigned long orders)
1942 {
1943 struct address_space *mapping = inode->i_mapping;
1944 struct shmem_inode_info *info = SHMEM_I(inode);
1945 unsigned long suitable_orders = 0;
1946 struct folio *folio = NULL;
1947 pgoff_t aligned_index;
1948 long pages;
1949 int error, order;
1950
1951 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
1952 orders = 0;
1953
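/*
 * Try each allowed large order from highest to lowest and take the
 * first one whose folio allocation succeeds; if none can be satisfied,
 * fall back to a plain order-0 folio below.
 */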
1954 if (orders > 0) {
1955 suitable_orders = shmem_suitable_orders(inode, vmf,
1956 mapping, index, orders);
1957
1958 order = highest_order(suitable_orders);
1959 while (suitable_orders) {
1960 pages = 1UL << order;
1961 aligned_index = round_down(index, pages);
1962 folio = shmem_alloc_folio(gfp, order, info, aligned_index);
1963 if (folio) {
1964 index = aligned_index;
1965 goto allocated;
1966 }
1967
1968 if (pages == HPAGE_PMD_NR)
1969 count_vm_event(THP_FILE_FALLBACK);
1970 count_mthp_stat(order, MTHP_STAT_SHMEM_FALLBACK);
1971 order = next_order(&suitable_orders, order);
1972 }
1973 } else {
1974 pages = 1;
1975 folio = shmem_alloc_folio(gfp, 0, info, index);
1976 }
1977 if (!folio)
1978 return ERR_PTR(-ENOMEM);
1979
1980 allocated:
1981 __folio_set_locked(folio);
1982 __folio_set_swapbacked(folio);
1983
1984 gfp &= GFP_RECLAIM_MASK;
1985 error = mem_cgroup_charge(folio, fault_mm, gfp);
1986 if (error) {
1987 if (xa_find(&mapping->i_pages, &index,
1988 index + pages - 1, XA_PRESENT)) {
1989 error = -EEXIST;
1990 } else if (pages > 1) {
1991 if (pages == HPAGE_PMD_NR) {
1992 count_vm_event(THP_FILE_FALLBACK);
1993 count_vm_event(THP_FILE_FALLBACK_CHARGE);
1994 }
1995 count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK);
1996 count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK_CHARGE);
1997 }
1998 goto unlock;
1999 }
2000
2001 error = shmem_add_to_page_cache(folio, mapping, index, NULL, gfp);
2002 if (error)
2003 goto unlock;
2004
2005 error = shmem_inode_acct_blocks(inode, pages);
2006 if (error) {
2007 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2008 long freed;
2009 /*
2010 * Try to reclaim some space by splitting a few
2011 * large folios beyond i_size on the filesystem.
2012 */
2013 shmem_unused_huge_shrink(sbinfo, NULL, pages);
2014 /*
2015 * And do a shmem_recalc_inode() to account for freed pages:
2016 * except our folio is there in cache, so not quite balanced.
2017 */
2018 spin_lock(&info->lock);
2019 freed = pages + info->alloced - info->swapped -
2020 READ_ONCE(mapping->nrpages);
2021 if (freed > 0)
2022 info->alloced -= freed;
2023 spin_unlock(&info->lock);
2024 if (freed > 0)
2025 shmem_inode_unacct_blocks(inode, freed);
2026 error = shmem_inode_acct_blocks(inode, pages);
2027 if (error) {
2028 filemap_remove_folio(folio);
2029 goto unlock;
2030 }
2031 }
2032
2033 shmem_recalc_inode(inode, pages, 0);
2034 folio_add_lru(folio);
2035 return folio;
2036
2037 unlock:
2038 folio_unlock(folio);
2039 folio_put(folio);
2040 return ERR_PTR(error);
2041 }
2042
2043 static struct folio *shmem_swap_alloc_folio(struct inode *inode,
2044 struct vm_area_struct *vma, pgoff_t index,
2045 swp_entry_t entry, int order, gfp_t gfp)
2046 {
2047 struct shmem_inode_info *info = SHMEM_I(inode);
2048 struct folio *new, *swapcache;
2049 int nr_pages = 1 << order;
2050 gfp_t alloc_gfp = gfp;
2051
2052 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
2053 if (WARN_ON_ONCE(order))
2054 return ERR_PTR(-EINVAL);
2055 } else if (order) {
2056 /*
2057 * If uffd is active for the vma, we need per-page fault
2058 * fidelity to maintain the uffd semantics, so fall back
2059 * to swapping in an order-0 folio; the same applies to the zswap case.
2060 * Any existing sub folio in the swap cache also blocks
2061 * mTHP swapin.
2062 */
2063 if ((vma && unlikely(userfaultfd_armed(vma))) ||
2064 !zswap_never_enabled() ||
2065 non_swapcache_batch(entry, nr_pages) != nr_pages)
2066 goto fallback;
2067
2068 alloc_gfp = limit_gfp_mask(vma_thp_gfp_mask(vma), gfp);
2069 }
2070 retry:
2071 new = shmem_alloc_folio(alloc_gfp, order, info, index);
2072 if (!new) {
2073 new = ERR_PTR(-ENOMEM);
2074 goto fallback;
2075 }
2076
2077 if (mem_cgroup_swapin_charge_folio(new, vma ? vma->vm_mm : NULL,
2078 alloc_gfp, entry)) {
2079 folio_put(new);
2080 new = ERR_PTR(-ENOMEM);
2081 goto fallback;
2082 }
2083
2084 swapcache = swapin_folio(entry, new);
2085 if (swapcache != new) {
2086 folio_put(new);
2087 if (!swapcache) {
2088 /*
2089 * The new folio is already charged; swapin can
2090 * only fail due to a racing swapin.
2091 */
2092 new = ERR_PTR(-EEXIST);
2093 goto fallback;
2094 }
2095 }
2096 return swapcache;
2097 fallback:
2098 /* Order 0 swapin failed, nothing to fallback to, abort */
2099 if (!order)
2100 return new;
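/*
 * Convert the order-aligned base entry back to the exact swap entry
 * for this index before retrying with a single order-0 folio.
 */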
2101 entry.val += index - round_down(index, nr_pages);
2102 alloc_gfp = gfp;
2103 nr_pages = 1;
2104 order = 0;
2105 goto retry;
2106 }
2107
2108 /*
2109 * When a page is moved from swapcache to shmem filecache (either by the
2110 * usual swapin of shmem_get_folio_gfp(), or by the less common swapoff of
2111 * shmem_unuse_inode()), it may have been read in earlier from swap, in
2112 * ignorance of the mapping it belongs to. If that mapping has special
2113 * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
2114 * we may need to copy to a suitable page before moving to filecache.
2115 *
2116 * In a future release, this may well be extended to respect cpuset and
2117 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
2118 * but for now it is a simple matter of zone.
2119 */
2120 static bool shmem_should_replace_folio(struct folio *folio, gfp_t gfp)
2121 {
2122 return folio_zonenum(folio) > gfp_zone(gfp);
2123 }
2124
2125 static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
2126 struct shmem_inode_info *info, pgoff_t index,
2127 struct vm_area_struct *vma)
2128 {
2129 struct swap_cluster_info *ci;
2130 struct folio *new, *old = *foliop;
2131 swp_entry_t entry = old->swap;
2132 int nr_pages = folio_nr_pages(old);
2133 int error = 0;
2134
2135 /*
2136 * We have arrived here because our zones are constrained, so don't
2137 * limit chance of success by further cpuset and node constraints.
2138 */
2139 gfp &= ~GFP_CONSTRAINT_MASK;
2140 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2141 if (nr_pages > 1) {
2142 gfp_t huge_gfp = vma_thp_gfp_mask(vma);
2143
2144 gfp = limit_gfp_mask(huge_gfp, gfp);
2145 }
2146 #endif
2147
2148 new = shmem_alloc_folio(gfp, folio_order(old), info, index);
2149 if (!new)
2150 return -ENOMEM;
2151
2152 folio_ref_add(new, nr_pages);
2153 folio_copy(new, old);
2154 flush_dcache_folio(new);
2155
2156 __folio_set_locked(new);
2157 __folio_set_swapbacked(new);
2158 folio_mark_uptodate(new);
2159 new->swap = entry;
2160 folio_set_swapcache(new);
2161
2162 ci = swap_cluster_get_and_lock_irq(old);
2163 __swap_cache_replace_folio(ci, old, new);
2164 mem_cgroup_replace_folio(old, new);
2165 shmem_update_stats(new, nr_pages);
2166 shmem_update_stats(old, -nr_pages);
2167 swap_cluster_unlock_irq(ci);
2168
2169 folio_add_lru(new);
2170 *foliop = new;
2171
2172 folio_clear_swapcache(old);
2173 old->private = NULL;
2174
2175 folio_unlock(old);
2176 /*
2177 * The old folio has been removed from the swap cache: drop its
2178 * 'nr_pages' references, as well as the one temporary reference
2179 * obtained from the swap cache.
2180 */
2181 folio_put_refs(old, nr_pages + 1);
2182 return error;
2183 }
2184
2185 static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
2186 struct folio *folio, swp_entry_t swap)
2187 {
2188 struct address_space *mapping = inode->i_mapping;
2189 swp_entry_t swapin_error;
2190 void *old;
2191 int nr_pages;
2192
2193 swapin_error = make_poisoned_swp_entry();
2194 old = xa_cmpxchg_irq(&mapping->i_pages, index,
2195 swp_to_radix_entry(swap),
2196 swp_to_radix_entry(swapin_error), 0);
2197 if (old != swp_to_radix_entry(swap))
2198 return;
2199
2200 nr_pages = folio_nr_pages(folio);
2201 folio_wait_writeback(folio);
2202 folio_put_swap(folio, NULL);
2203 swap_cache_del_folio(folio);
2204 /*
2205 * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks
2206 * won't be 0 when inode is released and thus trigger WARN_ON(i_blocks)
2207 * in shmem_evict_inode().
2208 */
2209 shmem_recalc_inode(inode, -nr_pages, -nr_pages);
2210 }
2211
2212 static int shmem_split_large_entry(struct inode *inode, pgoff_t index,
2213 swp_entry_t swap, gfp_t gfp)
2214 {
2215 struct address_space *mapping = inode->i_mapping;
2216 XA_STATE_ORDER(xas, &mapping->i_pages, index, 0);
2217 int split_order = 0;
2218 int i;
2219
2220 /* Convert user data gfp flags to xarray node gfp flags */
2221 gfp &= GFP_RECLAIM_MASK;
2222
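/*
 * xas_try_split() can only reduce the entry order by a bounded step,
 * so walk down from the current order towards order 0, re-storing the
 * individual swap entries covered by each intermediate split.
 */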
2223 for (;;) {
2224 void *old = NULL;
2225 int cur_order;
2226 pgoff_t swap_index;
2227
2228 xas_lock_irq(&xas);
2229 old = xas_load(&xas);
2230 if (!xa_is_value(old) || swp_to_radix_entry(swap) != old) {
2231 xas_set_err(&xas, -EEXIST);
2232 goto unlock;
2233 }
2234
2235 cur_order = xas_get_order(&xas);
2236 if (!cur_order)
2237 goto unlock;
2238
2239 /* Try to split large swap entry in pagecache */
2240 swap_index = round_down(index, 1 << cur_order);
2241 split_order = xas_try_split_min_order(cur_order);
2242
2243 while (cur_order > 0) {
2244 pgoff_t aligned_index =
2245 round_down(index, 1 << cur_order);
2246 pgoff_t swap_offset = aligned_index - swap_index;
2247
2248 xas_set_order(&xas, index, split_order);
2249 xas_try_split(&xas, old, cur_order);
2250 if (xas_error(&xas))
2251 goto unlock;
2252
2253 /*
2254 * Re-set the swap entry after splitting, and the swap
2255 * offset of the original large entry must be continuous.
2256 */
2257 for (i = 0; i < 1 << cur_order;
2258 i += (1 << split_order)) {
2259 swp_entry_t tmp;
2260
2261 tmp = swp_entry(swp_type(swap),
2262 swp_offset(swap) + swap_offset +
2263 i);
2264 __xa_store(&mapping->i_pages, aligned_index + i,
2265 swp_to_radix_entry(tmp), 0);
2266 }
2267 cur_order = split_order;
2268 split_order = xas_try_split_min_order(split_order);
2269 }
2270
2271 unlock:
2272 xas_unlock_irq(&xas);
2273
2274 if (!xas_nomem(&xas, gfp))
2275 break;
2276 }
2277
2278 if (xas_error(&xas))
2279 return xas_error(&xas);
2280
2281 return 0;
2282 }
2283
2284 /*
2285 * Swap in the folio pointed to by *foliop.
2286 * Caller has to make sure that *foliop contains a valid swapped folio.
2287 * Returns 0 and the folio in *foliop on success. On failure, returns the
2288 * error code and NULL in *foliop.
2289 */
2290 static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
2291 struct folio **foliop, enum sgp_type sgp,
2292 gfp_t gfp, struct vm_area_struct *vma,
2293 vm_fault_t *fault_type)
2294 {
2295 struct address_space *mapping = inode->i_mapping;
2296 struct mm_struct *fault_mm = vma ? vma->vm_mm : NULL;
2297 struct shmem_inode_info *info = SHMEM_I(inode);
2298 swp_entry_t swap;
2299 softleaf_t index_entry;
2300 struct swap_info_struct *si;
2301 struct folio *folio = NULL;
2302 int error, nr_pages, order;
2303 pgoff_t offset;
2304
2305 VM_BUG_ON(!*foliop || !xa_is_value(*foliop));
2306 index_entry = radix_to_swp_entry(*foliop);
2307 swap = index_entry;
2308 *foliop = NULL;
2309
2310 if (softleaf_is_poison_marker(index_entry))
2311 return -EIO;
2312
2313 si = get_swap_device(index_entry);
2314 order = shmem_confirm_swap(mapping, index, index_entry);
2315 if (unlikely(!si)) {
2316 if (order < 0)
2317 return -EEXIST;
2318 else
2319 return -EINVAL;
2320 }
2321 if (unlikely(order < 0)) {
2322 put_swap_device(si);
2323 return -EEXIST;
2324 }
2325
2326 /* index may point to the middle of a large entry, get the sub entry */
2327 if (order) {
2328 offset = index - round_down(index, 1 << order);
2329 swap = swp_entry(swp_type(swap), swp_offset(swap) + offset);
2330 }
2331
2332 /* Look it up and read it in.. */
2333 folio = swap_cache_get_folio(swap);
2334 if (!folio) {
2335 if (data_race(si->flags & SWP_SYNCHRONOUS_IO)) {
2336 /* Direct swapin skipping swap cache & readahead */
2337 folio = shmem_swap_alloc_folio(inode, vma, index,
2338 index_entry, order, gfp);
2339 if (IS_ERR(folio)) {
2340 error = PTR_ERR(folio);
2341 folio = NULL;
2342 goto failed;
2343 }
2344 } else {
2345 /* Cached swapin only supports order 0 folio */
2346 folio = shmem_swapin_cluster(swap, gfp, info, index);
2347 if (!folio) {
2348 error = -ENOMEM;
2349 goto failed;
2350 }
2351 }
2352 if (fault_type) {
2353 *fault_type |= VM_FAULT_MAJOR;
2354 count_vm_event(PGMAJFAULT);
2355 count_memcg_event_mm(fault_mm, PGMAJFAULT);
2356 }
2357 } else {
2358 swap_update_readahead(folio, NULL, 0);
2359 }
2360
2361 if (order > folio_order(folio)) {
2362 /*
2363 * Swapin may get smaller folios for various reasons:
2364 * it may fall back to order 0 due to memory pressure or a race,
2365 * swap readahead may swap in order 0 folios into swapcache
2366 * asynchronously, while the shmem mapping can still store
2367 * large swap entries. In such cases, we should split the
2368 * large swap entry to prevent possible data corruption.
2369 */
2370 error = shmem_split_large_entry(inode, index, index_entry, gfp);
2371 if (error)
2372 goto failed_nolock;
2373 }
2374
2375 /*
2376 * If the folio is large, round down swap and index by folio size.
2377 * No matter what race occurs, the swap layer ensures we either get
2378 * a valid folio that has its swap entry aligned by size, or a
2379 * temporarily invalid one which we'll abort very soon and retry.
2380 *
2381 * shmem_add_to_page_cache ensures the whole range contains expected
2382 * entries and prevents any corruption, so any race split is fine
2383 * too, it will succeed as long as the entries are still there.
2384 */
2385 nr_pages = folio_nr_pages(folio);
2386 if (nr_pages > 1) {
2387 swap.val = round_down(swap.val, nr_pages);
2388 index = round_down(index, nr_pages);
2389 }
2390
2391 /*
2392 * We have to do this with the folio locked to prevent races.
2393 * The shmem_confirm_swap below only checks if the first swap
2394 * entry matches the folio, that's enough to ensure the folio
2395 * is not used outside of shmem, as shmem swap entries
2396 * and swap cache folios are never partially freed.
2397 */
2398 folio_lock(folio);
2399 if (!folio_matches_swap_entry(folio, swap) ||
2400 shmem_confirm_swap(mapping, index, swap) < 0) {
2401 error = -EEXIST;
2402 goto unlock;
2403 }
2404 if (!folio_test_uptodate(folio)) {
2405 error = -EIO;
2406 goto failed;
2407 }
2408 folio_wait_writeback(folio);
2409
2410 /*
2411 * Some architectures may have to restore extra metadata to the
2412 * folio after reading from swap.
2413 */
2414 arch_swap_restore(folio_swap(swap, folio), folio);
2415
2416 if (shmem_should_replace_folio(folio, gfp)) {
2417 error = shmem_replace_folio(&folio, gfp, info, index, vma);
2418 if (error)
2419 goto failed;
2420 }
2421
2422 error = shmem_add_to_page_cache(folio, mapping, index,
2423 swp_to_radix_entry(swap), gfp);
2424 if (error)
2425 goto failed;
2426
2427 shmem_recalc_inode(inode, 0, -nr_pages);
2428
2429 if (sgp == SGP_WRITE)
2430 folio_mark_accessed(folio);
2431
2432 folio_put_swap(folio, NULL);
2433 swap_cache_del_folio(folio);
2434 folio_mark_dirty(folio);
2435 put_swap_device(si);
2436
2437 *foliop = folio;
2438 return 0;
2439 failed:
2440 if (shmem_confirm_swap(mapping, index, swap) < 0)
2441 error = -EEXIST;
2442 if (error == -EIO)
2443 shmem_set_folio_swapin_error(inode, index, folio, swap);
2444 unlock:
2445 if (folio)
2446 folio_unlock(folio);
2447 failed_nolock:
2448 if (folio)
2449 folio_put(folio);
2450 put_swap_device(si);
2451
2452 return error;
2453 }
2454
2455 /*
2456 * shmem_get_folio_gfp - find page in cache, or get from swap, or allocate
2457 *
2458 * If we allocate a new one we do not mark it dirty. That's up to the
2459 * vm. If we swap it in we mark it dirty since we also free the swap
2460 * entry since a page cannot live in both the swap and page cache.
2461 *
2462 * vmf and fault_type are only supplied by shmem_fault: otherwise they are NULL.
2463 */
2464 static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
2465 loff_t write_end, struct folio **foliop, enum sgp_type sgp,
2466 gfp_t gfp, struct vm_fault *vmf, vm_fault_t *fault_type)
2467 {
2468 struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
2469 struct mm_struct *fault_mm;
2470 struct folio *folio;
2471 int error;
2472 bool alloced;
2473 unsigned long orders = 0;
2474
2475 if (WARN_ON_ONCE(!shmem_mapping(inode->i_mapping)))
2476 return -EINVAL;
2477
2478 if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
2479 return -EFBIG;
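/*
 * Restart point: -EEXIST from the swap-in or allocation paths below
 * means we raced with another thread at this index, so redo the lookup.
 */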
2480 repeat:
2481 if (sgp <= SGP_CACHE &&
2482 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode))
2483 return -EINVAL;
2484
2485 alloced = false;
2486 fault_mm = vma ? vma->vm_mm : NULL;
2487
2488 folio = filemap_get_entry(inode->i_mapping, index);
2489 if (folio && vma && userfaultfd_minor(vma)) {
2490 if (!xa_is_value(folio))
2491 folio_put(folio);
2492 *fault_type = handle_userfault(vmf, VM_UFFD_MINOR);
2493 return 0;
2494 }
2495
2496 if (xa_is_value(folio)) {
2497 error = shmem_swapin_folio(inode, index, &folio,
2498 sgp, gfp, vma, fault_type);
2499 if (error == -EEXIST)
2500 goto repeat;
2501
2502 *foliop = folio;
2503 return error;
2504 }
2505
2506 if (folio) {
2507 folio_lock(folio);
2508
2509 /* Has the folio been truncated or swapped out? */
2510 if (unlikely(folio->mapping != inode->i_mapping)) {
2511 folio_unlock(folio);
2512 folio_put(folio);
2513 goto repeat;
2514 }
2515 if (sgp == SGP_WRITE)
2516 folio_mark_accessed(folio);
2517 if (folio_test_uptodate(folio))
2518 goto out;
2519 /* fallocated folio */
2520 if (sgp != SGP_READ)
2521 goto clear;
2522 folio_unlock(folio);
2523 folio_put(folio);
2524 }
2525
2526 /*
2527 * SGP_READ: succeed on hole, with NULL folio, letting caller zero.
2528 * SGP_NOALLOC: fail on hole, with NULL folio, letting caller fail.
2529 */
2530 *foliop = NULL;
2531 if (sgp == SGP_READ)
2532 return 0;
2533 if (sgp == SGP_NOALLOC)
2534 return -ENOENT;
2535
2536 /*
2537 * Fast cache lookup and swap lookup did not find it: allocate.
2538 */
2539
2540 if (vma && userfaultfd_missing(vma)) {
2541 *fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
2542 return 0;
2543 }
2544
2545 /* Find hugepage orders that are allowed for anonymous shmem and tmpfs. */
2546 orders = shmem_allowable_huge_orders(inode, vma, index, write_end, false);
2547 if (orders > 0) {
2548 gfp_t huge_gfp;
2549
2550 huge_gfp = vma_thp_gfp_mask(vma);
2551 huge_gfp = limit_gfp_mask(huge_gfp, gfp);
2552 folio = shmem_alloc_and_add_folio(vmf, huge_gfp,
2553 inode, index, fault_mm, orders);
2554 if (!IS_ERR(folio)) {
2555 if (folio_test_pmd_mappable(folio))
2556 count_vm_event(THP_FILE_ALLOC);
2557 count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_ALLOC);
2558 goto alloced;
2559 }
2560 if (PTR_ERR(folio) == -EEXIST)
2561 goto repeat;
2562 }
2563
2564 folio = shmem_alloc_and_add_folio(vmf, gfp, inode, index, fault_mm, 0);
2565 if (IS_ERR(folio)) {
2566 error = PTR_ERR(folio);
2567 if (error == -EEXIST)
2568 goto repeat;
2569 folio = NULL;
2570 goto unlock;
2571 }
2572
2573 alloced:
2574 alloced = true;
2575 if (folio_test_large(folio) &&
2576 DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
2577 folio_next_index(folio)) {
2578 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2579 struct shmem_inode_info *info = SHMEM_I(inode);
2580 /*
2581 * Part of the large folio is beyond i_size: subject
2582 * to shrink under memory pressure.
2583 */
2584 spin_lock(&sbinfo->shrinklist_lock);
2585 /*
2586 * _careful to defend against unlocked access to
2587 * ->shrink_list in shmem_unused_huge_shrink()
2588 */
2589 if (list_empty_careful(&info->shrinklist)) {
2590 list_add_tail(&info->shrinklist,
2591 &sbinfo->shrinklist);
2592 sbinfo->shrinklist_len++;
2593 }
2594 spin_unlock(&sbinfo->shrinklist_lock);
2595 }
2596
2597 if (sgp == SGP_WRITE)
2598 folio_set_referenced(folio);
2599 /*
2600 * Let SGP_FALLOC use the SGP_WRITE optimization on a new folio.
2601 */
2602 if (sgp == SGP_FALLOC)
2603 sgp = SGP_WRITE;
2604 clear:
2605 /*
2606 * Let SGP_WRITE caller clear ends if write does not fill folio;
2607 * but SGP_FALLOC on a folio fallocated earlier must initialize
2608 * it now, lest undo on failure cancel our earlier guarantee.
2609 */
2610 if (sgp != SGP_WRITE && !folio_test_uptodate(folio)) {
2611 long i, n = folio_nr_pages(folio);
2612
2613 for (i = 0; i < n; i++)
2614 clear_highpage(folio_page(folio, i));
2615 flush_dcache_folio(folio);
2616 folio_mark_uptodate(folio);
2617 }
2618
2619 /* Perhaps the file has been truncated since we checked */
2620 if (sgp <= SGP_CACHE &&
2621 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
2622 error = -EINVAL;
2623 goto unlock;
2624 }
2625 out:
2626 *foliop = folio;
2627 return 0;
2628
2629 /*
2630 * Error recovery.
2631 */
2632 unlock:
2633 if (alloced)
2634 filemap_remove_folio(folio);
2635 shmem_recalc_inode(inode, 0, 0);
2636 if (folio) {
2637 folio_unlock(folio);
2638 folio_put(folio);
2639 }
2640 return error;
2641 }
2642
2643 /**
2644 * shmem_get_folio - find, and lock a shmem folio.
2645 * @inode: inode to search
2646 * @index: the page index.
2647 * @write_end: end of a write, could extend inode size
2648 * @foliop: pointer to the folio if found
2649 * @sgp: SGP_* flags to control behavior
2650 *
2651 * Looks up the page cache entry at @inode & @index. If a folio is
2652 * present, it is returned locked with an increased refcount.
2653 *
2654 * If the caller modifies data in the folio, it must call folio_mark_dirty()
2655 * before unlocking the folio to ensure that the folio is not reclaimed.
2656 * There is no need to reserve space before calling folio_mark_dirty().
2657 *
2658 * When no folio is found, the behavior depends on @sgp:
2659 * - for SGP_READ, *@foliop is %NULL and 0 is returned
2660 * - for SGP_NOALLOC, *@foliop is %NULL and -ENOENT is returned
2661 * - for all other flags a new folio is allocated, inserted into the
2662 * page cache and returned locked in @foliop.
2663 *
2664 * Context: May sleep.
2665 * Return: 0 if successful, else a negative error code.
2666 */
2667 int shmem_get_folio(struct inode *inode, pgoff_t index, loff_t write_end,
2668 struct folio **foliop, enum sgp_type sgp)
2669 {
2670 return shmem_get_folio_gfp(inode, index, write_end, foliop, sgp,
2671 mapping_gfp_mask(inode->i_mapping), NULL, NULL);
2672 }
2673 EXPORT_SYMBOL_GPL(shmem_get_folio);
2674
2675 /*
2676 * This is like autoremove_wake_function, but it removes the wait queue
2677 * entry unconditionally - even if something else had already woken the
2678 * target.
2679 */
2680 static int synchronous_wake_function(wait_queue_entry_t *wait,
2681 unsigned int mode, int sync, void *key)
2682 {
2683 int ret = default_wake_function(wait, mode, sync, key);
2684 list_del_init(&wait->entry);
2685 return ret;
2686 }
2687
2688 /*
2689 * Trinity finds that probing a hole which tmpfs is punching can
2690 * prevent the hole-punch from ever completing: which in turn
2691 * locks writers out with its hold on i_rwsem. So refrain from
2692 * faulting pages into the hole while it's being punched. Although
2693 * shmem_undo_range() does remove the additions, it may be unable to
2694 * keep up, as each new page needs its own unmap_mapping_range() call,
2695 * and the i_mmap tree grows ever slower to scan if new vmas are added.
2696 *
2697 * It does not matter if we sometimes reach this check just before the
2698 * hole-punch begins, so that one fault then races with the punch:
2699 * we just need to make racing faults a rare case.
2700 *
2701 * The implementation below would be much simpler if we just used a
2702 * standard mutex or completion: but we cannot take i_rwsem in fault,
2703 * and bloating every shmem inode for this unlikely case would be sad.
2704 */
2705 static vm_fault_t shmem_falloc_wait(struct vm_fault *vmf, struct inode *inode)
2706 {
2707 struct shmem_falloc *shmem_falloc;
2708 struct file *fpin = NULL;
2709 vm_fault_t ret = 0;
2710
2711 spin_lock(&inode->i_lock);
2712 shmem_falloc = inode->i_private;
2713 if (shmem_falloc &&
2714 shmem_falloc->waitq &&
2715 vmf->pgoff >= shmem_falloc->start &&
2716 vmf->pgoff < shmem_falloc->next) {
2717 wait_queue_head_t *shmem_falloc_waitq;
2718 DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
2719
2720 ret = VM_FAULT_NOPAGE;
2721 fpin = maybe_unlock_mmap_for_io(vmf, NULL);
2722 shmem_falloc_waitq = shmem_falloc->waitq;
2723 prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
2724 TASK_UNINTERRUPTIBLE);
2725 spin_unlock(&inode->i_lock);
2726 schedule();
2727
2728 /*
2729 * shmem_falloc_waitq points into the shmem_fallocate()
2730 * stack of the hole-punching task: shmem_falloc_waitq
2731 * is usually invalid by the time we reach here, but
2732 * finish_wait() does not dereference it in that case;
2733 * though i_lock needed lest racing with wake_up_all().
2734 */
2735 spin_lock(&inode->i_lock);
2736 finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
2737 }
2738 spin_unlock(&inode->i_lock);
2739 if (fpin) {
2740 fput(fpin);
2741 ret = VM_FAULT_RETRY;
2742 }
2743 return ret;
2744 }
2745
2746 static vm_fault_t shmem_fault(struct vm_fault *vmf)
2747 {
2748 struct inode *inode = file_inode(vmf->vma->vm_file);
2749 gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
2750 struct folio *folio = NULL;
2751 vm_fault_t ret = 0;
2752 int err;
2753
2754 /*
2755 * Trinity finds that probing a hole which tmpfs is punching can
2756 * prevent the hole-punch from ever completing: noted in i_private.
2757 */
2758 if (unlikely(inode->i_private)) {
2759 ret = shmem_falloc_wait(vmf, inode);
2760 if (ret)
2761 return ret;
2762 }
2763
2764 WARN_ON_ONCE(vmf->page != NULL);
2765 err = shmem_get_folio_gfp(inode, vmf->pgoff, 0, &folio, SGP_CACHE,
2766 gfp, vmf, &ret);
2767 if (err)
2768 return vmf_error(err);
2769 if (folio) {
2770 vmf->page = folio_file_page(folio, vmf->pgoff);
2771 ret |= VM_FAULT_LOCKED;
2772 }
2773 return ret;
2774 }
2775
2776 unsigned long shmem_get_unmapped_area(struct file *file,
2777 unsigned long uaddr, unsigned long len,
2778 unsigned long pgoff, unsigned long flags)
2779 {
2780 unsigned long addr;
2781 unsigned long offset;
2782 unsigned long inflated_len;
2783 unsigned long inflated_addr;
2784 unsigned long inflated_offset;
2785 unsigned long hpage_size;
2786
2787 if (len > TASK_SIZE)
2788 return -ENOMEM;
2789
2790 addr = mm_get_unmapped_area(file, uaddr, len, pgoff, flags);
2791
2792 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
2793 return addr;
2794 if (IS_ERR_VALUE(addr))
2795 return addr;
2796 if (addr & ~PAGE_MASK)
2797 return addr;
2798 if (addr > TASK_SIZE - len)
2799 return addr;
2800
2801 if (shmem_huge == SHMEM_HUGE_DENY)
2802 return addr;
2803 if (flags & MAP_FIXED)
2804 return addr;
2805 /*
2806 * Our priority is to support MAP_SHARED mapped hugely;
2807 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
2808 * But if caller specified an address hint and we allocated area there
2809 * successfully, respect that as before.
2810 */
2811 if (uaddr == addr)
2812 return addr;
2813
2814 hpage_size = HPAGE_PMD_SIZE;
2815 if (shmem_huge != SHMEM_HUGE_FORCE) {
2816 struct super_block *sb;
2817 unsigned long __maybe_unused hpage_orders;
2818 int order = 0;
2819
2820 if (file) {
2821 VM_BUG_ON(file->f_op != &shmem_file_operations);
2822 sb = file_inode(file)->i_sb;
2823 } else {
2824 /*
2825 * Called directly from mm/mmap.c, or drivers/char/mem.c
2826 * for "/dev/zero", to create a shared anonymous object.
2827 */
2828 if (IS_ERR(shm_mnt))
2829 return addr;
2830 sb = shm_mnt->mnt_sb;
2831
2832 /*
2833 * Find the highest mTHP order used for anonymous shmem to
2834 * provide a suitable alignment address.
2835 */
2836 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2837 hpage_orders = READ_ONCE(huge_shmem_orders_always);
2838 hpage_orders |= READ_ONCE(huge_shmem_orders_within_size);
2839 hpage_orders |= READ_ONCE(huge_shmem_orders_madvise);
2840 if (SHMEM_SB(sb)->huge != SHMEM_HUGE_NEVER)
2841 hpage_orders |= READ_ONCE(huge_shmem_orders_inherit);
2842
2843 if (hpage_orders > 0) {
2844 order = highest_order(hpage_orders);
2845 hpage_size = PAGE_SIZE << order;
2846 }
2847 #endif
2848 }
2849 if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER && !order)
2850 return addr;
2851 }
2852
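/*
 * From here on, try to return an address whose offset within a huge
 * page matches pgoff's offset: request an area inflated by almost one
 * huge page, then slide the returned address forward so that huge
 * mappings of the object can be naturally aligned.
 */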
2853 if (len < hpage_size)
2854 return addr;
2855
2856 offset = (pgoff << PAGE_SHIFT) & (hpage_size - 1);
2857 if (offset && offset + len < 2 * hpage_size)
2858 return addr;
2859 if ((addr & (hpage_size - 1)) == offset)
2860 return addr;
2861
2862 inflated_len = len + hpage_size - PAGE_SIZE;
2863 if (inflated_len > TASK_SIZE)
2864 return addr;
2865 if (inflated_len < len)
2866 return addr;
2867
2868 inflated_addr = mm_get_unmapped_area(NULL, uaddr, inflated_len, 0, flags);
2869 if (IS_ERR_VALUE(inflated_addr))
2870 return addr;
2871 if (inflated_addr & ~PAGE_MASK)
2872 return addr;
2873
2874 inflated_offset = inflated_addr & (hpage_size - 1);
2875 inflated_addr += offset - inflated_offset;
2876 if (inflated_offset > offset)
2877 inflated_addr += hpage_size;
2878
2879 if (inflated_addr > TASK_SIZE - len)
2880 return addr;
2881 return inflated_addr;
2882 }
2883
2884 #ifdef CONFIG_NUMA
2885 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
2886 {
2887 struct inode *inode = file_inode(vma->vm_file);
2888 return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
2889 }
2890
2891 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2892 unsigned long addr, pgoff_t *ilx)
2893 {
2894 struct inode *inode = file_inode(vma->vm_file);
2895 pgoff_t index;
2896
2897 /*
2898 * Bias interleave by inode number to distribute better across nodes;
2899 * but this interface is independent of which page order is used, so
2900 * supplies only that bias, letting caller apply the offset (adjusted
2901 * by page order, as in shmem_get_pgoff_policy() and get_vma_policy()).
2902 */
2903 *ilx = inode->i_ino;
2904 index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2905 return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
2906 }
2907
2908 static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info,
2909 pgoff_t index, unsigned int order, pgoff_t *ilx)
2910 {
2911 struct mempolicy *mpol;
2912
2913 /* Bias interleave by inode number to distribute better across nodes */
2914 *ilx = info->vfs_inode.i_ino + (index >> order);
2915
2916 mpol = mpol_shared_policy_lookup(&info->policy, index);
2917 return mpol ? mpol : get_task_policy(current);
2918 }
2919 #else
2920 static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info,
2921 pgoff_t index, unsigned int order, pgoff_t *ilx)
2922 {
2923 *ilx = 0;
2924 return NULL;
2925 }
2926 #endif /* CONFIG_NUMA */
2927
2928 int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
2929 {
2930 struct inode *inode = file_inode(file);
2931 struct shmem_inode_info *info = SHMEM_I(inode);
2932 int retval = -ENOMEM;
2933
2934 /*
2935 * What serializes the accesses to info->flags?
2936 * ipc_lock_object() when called from shmctl_do_lock(),
2937 * no serialization needed when called from shm_destroy().
2938 */
2939 if (lock && !(info->flags & SHMEM_F_LOCKED)) {
2940 if (!user_shm_lock(inode->i_size, ucounts))
2941 goto out_nomem;
2942 info->flags |= SHMEM_F_LOCKED;
2943 mapping_set_unevictable(file->f_mapping);
2944 }
2945 if (!lock && (info->flags & SHMEM_F_LOCKED) && ucounts) {
2946 user_shm_unlock(inode->i_size, ucounts);
2947 info->flags &= ~SHMEM_F_LOCKED;
2948 mapping_clear_unevictable(file->f_mapping);
2949 }
2950 retval = 0;
2951
2952 out_nomem:
2953 return retval;
2954 }
2955
2956 static int shmem_mmap_prepare(struct vm_area_desc *desc)
2957 {
2958 struct file *file = desc->file;
2959 struct inode *inode = file_inode(file);
2960
2961 file_accessed(file);
2962 /* This is anonymous shared memory if it is unlinked at the time of mmap */
2963 if (inode->i_nlink)
2964 desc->vm_ops = &shmem_vm_ops;
2965 else
2966 desc->vm_ops = &shmem_anon_vm_ops;
2967 return 0;
2968 }
2969
2970 static int shmem_file_open(struct inode *inode, struct file *file)
2971 {
2972 file->f_mode |= FMODE_CAN_ODIRECT;
2973 return generic_file_open(inode, file);
2974 }
2975
2976 #ifdef CONFIG_TMPFS_XATTR
2977 static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
2978
2979 #if IS_ENABLED(CONFIG_UNICODE)
2980 /*
2981 * shmem_inode_casefold_flags - Deal with casefold file attribute flag
2982 *
2983 * The casefold file attribute needs some special checks. It can only be added to
2984 * an empty dir, and can't be removed from a non-empty dir.
2985 */
2986 static int shmem_inode_casefold_flags(struct inode *inode, unsigned int fsflags,
2987 struct dentry *dentry, unsigned int *i_flags)
2988 {
2989 unsigned int old = inode->i_flags;
2990 struct super_block *sb = inode->i_sb;
2991
2992 if (fsflags & FS_CASEFOLD_FL) {
2993 if (!(old & S_CASEFOLD)) {
2994 if (!sb->s_encoding)
2995 return -EOPNOTSUPP;
2996
2997 if (!S_ISDIR(inode->i_mode))
2998 return -ENOTDIR;
2999
3000 if (dentry && !simple_empty(dentry))
3001 return -ENOTEMPTY;
3002 }
3003
3004 *i_flags = *i_flags | S_CASEFOLD;
3005 } else if (old & S_CASEFOLD) {
3006 if (dentry && !simple_empty(dentry))
3007 return -ENOTEMPTY;
3008 }
3009
3010 return 0;
3011 }
3012 #else
3013 static int shmem_inode_casefold_flags(struct inode *inode, unsigned int fsflags,
3014 struct dentry *dentry, unsigned int *i_flags)
3015 {
3016 if (fsflags & FS_CASEFOLD_FL)
3017 return -EOPNOTSUPP;
3018
3019 return 0;
3020 }
3021 #endif
3022
3023 /*
3024 * chattr's fsflags are unrelated to extended attributes,
3025 * but tmpfs has chosen to enable them under the same config option.
3026 */
3027 static int shmem_set_inode_flags(struct inode *inode, unsigned int fsflags, struct dentry *dentry)
3028 {
3029 unsigned int i_flags = 0;
3030 int ret;
3031
3032 ret = shmem_inode_casefold_flags(inode, fsflags, dentry, &i_flags);
3033 if (ret)
3034 return ret;
3035
3036 if (fsflags & FS_NOATIME_FL)
3037 i_flags |= S_NOATIME;
3038 if (fsflags & FS_APPEND_FL)
3039 i_flags |= S_APPEND;
3040 if (fsflags & FS_IMMUTABLE_FL)
3041 i_flags |= S_IMMUTABLE;
3042 /*
3043 * But FS_NODUMP_FL does not require any action in i_flags.
3044 */
3045 inode_set_flags(inode, i_flags, S_NOATIME | S_APPEND | S_IMMUTABLE | S_CASEFOLD);
3046
3047 return 0;
3048 }
3049 #else
3050 static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags, struct dentry *dentry)
3051 {
3052 }
3053 #define shmem_initxattrs NULL
3054 #endif
3055
3056 static struct offset_ctx *shmem_get_offset_ctx(struct inode *inode)
3057 {
3058 return &SHMEM_I(inode)->dir_offsets;
3059 }
3060
3061 static struct inode *__shmem_get_inode(struct mnt_idmap *idmap,
3062 struct super_block *sb,
3063 struct inode *dir, umode_t mode,
3064 dev_t dev, vma_flags_t flags)
3065 {
3066 struct inode *inode;
3067 struct shmem_inode_info *info;
3068 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3069 ino_t ino;
3070 int err;
3071
3072 err = shmem_reserve_inode(sb, &ino);
3073 if (err)
3074 return ERR_PTR(err);
3075
3076 inode = new_inode(sb);
3077 if (!inode) {
3078 shmem_free_inode(sb, 0);
3079 return ERR_PTR(-ENOSPC);
3080 }
3081
3082 inode->i_ino = ino;
3083 inode_init_owner(idmap, inode, dir, mode);
3084 inode->i_blocks = 0;
3085 simple_inode_init_ts(inode);
3086 inode->i_generation = get_random_u32();
3087 info = SHMEM_I(inode);
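/*
 * vfs_inode is the last member of struct shmem_inode_info, so this
 * clears every shmem-private field while leaving the VFS inode intact.
 */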
3088 memset(info, 0, (char *)inode - (char *)info);
3089 spin_lock_init(&info->lock);
3090 atomic_set(&info->stop_eviction, 0);
3091 info->seals = F_SEAL_SEAL;
3092 info->flags = vma_flags_test(&flags, VMA_NORESERVE_BIT)
3093 ? SHMEM_F_NORESERVE : 0;
3094 info->i_crtime = inode_get_mtime(inode);
3095 info->fsflags = (dir == NULL) ? 0 :
3096 SHMEM_I(dir)->fsflags & SHMEM_FL_INHERITED;
3097 if (info->fsflags)
3098 shmem_set_inode_flags(inode, info->fsflags, NULL);
3099 INIT_LIST_HEAD(&info->shrinklist);
3100 INIT_LIST_HEAD(&info->swaplist);
3101 cache_no_acl(inode);
3102 if (sbinfo->noswap)
3103 mapping_set_unevictable(inode->i_mapping);
3104
3105 /* Don't consider 'deny' for emergencies and 'force' for testing */
3106 if (sbinfo->huge)
3107 mapping_set_large_folios(inode->i_mapping);
3108
3109 switch (mode & S_IFMT) {
3110 default:
3111 inode->i_op = &shmem_special_inode_operations;
3112 init_special_inode(inode, mode, dev);
3113 break;
3114 case S_IFREG:
3115 inode->i_mapping->a_ops = &shmem_aops;
3116 inode->i_op = &shmem_inode_operations;
3117 inode->i_fop = &shmem_file_operations;
3118 mpol_shared_policy_init(&info->policy,
3119 shmem_get_sbmpol(sbinfo));
3120 break;
3121 case S_IFDIR:
3122 inc_nlink(inode);
3123 /* Some things misbehave if size == 0 on a directory */
3124 inode->i_size = 2 * BOGO_DIRENT_SIZE;
3125 inode->i_op = &shmem_dir_inode_operations;
3126 inode->i_fop = &simple_offset_dir_operations;
3127 simple_offset_init(shmem_get_offset_ctx(inode));
3128 break;
3129 case S_IFLNK:
3130 /*
3131 * Must not load anything in the rbtree,
3132 * mpol_free_shared_policy will not be called.
3133 */
3134 mpol_shared_policy_init(&info->policy, NULL);
3135 break;
3136 }
3137
3138 lockdep_annotate_inode_mutex_key(inode);
3139 return inode;
3140 }
3141
3142 #ifdef CONFIG_TMPFS_QUOTA
3143 static struct inode *shmem_get_inode(struct mnt_idmap *idmap,
3144 struct super_block *sb, struct inode *dir,
3145 umode_t mode, dev_t dev, vma_flags_t flags)
3146 {
3147 int err;
3148 struct inode *inode;
3149
3150 inode = __shmem_get_inode(idmap, sb, dir, mode, dev, flags);
3151 if (IS_ERR(inode))
3152 return inode;
3153
3154 err = dquot_initialize(inode);
3155 if (err)
3156 goto errout;
3157
3158 err = dquot_alloc_inode(inode);
3159 if (err) {
3160 dquot_drop(inode);
3161 goto errout;
3162 }
3163 return inode;
3164
3165 errout:
3166 inode->i_flags |= S_NOQUOTA;
3167 iput(inode);
3168 return ERR_PTR(err);
3169 }
3170 #else
3171 static struct inode *shmem_get_inode(struct mnt_idmap *idmap,
3172 struct super_block *sb, struct inode *dir,
3173 umode_t mode, dev_t dev, vma_flags_t flags)
3174 {
3175 return __shmem_get_inode(idmap, sb, dir, mode, dev, flags);
3176 }
3177 #endif /* CONFIG_TMPFS_QUOTA */
3178
3179 #ifdef CONFIG_USERFAULTFD
3180 int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
3181 struct vm_area_struct *dst_vma,
3182 unsigned long dst_addr,
3183 unsigned long src_addr,
3184 uffd_flags_t flags,
3185 struct folio **foliop)
3186 {
3187 struct inode *inode = file_inode(dst_vma->vm_file);
3188 struct shmem_inode_info *info = SHMEM_I(inode);
3189 struct address_space *mapping = inode->i_mapping;
3190 gfp_t gfp = mapping_gfp_mask(mapping);
3191 pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
3192 void *page_kaddr;
3193 struct folio *folio;
3194 int ret;
3195 pgoff_t max_off;
3196
3197 if (shmem_inode_acct_blocks(inode, 1)) {
3198 /*
3199 * We may have got a page, returned -ENOENT triggering a retry,
3200 * and now we find ourselves with -ENOMEM. Release the page, to
3201 * avoid a BUG_ON in our caller.
3202 */
3203 if (unlikely(*foliop)) {
3204 folio_put(*foliop);
3205 *foliop = NULL;
3206 }
3207 return -ENOMEM;
3208 }
3209
3210 if (!*foliop) {
3211 ret = -ENOMEM;
3212 folio = shmem_alloc_folio(gfp, 0, info, pgoff);
3213 if (!folio)
3214 goto out_unacct_blocks;
3215
3216 if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) {
3217 page_kaddr = kmap_local_folio(folio, 0);
3218 /*
3219 * The read mmap_lock is held here. Despite the
3220 * mmap_lock being read recursive a deadlock is still
3221 * possible if a writer has taken a lock. For example:
3222 *
3223 * process A thread 1 takes read lock on own mmap_lock
3224 * process A thread 2 calls mmap, blocks taking write lock
3225 * process B thread 1 takes page fault, read lock on own mmap lock
3226 * process B thread 2 calls mmap, blocks taking write lock
3227 * process A thread 1 blocks taking read lock on process B
3228 * process B thread 1 blocks taking read lock on process A
3229 *
3230 * Disable page faults to prevent potential deadlock
3231 * and retry the copy outside the mmap_lock.
3232 */
3233 pagefault_disable();
3234 ret = copy_from_user(page_kaddr,
3235 (const void __user *)src_addr,
3236 PAGE_SIZE);
3237 pagefault_enable();
3238 kunmap_local(page_kaddr);
3239
3240 /* fallback to copy_from_user outside mmap_lock */
3241 if (unlikely(ret)) {
3242 *foliop = folio;
3243 ret = -ENOENT;
3244 /* don't free the page */
3245 goto out_unacct_blocks;
3246 }
3247
3248 flush_dcache_folio(folio);
3249 } else { /* ZEROPAGE */
3250 clear_user_highpage(&folio->page, dst_addr);
3251 }
3252 } else {
3253 folio = *foliop;
3254 VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
3255 *foliop = NULL;
3256 }
3257
3258 VM_BUG_ON(folio_test_locked(folio));
3259 VM_BUG_ON(folio_test_swapbacked(folio));
3260 __folio_set_locked(folio);
3261 __folio_set_swapbacked(folio);
3262 __folio_mark_uptodate(folio);
3263
3264 ret = -EFAULT;
3265 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3266 if (unlikely(pgoff >= max_off))
3267 goto out_release;
3268
3269 ret = mem_cgroup_charge(folio, dst_vma->vm_mm, gfp);
3270 if (ret)
3271 goto out_release;
3272 ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL, gfp);
3273 if (ret)
3274 goto out_release;
3275
3276 ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
3277 &folio->page, true, flags);
3278 if (ret)
3279 goto out_delete_from_cache;
3280
3281 shmem_recalc_inode(inode, 1, 0);
3282 folio_unlock(folio);
3283 return 0;
3284 out_delete_from_cache:
3285 filemap_remove_folio(folio);
3286 out_release:
3287 folio_unlock(folio);
3288 folio_put(folio);
3289 out_unacct_blocks:
3290 shmem_inode_unacct_blocks(inode, 1);
3291 return ret;
3292 }
3293 #endif /* CONFIG_USERFAULTFD */
3294
3295 #ifdef CONFIG_TMPFS
3296 static const struct inode_operations shmem_symlink_inode_operations;
3297 static const struct inode_operations shmem_short_symlink_operations;
3298
3299 static int
3300 shmem_write_begin(const struct kiocb *iocb, struct address_space *mapping,
3301 loff_t pos, unsigned len,
3302 struct folio **foliop, void **fsdata)
3303 {
3304 struct inode *inode = mapping->host;
3305 struct shmem_inode_info *info = SHMEM_I(inode);
3306 pgoff_t index = pos >> PAGE_SHIFT;
3307 struct folio *folio;
3308 int ret = 0;
3309
3310 /* i_rwsem is held by caller */
3311 if (unlikely(info->seals & (F_SEAL_GROW |
3312 F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
3313 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
3314 return -EPERM;
3315 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
3316 return -EPERM;
3317 }
3318
3319 if (unlikely((info->flags & SHMEM_F_MAPPING_FROZEN) &&
3320 pos + len > inode->i_size))
3321 return -EPERM;
3322
3323 ret = shmem_get_folio(inode, index, pos + len, &folio, SGP_WRITE);
3324 if (ret)
3325 return ret;
3326
3327 if (folio_contain_hwpoisoned_page(folio)) {
3328 folio_unlock(folio);
3329 folio_put(folio);
3330 return -EIO;
3331 }
3332
3333 *foliop = folio;
3334 return 0;
3335 }
3336
3337 static int
3338 shmem_write_end(const struct kiocb *iocb, struct address_space *mapping,
3339 loff_t pos, unsigned len, unsigned copied,
3340 struct folio *folio, void *fsdata)
3341 {
3342 struct inode *inode = mapping->host;
3343
3344 if (pos + copied > inode->i_size)
3345 i_size_write(inode, pos + copied);
3346
3347 if (!folio_test_uptodate(folio)) {
3348 if (copied < folio_size(folio)) {
3349 size_t from = offset_in_folio(folio, pos);
3350 folio_zero_segments(folio, 0, from,
3351 from + copied, folio_size(folio));
3352 }
3353 folio_mark_uptodate(folio);
3354 }
3355 folio_mark_dirty(folio);
3356 folio_unlock(folio);
3357 folio_put(folio);
3358
3359 return copied;
3360 }
3361
3362 static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
3363 {
3364 struct file *file = iocb->ki_filp;
3365 struct inode *inode = file_inode(file);
3366 struct address_space *mapping = inode->i_mapping;
3367 pgoff_t index;
3368 unsigned long offset;
3369 int error = 0;
3370 ssize_t retval = 0;
3371
3372 for (;;) {
3373 struct folio *folio = NULL;
3374 struct page *page = NULL;
3375 unsigned long nr, ret;
3376 loff_t end_offset, i_size = i_size_read(inode);
3377 bool fallback_page_copy = false;
3378 size_t fsize;
3379
3380 if (unlikely(iocb->ki_pos >= i_size))
3381 break;
3382
3383 index = iocb->ki_pos >> PAGE_SHIFT;
3384 error = shmem_get_folio(inode, index, 0, &folio, SGP_READ);
3385 if (error) {
3386 if (error == -EINVAL)
3387 error = 0;
3388 break;
3389 }
3390 if (folio) {
3391 folio_unlock(folio);
3392
3393 page = folio_file_page(folio, index);
3394 if (PageHWPoison(page)) {
3395 folio_put(folio);
3396 error = -EIO;
3397 break;
3398 }
3399
3400 if (folio_test_large(folio) &&
3401 folio_test_has_hwpoisoned(folio))
3402 fallback_page_copy = true;
3403 }
3404
3405 /*
3406 * We must evaluate after, since reads (unlike writes)
3407 * are called without i_rwsem protection against truncate
3408 */
3409 i_size = i_size_read(inode);
3410 if (unlikely(iocb->ki_pos >= i_size)) {
3411 if (folio)
3412 folio_put(folio);
3413 break;
3414 }
3415 end_offset = min_t(loff_t, i_size, iocb->ki_pos + to->count);
3416 if (folio && likely(!fallback_page_copy))
3417 fsize = folio_size(folio);
3418 else
3419 fsize = PAGE_SIZE;
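/*
 * Folio sizes are powers of two, so masking ki_pos with (fsize - 1)
 * yields the offset within the folio (or single page) being copied.
 */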
3420 offset = iocb->ki_pos & (fsize - 1);
3421 nr = min_t(loff_t, end_offset - iocb->ki_pos, fsize - offset);
3422
3423 if (folio) {
3424 /*
3425 * If users can be writing to this page using arbitrary
3426 * virtual addresses, take care about potential aliasing
3427 * before reading the page on the kernel side.
3428 */
3429 if (mapping_writably_mapped(mapping)) {
3430 if (likely(!fallback_page_copy))
3431 flush_dcache_folio(folio);
3432 else
3433 flush_dcache_page(page);
3434 }
3435
3436 /*
3437 * Mark the folio accessed if we read the beginning.
3438 */
3439 if (!offset)
3440 folio_mark_accessed(folio);
3441 /*
3442 * Ok, we have the page, and it's up-to-date, so
3443 * now we can copy it to user space...
3444 */
3445 if (likely(!fallback_page_copy))
3446 ret = copy_folio_to_iter(folio, offset, nr, to);
3447 else
3448 ret = copy_page_to_iter(page, offset, nr, to);
3449 folio_put(folio);
3450 } else if (user_backed_iter(to)) {
3451 /*
3452 * Copy to user tends to be so well optimized, but
3453 * clear_user() not so much, that it is noticeably
3454 * faster to copy the zero page instead of clearing.
3455 */
3456 ret = copy_page_to_iter(ZERO_PAGE(0), offset, nr, to);
3457 } else {
3458 /*
3459 * But submitting the same page twice in a row to
3460 * splice() - or others? - can result in confusion:
3461 * so don't attempt that optimization on pipes etc.
3462 */
3463 ret = iov_iter_zero(nr, to);
3464 }
3465
3466 retval += ret;
3467 iocb->ki_pos += ret;
3468
3469 if (!iov_iter_count(to))
3470 break;
3471 if (ret < nr) {
3472 error = -EFAULT;
3473 break;
3474 }
3475 cond_resched();
3476 }
3477
3478 file_accessed(file);
3479 return retval ? retval : error;
3480 }
3481
3482 static ssize_t shmem_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
3483 {
3484 struct file *file = iocb->ki_filp;
3485 struct inode *inode = file->f_mapping->host;
3486 ssize_t ret;
3487
3488 inode_lock(inode);
3489 ret = generic_write_checks(iocb, from);
3490 if (ret <= 0)
3491 goto unlock;
3492 ret = file_remove_privs(file);
3493 if (ret)
3494 goto unlock;
3495 ret = file_update_time(file);
3496 if (ret)
3497 goto unlock;
3498 ret = generic_perform_write(iocb, from);
3499 unlock:
3500 inode_unlock(inode);
3501 return ret;
3502 }
3503
3504 static bool zero_pipe_buf_get(struct pipe_inode_info *pipe,
3505 struct pipe_buffer *buf)
3506 {
3507 return true;
3508 }
3509
3510 static void zero_pipe_buf_release(struct pipe_inode_info *pipe,
3511 struct pipe_buffer *buf)
3512 {
3513 }
3514
3515 static bool zero_pipe_buf_try_steal(struct pipe_inode_info *pipe,
3516 struct pipe_buffer *buf)
3517 {
3518 return false;
3519 }
3520
3521 static const struct pipe_buf_operations zero_pipe_buf_ops = {
3522 .release = zero_pipe_buf_release,
3523 .try_steal = zero_pipe_buf_try_steal,
3524 .get = zero_pipe_buf_get,
3525 };
3526
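/*
 * Feed at most one ZERO_PAGE-backed buffer into the pipe; callers loop
 * until the requested length is satisfied or the pipe fills up.  The
 * zero page is a permanent kernel page, which is why the pipe_buf
 * operations above can be no-ops and stealing it is always refused.
 */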
3527 static size_t splice_zeropage_into_pipe(struct pipe_inode_info *pipe,
3528 loff_t fpos, size_t size)
3529 {
3530 size_t offset = fpos & ~PAGE_MASK;
3531
3532 size = min_t(size_t, size, PAGE_SIZE - offset);
3533
3534 if (!pipe_is_full(pipe)) {
3535 struct pipe_buffer *buf = pipe_head_buf(pipe);
3536
3537 *buf = (struct pipe_buffer) {
3538 .ops = &zero_pipe_buf_ops,
3539 .page = ZERO_PAGE(0),
3540 .offset = offset,
3541 .len = size,
3542 };
3543 pipe->head++;
3544 }
3545
3546 return size;
3547 }
3548
3549 static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
3550 struct pipe_inode_info *pipe,
3551 size_t len, unsigned int flags)
3552 {
3553 struct inode *inode = file_inode(in);
3554 struct address_space *mapping = inode->i_mapping;
3555 struct folio *folio = NULL;
3556 size_t total_spliced = 0, used, npages, n, part;
3557 loff_t isize;
3558 int error = 0;
3559
3560 /* Work out how much data we can actually add into the pipe */
3561 used = pipe_buf_usage(pipe);
3562 npages = max_t(ssize_t, pipe->max_usage - used, 0);
3563 len = min_t(size_t, len, npages * PAGE_SIZE);
3564
3565 do {
3566 bool fallback_page_splice = false;
3567 struct page *page = NULL;
3568 pgoff_t index;
3569 size_t size;
3570
3571 if (*ppos >= i_size_read(inode))
3572 break;
3573
3574 index = *ppos >> PAGE_SHIFT;
3575 error = shmem_get_folio(inode, index, 0, &folio, SGP_READ);
3576 if (error) {
3577 if (error == -EINVAL)
3578 error = 0;
3579 break;
3580 }
3581 if (folio) {
3582 folio_unlock(folio);
3583
3584 page = folio_file_page(folio, index);
3585 if (PageHWPoison(page)) {
3586 error = -EIO;
3587 break;
3588 }
3589
3590 if (folio_test_large(folio) &&
3591 folio_test_has_hwpoisoned(folio))
3592 fallback_page_splice = true;
3593 }
3594
3595 /*
3596 * i_size must be checked after we know the pages are Uptodate.
3597 *
3598 * Checking i_size only after that point lets us calculate
3599 * the correct value for "part", which means the zero-filled
3600 * part of the page is not copied back to userspace (unless
3601 * another truncate extends the file - this is desired though).
3602 */
3603 isize = i_size_read(inode);
3604 if (unlikely(*ppos >= isize))
3605 break;
3606 /*
3607 * Fallback to PAGE_SIZE splice if the large folio has hwpoisoned
3608 * pages.
3609 */
3610 size = len;
3611 if (unlikely(fallback_page_splice)) {
3612 size_t offset = *ppos & ~PAGE_MASK;
3613
3614 size = umin(size, PAGE_SIZE - offset);
3615 }
3616 part = min_t(loff_t, isize - *ppos, size);
3617
3618 if (folio) {
3619 /*
3620 * If users can be writing to this page using arbitrary
3621 * virtual addresses, take care about potential aliasing
3622 * before reading the page on the kernel side.
3623 */
3624 if (mapping_writably_mapped(mapping)) {
3625 if (likely(!fallback_page_splice))
3626 flush_dcache_folio(folio);
3627 else
3628 flush_dcache_page(page);
3629 }
3630 folio_mark_accessed(folio);
3631 /*
3632 * Ok, we have the page, and it's up-to-date, so we can
3633 * now splice it into the pipe.
3634 */
3635 n = splice_folio_into_pipe(pipe, folio, *ppos, part);
3636 folio_put(folio);
3637 folio = NULL;
3638 } else {
3639 n = splice_zeropage_into_pipe(pipe, *ppos, part);
3640 }
3641
3642 if (!n)
3643 break;
3644 len -= n;
3645 total_spliced += n;
3646 *ppos += n;
3647 in->f_ra.prev_pos = *ppos;
3648 if (pipe_is_full(pipe))
3649 break;
3650
3651 cond_resched();
3652 } while (len);
3653
3654 if (folio)
3655 folio_put(folio);
3656
3657 file_accessed(in);
3658 return total_spliced ? total_spliced : error;
3659 }
3660
3661 static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
3662 {
3663 struct address_space *mapping = file->f_mapping;
3664 struct inode *inode = mapping->host;
3665
3666 if (whence != SEEK_DATA && whence != SEEK_HOLE)
3667 return generic_file_llseek_size(file, offset, whence,
3668 MAX_LFS_FILESIZE, i_size_read(inode));
3669 if (offset < 0)
3670 return -ENXIO;
3671
3672 inode_lock(inode);
3673 /* We're holding i_rwsem so we can access i_size directly */
3674 offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence);
3675 if (offset >= 0)
3676 offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
3677 inode_unlock(inode);
3678 return offset;
3679 }
3680
3681 static long shmem_fallocate(struct file *file, int mode, loff_t offset,
3682 loff_t len)
3683 {
3684 struct inode *inode = file_inode(file);
3685 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
3686 struct shmem_inode_info *info = SHMEM_I(inode);
3687 struct shmem_falloc shmem_falloc;
3688 pgoff_t start, index, end, undo_fallocend;
3689 int error;
3690
3691 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
3692 return -EOPNOTSUPP;
3693
3694 inode_lock(inode);
3695
3696 if (info->flags & SHMEM_F_MAPPING_FROZEN) {
3697 error = -EPERM;
3698 goto out;
3699 }
3700
3701 if (mode & FALLOC_FL_PUNCH_HOLE) {
3702 struct address_space *mapping = file->f_mapping;
3703 loff_t unmap_start = round_up(offset, PAGE_SIZE);
3704 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
3705 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
3706
3707 /* protected by i_rwsem */
3708 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
3709 error = -EPERM;
3710 goto out;
3711 }
3712
3713 shmem_falloc.waitq = &shmem_falloc_waitq;
3714 shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT;
3715 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
3716 spin_lock(&inode->i_lock);
3717 inode->i_private = &shmem_falloc;
3718 spin_unlock(&inode->i_lock);
3719
3720 if ((u64)unmap_end > (u64)unmap_start)
3721 unmap_mapping_range(mapping, unmap_start,
3722 1 + unmap_end - unmap_start, 0);
3723 shmem_truncate_range(inode, offset, offset + len - 1);
3724 /* No need to unmap again: hole-punching leaves COWed pages */
3725
3726 spin_lock(&inode->i_lock);
3727 inode->i_private = NULL;
3728 wake_up_all(&shmem_falloc_waitq);
3729 WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
3730 spin_unlock(&inode->i_lock);
3731 error = 0;
3732 goto out;
3733 }
3734
3735 /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
3736 error = inode_newsize_ok(inode, offset + len);
3737 if (error)
3738 goto out;
3739
3740 if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
3741 error = -EPERM;
3742 goto out;
3743 }
3744
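/*
 * Example (4K pages): offset = 5000, len = 100 gives start = 1 and
 * end = 2, i.e. only the page covering bytes 4096..8191 is fallocated;
 * end is the exclusive upper bound, rounded up to a page boundary.
 */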
3745 start = offset >> PAGE_SHIFT;
3746 end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
3747 /* Try to avoid a swapstorm if len is impossible to satisfy */
3748 if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
3749 error = -ENOSPC;
3750 goto out;
3751 }
3752
3753 shmem_falloc.waitq = NULL;
3754 shmem_falloc.start = start;
3755 shmem_falloc.next = start;
3756 shmem_falloc.nr_falloced = 0;
3757 shmem_falloc.nr_unswapped = 0;
3758 spin_lock(&inode->i_lock);
3759 inode->i_private = &shmem_falloc;
3760 spin_unlock(&inode->i_lock);
3761
3762 /*
3763 * info->fallocend is only relevant when huge pages might be
3764 * involved: to prevent split_huge_page() freeing fallocated
3765 * pages when FALLOC_FL_KEEP_SIZE committed beyond i_size.
3766 */
3767 undo_fallocend = info->fallocend;
3768 if (info->fallocend < end)
3769 info->fallocend = end;
3770
3771 for (index = start; index < end; ) {
3772 struct folio *folio;
3773
3774 /*
3775 * Check for fatal signal so that we abort early in OOM
3776 * situations. We don't want to abort in case of non-fatal
3777 * signals as large fallocate can take noticeable time and
3778 * e.g. periodic timers may result in fallocate constantly
3779 * restarting.
3780 */
3781 if (fatal_signal_pending(current))
3782 error = -EINTR;
3783 else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
3784 error = -ENOMEM;
3785 else
3786 error = shmem_get_folio(inode, index, offset + len,
3787 &folio, SGP_FALLOC);
3788 if (error) {
3789 info->fallocend = undo_fallocend;
3790 /* Remove the !uptodate folios we added */
3791 if (index > start) {
3792 shmem_undo_range(inode,
3793 (loff_t)start << PAGE_SHIFT,
3794 ((loff_t)index << PAGE_SHIFT) - 1, true);
3795 }
3796 goto undone;
3797 }
3798
3799 /*
3800 * Here is a more important optimization than it appears:
3801 * a second SGP_FALLOC on the same large folio will clear it,
3802 * making it uptodate and un-undoable if we fail later.
3803 */
3804 index = folio_next_index(folio);
3805 /* Beware 32-bit wraparound */
3806 if (!index)
3807 index--;
3808
3809 /*
3810 * Inform shmem_writeout() how far we have reached.
3811 * No need for lock or barrier: we have the page lock.
3812 */
3813 if (!folio_test_uptodate(folio))
3814 shmem_falloc.nr_falloced += index - shmem_falloc.next;
3815 shmem_falloc.next = index;
3816
3817 /*
3818 * If !uptodate, leave it that way so that freeable folios
3819 * can be recognized if we need to rollback on error later.
3820 * But mark it dirty so that memory pressure will swap rather
3821 * than free the folios we are allocating (and SGP_CACHE folios
3822 * might still be clean: we now need to mark those dirty too).
3823 */
3824 folio_mark_dirty(folio);
3825 folio_unlock(folio);
3826 folio_put(folio);
3827 cond_resched();
3828 }
3829
3830 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
3831 i_size_write(inode, offset + len);
3832 undone:
3833 spin_lock(&inode->i_lock);
3834 inode->i_private = NULL;
3835 spin_unlock(&inode->i_lock);
3836 out:
3837 if (!error)
3838 file_modified(file);
3839 inode_unlock(inode);
3840 return error;
3841 }
3842
3843 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
3844 {
3845 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
3846
3847 buf->f_type = TMPFS_MAGIC;
3848 buf->f_bsize = PAGE_SIZE;
3849 buf->f_namelen = NAME_MAX;
3850 if (sbinfo->max_blocks) {
3851 buf->f_blocks = sbinfo->max_blocks;
3852 buf->f_bavail =
3853 buf->f_bfree = sbinfo->max_blocks -
3854 percpu_counter_sum(&sbinfo->used_blocks);
3855 }
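/*
 * free_ispace is accounted in BOGO_INODE_SIZE-byte units and is also
 * consumed by xattrs, so f_ffree can drop faster than the number of
 * inodes actually created.
 */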
3856 if (sbinfo->max_inodes) {
3857 buf->f_files = sbinfo->max_inodes;
3858 buf->f_ffree = sbinfo->free_ispace / BOGO_INODE_SIZE;
3859 }
3860 /* else leave those fields 0 like simple_statfs */
3861
3862 buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b);
3863
3864 return 0;
3865 }
3866
3867 /*
3868 * File creation. Allocate an inode, and we're done..
3869 */
3870 static int
3871 shmem_mknod(struct mnt_idmap *idmap, struct inode *dir,
3872 struct dentry *dentry, umode_t mode, dev_t dev)
3873 {
3874 struct inode *inode;
3875 int error;
3876
3877 if (!generic_ci_validate_strict_name(dir, &dentry->d_name))
3878 return -EINVAL;
3879
3880 inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, dev,
3881 mk_vma_flags(VMA_NORESERVE_BIT));
3882 if (IS_ERR(inode))
3883 return PTR_ERR(inode);
3884
3885 error = simple_acl_create(dir, inode);
3886 if (error)
3887 goto out_iput;
3888 error = security_inode_init_security(inode, dir, &dentry->d_name,
3889 shmem_initxattrs, NULL);
3890 if (error && error != -EOPNOTSUPP)
3891 goto out_iput;
3892
3893 error = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
3894 if (error)
3895 goto out_iput;
3896
3897 dir->i_size += BOGO_DIRENT_SIZE;
3898 inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
3899 inode_inc_iversion(dir);
3900
3901 d_make_persistent(dentry, inode);
3902 return error;
3903
3904 out_iput:
3905 iput(inode);
3906 return error;
3907 }
3908
3909 static int
3910 shmem_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
3911 struct file *file, umode_t mode)
3912 {
3913 struct inode *inode;
3914 int error;
3915
3916 inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, 0,
3917 mk_vma_flags(VMA_NORESERVE_BIT));
3918 if (IS_ERR(inode)) {
3919 error = PTR_ERR(inode);
3920 goto err_out;
3921 }
3922 error = security_inode_init_security(inode, dir, NULL,
3923 shmem_initxattrs, NULL);
3924 if (error && error != -EOPNOTSUPP)
3925 goto out_iput;
3926 error = simple_acl_create(dir, inode);
3927 if (error)
3928 goto out_iput;
3929 d_tmpfile(file, inode);
3930
3931 err_out:
3932 return finish_open_simple(file, error);
3933 out_iput:
3934 iput(inode);
3935 return error;
3936 }
3937
3938 static struct dentry *shmem_mkdir(struct mnt_idmap *idmap, struct inode *dir,
3939 struct dentry *dentry, umode_t mode)
3940 {
3941 int error;
3942
3943 error = shmem_mknod(idmap, dir, dentry, mode | S_IFDIR, 0);
3944 if (error)
3945 return ERR_PTR(error);
3946 inc_nlink(dir);
3947 return NULL;
3948 }
3949
3950 static int shmem_create(struct mnt_idmap *idmap, struct inode *dir,
3951 struct dentry *dentry, umode_t mode, bool excl)
3952 {
3953 return shmem_mknod(idmap, dir, dentry, mode | S_IFREG, 0);
3954 }
3955
3956 /*
3957 * Link a file..
3958 */
3959 static int shmem_link(struct dentry *old_dentry, struct inode *dir,
3960 struct dentry *dentry)
3961 {
3962 struct inode *inode = d_inode(old_dentry);
3963 int ret;
3964
3965 /*
3966 * No ordinary (disk based) filesystem counts links as inodes;
3967 * but each new link needs a new dentry, pinning lowmem, and
3968 * tmpfs dentries cannot be pruned until they are unlinked.
3969 * But if an O_TMPFILE file is linked into the tmpfs, the
3970 * first link must skip that, to get the accounting right.
3971 */
3972 if (inode->i_nlink) {
3973 ret = shmem_reserve_inode(inode->i_sb, NULL);
3974 if (ret)
3975 return ret;
3976 }
3977
3978 ret = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
3979 if (ret) {
3980 if (inode->i_nlink)
3981 shmem_free_inode(inode->i_sb, 0);
3982 return ret;
3983 }
3984
3985 dir->i_size += BOGO_DIRENT_SIZE;
3986 inode_inc_iversion(dir);
3987 return simple_link(old_dentry, dir, dentry);
3988 }
3989
3990 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
3991 {
3992 struct inode *inode = d_inode(dentry);
3993
3994 if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
3995 shmem_free_inode(inode->i_sb, 0);
3996
3997 simple_offset_remove(shmem_get_offset_ctx(dir), dentry);
3998
3999 dir->i_size -= BOGO_DIRENT_SIZE;
4000 inode_inc_iversion(dir);
4001 simple_unlink(dir, dentry);
4002
4003 /*
4004 * For now, VFS can't deal with case-insensitive negative dentries, so
4005 * we invalidate them
4006 */
4007 if (IS_ENABLED(CONFIG_UNICODE) && IS_CASEFOLDED(dir))
4008 d_invalidate(dentry);
4009
4010 return 0;
4011 }
4012
4013 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
4014 {
4015 if (!simple_empty(dentry))
4016 return -ENOTEMPTY;
4017
4018 drop_nlink(d_inode(dentry));
4019 drop_nlink(dir);
4020 return shmem_unlink(dir, dentry);
4021 }
4022
4023 static int shmem_whiteout(struct mnt_idmap *idmap,
4024 struct inode *old_dir, struct dentry *old_dentry)
4025 {
4026 struct dentry *whiteout;
4027 int error;
4028
4029 whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
4030 if (!whiteout)
4031 return -ENOMEM;
4032 error = shmem_mknod(idmap, old_dir, whiteout,
4033 S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
4034 dput(whiteout);
4035 return error;
4036 }
4037
4038 /*
4039 * The VFS layer already does all the dentry stuff for rename,
4040 * we just have to decrement the usage count for the target if
4041 * it exists so that the VFS layer correctly frees it when it
4042 * gets overwritten.
4043 */
4044 static int shmem_rename2(struct mnt_idmap *idmap,
4045 struct inode *old_dir, struct dentry *old_dentry,
4046 struct inode *new_dir, struct dentry *new_dentry,
4047 unsigned int flags)
4048 {
4049 struct inode *inode = d_inode(old_dentry);
4050 int they_are_dirs = S_ISDIR(inode->i_mode);
4051 bool had_offset = false;
4052 int error;
4053
4054 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
4055 return -EINVAL;
4056
4057 if (flags & RENAME_EXCHANGE)
4058 return simple_offset_rename_exchange(old_dir, old_dentry,
4059 new_dir, new_dentry);
4060
4061 if (!simple_empty(new_dentry))
4062 return -ENOTEMPTY;
4063
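/*
 * -EBUSY from simple_offset_add() means new_dentry already owns a
 * directory offset (the rename target exists); remember that so the
 * error path below does not remove an entry we did not add.
 */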
4064 error = simple_offset_add(shmem_get_offset_ctx(new_dir), new_dentry);
4065 if (error == -EBUSY)
4066 had_offset = true;
4067 else if (unlikely(error))
4068 return error;
4069
4070 if (flags & RENAME_WHITEOUT) {
4071 error = shmem_whiteout(idmap, old_dir, old_dentry);
4072 if (error) {
4073 if (!had_offset)
4074 simple_offset_remove(shmem_get_offset_ctx(new_dir),
4075 new_dentry);
4076 return error;
4077 }
4078 }
4079
4080 simple_offset_rename(old_dir, old_dentry, new_dir, new_dentry);
4081 if (d_really_is_positive(new_dentry)) {
4082 (void) shmem_unlink(new_dir, new_dentry);
4083 if (they_are_dirs) {
4084 drop_nlink(d_inode(new_dentry));
4085 drop_nlink(old_dir);
4086 }
4087 } else if (they_are_dirs) {
4088 drop_nlink(old_dir);
4089 inc_nlink(new_dir);
4090 }
4091
4092 old_dir->i_size -= BOGO_DIRENT_SIZE;
4093 new_dir->i_size += BOGO_DIRENT_SIZE;
4094 simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
4095 inode_inc_iversion(old_dir);
4096 inode_inc_iversion(new_dir);
4097 return 0;
4098 }
4099
4100 static int shmem_symlink(struct mnt_idmap *idmap, struct inode *dir,
4101 struct dentry *dentry, const char *symname)
4102 {
4103 int error;
4104 int len;
4105 struct inode *inode;
4106 struct folio *folio;
4107 char *link;
4108
4109 len = strlen(symname) + 1;
4110 if (len > PAGE_SIZE)
4111 return -ENAMETOOLONG;
4112
4113 inode = shmem_get_inode(idmap, dir->i_sb, dir, S_IFLNK | 0777, 0,
4114 mk_vma_flags(VMA_NORESERVE_BIT));
4115 if (IS_ERR(inode))
4116 return PTR_ERR(inode);
4117
4118 error = security_inode_init_security(inode, dir, &dentry->d_name,
4119 shmem_initxattrs, NULL);
4120 if (error && error != -EOPNOTSUPP)
4121 goto out_iput;
4122
4123 error = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
4124 if (error)
4125 goto out_iput;
4126
4127 inode->i_size = len-1;
4128 if (len <= SHORT_SYMLINK_LEN) {
4129 link = kmemdup(symname, len, GFP_KERNEL);
4130 if (!link) {
4131 error = -ENOMEM;
4132 goto out_remove_offset;
4133 }
4134 inode->i_op = &shmem_short_symlink_operations;
4135 inode_set_cached_link(inode, link, len - 1);
4136 } else {
4137 inode_nohighmem(inode);
4138 inode->i_mapping->a_ops = &shmem_aops;
4139 error = shmem_get_folio(inode, 0, 0, &folio, SGP_WRITE);
4140 if (error)
4141 goto out_remove_offset;
4142 inode->i_op = &shmem_symlink_inode_operations;
4143 memcpy(folio_address(folio), symname, len);
4144 folio_mark_uptodate(folio);
4145 folio_mark_dirty(folio);
4146 folio_unlock(folio);
4147 folio_put(folio);
4148 }
4149 dir->i_size += BOGO_DIRENT_SIZE;
4150 inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
4151 inode_inc_iversion(dir);
4152 d_make_persistent(dentry, inode);
4153 return 0;
4154
4155 out_remove_offset:
4156 simple_offset_remove(shmem_get_offset_ctx(dir), dentry);
4157 out_iput:
4158 iput(inode);
4159 return error;
4160 }
4161
4162 static void shmem_put_link(void *arg)
4163 {
4164 folio_mark_accessed(arg);
4165 folio_put(arg);
4166 }
4167
4168 static const char *shmem_get_link(struct dentry *dentry, struct inode *inode,
4169 struct delayed_call *done)
4170 {
4171 struct folio *folio = NULL;
4172 int error;
4173
4174 if (!dentry) {
4175 folio = filemap_get_folio(inode->i_mapping, 0);
4176 if (IS_ERR(folio))
4177 return ERR_PTR(-ECHILD);
4178 if (PageHWPoison(folio_page(folio, 0)) ||
4179 !folio_test_uptodate(folio)) {
4180 folio_put(folio);
4181 return ERR_PTR(-ECHILD);
4182 }
4183 } else {
4184 error = shmem_get_folio(inode, 0, 0, &folio, SGP_READ);
4185 if (error)
4186 return ERR_PTR(error);
4187 if (!folio)
4188 return ERR_PTR(-ECHILD);
4189 if (PageHWPoison(folio_page(folio, 0))) {
4190 folio_unlock(folio);
4191 folio_put(folio);
4192 return ERR_PTR(-ECHILD);
4193 }
4194 folio_unlock(folio);
4195 }
4196 set_delayed_call(done, shmem_put_link, folio);
4197 return folio_address(folio);
4198 }
4199
4200 #ifdef CONFIG_TMPFS_XATTR
4201
4202 static int shmem_fileattr_get(struct dentry *dentry, struct file_kattr *fa)
4203 {
4204 struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
4205
4206 fileattr_fill_flags(fa, info->fsflags & SHMEM_FL_USER_VISIBLE);
4207
4208 return 0;
4209 }
4210
4211 static int shmem_fileattr_set(struct mnt_idmap *idmap,
4212 struct dentry *dentry, struct file_kattr *fa)
4213 {
4214 struct inode *inode = d_inode(dentry);
4215 struct shmem_inode_info *info = SHMEM_I(inode);
4216 int ret, flags;
4217
4218 if (fileattr_has_fsx(fa))
4219 return -EOPNOTSUPP;
4220 if (fa->flags & ~SHMEM_FL_USER_MODIFIABLE)
4221 return -EOPNOTSUPP;
4222
4223 flags = (info->fsflags & ~SHMEM_FL_USER_MODIFIABLE) |
4224 (fa->flags & SHMEM_FL_USER_MODIFIABLE);
4225
4226 ret = shmem_set_inode_flags(inode, flags, dentry);
4227
4228 if (ret)
4229 return ret;
4230
4231 info->fsflags = flags;
4232
4233 inode_set_ctime_current(inode);
4234 inode_inc_iversion(inode);
4235 return 0;
4236 }
4237
4238 /*
4239 * Superblocks without xattr inode operations may get some security.* xattr
4240 * support from the LSM "for free". As soon as we have any other xattrs
4241 * like ACLs, we also need to implement the security.* handlers at
4242 * filesystem level, though.
4243 */
4244
4245 /*
4246 * Callback for security_inode_init_security() for acquiring xattrs.
4247 */
4248 static int shmem_initxattrs(struct inode *inode,
4249 const struct xattr *xattr_array, void *fs_info)
4250 {
4251 struct shmem_inode_info *info = SHMEM_I(inode);
4252 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
4253 const struct xattr *xattr;
4254 size_t ispace = 0;
4255 size_t len;
4256
4257 CLASS(simple_xattrs, xattrs)();
4258 if (IS_ERR(xattrs))
4259 return PTR_ERR(xattrs);
4260
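/*
 * Charge the per-sb inode space up front for all xattrs we are about to
 * add (each stored name carries the "security." prefix); the charge is
 * refunded below if building the list fails part-way.
 */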
4261 if (sbinfo->max_inodes) {
4262 for (xattr = xattr_array; xattr->name != NULL; xattr++) {
4263 ispace += simple_xattr_space(xattr->name,
4264 xattr->value_len + XATTR_SECURITY_PREFIX_LEN);
4265 }
4266 if (ispace) {
4267 raw_spin_lock(&sbinfo->stat_lock);
4268 if (sbinfo->free_ispace < ispace)
4269 ispace = 0;
4270 else
4271 sbinfo->free_ispace -= ispace;
4272 raw_spin_unlock(&sbinfo->stat_lock);
4273 if (!ispace)
4274 return -ENOSPC;
4275 }
4276 }
4277
4278 for (xattr = xattr_array; xattr->name != NULL; xattr++) {
4279 CLASS(simple_xattr, new_xattr)(xattr->value, xattr->value_len);
4280 if (IS_ERR(new_xattr))
4281 break;
4282
4283 len = strlen(xattr->name) + 1;
4284 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
4285 GFP_KERNEL_ACCOUNT);
4286 if (!new_xattr->name)
4287 break;
4288
4289 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
4290 XATTR_SECURITY_PREFIX_LEN);
4291 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
4292 xattr->name, len);
4293
4294 if (simple_xattr_add(xattrs, new_xattr))
4295 break;
4296 retain_and_null_ptr(new_xattr);
4297 }
4298
4299 if (xattr->name != NULL) {
4300 if (ispace) {
4301 raw_spin_lock(&sbinfo->stat_lock);
4302 sbinfo->free_ispace += ispace;
4303 raw_spin_unlock(&sbinfo->stat_lock);
4304 }
4305 return -ENOMEM;
4306 }
4307
4308 smp_store_release(&info->xattrs, no_free_ptr(xattrs));
4309 return 0;
4310 }
4311
4312 static int shmem_xattr_handler_get(const struct xattr_handler *handler,
4313 struct dentry *unused, struct inode *inode,
4314 const char *name, void *buffer, size_t size)
4315 {
4316 struct shmem_inode_info *info = SHMEM_I(inode);
4317 struct simple_xattrs *xattrs;
4318
4319 xattrs = READ_ONCE(info->xattrs);
4320 if (!xattrs)
4321 return -ENODATA;
4322
4323 name = xattr_full_name(handler, name);
4324 return simple_xattr_get(xattrs, name, buffer, size);
4325 }
4326
4327 static int shmem_xattr_handler_set(const struct xattr_handler *handler,
4328 struct mnt_idmap *idmap,
4329 struct dentry *unused, struct inode *inode,
4330 const char *name, const void *value,
4331 size_t size, int flags)
4332 {
4333 struct shmem_inode_info *info = SHMEM_I(inode);
4334 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
4335 struct simple_xattrs *xattrs;
4336 struct simple_xattr *old_xattr;
4337 size_t ispace = 0;
4338
4339 name = xattr_full_name(handler, name);
4340
4341 xattrs = simple_xattrs_lazy_alloc(&info->xattrs, value, flags);
4342 if (IS_ERR_OR_NULL(xattrs))
4343 return PTR_ERR(xattrs);
4344
4345 if (value && sbinfo->max_inodes) {
4346 ispace = simple_xattr_space(name, size);
4347 raw_spin_lock(&sbinfo->stat_lock);
4348 if (sbinfo->free_ispace < ispace)
4349 ispace = 0;
4350 else
4351 sbinfo->free_ispace -= ispace;
4352 raw_spin_unlock(&sbinfo->stat_lock);
4353 if (!ispace)
4354 return -ENOSPC;
4355 }
4356
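/*
 * On success the charge taken above stays with the new xattr and we
 * refund the space held by any xattr it replaced; on failure we refund
 * the charge we just took.
 */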
4357 old_xattr = simple_xattr_set(xattrs, name, value, size, flags);
4358 if (!IS_ERR(old_xattr)) {
4359 ispace = 0;
4360 if (old_xattr && sbinfo->max_inodes)
4361 ispace = simple_xattr_space(old_xattr->name,
4362 old_xattr->size);
4363 simple_xattr_free_rcu(old_xattr);
4364 old_xattr = NULL;
4365 inode_set_ctime_current(inode);
4366 inode_inc_iversion(inode);
4367 }
4368 if (ispace) {
4369 raw_spin_lock(&sbinfo->stat_lock);
4370 sbinfo->free_ispace += ispace;
4371 raw_spin_unlock(&sbinfo->stat_lock);
4372 }
4373 return PTR_ERR(old_xattr);
4374 }
4375
4376 static const struct xattr_handler shmem_security_xattr_handler = {
4377 .prefix = XATTR_SECURITY_PREFIX,
4378 .get = shmem_xattr_handler_get,
4379 .set = shmem_xattr_handler_set,
4380 };
4381
4382 static const struct xattr_handler shmem_trusted_xattr_handler = {
4383 .prefix = XATTR_TRUSTED_PREFIX,
4384 .get = shmem_xattr_handler_get,
4385 .set = shmem_xattr_handler_set,
4386 };
4387
4388 static const struct xattr_handler shmem_user_xattr_handler = {
4389 .prefix = XATTR_USER_PREFIX,
4390 .get = shmem_xattr_handler_get,
4391 .set = shmem_xattr_handler_set,
4392 };
4393
4394 static const struct xattr_handler * const shmem_xattr_handlers[] = {
4395 &shmem_security_xattr_handler,
4396 &shmem_trusted_xattr_handler,
4397 &shmem_user_xattr_handler,
4398 NULL
4399 };
4400
4401 static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
4402 {
4403 struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
4404
4405 return simple_xattr_list(d_inode(dentry), READ_ONCE(info->xattrs),
4406 buffer, size);
4407 }
4408 #endif /* CONFIG_TMPFS_XATTR */
4409
4410 static const struct inode_operations shmem_short_symlink_operations = {
4411 .getattr = shmem_getattr,
4412 .setattr = shmem_setattr,
4413 .get_link = simple_get_link,
4414 #ifdef CONFIG_TMPFS_XATTR
4415 .listxattr = shmem_listxattr,
4416 #endif
4417 };
4418
4419 static const struct inode_operations shmem_symlink_inode_operations = {
4420 .getattr = shmem_getattr,
4421 .setattr = shmem_setattr,
4422 .get_link = shmem_get_link,
4423 #ifdef CONFIG_TMPFS_XATTR
4424 .listxattr = shmem_listxattr,
4425 #endif
4426 };
4427
4428 static struct dentry *shmem_get_parent(struct dentry *child)
4429 {
4430 return ERR_PTR(-ESTALE);
4431 }
4432
4433 static int shmem_match(struct inode *ino, void *vfh)
4434 {
4435 __u32 *fh = vfh;
4436 __u64 inum = fh[2];
4437 inum = (inum << 32) | fh[1];
4438 return ino->i_ino == inum && fh[0] == ino->i_generation;
4439 }
4440
4441 /* Find any alias of inode, but prefer a hashed alias */
4442 static struct dentry *shmem_find_alias(struct inode *inode)
4443 {
4444 struct dentry *alias = d_find_alias(inode);
4445
4446 return alias ?: d_find_any_alias(inode);
4447 }
4448
4449 static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
4450 struct fid *fid, int fh_len, int fh_type)
4451 {
4452 struct inode *inode;
4453 struct dentry *dentry = NULL;
4454 u64 inum;
4455
4456 if (fh_len < 3)
4457 return NULL;
4458
4459 inum = fid->raw[2];
4460 inum = (inum << 32) | fid->raw[1];
4461
4462 inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
4463 shmem_match, fid->raw);
4464 if (inode) {
4465 dentry = shmem_find_alias(inode);
4466 iput(inode);
4467 }
4468
4469 return dentry;
4470 }
4471
4472 static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
4473 struct inode *parent)
4474 {
4475 if (*len < 3) {
4476 *len = 3;
4477 return FILEID_INVALID;
4478 }
4479
4480 if (inode_unhashed(inode)) {
4481 /* Unfortunately insert_inode_hash is not idempotent,
4482 * so as we hash inodes here rather than at creation
4483 * time, we need a lock to ensure we only try
4484 * to do it once
4485 */
4486 static DEFINE_SPINLOCK(lock);
4487 spin_lock(&lock);
4488 if (inode_unhashed(inode))
4489 __insert_inode_hash(inode,
4490 inode->i_ino + inode->i_generation);
4491 spin_unlock(&lock);
4492 }
4493
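/*
 * Layout decoded by shmem_match()/shmem_fh_to_dentry(): fh[0] is the
 * inode generation, fh[1] the low and fh[2] the high 32 bits of i_ino.
 */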
4494 fh[0] = inode->i_generation;
4495 fh[1] = inode->i_ino;
4496 fh[2] = ((__u64)inode->i_ino) >> 32;
4497
4498 *len = 3;
4499 return 1;
4500 }
4501
4502 static const struct export_operations shmem_export_ops = {
4503 .get_parent = shmem_get_parent,
4504 .encode_fh = shmem_encode_fh,
4505 .fh_to_dentry = shmem_fh_to_dentry,
4506 };
4507
4508 enum shmem_param {
4509 Opt_gid,
4510 Opt_huge,
4511 Opt_mode,
4512 Opt_mpol,
4513 Opt_nr_blocks,
4514 Opt_nr_inodes,
4515 Opt_size,
4516 Opt_uid,
4517 Opt_inode32,
4518 Opt_inode64,
4519 Opt_noswap,
4520 Opt_quota,
4521 Opt_usrquota,
4522 Opt_grpquota,
4523 Opt_usrquota_block_hardlimit,
4524 Opt_usrquota_inode_hardlimit,
4525 Opt_grpquota_block_hardlimit,
4526 Opt_grpquota_inode_hardlimit,
4527 Opt_casefold_version,
4528 Opt_casefold,
4529 Opt_strict_encoding,
4530 };
4531
4532 static const struct constant_table shmem_param_enums_huge[] = {
4533 {"never", SHMEM_HUGE_NEVER },
4534 {"always", SHMEM_HUGE_ALWAYS },
4535 {"within_size", SHMEM_HUGE_WITHIN_SIZE },
4536 {"advise", SHMEM_HUGE_ADVISE },
4537 {}
4538 };
4539
4540 const struct fs_parameter_spec shmem_fs_parameters[] = {
4541 fsparam_gid ("gid", Opt_gid),
4542 fsparam_enum ("huge", Opt_huge, shmem_param_enums_huge),
4543 fsparam_u32oct("mode", Opt_mode),
4544 fsparam_string("mpol", Opt_mpol),
4545 fsparam_string("nr_blocks", Opt_nr_blocks),
4546 fsparam_string("nr_inodes", Opt_nr_inodes),
4547 fsparam_string("size", Opt_size),
4548 fsparam_uid ("uid", Opt_uid),
4549 fsparam_flag ("inode32", Opt_inode32),
4550 fsparam_flag ("inode64", Opt_inode64),
4551 fsparam_flag ("noswap", Opt_noswap),
4552 #ifdef CONFIG_TMPFS_QUOTA
4553 fsparam_flag ("quota", Opt_quota),
4554 fsparam_flag ("usrquota", Opt_usrquota),
4555 fsparam_flag ("grpquota", Opt_grpquota),
4556 fsparam_string("usrquota_block_hardlimit", Opt_usrquota_block_hardlimit),
4557 fsparam_string("usrquota_inode_hardlimit", Opt_usrquota_inode_hardlimit),
4558 fsparam_string("grpquota_block_hardlimit", Opt_grpquota_block_hardlimit),
4559 fsparam_string("grpquota_inode_hardlimit", Opt_grpquota_inode_hardlimit),
4560 #endif
4561 fsparam_string("casefold", Opt_casefold_version),
4562 fsparam_flag ("casefold", Opt_casefold),
4563 fsparam_flag ("strict_encoding", Opt_strict_encoding),
4564 {}
4565 };
4566
4567 #if IS_ENABLED(CONFIG_UNICODE)
4568 static int shmem_parse_opt_casefold(struct fs_context *fc, struct fs_parameter *param,
4569 bool latest_version)
4570 {
4571 struct shmem_options *ctx = fc->fs_private;
4572 int version = UTF8_LATEST;
4573 struct unicode_map *encoding;
4574 char *version_str = param->string + 5;
4575
4576 if (!latest_version) {
4577 if (strncmp(param->string, "utf8-", 5))
4578 return invalfc(fc, "Only UTF-8 encodings are supported "
4579 "in the format: utf8-<version number>");
4580
4581 version = utf8_parse_version(version_str);
4582 if (version < 0)
4583 return invalfc(fc, "Invalid UTF-8 version: %s", version_str);
4584 }
4585
4586 encoding = utf8_load(version);
4587
4588 if (IS_ERR(encoding)) {
4589 return invalfc(fc, "Failed loading UTF-8 version: utf8-%u.%u.%u\n",
4590 unicode_major(version), unicode_minor(version),
4591 unicode_rev(version));
4592 }
4593
4594 pr_info("tmpfs: Using encoding : utf8-%u.%u.%u\n",
4595 unicode_major(version), unicode_minor(version), unicode_rev(version));
4596
4597 ctx->encoding = encoding;
4598
4599 return 0;
4600 }
4601 #else
4602 static int shmem_parse_opt_casefold(struct fs_context *fc, struct fs_parameter *param,
4603 bool latest_version)
4604 {
4605 return invalfc(fc, "tmpfs: Kernel not built with CONFIG_UNICODE\n");
4606 }
4607 #endif
4608
4609 static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
4610 {
4611 struct shmem_options *ctx = fc->fs_private;
4612 struct fs_parse_result result;
4613 unsigned long long size;
4614 char *rest;
4615 int opt;
4616 kuid_t kuid;
4617 kgid_t kgid;
4618
4619 opt = fs_parse(fc, shmem_fs_parameters, param, &result);
4620 if (opt < 0)
4621 return opt;
4622
4623 switch (opt) {
4624 case Opt_size:
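/*
 * "size=" accepts an absolute value ("size=1G") or a percentage of
 * RAM ("size=50%"); e.g. with 4KiB pages and 8GiB of RAM, "size=50%"
 * ends up as roughly 1M blocks after the conversion below.
 */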
4625 size = memparse(param->string, &rest);
4626 if (*rest == '%') {
4627 size <<= PAGE_SHIFT;
4628 size *= totalram_pages();
4629 do_div(size, 100);
4630 rest++;
4631 }
4632 if (*rest)
4633 goto bad_value;
4634 ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
4635 ctx->seen |= SHMEM_SEEN_BLOCKS;
4636 break;
4637 case Opt_nr_blocks:
4638 ctx->blocks = memparse(param->string, &rest);
4639 if (*rest || ctx->blocks > LONG_MAX)
4640 goto bad_value;
4641 ctx->seen |= SHMEM_SEEN_BLOCKS;
4642 break;
4643 case Opt_nr_inodes:
4644 ctx->inodes = memparse(param->string, &rest);
4645 if (*rest || ctx->inodes > ULONG_MAX / BOGO_INODE_SIZE)
4646 goto bad_value;
4647 ctx->seen |= SHMEM_SEEN_INODES;
4648 break;
4649 case Opt_mode:
4650 ctx->mode = result.uint_32 & 07777;
4651 break;
4652 case Opt_uid:
4653 kuid = result.uid;
4654
4655 /*
4656 * The requested uid must be representable in the
4657 * filesystem's idmapping.
4658 */
4659 if (!kuid_has_mapping(fc->user_ns, kuid))
4660 goto bad_value;
4661
4662 ctx->uid = kuid;
4663 break;
4664 case Opt_gid:
4665 kgid = result.gid;
4666
4667 /*
4668 * The requested gid must be representable in the
4669 * filesystem's idmapping.
4670 */
4671 if (!kgid_has_mapping(fc->user_ns, kgid))
4672 goto bad_value;
4673
4674 ctx->gid = kgid;
4675 break;
4676 case Opt_huge:
4677 ctx->huge = result.uint_32;
4678 if (ctx->huge != SHMEM_HUGE_NEVER &&
4679 !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
4680 has_transparent_hugepage()))
4681 goto unsupported_parameter;
4682 ctx->seen |= SHMEM_SEEN_HUGE;
4683 break;
4684 case Opt_mpol:
4685 if (IS_ENABLED(CONFIG_NUMA)) {
4686 mpol_put(ctx->mpol);
4687 ctx->mpol = NULL;
4688 if (mpol_parse_str(param->string, &ctx->mpol))
4689 goto bad_value;
4690 break;
4691 }
4692 goto unsupported_parameter;
4693 case Opt_inode32:
4694 ctx->full_inums = false;
4695 ctx->seen |= SHMEM_SEEN_INUMS;
4696 break;
4697 case Opt_inode64:
4698 if (sizeof(ino_t) < 8) {
4699 return invalfc(fc,
4700 "Cannot use inode64 with <64bit inums in kernel\n");
4701 }
4702 ctx->full_inums = true;
4703 ctx->seen |= SHMEM_SEEN_INUMS;
4704 break;
4705 case Opt_noswap:
4706 if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN)) {
4707 return invalfc(fc,
4708 "Turning off swap in unprivileged tmpfs mounts unsupported");
4709 }
4710 ctx->noswap = true;
4711 break;
4712 case Opt_quota:
4713 if (fc->user_ns != &init_user_ns)
4714 return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
4715 ctx->seen |= SHMEM_SEEN_QUOTA;
4716 ctx->quota_types |= (QTYPE_MASK_USR | QTYPE_MASK_GRP);
4717 break;
4718 case Opt_usrquota:
4719 if (fc->user_ns != &init_user_ns)
4720 return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
4721 ctx->seen |= SHMEM_SEEN_QUOTA;
4722 ctx->quota_types |= QTYPE_MASK_USR;
4723 break;
4724 case Opt_grpquota:
4725 if (fc->user_ns != &init_user_ns)
4726 return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
4727 ctx->seen |= SHMEM_SEEN_QUOTA;
4728 ctx->quota_types |= QTYPE_MASK_GRP;
4729 break;
4730 case Opt_usrquota_block_hardlimit:
4731 size = memparse(param->string, &rest);
4732 if (*rest || !size)
4733 goto bad_value;
4734 if (size > SHMEM_QUOTA_MAX_SPC_LIMIT)
4735 return invalfc(fc,
4736 "User quota block hardlimit too large.");
4737 ctx->qlimits.usrquota_bhardlimit = size;
4738 break;
4739 case Opt_grpquota_block_hardlimit:
4740 size = memparse(param->string, &rest);
4741 if (*rest || !size)
4742 goto bad_value;
4743 if (size > SHMEM_QUOTA_MAX_SPC_LIMIT)
4744 return invalfc(fc,
4745 "Group quota block hardlimit too large.");
4746 ctx->qlimits.grpquota_bhardlimit = size;
4747 break;
4748 case Opt_usrquota_inode_hardlimit:
4749 size = memparse(param->string, &rest);
4750 if (*rest || !size)
4751 goto bad_value;
4752 if (size > SHMEM_QUOTA_MAX_INO_LIMIT)
4753 return invalfc(fc,
4754 "User quota inode hardlimit too large.");
4755 ctx->qlimits.usrquota_ihardlimit = size;
4756 break;
4757 case Opt_grpquota_inode_hardlimit:
4758 size = memparse(param->string, &rest);
4759 if (*rest || !size)
4760 goto bad_value;
4761 if (size > SHMEM_QUOTA_MAX_INO_LIMIT)
4762 return invalfc(fc,
4763 "Group quota inode hardlimit too large.");
4764 ctx->qlimits.grpquota_ihardlimit = size;
4765 break;
4766 case Opt_casefold_version:
4767 return shmem_parse_opt_casefold(fc, param, false);
4768 case Opt_casefold:
4769 return shmem_parse_opt_casefold(fc, param, true);
4770 case Opt_strict_encoding:
4771 #if IS_ENABLED(CONFIG_UNICODE)
4772 ctx->strict_encoding = true;
4773 break;
4774 #else
4775 return invalfc(fc, "tmpfs: Kernel not built with CONFIG_UNICODE\n");
4776 #endif
4777 }
4778 return 0;
4779
4780 unsupported_parameter:
4781 return invalfc(fc, "Unsupported parameter '%s'", param->key);
4782 bad_value:
4783 return invalfc(fc, "Bad value for '%s'", param->key);
4784 }
4785
4786 static char *shmem_next_opt(char **s)
4787 {
4788 char *sbegin = *s;
4789 char *p;
4790
4791 if (sbegin == NULL)
4792 return NULL;
4793
4794 /*
4795 * NUL-terminate this option: unfortunately,
4796 * mount options form a comma-separated list,
4797 * but mpol's nodelist may also contain commas.
4798 */
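/*
 * Illustration: for "mpol=bind:2,3,size=1G" the comma after "2" is
 * followed by a digit and so is kept as part of the nodelist, while
 * the comma before "size" terminates the option; we return
 * "mpol=bind:2,3" and leave *s pointing at "size=1G".
 */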
4799 for (;;) {
4800 p = strchr(*s, ',');
4801 if (p == NULL)
4802 break;
4803 *s = p + 1;
4804 if (!isdigit(*(p+1))) {
4805 *p = '\0';
4806 return sbegin;
4807 }
4808 }
4809
4810 *s = NULL;
4811 return sbegin;
4812 }
4813
4814 static int shmem_parse_monolithic(struct fs_context *fc, void *data)
4815 {
4816 return vfs_parse_monolithic_sep(fc, data, shmem_next_opt);
4817 }
4818
4819 /*
4820 * Reconfigure a shmem filesystem.
4821 */
4822 static int shmem_reconfigure(struct fs_context *fc)
4823 {
4824 struct shmem_options *ctx = fc->fs_private;
4825 struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
4826 unsigned long used_isp;
4827 struct mempolicy *mpol = NULL;
4828 const char *err;
4829
4830 raw_spin_lock(&sbinfo->stat_lock);
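/* Bogo inode space (in bytes) currently consumed by inodes and xattrs */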
4831 used_isp = sbinfo->max_inodes * BOGO_INODE_SIZE - sbinfo->free_ispace;
4832
4833 if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
4834 if (!sbinfo->max_blocks) {
4835 err = "Cannot retroactively limit size";
4836 goto out;
4837 }
4838 if (percpu_counter_compare(&sbinfo->used_blocks,
4839 ctx->blocks) > 0) {
4840 err = "Too small a size for current use";
4841 goto out;
4842 }
4843 }
4844 if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
4845 if (!sbinfo->max_inodes) {
4846 err = "Cannot retroactively limit inodes";
4847 goto out;
4848 }
4849 if (ctx->inodes * BOGO_INODE_SIZE < used_isp) {
4850 err = "Too few inodes for current use";
4851 goto out;
4852 }
4853 }
4854
4855 if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums &&
4856 sbinfo->next_ino > UINT_MAX) {
4857 err = "Current inum too high to switch to 32-bit inums";
4858 goto out;
4859 }
4860
4861 /*
4862 * "noswap" doesn't use fsparam_flag_no, i.e. there's no "swap"
4863 * counterpart for (re-)enabling swap.
4864 */
4865 if (ctx->noswap && !sbinfo->noswap) {
4866 err = "Cannot disable swap on remount";
4867 goto out;
4868 }
4869
4870 if (ctx->seen & SHMEM_SEEN_QUOTA &&
4871 !sb_any_quota_loaded(fc->root->d_sb)) {
4872 err = "Cannot enable quota on remount";
4873 goto out;
4874 }
4875
4876 #ifdef CONFIG_TMPFS_QUOTA
4877 #define CHANGED_LIMIT(name) \
4878 (ctx->qlimits.name## hardlimit && \
4879 (ctx->qlimits.name## hardlimit != sbinfo->qlimits.name## hardlimit))
4880
4881 if (CHANGED_LIMIT(usrquota_b) || CHANGED_LIMIT(usrquota_i) ||
4882 CHANGED_LIMIT(grpquota_b) || CHANGED_LIMIT(grpquota_i)) {
4883 err = "Cannot change global quota limit on remount";
4884 goto out;
4885 }
4886 #endif /* CONFIG_TMPFS_QUOTA */
4887
4888 if (ctx->seen & SHMEM_SEEN_HUGE)
4889 sbinfo->huge = ctx->huge;
4890 if (ctx->seen & SHMEM_SEEN_INUMS)
4891 sbinfo->full_inums = ctx->full_inums;
4892 if (ctx->seen & SHMEM_SEEN_BLOCKS)
4893 sbinfo->max_blocks = ctx->blocks;
4894 if (ctx->seen & SHMEM_SEEN_INODES) {
4895 sbinfo->max_inodes = ctx->inodes;
4896 sbinfo->free_ispace = ctx->inodes * BOGO_INODE_SIZE - used_isp;
4897 }
4898
4899 /*
4900 * Preserve previous mempolicy unless mpol remount option was specified.
4901 */
4902 if (ctx->mpol) {
4903 mpol = sbinfo->mpol;
4904 sbinfo->mpol = ctx->mpol; /* transfers initial ref */
4905 ctx->mpol = NULL;
4906 }
4907
4908 if (ctx->noswap)
4909 sbinfo->noswap = true;
4910
4911 raw_spin_unlock(&sbinfo->stat_lock);
4912 mpol_put(mpol);
4913 return 0;
4914 out:
4915 raw_spin_unlock(&sbinfo->stat_lock);
4916 return invalfc(fc, "%s", err);
4917 }
4918
4919 static int shmem_show_options(struct seq_file *seq, struct dentry *root)
4920 {
4921 struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
4922 struct mempolicy *mpol;
4923
4924 if (sbinfo->max_blocks != shmem_default_max_blocks())
4925 seq_printf(seq, ",size=%luk", K(sbinfo->max_blocks));
4926 if (sbinfo->max_inodes != shmem_default_max_inodes())
4927 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
4928 if (sbinfo->mode != (0777 | S_ISVTX))
4929 seq_printf(seq, ",mode=%03ho", sbinfo->mode);
4930 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
4931 seq_printf(seq, ",uid=%u",
4932 from_kuid_munged(&init_user_ns, sbinfo->uid));
4933 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
4934 seq_printf(seq, ",gid=%u",
4935 from_kgid_munged(&init_user_ns, sbinfo->gid));
4936
4937 /*
4938 * Showing inode{64,32} might be useful even if it's the system default,
4939 * since then people don't have to resort to checking both here and
4940 * /proc/config.gz to confirm 64-bit inums were successfully applied
4941 * (which may not even exist if IKCONFIG_PROC isn't enabled).
4942 *
4943 * We hide it when inode64 isn't the default and we are using 32-bit
4944 * inodes, since that probably just means the feature isn't even under
4945 * consideration.
4946 *
4947 * As such:
4948 *
4949 * +-----------------+-----------------+
4950 * | TMPFS_INODE64=y | TMPFS_INODE64=n |
4951 * +------------------+-----------------+-----------------+
4952 * | full_inums=true | show | show |
4953 * | full_inums=false | show | hide |
4954 * +------------------+-----------------+-----------------+
4955 *
4956 */
4957 if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums)
4958 seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32));
4959 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4960 /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
4961 if (sbinfo->huge)
4962 seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
4963 #endif
4964 mpol = shmem_get_sbmpol(sbinfo);
4965 shmem_show_mpol(seq, mpol);
4966 mpol_put(mpol);
4967 if (sbinfo->noswap)
4968 seq_printf(seq, ",noswap");
4969 #ifdef CONFIG_TMPFS_QUOTA
4970 if (sb_has_quota_active(root->d_sb, USRQUOTA))
4971 seq_printf(seq, ",usrquota");
4972 if (sb_has_quota_active(root->d_sb, GRPQUOTA))
4973 seq_printf(seq, ",grpquota");
4974 if (sbinfo->qlimits.usrquota_bhardlimit)
4975 seq_printf(seq, ",usrquota_block_hardlimit=%lld",
4976 sbinfo->qlimits.usrquota_bhardlimit);
4977 if (sbinfo->qlimits.grpquota_bhardlimit)
4978 seq_printf(seq, ",grpquota_block_hardlimit=%lld",
4979 sbinfo->qlimits.grpquota_bhardlimit);
4980 if (sbinfo->qlimits.usrquota_ihardlimit)
4981 seq_printf(seq, ",usrquota_inode_hardlimit=%lld",
4982 sbinfo->qlimits.usrquota_ihardlimit);
4983 if (sbinfo->qlimits.grpquota_ihardlimit)
4984 seq_printf(seq, ",grpquota_inode_hardlimit=%lld",
4985 sbinfo->qlimits.grpquota_ihardlimit);
4986 #endif
4987 return 0;
4988 }
4989
4990 #endif /* CONFIG_TMPFS */
4991
4992 static void shmem_put_super(struct super_block *sb)
4993 {
4994 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
4995
4996 #if IS_ENABLED(CONFIG_UNICODE)
4997 if (sb->s_encoding)
4998 utf8_unload(sb->s_encoding);
4999 #endif
5000
5001 #ifdef CONFIG_TMPFS_QUOTA
5002 shmem_disable_quotas(sb);
5003 #endif
5004 free_percpu(sbinfo->ino_batch);
5005 percpu_counter_destroy(&sbinfo->used_blocks);
5006 mpol_put(sbinfo->mpol);
5007 kfree(sbinfo);
5008 sb->s_fs_info = NULL;
5009 }
5010
5011 #if IS_ENABLED(CONFIG_UNICODE) && defined(CONFIG_TMPFS)
5012 static const struct dentry_operations shmem_ci_dentry_ops = {
5013 .d_hash = generic_ci_d_hash,
5014 .d_compare = generic_ci_d_compare,
5015 };
5016 #endif
5017
5018 static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
5019 {
5020 struct shmem_options *ctx = fc->fs_private;
5021 struct inode *inode;
5022 struct shmem_sb_info *sbinfo;
5023 int error = -ENOMEM;
5024
5025 /* Round up to L1_CACHE_BYTES to resist false sharing */
5026 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
5027 L1_CACHE_BYTES), GFP_KERNEL);
5028 if (!sbinfo)
5029 return error;
5030
5031 sb->s_fs_info = sbinfo;
5032
5033 #ifdef CONFIG_TMPFS
5034 /*
5035 * By default we only allow half of the physical RAM per
5036 * tmpfs instance, limiting inodes to one per page of lowmem;
5037 * but the internal instance is left unlimited.
5038 */
5039 if (!(sb->s_flags & SB_KERNMOUNT)) {
5040 if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
5041 ctx->blocks = shmem_default_max_blocks();
5042 if (!(ctx->seen & SHMEM_SEEN_INODES))
5043 ctx->inodes = shmem_default_max_inodes();
5044 if (!(ctx->seen & SHMEM_SEEN_INUMS))
5045 ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64);
5046 sbinfo->noswap = ctx->noswap;
5047 } else {
5048 sb->s_flags |= SB_NOUSER;
5049 }
5050 sb->s_export_op = &shmem_export_ops;
5051 sb->s_flags |= SB_NOSEC;
5052
5053 #if IS_ENABLED(CONFIG_UNICODE)
5054 if (!ctx->encoding && ctx->strict_encoding) {
5055 pr_err("tmpfs: strict_encoding option without encoding is forbidden\n");
5056 error = -EINVAL;
5057 goto failed;
5058 }
5059
5060 if (ctx->encoding) {
5061 sb->s_encoding = ctx->encoding;
5062 set_default_d_op(sb, &shmem_ci_dentry_ops);
5063 if (ctx->strict_encoding)
5064 sb->s_encoding_flags = SB_ENC_STRICT_MODE_FL;
5065 }
5066 #endif
5067
5068 #else
5069 sb->s_flags |= SB_NOUSER;
5070 #endif /* CONFIG_TMPFS */
5071 sb->s_d_flags |= DCACHE_DONTCACHE;
5072 sbinfo->max_blocks = ctx->blocks;
5073 sbinfo->max_inodes = ctx->inodes;
5074 sbinfo->free_ispace = sbinfo->max_inodes * BOGO_INODE_SIZE;
5075 if (sb->s_flags & SB_KERNMOUNT) {
5076 sbinfo->ino_batch = alloc_percpu(ino_t);
5077 if (!sbinfo->ino_batch)
5078 goto failed;
5079 }
5080 sbinfo->uid = ctx->uid;
5081 sbinfo->gid = ctx->gid;
5082 sbinfo->full_inums = ctx->full_inums;
5083 sbinfo->mode = ctx->mode;
5084 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5085 if (ctx->seen & SHMEM_SEEN_HUGE)
5086 sbinfo->huge = ctx->huge;
5087 else
5088 sbinfo->huge = tmpfs_huge;
5089 #endif
5090 sbinfo->mpol = ctx->mpol;
5091 ctx->mpol = NULL;
5092
5093 raw_spin_lock_init(&sbinfo->stat_lock);
5094 if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
5095 goto failed;
5096 spin_lock_init(&sbinfo->shrinklist_lock);
5097 INIT_LIST_HEAD(&sbinfo->shrinklist);
5098
5099 sb->s_maxbytes = MAX_LFS_FILESIZE;
5100 sb->s_blocksize = PAGE_SIZE;
5101 sb->s_blocksize_bits = PAGE_SHIFT;
5102 sb->s_magic = TMPFS_MAGIC;
5103 sb->s_op = &shmem_ops;
5104 sb->s_time_gran = 1;
5105 #ifdef CONFIG_TMPFS_XATTR
5106 sb->s_xattr = shmem_xattr_handlers;
5107 #endif
5108 #ifdef CONFIG_TMPFS_POSIX_ACL
5109 sb->s_flags |= SB_POSIXACL;
5110 #endif
5111 uuid_t uuid;
5112 uuid_gen(&uuid);
5113 super_set_uuid(sb, uuid.b, sizeof(uuid));
5114
5115 #ifdef CONFIG_TMPFS_QUOTA
5116 if (ctx->seen & SHMEM_SEEN_QUOTA) {
5117 sb->dq_op = &shmem_quota_operations;
5118 sb->s_qcop = &dquot_quotactl_sysfile_ops;
5119 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
5120
5121 /* Copy the default limits from ctx into sbinfo */
5122 memcpy(&sbinfo->qlimits, &ctx->qlimits,
5123 sizeof(struct shmem_quota_limits));
5124
5125 if (shmem_enable_quotas(sb, ctx->quota_types))
5126 goto failed;
5127 }
5128 #endif /* CONFIG_TMPFS_QUOTA */
5129
5130 inode = shmem_get_inode(&nop_mnt_idmap, sb, NULL,
5131 S_IFDIR | sbinfo->mode, 0,
5132 mk_vma_flags(VMA_NORESERVE_BIT));
5133 if (IS_ERR(inode)) {
5134 error = PTR_ERR(inode);
5135 goto failed;
5136 }
5137 inode->i_uid = sbinfo->uid;
5138 inode->i_gid = sbinfo->gid;
5139 sb->s_root = d_make_root(inode);
5140 if (!sb->s_root)
5141 goto failed;
5142 return 0;
5143
5144 failed:
5145 shmem_put_super(sb);
5146 return error;
5147 }
5148
5149 static int shmem_get_tree(struct fs_context *fc)
5150 {
5151 return get_tree_nodev(fc, shmem_fill_super);
5152 }
5153
5154 static void shmem_free_fc(struct fs_context *fc)
5155 {
5156 struct shmem_options *ctx = fc->fs_private;
5157
5158 if (ctx) {
5159 mpol_put(ctx->mpol);
5160 kfree(ctx);
5161 }
5162 }
5163
5164 static const struct fs_context_operations shmem_fs_context_ops = {
5165 .free = shmem_free_fc,
5166 .get_tree = shmem_get_tree,
5167 #ifdef CONFIG_TMPFS
5168 .parse_monolithic = shmem_parse_monolithic,
5169 .parse_param = shmem_parse_one,
5170 .reconfigure = shmem_reconfigure,
5171 #endif
5172 };
5173
5174 static struct kmem_cache *shmem_inode_cachep __ro_after_init;
5175
5176 static struct inode *shmem_alloc_inode(struct super_block *sb)
5177 {
5178 struct shmem_inode_info *info;
5179 info = alloc_inode_sb(sb, shmem_inode_cachep, GFP_KERNEL);
5180 if (!info)
5181 return NULL;
5182 return &info->vfs_inode;
5183 }
5184
5185 static void shmem_free_in_core_inode(struct inode *inode)
5186 {
5187 if (S_ISLNK(inode->i_mode))
5188 kfree(inode->i_link);
5189 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
5190 }
5191
5192 static void shmem_destroy_inode(struct inode *inode)
5193 {
5194 if (S_ISREG(inode->i_mode))
5195 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
5196 if (S_ISDIR(inode->i_mode))
5197 simple_offset_destroy(shmem_get_offset_ctx(inode));
5198 }
5199
5200 static void shmem_init_inode(void *foo)
5201 {
5202 struct shmem_inode_info *info = foo;
5203 inode_init_once(&info->vfs_inode);
5204 }
5205
5206 static void __init shmem_init_inodecache(void)
5207 {
5208 shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
5209 sizeof(struct shmem_inode_info),
5210 0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
5211 }
5212
5213 static void __init shmem_destroy_inodecache(void)
5214 {
5215 kmem_cache_destroy(shmem_inode_cachep);
5216 }
5217
5218 /* Keep the page in page cache instead of truncating it */
5219 static int shmem_error_remove_folio(struct address_space *mapping,
5220 struct folio *folio)
5221 {
5222 return 0;
5223 }
5224
5225 static const struct address_space_operations shmem_aops = {
5226 .dirty_folio = noop_dirty_folio,
5227 #ifdef CONFIG_TMPFS
5228 .write_begin = shmem_write_begin,
5229 .write_end = shmem_write_end,
5230 #endif
5231 #ifdef CONFIG_MIGRATION
5232 .migrate_folio = migrate_folio,
5233 #endif
5234 .error_remove_folio = shmem_error_remove_folio,
5235 };
5236
5237 static const struct file_operations shmem_file_operations = {
5238 .mmap_prepare = shmem_mmap_prepare,
5239 .open = shmem_file_open,
5240 .get_unmapped_area = shmem_get_unmapped_area,
5241 #ifdef CONFIG_TMPFS
5242 .llseek = shmem_file_llseek,
5243 .read_iter = shmem_file_read_iter,
5244 .write_iter = shmem_file_write_iter,
5245 .fsync = noop_fsync,
5246 .splice_read = shmem_file_splice_read,
5247 .splice_write = iter_file_splice_write,
5248 .fallocate = shmem_fallocate,
5249 .setlease = generic_setlease,
5250 #endif
5251 };
5252
5253 static const struct inode_operations shmem_inode_operations = {
5254 .getattr = shmem_getattr,
5255 .setattr = shmem_setattr,
5256 #ifdef CONFIG_TMPFS_XATTR
5257 .listxattr = shmem_listxattr,
5258 .set_acl = simple_set_acl,
5259 .fileattr_get = shmem_fileattr_get,
5260 .fileattr_set = shmem_fileattr_set,
5261 #endif
5262 };
5263
5264 static const struct inode_operations shmem_dir_inode_operations = {
5265 #ifdef CONFIG_TMPFS
5266 .getattr = shmem_getattr,
5267 .create = shmem_create,
5268 .lookup = simple_lookup,
5269 .link = shmem_link,
5270 .unlink = shmem_unlink,
5271 .symlink = shmem_symlink,
5272 .mkdir = shmem_mkdir,
5273 .rmdir = shmem_rmdir,
5274 .mknod = shmem_mknod,
5275 .rename = shmem_rename2,
5276 .tmpfile = shmem_tmpfile,
5277 .get_offset_ctx = shmem_get_offset_ctx,
5278 #endif
5279 #ifdef CONFIG_TMPFS_XATTR
5280 .listxattr = shmem_listxattr,
5281 .fileattr_get = shmem_fileattr_get,
5282 .fileattr_set = shmem_fileattr_set,
5283 #endif
5284 #ifdef CONFIG_TMPFS_POSIX_ACL
5285 .setattr = shmem_setattr,
5286 .set_acl = simple_set_acl,
5287 #endif
5288 };
5289
5290 static const struct inode_operations shmem_special_inode_operations = {
5291 .getattr = shmem_getattr,
5292 #ifdef CONFIG_TMPFS_XATTR
5293 .listxattr = shmem_listxattr,
5294 #endif
5295 #ifdef CONFIG_TMPFS_POSIX_ACL
5296 .setattr = shmem_setattr,
5297 .set_acl = simple_set_acl,
5298 #endif
5299 };
5300
5301 static const struct super_operations shmem_ops = {
5302 .alloc_inode = shmem_alloc_inode,
5303 .free_inode = shmem_free_in_core_inode,
5304 .destroy_inode = shmem_destroy_inode,
5305 #ifdef CONFIG_TMPFS
5306 .statfs = shmem_statfs,
5307 .show_options = shmem_show_options,
5308 #endif
5309 #ifdef CONFIG_TMPFS_QUOTA
5310 .get_dquots = shmem_get_dquots,
5311 #endif
5312 .evict_inode = shmem_evict_inode,
5313 .drop_inode = inode_just_drop,
5314 .put_super = shmem_put_super,
5315 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5316 .nr_cached_objects = shmem_unused_huge_count,
5317 .free_cached_objects = shmem_unused_huge_scan,
5318 #endif
5319 };
5320
5321 static const struct vm_operations_struct shmem_vm_ops = {
5322 .fault = shmem_fault,
5323 .map_pages = filemap_map_pages,
5324 #ifdef CONFIG_NUMA
5325 .set_policy = shmem_set_policy,
5326 .get_policy = shmem_get_policy,
5327 #endif
5328 };
5329
5330 static const struct vm_operations_struct shmem_anon_vm_ops = {
5331 .fault = shmem_fault,
5332 .map_pages = filemap_map_pages,
5333 #ifdef CONFIG_NUMA
5334 .set_policy = shmem_set_policy,
5335 .get_policy = shmem_get_policy,
5336 #endif
5337 };
5338
5339 int shmem_init_fs_context(struct fs_context *fc)
5340 {
5341 struct shmem_options *ctx;
5342
5343 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
5344 if (!ctx)
5345 return -ENOMEM;
5346
5347 ctx->mode = 0777 | S_ISVTX;
5348 ctx->uid = current_fsuid();
5349 ctx->gid = current_fsgid();
5350
5351 #if IS_ENABLED(CONFIG_UNICODE)
5352 ctx->encoding = NULL;
5353 #endif
5354
5355 fc->fs_private = ctx;
5356 fc->ops = &shmem_fs_context_ops;
5357 #ifdef CONFIG_TMPFS
5358 fc->sb_flags |= SB_I_VERSION;
5359 #endif
5360 return 0;
5361 }
5362
5363 static struct file_system_type shmem_fs_type = {
5364 .owner = THIS_MODULE,
5365 .name = "tmpfs",
5366 .init_fs_context = shmem_init_fs_context,
5367 #ifdef CONFIG_TMPFS
5368 .parameters = shmem_fs_parameters,
5369 #endif
5370 	.kill_sb	= kill_litter_super,
5371 .fs_flags = FS_USERNS_MOUNT | FS_ALLOW_IDMAP | FS_MGTIME,
5372 };
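/*
 * FS_USERNS_MOUNT allows unprivileged user namespaces to mount their own
 * tmpfs instances, FS_ALLOW_IDMAP permits idmapped mounts of tmpfs, and
 * FS_MGTIME opts the filesystem in to multigrain (fine-grained) timestamps.
 */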
5373
5374 #if defined(CONFIG_SYSFS) && defined(CONFIG_TMPFS)
5375
5376 #define __INIT_KOBJ_ATTR(_name, _mode, _show, _store) \
5377 { \
5378 .attr = { .name = __stringify(_name), .mode = _mode }, \
5379 .show = _show, \
5380 .store = _store, \
5381 }
5382
5383 #define TMPFS_ATTR_W(_name, _store) \
5384 static struct kobj_attribute tmpfs_attr_##_name = \
5385 __INIT_KOBJ_ATTR(_name, 0200, NULL, _store)
5386
5387 #define TMPFS_ATTR_RW(_name, _show, _store) \
5388 static struct kobj_attribute tmpfs_attr_##_name = \
5389 __INIT_KOBJ_ATTR(_name, 0644, _show, _store)
5390
5391 #define TMPFS_ATTR_RO(_name, _show) \
5392 static struct kobj_attribute tmpfs_attr_##_name = \
5393 __INIT_KOBJ_ATTR(_name, 0444, _show, NULL)
5394
5395 #if IS_ENABLED(CONFIG_UNICODE)
5396 static ssize_t casefold_show(struct kobject *kobj, struct kobj_attribute *a,
5397 char *buf)
5398 {
5399 return sysfs_emit(buf, "supported\n");
5400 }
5401 TMPFS_ATTR_RO(casefold, casefold_show);
5402 #endif
5403
5404 static struct attribute *tmpfs_attributes[] = {
5405 #if IS_ENABLED(CONFIG_UNICODE)
5406 &tmpfs_attr_casefold.attr,
5407 #endif
5408 NULL
5409 };
5410
5411 static const struct attribute_group tmpfs_attribute_group = {
5412 .attrs = tmpfs_attributes,
5413 .name = "features"
5414 };
5415
5416 static struct kobject *tmpfs_kobj;
5417
5418 static int __init tmpfs_sysfs_init(void)
5419 {
5420 int ret;
5421
5422 tmpfs_kobj = kobject_create_and_add("tmpfs", fs_kobj);
5423 if (!tmpfs_kobj)
5424 return -ENOMEM;
5425
5426 ret = sysfs_create_group(tmpfs_kobj, &tmpfs_attribute_group);
5427 if (ret)
5428 kobject_put(tmpfs_kobj);
5429
5430 return ret;
5431 }
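/*
 * tmpfs_sysfs_init() publishes a read-only feature group under
 * /sys/fs/tmpfs/features/ (with CONFIG_UNICODE, "features/casefold" reads
 * back "supported").  A minimal sketch of how a further feature flag could
 * be exposed with the helpers above -- "example_feature" is purely
 * illustrative, not an existing attribute:
 *
 *	static ssize_t example_feature_show(struct kobject *kobj,
 *			struct kobj_attribute *a, char *buf)
 *	{
 *		return sysfs_emit(buf, "supported\n");
 *	}
 *	TMPFS_ATTR_RO(example_feature, example_feature_show);
 *
 * &tmpfs_attr_example_feature.attr would then be added to
 * tmpfs_attributes[] ahead of the terminating NULL.
 */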
5432 #endif /* CONFIG_SYSFS && CONFIG_TMPFS */
5433
5434 void __init shmem_init(void)
5435 {
5436 int error;
5437
5438 shmem_init_inodecache();
5439
5440 #ifdef CONFIG_TMPFS_QUOTA
5441 register_quota_format(&shmem_quota_format);
5442 #endif
5443
5444 error = register_filesystem(&shmem_fs_type);
5445 if (error) {
5446 pr_err("Could not register tmpfs\n");
5447 goto out2;
5448 }
5449
5450 shm_mnt = kern_mount(&shmem_fs_type);
5451 if (IS_ERR(shm_mnt)) {
5452 error = PTR_ERR(shm_mnt);
5453 pr_err("Could not kern_mount tmpfs\n");
5454 goto out1;
5455 }
5456
5457 #if defined(CONFIG_SYSFS) && defined(CONFIG_TMPFS)
5458 error = tmpfs_sysfs_init();
5459 if (error) {
5460 pr_err("Could not init tmpfs sysfs\n");
5461 goto out1;
5462 }
5463 #endif
5464
5465 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5466 if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
5467 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
5468 else
5469 shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */
5470
5471 /*
5472 * Default to setting PMD-sized THP to inherit the global setting and
5473 * disable all other multi-size THPs.
5474 */
5475 if (!shmem_orders_configured)
5476 huge_shmem_orders_inherit = BIT(HPAGE_PMD_ORDER);
5477 #endif
5478 return;
5479
5480 out1:
5481 unregister_filesystem(&shmem_fs_type);
5482 out2:
5483 #ifdef CONFIG_TMPFS_QUOTA
5484 unregister_quota_format(&shmem_quota_format);
5485 #endif
5486 shmem_destroy_inodecache();
5487 shm_mnt = ERR_PTR(error);
5488 }
5489
5490 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
5491 static ssize_t shmem_enabled_show(struct kobject *kobj,
5492 struct kobj_attribute *attr, char *buf)
5493 {
5494 static const int values[] = {
5495 SHMEM_HUGE_ALWAYS,
5496 SHMEM_HUGE_WITHIN_SIZE,
5497 SHMEM_HUGE_ADVISE,
5498 SHMEM_HUGE_NEVER,
5499 SHMEM_HUGE_DENY,
5500 SHMEM_HUGE_FORCE,
5501 };
5502 int len = 0;
5503 int i;
5504
5505 for (i = 0; i < ARRAY_SIZE(values); i++) {
5506 len += sysfs_emit_at(buf, len,
5507 shmem_huge == values[i] ? "%s[%s]" : "%s%s",
5508 i ? " " : "", shmem_format_huge(values[i]));
5509 }
5510 len += sysfs_emit_at(buf, len, "\n");
5511
5512 return len;
5513 }
5514
5515 static ssize_t shmem_enabled_store(struct kobject *kobj,
5516 struct kobj_attribute *attr, const char *buf, size_t count)
5517 {
5518 char tmp[16];
5519 int huge, err;
5520
5521 if (count + 1 > sizeof(tmp))
5522 return -EINVAL;
5523 memcpy(tmp, buf, count);
5524 tmp[count] = '\0';
5525 if (count && tmp[count - 1] == '\n')
5526 tmp[count - 1] = '\0';
5527
5528 huge = shmem_parse_huge(tmp);
5529 if (huge == -EINVAL)
5530 return huge;
5531
5532 shmem_huge = huge;
5533 if (shmem_huge > SHMEM_HUGE_DENY)
5534 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
5535
5536 err = start_stop_khugepaged();
5537 return err ? err : count;
5538 }
5539
5540 struct kobj_attribute shmem_enabled_attr = __ATTR_RW(shmem_enabled);
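/*
 * shmem_enabled_attr is registered alongside the other THP knobs and shows
 * up as /sys/kernel/mm/transparent_hugepage/shmem_enabled; writes accept
 * the keywords listed by shmem_enabled_show() above ("always",
 * "within_size", "advise", "never", plus "deny" and "force" for
 * emergencies and testing).
 */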
5541 static DEFINE_SPINLOCK(huge_shmem_orders_lock);
5542
5543 static ssize_t thpsize_shmem_enabled_show(struct kobject *kobj,
5544 struct kobj_attribute *attr, char *buf)
5545 {
5546 int order = to_thpsize(kobj)->order;
5547 const char *output;
5548
5549 if (test_bit(order, &huge_shmem_orders_always))
5550 output = "[always] inherit within_size advise never";
5551 else if (test_bit(order, &huge_shmem_orders_inherit))
5552 output = "always [inherit] within_size advise never";
5553 else if (test_bit(order, &huge_shmem_orders_within_size))
5554 output = "always inherit [within_size] advise never";
5555 else if (test_bit(order, &huge_shmem_orders_madvise))
5556 output = "always inherit within_size [advise] never";
5557 else
5558 output = "always inherit within_size advise [never]";
5559
5560 return sysfs_emit(buf, "%s\n", output);
5561 }
5562
5563 static ssize_t thpsize_shmem_enabled_store(struct kobject *kobj,
5564 struct kobj_attribute *attr,
5565 const char *buf, size_t count)
5566 {
5567 int order = to_thpsize(kobj)->order;
5568 ssize_t ret = count;
5569
5570 if (sysfs_streq(buf, "always")) {
5571 spin_lock(&huge_shmem_orders_lock);
5572 clear_bit(order, &huge_shmem_orders_inherit);
5573 clear_bit(order, &huge_shmem_orders_madvise);
5574 clear_bit(order, &huge_shmem_orders_within_size);
5575 set_bit(order, &huge_shmem_orders_always);
5576 spin_unlock(&huge_shmem_orders_lock);
5577 } else if (sysfs_streq(buf, "inherit")) {
5578 /* Do not override huge allocation policy with non-PMD sized mTHP */
5579 if (shmem_huge == SHMEM_HUGE_FORCE && !is_pmd_order(order))
5580 return -EINVAL;
5581
5582 spin_lock(&huge_shmem_orders_lock);
5583 clear_bit(order, &huge_shmem_orders_always);
5584 clear_bit(order, &huge_shmem_orders_madvise);
5585 clear_bit(order, &huge_shmem_orders_within_size);
5586 set_bit(order, &huge_shmem_orders_inherit);
5587 spin_unlock(&huge_shmem_orders_lock);
5588 } else if (sysfs_streq(buf, "within_size")) {
5589 spin_lock(&huge_shmem_orders_lock);
5590 clear_bit(order, &huge_shmem_orders_always);
5591 clear_bit(order, &huge_shmem_orders_inherit);
5592 clear_bit(order, &huge_shmem_orders_madvise);
5593 set_bit(order, &huge_shmem_orders_within_size);
5594 spin_unlock(&huge_shmem_orders_lock);
5595 } else if (sysfs_streq(buf, "advise")) {
5596 spin_lock(&huge_shmem_orders_lock);
5597 clear_bit(order, &huge_shmem_orders_always);
5598 clear_bit(order, &huge_shmem_orders_inherit);
5599 clear_bit(order, &huge_shmem_orders_within_size);
5600 set_bit(order, &huge_shmem_orders_madvise);
5601 spin_unlock(&huge_shmem_orders_lock);
5602 } else if (sysfs_streq(buf, "never")) {
5603 spin_lock(&huge_shmem_orders_lock);
5604 clear_bit(order, &huge_shmem_orders_always);
5605 clear_bit(order, &huge_shmem_orders_inherit);
5606 clear_bit(order, &huge_shmem_orders_within_size);
5607 clear_bit(order, &huge_shmem_orders_madvise);
5608 spin_unlock(&huge_shmem_orders_lock);
5609 } else {
5610 ret = -EINVAL;
5611 }
5612
5613 if (ret > 0) {
5614 int err = start_stop_khugepaged();
5615
5616 if (err)
5617 ret = err;
5618 }
5619 return ret;
5620 }
5621
5622 struct kobj_attribute thpsize_shmem_enabled_attr =
5623 __ATTR(shmem_enabled, 0644, thpsize_shmem_enabled_show, thpsize_shmem_enabled_store);
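/*
 * thpsize_shmem_enabled_attr backs the per-size knob
 * /sys/kernel/mm/transparent_hugepage/hugepages-<size>kB/shmem_enabled,
 * accepting one of "always", "inherit", "within_size", "advise" or "never"
 * as handled in thpsize_shmem_enabled_store() above.
 */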
5624 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
5625
5626 #if defined(CONFIG_TRANSPARENT_HUGEPAGE)
5627
5628 static int __init setup_transparent_hugepage_shmem(char *str)
5629 {
5630 int huge;
5631
5632 huge = shmem_parse_huge(str);
5633 if (huge == -EINVAL) {
5634 pr_warn("transparent_hugepage_shmem= cannot parse, ignored\n");
5635 return huge;
5636 }
5637
5638 shmem_huge = huge;
5639 return 1;
5640 }
5641 __setup("transparent_hugepage_shmem=", setup_transparent_hugepage_shmem);
5642
5643 static int __init setup_transparent_hugepage_tmpfs(char *str)
5644 {
5645 int huge;
5646
5647 huge = shmem_parse_huge(str);
5648 if (huge < 0) {
5649 pr_warn("transparent_hugepage_tmpfs= cannot parse, ignored\n");
5650 return huge;
5651 }
5652
5653 tmpfs_huge = huge;
5654 return 1;
5655 }
5656 __setup("transparent_hugepage_tmpfs=", setup_transparent_hugepage_tmpfs);
5657
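/*
 * setup_thp_shmem() below parses the "thp_shmem=" boot parameter: a
 * semicolon-separated list of "<sizes>:<policy>" entries, where <sizes> is
 * a comma-separated list of single sizes or "<start>-<end>" ranges and
 * <policy> is one of always, inherit, within_size, advise or never.  An
 * illustrative command line (assuming 4K base pages; not a recommended
 * configuration) could be:
 *
 *	thp_shmem=16K-64K:within_size;2M:advise
 *
 * which would set the 16K..64K orders to within_size and the PMD-sized
 * order to advise.
 */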
5658 static char str_dup[PAGE_SIZE] __initdata;
5659 static int __init setup_thp_shmem(char *str)
5660 {
5661 char *token, *range, *policy, *subtoken;
5662 unsigned long always, inherit, madvise, within_size;
5663 char *start_size, *end_size;
5664 int start, end, nr;
5665 char *p;
5666
5667 if (!str || strlen(str) + 1 > PAGE_SIZE)
5668 goto err;
5669 strscpy(str_dup, str);
5670
5671 always = huge_shmem_orders_always;
5672 inherit = huge_shmem_orders_inherit;
5673 madvise = huge_shmem_orders_madvise;
5674 within_size = huge_shmem_orders_within_size;
5675 p = str_dup;
5676 while ((token = strsep(&p, ";")) != NULL) {
5677 range = strsep(&token, ":");
5678 policy = token;
5679
5680 if (!policy)
5681 goto err;
5682
5683 while ((subtoken = strsep(&range, ",")) != NULL) {
5684 if (strchr(subtoken, '-')) {
5685 start_size = strsep(&subtoken, "-");
5686 end_size = subtoken;
5687
5688 start = get_order_from_str(start_size,
5689 THP_ORDERS_ALL_FILE_DEFAULT);
5690 end = get_order_from_str(end_size,
5691 THP_ORDERS_ALL_FILE_DEFAULT);
5692 } else {
5693 start_size = end_size = subtoken;
5694 start = end = get_order_from_str(subtoken,
5695 THP_ORDERS_ALL_FILE_DEFAULT);
5696 }
5697
5698 if (start < 0) {
5699 pr_err("invalid size %s in thp_shmem boot parameter\n",
5700 start_size);
5701 goto err;
5702 }
5703
5704 if (end < 0) {
5705 pr_err("invalid size %s in thp_shmem boot parameter\n",
5706 end_size);
5707 goto err;
5708 }
5709
5710 if (start > end)
5711 goto err;
5712
5713 nr = end - start + 1;
5714 if (!strcmp(policy, "always")) {
5715 bitmap_set(&always, start, nr);
5716 bitmap_clear(&inherit, start, nr);
5717 bitmap_clear(&madvise, start, nr);
5718 bitmap_clear(&within_size, start, nr);
5719 } else if (!strcmp(policy, "advise")) {
5720 bitmap_set(&madvise, start, nr);
5721 bitmap_clear(&inherit, start, nr);
5722 bitmap_clear(&always, start, nr);
5723 bitmap_clear(&within_size, start, nr);
5724 } else if (!strcmp(policy, "inherit")) {
5725 bitmap_set(&inherit, start, nr);
5726 bitmap_clear(&madvise, start, nr);
5727 bitmap_clear(&always, start, nr);
5728 bitmap_clear(&within_size, start, nr);
5729 } else if (!strcmp(policy, "within_size")) {
5730 bitmap_set(&within_size, start, nr);
5731 bitmap_clear(&inherit, start, nr);
5732 bitmap_clear(&madvise, start, nr);
5733 bitmap_clear(&always, start, nr);
5734 } else if (!strcmp(policy, "never")) {
5735 bitmap_clear(&inherit, start, nr);
5736 bitmap_clear(&madvise, start, nr);
5737 bitmap_clear(&always, start, nr);
5738 bitmap_clear(&within_size, start, nr);
5739 } else {
5740 pr_err("invalid policy %s in thp_shmem boot parameter\n", policy);
5741 goto err;
5742 }
5743 }
5744 }
5745
5746 huge_shmem_orders_always = always;
5747 huge_shmem_orders_madvise = madvise;
5748 huge_shmem_orders_inherit = inherit;
5749 huge_shmem_orders_within_size = within_size;
5750 shmem_orders_configured = true;
5751 return 1;
5752
5753 err:
5754 pr_warn("thp_shmem=%s: error parsing string, ignoring setting\n", str);
5755 return 0;
5756 }
5757 __setup("thp_shmem=", setup_thp_shmem);
5758
5759 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
5760
5761 #else /* !CONFIG_SHMEM */
5762
5763 /*
5764 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
5765 *
5766 * This is intended for small systems where the benefits of the full
5767 * shmem code (swap-backed and resource-limited) are outweighed by
5768 * its complexity. On systems without swap this code should be
5769 * effectively equivalent, but much lighter weight.
5770 */
5771
5772 static struct file_system_type shmem_fs_type = {
5773 .name = "tmpfs",
5774 .init_fs_context = ramfs_init_fs_context,
5775 .parameters = ramfs_fs_parameters,
5776 .kill_sb = ramfs_kill_sb,
5777 .fs_flags = FS_USERNS_MOUNT,
5778 };
5779
5780 void __init shmem_init(void)
5781 {
5782 BUG_ON(register_filesystem(&shmem_fs_type) != 0);
5783
5784 shm_mnt = kern_mount(&shmem_fs_type);
5785 BUG_ON(IS_ERR(shm_mnt));
5786 }
5787
5788 int shmem_unuse(unsigned int type)
5789 {
5790 return 0;
5791 }
5792
5793 int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
5794 {
5795 return 0;
5796 }
5797
5798 void shmem_unlock_mapping(struct address_space *mapping)
5799 {
5800 }
5801
5802 #ifdef CONFIG_MMU
5803 unsigned long shmem_get_unmapped_area(struct file *file,
5804 unsigned long addr, unsigned long len,
5805 unsigned long pgoff, unsigned long flags)
5806 {
5807 	return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);
5808 }
5809 #endif
5810
5811 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
5812 {
5813 truncate_inode_pages_range(inode->i_mapping, lstart, lend);
5814 }
5815 EXPORT_SYMBOL_GPL(shmem_truncate_range);
5816
5817 #define shmem_vm_ops generic_file_vm_ops
5818 #define shmem_anon_vm_ops generic_file_vm_ops
5819 #define shmem_file_operations ramfs_file_operations
5820
5821 static inline int shmem_acct_size(unsigned long flags, loff_t size)
5822 {
5823 return 0;
5824 }
5825
5826 static inline void shmem_unacct_size(unsigned long flags, loff_t size)
5827 {
5828 }
5829
5830 static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap,
5831 struct super_block *sb, struct inode *dir,
5832 umode_t mode, dev_t dev, vma_flags_t flags)
5833 {
5834 struct inode *inode = ramfs_get_inode(sb, dir, mode, dev);
5835 return inode ? inode : ERR_PTR(-ENOSPC);
5836 }
5837
5838 #endif /* CONFIG_SHMEM */
5839
5840 /* common code */
5841
5842 static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name,
5843 loff_t size, vma_flags_t flags,
5844 unsigned int i_flags)
5845 {
5846 const unsigned long shmem_flags =
5847 vma_flags_test(&flags, VMA_NORESERVE_BIT) ? SHMEM_F_NORESERVE : 0;
5848 struct inode *inode;
5849 struct file *res;
5850
5851 if (IS_ERR(mnt))
5852 return ERR_CAST(mnt);
5853
5854 if (size < 0 || size > MAX_LFS_FILESIZE)
5855 return ERR_PTR(-EINVAL);
5856
5857 if (is_idmapped_mnt(mnt))
5858 return ERR_PTR(-EINVAL);
5859
5860 if (shmem_acct_size(shmem_flags, size))
5861 return ERR_PTR(-ENOMEM);
5862
5863 inode = shmem_get_inode(&nop_mnt_idmap, mnt->mnt_sb, NULL,
5864 S_IFREG | S_IRWXUGO, 0, flags);
5865 if (IS_ERR(inode)) {
5866 shmem_unacct_size(shmem_flags, size);
5867 return ERR_CAST(inode);
5868 }
5869 inode->i_flags |= i_flags;
5870 inode->i_size = size;
5871 clear_nlink(inode); /* It is unlinked */
5872 res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
5873 if (!IS_ERR(res))
5874 res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
5875 &shmem_file_operations);
5876 if (IS_ERR(res))
5877 iput(inode);
5878 return res;
5879 }
5880
5881 /**
5882 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
5883 * kernel internal. There will be NO LSM permission checks against the
5884 * underlying inode. So users of this interface must do LSM checks at a
5885 * higher layer. The users are the big_key and shm implementations. LSM
5886 * checks are provided at the key or shm level rather than the inode.
5887 * @name: name for dentry (to be seen in /proc/<pid>/maps)
5888 * @size: size to be set for the file
5889 * @flags: VMA_NORESERVE_BIT suppresses pre-accounting of the entire object size
5890 */
5891 struct file *shmem_kernel_file_setup(const char *name, loff_t size,
5892 vma_flags_t flags)
5893 {
5894 return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
5895 }
5896 EXPORT_SYMBOL_GPL(shmem_kernel_file_setup);
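/*
 * A minimal usage sketch for a kernel-internal consumer (in the spirit of
 * big_key); the zero flags argument, the "example" name and the error
 * handling are illustrative assumptions rather than an excerpt from any
 * existing caller:
 *
 *	struct file *file;
 *	loff_t pos = 0;
 *
 *	file = shmem_kernel_file_setup("example", PAGE_SIZE, 0);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	kernel_write(file, payload, payload_len, &pos);
 *	fput(file);
 *
 * The file is unlinked, so its backing pages are released once the final
 * reference (here, the fput()) goes away.
 */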
5897
5898 /**
5899 * shmem_file_setup - get an unlinked file living in tmpfs
5900 * @name: name for dentry (to be seen in /proc/<pid>/maps)
5901 * @size: size to be set for the file
5902 * @flags: VMA_NORESERVE_BIT suppresses pre-accounting of the entire object size
5903 */
5904 struct file *shmem_file_setup(const char *name, loff_t size, vma_flags_t flags)
5905 {
5906 return __shmem_file_setup(shm_mnt, name, size, flags, 0);
5907 }
5908 EXPORT_SYMBOL_GPL(shmem_file_setup);
5909
5910 /**
5911 * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
5912 * @mnt: the tmpfs mount where the file will be created
5913 * @name: name for dentry (to be seen in /proc/<pid>/maps)
5914 * @size: size to be set for the file
5915 * @flags: VMA_NORESERVE_BIT suppresses pre-accounting of the entire object size
5916 */
5917 struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
5918 loff_t size, vma_flags_t flags)
5919 {
5920 return __shmem_file_setup(mnt, name, size, flags, 0);
5921 }
5922 EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
5923
5924 static struct file *__shmem_zero_setup(unsigned long start, unsigned long end,
5925 vma_flags_t flags)
5926 {
5927 loff_t size = end - start;
5928
5929 /*
5930 * Cloning a new file under mmap_lock leads to a lock ordering conflict
5931 * between XFS directory reading and selinux: since this file is only
5932 * accessible to the user through its mapping, use S_PRIVATE flag to
5933 * bypass file security, in the same way as shmem_kernel_file_setup().
5934 */
5935 return shmem_kernel_file_setup("dev/zero", size, flags);
5936 }
5937
5938 /**
5939 * shmem_zero_setup - setup a shared anonymous mapping
5940 * @vma: the vma to be mmapped is prepared by do_mmap
5941 * Returns: 0 on success, or error
5942 */
5943 int shmem_zero_setup(struct vm_area_struct *vma)
5944 {
5945 struct file *file = __shmem_zero_setup(vma->vm_start, vma->vm_end, vma->flags);
5946
5947 if (IS_ERR(file))
5948 return PTR_ERR(file);
5949
5950 if (vma->vm_file)
5951 fput(vma->vm_file);
5952 vma->vm_file = file;
5953 vma->vm_ops = &shmem_anon_vm_ops;
5954
5955 return 0;
5956 }
5957
5958 /**
5959 * shmem_zero_setup_desc - same as shmem_zero_setup, but takes a VMA
5960 * descriptor rather than a VMA.
5961 * @desc: Describes VMA
5962 * Returns: 0 on success, or error
5963 */
5964 int shmem_zero_setup_desc(struct vm_area_desc *desc)
5965 {
5966 struct file *file = __shmem_zero_setup(desc->start, desc->end, desc->vma_flags);
5967
5968 if (IS_ERR(file))
5969 return PTR_ERR(file);
5970
5971 desc->vm_file = file;
5972 desc->vm_ops = &shmem_anon_vm_ops;
5973
5974 return 0;
5975 }
5976
5977 /**
5978 * shmem_read_folio_gfp - read into page cache, using specified page allocation flags.
5979 * @mapping: the folio's address_space
5980 * @index: the folio index
5981 * @gfp: the page allocator flags to use if allocating
5982 *
5983 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
5984 * with any new page allocations done using the specified allocation flags.
5985 * But read_cache_page_gfp() uses the ->read_folio() method: which does not
5986 * suit tmpfs, since it may have pages in swapcache, and needs to find those
5987 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
5988 *
5989 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
5990 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
5991 */
5992 struct folio *shmem_read_folio_gfp(struct address_space *mapping,
5993 pgoff_t index, gfp_t gfp)
5994 {
5995 #ifdef CONFIG_SHMEM
5996 struct inode *inode = mapping->host;
5997 struct folio *folio;
5998 int error;
5999
6000 error = shmem_get_folio_gfp(inode, index, i_size_read(inode),
6001 &folio, SGP_CACHE, gfp, NULL, NULL);
6002 if (error)
6003 return ERR_PTR(error);
6004
6005 folio_unlock(folio);
6006 return folio;
6007 #else
6008 /*
6009 * The tiny !SHMEM case uses ramfs without swap
6010 */
6011 return mapping_read_folio_gfp(mapping, index, gfp);
6012 #endif
6013 }
6014 EXPORT_SYMBOL_GPL(shmem_read_folio_gfp);
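/*
 * A minimal usage sketch in the style of the i915/ttm callers mentioned
 * above: bring (possibly swapped-out) shmem contents back into the page
 * cache without risking an OOM kill if allocation fails.  The gfp
 * combination mirrors the comment above; the surrounding code is an
 * illustrative assumption, not an excerpt from either driver:
 *
 *	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;
 *	struct folio *folio = shmem_read_folio_gfp(mapping, index, gfp);
 *
 *	if (IS_ERR(folio))
 *		return PTR_ERR(folio);
 *	... access the folio, e.g. via kmap_local_folio() ...
 *	folio_put(folio);
 */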
6015
6016 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
6017 pgoff_t index, gfp_t gfp)
6018 {
6019 struct folio *folio = shmem_read_folio_gfp(mapping, index, gfp);
6020 struct page *page;
6021
6022 if (IS_ERR(folio))
6023 return &folio->page;
6024
6025 page = folio_file_page(folio, index);
6026 if (PageHWPoison(page)) {
6027 folio_put(folio);
6028 return ERR_PTR(-EIO);
6029 }
6030
6031 return page;
6032 }
6033 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
6034