/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SHMEM_FS_H
#define __SHMEM_FS_H

#include <linux/file.h>
#include <linux/swap.h>
#include <linux/mempolicy.h>
#include <linux/pagemap.h>
#include <linux/percpu_counter.h>
#include <linux/xattr.h>
#include <linux/fs_parser.h>
#include <linux/userfaultfd_k.h>

struct swap_iocb;

/* inode in-kernel data */

#ifdef CONFIG_TMPFS_QUOTA
#define SHMEM_MAXQUOTAS 2
#endif

struct shmem_inode_info {
	spinlock_t		lock;
	unsigned int		seals;		/* shmem seals */
	unsigned long		flags;
	unsigned long		alloced;	/* data pages alloced to file */
	unsigned long		swapped;	/* subtotal assigned to swap */
	union {
	    struct offset_ctx	dir_offsets;	/* stable directory offsets */
	    struct {
		struct list_head shrinklist;	/* shrinkable hpage inodes */
		struct list_head swaplist;	/* chain of maybes on swap */
	    };
	};
	struct timespec64	i_crtime;	/* file creation time */
	struct shared_policy	policy;		/* NUMA memory alloc policy */
	struct simple_xattrs	xattrs;		/* list of xattrs */
	pgoff_t			fallocend;	/* highest fallocate endindex */
	unsigned int		fsflags;	/* for FS_IOC_[SG]ETFLAGS */
	atomic_t		stop_eviction;	/* hold when working on inode */
#ifdef CONFIG_TMPFS_QUOTA
	struct dquot __rcu	*i_dquot[MAXQUOTAS];
#endif
	struct inode		vfs_inode;
};

#define SHMEM_FL_USER_VISIBLE		(FS_FL_USER_VISIBLE | FS_CASEFOLD_FL)
#define SHMEM_FL_USER_MODIFIABLE \
	(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL | FS_NOATIME_FL | FS_CASEFOLD_FL)
#define SHMEM_FL_INHERITED		(FS_NODUMP_FL | FS_NOATIME_FL | FS_CASEFOLD_FL)

struct shmem_quota_limits {
	qsize_t usrquota_bhardlimit; /* Default user quota block hard limit */
	qsize_t usrquota_ihardlimit; /* Default user quota inode hard limit */
	qsize_t grpquota_bhardlimit; /* Default group quota block hard limit */
	qsize_t grpquota_ihardlimit; /* Default group quota inode hard limit */
};

struct shmem_sb_info {
	unsigned long max_blocks;   /* How many blocks are allowed */
	struct percpu_counter used_blocks;  /* How many are allocated */
	unsigned long max_inodes;   /* How many inodes are allowed */
	unsigned long free_ispace;  /* How much ispace left for allocation */
	raw_spinlock_t stat_lock;   /* Serialize shmem_sb_info changes */
	umode_t mode;		    /* Mount mode for root directory */
	unsigned char huge;	    /* Whether to try for hugepages */
	kuid_t uid;		    /* Mount uid for root directory */
	kgid_t gid;		    /* Mount gid for root directory */
	bool full_inums;	    /* If i_ino should be uint or ino_t */
	bool noswap;		    /* ignores VM reclaim / swap requests */
	ino_t next_ino;		    /* The next per-sb inode number to use */
	ino_t __percpu *ino_batch;  /* The next per-cpu inode number to use */
	struct mempolicy *mpol;     /* default memory policy for mappings */
	spinlock_t shrinklist_lock;   /* Protects shrinklist */
	struct list_head shrinklist;  /* List of shrinkable inodes */
	unsigned long shrinklist_len; /* Length of shrinklist */
	struct shmem_quota_limits qlimits; /* Default quota limits */
};

static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
{
	return container_of(inode, struct shmem_inode_info, vfs_inode);
}
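
/*
 * Example (sketch, not from mm/shmem.c): code that already knows an inode
 * is shmem-backed can reach the shmem-private state with SHMEM_I(), e.g.
 * to consult the memfd seals:
 *
 *	struct shmem_inode_info *info = SHMEM_I(inode);
 *
 *	if (info->seals & F_SEAL_WRITE)
 *		return -EPERM;
 */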

/*
 * Functions in mm/shmem.c called directly from elsewhere:
 */
extern const struct fs_parameter_spec shmem_fs_parameters[];
extern void shmem_init(void);
extern int shmem_init_fs_context(struct fs_context *fc);
extern struct file *shmem_file_setup(const char *name,
					loff_t size, unsigned long flags);
extern struct file *shmem_kernel_file_setup(const char *name, loff_t size,
					    unsigned long flags);
extern struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt,
		const char *name, loff_t size, unsigned long flags);
extern int shmem_zero_setup(struct vm_area_struct *);
extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags);
extern int shmem_lock(struct file *file, int lock, struct ucounts *ucounts);
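
/*
 * Example (sketch): a driver allocating an unlinked tmpfs file as backing
 * store for a buffer, in the style of GEM drivers.  The name, size and
 * error handling here are illustrative only.
 *
 *	struct file *filp;
 *
 *	filp = shmem_file_setup("drv-buffer", SZ_4M, VM_NORESERVE);
 *	if (IS_ERR(filp))
 *		return PTR_ERR(filp);
 *	...
 *	fput(filp);
 */
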
#ifdef CONFIG_SHMEM
bool shmem_mapping(struct address_space *mapping);
#else
static inline bool shmem_mapping(struct address_space *mapping)
{
	return false;
}
#endif /* CONFIG_SHMEM */
void shmem_unlock_mapping(struct address_space *mapping);
struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
					pgoff_t index, gfp_t gfp_mask);
int shmem_writeout(struct folio *folio, struct swap_iocb **plug,
		struct list_head *folio_list);
void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
int shmem_unuse(unsigned int type);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
unsigned long shmem_allowable_huge_orders(struct inode *inode,
				struct vm_area_struct *vma, pgoff_t index,
				loff_t write_end, bool shmem_huge_force);
bool shmem_hpage_pmd_enabled(void);
#else
static inline unsigned long shmem_allowable_huge_orders(struct inode *inode,
				struct vm_area_struct *vma, pgoff_t index,
				loff_t write_end, bool shmem_huge_force)
{
	return 0;
}

static inline bool shmem_hpage_pmd_enabled(void)
{
	return false;
}
#endif

#ifdef CONFIG_SHMEM
extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
#else
static inline unsigned long shmem_swap_usage(struct vm_area_struct *vma)
{
	return 0;
}
#endif
extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
						pgoff_t start, pgoff_t end);

/* Flag allocation requirements to shmem_get_folio */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_NOALLOC,	/* similar, but fail on hole or use fallocated page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_WRITE,	/* may exceed i_size, may allocate !Uptodate page */
	SGP_FALLOC,	/* like SGP_WRITE, but make existing page Uptodate */
};

int shmem_get_folio(struct inode *inode, pgoff_t index, loff_t write_end,
		struct folio **foliop, enum sgp_type sgp);
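
/*
 * Example (sketch): looking up, and with SGP_CACHE allocating, the folio
 * at @index of a shmem inode.  On success the folio is returned locked
 * and with a reference held; with SGP_READ a hole may legitimately return
 * 0 with *foliop left NULL.
 *
 *	struct folio *folio;
 *	int err;
 *
 *	err = shmem_get_folio(inode, index, 0, &folio, SGP_CACHE);
 *	if (err)
 *		return err;
 *	folio_unlock(folio);
 *	...
 *	folio_put(folio);
 */
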
struct folio *shmem_read_folio_gfp(struct address_space *mapping,
		pgoff_t index, gfp_t gfp);

static inline struct folio *shmem_read_folio(struct address_space *mapping,
		pgoff_t index)
{
	return shmem_read_folio_gfp(mapping, index, mapping_gfp_mask(mapping));
}

static inline struct page *shmem_read_mapping_page(
				struct address_space *mapping, pgoff_t index)
{
	return shmem_read_mapping_page_gfp(mapping, index,
					mapping_gfp_mask(mapping));
}
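
/*
 * Example (sketch): reading pages of a shmem file back by index, as GPU
 * drivers do when populating a scatterlist from a shmem-backed object.
 * The mapping and the index are assumed to come from the caller.
 *
 *	struct page *page;
 *
 *	page = shmem_read_mapping_page(mapping, i);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	...
 *	put_page(page);
 */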

static inline bool shmem_file(struct file *file)
{
	if (!IS_ENABLED(CONFIG_SHMEM))
		return false;
	if (!file || !file->f_mapping)
		return false;
	return shmem_mapping(file->f_mapping);
}
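
/*
 * Example (sketch): mm code that must special-case shmem-backed VMAs can
 * test the backing file directly; NULL files (anonymous mappings) are
 * handled by shmem_file() itself.
 *
 *	if (shmem_file(vma->vm_file))
 *		return handle_tmpfs_case(vma);
 *
 * (handle_tmpfs_case() is a hypothetical helper.)
 */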

/*
 * If fallocate(FALLOC_FL_KEEP_SIZE) has been used, there may be pages
 * beyond i_size's notion of EOF, which fallocate has committed to reserving,
 * and which split_huge_page() must therefore not delete.  This use of a
 * single "fallocend" per inode errs on the side of not deleting a
 * reservation when in doubt: there are plenty of cases when it preserves
 * unreserved pages.
 */
static inline pgoff_t shmem_fallocend(struct inode *inode, pgoff_t eof)
{
	return max(eof, SHMEM_I(inode)->fallocend);
}
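
/*
 * Example (sketch): a truncation or split path clamping its end index so
 * that KEEP_SIZE reservations beyond EOF survive; exact call sites vary
 * between kernel versions.
 *
 *	pgoff_t end = shmem_fallocend(inode,
 *			DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE));
 */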

extern bool shmem_charge(struct inode *inode, long pages);
extern void shmem_uncharge(struct inode *inode, long pages);

#ifdef CONFIG_USERFAULTFD
#ifdef CONFIG_SHMEM
extern int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
				  struct vm_area_struct *dst_vma,
				  unsigned long dst_addr,
				  unsigned long src_addr,
				  uffd_flags_t flags,
				  struct folio **foliop);
#else /* !CONFIG_SHMEM */
#define shmem_mfill_atomic_pte(dst_pmd, dst_vma, dst_addr, \
			       src_addr, flags, foliop) ({ BUG(); 0; })
#endif /* CONFIG_SHMEM */
#endif /* CONFIG_USERFAULTFD */

/*
 * Used space is stored as an unsigned 64-bit value in bytes, but the
 * quota core supports only signed 64-bit values, so use that as the
 * limit.
 */
#define SHMEM_QUOTA_MAX_SPC_LIMIT 0x7fffffffffffffffLL /* 2^63-1 */
#define SHMEM_QUOTA_MAX_INO_LIMIT 0x7fffffffffffffffLL

#ifdef CONFIG_TMPFS_QUOTA
extern const struct dquot_operations shmem_quota_operations;
extern struct quota_format_type shmem_quota_format;
#endif /* CONFIG_TMPFS_QUOTA */

#endif /* __SHMEM_FS_H */