// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/mincore.c
 *
 * Copyright (C) 1994-2006 Linus Torvalds
 */

/*
 * The mincore() system call.
 */
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/pagewalk.h>
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>

#include <linux/uaccess.h>
#include "swap.h"
#include "internal.h"

static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
			   unsigned long end, struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned char present;
	unsigned char *vec = walk->private;
	spinlock_t *ptl;

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	/*
	 * Hugetlb pages mapped into a user process are in practice always
	 * resident and never swapped out, but check the pte anyway for
	 * correctness.
	 */
	present = pte && !huge_pte_none_mostly(huge_ptep_get(walk->mm, addr, pte));
	for (; addr != end; vec++, addr += PAGE_SIZE)
		*vec = present;
	walk->private = vec;
	spin_unlock(ptl);
#else
	BUG();
#endif
	return 0;
}

/*
 * Later we can get more picky about what "in core" means precisely.
 * For now, simply check to see if the page is in the page cache,
 * and is up to date; i.e. that no page-in operation would be required
 * at this time if an application were to map and access this page.
 */
static unsigned char mincore_page(struct address_space *mapping, pgoff_t index)
{
	unsigned char present = 0;
	struct folio *folio;

	/*
	 * When tmpfs swaps out a page from a file, any process mapping that
	 * file will not get a swp_entry_t in its pte, but rather it is like
	 * any other file mapping (ie. marked !present and faulted in with
	 * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
	 */
	folio = filemap_get_incore_folio(mapping, index);
	if (!IS_ERR(folio)) {
		present = folio_test_uptodate(folio);
		folio_put(folio);
	}

	return present;
}

static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
				    struct vm_area_struct *vma, unsigned char *vec)
{
	unsigned long nr = (end - addr) >> PAGE_SHIFT;
	int i;

	if (vma->vm_file) {
		pgoff_t pgoff;

		pgoff = linear_page_index(vma, addr);
		for (i = 0; i < nr; i++, pgoff++)
			vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
	} else {
		for (i = 0; i < nr; i++)
			vec[i] = 0;
	}
	return nr;
}

static int mincore_unmapped_range(unsigned long addr, unsigned long end,
				  __always_unused int depth,
				  struct mm_walk *walk)
{
	walk->private += __mincore_unmapped_range(addr, end,
						  walk->vma, walk->private);
	return 0;
}

static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	spinlock_t *ptl;
	struct vm_area_struct *vma = walk->vma;
	pte_t *ptep;
	unsigned char *vec = walk->private;
	int nr = (end - addr) >> PAGE_SHIFT;
	int step, i;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		memset(vec, 1, nr);
		spin_unlock(ptl);
		goto out;
	}

	ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!ptep) {
		walk->action = ACTION_AGAIN;
		return 0;
	}
	for (; addr != end; ptep += step, addr += step * PAGE_SIZE) {
		pte_t pte = ptep_get(ptep);

		step = 1;
		/* We need to do cache lookup too for pte markers */
		if (pte_none_mostly(pte))
			__mincore_unmapped_range(addr, addr + PAGE_SIZE,
						 vma, vec);
		else if (pte_present(pte)) {
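			/*
			 * pte_batch_hint() may report a batch of ptes
			 * mapping physically contiguous pages (e.g. arm64
			 * contpte mappings); every pte in such a batch is
			 * present, so the whole batch can be marked
			 * resident in one step.
			 */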
			unsigned int batch = pte_batch_hint(ptep, pte);

			if (batch > 1) {
				unsigned int max_nr = (end - addr) >> PAGE_SHIFT;

				step = min_t(unsigned int, batch, max_nr);
			}

			for (i = 0; i < step; i++)
				vec[i] = 1;
		} else { /* pte is a swap entry */
			swp_entry_t entry = pte_to_swp_entry(pte);

			if (non_swap_entry(entry)) {
				/*
				 * migration or hwpoison entries are always
				 * uptodate
				 */
				*vec = 1;
			} else {
#ifdef CONFIG_SWAP
				*vec = mincore_page(swap_address_space(entry),
						    swap_cache_index(entry));
#else
				WARN_ON(1);
				*vec = 1;
#endif
			}
		}
		vec += step;
	}
	pte_unmap_unlock(ptep - 1, ptl);
out:
	walk->private += nr;
	cond_resched();
	return 0;
}

static inline bool can_do_mincore(struct vm_area_struct *vma)
{
	if (vma_is_anonymous(vma))
		return true;
	if (!vma->vm_file)
		return false;
	/*
	 * Reveal pagecache information only for non-anonymous mappings that
	 * correspond to the files the calling process could (if tried) open
	 * for writing; otherwise we'd be including shared non-exclusive
	 * mappings, which opens a side channel.
	 */
	return inode_owner_or_capable(&nop_mnt_idmap,
				      file_inode(vma->vm_file)) ||
	       file_permission(vma->vm_file, MAY_WRITE) == 0;
}

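/*
 * mincore_pte_range() is installed as ->pmd_entry, so it handles
 * PMD-mapped huge pages and ordinary pte tables itself; unmapped
 * holes in the walked range are reported through ->pte_hole.
 */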
static const struct mm_walk_ops mincore_walk_ops = {
	.pmd_entry = mincore_pte_range,
	.pte_hole = mincore_unmapped_range,
	.hugetlb_entry = mincore_hugetlb,
	.walk_lock = PGWALK_RDLOCK,
};

/*
 * Do a chunk of "sys_mincore()". We've already checked
 * all the arguments, and we hold the mmap read lock: we should
 * just return the amount of info we're asked for.
 */
static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
{
	struct vm_area_struct *vma;
	unsigned long end;
	int err;

	vma = vma_lookup(current->mm, addr);
	if (!vma)
		return -ENOMEM;
	end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
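	/*
	 * When residency must not be revealed for this mapping (see
	 * can_do_mincore()), report every page as resident rather than
	 * failing: the pagecache side channel stays closed while existing
	 * callers keep working.
	 */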
	if (!can_do_mincore(vma)) {
		unsigned long pages = DIV_ROUND_UP(end - addr, PAGE_SIZE);

		memset(vec, 1, pages);
		return pages;
	}
	err = walk_page_range(vma->vm_mm, addr, end, &mincore_walk_ops, vec);
	if (err < 0)
		return err;
	return (end - addr) >> PAGE_SHIFT;
}

/*
 * The mincore(2) system call.
 *
 * mincore() returns the memory residency status of the pages in the
 * current process's address space specified by [addr, addr + len).
 * The status is returned in a vector of bytes. The least significant
 * bit of each byte is 1 if the referenced page is in memory, otherwise
 * it is zero.
 *
 * Because the status of a page can change after mincore() checks it
 * but before it returns to the application, the returned vector may
 * contain stale information. Only locked pages are guaranteed to
 * remain in memory.
 *
 * return values:
 *  zero    - success
 *  -EFAULT - vec points to an illegal address
 *  -EINVAL - addr is not a multiple of PAGE_SIZE
 *  -ENOMEM - Addresses in the range [addr, addr + len) are
 *	      invalid for the address space of this process, or
 *	      specify one or more pages which are not currently
 *	      mapped
 *  -EAGAIN - A kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
		unsigned char __user *, vec)
{
	long retval;
	unsigned long pages;
	unsigned char *tmp;

	start = untagged_addr(start);

	/* Check the start address: needs to be page-aligned.. */
	if (unlikely(start & ~PAGE_MASK))
		return -EINVAL;

	/* ..and we need to be passed a valid user-space range */
	if (!access_ok((void __user *) start, len))
		return -ENOMEM;

	/* This also avoids any overflows on PAGE_ALIGN */
	pages = len >> PAGE_SHIFT;
	pages += (offset_in_page(len)) != 0;

	if (!access_ok(vec, pages))
		return -EFAULT;

	tmp = (void *) __get_free_page(GFP_USER);
	if (!tmp)
		return -EAGAIN;

	retval = 0;
	while (pages) {
		/*
		 * Do at most PAGE_SIZE entries per iteration, due to
		 * the temporary buffer size.
		 */
		mmap_read_lock(current->mm);
		retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
		mmap_read_unlock(current->mm);

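		/*
		 * do_mincore() stops at the end of the vma it found, so a
		 * positive retval may cover fewer pages than requested;
		 * loop to continue with the next chunk.
		 */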
		if (retval <= 0)
			break;
		if (copy_to_user(vec, tmp, retval)) {
			retval = -EFAULT;
			break;
		}
		pages -= retval;
		vec += retval;
		start += retval << PAGE_SHIFT;
		retval = 0;
	}
	free_page((unsigned long) tmp);
	return retval;
}
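
/*
 * Illustrative userspace sketch (not part of this file): one way an
 * application might call mincore(2) per the semantics documented above.
 * The mapping, buffer sizes, and output format are assumptions made for
 * the example, which probes an 8-page anonymous mapping after touching
 * only its first page.
 *
 *	#define _DEFAULT_SOURCE		// for mincore() in glibc
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		long page = sysconf(_SC_PAGESIZE);
 *		size_t npages = 8, len = npages * page;
 *		unsigned char vec[8];
 *		char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		if (buf == MAP_FAILED)
 *			return 1;
 *		buf[0] = 1;			// fault in the first page only
 *		if (mincore(buf, len, vec) == 0)
 *			for (size_t i = 0; i < npages; i++)
 *				printf("page %zu: %s\n", i,
 *				       (vec[i] & 1) ? "resident" : "not resident");
 *		munmap(buf, len);
 *		return 0;
 *	}
 *
 * Only the low bit of each vec entry is meaningful. The expected output
 * reports page 0 as resident and the untouched pages as not resident,
 * subject to the staleness caveat documented above the syscall.
 */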