1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/pagewalk.h>
3 #include <linux/mm_inline.h>
4 #include <linux/hugetlb.h>
5 #include <linux/huge_mm.h>
6 #include <linux/mount.h>
7 #include <linux/ksm.h>
8 #include <linux/seq_file.h>
9 #include <linux/highmem.h>
10 #include <linux/ptrace.h>
11 #include <linux/slab.h>
12 #include <linux/pagemap.h>
13 #include <linux/mempolicy.h>
14 #include <linux/rmap.h>
15 #include <linux/swap.h>
16 #include <linux/sched/mm.h>
17 #include <linux/swapops.h>
18 #include <linux/mmu_notifier.h>
19 #include <linux/page_idle.h>
20 #include <linux/shmem_fs.h>
21 #include <linux/uaccess.h>
22 #include <linux/pkeys.h>
23 #include <linux/minmax.h>
24 #include <linux/overflow.h>
25 #include <linux/buildid.h>
26
27 #include <asm/elf.h>
28 #include <asm/tlb.h>
29 #include <asm/tlbflush.h>
30 #include "internal.h"
31
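/*
 * Note: the counters fed to SEQ_PUT_DEC below are page counts, so
 * "val << (PAGE_SHIFT - 10)" converts pages to KiB (e.g. with 4 KiB
 * pages this is simply val * 4).
 */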
32 #define SEQ_PUT_DEC(str, val) \
33 seq_put_decimal_ull_width(m, str, (val) << (PAGE_SHIFT-10), 8)
void task_mem(struct seq_file *m, struct mm_struct *mm)
35 {
36 unsigned long text, lib, swap, anon, file, shmem;
37 unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
38
39 anon = get_mm_counter(mm, MM_ANONPAGES);
40 file = get_mm_counter(mm, MM_FILEPAGES);
41 shmem = get_mm_counter(mm, MM_SHMEMPAGES);
42
43 /*
44 * Note: to minimize their overhead, mm maintains hiwater_vm and
45 * hiwater_rss only when about to *lower* total_vm or rss. Any
46 * collector of these hiwater stats must therefore get total_vm
47 * and rss too, which will usually be the higher. Barriers? not
48 * worth the effort, such snapshots can always be inconsistent.
49 */
50 hiwater_vm = total_vm = mm->total_vm;
51 if (hiwater_vm < mm->hiwater_vm)
52 hiwater_vm = mm->hiwater_vm;
53 hiwater_rss = total_rss = anon + file + shmem;
54 if (hiwater_rss < mm->hiwater_rss)
55 hiwater_rss = mm->hiwater_rss;
56
57 /* split executable areas between text and lib */
58 text = PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK);
59 text = min(text, mm->exec_vm << PAGE_SHIFT);
60 lib = (mm->exec_vm << PAGE_SHIFT) - text;
61
62 swap = get_mm_counter(mm, MM_SWAPENTS);
63 SEQ_PUT_DEC("VmPeak:\t", hiwater_vm);
64 SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm);
65 SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm);
66 SEQ_PUT_DEC(" kB\nVmPin:\t", atomic64_read(&mm->pinned_vm));
67 SEQ_PUT_DEC(" kB\nVmHWM:\t", hiwater_rss);
68 SEQ_PUT_DEC(" kB\nVmRSS:\t", total_rss);
69 SEQ_PUT_DEC(" kB\nRssAnon:\t", anon);
70 SEQ_PUT_DEC(" kB\nRssFile:\t", file);
71 SEQ_PUT_DEC(" kB\nRssShmem:\t", shmem);
72 SEQ_PUT_DEC(" kB\nVmData:\t", mm->data_vm);
73 SEQ_PUT_DEC(" kB\nVmStk:\t", mm->stack_vm);
74 seq_put_decimal_ull_width(m,
75 " kB\nVmExe:\t", text >> 10, 8);
76 seq_put_decimal_ull_width(m,
77 " kB\nVmLib:\t", lib >> 10, 8);
78 seq_put_decimal_ull_width(m,
79 " kB\nVmPTE:\t", mm_pgtables_bytes(mm) >> 10, 8);
80 SEQ_PUT_DEC(" kB\nVmSwap:\t", swap);
81 seq_puts(m, " kB\n");
82 hugetlb_report_usage(m, mm);
83 }
84 #undef SEQ_PUT_DEC
85
unsigned long task_vsize(struct mm_struct *mm)
87 {
88 return PAGE_SIZE * mm->total_vm;
89 }
90
unsigned long task_statm(struct mm_struct *mm,
92 unsigned long *shared, unsigned long *text,
93 unsigned long *data, unsigned long *resident)
94 {
95 *shared = get_mm_counter(mm, MM_FILEPAGES) +
96 get_mm_counter(mm, MM_SHMEMPAGES);
97 *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
98 >> PAGE_SHIFT;
99 *data = mm->data_vm + mm->stack_vm;
100 *resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
101 return mm->total_vm;
102 }
103
104 #ifdef CONFIG_NUMA
105 /*
106 * Save get_task_policy() for show_numa_map().
107 */
static void hold_task_mempolicy(struct proc_maps_private *priv)
109 {
110 struct task_struct *task = priv->task;
111
112 task_lock(task);
113 priv->task_mempolicy = get_task_policy(task);
114 mpol_get(priv->task_mempolicy);
115 task_unlock(task);
116 }
static void release_task_mempolicy(struct proc_maps_private *priv)
118 {
119 mpol_put(priv->task_mempolicy);
120 }
121 #else
static void hold_task_mempolicy(struct proc_maps_private *priv)
123 {
124 }
static void release_task_mempolicy(struct proc_maps_private *priv)
126 {
127 }
128 #endif
129
static struct vm_area_struct *proc_get_vma(struct proc_maps_private *priv,
131 loff_t *ppos)
132 {
133 struct vm_area_struct *vma = vma_next(&priv->iter);
134
135 if (vma) {
136 *ppos = vma->vm_start;
137 } else {
138 *ppos = -2UL;
139 vma = get_gate_vma(priv->mm);
140 }
141
142 return vma;
143 }
144
static void *m_start(struct seq_file *m, loff_t *ppos)
146 {
147 struct proc_maps_private *priv = m->private;
148 unsigned long last_addr = *ppos;
149 struct mm_struct *mm;
150
151 /* See m_next(). Zero at the start or after lseek. */
152 if (last_addr == -1UL)
153 return NULL;
154
155 priv->task = get_proc_task(priv->inode);
156 if (!priv->task)
157 return ERR_PTR(-ESRCH);
158
159 mm = priv->mm;
160 if (!mm || !mmget_not_zero(mm)) {
161 put_task_struct(priv->task);
162 priv->task = NULL;
163 return NULL;
164 }
165
166 if (mmap_read_lock_killable(mm)) {
167 mmput(mm);
168 put_task_struct(priv->task);
169 priv->task = NULL;
170 return ERR_PTR(-EINTR);
171 }
172
173 vma_iter_init(&priv->iter, mm, last_addr);
174 hold_task_mempolicy(priv);
175 if (last_addr == -2UL)
176 return get_gate_vma(mm);
177
178 return proc_get_vma(priv, ppos);
179 }
180
static void *m_next(struct seq_file *m, void *v, loff_t *ppos)
182 {
183 if (*ppos == -2UL) {
184 *ppos = -1UL;
185 return NULL;
186 }
187 return proc_get_vma(m->private, ppos);
188 }
189
static void m_stop(struct seq_file *m, void *v)
191 {
192 struct proc_maps_private *priv = m->private;
193 struct mm_struct *mm = priv->mm;
194
195 if (!priv->task)
196 return;
197
198 release_task_mempolicy(priv);
199 mmap_read_unlock(mm);
200 mmput(mm);
201 put_task_struct(priv->task);
202 priv->task = NULL;
203 }
204
static int proc_maps_open(struct inode *inode, struct file *file,
206 const struct seq_operations *ops, int psize)
207 {
208 struct proc_maps_private *priv = __seq_open_private(file, ops, psize);
209
210 if (!priv)
211 return -ENOMEM;
212
213 priv->inode = inode;
214 priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
215 if (IS_ERR(priv->mm)) {
216 int err = PTR_ERR(priv->mm);
217
218 seq_release_private(inode, file);
219 return err;
220 }
221
222 return 0;
223 }
224
static int proc_map_release(struct inode *inode, struct file *file)
226 {
227 struct seq_file *seq = file->private_data;
228 struct proc_maps_private *priv = seq->private;
229
230 if (priv->mm)
231 mmdrop(priv->mm);
232
233 return seq_release_private(inode, file);
234 }
235
static int do_maps_open(struct inode *inode, struct file *file,
237 const struct seq_operations *ops)
238 {
239 return proc_maps_open(inode, file, ops,
240 sizeof(struct proc_maps_private));
241 }
242
static void get_vma_name(struct vm_area_struct *vma,
244 const struct path **path,
245 const char **name,
246 const char **name_fmt)
247 {
248 struct anon_vma_name *anon_name = vma->vm_mm ? anon_vma_name(vma) : NULL;
249
250 *name = NULL;
251 *path = NULL;
252 *name_fmt = NULL;
253
254 /*
255 * Print the dentry name for named mappings, and a
256 * special [heap] marker for the heap:
257 */
258 if (vma->vm_file) {
259 /*
260 * If user named this anon shared memory via
261 * prctl(PR_SET_VMA ..., use the provided name.
262 */
263 if (anon_name) {
264 *name_fmt = "[anon_shmem:%s]";
265 *name = anon_name->name;
266 } else {
267 *path = file_user_path(vma->vm_file);
268 }
269 return;
270 }
271
272 if (vma->vm_ops && vma->vm_ops->name) {
273 *name = vma->vm_ops->name(vma);
274 if (*name)
275 return;
276 }
277
278 *name = arch_vma_name(vma);
279 if (*name)
280 return;
281
282 if (!vma->vm_mm) {
283 *name = "[vdso]";
284 return;
285 }
286
287 if (vma_is_initial_heap(vma)) {
288 *name = "[heap]";
289 return;
290 }
291
292 if (vma_is_initial_stack(vma)) {
293 *name = "[stack]";
294 return;
295 }
296
297 if (anon_name) {
298 *name_fmt = "[anon:%s]";
299 *name = anon_name->name;
300 return;
301 }
302 }
303
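/*
 * One /proc/<pid>/maps line produced below looks like, for example:
 *
 *	08048000-08049000 r-xp 00000000 03:00 8312       /opt/test
 *
 * i.e. address range, permissions, file offset, device, inode and the
 * pathname (or special name) of the mapping.
 */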
static void show_vma_header_prefix(struct seq_file *m,
305 unsigned long start, unsigned long end,
306 vm_flags_t flags, unsigned long long pgoff,
307 dev_t dev, unsigned long ino)
308 {
309 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
310 seq_put_hex_ll(m, NULL, start, 8);
311 seq_put_hex_ll(m, "-", end, 8);
312 seq_putc(m, ' ');
313 seq_putc(m, flags & VM_READ ? 'r' : '-');
314 seq_putc(m, flags & VM_WRITE ? 'w' : '-');
315 seq_putc(m, flags & VM_EXEC ? 'x' : '-');
316 seq_putc(m, flags & VM_MAYSHARE ? 's' : 'p');
317 seq_put_hex_ll(m, " ", pgoff, 8);
318 seq_put_hex_ll(m, " ", MAJOR(dev), 2);
319 seq_put_hex_ll(m, ":", MINOR(dev), 2);
320 seq_put_decimal_ull(m, " ", ino);
321 seq_putc(m, ' ');
322 }
323
324 static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
326 {
327 const struct path *path;
328 const char *name_fmt, *name;
329 vm_flags_t flags = vma->vm_flags;
330 unsigned long ino = 0;
331 unsigned long long pgoff = 0;
332 unsigned long start, end;
333 dev_t dev = 0;
334
335 if (vma->vm_file) {
336 const struct inode *inode = file_user_inode(vma->vm_file);
337
338 dev = inode->i_sb->s_dev;
339 ino = inode->i_ino;
340 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
341 }
342
343 start = vma->vm_start;
344 end = vma->vm_end;
345 show_vma_header_prefix(m, start, end, flags, pgoff, dev, ino);
346
347 get_vma_name(vma, &path, &name, &name_fmt);
348 if (path) {
349 seq_pad(m, ' ');
350 seq_path(m, path, "\n");
351 } else if (name_fmt) {
352 seq_pad(m, ' ');
353 seq_printf(m, name_fmt, name);
354 } else if (name) {
355 seq_pad(m, ' ');
356 seq_puts(m, name);
357 }
358 seq_putc(m, '\n');
359 }
360
static int show_map(struct seq_file *m, void *v)
362 {
363 show_map_vma(m, v);
364 return 0;
365 }
366
367 static const struct seq_operations proc_pid_maps_op = {
368 .start = m_start,
369 .next = m_next,
370 .stop = m_stop,
371 .show = show_map
372 };
373
static int pid_maps_open(struct inode *inode, struct file *file)
375 {
376 return do_maps_open(inode, file, &proc_pid_maps_op);
377 }
378
379 #define PROCMAP_QUERY_VMA_FLAGS ( \
380 PROCMAP_QUERY_VMA_READABLE | \
381 PROCMAP_QUERY_VMA_WRITABLE | \
382 PROCMAP_QUERY_VMA_EXECUTABLE | \
383 PROCMAP_QUERY_VMA_SHARED \
384 )
385
386 #define PROCMAP_QUERY_VALID_FLAGS_MASK ( \
387 PROCMAP_QUERY_COVERING_OR_NEXT_VMA | \
388 PROCMAP_QUERY_FILE_BACKED_VMA | \
389 PROCMAP_QUERY_VMA_FLAGS \
390 )
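
/*
 * A minimal userspace sketch of the PROCMAP_QUERY ioctl (field and flag
 * names assume the uapi definitions in <linux/fs.h>):
 *
 *	struct procmap_query q = {
 *		.size = sizeof(q),
 *		.query_flags = PROCMAP_QUERY_COVERING_OR_NEXT_VMA,
 *		.query_addr = addr,
 *	};
 *	err = ioctl(maps_fd, PROCMAP_QUERY, &q); // maps_fd: open /proc/<pid>/maps
 */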
391
static int query_vma_setup(struct mm_struct *mm)
393 {
394 return mmap_read_lock_killable(mm);
395 }
396
static void query_vma_teardown(struct mm_struct *mm, struct vm_area_struct *vma)
398 {
399 mmap_read_unlock(mm);
400 }
401
static struct vm_area_struct *query_vma_find_by_addr(struct mm_struct *mm, unsigned long addr)
403 {
404 return find_vma(mm, addr);
405 }
406
static struct vm_area_struct *query_matching_vma(struct mm_struct *mm,
408 unsigned long addr, u32 flags)
409 {
410 struct vm_area_struct *vma;
411
412 next_vma:
413 vma = query_vma_find_by_addr(mm, addr);
414 if (!vma)
415 goto no_vma;
416
417 /* user requested only file-backed VMA, keep iterating */
418 if ((flags & PROCMAP_QUERY_FILE_BACKED_VMA) && !vma->vm_file)
419 goto skip_vma;
420
421 /* VMA permissions should satisfy query flags */
422 if (flags & PROCMAP_QUERY_VMA_FLAGS) {
423 u32 perm = 0;
424
425 if (flags & PROCMAP_QUERY_VMA_READABLE)
426 perm |= VM_READ;
427 if (flags & PROCMAP_QUERY_VMA_WRITABLE)
428 perm |= VM_WRITE;
429 if (flags & PROCMAP_QUERY_VMA_EXECUTABLE)
430 perm |= VM_EXEC;
431 if (flags & PROCMAP_QUERY_VMA_SHARED)
432 perm |= VM_MAYSHARE;
433
434 if ((vma->vm_flags & perm) != perm)
435 goto skip_vma;
436 }
437
438 /* found covering VMA or user is OK with the matching next VMA */
439 if ((flags & PROCMAP_QUERY_COVERING_OR_NEXT_VMA) || vma->vm_start <= addr)
440 return vma;
441
442 skip_vma:
443 /*
444 * If the user needs closest matching VMA, keep iterating.
445 */
446 addr = vma->vm_end;
447 if (flags & PROCMAP_QUERY_COVERING_OR_NEXT_VMA)
448 goto next_vma;
449
450 no_vma:
451 return ERR_PTR(-ENOENT);
452 }
453
static int do_procmap_query(struct proc_maps_private *priv, void __user *uarg)
455 {
456 struct procmap_query karg;
457 struct vm_area_struct *vma;
458 struct mm_struct *mm;
459 const char *name = NULL;
460 char build_id_buf[BUILD_ID_SIZE_MAX], *name_buf = NULL;
461 __u64 usize;
462 int err;
463
464 if (copy_from_user(&usize, (void __user *)uarg, sizeof(usize)))
465 return -EFAULT;
466 /* argument struct can never be that large, reject abuse */
467 if (usize > PAGE_SIZE)
468 return -E2BIG;
469 /* argument struct should have at least query_flags and query_addr fields */
470 if (usize < offsetofend(struct procmap_query, query_addr))
471 return -EINVAL;
472 err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
473 if (err)
474 return err;
475
476 /* reject unknown flags */
477 if (karg.query_flags & ~PROCMAP_QUERY_VALID_FLAGS_MASK)
478 return -EINVAL;
479 /* either both buffer address and size are set, or both should be zero */
480 if (!!karg.vma_name_size != !!karg.vma_name_addr)
481 return -EINVAL;
482 if (!!karg.build_id_size != !!karg.build_id_addr)
483 return -EINVAL;
484
485 mm = priv->mm;
486 if (!mm || !mmget_not_zero(mm))
487 return -ESRCH;
488
489 err = query_vma_setup(mm);
490 if (err) {
491 mmput(mm);
492 return err;
493 }
494
495 vma = query_matching_vma(mm, karg.query_addr, karg.query_flags);
496 if (IS_ERR(vma)) {
497 err = PTR_ERR(vma);
498 vma = NULL;
499 goto out;
500 }
501
502 karg.vma_start = vma->vm_start;
503 karg.vma_end = vma->vm_end;
504
505 karg.vma_flags = 0;
506 if (vma->vm_flags & VM_READ)
507 karg.vma_flags |= PROCMAP_QUERY_VMA_READABLE;
508 if (vma->vm_flags & VM_WRITE)
509 karg.vma_flags |= PROCMAP_QUERY_VMA_WRITABLE;
510 if (vma->vm_flags & VM_EXEC)
511 karg.vma_flags |= PROCMAP_QUERY_VMA_EXECUTABLE;
512 if (vma->vm_flags & VM_MAYSHARE)
513 karg.vma_flags |= PROCMAP_QUERY_VMA_SHARED;
514
515 karg.vma_page_size = vma_kernel_pagesize(vma);
516
517 if (vma->vm_file) {
518 const struct inode *inode = file_user_inode(vma->vm_file);
519
520 karg.vma_offset = ((__u64)vma->vm_pgoff) << PAGE_SHIFT;
521 karg.dev_major = MAJOR(inode->i_sb->s_dev);
522 karg.dev_minor = MINOR(inode->i_sb->s_dev);
523 karg.inode = inode->i_ino;
524 } else {
525 karg.vma_offset = 0;
526 karg.dev_major = 0;
527 karg.dev_minor = 0;
528 karg.inode = 0;
529 }
530
531 if (karg.build_id_size) {
532 __u32 build_id_sz;
533
534 err = build_id_parse(vma, build_id_buf, &build_id_sz);
535 if (err) {
536 karg.build_id_size = 0;
537 } else {
538 if (karg.build_id_size < build_id_sz) {
539 err = -ENAMETOOLONG;
540 goto out;
541 }
542 karg.build_id_size = build_id_sz;
543 }
544 }
545
546 if (karg.vma_name_size) {
547 size_t name_buf_sz = min_t(size_t, PATH_MAX, karg.vma_name_size);
548 const struct path *path;
549 const char *name_fmt;
550 size_t name_sz = 0;
551
552 get_vma_name(vma, &path, &name, &name_fmt);
553
554 if (path || name_fmt || name) {
555 name_buf = kmalloc(name_buf_sz, GFP_KERNEL);
556 if (!name_buf) {
557 err = -ENOMEM;
558 goto out;
559 }
560 }
561 if (path) {
562 name = d_path(path, name_buf, name_buf_sz);
563 if (IS_ERR(name)) {
564 err = PTR_ERR(name);
565 goto out;
566 }
567 name_sz = name_buf + name_buf_sz - name;
568 } else if (name || name_fmt) {
569 name_sz = 1 + snprintf(name_buf, name_buf_sz, name_fmt ?: "%s", name);
570 name = name_buf;
571 }
572 if (name_sz > name_buf_sz) {
573 err = -ENAMETOOLONG;
574 goto out;
575 }
576 karg.vma_name_size = name_sz;
577 }
578
579 /* unlock vma or mmap_lock, and put mm_struct before copying data to user */
580 query_vma_teardown(mm, vma);
581 mmput(mm);
582
583 if (karg.vma_name_size && copy_to_user(u64_to_user_ptr(karg.vma_name_addr),
584 name, karg.vma_name_size)) {
585 kfree(name_buf);
586 return -EFAULT;
587 }
588 kfree(name_buf);
589
590 if (karg.build_id_size && copy_to_user(u64_to_user_ptr(karg.build_id_addr),
591 build_id_buf, karg.build_id_size))
592 return -EFAULT;
593
594 if (copy_to_user(uarg, &karg, min_t(size_t, sizeof(karg), usize)))
595 return -EFAULT;
596
597 return 0;
598
599 out:
600 query_vma_teardown(mm, vma);
601 mmput(mm);
602 kfree(name_buf);
603 return err;
604 }
605
static long procfs_procmap_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
607 {
608 struct seq_file *seq = file->private_data;
609 struct proc_maps_private *priv = seq->private;
610
611 switch (cmd) {
612 case PROCMAP_QUERY:
613 return do_procmap_query(priv, (void __user *)arg);
614 default:
615 return -ENOIOCTLCMD;
616 }
617 }
618
619 const struct file_operations proc_pid_maps_operations = {
620 .open = pid_maps_open,
621 .read = seq_read,
622 .llseek = seq_lseek,
623 .release = proc_map_release,
624 .unlocked_ioctl = procfs_procmap_ioctl,
625 .compat_ioctl = compat_ptr_ioctl,
626 };
627
628 /*
 * Proportional Set Size (PSS): my share of RSS.
630 *
631 * PSS of a process is the count of pages it has in memory, where each
632 * page is divided by the number of processes sharing it. So if a
633 * process has 1000 pages all to itself, and 1000 shared with one other
634 * process, its PSS will be 1500.
635 *
 * To keep (accumulated) division errors low, we use a 64-bit
 * fixed-point pss counter, so (pss >> PSS_SHIFT) is the real byte
 * count.
639 *
640 * A shift of 12 before division means (assuming 4K page size):
641 * - 1M 3-user-pages add up to 8KB errors;
642 * - supports mapcount up to 2^24, or 16M;
643 * - supports PSS up to 2^52 bytes, or 4PB.
644 */
645 #define PSS_SHIFT 12
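
/*
 * Worked example of the fixed-point math above: a 4 KiB page mapped by
 * three processes contributes (4096 << PSS_SHIFT) / 3 bytes of pss to
 * each of them; only the final (pss >> PSS_SHIFT) loses the fraction.
 */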
646
647 #ifdef CONFIG_PROC_PAGE_MONITOR
648 struct mem_size_stats {
649 unsigned long resident;
650 unsigned long shared_clean;
651 unsigned long shared_dirty;
652 unsigned long private_clean;
653 unsigned long private_dirty;
654 unsigned long referenced;
655 unsigned long anonymous;
656 unsigned long lazyfree;
657 unsigned long anonymous_thp;
658 unsigned long shmem_thp;
659 unsigned long file_thp;
660 unsigned long swap;
661 unsigned long shared_hugetlb;
662 unsigned long private_hugetlb;
663 unsigned long ksm;
664 u64 pss;
665 u64 pss_anon;
666 u64 pss_file;
667 u64 pss_shmem;
668 u64 pss_dirty;
669 u64 pss_locked;
670 u64 swap_pss;
671 };
672
static void smaps_page_accumulate(struct mem_size_stats *mss,
674 struct folio *folio, unsigned long size, unsigned long pss,
675 bool dirty, bool locked, bool private)
676 {
677 mss->pss += pss;
678
679 if (folio_test_anon(folio))
680 mss->pss_anon += pss;
681 else if (folio_test_swapbacked(folio))
682 mss->pss_shmem += pss;
683 else
684 mss->pss_file += pss;
685
686 if (locked)
687 mss->pss_locked += pss;
688
689 if (dirty || folio_test_dirty(folio)) {
690 mss->pss_dirty += pss;
691 if (private)
692 mss->private_dirty += size;
693 else
694 mss->shared_dirty += size;
695 } else {
696 if (private)
697 mss->private_clean += size;
698 else
699 mss->shared_clean += size;
700 }
701 }
702
static void smaps_account(struct mem_size_stats *mss, struct page *page,
704 bool compound, bool young, bool dirty, bool locked,
705 bool present)
706 {
707 struct folio *folio = page_folio(page);
708 int i, nr = compound ? compound_nr(page) : 1;
709 unsigned long size = nr * PAGE_SIZE;
710 bool exclusive;
711 int mapcount;
712
713 /*
714 * First accumulate quantities that depend only on |size| and the type
715 * of the compound page.
716 */
717 if (folio_test_anon(folio)) {
718 mss->anonymous += size;
719 if (!folio_test_swapbacked(folio) && !dirty &&
720 !folio_test_dirty(folio))
721 mss->lazyfree += size;
722 }
723
724 if (folio_test_ksm(folio))
725 mss->ksm += size;
726
727 mss->resident += size;
728 /* Accumulate the size in pages that have been accessed. */
729 if (young || folio_test_young(folio) || folio_test_referenced(folio))
730 mss->referenced += size;
731
732 /*
733 * Then accumulate quantities that may depend on sharing, or that may
734 * differ page-by-page.
735 *
736 * refcount == 1 for present entries guarantees that the folio is mapped
737 * exactly once. For large folios this implies that exactly one
738 * PTE/PMD/... maps (a part of) this folio.
739 *
740 * Treat all non-present entries (where relying on the mapcount and
741 * refcount doesn't make sense) as "maybe shared, but not sure how
742 * often". We treat device private entries as being fake-present.
743 *
744 * Note that it would not be safe to read the mapcount especially for
745 * pages referenced by migration entries, even with the PTL held.
746 */
747 if (folio_ref_count(folio) == 1 || !present) {
748 smaps_page_accumulate(mss, folio, size, size << PSS_SHIFT,
749 dirty, locked, present);
750 return;
751 }
752
753 if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
754 mapcount = folio_average_page_mapcount(folio);
755 exclusive = !folio_maybe_mapped_shared(folio);
756 }
757
758 /*
759 * We obtain a snapshot of the mapcount. Without holding the folio lock
760 * this snapshot can be slightly wrong as we cannot always read the
761 * mapcount atomically.
762 */
763 for (i = 0; i < nr; i++, page++) {
764 unsigned long pss = PAGE_SIZE << PSS_SHIFT;
765
766 if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) {
767 mapcount = folio_precise_page_mapcount(folio, page);
768 exclusive = mapcount < 2;
769 }
770
771 if (mapcount >= 2)
772 pss /= mapcount;
773 smaps_page_accumulate(mss, folio, PAGE_SIZE, pss,
774 dirty, locked, exclusive);
775 }
776 }
777
778 #ifdef CONFIG_SHMEM
static int smaps_pte_hole(unsigned long addr, unsigned long end,
780 __always_unused int depth, struct mm_walk *walk)
781 {
782 struct mem_size_stats *mss = walk->private;
783 struct vm_area_struct *vma = walk->vma;
784
785 mss->swap += shmem_partial_swap_usage(walk->vma->vm_file->f_mapping,
786 linear_page_index(vma, addr),
787 linear_page_index(vma, end));
788
789 return 0;
790 }
791 #else
792 #define smaps_pte_hole NULL
793 #endif /* CONFIG_SHMEM */
794
static void smaps_pte_hole_lookup(unsigned long addr, struct mm_walk *walk)
796 {
797 #ifdef CONFIG_SHMEM
798 if (walk->ops->pte_hole) {
799 /* depth is not used */
800 smaps_pte_hole(addr, addr + PAGE_SIZE, 0, walk);
801 }
802 #endif
803 }
804
static void smaps_pte_entry(pte_t *pte, unsigned long addr,
806 struct mm_walk *walk)
807 {
808 struct mem_size_stats *mss = walk->private;
809 struct vm_area_struct *vma = walk->vma;
810 bool locked = !!(vma->vm_flags & VM_LOCKED);
811 struct page *page = NULL;
812 bool present = false, young = false, dirty = false;
813 pte_t ptent = ptep_get(pte);
814
815 if (pte_present(ptent)) {
816 page = vm_normal_page(vma, addr, ptent);
817 young = pte_young(ptent);
818 dirty = pte_dirty(ptent);
819 present = true;
820 } else if (is_swap_pte(ptent)) {
821 swp_entry_t swpent = pte_to_swp_entry(ptent);
822
823 if (!non_swap_entry(swpent)) {
824 int mapcount;
825
826 mss->swap += PAGE_SIZE;
827 mapcount = swp_swapcount(swpent);
828 if (mapcount >= 2) {
829 u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;
830
831 do_div(pss_delta, mapcount);
832 mss->swap_pss += pss_delta;
833 } else {
834 mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
835 }
836 } else if (is_pfn_swap_entry(swpent)) {
837 if (is_device_private_entry(swpent))
838 present = true;
839 page = pfn_swap_entry_to_page(swpent);
840 }
841 } else {
842 smaps_pte_hole_lookup(addr, walk);
843 return;
844 }
845
846 if (!page)
847 return;
848
849 smaps_account(mss, page, false, young, dirty, locked, present);
850 }
851
852 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
854 struct mm_walk *walk)
855 {
856 struct mem_size_stats *mss = walk->private;
857 struct vm_area_struct *vma = walk->vma;
858 bool locked = !!(vma->vm_flags & VM_LOCKED);
859 struct page *page = NULL;
860 bool present = false;
861 struct folio *folio;
862
863 if (pmd_present(*pmd)) {
864 page = vm_normal_page_pmd(vma, addr, *pmd);
865 present = true;
866 } else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
867 swp_entry_t entry = pmd_to_swp_entry(*pmd);
868
869 if (is_pfn_swap_entry(entry))
870 page = pfn_swap_entry_to_page(entry);
871 }
872 if (IS_ERR_OR_NULL(page))
873 return;
874 folio = page_folio(page);
875 if (folio_test_anon(folio))
876 mss->anonymous_thp += HPAGE_PMD_SIZE;
877 else if (folio_test_swapbacked(folio))
878 mss->shmem_thp += HPAGE_PMD_SIZE;
879 else if (folio_is_zone_device(folio))
880 /* pass */;
881 else
882 mss->file_thp += HPAGE_PMD_SIZE;
883
884 smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd),
885 locked, present);
886 }
887 #else
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
889 struct mm_walk *walk)
890 {
891 }
892 #endif
893
static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
895 struct mm_walk *walk)
896 {
897 struct vm_area_struct *vma = walk->vma;
898 pte_t *pte;
899 spinlock_t *ptl;
900
901 ptl = pmd_trans_huge_lock(pmd, vma);
902 if (ptl) {
903 smaps_pmd_entry(pmd, addr, walk);
904 spin_unlock(ptl);
905 goto out;
906 }
907
908 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
909 if (!pte) {
910 walk->action = ACTION_AGAIN;
911 return 0;
912 }
913 for (; addr != end; pte++, addr += PAGE_SIZE)
914 smaps_pte_entry(pte, addr, walk);
915 pte_unmap_unlock(pte - 1, ptl);
916 out:
917 cond_resched();
918 return 0;
919 }
920
static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
922 {
923 /*
924 * Don't forget to update Documentation/ on changes.
925 *
 * The second dimension of mnemonics[] needs to be 3 rather
 * than the previous 2 (i.e. [BITS_PER_LONG][3] instead of
 * [BITS_PER_LONG][2]) to avoid a spurious
 * -Werror=unterminated-string-initialization warning with
 * GCC 15.
932 */
933 static const char mnemonics[BITS_PER_LONG][3] = {
934 /*
 * In case we meet a flag we don't know about.
936 */
937 [0 ... (BITS_PER_LONG-1)] = "??",
938
939 [ilog2(VM_READ)] = "rd",
940 [ilog2(VM_WRITE)] = "wr",
941 [ilog2(VM_EXEC)] = "ex",
942 [ilog2(VM_SHARED)] = "sh",
943 [ilog2(VM_MAYREAD)] = "mr",
944 [ilog2(VM_MAYWRITE)] = "mw",
945 [ilog2(VM_MAYEXEC)] = "me",
946 [ilog2(VM_MAYSHARE)] = "ms",
947 [ilog2(VM_GROWSDOWN)] = "gd",
948 [ilog2(VM_PFNMAP)] = "pf",
949 [ilog2(VM_LOCKED)] = "lo",
950 [ilog2(VM_IO)] = "io",
951 [ilog2(VM_SEQ_READ)] = "sr",
952 [ilog2(VM_RAND_READ)] = "rr",
953 [ilog2(VM_DONTCOPY)] = "dc",
954 [ilog2(VM_DONTEXPAND)] = "de",
955 [ilog2(VM_LOCKONFAULT)] = "lf",
956 [ilog2(VM_ACCOUNT)] = "ac",
957 [ilog2(VM_NORESERVE)] = "nr",
958 [ilog2(VM_HUGETLB)] = "ht",
959 [ilog2(VM_SYNC)] = "sf",
960 [ilog2(VM_ARCH_1)] = "ar",
961 [ilog2(VM_WIPEONFORK)] = "wf",
962 [ilog2(VM_DONTDUMP)] = "dd",
963 #ifdef CONFIG_ARM64_BTI
964 [ilog2(VM_ARM64_BTI)] = "bt",
965 #endif
966 #ifdef CONFIG_MEM_SOFT_DIRTY
967 [ilog2(VM_SOFTDIRTY)] = "sd",
968 #endif
969 [ilog2(VM_MIXEDMAP)] = "mm",
970 [ilog2(VM_HUGEPAGE)] = "hg",
971 [ilog2(VM_NOHUGEPAGE)] = "nh",
972 [ilog2(VM_MERGEABLE)] = "mg",
973 [ilog2(VM_UFFD_MISSING)]= "um",
974 [ilog2(VM_UFFD_WP)] = "uw",
975 #ifdef CONFIG_ARM64_MTE
976 [ilog2(VM_MTE)] = "mt",
977 [ilog2(VM_MTE_ALLOWED)] = "",
978 #endif
979 #ifdef CONFIG_ARCH_HAS_PKEYS
980 /* These come out via ProtectionKey: */
981 [ilog2(VM_PKEY_BIT0)] = "",
982 [ilog2(VM_PKEY_BIT1)] = "",
983 [ilog2(VM_PKEY_BIT2)] = "",
984 #if VM_PKEY_BIT3
985 [ilog2(VM_PKEY_BIT3)] = "",
986 #endif
987 #if VM_PKEY_BIT4
988 [ilog2(VM_PKEY_BIT4)] = "",
989 #endif
990 #endif /* CONFIG_ARCH_HAS_PKEYS */
991 #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
992 [ilog2(VM_UFFD_MINOR)] = "ui",
993 #endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
994 #ifdef CONFIG_ARCH_HAS_USER_SHADOW_STACK
995 [ilog2(VM_SHADOW_STACK)] = "ss",
996 #endif
997 #if defined(CONFIG_64BIT) || defined(CONFIG_PPC32)
998 [ilog2(VM_DROPPABLE)] = "dp",
999 #endif
1000 #ifdef CONFIG_64BIT
1001 [ilog2(VM_SEALED)] = "sl",
1002 #endif
1003 };
1004 size_t i;
1005
1006 seq_puts(m, "VmFlags: ");
1007 for (i = 0; i < BITS_PER_LONG; i++) {
1008 if (!mnemonics[i][0])
1009 continue;
1010 if (vma->vm_flags & (1UL << i))
1011 seq_printf(m, "%s ", mnemonics[i]);
1012 }
1013 seq_putc(m, '\n');
1014 }
1015
1016 #ifdef CONFIG_HUGETLB_PAGE
static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
1018 unsigned long addr, unsigned long end,
1019 struct mm_walk *walk)
1020 {
1021 struct mem_size_stats *mss = walk->private;
1022 struct vm_area_struct *vma = walk->vma;
1023 pte_t ptent = huge_ptep_get(walk->mm, addr, pte);
1024 struct folio *folio = NULL;
1025 bool present = false;
1026
1027 if (pte_present(ptent)) {
1028 folio = page_folio(pte_page(ptent));
1029 present = true;
1030 } else if (is_swap_pte(ptent)) {
1031 swp_entry_t swpent = pte_to_swp_entry(ptent);
1032
1033 if (is_pfn_swap_entry(swpent))
1034 folio = pfn_swap_entry_folio(swpent);
1035 }
1036
1037 if (folio) {
1038 /* We treat non-present entries as "maybe shared". */
1039 if (!present || folio_maybe_mapped_shared(folio) ||
1040 hugetlb_pmd_shared(pte))
1041 mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
1042 else
1043 mss->private_hugetlb += huge_page_size(hstate_vma(vma));
1044 }
1045 return 0;
1046 }
1047 #else
1048 #define smaps_hugetlb_range NULL
1049 #endif /* HUGETLB_PAGE */
1050
1051 static const struct mm_walk_ops smaps_walk_ops = {
1052 .pmd_entry = smaps_pte_range,
1053 .hugetlb_entry = smaps_hugetlb_range,
1054 .walk_lock = PGWALK_RDLOCK,
1055 };
1056
1057 static const struct mm_walk_ops smaps_shmem_walk_ops = {
1058 .pmd_entry = smaps_pte_range,
1059 .hugetlb_entry = smaps_hugetlb_range,
1060 .pte_hole = smaps_pte_hole,
1061 .walk_lock = PGWALK_RDLOCK,
1062 };
1063
1064 /*
1065 * Gather mem stats from @vma with the indicated beginning
1066 * address @start, and keep them in @mss.
1067 *
1068 * Use vm_start of @vma as the beginning address if @start is 0.
1069 */
static void smap_gather_stats(struct vm_area_struct *vma,
1071 struct mem_size_stats *mss, unsigned long start)
1072 {
1073 const struct mm_walk_ops *ops = &smaps_walk_ops;
1074
1075 /* Invalid start */
1076 if (start >= vma->vm_end)
1077 return;
1078
1079 if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
1080 /*
1081 * For shared or readonly shmem mappings we know that all
1082 * swapped out pages belong to the shmem object, and we can
1083 * obtain the swap value much more efficiently. For private
1084 * writable mappings, we might have COW pages that are
1085 * not affected by the parent swapped out pages of the shmem
1086 * object, so we have to distinguish them during the page walk.
1087 * Unless we know that the shmem object (or the part mapped by
1088 * our VMA) has no swapped out pages at all.
1089 */
1090 unsigned long shmem_swapped = shmem_swap_usage(vma);
1091
1092 if (!start && (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
1093 !(vma->vm_flags & VM_WRITE))) {
1094 mss->swap += shmem_swapped;
1095 } else {
1096 ops = &smaps_shmem_walk_ops;
1097 }
1098 }
1099
1100 /* mmap_lock is held in m_start */
1101 if (!start)
1102 walk_page_vma(vma, ops, mss);
1103 else
1104 walk_page_range(vma->vm_mm, start, vma->vm_end, ops, mss);
1105 }
1106
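/*
 * Unlike the SEQ_PUT_DEC used by task_mem() above, the counters printed
 * below are byte counts, so ">> 10" converts bytes to KiB.
 */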
1107 #define SEQ_PUT_DEC(str, val) \
1108 seq_put_decimal_ull_width(m, str, (val) >> 10, 8)
1109
1110 /* Show the contents common for smaps and smaps_rollup */
static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss,
1112 bool rollup_mode)
1113 {
1114 SEQ_PUT_DEC("Rss: ", mss->resident);
1115 SEQ_PUT_DEC(" kB\nPss: ", mss->pss >> PSS_SHIFT);
1116 SEQ_PUT_DEC(" kB\nPss_Dirty: ", mss->pss_dirty >> PSS_SHIFT);
1117 if (rollup_mode) {
1118 /*
1119 * These are meaningful only for smaps_rollup, otherwise two of
1120 * them are zero, and the other one is the same as Pss.
1121 */
1122 SEQ_PUT_DEC(" kB\nPss_Anon: ",
1123 mss->pss_anon >> PSS_SHIFT);
1124 SEQ_PUT_DEC(" kB\nPss_File: ",
1125 mss->pss_file >> PSS_SHIFT);
1126 SEQ_PUT_DEC(" kB\nPss_Shmem: ",
1127 mss->pss_shmem >> PSS_SHIFT);
1128 }
1129 SEQ_PUT_DEC(" kB\nShared_Clean: ", mss->shared_clean);
1130 SEQ_PUT_DEC(" kB\nShared_Dirty: ", mss->shared_dirty);
1131 SEQ_PUT_DEC(" kB\nPrivate_Clean: ", mss->private_clean);
1132 SEQ_PUT_DEC(" kB\nPrivate_Dirty: ", mss->private_dirty);
1133 SEQ_PUT_DEC(" kB\nReferenced: ", mss->referenced);
1134 SEQ_PUT_DEC(" kB\nAnonymous: ", mss->anonymous);
1135 SEQ_PUT_DEC(" kB\nKSM: ", mss->ksm);
1136 SEQ_PUT_DEC(" kB\nLazyFree: ", mss->lazyfree);
1137 SEQ_PUT_DEC(" kB\nAnonHugePages: ", mss->anonymous_thp);
1138 SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp);
1139 SEQ_PUT_DEC(" kB\nFilePmdMapped: ", mss->file_thp);
1140 SEQ_PUT_DEC(" kB\nShared_Hugetlb: ", mss->shared_hugetlb);
1141 seq_put_decimal_ull_width(m, " kB\nPrivate_Hugetlb: ",
1142 mss->private_hugetlb >> 10, 7);
1143 SEQ_PUT_DEC(" kB\nSwap: ", mss->swap);
1144 SEQ_PUT_DEC(" kB\nSwapPss: ",
1145 mss->swap_pss >> PSS_SHIFT);
1146 SEQ_PUT_DEC(" kB\nLocked: ",
1147 mss->pss_locked >> PSS_SHIFT);
1148 seq_puts(m, " kB\n");
1149 }
1150
static int show_smap(struct seq_file *m, void *v)
1152 {
1153 struct vm_area_struct *vma = v;
1154 struct mem_size_stats mss = {};
1155
1156 smap_gather_stats(vma, &mss, 0);
1157
1158 show_map_vma(m, vma);
1159
1160 SEQ_PUT_DEC("Size: ", vma->vm_end - vma->vm_start);
1161 SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma));
1162 SEQ_PUT_DEC(" kB\nMMUPageSize: ", vma_mmu_pagesize(vma));
1163 seq_puts(m, " kB\n");
1164
1165 __show_smap(m, &mss, false);
1166
1167 seq_printf(m, "THPeligible: %8u\n",
1168 !!thp_vma_allowable_orders(vma, vma->vm_flags,
1169 TVA_SMAPS | TVA_ENFORCE_SYSFS, THP_ORDERS_ALL));
1170
1171 if (arch_pkeys_enabled())
1172 seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma));
1173 show_smap_vma_flags(m, vma);
1174
1175 return 0;
1176 }
1177
static int show_smaps_rollup(struct seq_file *m, void *v)
1179 {
1180 struct proc_maps_private *priv = m->private;
1181 struct mem_size_stats mss = {};
1182 struct mm_struct *mm = priv->mm;
1183 struct vm_area_struct *vma;
1184 unsigned long vma_start = 0, last_vma_end = 0;
1185 int ret = 0;
1186 VMA_ITERATOR(vmi, mm, 0);
1187
1188 priv->task = get_proc_task(priv->inode);
1189 if (!priv->task)
1190 return -ESRCH;
1191
1192 if (!mm || !mmget_not_zero(mm)) {
1193 ret = -ESRCH;
1194 goto out_put_task;
1195 }
1196
1197 ret = mmap_read_lock_killable(mm);
1198 if (ret)
1199 goto out_put_mm;
1200
1201 hold_task_mempolicy(priv);
1202 vma = vma_next(&vmi);
1203
1204 if (unlikely(!vma))
1205 goto empty_set;
1206
1207 vma_start = vma->vm_start;
1208 do {
1209 smap_gather_stats(vma, &mss, 0);
1210 last_vma_end = vma->vm_end;
1211
1212 /*
1213 * Release mmap_lock temporarily if someone wants to
1214 * access it for write request.
1215 */
1216 if (mmap_lock_is_contended(mm)) {
1217 vma_iter_invalidate(&vmi);
1218 mmap_read_unlock(mm);
1219 ret = mmap_read_lock_killable(mm);
1220 if (ret) {
1221 release_task_mempolicy(priv);
1222 goto out_put_mm;
1223 }
1224
1225 /*
1226 * After dropping the lock, there are four cases to
1227 * consider. See the following example for explanation.
1228 *
1229 * +------+------+-----------+
1230 * | VMA1 | VMA2 | VMA3 |
1231 * +------+------+-----------+
1232 * | | | |
1233 * 4k 8k 16k 400k
1234 *
1235 * Suppose we drop the lock after reading VMA2 due to
1236 * contention, then we get:
1237 *
1238 * last_vma_end = 16k
1239 *
1240 * 1) VMA2 is freed, but VMA3 exists:
1241 *
1242 * vma_next(vmi) will return VMA3.
1243 * In this case, just continue from VMA3.
1244 *
1245 * 2) VMA2 still exists:
1246 *
1247 * vma_next(vmi) will return VMA3.
1248 * In this case, just continue from VMA3.
1249 *
1250 * 3) No more VMAs can be found:
1251 *
1252 * vma_next(vmi) will return NULL.
1253 * No more things to do, just break.
1254 *
1255 * 4) (last_vma_end - 1) is the middle of a vma (VMA'):
1256 *
1257 * vma_next(vmi) will return VMA' whose range
1258 * contains last_vma_end.
1259 * Iterate VMA' from last_vma_end.
1260 */
1261 vma = vma_next(&vmi);
1262 /* Case 3 above */
1263 if (!vma)
1264 break;
1265
1266 /* Case 1 and 2 above */
1267 if (vma->vm_start >= last_vma_end) {
1268 smap_gather_stats(vma, &mss, 0);
1269 last_vma_end = vma->vm_end;
1270 continue;
1271 }
1272
1273 /* Case 4 above */
1274 if (vma->vm_end > last_vma_end) {
1275 smap_gather_stats(vma, &mss, last_vma_end);
1276 last_vma_end = vma->vm_end;
1277 }
1278 }
1279 } for_each_vma(vmi, vma);
1280
1281 empty_set:
1282 show_vma_header_prefix(m, vma_start, last_vma_end, 0, 0, 0, 0);
1283 seq_pad(m, ' ');
1284 seq_puts(m, "[rollup]\n");
1285
1286 __show_smap(m, &mss, true);
1287
1288 release_task_mempolicy(priv);
1289 mmap_read_unlock(mm);
1290
1291 out_put_mm:
1292 mmput(mm);
1293 out_put_task:
1294 put_task_struct(priv->task);
1295 priv->task = NULL;
1296
1297 return ret;
1298 }
1299 #undef SEQ_PUT_DEC
1300
1301 static const struct seq_operations proc_pid_smaps_op = {
1302 .start = m_start,
1303 .next = m_next,
1304 .stop = m_stop,
1305 .show = show_smap
1306 };
1307
static int pid_smaps_open(struct inode *inode, struct file *file)
1309 {
1310 return do_maps_open(inode, file, &proc_pid_smaps_op);
1311 }
1312
static int smaps_rollup_open(struct inode *inode, struct file *file)
1314 {
1315 int ret;
1316 struct proc_maps_private *priv;
1317
1318 priv = kzalloc(sizeof(*priv), GFP_KERNEL_ACCOUNT);
1319 if (!priv)
1320 return -ENOMEM;
1321
1322 ret = single_open(file, show_smaps_rollup, priv);
1323 if (ret)
1324 goto out_free;
1325
1326 priv->inode = inode;
1327 priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
1328 if (IS_ERR(priv->mm)) {
1329 ret = PTR_ERR(priv->mm);
1330
1331 single_release(inode, file);
1332 goto out_free;
1333 }
1334
1335 return 0;
1336
1337 out_free:
1338 kfree(priv);
1339 return ret;
1340 }
1341
static int smaps_rollup_release(struct inode *inode, struct file *file)
1343 {
1344 struct seq_file *seq = file->private_data;
1345 struct proc_maps_private *priv = seq->private;
1346
1347 if (priv->mm)
1348 mmdrop(priv->mm);
1349
1350 kfree(priv);
1351 return single_release(inode, file);
1352 }
1353
1354 const struct file_operations proc_pid_smaps_operations = {
1355 .open = pid_smaps_open,
1356 .read = seq_read,
1357 .llseek = seq_lseek,
1358 .release = proc_map_release,
1359 };
1360
1361 const struct file_operations proc_pid_smaps_rollup_operations = {
1362 .open = smaps_rollup_open,
1363 .read = seq_read,
1364 .llseek = seq_lseek,
1365 .release = smaps_rollup_release,
1366 };
1367
1368 enum clear_refs_types {
1369 CLEAR_REFS_ALL = 1,
1370 CLEAR_REFS_ANON,
1371 CLEAR_REFS_MAPPED,
1372 CLEAR_REFS_SOFT_DIRTY,
1373 CLEAR_REFS_MM_HIWATER_RSS,
1374 CLEAR_REFS_LAST,
1375 };
1376
1377 struct clear_refs_private {
1378 enum clear_refs_types type;
1379 };
1380
1381 #ifdef CONFIG_MEM_SOFT_DIRTY
1382
static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
1384 {
1385 struct folio *folio;
1386
1387 if (!pte_write(pte))
1388 return false;
1389 if (!is_cow_mapping(vma->vm_flags))
1390 return false;
1391 if (likely(!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)))
1392 return false;
1393 folio = vm_normal_folio(vma, addr, pte);
1394 if (!folio)
1395 return false;
1396 return folio_maybe_dma_pinned(folio);
1397 }
1398
static inline void clear_soft_dirty(struct vm_area_struct *vma,
1400 unsigned long addr, pte_t *pte)
1401 {
1402 /*
1403 * The soft-dirty tracker uses #PF-s to catch writes
1404 * to pages, so write-protect the pte as well. See the
1405 * Documentation/admin-guide/mm/soft-dirty.rst for full description
1406 * of how soft-dirty works.
1407 */
1408 pte_t ptent = ptep_get(pte);
1409
1410 if (pte_present(ptent)) {
1411 pte_t old_pte;
1412
1413 if (pte_is_pinned(vma, addr, ptent))
1414 return;
1415 old_pte = ptep_modify_prot_start(vma, addr, pte);
1416 ptent = pte_wrprotect(old_pte);
1417 ptent = pte_clear_soft_dirty(ptent);
1418 ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
1419 } else if (is_swap_pte(ptent)) {
1420 ptent = pte_swp_clear_soft_dirty(ptent);
1421 set_pte_at(vma->vm_mm, addr, pte, ptent);
1422 }
1423 }
1424 #else
static inline void clear_soft_dirty(struct vm_area_struct *vma,
1426 unsigned long addr, pte_t *pte)
1427 {
1428 }
1429 #endif
1430
1431 #if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
1433 unsigned long addr, pmd_t *pmdp)
1434 {
1435 pmd_t old, pmd = *pmdp;
1436
1437 if (pmd_present(pmd)) {
1438 /* See comment in change_huge_pmd() */
1439 old = pmdp_invalidate(vma, addr, pmdp);
1440 if (pmd_dirty(old))
1441 pmd = pmd_mkdirty(pmd);
1442 if (pmd_young(old))
1443 pmd = pmd_mkyoung(pmd);
1444
1445 pmd = pmd_wrprotect(pmd);
1446 pmd = pmd_clear_soft_dirty(pmd);
1447
1448 set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
1449 } else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
1450 pmd = pmd_swp_clear_soft_dirty(pmd);
1451 set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
1452 }
1453 }
1454 #else
static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
1456 unsigned long addr, pmd_t *pmdp)
1457 {
1458 }
1459 #endif
1460
static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
1462 unsigned long end, struct mm_walk *walk)
1463 {
1464 struct clear_refs_private *cp = walk->private;
1465 struct vm_area_struct *vma = walk->vma;
1466 pte_t *pte, ptent;
1467 spinlock_t *ptl;
1468 struct folio *folio;
1469
1470 ptl = pmd_trans_huge_lock(pmd, vma);
1471 if (ptl) {
1472 if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
1473 clear_soft_dirty_pmd(vma, addr, pmd);
1474 goto out;
1475 }
1476
1477 if (!pmd_present(*pmd))
1478 goto out;
1479
1480 folio = pmd_folio(*pmd);
1481
1482 /* Clear accessed and referenced bits. */
1483 pmdp_test_and_clear_young(vma, addr, pmd);
1484 folio_test_clear_young(folio);
1485 folio_clear_referenced(folio);
1486 out:
1487 spin_unlock(ptl);
1488 return 0;
1489 }
1490
1491 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
1492 if (!pte) {
1493 walk->action = ACTION_AGAIN;
1494 return 0;
1495 }
1496 for (; addr != end; pte++, addr += PAGE_SIZE) {
1497 ptent = ptep_get(pte);
1498
1499 if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
1500 clear_soft_dirty(vma, addr, pte);
1501 continue;
1502 }
1503
1504 if (!pte_present(ptent))
1505 continue;
1506
1507 folio = vm_normal_folio(vma, addr, ptent);
1508 if (!folio)
1509 continue;
1510
1511 /* Clear accessed and referenced bits. */
1512 ptep_test_and_clear_young(vma, addr, pte);
1513 folio_test_clear_young(folio);
1514 folio_clear_referenced(folio);
1515 }
1516 pte_unmap_unlock(pte - 1, ptl);
1517 cond_resched();
1518 return 0;
1519 }
1520
static int clear_refs_test_walk(unsigned long start, unsigned long end,
1522 struct mm_walk *walk)
1523 {
1524 struct clear_refs_private *cp = walk->private;
1525 struct vm_area_struct *vma = walk->vma;
1526
1527 if (vma->vm_flags & VM_PFNMAP)
1528 return 1;
1529
1530 /*
1531 * Writing 1 to /proc/pid/clear_refs affects all pages.
1532 * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
1533 * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
1534 * Writing 4 to /proc/pid/clear_refs affects all pages.
1535 */
1536 if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
1537 return 1;
1538 if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
1539 return 1;
1540 return 0;
1541 }
1542
1543 static const struct mm_walk_ops clear_refs_walk_ops = {
1544 .pmd_entry = clear_refs_pte_range,
1545 .test_walk = clear_refs_test_walk,
1546 .walk_lock = PGWALK_WRLOCK,
1547 };
1548
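/*
 * Typical usage sketch from userspace (see
 * Documentation/admin-guide/mm/soft-dirty.rst): write "4" to
 * /proc/<pid>/clear_refs to clear the soft-dirty bits, let the task run,
 * then read /proc/<pid>/pagemap to see which pages were written since.
 */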
static ssize_t clear_refs_write(struct file *file, const char __user *buf,
1550 size_t count, loff_t *ppos)
1551 {
1552 struct task_struct *task;
1553 char buffer[PROC_NUMBUF] = {};
1554 struct mm_struct *mm;
1555 struct vm_area_struct *vma;
1556 enum clear_refs_types type;
1557 int itype;
1558 int rv;
1559
1560 if (count > sizeof(buffer) - 1)
1561 count = sizeof(buffer) - 1;
1562 if (copy_from_user(buffer, buf, count))
1563 return -EFAULT;
1564 rv = kstrtoint(strstrip(buffer), 10, &itype);
1565 if (rv < 0)
1566 return rv;
1567 type = (enum clear_refs_types)itype;
1568 if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
1569 return -EINVAL;
1570
1571 task = get_proc_task(file_inode(file));
1572 if (!task)
1573 return -ESRCH;
1574 mm = get_task_mm(task);
1575 if (mm) {
1576 VMA_ITERATOR(vmi, mm, 0);
1577 struct mmu_notifier_range range;
1578 struct clear_refs_private cp = {
1579 .type = type,
1580 };
1581
1582 if (mmap_write_lock_killable(mm)) {
1583 count = -EINTR;
1584 goto out_mm;
1585 }
1586 if (type == CLEAR_REFS_MM_HIWATER_RSS) {
1587 /*
1588 * Writing 5 to /proc/pid/clear_refs resets the peak
1589 * resident set size to this mm's current rss value.
1590 */
1591 reset_mm_hiwater_rss(mm);
1592 goto out_unlock;
1593 }
1594
1595 if (type == CLEAR_REFS_SOFT_DIRTY) {
1596 for_each_vma(vmi, vma) {
1597 if (!(vma->vm_flags & VM_SOFTDIRTY))
1598 continue;
1599 vm_flags_clear(vma, VM_SOFTDIRTY);
1600 vma_set_page_prot(vma);
1601 }
1602
1603 inc_tlb_flush_pending(mm);
1604 mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY,
1605 0, mm, 0, -1UL);
1606 mmu_notifier_invalidate_range_start(&range);
1607 }
1608 walk_page_range(mm, 0, -1, &clear_refs_walk_ops, &cp);
1609 if (type == CLEAR_REFS_SOFT_DIRTY) {
1610 mmu_notifier_invalidate_range_end(&range);
1611 flush_tlb_mm(mm);
1612 dec_tlb_flush_pending(mm);
1613 }
1614 out_unlock:
1615 mmap_write_unlock(mm);
1616 out_mm:
1617 mmput(mm);
1618 }
1619 put_task_struct(task);
1620
1621 return count;
1622 }
1623
1624 const struct file_operations proc_clear_refs_operations = {
1625 .write = clear_refs_write,
1626 .llseek = noop_llseek,
1627 };
1628
1629 typedef struct {
1630 u64 pme;
1631 } pagemap_entry_t;
1632
1633 struct pagemapread {
1634 int pos, len; /* units: PM_ENTRY_BYTES, not bytes */
1635 pagemap_entry_t *buffer;
1636 bool show_pfn;
1637 };
1638
1639 #define PAGEMAP_WALK_SIZE (PMD_SIZE)
1640 #define PAGEMAP_WALK_MASK (PMD_MASK)
1641
1642 #define PM_ENTRY_BYTES sizeof(pagemap_entry_t)
1643 #define PM_PFRAME_BITS 55
1644 #define PM_PFRAME_MASK GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
1645 #define PM_SOFT_DIRTY BIT_ULL(55)
1646 #define PM_MMAP_EXCLUSIVE BIT_ULL(56)
1647 #define PM_UFFD_WP BIT_ULL(57)
1648 #define PM_GUARD_REGION BIT_ULL(58)
1649 #define PM_FILE BIT_ULL(61)
1650 #define PM_SWAP BIT_ULL(62)
1651 #define PM_PRESENT BIT_ULL(63)
1652
1653 #define PM_END_OF_BUFFER 1
1654
static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
1656 {
1657 return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };
1658 }
1659
static int add_to_pagemap(pagemap_entry_t *pme, struct pagemapread *pm)
1661 {
1662 pm->buffer[pm->pos++] = *pme;
1663 if (pm->pos >= pm->len)
1664 return PM_END_OF_BUFFER;
1665 return 0;
1666 }
1667
static bool __folio_page_mapped_exclusively(struct folio *folio, struct page *page)
1669 {
1670 if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
1671 return folio_precise_page_mapcount(folio, page) == 1;
1672 return !folio_maybe_mapped_shared(folio);
1673 }
1674
static int pagemap_pte_hole(unsigned long start, unsigned long end,
1676 __always_unused int depth, struct mm_walk *walk)
1677 {
1678 struct pagemapread *pm = walk->private;
1679 unsigned long addr = start;
1680 int err = 0;
1681
1682 while (addr < end) {
1683 struct vm_area_struct *vma = find_vma(walk->mm, addr);
1684 pagemap_entry_t pme = make_pme(0, 0);
1685 /* End of address space hole, which we mark as non-present. */
1686 unsigned long hole_end;
1687
1688 if (vma)
1689 hole_end = min(end, vma->vm_start);
1690 else
1691 hole_end = end;
1692
1693 for (; addr < hole_end; addr += PAGE_SIZE) {
1694 err = add_to_pagemap(&pme, pm);
1695 if (err)
1696 goto out;
1697 }
1698
1699 if (!vma)
1700 break;
1701
1702 /* Addresses in the VMA. */
1703 if (vma->vm_flags & VM_SOFTDIRTY)
1704 pme = make_pme(0, PM_SOFT_DIRTY);
1705 for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
1706 err = add_to_pagemap(&pme, pm);
1707 if (err)
1708 goto out;
1709 }
1710 }
1711 out:
1712 return err;
1713 }
1714
static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
1716 struct vm_area_struct *vma, unsigned long addr, pte_t pte)
1717 {
1718 u64 frame = 0, flags = 0;
1719 struct page *page = NULL;
1720 struct folio *folio;
1721
1722 if (pte_present(pte)) {
1723 if (pm->show_pfn)
1724 frame = pte_pfn(pte);
1725 flags |= PM_PRESENT;
1726 page = vm_normal_page(vma, addr, pte);
1727 if (pte_soft_dirty(pte))
1728 flags |= PM_SOFT_DIRTY;
1729 if (pte_uffd_wp(pte))
1730 flags |= PM_UFFD_WP;
1731 } else if (is_swap_pte(pte)) {
1732 swp_entry_t entry;
1733 if (pte_swp_soft_dirty(pte))
1734 flags |= PM_SOFT_DIRTY;
1735 if (pte_swp_uffd_wp(pte))
1736 flags |= PM_UFFD_WP;
1737 entry = pte_to_swp_entry(pte);
1738 if (pm->show_pfn) {
1739 pgoff_t offset;
1740 /*
 * For PFN swap entries, keep the offset field as the
 * PFN, purely for compatibility with old smaps.
1743 */
1744 if (is_pfn_swap_entry(entry))
1745 offset = swp_offset_pfn(entry);
1746 else
1747 offset = swp_offset(entry);
1748 frame = swp_type(entry) |
1749 (offset << MAX_SWAPFILES_SHIFT);
1750 }
1751 flags |= PM_SWAP;
1752 if (is_pfn_swap_entry(entry))
1753 page = pfn_swap_entry_to_page(entry);
1754 if (pte_marker_entry_uffd_wp(entry))
1755 flags |= PM_UFFD_WP;
1756 if (is_guard_swp_entry(entry))
1757 flags |= PM_GUARD_REGION;
1758 }
1759
1760 if (page) {
1761 folio = page_folio(page);
1762 if (!folio_test_anon(folio))
1763 flags |= PM_FILE;
1764 if ((flags & PM_PRESENT) &&
1765 __folio_page_mapped_exclusively(folio, page))
1766 flags |= PM_MMAP_EXCLUSIVE;
1767 }
1768 if (vma->vm_flags & VM_SOFTDIRTY)
1769 flags |= PM_SOFT_DIRTY;
1770
1771 return make_pme(frame, flags);
1772 }
1773
static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
1775 struct mm_walk *walk)
1776 {
1777 struct vm_area_struct *vma = walk->vma;
1778 struct pagemapread *pm = walk->private;
1779 spinlock_t *ptl;
1780 pte_t *pte, *orig_pte;
1781 int err = 0;
1782 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1783
1784 ptl = pmd_trans_huge_lock(pmdp, vma);
1785 if (ptl) {
1786 unsigned int idx = (addr & ~PMD_MASK) >> PAGE_SHIFT;
1787 u64 flags = 0, frame = 0;
1788 pmd_t pmd = *pmdp;
1789 struct page *page = NULL;
1790 struct folio *folio = NULL;
1791
1792 if (vma->vm_flags & VM_SOFTDIRTY)
1793 flags |= PM_SOFT_DIRTY;
1794
1795 if (pmd_present(pmd)) {
1796 page = pmd_page(pmd);
1797
1798 flags |= PM_PRESENT;
1799 if (pmd_soft_dirty(pmd))
1800 flags |= PM_SOFT_DIRTY;
1801 if (pmd_uffd_wp(pmd))
1802 flags |= PM_UFFD_WP;
1803 if (pm->show_pfn)
1804 frame = pmd_pfn(pmd) + idx;
1805 }
1806 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1807 else if (is_swap_pmd(pmd)) {
1808 swp_entry_t entry = pmd_to_swp_entry(pmd);
1809 unsigned long offset;
1810
1811 if (pm->show_pfn) {
1812 if (is_pfn_swap_entry(entry))
1813 offset = swp_offset_pfn(entry) + idx;
1814 else
1815 offset = swp_offset(entry) + idx;
1816 frame = swp_type(entry) |
1817 (offset << MAX_SWAPFILES_SHIFT);
1818 }
1819 flags |= PM_SWAP;
1820 if (pmd_swp_soft_dirty(pmd))
1821 flags |= PM_SOFT_DIRTY;
1822 if (pmd_swp_uffd_wp(pmd))
1823 flags |= PM_UFFD_WP;
1824 VM_BUG_ON(!is_pmd_migration_entry(pmd));
1825 page = pfn_swap_entry_to_page(entry);
1826 }
1827 #endif
1828
1829 if (page) {
1830 folio = page_folio(page);
1831 if (!folio_test_anon(folio))
1832 flags |= PM_FILE;
1833 }
1834
1835 for (; addr != end; addr += PAGE_SIZE, idx++) {
1836 u64 cur_flags = flags;
1837 pagemap_entry_t pme;
1838
1839 if (folio && (flags & PM_PRESENT) &&
1840 __folio_page_mapped_exclusively(folio, page))
1841 cur_flags |= PM_MMAP_EXCLUSIVE;
1842
1843 pme = make_pme(frame, cur_flags);
1844 err = add_to_pagemap(&pme, pm);
1845 if (err)
1846 break;
1847 if (pm->show_pfn) {
1848 if (flags & PM_PRESENT)
1849 frame++;
1850 else if (flags & PM_SWAP)
1851 frame += (1 << MAX_SWAPFILES_SHIFT);
1852 }
1853 }
1854 spin_unlock(ptl);
1855 return err;
1856 }
1857 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1858
1859 /*
1860 * We can assume that @vma always points to a valid one and @end never
1861 * goes beyond vma->vm_end.
1862 */
1863 orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
1864 if (!pte) {
1865 walk->action = ACTION_AGAIN;
1866 return err;
1867 }
1868 for (; addr < end; pte++, addr += PAGE_SIZE) {
1869 pagemap_entry_t pme;
1870
1871 pme = pte_to_pagemap_entry(pm, vma, addr, ptep_get(pte));
1872 err = add_to_pagemap(&pme, pm);
1873 if (err)
1874 break;
1875 }
1876 pte_unmap_unlock(orig_pte, ptl);
1877
1878 cond_resched();
1879
1880 return err;
1881 }
1882
1883 #ifdef CONFIG_HUGETLB_PAGE
1884 /* This function walks within one hugetlb entry in the single call */
static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
1886 unsigned long addr, unsigned long end,
1887 struct mm_walk *walk)
1888 {
1889 struct pagemapread *pm = walk->private;
1890 struct vm_area_struct *vma = walk->vma;
1891 u64 flags = 0, frame = 0;
1892 int err = 0;
1893 pte_t pte;
1894
1895 if (vma->vm_flags & VM_SOFTDIRTY)
1896 flags |= PM_SOFT_DIRTY;
1897
1898 pte = huge_ptep_get(walk->mm, addr, ptep);
1899 if (pte_present(pte)) {
1900 struct folio *folio = page_folio(pte_page(pte));
1901
1902 if (!folio_test_anon(folio))
1903 flags |= PM_FILE;
1904
1905 if (!folio_maybe_mapped_shared(folio) &&
1906 !hugetlb_pmd_shared(ptep))
1907 flags |= PM_MMAP_EXCLUSIVE;
1908
1909 if (huge_pte_uffd_wp(pte))
1910 flags |= PM_UFFD_WP;
1911
1912 flags |= PM_PRESENT;
1913 if (pm->show_pfn)
1914 frame = pte_pfn(pte) +
1915 ((addr & ~hmask) >> PAGE_SHIFT);
1916 } else if (pte_swp_uffd_wp_any(pte)) {
1917 flags |= PM_UFFD_WP;
1918 }
1919
1920 for (; addr != end; addr += PAGE_SIZE) {
1921 pagemap_entry_t pme = make_pme(frame, flags);
1922
1923 err = add_to_pagemap(&pme, pm);
1924 if (err)
1925 return err;
1926 if (pm->show_pfn && (flags & PM_PRESENT))
1927 frame++;
1928 }
1929
1930 cond_resched();
1931
1932 return err;
1933 }
1934 #else
1935 #define pagemap_hugetlb_range NULL
1936 #endif /* CONFIG_HUGETLB_PAGE */
1937
1938 static const struct mm_walk_ops pagemap_ops = {
1939 .pmd_entry = pagemap_pmd_range,
1940 .pte_hole = pagemap_pte_hole,
1941 .hugetlb_entry = pagemap_hugetlb_range,
1942 .walk_lock = PGWALK_RDLOCK,
1943 };
1944
1945 /*
1946 * /proc/pid/pagemap - an array mapping virtual pages to pfns
1947 *
1948 * For each page in the address space, this file contains one 64-bit entry
1949 * consisting of the following:
1950 *
1951 * Bits 0-54 page frame number (PFN) if present
1952 * Bits 0-4 swap type if swapped
1953 * Bits 5-54 swap offset if swapped
1954 * Bit 55 pte is soft-dirty (see Documentation/admin-guide/mm/soft-dirty.rst)
1955 * Bit 56 page exclusively mapped
1956 * Bit 57 pte is uffd-wp write-protected
1957 * Bit 58 pte is a guard region
1958 * Bits 59-60 zero
1959 * Bit 61 page is file-page or shared-anon
1960 * Bit 62 page swapped
1961 * Bit 63 page present
1962 *
1963 * If the page is not present but in swap, then the PFN contains an
1964 * encoding of the swap file number and the page's offset into the
1965 * swap. Unmapped pages return a null PFN. This allows determining
1966 * precisely which pages are mapped (or in swap) and comparing mapped
1967 * pages between processes.
1968 *
1969 * Efficient users of this interface will use /proc/pid/maps to
1970 * determine which areas of memory are actually mapped and llseek to
1971 * skip over unmapped regions.
1972 */
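/*
 * Illustrative userspace sketch (not kernel code): reading and decoding one
 * pagemap entry for an address in the calling process.  Bit positions follow
 * the table above; all names below are local to the example.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		long psize = sysconf(_SC_PAGESIZE);
 *		uintptr_t vaddr = (uintptr_t)&psize;	// any mapped address
 *		int fd = open("/proc/self/pagemap", O_RDONLY);
 *		uint64_t ent;
 *
 *		if (fd < 0)
 *			return 1;
 *		// one 64-bit entry per virtual page
 *		if (pread(fd, &ent, sizeof(ent),
 *			  (off_t)(vaddr / psize) * sizeof(ent)) != sizeof(ent))
 *			return 1;
 *		printf("present=%d swapped=%d soft-dirty=%d pfn=0x%llx\n",
 *		       (int)(ent >> 63) & 1, (int)(ent >> 62) & 1,
 *		       (int)(ent >> 55) & 1,
 *		       (unsigned long long)(ent & ((1ULL << 55) - 1)));
 *		close(fd);
 *		return 0;
 *	}
 *
 * Without CAP_SYS_ADMIN the PFN field reads back as zero (see pm.show_pfn
 * below), so only the flag bits are meaningful for unprivileged readers.
 */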
1973 static ssize_t pagemap_read(struct file *file, char __user *buf,
1974 size_t count, loff_t *ppos)
1975 {
1976 struct mm_struct *mm = file->private_data;
1977 struct pagemapread pm;
1978 unsigned long src;
1979 unsigned long svpfn;
1980 unsigned long start_vaddr;
1981 unsigned long end_vaddr;
1982 int ret = 0, copied = 0;
1983
1984 if (!mm || !mmget_not_zero(mm))
1985 goto out;
1986
1987 ret = -EINVAL;
1988 /* file position must be aligned */
1989 if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
1990 goto out_mm;
1991
1992 ret = 0;
1993 if (!count)
1994 goto out_mm;
1995
1996 /* do not disclose physical addresses: attack vector */
1997 pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);
1998
1999 pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
2000 pm.buffer = kmalloc_array(pm.len, PM_ENTRY_BYTES, GFP_KERNEL);
2001 ret = -ENOMEM;
2002 if (!pm.buffer)
2003 goto out_mm;
2004
2005 src = *ppos;
2006 svpfn = src / PM_ENTRY_BYTES;
2007 end_vaddr = mm->task_size;
2008
2009 /* watch out for wraparound */
2010 start_vaddr = end_vaddr;
2011 if (svpfn <= (ULONG_MAX >> PAGE_SHIFT)) {
2012 unsigned long end;
2013
2014 ret = mmap_read_lock_killable(mm);
2015 if (ret)
2016 goto out_free;
2017 start_vaddr = untagged_addr_remote(mm, svpfn << PAGE_SHIFT);
2018 mmap_read_unlock(mm);
2019
2020 end = start_vaddr + ((count / PM_ENTRY_BYTES) << PAGE_SHIFT);
2021 if (end >= start_vaddr && end < mm->task_size)
2022 end_vaddr = end;
2023 }
2024
2025 /* Ensure the address is inside the task */
2026 if (start_vaddr > mm->task_size)
2027 start_vaddr = end_vaddr;
2028
2029 ret = 0;
2030 while (count && (start_vaddr < end_vaddr)) {
2031 int len;
2032 unsigned long end;
2033
2034 pm.pos = 0;
2035 end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
2036 /* overflow? */
2037 if (end < start_vaddr || end > end_vaddr)
2038 end = end_vaddr;
2039 ret = mmap_read_lock_killable(mm);
2040 if (ret)
2041 goto out_free;
2042 ret = walk_page_range(mm, start_vaddr, end, &pagemap_ops, &pm);
2043 mmap_read_unlock(mm);
2044 start_vaddr = end;
2045
2046 len = min(count, PM_ENTRY_BYTES * pm.pos);
2047 if (copy_to_user(buf, pm.buffer, len)) {
2048 ret = -EFAULT;
2049 goto out_free;
2050 }
2051 copied += len;
2052 buf += len;
2053 count -= len;
2054 }
2055 *ppos += copied;
2056 if (!ret || ret == PM_END_OF_BUFFER)
2057 ret = copied;
2058
2059 out_free:
2060 kfree(pm.buffer);
2061 out_mm:
2062 mmput(mm);
2063 out:
2064 return ret;
2065 }
2066
2067 static int pagemap_open(struct inode *inode, struct file *file)
2068 {
2069 struct mm_struct *mm;
2070
2071 mm = proc_mem_open(inode, PTRACE_MODE_READ);
2072 if (IS_ERR(mm))
2073 return PTR_ERR(mm);
2074 file->private_data = mm;
2075 return 0;
2076 }
2077
2078 static int pagemap_release(struct inode *inode, struct file *file)
2079 {
2080 struct mm_struct *mm = file->private_data;
2081
2082 if (mm)
2083 mmdrop(mm);
2084 return 0;
2085 }
2086
2087 #define PM_SCAN_CATEGORIES (PAGE_IS_WPALLOWED | PAGE_IS_WRITTEN | \
2088 PAGE_IS_FILE | PAGE_IS_PRESENT | \
2089 PAGE_IS_SWAPPED | PAGE_IS_PFNZERO | \
2090 PAGE_IS_HUGE | PAGE_IS_SOFT_DIRTY)
2091 #define PM_SCAN_FLAGS (PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC)
2092
2093 struct pagemap_scan_private {
2094 struct pm_scan_arg arg;
2095 unsigned long masks_of_interest, cur_vma_category;
2096 struct page_region *vec_buf;
2097 unsigned long vec_buf_len, vec_buf_index, found_pages;
2098 struct page_region __user *vec_out;
2099 };
2100
2101 static unsigned long pagemap_page_category(struct pagemap_scan_private *p,
2102 struct vm_area_struct *vma,
2103 unsigned long addr, pte_t pte)
2104 {
2105 unsigned long categories = 0;
2106
2107 if (pte_present(pte)) {
2108 struct page *page;
2109
2110 categories |= PAGE_IS_PRESENT;
2111 if (!pte_uffd_wp(pte))
2112 categories |= PAGE_IS_WRITTEN;
2113
2114 if (p->masks_of_interest & PAGE_IS_FILE) {
2115 page = vm_normal_page(vma, addr, pte);
2116 if (page && !PageAnon(page))
2117 categories |= PAGE_IS_FILE;
2118 }
2119
2120 if (is_zero_pfn(pte_pfn(pte)))
2121 categories |= PAGE_IS_PFNZERO;
2122 if (pte_soft_dirty(pte))
2123 categories |= PAGE_IS_SOFT_DIRTY;
2124 } else if (is_swap_pte(pte)) {
2125 swp_entry_t swp;
2126
2127 categories |= PAGE_IS_SWAPPED;
2128 if (!pte_swp_uffd_wp_any(pte))
2129 categories |= PAGE_IS_WRITTEN;
2130
2131 if (p->masks_of_interest & PAGE_IS_FILE) {
2132 swp = pte_to_swp_entry(pte);
2133 if (is_pfn_swap_entry(swp) &&
2134 !folio_test_anon(pfn_swap_entry_folio(swp)))
2135 categories |= PAGE_IS_FILE;
2136 }
2137 if (pte_swp_soft_dirty(pte))
2138 categories |= PAGE_IS_SOFT_DIRTY;
2139 }
2140
2141 return categories;
2142 }
2143
2144 static void make_uffd_wp_pte(struct vm_area_struct *vma,
2145 unsigned long addr, pte_t *pte, pte_t ptent)
2146 {
2147 if (pte_present(ptent)) {
2148 pte_t old_pte;
2149
2150 old_pte = ptep_modify_prot_start(vma, addr, pte);
2151 ptent = pte_mkuffd_wp(old_pte);
2152 ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
2153 } else if (is_swap_pte(ptent)) {
2154 ptent = pte_swp_mkuffd_wp(ptent);
2155 set_pte_at(vma->vm_mm, addr, pte, ptent);
2156 } else {
2157 set_pte_at(vma->vm_mm, addr, pte,
2158 make_pte_marker(PTE_MARKER_UFFD_WP));
2159 }
2160 }
2161
2162 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2163 static unsigned long pagemap_thp_category(struct pagemap_scan_private *p,
2164 struct vm_area_struct *vma,
2165 unsigned long addr, pmd_t pmd)
2166 {
2167 unsigned long categories = PAGE_IS_HUGE;
2168
2169 if (pmd_present(pmd)) {
2170 struct page *page;
2171
2172 categories |= PAGE_IS_PRESENT;
2173 if (!pmd_uffd_wp(pmd))
2174 categories |= PAGE_IS_WRITTEN;
2175
2176 if (p->masks_of_interest & PAGE_IS_FILE) {
2177 page = vm_normal_page_pmd(vma, addr, pmd);
2178 if (page && !PageAnon(page))
2179 categories |= PAGE_IS_FILE;
2180 }
2181
2182 if (is_zero_pfn(pmd_pfn(pmd)))
2183 categories |= PAGE_IS_PFNZERO;
2184 if (pmd_soft_dirty(pmd))
2185 categories |= PAGE_IS_SOFT_DIRTY;
2186 } else if (is_swap_pmd(pmd)) {
2187 swp_entry_t swp;
2188
2189 categories |= PAGE_IS_SWAPPED;
2190 if (!pmd_swp_uffd_wp(pmd))
2191 categories |= PAGE_IS_WRITTEN;
2192 if (pmd_swp_soft_dirty(pmd))
2193 categories |= PAGE_IS_SOFT_DIRTY;
2194
2195 if (p->masks_of_interest & PAGE_IS_FILE) {
2196 swp = pmd_to_swp_entry(pmd);
2197 if (is_pfn_swap_entry(swp) &&
2198 !folio_test_anon(pfn_swap_entry_folio(swp)))
2199 categories |= PAGE_IS_FILE;
2200 }
2201 }
2202
2203 return categories;
2204 }
2205
2206 static void make_uffd_wp_pmd(struct vm_area_struct *vma,
2207 unsigned long addr, pmd_t *pmdp)
2208 {
2209 pmd_t old, pmd = *pmdp;
2210
2211 if (pmd_present(pmd)) {
2212 old = pmdp_invalidate_ad(vma, addr, pmdp);
2213 pmd = pmd_mkuffd_wp(old);
2214 set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
2215 } else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
2216 pmd = pmd_swp_mkuffd_wp(pmd);
2217 set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
2218 }
2219 }
2220 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2221
2222 #ifdef CONFIG_HUGETLB_PAGE
2223 static unsigned long pagemap_hugetlb_category(pte_t pte)
2224 {
2225 unsigned long categories = PAGE_IS_HUGE;
2226
2227 /*
2228 * As noted for pagemap_hugetlb_range(), a file-backed HugeTLB page
2229 * cannot be swapped out, so PAGE_IS_FILE is not checked for swapped
2230 * entries.
2231 */
2232 if (pte_present(pte)) {
2233 categories |= PAGE_IS_PRESENT;
2234 if (!huge_pte_uffd_wp(pte))
2235 categories |= PAGE_IS_WRITTEN;
2236 if (!PageAnon(pte_page(pte)))
2237 categories |= PAGE_IS_FILE;
2238 if (is_zero_pfn(pte_pfn(pte)))
2239 categories |= PAGE_IS_PFNZERO;
2240 if (pte_soft_dirty(pte))
2241 categories |= PAGE_IS_SOFT_DIRTY;
2242 } else if (is_swap_pte(pte)) {
2243 categories |= PAGE_IS_SWAPPED;
2244 if (!pte_swp_uffd_wp_any(pte))
2245 categories |= PAGE_IS_WRITTEN;
2246 if (pte_swp_soft_dirty(pte))
2247 categories |= PAGE_IS_SOFT_DIRTY;
2248 }
2249
2250 return categories;
2251 }
2252
2253 static void make_uffd_wp_huge_pte(struct vm_area_struct *vma,
2254 unsigned long addr, pte_t *ptep,
2255 pte_t ptent)
2256 {
2257 unsigned long psize;
2258
2259 if (is_hugetlb_entry_hwpoisoned(ptent) || is_pte_marker(ptent))
2260 return;
2261
2262 psize = huge_page_size(hstate_vma(vma));
2263
2264 if (is_hugetlb_entry_migration(ptent))
2265 set_huge_pte_at(vma->vm_mm, addr, ptep,
2266 pte_swp_mkuffd_wp(ptent), psize);
2267 else if (!huge_pte_none(ptent))
2268 huge_ptep_modify_prot_commit(vma, addr, ptep, ptent,
2269 huge_pte_mkuffd_wp(ptent));
2270 else
2271 set_huge_pte_at(vma->vm_mm, addr, ptep,
2272 make_pte_marker(PTE_MARKER_UFFD_WP), psize);
2273 }
2274 #endif /* CONFIG_HUGETLB_PAGE */
2275
2276 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
2277 static void pagemap_scan_backout_range(struct pagemap_scan_private *p,
2278 unsigned long addr, unsigned long end)
2279 {
2280 struct page_region *cur_buf = &p->vec_buf[p->vec_buf_index];
2281
2282 if (cur_buf->start != addr)
2283 cur_buf->end = addr;
2284 else
2285 cur_buf->start = cur_buf->end = 0;
2286
2287 p->found_pages -= (end - addr) / PAGE_SIZE;
2288 }
2289 #endif
2290
2291 static bool pagemap_scan_is_interesting_page(unsigned long categories,
2292 const struct pagemap_scan_private *p)
2293 {
2294 categories ^= p->arg.category_inverted;
2295 if ((categories & p->arg.category_mask) != p->arg.category_mask)
2296 return false;
2297 if (p->arg.category_anyof_mask && !(categories & p->arg.category_anyof_mask))
2298 return false;
2299
2300 return true;
2301 }
2302
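/*
 * Worked example for pagemap_scan_is_interesting_page() above (illustrative
 * masks): with category_mask = PAGE_IS_PRESENT | PAGE_IS_WRITTEN and
 * category_inverted = PAGE_IS_WRITTEN, a present page that has *not* been
 * written is inverted to PAGE_IS_PRESENT | PAGE_IS_WRITTEN, contains the
 * whole mask and matches, while a present page that *was* written ends up
 * as PAGE_IS_PRESENT only and is skipped.  The optional anyof mask then only
 * requires at least one of its bits to be set.
 */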
2303 static bool pagemap_scan_is_interesting_vma(unsigned long categories,
2304 const struct pagemap_scan_private *p)
2305 {
2306 unsigned long required = p->arg.category_mask & PAGE_IS_WPALLOWED;
2307
2308 categories ^= p->arg.category_inverted;
2309 if ((categories & required) != required)
2310 return false;
2311
2312 return true;
2313 }
2314
2315 static int pagemap_scan_test_walk(unsigned long start, unsigned long end,
2316 struct mm_walk *walk)
2317 {
2318 struct pagemap_scan_private *p = walk->private;
2319 struct vm_area_struct *vma = walk->vma;
2320 unsigned long vma_category = 0;
2321 bool wp_allowed = userfaultfd_wp_async(vma) &&
2322 userfaultfd_wp_use_markers(vma);
2323
2324 if (!wp_allowed) {
2325 /* User requested an explicit failure when wp-async is unsupported */
2326 if (p->arg.flags & PM_SCAN_CHECK_WPASYNC)
2327 return -EPERM;
2328 /*
2329 * The user requires wr-protect but allows unsupported vmas to be
2330 * silently skipped.
2331 */
2332 if (p->arg.flags & PM_SCAN_WP_MATCHING)
2333 return 1;
2334 /*
2335 * Otherwise the request doesn't involve wr-protects at all;
2336 * fall through to the remaining checks and allow the vma walk.
2337 */
2338 }
2339
2340 if (vma->vm_flags & VM_PFNMAP)
2341 return 1;
2342
2343 if (wp_allowed)
2344 vma_category |= PAGE_IS_WPALLOWED;
2345
2346 if (vma->vm_flags & VM_SOFTDIRTY)
2347 vma_category |= PAGE_IS_SOFT_DIRTY;
2348
2349 if (!pagemap_scan_is_interesting_vma(vma_category, p))
2350 return 1;
2351
2352 p->cur_vma_category = vma_category;
2353
2354 return 0;
2355 }
2356
2357 static bool pagemap_scan_push_range(unsigned long categories,
2358 struct pagemap_scan_private *p,
2359 unsigned long addr, unsigned long end)
2360 {
2361 struct page_region *cur_buf = &p->vec_buf[p->vec_buf_index];
2362
2363 /*
2364 * When no output buffer is provided at all, the sentinel values
2365 * won't match here. `cur_buf->end` can only be non-zero when the
2366 * current entry is non-empty.
2367 */
2368 if (addr == cur_buf->end && categories == cur_buf->categories) {
2369 cur_buf->end = end;
2370 return true;
2371 }
2372
2373 if (cur_buf->end) {
2374 if (p->vec_buf_index >= p->vec_buf_len - 1)
2375 return false;
2376
2377 cur_buf = &p->vec_buf[++p->vec_buf_index];
2378 }
2379
2380 cur_buf->start = addr;
2381 cur_buf->end = end;
2382 cur_buf->categories = categories;
2383
2384 return true;
2385 }
2386
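/*
 * For example (illustrative addresses), pagemap_scan_push_range() above
 * coalesces two consecutive matching pages at 0x1000 and 0x2000 with
 * identical categories into a single region [0x1000, 0x3000) instead of
 * consuming two output slots.
 */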
2387 static int pagemap_scan_output(unsigned long categories,
2388 struct pagemap_scan_private *p,
2389 unsigned long addr, unsigned long *end)
2390 {
2391 unsigned long n_pages, total_pages;
2392 int ret = 0;
2393
2394 if (!p->vec_buf)
2395 return 0;
2396
2397 categories &= p->arg.return_mask;
2398
2399 n_pages = (*end - addr) / PAGE_SIZE;
2400 if (check_add_overflow(p->found_pages, n_pages, &total_pages) ||
2401 total_pages > p->arg.max_pages) {
2402 size_t n_too_much = total_pages - p->arg.max_pages;
2403 *end -= n_too_much * PAGE_SIZE;
2404 n_pages -= n_too_much;
2405 ret = -ENOSPC;
2406 }
2407
2408 if (!pagemap_scan_push_range(categories, p, addr, *end)) {
2409 *end = addr;
2410 n_pages = 0;
2411 ret = -ENOSPC;
2412 }
2413
2414 p->found_pages += n_pages;
2415 if (ret)
2416 p->arg.walk_end = *end;
2417
2418 return ret;
2419 }
2420
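/*
 * Worked example for the max_pages clamping above (illustrative numbers):
 * with max_pages = 8, found_pages = 6 and a 4-page range being reported,
 * n_too_much is 2, so *end is pulled back by two pages, exactly 8 pages end
 * up reported (assuming the push succeeds) and -ENOSPC tells the caller to
 * stop the walk at walk_end.
 */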
2421 static int pagemap_scan_thp_entry(pmd_t *pmd, unsigned long start,
2422 unsigned long end, struct mm_walk *walk)
2423 {
2424 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2425 struct pagemap_scan_private *p = walk->private;
2426 struct vm_area_struct *vma = walk->vma;
2427 unsigned long categories;
2428 spinlock_t *ptl;
2429 int ret = 0;
2430
2431 ptl = pmd_trans_huge_lock(pmd, vma);
2432 if (!ptl)
2433 return -ENOENT;
2434
2435 categories = p->cur_vma_category |
2436 pagemap_thp_category(p, vma, start, *pmd);
2437
2438 if (!pagemap_scan_is_interesting_page(categories, p))
2439 goto out_unlock;
2440
2441 ret = pagemap_scan_output(categories, p, start, &end);
2442 if (start == end)
2443 goto out_unlock;
2444
2445 if (~p->arg.flags & PM_SCAN_WP_MATCHING)
2446 goto out_unlock;
2447 if (~categories & PAGE_IS_WRITTEN)
2448 goto out_unlock;
2449
2450 /*
2451 * Break huge page into small pages if the WP operation
2452 * needs to be performed on a portion of the huge page.
2453 */
2454 if (end != start + HPAGE_SIZE) {
2455 spin_unlock(ptl);
2456 split_huge_pmd(vma, pmd, start);
2457 pagemap_scan_backout_range(p, start, end);
2458 /* Report as if there was no THP */
2459 return -ENOENT;
2460 }
2461
2462 make_uffd_wp_pmd(vma, start, pmd);
2463 flush_tlb_range(vma, start, end);
2464 out_unlock:
2465 spin_unlock(ptl);
2466 return ret;
2467 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */
2468 return -ENOENT;
2469 #endif
2470 }
2471
2472 static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start,
2473 unsigned long end, struct mm_walk *walk)
2474 {
2475 struct pagemap_scan_private *p = walk->private;
2476 struct vm_area_struct *vma = walk->vma;
2477 unsigned long addr, flush_end = 0;
2478 pte_t *pte, *start_pte;
2479 spinlock_t *ptl;
2480 int ret;
2481
2482 ret = pagemap_scan_thp_entry(pmd, start, end, walk);
2483 if (ret != -ENOENT)
2484 return ret;
2485
2486 ret = 0;
2487 start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
2488 if (!pte) {
2489 walk->action = ACTION_AGAIN;
2490 return 0;
2491 }
2492
2493 arch_enter_lazy_mmu_mode();
2494
2495 if ((p->arg.flags & PM_SCAN_WP_MATCHING) && !p->vec_out) {
2496 /* Fast path for performing exclusive WP */
2497 for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
2498 pte_t ptent = ptep_get(pte);
2499
2500 if ((pte_present(ptent) && pte_uffd_wp(ptent)) ||
2501 pte_swp_uffd_wp_any(ptent))
2502 continue;
2503 make_uffd_wp_pte(vma, addr, pte, ptent);
2504 if (!flush_end)
2505 start = addr;
2506 flush_end = addr + PAGE_SIZE;
2507 }
2508 goto flush_and_return;
2509 }
2510
2511 if (!p->arg.category_anyof_mask && !p->arg.category_inverted &&
2512 p->arg.category_mask == PAGE_IS_WRITTEN &&
2513 p->arg.return_mask == PAGE_IS_WRITTEN) {
2514 for (addr = start; addr < end; pte++, addr += PAGE_SIZE) {
2515 unsigned long next = addr + PAGE_SIZE;
2516 pte_t ptent = ptep_get(pte);
2517
2518 if ((pte_present(ptent) && pte_uffd_wp(ptent)) ||
2519 pte_swp_uffd_wp_any(ptent))
2520 continue;
2521 ret = pagemap_scan_output(p->cur_vma_category | PAGE_IS_WRITTEN,
2522 p, addr, &next);
2523 if (next == addr)
2524 break;
2525 if (~p->arg.flags & PM_SCAN_WP_MATCHING)
2526 continue;
2527 make_uffd_wp_pte(vma, addr, pte, ptent);
2528 if (!flush_end)
2529 start = addr;
2530 flush_end = next;
2531 }
2532 goto flush_and_return;
2533 }
2534
2535 for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
2536 pte_t ptent = ptep_get(pte);
2537 unsigned long categories = p->cur_vma_category |
2538 pagemap_page_category(p, vma, addr, ptent);
2539 unsigned long next = addr + PAGE_SIZE;
2540
2541 if (!pagemap_scan_is_interesting_page(categories, p))
2542 continue;
2543
2544 ret = pagemap_scan_output(categories, p, addr, &next);
2545 if (next == addr)
2546 break;
2547
2548 if (~p->arg.flags & PM_SCAN_WP_MATCHING)
2549 continue;
2550 if (~categories & PAGE_IS_WRITTEN)
2551 continue;
2552
2553 make_uffd_wp_pte(vma, addr, pte, ptent);
2554 if (!flush_end)
2555 start = addr;
2556 flush_end = next;
2557 }
2558
2559 flush_and_return:
2560 if (flush_end)
2561 flush_tlb_range(vma, start, addr);
2562
2563 arch_leave_lazy_mmu_mode();
2564 pte_unmap_unlock(start_pte, ptl);
2565
2566 cond_resched();
2567 return ret;
2568 }
2569
2570 #ifdef CONFIG_HUGETLB_PAGE
2571 static int pagemap_scan_hugetlb_entry(pte_t *ptep, unsigned long hmask,
2572 unsigned long start, unsigned long end,
2573 struct mm_walk *walk)
2574 {
2575 struct pagemap_scan_private *p = walk->private;
2576 struct vm_area_struct *vma = walk->vma;
2577 unsigned long categories;
2578 spinlock_t *ptl;
2579 int ret = 0;
2580 pte_t pte;
2581
2582 if (~p->arg.flags & PM_SCAN_WP_MATCHING) {
2583 /* Go the short route when not write-protecting pages. */
2584
2585 pte = huge_ptep_get(walk->mm, start, ptep);
2586 categories = p->cur_vma_category | pagemap_hugetlb_category(pte);
2587
2588 if (!pagemap_scan_is_interesting_page(categories, p))
2589 return 0;
2590
2591 return pagemap_scan_output(categories, p, start, &end);
2592 }
2593
2594 i_mmap_lock_write(vma->vm_file->f_mapping);
2595 ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, ptep);
2596
2597 pte = huge_ptep_get(walk->mm, start, ptep);
2598 categories = p->cur_vma_category | pagemap_hugetlb_category(pte);
2599
2600 if (!pagemap_scan_is_interesting_page(categories, p))
2601 goto out_unlock;
2602
2603 ret = pagemap_scan_output(categories, p, start, &end);
2604 if (start == end)
2605 goto out_unlock;
2606
2607 if (~categories & PAGE_IS_WRITTEN)
2608 goto out_unlock;
2609
2610 if (end != start + HPAGE_SIZE) {
2611 /* Partial HugeTLB page WP isn't possible. */
2612 pagemap_scan_backout_range(p, start, end);
2613 p->arg.walk_end = start;
2614 ret = 0;
2615 goto out_unlock;
2616 }
2617
2618 make_uffd_wp_huge_pte(vma, start, ptep, pte);
2619 flush_hugetlb_tlb_range(vma, start, end);
2620
2621 out_unlock:
2622 spin_unlock(ptl);
2623 i_mmap_unlock_write(vma->vm_file->f_mapping);
2624
2625 return ret;
2626 }
2627 #else
2628 #define pagemap_scan_hugetlb_entry NULL
2629 #endif
2630
2631 static int pagemap_scan_pte_hole(unsigned long addr, unsigned long end,
2632 int depth, struct mm_walk *walk)
2633 {
2634 struct pagemap_scan_private *p = walk->private;
2635 struct vm_area_struct *vma = walk->vma;
2636 int ret, err;
2637
2638 if (!vma || !pagemap_scan_is_interesting_page(p->cur_vma_category, p))
2639 return 0;
2640
2641 ret = pagemap_scan_output(p->cur_vma_category, p, addr, &end);
2642 if (addr == end)
2643 return ret;
2644
2645 if (~p->arg.flags & PM_SCAN_WP_MATCHING)
2646 return ret;
2647
2648 err = uffd_wp_range(vma, addr, end - addr, true);
2649 if (err < 0)
2650 ret = err;
2651
2652 return ret;
2653 }
2654
2655 static const struct mm_walk_ops pagemap_scan_ops = {
2656 .test_walk = pagemap_scan_test_walk,
2657 .pmd_entry = pagemap_scan_pmd_entry,
2658 .pte_hole = pagemap_scan_pte_hole,
2659 .hugetlb_entry = pagemap_scan_hugetlb_entry,
2660 };
2661
2662 static int pagemap_scan_get_args(struct pm_scan_arg *arg,
2663 unsigned long uarg)
2664 {
2665 if (copy_from_user(arg, (void __user *)uarg, sizeof(*arg)))
2666 return -EFAULT;
2667
2668 if (arg->size != sizeof(struct pm_scan_arg))
2669 return -EINVAL;
2670
2671 /* Validate requested features */
2672 if (arg->flags & ~PM_SCAN_FLAGS)
2673 return -EINVAL;
2674 if ((arg->category_inverted | arg->category_mask |
2675 arg->category_anyof_mask | arg->return_mask) & ~PM_SCAN_CATEGORIES)
2676 return -EINVAL;
2677
2678 arg->start = untagged_addr((unsigned long)arg->start);
2679 arg->end = untagged_addr((unsigned long)arg->end);
2680 arg->vec = untagged_addr((unsigned long)arg->vec);
2681
2682 /* Validate memory pointers */
2683 if (!IS_ALIGNED(arg->start, PAGE_SIZE))
2684 return -EINVAL;
2685 if (!access_ok((void __user *)(long)arg->start, arg->end - arg->start))
2686 return -EFAULT;
2687 if (!arg->vec && arg->vec_len)
2688 return -EINVAL;
2689 if (UINT_MAX == SIZE_MAX && arg->vec_len > SIZE_MAX)
2690 return -EINVAL;
2691 if (arg->vec && !access_ok((void __user *)(long)arg->vec,
2692 size_mul(arg->vec_len, sizeof(struct page_region))))
2693 return -EFAULT;
2694
2695 /* Fixup default values */
2696 arg->end = ALIGN(arg->end, PAGE_SIZE);
2697 arg->walk_end = 0;
2698 if (!arg->max_pages)
2699 arg->max_pages = ULONG_MAX;
2700
2701 return 0;
2702 }
2703
2704 static int pagemap_scan_writeback_args(struct pm_scan_arg *arg,
2705 unsigned long uargl)
2706 {
2707 struct pm_scan_arg __user *uarg = (void __user *)uargl;
2708
2709 if (copy_to_user(&uarg->walk_end, &arg->walk_end, sizeof(arg->walk_end)))
2710 return -EFAULT;
2711
2712 return 0;
2713 }
2714
2715 static int pagemap_scan_init_bounce_buffer(struct pagemap_scan_private *p)
2716 {
2717 if (!p->arg.vec_len)
2718 return 0;
2719
2720 p->vec_buf_len = min_t(size_t, PAGEMAP_WALK_SIZE >> PAGE_SHIFT,
2721 p->arg.vec_len);
2722 p->vec_buf = kmalloc_array(p->vec_buf_len, sizeof(*p->vec_buf),
2723 GFP_KERNEL);
2724 if (!p->vec_buf)
2725 return -ENOMEM;
2726
2727 p->vec_buf->start = p->vec_buf->end = 0;
2728 p->vec_out = (struct page_region __user *)(long)p->arg.vec;
2729
2730 return 0;
2731 }
2732
2733 static long pagemap_scan_flush_buffer(struct pagemap_scan_private *p)
2734 {
2735 const struct page_region *buf = p->vec_buf;
2736 long n = p->vec_buf_index;
2737
2738 if (!p->vec_buf)
2739 return 0;
2740
2741 if (buf[n].end != buf[n].start)
2742 n++;
2743
2744 if (!n)
2745 return 0;
2746
2747 if (copy_to_user(p->vec_out, buf, n * sizeof(*buf)))
2748 return -EFAULT;
2749
2750 p->arg.vec_len -= n;
2751 p->vec_out += n;
2752
2753 p->vec_buf_index = 0;
2754 p->vec_buf_len = min_t(size_t, p->vec_buf_len, p->arg.vec_len);
2755 p->vec_buf->start = p->vec_buf->end = 0;
2756
2757 return n;
2758 }
2759
2760 static long do_pagemap_scan(struct mm_struct *mm, unsigned long uarg)
2761 {
2762 struct pagemap_scan_private p = {0};
2763 unsigned long walk_start;
2764 size_t n_ranges_out = 0;
2765 int ret;
2766
2767 ret = pagemap_scan_get_args(&p.arg, uarg);
2768 if (ret)
2769 return ret;
2770
2771 p.masks_of_interest = p.arg.category_mask | p.arg.category_anyof_mask |
2772 p.arg.return_mask;
2773 ret = pagemap_scan_init_bounce_buffer(&p);
2774 if (ret)
2775 return ret;
2776
2777 for (walk_start = p.arg.start; walk_start < p.arg.end;
2778 walk_start = p.arg.walk_end) {
2779 struct mmu_notifier_range range;
2780 long n_out;
2781
2782 if (fatal_signal_pending(current)) {
2783 ret = -EINTR;
2784 break;
2785 }
2786
2787 ret = mmap_read_lock_killable(mm);
2788 if (ret)
2789 break;
2790
2791 /* Protection change for the range is going to happen. */
2792 if (p.arg.flags & PM_SCAN_WP_MATCHING) {
2793 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 0,
2794 mm, walk_start, p.arg.end);
2795 mmu_notifier_invalidate_range_start(&range);
2796 }
2797
2798 ret = walk_page_range(mm, walk_start, p.arg.end,
2799 &pagemap_scan_ops, &p);
2800
2801 if (p.arg.flags & PM_SCAN_WP_MATCHING)
2802 mmu_notifier_invalidate_range_end(&range);
2803
2804 mmap_read_unlock(mm);
2805
2806 n_out = pagemap_scan_flush_buffer(&p);
2807 if (n_out < 0)
2808 ret = n_out;
2809 else
2810 n_ranges_out += n_out;
2811
2812 if (ret != -ENOSPC)
2813 break;
2814
2815 if (p.arg.vec_len == 0 || p.found_pages == p.arg.max_pages)
2816 break;
2817 }
2818
2819 /* ENOSPC signifies early stop (buffer full) from the walk. */
2820 if (!ret || ret == -ENOSPC)
2821 ret = n_ranges_out;
2822
2823 /* The walk_end isn't set when ret is zero */
2824 if (!p.arg.walk_end)
2825 p.arg.walk_end = p.arg.end;
2826 if (pagemap_scan_writeback_args(&p.arg, uarg))
2827 ret = -EFAULT;
2828
2829 kfree(p.vec_buf);
2830 return ret;
2831 }
2832
2833 static long do_pagemap_cmd(struct file *file, unsigned int cmd,
2834 unsigned long arg)
2835 {
2836 struct mm_struct *mm = file->private_data;
2837
2838 switch (cmd) {
2839 case PAGEMAP_SCAN:
2840 return do_pagemap_scan(mm, arg);
2841
2842 default:
2843 return -EINVAL;
2844 }
2845 }
2846
2847 const struct file_operations proc_pagemap_operations = {
2848 .llseek = mem_lseek, /* borrow this */
2849 .read = pagemap_read,
2850 .open = pagemap_open,
2851 .release = pagemap_release,
2852 .unlocked_ioctl = do_pagemap_cmd,
2853 .compat_ioctl = do_pagemap_cmd,
2854 };
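/*
 * Illustrative userspace sketch (not kernel code): using the PAGEMAP_SCAN
 * ioctl on an open /proc/<pid>/pagemap fd to list soft-dirty ranges.
 * struct pm_scan_arg, struct page_region and the PAGE_IS_* constants are
 * the uapi definitions from <linux/fs.h>; the vector length of 256 is an
 * arbitrary choice for the example.
 *
 *	#include <string.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	static long scan_soft_dirty(int pagemap_fd, unsigned long start,
 *				    unsigned long end)
 *	{
 *		struct page_region regions[256];
 *		struct pm_scan_arg arg;
 *		long i, n;
 *
 *		memset(&arg, 0, sizeof(arg));
 *		arg.size = sizeof(arg);
 *		arg.start = start;
 *		arg.end = end;
 *		arg.vec = (unsigned long)regions;
 *		arg.vec_len = 256;
 *		arg.category_mask = PAGE_IS_SOFT_DIRTY;
 *		arg.return_mask = PAGE_IS_SOFT_DIRTY;
 *
 *		n = ioctl(pagemap_fd, PAGEMAP_SCAN, &arg);
 *		for (i = 0; i < n; i++)
 *			printf("%llx-%llx\n",
 *			       (unsigned long long)regions[i].start,
 *			       (unsigned long long)regions[i].end);
 *		return n;	// number of ranges, or -1 with errno set
 *	}
 */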
2855 #endif /* CONFIG_PROC_PAGE_MONITOR */
2856
2857 #ifdef CONFIG_NUMA
2858
2859 struct numa_maps {
2860 unsigned long pages;
2861 unsigned long anon;
2862 unsigned long active;
2863 unsigned long writeback;
2864 unsigned long mapcount_max;
2865 unsigned long dirty;
2866 unsigned long swapcache;
2867 unsigned long node[MAX_NUMNODES];
2868 };
2869
2870 struct numa_maps_private {
2871 struct proc_maps_private proc_maps;
2872 struct numa_maps md;
2873 };
2874
2875 static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
2876 unsigned long nr_pages)
2877 {
2878 struct folio *folio = page_folio(page);
2879 int count;
2880
2881 if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
2882 count = folio_precise_page_mapcount(folio, page);
2883 else
2884 count = folio_average_page_mapcount(folio);
2885
2886 md->pages += nr_pages;
2887 if (pte_dirty || folio_test_dirty(folio))
2888 md->dirty += nr_pages;
2889
2890 if (folio_test_swapcache(folio))
2891 md->swapcache += nr_pages;
2892
2893 if (folio_test_active(folio) || folio_test_unevictable(folio))
2894 md->active += nr_pages;
2895
2896 if (folio_test_writeback(folio))
2897 md->writeback += nr_pages;
2898
2899 if (folio_test_anon(folio))
2900 md->anon += nr_pages;
2901
2902 if (count > md->mapcount_max)
2903 md->mapcount_max = count;
2904
2905 md->node[folio_nid(folio)] += nr_pages;
2906 }
2907
2908 static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
2909 unsigned long addr)
2910 {
2911 struct page *page;
2912 int nid;
2913
2914 if (!pte_present(pte))
2915 return NULL;
2916
2917 page = vm_normal_page(vma, addr, pte);
2918 if (!page || is_zone_device_page(page))
2919 return NULL;
2920
2921 if (PageReserved(page))
2922 return NULL;
2923
2924 nid = page_to_nid(page);
2925 if (!node_isset(nid, node_states[N_MEMORY]))
2926 return NULL;
2927
2928 return page;
2929 }
2930
2931 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2932 static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
2933 struct vm_area_struct *vma,
2934 unsigned long addr)
2935 {
2936 struct page *page;
2937 int nid;
2938
2939 if (!pmd_present(pmd))
2940 return NULL;
2941
2942 page = vm_normal_page_pmd(vma, addr, pmd);
2943 if (!page)
2944 return NULL;
2945
2946 if (PageReserved(page))
2947 return NULL;
2948
2949 nid = page_to_nid(page);
2950 if (!node_isset(nid, node_states[N_MEMORY]))
2951 return NULL;
2952
2953 return page;
2954 }
2955 #endif
2956
2957 static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
2958 unsigned long end, struct mm_walk *walk)
2959 {
2960 struct numa_maps *md = walk->private;
2961 struct vm_area_struct *vma = walk->vma;
2962 spinlock_t *ptl;
2963 pte_t *orig_pte;
2964 pte_t *pte;
2965
2966 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2967 ptl = pmd_trans_huge_lock(pmd, vma);
2968 if (ptl) {
2969 struct page *page;
2970
2971 page = can_gather_numa_stats_pmd(*pmd, vma, addr);
2972 if (page)
2973 gather_stats(page, md, pmd_dirty(*pmd),
2974 HPAGE_PMD_SIZE/PAGE_SIZE);
2975 spin_unlock(ptl);
2976 return 0;
2977 }
2978 #endif
2979 orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
2980 if (!pte) {
2981 walk->action = ACTION_AGAIN;
2982 return 0;
2983 }
2984 do {
2985 pte_t ptent = ptep_get(pte);
2986 struct page *page = can_gather_numa_stats(ptent, vma, addr);
2987 if (!page)
2988 continue;
2989 gather_stats(page, md, pte_dirty(ptent), 1);
2990
2991 } while (pte++, addr += PAGE_SIZE, addr != end);
2992 pte_unmap_unlock(orig_pte, ptl);
2993 cond_resched();
2994 return 0;
2995 }
2996 #ifdef CONFIG_HUGETLB_PAGE
2997 static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
2998 unsigned long addr, unsigned long end, struct mm_walk *walk)
2999 {
3000 pte_t huge_pte = huge_ptep_get(walk->mm, addr, pte);
3001 struct numa_maps *md;
3002 struct page *page;
3003
3004 if (!pte_present(huge_pte))
3005 return 0;
3006
3007 page = pte_page(huge_pte);
3008
3009 md = walk->private;
3010 gather_stats(page, md, pte_dirty(huge_pte), 1);
3011 return 0;
3012 }
3013
3014 #else
3015 static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
3016 unsigned long addr, unsigned long end, struct mm_walk *walk)
3017 {
3018 return 0;
3019 }
3020 #endif
3021
3022 static const struct mm_walk_ops show_numa_ops = {
3023 .hugetlb_entry = gather_hugetlb_stats,
3024 .pmd_entry = gather_pte_stats,
3025 .walk_lock = PGWALK_RDLOCK,
3026 };
3027
3028 /*
3029 * Display pages allocated per node and memory policy via /proc.
3030 */
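/*
 * A resulting /proc/<pid>/numa_maps record looks roughly like the following
 * (illustrative values; each field is omitted when its counter is zero, and
 * the whole record is emitted on a single line):
 *
 *	7f23a4c00000 default file=/usr/lib/libc.so.6 mapped=120 mapmax=42
 *	N0=80 N1=40 kernelpagesize_kB=4
 */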
3031 static int show_numa_map(struct seq_file *m, void *v)
3032 {
3033 struct numa_maps_private *numa_priv = m->private;
3034 struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
3035 struct vm_area_struct *vma = v;
3036 struct numa_maps *md = &numa_priv->md;
3037 struct file *file = vma->vm_file;
3038 struct mm_struct *mm = vma->vm_mm;
3039 char buffer[64];
3040 struct mempolicy *pol;
3041 pgoff_t ilx;
3042 int nid;
3043
3044 if (!mm)
3045 return 0;
3046
3047 /* Ensure we start with an empty set of numa_maps statistics. */
3048 memset(md, 0, sizeof(*md));
3049
3050 pol = __get_vma_policy(vma, vma->vm_start, &ilx);
3051 if (pol) {
3052 mpol_to_str(buffer, sizeof(buffer), pol);
3053 mpol_cond_put(pol);
3054 } else {
3055 mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
3056 }
3057
3058 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
3059
3060 if (file) {
3061 seq_puts(m, " file=");
3062 seq_path(m, file_user_path(file), "\n\t= ");
3063 } else if (vma_is_initial_heap(vma)) {
3064 seq_puts(m, " heap");
3065 } else if (vma_is_initial_stack(vma)) {
3066 seq_puts(m, " stack");
3067 }
3068
3069 if (is_vm_hugetlb_page(vma))
3070 seq_puts(m, " huge");
3071
3072 /* mmap_lock is held by m_start */
3073 walk_page_vma(vma, &show_numa_ops, md);
3074
3075 if (!md->pages)
3076 goto out;
3077
3078 if (md->anon)
3079 seq_printf(m, " anon=%lu", md->anon);
3080
3081 if (md->dirty)
3082 seq_printf(m, " dirty=%lu", md->dirty);
3083
3084 if (md->pages != md->anon && md->pages != md->dirty)
3085 seq_printf(m, " mapped=%lu", md->pages);
3086
3087 if (md->mapcount_max > 1)
3088 seq_printf(m, " mapmax=%lu", md->mapcount_max);
3089
3090 if (md->swapcache)
3091 seq_printf(m, " swapcache=%lu", md->swapcache);
3092
3093 if (md->active < md->pages && !is_vm_hugetlb_page(vma))
3094 seq_printf(m, " active=%lu", md->active);
3095
3096 if (md->writeback)
3097 seq_printf(m, " writeback=%lu", md->writeback);
3098
3099 for_each_node_state(nid, N_MEMORY)
3100 if (md->node[nid])
3101 seq_printf(m, " N%d=%lu", nid, md->node[nid]);
3102
3103 seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
3104 out:
3105 seq_putc(m, '\n');
3106 return 0;
3107 }
3108
3109 static const struct seq_operations proc_pid_numa_maps_op = {
3110 .start = m_start,
3111 .next = m_next,
3112 .stop = m_stop,
3113 .show = show_numa_map,
3114 };
3115
3116 static int pid_numa_maps_open(struct inode *inode, struct file *file)
3117 {
3118 return proc_maps_open(inode, file, &proc_pid_numa_maps_op,
3119 sizeof(struct numa_maps_private));
3120 }
3121
3122 const struct file_operations proc_pid_numa_maps_operations = {
3123 .open = pid_numa_maps_open,
3124 .read = seq_read,
3125 .llseek = seq_lseek,
3126 .release = proc_map_release,
3127 };
3128
3129 #endif /* CONFIG_NUMA */
3130