// SPDX-License-Identifier: GPL-2.0
#include <linux/pagewalk.h>
#include <linux/mm_inline.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/ksm.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/uaccess.h>
#include <linux/pkeys.h>
#include <linux/minmax.h>
#include <linux/overflow.h>
#include <linux/buildid.h>

#include <asm/elf.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include "internal.h"

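/*
 * Helper for the Vm* fields below: @val is a page count; shifting by
 * (PAGE_SHIFT - 10) converts pages to kB (with 4K pages, 1 page = 4 kB),
 * printed right-aligned in an 8-character field.
 */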
#define SEQ_PUT_DEC(str, val) \
		seq_put_decimal_ull_width(m, str, (val) << (PAGE_SHIFT-10), 8)
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long text, lib, swap, anon, file, shmem;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	anon = get_mm_counter(mm, MM_ANONPAGES);
	file = get_mm_counter(mm, MM_FILEPAGES);
	shmem = get_mm_counter(mm, MM_SHMEMPAGES);

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = anon + file + shmem;
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	/* split executable areas between text and lib */
	text = PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK);
	text = min(text, mm->exec_vm << PAGE_SHIFT);
	lib = (mm->exec_vm << PAGE_SHIFT) - text;

	swap = get_mm_counter(mm, MM_SWAPENTS);
	SEQ_PUT_DEC("VmPeak:\t", hiwater_vm);
	SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm);
	SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm);
	SEQ_PUT_DEC(" kB\nVmPin:\t", atomic64_read(&mm->pinned_vm));
	SEQ_PUT_DEC(" kB\nVmHWM:\t", hiwater_rss);
	SEQ_PUT_DEC(" kB\nVmRSS:\t", total_rss);
	SEQ_PUT_DEC(" kB\nRssAnon:\t", anon);
	SEQ_PUT_DEC(" kB\nRssFile:\t", file);
	SEQ_PUT_DEC(" kB\nRssShmem:\t", shmem);
	SEQ_PUT_DEC(" kB\nVmData:\t", mm->data_vm);
	SEQ_PUT_DEC(" kB\nVmStk:\t", mm->stack_vm);
	seq_put_decimal_ull_width(m,
		    " kB\nVmExe:\t", text >> 10, 8);
	seq_put_decimal_ull_width(m,
		    " kB\nVmLib:\t", lib >> 10, 8);
	seq_put_decimal_ull_width(m,
		    " kB\nVmPTE:\t", mm_pgtables_bytes(mm) >> 10, 8);
	SEQ_PUT_DEC(" kB\nVmSwap:\t", swap);
	seq_puts(m, " kB\n");
	hugetlb_report_usage(m, mm);
}
#undef SEQ_PUT_DEC

unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	*shared = get_mm_counter(mm, MM_FILEPAGES) +
			get_mm_counter(mm, MM_SHMEMPAGES);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->data_vm + mm->stack_vm;
	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
	return mm->total_vm;
}

#ifdef CONFIG_NUMA
/*
 * Save get_task_policy() for show_numa_map().
 */
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
	struct task_struct *task = priv->task;

	task_lock(task);
	priv->task_mempolicy = get_task_policy(task);
	mpol_get(priv->task_mempolicy);
	task_unlock(task);
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
	mpol_put(priv->task_mempolicy);
}
#else
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
}
#endif

static struct vm_area_struct *proc_get_vma(struct proc_maps_private *priv,
						loff_t *ppos)
{
	struct vm_area_struct *vma = vma_next(&priv->iter);

	if (vma) {
		*ppos = vma->vm_start;
	} else {
		*ppos = -2UL;
		vma = get_gate_vma(priv->mm);
	}

	return vma;
}

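/*
 * seq_file iteration over the address space: *ppos tracks the next
 * VMA's start address, with two sentinels: -2 selects the gate VMA,
 * -1 marks the end of the iteration (see m_next()).  m_start() takes
 * the task, mm and mmap_lock references that m_stop() drops again.
 */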
static void *m_start(struct seq_file *m, loff_t *ppos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = *ppos;
	struct mm_struct *mm;

	/* See m_next(). Zero at the start or after lseek. */
	if (last_addr == -1UL)
		return NULL;

	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = priv->mm;
	if (!mm || !mmget_not_zero(mm)) {
		put_task_struct(priv->task);
		priv->task = NULL;
		return NULL;
	}

	if (mmap_read_lock_killable(mm)) {
		mmput(mm);
		put_task_struct(priv->task);
		priv->task = NULL;
		return ERR_PTR(-EINTR);
	}

	vma_iter_init(&priv->iter, mm, last_addr);
	hold_task_mempolicy(priv);
	if (last_addr == -2UL)
		return get_gate_vma(mm);

	return proc_get_vma(priv, ppos);
}

static void *m_next(struct seq_file *m, void *v, loff_t *ppos)
{
	if (*ppos == -2UL) {
		*ppos = -1UL;
		return NULL;
	}
	return proc_get_vma(m->private, ppos);
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct mm_struct *mm = priv->mm;

	if (!priv->task)
		return;

	release_task_mempolicy(priv);
	mmap_read_unlock(mm);
	mmput(mm);
	put_task_struct(priv->task);
	priv->task = NULL;
}

static int proc_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops, int psize)
{
	struct proc_maps_private *priv = __seq_open_private(file, ops, psize);

	if (!priv)
		return -ENOMEM;

	priv->inode = inode;
	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR_OR_NULL(priv->mm)) {
		int err = priv->mm ? PTR_ERR(priv->mm) : -ESRCH;

		seq_release_private(inode, file);
		return err;
	}

	return 0;
}

static int proc_map_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	if (priv->mm)
		mmdrop(priv->mm);

	return seq_release_private(inode, file);
}

static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	return proc_maps_open(inode, file, ops,
				sizeof(struct proc_maps_private));
}

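/*
 * Resolve the name shown for @vma, in precedence order: the backing
 * file's path (or "[anon_shmem:<name>]" for named anon shared memory),
 * a name supplied by vm_ops->name(), an arch-specific name, "[vdso]"
 * for !vma->vm_mm, the "[heap]"/"[stack]" markers, and finally
 * "[anon:<name>]" for named private anonymous memory.  At most one of
 * *path, *name_fmt (with *name), or *name alone is set on return.
 */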
static void get_vma_name(struct vm_area_struct *vma,
			 const struct path **path,
			 const char **name,
			 const char **name_fmt)
{
	struct anon_vma_name *anon_name = vma->vm_mm ? anon_vma_name(vma) : NULL;

	*name = NULL;
	*path = NULL;
	*name_fmt = NULL;

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (vma->vm_file) {
		/*
		 * If the user named this anon shared memory via
		 * prctl(PR_SET_VMA, ...), use the provided name.
		 */
		if (anon_name) {
			*name_fmt = "[anon_shmem:%s]";
			*name = anon_name->name;
		} else {
			*path = file_user_path(vma->vm_file);
		}
		return;
	}

	if (vma->vm_ops && vma->vm_ops->name) {
		*name = vma->vm_ops->name(vma);
		if (*name)
			return;
	}

	*name = arch_vma_name(vma);
	if (*name)
		return;

	if (!vma->vm_mm) {
		*name = "[vdso]";
		return;
	}

	if (vma_is_initial_heap(vma)) {
		*name = "[heap]";
		return;
	}

	if (vma_is_initial_stack(vma)) {
		*name = "[stack]";
		return;
	}

	if (anon_name) {
		*name_fmt = "[anon:%s]";
		*name = anon_name->name;
		return;
	}
}

static void show_vma_header_prefix(struct seq_file *m,
				   unsigned long start, unsigned long end,
				   vm_flags_t flags, unsigned long long pgoff,
				   dev_t dev, unsigned long ino)
{
	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
	seq_put_hex_ll(m, NULL, start, 8);
	seq_put_hex_ll(m, "-", end, 8);
	seq_putc(m, ' ');
	seq_putc(m, flags & VM_READ ? 'r' : '-');
	seq_putc(m, flags & VM_WRITE ? 'w' : '-');
	seq_putc(m, flags & VM_EXEC ? 'x' : '-');
	seq_putc(m, flags & VM_MAYSHARE ? 's' : 'p');
	seq_put_hex_ll(m, " ", pgoff, 8);
	seq_put_hex_ll(m, " ", MAJOR(dev), 2);
	seq_put_hex_ll(m, ":", MINOR(dev), 2);
	seq_put_decimal_ull(m, " ", ino);
	seq_putc(m, ' ');
}

static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
{
	const struct path *path;
	const char *name_fmt, *name;
	vm_flags_t flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	unsigned long start, end;
	dev_t dev = 0;

	if (vma->vm_file) {
		const struct inode *inode = file_user_inode(vma->vm_file);

		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	start = vma->vm_start;
	end = vma->vm_end;
	show_vma_header_prefix(m, start, end, flags, pgoff, dev, ino);

	get_vma_name(vma, &path, &name, &name_fmt);
	if (path) {
		seq_pad(m, ' ');
		seq_path(m, path, "\n");
	} else if (name_fmt) {
		seq_pad(m, ' ');
		seq_printf(m, name_fmt, name);
	} else if (name) {
		seq_pad(m, ' ');
		seq_puts(m, name);
	}
	seq_putc(m, '\n');
}

static int show_map(struct seq_file *m, void *v)
{
	show_map_vma(m, v);
	return 0;
}

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

#define PROCMAP_QUERY_VMA_FLAGS (				\
		PROCMAP_QUERY_VMA_READABLE |			\
		PROCMAP_QUERY_VMA_WRITABLE |			\
		PROCMAP_QUERY_VMA_EXECUTABLE |			\
		PROCMAP_QUERY_VMA_SHARED			\
)

#define PROCMAP_QUERY_VALID_FLAGS_MASK (			\
		PROCMAP_QUERY_COVERING_OR_NEXT_VMA |		\
		PROCMAP_QUERY_FILE_BACKED_VMA |			\
		PROCMAP_QUERY_VMA_FLAGS				\
)

static int query_vma_setup(struct mm_struct *mm)
{
	return mmap_read_lock_killable(mm);
}

static void query_vma_teardown(struct mm_struct *mm, struct vm_area_struct *vma)
{
	mmap_read_unlock(mm);
}

static struct vm_area_struct *query_vma_find_by_addr(struct mm_struct *mm, unsigned long addr)
{
	return find_vma(mm, addr);
}

static struct vm_area_struct *query_matching_vma(struct mm_struct *mm,
						 unsigned long addr, u32 flags)
{
	struct vm_area_struct *vma;

next_vma:
	vma = query_vma_find_by_addr(mm, addr);
	if (!vma)
		goto no_vma;

	/* user requested only file-backed VMA, keep iterating */
	if ((flags & PROCMAP_QUERY_FILE_BACKED_VMA) && !vma->vm_file)
		goto skip_vma;

	/* VMA permissions should satisfy query flags */
	if (flags & PROCMAP_QUERY_VMA_FLAGS) {
		u32 perm = 0;

		if (flags & PROCMAP_QUERY_VMA_READABLE)
			perm |= VM_READ;
		if (flags & PROCMAP_QUERY_VMA_WRITABLE)
			perm |= VM_WRITE;
		if (flags & PROCMAP_QUERY_VMA_EXECUTABLE)
			perm |= VM_EXEC;
		if (flags & PROCMAP_QUERY_VMA_SHARED)
			perm |= VM_MAYSHARE;

		if ((vma->vm_flags & perm) != perm)
			goto skip_vma;
	}

	/* found covering VMA or user is OK with the matching next VMA */
	if ((flags & PROCMAP_QUERY_COVERING_OR_NEXT_VMA) || vma->vm_start <= addr)
		return vma;

skip_vma:
	/*
	 * If the user needs the closest matching VMA, keep iterating.
	 */
	addr = vma->vm_end;
	if (flags & PROCMAP_QUERY_COVERING_OR_NEXT_VMA)
		goto next_vma;

no_vma:
	return ERR_PTR(-ENOENT);
}

static int do_procmap_query(struct proc_maps_private *priv, void __user *uarg)
{
	struct procmap_query karg;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	const char *name = NULL;
	char build_id_buf[BUILD_ID_SIZE_MAX], *name_buf = NULL;
	__u64 usize;
	int err;

	if (copy_from_user(&usize, (void __user *)uarg, sizeof(usize)))
		return -EFAULT;
	/* argument struct can never be that large, reject abuse */
	if (usize > PAGE_SIZE)
		return -E2BIG;
	/* argument struct should have at least query_flags and query_addr fields */
	if (usize < offsetofend(struct procmap_query, query_addr))
		return -EINVAL;
	err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
	if (err)
		return err;

	/* reject unknown flags */
	if (karg.query_flags & ~PROCMAP_QUERY_VALID_FLAGS_MASK)
		return -EINVAL;
	/* either both buffer address and size are set, or both should be zero */
	if (!!karg.vma_name_size != !!karg.vma_name_addr)
		return -EINVAL;
	if (!!karg.build_id_size != !!karg.build_id_addr)
		return -EINVAL;

	mm = priv->mm;
	if (!mm || !mmget_not_zero(mm))
		return -ESRCH;

	err = query_vma_setup(mm);
	if (err) {
		mmput(mm);
		return err;
	}

	vma = query_matching_vma(mm, karg.query_addr, karg.query_flags);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		vma = NULL;
		goto out;
	}

	karg.vma_start = vma->vm_start;
	karg.vma_end = vma->vm_end;

	karg.vma_flags = 0;
	if (vma->vm_flags & VM_READ)
		karg.vma_flags |= PROCMAP_QUERY_VMA_READABLE;
	if (vma->vm_flags & VM_WRITE)
		karg.vma_flags |= PROCMAP_QUERY_VMA_WRITABLE;
	if (vma->vm_flags & VM_EXEC)
		karg.vma_flags |= PROCMAP_QUERY_VMA_EXECUTABLE;
	if (vma->vm_flags & VM_MAYSHARE)
		karg.vma_flags |= PROCMAP_QUERY_VMA_SHARED;

	karg.vma_page_size = vma_kernel_pagesize(vma);

	if (vma->vm_file) {
		const struct inode *inode = file_user_inode(vma->vm_file);

		karg.vma_offset = ((__u64)vma->vm_pgoff) << PAGE_SHIFT;
		karg.dev_major = MAJOR(inode->i_sb->s_dev);
		karg.dev_minor = MINOR(inode->i_sb->s_dev);
		karg.inode = inode->i_ino;
	} else {
		karg.vma_offset = 0;
		karg.dev_major = 0;
		karg.dev_minor = 0;
		karg.inode = 0;
	}

	if (karg.build_id_size) {
		__u32 build_id_sz;

		err = build_id_parse(vma, build_id_buf, &build_id_sz);
		if (err) {
			karg.build_id_size = 0;
		} else {
			if (karg.build_id_size < build_id_sz) {
				err = -ENAMETOOLONG;
				goto out;
			}
			karg.build_id_size = build_id_sz;
		}
	}

	if (karg.vma_name_size) {
		size_t name_buf_sz = min_t(size_t, PATH_MAX, karg.vma_name_size);
		const struct path *path;
		const char *name_fmt;
		size_t name_sz = 0;

		get_vma_name(vma, &path, &name, &name_fmt);

		if (path || name_fmt || name) {
			name_buf = kmalloc(name_buf_sz, GFP_KERNEL);
			if (!name_buf) {
				err = -ENOMEM;
				goto out;
			}
		}
		if (path) {
			name = d_path(path, name_buf, name_buf_sz);
			if (IS_ERR(name)) {
				err = PTR_ERR(name);
				goto out;
			}
			name_sz = name_buf + name_buf_sz - name;
		} else if (name || name_fmt) {
			name_sz = 1 + snprintf(name_buf, name_buf_sz, name_fmt ?: "%s", name);
			name = name_buf;
		}
		if (name_sz > name_buf_sz) {
			err = -ENAMETOOLONG;
			goto out;
		}
		karg.vma_name_size = name_sz;
	}

	/* unlock vma or mmap_lock, and put mm_struct before copying data to user */
	query_vma_teardown(mm, vma);
	mmput(mm);

	if (karg.vma_name_size && copy_to_user(u64_to_user_ptr(karg.vma_name_addr),
					       name, karg.vma_name_size)) {
		kfree(name_buf);
		return -EFAULT;
	}
	kfree(name_buf);

	if (karg.build_id_size && copy_to_user(u64_to_user_ptr(karg.build_id_addr),
					       build_id_buf, karg.build_id_size))
		return -EFAULT;

	if (copy_to_user(uarg, &karg, min_t(size_t, sizeof(karg), usize)))
		return -EFAULT;

	return 0;

out:
	query_vma_teardown(mm, vma);
	mmput(mm);
	kfree(name_buf);
	return err;
}

static long procfs_procmap_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	switch (cmd) {
	case PROCMAP_QUERY:
		return do_procmap_query(priv, (void __user *)arg);
	default:
		return -ENOIOCTLCMD;
	}
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
	.unlocked_ioctl = procfs_procmap_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
};
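
/*
 * Illustrative userspace sketch (not kernel code): query the VMA
 * covering an address, or the next one after it, without parsing the
 * text format of /proc/<pid>/maps:
 *
 *	struct procmap_query q = {
 *		.size = sizeof(q),
 *		.query_flags = PROCMAP_QUERY_COVERING_OR_NEXT_VMA,
 *		.query_addr = (__u64)(uintptr_t)addr,
 *	};
 *	int fd = open("/proc/self/maps", O_RDONLY);
 *
 *	if (fd >= 0 && ioctl(fd, PROCMAP_QUERY, &q) == 0)
 *		printf("%llx-%llx\n",
 *		       (unsigned long long)q.vma_start,
 *		       (unsigned long long)q.vma_end);
 */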

/*
 * Proportional Set Size (PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep accumulated division errors low, we use a 64-bit fixed-point
 * pss counter, so (pss >> PSS_SHIFT) is the real byte count.
 *
 * A shift of 12 before division means (assuming a 4K page size):
 * 	- 1M 3-user-pages add up to at most 8KB of error;
 * 	- mapcounts up to 2^24, or 16M, are supported;
 * 	- PSS values up to 2^52 bytes, or 4PB, are supported.
 */
#define PSS_SHIFT 12
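
/*
 * Worked example: a 4K page mapped by three processes contributes
 * (4096 << PSS_SHIFT) / 3 = 5592405 to pss, i.e. 1365 bytes after
 * shifting back down by PSS_SHIFT.
 */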

#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long anonymous;
	unsigned long lazyfree;
	unsigned long anonymous_thp;
	unsigned long shmem_thp;
	unsigned long file_thp;
	unsigned long swap;
	unsigned long shared_hugetlb;
	unsigned long private_hugetlb;
	unsigned long ksm;
	u64 pss;
	u64 pss_anon;
	u64 pss_file;
	u64 pss_shmem;
	u64 pss_dirty;
	u64 pss_locked;
	u64 swap_pss;
};

static void smaps_page_accumulate(struct mem_size_stats *mss,
		struct folio *folio, unsigned long size, unsigned long pss,
		bool dirty, bool locked, bool private)
{
	mss->pss += pss;

	if (folio_test_anon(folio))
		mss->pss_anon += pss;
	else if (folio_test_swapbacked(folio))
		mss->pss_shmem += pss;
	else
		mss->pss_file += pss;

	if (locked)
		mss->pss_locked += pss;

	if (dirty || folio_test_dirty(folio)) {
		mss->pss_dirty += pss;
		if (private)
			mss->private_dirty += size;
		else
			mss->shared_dirty += size;
	} else {
		if (private)
			mss->private_clean += size;
		else
			mss->shared_clean += size;
	}
}

static void smaps_account(struct mem_size_stats *mss, struct page *page,
		bool compound, bool young, bool dirty, bool locked,
		bool present)
{
	struct folio *folio = page_folio(page);
	int i, nr = compound ? compound_nr(page) : 1;
	unsigned long size = nr * PAGE_SIZE;
	bool exclusive;
	int mapcount;

	/*
	 * First accumulate quantities that depend only on |size| and the type
	 * of the compound page.
	 */
	if (folio_test_anon(folio)) {
		mss->anonymous += size;
		if (!folio_test_swapbacked(folio) && !dirty &&
		    !folio_test_dirty(folio))
			mss->lazyfree += size;
	}

	if (folio_test_ksm(folio))
		mss->ksm += size;

	mss->resident += size;
	/* Accumulate the size in pages that have been accessed. */
	if (young || folio_test_young(folio) || folio_test_referenced(folio))
		mss->referenced += size;

	/*
	 * Then accumulate quantities that may depend on sharing, or that may
	 * differ page-by-page.
	 *
	 * refcount == 1 for present entries guarantees that the folio is mapped
	 * exactly once. For large folios this implies that exactly one
	 * PTE/PMD/... maps (a part of) this folio.
	 *
	 * Treat all non-present entries (where relying on the mapcount and
	 * refcount doesn't make sense) as "maybe shared, but not sure how
	 * often". We treat device private entries as being fake-present.
	 *
	 * Note that it would not be safe to read the mapcount especially for
	 * pages referenced by migration entries, even with the PTL held.
	 */
	if (folio_ref_count(folio) == 1 || !present) {
		smaps_page_accumulate(mss, folio, size, size << PSS_SHIFT,
				      dirty, locked, present);
		return;
	}

	if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
		mapcount = folio_average_page_mapcount(folio);
		exclusive = !folio_maybe_mapped_shared(folio);
	}

	/*
	 * We obtain a snapshot of the mapcount. Without holding the folio lock
	 * this snapshot can be slightly wrong as we cannot always read the
	 * mapcount atomically.
	 */
	for (i = 0; i < nr; i++, page++) {
		unsigned long pss = PAGE_SIZE << PSS_SHIFT;

		if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) {
			mapcount = folio_precise_page_mapcount(folio, page);
			exclusive = mapcount < 2;
		}

		if (mapcount >= 2)
			pss /= mapcount;
		smaps_page_accumulate(mss, folio, PAGE_SIZE, pss,
				dirty, locked, exclusive);
	}
}

#ifdef CONFIG_SHMEM
static int smaps_pte_hole(unsigned long addr, unsigned long end,
			  __always_unused int depth, struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;

	mss->swap += shmem_partial_swap_usage(walk->vma->vm_file->f_mapping,
					      linear_page_index(vma, addr),
					      linear_page_index(vma, end));

	return 0;
}
#else
#define smaps_pte_hole		NULL
#endif /* CONFIG_SHMEM */

static void smaps_pte_hole_lookup(unsigned long addr, struct mm_walk *walk)
{
#ifdef CONFIG_SHMEM
	if (walk->ops->pte_hole) {
		/* depth is not used */
		smaps_pte_hole(addr, addr + PAGE_SIZE, 0, walk);
	}
#endif
}

static void smaps_pte_entry(pte_t *pte, unsigned long addr,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	bool locked = !!(vma->vm_flags & VM_LOCKED);
	struct page *page = NULL;
	bool present = false, young = false, dirty = false;
	pte_t ptent = ptep_get(pte);

	if (pte_present(ptent)) {
		page = vm_normal_page(vma, addr, ptent);
		young = pte_young(ptent);
		dirty = pte_dirty(ptent);
		present = true;
	} else if (is_swap_pte(ptent)) {
		swp_entry_t swpent = pte_to_swp_entry(ptent);

		if (!non_swap_entry(swpent)) {
			int mapcount;

			mss->swap += PAGE_SIZE;
			mapcount = swp_swapcount(swpent);
			if (mapcount >= 2) {
				u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;

				do_div(pss_delta, mapcount);
				mss->swap_pss += pss_delta;
			} else {
				mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
			}
		} else if (is_pfn_swap_entry(swpent)) {
			if (is_device_private_entry(swpent))
				present = true;
			page = pfn_swap_entry_to_page(swpent);
		}
	} else {
		smaps_pte_hole_lookup(addr, walk);
		return;
	}

	if (!page)
		return;

	smaps_account(mss, page, false, young, dirty, locked, present);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	bool locked = !!(vma->vm_flags & VM_LOCKED);
	struct page *page = NULL;
	bool present = false;
	struct folio *folio;

	if (pmd_present(*pmd)) {
		page = vm_normal_page_pmd(vma, addr, *pmd);
		present = true;
	} else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
		swp_entry_t entry = pmd_to_swp_entry(*pmd);

		if (is_pfn_swap_entry(entry))
			page = pfn_swap_entry_to_page(entry);
	}
	if (IS_ERR_OR_NULL(page))
		return;
	folio = page_folio(page);
	if (folio_test_anon(folio))
		mss->anonymous_thp += HPAGE_PMD_SIZE;
	else if (folio_test_swapbacked(folio))
		mss->shmem_thp += HPAGE_PMD_SIZE;
	else if (folio_is_zone_device(folio))
		/* pass */;
	else
		mss->file_thp += HPAGE_PMD_SIZE;

	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd),
		      locked, present);
}
#else
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
		struct mm_walk *walk)
{
}
#endif

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		smaps_pmd_entry(pmd, addr, walk);
		spin_unlock(ptl);
		goto out;
	}

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (!pte) {
		walk->action = ACTION_AGAIN;
		return 0;
	}
	for (; addr != end; pte++, addr += PAGE_SIZE)
		smaps_pte_entry(pte, addr, walk);
	pte_unmap_unlock(pte - 1, ptl);
out:
	cond_resched();
	return 0;
}

static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
{
	/*
	 * Don't forget to update Documentation/ on changes.
	 *
	 * The second dimension of mnemonics[] must be 3, not 2
	 * (i.e. [BITS_PER_LONG][3] rather than [BITS_PER_LONG][2]),
	 * so that each two-character mnemonic stays NUL-terminated;
	 * GCC 15 otherwise emits a spurious
	 * -Werror=unterminated-string-initialization warning.
	 */
	static const char mnemonics[BITS_PER_LONG][3] = {
		/*
		 * In case we meet a flag we don't know about.
		 */
		[0 ... (BITS_PER_LONG-1)] = "??",

		[ilog2(VM_READ)]	= "rd",
		[ilog2(VM_WRITE)]	= "wr",
		[ilog2(VM_EXEC)]	= "ex",
		[ilog2(VM_SHARED)]	= "sh",
		[ilog2(VM_MAYREAD)]	= "mr",
		[ilog2(VM_MAYWRITE)]	= "mw",
		[ilog2(VM_MAYEXEC)]	= "me",
		[ilog2(VM_MAYSHARE)]	= "ms",
		[ilog2(VM_GROWSDOWN)]	= "gd",
		[ilog2(VM_PFNMAP)]	= "pf",
		[ilog2(VM_LOCKED)]	= "lo",
		[ilog2(VM_IO)]		= "io",
		[ilog2(VM_SEQ_READ)]	= "sr",
		[ilog2(VM_RAND_READ)]	= "rr",
		[ilog2(VM_DONTCOPY)]	= "dc",
		[ilog2(VM_DONTEXPAND)]	= "de",
		[ilog2(VM_LOCKONFAULT)]	= "lf",
		[ilog2(VM_ACCOUNT)]	= "ac",
		[ilog2(VM_NORESERVE)]	= "nr",
		[ilog2(VM_HUGETLB)]	= "ht",
		[ilog2(VM_SYNC)]	= "sf",
		[ilog2(VM_ARCH_1)]	= "ar",
		[ilog2(VM_WIPEONFORK)]	= "wf",
		[ilog2(VM_DONTDUMP)]	= "dd",
#ifdef CONFIG_ARM64_BTI
		[ilog2(VM_ARM64_BTI)]	= "bt",
#endif
#ifdef CONFIG_MEM_SOFT_DIRTY
		[ilog2(VM_SOFTDIRTY)]	= "sd",
#endif
		[ilog2(VM_MIXEDMAP)]	= "mm",
		[ilog2(VM_HUGEPAGE)]	= "hg",
		[ilog2(VM_NOHUGEPAGE)]	= "nh",
		[ilog2(VM_MERGEABLE)]	= "mg",
		[ilog2(VM_UFFD_MISSING)]= "um",
		[ilog2(VM_UFFD_WP)]	= "uw",
#ifdef CONFIG_ARM64_MTE
		[ilog2(VM_MTE)]		= "mt",
		[ilog2(VM_MTE_ALLOWED)]	= "",
#endif
#ifdef CONFIG_ARCH_HAS_PKEYS
		/* These come out via ProtectionKey: */
		[ilog2(VM_PKEY_BIT0)]	= "",
		[ilog2(VM_PKEY_BIT1)]	= "",
		[ilog2(VM_PKEY_BIT2)]	= "",
#if VM_PKEY_BIT3
		[ilog2(VM_PKEY_BIT3)]	= "",
#endif
#if VM_PKEY_BIT4
		[ilog2(VM_PKEY_BIT4)]	= "",
#endif
#endif /* CONFIG_ARCH_HAS_PKEYS */
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
		[ilog2(VM_UFFD_MINOR)]	= "ui",
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
#ifdef CONFIG_ARCH_HAS_USER_SHADOW_STACK
		[ilog2(VM_SHADOW_STACK)] = "ss",
#endif
#if defined(CONFIG_64BIT) || defined(CONFIG_PPC32)
		[ilog2(VM_DROPPABLE)] = "dp",
#endif
#ifdef CONFIG_64BIT
		[ilog2(VM_SEALED)] = "sl",
#endif
	};
	size_t i;

	seq_puts(m, "VmFlags: ");
	for (i = 0; i < BITS_PER_LONG; i++) {
		if (!mnemonics[i][0])
			continue;
		if (vma->vm_flags & (1UL << i))
			seq_printf(m, "%s ", mnemonics[i]);
	}
	seq_putc(m, '\n');
}

#ifdef CONFIG_HUGETLB_PAGE
static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	pte_t ptent = huge_ptep_get(walk->mm, addr, pte);
	struct folio *folio = NULL;
	bool present = false;

	if (pte_present(ptent)) {
		folio = page_folio(pte_page(ptent));
		present = true;
	} else if (is_swap_pte(ptent)) {
		swp_entry_t swpent = pte_to_swp_entry(ptent);

		if (is_pfn_swap_entry(swpent))
			folio = pfn_swap_entry_folio(swpent);
	}

	if (folio) {
		/* We treat non-present entries as "maybe shared". */
		if (!present || folio_maybe_mapped_shared(folio) ||
		    hugetlb_pmd_shared(pte))
			mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
		else
			mss->private_hugetlb += huge_page_size(hstate_vma(vma));
	}
	return 0;
}
#else
#define smaps_hugetlb_range	NULL
#endif /* HUGETLB_PAGE */

static const struct mm_walk_ops smaps_walk_ops = {
	.pmd_entry		= smaps_pte_range,
	.hugetlb_entry		= smaps_hugetlb_range,
	.walk_lock		= PGWALK_RDLOCK,
};

static const struct mm_walk_ops smaps_shmem_walk_ops = {
	.pmd_entry		= smaps_pte_range,
	.hugetlb_entry		= smaps_hugetlb_range,
	.pte_hole		= smaps_pte_hole,
	.walk_lock		= PGWALK_RDLOCK,
};

/*
 * Gather mem stats from @vma with the indicated beginning
 * address @start, and keep them in @mss.
 *
 * Use vm_start of @vma as the beginning address if @start is 0.
 */
static void smap_gather_stats(struct vm_area_struct *vma,
		struct mem_size_stats *mss, unsigned long start)
{
	const struct mm_walk_ops *ops = &smaps_walk_ops;

	/* Invalid start */
	if (start >= vma->vm_end)
		return;

	if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
		/*
		 * For shared or readonly shmem mappings we know that all
		 * swapped out pages belong to the shmem object, and we can
		 * obtain the swap value much more efficiently. For private
		 * writable mappings, we might have COW pages that are
		 * not affected by the parent swapped out pages of the shmem
		 * object, so we have to distinguish them during the page walk.
		 * Unless we know that the shmem object (or the part mapped by
		 * our VMA) has no swapped out pages at all.
		 */
		unsigned long shmem_swapped = shmem_swap_usage(vma);

		if (!start && (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
					!(vma->vm_flags & VM_WRITE))) {
			mss->swap += shmem_swapped;
		} else {
			ops = &smaps_shmem_walk_ops;
		}
	}

	/* mmap_lock is held in m_start */
	if (!start)
		walk_page_vma(vma, ops, mss);
	else
		walk_page_range(vma->vm_mm, start, vma->vm_end, ops, mss);
}

#define SEQ_PUT_DEC(str, val) \
		seq_put_decimal_ull_width(m, str, (val) >> 10, 8)

/* Show the contents common for smaps and smaps_rollup */
static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss,
	bool rollup_mode)
{
	SEQ_PUT_DEC("Rss:            ", mss->resident);
	SEQ_PUT_DEC(" kB\nPss:            ", mss->pss >> PSS_SHIFT);
	SEQ_PUT_DEC(" kB\nPss_Dirty:      ", mss->pss_dirty >> PSS_SHIFT);
	if (rollup_mode) {
		/*
		 * These are meaningful only for smaps_rollup, otherwise two of
		 * them are zero, and the other one is the same as Pss.
		 */
		SEQ_PUT_DEC(" kB\nPss_Anon:       ",
			mss->pss_anon >> PSS_SHIFT);
		SEQ_PUT_DEC(" kB\nPss_File:       ",
			mss->pss_file >> PSS_SHIFT);
		SEQ_PUT_DEC(" kB\nPss_Shmem:      ",
			mss->pss_shmem >> PSS_SHIFT);
	}
	SEQ_PUT_DEC(" kB\nShared_Clean:   ", mss->shared_clean);
	SEQ_PUT_DEC(" kB\nShared_Dirty:   ", mss->shared_dirty);
	SEQ_PUT_DEC(" kB\nPrivate_Clean:  ", mss->private_clean);
	SEQ_PUT_DEC(" kB\nPrivate_Dirty:  ", mss->private_dirty);
	SEQ_PUT_DEC(" kB\nReferenced:     ", mss->referenced);
	SEQ_PUT_DEC(" kB\nAnonymous:      ", mss->anonymous);
	SEQ_PUT_DEC(" kB\nKSM:            ", mss->ksm);
	SEQ_PUT_DEC(" kB\nLazyFree:       ", mss->lazyfree);
	SEQ_PUT_DEC(" kB\nAnonHugePages:  ", mss->anonymous_thp);
	SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp);
	SEQ_PUT_DEC(" kB\nFilePmdMapped:  ", mss->file_thp);
	SEQ_PUT_DEC(" kB\nShared_Hugetlb: ", mss->shared_hugetlb);
	seq_put_decimal_ull_width(m, " kB\nPrivate_Hugetlb: ",
				  mss->private_hugetlb >> 10, 7);
	SEQ_PUT_DEC(" kB\nSwap:           ", mss->swap);
	SEQ_PUT_DEC(" kB\nSwapPss:        ",
					mss->swap_pss >> PSS_SHIFT);
	SEQ_PUT_DEC(" kB\nLocked:         ",
					mss->pss_locked >> PSS_SHIFT);
	seq_puts(m, " kB\n");
}

static int show_smap(struct seq_file *m, void *v)
{
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss = {};

	smap_gather_stats(vma, &mss, 0);

	show_map_vma(m, vma);

	SEQ_PUT_DEC("Size:           ", vma->vm_end - vma->vm_start);
	SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma));
	SEQ_PUT_DEC(" kB\nMMUPageSize:    ", vma_mmu_pagesize(vma));
	seq_puts(m, " kB\n");

	__show_smap(m, &mss, false);

	seq_printf(m, "THPeligible:    %8u\n",
		   !!thp_vma_allowable_orders(vma, vma->vm_flags,
			   TVA_SMAPS | TVA_ENFORCE_SYSFS, THP_ORDERS_ALL));

	if (arch_pkeys_enabled())
		seq_printf(m, "ProtectionKey:  %8u\n", vma_pkey(vma));
	show_smap_vma_flags(m, vma);

	return 0;
}

static int show_smaps_rollup(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct mem_size_stats mss = {};
	struct mm_struct *mm = priv->mm;
	struct vm_area_struct *vma;
	unsigned long vma_start = 0, last_vma_end = 0;
	int ret = 0;
	VMA_ITERATOR(vmi, mm, 0);

	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return -ESRCH;

	if (!mm || !mmget_not_zero(mm)) {
		ret = -ESRCH;
		goto out_put_task;
	}

	ret = mmap_read_lock_killable(mm);
	if (ret)
		goto out_put_mm;

	hold_task_mempolicy(priv);
	vma = vma_next(&vmi);

	if (unlikely(!vma))
		goto empty_set;

	vma_start = vma->vm_start;
	do {
		smap_gather_stats(vma, &mss, 0);
		last_vma_end = vma->vm_end;

		/*
		 * Release mmap_lock temporarily if someone wants to
		 * access it for write request.
		 */
		if (mmap_lock_is_contended(mm)) {
			vma_iter_invalidate(&vmi);
			mmap_read_unlock(mm);
			ret = mmap_read_lock_killable(mm);
			if (ret) {
				release_task_mempolicy(priv);
				goto out_put_mm;
			}

			/*
			 * After dropping the lock, there are four cases to
			 * consider. See the following example for explanation.
			 *
			 *   +------+------+-----------+
			 *   | VMA1 | VMA2 | VMA3      |
			 *   +------+------+-----------+
			 *   |      |      |           |
			 *  4k     8k     16k         400k
			 *
			 * Suppose we drop the lock after reading VMA2 due to
			 * contention, then we get:
			 *
			 *	last_vma_end = 16k
			 *
			 * 1) VMA2 is freed, but VMA3 exists:
			 *
			 *    vma_next(vmi) will return VMA3.
			 *    In this case, just continue from VMA3.
			 *
			 * 2) VMA2 still exists:
			 *
			 *    vma_next(vmi) will return VMA3.
			 *    In this case, just continue from VMA3.
			 *
			 * 3) No more VMAs can be found:
			 *
			 *    vma_next(vmi) will return NULL.
			 *    No more things to do, just break.
			 *
			 * 4) (last_vma_end - 1) is the middle of a vma (VMA'):
			 *
			 *    vma_next(vmi) will return VMA' whose range
			 *    contains last_vma_end.
			 *    Iterate VMA' from last_vma_end.
			 */
			vma = vma_next(&vmi);
			/* Case 3 above */
			if (!vma)
				break;

			/* Case 1 and 2 above */
			if (vma->vm_start >= last_vma_end) {
				smap_gather_stats(vma, &mss, 0);
				last_vma_end = vma->vm_end;
				continue;
			}

			/* Case 4 above */
			if (vma->vm_end > last_vma_end) {
				smap_gather_stats(vma, &mss, last_vma_end);
				last_vma_end = vma->vm_end;
			}
		}
	} for_each_vma(vmi, vma);

empty_set:
	show_vma_header_prefix(m, vma_start, last_vma_end, 0, 0, 0, 0);
	seq_pad(m, ' ');
	seq_puts(m, "[rollup]\n");

	__show_smap(m, &mss, true);

	release_task_mempolicy(priv);
	mmap_read_unlock(mm);

out_put_mm:
	mmput(mm);
out_put_task:
	put_task_struct(priv->task);
	priv->task = NULL;

	return ret;
}
#undef SEQ_PUT_DEC

static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_smap
};

static int pid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

static int smaps_rollup_open(struct inode *inode, struct file *file)
{
	int ret;
	struct proc_maps_private *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL_ACCOUNT);
	if (!priv)
		return -ENOMEM;

	ret = single_open(file, show_smaps_rollup, priv);
	if (ret)
		goto out_free;

	priv->inode = inode;
	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR_OR_NULL(priv->mm)) {
		ret = priv->mm ? PTR_ERR(priv->mm) : -ESRCH;

		single_release(inode, file);
		goto out_free;
	}

	return 0;

out_free:
	kfree(priv);
	return ret;
}

static int smaps_rollup_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	if (priv->mm)
		mmdrop(priv->mm);

	kfree(priv);
	return single_release(inode, file);
}

const struct file_operations proc_pid_smaps_operations = {
	.open		= pid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

const struct file_operations proc_pid_smaps_rollup_operations = {
	.open		= smaps_rollup_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= smaps_rollup_release,
};

enum clear_refs_types {
	CLEAR_REFS_ALL = 1,
	CLEAR_REFS_ANON,
	CLEAR_REFS_MAPPED,
	CLEAR_REFS_SOFT_DIRTY,
	CLEAR_REFS_MM_HIWATER_RSS,
	CLEAR_REFS_LAST,
};

struct clear_refs_private {
	enum clear_refs_types type;
};

#ifdef CONFIG_MEM_SOFT_DIRTY

static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	struct folio *folio;

	if (!pte_write(pte))
		return false;
	if (!is_cow_mapping(vma->vm_flags))
		return false;
	if (likely(!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)))
		return false;
	folio = vm_normal_folio(vma, addr, pte);
	if (!folio)
		return false;
	return folio_maybe_dma_pinned(folio);
}

static inline void clear_soft_dirty(struct vm_area_struct *vma,
		unsigned long addr, pte_t *pte)
{
	/*
	 * The soft-dirty tracker uses page faults (#PF) to catch writes
	 * to pages, so write-protect the pte as well. See
	 * Documentation/admin-guide/mm/soft-dirty.rst for a full
	 * description of how soft-dirty works.
	 */
	pte_t ptent = ptep_get(pte);

	if (pte_present(ptent)) {
		pte_t old_pte;

		if (pte_is_pinned(vma, addr, ptent))
			return;
		old_pte = ptep_modify_prot_start(vma, addr, pte);
		ptent = pte_wrprotect(old_pte);
		ptent = pte_clear_soft_dirty(ptent);
		ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
	} else if (is_swap_pte(ptent)) {
		ptent = pte_swp_clear_soft_dirty(ptent);
		set_pte_at(vma->vm_mm, addr, pte, ptent);
	}
}
#else
static inline void clear_soft_dirty(struct vm_area_struct *vma,
		unsigned long addr, pte_t *pte)
{
}
#endif

#if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmdp)
{
	pmd_t old, pmd = *pmdp;

	if (pmd_present(pmd)) {
		/* See comment in change_huge_pmd() */
		old = pmdp_invalidate(vma, addr, pmdp);
		if (pmd_dirty(old))
			pmd = pmd_mkdirty(pmd);
		if (pmd_young(old))
			pmd = pmd_mkyoung(pmd);

		pmd = pmd_wrprotect(pmd);
		pmd = pmd_clear_soft_dirty(pmd);

		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
	} else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
		pmd = pmd_swp_clear_soft_dirty(pmd);
		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
	}
}
#else
static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmdp)
{
}
#endif

static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct clear_refs_private *cp = walk->private;
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct folio *folio;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
			clear_soft_dirty_pmd(vma, addr, pmd);
			goto out;
		}

		if (!pmd_present(*pmd))
			goto out;

		folio = pmd_folio(*pmd);

		/* Clear accessed and referenced bits. */
		pmdp_test_and_clear_young(vma, addr, pmd);
		folio_test_clear_young(folio);
		folio_clear_referenced(folio);
out:
		spin_unlock(ptl);
		return 0;
	}

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (!pte) {
		walk->action = ACTION_AGAIN;
		return 0;
	}
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = ptep_get(pte);

		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
			clear_soft_dirty(vma, addr, pte);
			continue;
		}

		if (!pte_present(ptent))
			continue;

		folio = vm_normal_folio(vma, addr, ptent);
		if (!folio)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		folio_test_clear_young(folio);
		folio_clear_referenced(folio);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static int clear_refs_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct clear_refs_private *cp = walk->private;
	struct vm_area_struct *vma = walk->vma;

	if (vma->vm_flags & VM_PFNMAP)
		return 1;

	/*
	 * Writing 1 to /proc/pid/clear_refs affects all pages.
	 * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
	 * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
	 * Writing 4 to /proc/pid/clear_refs affects all pages.
	 */
	if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
		return 1;
	if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
		return 1;
	return 0;
}

static const struct mm_walk_ops clear_refs_walk_ops = {
	.pmd_entry		= clear_refs_pte_range,
	.test_walk		= clear_refs_test_walk,
	.walk_lock		= PGWALK_WRLOCK,
};

static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF] = {};
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum clear_refs_types type;
	int itype;
	int rv;

	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	rv = kstrtoint(strstrip(buffer), 10, &itype);
	if (rv < 0)
		return rv;
	type = (enum clear_refs_types)itype;
	if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
		return -EINVAL;

	task = get_proc_task(file_inode(file));
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		VMA_ITERATOR(vmi, mm, 0);
		struct mmu_notifier_range range;
		struct clear_refs_private cp = {
			.type = type,
		};

		if (mmap_write_lock_killable(mm)) {
			count = -EINTR;
			goto out_mm;
		}
		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
			/*
			 * Writing 5 to /proc/pid/clear_refs resets the peak
			 * resident set size to this mm's current rss value.
			 */
			reset_mm_hiwater_rss(mm);
			goto out_unlock;
		}

		if (type == CLEAR_REFS_SOFT_DIRTY) {
			for_each_vma(vmi, vma) {
				if (!(vma->vm_flags & VM_SOFTDIRTY))
					continue;
				vm_flags_clear(vma, VM_SOFTDIRTY);
				vma_set_page_prot(vma);
			}

			inc_tlb_flush_pending(mm);
			mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY,
						0, mm, 0, -1UL);
			mmu_notifier_invalidate_range_start(&range);
		}
		walk_page_range(mm, 0, -1, &clear_refs_walk_ops, &cp);
		if (type == CLEAR_REFS_SOFT_DIRTY) {
			mmu_notifier_invalidate_range_end(&range);
			flush_tlb_mm(mm);
			dec_tlb_flush_pending(mm);
		}
out_unlock:
		mmap_write_unlock(mm);
out_mm:
		mmput(mm);
	}
	put_task_struct(task);

	return count;
}

const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
	.llseek		= noop_llseek,
};
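
/*
 * Example usage from userspace (values as documented above):
 *
 *	echo 4 > /proc/<pid>/clear_refs
 *
 * clears the soft-dirty bits for every mapping, after which newly
 * written pages can be detected via bit 55 of /proc/<pid>/pagemap.
 */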

typedef struct {
	u64 pme;
} pagemap_entry_t;

struct pagemapread {
	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
	pagemap_entry_t *buffer;
	bool show_pfn;
};

#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
#define PAGEMAP_WALK_MASK	(PMD_MASK)

#define PM_ENTRY_BYTES		sizeof(pagemap_entry_t)
#define PM_PFRAME_BITS		55
#define PM_PFRAME_MASK		GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
#define PM_SOFT_DIRTY		BIT_ULL(55)
#define PM_MMAP_EXCLUSIVE	BIT_ULL(56)
#define PM_UFFD_WP		BIT_ULL(57)
#define PM_GUARD_REGION		BIT_ULL(58)
#define PM_FILE			BIT_ULL(61)
#define PM_SWAP			BIT_ULL(62)
#define PM_PRESENT		BIT_ULL(63)

#define PM_END_OF_BUFFER    1

static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
{
	return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };
}

static int add_to_pagemap(pagemap_entry_t *pme, struct pagemapread *pm)
{
	pm->buffer[pm->pos++] = *pme;
	if (pm->pos >= pm->len)
		return PM_END_OF_BUFFER;
	return 0;
}

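/*
 * Return true if this page of @folio is mapped by exactly one user.
 * With precise per-page mapcounts this is exact; otherwise we fall
 * back to the folio-level check, which may over-report sharing.
 */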
static bool __folio_page_mapped_exclusively(struct folio *folio, struct page *page)
{
	if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
		return folio_precise_page_mapcount(folio, page) == 1;
	return !folio_maybe_mapped_shared(folio);
}

static int pagemap_pte_hole(unsigned long start, unsigned long end,
			    __always_unused int depth, struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	unsigned long addr = start;
	int err = 0;

	while (addr < end) {
		struct vm_area_struct *vma = find_vma(walk->mm, addr);
		pagemap_entry_t pme = make_pme(0, 0);
		/* End of address space hole, which we mark as non-present. */
		unsigned long hole_end;

		if (vma)
			hole_end = min(end, vma->vm_start);
		else
			hole_end = end;

		for (; addr < hole_end; addr += PAGE_SIZE) {
			err = add_to_pagemap(&pme, pm);
			if (err)
				goto out;
		}

		if (!vma)
			break;

		/* Addresses in the VMA. */
		if (vma->vm_flags & VM_SOFTDIRTY)
			pme = make_pme(0, PM_SOFT_DIRTY);
		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
			err = add_to_pagemap(&pme, pm);
			if (err)
				goto out;
		}
	}
out:
	return err;
}

static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	u64 frame = 0, flags = 0;
	struct page *page = NULL;
	struct folio *folio;

	if (pte_present(pte)) {
		if (pm->show_pfn)
			frame = pte_pfn(pte);
		flags |= PM_PRESENT;
		page = vm_normal_page(vma, addr, pte);
		if (pte_soft_dirty(pte))
			flags |= PM_SOFT_DIRTY;
		if (pte_uffd_wp(pte))
			flags |= PM_UFFD_WP;
	} else if (is_swap_pte(pte)) {
		swp_entry_t entry;
		if (pte_swp_soft_dirty(pte))
			flags |= PM_SOFT_DIRTY;
		if (pte_swp_uffd_wp(pte))
			flags |= PM_UFFD_WP;
		entry = pte_to_swp_entry(pte);
		if (pm->show_pfn) {
			pgoff_t offset;
			/*
			 * For PFN swap entries, keep the offset field a bare
			 * PFN, to stay compatible with old smaps.
			 */
			if (is_pfn_swap_entry(entry))
				offset = swp_offset_pfn(entry);
			else
				offset = swp_offset(entry);
			frame = swp_type(entry) |
			    (offset << MAX_SWAPFILES_SHIFT);
		}
		flags |= PM_SWAP;
		if (is_pfn_swap_entry(entry))
			page = pfn_swap_entry_to_page(entry);
		if (pte_marker_entry_uffd_wp(entry))
			flags |= PM_UFFD_WP;
		if (is_guard_swp_entry(entry))
			flags |= PM_GUARD_REGION;
	}

	if (page) {
		folio = page_folio(page);
		if (!folio_test_anon(folio))
			flags |= PM_FILE;
		if ((flags & PM_PRESENT) &&
		    __folio_page_mapped_exclusively(folio, page))
			flags |= PM_MMAP_EXCLUSIVE;
	}
	if (vma->vm_flags & VM_SOFTDIRTY)
		flags |= PM_SOFT_DIRTY;

	return make_pme(frame, flags);
}

static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct pagemapread *pm = walk->private;
	spinlock_t *ptl;
	pte_t *pte, *orig_pte;
	int err = 0;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

	ptl = pmd_trans_huge_lock(pmdp, vma);
	if (ptl) {
		unsigned int idx = (addr & ~PMD_MASK) >> PAGE_SHIFT;
		u64 flags = 0, frame = 0;
		pmd_t pmd = *pmdp;
		struct page *page = NULL;
		struct folio *folio = NULL;

		if (vma->vm_flags & VM_SOFTDIRTY)
			flags |= PM_SOFT_DIRTY;

		if (pmd_present(pmd)) {
			page = pmd_page(pmd);

			flags |= PM_PRESENT;
			if (pmd_soft_dirty(pmd))
				flags |= PM_SOFT_DIRTY;
			if (pmd_uffd_wp(pmd))
				flags |= PM_UFFD_WP;
			if (pm->show_pfn)
				frame = pmd_pfn(pmd) + idx;
		}
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		else if (is_swap_pmd(pmd)) {
			swp_entry_t entry = pmd_to_swp_entry(pmd);
			unsigned long offset;

			if (pm->show_pfn) {
				if (is_pfn_swap_entry(entry))
					offset = swp_offset_pfn(entry) + idx;
				else
					offset = swp_offset(entry) + idx;
				frame = swp_type(entry) |
					(offset << MAX_SWAPFILES_SHIFT);
			}
			flags |= PM_SWAP;
			if (pmd_swp_soft_dirty(pmd))
				flags |= PM_SOFT_DIRTY;
			if (pmd_swp_uffd_wp(pmd))
				flags |= PM_UFFD_WP;
			VM_BUG_ON(!is_pmd_migration_entry(pmd));
			page = pfn_swap_entry_to_page(entry);
		}
#endif

		if (page) {
			folio = page_folio(page);
			if (!folio_test_anon(folio))
				flags |= PM_FILE;
		}

		for (; addr != end; addr += PAGE_SIZE, idx++) {
			u64 cur_flags = flags;
			pagemap_entry_t pme;

			if (folio && (flags & PM_PRESENT) &&
			    __folio_page_mapped_exclusively(folio, page))
				cur_flags |= PM_MMAP_EXCLUSIVE;

			pme = make_pme(frame, cur_flags);
			err = add_to_pagemap(&pme, pm);
			if (err)
				break;
			if (pm->show_pfn) {
				if (flags & PM_PRESENT)
					frame++;
				else if (flags & PM_SWAP)
					frame += (1 << MAX_SWAPFILES_SHIFT);
			}
		}
		spin_unlock(ptl);
		return err;
	}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

	/*
	 * We can assume that @vma always points to a valid VMA and that @end
	 * never goes beyond vma->vm_end.
	 */
1863 	orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
1864 	if (!pte) {
1865 		walk->action = ACTION_AGAIN;
1866 		return err;
1867 	}
1868 	for (; addr < end; pte++, addr += PAGE_SIZE) {
1869 		pagemap_entry_t pme;
1870 
1871 		pme = pte_to_pagemap_entry(pm, vma, addr, ptep_get(pte));
1872 		err = add_to_pagemap(&pme, pm);
1873 		if (err)
1874 			break;
1875 	}
1876 	pte_unmap_unlock(orig_pte, ptl);
1877 
1878 	cond_resched();
1879 
1880 	return err;
1881 }
1882 
1883 #ifdef CONFIG_HUGETLB_PAGE
1884 /* This function walks within one hugetlb entry in the single call */
static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	struct vm_area_struct *vma = walk->vma;
	u64 flags = 0, frame = 0;
	int err = 0;
	pte_t pte;

	if (vma->vm_flags & VM_SOFTDIRTY)
		flags |= PM_SOFT_DIRTY;

	pte = huge_ptep_get(walk->mm, addr, ptep);
	if (pte_present(pte)) {
		struct folio *folio = page_folio(pte_page(pte));

		if (!folio_test_anon(folio))
			flags |= PM_FILE;

		if (!folio_maybe_mapped_shared(folio) &&
		    !hugetlb_pmd_shared(ptep))
			flags |= PM_MMAP_EXCLUSIVE;

		if (huge_pte_uffd_wp(pte))
			flags |= PM_UFFD_WP;

		flags |= PM_PRESENT;
		if (pm->show_pfn)
			frame = pte_pfn(pte) +
				((addr & ~hmask) >> PAGE_SHIFT);
	} else if (pte_swp_uffd_wp_any(pte)) {
		flags |= PM_UFFD_WP;
	}

	for (; addr != end; addr += PAGE_SIZE) {
		pagemap_entry_t pme = make_pme(frame, flags);

		err = add_to_pagemap(&pme, pm);
		if (err)
			return err;
		if (pm->show_pfn && (flags & PM_PRESENT))
			frame++;
	}

	cond_resched();

	return err;
}
#else
#define pagemap_hugetlb_range	NULL
#endif /* CONFIG_HUGETLB_PAGE */

static const struct mm_walk_ops pagemap_ops = {
	.pmd_entry	= pagemap_pmd_range,
	.pte_hole	= pagemap_pte_hole,
	.hugetlb_entry	= pagemap_hugetlb_range,
	.walk_lock	= PGWALK_RDLOCK,
};

/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bit  55    pte is soft-dirty (see Documentation/admin-guide/mm/soft-dirty.rst)
 * Bit  56    page exclusively mapped
 * Bit  57    pte is uffd-wp write-protected
 * Bit  58    pte is a guard region
 * Bits 59-60 zero
 * Bit  61    page is file-page or shared-anon
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
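/*
 * Illustrative only, not part of the kernel build: a minimal userspace
 * sketch of looking up the entry for one virtual address, assuming the
 * 64-bit layout documented above (the bit positions are written out by
 * hand here rather than taken from a uapi header):
 *
 *	uint64_t entry;
 *	int fd = open("/proc/self/pagemap", O_RDONLY);
 *	off_t off = (off_t)(vaddr / page_size) * sizeof(entry);
 *
 *	if (pread(fd, &entry, sizeof(entry), off) == sizeof(entry)) {
 *		int present = (entry >> 63) & 1;
 *		uint64_t pfn = entry & ((1ULL << 55) - 1);	// bits 0-54
 *		// pfn reads back as 0 without CAP_SYS_ADMIN, see below
 *	}
 *	close(fd);
 */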
static ssize_t pagemap_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct mm_struct *mm = file->private_data;
	struct pagemapread pm;
	unsigned long src;
	unsigned long svpfn;
	unsigned long start_vaddr;
	unsigned long end_vaddr;
	int ret = 0, copied = 0;

	if (!mm || !mmget_not_zero(mm))
		goto out;

	ret = -EINVAL;
	/* file position must be aligned */
	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
		goto out_mm;

	ret = 0;
	if (!count)
		goto out_mm;

	/* do not disclose physical addresses: attack vector */
	pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);

	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
	pm.buffer = kmalloc_array(pm.len, PM_ENTRY_BYTES, GFP_KERNEL);
	ret = -ENOMEM;
	if (!pm.buffer)
		goto out_mm;

	src = *ppos;
	svpfn = src / PM_ENTRY_BYTES;
	end_vaddr = mm->task_size;

	/* watch out for wraparound */
	start_vaddr = end_vaddr;
	if (svpfn <= (ULONG_MAX >> PAGE_SHIFT)) {
		unsigned long end;

		ret = mmap_read_lock_killable(mm);
		if (ret)
			goto out_free;
		start_vaddr = untagged_addr_remote(mm, svpfn << PAGE_SHIFT);
		mmap_read_unlock(mm);

		end = start_vaddr + ((count / PM_ENTRY_BYTES) << PAGE_SHIFT);
		if (end >= start_vaddr && end < mm->task_size)
			end_vaddr = end;
	}

	/* Ensure the address is inside the task's address space */
	if (start_vaddr > mm->task_size)
		start_vaddr = end_vaddr;

	ret = 0;
	while (count && (start_vaddr < end_vaddr)) {
		int len;
		unsigned long end;

		pm.pos = 0;
		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
		/* overflow? */
		if (end < start_vaddr || end > end_vaddr)
			end = end_vaddr;
		ret = mmap_read_lock_killable(mm);
		if (ret)
			goto out_free;
		ret = walk_page_range(mm, start_vaddr, end, &pagemap_ops, &pm);
		mmap_read_unlock(mm);
		start_vaddr = end;

		len = min(count, PM_ENTRY_BYTES * pm.pos);
		if (copy_to_user(buf, pm.buffer, len)) {
			ret = -EFAULT;
			goto out_free;
		}
		copied += len;
		buf += len;
		count -= len;
	}
	*ppos += copied;
	if (!ret || ret == PM_END_OF_BUFFER)
		ret = copied;

out_free:
	kfree(pm.buffer);
out_mm:
	mmput(mm);
out:
	return ret;
}

static int pagemap_open(struct inode *inode, struct file *file)
{
	struct mm_struct *mm;

	mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR_OR_NULL(mm))
		return mm ? PTR_ERR(mm) : -ESRCH;
	file->private_data = mm;
	return 0;
}

static int pagemap_release(struct inode *inode, struct file *file)
{
	struct mm_struct *mm = file->private_data;

	if (mm)
		mmdrop(mm);
	return 0;
}

#define PM_SCAN_CATEGORIES	(PAGE_IS_WPALLOWED | PAGE_IS_WRITTEN |	\
				 PAGE_IS_FILE | PAGE_IS_PRESENT |	\
				 PAGE_IS_SWAPPED | PAGE_IS_PFNZERO |	\
				 PAGE_IS_HUGE | PAGE_IS_SOFT_DIRTY |	\
				 PAGE_IS_GUARD)
#define PM_SCAN_FLAGS		(PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC)
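
/*
 * Illustrative only, not part of the kernel build: a minimal userspace
 * sketch of a PAGEMAP_SCAN call that reports which pages in [start, end)
 * have been written, assuming the pm_scan_arg/page_region definitions
 * from the uapi header and eliding error handling:
 *
 *	struct page_region regions[32];
 *	struct pm_scan_arg arg = {
 *		.size = sizeof(arg),
 *		.start = start,
 *		.end = end,
 *		.vec = (uintptr_t)regions,
 *		.vec_len = 32,
 *		.category_mask = PAGE_IS_WRITTEN,
 *		.return_mask = PAGE_IS_WRITTEN,
 *	};
 *	int n = ioctl(pagemap_fd, PAGEMAP_SCAN, &arg);
 *
 * On success n is the number of regions filled in, and arg.walk_end
 * records how far the walk got so a subsequent call can resume there.
 */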

struct pagemap_scan_private {
	struct pm_scan_arg arg;
	unsigned long masks_of_interest, cur_vma_category;
	struct page_region *vec_buf;
	unsigned long vec_buf_len, vec_buf_index, found_pages;
	struct page_region __user *vec_out;
};

static unsigned long pagemap_page_category(struct pagemap_scan_private *p,
					   struct vm_area_struct *vma,
					   unsigned long addr, pte_t pte)
{
	unsigned long categories = 0;

	if (pte_present(pte)) {
		struct page *page;

		categories |= PAGE_IS_PRESENT;
		if (!pte_uffd_wp(pte))
			categories |= PAGE_IS_WRITTEN;

		if (p->masks_of_interest & PAGE_IS_FILE) {
			page = vm_normal_page(vma, addr, pte);
			if (page && !PageAnon(page))
				categories |= PAGE_IS_FILE;
		}

		if (is_zero_pfn(pte_pfn(pte)))
			categories |= PAGE_IS_PFNZERO;
		if (pte_soft_dirty(pte))
			categories |= PAGE_IS_SOFT_DIRTY;
	} else if (is_swap_pte(pte)) {
		swp_entry_t swp;

		categories |= PAGE_IS_SWAPPED;
		if (!pte_swp_uffd_wp_any(pte))
			categories |= PAGE_IS_WRITTEN;

		swp = pte_to_swp_entry(pte);
		if (is_guard_swp_entry(swp))
			categories |= PAGE_IS_GUARD;
		else if ((p->masks_of_interest & PAGE_IS_FILE) &&
			 is_pfn_swap_entry(swp) &&
			 !folio_test_anon(pfn_swap_entry_folio(swp)))
			categories |= PAGE_IS_FILE;

		if (pte_swp_soft_dirty(pte))
			categories |= PAGE_IS_SOFT_DIRTY;
	}

	return categories;
}

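/*
 * Write-protect a single pte for async userfaultfd-wp. A present pte goes
 * through the modify_prot protocol so concurrent A/D updates are not lost;
 * a swap pte only needs its swap-side uffd-wp bit set; and for a none pte
 * a PTE_MARKER_UFFD_WP marker is installed so the protection is remembered
 * even though no backing entry exists yet.
 */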
static void make_uffd_wp_pte(struct vm_area_struct *vma,
			     unsigned long addr, pte_t *pte, pte_t ptent)
{
	if (pte_present(ptent)) {
		pte_t old_pte;

		old_pte = ptep_modify_prot_start(vma, addr, pte);
		ptent = pte_mkuffd_wp(old_pte);
		ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
	} else if (is_swap_pte(ptent)) {
		ptent = pte_swp_mkuffd_wp(ptent);
		set_pte_at(vma->vm_mm, addr, pte, ptent);
	} else {
		set_pte_at(vma->vm_mm, addr, pte,
			   make_pte_marker(PTE_MARKER_UFFD_WP));
	}
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static unsigned long pagemap_thp_category(struct pagemap_scan_private *p,
					  struct vm_area_struct *vma,
					  unsigned long addr, pmd_t pmd)
{
	unsigned long categories = PAGE_IS_HUGE;

	if (pmd_present(pmd)) {
		struct page *page;

		categories |= PAGE_IS_PRESENT;
		if (!pmd_uffd_wp(pmd))
			categories |= PAGE_IS_WRITTEN;

		if (p->masks_of_interest & PAGE_IS_FILE) {
			page = vm_normal_page_pmd(vma, addr, pmd);
			if (page && !PageAnon(page))
				categories |= PAGE_IS_FILE;
		}

		if (is_zero_pfn(pmd_pfn(pmd)))
			categories |= PAGE_IS_PFNZERO;
		if (pmd_soft_dirty(pmd))
			categories |= PAGE_IS_SOFT_DIRTY;
	} else if (is_swap_pmd(pmd)) {
		swp_entry_t swp;

		categories |= PAGE_IS_SWAPPED;
		if (!pmd_swp_uffd_wp(pmd))
			categories |= PAGE_IS_WRITTEN;
		if (pmd_swp_soft_dirty(pmd))
			categories |= PAGE_IS_SOFT_DIRTY;

		if (p->masks_of_interest & PAGE_IS_FILE) {
			swp = pmd_to_swp_entry(pmd);
			if (is_pfn_swap_entry(swp) &&
			    !folio_test_anon(pfn_swap_entry_folio(swp)))
				categories |= PAGE_IS_FILE;
		}
	}

	return categories;
}

static void make_uffd_wp_pmd(struct vm_area_struct *vma,
			     unsigned long addr, pmd_t *pmdp)
{
	pmd_t old, pmd = *pmdp;

	if (pmd_present(pmd)) {
		old = pmdp_invalidate_ad(vma, addr, pmdp);
		pmd = pmd_mkuffd_wp(old);
		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
	} else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
		pmd = pmd_swp_mkuffd_wp(pmd);
		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
	}
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long pagemap_hugetlb_category(pte_t pte)
{
	unsigned long categories = PAGE_IS_HUGE;

	/*
	 * According to pagemap_hugetlb_range(), a file-backed HugeTLB
	 * page cannot be swapped, so PAGE_IS_FILE is not checked for
	 * swapped pages.
	 */
	if (pte_present(pte)) {
		categories |= PAGE_IS_PRESENT;
		if (!huge_pte_uffd_wp(pte))
			categories |= PAGE_IS_WRITTEN;
		if (!PageAnon(pte_page(pte)))
			categories |= PAGE_IS_FILE;
		if (is_zero_pfn(pte_pfn(pte)))
			categories |= PAGE_IS_PFNZERO;
		if (pte_soft_dirty(pte))
			categories |= PAGE_IS_SOFT_DIRTY;
	} else if (is_swap_pte(pte)) {
		categories |= PAGE_IS_SWAPPED;
		if (!pte_swp_uffd_wp_any(pte))
			categories |= PAGE_IS_WRITTEN;
		if (pte_swp_soft_dirty(pte))
			categories |= PAGE_IS_SOFT_DIRTY;
	}

	return categories;
}

static void make_uffd_wp_huge_pte(struct vm_area_struct *vma,
				  unsigned long addr, pte_t *ptep,
				  pte_t ptent)
{
	unsigned long psize;

	if (is_hugetlb_entry_hwpoisoned(ptent) || is_pte_marker(ptent))
		return;

	psize = huge_page_size(hstate_vma(vma));

	if (is_hugetlb_entry_migration(ptent))
		set_huge_pte_at(vma->vm_mm, addr, ptep,
				pte_swp_mkuffd_wp(ptent), psize);
	else if (!huge_pte_none(ptent))
		huge_ptep_modify_prot_commit(vma, addr, ptep, ptent,
					     huge_pte_mkuffd_wp(ptent));
	else
		set_huge_pte_at(vma->vm_mm, addr, ptep,
				make_pte_marker(PTE_MARKER_UFFD_WP), psize);
}
#endif /* CONFIG_HUGETLB_PAGE */

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static void pagemap_scan_backout_range(struct pagemap_scan_private *p,
				       unsigned long addr, unsigned long end)
{
	struct page_region *cur_buf = &p->vec_buf[p->vec_buf_index];

	if (cur_buf->start != addr)
		cur_buf->end = addr;
	else
		cur_buf->start = cur_buf->end = 0;

	p->found_pages -= (end - addr) / PAGE_SIZE;
}
#endif

static bool pagemap_scan_is_interesting_page(unsigned long categories,
					     const struct pagemap_scan_private *p)
{
	categories ^= p->arg.category_inverted;
	if ((categories & p->arg.category_mask) != p->arg.category_mask)
		return false;
	if (p->arg.category_anyof_mask && !(categories & p->arg.category_anyof_mask))
		return false;

	return true;
}
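
/*
 * Matching semantics, as implemented here and in
 * pagemap_scan_is_interesting_vma(): category_inverted flips the selected
 * bits before testing, all bits in category_mask must then be set, and a
 * non-zero category_anyof_mask requires at least one of its bits. For
 * example, category_mask = category_inverted = PAGE_IS_WRITTEN selects
 * only pages that have *not* been written.
 */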

static bool pagemap_scan_is_interesting_vma(unsigned long categories,
					    const struct pagemap_scan_private *p)
{
	unsigned long required = p->arg.category_mask & PAGE_IS_WPALLOWED;

	categories ^= p->arg.category_inverted;
	if ((categories & required) != required)
		return false;

	return true;
}

static int pagemap_scan_test_walk(unsigned long start, unsigned long end,
				  struct mm_walk *walk)
{
	struct pagemap_scan_private *p = walk->private;
	struct vm_area_struct *vma = walk->vma;
	unsigned long vma_category = 0;
	bool wp_allowed = userfaultfd_wp_async(vma) &&
	    userfaultfd_wp_use_markers(vma);

	if (!wp_allowed) {
		/* User requested an explicit failure if wp-async isn't supported */
		if (p->arg.flags & PM_SCAN_CHECK_WPASYNC)
			return -EPERM;
		/*
		 * The user requires wr-protect but allows unsupported
		 * vmas to be silently skipped.
		 */
		if (p->arg.flags & PM_SCAN_WP_MATCHING)
			return 1;
		/*
		 * Otherwise the request doesn't involve wr-protects at
		 * all; fall through to the remaining checks and allow
		 * the vma walk.
		 */
	}

	if (vma->vm_flags & VM_PFNMAP)
		return 1;

	if (wp_allowed)
		vma_category |= PAGE_IS_WPALLOWED;

	if (vma->vm_flags & VM_SOFTDIRTY)
		vma_category |= PAGE_IS_SOFT_DIRTY;

	if (!pagemap_scan_is_interesting_vma(vma_category, p))
		return 1;

	p->cur_vma_category = vma_category;

	return 0;
}

static bool pagemap_scan_push_range(unsigned long categories,
				    struct pagemap_scan_private *p,
				    unsigned long addr, unsigned long end)
{
	struct page_region *cur_buf = &p->vec_buf[p->vec_buf_index];

	/*
	 * When no output buffer is provided at all, the sentinel values won't
	 * match here: the only way for `cur_buf->end` to be non-zero is for
	 * the entry to be non-empty.
	 */
	if (addr == cur_buf->end && categories == cur_buf->categories) {
		cur_buf->end = end;
		return true;
	}

	if (cur_buf->end) {
		if (p->vec_buf_index >= p->vec_buf_len - 1)
			return false;

		cur_buf = &p->vec_buf[++p->vec_buf_index];
	}

	cur_buf->start = addr;
	cur_buf->end = end;
	cur_buf->categories = categories;

	return true;
}

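/*
 * Record [addr, *end) with the given categories. The range is clamped so
 * that found_pages never exceeds max_pages; on that clamp, or when the
 * output vector fills up, *end is pulled back, walk_end is published and
 * -ENOSPC tells the caller to stop the walk early.
 */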
static int pagemap_scan_output(unsigned long categories,
			       struct pagemap_scan_private *p,
			       unsigned long addr, unsigned long *end)
{
	unsigned long n_pages, total_pages;
	int ret = 0;

	if (!p->vec_buf)
		return 0;

	categories &= p->arg.return_mask;

	n_pages = (*end - addr) / PAGE_SIZE;
	if (check_add_overflow(p->found_pages, n_pages, &total_pages) ||
	    total_pages > p->arg.max_pages) {
		size_t n_too_much = total_pages - p->arg.max_pages;
		*end -= n_too_much * PAGE_SIZE;
		n_pages -= n_too_much;
		ret = -ENOSPC;
	}

	if (!pagemap_scan_push_range(categories, p, addr, *end)) {
		*end = addr;
		n_pages = 0;
		ret = -ENOSPC;
	}

	p->found_pages += n_pages;
	if (ret)
		p->arg.walk_end = *end;

	return ret;
}

static int pagemap_scan_thp_entry(pmd_t *pmd, unsigned long start,
				  unsigned long end, struct mm_walk *walk)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct pagemap_scan_private *p = walk->private;
	struct vm_area_struct *vma = walk->vma;
	unsigned long categories;
	spinlock_t *ptl;
	int ret = 0;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (!ptl)
		return -ENOENT;

	categories = p->cur_vma_category |
		     pagemap_thp_category(p, vma, start, *pmd);

	if (!pagemap_scan_is_interesting_page(categories, p))
		goto out_unlock;

	ret = pagemap_scan_output(categories, p, start, &end);
	if (start == end)
		goto out_unlock;

	if (~p->arg.flags & PM_SCAN_WP_MATCHING)
		goto out_unlock;
	if (~categories & PAGE_IS_WRITTEN)
		goto out_unlock;

	/*
	 * Break huge page into small pages if the WP operation
	 * needs to be performed on a portion of the huge page.
	 */
	if (end != start + HPAGE_SIZE) {
		spin_unlock(ptl);
		split_huge_pmd(vma, pmd, start);
		pagemap_scan_backout_range(p, start, end);
		/* Report as if there was no THP */
		return -ENOENT;
	}

	make_uffd_wp_pmd(vma, start, pmd);
	flush_tlb_range(vma, start, end);
out_unlock:
	spin_unlock(ptl);
	return ret;
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
	return -ENOENT;
#endif
}

static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start,
				  unsigned long end, struct mm_walk *walk)
{
	struct pagemap_scan_private *p = walk->private;
	struct vm_area_struct *vma = walk->vma;
	unsigned long addr, flush_end = 0;
	pte_t *pte, *start_pte;
	spinlock_t *ptl;
	int ret;

	ret = pagemap_scan_thp_entry(pmd, start, end, walk);
	if (ret != -ENOENT)
		return ret;

	ret = 0;
	start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
	if (!pte) {
		walk->action = ACTION_AGAIN;
		return 0;
	}

	arch_enter_lazy_mmu_mode();

	if ((p->arg.flags & PM_SCAN_WP_MATCHING) && !p->vec_out) {
		/* Fast path for performing exclusive WP */
		for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
			pte_t ptent = ptep_get(pte);

			if ((pte_present(ptent) && pte_uffd_wp(ptent)) ||
			    pte_swp_uffd_wp_any(ptent))
				continue;
			make_uffd_wp_pte(vma, addr, pte, ptent);
			if (!flush_end)
				start = addr;
			flush_end = addr + PAGE_SIZE;
		}
		goto flush_and_return;
	}

	if (!p->arg.category_anyof_mask && !p->arg.category_inverted &&
	    p->arg.category_mask == PAGE_IS_WRITTEN &&
	    p->arg.return_mask == PAGE_IS_WRITTEN) {
		for (addr = start; addr < end; pte++, addr += PAGE_SIZE) {
			unsigned long next = addr + PAGE_SIZE;
			pte_t ptent = ptep_get(pte);

			if ((pte_present(ptent) && pte_uffd_wp(ptent)) ||
			    pte_swp_uffd_wp_any(ptent))
				continue;
			ret = pagemap_scan_output(p->cur_vma_category | PAGE_IS_WRITTEN,
						  p, addr, &next);
			if (next == addr)
				break;
			if (~p->arg.flags & PM_SCAN_WP_MATCHING)
				continue;
			make_uffd_wp_pte(vma, addr, pte, ptent);
			if (!flush_end)
				start = addr;
			flush_end = next;
		}
		goto flush_and_return;
	}

	for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
		pte_t ptent = ptep_get(pte);
		unsigned long categories = p->cur_vma_category |
					   pagemap_page_category(p, vma, addr, ptent);
		unsigned long next = addr + PAGE_SIZE;

		if (!pagemap_scan_is_interesting_page(categories, p))
			continue;

		ret = pagemap_scan_output(categories, p, addr, &next);
		if (next == addr)
			break;

		if (~p->arg.flags & PM_SCAN_WP_MATCHING)
			continue;
		if (~categories & PAGE_IS_WRITTEN)
			continue;

		make_uffd_wp_pte(vma, addr, pte, ptent);
		if (!flush_end)
			start = addr;
		flush_end = next;
	}

flush_and_return:
	if (flush_end)
		flush_tlb_range(vma, start, addr);

	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(start_pte, ptl);

	cond_resched();
	return ret;
}

#ifdef CONFIG_HUGETLB_PAGE
static int pagemap_scan_hugetlb_entry(pte_t *ptep, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
	struct pagemap_scan_private *p = walk->private;
	struct vm_area_struct *vma = walk->vma;
	unsigned long categories;
	spinlock_t *ptl;
	int ret = 0;
	pte_t pte;

	if (~p->arg.flags & PM_SCAN_WP_MATCHING) {
		/* Go the short route when not write-protecting pages. */

		pte = huge_ptep_get(walk->mm, start, ptep);
		categories = p->cur_vma_category | pagemap_hugetlb_category(pte);

		if (!pagemap_scan_is_interesting_page(categories, p))
			return 0;

		return pagemap_scan_output(categories, p, start, &end);
	}

	i_mmap_lock_write(vma->vm_file->f_mapping);
	ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, ptep);

	pte = huge_ptep_get(walk->mm, start, ptep);
	categories = p->cur_vma_category | pagemap_hugetlb_category(pte);

	if (!pagemap_scan_is_interesting_page(categories, p))
		goto out_unlock;

	ret = pagemap_scan_output(categories, p, start, &end);
	if (start == end)
		goto out_unlock;

	if (~categories & PAGE_IS_WRITTEN)
		goto out_unlock;

	if (end != start + HPAGE_SIZE) {
		/* Partial HugeTLB page WP isn't possible. */
		pagemap_scan_backout_range(p, start, end);
		p->arg.walk_end = start;
		ret = 0;
		goto out_unlock;
	}

	make_uffd_wp_huge_pte(vma, start, ptep, pte);
	flush_hugetlb_tlb_range(vma, start, end);

out_unlock:
	spin_unlock(ptl);
	i_mmap_unlock_write(vma->vm_file->f_mapping);

	return ret;
}
#else
#define pagemap_scan_hugetlb_entry NULL
#endif

static int pagemap_scan_pte_hole(unsigned long addr, unsigned long end,
				 int depth, struct mm_walk *walk)
{
	struct pagemap_scan_private *p = walk->private;
	struct vm_area_struct *vma = walk->vma;
	int ret, err;

	if (!vma || !pagemap_scan_is_interesting_page(p->cur_vma_category, p))
		return 0;

	ret = pagemap_scan_output(p->cur_vma_category, p, addr, &end);
	if (addr == end)
		return ret;

	if (~p->arg.flags & PM_SCAN_WP_MATCHING)
		return ret;

	err = uffd_wp_range(vma, addr, end - addr, true);
	if (err < 0)
		ret = err;

	return ret;
}

static const struct mm_walk_ops pagemap_scan_ops = {
	.test_walk = pagemap_scan_test_walk,
	.pmd_entry = pagemap_scan_pmd_entry,
	.pte_hole = pagemap_scan_pte_hole,
	.hugetlb_entry = pagemap_scan_hugetlb_entry,
};

static int pagemap_scan_get_args(struct pm_scan_arg *arg,
				 unsigned long uarg)
{
	if (copy_from_user(arg, (void __user *)uarg, sizeof(*arg)))
		return -EFAULT;

	if (arg->size != sizeof(struct pm_scan_arg))
		return -EINVAL;

	/* Validate requested features */
	if (arg->flags & ~PM_SCAN_FLAGS)
		return -EINVAL;
	if ((arg->category_inverted | arg->category_mask |
	     arg->category_anyof_mask | arg->return_mask) & ~PM_SCAN_CATEGORIES)
		return -EINVAL;

	arg->start = untagged_addr((unsigned long)arg->start);
	arg->end = untagged_addr((unsigned long)arg->end);
	arg->vec = untagged_addr((unsigned long)arg->vec);

	/* Validate memory pointers */
	if (!IS_ALIGNED(arg->start, PAGE_SIZE))
		return -EINVAL;
	if (!access_ok((void __user *)(long)arg->start, arg->end - arg->start))
		return -EFAULT;
	if (!arg->vec && arg->vec_len)
		return -EINVAL;
	if (UINT_MAX == SIZE_MAX && arg->vec_len > SIZE_MAX)
		return -EINVAL;
	if (arg->vec && !access_ok((void __user *)(long)arg->vec,
				   size_mul(arg->vec_len, sizeof(struct page_region))))
		return -EFAULT;

	/* Fixup default values */
	arg->end = ALIGN(arg->end, PAGE_SIZE);
	arg->walk_end = 0;
	if (!arg->max_pages)
		arg->max_pages = ULONG_MAX;

	return 0;
}

static int pagemap_scan_writeback_args(struct pm_scan_arg *arg,
				       unsigned long uargl)
{
	struct pm_scan_arg __user *uarg = (void __user *)uargl;

	if (copy_to_user(&uarg->walk_end, &arg->walk_end, sizeof(arg->walk_end)))
		return -EFAULT;

	return 0;
}

static int pagemap_scan_init_bounce_buffer(struct pagemap_scan_private *p)
{
	if (!p->arg.vec_len)
		return 0;

	p->vec_buf_len = min_t(size_t, PAGEMAP_WALK_SIZE >> PAGE_SHIFT,
			       p->arg.vec_len);
	p->vec_buf = kmalloc_array(p->vec_buf_len, sizeof(*p->vec_buf),
				   GFP_KERNEL);
	if (!p->vec_buf)
		return -ENOMEM;

	p->vec_buf->start = p->vec_buf->end = 0;
	p->vec_out = (struct page_region __user *)(long)p->arg.vec;

	return 0;
}

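/*
 * Copy the ranges buffered so far out to userspace. The entry at
 * vec_buf_index may still be open for merging, so it is only included if
 * non-empty; afterwards the buffer is reset and shrunk to whatever room
 * remains in the user-supplied vector.
 */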
static long pagemap_scan_flush_buffer(struct pagemap_scan_private *p)
{
	const struct page_region *buf = p->vec_buf;
	long n = p->vec_buf_index;

	if (!p->vec_buf)
		return 0;

	if (buf[n].end != buf[n].start)
		n++;

	if (!n)
		return 0;

	if (copy_to_user(p->vec_out, buf, n * sizeof(*buf)))
		return -EFAULT;

	p->arg.vec_len -= n;
	p->vec_out += n;

	p->vec_buf_index = 0;
	p->vec_buf_len = min_t(size_t, p->vec_buf_len, p->arg.vec_len);
	p->vec_buf->start = p->vec_buf->end = 0;

	return n;
}

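/*
 * Top-level PAGEMAP_SCAN handler: walk [start, end) in chunks under the
 * mmap read lock, flushing the bounce buffer to userspace between chunks.
 * -ENOSPC from the walk only means the buffer filled up, so the loop
 * resumes from walk_end until the output vector or page budget runs out.
 */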
static long do_pagemap_scan(struct mm_struct *mm, unsigned long uarg)
{
	struct pagemap_scan_private p = {0};
	unsigned long walk_start;
	size_t n_ranges_out = 0;
	int ret;

	ret = pagemap_scan_get_args(&p.arg, uarg);
	if (ret)
		return ret;

	p.masks_of_interest = p.arg.category_mask | p.arg.category_anyof_mask |
			      p.arg.return_mask;
	ret = pagemap_scan_init_bounce_buffer(&p);
	if (ret)
		return ret;

	for (walk_start = p.arg.start; walk_start < p.arg.end;
			walk_start = p.arg.walk_end) {
		struct mmu_notifier_range range;
		long n_out;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		ret = mmap_read_lock_killable(mm);
		if (ret)
			break;

		/* Protection change for the range is going to happen. */
		if (p.arg.flags & PM_SCAN_WP_MATCHING) {
			mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 0,
						mm, walk_start, p.arg.end);
			mmu_notifier_invalidate_range_start(&range);
		}

		ret = walk_page_range(mm, walk_start, p.arg.end,
				      &pagemap_scan_ops, &p);

		if (p.arg.flags & PM_SCAN_WP_MATCHING)
			mmu_notifier_invalidate_range_end(&range);

		mmap_read_unlock(mm);

		n_out = pagemap_scan_flush_buffer(&p);
		if (n_out < 0)
			ret = n_out;
		else
			n_ranges_out += n_out;

		if (ret != -ENOSPC)
			break;

		if (p.arg.vec_len == 0 || p.found_pages == p.arg.max_pages)
			break;
	}

	/* ENOSPC signifies early stop (buffer full) from the walk. */
	if (!ret || ret == -ENOSPC)
		ret = n_ranges_out;

	/* The walk_end isn't set when ret is zero */
	if (!p.arg.walk_end)
		p.arg.walk_end = p.arg.end;
	if (pagemap_scan_writeback_args(&p.arg, uarg))
		ret = -EFAULT;

	kfree(p.vec_buf);
	return ret;
}

static long do_pagemap_cmd(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	struct mm_struct *mm = file->private_data;

	switch (cmd) {
	case PAGEMAP_SCAN:
		return do_pagemap_scan(mm, arg);

	default:
		return -EINVAL;
	}
}

const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
	.open		= pagemap_open,
	.release	= pagemap_release,
	.unlocked_ioctl = do_pagemap_cmd,
	.compat_ioctl	= do_pagemap_cmd,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */

#ifdef CONFIG_NUMA

struct numa_maps {
	unsigned long pages;
	unsigned long anon;
	unsigned long active;
	unsigned long writeback;
	unsigned long mapcount_max;
	unsigned long dirty;
	unsigned long swapcache;
	unsigned long node[MAX_NUMNODES];
};

struct numa_maps_private {
	struct proc_maps_private proc_maps;
	struct numa_maps md;
};

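/*
 * Fold one page (or @nr_pages worth of a huge mapping) into the per-vma
 * numa_maps totals. mapcount_max tracks the highest mapcount seen in the
 * vma and is reported as "mapmax" by show_numa_map().
 */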
static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
			unsigned long nr_pages)
{
	struct folio *folio = page_folio(page);
	int count;

	if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
		count = folio_precise_page_mapcount(folio, page);
	else
		count = folio_average_page_mapcount(folio);

	md->pages += nr_pages;
	if (pte_dirty || folio_test_dirty(folio))
		md->dirty += nr_pages;

	if (folio_test_swapcache(folio))
		md->swapcache += nr_pages;

	if (folio_test_active(folio) || folio_test_unevictable(folio))
		md->active += nr_pages;

	if (folio_test_writeback(folio))
		md->writeback += nr_pages;

	if (folio_test_anon(folio))
		md->anon += nr_pages;

	if (count > md->mapcount_max)
		md->mapcount_max = count;

	md->node[folio_nid(folio)] += nr_pages;
}

static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
		unsigned long addr)
{
	struct page *page;
	int nid;

	if (!pte_present(pte))
		return NULL;

	page = vm_normal_page(vma, addr, pte);
	if (!page || is_zone_device_page(page))
		return NULL;

	if (PageReserved(page))
		return NULL;

	nid = page_to_nid(page);
	if (!node_isset(nid, node_states[N_MEMORY]))
		return NULL;

	return page;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
					      struct vm_area_struct *vma,
					      unsigned long addr)
{
	struct page *page;
	int nid;

	if (!pmd_present(pmd))
		return NULL;

	page = vm_normal_page_pmd(vma, addr, pmd);
	if (!page)
		return NULL;

	if (PageReserved(page))
		return NULL;

	nid = page_to_nid(page);
	if (!node_isset(nid, node_states[N_MEMORY]))
		return NULL;

	return page;
}
#endif

static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
		unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md = walk->private;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *orig_pte;
	pte_t *pte;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		struct page *page;

		page = can_gather_numa_stats_pmd(*pmd, vma, addr);
		if (page)
			gather_stats(page, md, pmd_dirty(*pmd),
				     HPAGE_PMD_SIZE/PAGE_SIZE);
		spin_unlock(ptl);
		return 0;
	}
#endif
	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte) {
		walk->action = ACTION_AGAIN;
		return 0;
	}
	do {
		pte_t ptent = ptep_get(pte);
		struct page *page = can_gather_numa_stats(ptent, vma, addr);
		if (!page)
			continue;
		gather_stats(page, md, pte_dirty(ptent), 1);

	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	cond_resched();
	return 0;
}
#ifdef CONFIG_HUGETLB_PAGE
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	pte_t huge_pte = huge_ptep_get(walk->mm, addr, pte);
	struct numa_maps *md;
	struct page *page;

	if (!pte_present(huge_pte))
		return 0;

	page = pte_page(huge_pte);

	md = walk->private;
	gather_stats(page, md, pte_dirty(huge_pte), 1);
	return 0;
}

#else
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	return 0;
}
#endif

static const struct mm_walk_ops show_numa_ops = {
	.hugetlb_entry = gather_hugetlb_stats,
	.pmd_entry = gather_pte_stats,
	.walk_lock = PGWALK_RDLOCK,
};

/*
 * Display pages allocated per node and memory policy via /proc.
 */
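/*
 * An illustrative (made-up) line of output, assuming a two-node box with
 * 4K pages (a single line in the real file):
 *
 *	7f10a4a00000 default file=/usr/lib64/libc.so.6 mapped=42 mapmax=3
 *	N0=30 N1=12 kernelpagesize_kB=4
 *
 * Everything past the address and policy is emitted only when relevant,
 * as the conditions around the seq_printf() calls below show.
 */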
static int show_numa_map(struct seq_file *m, void *v)
{
	struct numa_maps_private *numa_priv = m->private;
	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
	struct vm_area_struct *vma = v;
	struct numa_maps *md = &numa_priv->md;
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	char buffer[64];
	struct mempolicy *pol;
	pgoff_t ilx;
	int nid;

	if (!mm)
		return 0;

	/* Ensure we start with an empty set of numa_maps statistics. */
	memset(md, 0, sizeof(*md));

	pol = __get_vma_policy(vma, vma->vm_start, &ilx);
	if (pol) {
		mpol_to_str(buffer, sizeof(buffer), pol);
		mpol_cond_put(pol);
	} else {
		mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
	}

	seq_printf(m, "%08lx %s", vma->vm_start, buffer);

	if (file) {
		seq_puts(m, " file=");
		seq_path(m, file_user_path(file), "\n\t= ");
	} else if (vma_is_initial_heap(vma)) {
		seq_puts(m, " heap");
	} else if (vma_is_initial_stack(vma)) {
		seq_puts(m, " stack");
	}

	if (is_vm_hugetlb_page(vma))
		seq_puts(m, " huge");

	/* mmap_lock is held by m_start */
	walk_page_vma(vma, &show_numa_ops, md);

	if (!md->pages)
		goto out;

	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	for_each_node_state(nid, N_MEMORY)
		if (md->node[nid])
			seq_printf(m, " N%d=%lu", nid, md->node[nid]);

	seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
out:
	seq_putc(m, '\n');
	return 0;
}

static const struct seq_operations proc_pid_numa_maps_op = {
	.start  = m_start,
	.next   = m_next,
	.stop   = m_stop,
	.show   = show_numa_map,
};

static int pid_numa_maps_open(struct inode *inode, struct file *file)
{
	return proc_maps_open(inode, file, &proc_pid_numa_maps_op,
				sizeof(struct numa_maps_private));
}

const struct file_operations proc_pid_numa_maps_operations = {
	.open		= pid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

#endif /* CONFIG_NUMA */