#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long data, text, lib, swap;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss. Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher. Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	swap = get_mm_counter(mm, MM_SWAPENTS);
	seq_printf(m,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmPin:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n"
		"VmSwap:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		mm->pinned_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
		swap << (PAGE_SHIFT-10));
}

unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	*shared = get_mm_counter(mm, MM_FILEPAGES);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
	return mm->total_vm;
}

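/* Pad the maps line out to a fixed column before printing the name. */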
static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}

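/*
 * Release the mm pinned by m_start(); skipped for the tail (gate)
 * vma, for which m_start() has already dropped mmap_sem and the mm
 * reference before returning it.
 */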
static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma && vma != priv->tail_vma) {
		struct mm_struct *mm = vma->vm_mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}

static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma = NULL;
	loff_t l = *pos;

	/* Clear the per syscall fields in priv */
	priv->task = NULL;
	priv->tail_vma = NULL;

	/*
	 * We remember last_addr rather than next_addr to hit with
	 * mmap_cache most of the time. We have zero last_addr at
	 * the beginning and also after lseek. We will have -1 last_addr
	 * after the end of the vmas.
	 */

	if (last_addr == -1UL)
		return NULL;

	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = mm_for_maps(priv->task);
	if (!mm || IS_ERR(mm))
		return mm;
	down_read(&mm->mmap_sem);

	tail_vma = get_gate_vma(priv->task->mm);
	priv->tail_vma = tail_vma;

	/* Start with last addr hint */
	vma = find_vma(mm, last_addr);
	if (last_addr && vma) {
		vma = vma->vm_next;
		goto out;
	}

	/*
	 * Check the vma index is within the range and do
	 * sequential scan until m_index.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}

	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */

out:
	if (vma)
		return vma;

	/* End of vmas has been reached */
	m->version = (tail_vma != NULL)? 0: -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct vm_area_struct *tail_vma = priv->tail_vma;

	(*pos)++;
	if (vma && (vma != tail_vma) && vma->vm_next)
		return vma->vm_next;
	vma_stop(priv, vma);
	return (vma != tail_vma)? tail_vma: NULL;
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;

	if (!IS_ERR(vma))
		vma_stop(priv, vma);
	if (priv->task)
		put_task_struct(priv->task);
}

static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	vm_flags_t flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	unsigned long start, end;
	dev_t dev = 0;
	int len;

	if (file) {
		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	/* We don't show the stack guard page in /proc/maps */
	start = vma->vm_start;
	if (stack_guard_page_start(vma, start))
		start += PAGE_SIZE;
	end = vma->vm_end;
	if (stack_guard_page_end(vma, end))
		end -= PAGE_SIZE;

	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
			start,
			end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			pgoff,
			MAJOR(dev), MINOR(dev), ino, &len);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, &file->f_path, "\n");
	} else {
		const char *name = arch_vma_name(vma);
		if (!name) {
			if (mm) {
				if (vma->vm_start <= mm->brk &&
						vma->vm_end >= mm->start_brk) {
					name = "[heap]";
				} else if (vma->vm_start <= mm->start_stack &&
					   vma->vm_end >= mm->start_stack) {
					name = "[stack]";
				}
			} else {
				name = "[vdso]";
			}
		}
		if (name) {
			pad_len_spaces(m, len);
			seq_puts(m, name);
		}
	}
	seq_putc(m, '\n');
}

static int show_map(struct seq_file *m, void *v)
{
	struct vm_area_struct *vma = v;
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;

	show_map_vma(m, vma);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task->mm))
			? vma->vm_start : 0;
	return 0;
}

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

static int maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

const struct file_operations proc_maps_operations = {
	.open		= maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

/*
 * Proportional Set Size (PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it. So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64-bit
 * fixed-point pss counter, so (pss >> PSS_SHIFT) is the real byte
 * count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 * 	- 1M 3-user-pages add up to 8KB errors;
 * 	- supports mapcount up to 2^24, or 16M;
 * 	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12
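
/*
 * Worked example (illustrative, assuming 4K pages): a page shared by
 * three processes adds (4096 << PSS_SHIFT) / 3 = 5592405 to each
 * sharer's pss, which reads back as 5592405 >> PSS_SHIFT = 1365
 * bytes, so truncation loses less than one byte per page.
 */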

#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	struct vm_area_struct *vma;
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long anonymous;
	unsigned long anonymous_thp;
	unsigned long swap;
	u64 pss;
};


static void smaps_pte_entry(pte_t ptent, unsigned long addr,
		unsigned long ptent_size, struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	struct page *page;
	int mapcount;

	if (is_swap_pte(ptent)) {
		mss->swap += ptent_size;
		return;
	}

	if (!pte_present(ptent))
		return;

	page = vm_normal_page(vma, addr, ptent);
	if (!page)
		return;

	if (PageAnon(page))
		mss->anonymous += ptent_size;

	mss->resident += ptent_size;
	/* Accumulate the size in pages that have been accessed. */
	if (pte_young(ptent) || PageReferenced(page))
		mss->referenced += ptent_size;
	mapcount = page_mapcount(page);
	if (mapcount >= 2) {
		if (pte_dirty(ptent) || PageDirty(page))
			mss->shared_dirty += ptent_size;
		else
			mss->shared_clean += ptent_size;
		mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
	} else {
		if (pte_dirty(ptent) || PageDirty(page))
			mss->private_dirty += ptent_size;
		else
			mss->private_clean += ptent_size;
		mss->pss += (ptent_size << PSS_SHIFT);
	}
}

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	pte_t *pte;
	spinlock_t *ptl;

	spin_lock(&walk->mm->page_table_lock);
	if (pmd_trans_huge(*pmd)) {
		if (pmd_trans_splitting(*pmd)) {
			spin_unlock(&walk->mm->page_table_lock);
			wait_split_huge_page(vma->anon_vma, pmd);
		} else {
			smaps_pte_entry(*(pte_t *)pmd, addr,
					HPAGE_PMD_SIZE, walk);
			spin_unlock(&walk->mm->page_table_lock);
			mss->anonymous_thp += HPAGE_PMD_SIZE;
			return 0;
		}
	} else {
		spin_unlock(&walk->mm->page_table_lock);
	}
	/*
	 * The mmap_sem held all the way back in m_start() is what
	 * keeps khugepaged out of here and from collapsing things
	 * in here.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static int show_smap(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;
	struct mm_walk smaps_walk = {
		.pmd_entry = smaps_pte_range,
		.mm = vma->vm_mm,
		.private = &mss,
	};

	memset(&mss, 0, sizeof mss);
	mss.vma = vma;
	/* mmap_sem is held in m_start */
	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);

	show_map_vma(m, vma);

	seq_printf(m,
		   "Size:           %8lu kB\n"
		   "Rss:            %8lu kB\n"
		   "Pss:            %8lu kB\n"
		   "Shared_Clean:   %8lu kB\n"
		   "Shared_Dirty:   %8lu kB\n"
		   "Private_Clean:  %8lu kB\n"
		   "Private_Dirty:  %8lu kB\n"
		   "Referenced:     %8lu kB\n"
		   "Anonymous:      %8lu kB\n"
		   "AnonHugePages:  %8lu kB\n"
		   "Swap:           %8lu kB\n"
		   "KernelPageSize: %8lu kB\n"
		   "MMUPageSize:    %8lu kB\n"
		   "Locked:         %8lu kB\n",
		   (vma->vm_end - vma->vm_start) >> 10,
		   mss.resident >> 10,
		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
		   mss.shared_clean  >> 10,
		   mss.shared_dirty  >> 10,
		   mss.private_clean >> 10,
		   mss.private_dirty >> 10,
		   mss.referenced >> 10,
		   mss.anonymous >> 10,
		   mss.anonymous_thp >> 10,
		   mss.swap >> 10,
		   vma_kernel_pagesize(vma) >> 10,
		   vma_mmu_pagesize(vma) >> 10,
		   (vma->vm_flags & VM_LOCKED) ?
			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task->mm))
			? vma->vm_start : 0;
	return 0;
}

static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_smap
};

static int smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

const struct file_operations proc_smaps_operations = {
	.open		= smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	split_huge_page_pmd(walk->mm, pmd);

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		if (PageReserved(page))
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

#define CLEAR_REFS_ALL 1
#define CLEAR_REFS_ANON 2
#define CLEAR_REFS_MAPPED 3
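
/*
 * Illustrative usage from userspace:
 *
 *	echo 1 > /proc/$pid/clear_refs		(all pages)
 *	echo 2 > /proc/$pid/clear_refs		(anonymous pages only)
 *	echo 3 > /proc/$pid/clear_refs		(file-mapped pages only)
 */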

static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int type;
	int rv;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	rv = kstrtoint(strstrip(buffer), 10, &type);
	if (rv < 0)
		return rv;
	if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED)
		return -EINVAL;
	task = get_proc_task(file->f_path.dentry->d_inode);
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		struct mm_walk clear_refs_walk = {
			.pmd_entry = clear_refs_pte_range,
			.mm = mm,
		};
		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			clear_refs_walk.private = vma;
			if (is_vm_hugetlb_page(vma))
				continue;
			/*
			 * Writing 1 to /proc/pid/clear_refs affects all pages.
			 *
			 * Writing 2 to /proc/pid/clear_refs only affects
			 * anonymous pages.
			 *
			 * Writing 3 to /proc/pid/clear_refs only affects file
			 * mapped pages.
			 */
			if (type == CLEAR_REFS_ANON && vma->vm_file)
				continue;
			if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
				continue;
			walk_page_range(vma->vm_start, vma->vm_end,
					&clear_refs_walk);
		}
		flush_tlb_mm(mm);
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	put_task_struct(task);

	return count;
}

const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
	.llseek		= noop_llseek,
};

struct pagemapread {
	int pos, len;
	u64 *buffer;
};

#define PM_ENTRY_BYTES      sizeof(u64)
#define PM_STATUS_BITS      3
#define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
#define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
#define PM_STATUS(nr)       (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
#define PM_PSHIFT_BITS      6
#define PM_PSHIFT_OFFSET    (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
#define PM_PSHIFT_MASK      (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
#define PM_PSHIFT(x)        (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
#define PM_PFRAME_MASK      ((1LL << PM_PSHIFT_OFFSET) - 1)
#define PM_PFRAME(x)        ((x) & PM_PFRAME_MASK)

#define PM_PRESENT          PM_STATUS(4LL)
#define PM_SWAP             PM_STATUS(2LL)
#define PM_NOT_PRESENT      PM_PSHIFT(PAGE_SHIFT)
#define PM_END_OF_BUFFER    1

static int add_to_pagemap(unsigned long addr, u64 pfn,
			  struct pagemapread *pm)
{
	pm->buffer[pm->pos++] = pfn;
	if (pm->pos >= pm->len)
		return PM_END_OF_BUFFER;
	return 0;
}

static int pagemap_pte_hole(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	unsigned long addr;
	int err = 0;
	for (addr = start; addr < end; addr += PAGE_SIZE) {
		err = add_to_pagemap(addr, PM_NOT_PRESENT, pm);
		if (err)
			break;
	}
	return err;
}

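/*
 * Pack a swap pte into the pfn field of a pagemap entry: the swap
 * type goes in the low MAX_SWAPFILES_SHIFT bits, the swap offset in
 * the bits above it.
 */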
static u64 swap_pte_to_pagemap_entry(pte_t pte)
{
	swp_entry_t e = pte_to_swp_entry(pte);
	return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
}

static u64 pte_to_pagemap_entry(pte_t pte)
{
	u64 pme = 0;
	if (is_swap_pte(pte))
		pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte))
			| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
	else if (pte_present(pte))
		pme = PM_PFRAME(pte_pfn(pte))
			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
	return pme;
}

static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct vm_area_struct *vma;
	struct pagemapread *pm = walk->private;
	pte_t *pte;
	int err = 0;

	split_huge_page_pmd(walk->mm, pmd);

	/* find the first VMA at or above 'addr' */
	vma = find_vma(walk->mm, addr);
	for (; addr != end; addr += PAGE_SIZE) {
		u64 pfn = PM_NOT_PRESENT;

		/* check to see if we've left 'vma' behind
		 * and need a new, higher one */
		if (vma && (addr >= vma->vm_end))
			vma = find_vma(walk->mm, addr);

		/* check that 'vma' actually covers this address,
		 * and that it isn't a huge page vma */
		if (vma && (vma->vm_start <= addr) &&
		    !is_vm_hugetlb_page(vma)) {
			pte = pte_offset_map(pmd, addr);
			pfn = pte_to_pagemap_entry(*pte);
			/* unmap before userspace copy */
			pte_unmap(pte);
		}
		err = add_to_pagemap(addr, pfn, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
static u64 huge_pte_to_pagemap_entry(pte_t pte, int offset)
{
	u64 pme = 0;
	if (pte_present(pte))
		pme = PM_PFRAME(pte_pfn(pte) + offset)
			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
	return pme;
}

/* This function walks within one hugetlb entry in a single call */
static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	int err = 0;
	u64 pfn;

	for (; addr != end; addr += PAGE_SIZE) {
		int offset = (addr & ~hmask) >> PAGE_SHIFT;
		pfn = huge_pte_to_pagemap_entry(*pte, offset);
		err = add_to_pagemap(addr, pfn, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bits 55-60 page shift (page size = 1<<page shift)
 * Bit  61    reserved for future use
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
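/*
 * A minimal sketch of decoding one entry in userspace, given the
 * layout above (illustrative only, not part of this file's build):
 *
 *	int present = (entry >> 63) & 1;
 *	int swapped = (entry >> 62) & 1;
 *	int shift   = (entry >> 55) & 0x3f;
 *	unsigned long long pfn = entry & ((1ULL << 55) - 1);
 */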
#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
#define PAGEMAP_WALK_MASK	(PMD_MASK)
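/*
 * pagemap_read() below walks the address space in PMD-sized,
 * PMD-aligned chunks, so each walk_page_range() call fills at most
 * one buffer's worth of entries before copying out to userspace.
 */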
static ssize_t pagemap_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
	struct mm_struct *mm;
	struct pagemapread pm;
	int ret = -ESRCH;
	struct mm_walk pagemap_walk = {};
	unsigned long src;
	unsigned long svpfn;
	unsigned long start_vaddr;
	unsigned long end_vaddr;
	int copied = 0;

	if (!task)
		goto out;

	ret = -EINVAL;
	/* file position must be aligned */
	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
		goto out_task;

	ret = 0;
	if (!count)
		goto out_task;

	pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
	pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
	ret = -ENOMEM;
	if (!pm.buffer)
		goto out_task;

	mm = mm_for_maps(task);
	ret = PTR_ERR(mm);
	if (!mm || IS_ERR(mm))
		goto out_free;

	pagemap_walk.pmd_entry = pagemap_pte_range;
	pagemap_walk.pte_hole = pagemap_pte_hole;
#ifdef CONFIG_HUGETLB_PAGE
	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
#endif
	pagemap_walk.mm = mm;
	pagemap_walk.private = &pm;

	src = *ppos;
	svpfn = src / PM_ENTRY_BYTES;
	start_vaddr = svpfn << PAGE_SHIFT;
	end_vaddr = TASK_SIZE_OF(task);

	/* watch out for wraparound */
	if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
		start_vaddr = end_vaddr;

	/*
	 * The odds are that this will stop walking way
	 * before end_vaddr, because the length of the
	 * user buffer is tracked in "pm", and the walk
	 * will stop when we hit the end of the buffer.
	 */
	ret = 0;
	while (count && (start_vaddr < end_vaddr)) {
		int len;
		unsigned long end;

		pm.pos = 0;
		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
		/* overflow ? */
		if (end < start_vaddr || end > end_vaddr)
			end = end_vaddr;
		down_read(&mm->mmap_sem);
		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
		up_read(&mm->mmap_sem);
		start_vaddr = end;

		len = min(count, PM_ENTRY_BYTES * pm.pos);
		if (copy_to_user(buf, pm.buffer, len)) {
			ret = -EFAULT;
			goto out_mm;
		}
		copied += len;
		buf += len;
		count -= len;
	}
	*ppos += copied;
	if (!ret || ret == PM_END_OF_BUFFER)
		ret = copied;

out_mm:
	mmput(mm);
out_free:
	kfree(pm.buffer);
out_task:
	put_task_struct(task);
out:
	return ret;
}

const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */

#ifdef CONFIG_NUMA

struct numa_maps {
	struct vm_area_struct *vma;
	unsigned long pages;
	unsigned long anon;
	unsigned long active;
	unsigned long writeback;
	unsigned long mapcount_max;
	unsigned long dirty;
	unsigned long swapcache;
	unsigned long node[MAX_NUMNODES];
};

struct numa_maps_private {
	struct proc_maps_private proc_maps;
	struct numa_maps md;
};

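/*
 * Accumulate one page's worth of NUMA statistics: per-state counts,
 * the maximum mapcount seen, and a per-node page count.
 */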
static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
			unsigned long nr_pages)
{
	int count = page_mapcount(page);

	md->pages += nr_pages;
	if (pte_dirty || PageDirty(page))
		md->dirty += nr_pages;

	if (PageSwapCache(page))
		md->swapcache += nr_pages;

	if (PageActive(page) || PageUnevictable(page))
		md->active += nr_pages;

	if (PageWriteback(page))
		md->writeback += nr_pages;

	if (PageAnon(page))
		md->anon += nr_pages;

	if (count > md->mapcount_max)
		md->mapcount_max = count;

	md->node[page_to_nid(page)] += nr_pages;
}

static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
		unsigned long addr)
{
	struct page *page;
	int nid;

	if (!pte_present(pte))
		return NULL;

	page = vm_normal_page(vma, addr, pte);
	if (!page)
		return NULL;

	if (PageReserved(page))
		return NULL;

	nid = page_to_nid(page);
	if (!node_isset(nid, node_states[N_HIGH_MEMORY]))
		return NULL;

	return page;
}

static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
		unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md;
	spinlock_t *ptl;
	pte_t *orig_pte;
	pte_t *pte;

	md = walk->private;
	spin_lock(&walk->mm->page_table_lock);
	if (pmd_trans_huge(*pmd)) {
		if (pmd_trans_splitting(*pmd)) {
			spin_unlock(&walk->mm->page_table_lock);
			wait_split_huge_page(md->vma->anon_vma, pmd);
		} else {
			pte_t huge_pte = *(pte_t *)pmd;
			struct page *page;

			page = can_gather_numa_stats(huge_pte, md->vma, addr);
			if (page)
				gather_stats(page, md, pte_dirty(huge_pte),
						HPAGE_PMD_SIZE/PAGE_SIZE);
			spin_unlock(&walk->mm->page_table_lock);
			return 0;
		}
	} else {
		spin_unlock(&walk->mm->page_table_lock);
	}

	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	do {
		struct page *page = can_gather_numa_stats(*pte, md->vma, addr);
		if (!page)
			continue;
		gather_stats(page, md, pte_dirty(*pte), 1);

	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return 0;
}
#ifdef CONFIG_HUGETLB_PAGE
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md;
	struct page *page;

	if (pte_none(*pte))
		return 0;

	page = pte_page(*pte);
	if (!page)
		return 0;

	md = walk->private;
	gather_stats(page, md, pte_dirty(*pte), 1);
	return 0;
}

#else
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	return 0;
}
#endif

/*
 * Display pages allocated per node and memory policy via /proc.
 */
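/*
 * An output line looks roughly like (illustrative):
 *
 *	00400000 default file=/bin/cat mapped=1 active=0 N0=1
 */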
static int show_numa_map(struct seq_file *m, void *v)
{
	struct numa_maps_private *numa_priv = m->private;
	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
	struct vm_area_struct *vma = v;
	struct numa_maps *md = &numa_priv->md;
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	struct mm_walk walk = {};
	struct mempolicy *pol;
	int n;
	char buffer[50];

	if (!mm)
		return 0;

	/* Ensure we start with an empty set of numa_maps statistics. */
	memset(md, 0, sizeof(*md));

	md->vma = vma;

	walk.hugetlb_entry = gather_hugetlb_stats;
	walk.pmd_entry = gather_pte_stats;
	walk.private = md;
	walk.mm = mm;

	pol = get_vma_policy(proc_priv->task, vma, vma->vm_start);
	mpol_to_str(buffer, sizeof(buffer), pol, 0);
	mpol_cond_put(pol);

	seq_printf(m, "%08lx %s", vma->vm_start, buffer);

	if (file) {
		seq_printf(m, " file=");
		seq_path(m, &file->f_path, "\n\t= ");
	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
		seq_printf(m, " heap");
	} else if (vma->vm_start <= mm->start_stack &&
			vma->vm_end >= mm->start_stack) {
		seq_printf(m, " stack");
	}

	if (is_vm_hugetlb_page(vma))
		seq_printf(m, " huge");

	walk_page_range(vma->vm_start, vma->vm_end, &walk);

	if (!md->pages)
		goto out;

	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	for_each_node_state(n, N_HIGH_MEMORY)
		if (md->node[n])
			seq_printf(m, " N%d=%lu", n, md->node[n]);
out:
	seq_putc(m, '\n');

	if (m->count < m->size)
		m->version = (vma != proc_priv->tail_vma) ? vma->vm_start : 0;
	return 0;
}

static const struct seq_operations proc_pid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_numa_map,
};

static int numa_maps_open(struct inode *inode, struct file *file)
{
	struct numa_maps_private *priv;
	int ret = -ENOMEM;
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->proc_maps.pid = proc_pid(inode);
		ret = seq_open(file, &proc_pid_numa_maps_op);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

const struct file_operations proc_numa_maps_operations = {
	.open		= numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif /* CONFIG_NUMA */
