// SPDX-License-Identifier: GPL-2.0
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/huge_mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/kernel-page-flags.h>
#include <linux/uaccess.h>
#include "internal.h"

#define KPMSIZE sizeof(u64)
#define KPMMASK (KPMSIZE - 1)
#define KPMBITS (KPMSIZE * BITS_PER_BYTE)
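/*
 * Each file exported below is an array of u64 records indexed by pfn,
 * so the record for a given pfn lives at file offset pfn * KPMSIZE.
 */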

static inline unsigned long get_max_dump_pfn(void)
{
#ifdef CONFIG_SPARSEMEM
	/*
	 * The memmap of early sections is completely populated and marked
	 * online even if max_pfn does not fall on a section boundary -
	 * pfn_to_online_page() will succeed on all pages. Allow inspecting
	 * these memmaps.
	 */
	return round_up(max_pfn, PAGES_PER_SECTION);
#else
	return max_pfn;
#endif
}

/* /proc/kpagecount - an array exposing page mapcounts
 *
 * Each entry is a u64 representing the corresponding
 * physical page mapcount.
 */
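/*
 * A minimal userspace sketch (not part of this file; headers and error
 * handling omitted) of reading one entry, where "pfn" is a
 * caller-supplied physical frame number:
 *
 *	int fd = open("/proc/kpagecount", O_RDONLY);
 *	uint64_t mapcount;
 *
 *	if (pread(fd, &mapcount, sizeof(mapcount),
 *		  pfn * sizeof(uint64_t)) == sizeof(mapcount))
 *		printf("pfn %lu mapped %llu time(s)\n", pfn,
 *		       (unsigned long long)mapcount);
 */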
static ssize_t kpagecount_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		struct page *page;
		u64 mapcount = 0;

		/*
		 * TODO: ZONE_DEVICE support requires identifying
		 * memmaps that were actually initialized.
		 */
		page = pfn_to_online_page(pfn);
		if (page) {
			struct folio *folio = page_folio(page);

			if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
				mapcount = folio_precise_page_mapcount(folio, page);
			else
				mapcount = folio_average_page_mapcount(folio);
		}

		if (put_user(mapcount, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct proc_ops kpagecount_proc_ops = {
	.proc_flags = PROC_ENTRY_PERMANENT,
	.proc_lseek = mem_lseek,
	.proc_read = kpagecount_read,
};

/* /proc/kpageflags - an array exposing page flags
 *
 * Each entry is a u64 representing the corresponding
 * physical page flags.
 */
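/*
 * Userspace sketch (not part of this file; "fd" is an assumed open
 * descriptor on /proc/kpageflags): read the entry for "pfn" and test
 * one of the KPF_* bits from include/uapi/linux/kernel-page-flags.h:
 *
 *	uint64_t flags;
 *
 *	if (pread(fd, &flags, sizeof(flags),
 *		  pfn * sizeof(uint64_t)) == sizeof(flags) &&
 *	    (flags & (1ULL << KPF_THP)))
 *		printf("pfn %lu is part of a THP\n", pfn);
 */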

/* Copy bit @kbit of @kflags into position @ubit of the result. */
static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit)
{
	return ((kflags >> kbit) & 1) << ubit;
}

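/*
 * stable_page_flags() - snapshot the KPF_* flags for a page
 * @page: the page to inspect, or NULL for a memory hole
 *
 * Collapses the raw, potentially racy page/folio state into the stable
 * user-visible KPF_* encoding exposed through /proc/kpageflags.
 */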
u64 stable_page_flags(const struct page *page)
{
	const struct folio *folio;
	unsigned long k;
	unsigned long mapping;
	bool is_anon;
	u64 u = 0;

	/*
	 * pseudo flag: KPF_NOPAGE
	 * it differentiates a memory hole from a page with no flags
	 */
	if (!page)
		return 1 << KPF_NOPAGE;
	folio = page_folio(page);

	k = folio->flags;
	mapping = (unsigned long)folio->mapping;
	is_anon = mapping & PAGE_MAPPING_ANON;

	/*
	 * pseudo flags for the well known (anonymous) memory mapped pages
	 */
	if (page_mapped(page))
		u |= 1 << KPF_MMAP;
	if (is_anon) {
		u |= 1 << KPF_ANON;
		if (mapping & PAGE_MAPPING_KSM)
			u |= 1 << KPF_KSM;
	}

	/*
	 * compound pages: export both head/tail info
	 * they together define a compound page's start/end pos and order
	 */
	if (page == &folio->page)
		u |= kpf_copy_bit(k, KPF_COMPOUND_HEAD, PG_head);
	else
		u |= 1 << KPF_COMPOUND_TAIL;
	if (folio_test_hugetlb(folio))
		u |= 1 << KPF_HUGE;
	else if (folio_test_large(folio) &&
		 folio_test_large_rmappable(folio)) {
		/* Note: we indicate any THPs here, not just PMD-sized ones */
		u |= 1 << KPF_THP;
	} else if (is_huge_zero_folio(folio)) {
		u |= 1 << KPF_ZERO_PAGE;
		u |= 1 << KPF_THP;
	} else if (is_zero_folio(folio)) {
		u |= 1 << KPF_ZERO_PAGE;
	}

	/*
	 * Caveats on high order pages: PG_buddy and PG_slab will only be set
	 * on the head page.
	 */
	if (PageBuddy(page))
		u |= 1 << KPF_BUDDY;
	else if (page_count(page) == 0 && is_free_buddy_page(page))
		u |= 1 << KPF_BUDDY;

	if (PageOffline(page))
		u |= 1 << KPF_OFFLINE;
	if (PageTable(page))
		u |= 1 << KPF_PGTABLE;
	if (folio_test_slab(folio))
		u |= 1 << KPF_SLAB;

#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
	u |= kpf_copy_bit(k, KPF_IDLE, PG_idle);
#else
	if (folio_test_idle(folio))
		u |= 1 << KPF_IDLE;
#endif

	u |= kpf_copy_bit(k, KPF_LOCKED, PG_locked);
	u |= kpf_copy_bit(k, KPF_DIRTY, PG_dirty);
	u |= kpf_copy_bit(k, KPF_UPTODATE, PG_uptodate);
	u |= kpf_copy_bit(k, KPF_WRITEBACK, PG_writeback);

	u |= kpf_copy_bit(k, KPF_LRU, PG_lru);
	u |= kpf_copy_bit(k, KPF_REFERENCED, PG_referenced);
	u |= kpf_copy_bit(k, KPF_ACTIVE, PG_active);
	u |= kpf_copy_bit(k, KPF_RECLAIM, PG_reclaim);

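	/*
	 * PG_swapcache is only meaningful on PG_swapbacked pages, so
	 * KPF_SWAPCACHE is reported only when both bits are set.
	 */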
#define SWAPCACHE ((1 << PG_swapbacked) | (1 << PG_swapcache))
	if ((k & SWAPCACHE) == SWAPCACHE)
		u |= 1 << KPF_SWAPCACHE;
	u |= kpf_copy_bit(k, KPF_SWAPBACKED, PG_swapbacked);

	u |= kpf_copy_bit(k, KPF_UNEVICTABLE, PG_unevictable);
	u |= kpf_copy_bit(k, KPF_MLOCKED, PG_mlocked);

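	/*
	 * For hugetlb the hwpoison flag lives in the folio (head page)
	 * flags; everywhere else PG_hwpoison is tracked per page.
	 */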
#ifdef CONFIG_MEMORY_FAILURE
	if (u & (1 << KPF_HUGE))
		u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison);
	else
		u |= kpf_copy_bit(page->flags, KPF_HWPOISON, PG_hwpoison);
#endif

	u |= kpf_copy_bit(k, KPF_RESERVED, PG_reserved);
	u |= kpf_copy_bit(k, KPF_OWNER_2, PG_owner_2);
	u |= kpf_copy_bit(k, KPF_PRIVATE, PG_private);
	u |= kpf_copy_bit(k, KPF_PRIVATE_2, PG_private_2);
	u |= kpf_copy_bit(k, KPF_OWNER_PRIVATE, PG_owner_priv_1);
	u |= kpf_copy_bit(k, KPF_ARCH, PG_arch_1);
#ifdef CONFIG_ARCH_USES_PG_ARCH_2
	u |= kpf_copy_bit(k, KPF_ARCH_2, PG_arch_2);
#endif
#ifdef CONFIG_ARCH_USES_PG_ARCH_3
	u |= kpf_copy_bit(k, KPF_ARCH_3, PG_arch_3);
#endif

	return u;
}

static ssize_t kpageflags_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires identifying
		 * memmaps that were actually initialized.
		 */
		struct page *page = pfn_to_online_page(pfn);

		if (put_user(stable_page_flags(page), out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct proc_ops kpageflags_proc_ops = {
	.proc_flags = PROC_ENTRY_PERMANENT,
	.proc_lseek = mem_lseek,
	.proc_read = kpageflags_read,
};

#ifdef CONFIG_MEMCG
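/* /proc/kpagecgroup - an array exposing page cgroup inode numbers
 *
 * Each entry is a u64 representing the inode of the memory cgroup the
 * corresponding physical page is charged to, or zero for a hole or an
 * uncharged page.
 */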
static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;
	u64 ino;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires identifying
		 * memmaps that were actually initialized.
		 */
		ppage = pfn_to_online_page(pfn);

		if (ppage)
			ino = page_cgroup_ino(ppage);
		else
			ino = 0;

		if (put_user(ino, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct proc_ops kpagecgroup_proc_ops = {
	.proc_flags = PROC_ENTRY_PERMANENT,
	.proc_lseek = mem_lseek,
	.proc_read = kpagecgroup_read,
};
#endif /* CONFIG_MEMCG */

static int __init proc_page_init(void)
{
	proc_create("kpagecount", S_IRUSR, NULL, &kpagecount_proc_ops);
	proc_create("kpageflags", S_IRUSR, NULL, &kpageflags_proc_ops);
#ifdef CONFIG_MEMCG
	proc_create("kpagecgroup", S_IRUSR, NULL, &kpagecgroup_proc_ops);
#endif
	return 0;
}
fs_initcall(proc_page_init);