// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kobject.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/page_ext.h>
#include <linux/page_idle.h>

#include "internal.h"

#define BITMAP_CHUNK_SIZE	sizeof(u64)
#define BITMAP_CHUNK_BITS	(BITMAP_CHUNK_SIZE * BITS_PER_BYTE)
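
/*
 * The bitmap exposed via sysfs is an array of u64 chunks: chunk i covers
 * pfns [i * BITMAP_CHUNK_BITS, (i + 1) * BITMAP_CHUNK_BITS), so a byte
 * offset 'pos' into the file corresponds to pfn = pos * BITS_PER_BYTE.
 */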

/*
 * Idle page tracking only considers user memory pages; for other types of
 * pages the idle flag is always unset and an attempt to set it is silently
 * ignored.
 *
 * We treat a page as a user memory page if it is on an LRU list, because it is
 * always safe to pass such a page to rmap_walk(), which is essential for idle
 * page tracking. With such an indicator of user pages we can skip isolated
 * pages, but since there are not usually many of them, it will hardly affect
 * the overall result.
 *
 * This function tries to get a user memory page by pfn as described above.
 */
static struct folio *page_idle_get_folio(unsigned long pfn)
{
	struct page *page = pfn_to_online_page(pfn);
	struct folio *folio;

	if (!page || PageTail(page))
		return NULL;

	folio = page_folio(page);
	if (!folio_test_lru(folio) || !folio_try_get(folio))
		return NULL;
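	/*
	 * The reference was taken speculatively: the folio could have been
	 * freed and reused under us, so recheck that the page still belongs
	 * to this folio and that the folio is still on an LRU list.
	 */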
	if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) {
		folio_put(folio);
		folio = NULL;
	}
	return folio;
}

static bool page_idle_clear_pte_refs_one(struct folio *folio,
					struct vm_area_struct *vma,
					unsigned long addr, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
	bool referenced = false;

	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte) {
			/*
			 * For a PTE-mapped THP, if any subpage has been
			 * referenced, the whole THP is considered referenced.
			 *
			 * PFN swap PTEs, such as device-exclusive ones, that
			 * actually map pages are "old" from a CPU perspective.
			 * The MMU notifier takes care of any device aspects.
			 */
			if (likely(pte_present(ptep_get(pvmw.pte))))
				referenced |= ptep_test_and_clear_young(vma, addr, pvmw.pte);
			referenced |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + PAGE_SIZE);
		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
			if (pmdp_clear_young_notify(vma, addr, pvmw.pmd))
				referenced = true;
		} else {
			/* unexpected pmd-mapped page? */
			WARN_ON_ONCE(1);
		}
	}

	if (referenced) {
		folio_clear_idle(folio);
		/*
		 * We cleared the referenced bit in a mapping to this page. To
		 * avoid interference with page reclaim, mark it young so that
		 * folio_referenced() will return > 0.
		 */
		folio_set_young(folio);
	}
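	/* Returning true tells rmap_walk() to continue with the next vma. */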
	return true;
}

static void page_idle_clear_pte_refs(struct folio *folio)
{
	/*
	 * Since rwc.try_lock is unused, rwc is effectively immutable, so we
	 * can make it static to save some cycles and stack.
	 */
	static struct rmap_walk_control rwc = {
		.rmap_one = page_idle_clear_pte_refs_one,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!folio_mapped(folio) || !folio_raw_mapping(folio))
		return;

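	/* rmap_walk() requires the folio lock for file-backed and KSM folios. */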
	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		return;

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);
}

static ssize_t page_idle_bitmap_read(struct file *file, struct kobject *kobj,
				     const struct bin_attribute *attr, char *buf,
				     loff_t pos, size_t count)
{
	u64 *out = (u64 *)buf;
	struct folio *folio;
	unsigned long pfn, end_pfn;
	int bit;

	if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE)
		return -EINVAL;

	pfn = pos * BITS_PER_BYTE;
	if (pfn >= max_pfn)
		return 0;

	end_pfn = pfn + count * BITS_PER_BYTE;
	if (end_pfn > max_pfn)
		end_pfn = max_pfn;

	for (; pfn < end_pfn; pfn++) {
		bit = pfn % BITMAP_CHUNK_BITS;
		if (!bit)
			*out = 0ULL;
		folio = page_idle_get_folio(pfn);
		if (folio) {
			if (folio_test_idle(folio)) {
				/*
				 * The page might have been referenced via a
				 * pte, in which case it is not idle. Clear
				 * refs and recheck.
				 */
				page_idle_clear_pte_refs(folio);
				if (folio_test_idle(folio))
					*out |= 1ULL << bit;
			}
			folio_put(folio);
		}
		if (bit == BITMAP_CHUNK_BITS - 1)
			out++;
		cond_resched();
	}
	return (char *)out - buf;
}
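
/*
 * Example (hypothetical userspace sketch, not part of this file): to test
 * whether a given pfn is idle, read the u64 chunk covering it from
 * /sys/kernel/mm/page_idle/bitmap and check the corresponding bit. Per the
 * checks above, reads must be multiples of 8 bytes at 8-byte offsets.
 *
 *	int fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDONLY);
 *	uint64_t chunk;
 *
 *	if (pread(fd, &chunk, sizeof(chunk), (pfn / 64) * 8) == sizeof(chunk))
 *		printf("pfn %lu idle: %d\n", pfn,
 *		       (int)((chunk >> (pfn % 64)) & 1));
 *	close(fd);
 */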

static ssize_t page_idle_bitmap_write(struct file *file, struct kobject *kobj,
				      const struct bin_attribute *attr, char *buf,
				      loff_t pos, size_t count)
{
	const u64 *in = (u64 *)buf;
	struct folio *folio;
	unsigned long pfn, end_pfn;
	int bit;

	if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE)
		return -EINVAL;

	pfn = pos * BITS_PER_BYTE;
	if (pfn >= max_pfn)
		return -ENXIO;

	end_pfn = pfn + count * BITS_PER_BYTE;
	if (end_pfn > max_pfn)
		end_pfn = max_pfn;

	for (; pfn < end_pfn; pfn++) {
		bit = pfn % BITMAP_CHUNK_BITS;
		if ((*in >> bit) & 1) {
			folio = page_idle_get_folio(pfn);
			if (folio) {
				page_idle_clear_pte_refs(folio);
				folio_set_idle(folio);
				folio_put(folio);
			}
		}
		if (bit == BITMAP_CHUNK_BITS - 1)
			in++;
		cond_resched();
	}
	return (char *)in - buf;
}
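
/*
 * Example (hypothetical userspace sketch): the usual workflow is to set the
 * idle bits for the pfns of interest, let the workload run, and then read
 * the bitmap back; bits that are still set identify pages that were not
 * accessed in the meantime.
 *
 *	uint64_t all = ~0ULL, chunk;
 *	int fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
 *
 *	pwrite(fd, &all, sizeof(all), (pfn / 64) * 8);
 *	// ... let the workload run ...
 *	pread(fd, &chunk, sizeof(chunk), (pfn / 64) * 8);
 */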
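
/*
 * These attributes live under mm_kobj, so the bitmap is exposed to
 * userspace as /sys/kernel/mm/page_idle/bitmap.
 */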
static const struct bin_attribute page_idle_bitmap_attr =
		__BIN_ATTR(bitmap, 0600,
			   page_idle_bitmap_read, page_idle_bitmap_write, 0);

static const struct bin_attribute *const page_idle_bin_attrs[] = {
	&page_idle_bitmap_attr,
	NULL,
};

static const struct attribute_group page_idle_attr_group = {
	.bin_attrs_new = page_idle_bin_attrs,
	.name = "page_idle",
};

static int __init page_idle_init(void)
{
	int err;

	err = sysfs_create_group(mm_kobj, &page_idle_attr_group);
	if (err) {
		pr_err("page_idle: register sysfs failed\n");
		return err;
	}
	return 0;
}
subsys_initcall(page_idle_init);