// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright (c) 2021, Google LLC.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 */
#include <linux/kstrtox.h>
#include <linux/mm.h>
#include <linux/page_table_check.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#undef pr_fmt
#define pr_fmt(fmt) "page_table_check: " fmt

struct page_table_check {
	atomic_t anon_map_count;
	atomic_t file_map_count;
};

static bool __page_table_check_enabled __initdata =
			IS_ENABLED(CONFIG_PAGE_TABLE_CHECK_ENFORCED);

DEFINE_STATIC_KEY_TRUE(page_table_check_disabled);
EXPORT_SYMBOL(page_table_check_disabled);

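/*
 * Parse the "page_table_check" boot parameter, which can override the
 * CONFIG_PAGE_TABLE_CHECK_ENFORCED default.
 */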
static int __init early_page_table_check_param(char *buf)
{
	return kstrtobool(buf, &__page_table_check_enabled);
}

early_param("page_table_check", early_page_table_check_param);

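/*
 * page_ext "need" callback: tell page_ext whether to allocate space for
 * struct page_table_check in each page's extension data.
 */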
static bool __init need_page_table_check(void)
{
	return __page_table_check_enabled;
}

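/*
 * page_ext "init" callback: once the extension data is available, flip the
 * static key so the checking hooks become active.
 */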
static void __init init_page_table_check(void)
{
	if (!__page_table_check_enabled)
		return;
	static_branch_disable(&page_table_check_disabled);
}

struct page_ext_operations page_table_check_ops = {
	.size = sizeof(struct page_table_check),
	.need = need_page_table_check,
	.init = init_page_table_check,
	.need_shared_flags = false,
};

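/* Return the page_table_check data attached to a page_ext entry. */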
static struct page_table_check *get_page_table_check(struct page_ext *page_ext)
{
	BUG_ON(!page_ext);
	return page_ext_data(page_ext, &page_table_check_ops);
}

/*
 * An entry is removed from the page table: decrement the counters for that
 * page, verify that it is of the correct type, and that the counters do not
 * become negative.
 */
static void page_table_check_clear(unsigned long pfn, unsigned long pgcnt)
{
	struct page_ext_iter iter;
	struct page_ext *page_ext;
	struct page *page;
	bool anon;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	BUG_ON(PageSlab(page));
	anon = PageAnon(page);

	rcu_read_lock();
	for_each_page_ext(page, pgcnt, page_ext, iter) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_dec_return(&ptc->anon_map_count) < 0);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_dec_return(&ptc->file_map_count) < 0);
		}
	}
	rcu_read_unlock();
}

/*
 * A new entry is added to the page table: increment the counters for that
 * page, verify that it is of the correct type, and that it is not being
 * mapped with a different type by a different process.
 */
static void page_table_check_set(unsigned long pfn, unsigned long pgcnt,
				 bool rw)
{
	struct page_ext_iter iter;
	struct page_ext *page_ext;
	struct page *page;
	bool anon;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	BUG_ON(PageSlab(page));
	anon = PageAnon(page);

	rcu_read_lock();
	for_each_page_ext(page, pgcnt, page_ext, iter) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_inc_return(&ptc->anon_map_count) > 1 && rw);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_inc_return(&ptc->file_map_count) < 0);
		}
	}
	rcu_read_unlock();
}

/*
 * The page is on the free list or is being allocated: verify that the
 * counters are zero and crash if they are not.
 */
void __page_table_check_zero(struct page *page, unsigned int order)
{
	struct page_ext_iter iter;
	struct page_ext *page_ext;

	BUG_ON(PageSlab(page));

	rcu_read_lock();
	for_each_page_ext(page, 1 << order, page_ext, iter) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		BUG_ON(atomic_read(&ptc->anon_map_count));
		BUG_ON(atomic_read(&ptc->file_map_count));
	}
	rcu_read_unlock();
}

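/*
 * A user PTE is being cleared: drop the accounting for the page it mapped.
 * Kernel (init_mm) mappings are not tracked.
 */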
void __page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)
{
	if (&init_mm == mm)
		return;

	if (pte_user_accessible_page(pte)) {
		page_table_check_clear(pte_pfn(pte), PAGE_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pte_clear);

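/* As above, for a huge PMD entry covering PMD_SIZE worth of pages. */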
void __page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	if (pmd_user_accessible_page(pmd)) {
		page_table_check_clear(pmd_pfn(pmd), PMD_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pmd_clear);

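/* As above, for a huge PUD entry covering PUD_SIZE worth of pages. */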
void __page_table_check_pud_clear(struct mm_struct *mm, pud_t pud)
{
	if (&init_mm == mm)
		return;

	if (pud_user_accessible_page(pud)) {
		page_table_check_clear(pud_pfn(pud), PUD_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pud_clear);

/* Whether the swap entry cached writability information */
static inline bool swap_cached_writable(swp_entry_t entry)
{
	return is_writable_device_private_entry(entry) ||
	       is_writable_migration_entry(entry);
}

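/*
 * A uffd-wp protected PTE must never be writable, whether it is present or a
 * swap/migration entry that cached the write bit.
 */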
static inline void page_table_check_pte_flags(pte_t pte)
{
	if (pte_present(pte) && pte_uffd_wp(pte))
		WARN_ON_ONCE(pte_write(pte));
	else if (is_swap_pte(pte) && pte_swp_uffd_wp(pte))
		WARN_ON_ONCE(swap_cached_writable(pte_to_swp_entry(pte)));
}

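/*
 * @nr contiguous PTEs starting at @ptep are set to map @nr pages starting at
 * pte_pfn(@pte): validate the flags, release the accounting for the entries
 * being overwritten, then account for the new mapping if it is user
 * accessible.
 */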
void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte,
		unsigned int nr)
{
	unsigned int i;

	if (&init_mm == mm)
		return;

	page_table_check_pte_flags(pte);

	for (i = 0; i < nr; i++)
		__page_table_check_pte_clear(mm, ptep_get(ptep + i));
	if (pte_user_accessible_page(pte))
		page_table_check_set(pte_pfn(pte), nr, pte_write(pte));
}
EXPORT_SYMBOL(__page_table_check_ptes_set);

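/* Same flag check as page_table_check_pte_flags(), for PMD entries. */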
static inline void page_table_check_pmd_flags(pmd_t pmd)
{
	if (pmd_present(pmd) && pmd_uffd_wp(pmd))
		WARN_ON_ONCE(pmd_write(pmd));
	else if (is_swap_pmd(pmd) && pmd_swp_uffd_wp(pmd))
		WARN_ON_ONCE(swap_cached_writable(pmd_to_swp_entry(pmd)));
}

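/*
 * A huge PMD entry is installed at @pmdp: validate the flags, release the
 * accounting for the entry it replaces, then account for the new mapping if
 * it is user accessible.
 */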
void __page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	page_table_check_pmd_flags(pmd);

	__page_table_check_pmd_clear(mm, *pmdp);
	if (pmd_user_accessible_page(pmd)) {
		page_table_check_set(pmd_pfn(pmd), PMD_SIZE >> PAGE_SHIFT,
				     pmd_write(pmd));
	}
}
EXPORT_SYMBOL(__page_table_check_pmd_set);

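/* As above, for a huge PUD entry installed at @pudp. */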
void __page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp, pud_t pud)
{
	if (&init_mm == mm)
		return;

	__page_table_check_pud_clear(mm, *pudp);
	if (pud_user_accessible_page(pud)) {
		page_table_check_set(pud_pfn(pud), PUD_SIZE >> PAGE_SHIFT,
				     pud_write(pud));
	}
}
EXPORT_SYMBOL(__page_table_check_pud_set);

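/*
 * The page table mapped by a non-leaf @pmd is being torn down: walk all of
 * its PTEs and release the accounting for each user-accessible entry.
 */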
void __page_table_check_pte_clear_range(struct mm_struct *mm,
					unsigned long addr,
					pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	if (!pmd_bad(pmd) && !pmd_leaf(pmd)) {
		pte_t *ptep = pte_offset_map(&pmd, addr);
		unsigned long i;

		if (WARN_ON(!ptep))
			return;
		for (i = 0; i < PTRS_PER_PTE; i++) {
			__page_table_check_pte_clear(mm, ptep_get(ptep));
			addr += PAGE_SIZE;
			ptep++;
		}
		pte_unmap(ptep - PTRS_PER_PTE);
	}
}