// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains common routines for dealing with the freeing of
 * page tables, along with common page table handling code.
 *
 * Derived from arch/powerpc/mm/tlb_64.c:
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 * Rework for PPC64 port.
 */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/page_table_check.h>
#include <linux/hugetlb.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/hugetlb.h>
#include <asm/pte-walk.h>

#ifdef CONFIG_PPC64
#define PGD_ALIGN (sizeof(pgd_t) * MAX_PTRS_PER_PGD)
#else
#define PGD_ALIGN PAGE_SIZE
#endif

pgd_t swapper_pg_dir[MAX_PTRS_PER_PGD] __section(".bss..page_aligned") __aligned(PGD_ALIGN);

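/* True when the current trap is an instruction storage (0x400) fault. */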
static inline int is_exec_fault(void)
{
	return current->thread.regs && TRAP(current->thread.regs) == 0x400;
}

/* We only try to do i/d cache coherency on stuff that looks like
 * reasonably "normal" PTEs. We currently require a PTE to be present,
 * and we avoid _PAGE_SPECIAL and cache-inhibited PTEs. We also only do
 * that on userspace PTEs.
 */
static inline int pte_looks_normal(pte_t pte, unsigned long addr)
{
	if (pte_present(pte) && !pte_special(pte)) {
		if (pte_ci(pte))
			return 0;
		if (!is_kernel_addr(addr))
			return 1;
	}
	return 0;
}

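/*
 * Grab the folio backing a PTE, or NULL if the pfn is invalid or the
 * page is reserved and should be left alone.
 */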
static struct folio *maybe_pte_to_folio(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page;

	if (unlikely(!pfn_valid(pfn)))
		return NULL;
	page = pfn_to_page(pfn);
	if (PageReserved(page))
		return NULL;
	return page_folio(page);
}

#ifdef CONFIG_PPC_BOOK3S

/* Server-style MMU handles coherency when hashing if HW exec permission
 * is supported per page (currently 64-bit only). If not, then we always
 * flush the cache for valid PTEs in set_pte. Embedded CPUs without HW exec
 * support fall into the same category.
 */

static pte_t set_pte_filter_hash(pte_t pte, unsigned long addr)
{
	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
	if (pte_looks_normal(pte, addr) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
					     cpu_has_feature(CPU_FTR_NOEXECUTE))) {
		struct folio *folio = maybe_pte_to_folio(pte);

		if (!folio)
			return pte;
		if (!test_bit(PG_dcache_clean, &folio->flags.f)) {
			flush_dcache_icache_folio(folio);
			set_bit(PG_dcache_clean, &folio->flags.f);
		}
	}
	return pte;
}

#else /* CONFIG_PPC_BOOK3S */

static pte_t set_pte_filter_hash(pte_t pte, unsigned long addr) { return pte; }

#endif /* CONFIG_PPC_BOOK3S */

/* Embedded-type MMU with HW exec support. This is a bit more complicated
 * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC, so
 * instead we "filter out" the exec permission for non-clean pages.
 *
 * This is also called once per folio, so only work with folio->flags here.
 */
static inline pte_t set_pte_filter(pte_t pte, unsigned long addr)
{
	struct folio *folio;

	if (radix_enabled())
		return pte;

	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return set_pte_filter_hash(pte, addr);

	/* No exec permission in the first place, move on */
	if (!pte_exec(pte) || !pte_looks_normal(pte, addr))
		return pte;

	/* If you set _PAGE_EXEC on weird pages you're on your own */
	folio = maybe_pte_to_folio(pte);
	if (unlikely(!folio))
		return pte;

	/* If the page is clean, we move on */
	if (test_bit(PG_dcache_clean, &folio->flags.f))
		return pte;

	/* If it's an exec fault, we flush the cache and make it clean */
	if (is_exec_fault()) {
		flush_dcache_icache_folio(folio);
		set_bit(PG_dcache_clean, &folio->flags.f);
		return pte;
	}

	/* Else, we filter out _PAGE_EXEC */
	return pte_exprotect(pte);
}

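/*
 * Filter applied by ptep_set_access_flags() and huge_ptep_set_access_flags():
 * on exec faults, recover a lost _PAGE_EXEC, making the page I$/D$ coherent
 * first if needed.
 */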
static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
				     int dirty)
{
	struct folio *folio;

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		return pte;

	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return pte;

	/* So here, we only care about exec faults, as we use them
	 * to recover lost _PAGE_EXEC and perform I$/D$ coherency
	 * if necessary. Also, if _PAGE_EXEC is already set, same deal:
	 * we just bail out.
	 */
	if (dirty || pte_exec(pte) || !is_exec_fault())
		return pte;

#ifdef CONFIG_DEBUG_VM
	/* So this is an exec fault and _PAGE_EXEC is not set. If it was
	 * an error, we would have bailed out earlier in do_page_fault(),
	 * but let's make sure of it.
	 */
	if (WARN_ON(!(vma->vm_flags & VM_EXEC)))
		return pte;
#endif /* CONFIG_DEBUG_VM */

	/* If you set _PAGE_EXEC on weird pages you're on your own */
	folio = maybe_pte_to_folio(pte);
	if (unlikely(!folio))
		goto bail;

	/* If the page is already clean, we move on */
	if (test_bit(PG_dcache_clean, &folio->flags.f))
		goto bail;

	/* Clean the page and set PG_dcache_clean */
	flush_dcache_icache_folio(folio);
	set_bit(PG_dcache_clean, &folio->flags.f);

bail:
	return pte_mkexec(pte);
}

/*
 * set_ptes stores a range of linux PTEs into the linux page table.
 */
void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
	      pte_t pte, unsigned int nr)
{
	/* Note: mm->context.id might not yet have been assigned as
	 * this context might not have been activated yet when this
	 * is called. Filter the pte value and use the filtered value
	 * to set up all the ptes in the range.
	 */
	pte = set_pte_filter(pte, addr);

	/*
	 * We don't need to call arch_enter/leave_lazy_mmu_mode()
	 * because we expect set_ptes to be used only on PTEs that are
	 * not present and not hw-valid. Hence there is no translation
	 * cache flush involved that needs to be batched.
	 */

	page_table_check_ptes_set(mm, addr, ptep, pte, nr);

	for (;;) {
		/*
		 * Make sure hardware valid bit is not set. We don't do
		 * tlb flush for this update.
		 */
		VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));

		/* Perform the setting of the PTE */
		__set_pte_at(mm, addr, ptep, pte, 0);
		if (--nr == 0)
			break;
		ptep++;
		addr += PAGE_SIZE;
		pte = pte_next_pfn(pte);
	}
}

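/*
 * Set a single PTE without the page_table_check bookkeeping done by
 * set_ptes(). The value still goes through set_pte_filter().
 */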
void set_pte_at_unchecked(struct mm_struct *mm, unsigned long addr,
			  pte_t *ptep, pte_t pte)
{
	VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));
	pte = set_pte_filter(pte, addr);
	__set_pte_at(mm, addr, ptep, pte, 0);
}

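/* Tear down the kernel mapping of a single page and flush its TLB entry. */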
void unmap_kernel_page(unsigned long va)
{
	pmd_t *pmdp = pmd_off_k(va);
	pte_t *ptep = pte_offset_kernel(pmdp, va);

	pte_clear(&init_mm, va, ptep);
	flush_tlb_kernel_range(va, va + PAGE_SIZE);
}

/*
 * This is called when relaxing access to a PTE. It's also called in the page
 * fault path when we don't hit any of the major fault cases, i.e. a minor
 * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... The generic code will have
 * handled those two for us; we additionally deal with missing execute
 * permission here on some processors.
 */
int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pte_t *ptep, pte_t entry, int dirty)
{
	int changed;

	entry = set_access_flags_filter(entry, vma, dirty);
	changed = !pte_same(*(ptep), entry);
	if (changed) {
		assert_pte_locked(vma->vm_mm, address);
		__ptep_set_access_flags(vma, ptep, entry,
					address, mmu_virtual_psize);
	}
	return changed;
}

#ifdef CONFIG_HUGETLB_PAGE
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
	/*
	 * The "return 1" forces a call of update_mmu_cache, which will write a
	 * TLB entry. Without this, platforms that don't do a write of the TLB
	 * entry in the TLB miss handler asm will fault ad infinitum.
	 */
	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	return 1;
#else
	int changed, psize;

	pte = set_access_flags_filter(pte, vma, dirty);
	changed = !pte_same(*(ptep), pte);
	if (changed) {

#ifdef CONFIG_PPC_BOOK3S_64
		struct hstate *h = hstate_vma(vma);

		psize = hstate_get_psize(h);
#ifdef CONFIG_DEBUG_VM
		assert_spin_locked(huge_pte_lockptr(h, vma->vm_mm, ptep));
#endif

#else
		/*
		 * Not used on non-book3s64 platforms.
		 * 8xx compares it with mmu_virtual_psize to
		 * know if it is a huge page or not.
		 */
		psize = MMU_PAGE_COUNT;
#endif
		__ptep_set_access_flags(vma, ptep, pte, addr, psize);
	}
	return changed;
#endif
}

#if defined(CONFIG_PPC_8xx)

#if defined(CONFIG_SPLIT_PTE_PTLOCKS) || defined(CONFIG_SPLIT_PMD_PTLOCKS)
/* We need the same lock to protect the PMD table and the two PTE tables. */
#error "8M hugetlb folios are incompatible with split page table locks"
#endif

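/*
 * Fill every 4K PTE cell covered by one huge mapping, advancing the
 * encoded address by 4K per cell.
 */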
static void __set_huge_pte_at(pmd_t *pmd, pte_t *ptep, pte_basic_t val)
{
	pte_basic_t *entry = (pte_basic_t *)ptep;
	int num, i;

	/*
	 * Make sure hardware valid bit is not set. We don't do
	 * tlb flush for this update.
	 */
	VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));

	num = number_of_cells_per_pte(pmd, val, 1);

	for (i = 0; i < num; i++, entry++, val += SZ_4K)
		*entry = val;
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		     pte_t pte, unsigned long sz)
{
	pmd_t *pmdp = pmd_off(mm, addr);

	pte = set_pte_filter(pte, addr);

	if (sz == SZ_8M) { /* Flag both PMD entries as 8M and fill both page tables */
		*pmdp = __pmd(pmd_val(*pmdp) | _PMD_PAGE_8M);
		*(pmdp + 1) = __pmd(pmd_val(*(pmdp + 1)) | _PMD_PAGE_8M);

		__set_huge_pte_at(pmdp, pte_offset_kernel(pmdp, 0), pte_val(pte));
		__set_huge_pte_at(pmdp, pte_offset_kernel(pmdp + 1, 0), pte_val(pte) + SZ_4M);
	} else {
		__set_huge_pte_at(pmdp, ptep, pte_val(pte));
	}
}
#else
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		     pte_t pte, unsigned long sz)
{
	unsigned long pdsize;
	int i;

	pte = set_pte_filter(pte, addr);

	/*
	 * Make sure hardware valid bit is not set. We don't do
	 * tlb flush for this update.
	 */
	VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));

	if (sz < PMD_SIZE)
		pdsize = PAGE_SIZE;
	else if (sz < PUD_SIZE)
		pdsize = PMD_SIZE;
	else if (sz < P4D_SIZE)
		pdsize = PUD_SIZE;
	else if (sz < PGDIR_SIZE)
		pdsize = P4D_SIZE;
	else
		pdsize = PGDIR_SIZE;

	for (i = 0; i < sz / pdsize; i++, ptep++, addr += pdsize) {
		__set_pte_at(mm, addr, ptep, pte, 0);
		pte = __pte(pte_val(pte) + ((unsigned long long)pdsize / PAGE_SIZE << PFN_PTE_SHIFT));
	}
}
#endif
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_DEBUG_VM
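/* Sanity check that the PTE lock covering @addr in @mm is currently held. */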
void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	if (mm == &init_mm)
		return;
	pgd = mm->pgd + pgd_index(addr);
	BUG_ON(pgd_none(*pgd));
	p4d = p4d_offset(pgd, addr);
	BUG_ON(p4d_none(*p4d));
	pud = pud_offset(p4d, addr);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, addr);
	/*
	 * For khugepaged to collapse normal pages into a hugepage, it first
	 * sets the pmd to none to force page fault/gup to take mmap_lock.
	 * After the pmd is set to none, we do a pte_clear, which runs this
	 * assertion, so if we find the pmd none, return.
	 */
	if (pmd_none(*pmd))
		return;
	pte = pte_offset_map_ro_nolock(mm, pmd, addr, &ptl);
	BUG_ON(!pte);
	assert_spin_locked(ptl);
	pte_unmap(pte);
}
#endif /* CONFIG_DEBUG_VM */

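/* Translate an address in the vmalloc region to its physical address. */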
unsigned long vmalloc_to_phys(void *va)
{
	unsigned long pfn = vmalloc_to_pfn(va);

	BUG_ON(!pfn);
	return __pa(pfn_to_kaddr(pfn)) + offset_in_page(va);
}
EXPORT_SYMBOL_GPL(vmalloc_to_phys);

/*
 * We have 3 cases for pgds and pmds:
 * (1) invalid (all zeroes)
 * (2) pointer to next table, as normal; bottom 6 bits == 0
 * (3) leaf pte for huge page, _PAGE_PTE set
 *
 * So long as we atomically load page table pointers we are safe against
 * teardown, and we can follow the address down to the page and take a ref
 * on it. This function needs to be called with interrupts disabled. We use
 * this variant when we have MSR[EE] = 0 but paca->irq_soft_mask = IRQS_ENABLED.
 */
pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
			bool *is_thp, unsigned *hpage_shift)
{
	pgd_t *pgdp;
#ifdef CONFIG_PPC64
	p4d_t p4d, *p4dp;
	pud_t pud, *pudp;
#endif
	pmd_t pmd, *pmdp;
	pte_t *ret_pte;
	unsigned pdshift;

	if (hpage_shift)
		*hpage_shift = 0;

	if (is_thp)
		*is_thp = false;

	/*
	 * Always operate on the local stack value. This makes sure the
	 * value doesn't get updated by a parallel THP split/collapse,
	 * page fault or page unmap. The returned pte_t * is still not
	 * stable, so it should be checked there for the above conditions.
	 * The top level is an exception because it is folded into p4d.
	 *
	 * On PPC32, P4D/PUD/PMD are folded into PGD so go straight to the
	 * PMD level.
	 */
	pgdp = pgdir + pgd_index(ea);
#ifdef CONFIG_PPC64
	p4dp = p4d_offset(pgdp, ea);
	p4d = READ_ONCE(*p4dp);
	pdshift = P4D_SHIFT;

	if (p4d_none(p4d))
		return NULL;

	if (p4d_leaf(p4d)) {
		ret_pte = (pte_t *)p4dp;
		goto out;
	}

	/*
	 * Even if we end up with an unmap, the pgtable will not
	 * be freed, because we do an RCU free and here we have
	 * interrupts disabled.
	 */
	pdshift = PUD_SHIFT;
	pudp = pud_offset(&p4d, ea);
	pud = READ_ONCE(*pudp);

	if (pud_none(pud))
		return NULL;

	if (pud_leaf(pud)) {
		ret_pte = (pte_t *)pudp;
		goto out;
	}

	pmdp = pmd_offset(&pud, ea);
#else
	pmdp = pmd_offset(pud_offset(p4d_offset(pgdp, ea), ea), ea);
#endif
	pdshift = PMD_SHIFT;
	pmd = READ_ONCE(*pmdp);

	/*
	 * A hugepage collapse is captured by this condition, see
	 * pmdp_collapse_flush.
	 */
	if (pmd_none(pmd))
		return NULL;

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * A hugepage split is captured by this condition, see
	 * pmdp_invalidate.
	 *
	 * Huge page modification can be caught here too.
	 */
	if (pmd_is_serializing(pmd))
		return NULL;
#endif

	if (pmd_trans_huge(pmd)) {
		if (is_thp)
			*is_thp = true;
		ret_pte = (pte_t *)pmdp;
		goto out;
	}

	if (pmd_leaf(pmd)) {
		ret_pte = (pte_t *)pmdp;
		goto out;
	}

	return pte_offset_kernel(&pmd, ea);

out:
	if (hpage_shift)
		*hpage_shift = pdshift;
	return ret_pte;
}
EXPORT_SYMBOL_GPL(__find_linux_pte);

/* Note due to the way vm flags are laid out, the bits are XWR */
const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_READONLY,
	[VM_WRITE]					= PAGE_COPY,
	[VM_WRITE | VM_READ]				= PAGE_COPY,
	[VM_EXEC]					= PAGE_EXECONLY_X,
	[VM_EXEC | VM_READ]				= PAGE_READONLY_X,
	[VM_EXEC | VM_WRITE]				= PAGE_COPY_X,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY_X,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_READONLY,
	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
	[VM_SHARED | VM_EXEC]				= PAGE_EXECONLY_X,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READONLY_X,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED_X,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED_X
};

#ifndef CONFIG_PPC_BOOK3S_64
DECLARE_VM_GET_PAGE_PROT
#endif