// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/pkeys.h>
#include <linux/debugfs.h>
#include <linux/proc_fs.h>
#include <linux/page_table_check.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/trace.h>
#include <asm/powernv.h>
#include <asm/firmware.h>
#include <asm/ultravisor.h>
#include <asm/kexec.h>

#include <mm/mmu_decl.h>
#include <trace/events/thp.h>

struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
EXPORT_SYMBOL_GPL(mmu_psize_defs);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
int mmu_vmemmap_psize = MMU_PAGE_4K;
#endif

unsigned long __pmd_frag_nr;
EXPORT_SYMBOL(__pmd_frag_nr);
unsigned long __pmd_frag_size_shift;
EXPORT_SYMBOL(__pmd_frag_size_shift);

#ifdef CONFIG_KFENCE
extern bool kfence_early_init;
static int __init parse_kfence_early_init(char *arg)
{
	int val;

	if (get_option(&arg, &val))
		kfence_early_init = !!val;
	return 0;
}
early_param("kfence.sample_interval", parse_kfence_early_init);
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is called when relaxing access to a hugepage. It's also called in
 * the page fault path when we don't hit any of the major fault cases, ie,
 * a minor update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... The generic code
 * will have handled those two for us; we additionally deal with missing
 * execute permission here on some processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp, pmd_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp));
	assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp));
#endif
	changed = !pmd_same(*(pmdp), entry);
	if (changed) {
		/*
		 * We can use MMU_PAGE_2M here, because only the radix
		 * path looks at the psize.
		 */
		__ptep_set_access_flags(vma, pmdp_ptep(pmdp),
					pmd_pte(entry), address, MMU_PAGE_2M);
	}
	return changed;
}

int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pud_t *pudp, pud_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	assert_spin_locked(pud_lockptr(vma->vm_mm, pudp));
#endif
	changed = !pud_same(*(pudp), entry);
	if (changed) {
		/*
		 * We can use MMU_PAGE_1G here, because only the radix
		 * path looks at the psize.
		 */
		__ptep_set_access_flags(vma, pudp_ptep(pudp),
					pud_pte(entry), address, MMU_PAGE_1G);
	}
	return changed;
}

bool pmdp_test_and_clear_young(struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}

bool pudp_test_and_clear_young(struct vm_area_struct *vma,
			       unsigned long address, pud_t *pudp)
{
	return __pudp_test_and_clear_young(vma->vm_mm, address, pudp);
}

/*
 * Set a new huge pmd. We should not be called to update an existing
 * pmd entry; that should go via pmd_hugepage_update.
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * Make sure the hardware valid bit is not set. We don't do
	 * a tlb flush for this update.
	 */

	WARN_ON(pte_hw_valid(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
	WARN_ON(!(pmd_leaf(pmd)));
#endif
	trace_hugepage_set_pmd(addr, pmd_val(pmd));
	page_table_check_pmd_set(mm, addr, pmdp, pmd);
	return set_pte_at_unchecked(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}

void set_pud_at(struct mm_struct *mm, unsigned long addr,
		pud_t *pudp, pud_t pud)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * Make sure the hardware valid bit is not set. We don't do
	 * a tlb flush for this update.
	 */

	WARN_ON(pte_hw_valid(pud_pte(*pudp)));
	assert_spin_locked(pud_lockptr(mm, pudp));
	WARN_ON(!(pud_leaf(pud)));
#endif
	trace_hugepage_set_pud(addr, pud_val(pud));
	page_table_check_pud_set(mm, addr, pudp, pud);
	return set_pte_at_unchecked(mm, addr, pudp_ptep(pudp), pud_pte(pud));
}

/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepage pte to a regular pmd entry.
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	pmd_t old_pmd;

	VM_WARN_ON_ONCE(!pmd_present(*pmdp));
	old_pmd = __pmd(pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID));
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	page_table_check_pmd_clear(vma->vm_mm, address, old_pmd);

	return old_pmd;
}

pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pud_t *pudp)
{
	pud_t old_pud;

	VM_WARN_ON_ONCE(!pud_present(*pudp));
	old_pud = __pud(pud_hugepage_update(vma->vm_mm, address, pudp, _PAGE_PRESENT, _PAGE_INVALID));
	flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
	page_table_check_pud_clear(vma->vm_mm, address, old_pud);

	return old_pud;
}

pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmdp, int full)
{
	pmd_t pmd;
	bool was_present = pmd_present(*pmdp);

	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
	VM_BUG_ON(was_present && !pmd_trans_huge(*pmdp));
	/*
	 * See pmdp_huge_get_and_clear() for the non-present pmd case.
	 */
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
	/*
	 * If this is not a fullmm flush, a parallel page fault could
	 * convert this PMD entry to a regular level 0 PTE. Make sure we
	 * flush the TLB in that case. No TLB flush is needed for the
	 * non-present case.
	 */
	if (was_present && !full)
		flush_pmd_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
	return pmd;
}

pud_t pudp_huge_get_and_clear_full(struct vm_area_struct *vma,
				   unsigned long addr, pud_t *pudp, int full)
{
	pud_t pud;

	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
	VM_BUG_ON(!pud_present(*pudp));
	pud = pudp_huge_get_and_clear(vma->vm_mm, addr, pudp);
	/*
	 * If this is not a fullmm flush, a parallel page fault could
	 * convert this PUD entry to a regular level 0 PTE. Make sure we
	 * flush the TLB in that case.
	 */
	if (!full)
		flush_pud_tlb_range(vma, addr, addr + HPAGE_PUD_SIZE);
	return pud;
}

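/*
 * Helpers that OR the architecture protection bits from a pgprot_t into a
 * huge pmd/pud value.
 */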
static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

static pud_t pud_set_protbits(pud_t pud, pgprot_t pgprot)
{
	return __pud(pud_val(pud) | pgprot_val(pgprot));
}

/*
 * At some point we should be able to get rid of
 * pmd_mkhuge() and mk_huge_pmd() when we update all the
 * other archs to mark the pmd huge in pfn_pmd().
 */
pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long pmdv;

	pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;

	return __pmd_mkhuge(pmd_set_protbits(__pmd(pmdv), pgprot));
}

pud_t pfn_pud(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long pudv;

	pudv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;

	return __pud_mkhuge(pud_set_protbits(__pud(pudv), pgprot));
}

pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long pmdv;

	pmdv = pmd_val(pmd);
	pmdv &= _HPAGE_CHG_MASK;
	return pmd_set_protbits(__pmd(pmdv), newprot);
}

pud_t pud_modify(pud_t pud, pgprot_t newprot)
{
	unsigned long pudv;

	pudv = pud_val(pud);
	pudv &= _HPAGE_CHG_MASK;
	return pud_set_protbits(__pud(pudv), newprot);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/* For use by kexec, called with MMU off */
notrace void mmu_cleanup_all(void)
{
	if (radix_enabled())
		radix__mmu_cleanup_all();
	else if (mmu_hash_ops.hpte_clear_all)
		mmu_hash_ops.hpte_clear_all();

	reset_sprs();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int __meminit create_section_mapping(unsigned long start, unsigned long end,
				     int nid, pgprot_t prot)
{
	if (radix_enabled())
		return radix__create_section_mapping(start, end, nid, prot);

	return hash__create_section_mapping(start, end, nid, prot);
}

int __meminit remove_section_mapping(unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__remove_section_mapping(start, end);

	return hash__remove_section_mapping(start, end);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

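/*
 * Allocate the Partition Table and point the hardware (and the nest MMU)
 * at it. The low bits of the PTCR encode the table size as
 * (PATB_SIZE_SHIFT - 12).
 */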
void __init mmu_partition_table_init(void)
{
	unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
	unsigned long ptcr;

	/* Initialize the Partition Table with no entries */
	partition_tb = memblock_alloc_or_panic(patb_size, patb_size);
	ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
	set_ptcr_when_no_uv(ptcr);
	powernv_set_nmmu_ptcr(ptcr);
}

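/*
 * Flush all translations associated with an LPID: radix flush primitives
 * when the previous user of the LPID was radix, otherwise a tlbie by LPID
 * sequence for hash.
 */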
static void flush_partition(unsigned int lpid, bool radix)
{
	if (radix) {
		radix__flush_all_lpid(lpid);
		radix__flush_all_lpid_guest(lpid);
	} else {
		asm volatile("ptesync" : : : "memory");
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		/* do we need fixup here? */
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
	}
}

void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
				   unsigned long dw1, bool flush)
{
	unsigned long old = be64_to_cpu(partition_tb[lpid].patb0);

	/*
	 * When the ultravisor is enabled, the partition table is stored in
	 * secure memory and can only be accessed via an ultravisor call.
	 * However, we maintain a copy of the partition table in normal memory
	 * to allow Nest MMU translations to occur (for normal VMs).
	 *
	 * Therefore, here we always update partition_tb, regardless of whether
	 * we are running under an ultravisor or not.
	 */
	partition_tb[lpid].patb0 = cpu_to_be64(dw0);
	partition_tb[lpid].patb1 = cpu_to_be64(dw1);

	/*
	 * If the ultravisor is enabled, we do an ultravisor call to register
	 * the partition table entry (PATE), which also does a global flush of
	 * TLBs and partition table caches for the lpid. Otherwise, just do the
	 * flush. The type of flush (hash or radix) depends on what the
	 * previous use of the partition ID was, not the new use.
	 */
	if (firmware_has_feature(FW_FEATURE_ULTRAVISOR)) {
		uv_register_pate(lpid, dw0, dw1);
		pr_info("PATE registered by ultravisor: dw0 = 0x%lx, dw1 = 0x%lx\n",
			dw0, dw1);
	} else if (flush) {
		/*
		 * Boot does not need to flush, because the MMU is off and each
		 * CPU does a tlbiel_all() before being switched on, which
		 * flushes everything.
		 */
		flush_partition(lpid, (old & PATB_HR));
	}
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);

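/*
 * PMD pages are carved into PMD_FRAG_NR fragments of PMD_FRAG_SIZE bytes.
 * get_pmd_from_cache() hands out the next unused fragment of the page
 * cached in mm->context.pmd_frag, if any.
 */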
static pmd_t *get_pmd_from_cache(struct mm_struct *mm)
{
	void *pmd_frag, *ret;

	if (PMD_FRAG_NR == 1)
		return NULL;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pmd_frag;
	if (ret) {
		pmd_frag = ret + PMD_FRAG_SIZE;
		/*
		 * If we have handed out all the fragments, mark the
		 * cached page NULL.
		 */
		if (((unsigned long)pmd_frag & ~PAGE_MASK) == 0)
			pmd_frag = NULL;
		mm->context.pmd_frag = pmd_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pmd_t *)ret;
}

static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
{
	void *ret = NULL;
	struct ptdesc *ptdesc;
	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	ptdesc = pagetable_alloc(gfp, 0);
	if (!ptdesc)
		return NULL;
	if (!pagetable_pmd_ctor(mm, ptdesc)) {
		pagetable_free(ptdesc);
		return NULL;
	}

	atomic_set(&ptdesc->pt_frag_refcount, 1);

	ret = ptdesc_address(ptdesc);
	/*
	 * If we support only one fragment, just return the
	 * allocated page.
	 */
	if (PMD_FRAG_NR == 1)
		return ret;

	spin_lock(&mm->page_table_lock);
	/*
	 * If someone else has already populated mm->context.pmd_frag,
	 * return the allocated page with a single fragment refcount.
	 */
	if (likely(!mm->context.pmd_frag)) {
		atomic_set(&ptdesc->pt_frag_refcount, PMD_FRAG_NR);
		mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pmd_t *)ret;
}

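/*
 * Allocate a PMD page table: reuse a cached fragment if one is available,
 * otherwise allocate a fresh page and carve it into fragments.
 */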
pmd_t *pmd_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	pmd_t *pmd;

	pmd = get_pmd_from_cache(mm);
	if (pmd)
		return pmd;

	return __alloc_for_pmdcache(mm);
}

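/*
 * Drop a reference on the page backing a PMD fragment. The page is freed
 * once the last fragment is released; pages reserved at boot are returned
 * via free_reserved_ptdesc() instead.
 */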
void pmd_fragment_free(unsigned long *pmd)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(pmd);

	if (pagetable_is_reserved(ptdesc))
		return free_reserved_ptdesc(ptdesc);

	BUG_ON(atomic_read(&ptdesc->pt_frag_refcount) <= 0);
	if (atomic_dec_and_test(&ptdesc->pt_frag_refcount)) {
		pagetable_dtor(ptdesc);
		pagetable_free(ptdesc);
	}
}

static inline void pgtable_free(void *table, int index)
{
	switch (index) {
	case PTE_INDEX:
		pte_fragment_free(table, 0);
		break;
	case PMD_INDEX:
		pmd_fragment_free(table);
		break;
	case PUD_INDEX:
		__pud_free(table);
		break;
	/* We don't free pgd table via RCU callback */
	default:
		BUG();
	}
}

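/*
 * Defer freeing of a page table page until after the TLB has been flushed.
 * The table level (PTE/PMD/PUD index) is encoded in the low bits of the
 * pointer so that __tlb_remove_table() knows which free routine to call.
 */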
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(index > MAX_PGTABLE_INDEX_SIZE);
	pgf |= index;
	tlb_remove_table(tlb, (void *)pgf);
}

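/* Called by the mmu_gather code once it is safe to free the table. */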
void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned int index = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	return pgtable_free(table, index);
}

#ifdef CONFIG_PROC_FS
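/*
 * Number of kernel linear mapping ("direct map") pages at each supported
 * page size, reported as the DirectMap* lines in /proc/meminfo.
 */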
atomic_long_t direct_pages_count[MMU_PAGE_COUNT];

void arch_report_meminfo(struct seq_file *m)
{
	seq_printf(m, "DirectMap4k: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_4K]) << 2);
	seq_printf(m, "DirectMap64k: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_64K]) << 6);
	if (radix_enabled()) {
		seq_printf(m, "DirectMap2M: %8lu kB\n",
			   atomic_long_read(&direct_pages_count[MMU_PAGE_2M]) << 11);
		seq_printf(m, "DirectMap1G: %8lu kB\n",
			   atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
	} else {
		seq_printf(m, "DirectMap16M: %8lu kB\n",
			   atomic_long_read(&direct_pages_count[MMU_PAGE_16M]) << 14);
		seq_printf(m, "DirectMap16G: %8lu kB\n",
			   atomic_long_read(&direct_pages_count[MMU_PAGE_16G]) << 24);
	}
}
#endif /* CONFIG_PROC_FS */

pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep)
{
	unsigned long pte_val;

	/*
	 * Clear _PAGE_PRESENT so that no hardware parallel update is
	 * possible. Also keep pte_present() true so that we don't take
	 * a wrong fault.
	 */
	pte_val = pte_update(vma->vm_mm, addr, ptep, _PAGE_PRESENT, _PAGE_INVALID, 0);

	return __pte(pte_val);
}

void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep, pte_t old_pte, pte_t pte)
{
	if (radix_enabled())
		return radix__ptep_modify_prot_commit(vma, addr,
						      ptep, old_pte, pte);
	set_pte_at_unchecked(vma->vm_mm, addr, ptep, pte);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * For hash translation mode, we use the deposited table to store hash slot
 * information and it is stored at PTRS_PER_PMD offset from the related pmd
 * location. Hence a pmd move requires a deposit and withdraw.
 *
 * For radix translation with split pmd ptl, we store the deposited table in
 * the pmd page. Hence if we have a different pmd page we need to withdraw
 * during a pmd move.
 *
 * With hash we always use the deposited table, irrespective of whether the
 * mapping is anonymous. With radix we use the deposited table only for
 * anonymous mappings.
 */
int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
			   struct spinlock *old_pmd_ptl,
			   struct vm_area_struct *vma)
{
	if (radix_enabled())
		return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);

	return true;
}
#endif

/*
 * Does the CPU support tlbie?
 */
bool tlbie_capable __read_mostly = IS_ENABLED(CONFIG_PPC_RADIX_BROADCAST_TLBIE);
EXPORT_SYMBOL(tlbie_capable);

/*
 * Should tlbie be used for management of CPU TLBs, for kernel and process
 * address spaces? tlbie may still be used for nMMU accelerators, and for KVM
 * guest address spaces.
 */
bool tlbie_enabled __read_mostly = IS_ENABLED(CONFIG_PPC_RADIX_BROADCAST_TLBIE);

static int __init setup_disable_tlbie(char *str)
{
	if (!radix_enabled()) {
		pr_err("disable_tlbie: Unable to disable TLBIE with Hash MMU.\n");
		return 1;
	}

	tlbie_capable = false;
	tlbie_enabled = false;

	return 1;
}
__setup("disable_tlbie", setup_disable_tlbie);

static int __init pgtable_debugfs_setup(void)
{
	if (!tlbie_capable)
		return 0;

	/*
	 * There is no locking vs tlb flushing when changing this value.
	 * The tlb flushers will see one value or another, and use either
	 * tlbie or tlbiel with IPIs. In both cases the TLBs will be
	 * invalidated as expected.
	 */
	debugfs_create_bool("tlbie_enabled", 0600,
			    arch_debugfs_dir,
			    &tlbie_enabled);

	return 0;
}
arch_initcall(pgtable_debugfs_setup);

#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN)
/*
 * Override the generic version in mm/memremap.c.
 *
 * With hash translation, the direct-map range is mapped with just one
 * page size selected by htab_init_page_sizes(). Consult
 * mmu_psize_defs[] to determine the minimum page size alignment.
 */
unsigned long memremap_compat_align(void)
{
	if (!radix_enabled()) {
		unsigned int shift = mmu_psize_defs[mmu_linear_psize].shift;
		return max(SUBSECTION_SIZE, 1UL << shift);
	}

	return SUBSECTION_SIZE;
}
EXPORT_SYMBOL_GPL(memremap_compat_align);
#endif

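/*
 * Build the page protection for a VMA from its vm_flags: start from
 * protection_map and add the powerpc-specific SAO and protection key bits.
 */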
pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
{
	unsigned long prot;

	/* Radix supports execute-only, but protection_map maps X -> RX */
	if (!radix_enabled() && ((vm_flags & VM_ACCESS_FLAGS) == VM_EXEC))
		vm_flags |= VM_READ;

	prot = pgprot_val(protection_map[vm_flags & (VM_ACCESS_FLAGS | VM_SHARED)]);

	if (vm_flags & VM_SAO)
		prot |= _PAGE_SAO;

#ifdef CONFIG_PPC_MEM_KEYS
	prot |= vmflag_to_pte_pkey_bits(vm_flags);
#endif

	return __pgprot(prot);
}
EXPORT_SYMBOL(vm_get_page_prot);