// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/pkeys.h>
#include <linux/debugfs.h>
#include <linux/proc_fs.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/trace.h>
#include <asm/powernv.h>
#include <asm/firmware.h>
#include <asm/ultravisor.h>
#include <asm/kexec.h>

#include <mm/mmu_decl.h>
#include <trace/events/thp.h>

#include "internal.h"

struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
EXPORT_SYMBOL_GPL(mmu_psize_defs);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
int mmu_vmemmap_psize = MMU_PAGE_4K;
#endif

unsigned long __pmd_frag_nr;
EXPORT_SYMBOL(__pmd_frag_nr);
unsigned long __pmd_frag_size_shift;
EXPORT_SYMBOL(__pmd_frag_size_shift);

#ifdef CONFIG_KFENCE
extern bool kfence_early_init;
static int __init parse_kfence_early_init(char *arg)
{
        int val;

        if (get_option(&arg, &val))
                kfence_early_init = !!val;
        return 0;
}
early_param("kfence.sample_interval", parse_kfence_early_init);
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is called when relaxing access to a hugepage. It's also called in the
 * page fault path when we don't hit any of the major fault cases, i.e. a
 * minor update of _PAGE_ACCESSED, _PAGE_DIRTY, etc. The generic code will
 * have handled those two for us; we additionally deal with missing execute
 * permission here on some processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
                          pmd_t *pmdp, pmd_t entry, int dirty)
{
        int changed;
#ifdef CONFIG_DEBUG_VM
        WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
        assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp));
#endif
        changed = !pmd_same(*(pmdp), entry);
        if (changed) {
                /*
                 * We can use MMU_PAGE_2M here, because only the radix
                 * path looks at the psize.
                 */
                __ptep_set_access_flags(vma, pmdp_ptep(pmdp),
                                        pmd_pte(entry), address, MMU_PAGE_2M);
        }
        return changed;
}
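
/*
 * Illustrative note (not part of the original file): the generic THP fault
 * path in mm/huge_memory.c is the typical caller, roughly along these lines.
 * This is a minimal sketch only; names such as haddr/write follow the generic
 * code, not anything defined here:
 *
 *      entry = pmd_mkyoung(*pmdp);
 *      if (write)
 *              entry = pmd_mkdirty(entry);
 *      if (pmdp_set_access_flags(vma, haddr, pmdp, entry, write))
 *              update_mmu_cache_pmd(vma, address, pmdp);
 *
 * Only when the bits actually changed do we pay for the update in
 * __ptep_set_access_flags().
 */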

int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
                          pud_t *pudp, pud_t entry, int dirty)
{
        int changed;
#ifdef CONFIG_DEBUG_VM
        WARN_ON(!pud_devmap(*pudp));
        assert_spin_locked(pud_lockptr(vma->vm_mm, pudp));
#endif
        changed = !pud_same(*(pudp), entry);
        if (changed) {
                /*
                 * We can use MMU_PAGE_1G here, because only the radix
                 * path looks at the psize.
                 */
                __ptep_set_access_flags(vma, pudp_ptep(pudp),
                                        pud_pte(entry), address, MMU_PAGE_1G);
        }
        return changed;
}

int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long address, pmd_t *pmdp)
{
        return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}

int pudp_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long address, pud_t *pudp)
{
        return __pudp_test_and_clear_young(vma->vm_mm, address, pudp);
}

/*
 * Set a new huge pmd. We should not be called for updating
 * an existing pmd entry. That should go via pmd_hugepage_update.
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
        /*
         * Make sure the hardware valid bit is not set. We don't do
         * a tlb flush for this update.
         */

        WARN_ON(pte_hw_valid(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
        assert_spin_locked(pmd_lockptr(mm, pmdp));
        WARN_ON(!(pmd_leaf(pmd)));
#endif
        trace_hugepage_set_pmd(addr, pmd_val(pmd));
        return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}

void set_pud_at(struct mm_struct *mm, unsigned long addr,
                pud_t *pudp, pud_t pud)
{
#ifdef CONFIG_DEBUG_VM
        /*
         * Make sure the hardware valid bit is not set. We don't do
         * a tlb flush for this update.
         */

        WARN_ON(pte_hw_valid(pud_pte(*pudp)));
        assert_spin_locked(pud_lockptr(mm, pudp));
        WARN_ON(!(pud_leaf(pud)));
#endif
        trace_hugepage_set_pud(addr, pud_val(pud));
        return set_pte_at(mm, addr, pudp_ptep(pudp), pud_pte(pud));
}

static void do_serialize(void *arg)
{
        /* We've taken the IPI, so try to trim the mask while here */
        if (radix_enabled()) {
                struct mm_struct *mm = arg;
                exit_lazy_flush_tlb(mm, false);
        }
}

/*
 * Serialize against __find_linux_pte(), which does a lock-less
 * lookup in page tables with local interrupts disabled. For huge pages
 * it casts pmd_t to pte_t. Since the format of pte_t is different from
 * pmd_t, we want to prevent a transition from a pmd pointing to a page table
 * to a pmd pointing to a huge page (and back) while interrupts are disabled.
 * We clear the pmd to possibly replace it with a page table pointer in
 * different code paths. So make sure we wait for the parallel
 * __find_linux_pte() to finish.
 */
void serialize_against_pte_lookup(struct mm_struct *mm)
{
        smp_mb();
        smp_call_function_many(mm_cpumask(mm), do_serialize, mm, 1);
}
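
/*
 * Illustrative sketch (not part of the original file): a caller that demotes
 * a huge pmd back to a page-table pointer typically does something like the
 * following; the exact helpers vary by caller:
 *
 *      pmd = *pmdp;
 *      pmd_clear(pmdp);                           // stop new hardware walks
 *      serialize_against_pte_lookup(vma->vm_mm);  // wait out lock-less walkers
 *      pmd_populate(mm, pmdp, pgtable);           // now safe to install the table
 *
 * The IPI issued by smp_call_function_many() can only be taken once a CPU has
 * re-enabled interrupts, i.e. once it has left the __find_linux_pte()
 * interrupts-disabled window, which is what makes this wait sufficient.
 */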

/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to a regular pmd entry.
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                      pmd_t *pmdp)
{
        unsigned long old_pmd;

        VM_WARN_ON_ONCE(!pmd_present(*pmdp));
        old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);
        flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return __pmd(old_pmd);
}

pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address,
                      pud_t *pudp)
{
        unsigned long old_pud;

        VM_WARN_ON_ONCE(!pud_present(*pudp));
        old_pud = pud_hugepage_update(vma->vm_mm, address, pudp, _PAGE_PRESENT, _PAGE_INVALID);
        flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
        return __pud(old_pud);
}

pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
                                   unsigned long addr, pmd_t *pmdp, int full)
{
        pmd_t pmd;

        VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
        VM_BUG_ON((pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
                   !pmd_devmap(*pmdp)) || !pmd_present(*pmdp));
        pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
        /*
         * If it is not a fullmm flush, then we can possibly end up converting
         * this PMD entry to a regular level 0 PTE by a parallel page fault.
         * Make sure we flush the tlb in this case.
         */
        if (!full)
                flush_pmd_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
        return pmd;
}

pud_t pudp_huge_get_and_clear_full(struct vm_area_struct *vma,
                                   unsigned long addr, pud_t *pudp, int full)
{
        pud_t pud;

        VM_BUG_ON(addr & ~HPAGE_PUD_MASK);
        VM_BUG_ON((pud_present(*pudp) && !pud_devmap(*pudp)) ||
                  !pud_present(*pudp));
        pud = pudp_huge_get_and_clear(vma->vm_mm, addr, pudp);
        /*
         * If it is not a fullmm flush, then we can possibly end up converting
         * this PUD entry to a regular level 0 PTE by a parallel page fault.
         * Make sure we flush the tlb in this case.
         */
        if (!full)
                flush_pud_tlb_range(vma, addr, addr + HPAGE_PUD_SIZE);
        return pud;
}

static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
        return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

static pud_t pud_set_protbits(pud_t pud, pgprot_t pgprot)
{
        return __pud(pud_val(pud) | pgprot_val(pgprot));
}

/*
 * At some point we should be able to get rid of
 * pmd_mkhuge() and mk_huge_pmd() when we update all the
 * other archs to mark the pmd huge in pfn_pmd()
 */
pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
        unsigned long pmdv;

        pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;

        return __pmd_mkhuge(pmd_set_protbits(__pmd(pmdv), pgprot));
}

pud_t pfn_pud(unsigned long pfn, pgprot_t pgprot)
{
        unsigned long pudv;

        pudv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;

        return __pud_mkhuge(pud_set_protbits(__pud(pudv), pgprot));
}

pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
        return pfn_pmd(page_to_pfn(page), pgprot);
}

pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
        unsigned long pmdv;

        pmdv = pmd_val(pmd);
        pmdv &= _HPAGE_CHG_MASK;
        return pmd_set_protbits(__pmd(pmdv), newprot);
}

pud_t pud_modify(pud_t pud, pgprot_t newprot)
{
        unsigned long pudv;

        pudv = pud_val(pud);
        pudv &= _HPAGE_CHG_MASK;
        return pud_set_protbits(__pud(pudv), newprot);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/* For use by kexec, called with MMU off */
notrace void mmu_cleanup_all(void)
{
        if (radix_enabled())
                radix__mmu_cleanup_all();
        else if (mmu_hash_ops.hpte_clear_all)
                mmu_hash_ops.hpte_clear_all();

        reset_sprs();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int __meminit create_section_mapping(unsigned long start, unsigned long end,
                                     int nid, pgprot_t prot)
{
        if (radix_enabled())
                return radix__create_section_mapping(start, end, nid, prot);

        return hash__create_section_mapping(start, end, nid, prot);
}

int __meminit remove_section_mapping(unsigned long start, unsigned long end)
{
        if (radix_enabled())
                return radix__remove_section_mapping(start, end);

        return hash__remove_section_mapping(start, end);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

void __init mmu_partition_table_init(void)
{
        unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
        unsigned long ptcr;

        /* Initialize the Partition Table with no entries */
        partition_tb = memblock_alloc_or_panic(patb_size, patb_size);
        ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
        set_ptcr_when_no_uv(ptcr);
        powernv_set_nmmu_ptcr(ptcr);
}
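
/*
 * Aside (not from the original source): the PTCR value built above packs the
 * physical base of the partition table with a size field; per ISA 3.0 the low
 * bits hold log2(table size) - 12. A worked example, assuming
 * PATB_SIZE_SHIFT == 16 (a 64KB table):
 *
 *      patb_size = 1UL << 16;                        // 64KB, naturally aligned
 *      ptcr      = __pa(partition_tb) | (16 - 12);   // base | size field = 4
 *
 * The table must be size-aligned, which is why memblock_alloc_or_panic() is
 * called with the same value for size and alignment.
 */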

static void flush_partition(unsigned int lpid, bool radix)
{
        if (radix) {
                radix__flush_all_lpid(lpid);
                radix__flush_all_lpid_guest(lpid);
        } else {
                asm volatile("ptesync" : : : "memory");
                asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
                             "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
                /* do we need fixup here? */
                asm volatile("eieio; tlbsync; ptesync" : : : "memory");
                trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
        }
}

void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
                                   unsigned long dw1, bool flush)
{
        unsigned long old = be64_to_cpu(partition_tb[lpid].patb0);

        /*
         * When the ultravisor is enabled, the partition table is stored in
         * secure memory and can only be accessed via an ultravisor call.
         * However, we maintain a copy of the partition table in normal memory
         * to allow Nest MMU translations to occur (for normal VMs).
         *
         * Therefore, here we always update partition_tb, regardless of whether
         * we are running under an ultravisor or not.
         */
        partition_tb[lpid].patb0 = cpu_to_be64(dw0);
        partition_tb[lpid].patb1 = cpu_to_be64(dw1);

        /*
         * If the ultravisor is enabled, we do an ultravisor call to register
         * the partition table entry (PATE), which also does a global flush of
         * TLBs and partition table caches for the lpid. Otherwise, just do
         * the flush. The type of flush (hash or radix) depends on what the
         * previous use of the partition ID was, not the new use.
         */
        if (firmware_has_feature(FW_FEATURE_ULTRAVISOR)) {
                uv_register_pate(lpid, dw0, dw1);
                pr_info("PATE registered by ultravisor: dw0 = 0x%lx, dw1 = 0x%lx\n",
                        dw0, dw1);
        } else if (flush) {
                /*
                 * Boot does not need to flush, because the MMU is off and each
                 * CPU does a tlbiel_all() before being switched on, which
                 * flushes everything.
                 */
                flush_partition(lpid, (old & PATB_HR));
        }
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
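
/*
 * Illustrative usage (paraphrased, not part of this file): the radix boot
 * path registers the host's own tables in partition 0 roughly like this;
 * treat the exact field composition as a sketch of the ISA 3.0 PATE layout
 * rather than a quote of the caller:
 *
 *      dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
 *      dw1 = __pa(process_tb) | (PRTB_SIZE_SHIFT - 12) | PATB_GR;
 *      mmu_partition_table_set_entry(0, dw0, dw1, false);
 *
 * Hypervisor users registering guest partitions typically pass flush = true
 * so that stale translations for a recycled lpid are invalidated.
 */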

static pmd_t *get_pmd_from_cache(struct mm_struct *mm)
{
        void *pmd_frag, *ret;

        if (PMD_FRAG_NR == 1)
                return NULL;

        spin_lock(&mm->page_table_lock);
        ret = mm->context.pmd_frag;
        if (ret) {
                pmd_frag = ret + PMD_FRAG_SIZE;
                /*
                 * If we have taken up all the fragments, mark the cached
                 * PMD fragment pointer NULL.
                 */
                if (((unsigned long)pmd_frag & ~PAGE_MASK) == 0)
                        pmd_frag = NULL;
                mm->context.pmd_frag = pmd_frag;
        }
        spin_unlock(&mm->page_table_lock);
        return (pmd_t *)ret;
}

static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
{
        void *ret = NULL;
        struct ptdesc *ptdesc;
        gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;

        if (mm == &init_mm)
                gfp &= ~__GFP_ACCOUNT;
        ptdesc = pagetable_alloc(gfp, 0);
        if (!ptdesc)
                return NULL;
        if (!pagetable_pmd_ctor(ptdesc)) {
                pagetable_free(ptdesc);
                return NULL;
        }

        atomic_set(&ptdesc->pt_frag_refcount, 1);

        ret = ptdesc_address(ptdesc);
        /*
         * If we support only one fragment, just return the
         * allocated page.
         */
        if (PMD_FRAG_NR == 1)
                return ret;

        spin_lock(&mm->page_table_lock);
        /*
         * If another thread has already populated mm->context.pmd_frag,
         * return the allocated page with a single fragment count and leave
         * the cached fragment alone.
         */
        if (likely(!mm->context.pmd_frag)) {
                atomic_set(&ptdesc->pt_frag_refcount, PMD_FRAG_NR);
                mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
        }
        spin_unlock(&mm->page_table_lock);

        return (pmd_t *)ret;
}
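
/*
 * Aside (not from the original source): PMD "fragments" let several PMD
 * tables share one backing page. For example, with a 64K base page and 4K
 * radix PMD tables the numbers work out to (values depend on configuration):
 *
 *      PMD_FRAG_SIZE == 4K,  PMD_FRAG_NR == 16
 *      // get_pmd_from_cache() hands out ret, ret + 4K, ret + 8K, ...
 *      // once the next fragment pointer wraps to a page boundary
 *      // (((unsigned long)pmd_frag & ~PAGE_MASK) == 0), the cache is emptied.
 *
 * pt_frag_refcount tracks how many fragments of the page are still live, so
 * the backing page is only freed once pmd_fragment_free() drops the last one.
 */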

pmd_t *pmd_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
        pmd_t *pmd;

        pmd = get_pmd_from_cache(mm);
        if (pmd)
                return pmd;

        return __alloc_for_pmdcache(mm);
}

void pmd_fragment_free(unsigned long *pmd)
{
        struct ptdesc *ptdesc = virt_to_ptdesc(pmd);

        if (pagetable_is_reserved(ptdesc))
                return free_reserved_ptdesc(ptdesc);

        BUG_ON(atomic_read(&ptdesc->pt_frag_refcount) <= 0);
        if (atomic_dec_and_test(&ptdesc->pt_frag_refcount)) {
                pagetable_dtor(ptdesc);
                pagetable_free(ptdesc);
        }
}

static inline void pgtable_free(void *table, int index)
{
        switch (index) {
        case PTE_INDEX:
                pte_fragment_free(table, 0);
                break;
        case PMD_INDEX:
                pmd_fragment_free(table);
                break;
        case PUD_INDEX:
                __pud_free(table);
                break;
        /* We don't free pgd table via RCU callback */
        default:
                BUG();
        }
}

void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
        unsigned long pgf = (unsigned long)table;

        BUG_ON(index > MAX_PGTABLE_INDEX_SIZE);
        pgf |= index;
        tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
        void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
        unsigned int index = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

        return pgtable_free(table, index);
}
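
/*
 * Aside (not from the original source): pgtable_free_tlb() smuggles the table
 * level through the low bits of the table pointer, and __tlb_remove_table()
 * unpacks it again after the TLB batch/grace period. Sketch of the encoding:
 *
 *      pgf   = (unsigned long)table | PMD_INDEX;          // tag the pointer
 *      ...
 *      table = (void *)(pgf & ~MAX_PGTABLE_INDEX_SIZE);   // strip the tag
 *      index = pgf & MAX_PGTABLE_INDEX_SIZE;              // recover the level
 *
 * This works because page-table allocations are naturally aligned well beyond
 * the low bits used for the tag, so those bits are zero in the raw pointer.
 */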

#ifdef CONFIG_PROC_FS
atomic_long_t direct_pages_count[MMU_PAGE_COUNT];

void arch_report_meminfo(struct seq_file *m)
{
        /*
         * Hash maps the memory with one size, mmu_linear_psize,
         * so don't bother printing these on hash.
         */
        if (!radix_enabled())
                return;
        /* Convert page counts to kB by shifting by (page shift - 10). */
        seq_printf(m, "DirectMap4k: %8lu kB\n",
                   atomic_long_read(&direct_pages_count[MMU_PAGE_4K]) << 2);
        seq_printf(m, "DirectMap64k: %8lu kB\n",
                   atomic_long_read(&direct_pages_count[MMU_PAGE_64K]) << 6);
        seq_printf(m, "DirectMap2M: %8lu kB\n",
                   atomic_long_read(&direct_pages_count[MMU_PAGE_2M]) << 11);
        seq_printf(m, "DirectMap1G: %8lu kB\n",
                   atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
}
#endif /* CONFIG_PROC_FS */
pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
                             pte_t *ptep)
{
        unsigned long pte_val;

        /*
         * Clear _PAGE_PRESENT so that no parallel hardware update is
         * possible. Also keep pte_present() true so that we don't take
         * a spurious fault.
         */
        pte_val = pte_update(vma->vm_mm, addr, ptep, _PAGE_PRESENT, _PAGE_INVALID, 0);

        return __pte(pte_val);
}

void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
                             pte_t *ptep, pte_t old_pte, pte_t pte)
{
        if (radix_enabled())
                return radix__ptep_modify_prot_commit(vma, addr,
                                                      ptep, old_pte, pte);
        set_pte_at(vma->vm_mm, addr, ptep, pte);
}
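
/*
 * Illustrative note (not part of the original file): generic code such as
 * the mprotect path brackets a protection change with this pair, roughly:
 *
 *      old_pte = ptep_modify_prot_start(vma, addr, ptep);  // PTE invalid to HW
 *      new_pte = pte_modify(old_pte, newprot);             // compute new bits
 *      ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);
 *
 * Marking the entry _PAGE_INVALID in the start phase means hardware cannot
 * update R/C bits underneath us while the new protections are being computed.
 */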

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * For hash translation mode, we use the deposited page table to store hash
 * slot information, and it is stored at a PTRS_PER_PMD offset from the
 * related pmd location. Hence a pmd move requires a deposit and withdraw.
 *
 * For radix translation with split pmd ptl, we store the deposited table in
 * the pmd page. Hence if we have a different pmd page we need to withdraw
 * during a pmd move.
 *
 * With hash we use the deposited table always, irrespective of anon or not.
 * With radix we use the deposited table only for anonymous mappings.
 */
int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
                           struct spinlock *old_pmd_ptl,
                           struct vm_area_struct *vma)
{
        if (radix_enabled())
                return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);

        return true;
}
#endif

/*
 * Does the CPU support tlbie?
 */
bool tlbie_capable __read_mostly = IS_ENABLED(CONFIG_PPC_RADIX_BROADCAST_TLBIE);
EXPORT_SYMBOL(tlbie_capable);

/*
 * Should tlbie be used for management of CPU TLBs, for kernel and process
 * address spaces? tlbie may still be used for nMMU accelerators, and for KVM
 * guest address spaces.
 */
bool tlbie_enabled __read_mostly = IS_ENABLED(CONFIG_PPC_RADIX_BROADCAST_TLBIE);

static int __init setup_disable_tlbie(char *str)
{
        if (!radix_enabled()) {
                pr_err("disable_tlbie: Unable to disable TLBIE with Hash MMU.\n");
                return 1;
        }

        tlbie_capable = false;
        tlbie_enabled = false;

        return 1;
}
__setup("disable_tlbie", setup_disable_tlbie);
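
/*
 * Usage note (not from the original source): booting with "disable_tlbie" on
 * the kernel command line forces local tlbiel plus IPIs instead of broadcast
 * tlbie, e.g.:
 *
 *      linux ... disable_tlbie
 *
 * This only takes effect on radix; with the hash MMU the option is rejected
 * above.
 */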

static int __init pgtable_debugfs_setup(void)
{
        if (!tlbie_capable)
                return 0;

        /*
         * There is no locking vs tlb flushing when changing this value.
         * The tlb flushers will see one value or another, and use either
         * tlbie or tlbiel with IPIs. In both cases the TLBs will be
         * invalidated as expected.
         */
        debugfs_create_bool("tlbie_enabled", 0600,
                            arch_debugfs_dir,
                            &tlbie_enabled);

        return 0;
}
arch_initcall(pgtable_debugfs_setup);
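
/*
 * Usage note (not from the original source): with debugfs mounted, the knob
 * created above can be flipped at runtime, e.g.:
 *
 *      echo 0 > /sys/kernel/debug/powerpc/tlbie_enabled   # tlbiel + IPIs
 *      echo 1 > /sys/kernel/debug/powerpc/tlbie_enabled   # broadcast tlbie
 *
 * The path assumes arch_debugfs_dir is the usual "powerpc" directory under
 * the debugfs mount point.
 */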

#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN)
/*
 * Override the generic version in mm/memremap.c.
 *
 * With hash translation, the direct-map range is mapped with just one
 * page size selected by htab_init_page_sizes(). Consult
 * mmu_psize_defs[] to determine the minimum page size alignment.
 */
unsigned long memremap_compat_align(void)
{
        if (!radix_enabled()) {
                unsigned int shift = mmu_psize_defs[mmu_linear_psize].shift;
                return max(SUBSECTION_SIZE, 1UL << shift);
        }

        return SUBSECTION_SIZE;
}
EXPORT_SYMBOL_GPL(memremap_compat_align);
#endif
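
/*
 * Worked example (not from the original source): with hash translation and a
 * 16MB linear-map page size, shift is 24 and the function returns
 * max(SUBSECTION_SIZE, 16MB) = 16MB; otherwise it falls back to
 * SUBSECTION_SIZE (2MB with the usual SUBSECTION_SHIFT of 21). This is what
 * forces device memory namespaces to be aligned generously enough for the
 * hash linear map.
 */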

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
        unsigned long prot;

        /* Radix supports execute-only, but protection_map maps X -> RX */
        if (!radix_enabled() && ((vm_flags & VM_ACCESS_FLAGS) == VM_EXEC))
                vm_flags |= VM_READ;

        prot = pgprot_val(protection_map[vm_flags & (VM_ACCESS_FLAGS | VM_SHARED)]);

        if (vm_flags & VM_SAO)
                prot |= _PAGE_SAO;

#ifdef CONFIG_PPC_MEM_KEYS
        prot |= vmflag_to_pte_pkey_bits(vm_flags);
#endif

        return __pgprot(prot);
}
EXPORT_SYMBOL(vm_get_page_prot);