Lines Matching "no-unaligned-direct-access" (arch/powerpc/mm/book3s64/radix_pgtable.c)
1 // SPDX-License-Identifier: GPL-2.0-or-later
5 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
8 #define pr_fmt(fmt) "radix-mmu: " fmt
157 return -ENOMEM; in __map_kernel_page()
164 return -ENOMEM; in __map_kernel_page()
171 return -ENOMEM; in __map_kernel_page()
183 return __map_kernel_page(ea, pa, flags, map_page_size, -1, 0, 0); in radix__map_kernel_page()
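The fragments above show __map_kernel_page() failing with -ENOMEM whenever a page-table level cannot be allocated, and radix__map_kernel_page() wrapping it with nid = -1 (no NUMA preference) for early mappings. A minimal caller sketch, assuming only the wrapper signature visible at line 183 (map_one_early_page is a hypothetical name):

	static int __init map_one_early_page(unsigned long ea, unsigned long pa)
	{
		/*
		 * One 4K kernel mapping; -ENOMEM means an early page-table
		 * allocation failed, as in the returns at lines 157-171.
		 */
		int rc = radix__map_kernel_page(ea, pa, PAGE_KERNEL, PAGE_SIZE);

		if (rc)
			pr_err("failed to map 0x%lx -> 0x%lx (%d)\n", ea, pa, rc);
		return rc;
	}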
200 pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n", in radix__change_memory_range()
267 pr_info("Mapped 0x%016lx-0x%016lx with %s pages%s\n", start, end, buf, in print_mapping()
278 // Relocatable kernel running at non-zero real address in next_boundary()
281 unsigned long end_intr = __pa_symbol(__end_interrupts) - stext_phys; in next_boundary()
319 gap = next_boundary(addr, end) - addr; in create_physical_mapping()
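Lines 278-319 show create_physical_mapping() asking next_boundary() how far the current large page may extend. A condensed sketch of the relocatable-kernel branch those fragments imply (not the complete function):

	// Sketch: for a relocatable kernel (stext_phys != 0), the copy of
	// the interrupt code at real address 0 ends at a rodata boundary,
	// so a single large page must not cross it.
	if (stext_phys != 0) {
		unsigned long end_intr = __pa_symbol(__end_interrupts) - stext_phys;

		if (addr < end_intr)
			return end_intr;
		if (addr < stext_phys)	// start of relocated kernel text
			return stext_phys;
	}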
402 -1, PAGE_KERNEL, PAGE_SIZE)) in map_kfence_pool()
436 * page tables will be allocated within the range. No in radix_init_pgtable()
446 -1, PAGE_KERNEL, ~0UL)); in radix_init_pgtable()
466 process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT, -1, 0, 0); in radix_init_pgtable()
471 process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE); in radix_init_pgtable()
474 * The init_mm context is given the first available (non-zero) PID, in radix_init_pgtable()
475 * which is the "guard PID" and contains no page table. PIDR should in radix_init_pgtable()
497 dw1 = __pa(process_tb) | (PRTB_SIZE_SHIFT - 12) | PATB_GR; in radix_init_partition_table()
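Lines 466-497 show the two hardware table pointers being composed: prtb0 points at init_mm's radix tree, and partition-table doubleword 1 points at the process table. Restating both encodings with annotations (same expressions as lines 471 and 497):

	/* Process-table entry 0: the RTS (radix tree size) field, the real
	 * address of init_mm's root, and the root index size (line 471). */
	process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) |
					RADIX_PGD_INDEX_SIZE);

	/* Partition-table doubleword 1: real address of the process table,
	 * its size encoded as log2(bytes) - 12, and PATB_GR marking the
	 * partition as radix (line 497). */
	dw1 = __pa(process_tb) | (PRTB_SIZE_SHIFT - 12) | PATB_GR;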
505 int idx = -1; in get_idx_from_shift()
539 prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size); in radix_dt_scan_page_sizes()
543 pr_info("Page sizes from device-tree:\n"); in radix_dt_scan_page_sizes()
544 for (; size >= 4; size -= 4, ++prop) { in radix_dt_scan_page_sizes()
558 def->shift = shift; in radix_dt_scan_page_sizes()
559 def->ap = ap; in radix_dt_scan_page_sizes()
560 def->h_rpt_pgsize = psize_to_rpti_pgsize(idx); in radix_dt_scan_page_sizes()
564 cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B; in radix_dt_scan_page_sizes()
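The loop at lines 544-560 walks "ibm,processor-radix-AP-encodings" in 4-byte cells. A sketch of decoding one cell, assuming the layout the fragments imply (top three bits carry the AP encoding, the remainder the page-size shift):

	u32 cell  = be32_to_cpup(prop);		/* one 4-byte DT cell        */
	u32 ap    = cell >> 29;			/* top 3 bits: AP encoding   */
	u32 shift = cell & 0x1fffffff;		/* low bits: log2(page size) */

	/* e.g. a cell of 0x20000010 gives ap = 1, shift = 16 (64K pages) */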
573 * Try to find the available page sizes in the device-tree in radix__early_init_devtree()
578 * No page size details found in device tree. in radix__early_init_devtree()
667 (PATB_SIZE_SHIFT - 12)); in radix__early_init_mmu_secondary()
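Line 667 is the tail of the PTCR write on secondary CPUs; restated in full for context (an assumption based on the fragment, same size encoding as the process table above):

	/* point SPRN_PTCR at the shared partition table; the low bits
	 * encode its size as log2(bytes) - 12 */
	mtspr(SPRN_PTCR, __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));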
769 alt_start = altmap->base_pfn; in free_vmemmap_pages()
770 alt_end = altmap->base_pfn + altmap->reserve + altmap->free; in free_vmemmap_pages()
780 while (nr_pages--) in free_vmemmap_pages()
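Lines 769-780 bound the device altmap's pfn range before freeing vmemmap backing pages. A sketch of the dispatch this implies, reusing the alt_start/alt_end names from lines 769-770 (not the function's exact body):

	if (altmap && base_pfn >= alt_start && base_pfn < alt_end) {
		/* backing pages came from the device's altmap reservation */
		vmem_altmap_free(altmap, nr_pages);
	} else {
		while (nr_pages--)	/* ordinary pages, freed one by one */
			free_reserved_page(page++);
	}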
787 unsigned long end, bool direct, in remove_pte_table() argument
803 if (!direct) in remove_pte_table()
809 else if (!direct && vmemmap_page_is_unused(addr, next)) { in remove_pte_table()
815 if (direct) in remove_pte_table()
816 update_page_count(mmu_virtual_psize, -pages); in remove_pte_table()
820 unsigned long end, bool direct, in remove_pmd_table() argument
837 if (!direct) in remove_pmd_table()
843 else if (!direct && vmemmap_pmd_is_unused(addr, next)) { in remove_pmd_table()
852 remove_pte_table(pte_base, addr, next, direct, altmap); in remove_pmd_table()
855 if (direct) in remove_pmd_table()
856 update_page_count(MMU_PAGE_2M, -pages); in remove_pmd_table()
860 unsigned long end, bool direct, in remove_pud_table() argument
877 WARN_ONCE(1, "%s: unaligned range\n", __func__); in remove_pud_table()
886 remove_pmd_table(pmd_base, addr, next, direct, altmap); in remove_pud_table()
889 if (direct) in remove_pud_table()
890 update_page_count(MMU_PAGE_1G, -pages); in remove_pud_table()
894 remove_pagetable(unsigned long start, unsigned long end, bool direct, in remove_pagetable() argument
915 WARN_ONCE(1, "%s: unaligned range\n", __func__); in remove_pagetable()
924 remove_pud_table(pud_base, addr, next, direct, altmap); in remove_pagetable()
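Lines 787-924 trace the teardown walk: remove_pagetable() fans out to remove_pud_table(), remove_pmd_table(), and remove_pte_table(); each level WARNs on unaligned leaf ranges, and only the direct (linear-map) path debits update_page_count(). A minimal PTE-level sketch under those assumptions (sketch_remove_pte_range is a hypothetical name):

	static void sketch_remove_pte_range(struct mm_struct *mm, pte_t *pte_base,
					    unsigned long addr, unsigned long end,
					    bool direct)
	{
		unsigned long pages = 0;
		pte_t *pte = pte_base + pte_index(addr);

		for (; addr < end; addr += PAGE_SIZE, pte++) {
			if (!pte_present(*pte))
				continue;
			pte_clear(mm, addr, pte);	/* drop the translation */
			pages++;
		}
		if (direct)	/* only the linear map is accounted (line 816) */
			update_page_count(mmu_virtual_psize, -pages);
	}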
938 return -1; in radix__create_section_mapping()
970 return -1; in radix__vmemmap_create_mapping()
1144 return -ENOMEM; in radix__vmemmap_populate()
1147 return -ENOMEM; in radix__vmemmap_populate()
1200 return -ENOMEM; in radix__vmemmap_populate()
1204 return -ENOMEM; in radix__vmemmap_populate()
1256 map_addr = addr - pfn_offset * sizeof(struct page) + PAGE_SIZE; in vmemmap_compound_tail_page()
1283 pte = radix__vmemmap_populate_address(map_addr - PAGE_SIZE, node, NULL, NULL); in vmemmap_compound_tail_page()
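Lines 1256-1283 locate the vmemmap page used for tail-page deduplication. Annotating the arithmetic from line 1256 (an interpretation of the fragment, not new code):

	/* Step back pfn_offset struct page entries to reach the head pfn's
	 * vmemmap address, then forward one page: map_addr is the second
	 * vmemmap page backing this folio, whose contents later tail
	 * mappings can share (map_addr - PAGE_SIZE at line 1283 is the
	 * first vmemmap page, covering the head). */
	map_addr = addr - pfn_offset * sizeof(struct page) + PAGE_SIZE;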
1323 return -ENOMEM; in vmemmap_populate_compound_pages()
1326 return -ENOMEM; in vmemmap_populate_compound_pages()
1336 return -ENOMEM; in vmemmap_populate_compound_pages()
1348 unsigned long pfn_offset = addr_pfn - ALIGN_DOWN(addr_pfn, nr_pages); in vmemmap_populate_compound_pages()
1359 return -ENOMEM; in vmemmap_populate_compound_pages()
1369 return -ENOMEM; in vmemmap_populate_compound_pages()
1384 return -ENOMEM; in vmemmap_populate_compound_pages()
1394 return -ENOMEM; in vmemmap_populate_compound_pages()
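Line 1348 computes where inside its compound page a vmemmap address falls. A worked example with illustrative numbers (4K pages assumed):

	/* nr_pages = 512 for a 2M folio; a pfn of 0x1234 rounds down to
	 * 0x1200, so this address describes tail page 0x34 of the folio. */
	unsigned long addr_pfn   = 0x1234;
	unsigned long pfn_offset = addr_pfn - ALIGN_DOWN(addr_pfn, 512);
	/* pfn_offset == 0x34 */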
1471 radix__flush_tlb_collapsed_pmd(vma->vm_mm, address); in radix__pmdp_collapse_flush()
1511 pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next; in radix__pgtable_trans_huge_withdraw()
1548 struct mm_struct *mm = vma->vm_mm; in radix__ptep_set_access_flags()
1554 * On POWER9, the NMMU is not able to relax PTE access permissions in radix__ptep_set_access_flags()
1565 atomic_read(&mm->context.copros) > 0) { in radix__ptep_set_access_flags()
1575 * Book3S does not require a TLB flush when relaxing access in radix__ptep_set_access_flags()
1578 * taking an access fault, as defined by the architecture. See in radix__ptep_set_access_flags()
1579 * "Setting a Reference or Change Bit or Upgrading Access in radix__ptep_set_access_flags()
1591 struct mm_struct *mm = vma->vm_mm; in radix__ptep_modify_prot_commit()
1595 * installing a PTE with more relaxed access permissions, see in radix__ptep_modify_prot_commit()
1600 (atomic_read(&mm->context.copros) > 0)) in radix__ptep_modify_prot_commit()