
Searched refs:PUD_SIZE (Results 1 – 25 of 71) sorted by relevance


/linux/arch/riscv/mm/
kasan_init.c
97 if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) && in kasan_populate_pud()
98 (next - vaddr) >= PUD_SIZE) { in kasan_populate_pud()
99 phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE); in kasan_populate_pud()
102 memset(__va(phys_addr), KASAN_SHADOW_INIT, PUD_SIZE); in kasan_populate_pud()
181 if (IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) { in kasan_early_clear_pud()
252 if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) && in kasan_early_populate_pud()
253 (next - vaddr) >= PUD_SIZE) { in kasan_early_populate_pud()
hugetlbpage.c
51 if (sz == PUD_SIZE) { in huge_pte_alloc()
104 if (sz == PUD_SIZE) in huge_pte_offset()
134 case PUD_SIZE: in hugetlb_mask_last_page()
135 return P4D_SIZE - PUD_SIZE; in hugetlb_mask_last_page()
138 return PUD_SIZE - PMD_SIZE; in hugetlb_mask_last_page()
226 else if (sz >= PUD_SIZE) in num_contig_ptes_from_size()
420 else if (IS_ENABLED(CONFIG_64BIT) && size == PUD_SIZE) in __hugetlb_valid_size()
tlbflush.c
190 else if (stride_size >= PUD_SIZE) in flush_tlb_range()
191 stride_size = PUD_SIZE; in flush_tlb_range()
221 start, end - start, PUD_SIZE); in flush_pud_tlb_range()
init.c
640 if (sz == PUD_SIZE) { in create_pud_mapping()
752 !(pa & (PUD_SIZE - 1)) && !(va & (PUD_SIZE - 1)) && size >= PUD_SIZE) in best_map_size()
753 return PUD_SIZE; in best_map_size()
880 PUD_SIZE, PAGE_TABLE); in set_satp_mode()
1105 nr_pos = (PUD_SIZE - kernel_size) / PMD_SIZE; in setup_vm()
1175 BUG_ON(PUD_SIZE - (kernel_map.virt_addr & (PUD_SIZE - 1)) < kernel_map.size); in setup_vm()
1194 (uintptr_t)fixmap_pmd, PUD_SIZE, PAGE_TABLE); in setup_vm()
1205 (uintptr_t)trampoline_pmd, PUD_SIZE, PAGE_TABLE); in setup_vm()
1717 free_vmemmap_storage(pud_page(pud), PUD_SIZE, altmap); in remove_pud_mapping()
/linux/arch/arm64/mm/
hugetlbpage.c
52 case PUD_SIZE: in __hugetlb_valid_size()
255 if (sz == PUD_SIZE) { in huge_pte_alloc()
296 if (sz != PUD_SIZE && pud_none(pud)) in huge_pte_offset()
326 case PUD_SIZE: in hugetlb_mask_last_page()
328 return PGDIR_SIZE - PUD_SIZE; in hugetlb_mask_last_page()
332 return PUD_SIZE - CONT_PMD_SIZE; in hugetlb_mask_last_page()
334 return PUD_SIZE - PMD_SIZE; in hugetlb_mask_last_page()
350 case PUD_SIZE: in arch_make_huge_pte()
/linux/include/asm-generic/
pgtable-nopud.h
20 #define PUD_SIZE (1UL << PUD_SHIFT) macro
21 #define PUD_MASK (~(PUD_SIZE-1))
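
The two generic macros above are the whole definition: PUD_SIZE is the span covered by one PUD entry and PUD_MASK clears the offset within that span. Below is a minimal standalone sketch of how the callers in these results typically combine them (rounding down with PUD_MASK, testing alignment against PUD_SIZE). PUD_SHIFT = 30 is an assumed value for illustration (the x86-64 one, 1 GiB per entry), and the code is not taken from any of the files listed here.

/*
 * Standalone illustration only, not kernel code.
 * PUD_SHIFT = 30 is assumed (x86-64: each PUD entry covers 1 GiB);
 * other architectures use different shifts. 1ULL is used instead of
 * the kernel's 1UL so the sketch is also correct on 32-bit hosts.
 */
#include <stdio.h>
#include <stdint.h>

#define PUD_SHIFT 30
#define PUD_SIZE  (1ULL << PUD_SHIFT)      /* bytes covered by one PUD entry */
#define PUD_MASK  (~(PUD_SIZE - 1))        /* clears the offset within that span */

int main(void)
{
	uint64_t addr = 0x7f0012345678ULL;

	/* Base of the PUD-sized region containing addr, i.e. what
	 * round_down(addr, PUD_SIZE) or (addr & PUD_MASK) computes
	 * in the callers listed in these results. */
	printf("PUD base:    %#llx\n", (unsigned long long)(addr & PUD_MASK));

	/* Open-coded form of IS_ALIGNED(addr, PUD_SIZE). */
	printf("PUD aligned: %s\n", (addr & (PUD_SIZE - 1)) ? "no" : "yes");
	return 0;
}

The per-architecture variants below (x86's _AC(1, UL), arc's BIT(PUD_SHIFT)) expand to the same shift; only the constant-suffix plumbing differs.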
/linux/arch/powerpc/include/asm/nohash/64/
pgtable-4k.h
36 #define PUD_SIZE (1UL << PUD_SHIFT) macro
37 #define PUD_MASK (~(PUD_SIZE-1))
/linux/arch/x86/include/asm/
pgtable_64_types.h
84 #define PUD_SIZE (_AC(1, UL) << PUD_SHIFT) macro
85 #define PUD_MASK (~(PUD_SIZE - 1))
/linux/arch/s390/mm/
hugetlbpage.c
194 if (sz == PUD_SIZE) in huge_pte_alloc()
216 if (sz == PUD_SIZE) in huge_pte_offset()
229 else if (cpu_has_edat2() && size == PUD_SIZE) in arch_hugetlb_valid_size()
vmem.c
331 if (IS_ALIGNED(addr, PUD_SIZE) && in modify_pud_table()
332 IS_ALIGNED(next, PUD_SIZE)) { in modify_pud_table()
334 vmem_free_pages(pud_deref(*pud), get_order(PUD_SIZE), altmap); in modify_pud_table()
343 if (IS_ALIGNED(addr, PUD_SIZE) && in modify_pud_table()
344 IS_ALIGNED(next, PUD_SIZE) && in modify_pud_table()
/linux/arch/arm64/include/asm/
hugetlb.h
78 case PUD_SIZE: in __flush_hugetlb_tlb_range()
79 __flush_tlb_range(vma, start, end, PUD_SIZE, last_level, 1); in __flush_hugetlb_tlb_range()
/linux/mm/
page_table_check.c
179 page_table_check_clear(pud_pfn(pud), PUD_SIZE >> PAGE_SHIFT); in __page_table_check_pud_clear()
252 unsigned long stride = PUD_SIZE >> PAGE_SHIFT; in __page_table_check_puds_set()
259 __page_table_check_pud_clear(mm, addr + PUD_SIZE * i, *(pudp + i)); in __page_table_check_puds_set()
/linux/arch/um/include/asm/
pgtable-4level.h
23 #define PUD_SIZE (1UL << PUD_SHIFT) macro
24 #define PUD_MASK (~(PUD_SIZE-1))
/linux/arch/s390/boot/
vmem.c
163 IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) { in kasan_pud_populate_zero_shadow()
307 !IS_ALIGNED(addr, PUD_SIZE) || (size < PUD_SIZE)) in try_get_large_pud_pa()
311 if (!IS_ALIGNED(pa, PUD_SIZE)) in try_get_large_pud_pa()
/linux/arch/sparc/mm/
hugetlbpage.c
203 if (sz >= PUD_SIZE) in huge_pte_alloc()
250 if (size >= PUD_SIZE) in __set_huge_pte_at()
293 if (size >= PUD_SIZE) in huge_ptep_get_and_clear()
/linux/mm/kasan/
init.c
152 if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) { in zero_pud_populate()
406 if (IS_ALIGNED(addr, PUD_SIZE) && in kasan_remove_pud_table()
407 IS_ALIGNED(next, PUD_SIZE)) { in kasan_remove_pud_table()
/linux/arch/x86/mm/
kasan_init_64.c
82 ((end - addr) == PUD_SIZE) && in kasan_populate_pud()
83 IS_ALIGNED(addr, PUD_SIZE)) { in kasan_populate_pud()
84 p = early_alloc(PUD_SIZE, nid, false); in kasan_populate_pud()
87 memblock_free(p, PUD_SIZE); in kasan_populate_pud()
init.c
367 unsigned long start = round_down(mr[i].start, PUD_SIZE); in adjust_range_page_size_mask()
368 unsigned long end = round_up(mr[i].end, PUD_SIZE); in adjust_range_page_size_mask()
439 end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE)); in split_mem_range()
452 start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE)); in split_mem_range()
453 end_pfn = round_down(limit_pfn, PFN_DOWN(PUD_SIZE)); in split_mem_range()
hugetlbpage.c
28 else if (size == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES)) in arch_hugetlb_valid_size()
/linux/arch/riscv/kvm/
mmu.c
276 WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE); in kvm_age_gfn()
299 WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE); in kvm_test_age_gfn()
403 size = PUD_SIZE; in get_hva_mapping_size()
498 if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE) in kvm_riscv_mmu_map()
512 if (vma_pagesize != PUD_SIZE && in kvm_riscv_mmu_map()
/linux/arch/arc/include/asm/
pgtable-levels.h
75 #define PUD_SIZE BIT(PUD_SHIFT) macro
76 #define PUD_MASK (~(PUD_SIZE - 1))
/linux/drivers/dax/
device.c
196 unsigned int fault_size = PUD_SIZE; in __dev_dax_pud_fault()
202 if (dev_dax->align > PUD_SIZE) { in __dev_dax_pud_fault()
215 (pud_addr + PUD_SIZE) > vmf->vma->vm_end) in __dev_dax_pud_fault()
219 phys = dax_pgoff_to_phys(dev_dax, pgoff, PUD_SIZE); in __dev_dax_pud_fault()
/linux/arch/powerpc/mm/book3s64/
radix_pgtable.c
93 if (map_page_size == PUD_SIZE) { in early_map_kernel_page()
159 if (map_page_size == PUD_SIZE) { in __map_kernel_page()
326 if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE && in create_physical_mapping()
328 mapping_size = PUD_SIZE; in create_physical_mapping()
876 if (!IS_ALIGNED(addr, PUD_SIZE) || in remove_pud_table()
877 !IS_ALIGNED(next, PUD_SIZE)) { in remove_pud_table()
1646 flush_tlb_kernel_range(addr, addr + PUD_SIZE); in pud_free_pmd_page()
/linux/arch/x86/mm/pat/
set_memory.c
1504 if (start & (PUD_SIZE - 1)) { in unmap_pud_range()
1505 unsigned long next_page = (start + PUD_SIZE) & PUD_MASK; in unmap_pud_range()
1517 while (end - start >= PUD_SIZE) { in unmap_pud_range()
1522 unmap_pmd_range(pud, start, start + PUD_SIZE); in unmap_pud_range()
1524 start += PUD_SIZE; in unmap_pud_range()
1667 if (start & (PUD_SIZE - 1)) { in populate_pud()
1669 unsigned long next_page = (start + PUD_SIZE) & PUD_MASK; in populate_pud()
1702 while (boot_cpu_has(X86_FEATURE_GBPAGES) && end - start >= PUD_SIZE) { in populate_pud()
1706 start += PUD_SIZE; in populate_pud()
1707 cpa->pfn += PUD_SIZE >> PAGE_SHIFT; in populate_pud()
[all …]
/linux/arch/mips/include/asm/
pgtable-64.h
60 #define PUD_SIZE (1UL << PUD_SHIFT) macro
61 #define PUD_MASK (~(PUD_SIZE-1))
