/linux/arch/x86/boot/startup/

  map_kernel.c
     14  extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
     64  early_snp_set_memory_shared(paddr, paddr, PTRS_PER_PMD);  in sme_postprocess_startup()
     90  pmd_t (*early_pgts)[PTRS_PER_PMD] = rip_rel_ptr(early_dynamic_pgts);  in __startup_64()
    182  pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE;  in __startup_64()
    213  for (; i < PTRS_PER_PMD; i++)  in __startup_64()

  sme.c
    135  memset(pmd, 0, sizeof(*pmd) * PTRS_PER_PMD);  in sme_prepare_pgd()
    136  ppd->pgtable_area += sizeof(*pmd) * PTRS_PER_PMD;  in sme_prepare_pgd()
    268  entries += (DIV_ROUND_UP(len, PUD_SIZE) + 1) * sizeof(pmd_t) * PTRS_PER_PMD;  in sme_pgtable_calc()
    279  tables += DIV_ROUND_UP(entries, PUD_SIZE) * sizeof(pmd_t) * PTRS_PER_PMD;  in sme_pgtable_calc()
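The map_kernel.c hits at lines 182 and 213 show the early boot code filling one PMD page with PMD_SIZE-strided entries. Below is a minimal, standalone sketch of that fill pattern, not the kernel code itself: pmd_t, PMD_SIZE, PMD_FLAGS and fill_pmd_identity() are simplified stand-ins using common x86-64 values.

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins for the kernel definitions (x86-64-style values). */
    #define PTRS_PER_PMD  512
    #define PMD_SIZE      (2UL * 1024 * 1024)   /* each PMD entry maps 2 MiB */
    #define PMD_FLAGS     0x1e3UL               /* placeholder for the large-page flags */

    typedef uint64_t pmd_t;

    /* Fill one PMD page so it identity-maps PTRS_PER_PMD * PMD_SIZE bytes
     * starting at paddr -- the same shape as the
     * pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE loop above. */
    static void fill_pmd_identity(pmd_t pmd[PTRS_PER_PMD], uint64_t paddr)
    {
        for (int i = 0; i < PTRS_PER_PMD; i++)
            pmd[i] = (paddr + (uint64_t)i * PMD_SIZE) | PMD_FLAGS;
    }

    int main(void)
    {
        static pmd_t pmd[PTRS_PER_PMD];

        fill_pmd_identity(pmd, 0);
        /* 512 entries x 2 MiB = 1024 MiB covered by a single PMD page */
        printf("one PMD page maps %lu MiB\n",
               (unsigned long)((PTRS_PER_PMD * PMD_SIZE) >> 20));
        return 0;
    }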
/linux/arch/m68k/include/asm/

  pgtable_mm.h
     60  #define PTRS_PER_PMD 1  (macro)
     65  #define PTRS_PER_PMD 1  (macro)
     69  #define PTRS_PER_PMD 128  (macro)
/linux/arch/x86/include/asm/

  pgtable_32.h
     56  * With PAE paging (PTRS_PER_PMD > 1), we allocate PTRS_PER_PGD == 4 pages for
     59  #if PTRS_PER_PMD > 1
     60  #define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)

  pgtable-2level_types.h
     34  #define PTRS_PER_PMD 1  (macro)

  pgtable-3level_types.h
     43  #define PTRS_PER_PMD 512  (macro)

  pgtable_64_types.h
     72  #define PTRS_PER_PMD 512  (macro)
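pgtable_32.h above sizes the initial page tables from these constants: with PAE (PTRS_PER_PMD == 512) it needs pages / PTRS_PER_PMD last-level table pages plus PTRS_PER_PGD == 4 PMD pages. The small worked check below redoes that arithmetic with the constants hard-coded purely for illustration; the 2-level fallback formula is an assumption here, since only the PAE branch appears in the hit above.

    #include <stdio.h>

    /* Constants mirroring the x86 32-bit headers quoted above (illustrative). */
    #define PTRS_PER_PGD_PAE   4       /* pgtable-3level_types.h */
    #define PTRS_PER_PMD_PAE   512
    #define PTRS_PER_PGD_2L    1024    /* 2-level case, where PTRS_PER_PMD == 1 */

    /* PAE branch of PAGE_TABLE_SIZE(): last-level pages plus the 4 PMD pages. */
    #define PAGE_TABLE_SIZE_PAE(pages)  (((pages) / PTRS_PER_PMD_PAE) + PTRS_PER_PGD_PAE)
    /* Assumed 2-level fallback: one table page per PTRS_PER_PGD pages mapped. */
    #define PAGE_TABLE_SIZE_2L(pages)   ((pages) / PTRS_PER_PGD_2L)

    int main(void)
    {
        unsigned long lowmem_pages = (1UL << 30) / 4096;   /* 1 GiB of 4 KiB pages */

        printf("PAE:     %lu pagetable pages\n", PAGE_TABLE_SIZE_PAE(lowmem_pages)); /* 516 */
        printf("2-level: %lu pagetable pages\n", PAGE_TABLE_SIZE_2L(lowmem_pages));  /* 256 */
        return 0;
    }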
/linux/arch/mips/include/asm/

  pgtable-64.h
     31  * single 4K page, giving 512 (== PTRS_PER_PMD) 8 byte pointers to page
    130  #define PTRS_PER_PMD ((PAGE_SIZE << PMD_TABLE_ORDER) / sizeof(pmd_t))  (macro)
    144  min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, \
    234  extern pmd_t invalid_pmd_table[PTRS_PER_PMD];
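The MIPS header derives PTRS_PER_PMD from the geometry rather than hard-coding it: (PAGE_SIZE << PMD_TABLE_ORDER) / sizeof(pmd_t). For a 4 KiB page, an order-0 PMD table and 8-byte entries this gives the 512 mentioned in the comment. The compile-time check below redoes that arithmetic with illustrative constants, not the real MIPS configuration headers.

    #include <assert.h>
    #include <stdint.h>

    /* Illustrative values for a 4 KiB-page, order-0 PMD-table configuration. */
    #define PAGE_SIZE        4096UL
    #define PMD_TABLE_ORDER  0                  /* the PMD table is a single page */

    typedef struct { uint64_t pmd; } pmd_t;     /* 8-byte entry, as in the comment */

    #define PTRS_PER_PMD  ((PAGE_SIZE << PMD_TABLE_ORDER) / sizeof(pmd_t))

    /* (4096 << 0) / 8 == 512 pointers per PMD page. */
    static_assert(PTRS_PER_PMD == 512, "one 4K page holds 512 8-byte pmd entries");

    int main(void) { return 0; }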
/linux/arch/powerpc/mm/book3s64/

  hash_pgtable.c
    275  pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;  in hash__pgtable_trans_huge_deposit()
    293  pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;  in hash__pgtable_trans_huge_withdraw()
    359  pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;  in hash__pmdp_huge_get_and_clear()
/linux/arch/loongarch/include/asm/

  pgtable.h
     52  #define PTRS_PER_PMD (PAGE_SIZE >> 3)  (macro)
     96  min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
    100  min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits) / 2) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
    179  extern pmd_t invalid_pmd_table[PTRS_PER_PMD];
/linux/arch/sh/include/asm/

  pgtable-3level.h
     26  #define PTRS_PER_PMD ((1 << PGDIR_SHIFT) / PMD_SIZE)  (macro)
/linux/arch/x86/mm/

  pgtable.c
    267  sizeof(pmd_t) * PTRS_PER_PMD);  in pgd_prepopulate_pmd()
    293  sizeof(pmd_t) * PTRS_PER_PMD);  in pgd_prepopulate_user_pmd()
    740  for (i = 0; i < PTRS_PER_PMD; i++) {  in pud_free_pmd_page()
    751  for (i = 0; i < PTRS_PER_PMD; i++) {  in pud_free_pmd_page()

  init_32.c
    142  for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);  in page_table_range_init_count()
    225  for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);  in page_table_range_init()
    304  for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;  in kernel_physical_mapping_init()
/linux/arch/sh/mm/

  pgtable.c
     29  PTRS_PER_PMD * (1<<PTE_MAGNITUDE),  in pgtable_cache_init()
/linux/arch/m68k/mm/

  init.c
    108  for (j = 0; j < PTRS_PER_PMD; j++) {  in init_pointer_tables()
/linux/arch/x86/platform/pvh/

  head.S
    147  movl $PTRS_PER_PMD, %ecx
    278  PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
/linux/arch/x86/virt/svm/

  sev.c
    818  pfn_i = ALIGN_DOWN(pfn, PTRS_PER_PMD);  in dump_rmpentry()
    819  pfn_end = pfn_i + PTRS_PER_PMD;  in dump_rmpentry()
    931  (!IS_ALIGNED(pfn, PTRS_PER_PMD) || !pfn_valid(pfn + PTRS_PER_PMD - 1)))  in adjust_direct_map()
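On x86-64 a PMD entry covers 2 MiB, so PTRS_PER_PMD (512) is also the number of 4 KiB page frames in a 2 MiB block; that is what lets dump_rmpentry() round a pfn down with ALIGN_DOWN(pfn, PTRS_PER_PMD) and scan PTRS_PER_PMD frames from there. A minimal sketch of that idiom follows, with ALIGN_DOWN re-implemented here for power-of-two alignments rather than taken from the kernel headers.

    #include <stdio.h>

    #define PTRS_PER_PMD  512   /* 4 KiB page frames per 2 MiB block on x86-64 */

    /* Simplified, power-of-two-only stand-in for the kernel's ALIGN_DOWN(). */
    #define ALIGN_DOWN(x, a)  ((x) & ~((unsigned long)(a) - 1UL))

    int main(void)
    {
        unsigned long pfn = 0x12345;

        unsigned long pfn_begin = ALIGN_DOWN(pfn, PTRS_PER_PMD);  /* 0x12200 */
        unsigned long pfn_end   = pfn_begin + PTRS_PER_PMD;       /* 0x12400 */

        printf("pfn 0x%lx lies in the 2 MiB block [0x%lx, 0x%lx)\n",
               pfn, pfn_begin, pfn_end);
        return 0;
    }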
/linux/arch/powerpc/include/asm/nohash/64/

  pgtable-4k.h
     25  #define PTRS_PER_PMD (1 << PMD_INDEX_SIZE)  (macro)
/linux/arch/parisc/mm/

  init.c
     43  pmd_t pmd0[PTRS_PER_PMD] __section(".data..vm0.pmd") __attribute__ ((aligned(PAGE_SIZE)));
    367  /* for 2-level configuration PTRS_PER_PMD is 0 so start_pmd will be 0 */  in map_pages()
    368  start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));  in map_pages()
    387  for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {  in map_pages()
    554  BUILD_BUG_ON(PT_INITIAL > PTRS_PER_PMD);  in mem_init()
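map_pages() above pulls the starting PMD slot out of a virtual address with (start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1) and then walks slots up to PTRS_PER_PMD. The sketch below shows that shift-and-mask indexing for every level, using illustrative x86-64-style shifts and table sizes rather than the parisc values.

    #include <stdio.h>

    /* Illustrative 4-level, 4 KiB-page geometry (x86-64-like), not parisc's. */
    #define PAGE_SHIFT    12
    #define PMD_SHIFT     21            /* each PMD entry covers 2 MiB */
    #define PUD_SHIFT     30
    #define PGDIR_SHIFT   39
    #define PTRS_PER_PTE  512
    #define PTRS_PER_PMD  512
    #define PTRS_PER_PUD  512
    #define PTRS_PER_PGD  512

    int main(void)
    {
        unsigned long long vaddr = 0xffffffff81234567ULL;

        /* Same shift-and-mask idiom as start_pmd in map_pages(). */
        unsigned long long pte_idx = (vaddr >> PAGE_SHIFT)  & (PTRS_PER_PTE - 1);
        unsigned long long pmd_idx = (vaddr >> PMD_SHIFT)   & (PTRS_PER_PMD - 1);
        unsigned long long pud_idx = (vaddr >> PUD_SHIFT)   & (PTRS_PER_PUD - 1);
        unsigned long long pgd_idx = (vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);

        /* prints: pgd 511, pud 510, pmd 9, pte 52 */
        printf("pgd %llu, pud %llu, pmd %llu, pte %llu\n",
               pgd_idx, pud_idx, pmd_idx, pte_idx);
        return 0;
    }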
/linux/arch/powerpc/mm/kasan/

  init_book3s_64.c
     71  for (i = 0; i < PTRS_PER_PMD; i++)  in kasan_init()
/linux/arch/mips/mm/

  pgtable-64.c
     52  end = p + PTRS_PER_PMD;  in pmd_init()
/linux/arch/sparc/include/asm/

  pgtsrmmu.h
     21  #define SRMMU_PMD_TABLE_SIZE (PTRS_PER_PMD*4)
/linux/arch/arm/include/asm/

  pgtable-2level.h
     71  #define PTRS_PER_PMD 1  (macro)
/linux/arch/um/include/asm/

  pgtable-4level.h
     39  #define PTRS_PER_PMD 512  (macro)
/linux/arch/arm/mm/

  idmap.c
     42  PTRS_PER_PMD * sizeof(pmd_t));  in idmap_add_pmd()