/linux/arch/x86/platform/intel-quark/ |
H A D | imr_selftest.c |
    72  imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));   in imr_self_test()
    77  imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));   in imr_self_test()
    82  imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));   in imr_self_test()
|
/linux/arch/x86/include/asm/ |
H A D | page.h |
    57  #ifndef __va
    58  #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
    61  #define __boot_va(x) __va(x)
    74  return __va(pfn << PAGE_SHIFT);   in pfn_to_kaddr()
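The x86 definition above is the plain linear-map form: a physical address becomes a direct-map kernel virtual address by adding PAGE_OFFSET, and pfn_to_kaddr() is just __va() applied to pfn << PAGE_SHIFT. A minimal userspace sketch of that arithmetic, assuming illustrative constants (the real PAGE_OFFSET and PAGE_SHIFT are configuration dependent, not the values used here):

#include <stdio.h>

/* Illustrative values only; the kernel's PAGE_OFFSET depends on config. */
#define PAGE_SHIFT   12UL
#define PAGE_OFFSET  0xffff888000000000UL

/* Model of the x86 __va()/__pa() pair and of pfn_to_kaddr(). */
#define __va(x)            ((void *)((unsigned long)(x) + PAGE_OFFSET))
#define __pa(x)            ((unsigned long)(x) - PAGE_OFFSET)
#define pfn_to_kaddr(pfn)  __va((pfn) << PAGE_SHIFT)

int main(void)
{
        unsigned long phys = 0x1234000UL;   /* a made-up physical address */
        void *virt = __va(phys);            /* its direct-map alias       */

        printf("phys 0x%lx -> virt %p\n", phys, virt);
        printf("round trip __pa(virt) = 0x%lx\n", __pa(virt));
        printf("pfn 0x1234 -> %p\n", pfn_to_kaddr(0x1234UL));
        return 0;
}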
|
/linux/arch/loongarch/include/asm/ |
H A D | page.h |
    65  * __pa()/__va() should be used only during mem init.
    68  #define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
    70  #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
    81  #define page_to_virt(page) __va(page_to_phys(page))
    91  (__kfence_pool == NULL) ? __va(page_to_phys(page)) : page_address(page); \
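LoongArch folds a PHYS_OFFSET into the same translation, so the direct map can cover RAM that does not start at physical address zero: virt = phys + PAGE_OFFSET - PHYS_OFFSET. A tiny sketch with invented offsets (not the real LoongArch values) showing why the subtraction matters:

#include <stdio.h>

/* Invented values purely for illustration. */
#define PAGE_OFFSET  0x9000000000000000UL
#define PHYS_OFFSET  0x0000000010000000UL   /* pretend RAM starts at 256 MiB */

/* Same shape as the LoongArch __va() above. */
#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))

int main(void)
{
        /* The first byte of RAM maps to the very start of the direct map. */
        printf("__va(PHYS_OFFSET)         = %p\n", __va(PHYS_OFFSET));
        printf("__va(PHYS_OFFSET + 4 KiB) = %p\n", __va(PHYS_OFFSET + 0x1000UL));
        return 0;
}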
|
/linux/arch/m68k/include/asm/ |
H A D | page_no.h |
    20  #define __va(paddr) ((void *)((unsigned long)(paddr)))
    29  return __va(pfn << PAGE_SHIFT);   in pfn_to_virt()
    33  #define page_to_virt(page) __va(((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET))
|
H A D | motorola_pgtable.h |
   100  #define __pte_page(pte) ((unsigned long)__va(pte_val(pte) & PAGE_MASK))
   101  #define pmd_page_vaddr(pmd) ((unsigned long)__va(pmd_val(pmd) & _TABLE_MASK))
   102  #define pud_pgtable(pud) ((pmd_t *)__va(pud_val(pud) & _TABLE_MASK))
   110  #define pte_page(pte) virt_to_page(__va(pte_val(pte)))
   132  #define pud_page(pud) (mem_map + ((unsigned long)(__va(pud_val(pud)) - PAGE_OFFSET) >> PAGE_SHIFT))
|
H A D | page_mm.h |
    81  static inline void *__va(unsigned long paddr)   in __va()
   105  static inline void *__va(unsigned long x)   in __va()
   130  return __va(pfn << PAGE_SHIFT);   in pfn_to_virt()
|
/linux/arch/powerpc/include/asm/ |
H A D | page.h |
   116  * On Book-E parts we need __va to parse the device tree and we can't
   135  * existing equation for the __va()/__pa() translations:
   137  * __va(x) = (x) - PHYSICAL_START + KERNELBASE
   153  * Now __va(1MB) = (0x100000) - (0x4000000) + 0xc0000000
   161  * __va(x) = (x) - [ PHYSICAL_START - Effective KERNELBASE ]
   169  * To make the cost of __va() / __pa() more light weight, we introduce
   178  * __va(x) = x - PHYSICAL_START + Effective KERNELBASE
   186  * the other definitions for __va & __pa.
   189  #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
   201  #define __va(
   214  #define __va(
   [all...]
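The powerpc comment block spells out the relocatable-kernel arithmetic: __va(x) = x - PHYSICAL_START + Effective KERNELBASE, with the two constants folded into VIRT_PHYS_OFFSET so the translation stays a single addition. A short standalone check that reproduces the worked example from that comment (kernel loaded at 64 MiB, KERNELBASE at 0xc0000000; the numbers are taken from the comment, not from a real configuration):

#include <stdio.h>

int main(void)
{
        /* Numbers from the comment's example, not from a real config. */
        unsigned long physical_start   = 0x04000000UL;  /* kernel loaded at 64 MiB */
        unsigned long kernelbase       = 0xc0000000UL;
        unsigned long virt_phys_offset = kernelbase - physical_start;

        unsigned long x = 0x00100000UL;                 /* 1 MiB physical */

        /* __va(x) = x - PHYSICAL_START + KERNELBASE = x + VIRT_PHYS_OFFSET */
        printf("__va(1MB) = 0x%lx\n", x + virt_phys_offset);  /* prints 0xbc100000 */
        return 0;
}

|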
H A D | sections.h |
    68  return start < (unsigned long)__va(real_end) &&   in overlaps_interrupt_vector_text()
    69  (unsigned long)__va(real_start) < end;   in overlaps_interrupt_vector_text()
|
/linux/arch/loongarch/mm/ |
H A D | kasan_init.c |
   124  memcpy(__va(pte_phys), kasan_early_shadow_pte, sizeof(kasan_early_shadow_pte));   in kasan_pte_offset()
   125  pmd_populate_kernel(NULL, pmdp, (pte_t *)__va(pte_phys));   in kasan_pte_offset()
   137  memcpy(__va(pmd_phys), kasan_early_shadow_pmd, sizeof(kasan_early_shadow_pmd));   in kasan_pmd_offset()
   138  pud_populate(&init_mm, pudp, (pmd_t *)__va(pmd_phys));   in kasan_pmd_offset()
   150  memcpy(__va(pud_phys), kasan_early_shadow_pud, sizeof(kasan_early_shadow_pud));   in kasan_pud_offset()
   151  p4d_populate(&init_mm, p4dp, (pud_t *)__va(pud_phys));   in kasan_pud_offset()
   163  memcpy(__va(p4d_phys), kasan_early_shadow_p4d, sizeof(kasan_early_shadow_p4d));   in kasan_p4d_offset()
   164  pgd_populate(&init_mm, pgdp, (p4d_t *)__va(p4d_phys));   in kasan_p4d_offset()
|
/linux/arch/microblaze/include/asm/ |
H A D | page.h |
   100  # define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT)
   119  # define __va(x) ((void *)__phys_to_virt((unsigned long)(x)))
   128  return __va(pfn_to_phys((pfn)));   in pfn_to_virt()
|
/linux/arch/riscv/mm/ |
H A D | init.c |
   256  * In 64-bit, any use of __va/__pa before this point is wrong as we   in setup_bootmem()
   424  return (pte_t *) __va(pa);   in get_pte_virt_late()
   511  return (pmd_t *) __va(pa);   in get_pmd_virt_late()
   575  return (pud_t *)__va(pa);   in get_pud_virt_late()
   613  return (p4d_t *)__va(pa);   in get_p4d_virt_late()
   691  #define alloc_pgd_next(__va) (pgtable_l5_enabled ? \
   692  pt_ops.alloc_p4d(__va) : (pgtable_l4_enabled ? \
   693  pt_ops.alloc_pud(__va) : pt_ops.alloc_pmd(__va)))
   697  #define create_pgd_next_mapping(__nextp, __va, __p
   711  alloc_pgd_next(__va)
   713  create_pgd_next_mapping(__nextp,__va,__pa,__sz,__prot)
   716  create_p4d_mapping(__pmdp,__va,__pa,__sz,__prot)
   717  create_pud_mapping(__pmdp,__va,__pa,__sz,__prot)
   718  create_pmd_mapping(__pmdp,__va,__pa,__sz,__prot)
   [all...]
|
H A D | kasan_init.c |
    45  memset(__va(phys_addr), KASAN_SHADOW_INIT, PAGE_SIZE);   in kasan_populate_pte()
    71  memset(__va(phys_addr), KASAN_SHADOW_INIT, PMD_SIZE);   in kasan_populate_pmd()
   102  memset(__va(phys_addr), KASAN_SHADOW_INIT, PUD_SIZE);   in kasan_populate_pud()
   133  memset(__va(phys_addr), KASAN_SHADOW_INIT, P4D_SIZE);   in kasan_populate_p4d()
   156  memset(__va(phys_addr), KASAN_SHADOW_INIT, PGDIR_SIZE);   in kasan_populate_pgd()
   512  void *start = (void *)__va(p_start);   in kasan_init()
   513  void *end = (void *)__va(p_end);   in kasan_init()
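The KASAN hits above all follow one pattern: a shadow or page-table page is allocated by physical address, and __va() supplies the CPU-addressable alias that memset()/memcpy() need before that physical address is written into the parent entry. A standalone sketch of that idea, with a fake physical allocator standing in for memblock (names, sizes, and the poison value are invented, not the kernel's):

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE   4096UL
#define SHADOW_INIT 0xAA            /* stand-in for KASAN_SHADOW_INIT */

/* Fake "RAM": physical addresses are simply offsets into this array. */
static unsigned char ram[16 * 4096];
static unsigned long next_free;

/* Fake memblock-style allocator returning a physical address. */
static unsigned long alloc_page_phys(void)
{
        unsigned long pa = next_free;
        next_free += PAGE_SIZE;
        return pa;
}

/* Fake __va(): physical offset -> CPU-usable pointer. */
static void *__va(unsigned long pa)
{
        return &ram[pa];
}

int main(void)
{
        unsigned long parent_entry;            /* parent slot stores a phys addr      */
        unsigned long pa = alloc_page_phys();  /* new shadow/table page, by phys addr */

        memset(__va(pa), SHADOW_INIT, PAGE_SIZE);  /* initialise via the virtual alias */
        parent_entry = pa;                         /* ... then link it in by phys addr */

        printf("entry -> phys 0x%lx, first byte 0x%02x\n",
               parent_entry, *(unsigned char *)__va(parent_entry));
        return 0;
}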
|
/linux/arch/arm/mm/ |
H A D | dma-mapping-nommu.c |
    19  dmac_map_area(__va(paddr), size, dir);   in arch_sync_dma_for_device()
    32  dmac_unmap_area(__va(paddr), size, dir);   in arch_sync_dma_for_cpu()
|
/linux/tools/include/linux/ |
H A D | mm.h |
    17  #define __va(x) ((void *)((unsigned long)(x)))
    25  return __va(address);   in phys_to_virt()
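The copy under tools/include/ defines __va() as an identity cast, which is what lets kernel code that calls __va()/phys_to_virt() be built and exercised as ordinary userspace test programs: there, a "physical" address is already a usable pointer value. A sketch of that assumption (the surrounding test harness is hypothetical; only the identity definition mirrors the entry above):

#include <assert.h>
#include <stdio.h>

/* Identity translation, as in tools/include/linux/mm.h. */
#define __va(x) ((void *)((unsigned long)(x)))

static inline void *phys_to_virt(unsigned long address)
{
        return __va(address);
}

int main(void)
{
        int object = 42;
        unsigned long fake_phys = (unsigned long)&object;

        /* With the identity mapping the round trip is a no-op. */
        assert(phys_to_virt(fake_phys) == &object);
        printf("value = %d\n", *(int *)phys_to_virt(fake_phys));
        return 0;
}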
|
/linux/arch/x86/include/asm/uv/ |
H A D | uv_hub.h |
   379  * Note: use the standard __pa() & __va() macros for converting
   538  return __va(((unsigned long)pnode << m_val) | offset);   in uv_pnode_offset_to_vaddr()
   544  return __va((unsigned long)offset);   in uv_pnode_offset_to_vaddr()
   547  return __va(base << UV_GAM_RANGE_SHFT | offset);   in uv_pnode_offset_to_vaddr()
   565  return __va(UV_GLOBAL_MMR32_BASE |   in uv_global_mmr32_address()
   585  return __va(UV_GLOBAL_MMR64_BASE |   in uv_global_mmr64_address()
   615  return __va(UV_LOCAL_MMR_BASE | offset);   in uv_local_mmr_address()
|
/linux/arch/x86/realmode/ |
H A D | init.c |
   140  __va(real_mode_header->trampoline_header);   in setup_real_mode()
   163  trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);   in setup_real_mode()
   202  (unsigned long) __va(real_mode_header->text_start);   in set_real_mode_permissions()
|
/linux/arch/sparc/include/asm/ |
H A D | page_64.h |
   145  #define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
   147  #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
   154  #define phys_to_virt __va
|
/linux/arch/x86/include/asm/numachip/ |
H A D | numachip_csr.h |
    41  return __va(NUMACHIP_LCSR_BASE | (1UL << 15) |   in lcsr_address()
    69  return (void __iomem *)__va(NUMACHIP2_LCSR_BASE |   in numachip2_lcsr_address()
|
/linux/arch/parisc/kernel/ |
H A D | setup.c |
    54  strscpy(boot_command_line, (char *)__va(boot_args[1]),   in setup_cmdline()
    74  initrd_start = (unsigned long)__va(boot_args[2]);   in setup_cmdline()
    75  initrd_end = (unsigned long)__va(boot_args[3]);   in setup_cmdline()
|
/linux/arch/powerpc/platforms/powernv/ |
H A D | opal-fadump.c |
   193  opal_fdm = __va(fadump_conf->kernel_metadata);   in opal_fadump_init_mem_struct()
   240  opal_fdm = __va(fadump_conf->kernel_metadata);   in opal_fadump_setup_metadata()
   382  (u64)__va(be64_to_cpu(opal_cpu_metadata->region[0].dest));   in is_opal_fadump_cpu_data_valid()
   431  bufp = __va(fadump_conf->cpu_state_dest_vaddr);   in opal_fadump_build_cpu_notes()
   529  fdh = __va(fadump_conf->fadumphdr_addr);   in opal_fadump_process()
   695  opal_fdm_active = __va(addr);   in opal_fadump_dt_scan()
   712  opal_cpu_metadata = __va(addr);   in opal_fadump_dt_scan()
|
/linux/arch/s390/include/asm/ |
H A D | dma-types.h |
    45  return __va((__force unsigned long)addr);   in dma32_to_virt()
    80  return __va((__force unsigned long)addr);   in dma64_to_virt()
|
/linux/arch/nios2/include/asm/ |
H A D | page.h |
    76  # define __va(x) \
    82  # define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
|
/linux/arch/s390/mm/ |
H A D | fault.c |
    80  unsigned long entry, *table = __va(asce & _ASCE_ORIGIN);   in dump_pagetable()
    91  table = __va(entry & _REGION_ENTRY_ORIGIN);   in dump_pagetable()
   100  table = __va(entry & _REGION_ENTRY_ORIGIN);   in dump_pagetable()
   109  table = __va(entry & _REGION_ENTRY_ORIGIN);   in dump_pagetable()
   118  table = __va(entry & _SEGMENT_ENTRY_ORIGIN);   in dump_pagetable()
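dump_pagetable() shows the other common use of __va(): each table entry stores the physical origin of the next level, so the walker masks out the origin bits and converts them with __va() before dereferencing the next table. A simplified two-level sketch of that walk, with invented masks, shift amounts, and a fake direct map (none of the constants are the real s390 ones):

#include <stdio.h>

#define ORIGIN_MASK  (~0xfffUL)     /* invented: low bits hold flags   */
#define INDEX_MASK   0x1ffUL        /* invented: 9-bit index per level */

/* Fake direct map: "physical" addresses are offsets into this array. */
static unsigned long ram[2 * 512];
static void *__va(unsigned long pa) { return (char *)ram + pa; }

int main(void)
{
        unsigned long top_phys  = 0;                            /* level-1 table */
        unsigned long leaf_phys = 512 * sizeof(unsigned long);  /* level-2 table */
        unsigned long addr = 0x18000001000UL;                   /* made-up address */

        /* Wire index 3 of the top table to the leaf table, flags in bit 0. */
        ((unsigned long *)__va(top_phys))[3] = leaf_phys | 0x1;
        ((unsigned long *)__va(leaf_phys))[1] = 0xdeadUL;

        /* Walk: pick the entry, strip the flags, __va() the origin, descend. */
        unsigned long *table = __va(top_phys);
        unsigned long entry  = table[(addr >> 39) & INDEX_MASK];
        table = __va(entry & ORIGIN_MASK);
        printf("leaf entry = 0x%lx\n", table[(addr >> 12) & INDEX_MASK]);
        return 0;
}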
|
/linux/arch/riscv/include/asm/ |
H A D | page.h |
   182  #define __va(x) ((void *)__pa_to_va_nodebug((phys_addr_t)(x)))
   188  #define pfn_to_virt(pfn) (__va(pfn_to_phys(pfn)))
   199  return __va(pfn << PAGE_SHIFT);   in pfn_to_kaddr()
|
/linux/arch/openrisc/mm/ |
H A D | init.c |
    83  v = (u32) __va(p);   in map_ram()
   156  unsigned long *dtlb_vector = __va(0x900);   in paging_init()
   157  unsigned long *itlb_vector = __va(0xa00);   in paging_init()
|