/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2017 XiaojingZhu <zhuxiaoj@ict.ac.cn>
 */

#ifndef _ASM_RISCV_PAGE_H
#define _ASM_RISCV_PAGE_H

#include <linux/pfn.h>
#include <linux/const.h>

#include <vdso/page.h>

#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
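/*
 * A note on sizes (informational): since HPAGE_SHIFT tracks PMD_SHIFT,
 * a hugetlb page is typically 2 MiB on rv64 (Sv39/Sv48/Sv57) and 4 MiB
 * on rv32 (Sv32), i.e. the region covered by one entry at the next
 * page-table level.
 */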

/*
 * PAGE_OFFSET -- the first address of the first page of memory.
 * When the MMU is not in use, this corresponds to the first free page
 * in physical memory (aligned on a page boundary).
 */
#ifdef CONFIG_MMU
#ifdef CONFIG_64BIT
#define PAGE_OFFSET_L5	_AC(0xff60000000000000, UL)
#define PAGE_OFFSET_L4	_AC(0xffffaf8000000000, UL)
#define PAGE_OFFSET_L3	_AC(0xffffffd600000000, UL)
#ifdef CONFIG_XIP_KERNEL
#define PAGE_OFFSET	PAGE_OFFSET_L3
#else
#define PAGE_OFFSET	kernel_map.page_offset
#endif /* CONFIG_XIP_KERNEL */
#else
#define PAGE_OFFSET	_AC(0xc0000000, UL)
#endif /* CONFIG_64BIT */
#else
#define PAGE_OFFSET	((unsigned long)phys_ram_base)
#endif /* CONFIG_MMU */
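/*
 * On 64-bit MMU kernels (non-XIP), PAGE_OFFSET is a runtime value:
 * early boot probes which paging modes the hardware supports and sets
 * kernel_map.page_offset to PAGE_OFFSET_L5, _L4 or _L3 for Sv57, Sv48
 * or Sv39 respectively, so a single image runs under all three modes.
 */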

#ifndef __ASSEMBLER__

#ifdef CONFIG_RISCV_ISA_ZICBOZ
void clear_page(void *page);
#else
#define clear_page(pgaddr)	memset((pgaddr), 0, PAGE_SIZE)
#endif
#define copy_page(to, from)	memcpy((to), (from), PAGE_SIZE)

#define copy_user_page(vto, vfrom, vaddr, topg)	copy_page(vto, vfrom)
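/*
 * With Zicboz, clear_page() is an out-of-line routine built around the
 * cbo.zero cache-block zeroing instruction; otherwise it is a plain
 * memset(). copy_user_page() needs no extra cache maintenance here, as
 * the port assumes data caches do not alias.
 */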

/*
 * Wrap the raw values in structs so that the C type checker can tell
 * page-table entries, directory entries and protection bits apart.
 */

/* Page Global Directory entry */
typedef struct {
	unsigned long pgd;
} pgd_t;

/* Page Table entry */
typedef struct {
	unsigned long pte;
} pte_t;

typedef struct {
	unsigned long pgprot;
} pgprot_t;

typedef struct page *pgtable_t;

#define pte_val(x)	((x).pte)
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pte(x)	((pte_t) { (x) })
#define __pgd(x)	((pgd_t) { (x) })
#define __pgprot(x)	((pgprot_t) { (x) })
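/*
 * Illustrative use of the wrappers (example only):
 *
 *	pte_t pte = __pte(raw);			// wrap a raw value
 *	unsigned long v = pte_val(pte);		// unwrap it again
 *
 * Passing a bare unsigned long where a pte_t is expected is a compile
 * error, which catches mixed-up page-table arguments early.
 */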

#ifdef CONFIG_64BIT
#define PTE_FMT "%016lx"
#else
#define PTE_FMT "%08lx"
#endif

#if defined(CONFIG_64BIT) && defined(CONFIG_MMU)
/*
 * We override this value because its generic definition uses __pa()
 * too early in the boot process, before kernel_map.va_pa_offset is
 * initialized.
 */
#define MIN_MEMBLOCK_ADDR	0
#endif

#define ARCH_PFN_OFFSET	(PFN_DOWN((unsigned long)phys_ram_base))
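/*
 * ARCH_PFN_OFFSET is the page frame number of the first page of RAM.
 * The generic memory model uses it (with FLATMEM) to index mem_map,
 * so pfn 0 need not correspond to physical address 0.
 */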

struct kernel_mapping {
	unsigned long virt_addr;
	unsigned long virt_offset;
	uintptr_t phys_addr;
	uintptr_t size;
	/* Offset between linear mapping virtual address and kernel load address */
	unsigned long va_pa_offset;
	/* Offset between kernel mapping virtual address and kernel load address */
#ifdef CONFIG_XIP_KERNEL
	unsigned long va_kernel_xip_text_pa_offset;
	unsigned long va_kernel_xip_data_pa_offset;
	uintptr_t xiprom;
	uintptr_t xiprom_sz;
#else
	unsigned long page_offset;
	unsigned long va_kernel_pa_offset;
#endif
};

extern struct kernel_mapping kernel_map;
extern phys_addr_t phys_ram_base;
extern unsigned long vmemmap_start_pfn;
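/*
 * kernel_map describes where the kernel image itself is mapped; on
 * 64-bit the image lives in its own region near the top of the address
 * space, separate from the linear map of RAM. phys_ram_base records
 * the physical address where usable RAM starts.
 */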

#define is_kernel_mapping(x)	\
	((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size))

#define is_linear_mapping(x)	\
	((x) >= PAGE_OFFSET && (!IS_ENABLED(CONFIG_64BIT) || (x) < PAGE_OFFSET + KERN_VIRT_SIZE))
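/*
 * On 32-bit there is no upper-bound check because the kernel image
 * lives inside the linear map, so everything above PAGE_OFFSET counts
 * as linearly mapped; on 64-bit the linear map is a KERN_VIRT_SIZE
 * window and the kernel image sits outside it.
 */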

#ifndef CONFIG_DEBUG_VIRTUAL
#define linear_mapping_pa_to_va(x)	((void *)((unsigned long)(x) + kernel_map.va_pa_offset))
#else
void *linear_mapping_pa_to_va(unsigned long x);
#endif

#ifdef CONFIG_XIP_KERNEL
#define kernel_mapping_pa_to_va(y)	({					\
	unsigned long _y = (unsigned long)(y);					\
	(_y < phys_ram_base) ?							\
		(void *)(_y + kernel_map.va_kernel_xip_text_pa_offset) :	\
		(void *)(_y + kernel_map.va_kernel_xip_data_pa_offset);	\
	})
#else
#define kernel_mapping_pa_to_va(y)	((void *)((unsigned long)(y) + kernel_map.va_kernel_pa_offset))
#endif
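/*
 * An XIP image is split: .text executes in place from ROM/flash while
 * writable data is copied to RAM. Physical addresses below
 * phys_ram_base are therefore taken to be ROM (text) and translated
 * with the text offset; everything else uses the data offset.
 */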

#define __pa_to_va_nodebug(x)		linear_mapping_pa_to_va(x)

#ifndef CONFIG_DEBUG_VIRTUAL
#define linear_mapping_va_to_pa(x)	((unsigned long)(x) - kernel_map.va_pa_offset)
#else
phys_addr_t linear_mapping_va_to_pa(unsigned long x);
#endif

#ifdef CONFIG_XIP_KERNEL
#define kernel_mapping_va_to_pa(y)	({					\
	unsigned long _y = (unsigned long)(y);					\
	(_y < kernel_map.virt_addr + kernel_map.xiprom_sz) ?			\
		(_y - kernel_map.va_kernel_xip_text_pa_offset) :		\
		(_y - kernel_map.va_kernel_xip_data_pa_offset);			\
	})
#else
#define kernel_mapping_va_to_pa(y)	((unsigned long)(y) - kernel_map.va_kernel_pa_offset)
#endif

#define __va_to_pa_nodebug(x)	({					\
	unsigned long _x = (x);						\
	is_linear_mapping(_x) ?						\
		linear_mapping_va_to_pa(_x) : kernel_mapping_va_to_pa(_x); \
	})
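/*
 * The va-to-pa direction must dispatch on the address: linear-map and
 * kernel-image addresses are translated with different offsets because
 * the image is not (on 64-bit) mapped at its linear-map alias.
 */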

#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(unsigned long x);
extern phys_addr_t __phys_addr_symbol(unsigned long x);
#else
#define __virt_to_phys(x)	__va_to_pa_nodebug(x)
#define __phys_addr_symbol(x)	__va_to_pa_nodebug(x)
#endif /* CONFIG_DEBUG_VIRTUAL */

#define __pa_symbol(x)	__phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
#define __pa(x)		__virt_to_phys((unsigned long)(x))
#define __va(x)		((void *)__pa_to_va_nodebug((phys_addr_t)(x)))
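/*
 * Round-trip sketch (addresses are examples only): with RAM starting
 * at phys_ram_base, __va(phys_ram_base) is PAGE_OFFSET and
 * __pa(__va(p)) == p for any RAM address p. Note the asymmetry:
 * __pa() copes with both linear-map and kernel-image addresses, while
 * __va() only ever produces linear-map addresses.
 */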

#define phys_to_pfn(phys)	(PFN_DOWN(phys))
#define pfn_to_phys(pfn)	(PFN_PHYS(pfn))

#define virt_to_pfn(vaddr)	(phys_to_pfn(__pa(vaddr)))
#define pfn_to_virt(pfn)	(__va(pfn_to_phys(pfn)))

#define virt_to_page(vaddr)	(pfn_to_page(virt_to_pfn(vaddr)))
#define page_to_virt(page)	(pfn_to_virt(page_to_pfn(page)))

#define sym_to_pfn(x)		__phys_to_pfn(__pa_symbol(x))

unsigned long kaslr_offset(void);

static __always_inline void *pfn_to_kaddr(unsigned long pfn)
{
	return __va(pfn << PAGE_SHIFT);
}
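/*
 * pfn_to_kaddr() yields the linear-map address of a page frame, e.g.
 * (illustrative) void *p = pfn_to_kaddr(page_to_pfn(page)); it is
 * only meaningful for pfns backed by the linear mapping.
 */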

#endif /* __ASSEMBLER__ */

#define virt_addr_valid(vaddr)	({					\
	unsigned long _addr = (unsigned long)(vaddr);			\
	_addr >= PAGE_OFFSET && pfn_valid(virt_to_pfn(_addr));		\
	})
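/*
 * virt_addr_valid() checks that an address lies above PAGE_OFFSET and
 * translates to a page frame that actually exists; callers use it to
 * vet a candidate pointer before handing it to virt_to_page().
 */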
206
207 #define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC
208
209 #include <asm-generic/memory_model.h>
210 #include <asm-generic/getorder.h>
211
212 #endif /* _ASM_RISCV_PAGE_H */
213