/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_PGTABLE_H
#define _ASM_POWERPC_NOHASH_PGTABLE_H

#ifndef __ASSEMBLY__
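/*
 * Forward declaration, so that the 32-bit and 64-bit sub-headers
 * included below can reference pte_update() before it is defined.
 */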
static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
				     unsigned long clr, unsigned long set, int huge);
#endif

#if defined(CONFIG_PPC64)
#include <asm/nohash/64/pgtable.h>
#else
#include <asm/nohash/32/pgtable.h>
#endif

/*
 * _PAGE_CHG_MASK is the mask of bits that are preserved across
 * pgprot changes.
 */
#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)

/* Permission masks used for kernel mappings */
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)

#ifndef __ASSEMBLY__

extern int icache_44x_need_flush;

/*
 * PTE updates. This function is called whenever an existing
 * valid PTE is updated. This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide. In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
#ifndef pte_update
static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
				     unsigned long clr, unsigned long set, int huge)
{
	pte_basic_t old = pte_val(*p);
	pte_basic_t new = (old & ~(pte_basic_t)clr) | set;

	if (new == old)
		return old;

	*p = __pte(new);

	if (IS_ENABLED(CONFIG_44x) && !is_kernel_addr(addr) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;

	/* huge pages use the old page table lock */
	if (!huge)
		assert_pte_locked(mm, addr);

	return old;
}
#endif
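
/*
 * Illustrative sketch (hypothetical caller, not from this file):
 * atomically clear the write permission while marking the page
 * accessed, on a normal-sized page:
 *
 *	old = pte_update(mm, addr, ptep, _PAGE_WRITE, _PAGE_ACCESSED, 0);
 *
 * The return value is the PTE as it stood before the update, so the
 * caller can test which bits were previously set.
 */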

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	unsigned long old;

	old = pte_update(vma->vm_mm, addr, ptep, _PAGE_ACCESSED, 0, 0);

	return (old & _PAGE_ACCESSED) != 0;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG

#ifndef ptep_set_wrprotect
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
}
#endif
#define __HAVE_ARCH_PTEP_SET_WRPROTECT

static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 0));
}
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_update(mm, addr, ptep, ~0UL, 0, 0);
}

/* Set the dirty and/or accessed bits atomically in a linux PTE */
#ifndef __ptep_set_access_flags
static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
					   pte_t *ptep, pte_t entry,
					   unsigned long address,
					   int psize)
{
	unsigned long set = pte_val(entry) &
			    (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
	int huge = psize > mmu_virtual_psize ? 1 : 0;

	pte_update(vma->vm_mm, address, ptep, 0, set, huge);

	flush_tlb_page(vma, address);
}
#endif

/* Generic accessors to PTE bits */
#ifndef pte_mkwrite_novma
static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	/*
	 * write implies read, hence set both
	 */
	return __pte(pte_val(pte) | _PAGE_RW);
}
#endif

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

#ifndef pte_wrprotect
static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_WRITE);
}
#endif

#ifndef pte_mkexec
static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_EXEC);
}
#endif

#ifndef pte_write
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}
#endif
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline bool pte_hashpte(pte_t pte)	{ return false; }
static inline bool pte_ci(pte_t pte)		{ return pte_val(pte) & _PAGE_NO_CACHE; }
static inline bool pte_exec(pte_t pte)		{ return pte_val(pte) & _PAGE_EXEC; }

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

static inline bool pte_hw_valid(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

/*
 * Don't just check for any non zero bits in _PAGE_READ, since for book3e
 * and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in
 * _PAGE_READ. Need to explicitly match the _PAGE_BAP_UR bit in that case too.
 */
#ifndef pte_read
static inline bool pte_read(pte_t pte)
{
	return (pte_val(pte) & _PAGE_READ) == _PAGE_READ;
}
#endif
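
/*
 * Concretely (per the comment above): when _PAGE_READ spans both
 * _PAGE_BAP_SR and _PAGE_BAP_UR, a PTE carrying only _PAGE_BAP_SR
 * (e.g. via PAGE_KERNEL_X) would pass a simple non-zero test, so the
 * masked equality above requires every bit of _PAGE_READ to be present.
 */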

/*
 * We only find page table entries at the last level,
 * hence no need for other accessors.
 */
#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	/*
	 * A read-only access is controlled by the _PAGE_READ bit.
	 * We also have _PAGE_READ set for WRITE mappings.
	 */
	if (!pte_present(pte) || !pte_read(pte))
		return false;

	if (write && !pte_write(pte))
		return false;

	return true;
}
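
/*
 * Illustrative check (hypothetical caller): a write access is permitted
 * only when the PTE is present and both readable and writable:
 *
 *	if (pte_access_permitted(pte, true))
 *		... the mapping may be written through ...
 */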

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
		     pgprot_val(pgprot));
}

/* Generic modifiers for PTE bits */
static inline pte_t pte_exprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_EXEC);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

#ifndef pte_mkhuge
static inline pte_t pte_mkhuge(pte_t pte)
{
	return __pte(pte_val(pte));
}
#endif

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
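
/*
 * Note that _PAGE_CHG_MASK (defined near the top of this header) keeps
 * the PFN and the DIRTY, ACCESSED and SPECIAL bits intact across
 * pte_modify(), so a protection change never loses a page's
 * dirty/young state.
 */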

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE);
}

/*
 * This low level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now, but
 * I'm keeping it in one place rather than spread around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
	/*
	 * First case: 32-bit with 64-bit PTEs. Here we can just store,
	 * as long as we write the two halves in the right order with a
	 * barrier in between. In the percpu case, we also fall back to
	 * the simple update.
	 */
	if (IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_PTE_64BIT) && !percpu) {
		__asm__ __volatile__("\
			stw%X0 %2,%0\n\
			mbar\n\
			stw%X1 %L2,%1"
		: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
		: "r" (pte) : "memory");
		return;
	}
	/*
	 * Anything else just stores the PTE normally. That covers all
	 * 64-bit cases, and 32-bit non-hash with 32-bit PTEs.
	 */
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
	ptep->pte3 = ptep->pte2 = ptep->pte1 = ptep->pte = pte_val(pte);
#else
	*ptep = pte;
#endif

	/*
	 * With hardware tablewalk, a sync is needed to ensure that
	 * subsequent accesses see the PTE we just wrote. Unlike userspace
	 * mappings, we can't tolerate spurious faults, so make sure
	 * the new PTE will be seen the first time.
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3E_64) && is_kernel_addr(addr))
		mb();
}

/*
 * Macros to mark a page protection value as "uncacheable".
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE))

#define pgprot_cached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT))

#if _PAGE_WRITETHRU != 0
#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT | _PAGE_WRITETHRU))
#else
#define pgprot_cached_wthru(prot) pgprot_noncached(prot)
#endif

#define pgprot_cached_noncoherent(prot) \
		(__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))

#define pgprot_writecombine pgprot_noncached_wc
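
/*
 * Illustrative sketch (hypothetical driver code): mapping a device page
 * uncached and guarded through the kernel page tables, using
 * map_kernel_page() as declared at the end of this header:
 *
 *	err = map_kernel_page(va, pa, pgprot_noncached(PAGE_KERNEL));
 */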

#ifdef CONFIG_HUGETLB_PAGE
static inline int hugepd_ok(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
	return ((hpd_val(hpd) & _PMD_PAGE_MASK) == _PMD_PAGE_8M);
#else
	/* We clear the top bit to indicate hugepd */
	return (hpd_val(hpd) && (hpd_val(hpd) & PD_HUGE) == 0);
#endif
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

#define is_hugepd(hpd)		(hugepd_ok(hpd))
#endif

int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
void unmap_kernel_page(unsigned long va);

#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_NOHASH_PGTABLE_H */