/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * s390x MMU
 *
 * Copyright (c) 2017 Red Hat Inc
 *
 * Authors:
 *  David Hildenbrand <david@redhat.com>
 */

#include <libcflat.h>
#include <asm/pgtable.h>
#include <asm/arch_def.h>
#include <asm/barrier.h>
#include <vmalloc.h>
#include "mmu.h"

/*
 * The naming convention used here is the same as used in the Linux kernel;
 * this is the correspondence between the s390x architectural names and the
 * Linux ones:
 *
 * pgd - region 1 table entry
 * p4d - region 2 table entry
 * pud - region 3 table entry
 * pmd - segment table entry
 * pte - page table entry
 */

static pgd_t *table_root;

void configure_dat(int enable)
{
	uint64_t mask;

	if (enable)
		mask = extract_psw_mask() | PSW_MASK_DAT;
	else
		mask = extract_psw_mask() & ~PSW_MASK_DAT;

	load_psw_mask(mask);
}
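
/*
 * Illustrative usage (a sketch, not part of the original code): a test that
 * needs to touch real storage can temporarily switch DAT off around the
 * access:
 *
 *	configure_dat(0);
 *	... access real addresses ...
 *	configure_dat(1);
 *
 * Only PSW_MASK_DAT in the current PSW mask is toggled; the ASCE installed
 * by mmu_enable() below is left untouched.
 */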

static void mmu_enable(pgd_t *pgtable)
{
	/* the lowcore resides at absolute address 0 */
	struct lowcore *lc = NULL;
	const uint64_t asce = __pa(pgtable) | ASCE_DT_REGION1 |
			      REGION_TABLE_LENGTH;

	/* set primary asce */
	lctlg(1, asce);
	assert(stctg(1) == asce);

	/* enable dat (primary == 0 set as default) */
	configure_dat(1);

	/* we can now also use DAT unconditionally in our PGM handler */
	lc->pgm_new_psw.mask |= PSW_MASK_DAT;
}

/*
 * Get the pud (region 3) DAT table entry for the given address and root,
 * allocating it if necessary
 */
static inline pud_t *get_pud(pgd_t *pgtable, uintptr_t vaddr)
{
	pgd_t *pgd = pgd_offset(pgtable, vaddr);
	p4d_t *p4d = p4d_alloc(pgd, vaddr);
	pud_t *pud = pud_alloc(p4d, vaddr);

	return pud;
}

/*
 * Get the pmd (segment) DAT table entry for the given address and pud,
 * allocating it if necessary.
 * The pud must not be huge.
 */
static inline pmd_t *get_pmd(pud_t *pud, uintptr_t vaddr)
{
	pmd_t *pmd;

	assert(!pud_huge(*pud));
	pmd = pmd_alloc(pud, vaddr);
	return pmd;
}

/*
 * Get the pte (page) DAT table entry for the given address and pmd,
 * allocating it if necessary.
 * The pmd must not be large.
 */
static inline pte_t *get_pte(pmd_t *pmd, uintptr_t vaddr)
{
	pte_t *pte;

	assert(!pmd_large(*pmd));
	pte = pte_alloc(pmd, vaddr);
	return pte;
}

/*
 * Splits a large pmd (segment) DAT table entry into equivalent 4kB small
 * pages.
 * @pmd the pmd to split; it must be large.
 * @va the virtual address corresponding to this pmd.
 */
static void split_pmd(pmd_t *pmd, uintptr_t va)
{
	phys_addr_t pa = pmd_val(*pmd) & SEGMENT_ENTRY_SFAA;
	unsigned long i, prot;
	pte_t *pte;

	assert(pmd_large(*pmd));
	pte = alloc_pages(PAGE_TABLE_ORDER);
	prot = pmd_val(*pmd) & (SEGMENT_ENTRY_IEP | SEGMENT_ENTRY_P);
	for (i = 0; i < PAGE_TABLE_ENTRIES; i++)
		pte_val(pte[i]) = pa | PAGE_SIZE * i | prot;
	idte_pmdp(va, &pmd_val(*pmd));
	pmd_val(*pmd) = __pa(pte) | SEGMENT_ENTRY_TT_SEGMENT;
}

/*
 * Splits a huge pud (region 3) DAT table entry into equivalent 1MB large
 * pages.
 * @pud the pud to split; it must be huge.
 * @va the virtual address corresponding to this pud.
 */
static void split_pud(pud_t *pud, uintptr_t va)
{
	phys_addr_t pa = pud_val(*pud) & REGION3_ENTRY_RFAA;
	unsigned long i, prot;
	pmd_t *pmd;

	assert(pud_huge(*pud));
	pmd = alloc_pages(SEGMENT_TABLE_ORDER);
	prot = pud_val(*pud) & (REGION3_ENTRY_IEP | REGION_ENTRY_P);
	for (i = 0; i < SEGMENT_TABLE_ENTRIES; i++)
		pmd_val(pmd[i]) = pa | SZ_1M * i | prot | SEGMENT_ENTRY_FC | SEGMENT_ENTRY_TT_SEGMENT;
	idte_pudp(va, &pud_val(*pud));
	pud_val(*pud) = __pa(pmd) | REGION_ENTRY_TT_REGION3 | REGION_TABLE_LENGTH;
}

/*
 * Get the DAT table entry of the given level for the given address,
 * allocating any missing lower-level tables and splitting existing larger
 * mappings as needed.
 */
void *get_dat_entry(pgd_t *pgtable, void *vaddr, enum pgt_level level)
{
	uintptr_t va = (uintptr_t)vaddr;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	assert(level && (level <= 5));
	pgd = pgd_offset(pgtable, va);
	if (level == pgtable_level_pgd)
		return pgd;
	p4d = p4d_alloc(pgd, va);
	if (level == pgtable_level_p4d)
		return p4d;
	pud = pud_alloc(p4d, va);
	if (level == pgtable_level_pud)
		return pud;
	if (!pud_none(*pud) && pud_huge(*pud))
		split_pud(pud, va);
	pmd = get_pmd(pud, va);
	if (level == pgtable_level_pmd)
		return pmd;
	if (!pmd_none(*pmd) && pmd_large(*pmd))
		split_pmd(pmd, va);
	return get_pte(pmd, va);
}
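
/*
 * Example (a sketch; assumes DAT has already been set up, e.g. via
 * setup_mmu() or setup_vm()): fetching the segment (pmd) table entry that
 * maps a given address, splitting an existing huge pud on the way down:
 *
 *	pmd_t *pmd = get_dat_entry(table_root, vaddr, pgtable_level_pmd);
 *
 * The returned pointer can then be inspected directly, e.g. with
 * pmd_large(*pmd) to check whether the mapping uses a 1MB page.
 */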

void *split_page(pgd_t *pgtable, void *vaddr, enum pgt_level level)
{
	assert((level >= 3) && (level <= 5));
	return get_dat_entry(pgtable ? pgtable : table_root, vaddr, level);
}

phys_addr_t virt_to_pte_phys(pgd_t *pgtable, void *vaddr)
{
	uintptr_t va = (uintptr_t)vaddr;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pud = get_pud(pgtable, va);
	if (pud_huge(*pud))
		return (pud_val(*pud) & REGION3_ENTRY_RFAA) | (va & ~REGION3_ENTRY_RFAA);
	pmd = get_pmd(pud, va);
	if (pmd_large(*pmd))
		return (pmd_val(*pmd) & SEGMENT_ENTRY_SFAA) | (va & ~SEGMENT_ENTRY_SFAA);
	pte = get_pte(pmd, va);
	return (pte_val(*pte) & PAGE_MASK) | (va & ~PAGE_MASK);
}
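
/*
 * Example (a sketch): after installing a mapping, virt_to_pte_phys() can be
 * used to check that the translation is the expected one:
 *
 *	install_page(root, phys, vaddr);
 *	assert(virt_to_pte_phys(root, vaddr) == phys);
 *
 * where "root" stands for the page table root in use (for instance the
 * value returned by setup_mmu()).
 */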

/*
 * Get the DAT table entry of the given level for the given address,
 * splitting larger mappings if necessary. If the entry was valid, it is
 * invalidated. Returns a pointer to the entry.
 * @pgtable root of the page tables
 * @vaddr virtual address
 * @level level of the desired DAT table entry: 1 (pgd), 2 (p4d),
 * 3 (2GB pud), 4 (1MB pmd) or 5 (4kB pte)
 * @old if not NULL, will be written with the old value of the DAT table
 * entry before invalidation
 */
static void *dat_get_and_invalidate(pgd_t *pgtable, void *vaddr, enum pgt_level level, unsigned long *old)
{
	unsigned long va = (unsigned long)vaddr;
	void *ptr;

	ptr = get_dat_entry(pgtable, vaddr, level);
	if (old)
		*old = *(unsigned long *)ptr;
	if ((level == pgtable_level_pgd) && !pgd_none(*(pgd_t *)ptr))
		idte_pgdp(va, ptr);
	else if ((level == pgtable_level_p4d) && !p4d_none(*(p4d_t *)ptr))
		idte_p4dp(va, ptr);
	else if ((level == pgtable_level_pud) && !pud_none(*(pud_t *)ptr))
		idte_pudp(va, ptr);
	else if ((level == pgtable_level_pmd) && !pmd_none(*(pmd_t *)ptr))
		idte_pmdp(va, ptr);
	else if (!pte_none(*(pte_t *)ptr))
		ipte(va, ptr);
	return ptr;
}

static void cleanup_pmd(pmd_t *pmd)
{
	/* was invalid or large, nothing to do */
	if (pmd_none(*pmd) || pmd_large(*pmd))
		return;
	/* was not large, free the corresponding page table */
	free_pages((void *)(pmd_val(*pmd) & PAGE_MASK));
}

static void cleanup_pud(pud_t *pud)
{
	unsigned long i;
	pmd_t *pmd;

	/* was invalid or large, nothing to do */
	if (pud_none(*pud) || pud_huge(*pud))
		return;
	/* recursively clean up all pmds if needed */
	pmd = (pmd_t *)(pud_val(*pud) & PAGE_MASK);
	for (i = 0; i < SEGMENT_TABLE_ENTRIES; i++)
		cleanup_pmd(pmd + i);
	/* free the corresponding segment table */
	free_pages(pmd);
}

/*
 * Set the DAT entry for the given level of the given virtual address. If a
 * mapping already existed, it is overwritten. If an existing mapping with
 * smaller pages existed, all the lower tables are freed.
 * Returns the pointer to the DAT table entry.
 * @pgtable root of the page tables
 * @val the new value for the DAT table entry
 * @vaddr the virtual address
 * @level 3 for pud (region 3), 4 for pmd (segment) and 5 for pte (pages)
 */
static void *set_dat_entry(pgd_t *pgtable, unsigned long val, void *vaddr, enum pgt_level level)
{
	unsigned long old, *res;

	res = dat_get_and_invalidate(pgtable, vaddr, level, &old);
	if (level == pgtable_level_pmd)
		cleanup_pmd((pmd_t *)&old);
	if (level == pgtable_level_pud)
		cleanup_pud((pud_t *)&old);
	*res = val;
	return res;
}

pteval_t *install_page(pgd_t *pgtable, phys_addr_t phys, void *vaddr)
{
	assert(IS_ALIGNED(phys, PAGE_SIZE));
	assert(IS_ALIGNED((uintptr_t)vaddr, PAGE_SIZE));
	return set_dat_entry(pgtable, phys, vaddr, pgtable_level_pte);
}

pmdval_t *install_large_page(pgd_t *pgtable, phys_addr_t phys, void *vaddr)
{
	assert(IS_ALIGNED(phys, SZ_1M));
	assert(IS_ALIGNED((uintptr_t)vaddr, SZ_1M));
	return set_dat_entry(pgtable, phys | SEGMENT_ENTRY_FC, vaddr, pgtable_level_pmd);
}

pudval_t *install_huge_page(pgd_t *pgtable, phys_addr_t phys, void *vaddr)
{
	assert(IS_ALIGNED(phys, SZ_2G));
	assert(IS_ALIGNED((uintptr_t)vaddr, SZ_2G));
	return set_dat_entry(pgtable, phys | REGION3_ENTRY_FC | REGION_ENTRY_TT_REGION3, vaddr, pgtable_level_pud);
}
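
/*
 * Sketch of how the three install variants relate (illustration only): the
 * same physical range can be mapped at any of the three granularities, as
 * long as both the physical and the virtual address are aligned to the
 * chosen size:
 *
 *	install_page(root, phys, vaddr);	mapped via a 4kB pte
 *	install_large_page(root, phys, vaddr);	mapped via a 1MB pmd
 *	install_huge_page(root, phys, vaddr);	mapped via a 2GB pud
 *
 * Overwriting an existing finer-grained mapping with a coarser one frees
 * the lower-level tables that are no longer needed (see set_dat_entry()).
 */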

void protect_dat_entry(void *vaddr, unsigned long prot, enum pgt_level level)
{
	unsigned long old, *ptr;

	ptr = dat_get_and_invalidate(table_root, vaddr, level, &old);
	*ptr = old | prot;
}

void unprotect_dat_entry(void *vaddr, unsigned long prot, enum pgt_level level)
{
	unsigned long old, *ptr;

	ptr = dat_get_and_invalidate(table_root, vaddr, level, &old);
	*ptr = old & ~prot;
}

void protect_range(void *start, unsigned long len, unsigned long prot)
{
	uintptr_t curr = (uintptr_t)start & PAGE_MASK;

	len &= PAGE_MASK;
	for (; len; len -= PAGE_SIZE, curr += PAGE_SIZE)
		protect_dat_entry((void *)curr, prot, pgtable_level_pte);
}

void unprotect_range(void *start, unsigned long len, unsigned long prot)
{
	uintptr_t curr = (uintptr_t)start & PAGE_MASK;

	len &= PAGE_MASK;
	for (; len; len -= PAGE_SIZE, curr += PAGE_SIZE)
		unprotect_dat_entry((void *)curr, prot, pgtable_level_pte);
}
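
/*
 * Illustrative sketch only; PAGE_ENTRY_P is assumed to be the DAT
 * protection bit from asm/pgtable.h, and expect_pgm_int() /
 * check_pgm_int_code() are assumed from the interrupt library.
 * Write-protecting a buffer and expecting a protection exception on the
 * next store could then look roughly like this:
 *
 *	protect_range(buf, PAGE_SIZE, PAGE_ENTRY_P);
 *	expect_pgm_int();
 *	*(volatile int *)buf = 42;
 *	check_pgm_int_code(PGM_INT_CODE_PROTECTION);
 *	unprotect_range(buf, PAGE_SIZE, PAGE_ENTRY_P);
 */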

static void setup_identity(pgd_t *pgtable, phys_addr_t start_addr,
			   phys_addr_t end_addr)
{
	phys_addr_t cur;

	start_addr &= PAGE_MASK;
	for (cur = start_addr; true; cur += PAGE_SIZE) {
		if (start_addr < end_addr && cur >= end_addr)
			break;
		if (start_addr > end_addr && cur <= end_addr)
			break;
		install_page(pgtable, cur, __va(cur));
	}
}

void *setup_mmu(phys_addr_t phys_end)
{
	pgd_t *page_root;

	/* allocate a region-1 table */
	page_root = pgd_alloc_one();

	/* map all physical memory 1:1 */
	setup_identity(page_root, 0, phys_end);

	/* generate 128MB of invalid addresses at the end (for testing PGM) */
	init_alloc_vpage((void *) -(1UL << 27));
	setup_identity(page_root, -(1UL << 27), 0);

	/* finally enable DAT with the new table */
	mmu_enable(page_root);
	table_root = page_root;
	return page_root;
}