/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * s390x MMU
 *
 * Copyright (c) 2017 Red Hat Inc
 *
 * Authors:
 *  David Hildenbrand <david@redhat.com>
 */

#include <libcflat.h>
#include <asm/pgtable.h>
#include <asm/arch_def.h>
#include <asm/barrier.h>
#include <vmalloc.h>
#include "mmu.h"

/*
 * The naming convention used here is the same as used in the Linux kernel;
 * this is the correspondence between the s390x architectural names and the
 * Linux ones:
 *
 * pgd - region 1 table entry
 * p4d - region 2 table entry
 * pud - region 3 table entry
 * pmd - segment table entry
 * pte - page table entry
 */

static pgd_t *table_root;

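/*
 * Enable or disable DAT (dynamic address translation) by setting or clearing
 * the DAT bit in the current PSW mask.
 */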
void configure_dat(int enable)
{
	uint64_t mask;

	if (enable)
		mask = extract_psw_mask() | PSW_MASK_DAT;
	else
		mask = extract_psw_mask() & ~PSW_MASK_DAT;

	load_psw_mask(mask);
}

static void mmu_enable(pgd_t *pgtable)
{
	/* the lowcore is located at absolute address 0, so a NULL pointer works here */
	struct lowcore *lc = NULL;
	const uint64_t asce = __pa(pgtable) | ASCE_DT_REGION1 |
			      REGION_TABLE_LENGTH;

	/* set primary asce */
	lctlg(1, asce);
	assert(stctg(1) == asce);

	/* enable dat (primary == 0 set as default) */
	configure_dat(1);

	/* we can now also use DAT unconditionally in our PGM handler */
	lc->pgm_new_psw.mask |= PSW_MASK_DAT;
}

/*
 * Get the pud (region 3) DAT table entry for the given address and root,
 * allocating it if necessary
 */
static inline pud_t *get_pud(pgd_t *pgtable, uintptr_t vaddr)
{
	pgd_t *pgd = pgd_offset(pgtable, vaddr);
	p4d_t *p4d = p4d_alloc(pgd, vaddr);
	pud_t *pud = pud_alloc(p4d, vaddr);

	return pud;
}

/*
 * Get the pmd (segment) DAT table entry for the given address and pud,
 * allocating it if necessary.
 * The pud must not be huge.
 */
static inline pmd_t *get_pmd(pud_t *pud, uintptr_t vaddr)
{
	pmd_t *pmd;

	assert(!pud_huge(*pud));
	pmd = pmd_alloc(pud, vaddr);
	return pmd;
}

/*
 * Get the pte (page) DAT table entry for the given address and pmd,
 * allocating it if necessary.
 * The pmd must not be large.
 */
static inline pte_t *get_pte(pmd_t *pmd, uintptr_t vaddr)
{
	pte_t *pte;

	assert(!pmd_large(*pmd));
	pte = pte_alloc(pmd, vaddr);
	return pte;
}

/*
 * Splits a large pmd (segment) DAT table entry into equivalent 4kB small
 * pages.
 * @pmd The pmd to split; it must be large.
 * @va the virtual address corresponding to this pmd.
 */
static void split_pmd(pmd_t *pmd, uintptr_t va)
{
	phys_addr_t pa = pmd_val(*pmd) & SEGMENT_ENTRY_SFAA;
	unsigned long i, prot;
	pte_t *pte;

	assert(pmd_large(*pmd));
	pte = alloc_pages(PAGE_TABLE_ORDER);
	prot = pmd_val(*pmd) & (SEGMENT_ENTRY_IEP | SEGMENT_ENTRY_P);
	for (i = 0; i < PAGE_TABLE_ENTRIES; i++)
		pte_val(pte[i]) = pa | PAGE_SIZE * i | prot;
	/* invalidate the old entry and flush its TLB entries before replacing it */
	idte_pmdp(va, &pmd_val(*pmd));
	pmd_val(*pmd) = __pa(pte) | SEGMENT_ENTRY_TT_SEGMENT;
}

/*
 * Splits a huge pud (region 3) DAT table entry into equivalent 1MB large
 * pages.
 * @pud The pud to split; it must be huge.
 * @va the virtual address corresponding to this pud.
 */
static void split_pud(pud_t *pud, uintptr_t va)
{
	phys_addr_t pa = pud_val(*pud) & REGION3_ENTRY_RFAA;
	unsigned long i, prot;
	pmd_t *pmd;

	assert(pud_huge(*pud));
	pmd = alloc_pages(SEGMENT_TABLE_ORDER);
	prot = pud_val(*pud) & (REGION3_ENTRY_IEP | REGION_ENTRY_P);
	for (i = 0; i < SEGMENT_TABLE_ENTRIES; i++)
		pmd_val(pmd[i]) = pa | SZ_1M * i | prot | SEGMENT_ENTRY_FC | SEGMENT_ENTRY_TT_SEGMENT;
	/* invalidate the old entry and flush its TLB entries before replacing it */
	idte_pudp(va, &pud_val(*pud));
	pud_val(*pud) = __pa(pmd) | REGION_ENTRY_TT_REGION3 | REGION_TABLE_LENGTH;
}

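/*
 * Get the DAT table entry of the given level for the given address,
 * allocating any missing intermediate tables and splitting huge or large
 * mappings along the way if needed.
 */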
void *get_dat_entry(pgd_t *pgtable, void *vaddr, enum pgt_level level)
{
	uintptr_t va = (uintptr_t)vaddr;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	assert(level && (level <= 5));
	pgd = pgd_offset(pgtable, va);
	if (level == pgtable_level_pgd)
		return pgd;
	p4d = p4d_alloc(pgd, va);
	if (level == pgtable_level_p4d)
		return p4d;
	pud = pud_alloc(p4d, va);
	if (level == pgtable_level_pud)
		return pud;
	if (!pud_none(*pud) && pud_huge(*pud))
		split_pud(pud, va);
	pmd = get_pmd(pud, va);
	if (level == pgtable_level_pmd)
		return pmd;
	if (!pmd_none(*pmd) && pmd_large(*pmd))
		split_pmd(pmd, va);
	return get_pte(pmd, va);
}

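/*
 * Split the mapping of the given address down to the given level (region 3,
 * segment or page) if necessary and return a pointer to the DAT table entry
 * of that level; if pgtable is NULL, the current page table root is used.
 */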
void *split_page(pgd_t *pgtable, void *vaddr, enum pgt_level level)
{
	assert((level >= 3) && (level <= 5));
	return get_dat_entry(pgtable ? pgtable : table_root, vaddr, level);
}

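/*
 * Translate a virtual address into a physical address by walking the DAT
 * tables, taking 2GB and 1MB mappings into account.
 */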
phys_addr_t virt_to_pte_phys(pgd_t *pgtable, void *vaddr)
{
	uintptr_t va = (uintptr_t)vaddr;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pud = get_pud(pgtable, va);
	if (pud_huge(*pud))
		return (pud_val(*pud) & REGION3_ENTRY_RFAA) | (va & ~REGION3_ENTRY_RFAA);
	pmd = get_pmd(pud, va);
	if (pmd_large(*pmd))
		return (pmd_val(*pmd) & SEGMENT_ENTRY_SFAA) | (va & ~SEGMENT_ENTRY_SFAA);
	pte = get_pte(pmd, va);
	return (pte_val(*pte) & PAGE_MASK) | (va & ~PAGE_MASK);
}

/*
 * Get the DAT table entry of the given level for the given address,
 * splitting larger mappings if necessary. If the entry is valid, invalidate
 * it; return the pointer to the entry and, if requested, its old value.
 * @pgtable root of the page tables
 * @vaddr virtual address
 * @level 3 (for 2GB pud), 4 (for 1MB pmd) or 5 (for 4kB pages)
 * @old if not NULL, will be written with the old value of the DAT table
 * entry before invalidation
 */
static void *dat_get_and_invalidate(pgd_t *pgtable, void *vaddr, enum pgt_level level, unsigned long *old)
{
	unsigned long va = (unsigned long)vaddr;
	void *ptr;

	ptr = get_dat_entry(pgtable, vaddr, level);
	if (old)
		*old = *(unsigned long *)ptr;
	if ((level == pgtable_level_pgd) && !pgd_none(*(pgd_t *)ptr))
		idte_pgdp(va, ptr);
	else if ((level == pgtable_level_p4d) && !p4d_none(*(p4d_t *)ptr))
		idte_p4dp(va, ptr);
	else if ((level == pgtable_level_pud) && !pud_none(*(pud_t *)ptr))
		idte_pudp(va, ptr);
	else if ((level == pgtable_level_pmd) && !pmd_none(*(pmd_t *)ptr))
		idte_pmdp(va, ptr);
	else if (!pte_none(*(pte_t *)ptr))
		ipte(va, ptr);
	return ptr;
}

static void cleanup_pmd(pmd_t *pmd)
{
	/* was invalid or large, nothing to do */
	if (pmd_none(*pmd) || pmd_large(*pmd))
		return;
	/* was not large, free the corresponding page table */
	free_pages((void *)(pmd_val(*pmd) & PAGE_MASK));
}

static void cleanup_pud(pud_t *pud)
{
	unsigned long i;
	pmd_t *pmd;

	/* was invalid or large, nothing to do */
	if (pud_none(*pud) || pud_huge(*pud))
		return;
	/* recursively clean up all pmds if needed */
	pmd = (pmd_t *)(pud_val(*pud) & PAGE_MASK);
	for (i = 0; i < SEGMENT_TABLE_ENTRIES; i++)
		cleanup_pmd(pmd + i);
	/* free the corresponding segment table */
	free_pages(pmd);
}

/*
 * Set the DAT entry for the given level of the given virtual address. If a
 * mapping already existed, it is overwritten. If an existing mapping with
 * smaller pages existed, all the lower tables are freed.
 * Returns the pointer to the DAT table entry.
 * @pgtable root of the page tables
 * @val the new value for the DAT table entry
 * @vaddr the virtual address
 * @level 3 for pud (region 3), 4 for pmd (segment) and 5 for pte (pages)
 */
static void *set_dat_entry(pgd_t *pgtable, unsigned long val, void *vaddr, enum pgt_level level)
{
	unsigned long old, *res;

	res = dat_get_and_invalidate(pgtable, vaddr, level, &old);
	if (level == pgtable_level_pmd)
		cleanup_pmd((pmd_t *)&old);
	if (level == pgtable_level_pud)
		cleanup_pud((pud_t *)&old);
	*res = val;
	return res;
}

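/*
 * Map a single 4kB page, 1MB large page or 2GB huge page at the given
 * virtual address; phys and vaddr must be aligned to the respective size.
 */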
pteval_t *install_page(pgd_t *pgtable, phys_addr_t phys, void *vaddr)
{
	assert(IS_ALIGNED(phys, PAGE_SIZE));
	assert(IS_ALIGNED((uintptr_t)vaddr, PAGE_SIZE));
	return set_dat_entry(pgtable, phys, vaddr, pgtable_level_pte);
}

pmdval_t *install_large_page(pgd_t *pgtable, phys_addr_t phys, void *vaddr)
{
	assert(IS_ALIGNED(phys, SZ_1M));
	assert(IS_ALIGNED((uintptr_t)vaddr, SZ_1M));
	return set_dat_entry(pgtable, phys | SEGMENT_ENTRY_FC, vaddr, pgtable_level_pmd);
}

pudval_t *install_huge_page(pgd_t *pgtable, phys_addr_t phys, void *vaddr)
{
	assert(IS_ALIGNED(phys, SZ_2G));
	assert(IS_ALIGNED((uintptr_t)vaddr, SZ_2G));
	return set_dat_entry(pgtable, phys | REGION3_ENTRY_FC | REGION_ENTRY_TT_REGION3, vaddr, pgtable_level_pud);
}

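/*
 * Set or clear the given protection bits in the DAT table entry of the given
 * level for the given virtual address, using the currently installed page
 * table root.
 */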
void protect_dat_entry(void *vaddr, unsigned long prot, enum pgt_level level)
{
	unsigned long old, *ptr;

	ptr = dat_get_and_invalidate(table_root, vaddr, level, &old);
	*ptr = old | prot;
}

void unprotect_dat_entry(void *vaddr, unsigned long prot, enum pgt_level level)
{
	unsigned long old, *ptr;

	ptr = dat_get_and_invalidate(table_root, vaddr, level, &old);
	*ptr = old & ~prot;
}

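/*
 * Set or clear the given protection bits in all page table entries (4kB
 * granularity) covering the given range.
 */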
void protect_range(void *start, unsigned long len, unsigned long prot)
{
	uintptr_t curr = (uintptr_t)start & PAGE_MASK;

	len &= PAGE_MASK;
	for (; len; len -= PAGE_SIZE, curr += PAGE_SIZE)
		protect_dat_entry((void *)curr, prot, pgtable_level_pte);
}

void unprotect_range(void *start, unsigned long len, unsigned long prot)
{
	uintptr_t curr = (uintptr_t)start & PAGE_MASK;

	len &= PAGE_MASK;
	for (; len; len -= PAGE_SIZE, curr += PAGE_SIZE)
		unprotect_dat_entry((void *)curr, prot, pgtable_level_pte);
}

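/*
 * Identity-map the given physical address range with 4kB pages. The end
 * address is exclusive; if it is smaller than the start address, the mapping
 * extends up to the top of the address space.
 */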
static void setup_identity(pgd_t *pgtable, phys_addr_t start_addr,
			   phys_addr_t end_addr)
{
	phys_addr_t cur;

	start_addr &= PAGE_MASK;
	for (cur = start_addr; true; cur += PAGE_SIZE) {
		if (start_addr < end_addr && cur >= end_addr)
			break;
		if (start_addr > end_addr && cur <= end_addr)
			break;
		install_page(pgtable, cur, __va(cur));
	}
}

void *setup_mmu(phys_addr_t phys_end, void *unused)
{
	pgd_t *page_root;

	/* allocate a region-1 table */
	page_root = pgd_alloc_one();

	/* map all physical memory 1:1 */
	setup_identity(page_root, 0, phys_end);

	/* generate 128MB of invalid addresses at the end (for testing PGM) */
	init_alloc_vpage((void *) -(1UL << 27));
	setup_identity(page_root, -(1UL << 27), 0);

	/* finally enable DAT with the new table */
	mmu_enable(page_root);
	table_root = page_root;
	return page_root;
}