/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * s390x MMU
 *
 * Copyright (c) 2017 Red Hat Inc
 *
 * Authors:
 *  David Hildenbrand <david@redhat.com>
 */

#include <libcflat.h>
#include <asm/pgtable.h>
#include <asm/arch_def.h>
#include <asm/barrier.h>
#include <vmalloc.h>
#include "mmu.h"

static pgd_t *table_root;

void configure_dat(int enable)
{
	uint64_t mask;

	if (enable)
		mask = extract_psw_mask() | PSW_MASK_DAT;
	else
		mask = extract_psw_mask() & ~PSW_MASK_DAT;

	load_psw_mask(mask);
}
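
/*
 * Usage sketch (illustrative, not called in this file): DAT can be
 * toggled around an access that must use real addresses, e.g.:
 *
 *	configure_dat(0);
 *	// ... accesses now bypass translation ...
 *	configure_dat(1);
 */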

static void mmu_enable(pgd_t *pgtable)
{
	/*
	 * The lowcore is located at absolute address 0, so dereferencing
	 * a NULL pointer below is deliberate.
	 */
	struct lowcore *lc = NULL;
	const uint64_t asce = __pa(pgtable) | ASCE_DT_REGION1 |
			      REGION_TABLE_LENGTH;

	/* set primary asce */
	lctlg(1, asce);
	assert(stctg(1) == asce);

	/* enable dat (primary == 0 set as default) */
	configure_dat(1);

	/* we can now also use DAT unconditionally in our PGM handler */
	lc->pgm_new_psw.mask |= PSW_MASK_DAT;
}
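
/*
 * For reference, a sketch of how the ASCE built above decomposes (bit
 * positions per the z/Architecture PoP; an assumption, double-check
 * against asm/pgtable.h before relying on it):
 *
 *	uint64_t asce   = stctg(1);
 *	uint64_t origin = asce & ~0xfffUL;  // region-1 table origin
 *	unsigned dt     = (asce >> 2) & 3;  // designation type, 3 == region-1
 *	unsigned tl     = asce & 3;         // table length, (tl + 1) * 512 entries
 */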

/*
 * Walk the page table rooted at @pgtable down to the PTE level,
 * allocating missing intermediate tables on the way, and return a
 * pointer to the PTE covering @vaddr.
 */
static pteval_t *get_pte(pgd_t *pgtable, uintptr_t vaddr)
{
	pgd_t *pgd = pgd_offset(pgtable, vaddr);
	p4d_t *p4d = p4d_alloc(pgd, vaddr);
	pud_t *pud = pud_alloc(p4d, vaddr);
	pmd_t *pmd = pmd_alloc(pud, vaddr);
	pte_t *pte = pte_alloc(pmd, vaddr);

	return &pte_val(*pte);
}

phys_addr_t virt_to_pte_phys(pgd_t *pgtable, void *vaddr)
{
	return (*get_pte(pgtable, (uintptr_t)vaddr) & PAGE_MASK) +
	       ((unsigned long)vaddr & ~PAGE_MASK);
}
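
/*
 * Usage sketch (illustrative): under the 1:1 mapping established by
 * setup_mmu() below, translating a page yields its own address
 * (alloc_page() is from <alloc_page.h>):
 *
 *	void *p = alloc_page();
 *	assert(virt_to_pte_phys(table_root, p) == __pa(p));
 */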

static pteval_t *set_pte(pgd_t *pgtable, pteval_t val, void *vaddr)
{
	pteval_t *p_pte = get_pte(pgtable, (uintptr_t)vaddr);

	/* first flush the old entry (if we're replacing anything) */
	if (!(*p_pte & PAGE_ENTRY_I))
		ipte((uintptr_t)vaddr, p_pte);

	*p_pte = val;
	return p_pte;
}

pteval_t *install_page(pgd_t *pgtable, phys_addr_t phys, void *vaddr)
{
	return set_pte(pgtable, __pa(phys), vaddr);
}
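
/*
 * Usage sketch (illustrative): map an arbitrary physical page at a fresh
 * virtual address from the vmalloc area (alloc_vpage() is from
 * <vmalloc.h>; some_phys_addr is a hypothetical placeholder):
 *
 *	void *v = alloc_vpage();
 *	install_page(table_root, some_phys_addr, v);
 *	// v now accesses the page at some_phys_addr
 */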

void protect_page(void *vaddr, unsigned long prot)
{
	pteval_t *p_pte = get_pte(table_root, (uintptr_t)vaddr);
	pteval_t n_pte = *p_pte | prot;

	set_pte(table_root, n_pte, vaddr);
}

void unprotect_page(void *vaddr, unsigned long prot)
{
	pteval_t *p_pte = get_pte(table_root, (uintptr_t)vaddr);
	pteval_t n_pte = *p_pte & ~prot;

	set_pte(table_root, n_pte, vaddr);
}
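
/*
 * Usage sketch (illustrative): write-protect a page and undo it again,
 * assuming PAGE_ENTRY_P is the DAT-protection bit as named in
 * asm/pgtable.h:
 *
 *	protect_page(ptr, PAGE_ENTRY_P);
 *	// a store to ptr now raises a protection exception
 *	unprotect_page(ptr, PAGE_ENTRY_P);
 */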

void protect_range(void *start, unsigned long len, unsigned long prot)
{
	uintptr_t curr = (uintptr_t)start & PAGE_MASK;

	len &= PAGE_MASK;
	for (; len; len -= PAGE_SIZE, curr += PAGE_SIZE)
		protect_page((void *)curr, prot);
}

void unprotect_range(void *start, unsigned long len, unsigned long prot)
{
	uintptr_t curr = (uintptr_t)start & PAGE_MASK;

	len &= PAGE_MASK;
	for (; len; len -= PAGE_SIZE, curr += PAGE_SIZE)
		unprotect_page((void *)curr, prot);
}
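
/*
 * Usage sketch (illustrative): make a multi-page buffer inaccessible by
 * setting the invalid bit on every entry, then restore it:
 *
 *	protect_range(buf, 3 * PAGE_SIZE, PAGE_ENTRY_I);
 *	// any access within buf now raises a page-translation exception
 *	unprotect_range(buf, 3 * PAGE_SIZE, PAGE_ENTRY_I);
 */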

/*
 * Identity-map [start_addr, end_addr). The two break conditions also
 * handle the wrap-around case start_addr > end_addr, where the mapping
 * runs from start_addr up to the top of the address space.
 */
static void setup_identity(pgd_t *pgtable, phys_addr_t start_addr,
			   phys_addr_t end_addr)
{
	phys_addr_t cur;

	start_addr &= PAGE_MASK;
	for (cur = start_addr; true; cur += PAGE_SIZE) {
		if (start_addr < end_addr && cur >= end_addr)
			break;
		if (start_addr > end_addr && cur <= end_addr)
			break;
		install_page(pgtable, cur, __va(cur));
	}
}
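
/*
 * Worked example of the wrap-around handling: for the call
 * setup_identity(page_root, -(1UL << 27), 0) made below, cur starts at
 * 2^64 - 128 MiB and increments until it wraps to 0, at which point the
 * second break condition (cur <= end_addr) fires; exactly the top
 * 128 MiB of the address space get mapped 1:1.
 */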

void *setup_mmu(phys_addr_t phys_end)
{
	pgd_t *page_root;

	/* allocate a region-1 table */
	page_root = pgd_alloc_one();

	/* map all physical memory 1:1 */
	setup_identity(page_root, 0, phys_end);

	/* generate 128MB of invalid addresses at the end (for testing PGM) */
	init_alloc_vpage((void *) -(1UL << 27));
	setup_identity(page_root, -(1UL << 27), 0);

	/* finally enable DAT with the new table */
	mmu_enable(page_root);
	table_root = page_root;
	return page_root;
}
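
/*
 * Usage sketch (illustrative): tests normally do not call setup_mmu()
 * directly; the vmalloc code invokes it with the detected end of
 * physical memory when a test calls setup_vm() from <vmalloc.h>:
 *
 *	setup_vm();			// enables DAT via setup_mmu()
 *	void *p = alloc_vpage();	// virtual addresses are now usable
 */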