/*
 * s390x MMU
 *
 * Copyright (c) 2017 Red Hat Inc
 *
 * Authors:
 *  David Hildenbrand <david@redhat.com>
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Library General Public License version 2.
 */

#include <libcflat.h>
#include <asm/pgtable.h>
#include <asm/arch_def.h>
#include <asm/barrier.h>
#include <vmalloc.h>

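/* root of the page table used once DAT has been enabled */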
static pgd_t *table_root;

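/* turn DAT on or off by updating the DAT bit in the current PSW mask */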
void configure_dat(int enable)
{
	uint64_t mask;

	if (enable)
		mask = extract_psw_mask() | PSW_MASK_DAT;
	else
		mask = extract_psw_mask() & ~PSW_MASK_DAT;

	load_psw_mask(mask);
}

static void mmu_enable(pgd_t *pgtable)
{
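	/* the lowcore is located at absolute address 0, so NULL is valid here */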
	struct lowcore *lc = NULL;
	const uint64_t asce = __pa(pgtable) | ASCE_DT_REGION1 |
			      REGION_TABLE_LENGTH;

	/* set primary asce */
	lctlg(1, asce);
	assert(stctg(1) == asce);

	/* enable dat (primary == 0 set as default) */
	configure_dat(1);

	/* we can now also use DAT unconditionally in our PGM handler */
	lc->pgm_new_psw.mask |= PSW_MASK_DAT;
}

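/*
 * Walk the page table for a virtual address, allocating any missing
 * intermediate translation tables, and return a pointer to the pte.
 */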
static pteval_t *get_pte(pgd_t *pgtable, uintptr_t vaddr)
{
	pgd_t *pgd = pgd_offset(pgtable, vaddr);
	p4d_t *p4d = p4d_alloc(pgd, vaddr);
	pud_t *pud = pud_alloc(p4d, vaddr);
	pmd_t *pmd = pmd_alloc(pud, vaddr);
	pte_t *pte = pte_alloc(pmd, vaddr);

	return &pte_val(*pte);
}

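/* translate a virtual address into a physical one via the given table */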
phys_addr_t virt_to_pte_phys(pgd_t *pgtable, void *vaddr)
{
	return (*get_pte(pgtable, (uintptr_t)vaddr) & PAGE_MASK) +
	       ((unsigned long)vaddr & ~PAGE_MASK);
}

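/*
 * Replace a pte; a still-valid old entry is flushed with IPTE first, so
 * no stale translation can survive in the TLB.
 */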
static pteval_t *set_pte(pgd_t *pgtable, pteval_t val, void *vaddr)
{
	pteval_t *p_pte = get_pte(pgtable, (uintptr_t)vaddr);

	/* first flush the old entry (if we're replacing anything) */
	if (!(*p_pte & PAGE_ENTRY_I))
		ipte((uintptr_t)vaddr, p_pte);

	*p_pte = val;
	return p_pte;
}

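/* map the physical page at @phys at the virtual address @vaddr */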
pteval_t *install_page(pgd_t *pgtable, phys_addr_t phys, void *vaddr)
{
	return set_pte(pgtable, __pa(phys), vaddr);
}

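/* set the given protection bits in the pte of a single page */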
void protect_page(void *vaddr, unsigned long prot)
{
	pteval_t *p_pte = get_pte(table_root, (uintptr_t)vaddr);
	pteval_t n_pte = *p_pte | prot;

	set_pte(table_root, n_pte, vaddr);
}

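/* clear the given protection bits in the pte of a single page */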
void unprotect_page(void *vaddr, unsigned long prot)
{
	pteval_t *p_pte = get_pte(table_root, (uintptr_t)vaddr);
	pteval_t n_pte = *p_pte & ~prot;

	set_pte(table_root, n_pte, vaddr);
}

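/* set the given protection bits for every page of a range */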
void protect_range(void *start, unsigned long len, unsigned long prot)
{
	uintptr_t curr = (uintptr_t)start & PAGE_MASK;

	len &= PAGE_MASK;
	for (; len; len -= PAGE_SIZE, curr += PAGE_SIZE)
		protect_page((void *)curr, prot);
}

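/* clear the given protection bits for every page of a range */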
void unprotect_range(void *start, unsigned long len, unsigned long prot)
{
	uintptr_t curr = (uintptr_t)start & PAGE_MASK;

	len &= PAGE_MASK;
	for (; len; len -= PAGE_SIZE, curr += PAGE_SIZE)
		unprotect_page((void *)curr, prot);
}

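/*
 * Identity-map all pages from start_addr up to (excluding) end_addr. The
 * two break conditions also handle a range that wraps around the end of
 * the address space, as used by setup_mmu() below.
 */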
static void setup_identity(pgd_t *pgtable, phys_addr_t start_addr,
			   phys_addr_t end_addr)
{
	phys_addr_t cur;

	start_addr &= PAGE_MASK;
	for (cur = start_addr; true; cur += PAGE_SIZE) {
		if (start_addr < end_addr && cur >= end_addr)
			break;
		if (start_addr > end_addr && cur <= end_addr)
			break;
		install_page(pgtable, cur, __va(cur));
	}
}

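/*
 * Allocate a region-1 table, identity-map all of physical memory and
 * enable DAT; returns the root of the new page table.
 */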
void *setup_mmu(phys_addr_t phys_end)
{
	pgd_t *page_root;

	/* allocate a region-1 table */
	page_root = pgd_alloc_one();

	/* map all physical memory 1:1 */
	setup_identity(page_root, 0, phys_end);

	/* generate 128MB of invalid addresses at the end (for testing PGM) */
	init_alloc_vpage((void *) -(1UL << 27));
	setup_identity(page_root, -(1UL << 27), 0);

	/* finally enable DAT with the new table */
	mmu_enable(page_root);
	table_root = page_root;
	return page_root;
}