// SPDX-License-Identifier: GPL-2.0
/*
 * Helper routines for building identity mapping page tables. This is
 * included by both the compressed kernel and the regular kernel.
 */

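/* Free the page table page hanging off a single PMD entry. */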
static void free_pte(struct x86_mapping_info *info, pmd_t *pmd)
{
	pte_t *pte = pte_offset_kernel(pmd, 0);

	info->free_pgt_page(pte, info->context);
}

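/*
 * Free every PTE page reachable from the PMD table under one PUD entry,
 * then the PMD page itself.  Leaf (large page) entries have no
 * lower-level table to free.
 */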
static void free_pmd(struct x86_mapping_info *info, pud_t *pud)
{
	pmd_t *pmd = pmd_offset(pud, 0);
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (!pmd_present(pmd[i]))
			continue;

		if (pmd_leaf(pmd[i]))
			continue;

		free_pte(info, &pmd[i]);
	}

	info->free_pgt_page(pmd, info->context);
}

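/* Likewise one level up: free all PMD tables under one P4D entry. */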
static void free_pud(struct x86_mapping_info *info, p4d_t *p4d)
{
	pud_t *pud = pud_offset(p4d, 0);
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		if (!pud_present(pud[i]))
			continue;

		if (pud_leaf(pud[i]))
			continue;

		free_pmd(info, &pud[i]);
	}

	info->free_pgt_page(pud, info->context);
}

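/*
 * Free everything below one PGD entry.  With 4-level paging the p4d
 * level is folded into the pgd, so the "p4d page" is the pgd page
 * itself and must not be freed here.
 */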
static void free_p4d(struct x86_mapping_info *info, pgd_t *pgd)
{
	p4d_t *p4d = p4d_offset(pgd, 0);
	int i;

	for (i = 0; i < PTRS_PER_P4D; i++) {
		if (!p4d_present(p4d[i]))
			continue;

		free_pud(info, &p4d[i]);
	}

	if (pgtable_l5_enabled())
		info->free_pgt_page(p4d, info->context);
}

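/* Free a page table tree built by kernel_ident_mapping_init(). */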
void kernel_ident_mapping_free(struct x86_mapping_info *info, pgd_t *pgd)
{
	int i;

	for (i = 0; i < PTRS_PER_PGD; i++) {
		if (!pgd_present(pgd[i]))
			continue;

		free_p4d(info, &pgd[i]);
	}

	info->free_pgt_page(pgd, info->context);
}

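/*
 * Fill a PMD table with 2M leaf entries covering [addr, end), leaving
 * slots that are already populated alone.  The virtual->physical shift
 * comes from info->offset (zero for a pure identity mapping).
 */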
static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
			   unsigned long addr, unsigned long end)
{
	addr &= PMD_MASK;
	for (; addr < end; addr += PMD_SIZE) {
		pmd_t *pmd = pmd_page + pmd_index(addr);

		if (pmd_present(*pmd))
			continue;

		set_pmd(pmd, __pmd((addr - info->offset) | info->page_flag));
	}
}

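/*
 * Populate a PUD table for [addr, end): install a 1G leaf when gbpages
 * are allowed, the slot is empty and the range covers it entirely;
 * otherwise descend into an existing or freshly allocated PMD table.
 */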
static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
			  unsigned long addr, unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next) {
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;
		bool use_gbpage;

		next = pud_addr_end(addr, end);

		/* If this is already a gbpage, this portion is already mapped */
		if (pud_leaf(*pud))
			continue;

		/* Is using a gbpage allowed? */
		use_gbpage = info->direct_gbpages;

		/* Don't use gbpage if it maps more than the requested region. */
		/* at the beginning: */
		use_gbpage &= ((addr & ~PUD_MASK) == 0);
		/* ... or at the end: */
		use_gbpage &= ((next & ~PUD_MASK) == 0);

		/* Never overwrite existing mappings */
		use_gbpage &= !pud_present(*pud);

		if (use_gbpage) {
			pud_t pudval;

			pudval = __pud((addr - info->offset) | info->page_flag);
			set_pud(pud, pudval);
			continue;
		}

		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, 0);
			ident_pmd_init(info, pmd, addr, next);
			continue;
		}
		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
		if (!pmd)
			return -ENOMEM;
		ident_pmd_init(info, pmd, addr, next);
		set_pud(pud, __pud(__pa(pmd) | info->kernpg_flag));
	}

	return 0;
}

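/*
 * Same pattern one level up: walk the P4D slots for [addr, end) and
 * populate (or reuse) a PUD table for each.
 */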
static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page,
			  unsigned long addr, unsigned long end)
{
	unsigned long next;
	int result;

	for (; addr < end; addr = next) {
		p4d_t *p4d = p4d_page + p4d_index(addr);
		pud_t *pud;

		next = p4d_addr_end(addr, end);
		if (p4d_present(*p4d)) {
			pud = pud_offset(p4d, 0);
			result = ident_pud_init(info, pud, addr, next);
			if (result)
				return result;

			continue;
		}
		pud = (pud_t *)info->alloc_pgt_page(info->context);
		if (!pud)
			return -ENOMEM;

		result = ident_pud_init(info, pud, addr, next);
		if (result)
			return result;

		set_p4d(p4d, __p4d(__pa(pud) | info->kernpg_flag | _PAGE_NOPTISHADOW));
	}

	return 0;
}

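/*
 * Map the physical range [pstart, pend) into @pgd_page at virtual
 * address pstart + info->offset, allocating intermediate tables through
 * info->alloc_pgt_page().  Returns 0 on success or -ENOMEM on
 * allocation failure; a partially built tree can be torn down with
 * kernel_ident_mapping_free().
 */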
int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
			      unsigned long pstart, unsigned long pend)
{
	unsigned long addr = pstart + info->offset;
	unsigned long end = pend + info->offset;
	unsigned long next;
	int result;

	/* Set the default pagetable flags if not supplied */
	if (!info->kernpg_flag)
		info->kernpg_flag = _KERNPG_TABLE;

	/* Filter out unsupported __PAGE_KERNEL_* bits: */
	info->kernpg_flag &= __default_kernel_pte_mask;

	for (; addr < end; addr = next) {
		pgd_t *pgd = pgd_page + pgd_index(addr);
		p4d_t *p4d;

		next = pgd_addr_end(addr, end);
		if (pgd_present(*pgd)) {
			p4d = p4d_offset(pgd, 0);
			result = ident_p4d_init(info, p4d, addr, next);
			if (result)
				return result;
			continue;
		}

		p4d = (p4d_t *)info->alloc_pgt_page(info->context);
		if (!p4d)
			return -ENOMEM;
		result = ident_p4d_init(info, p4d, addr, next);
		if (result)
			return result;
		if (pgtable_l5_enabled()) {
			set_pgd(pgd, __pgd(__pa(p4d) | info->kernpg_flag | _PAGE_NOPTISHADOW));
		} else {
			/*
			 * With p4d folded, pgd is equal to p4d.
			 * The pgd entry has to point to the pud page table in this case.
			 */
			pud_t *pud = pud_offset(p4d, 0);

			set_pgd(pgd, __pgd(__pa(pud) | info->kernpg_flag | _PAGE_NOPTISHADOW));
		}
	}

	return 0;
}
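
/*
 * Example usage: a minimal sketch of a caller, not part of this file's
 * interface.  The example_* names are hypothetical; real users (e.g.
 * kexec in machine_kexec_64.c, or the compressed kernel) supply their
 * own allocator, context and flags through struct x86_mapping_info.
 */
#if 0	/* illustration only, not compiled */
static void *example_alloc_pgt_page(void *context)
{
	/* Hand out one zeroed page to hold a new page table. */
	return (void *)get_zeroed_page(GFP_KERNEL);
}

static void example_free_pgt_page(void *page, void *context)
{
	free_page((unsigned long)page);
}

static int example_identity_map(unsigned long pstart, unsigned long pend)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page	= example_alloc_pgt_page,
		.free_pgt_page	= example_free_pgt_page,
		.page_flag	= __PAGE_KERNEL_LARGE_EXEC,
		.offset		= 0,	/* pure identity: VA == PA */
	};
	pgd_t *pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL);
	int ret;

	if (!pgd)
		return -ENOMEM;

	ret = kernel_ident_mapping_init(&info, pgd, pstart, pend);
	if (ret)
		/* Tears down partial tables and the pgd page itself. */
		kernel_ident_mapping_free(&info, pgd);

	return ret;
}
#endif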