// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 */

#ifndef __ARM64_KVM_PGTABLE_H__
#define __ARM64_KVM_PGTABLE_H__

#include <linux/bits.h>
#include <linux/kvm_host.h>
#include <linux/types.h>

#define KVM_PGTABLE_FIRST_LEVEL		-1
#define KVM_PGTABLE_LAST_LEVEL		3

/*
 * The largest supported block sizes for KVM (no 52-bit PA support):
 *  - 4K (level 1):	1GB
 *  - 16K (level 2):	32MB
 *  - 64K (level 2):	512MB
 */
#ifdef CONFIG_ARM64_4K_PAGES
#define KVM_PGTABLE_MIN_BLOCK_LEVEL	1
#else
#define KVM_PGTABLE_MIN_BLOCK_LEVEL	2
#endif

#define kvm_lpa2_is_enabled()		system_supports_lpa2()
static inline u64 kvm_get_parange_max(void)
{
	if (kvm_lpa2_is_enabled() ||
	   (IS_ENABLED(CONFIG_ARM64_PA_BITS_52) && PAGE_SHIFT == 16))
		return ID_AA64MMFR0_EL1_PARANGE_52;
	else
		return ID_AA64MMFR0_EL1_PARANGE_48;
}

static inline u64 kvm_get_parange(u64 mmfr0)
{
	u64 parange_max = kvm_get_parange_max();
	u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
				ID_AA64MMFR0_EL1_PARANGE_SHIFT);
	if (parange > parange_max)
		parange = parange_max;

	return parange;
}

typedef u64 kvm_pte_t;

#define KVM_PTE_VALID			BIT(0)

#define KVM_PTE_ADDR_MASK		GENMASK(47, PAGE_SHIFT)
#define KVM_PTE_ADDR_51_48		GENMASK(15, 12)
#define KVM_PTE_ADDR_MASK_LPA2		GENMASK(49, PAGE_SHIFT)
#define KVM_PTE_ADDR_51_50_LPA2		GENMASK(9, 8)

#define KVM_PHYS_INVALID		(-1ULL)

static inline bool kvm_pte_valid(kvm_pte_t pte)
{
	return pte & KVM_PTE_VALID;
}

static inline u64 kvm_pte_to_phys(kvm_pte_t pte)
{
	u64 pa;

	if (kvm_lpa2_is_enabled()) {
		pa = pte & KVM_PTE_ADDR_MASK_LPA2;
		pa |= FIELD_GET(KVM_PTE_ADDR_51_50_LPA2, pte) << 50;
	} else {
		pa = pte & KVM_PTE_ADDR_MASK;
		if (PAGE_SHIFT == 16)
			pa |= FIELD_GET(KVM_PTE_ADDR_51_48, pte) << 48;
	}

	return pa;
}

static inline kvm_pte_t kvm_phys_to_pte(u64 pa)
{
	kvm_pte_t pte;

	if (kvm_lpa2_is_enabled()) {
		pte = pa & KVM_PTE_ADDR_MASK_LPA2;
		pa &= GENMASK(51, 50);
		pte |= FIELD_PREP(KVM_PTE_ADDR_51_50_LPA2, pa >> 50);
	} else {
		pte = pa & KVM_PTE_ADDR_MASK;
		if (PAGE_SHIFT == 16) {
			pa &= GENMASK(51, 48);
			pte |= FIELD_PREP(KVM_PTE_ADDR_51_48, pa >> 48);
		}
	}

	return pte;
}

static inline kvm_pfn_t kvm_pte_to_pfn(kvm_pte_t pte)
{
	return __phys_to_pfn(kvm_pte_to_phys(pte));
}
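
/*
 * Example (a sketch, for illustration only): the address helpers above are
 * designed to round-trip. With 64K pages and 52-bit PAs, bits [51:48] of a
 * physical address are carried in PTE bits [15:12] rather than in the
 * contiguous address field:
 *
 *	u64 phys = 0x900000000ULL;	// hypothetical, page-aligned PA
 *	kvm_pte_t pte = kvm_phys_to_pte(phys) | KVM_PTE_VALID;
 *
 *	WARN_ON(kvm_pte_to_phys(pte) != phys);
 *	WARN_ON(kvm_pte_to_pfn(pte) != __phys_to_pfn(phys));
 */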

static inline u64 kvm_granule_shift(s8 level)
{
	/* Assumes KVM_PGTABLE_LAST_LEVEL is 3 */
	return ARM64_HW_PGTABLE_LEVEL_SHIFT(level);
}

static inline u64 kvm_granule_size(s8 level)
{
	return BIT(kvm_granule_shift(level));
}

static inline bool kvm_level_supports_block_mapping(s8 level)
{
	return level >= KVM_PGTABLE_MIN_BLOCK_LEVEL;
}

static inline u32 kvm_supported_block_sizes(void)
{
	s8 level = KVM_PGTABLE_MIN_BLOCK_LEVEL;
	u32 r = 0;

	for (; level <= KVM_PGTABLE_LAST_LEVEL; level++)
		r |= BIT(kvm_granule_shift(level));

	return r;
}

static inline bool kvm_is_block_size_supported(u64 size)
{
	/* IS_ALIGNED(size, size) expands to (size & (size - 1)) == 0 */
	bool is_power_of_two = IS_ALIGNED(size, size);

	return is_power_of_two && (size & kvm_supported_block_sizes());
}
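
/*
 * Example (illustrative only): with 4K pages, KVM_PGTABLE_MIN_BLOCK_LEVEL
 * is 1, so kvm_supported_block_sizes() returns BIT(30) | BIT(21) | BIT(12),
 * i.e. the 1GB, 2MB and 4KB granule sizes. kvm_is_block_size_supported(SZ_2M)
 * is then true, whereas kvm_is_block_size_supported(SZ_2M + SZ_4K) is false,
 * as the latter is not a power of two.
 */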

/**
 * struct kvm_pgtable_mm_ops - Memory management callbacks.
 * @zalloc_page:		Allocate a single zeroed memory page.
 *				The @arg parameter can be used by the walker
 *				to pass a memcache. The initial refcount of
 *				the page is 1.
 * @zalloc_pages_exact:		Allocate an exact number of zeroed memory pages.
 *				The @size parameter is in bytes, and is rounded
 *				up to the next page boundary. The resulting
 *				allocation is physically contiguous.
 * @free_pages_exact:		Free an exact number of memory pages previously
 *				allocated by zalloc_pages_exact.
 * @free_unlinked_table:	Free an unlinked paging structure by unlinking and
 *				dropping references.
 * @get_page:			Increment the refcount on a page.
 * @put_page:			Decrement the refcount on a page. When the
 *				refcount reaches 0 the page is automatically
 *				freed.
 * @page_count:			Return the refcount of a page.
 * @phys_to_virt:		Convert a physical address into a virtual
 *				address mapped in the current context.
 * @virt_to_phys:		Convert a virtual address mapped in the current
 *				context into a physical address.
 * @dcache_clean_inval_poc:	Clean and invalidate the data cache to the PoC
 *				for the specified memory address range.
 * @icache_inval_pou:		Invalidate the instruction cache to the PoU
 *				for the specified memory address range.
 */
struct kvm_pgtable_mm_ops {
	void*		(*zalloc_page)(void *arg);
	void*		(*zalloc_pages_exact)(size_t size);
	void		(*free_pages_exact)(void *addr, size_t size);
	void		(*free_unlinked_table)(void *addr, s8 level);
	void		(*get_page)(void *addr);
	void		(*put_page)(void *addr);
	int		(*page_count)(void *addr);
	void*		(*phys_to_virt)(phys_addr_t phys);
	phys_addr_t	(*virt_to_phys)(void *addr);
	void		(*dcache_clean_inval_poc)(void *addr, size_t size);
	void		(*icache_inval_pou)(void *addr, size_t size);
};
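
/*
 * Example (a minimal sketch assuming the generic page allocator; every
 * "example_*" name is hypothetical and not part of this API). A real
 * implementation, such as the stage-2 mm_ops in arch/arm64/kvm/mmu.c,
 * also threads a memcache through @arg and wires up the cache
 * maintenance callbacks:
 *
 *	static void *example_zalloc_page(void *arg)
 *	{
 *		return (void *)__get_free_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
 *	}
 *
 *	static void example_get_page(void *addr)
 *	{
 *		get_page(virt_to_page(addr));
 *	}
 *
 *	static void example_put_page(void *addr)
 *	{
 *		put_page(virt_to_page(addr));
 *	}
 *
 *	static int example_page_count(void *addr)
 *	{
 *		return page_count(virt_to_page(addr));
 *	}
 *
 *	static void *example_phys_to_virt(phys_addr_t phys)
 *	{
 *		return __va(phys);
 *	}
 *
 *	static phys_addr_t example_virt_to_phys(void *addr)
 *	{
 *		return __pa(addr);
 *	}
 *
 *	static struct kvm_pgtable_mm_ops example_mm_ops = {
 *		.zalloc_page	= example_zalloc_page,
 *		.get_page	= example_get_page,
 *		.put_page	= example_put_page,
 *		.page_count	= example_page_count,
 *		.phys_to_virt	= example_phys_to_virt,
 *		.virt_to_phys	= example_virt_to_phys,
 *	};
 */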

/**
 * enum kvm_pgtable_stage2_flags - Stage-2 page-table flags.
 * @KVM_PGTABLE_S2_NOFWB:	Don't enforce Normal-WB even if the CPUs have
 *				ARM64_HAS_STAGE2_FWB.
 * @KVM_PGTABLE_S2_IDMAP:	Only use identity mappings.
 */
enum kvm_pgtable_stage2_flags {
	KVM_PGTABLE_S2_NOFWB			= BIT(0),
	KVM_PGTABLE_S2_IDMAP			= BIT(1),
};

/**
 * enum kvm_pgtable_prot - Page-table permissions and attributes.
 * @KVM_PGTABLE_PROT_X:		Execute permission.
 * @KVM_PGTABLE_PROT_W:		Write permission.
 * @KVM_PGTABLE_PROT_R:		Read permission.
 * @KVM_PGTABLE_PROT_DEVICE:	Device attributes.
 * @KVM_PGTABLE_PROT_SW0:	Software bit 0.
 * @KVM_PGTABLE_PROT_SW1:	Software bit 1.
 * @KVM_PGTABLE_PROT_SW2:	Software bit 2.
 * @KVM_PGTABLE_PROT_SW3:	Software bit 3.
 */
enum kvm_pgtable_prot {
	KVM_PGTABLE_PROT_X			= BIT(0),
	KVM_PGTABLE_PROT_W			= BIT(1),
	KVM_PGTABLE_PROT_R			= BIT(2),

	KVM_PGTABLE_PROT_DEVICE			= BIT(3),

	KVM_PGTABLE_PROT_SW0			= BIT(55),
	KVM_PGTABLE_PROT_SW1			= BIT(56),
	KVM_PGTABLE_PROT_SW2			= BIT(57),
	KVM_PGTABLE_PROT_SW3			= BIT(58),
};

#define KVM_PGTABLE_PROT_RW	(KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W)
#define KVM_PGTABLE_PROT_RWX	(KVM_PGTABLE_PROT_RW | KVM_PGTABLE_PROT_X)

#define PKVM_HOST_MEM_PROT	KVM_PGTABLE_PROT_RWX
#define PKVM_HOST_MMIO_PROT	KVM_PGTABLE_PROT_RW

#define PAGE_HYP		KVM_PGTABLE_PROT_RW
#define PAGE_HYP_EXEC		(KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_X)
#define PAGE_HYP_RO		(KVM_PGTABLE_PROT_R)
#define PAGE_HYP_DEVICE		(PAGE_HYP | KVM_PGTABLE_PROT_DEVICE)

typedef bool (*kvm_pgtable_force_pte_cb_t)(u64 addr, u64 end,
					   enum kvm_pgtable_prot prot);
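
/*
 * Example (a hypothetical sketch, not part of this API): a force_pte
 * callback that forbids block mappings for any range overlapping a
 * hypothetical region that must stay mapped at page granularity.
 * example_region_start/end are assumed to delimit that region:
 *
 *	static bool example_force_pte_cb(u64 addr, u64 end,
 *					 enum kvm_pgtable_prot prot)
 *	{
 *		return addr < example_region_end && end > example_region_start;
 *	}
 *
 * pKVM uses a callback of this type to decide when host stage-2 mappings
 * must be broken down to single pages.
 */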

/**
 * enum kvm_pgtable_walk_flags - Flags to control a depth-first page-table walk.
 * @KVM_PGTABLE_WALK_LEAF:		Visit leaf entries, including invalid
 *					entries.
 * @KVM_PGTABLE_WALK_TABLE_PRE:		Visit table entries before their
 *					children.
 * @KVM_PGTABLE_WALK_TABLE_POST:	Visit table entries after their
 *					children.
 * @KVM_PGTABLE_WALK_SHARED:		Indicates the page-tables may be shared
 *					with other software walkers.
 * @KVM_PGTABLE_WALK_HANDLE_FAULT:	Indicates the page-table walk was
 *					invoked from a fault handler.
 * @KVM_PGTABLE_WALK_SKIP_BBM_TLBI:	Visit and update table entries
 *					without performing break-before-make's
 *					TLB invalidation.
 * @KVM_PGTABLE_WALK_SKIP_CMO:		Visit and update table entries
 *					without performing the cache maintenance
 *					operations that would otherwise be
 *					required.
 */
enum kvm_pgtable_walk_flags {
	KVM_PGTABLE_WALK_LEAF			= BIT(0),
	KVM_PGTABLE_WALK_TABLE_PRE		= BIT(1),
	KVM_PGTABLE_WALK_TABLE_POST		= BIT(2),
	KVM_PGTABLE_WALK_SHARED			= BIT(3),
	KVM_PGTABLE_WALK_HANDLE_FAULT		= BIT(4),
	KVM_PGTABLE_WALK_SKIP_BBM_TLBI		= BIT(5),
	KVM_PGTABLE_WALK_SKIP_CMO		= BIT(6),
};

struct kvm_pgtable_visit_ctx {
	kvm_pte_t				*ptep;
	kvm_pte_t				old;
	void					*arg;
	struct kvm_pgtable_mm_ops		*mm_ops;
	u64					start;
	u64					addr;
	u64					end;
	s8					level;
	enum kvm_pgtable_walk_flags		flags;
};

typedef int (*kvm_pgtable_visitor_fn_t)(const struct kvm_pgtable_visit_ctx *ctx,
					enum kvm_pgtable_walk_flags visit);

static inline bool kvm_pgtable_walk_shared(const struct kvm_pgtable_visit_ctx *ctx)
{
	return ctx->flags & KVM_PGTABLE_WALK_SHARED;
}

/**
 * struct kvm_pgtable_walker - Hook into a page-table walk.
 * @cb:		Callback function to invoke during the walk.
 * @arg:	Argument passed to the callback function.
 * @flags:	Bitwise-OR of flags to identify the entry types on which to
 *		invoke the callback function.
 */
struct kvm_pgtable_walker {
	const kvm_pgtable_visitor_fn_t		cb;
	void * const				arg;
	const enum kvm_pgtable_walk_flags	flags;
};
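
/*
 * Example (a sketch with hypothetical names): a visitor that counts valid
 * leaf entries, with the counter passed back through @arg. See
 * kvm_pgtable_walk() below for how such a walker is driven over a range:
 *
 *	static int example_count_valid(const struct kvm_pgtable_visit_ctx *ctx,
 *				       enum kvm_pgtable_walk_flags visit)
 *	{
 *		unsigned long *count = ctx->arg;
 *
 *		if (kvm_pte_valid(ctx->old))
 *			(*count)++;
 *
 *		return 0;
 *	}
 *
 *	unsigned long count = 0;
 *	struct kvm_pgtable_walker walker = {
 *		.cb	= example_count_valid,
 *		.arg	= &count,
 *		.flags	= KVM_PGTABLE_WALK_LEAF,
 *	};
 */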

/*
 * RCU cannot be used in a non-kernel context such as the hyp. As such, page
 * table walkers used in hyp do not call into RCU and instead use other
 * synchronization mechanisms (such as a spinlock).
 */
#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)

typedef kvm_pte_t *kvm_pteref_t;

static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
						kvm_pteref_t pteref)
{
	return pteref;
}

static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
{
	/*
	 * Due to the lack of RCU (or a similar protection scheme), only
	 * non-shared table walkers are allowed in the hypervisor.
	 */
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		return -EPERM;

	return 0;
}

static inline void kvm_pgtable_walk_end(struct kvm_pgtable_walker *walker) {}

static inline bool kvm_pgtable_walk_lock_held(void)
{
	return true;
}

#else

typedef kvm_pte_t __rcu *kvm_pteref_t;

static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
						kvm_pteref_t pteref)
{
	return rcu_dereference_check(pteref, !(walker->flags & KVM_PGTABLE_WALK_SHARED));
}

static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
{
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		rcu_read_lock();

	return 0;
}

static inline void kvm_pgtable_walk_end(struct kvm_pgtable_walker *walker)
{
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		rcu_read_unlock();
}

static inline bool kvm_pgtable_walk_lock_held(void)
{
	return rcu_read_lock_held();
}

#endif
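
/*
 * Example (a simplified sketch of the intended calling convention; the real
 * walker core lives in arch/arm64/kvm/hyp/pgtable.c): a walk brackets itself
 * with the begin/end hooks above and only dereferences the root via
 * kvm_dereference_pteref(), so shared (RCU-protected) and exclusive walks
 * share a single code path. walk_the_tables() is a hypothetical helper:
 *
 *	int r = kvm_pgtable_walk_begin(walker);
 *
 *	if (!r) {
 *		kvm_pte_t *pgd = kvm_dereference_pteref(walker, pgt->pgd);
 *
 *		r = walk_the_tables(pgd, walker);
 *		kvm_pgtable_walk_end(walker);
 *	}
 */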

/**
 * struct kvm_pgtable - KVM page-table.
 * @ia_bits:		Maximum input address size, in bits.
 * @start_level:	Level at which the page-table walk starts.
 * @pgd:		Pointer to the first top-level entry of the page-table.
 * @mm_ops:		Memory management callbacks.
 * @mmu:		Stage-2 KVM MMU struct. Unused for stage-1 page-tables.
 * @flags:		Stage-2 page-table flags.
 * @force_pte_cb:	Function that returns true if page level mappings must
 *			be used instead of block mappings.
 */
struct kvm_pgtable {
	u32					ia_bits;
	s8					start_level;
	kvm_pteref_t				pgd;
	struct kvm_pgtable_mm_ops		*mm_ops;

	/* Stage-2 only */
	struct kvm_s2_mmu			*mmu;
	enum kvm_pgtable_stage2_flags		flags;
	kvm_pgtable_force_pte_cb_t		force_pte_cb;
};

/**
 * kvm_pgtable_hyp_init() - Initialise a hypervisor stage-1 page-table.
 * @pgt:	Uninitialised page-table structure to initialise.
 * @va_bits:	Maximum virtual address bits.
 * @mm_ops:	Memory management callbacks.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
			 struct kvm_pgtable_mm_ops *mm_ops);

/**
 * kvm_pgtable_hyp_destroy() - Destroy an unused hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior
 * to freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt);

/**
 * kvm_pgtable_hyp_map() - Install a mapping in a hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 * @addr:	Virtual address at which to place the mapping.
 * @size:	Size of the mapping.
 * @phys:	Physical address of the memory to map.
 * @prot:	Permissions and attributes for the mapping.
 *
 * The offset of @addr within a page is ignored, @size is rounded-up to
 * the next page boundary and @phys is rounded-down to the previous page
 * boundary.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable. Attempts to install a new mapping
 * for a virtual address that is already mapped will be rejected with an
 * error and a WARN().
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
			enum kvm_pgtable_prot prot);

/**
 * kvm_pgtable_hyp_unmap() - Remove a mapping from a hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 * @addr:	Virtual address from which to remove the mapping.
 * @size:	Size of the mapping.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * TLB invalidation is performed for each page-table entry cleared during the
 * unmapping operation and the reference count for the page-table page
 * containing the cleared entry is decremented, with unreferenced pages being
 * freed. The unmapping operation will stop early if it encounters either an
 * invalid page-table entry or a valid block mapping which maps beyond the range
 * being unmapped.
 *
 * Return: Number of bytes unmapped, which may be 0.
 */
u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
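
/*
 * Example (a sketch, assuming an mm_ops instance like the one outlined
 * earlier; va and phys are hypothetical, suitably aligned addresses):
 * initialising a 48-bit hypervisor stage-1 table and mapping one page of
 * normal memory read-only:
 *
 *	struct kvm_pgtable pgt;
 *	int r = kvm_pgtable_hyp_init(&pgt, 48, &example_mm_ops);
 *
 *	if (!r)
 *		r = kvm_pgtable_hyp_map(&pgt, va, PAGE_SIZE, phys, PAGE_HYP_RO);
 *
 * A later kvm_pgtable_hyp_unmap(&pgt, va, PAGE_SIZE) returns the number of
 * bytes actually unmapped.
 */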

/**
 * kvm_get_vtcr() - Helper to construct VTCR_EL2
 * @mmfr0:	Sanitized value of SYS_ID_AA64MMFR0_EL1 register.
 * @mmfr1:	Sanitized value of SYS_ID_AA64MMFR1_EL1 register.
 * @phys_shift:	Value to set in VTCR_EL2.T0SZ.
 *
 * The VTCR value is common across all the physical CPUs on the system.
 * We use system wide sanitised values to fill in different fields,
 * except for Hardware Management of Access Flags. The HA flag is set
 * unconditionally on all CPUs, as it is safe to run with or without
 * the feature and the bit is RES0 on CPUs that don't support it.
 *
 * Return: VTCR_EL2 value
 */
u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift);

/**
 * kvm_pgtable_stage2_pgd_size() - Helper to compute size of a stage-2 PGD
 * @vtcr:	Content of the VTCR register.
 *
 * Return: the size (in bytes) of the stage-2 PGD
 */
size_t kvm_pgtable_stage2_pgd_size(u64 vtcr);

/**
 * __kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table.
 * @pgt:	Uninitialised page-table structure to initialise.
 * @mmu:	S2 MMU context for this S2 translation
 * @mm_ops:	Memory management callbacks.
 * @flags:	Stage-2 configuration flags.
 * @force_pte_cb: Function that returns true if page level mappings must
 *		be used instead of block mappings.
 *
 * Return: 0 on success, negative error code on failure.
 */
int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
			      struct kvm_pgtable_mm_ops *mm_ops,
			      enum kvm_pgtable_stage2_flags flags,
			      kvm_pgtable_force_pte_cb_t force_pte_cb);

#define kvm_pgtable_stage2_init(pgt, mmu, mm_ops) \
	__kvm_pgtable_stage2_init(pgt, mmu, mm_ops, 0, NULL)

/**
 * kvm_pgtable_stage2_destroy() - Destroy an unused guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior
 * to freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);

/**
 * kvm_pgtable_stage2_free_unlinked() - Free an unlinked stage-2 paging structure.
 * @mm_ops:	Memory management callbacks.
 * @pgtable:	Unlinked stage-2 paging structure to be freed.
 * @level:	Level of the stage-2 paging structure to be freed.
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior to
 * freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level);

/**
 * kvm_pgtable_stage2_create_unlinked() - Create an unlinked stage-2 paging structure.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @phys:	Physical address of the memory to map.
 * @level:	Starting level of the stage-2 paging structure to be created.
 * @prot:	Permissions and attributes for the mapping.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @force_pte:	Force mappings to PAGE_SIZE granularity.
 *
 * Returns an unlinked page-table tree. This new page-table tree is
 * not reachable (i.e., it is unlinked) from the root pgd and is
 * therefore unreachable by the hardware page-table walker. No TLB
 * invalidation or CMOs are performed.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable.
 *
 * Return: The fully populated (unlinked) stage-2 paging structure, or
 * an ERR_PTR(error) on failure.
 */
kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt,
					      u64 phys, s8 level,
					      enum kvm_pgtable_prot prot,
					      void *mc, bool force_pte);

/**
 * kvm_pgtable_stage2_map() - Install a mapping in a guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address at which to place the mapping.
 * @size:	Size of the mapping.
 * @phys:	Physical address of the memory to map.
 * @prot:	Permissions and attributes for the mapping.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @flags:	Flags to control the page-table walk (e.g. a shared walk)
 *
 * The offset of @addr within a page is ignored, @size is rounded-up to
 * the next page boundary and @phys is rounded-down to the previous page
 * boundary.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable.
 *
 * Note that the update of a valid leaf PTE in this function will be aborted
 * if it would recreate the exact same mapping or only change the access
 * permissions. Instead, the vCPU will exit from the guest once more if still
 * needed and then go through the path of relaxing permissions.
 *
 * Note that this function will both coalesce existing table entries and split
 * existing block mappings, relying on page-faults to fault back areas outside
 * of the new mapping lazily.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
			   u64 phys, enum kvm_pgtable_prot prot,
			   void *mc, enum kvm_pgtable_walk_flags flags);
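
/*
 * Example (a sketch, for illustration): installing a writable, executable
 * page at IPA "ipa" backed by host physical page "phys", drawing page-table
 * pages from a pre-topped-up memcache. kvm_mmu_memory_cache is the generic
 * KVM memcache type; the variable names are hypothetical:
 *
 *	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
 *	int r;
 *
 *	r = kvm_pgtable_stage2_map(pgt, ipa, PAGE_SIZE, phys,
 *				   KVM_PGTABLE_PROT_RWX, memcache, 0);
 */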

/**
 * kvm_pgtable_stage2_set_owner() - Unmap and annotate pages in the IPA space to
 *				    track ownership.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Base intermediate physical address to annotate.
 * @size:	Size of the annotated range.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @owner_id:	Unique identifier for the owner of the page.
 *
 * By default, all page-tables are owned by identifier 0. This function can be
 * used to mark portions of the IPA space as owned by other entities. When a
 * stage 2 is used with identity-mappings, these annotations allow the
 * page-table data structure to be used as a simple rmap.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
				 void *mc, u8 owner_id);

/**
 * kvm_pgtable_stage2_unmap() - Remove a mapping from a guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to remove the mapping.
 * @size:	Size of the mapping.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * TLB invalidation is performed for each page-table entry cleared during the
 * unmapping operation and the reference count for the page-table page
 * containing the cleared entry is decremented, with unreferenced pages being
 * freed. Unmapping a cacheable page will ensure that it is clean to the PoC if
 * FWB is not supported by the CPU.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_wrprotect() - Write-protect guest stage-2 address range
 *				    without TLB invalidation.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to write-protect.
 * @size:	Size of the range.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * Note that it is the caller's responsibility to invalidate the TLB after
 * calling this function to ensure that the updated permissions are visible
 * to the CPUs.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_mkyoung() - Set the access flag in a page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 *
 * The offset of @addr within a page is ignored.
 *
 * If there is a valid, leaf page-table entry used to translate @addr, then
 * set the access flag in that entry.
 *
 * Return: The old page-table entry prior to setting the flag, 0 on failure.
 */
kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr);

/**
 * kvm_pgtable_stage2_test_clear_young() - Test and optionally clear the access
 *					   flag in a page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 * @size:	Size of the address range to visit.
 * @mkold:	True if the access flag should be cleared.
 *
 * The offset of @addr within a page is ignored.
 *
 * Tests and conditionally clears the access flag for every valid, leaf
 * page-table entry used to translate the range [@addr, @addr + @size).
 *
 * Note that it is the caller's responsibility to invalidate the TLB after
 * calling this function to ensure that the updated permissions are visible
 * to the CPUs.
 *
 * Return: True if any of the visited PTEs had the access flag set.
 */
bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
					 u64 size, bool mkold);

/**
 * kvm_pgtable_stage2_relax_perms() - Relax the permissions enforced by a
 *				      page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 * @prot:	Additional permissions to grant for the mapping.
 *
 * The offset of @addr within a page is ignored.
 *
 * If there is a valid, leaf page-table entry used to translate @addr, then
 * relax the permissions in that entry according to the read, write and
 * execute permissions specified by @prot. No permissions are removed, and
 * TLB invalidation is performed after updating the entry. Software bits cannot
 * be set or cleared using kvm_pgtable_stage2_relax_perms().
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
				   enum kvm_pgtable_prot prot);

/**
 * kvm_pgtable_stage2_flush() - Clean and invalidate data cache to Point of
 *				Coherency for guest stage-2 address range.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to flush.
 * @size:	Size of the range.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_split() - Split a range of huge pages into leaf PTEs pointing
 *				to PAGE_SIZE guest pages.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to split.
 * @size:	Size of the range.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 *
 * The function tries to split any level 1 or 2 entry that overlaps
 * with the input range (given by @addr and @size).
 *
 * Return: 0 on success, negative error code on failure. Note that
 * kvm_pgtable_stage2_split() is best effort: it tries to break as many
 * blocks in the input range as the capacity of @mc allows.
 */
int kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
			     struct kvm_mmu_memory_cache *mc);

/**
 * kvm_pgtable_walk() - Walk a page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_*_init().
 * @addr:	Input address for the start of the walk.
 * @size:	Size of the range to walk.
 * @walker:	Walker callback description.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * The walker will walk the page-table entries corresponding to the input
 * address range specified, visiting entries according to the walker flags.
 * Invalid entries are treated as leaf entries. The visited page table entry is
 * reloaded after invoking the walker callback, allowing the walker to descend
 * into a newly installed table.
 *
 * Returning a negative error code from the walker callback function will
 * terminate the walk immediately with the same error code.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
		     struct kvm_pgtable_walker *walker);
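
/*
 * Example (a sketch): driving the leaf-counting walker sketched alongside
 * struct kvm_pgtable_walker above over the first 2MB of the input address
 * space:
 *
 *	int r = kvm_pgtable_walk(pgt, 0, SZ_2M, &walker);
 *
 * On success, "count" holds the number of valid leaf entries visited.
 */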

/**
 * kvm_pgtable_get_leaf() - Walk a page-table and retrieve the leaf entry
 *			    with its level.
 * @pgt:	Page-table structure initialised by kvm_pgtable_*_init()
 *		or a similar initialiser.
 * @addr:	Input address for the start of the walk.
 * @ptep:	Pointer to storage for the retrieved PTE.
 * @level:	Pointer to storage for the level of the retrieved PTE.
 *
 * The offset of @addr within a page is ignored.
 *
 * The walker will walk the page-table entries corresponding to the input
 * address specified, retrieving the leaf corresponding to this address.
 * Invalid entries are treated as leaf entries.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
			 kvm_pte_t *ptep, s8 *level);
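
/*
 * Example (a sketch): querying the entry that translates a given address
 * and checking whether it is backed by a valid block or page:
 *
 *	kvm_pte_t pte;
 *	s8 level;
 *
 *	if (!kvm_pgtable_get_leaf(pgt, addr, &pte, &level) &&
 *	    kvm_pte_valid(pte))
 *		pr_info("mapped at level %d, size %llu\n",
 *			level, kvm_granule_size(level));
 */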

/**
 * kvm_pgtable_stage2_pte_prot() - Retrieve the protection attributes of a
 *				   stage-2 Page-Table Entry.
 * @pte:	Page-table entry
 *
 * Return: protection attributes of the page-table entry in the enum
 *	   kvm_pgtable_prot format.
 */
enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte);

/**
 * kvm_pgtable_hyp_pte_prot() - Retrieve the protection attributes of a stage-1
 *				Page-Table Entry.
 * @pte:	Page-table entry
 *
 * Return: protection attributes of the page-table entry in the enum
 *	   kvm_pgtable_prot format.
 */
enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte);

/**
 * kvm_tlb_flush_vmid_range() - Invalidate/flush a range of TLB entries
 *
 * @mmu:	Stage-2 KVM MMU struct
 * @addr:	The base intermediate physical address from which to invalidate
 * @size:	Size of the range from the base to invalidate
 */
void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
			      phys_addr_t addr, size_t size);
#endif	/* __ARM64_KVM_PGTABLE_H__ */