Lines Matching +full:page +full:- +full:size

// SPDX-License-Identifier: GPL-2.0-only

#define KVM_PGTABLE_FIRST_LEVEL		-1
/*
 * The largest supported block sizes for KVM (no 52-bit PA support):
 *  - 4K  (level 1): 1GB
 *  - 16K (level 2): 32MB
 *  - 64K (level 2): 512MB
 */
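
/*
 * A minimal sketch of the arithmetic behind the table above, not a helper
 * from this header: with a granule of 2^shift bytes, each level of lookup
 * resolves (shift - 3) bits, so a block entry at level n spans
 * shift + (3 - n) * (shift - 3) bits of address space. example_block_size
 * is hypothetical.
 */
static inline u64 example_block_size(int granule_shift, int level)
{
	/* granule_shift = 12 (4K),  level = 1: 1ULL << 30 == 1GB   */
	/* granule_shift = 14 (16K), level = 2: 1ULL << 25 == 32MB  */
	/* granule_shift = 16 (64K), level = 2: 1ULL << 29 == 512MB */
	return 1ULL << (granule_shift + (3 - level) * (granule_shift - 3));
}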
#define KVM_PHYS_INVALID		(-1ULL)
static inline bool kvm_is_block_size_supported(u64 size)
{
	/*
	 * IS_ALIGNED(size, size) expands to (size & (size - 1)) == 0,
	 * the usual power-of-two test.
	 */
	bool is_power_of_two = IS_ALIGNED(size, size);

	return is_power_of_two && (size & kvm_supported_block_sizes());
}
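
/*
 * Illustrative only (not part of the header): kvm_supported_block_sizes()
 * returns a bitmask of the block sizes the host granule supports, so one
 * AND plus the power-of-two test above decides the question.
 */
static inline void example_block_size_checks(void)
{
	/* 2MB is a level-2 block size on a 4K-granule host. */
	WARN_ON(!kvm_is_block_size_supported(SZ_2M));
	/* A non-power-of-two size can never be a block size. */
	WARN_ON(kvm_is_block_size_supported(SZ_2M + SZ_4K));
}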
/**
 * struct kvm_pgtable_mm_ops - Memory management callbacks.
 * @zalloc_page:		Allocate a single zeroed memory page.
 *				The @arg parameter can be used by the walker
 *				to pass a memory cache. The initial refcount of
 *				the page is 1.
 * @zalloc_pages_exact:		Allocate an exact number of zeroed memory pages.
 *				The @size parameter is in bytes, and is rounded
 *				up to the next page boundary. The resulting
 *				allocation is physically contiguous.
 * @get_page:			Increment the refcount on a page.
 * @put_page:			Decrement the refcount on a page. When the
 *				refcount reaches 0 the page is automatically
 *				freed.
 * @page_count:			Return the refcount of a page.
 */
	void*		(*zalloc_pages_exact)(size_t size);
	void		(*free_pages_exact)(void *addr, size_t size);
	void		(*dcache_clean_inval_poc)(void *addr, size_t size);
	void		(*icache_inval_pou)(void *addr, size_t size);
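
/*
 * A minimal, kernel-context sketch of wiring up these callbacks. The real
 * instances live in the KVM MMU code, not in this header, and every
 * example_* name here is hypothetical.
 */
static void *example_zalloc_page(void *arg)
{
	return (void *)get_zeroed_page(GFP_KERNEL);
}

static void example_get_page(void *addr)
{
	get_page(virt_to_page(addr));
}

static void example_put_page(void *addr)
{
	put_page(virt_to_page(addr));
}

static void *example_phys_to_virt(phys_addr_t phys)
{
	return __va(phys);
}

static phys_addr_t example_virt_to_phys(void *addr)
{
	return __pa(addr);
}

static struct kvm_pgtable_mm_ops example_mm_ops = {
	.zalloc_page	= example_zalloc_page,
	.get_page	= example_get_page,
	.put_page	= example_put_page,
	.phys_to_virt	= example_phys_to_virt,
	.virt_to_phys	= example_virt_to_phys,
};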
/**
 * enum kvm_pgtable_stage2_flags - Stage-2 page-table flags.
 * @KVM_PGTABLE_S2_NOFWB:	Don't enforce Normal-WB even if the CPUs have
 *				FWB.
 */
/**
 * enum kvm_pgtable_prot - Page-table permissions and attributes.
 */
/**
 * enum kvm_pgtable_walk_flags - Flags to control a depth-first page-table walk.
 * @KVM_PGTABLE_WALK_SHARED:		Indicates the page-tables may be shared
 *					with other software walkers.
 * @KVM_PGTABLE_WALK_HANDLE_FAULT:	Indicates the page-table walk was
 *					invoked from a fault handler.
 * @KVM_PGTABLE_WALK_SKIP_BBM_TLBI:	Visit and update table entries
 *					without Break-before-make's
 *					TLB invalidation.
 */
static inline bool kvm_pgtable_walk_shared(const struct kvm_pgtable_visit_ctx *ctx)
{
	return ctx->flags & KVM_PGTABLE_WALK_SHARED;
}
/**
 * struct kvm_pgtable_walker - Hook into a page-table walk.
 * @flags:	Bitwise-OR of flags to identify the entry types on which to
 *		invoke the callback function.
 */

/*
 * RCU cannot be used in a non-kernel context such as the hyp. As such, page
 * table walkers used in hyp do not call into RCU and instead use other
 * synchronisation mechanisms.
 */
	/*
	 * Due to the lack of RCU (or a similar protection scheme), only
	 * non-shared table walkers are allowed in the hypervisor.
	 */
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		return -EPERM;
	return rcu_dereference_check(pteref, !(walker->flags & KVM_PGTABLE_WALK_SHARED));
static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
{
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		rcu_read_lock();

	return 0;
}

static inline void kvm_pgtable_walk_end(struct kvm_pgtable_walker *walker)
{
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		rcu_read_unlock();
}
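
/*
 * Illustrative sketch, not a kernel API of its own: the bracketing a walk
 * performs around its visits, per the helpers above. Shared walks take and
 * release the RCU read lock; at EL2 they are rejected with -EPERM instead.
 */
static int example_bracketed_walk(struct kvm_pgtable_walker *walker)
{
	int r = kvm_pgtable_walk_begin(walker);

	if (r)
		return r;

	/* ... visit entries, loading them via kvm_dereference_pteref() ... */

	kvm_pgtable_walk_end(walker);
	return 0;
}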
/**
 * struct kvm_pgtable - KVM page-table.
 * @ia_bits:		Maximum input address size, in bits.
 * @start_level:	Level at which the page-table walk starts.
 * @pgd:		Pointer to the first top-level entry of the page-table.
 * @mmu:		Stage-2 KVM MMU struct. Unused for stage-1 page-tables.
 * @flags:		Stage-2 page-table flags.
 * @force_pte_cb:	Function that returns true if page level mappings must
 *			be used instead of block mappings.
 */

	/* Stage-2 only */
/**
 * kvm_pgtable_hyp_init() - Initialise a hypervisor stage-1 page-table.
 * @pgt:	Uninitialised page-table structure to initialise.
 * @va_bits:	Maximum virtual address bits.
 * @mm_ops:	Memory management callbacks.
 *
 * Return: 0 on success, negative error code on failure.
 */
/**
 * kvm_pgtable_hyp_destroy() - Destroy an unused hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior
 * to freeing and therefore no TLB invalidation is performed.
 */
/**
 * kvm_pgtable_hyp_map() - Install a mapping in a hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 * @addr:	Virtual address at which to place the mapping.
 * @size:	Size of the mapping.
 * @phys:	Physical address of the memory to map.
 * @prot:	Permissions and attributes for the mapping.
 *
 * The offset of @addr within a page is ignored, @size is rounded-up to
 * the next page boundary and @phys is rounded-down to the previous page
 * boundary.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
			enum kvm_pgtable_prot prot);
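
/*
 * Illustrative only: initialise a hypervisor stage-1 table and install a
 * single-page RW mapping. example_mm_ops is the hypothetical callbacks
 * instance sketched earlier; the 48-bit VA size is an assumption.
 */
static int example_hyp_map_one(struct kvm_pgtable *pgt, u64 va, u64 pa)
{
	int ret = kvm_pgtable_hyp_init(pgt, 48, &example_mm_ops);

	if (ret)
		return ret;

	return kvm_pgtable_hyp_map(pgt, va, PAGE_SIZE, pa,
				   KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W);
}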
/**
 * kvm_pgtable_hyp_unmap() - Remove a mapping from a hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 * @addr:	Virtual address from which to remove the mapping.
 * @size:	Size of the mapping.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * TLB invalidation is performed for each page-table entry cleared during the
 * unmapping operation and the reference count for the page-table page
 * containing the cleared entry is decremented, with unreferenced pages being
 * freed. The unmapping operation will stop early if it encounters either an
 * invalid page-table entry or a valid block mapping which maps beyond the range
 * being unmapped.
 *
 * Return: Number of bytes unmapped, which may be 0.
 */
u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
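
/*
 * Illustrative only: since kvm_pgtable_hyp_unmap() returns the number of
 * bytes actually unmapped, a caller that requires the whole range to go
 * away must check the return value. The -EAGAIN policy is hypothetical.
 */
static int example_hyp_unmap_range(struct kvm_pgtable *pgt, u64 va, u64 size)
{
	return kvm_pgtable_hyp_unmap(pgt, va, size) == size ? 0 : -EAGAIN;
}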
/**
 * kvm_get_vtcr() - Helper to construct VTCR_EL2.
 */
/**
 * kvm_pgtable_stage2_pgd_size() - Helper to compute the size of a stage-2 PGD.
 *
 * Return: the size (in bytes) of the stage-2 PGD.
 */
/**
 * __kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table.
 * @pgt:	Uninitialised page-table structure to initialise.
 * @flags:	Stage-2 configuration flags.
 * @force_pte_cb:	Function that returns true if page level mappings must
 *			be used instead of block mappings.
 */
/**
 * kvm_pgtable_stage2_destroy() - Destroy an unused guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior
 * to freeing and therefore no TLB invalidation is performed.
 */
/**
 * kvm_pgtable_stage2_free_unlinked() - Free an unlinked stage-2 paging structure.
 * @pgtable:	Unlinked stage-2 paging structure to be freed.
 * @level:	Level of the stage-2 paging structure to be freed.
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior to
 * freeing and therefore no TLB invalidation is performed.
 */
/**
 * kvm_pgtable_stage2_create_unlinked() - Create an unlinked stage-2 paging structure.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @level:	Starting level of the stage-2 paging structure to be created.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 *
 * Returns an unlinked page-table tree. This new page-table tree is
 * not reachable (i.e., it is unlinked) from the root pgd and it is
 * therefore unreachable by the hardware page-table walker. No TLB
 * invalidation or CMOs are performed.
 *
 * Return: The fully populated (unlinked) stage-2 paging structure, or
 * an ERR_PTR(error) on failure.
 */
/**
 * kvm_pgtable_stage2_map() - Install a mapping in a guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address at which to place the mapping.
 * @size:	Size of the mapping.
 * @phys:	Physical address of the memory to map.
 * @prot:	Permissions and attributes for the mapping.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @flags:	Flags to control the page-table walk (e.g. a shared walk).
 *
 * The offset of @addr within a page is ignored, @size is rounded-up to
 * the next page boundary and @phys is rounded-down to the previous page
 * boundary.
 *
 * Note that this function will both coalesce existing table entries and split
 * existing block mappings, relying on page-faults to fault back areas outside
 * of the new mapping lazily.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
			   u64 phys, enum kvm_pgtable_prot prot,
			   void *mc, enum kvm_pgtable_walk_flags flags);
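
/*
 * Illustrative only: install a 2MB block at a 2MB-aligned IPA. @mc must
 * have been topped up by the caller, and example_stage2_map_block is a
 * hypothetical wrapper, not part of this header.
 */
static int example_stage2_map_block(struct kvm_pgtable *pgt, u64 ipa, u64 pa,
				    void *mc)
{
	return kvm_pgtable_stage2_map(pgt, ipa, SZ_2M, pa,
				      KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W,
				      mc, 0);
}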
/**
 * kvm_pgtable_stage2_set_owner() - Unmap and annotate pages in the IPA space to
 *				    track ownership (and more).
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Base intermediate physical address to annotate.
 * @size:	Size of the annotated range.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @owner_id:	Unique identifier for the owner of the page.
 *
 * By default, all page-tables are owned by identifier 0. This function can be
 * used to give ownership of a range of the IPA space to another owner. When the
 * stage 2 is used with identity-mappings, these annotations allow the
 * page-table data structure to be used as a simple rmap.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
				 void *mc, u8 owner_id);
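
/*
 * Illustrative only: hand a page of the IPA space to a hypothetical owner
 * id 1, identifier 0 being the default owner per the kernel-doc above.
 */
static int example_stage2_disown_page(struct kvm_pgtable *pgt, u64 ipa,
				      void *mc)
{
	return kvm_pgtable_stage2_set_owner(pgt, ipa, PAGE_SIZE, mc, 1);
}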
/**
 * kvm_pgtable_stage2_unmap() - Remove a mapping from a guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to remove the mapping.
 * @size:	Size of the mapping.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * TLB invalidation is performed for each page-table entry cleared during the
 * unmapping operation and the reference count for the page-table page
 * containing the cleared entry is decremented, with unreferenced pages being
 * freed. Unmapping a cacheable page will ensure that it is clean to the PoC if
 * FWB is not supported by the CPU.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
/**
 * kvm_pgtable_stage2_wrprotect() - Write-protect guest stage-2 address range
 *				    without performing break-before-make.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to write-protect.
 * @size:	Size of the range.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * Note that it is the caller's responsibility to invalidate the TLB after
 * calling this function to ensure that the updated permissions are visible
 * to the CPUs.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);
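
/*
 * Illustrative only: write-protect a range for dirty logging and then
 * invalidate the TLB for it with the range helper declared at the end of
 * this header. The pairing is the caller's responsibility.
 */
static int example_wp_range(struct kvm_s2_mmu *mmu, struct kvm_pgtable *pgt,
			    u64 ipa, u64 size)
{
	int ret = kvm_pgtable_stage2_wrprotect(pgt, ipa, size);

	if (!ret)
		kvm_tlb_flush_vmid_range(mmu, ipa, size);

	return ret;
}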
/**
 * kvm_pgtable_stage2_mkyoung() - Set the access flag in a page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 *
 * The offset of @addr within a page is ignored.
 *
 * If there is a valid, leaf page-table entry used to translate @addr, then
 * set the access flag in that entry.
 *
 * Return: The old page-table entry prior to setting the flag, 0 on failure.
 */
/**
 * kvm_pgtable_stage2_test_clear_young() - Test and optionally clear the access
 *					   flag in a page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 * @size:	Size of the address range to visit.
 * @mkold:	True if the access flag should be cleared.
 *
 * The offset of @addr within a page is ignored.
 *
 * Tests and conditionally clears the access flag for every valid, leaf
 * page-table entry used to translate the range [@addr, @addr + @size).
 *
 * Return: True if any of the visited PTEs had the access flag set.
 */
bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
					 u64 size, bool mkold);
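
/*
 * Illustrative only: test the access flag over a range without modifying
 * it. Passing mkold == true instead would also clear the flag, turning the
 * query into an ageing pass.
 */
static bool example_range_is_young(struct kvm_pgtable *pgt, u64 ipa, u64 size)
{
	return kvm_pgtable_stage2_test_clear_young(pgt, ipa, size, false);
}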
/**
 * kvm_pgtable_stage2_relax_perms() - Relax the permissions enforced by a
 *				      page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 * @prot:	Additional permissions to grant for the mapping.
 *
 * The offset of @addr within a page is ignored.
 *
 * If there is a valid, leaf page-table entry used to translate @addr, then
 * relax the permissions in that entry according to the read, write and
 * execute permissions specified in @prot. No permissions are removed.
 */
/**
 * kvm_pgtable_stage2_flush() - Clean and invalidate data cache to Point
 *				of Coherency for guest stage-2 address
 *				range.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to flush.
 * @size:	Size of the range.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);
/**
 * kvm_pgtable_stage2_split() - Split a range of huge pages into leaf PTEs pointing
 *				to PAGE_SIZE guest pages.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init().
 * @addr:	Intermediate physical address from which to split.
 * @size:	Size of the range.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 *
 * The function tries to split any level 1 or 2 entry that overlaps
 * with the input range (given by @addr and @size).
 *
 * Return: 0 on success, negative error code on failure. Note that
 * kvm_pgtable_stage2_split() is best effort: it tries to break as many
 * entries in the input range as possible, but it is not guaranteed that
 * all of them will be broken.
 */
int kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
			     struct kvm_mmu_memory_cache *mc);
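
/*
 * Illustrative only: eagerly split huge mappings ahead of dirty logging.
 * Because the API is best effort, a caller that needs full coverage may
 * top up @mc and call again; the retry policy here is hypothetical.
 */
static int example_eager_split(struct kvm_pgtable *pgt, u64 ipa, u64 size,
			       struct kvm_mmu_memory_cache *mc)
{
	return kvm_pgtable_stage2_split(pgt, ipa, size, mc);
}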
/**
 * kvm_pgtable_walk() - Walk a page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_*_init().
 * @addr:	Input address for the start of the walk.
 * @size:	Size of the range to walk.
 * @walker:	Walker callback description.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * The walker will walk the page-table entries corresponding to the input
 * address range specified, visiting entries according to the walker flags.
 * Invalid entries are treated as leaf entries. The visited page table entry is
 * reloaded after invoking the walker callback, allowing the walker to descend
 * into a newly installed table.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
		     struct kvm_pgtable_walker *walker);
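
/*
 * Illustrative only: a walker that counts valid leaf entries in a range.
 * The callback signature follows this header's visitor callback; the
 * example_* names are hypothetical.
 */
static int example_count_cb(const struct kvm_pgtable_visit_ctx *ctx,
			    enum kvm_pgtable_walk_flags visit)
{
	u64 *count = ctx->arg;

	if (kvm_pte_valid(ctx->old))
		(*count)++;

	return 0;
}

static u64 example_count_valid(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
	u64 count = 0;
	struct kvm_pgtable_walker walker = {
		.cb	= example_count_cb,
		.arg	= &count,
		.flags	= KVM_PGTABLE_WALK_LEAF,
	};

	kvm_pgtable_walk(pgt, addr, size, &walker);

	return count;
}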
/**
 * kvm_pgtable_get_leaf() - Walk a page-table and retrieve the leaf entry
 *			    with its level.
 * @pgt:	Page-table structure initialised by kvm_pgtable_*_init()
 *		or a similar initialiser.
 *
 * The offset of @addr within a page is ignored.
 *
 * The walker will walk the page-table entries corresponding to the input
 * address specified, retrieving the leaf corresponding to this address.
 */
/**
 * kvm_pgtable_stage2_pte_prot() - Retrieve the protection attributes of a
 *				   stage-2 Page-Table Entry.
 * @pte:	Page-table entry.
 *
 * Return: protection attributes of the page-table entry in the enum
 *	    kvm_pgtable_prot format.
 */
/**
 * kvm_pgtable_hyp_pte_prot() - Retrieve the protection attributes of a stage-1
 *				Page-Table Entry.
 * @pte:	Page-table entry.
 *
 * Return: protection attributes of the page-table entry in the enum
 *	    kvm_pgtable_prot format.
 */
/**
 * kvm_tlb_flush_vmid_range() - Invalidate/flush a range of TLB entries.
 * @mmu:	Stage-2 KVM MMU struct.
 * @addr:	The base intermediate physical address from which to invalidate.
 * @size:	Size of the range from the base to invalidate.
 */
void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
			      phys_addr_t addr, size_t size);