Lines Matching +full:ext +full:- +full:gen
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Kernel-based Virtual Machine driver for Linux
5 * This module enables machines with Intel VT-x extensions to run virtual
57 #include <asm/spec-ctrl.h>
64 int __read_mostly nx_huge_pages = -1;
100 * When setting this variable to true it enables Two-Dimensional-Paging
102 * 1. the guest-virtual to guest-physical
103 * 2. while doing 1. it walks guest-physical to host-physical
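The two steps above compose: a guest-virtual access is resolved through the guest's tables to a guest-physical address, which is then resolved through the TDP (EPT/NPT) tables to a host-physical address. A minimal userspace sketch of that composition, with toy single-stage walkers standing in for real multi-level page-table walks:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gva_t, gpa_t, hpa_t;

/* Toy single-stage translators; real walks traverse multi-level tables. */
static bool guest_walk(gva_t gva, gpa_t *gpa) { *gpa = gva & 0xffffffffull; return true; }
static bool host_walk(gpa_t gpa, hpa_t *hpa)  { *hpa = gpa + 0x100000000ull; return true; }

/* Step 1 then step 2 from the comment above: guest-virtual -> guest-physical -> host-physical. */
static bool tdp_translate(gva_t gva, hpa_t *hpa)
{
	gpa_t gpa;

	return guest_walk(gva, &gpa) && host_walk(gpa, hpa);
}

int main(void)
{
	hpa_t hpa;

	if (tdp_translate(0x7f0000401000ull, &hpa))
		printf("hpa = %#llx\n", (unsigned long long)hpa);
	return 0;
}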
203 return !!(regs->reg & flag); \
225 return !!(mmu->cpu_role. base_or_ext . reg##_##name); \
228 BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pse);
229 BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smep);
230 BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smap);
231 BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pke);
232 BUILD_MMU_ROLE_ACCESSOR(ext, cr4, la57);
234 BUILD_MMU_ROLE_ACCESSOR(ext, efer, lma);
238 return mmu->cpu_role.base.level > 0; in is_cr0_pg()
243 return !mmu->cpu_role.base.has_4_byte_gpte; in is_cr4_pae()
251 .efer = vcpu->arch.efer, in vcpu_to_role_regs()
265 if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && mmu->get_guest_pgd == get_guest_cr3) in kvm_mmu_get_guest_pgd()
268 return mmu->get_guest_pgd(vcpu); in kvm_mmu_get_guest_pgd()
288 kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level); in kvm_flush_remote_tlbs_sptep()
317 u64 kvm_gen, spte_gen, gen; in check_mmio_spte() local
319 gen = kvm_vcpu_memslots(vcpu)->generation; in check_mmio_spte()
320 if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS)) in check_mmio_spte()
323 kvm_gen = gen & MMIO_SPTE_GEN_MASK; in check_mmio_spte()
376 sp->clear_spte_count++; in count_spte_clear()
386 ssptep->spte_high = sspte.spte_high; in __set_spte()
395 WRITE_ONCE(ssptep->spte_low, sspte.spte_low); in __set_spte()
405 WRITE_ONCE(ssptep->spte_low, sspte.spte_low); in __update_clear_spte_fast()
413 ssptep->spte_high = sspte.spte_high; in __update_clear_spte_fast()
425 orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low); in __update_clear_spte_slow()
426 orig.spte_high = ssptep->spte_high; in __update_clear_spte_slow()
427 ssptep->spte_high = sspte.spte_high; in __update_clear_spte_slow()
439 * we need to protect against in-progress updates of the spte.
442 * for the high part of the spte. The race is fine for a present->non-present
443 * change (because the high part of the spte is ignored for non-present spte),
444 * but for a present->present change we must reread the spte.
446 * All such changes are done in two steps (present->non-present and
447 * non-present->present), hence it is enough to count the number of
448 * present->non-present updates: if it changed while reading the spte,
458 count = sp->clear_spte_count; in __get_spte_lockless()
461 spte.spte_low = orig->spte_low; in __get_spte_lockless()
464 spte.spte_high = orig->spte_high; in __get_spte_lockless()
467 if (unlikely(spte.spte_low != orig->spte_low || in __get_spte_lockless()
468 count != sp->clear_spte_count)) in __get_spte_lockless()
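A standalone sketch of the retry pattern used by __get_spte_lockless() above, with C11 atomics standing in for the kernel's READ_ONCE()/smp_rmb() and simplified field names; the real code orders the two half-reads with explicit barriers:

#include <stdatomic.h>
#include <stdint.h>

/* A 64-bit SPTE stored as two 32-bit halves, as on a 32-bit host. */
struct split_spte {
	_Atomic uint32_t low;
	_Atomic uint32_t high;
};

struct shadow_page_sketch {
	_Atomic unsigned int clear_count;	/* bumped on every present -> non-present update */
	struct split_spte spte;
};

/* Retry until neither the low half nor the clear count changed under us. */
static uint64_t get_spte_lockless(struct shadow_page_sketch *sp)
{
	uint32_t lo, hi;
	unsigned int count;

	do {
		count = atomic_load(&sp->clear_count);
		lo = atomic_load(&sp->spte.low);
		hi = atomic_load(&sp->spte.high);
	} while (lo != atomic_load(&sp->spte.low) ||
		 count != atomic_load(&sp->clear_count));

	return ((uint64_t)hi << 32) | lo;
}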
524 int level = sptep_to_sp(sptep)->role.level; in mmu_spte_clear_track_bits()
535 kvm_update_page_stats(kvm, level, -1); in mmu_spte_clear_track_bits()
556 return tdp_mmu_enabled && vcpu->arch.mmu->root_role.direct; in is_tdp_mmu_active()
565 * Prevent page table teardown by making any free-er wait during in walk_shadow_page_lockless_begin()
572 * to vcpu->mode. in walk_shadow_page_lockless_begin()
574 smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES); in walk_shadow_page_lockless_begin()
584 * Make sure the write to vcpu->mode is not reordered in front of in walk_shadow_page_lockless_end()
588 smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE); in walk_shadow_page_lockless_end()
598 r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache, in mmu_topup_memory_caches()
602 if (kvm_has_mirrored_tdp(vcpu->kvm)) { in mmu_topup_memory_caches()
603 r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_external_spt_cache, in mmu_topup_memory_caches()
608 r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache, in mmu_topup_memory_caches()
613 r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadowed_info_cache, in mmu_topup_memory_caches()
618 return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache, in mmu_topup_memory_caches()
624 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache); in mmu_free_memory_caches()
625 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache); in mmu_free_memory_caches()
626 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadowed_info_cache); in mmu_free_memory_caches()
627 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_external_spt_cache); in mmu_free_memory_caches()
628 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache); in mmu_free_memory_caches()
640 if (sp->role.passthrough) in kvm_mmu_page_get_gfn()
641 return sp->gfn; in kvm_mmu_page_get_gfn()
643 if (sp->shadowed_translation) in kvm_mmu_page_get_gfn()
644 return sp->shadowed_translation[index] >> PAGE_SHIFT; in kvm_mmu_page_get_gfn()
646 return sp->gfn + (index << ((sp->role.level - 1) * SPTE_LEVEL_BITS)); in kvm_mmu_page_get_gfn()
657 if (sp->shadowed_translation) in kvm_mmu_page_get_access()
658 return sp->shadowed_translation[index] & ACC_ALL; in kvm_mmu_page_get_access()
661 * For direct MMUs (e.g. TDP or non-paging guests) or passthrough SPs, in kvm_mmu_page_get_access()
670 * In both cases, sp->role.access contains the correct access bits. in kvm_mmu_page_get_access()
672 return sp->role.access; in kvm_mmu_page_get_access()
678 if (sp->shadowed_translation) { in kvm_mmu_page_set_translation()
679 sp->shadowed_translation[index] = (gfn << PAGE_SHIFT) | access; in kvm_mmu_page_set_translation()
685 sp->role.passthrough ? "passthrough" : "direct", in kvm_mmu_page_set_translation()
686 sp->gfn, kvm_mmu_page_get_access(sp, index), access); in kvm_mmu_page_set_translation()
690 sp->role.passthrough ? "passthrough" : "direct", in kvm_mmu_page_set_translation()
691 sp->gfn, kvm_mmu_page_get_gfn(sp, index), gfn); in kvm_mmu_page_set_translation()
711 idx = gfn_to_index(gfn, slot->base_gfn, level); in lpage_info_slot()
712 return &slot->arch.lpage_info[level - 2][idx]; in lpage_info_slot()
732 old = linfo->disallow_lpage; in update_gfn_disallow_lpage_count()
733 linfo->disallow_lpage += count; in update_gfn_disallow_lpage_count()
734 WARN_ON_ONCE((old ^ linfo->disallow_lpage) & KVM_LPAGE_MIXED_FLAG); in update_gfn_disallow_lpage_count()
745 update_gfn_disallow_lpage_count(slot, gfn, -1); in kvm_mmu_gfn_allow_lpage()
754 kvm->arch.indirect_shadow_pages++; in account_shadowed()
756 * Ensure indirect_shadow_pages is elevated prior to re-reading guest in account_shadowed()
757 * child PTEs in FNAME(gpte_changed), i.e. guarantee either in-flight in account_shadowed()
758 * emulated writes are visible before re-reading guest PTEs, or that in account_shadowed()
764 gfn = sp->gfn; in account_shadowed()
765 slots = kvm_memslots_for_spte_role(kvm, sp->role); in account_shadowed()
768 /* the non-leaf shadow pages are kept read-only. */ in account_shadowed()
769 if (sp->role.level > PG_LEVEL_4K) in account_shadowed()
788 if (!list_empty(&sp->possible_nx_huge_page_link)) in track_possible_nx_huge_page()
791 ++kvm->stat.nx_lpage_splits; in track_possible_nx_huge_page()
792 list_add_tail(&sp->possible_nx_huge_page_link, in track_possible_nx_huge_page()
793 &kvm->arch.possible_nx_huge_pages); in track_possible_nx_huge_page()
799 sp->nx_huge_page_disallowed = true; in account_nx_huge_page()
811 kvm->arch.indirect_shadow_pages--; in unaccount_shadowed()
812 gfn = sp->gfn; in unaccount_shadowed()
813 slots = kvm_memslots_for_spte_role(kvm, sp->role); in unaccount_shadowed()
815 if (sp->role.level > PG_LEVEL_4K) in unaccount_shadowed()
823 if (list_empty(&sp->possible_nx_huge_page_link)) in untrack_possible_nx_huge_page()
826 --kvm->stat.nx_lpage_splits; in untrack_possible_nx_huge_page()
827 list_del_init(&sp->possible_nx_huge_page_link); in untrack_possible_nx_huge_page()
832 sp->nx_huge_page_disallowed = false; in unaccount_nx_huge_page()
844 if (!slot || slot->flags & KVM_MEMSLOT_INVALID) in gfn_to_memslot_dirty_bitmap()
855 * If the bit zero of rmap_head->val is clear, then it points to the only spte
856 * in this rmap chain. Otherwise, (rmap_head->val & ~3) points to a struct
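A hedged sketch of how such an encoded head value could be decoded; the flag names and descriptor layout below are simplified stand-ins for the real KVM_RMAP_* definitions and struct pte_list_desc:

#include <stdint.h>
#include <stddef.h>

#define RMAP_MANY	0x1ul	/* bit 0 set: value points to a descriptor chain */
#define RMAP_LOCKED	0x2ul	/* bit 1: head is locked; masked off below */

struct pte_list_desc_sketch {
	struct pte_list_desc_sketch *more;
	uint64_t *sptes[14];
};

/* Decode a head value per the comment above: empty, a single spte, or a chain. */
static void rmap_decode(unsigned long val, uint64_t **single,
			struct pte_list_desc_sketch **chain)
{
	*single = NULL;
	*chain = NULL;

	if (!val)
		return;				/* no sptes map this gfn */
	if (!(val & RMAP_MANY))
		*single = (uint64_t *)val;	/* the one and only spte */
	else
		*chain = (struct pte_list_desc_sketch *)(val & ~3ul);
}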
886 * Elide the lock if the rmap is empty, as lockless walkers (read-only in __kvm_rmap_lock()
891 old_val = atomic_long_read(&rmap_head->val); in __kvm_rmap_lock()
903 old_val = atomic_long_read(&rmap_head->val); in __kvm_rmap_lock()
924 } while (!atomic_long_try_cmpxchg_acquire(&rmap_head->val, &old_val, new_val)); in __kvm_rmap_lock()
928 * impossible for the return value to be 0 (see above), i.e. the read- in __kvm_rmap_lock()
937 lockdep_assert_held_write(&kvm->mmu_lock); in kvm_rmap_lock()
952 atomic_long_set_release(&rmap_head->val, val); in __kvm_rmap_unlock()
959 lockdep_assert_held_write(&kvm->mmu_lock); in kvm_rmap_unlock()
966 return atomic_long_read(&rmap_head->val) & ~KVM_RMAP_LOCKED; in kvm_rmap_get()
970 * If mmu_lock isn't held, rmaps can only be locked in read-only mode. The
1015 desc->sptes[0] = (u64 *)old_val; in pte_list_add()
1016 desc->sptes[1] = spte; in pte_list_add()
1017 desc->spte_count = 2; in pte_list_add()
1018 desc->tail_count = 0; in pte_list_add()
1023 count = desc->tail_count + desc->spte_count; in pte_list_add()
1029 if (desc->spte_count == PTE_LIST_EXT) { in pte_list_add()
1031 desc->more = (struct pte_list_desc *)(old_val & ~KVM_RMAP_MANY); in pte_list_add()
1032 desc->spte_count = 0; in pte_list_add()
1033 desc->tail_count = count; in pte_list_add()
1038 desc->sptes[desc->spte_count++] = spte; in pte_list_add()
1050 int j = head_desc->spte_count - 1; in pte_list_desc_remove_entry()
1060 * Replace the to-be-freed SPTE with the last valid entry from the head in pte_list_desc_remove_entry()
1064 desc->sptes[i] = head_desc->sptes[j]; in pte_list_desc_remove_entry()
1065 head_desc->sptes[j] = NULL; in pte_list_desc_remove_entry()
1066 head_desc->spte_count--; in pte_list_desc_remove_entry()
1067 if (head_desc->spte_count) in pte_list_desc_remove_entry()
1075 if (!head_desc->more) in pte_list_desc_remove_entry()
1078 *rmap_val = (unsigned long)head_desc->more | KVM_RMAP_MANY; in pte_list_desc_remove_entry()
1101 for (i = 0; i < desc->spte_count; ++i) { in pte_list_remove()
1102 if (desc->sptes[i] == spte) { in pte_list_remove()
1108 desc = desc->more; in pte_list_remove()
1145 for (i = 0; i < desc->spte_count; i++) in kvm_zap_all_rmap_sptes()
1146 mmu_spte_clear_track_bits(kvm, desc->sptes[i]); in kvm_zap_all_rmap_sptes()
1147 next = desc->more; in kvm_zap_all_rmap_sptes()
1167 return desc->tail_count + desc->spte_count; in pte_list_count()
1175 idx = gfn_to_index(gfn, slot->base_gfn, level); in gfn_to_rmap()
1176 return &slot->arch.rmap[level - PG_LEVEL_4K][idx]; in gfn_to_rmap()
1193 * information in sp->role. in rmap_remove()
1195 slots = kvm_memslots_for_spte_role(kvm, sp->role); in rmap_remove()
1198 rmap_head = gfn_to_rmap(gfn, sp->role.level, slot); in rmap_remove()
1230 iter->desc = NULL; in rmap_get_first()
1234 iter->desc = (struct pte_list_desc *)(rmap_val & ~KVM_RMAP_MANY); in rmap_get_first()
1235 iter->pos = 0; in rmap_get_first()
1236 return iter->desc->sptes[iter->pos]; in rmap_get_first()
1246 if (iter->desc) { in rmap_get_next()
1247 if (iter->pos < PTE_LIST_EXT - 1) { in rmap_get_next()
1248 ++iter->pos; in rmap_get_next()
1249 if (iter->desc->sptes[iter->pos]) in rmap_get_next()
1250 return iter->desc->sptes[iter->pos]; in rmap_get_next()
1253 iter->desc = iter->desc->more; in rmap_get_next()
1255 if (iter->desc) { in rmap_get_next()
1256 iter->pos = 0; in rmap_get_next()
1257 /* desc->sptes[0] cannot be NULL */ in rmap_get_next()
1258 return iter->desc->sptes[iter->pos]; in rmap_get_next()
1290 WARN_ON_ONCE(sp->role.level == PG_LEVEL_4K); in drop_large_spte()
1299 * Write-protect on the specified @sptep; @pt_protect indicates whether
1300 * spte write-protection is caused by protecting shadow page table.
1304 * - for dirty logging, the spte can be set to writable at any time if
1306 * - for spte protection, the spte can be writable only after unsync-ing
1350 * - D bit on ad-enabled SPTEs, and
1351 * - W bit on ad-disabled SPTEs.
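Put together, dirty logging clears whichever bit records a write for the SPTE in question. A sketch with illustrative bit positions (the real masks come from the SPTE definitions):

#include <stdbool.h>
#include <stdint.h>

#define SPTE_WRITABLE_SKETCH	(1ull << 1)	/* illustrative W bit */
#define SPTE_DIRTY_SKETCH	(1ull << 6)	/* illustrative D bit */

/*
 * With A/D bits enabled, clearing D is enough; without them the SPTE must be
 * write-protected so the next write faults and can be logged.
 */
static uint64_t clear_dirty_for_logging(uint64_t spte, bool ad_enabled)
{
	return spte & ~(ad_enabled ? SPTE_DIRTY_SKETCH : SPTE_WRITABLE_SKETCH);
}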
1380 slot->base_gfn + gfn_offset, mask, true); in kvm_mmu_write_protect_pt_masked()
1386 rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask), in kvm_mmu_write_protect_pt_masked()
1391 mask &= mask - 1; in kvm_mmu_write_protect_pt_masked()
1403 slot->base_gfn + gfn_offset, mask, false); in kvm_mmu_clear_dirty_pt_masked()
1409 rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask), in kvm_mmu_clear_dirty_pt_masked()
1414 mask &= mask - 1; in kvm_mmu_clear_dirty_pt_masked()
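The loops above visit one rmap per set bit in the dirty mask: __ffs() finds the lowest set bit and "mask &= mask - 1" clears it. The same walk in a standalone sketch:

#include <stdint.h>
#include <stdio.h>

static void for_each_dirty_gfn(uint64_t base_gfn, unsigned long mask)
{
	while (mask) {
		uint64_t gfn = base_gfn + __builtin_ctzl(mask);	/* like __ffs(mask) */

		printf("handle dirty gfn %#llx\n", (unsigned long long)gfn);
		mask &= mask - 1;	/* clear the lowest set bit */
	}
}

int main(void)
{
	for_each_dirty_gfn(0x1000, 0x2d);
	return 0;
}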
1423 * If the slot was assumed to be "initially all dirty", write-protect in kvm_arch_mmu_enable_log_dirty_pt_masked()
1434 gfn_t start = slot->base_gfn + gfn_offset + __ffs(mask); in kvm_arch_mmu_enable_log_dirty_pt_masked()
1435 gfn_t end = slot->base_gfn + gfn_offset + __fls(mask); in kvm_arch_mmu_enable_log_dirty_pt_masked()
1451 * mask. If PML is enabled and the GFN doesn't need to be write- in kvm_arch_mmu_enable_log_dirty_pt_masked()
1497 return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K); in kvm_vcpu_write_protect_gfn()
1526 iterator->level = level; in rmap_walk_init_level()
1527 iterator->gfn = iterator->start_gfn; in rmap_walk_init_level()
1528 iterator->rmap = gfn_to_rmap(iterator->gfn, level, iterator->slot); in rmap_walk_init_level()
1529 iterator->end_rmap = gfn_to_rmap(iterator->end_gfn, level, iterator->slot); in rmap_walk_init_level()
1537 iterator->slot = slot; in slot_rmap_walk_init()
1538 iterator->start_level = start_level; in slot_rmap_walk_init()
1539 iterator->end_level = end_level; in slot_rmap_walk_init()
1540 iterator->start_gfn = start_gfn; in slot_rmap_walk_init()
1541 iterator->end_gfn = end_gfn; in slot_rmap_walk_init()
1543 rmap_walk_init_level(iterator, iterator->start_level); in slot_rmap_walk_init()
1548 return !!iterator->rmap; in slot_rmap_walk_okay()
1553 while (++iterator->rmap <= iterator->end_rmap) { in slot_rmap_walk_next()
1554 iterator->gfn += KVM_PAGES_PER_HPAGE(iterator->level); in slot_rmap_walk_next()
1556 if (atomic_long_read(&iterator->rmap->val)) in slot_rmap_walk_next()
1560 if (++iterator->level > iterator->end_level) { in slot_rmap_walk_next()
1561 iterator->rmap = NULL; in slot_rmap_walk_next()
1565 rmap_walk_init_level(iterator, iterator->level); in slot_rmap_walk_next()
1590 lockdep_assert_held_write(&kvm->mmu_lock); in __walk_slot_rmaps()
1600 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) { in __walk_slot_rmaps()
1603 iterator.gfn - start_gfn + 1); in __walk_slot_rmaps()
1606 cond_resched_rwlock_write(&kvm->mmu_lock); in __walk_slot_rmaps()
1620 slot->base_gfn, slot->base_gfn + slot->npages - 1, in walk_slot_rmaps()
1639 start, end - 1, can_yield, true, flush); in __kvm_rmap_zap_gfn_range()
1654 lockdep_assert_once(kvm->mmu_invalidate_in_progress || in kvm_unmap_gfn_range()
1655 lockdep_is_held(&kvm->slots_lock)); in kvm_unmap_gfn_range()
1658 flush = __kvm_rmap_zap_gfn_range(kvm, range->slot, in kvm_unmap_gfn_range()
1659 range->start, range->end, in kvm_unmap_gfn_range()
1660 range->may_block, flush); in kvm_unmap_gfn_range()
1666 range->slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT) in kvm_unmap_gfn_range()
1685 kvm_update_page_stats(kvm, sp->role.level, 1); in __rmap_add()
1687 rmap_head = gfn_to_rmap(gfn, sp->role.level, slot); in __rmap_add()
1690 if (rmap_count > kvm->stat.max_mmu_rmap_size) in __rmap_add()
1691 kvm->stat.max_mmu_rmap_size = rmap_count; in __rmap_add()
1694 kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level); in __rmap_add()
1701 struct kvm_mmu_memory_cache *cache = &vcpu->arch.mmu_pte_list_desc_cache; in rmap_add()
1703 __rmap_add(vcpu->kvm, cache, slot, spte, gfn, access); in rmap_add()
1720 for (gfn = range->start; gfn < range->end; in kvm_rmap_age_gfn_range()
1722 rmap_head = gfn_to_rmap(gfn, level, range->slot); in kvm_rmap_age_gfn_range()
1735 clear_bit((ffs(shadow_accessed_mask) - 1), in kvm_rmap_age_gfn_range()
1756 return !tdp_mmu_enabled || READ_ONCE(kvm->arch.indirect_shadow_pages); in kvm_may_have_shadow_mmu_sptes()
1794 if (KVM_MMU_WARN_ON(is_shadow_present_pte(sp->spt[i]))) in kvm_mmu_check_sptes_at_free()
1795 pr_err_ratelimited("SPTE %llx (@ %p) for gfn %llx shadow-present at free", in kvm_mmu_check_sptes_at_free()
1796 sp->spt[i], &sp->spt[i], in kvm_mmu_check_sptes_at_free()
1804 kvm->arch.n_used_mmu_pages++; in kvm_account_mmu_page()
1805 kvm_account_pgtable_pages((void *)sp->spt, +1); in kvm_account_mmu_page()
1810 kvm->arch.n_used_mmu_pages--; in kvm_unaccount_mmu_page()
1811 kvm_account_pgtable_pages((void *)sp->spt, -1); in kvm_unaccount_mmu_page()
1818 hlist_del(&sp->hash_link); in kvm_mmu_free_shadow_page()
1819 list_del(&sp->link); in kvm_mmu_free_shadow_page()
1820 free_page((unsigned long)sp->spt); in kvm_mmu_free_shadow_page()
1821 free_page((unsigned long)sp->shadowed_translation); in kvm_mmu_free_shadow_page()
1837 pte_list_add(kvm, cache, parent_pte, &sp->parent_ptes); in mmu_page_add_parent_pte()
1843 pte_list_remove(kvm, parent_pte, &sp->parent_ptes); in mmu_page_remove_parent_pte()
1859 for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) { in kvm_mmu_mark_parents_unsync()
1869 if (__test_and_set_bit(spte_index(spte), sp->unsync_child_bitmap)) in mark_unsync()
1871 if (sp->unsync_children++) in mark_unsync()
1891 if (sp->unsync) in mmu_pages_add()
1892 for (i=0; i < pvec->nr; i++) in mmu_pages_add()
1893 if (pvec->page[i].sp == sp) in mmu_pages_add()
1896 pvec->page[pvec->nr].sp = sp; in mmu_pages_add()
1897 pvec->page[pvec->nr].idx = idx; in mmu_pages_add()
1898 pvec->nr++; in mmu_pages_add()
1899 return (pvec->nr == KVM_PAGE_ARRAY_NR); in mmu_pages_add()
1904 --sp->unsync_children; in clear_unsync_child_bit()
1905 WARN_ON_ONCE((int)sp->unsync_children < 0); in clear_unsync_child_bit()
1906 __clear_bit(idx, sp->unsync_child_bitmap); in clear_unsync_child_bit()
1914 for_each_set_bit(i, sp->unsync_child_bitmap, 512) { in __mmu_unsync_walk()
1916 u64 ent = sp->spt[i]; in __mmu_unsync_walk()
1925 if (child->unsync_children) { in __mmu_unsync_walk()
1927 return -ENOSPC; in __mmu_unsync_walk()
1937 } else if (child->unsync) { in __mmu_unsync_walk()
1940 return -ENOSPC; in __mmu_unsync_walk()
1948 #define INVALID_INDEX (-1)
1953 pvec->nr = 0; in mmu_unsync_walk()
1954 if (!sp->unsync_children) in mmu_unsync_walk()
1963 WARN_ON_ONCE(!sp->unsync); in kvm_unlink_unsync_page()
1965 sp->unsync = 0; in kvm_unlink_unsync_page()
1966 --kvm->stat.mmu_unsync; in kvm_unlink_unsync_page()
1976 if (sp->role.direct) in sp_has_gptes()
1979 if (sp->role.passthrough) in sp_has_gptes()
1992 &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)]) \
1993 if ((_sp)->gfn != (_gfn) || !sp_has_gptes(_sp)) {} else
1997 union kvm_mmu_page_role root_role = vcpu->arch.mmu->root_role; in kvm_sync_page_check()
2003 * - level: not part of the overall MMU role and will never match as the MMU's in kvm_sync_page_check()
2005 * - access: updated based on the new guest PTE in kvm_sync_page_check()
2006 * - quadrant: not part of the overall MMU role (similar to level) in kvm_sync_page_check()
2018 * differs then the memslot lookup (SMM vs. non-SMM) will be bogus, the in kvm_sync_page_check()
2021 if (WARN_ON_ONCE(sp->role.direct || !vcpu->arch.mmu->sync_spte || in kvm_sync_page_check()
2022 (sp->role.word ^ root_role.word) & ~sync_role_ign.word)) in kvm_sync_page_check()
2030 /* sp->spt[i] has initial value of shadow page table allocation */ in kvm_sync_spte()
2031 if (sp->spt[i] == SHADOW_NONPRESENT_VALUE) in kvm_sync_spte()
2034 return vcpu->arch.mmu->sync_spte(vcpu, sp, i); in kvm_sync_spte()
2043 return -1; in __kvm_sync_page()
2048 if (ret < -1) in __kvm_sync_page()
2049 return -1; in __kvm_sync_page()
2071 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); in kvm_sync_page()
2091 if (sp->role.invalid) in is_obsolete_sp()
2096 unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen); in is_obsolete_sp()
2115 for (n = i+1; n < pvec->nr; n++) { in mmu_pages_next()
2116 struct kvm_mmu_page *sp = pvec->page[n].sp; in mmu_pages_next()
2117 unsigned idx = pvec->page[n].idx; in mmu_pages_next()
2118 int level = sp->role.level; in mmu_pages_next()
2120 parents->idx[level-1] = idx; in mmu_pages_next()
2124 parents->parent[level-2] = sp; in mmu_pages_next()
2136 if (pvec->nr == 0) in mmu_pages_first()
2139 WARN_ON_ONCE(pvec->page[0].idx != INVALID_INDEX); in mmu_pages_first()
2141 sp = pvec->page[0].sp; in mmu_pages_first()
2142 level = sp->role.level; in mmu_pages_first()
2145 parents->parent[level-2] = sp; in mmu_pages_first()
2150 parents->parent[level-1] = NULL; in mmu_pages_first()
2160 unsigned int idx = parents->idx[level]; in mmu_pages_clear_parents()
2161 sp = parents->parent[level]; in mmu_pages_clear_parents()
2168 } while (!sp->unsync_children); in mmu_pages_clear_parents()
2185 protected |= kvm_vcpu_write_protect_gfn(vcpu, sp->gfn); in mmu_sync_children()
2188 kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, true); in mmu_sync_children()
2193 kvm_unlink_unsync_page(vcpu->kvm, sp); in mmu_sync_children()
2197 if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) { in mmu_sync_children()
2198 kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush); in mmu_sync_children()
2201 return -EINTR; in mmu_sync_children()
2204 cond_resched_rwlock_write(&vcpu->kvm->mmu_lock); in mmu_sync_children()
2209 kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush); in mmu_sync_children()
2215 atomic_set(&sp->write_flooding_count, 0); in __clear_sp_write_flooding_count()
2241 if (sp->gfn != gfn) { in kvm_mmu_find_shadow_page()
2246 if (sp->role.word != role.word) { in kvm_mmu_find_shadow_page()
2248 * If the guest is creating an upper-level page, zap in kvm_mmu_find_shadow_page()
2254 * upper-level page will be write-protected. in kvm_mmu_find_shadow_page()
2256 if (role.level > PG_LEVEL_4K && sp->unsync) in kvm_mmu_find_shadow_page()
2262 /* unsync and write-flooding only apply to indirect SPs. */ in kvm_mmu_find_shadow_page()
2263 if (sp->role.direct) in kvm_mmu_find_shadow_page()
2266 if (sp->unsync) { in kvm_mmu_find_shadow_page()
2273 * it doesn't write-protect the page or mark it synchronized! in kvm_mmu_find_shadow_page()
2297 ++kvm->stat.mmu_cache_miss; in kvm_mmu_find_shadow_page()
2302 if (collisions > kvm->stat.max_mmu_page_hash_collisions) in kvm_mmu_find_shadow_page()
2303 kvm->stat.max_mmu_page_hash_collisions = collisions; in kvm_mmu_find_shadow_page()
2322 sp = kvm_mmu_memory_cache_alloc(caches->page_header_cache); in kvm_mmu_alloc_shadow_page()
2323 sp->spt = kvm_mmu_memory_cache_alloc(caches->shadow_page_cache); in kvm_mmu_alloc_shadow_page()
2325 sp->shadowed_translation = kvm_mmu_memory_cache_alloc(caches->shadowed_info_cache); in kvm_mmu_alloc_shadow_page()
2327 set_page_private(virt_to_page(sp->spt), (unsigned long)sp); in kvm_mmu_alloc_shadow_page()
2329 INIT_LIST_HEAD(&sp->possible_nx_huge_page_link); in kvm_mmu_alloc_shadow_page()
2336 sp->mmu_valid_gen = kvm->arch.mmu_valid_gen; in kvm_mmu_alloc_shadow_page()
2337 list_add(&sp->link, &kvm->arch.active_mmu_pages); in kvm_mmu_alloc_shadow_page()
2340 sp->gfn = gfn; in kvm_mmu_alloc_shadow_page()
2341 sp->role = role; in kvm_mmu_alloc_shadow_page()
2342 hlist_add_head(&sp->hash_link, sp_list); in kvm_mmu_alloc_shadow_page()
2360 sp_list = &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]; in __kvm_mmu_get_shadow_page()
2377 .page_header_cache = &vcpu->arch.mmu_page_header_cache, in kvm_mmu_get_shadow_page()
2378 .shadow_page_cache = &vcpu->arch.mmu_shadow_page_cache, in kvm_mmu_get_shadow_page()
2379 .shadowed_info_cache = &vcpu->arch.mmu_shadowed_info_cache, in kvm_mmu_get_shadow_page()
2382 return __kvm_mmu_get_shadow_page(vcpu->kvm, vcpu, &caches, gfn, role); in kvm_mmu_get_shadow_page()
2391 role = parent_sp->role; in kvm_mmu_child_role()
2392 role.level--; in kvm_mmu_child_role()
2398 * If the guest has 4-byte PTEs then that means it's using 32-bit, in kvm_mmu_child_role()
2399 * 2-level, non-PAE paging. KVM shadows such guests with PAE paging in kvm_mmu_child_role()
2400 * (i.e. 8-byte PTEs). The difference in PTE size means that KVM must in kvm_mmu_child_role()
2413 * Concretely, a 4-byte PDE consumes bits 31:22, while an 8-byte PDE in kvm_mmu_child_role()
2415 * PDPTEs; those 4 PAE page directories are pre-allocated and their in kvm_mmu_child_role()
2416 * quadrant is assigned in mmu_alloc_root(). A 4-byte PTE consumes in kvm_mmu_child_role()
2417 * bits 21:12, while an 8-byte PTE consumes bits 20:12. To consume in kvm_mmu_child_role()
2419 * quadrant, i.e. sets quadrant to '0' or '1'. The parent 8-byte PDE in kvm_mmu_child_role()
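A hedged sketch of the quadrant selection the comment describes: a 512-entry shadow page covers only part of the 4-byte guest table it shadows, so low bits of the parent SPTE's index pick which part. The constants and the spte_index() stand-in below are simplifying assumptions, not the real helpers:

#include <stdint.h>

#define SPTE_LEVEL_BITS_SKETCH	9	/* 512 SPTEs per shadow page */

/* Index of an spte within its 512-entry shadow page. */
static unsigned int spte_index_sketch(const uint64_t *sptep)
{
	return ((uintptr_t)sptep >> 3) & ((1u << SPTE_LEVEL_BITS_SKETCH) - 1);
}

/*
 * A 512-entry shadow PTE page shadows only half of a 1024-entry 4-byte guest
 * PTE page, so bit 0 of the parent index says which half this child covers.
 */
static unsigned int child_quadrant_sketch(const uint64_t *sptep)
{
	return spte_index_sketch(sptep) & 1;
}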
2438 return ERR_PTR(-EEXIST); in kvm_mmu_get_child_sp()
2448 iterator->addr = addr; in shadow_walk_init_using_root()
2449 iterator->shadow_addr = root; in shadow_walk_init_using_root()
2450 iterator->level = vcpu->arch.mmu->root_role.level; in shadow_walk_init_using_root()
2452 if (iterator->level >= PT64_ROOT_4LEVEL && in shadow_walk_init_using_root()
2453 vcpu->arch.mmu->cpu_role.base.level < PT64_ROOT_4LEVEL && in shadow_walk_init_using_root()
2454 !vcpu->arch.mmu->root_role.direct) in shadow_walk_init_using_root()
2455 iterator->level = PT32E_ROOT_LEVEL; in shadow_walk_init_using_root()
2457 if (iterator->level == PT32E_ROOT_LEVEL) { in shadow_walk_init_using_root()
2459 * prev_root is currently only used for 64-bit hosts. So only in shadow_walk_init_using_root()
2462 BUG_ON(root != vcpu->arch.mmu->root.hpa); in shadow_walk_init_using_root()
2464 iterator->shadow_addr in shadow_walk_init_using_root()
2465 = vcpu->arch.mmu->pae_root[(addr >> 30) & 3]; in shadow_walk_init_using_root()
2466 iterator->shadow_addr &= SPTE_BASE_ADDR_MASK; in shadow_walk_init_using_root()
2467 --iterator->level; in shadow_walk_init_using_root()
2468 if (!iterator->shadow_addr) in shadow_walk_init_using_root()
2469 iterator->level = 0; in shadow_walk_init_using_root()
2476 shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root.hpa, in shadow_walk_init()
2482 if (iterator->level < PG_LEVEL_4K) in shadow_walk_okay()
2485 iterator->index = SPTE_INDEX(iterator->addr, iterator->level); in shadow_walk_okay()
2486 iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index; in shadow_walk_okay()
2493 if (!is_shadow_present_pte(spte) || is_last_spte(spte, iterator->level)) { in __shadow_walk_next()
2494 iterator->level = 0; in __shadow_walk_next()
2498 iterator->shadow_addr = spte & SPTE_BASE_ADDR_MASK; in __shadow_walk_next()
2499 --iterator->level; in __shadow_walk_next()
2504 __shadow_walk_next(iterator, *iterator->sptep); in shadow_walk_next()
2523 spte = make_nonleaf_spte(sp->spt, sp_ad_disabled(sp)); in __link_shadow_page()
2530 * The non-direct sub-pagetable must be updated before linking. For in __link_shadow_page()
2532 * kvm_mmu_find_shadow_page() without write-protecting the gfn, in __link_shadow_page()
2533 * so sp->unsync can be true or false. For higher level non-direct in __link_shadow_page()
2535 * FNAME(fetch)(), so sp->unsync_children can only be false. in __link_shadow_page()
2538 if (WARN_ON_ONCE(sp->unsync_children) || sp->unsync) in __link_shadow_page()
2545 __link_shadow_page(vcpu->kvm, &vcpu->arch.mmu_pte_list_desc_cache, sptep, sp, true); in link_shadow_page()
2557 * sp's access: allow writable in the read-only sp, in validate_direct_spte()
2562 if (child->role.access == direct_access) in validate_direct_spte()
2565 drop_parent_pte(vcpu->kvm, child, sptep); in validate_direct_spte()
2566 kvm_flush_remote_tlbs_sptep(vcpu->kvm, sptep); in validate_direct_spte()
2570 /* Returns the number of zapped non-leaf child shadow pages. */
2579 if (is_last_spte(pte, sp->role.level)) { in mmu_page_zap_pte()
2591 child->role.guest_mode && in mmu_page_zap_pte()
2592 !atomic_long_read(&child->parent_ptes.val)) in mmu_page_zap_pte()
2610 zapped += mmu_page_zap_pte(kvm, sp, sp->spt + i, invalid_list); in kvm_mmu_page_unlink_children()
2620 while ((sptep = rmap_get_first(&sp->parent_ptes, &iter))) in kvm_mmu_unlink_parents()
2632 if (parent->role.level == PG_LEVEL_4K) in mmu_zap_unsync_children()
2655 lockdep_assert_held_write(&kvm->mmu_lock); in __kvm_mmu_prepare_zap_page()
2657 ++kvm->stat.mmu_shadow_zapped; in __kvm_mmu_prepare_zap_page()
2665 if (!sp->role.invalid && sp_has_gptes(sp)) in __kvm_mmu_prepare_zap_page()
2668 if (sp->unsync) in __kvm_mmu_prepare_zap_page()
2670 if (!sp->root_count) { in __kvm_mmu_prepare_zap_page()
2677 * !sp->root_count. in __kvm_mmu_prepare_zap_page()
2679 if (sp->role.invalid) in __kvm_mmu_prepare_zap_page()
2680 list_add(&sp->link, invalid_list); in __kvm_mmu_prepare_zap_page()
2682 list_move(&sp->link, invalid_list); in __kvm_mmu_prepare_zap_page()
2689 list_del(&sp->link); in __kvm_mmu_prepare_zap_page()
2699 if (sp->nx_huge_page_disallowed) in __kvm_mmu_prepare_zap_page()
2702 sp->role.invalid = 1; in __kvm_mmu_prepare_zap_page()
2732 * the page tables and see changes to vcpu->mode here. The barrier in kvm_mmu_commit_zap_page()
2742 WARN_ON_ONCE(!sp->role.invalid || sp->root_count); in kvm_mmu_commit_zap_page()
2756 if (list_empty(&kvm->arch.active_mmu_pages)) in kvm_mmu_zap_oldest_mmu_pages()
2760 list_for_each_entry_safe_reverse(sp, tmp, &kvm->arch.active_mmu_pages, link) { in kvm_mmu_zap_oldest_mmu_pages()
2765 if (sp->root_count) in kvm_mmu_zap_oldest_mmu_pages()
2780 kvm->stat.mmu_recycled += total_zapped; in kvm_mmu_zap_oldest_mmu_pages()
2786 if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages) in kvm_mmu_available_pages()
2787 return kvm->arch.n_max_mmu_pages - in kvm_mmu_available_pages()
2788 kvm->arch.n_used_mmu_pages; in kvm_mmu_available_pages()
2795 unsigned long avail = kvm_mmu_available_pages(vcpu->kvm); in make_mmu_pages_available()
2800 kvm_mmu_zap_oldest_mmu_pages(vcpu->kvm, KVM_REFILL_PAGES - avail); in make_mmu_pages_available()
2805 * four pages, e.g. for PAE roots or for 5-level paging. Temporarily in make_mmu_pages_available()
2811 if (!kvm_mmu_available_pages(vcpu->kvm)) in make_mmu_pages_available()
2812 return -ENOSPC; in make_mmu_pages_available()
2822 write_lock(&kvm->mmu_lock); in kvm_mmu_change_mmu_pages()
2824 if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) { in kvm_mmu_change_mmu_pages()
2825 kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages - in kvm_mmu_change_mmu_pages()
2828 goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages; in kvm_mmu_change_mmu_pages()
2831 kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages; in kvm_mmu_change_mmu_pages()
2833 write_unlock(&kvm->mmu_lock); in kvm_mmu_change_mmu_pages()
2839 struct kvm *kvm = vcpu->kvm; in __kvm_mmu_unprotect_gfn_and_retry()
2846 * Bail early if there aren't any write-protected shadow pages to avoid in __kvm_mmu_unprotect_gfn_and_retry()
2847 * unnecessarily taking mmu_lock lock, e.g. if the gfn is write-tracked in __kvm_mmu_unprotect_gfn_and_retry()
2853 if (!READ_ONCE(kvm->arch.indirect_shadow_pages)) in __kvm_mmu_unprotect_gfn_and_retry()
2856 if (!vcpu->arch.mmu->root_role.direct) { in __kvm_mmu_unprotect_gfn_and_retry()
2862 write_lock(&kvm->mmu_lock); in __kvm_mmu_unprotect_gfn_and_retry()
2872 write_unlock(&kvm->mmu_lock); in __kvm_mmu_unprotect_gfn_and_retry()
2876 vcpu->arch.last_retry_eip = kvm_rip_read(vcpu); in __kvm_mmu_unprotect_gfn_and_retry()
2877 vcpu->arch.last_retry_addr = cr2_or_gpa; in __kvm_mmu_unprotect_gfn_and_retry()
2885 ++kvm->stat.mmu_unsync; in kvm_unsync_page()
2886 sp->unsync = 1; in kvm_unsync_page()
2894 * were marked unsync (or if there is no shadow page), -EPERM if the SPTE must
2895 * be write-protected.
2904 * Force write-protection if the page is being tracked. Note, the page in mmu_try_to_unsync_pages()
2905 * track machinery is used to write-protect upper-level shadow pages, in mmu_try_to_unsync_pages()
2909 return -EPERM; in mmu_try_to_unsync_pages()
2912 * The page is not write-tracked, mark existing shadow pages unsync in mmu_try_to_unsync_pages()
2919 return -EPERM; in mmu_try_to_unsync_pages()
2921 if (sp->unsync) in mmu_try_to_unsync_pages()
2925 return -EEXIST; in mmu_try_to_unsync_pages()
2936 spin_lock(&kvm->arch.mmu_unsync_pages_lock); in mmu_try_to_unsync_pages()
2942 * possible as clearing sp->unsync _must_ hold mmu_lock in mmu_try_to_unsync_pages()
2943 * for write, i.e. unsync cannot transition from 1->0 in mmu_try_to_unsync_pages()
2946 if (READ_ONCE(sp->unsync)) in mmu_try_to_unsync_pages()
2950 WARN_ON_ONCE(sp->role.level != PG_LEVEL_4K); in mmu_try_to_unsync_pages()
2954 spin_unlock(&kvm->arch.mmu_unsync_pages_lock); in mmu_try_to_unsync_pages()
2961 * before the page had been marked as unsync-ed, something like the in mmu_try_to_unsync_pages()
2965 * --------------------------------------------------------------------- in mmu_try_to_unsync_pages()
2978 * 2.3 Walking of unsync pages sees sp->unsync is in mmu_try_to_unsync_pages()
2987 * (sp->unsync = true) in mmu_try_to_unsync_pages()
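The scenario above is avoided by ordering the stores: the page must be visibly marked unsync before the writable SPTE is installed, and the walker must read the SPTE before checking the unsync flag. A sketch of that pairing with C11 fences standing in for the kernel's smp_wmb()/smp_rmb():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static _Atomic bool unsync;			/* sp->unsync */
static _Atomic uint64_t writable_spte;		/* SPTE that allows guest writes */

static void vcpu0_unsync_then_map(uint64_t spte)
{
	atomic_store_explicit(&unsync, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* pairs with the reader's fence */
	atomic_store_explicit(&writable_spte, spte, memory_order_relaxed);
}

/* If the writable SPTE is visible, the unsync flag must be visible too. */
static bool vcpu1_sees_unsync_if_mapped(void)
{
	uint64_t spte = atomic_load_explicit(&writable_spte, memory_order_relaxed);

	atomic_thread_fence(memory_order_acquire);
	return !spte || atomic_load_explicit(&unsync, memory_order_relaxed);
}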
3003 int level = sp->role.level; in mmu_set_spte()
3011 bool host_writable = !fault || fault->map_writable; in mmu_set_spte()
3012 bool prefetch = !fault || fault->prefetch; in mmu_set_spte()
3013 bool write_fault = fault && fault->write; in mmu_set_spte()
3016 vcpu->stat.pf_mmio_spte_created++; in mmu_set_spte()
3034 drop_parent_pte(vcpu->kvm, child, sptep); in mmu_set_spte()
3037 drop_spte(vcpu->kvm, sptep); in mmu_set_spte()
3057 kvm_flush_remote_tlbs_gfn(vcpu->kvm, gfn, level); in mmu_set_spte()
3112 unsigned int access = sp->role.access; in direct_pte_prefetch_many()
3114 return kvm_mmu_prefetch_sptes(vcpu, gfn, start, end - start, access); in direct_pte_prefetch_many()
3123 WARN_ON_ONCE(!sp->role.direct); in __direct_pte_prefetch()
3125 i = spte_index(sptep) & ~(PTE_PREFETCH_NUM - 1); in __direct_pte_prefetch()
3126 spte = sp->spt + i; in __direct_pte_prefetch()
3157 if (sp->role.level > PG_LEVEL_4K) in direct_pte_prefetch()
3164 if (unlikely(vcpu->kvm->mmu_invalidate_in_progress)) in direct_pte_prefetch()
3178 * - Check mmu_invalidate_retry_gfn() after grabbing the mapping level, before
3182 * - Hold mmu_lock AND ensure there is no in-progress MMU notifier invalidation
3186 * - Do not use the result to install new mappings, e.g. use the host mapping
3207 * Note, using the already-retrieved memslot and __gfn_to_hva_memslot() in host_pfn_mapping_level()
3210 * read-only memslots due to gfn_to_hva() assuming writes. Earlier in host_pfn_mapping_level()
3212 * read-only memslot. in host_pfn_mapping_level()
3224 * Read each entry once. As above, a non-leaf entry can be promoted to in host_pfn_mapping_level()
3225 * a huge page _during_ this walk. Re-reading the entry could send the in host_pfn_mapping_level()
3230 pgd = READ_ONCE(*pgd_offset(kvm->mm, hva)); in host_pfn_mapping_level()
3267 for ( ; max_level > PG_LEVEL_4K; max_level--) { in __kvm_mmu_max_mapping_level()
3269 if (!linfo->disallow_lpage) in __kvm_mmu_max_mapping_level()
3294 struct kvm_memory_slot *slot = fault->slot; in kvm_mmu_hugepage_adjust()
3297 fault->huge_page_disallowed = fault->exec && fault->nx_huge_page_workaround_enabled; in kvm_mmu_hugepage_adjust()
3299 if (unlikely(fault->max_level == PG_LEVEL_4K)) in kvm_mmu_hugepage_adjust()
3302 if (is_error_noslot_pfn(fault->pfn)) in kvm_mmu_hugepage_adjust()
3312 fault->req_level = __kvm_mmu_max_mapping_level(vcpu->kvm, slot, in kvm_mmu_hugepage_adjust()
3313 fault->gfn, fault->max_level, in kvm_mmu_hugepage_adjust()
3314 fault->is_private); in kvm_mmu_hugepage_adjust()
3315 if (fault->req_level == PG_LEVEL_4K || fault->huge_page_disallowed) in kvm_mmu_hugepage_adjust()
3322 fault->goal_level = fault->req_level; in kvm_mmu_hugepage_adjust()
3323 mask = KVM_PAGES_PER_HPAGE(fault->goal_level) - 1; in kvm_mmu_hugepage_adjust()
3324 VM_BUG_ON((fault->gfn & mask) != (fault->pfn & mask)); in kvm_mmu_hugepage_adjust()
3325 fault->pfn &= ~mask; in kvm_mmu_hugepage_adjust()
3331 cur_level == fault->goal_level && in disallowed_hugepage_adjust()
3334 spte_to_child_sp(spte)->nx_huge_page_disallowed) { in disallowed_hugepage_adjust()
3342 u64 page_mask = KVM_PAGES_PER_HPAGE(cur_level) - in disallowed_hugepage_adjust()
3343 KVM_PAGES_PER_HPAGE(cur_level - 1); in disallowed_hugepage_adjust()
3344 fault->pfn |= fault->gfn & page_mask; in disallowed_hugepage_adjust()
3345 fault->goal_level--; in disallowed_hugepage_adjust()
3354 gfn_t base_gfn = fault->gfn; in direct_map()
3359 for_each_shadow_entry(vcpu, fault->addr, it) { in direct_map()
3364 if (fault->nx_huge_page_workaround_enabled) in direct_map()
3367 base_gfn = gfn_round_for_level(fault->gfn, it.level); in direct_map()
3368 if (it.level == fault->goal_level) in direct_map()
3372 if (sp == ERR_PTR(-EEXIST)) in direct_map()
3376 if (fault->huge_page_disallowed) in direct_map()
3377 account_nx_huge_page(vcpu->kvm, sp, in direct_map()
3378 fault->req_level >= it.level); in direct_map()
3381 if (WARN_ON_ONCE(it.level != fault->goal_level)) in direct_map()
3382 return -EFAULT; in direct_map()
3384 ret = mmu_set_spte(vcpu, fault->slot, it.sptep, ACC_ALL, in direct_map()
3385 base_gfn, fault->pfn, fault); in direct_map()
3402 if (is_sigpending_pfn(fault->pfn)) { in kvm_handle_error_pfn()
3404 return -EINTR; in kvm_handle_error_pfn()
3412 if (fault->pfn == KVM_PFN_ERR_RO_FAULT) in kvm_handle_error_pfn()
3415 if (fault->pfn == KVM_PFN_ERR_HWPOISON) { in kvm_handle_error_pfn()
3416 kvm_send_hwpoison_signal(fault->slot, fault->gfn); in kvm_handle_error_pfn()
3420 return -EFAULT; in kvm_handle_error_pfn()
3427 gva_t gva = fault->is_tdp ? 0 : fault->addr; in kvm_handle_noslot_fault()
3429 if (fault->is_private) { in kvm_handle_noslot_fault()
3431 return -EFAULT; in kvm_handle_noslot_fault()
3434 vcpu_cache_mmio_info(vcpu, gva, fault->gfn, in kvm_handle_noslot_fault()
3437 fault->slot = NULL; in kvm_handle_noslot_fault()
3438 fault->pfn = KVM_PFN_NOSLOT; in kvm_handle_noslot_fault()
3439 fault->map_writable = false; in kvm_handle_noslot_fault()
3456 if (unlikely(fault->gfn > kvm_mmu_max_gfn())) in kvm_handle_noslot_fault()
3470 if (fault->rsvd) in page_fault_can_be_fast()
3474 * For hardware-protected VMs, certain conditions like attempting to in page_fault_can_be_fast()
3478 * result of a write-protected access, and treat it as a spurious case in page_fault_can_be_fast()
3489 if (kvm->arch.has_private_mem && in page_fault_can_be_fast()
3490 fault->is_private != kvm_mem_is_private(kvm, fault->gfn)) in page_fault_can_be_fast()
3504 * the fault was caused by a write-protection violation. If the in page_fault_can_be_fast()
3505 * SPTE is MMU-writable (determined later), the fault can be fixed in page_fault_can_be_fast()
3508 if (!fault->present) in page_fault_can_be_fast()
3515 return fault->write; in page_fault_can_be_fast()
3534 * so non-PML cases won't be impacted. in fast_pf_fix_direct_spte()
3542 mark_page_dirty_in_slot(vcpu->kvm, fault->slot, fault->gfn); in fast_pf_fix_direct_spte()
3549 * gpa, and sets *spte to the spte value. This spte may be non-present. If no
3553 * - Must be called between walk_shadow_page_lockless_{begin,end}.
3554 * - The returned sptep must not be used after walk_shadow_page_lockless_end.
3581 if (!page_fault_can_be_fast(vcpu->kvm, fault)) in fast_page_fault()
3590 sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, fault->gfn, &spte); in fast_page_fault()
3592 sptep = fast_pf_get_last_sptep(vcpu, fault->addr, &spte); in fast_page_fault()
3606 if (!is_last_spte(spte, sp->role.level)) in fast_page_fault()
3629 * uses A/D bits for non-nested MMUs. Thus, if A/D bits are in fast_page_fault()
3630 * enabled, the SPTE can't be an access-tracked SPTE. in fast_page_fault()
3637 * To keep things simple, only SPTEs that are MMU-writable can in fast_page_fault()
3639 * that were write-protected for dirty-logging or access in fast_page_fault()
3644 * shadow-present, i.e. except for access tracking restoration in fast_page_fault()
3647 if (fault->write && is_mmu_writable_spte(spte)) { in fast_page_fault()
3651 * Do not fix write-permission on the large spte when in fast_page_fault()
3653 * first page into the dirty-bitmap in in fast_page_fault()
3660 if (sp->role.level > PG_LEVEL_4K && in fast_page_fault()
3661 kvm_slot_dirty_track_enabled(fault->slot)) in fast_page_fault()
3691 vcpu->stat.pf_fast++; in fast_page_fault()
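The actual fix is applied without holding mmu_lock, so it only succeeds if the SPTE is still the value that was observed. A minimal sketch of that compare-and-exchange step, with an illustrative writable-bit position:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define SPTE_WRITABLE_SKETCH	(1ull << 1)	/* illustrative bit position */

/* Upgrade the SPTE only if nobody changed it since it was read. */
static bool fast_fix_writable(_Atomic uint64_t *sptep, uint64_t old_spte)
{
	uint64_t new_spte = old_spte | SPTE_WRITABLE_SKETCH;

	return atomic_compare_exchange_strong(sptep, &old_spte, new_spte);
}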
3709 lockdep_assert_held_read(&kvm->mmu_lock); in mmu_free_root_page()
3712 lockdep_assert_held_write(&kvm->mmu_lock); in mmu_free_root_page()
3713 if (!--sp->root_count && sp->role.invalid) in mmu_free_root_page()
3724 bool is_tdp_mmu = tdp_mmu_enabled && mmu->root_role.direct; in kvm_mmu_free_roots()
3735 && VALID_PAGE(mmu->root.hpa); in kvm_mmu_free_roots()
3740 VALID_PAGE(mmu->prev_roots[i].hpa)) in kvm_mmu_free_roots()
3748 read_lock(&kvm->mmu_lock); in kvm_mmu_free_roots()
3750 write_lock(&kvm->mmu_lock); in kvm_mmu_free_roots()
3754 mmu_free_root_page(kvm, &mmu->prev_roots[i].hpa, in kvm_mmu_free_roots()
3758 if (kvm_mmu_is_dummy_root(mmu->root.hpa)) { in kvm_mmu_free_roots()
3760 } else if (root_to_sp(mmu->root.hpa)) { in kvm_mmu_free_roots()
3761 mmu_free_root_page(kvm, &mmu->root.hpa, &invalid_list); in kvm_mmu_free_roots()
3762 } else if (mmu->pae_root) { in kvm_mmu_free_roots()
3764 if (!IS_VALID_PAE_ROOT(mmu->pae_root[i])) in kvm_mmu_free_roots()
3767 mmu_free_root_page(kvm, &mmu->pae_root[i], in kvm_mmu_free_roots()
3769 mmu->pae_root[i] = INVALID_PAE_ROOT; in kvm_mmu_free_roots()
3772 mmu->root.hpa = INVALID_PAGE; in kvm_mmu_free_roots()
3773 mmu->root.pgd = 0; in kvm_mmu_free_roots()
3777 read_unlock(&kvm->mmu_lock); in kvm_mmu_free_roots()
3781 write_unlock(&kvm->mmu_lock); in kvm_mmu_free_roots()
3797 WARN_ON_ONCE(mmu->root_role.guest_mode); in kvm_mmu_free_guest_mode_roots()
3800 root_hpa = mmu->prev_roots[i].hpa; in kvm_mmu_free_guest_mode_roots()
3805 if (!sp || sp->role.guest_mode) in kvm_mmu_free_guest_mode_roots()
3816 union kvm_mmu_page_role role = vcpu->arch.mmu->root_role; in mmu_alloc_root()
3826 ++sp->root_count; in mmu_alloc_root()
3828 return __pa(sp->spt); in mmu_alloc_root()
3833 struct kvm_mmu *mmu = vcpu->arch.mmu; in mmu_alloc_direct_roots()
3834 u8 shadow_root_level = mmu->root_role.level; in mmu_alloc_direct_roots()
3840 if (kvm_has_mirrored_tdp(vcpu->kvm) && in mmu_alloc_direct_roots()
3841 !VALID_PAGE(mmu->mirror_root_hpa)) in mmu_alloc_direct_roots()
3847 write_lock(&vcpu->kvm->mmu_lock); in mmu_alloc_direct_roots()
3854 mmu->root.hpa = root; in mmu_alloc_direct_roots()
3856 if (WARN_ON_ONCE(!mmu->pae_root)) { in mmu_alloc_direct_roots()
3857 r = -EIO; in mmu_alloc_direct_roots()
3862 WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i])); in mmu_alloc_direct_roots()
3864 root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT), 0, in mmu_alloc_direct_roots()
3866 mmu->pae_root[i] = root | PT_PRESENT_MASK | in mmu_alloc_direct_roots()
3869 mmu->root.hpa = __pa(mmu->pae_root); in mmu_alloc_direct_roots()
3872 r = -EIO; in mmu_alloc_direct_roots()
3877 mmu->root.pgd = 0; in mmu_alloc_direct_roots()
3879 write_unlock(&vcpu->kvm->mmu_lock); in mmu_alloc_direct_roots()
3896 mutex_lock(&kvm->slots_arch_lock); in mmu_first_shadow_root_alloc()
3914 * Both of these functions are no-ops if the target is in mmu_first_shadow_root_alloc()
3923 r = memslot_rmap_alloc(slot, slot->npages); in mmu_first_shadow_root_alloc()
3937 smp_store_release(&kvm->arch.shadow_root_allocated, true); in mmu_first_shadow_root_alloc()
3940 mutex_unlock(&kvm->slots_arch_lock); in mmu_first_shadow_root_alloc()
3946 struct kvm_mmu *mmu = vcpu->arch.mmu; in mmu_alloc_shadow_roots()
3956 mmu->root.hpa = kvm_mmu_get_dummy_root(); in mmu_alloc_shadow_roots()
3964 if (mmu->cpu_role.base.level == PT32E_ROOT_LEVEL) { in mmu_alloc_shadow_roots()
3966 pdptrs[i] = mmu->get_pdptr(vcpu, i); in mmu_alloc_shadow_roots()
3975 r = mmu_first_shadow_root_alloc(vcpu->kvm); in mmu_alloc_shadow_roots()
3979 write_lock(&vcpu->kvm->mmu_lock); in mmu_alloc_shadow_roots()
3986 * write-protect the guest's page table root. in mmu_alloc_shadow_roots()
3988 if (mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL) { in mmu_alloc_shadow_roots()
3990 mmu->root_role.level); in mmu_alloc_shadow_roots()
3991 mmu->root.hpa = root; in mmu_alloc_shadow_roots()
3995 if (WARN_ON_ONCE(!mmu->pae_root)) { in mmu_alloc_shadow_roots()
3996 r = -EIO; in mmu_alloc_shadow_roots()
4001 * We shadow a 32 bit page table. This may be a legacy 2-level in mmu_alloc_shadow_roots()
4002 * or a PAE 3-level page table. In either case we need to be aware that in mmu_alloc_shadow_roots()
4006 if (mmu->root_role.level >= PT64_ROOT_4LEVEL) { in mmu_alloc_shadow_roots()
4009 if (WARN_ON_ONCE(!mmu->pml4_root)) { in mmu_alloc_shadow_roots()
4010 r = -EIO; in mmu_alloc_shadow_roots()
4013 mmu->pml4_root[0] = __pa(mmu->pae_root) | pm_mask; in mmu_alloc_shadow_roots()
4015 if (mmu->root_role.level == PT64_ROOT_5LEVEL) { in mmu_alloc_shadow_roots()
4016 if (WARN_ON_ONCE(!mmu->pml5_root)) { in mmu_alloc_shadow_roots()
4017 r = -EIO; in mmu_alloc_shadow_roots()
4020 mmu->pml5_root[0] = __pa(mmu->pml4_root) | pm_mask; in mmu_alloc_shadow_roots()
4025 WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i])); in mmu_alloc_shadow_roots()
4027 if (mmu->cpu_role.base.level == PT32E_ROOT_LEVEL) { in mmu_alloc_shadow_roots()
4029 mmu->pae_root[i] = INVALID_PAE_ROOT; in mmu_alloc_shadow_roots()
4036 * If shadowing 32-bit non-PAE page tables, each PAE page in mmu_alloc_shadow_roots()
4037 * directory maps one quarter of the guest's non-PAE page in mmu_alloc_shadow_roots()
4041 quadrant = (mmu->cpu_role.base.level == PT32_ROOT_LEVEL) ? i : 0; in mmu_alloc_shadow_roots()
4044 mmu->pae_root[i] = root | pm_mask; in mmu_alloc_shadow_roots()
4047 if (mmu->root_role.level == PT64_ROOT_5LEVEL) in mmu_alloc_shadow_roots()
4048 mmu->root.hpa = __pa(mmu->pml5_root); in mmu_alloc_shadow_roots()
4049 else if (mmu->root_role.level == PT64_ROOT_4LEVEL) in mmu_alloc_shadow_roots()
4050 mmu->root.hpa = __pa(mmu->pml4_root); in mmu_alloc_shadow_roots()
4052 mmu->root.hpa = __pa(mmu->pae_root); in mmu_alloc_shadow_roots()
4055 mmu->root.pgd = root_pgd; in mmu_alloc_shadow_roots()
4057 write_unlock(&vcpu->kvm->mmu_lock); in mmu_alloc_shadow_roots()
4064 struct kvm_mmu *mmu = vcpu->arch.mmu; in mmu_alloc_special_roots()
4065 bool need_pml5 = mmu->root_role.level > PT64_ROOT_4LEVEL; in mmu_alloc_special_roots()
4071 * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP in mmu_alloc_special_roots()
4074 * on demand, as running a 32-bit L1 VMM on 64-bit KVM is very rare. in mmu_alloc_special_roots()
4076 if (mmu->root_role.direct || in mmu_alloc_special_roots()
4077 mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL || in mmu_alloc_special_roots()
4078 mmu->root_role.level < PT64_ROOT_4LEVEL) in mmu_alloc_special_roots()
4083 * of levels for the shadow page tables, e.g. all MMUs are 4-level or in mmu_alloc_special_roots()
4084 * all MMUs are 5-level. Thus, this can safely require that pml5_root in mmu_alloc_special_roots()
4088 if (mmu->pae_root && mmu->pml4_root && (!need_pml5 || mmu->pml5_root)) in mmu_alloc_special_roots()
4095 if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->pml4_root || in mmu_alloc_special_roots()
4096 (need_pml5 && mmu->pml5_root))) in mmu_alloc_special_roots()
4097 return -EIO; in mmu_alloc_special_roots()
4100 * Unlike 32-bit NPT, the PDP table doesn't need to be in low mem, and in mmu_alloc_special_roots()
4105 return -ENOMEM; in mmu_alloc_special_roots()
4119 mmu->pae_root = pae_root; in mmu_alloc_special_roots()
4120 mmu->pml4_root = pml4_root; in mmu_alloc_special_roots()
4121 mmu->pml5_root = pml5_root; in mmu_alloc_special_roots()
4130 return -ENOMEM; in mmu_alloc_special_roots()
4143 * walk before the reads of sp->unsync/sp->unsync_children here. in is_unsync_root()
4145 * Even if another CPU was marking the SP as unsync-ed simultaneously, in is_unsync_root()
4163 if (sp->unsync || sp->unsync_children) in is_unsync_root()
4174 if (vcpu->arch.mmu->root_role.direct) in kvm_mmu_sync_roots()
4177 if (!VALID_PAGE(vcpu->arch.mmu->root.hpa)) in kvm_mmu_sync_roots()
4182 if (vcpu->arch.mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL) { in kvm_mmu_sync_roots()
4183 hpa_t root = vcpu->arch.mmu->root.hpa; in kvm_mmu_sync_roots()
4190 write_lock(&vcpu->kvm->mmu_lock); in kvm_mmu_sync_roots()
4192 write_unlock(&vcpu->kvm->mmu_lock); in kvm_mmu_sync_roots()
4196 write_lock(&vcpu->kvm->mmu_lock); in kvm_mmu_sync_roots()
4199 hpa_t root = vcpu->arch.mmu->pae_root[i]; in kvm_mmu_sync_roots()
4207 write_unlock(&vcpu->kvm->mmu_lock); in kvm_mmu_sync_roots()
4216 if (is_unsync_root(vcpu->arch.mmu->prev_roots[i].hpa)) in kvm_mmu_sync_prev_roots()
4220 kvm_mmu_free_roots(vcpu->kvm, vcpu->arch.mmu, roots_to_free); in kvm_mmu_sync_prev_roots()
4228 exception->error_code = 0; in nonpaging_gva_to_gpa()
4249 * That SPTE may be non-present.
4256 int leaf = -1; in get_walk()
4288 /* return true if reserved bit(s) are detected on a valid, non-MMIO SPTE. */
4308 * to detect reserved bits on non-MMIO SPTEs. i.e. buggy SPTEs. in get_mmio_spte()
4313 rsvd_check = &vcpu->arch.mmu->shadow_zero_check; in get_mmio_spte()
4315 for (level = root; level >= leaf; level--) in get_mmio_spte()
4319 pr_err("%s: reserved bits set on MMU-present spte, addr 0x%llx, hierarchy:\n", in get_mmio_spte()
4321 for (level = root; level >= leaf; level--) in get_mmio_spte()
4322 pr_err("------ spte = 0x%llx level = %d, rsvd bits = 0x%llx", in get_mmio_spte()
4340 return -EINVAL; in handle_mmio_page_fault()
4342 if (is_mmio_spte(vcpu->kvm, spte)) { in handle_mmio_page_fault()
4367 if (unlikely(fault->rsvd)) in page_fault_handle_page_track()
4370 if (!fault->present || !fault->write) in page_fault_handle_page_track()
4377 if (kvm_gfn_is_write_tracked(vcpu->kvm, fault->slot, fault->gfn)) in page_fault_handle_page_track()
4397 u32 id = vcpu->arch.apf.id; in alloc_apf_token()
4400 vcpu->arch.apf.id = 1; in alloc_apf_token()
4402 return (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id; in alloc_apf_token()
4411 arch.gfn = fault->gfn; in kvm_arch_setup_async_pf()
4412 arch.error_code = fault->error_code; in kvm_arch_setup_async_pf()
4413 arch.direct_map = vcpu->arch.mmu->root_role.direct; in kvm_arch_setup_async_pf()
4414 arch.cr3 = kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu); in kvm_arch_setup_async_pf()
4416 return kvm_setup_async_pf(vcpu, fault->addr, in kvm_arch_setup_async_pf()
4417 kvm_vcpu_gfn_to_hva(vcpu, fault->gfn), &arch); in kvm_arch_setup_async_pf()
4424 if (WARN_ON_ONCE(work->arch.error_code & PFERR_PRIVATE_ACCESS)) in kvm_arch_async_page_ready()
4427 if ((vcpu->arch.mmu->root_role.direct != work->arch.direct_map) || in kvm_arch_async_page_ready()
4428 work->wakeup_all) in kvm_arch_async_page_ready()
4435 if (!vcpu->arch.mmu->root_role.direct && in kvm_arch_async_page_ready()
4436 work->arch.cr3 != kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu)) in kvm_arch_async_page_ready()
4439 r = kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, work->arch.error_code, in kvm_arch_async_page_ready()
4444 * ignore stats for all other return values. Page-ready "faults" aren't in kvm_arch_async_page_ready()
4448 vcpu->stat.pf_fixed++; in kvm_arch_async_page_ready()
4490 kvm_release_faultin_page(vcpu->kvm, fault->refcounted_page, in kvm_mmu_finish_page_fault()
4491 r == RET_PF_RETRY, fault->map_writable); in kvm_mmu_finish_page_fault()
4499 if (!kvm_slot_can_be_private(fault->slot)) { in kvm_mmu_faultin_pfn_private()
4501 return -EFAULT; in kvm_mmu_faultin_pfn_private()
4504 r = kvm_gmem_get_pfn(vcpu->kvm, fault->slot, fault->gfn, &fault->pfn, in kvm_mmu_faultin_pfn_private()
4505 &fault->refcounted_page, &max_order); in kvm_mmu_faultin_pfn_private()
4511 fault->map_writable = !(fault->slot->flags & KVM_MEM_READONLY); in kvm_mmu_faultin_pfn_private()
4512 fault->max_level = kvm_max_private_mapping_level(vcpu->kvm, fault->pfn, in kvm_mmu_faultin_pfn_private()
4513 fault->max_level, max_order); in kvm_mmu_faultin_pfn_private()
4521 unsigned int foll = fault->write ? FOLL_WRITE : 0; in __kvm_mmu_faultin_pfn()
4523 if (fault->is_private) in __kvm_mmu_faultin_pfn()
4527 fault->pfn = __kvm_faultin_pfn(fault->slot, fault->gfn, foll, in __kvm_mmu_faultin_pfn()
4528 &fault->map_writable, &fault->refcounted_page); in __kvm_mmu_faultin_pfn()
4531 * If resolving the page failed because I/O is needed to fault-in the in __kvm_mmu_faultin_pfn()
4536 if (fault->pfn != KVM_PFN_ERR_NEEDS_IO) in __kvm_mmu_faultin_pfn()
4539 if (!fault->prefetch && kvm_can_do_async_pf(vcpu)) { in __kvm_mmu_faultin_pfn()
4540 trace_kvm_try_async_get_page(fault->addr, fault->gfn); in __kvm_mmu_faultin_pfn()
4541 if (kvm_find_async_pf_gfn(vcpu, fault->gfn)) { in __kvm_mmu_faultin_pfn()
4542 trace_kvm_async_pf_repeated_fault(fault->addr, fault->gfn); in __kvm_mmu_faultin_pfn()
4551 * Allow gup to bail on pending non-fatal signals when it's also allowed in __kvm_mmu_faultin_pfn()
4557 fault->pfn = __kvm_faultin_pfn(fault->slot, fault->gfn, foll, in __kvm_mmu_faultin_pfn()
4558 &fault->map_writable, &fault->refcounted_page); in __kvm_mmu_faultin_pfn()
4566 struct kvm_memory_slot *slot = fault->slot; in kvm_mmu_faultin_pfn()
4567 struct kvm *kvm = vcpu->kvm; in kvm_mmu_faultin_pfn()
4570 if (KVM_BUG_ON(kvm_is_gfn_alias(kvm, fault->gfn), kvm)) in kvm_mmu_faultin_pfn()
4571 return -EFAULT; in kvm_mmu_faultin_pfn()
4576 * invalidation related to fault->gfn and resume the guest without in kvm_mmu_faultin_pfn()
4579 fault->mmu_seq = vcpu->kvm->mmu_invalidate_seq; in kvm_mmu_faultin_pfn()
4586 if (fault->is_private != kvm_mem_is_private(kvm, fault->gfn)) { in kvm_mmu_faultin_pfn()
4588 return -EFAULT; in kvm_mmu_faultin_pfn()
4599 if (slot->flags & KVM_MEMSLOT_INVALID) in kvm_mmu_faultin_pfn()
4602 if (slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT) { in kvm_mmu_faultin_pfn()
4621 * when the AVIC is re-enabled. in kvm_mmu_faultin_pfn()
4623 if (!kvm_apicv_activated(vcpu->kvm)) in kvm_mmu_faultin_pfn()
4631 * For mmu_lock, if there is an in-progress invalidation and the kernel in kvm_mmu_faultin_pfn()
4633 * in response to mmu_lock being contended, which is *very* counter- in kvm_mmu_faultin_pfn()
4643 * Do the pre-check even for non-preemptible kernels, i.e. even if KVM in kvm_mmu_faultin_pfn()
4648 if (mmu_invalidate_retry_gfn_unsafe(kvm, fault->mmu_seq, fault->gfn)) in kvm_mmu_faultin_pfn()
4655 if (unlikely(is_error_pfn(fault->pfn))) in kvm_mmu_faultin_pfn()
4658 if (WARN_ON_ONCE(!fault->slot || is_noslot_pfn(fault->pfn))) in kvm_mmu_faultin_pfn()
4668 if (mmu_invalidate_retry_gfn_unsafe(kvm, fault->mmu_seq, fault->gfn)) { in kvm_mmu_faultin_pfn()
4683 struct kvm_mmu_page *sp = root_to_sp(vcpu->arch.mmu->root.hpa); in is_page_fault_stale()
4686 if (sp && is_obsolete_sp(vcpu->kvm, sp)) in is_page_fault_stale()
4705 return fault->slot && in is_page_fault_stale()
4706 mmu_invalidate_retry_gfn(vcpu->kvm, fault->mmu_seq, fault->gfn); in is_page_fault_stale()
4714 if (WARN_ON_ONCE(kvm_mmu_is_dummy_root(vcpu->arch.mmu->root.hpa))) in direct_page_fault()
4733 write_lock(&vcpu->kvm->mmu_lock); in direct_page_fault()
4746 write_unlock(&vcpu->kvm->mmu_lock); in direct_page_fault()
4754 fault->max_level = PG_LEVEL_2M; in nonpaging_page_fault()
4762 u32 flags = vcpu->arch.apf.host_apf_flags; in kvm_handle_page_fault()
4765 /* A 64-bit CR2 should be impossible on 32-bit KVM. */ in kvm_handle_page_fault()
4767 return -EFAULT; in kvm_handle_page_fault()
4770 * Legacy #PF exception only have a 32-bit error code. Simply drop the in kvm_handle_page_fault()
4772 * set), and to ensure there are no collisions with KVM-defined bits. in kvm_handle_page_fault()
4778 * Restrict KVM-defined flags to bits 63:32 so that it's impossible for in kvm_handle_page_fault()
4783 vcpu->arch.l1tf_flush_l1d = true; in kvm_handle_page_fault()
4790 vcpu->arch.apf.host_apf_flags = 0; in kvm_handle_page_fault()
4824 read_lock(&vcpu->kvm->mmu_lock); in kvm_tdp_mmu_page_fault()
4833 read_unlock(&vcpu->kvm->mmu_lock); in kvm_tdp_mmu_page_fault()
4841 * When EPT is enabled (shadow_memtype_mask is non-zero), and the VM in kvm_mmu_may_ignore_guest_pat()
4842 * has non-coherent DMA (DMA doesn't snoop CPU caches), KVM's ABI is to in kvm_mmu_may_ignore_guest_pat()
4845 * result, KVM _may_ ignore guest PAT, whereas without non-coherent DMA, in kvm_mmu_may_ignore_guest_pat()
4870 if (vcpu->arch.mmu->page_fault != kvm_tdp_page_fault) in kvm_tdp_map_page()
4871 return -EOPNOTSUPP; in kvm_tdp_map_page()
4875 return -EINTR; in kvm_tdp_map_page()
4890 return -ENOENT; in kvm_tdp_map_page()
4897 return -EIO; in kvm_tdp_map_page()
4909 if (!vcpu->kvm->arch.pre_fault_allowed) in kvm_arch_vcpu_pre_fault_memory()
4910 return -EOPNOTSUPP; in kvm_arch_vcpu_pre_fault_memory()
4920 if (kvm_arch_has_private_mem(vcpu->kvm) && in kvm_arch_vcpu_pre_fault_memory()
4921 kvm_mem_is_private(vcpu->kvm, gpa_to_gfn(range->gpa))) in kvm_arch_vcpu_pre_fault_memory()
4926 * two-dimensional paging. in kvm_arch_vcpu_pre_fault_memory()
4928 r = kvm_tdp_map_page(vcpu, range->gpa, error_code, &level); in kvm_arch_vcpu_pre_fault_memory()
4933 * If the mapping that covers range->gpa can use a huge page, it in kvm_arch_vcpu_pre_fault_memory()
4934 * may start below it or end after range->gpa + range->size. in kvm_arch_vcpu_pre_fault_memory()
4936 end = (range->gpa & KVM_HPAGE_MASK(level)) + KVM_HPAGE_SIZE(level); in kvm_arch_vcpu_pre_fault_memory()
4937 return min(range->size, end - range->gpa); in kvm_arch_vcpu_pre_fault_memory()
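/*
 * Worked example for the return value above (illustrative): with a 2MiB
 * mapping, KVM_HPAGE_SIZE(level) == 0x200000, so for range->gpa == 0x12345000
 * the mapping starts at 0x12200000 and end == 0x12400000; the ioctl therefore
 * reports min(range->size, 0xbb000) bytes as pre-faulted.
 */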
4942 context->page_fault = nonpaging_page_fault; in nonpaging_init_context()
4943 context->gva_to_gpa = nonpaging_gva_to_gpa; in nonpaging_init_context()
4944 context->sync_spte = NULL; in nonpaging_init_context()
4952 if (!VALID_PAGE(root->hpa)) in is_root_usable()
4955 if (!role.direct && pgd != root->pgd) in is_root_usable()
4958 sp = root_to_sp(root->hpa); in is_root_usable()
4962 return role.word == sp->role.word; in is_root_usable()
4968 * If a matching root is found, it is assigned to kvm_mmu->root and
4970 * If no match is found, kvm_mmu->root is left invalid, the LRU root is
4979 if (is_root_usable(&mmu->root, new_pgd, new_role)) in cached_root_find_and_keep_current()
4991 swap(mmu->root, mmu->prev_roots[i]); in cached_root_find_and_keep_current()
4992 if (is_root_usable(&mmu->root, new_pgd, new_role)) in cached_root_find_and_keep_current()
5002 * On entry, mmu->root is invalid.
5003 * If a matching root is found, it is assigned to kvm_mmu->root, the LRU entry
5005 * If no match is found, kvm_mmu->root is left invalid and false is returned.
5014 if (is_root_usable(&mmu->prev_roots[i], new_pgd, new_role)) in cached_root_find_without_current()
5020 swap(mmu->root, mmu->prev_roots[i]); in cached_root_find_without_current()
5022 for (; i < KVM_MMU_NUM_PREV_ROOTS - 1; i++) in cached_root_find_without_current()
5023 mmu->prev_roots[i] = mmu->prev_roots[i + 1]; in cached_root_find_without_current()
5024 mmu->prev_roots[i].hpa = INVALID_PAGE; in cached_root_find_without_current()
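/*
 * Illustrative example of the compaction above: with prev_roots == {A, B, C}
 * and B matching the new PGD, B is swapped into mmu->root and the remaining
 * entries are shifted down to {A, C, INVALID}, keeping LRU order with the
 * freed slot at the tail.
 */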
5032 * Limit reuse to 64-bit hosts+VMs without "special" roots in order to in fast_pgd_switch()
5035 if (VALID_PAGE(mmu->root.hpa) && !root_to_sp(mmu->root.hpa)) in fast_pgd_switch()
5038 if (VALID_PAGE(mmu->root.hpa)) in fast_pgd_switch()
5046 struct kvm_mmu *mmu = vcpu->arch.mmu; in kvm_mmu_new_pgd()
5047 union kvm_mmu_page_role new_role = mmu->root_role; in kvm_mmu_new_pgd()
5051 * will establish a valid root prior to the next VM-Enter. in kvm_mmu_new_pgd()
5053 if (!fast_pgd_switch(vcpu->kvm, mmu, new_pgd, new_role)) in kvm_mmu_new_pgd()
5071 * switching to a new CR3, that GVA->GPA mapping may no longer be in kvm_mmu_new_pgd()
5082 struct kvm_mmu_page *sp = root_to_sp(vcpu->arch.mmu->root.hpa); in kvm_mmu_new_pgd()
5093 if (unlikely(is_mmio_spte(vcpu->kvm, *sptep))) { in sync_mmio_spte()
5127 rsvd_check->bad_mt_xwr = 0; in __reset_rsvds_bits_mask()
5142 * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for in __reset_rsvds_bits_mask()
5151 rsvd_check->rsvd_bits_mask[0][1] = 0; in __reset_rsvds_bits_mask()
5152 rsvd_check->rsvd_bits_mask[0][0] = 0; in __reset_rsvds_bits_mask()
5153 rsvd_check->rsvd_bits_mask[1][0] = in __reset_rsvds_bits_mask()
5154 rsvd_check->rsvd_bits_mask[0][0]; in __reset_rsvds_bits_mask()
5157 rsvd_check->rsvd_bits_mask[1][1] = 0; in __reset_rsvds_bits_mask()
5163 rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21); in __reset_rsvds_bits_mask()
5166 rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21); in __reset_rsvds_bits_mask()
5169 rsvd_check->rsvd_bits_mask[0][2] = rsvd_bits(63, 63) | in __reset_rsvds_bits_mask()
5173 rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd; /* PDE */ in __reset_rsvds_bits_mask()
5174 rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd; /* PTE */ in __reset_rsvds_bits_mask()
5175 rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | in __reset_rsvds_bits_mask()
5177 rsvd_check->rsvd_bits_mask[1][0] = in __reset_rsvds_bits_mask()
5178 rsvd_check->rsvd_bits_mask[0][0]; in __reset_rsvds_bits_mask()
5181 rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd | in __reset_rsvds_bits_mask()
5184 rsvd_check->rsvd_bits_mask[1][4] = in __reset_rsvds_bits_mask()
5185 rsvd_check->rsvd_bits_mask[0][4]; in __reset_rsvds_bits_mask()
5188 rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd | in __reset_rsvds_bits_mask()
5191 rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | in __reset_rsvds_bits_mask()
5193 rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd; in __reset_rsvds_bits_mask()
5194 rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd; in __reset_rsvds_bits_mask()
5195 rsvd_check->rsvd_bits_mask[1][3] = in __reset_rsvds_bits_mask()
5196 rsvd_check->rsvd_bits_mask[0][3]; in __reset_rsvds_bits_mask()
5197 rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd | in __reset_rsvds_bits_mask()
5200 rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | in __reset_rsvds_bits_mask()
5202 rsvd_check->rsvd_bits_mask[1][0] = in __reset_rsvds_bits_mask()
5203 rsvd_check->rsvd_bits_mask[0][0]; in __reset_rsvds_bits_mask()
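/*
 * Illustrative sketch, not the kernel helper: how a guest-PTE walker consults
 * the table built above.  Row [1] holds the mask for a large/leaf entry (PS
 * bit set), row [0] the mask for a non-leaf entry, and the column is the
 * paging level being checked (column 0 == PTE).  The struct below simply
 * mirrors the two fields used in this file; all names are hypothetical.
 */
struct demo_rsvd_check {
	u64 rsvd_bits_mask[2][5];
	u64 bad_mt_xwr;
};

static bool demo_pte_has_rsvd_bits(const struct demo_rsvd_check *chk,
				   u64 pte, int level)
{
	int large = (pte >> 7) & 1;	/* PS bit selects the row */

	return pte & chk->rsvd_bits_mask[large][level - 1];
}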
5211 __reset_rsvds_bits_mask(&context->guest_rsvd_check, in reset_guest_rsvds_bits_mask()
5212 vcpu->arch.reserved_gpa_bits, in reset_guest_rsvds_bits_mask()
5213 context->cpu_role.base.level, is_efer_nx(context), in reset_guest_rsvds_bits_mask()
5232 rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd | rsvd_bits(3, 7); in __reset_rsvds_bits_mask_ept()
5233 rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd | rsvd_bits(3, 7); in __reset_rsvds_bits_mask_ept()
5234 rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | rsvd_bits(3, 6) | large_1g_rsvd; in __reset_rsvds_bits_mask_ept()
5235 rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd | rsvd_bits(3, 6) | large_2m_rsvd; in __reset_rsvds_bits_mask_ept()
5236 rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd; in __reset_rsvds_bits_mask_ept()
5239 rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4]; in __reset_rsvds_bits_mask_ept()
5240 rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3]; in __reset_rsvds_bits_mask_ept()
5241 rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd | rsvd_bits(12, 29) | large_1g_rsvd; in __reset_rsvds_bits_mask_ept()
5242 rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | rsvd_bits(12, 20) | large_2m_rsvd; in __reset_rsvds_bits_mask_ept()
5243 rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0]; in __reset_rsvds_bits_mask_ept()
5254 rsvd_check->bad_mt_xwr = bad_mt_xwr; in __reset_rsvds_bits_mask_ept()
5260 __reset_rsvds_bits_mask_ept(&context->guest_rsvd_check, in reset_rsvds_bits_mask_ept()
5261 vcpu->arch.reserved_gpa_bits, execonly, in reset_rsvds_bits_mask_ept()
5280 /* KVM doesn't use 2-level page tables for the shadow MMU. */ in reset_shadow_zero_bits_mask()
5285 WARN_ON_ONCE(context->root_role.level < PT32E_ROOT_LEVEL); in reset_shadow_zero_bits_mask()
5287 shadow_zero_check = &context->shadow_zero_check; in reset_shadow_zero_bits_mask()
5289 context->root_role.level, in reset_shadow_zero_bits_mask()
5290 context->root_role.efer_nx, in reset_shadow_zero_bits_mask()
5297 for (i = context->root_role.level; --i >= 0;) { in reset_shadow_zero_bits_mask()
5304 shadow_zero_check->rsvd_bits_mask[0][i] |= shadow_me_mask; in reset_shadow_zero_bits_mask()
5305 shadow_zero_check->rsvd_bits_mask[1][i] |= shadow_me_mask; in reset_shadow_zero_bits_mask()
5306 shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_value; in reset_shadow_zero_bits_mask()
5307 shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_value; in reset_shadow_zero_bits_mask()
5320	 * possible; however, KVM currently does not do execution-protection.
5327 shadow_zero_check = &context->shadow_zero_check; in reset_tdp_shadow_zero_bits_mask()
5331 context->root_role.level, true, in reset_tdp_shadow_zero_bits_mask()
5342 for (i = context->root_role.level; --i >= 0;) { in reset_tdp_shadow_zero_bits_mask()
5343 shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask; in reset_tdp_shadow_zero_bits_mask()
5344 shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask; in reset_tdp_shadow_zero_bits_mask()
5355 __reset_rsvds_bits_mask_ept(&context->shadow_zero_check, in reset_ept_shadow_zero_bits_mask()
5383 for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) { in update_permission_bitmask()
5391 /* Faults from writes to non-writable pages */ in update_permission_bitmask()
5395		/* Faults from fetches of non-executable pages */	 in update_permission_bitmask()
5419			 * SMAP: kernel-mode data accesses from user-mode	 in update_permission_bitmask()
5423 * - X86_CR4_SMAP is set in CR4 in update_permission_bitmask()
5424 * - A user page is accessed in update_permission_bitmask()
5425 * - The access is not a fetch in update_permission_bitmask()
5426 * - The access is supervisor mode in update_permission_bitmask()
5427			 *   - The access is an implicit supervisor access or X86_EFLAGS_AC is clear	 in update_permission_bitmask()
5438 mmu->permissions[byte] = ff | uf | wf | smepf | smapf; in update_permission_bitmask()
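/*
 * Illustrative sketch (hypothetical helper): the table filled in above is
 * meant to be consulted on the fault path, collapsing all of the CR0/CR4/EFER
 * policy decisions into a single byte lookup.  The index is assumed to be
 * derived from the page-fault error code (and, for SMAP, from the
 * implicit-access/EFLAGS.AC state), and pte_access encodes the gPTE's
 * user/write/exec permissions.
 */
static bool demo_access_faults(const u8 *permissions, unsigned int index,
			       unsigned int pte_access)
{
	return (permissions[index] >> pte_access) & 1;
}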
5444 * user-mode addresses based on the value in the PKRU register. Protection
5453 * - PK is always zero unless CR4.PKE=1 and EFER.LMA=1
5454 * - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch)
5455 * - PK is always zero if U=0 in the page tables
5456 * - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access.
5471 mmu->pkru_mask = 0; in update_pkru_bitmask()
5478 for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) { in update_pkru_bitmask()
5506 mmu->pkru_mask |= (pkey_bits & 3) << pfec; in update_pkru_bitmask()
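/*
 * Illustrative sketch (hypothetical helper): pkru_mask packs two bits per
 * error-code combination, recording whether PKRU.AD and/or PKRU.WD may deny
 * that kind of access.  A fault-time check extracts those two bits and ANDs
 * them with the two PKRU bits of the page's protection key (AD at bit 2*key,
 * WD at bit 2*key + 1).
 */
static bool demo_pkey_denies(u32 pkru_mask, u32 pkru, unsigned int pfec,
			     unsigned int pkey)
{
	u32 policy   = (pkru_mask >> pfec) & 3;	/* matches the shift above */
	u32 key_bits = (pkru >> (pkey * 2)) & 3;

	return policy & key_bits;
}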
5523 context->page_fault = paging64_page_fault; in paging64_init_context()
5524 context->gva_to_gpa = paging64_gva_to_gpa; in paging64_init_context()
5525 context->sync_spte = paging64_sync_spte; in paging64_init_context()
5530 context->page_fault = paging32_page_fault; in paging32_init_context()
5531 context->gva_to_gpa = paging32_gva_to_gpa; in paging32_init_context()
5532 context->sync_spte = paging32_sync_spte; in paging32_init_context()
5543 role.ext.valid = 1; in kvm_calc_cpu_role()
5564 role.ext.cr4_smep = ____is_cr4_smep(regs); in kvm_calc_cpu_role()
5565 role.ext.cr4_smap = ____is_cr4_smap(regs); in kvm_calc_cpu_role()
5566 role.ext.cr4_pse = ____is_cr4_pse(regs); in kvm_calc_cpu_role()
5569 role.ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs); in kvm_calc_cpu_role()
5570 role.ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs); in kvm_calc_cpu_role()
5571 role.ext.efer_lma = ____is_efer_lma(regs); in kvm_calc_cpu_role()
5586 mmu->cpu_role.base.cr0_wp = cr0_wp; in __kvm_mmu_refresh_passthrough_bits()
5596 /* Use 5-level TDP if and only if it's useful/necessary. */ in kvm_mmu_get_tdp_level()
5630 struct kvm_mmu *context = &vcpu->arch.root_mmu; in init_kvm_tdp_mmu()
5633 if (cpu_role.as_u64 == context->cpu_role.as_u64 && in init_kvm_tdp_mmu()
5634 root_role.word == context->root_role.word) in init_kvm_tdp_mmu()
5637 context->cpu_role.as_u64 = cpu_role.as_u64; in init_kvm_tdp_mmu()
5638 context->root_role.word = root_role.word; in init_kvm_tdp_mmu()
5639 context->page_fault = kvm_tdp_page_fault; in init_kvm_tdp_mmu()
5640 context->sync_spte = NULL; in init_kvm_tdp_mmu()
5641 context->get_guest_pgd = get_guest_cr3; in init_kvm_tdp_mmu()
5642 context->get_pdptr = kvm_pdptr_read; in init_kvm_tdp_mmu()
5643 context->inject_page_fault = kvm_inject_page_fault; in init_kvm_tdp_mmu()
5646 context->gva_to_gpa = nonpaging_gva_to_gpa; in init_kvm_tdp_mmu()
5648 context->gva_to_gpa = paging64_gva_to_gpa; in init_kvm_tdp_mmu()
5650 context->gva_to_gpa = paging32_gva_to_gpa; in init_kvm_tdp_mmu()
5660 if (cpu_role.as_u64 == context->cpu_role.as_u64 && in shadow_mmu_init_context()
5661 root_role.word == context->root_role.word) in shadow_mmu_init_context()
5664 context->cpu_role.as_u64 = cpu_role.as_u64; in shadow_mmu_init_context()
5665 context->root_role.word = root_role.word; in shadow_mmu_init_context()
5681 struct kvm_mmu *context = &vcpu->arch.root_mmu; in kvm_init_shadow_mmu()
5686 /* KVM uses PAE paging whenever the guest isn't using 64-bit paging. */ in kvm_init_shadow_mmu()
5692 * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and in kvm_init_shadow_mmu()
5694 * The iTLB multi-hit workaround can be toggled at any time, so assume in kvm_init_shadow_mmu()
5695 * NX can be used by any non-nested shadow MMU to avoid having to reset in kvm_init_shadow_mmu()
5706 struct kvm_mmu *context = &vcpu->arch.guest_mmu; in kvm_init_shadow_npt_mmu()
5747 role.ext.word = 0; in kvm_calc_shadow_ept_root_page_role()
5748 role.ext.execonly = execonly; in kvm_calc_shadow_ept_root_page_role()
5749 role.ext.valid = 1; in kvm_calc_shadow_ept_root_page_role()
5758 struct kvm_mmu *context = &vcpu->arch.guest_mmu; in kvm_init_shadow_ept_mmu()
5764 if (new_mode.as_u64 != context->cpu_role.as_u64) { in kvm_init_shadow_ept_mmu()
5766 context->cpu_role.as_u64 = new_mode.as_u64; in kvm_init_shadow_ept_mmu()
5767 context->root_role.word = new_mode.base.word; in kvm_init_shadow_ept_mmu()
5769 context->page_fault = ept_page_fault; in kvm_init_shadow_ept_mmu()
5770 context->gva_to_gpa = ept_gva_to_gpa; in kvm_init_shadow_ept_mmu()
5771 context->sync_spte = ept_sync_spte; in kvm_init_shadow_ept_mmu()
5774 context->pkru_mask = 0; in kvm_init_shadow_ept_mmu()
5786 struct kvm_mmu *context = &vcpu->arch.root_mmu; in init_kvm_softmmu()
5790 context->get_guest_pgd = get_guest_cr3; in init_kvm_softmmu()
5791 context->get_pdptr = kvm_pdptr_read; in init_kvm_softmmu()
5792 context->inject_page_fault = kvm_inject_page_fault; in init_kvm_softmmu()
5798 struct kvm_mmu *g_context = &vcpu->arch.nested_mmu; in init_kvm_nested_mmu()
5800 if (new_mode.as_u64 == g_context->cpu_role.as_u64) in init_kvm_nested_mmu()
5803 g_context->cpu_role.as_u64 = new_mode.as_u64; in init_kvm_nested_mmu()
5804 g_context->get_guest_pgd = get_guest_cr3; in init_kvm_nested_mmu()
5805 g_context->get_pdptr = kvm_pdptr_read; in init_kvm_nested_mmu()
5806 g_context->inject_page_fault = kvm_inject_page_fault; in init_kvm_nested_mmu()
5812 g_context->sync_spte = NULL; in init_kvm_nested_mmu()
5815 * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using in init_kvm_nested_mmu()
5823 g_context->gva_to_gpa = nonpaging_gva_to_gpa; in init_kvm_nested_mmu()
5825 g_context->gva_to_gpa = paging64_gva_to_gpa; in init_kvm_nested_mmu()
5827 g_context->gva_to_gpa = paging64_gva_to_gpa; in init_kvm_nested_mmu()
5829 g_context->gva_to_gpa = paging32_gva_to_gpa; in init_kvm_nested_mmu()
5862 vcpu->arch.root_mmu.root_role.invalid = 1; in kvm_mmu_after_set_cpuid()
5863 vcpu->arch.guest_mmu.root_role.invalid = 1; in kvm_mmu_after_set_cpuid()
5864 vcpu->arch.nested_mmu.root_role.invalid = 1; in kvm_mmu_after_set_cpuid()
5865 vcpu->arch.root_mmu.cpu_role.ext.valid = 0; in kvm_mmu_after_set_cpuid()
5866 vcpu->arch.guest_mmu.cpu_role.ext.valid = 0; in kvm_mmu_after_set_cpuid()
5867 vcpu->arch.nested_mmu.cpu_role.ext.valid = 0; in kvm_mmu_after_set_cpuid()
5874 KVM_BUG_ON(kvm_vcpu_has_run(vcpu), vcpu->kvm); in kvm_mmu_after_set_cpuid()
5888 r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->root_role.direct); in kvm_mmu_load()
5894 if (vcpu->arch.mmu->root_role.direct) in kvm_mmu_load()
5919 struct kvm *kvm = vcpu->kvm; in kvm_mmu_unload()
5921 kvm_mmu_free_roots(kvm, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL); in kvm_mmu_unload()
5922 WARN_ON_ONCE(VALID_PAGE(vcpu->arch.root_mmu.root.hpa)); in kvm_mmu_unload()
5923 kvm_mmu_free_roots(kvm, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); in kvm_mmu_unload()
5924 WARN_ON_ONCE(VALID_PAGE(vcpu->arch.guest_mmu.root.hpa)); in kvm_mmu_unload()
5946 * is unlikely to zap an in-use PGD. in is_obsolete_root()
5960 if (is_obsolete_root(kvm, mmu->root.hpa)) in __kvm_mmu_free_obsolete_roots()
5964 if (is_obsolete_root(kvm, mmu->prev_roots[i].hpa)) in __kvm_mmu_free_obsolete_roots()
5974 __kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.root_mmu); in kvm_mmu_free_obsolete_roots()
5975 __kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.guest_mmu); in kvm_mmu_free_obsolete_roots()
5991 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */ in mmu_pte_write_fetch_gpte()
6012	 * Skip write-flooding detection for the sp whose level is 1, because	 in detect_write_flooding()
6013	 * it can become unsync, in which case the guest page is not write-protected.	 in detect_write_flooding()
6015 if (sp->role.level == PG_LEVEL_4K) in detect_write_flooding()
6018 atomic_inc(&sp->write_flooding_count); in detect_write_flooding()
6019 return atomic_read(&sp->write_flooding_count) >= 3; in detect_write_flooding()
6032 pte_size = sp->role.has_4_byte_gpte ? 4 : 8; in detect_write_misaligned()
6038 if (!(offset & (pte_size - 1)) && bytes == 1) in detect_write_misaligned()
6041 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1); in detect_write_misaligned()
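/*
 * Worked example (illustrative): with 8-byte gptes, a 4-byte write at offset 6
 * yields (6 ^ 9) & ~7 == 0x8, i.e. the write straddles two gptes and is
 * treated as misaligned; a 4-byte write at offset 8 yields (8 ^ 11) & ~7 == 0,
 * i.e. it stays within a single gpte.
 */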
6054 level = sp->role.level; in get_written_sptes()
6056 if (sp->role.has_4_byte_gpte) { in get_written_sptes()
6057 page_offset <<= 1; /* 32->64 */ in get_written_sptes()
6059 * A 32-bit pde maps 4MB while the shadow pdes map in get_written_sptes()
6070 if (quadrant != sp->role.quadrant) in get_written_sptes()
6074 spte = &sp->spt[page_offset / sizeof(*spte)]; in get_written_sptes()
6093 * a non-zero indirect_shadow_pages. Pairs with the smp_mb() in in kvm_mmu_track_write()
6097 if (!vcpu->kvm->arch.indirect_shadow_pages) in kvm_mmu_track_write()
6100 write_lock(&vcpu->kvm->mmu_lock); in kvm_mmu_track_write()
6104 ++vcpu->kvm->stat.mmu_pte_write; in kvm_mmu_track_write()
6106 for_each_gfn_valid_sp_with_gptes(vcpu->kvm, sp, gfn) { in kvm_mmu_track_write()
6109 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list); in kvm_mmu_track_write()
6110 ++vcpu->kvm->stat.mmu_flooded; in kvm_mmu_track_write()
6118 while (npte--) { in kvm_mmu_track_write()
6120 mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL); in kvm_mmu_track_write()
6121 if (gentry && sp->role.level != PG_LEVEL_4K) in kvm_mmu_track_write()
6122 ++vcpu->kvm->stat.mmu_pde_zapped; in kvm_mmu_track_write()
6128 kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush); in kvm_mmu_track_write()
6129 write_unlock(&vcpu->kvm->mmu_lock); in kvm_mmu_track_write()
6142 bool direct = vcpu->arch.mmu->root_role.direct; in kvm_mmu_write_protect_fault()
6145 * Do not try to unprotect and retry if the vCPU re-faulted on the same in kvm_mmu_write_protect_fault()
6148 * a non-page-table modifying instruction on the PDE that points to the in kvm_mmu_write_protect_fault()
6152 if (vcpu->arch.last_retry_eip == kvm_rip_read(vcpu) && in kvm_mmu_write_protect_fault()
6153 vcpu->arch.last_retry_addr == cr2_or_gpa) in kvm_mmu_write_protect_fault()
6162 vcpu->arch.last_retry_eip = 0; in kvm_mmu_write_protect_fault()
6163 vcpu->arch.last_retry_addr = 0; in kvm_mmu_write_protect_fault()
6172 * will keep faulting on the non-existent MMIO address. in kvm_mmu_write_protect_fault()
6179 * to a read-only violation while the CPU was walking non-nested NPT in kvm_mmu_write_protect_fault()
6183 * (L0) write-protects the nested NPTs, i.e. npt12 entries, KVM is also in kvm_mmu_write_protect_fault()
6184 * unknowingly write-protecting L1's guest page tables, which KVM isn't in kvm_mmu_write_protect_fault()
6192 * isn't shadowed by KVM, there is no need to write-protect L1's gPTEs in kvm_mmu_write_protect_fault()
6198 * the instruction. If no shadow pages were zapped, then the write- in kvm_mmu_write_protect_fault()
6218	 * The gfn is write-protected, but if KVM detects it's emulating an	 in kvm_mmu_write_protect_fault()
6221 * re-execute the instruction that caused the page fault. Do not allow in kvm_mmu_write_protect_fault()
6236 bool direct = vcpu->arch.mmu->root_role.direct; in kvm_mmu_page_fault()
6238 if (WARN_ON_ONCE(!VALID_PAGE(vcpu->arch.mmu->root.hpa))) in kvm_mmu_page_fault()
6242 * Except for reserved faults (emulated MMIO is shared-only), set the in kvm_mmu_page_fault()
6243 * PFERR_PRIVATE_ACCESS flag for software-protected VMs based on the gfn's in kvm_mmu_page_fault()
6247 * for software-protected VMs. in kvm_mmu_page_fault()
6251 vcpu->kvm->arch.vm_type == KVM_X86_SW_PROTECTED_VM && in kvm_mmu_page_fault()
6252 kvm_mem_is_private(vcpu->kvm, gpa_to_gfn(cr2_or_gpa))) in kvm_mmu_page_fault()
6258 return -EFAULT; in kvm_mmu_page_fault()
6266 vcpu->stat.pf_taken++; in kvm_mmu_page_fault()
6270 if (KVM_BUG_ON(r == RET_PF_INVALID, vcpu->kvm)) in kvm_mmu_page_fault()
6271 return -EIO; in kvm_mmu_page_fault()
6282 vcpu->stat.pf_fixed++; in kvm_mmu_page_fault()
6284 vcpu->stat.pf_emulate++; in kvm_mmu_page_fault()
6286 vcpu->stat.pf_spurious++; in kvm_mmu_page_fault()
6315 for (level = root_level; level >= leaf; level--) in kvm_mmu_print_sptes()
6333 if (WARN_ON_ONCE(mmu != vcpu->arch.mmu)) in __kvm_mmu_invalidate_addr()
6339 write_lock(&vcpu->kvm->mmu_lock); in __kvm_mmu_invalidate_addr()
6343 if (sp->unsync) { in __kvm_mmu_invalidate_addr()
6347 mmu_page_zap_pte(vcpu->kvm, sp, iterator.sptep, NULL); in __kvm_mmu_invalidate_addr()
6349 kvm_flush_remote_tlbs_sptep(vcpu->kvm, iterator.sptep); in __kvm_mmu_invalidate_addr()
6352 if (!sp->unsync_children) in __kvm_mmu_invalidate_addr()
6355 write_unlock(&vcpu->kvm->mmu_lock); in __kvm_mmu_invalidate_addr()
6365 /* It's actually a GPA for vcpu->arch.guest_mmu. */ in kvm_mmu_invalidate_addr()
6366 if (mmu != &vcpu->arch.guest_mmu) { in kvm_mmu_invalidate_addr()
6367 /* INVLPG on a non-canonical address is a NOP according to the SDM. */ in kvm_mmu_invalidate_addr()
6374 if (!mmu->sync_spte) in kvm_mmu_invalidate_addr()
6378 __kvm_mmu_invalidate_addr(vcpu, mmu, addr, mmu->root.hpa); in kvm_mmu_invalidate_addr()
6382 __kvm_mmu_invalidate_addr(vcpu, mmu, addr, mmu->prev_roots[i].hpa); in kvm_mmu_invalidate_addr()
6399 kvm_mmu_invalidate_addr(vcpu, vcpu->arch.walk_mmu, gva, KVM_MMU_ROOTS_ALL); in kvm_mmu_invlpg()
6400 ++vcpu->stat.invlpg; in kvm_mmu_invlpg()
6407 struct kvm_mmu *mmu = vcpu->arch.mmu; in kvm_mmu_invpcid_gva()
6415 if (VALID_PAGE(mmu->prev_roots[i].hpa) && in kvm_mmu_invpcid_gva()
6416 pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) in kvm_mmu_invpcid_gva()
6422 ++vcpu->stat.invlpg; in kvm_mmu_invpcid_gva()
6459 if (!tdp_enabled && mmu->pae_root) in free_mmu_pages()
6460 set_memory_encrypted((unsigned long)mmu->pae_root, 1); in free_mmu_pages()
6461 free_page((unsigned long)mmu->pae_root); in free_mmu_pages()
6462 free_page((unsigned long)mmu->pml4_root); in free_mmu_pages()
6463 free_page((unsigned long)mmu->pml5_root); in free_mmu_pages()
6471 mmu->root.hpa = INVALID_PAGE; in __kvm_mmu_create()
6472 mmu->root.pgd = 0; in __kvm_mmu_create()
6473 mmu->mirror_root_hpa = INVALID_PAGE; in __kvm_mmu_create()
6475 mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; in __kvm_mmu_create()
6477 /* vcpu->arch.guest_mmu isn't used when !tdp_enabled. */ in __kvm_mmu_create()
6478 if (!tdp_enabled && mmu == &vcpu->arch.guest_mmu) in __kvm_mmu_create()
6483 * while the PDP table is a per-vCPU construct that's allocated at MMU in __kvm_mmu_create()
6484 * creation. When emulating 32-bit mode, cr3 is only 32 bits even on in __kvm_mmu_create()
6488 * table. The main exception, handled here, is SVM's 32-bit NPT. The in __kvm_mmu_create()
6489 * other exception is for shadowing L1's 32-bit or PAE NPT on 64-bit in __kvm_mmu_create()
6490 * KVM; that horror is handled on-demand by mmu_alloc_special_roots(). in __kvm_mmu_create()
6497 return -ENOMEM; in __kvm_mmu_create()
6499 mmu->pae_root = page_address(page); in __kvm_mmu_create()
6505 * only necessary when using shadow paging, as 64-bit NPT can get at in __kvm_mmu_create()
6506 * the C-bit even when shadowing 32-bit NPT, and SME isn't supported in __kvm_mmu_create()
6507 * by 32-bit kernels (when KVM itself uses 32-bit NPT). in __kvm_mmu_create()
6510 set_memory_decrypted((unsigned long)mmu->pae_root, 1); in __kvm_mmu_create()
6515 mmu->pae_root[i] = INVALID_PAE_ROOT; in __kvm_mmu_create()
6524 vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache; in kvm_mmu_create()
6525 vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO; in kvm_mmu_create()
6527 vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache; in kvm_mmu_create()
6528 vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO; in kvm_mmu_create()
6530 vcpu->arch.mmu_shadow_page_cache.init_value = in kvm_mmu_create()
6532 if (!vcpu->arch.mmu_shadow_page_cache.init_value) in kvm_mmu_create()
6533 vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO; in kvm_mmu_create()
6535 vcpu->arch.mmu = &vcpu->arch.root_mmu; in kvm_mmu_create()
6536 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; in kvm_mmu_create()
6538 ret = __kvm_mmu_create(vcpu, &vcpu->arch.guest_mmu); in kvm_mmu_create()
6542 ret = __kvm_mmu_create(vcpu, &vcpu->arch.root_mmu); in kvm_mmu_create()
6548 free_mmu_pages(&vcpu->arch.guest_mmu); in kvm_mmu_create()
6560 lockdep_assert_held(&kvm->slots_lock); in kvm_zap_obsolete_pages()
6564 &kvm->arch.active_mmu_pages, link) { in kvm_zap_obsolete_pages()
6577 if (WARN_ON_ONCE(sp->role.invalid)) in kvm_zap_obsolete_pages()
6587 cond_resched_rwlock_write(&kvm->mmu_lock)) { in kvm_zap_obsolete_pages()
6613 * Fast invalidate all shadow pages and use lock-break technique
6618	 * not use any resource of the slot being deleted or of any slot
6623 lockdep_assert_held(&kvm->slots_lock); in kvm_mmu_zap_all_fast()
6625 write_lock(&kvm->mmu_lock); in kvm_mmu_zap_all_fast()
6635 kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1; in kvm_mmu_zap_all_fast()
6638 * In order to ensure all vCPUs drop their soon-to-be invalid roots, in kvm_mmu_zap_all_fast()
6663 write_unlock(&kvm->mmu_lock); in kvm_mmu_zap_all_fast()
6671 * lead to use-after-free. in kvm_mmu_zap_all_fast()
6679 kvm->arch.shadow_mmio_value = shadow_mmio_value; in kvm_mmu_init_vm()
6680 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); in kvm_mmu_init_vm()
6681 INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages); in kvm_mmu_init_vm()
6682 spin_lock_init(&kvm->arch.mmu_unsync_pages_lock); in kvm_mmu_init_vm()
6687 kvm->arch.split_page_header_cache.kmem_cache = mmu_page_header_cache; in kvm_mmu_init_vm()
6688 kvm->arch.split_page_header_cache.gfp_zero = __GFP_ZERO; in kvm_mmu_init_vm()
6690 kvm->arch.split_shadow_page_cache.gfp_zero = __GFP_ZERO; in kvm_mmu_init_vm()
6692 kvm->arch.split_desc_cache.kmem_cache = pte_list_desc_cache; in kvm_mmu_init_vm()
6693 kvm->arch.split_desc_cache.gfp_zero = __GFP_ZERO; in kvm_mmu_init_vm()
6698 kvm_mmu_free_memory_cache(&kvm->arch.split_desc_cache); in mmu_free_vm_memory_caches()
6699 kvm_mmu_free_memory_cache(&kvm->arch.split_page_header_cache); in mmu_free_vm_memory_caches()
6700 kvm_mmu_free_memory_cache(&kvm->arch.split_shadow_page_cache); in mmu_free_vm_memory_caches()
6728 start = max(gfn_start, memslot->base_gfn); in kvm_rmap_zap_gfn_range()
6729 end = min(gfn_end, memslot->base_gfn + memslot->npages); in kvm_rmap_zap_gfn_range()
6752 write_lock(&kvm->mmu_lock); in kvm_zap_gfn_range()
6764 kvm_flush_remote_tlbs_range(kvm, gfn_start, gfn_end - gfn_start); in kvm_zap_gfn_range()
6768 write_unlock(&kvm->mmu_lock); in kvm_zap_gfn_range()
6783 write_lock(&kvm->mmu_lock); in kvm_mmu_slot_remove_write_access()
6786 write_unlock(&kvm->mmu_lock); in kvm_mmu_slot_remove_write_access()
6790 read_lock(&kvm->mmu_lock); in kvm_mmu_slot_remove_write_access()
6792 read_unlock(&kvm->mmu_lock); in kvm_mmu_slot_remove_write_access()
6803 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) in need_topup_split_caches_or_resched()
6811 return need_topup(&kvm->arch.split_desc_cache, SPLIT_DESC_CACHE_MIN_NR_OBJECTS) || in need_topup_split_caches_or_resched()
6812 need_topup(&kvm->arch.split_page_header_cache, 1) || in need_topup_split_caches_or_resched()
6813 need_topup(&kvm->arch.split_shadow_page_cache, 1); in need_topup_split_caches_or_resched()
6825 * but aliasing rarely occurs post-boot or for many gfns. If there is in topup_split_caches()
6826 * only one rmap entry, rmap->val points directly at that one entry and in topup_split_caches()
6835 lockdep_assert_held(&kvm->slots_lock); in topup_split_caches()
6837 r = __kvm_mmu_topup_memory_cache(&kvm->arch.split_desc_cache, capacity, in topup_split_caches()
6842 r = kvm_mmu_topup_memory_cache(&kvm->arch.split_page_header_cache, 1); in topup_split_caches()
6846 return kvm_mmu_topup_memory_cache(&kvm->arch.split_shadow_page_cache, 1); in topup_split_caches()
6869 caches.page_header_cache = &kvm->arch.split_page_header_cache; in shadow_mmu_get_sp_for_split()
6870 caches.shadow_page_cache = &kvm->arch.split_shadow_page_cache; in shadow_mmu_get_sp_for_split()
6881 struct kvm_mmu_memory_cache *cache = &kvm->arch.split_desc_cache; in shadow_mmu_split_huge_page()
6892 sptep = &sp->spt[index]; in shadow_mmu_split_huge_page()
6899 * gfn-to-pfn translation since the SP is direct, so no need to in shadow_mmu_split_huge_page()
6910 flush |= !is_last_spte(*sptep, sp->role.level); in shadow_mmu_split_huge_page()
6914 spte = make_small_spte(kvm, huge_spte, sp->role, index); in shadow_mmu_split_huge_page()
6916 __rmap_add(kvm, cache, slot, sptep, gfn, sp->role.access); in shadow_mmu_split_huge_page()
6933 level = huge_sp->role.level; in shadow_mmu_try_split_huge_page()
6937 r = -ENOSPC; in shadow_mmu_try_split_huge_page()
6942 write_unlock(&kvm->mmu_lock); in shadow_mmu_try_split_huge_page()
6945 * If the topup succeeds, return -EAGAIN to indicate that the in shadow_mmu_try_split_huge_page()
6949 r = topup_split_caches(kvm) ?: -EAGAIN; in shadow_mmu_try_split_huge_page()
6950 write_lock(&kvm->mmu_lock); in shadow_mmu_try_split_huge_page()
6975 if (WARN_ON_ONCE(!sp->role.guest_mode)) in shadow_mmu_try_split_huge_pages()
6978 /* The rmaps should never contain non-leaf SPTEs. */ in shadow_mmu_try_split_huge_pages()
6983 if (WARN_ON_ONCE(sp->unsync)) in shadow_mmu_try_split_huge_pages()
6987 if (sp->role.invalid) in shadow_mmu_try_split_huge_pages()
6997 if (!r || r == -EAGAIN) in shadow_mmu_try_split_huge_pages()
7000 /* The split failed and shouldn't be retried (e.g. -ENOMEM). */ in shadow_mmu_try_split_huge_pages()
7020 for (level = KVM_MAX_HUGEPAGE_LEVEL; level > target_level; level--) in kvm_shadow_mmu_try_split_huge_pages()
7022 level, level, start, end - 1, true, true, false); in kvm_shadow_mmu_try_split_huge_pages()
7025 /* Must be called with the mmu_lock held in write-mode. */
7049 u64 start = memslot->base_gfn; in kvm_mmu_slot_try_split_huge_pages()
7050 u64 end = start + memslot->npages; in kvm_mmu_slot_try_split_huge_pages()
7056 write_lock(&kvm->mmu_lock); in kvm_mmu_slot_try_split_huge_pages()
7058 write_unlock(&kvm->mmu_lock); in kvm_mmu_slot_try_split_huge_pages()
7061 read_lock(&kvm->mmu_lock); in kvm_mmu_slot_try_split_huge_pages()
7063 read_unlock(&kvm->mmu_lock); in kvm_mmu_slot_try_split_huge_pages()
7067 * write-protecting and/or clearing dirty on the newly split SPTEs to in kvm_mmu_slot_try_split_huge_pages()
7096 if (sp->role.direct && in kvm_mmu_zap_collapsible_spte()
7097 sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn)) { in kvm_mmu_zap_collapsible_spte()
7117 * Note, use KVM_MAX_HUGEPAGE_LEVEL - 1 since there's no need to zap in kvm_rmap_zap_collapsible_sptes()
7121 PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL - 1, true)) in kvm_rmap_zap_collapsible_sptes()
7129 write_lock(&kvm->mmu_lock); in kvm_mmu_recover_huge_pages()
7131 write_unlock(&kvm->mmu_lock); in kvm_mmu_recover_huge_pages()
7135 read_lock(&kvm->mmu_lock); in kvm_mmu_recover_huge_pages()
7137 read_unlock(&kvm->mmu_lock); in kvm_mmu_recover_huge_pages()
7145 write_lock(&kvm->mmu_lock); in kvm_mmu_slot_leaf_clear_dirty()
7151 write_unlock(&kvm->mmu_lock); in kvm_mmu_slot_leaf_clear_dirty()
7155 read_lock(&kvm->mmu_lock); in kvm_mmu_slot_leaf_clear_dirty()
7157 read_unlock(&kvm->mmu_lock); in kvm_mmu_slot_leaf_clear_dirty()
7176 write_lock(&kvm->mmu_lock); in kvm_mmu_zap_all()
7178 list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) { in kvm_mmu_zap_all()
7179 if (WARN_ON_ONCE(sp->role.invalid)) in kvm_mmu_zap_all()
7183 if (cond_resched_rwlock_write(&kvm->mmu_lock)) in kvm_mmu_zap_all()
7192 write_unlock(&kvm->mmu_lock); in kvm_mmu_zap_all()
7207 if (list_empty(&kvm->arch.active_mmu_pages)) in kvm_mmu_zap_memslot_pages_and_flush()
7214 * will result in use-after-free, e.g. in unaccount_shadowed(). in kvm_mmu_zap_memslot_pages_and_flush()
7216 for (i = 0; i < slot->npages; i++) { in kvm_mmu_zap_memslot_pages_and_flush()
7218 gfn_t gfn = slot->base_gfn + i; in kvm_mmu_zap_memslot_pages_and_flush()
7223 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) { in kvm_mmu_zap_memslot_pages_and_flush()
7226 cond_resched_rwlock_write(&kvm->mmu_lock); in kvm_mmu_zap_memslot_pages_and_flush()
7239 .start = slot->base_gfn, in kvm_mmu_zap_memslot()
7240 .end = slot->base_gfn + slot->npages, in kvm_mmu_zap_memslot()
7245 write_lock(&kvm->mmu_lock); in kvm_mmu_zap_memslot()
7248 write_unlock(&kvm->mmu_lock); in kvm_mmu_zap_memslot()
7253 return kvm->arch.vm_type == KVM_X86_DEFAULT_VM && in kvm_memslot_flush_zap_all()
7266 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen) in kvm_mmu_invalidate_mmio_sptes() argument
7268 WARN_ON_ONCE(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS); in kvm_mmu_invalidate_mmio_sptes()
7270 gen &= MMIO_SPTE_GEN_MASK; in kvm_mmu_invalidate_mmio_sptes()
7279 gen &= ~((u64)kvm_arch_nr_memslot_as_ids(kvm) - 1); in kvm_mmu_invalidate_mmio_sptes()
7285 if (unlikely(gen == 0)) { in kvm_mmu_invalidate_mmio_sptes()
7300 * The NX recovery thread is spawned on-demand at the first KVM_RUN and in kvm_wake_nx_recovery_thread()
7304 struct vhost_task *nx_thread = READ_ONCE(kvm->arch.nx_huge_page_recovery_thread); in kvm_wake_nx_recovery_thread()
7335 return -EPERM; in set_nx_huge_pages()
7350 return -EBUSY; in set_nx_huge_pages()
7355 return -EINVAL; in set_nx_huge_pages()
7366 mutex_lock(&kvm->slots_lock); in set_nx_huge_pages()
7368 mutex_unlock(&kvm->slots_lock); in set_nx_huge_pages()
7380 * its default value of -1 is technically undefined behavior for a boolean.
7386 if (nx_huge_pages == -1) in kvm_mmu_x86_module_init()
7406 int ret = -ENOMEM; in kvm_mmu_vendor_module_init()
7441 read_lock(&vcpu->kvm->mmu_lock); in kvm_mmu_destroy()
7442 mmu_free_root_page(vcpu->kvm, &vcpu->arch.mmu->mirror_root_hpa, in kvm_mmu_destroy()
7444 read_unlock(&vcpu->kvm->mmu_lock); in kvm_mmu_destroy()
7446 free_mmu_pages(&vcpu->arch.root_mmu); in kvm_mmu_destroy()
7447 free_mmu_pages(&vcpu->arch.guest_mmu); in kvm_mmu_destroy()
7488 return -EPERM; in set_nx_huge_pages_recovery_param()
7515 unsigned long nx_lpage_splits = kvm->stat.nx_lpage_splits; in kvm_recover_nx_huge_pages()
7524 rcu_idx = srcu_read_lock(&kvm->srcu); in kvm_recover_nx_huge_pages()
7525 write_lock(&kvm->mmu_lock); in kvm_recover_nx_huge_pages()
7536 for ( ; to_zap; --to_zap) { in kvm_recover_nx_huge_pages()
7537 if (list_empty(&kvm->arch.possible_nx_huge_pages)) in kvm_recover_nx_huge_pages()
7547 sp = list_first_entry(&kvm->arch.possible_nx_huge_pages, in kvm_recover_nx_huge_pages()
7550 WARN_ON_ONCE(!sp->nx_huge_page_disallowed); in kvm_recover_nx_huge_pages()
7551 WARN_ON_ONCE(!sp->role.direct); in kvm_recover_nx_huge_pages()
7567 * of kvm->nr_memslots_dirty_logging is not a problem: if it is in kvm_recover_nx_huge_pages()
7574 if (atomic_read(&kvm->nr_memslots_dirty_logging)) { in kvm_recover_nx_huge_pages()
7577 slots = kvm_memslots_for_spte_role(kvm, sp->role); in kvm_recover_nx_huge_pages()
7578 slot = __gfn_to_memslot(slots, sp->gfn); in kvm_recover_nx_huge_pages()
7588 WARN_ON_ONCE(sp->nx_huge_page_disallowed); in kvm_recover_nx_huge_pages()
7590 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) { in kvm_recover_nx_huge_pages()
7594 cond_resched_rwlock_write(&kvm->mmu_lock); in kvm_recover_nx_huge_pages()
7604 write_unlock(&kvm->mmu_lock); in kvm_recover_nx_huge_pages()
7605 srcu_read_unlock(&kvm->srcu, rcu_idx); in kvm_recover_nx_huge_pages()
7623 remaining_time = kvm->arch.nx_huge_page_last + msecs_to_jiffies(period) in kvm_nx_huge_page_recovery_worker()
7624 - get_jiffies_64(); in kvm_nx_huge_page_recovery_worker()
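/*
 * Illustrative example of the computation above: with a 60000 ms period and
 * 25 seconds elapsed since nx_huge_page_last, remaining_time works out to
 * roughly 35 seconds' worth of jiffies, i.e. how long the worker still has to
 * wait before the next recovery pass is due.
 */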
7633 kvm->arch.nx_huge_page_last = get_jiffies_64(); in kvm_nx_huge_page_recovery_worker()
7643 kvm->arch.nx_huge_page_last = get_jiffies_64(); in kvm_mmu_start_lpage_recovery()
7646 kvm, "kvm-nx-lpage-recovery"); in kvm_mmu_start_lpage_recovery()
7654 WRITE_ONCE(kvm->arch.nx_huge_page_recovery_thread, nx_thread); in kvm_mmu_start_lpage_recovery()
7663 return call_once(&kvm->arch.nx_once, kvm_mmu_start_lpage_recovery); in kvm_mmu_post_init_vm()
7668 if (kvm->arch.nx_huge_page_recovery_thread) in kvm_mmu_pre_destroy_vm()
7669 vhost_task_stop(kvm->arch.nx_huge_page_recovery_thread); in kvm_mmu_pre_destroy_vm()
7676 return lpage_info_slot(gfn, slot, level)->disallow_lpage & KVM_LPAGE_MIXED_FLAG; in hugepage_test_mixed()
7682 lpage_info_slot(gfn, slot, level)->disallow_lpage &= ~KVM_LPAGE_MIXED_FLAG; in hugepage_clear_mixed()
7688 lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG; in hugepage_set_mixed()
7694 struct kvm_memory_slot *slot = range->slot; in kvm_arch_pre_set_memory_attributes()
7711 if (WARN_ON_ONCE(range->end <= range->start)) in kvm_arch_pre_set_memory_attributes()
7724 gfn_t start = gfn_round_for_level(range->start, level); in kvm_arch_pre_set_memory_attributes()
7725 gfn_t end = gfn_round_for_level(range->end - 1, level); in kvm_arch_pre_set_memory_attributes()
7728 if ((start != range->start || start + nr_pages > range->end) && in kvm_arch_pre_set_memory_attributes()
7729 start >= slot->base_gfn && in kvm_arch_pre_set_memory_attributes()
7730 start + nr_pages <= slot->base_gfn + slot->npages && in kvm_arch_pre_set_memory_attributes()
7737 if ((end + nr_pages) > range->end && in kvm_arch_pre_set_memory_attributes()
7738 (end + nr_pages) <= (slot->base_gfn + slot->npages) && in kvm_arch_pre_set_memory_attributes()
7744 if (range->arg.attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE) in kvm_arch_pre_set_memory_attributes()
7745 range->attr_filter = KVM_FILTER_SHARED; in kvm_arch_pre_set_memory_attributes()
7747 range->attr_filter = KVM_FILTER_PRIVATE; in kvm_arch_pre_set_memory_attributes()
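/*
 * Worked example (illustrative) for the head-page rounding above: at a 2MiB
 * level, nr_pages == 512, so range->start == gfn 0x1030 rounds down to
 * 0x1000.  Because 0x1000 != range->start, the head hugepage is only
 * partially covered by this update, so its mixed-attributes tracking must be
 * handled separately from the fully covered middle pages.
 */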
7763 for (gfn = start; gfn < end; gfn += KVM_PAGES_PER_HPAGE(level - 1)) { in hugepage_has_attrs()
7764 if (hugepage_test_mixed(slot, gfn, level - 1) || in hugepage_has_attrs()
7774 unsigned long attrs = range->arg.attributes; in kvm_arch_post_set_memory_attributes()
7775 struct kvm_memory_slot *slot = range->slot; in kvm_arch_post_set_memory_attributes()
7778 lockdep_assert_held_write(&kvm->mmu_lock); in kvm_arch_post_set_memory_attributes()
7779 lockdep_assert_held(&kvm->slots_lock); in kvm_arch_post_set_memory_attributes()
7796 gfn_t gfn = gfn_round_for_level(range->start, level); in kvm_arch_post_set_memory_attributes()
7799 if (gfn != range->start || gfn + nr_pages > range->end) { in kvm_arch_post_set_memory_attributes()
7805 if (gfn >= slot->base_gfn && in kvm_arch_post_set_memory_attributes()
7806 gfn + nr_pages <= slot->base_gfn + slot->npages) { in kvm_arch_post_set_memory_attributes()
7819 for ( ; gfn + nr_pages <= range->end; gfn += nr_pages) in kvm_arch_post_set_memory_attributes()
7827 if (gfn < range->end && in kvm_arch_post_set_memory_attributes()
7828 (gfn + nr_pages) <= (slot->base_gfn + slot->npages)) { in kvm_arch_post_set_memory_attributes()
7852 gfn_t end = gfn_round_for_level(slot->base_gfn + slot->npages, level); in kvm_mmu_init_memslot_memory_attributes()
7853 gfn_t start = gfn_round_for_level(slot->base_gfn, level); in kvm_mmu_init_memslot_memory_attributes()
7857 if (start < slot->base_gfn) in kvm_mmu_init_memslot_memory_attributes()