Lines Matching +full:embedded-trace-extension

1 /* SPDX-License-Identifier: GPL-2.0-only */
60 * Bit 63 of the memslot generation number is an "update in-progress flag",
74 * memslot update is in-progress, and to prevent cache hits *after* updating
121 * translated to pfn - it is not in slot or failed to
151 #define KVM_ERR_PTR_BAD_PAGE (ERR_PTR(-ENOENT))
163 * Architecture-independent vcpu->requests bit members
164 * Bits 3-7 are reserved for more arch-independent bits.
177 * kick may have set vcpu->mode to EXITING_GUEST_MODE, and so there's no
183 …BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BA…
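A minimal sketch of how an architecture layers its own requests on top of the arch-independent bits, assuming the KVM_ARCH_REQ()/KVM_ARCH_REQ_FLAGS() helpers this BUILD_BUG_ON belongs to (KVM_REQ_FOO and KVM_REQ_BAR are made-up names):

        /* Hypothetical arch header: private requests start at KVM_REQUEST_ARCH_BASE. */
        #define KVM_REQ_FOO     KVM_ARCH_REQ(0)
        #define KVM_REQ_BAR     KVM_ARCH_REQ_FLAGS(1, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)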
305 return !!map->hva; in kvm_vcpu_mapped()
314 * Sometimes a large or cross-page mmio needs to be broken up into separate
330 int vcpu_idx; /* index into kvm->vcpu_array */
419 * non-instrumentable.
429 * we do with user-mode execution. in guest_context_enter_irqoff()
449 * guest_state_enter_irqoff - Fixup state when entering a guest
454 * 1) Trace interrupts on state
460 * non-instrumentable.
482 * non-instrumentable.
521 * guest_state_exit_irqoff - Establish state when returning from guest mode
528 * 3) Trace interrupts off state
532 * non-instrumentable.
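A minimal sketch of how an architecture's run loop might bracket the world switch with these helpers (my_arch_enter_guest() is hypothetical):

        static int my_arch_vcpu_run(struct kvm_vcpu *vcpu)
        {
                int exit_code;

                local_irq_disable();

                guest_state_enter_irqoff();             /* RCU/lockdep/tracing: entering guest */
                exit_code = my_arch_enter_guest(vcpu);  /* non-instrumentable world switch */
                guest_state_exit_irqoff();              /* back in host context */

                local_irq_enable();
                return exit_code;
        }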
550 * The memory barrier ensures a previous write to vcpu->requests cannot in kvm_vcpu_exiting_guest_mode()
551 * be reordered with the read of vcpu->mode. It pairs with the general in kvm_vcpu_exiting_guest_mode()
552 * memory barrier following the write of vcpu->mode in VCPU RUN. in kvm_vcpu_exiting_guest_mode()
555 return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE); in kvm_vcpu_exiting_guest_mode()
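An illustrative sketch of the other side of that pairing in a run loop, assuming a plain smp_mb() as the "general memory barrier following the write of vcpu->mode":

        static bool my_prepare_guest_entry(struct kvm_vcpu *vcpu)
        {
                vcpu->mode = IN_GUEST_MODE;
                smp_mb();       /* pairs with the implicit barrier of the cmpxchg() above */

                if (kvm_request_pending(vcpu)) {
                        vcpu->mode = OUTSIDE_GUEST_MODE;
                        return false;   /* handle requests instead of entering the guest */
                }
                return true;
        }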
562 #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
566 * two embedded nodes for each data structure that it forms a part of.
603 return slot && (slot->flags & KVM_MEM_GUEST_MEMFD); in kvm_slot_can_be_private()
608 return slot->flags & KVM_MEM_LOG_DIRTY_PAGES; in kvm_slot_dirty_track_enabled()
613 return ALIGN(memslot->npages, BITS_PER_LONG) / 8; in kvm_dirty_bitmap_bytes()
620 return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap); in kvm_second_dirty_bitmap()
691 #define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_INTERNAL_MEM_SLOTS)
724 * 7-bit bucket count matches the size of the old id to index array for
743 * Protects the arch-specific fields of struct kvm_memory_slots in
745 * kvm->srcu critical section where acquiring the slots_lock would
752 /* The two memslot sets - active and inactive (per address space) */
773 * created_vcpus is protected by kvm->lock, and is incremented
861 (vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__)
864 kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
866 kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id, \
869 kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
873 kvm->vm_dead = true; in kvm_vm_dead()
879 kvm->vm_bugged = true; in kvm_vm_bugged()
888 if (WARN_ONCE(__ret && !(kvm)->vm_bugged, fmt)) \
897 if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \
916 else if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \
924 WARN_ONCE(vcpu->srcu_depth++, in kvm_vcpu_srcu_read_lock()
925 "KVM: Illegal vCPU srcu_idx LOCK, depth=%d", vcpu->srcu_depth - 1); in kvm_vcpu_srcu_read_lock()
927 vcpu->____srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_vcpu_srcu_read_lock()
932 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->____srcu_idx); in kvm_vcpu_srcu_read_unlock()
935 WARN_ONCE(--vcpu->srcu_depth, in kvm_vcpu_srcu_read_unlock()
936 "KVM: Illegal vCPU srcu_idx UNLOCK, depth=%d", vcpu->srcu_depth); in kvm_vcpu_srcu_read_unlock()
942 return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET); in kvm_dirty_log_manual_protect_and_init_set()
947 return srcu_dereference_check(kvm->buses[idx], &kvm->srcu, in kvm_get_bus()
948 lockdep_is_held(&kvm->slots_lock) || in kvm_get_bus()
949 !refcount_read(&kvm->users_count)); in kvm_get_bus()
954 int num_vcpus = atomic_read(&kvm->online_vcpus); in kvm_get_vcpu()
959 return xa_load(&kvm->vcpu_array, i); in kvm_get_vcpu()
963 xa_for_each_range(&kvm->vcpu_array, idx, vcpup, 0, \
964 (atomic_read(&kvm->online_vcpus) - 1))
975 if (vcpu && vcpu->vcpu_id == id) in kvm_get_vcpu_by_id()
978 if (vcpu->vcpu_id == id) in kvm_get_vcpu_by_id()
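A minimal sketch of walking the xarray-backed vCPU list with kvm_for_each_vcpu() (kicking every vCPU is just an example use):

        static void my_kick_all_vcpus(struct kvm *kvm)
        {
                struct kvm_vcpu *vcpu;
                unsigned long i;

                kvm_for_each_vcpu(i, vcpu, kvm)
                        kvm_vcpu_kick(vcpu);
        }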
1025 return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu, in __kvm_memslots()
1026 lockdep_is_held(&kvm->slots_lock) || in __kvm_memslots()
1027 !refcount_read(&kvm->users_count)); in __kvm_memslots()
1039 return __kvm_memslots(vcpu->kvm, as_id); in kvm_vcpu_memslots()
1044 return RB_EMPTY_ROOT(&slots->gfn_tree); in kvm_memslots_empty()
1050 hash_for_each(slots->id_hash, bkt, memslot, id_node[slots->node_idx]) \
1051 if (WARN_ON_ONCE(!memslot->npages)) { \
1058 int idx = slots->node_idx; in id_to_memslot()
1060 hash_for_each_possible(slots->id_hash, slot, id_node[idx], id) { in id_to_memslot()
1061 if (slot->id == id) in id_to_memslot()
1077 iter->node = rb_next(iter->node); in kvm_memslot_iter_next()
1078 if (!iter->node) in kvm_memslot_iter_next()
1081 iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[iter->slots->node_idx]); in kvm_memslot_iter_next()
1088 int idx = slots->node_idx; in kvm_memslot_iter_start()
1092 iter->slots = slots; in kvm_memslot_iter_start()
1095 * Find the so-called "upper bound" of a key - the first node that has in kvm_memslot_iter_start()
1098 iter->node = NULL; in kvm_memslot_iter_start()
1099 for (tmp = slots->gfn_tree.rb_node; tmp; ) { in kvm_memslot_iter_start()
1101 if (start < slot->base_gfn) { in kvm_memslot_iter_start()
1102 iter->node = tmp; in kvm_memslot_iter_start()
1103 tmp = tmp->rb_left; in kvm_memslot_iter_start()
1105 tmp = tmp->rb_right; in kvm_memslot_iter_start()
1113 if (iter->node) { in kvm_memslot_iter_start()
1119 tmp = rb_prev(iter->node); in kvm_memslot_iter_start()
1121 iter->node = tmp; in kvm_memslot_iter_start()
1124 iter->node = rb_last(&slots->gfn_tree); in kvm_memslot_iter_start()
1127 if (iter->node) { in kvm_memslot_iter_start()
1128 iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[idx]); in kvm_memslot_iter_start()
1135 * In such non-overlapping case the next slot (if it exists) will in kvm_memslot_iter_start()
1139 if (iter->slot->base_gfn + iter->slot->npages <= start) in kvm_memslot_iter_start()
1146 if (!iter->node) in kvm_memslot_iter_is_valid()
1153 return iter->slot->base_gfn < end; in kvm_memslot_iter_is_valid()
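A sketch of visiting every memslot that overlaps a GFN range with this iterator, assuming the kvm_for_each_memslot_in_gfn_range() wrapper built from the start/is_valid/next helpers above:

        static void my_walk_gfn_range(struct kvm_memslots *slots, gfn_t start, gfn_t end)
        {
                struct kvm_memslot_iter iter;

                kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
                        struct kvm_memory_slot *slot = iter.slot;

                        /* @slot may extend beyond [start, end); clamp as needed. */
                }
        }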
1164 * - create a new memory slot
1165 * - delete an existing memory slot
1166 * - modify an existing memory slot
1167 * -- move it in the guest physical memory space
1168 * -- just change its flags
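For reference, these operations are driven from userspace; a hedged sketch of creating such a slot with the KVM_SET_USER_MEMORY_REGION ioctl (vm_fd and host_mem are assumed to come from earlier setup):

        #include <sys/ioctl.h>
        #include <linux/kvm.h>

        static int my_add_slot(int vm_fd, void *host_mem)
        {
                struct kvm_userspace_memory_region region = {
                        .slot            = 0,
                        .flags           = KVM_MEM_LOG_DIRTY_PAGES,
                        .guest_phys_addr = 0x100000,
                        .memory_size     = 2 << 20,     /* 2 MiB */
                        .userspace_addr  = (__u64)(unsigned long)host_mem,
                };

                /* Deleting the slot later is the same ioctl with memory_size == 0. */
                return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
        }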
1251 int __ret = -EFAULT; \
1271 int __ret = -EFAULT; \
1318 * kvm_gpc_init - initialize gfn_to_pfn_cache.
1326 * the cache from MMU notifiers---but not for KVM memslot
1327 * changes!---will also force @vcpu to exit the guest and
1332 * immutable attributes. Note, the cache must be zero-allocated (or zeroed by
1339 * kvm_gpc_activate - prepare a cached kernel mapping and HPA for a given guest
1347 * -EINVAL for a mapping which would cross a page boundary.
1348 * -EFAULT for an untranslatable guest physical address.
1350 * This primes a gfn_to_pfn_cache and links it into the @gpc->kvm's list for
1357 * kvm_gpc_check - check validity of a gfn_to_pfn_cache.
1365 * Callers outside IN_GUEST_MODE context should hold a read lock on @gpc->lock
1370 * still hold a read lock on kvm->srcu for the memslot checks.
1375 * kvm_gpc_refresh - update a previously initialized cache.
1381 * -EINVAL for a mapping which would cross a page boundary.
1382 * -EFAULT for an untranslatable guest physical address.
1393 * kvm_gpc_deactivate - deactivate and unlink a gfn_to_pfn_cache.
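A rough sketch of the gfn_to_pfn_cache lifecycle described above; the exact signatures have changed across kernel versions, so this follows the @vcpu/@usage parameters documented here, uses a host-only cache, and keeps the cache local purely for brevity (normally it lives for the life of the VM):

        static int my_write_shared_page(struct kvm *kvm, gpa_t gpa, const void *src, size_t len)
        {
                struct gfn_to_pfn_cache gpc;
                int ret;

                kvm_gpc_init(&gpc, kvm, NULL, KVM_HOST_USES_PFN);

                ret = kvm_gpc_activate(&gpc, gpa, len);
                if (ret)
                        return ret;

                read_lock(&gpc.lock);
                while (!kvm_gpc_check(&gpc, len)) {
                        read_unlock(&gpc.lock);

                        ret = kvm_gpc_refresh(&gpc, len);
                        if (ret)
                                goto out;

                        read_lock(&gpc.lock);
                }

                memcpy(gpc.khva, src, len);     /* gpc.khva is the cached kernel mapping */
                read_unlock(&gpc.lock);
        out:
                kvm_gpc_deactivate(&gpc);
                return ret;
        }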
1538 return -ENOTSUPP; in kvm_arch_flush_remote_tlbs()
1548 return -EOPNOTSUPP; in kvm_arch_flush_remote_tlbs_range()
1594 return vcpu->arch.waitp; in kvm_arch_vcpu_get_wait()
1596 return &vcpu->wait; in kvm_arch_vcpu_get_wait()
1685 if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages) in try_get_memslot()
1703 int idx = slots->node_idx; in search_memslots()
1706 for (node = slots->gfn_tree.rb_node; node; ) { in search_memslots()
1708 if (gfn >= slot->base_gfn) { in search_memslots()
1709 if (gfn < slot->base_gfn + slot->npages) in search_memslots()
1711 node = node->rb_right; in search_memslots()
1713 node = node->rb_left; in search_memslots()
1724 slot = (struct kvm_memory_slot *)atomic_long_read(&slots->last_used_slot); in ____gfn_to_memslot()
1731 atomic_long_set(&slots->last_used_slot, (unsigned long)slot); in ____gfn_to_memslot()
1758 unsigned long offset = gfn - slot->base_gfn; in __gfn_to_hva_memslot()
1759 offset = array_index_nospec(offset, slot->npages); in __gfn_to_hva_memslot()
1760 return slot->userspace_addr + offset * PAGE_SIZE; in __gfn_to_hva_memslot()
1765 return gfn_to_memslot(kvm, gfn)->id; in memslot_id()
1771 gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT; in hva_to_gfn_memslot()
1773 return slot->base_gfn + gfn_offset; in hva_to_gfn_memslot()
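As a worked example with illustrative numbers: for a slot with base_gfn = 0x100, npages = 0x200 and userspace_addr = 0x7f0000000000, __gfn_to_hva_memslot(slot, 0x180) yields 0x7f0000000000 + (0x180 - 0x100) * 4096 = 0x7f0000080000 (with 4 KiB pages), and hva_to_gfn_memslot(0x7f0000080000, slot) maps that hva back to 0x100 + 0x80 = gfn 0x180.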
1900 KVM_STATS_BASE_POW10, -9)
1904 KVM_STATS_BASE_POW10, -9, sz, bsz)
1908 KVM_STATS_BASE_POW10, -9, sz)
1938 * kvm_stats_linear_hist_update() - Update bucket value for linear histogram
1951 index = min(index, size - 1); in kvm_stats_linear_hist_update()
1956 * kvm_stats_log_hist_update() - Update bucket value for logarithmic histogram
1967 index = min(index, size - 1); in kvm_stats_log_hist_update()
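A one-line usage sketch: feed a nanosecond sample into a generic logarithmic histogram stat via the KVM_STATS_LOG_HIST_UPDATE() wrapper (wait_ns is an illustrative variable):

        KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_wait_hist, wait_ns);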
1985 if (unlikely(kvm->mmu_invalidate_in_progress)) in mmu_invalidate_retry()
1991 * that the caller either sees the old (non-zero) value of in mmu_invalidate_retry()
1995 * PowerPC Book3s HV KVM calls this under a per-page lock rather in mmu_invalidate_retry()
1996 * than under kvm->mmu_lock, for scalability, so can't rely on in mmu_invalidate_retry()
1997 * kvm->mmu_lock to keep things ordered. in mmu_invalidate_retry()
2000 if (kvm->mmu_invalidate_seq != mmu_seq) in mmu_invalidate_retry()
2009 lockdep_assert_held(&kvm->mmu_lock); in mmu_invalidate_retry_gfn()
2011 * If mmu_invalidate_in_progress is non-zero, then the range maintained in mmu_invalidate_retry_gfn()
2016 if (unlikely(kvm->mmu_invalidate_in_progress)) { in mmu_invalidate_retry_gfn()
2021 if (WARN_ON_ONCE(kvm->mmu_invalidate_range_start == INVALID_GPA || in mmu_invalidate_retry_gfn()
2022 kvm->mmu_invalidate_range_end == INVALID_GPA)) in mmu_invalidate_retry_gfn()
2025 if (gfn >= kvm->mmu_invalidate_range_start && in mmu_invalidate_retry_gfn()
2026 gfn < kvm->mmu_invalidate_range_end) in mmu_invalidate_retry_gfn()
2030 if (kvm->mmu_invalidate_seq != mmu_seq) in mmu_invalidate_retry_gfn()
2036 * This lockless version of the range-based retry check *must* be paired with a
2038 * use only as a pre-check to avoid contending mmu_lock. This version *will*
2046 * Use READ_ONCE() to ensure the in-progress flag and sequence counter in mmu_invalidate_retry_gfn_unsafe()
2050 * the 1=>0 transition of in-progress, i.e. getting false negatives in mmu_invalidate_retry_gfn_unsafe()
2053 if (unlikely(READ_ONCE(kvm->mmu_invalidate_in_progress)) && in mmu_invalidate_retry_gfn_unsafe()
2054 gfn >= kvm->mmu_invalidate_range_start && in mmu_invalidate_retry_gfn_unsafe()
2055 gfn < kvm->mmu_invalidate_range_end) in mmu_invalidate_retry_gfn_unsafe()
2058 return READ_ONCE(kvm->mmu_invalidate_seq) != mmu_seq; in mmu_invalidate_retry_gfn_unsafe()
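A hedged sketch of the canonical retry dance in an arch page fault handler (variable names, the retry label and the mmu_lock flavor are illustrative; the pfn helpers are as of this header version):

        mmu_seq = kvm->mmu_invalidate_seq;
        smp_rmb();

        pfn = gfn_to_pfn_memslot(slot, gfn);    /* may sleep and run MMU notifiers */

        write_lock(&kvm->mmu_lock);
        if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn)) {
                /* The range was invalidated in the meantime: drop the pfn and retry. */
                write_unlock(&kvm->mmu_lock);
                kvm_release_pfn_clean(pfn);
                goto retry;
        }
        /* ... install the translation for @gfn using @pfn ... */
        write_unlock(&kvm->mmu_lock);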
2064 #define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */
2097 return -EINVAL; in kvm_irqfd()
2119 set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests); in __kvm_make_request()
2126 * vcpu->requests. The vCPU won't clear the request, so it will stay in kvm_make_request()
2137 return READ_ONCE(vcpu->requests); in kvm_request_pending()
2142 return test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests); in kvm_test_request()
2147 clear_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests); in kvm_clear_request()
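A minimal sketch of the producer/consumer pattern for these request helpers (KVM_REQ_FOO and my_handle_foo() are made up):

        /* Producer: post the request and kick the vCPU out of guest mode. */
        kvm_make_request(KVM_REQ_FOO, vcpu);
        kvm_vcpu_kick(vcpu);

        /* Consumer, in the vCPU run loop: */
        if (kvm_request_pending(vcpu)) {
                if (kvm_check_request(KVM_REQ_FOO, vcpu))
                        my_handle_foo(vcpu);
        }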
2187 * create is called holding kvm->lock and any operations not suitable
2195 * outside of holding kvm->lock.
2214 * the VM. kvm->lock is held.
2238 vcpu->spin_loop.in_spin_loop = val; in kvm_vcpu_set_in_spin_loop()
2242 vcpu->spin_loop.dy_eligible = val; in kvm_vcpu_set_dy_eligible()
2258 return (memslot && memslot->id < KVM_USER_MEM_SLOTS && in kvm_is_visible_memslot()
2259 !(memslot->flags & KVM_MEMSLOT_INVALID)); in kvm_is_visible_memslot()
2283 return vcpu->valid_wakeup; in vcpu_valid_wakeup()
2311 return -ENOIOCTLCMD; in kvm_arch_vcpu_async_ioctl()
2335 vcpu->run->exit_reason = KVM_EXIT_INTR; in kvm_handle_signal_exit()
2336 vcpu->stat.signal_exits++; in kvm_handle_signal_exit()
2345 * kvm_account_pgtable_pages() is thread-safe because mod_lruvec_page_state()
2346 * is thread-safe.
2368 vcpu->run->exit_reason = KVM_EXIT_MEMORY_FAULT; in kvm_prepare_memory_fault_exit()
2369 vcpu->run->memory_fault.gpa = gpa; in kvm_prepare_memory_fault_exit()
2370 vcpu->run->memory_fault.size = size; in kvm_prepare_memory_fault_exit()
2373 vcpu->run->memory_fault.flags = 0; in kvm_prepare_memory_fault_exit()
2375 vcpu->run->memory_fault.flags |= KVM_MEMORY_EXIT_FLAG_PRIVATE; in kvm_prepare_memory_fault_exit()
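A short sketch of how a fault path might use this before bailing to userspace (the fault struct is illustrative):

        kvm_prepare_memory_fault_exit(vcpu, fault->gpa, PAGE_SIZE,
                                      fault->write, fault->exec, fault->is_private);
        return -EFAULT;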
2381 return xa_to_value(xa_load(&kvm->mem_attr_array, gfn)); in kvm_get_memory_attributes()
2412 return -EIO; in kvm_gmem_get_pfn()