xref: /linux/include/linux/kvm_types.h (revision e2aa39b368bb147afe8f6bd63d962494354f6498)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 
3 #ifndef __KVM_TYPES_H__
4 #define __KVM_TYPES_H__
5 
6 #include <linux/bits.h>
7 #include <linux/export.h>
8 #include <linux/types.h>
9 #include <asm/kvm_types.h>
10 
#ifdef KVM_SUB_MODULES
/*
 * KVM is built with submodules: restrict exports so that only the named
 * submodules (and, for the non-INTERNAL variant, kvm.ko itself) may link
 * against the symbol, instead of exporting it to all modules.
 */
#define EXPORT_SYMBOL_FOR_KVM_INTERNAL(symbol) \
	EXPORT_SYMBOL_FOR_MODULES(symbol, __stringify(KVM_SUB_MODULES))
#define EXPORT_SYMBOL_FOR_KVM(symbol) \
	EXPORT_SYMBOL_FOR_MODULES(symbol, "kvm," __stringify(KVM_SUB_MODULES))
#else
/* No submodules exist, so internal-only exports are unnecessary. */
#define EXPORT_SYMBOL_FOR_KVM_INTERNAL(symbol)
/*
 * Allow architectures to provide a custom EXPORT_SYMBOL_FOR_KVM, but only
 * if there are no submodules, e.g. to allow suppressing exports if KVM=m, but
 * kvm.ko won't actually be built (due to lack of at least one submodule).
 */
#ifndef EXPORT_SYMBOL_FOR_KVM
#if IS_MODULE(CONFIG_KVM)
#define EXPORT_SYMBOL_FOR_KVM(symbol) EXPORT_SYMBOL_FOR_MODULES(symbol, "kvm")
#else
/* KVM is built-in (or absent); the export is a no-op. */
#define EXPORT_SYMBOL_FOR_KVM(symbol)
#endif /* IS_MODULE(CONFIG_KVM) */
#endif /* EXPORT_SYMBOL_FOR_KVM */
#endif /* KVM_SUB_MODULES */
31 
32 #ifndef __ASSEMBLER__
33 
34 #include <linux/mutex.h>
35 #include <linux/spinlock_types.h>
36 
/*
 * Opaque forward declarations: this header only needs pointers to these
 * types, so the full definitions (elsewhere in KVM) aren't pulled in.
 */
struct kvm;
struct kvm_async_pf;
struct kvm_device_ops;
struct kvm_gfn_range;
struct kvm_interrupt;
struct kvm_irq_routing_table;
struct kvm_memory_slot;
struct kvm_one_reg;
struct kvm_run;
struct kvm_userspace_memory_region;
struct kvm_vcpu;
struct kvm_vcpu_init;
struct kvm_memslots;

enum kvm_mr_change;
52 
53 /*
54  * Address types:
55  *
56  *  gva - guest virtual address
57  *  gpa - guest physical address
58  *  gfn - guest frame number
59  *  hva - host virtual address
60  *  hpa - host physical address
61  *  hfn - host frame number
62  */
63 
typedef unsigned long  gva_t;	/* guest virtual address */
typedef u64            gpa_t;	/* guest physical address */
typedef u64            gfn_t;	/* guest frame number */

/* All-bits-set sentinel denoting "no/invalid guest physical address". */
#define INVALID_GPA	(~(gpa_t)0)

typedef unsigned long  hva_t;	/* host virtual address */
typedef u64            hpa_t;	/* host physical address */
typedef u64            hfn_t;	/* host frame number */

/* KVM's canonical type for a host page frame number. */
typedef hfn_t kvm_pfn_t;
75 
/*
 * Cached translation of a guest physical address to a host virtual address.
 * NOTE(review): @generation presumably mirrors the memslots generation so
 * stale entries can be detected after memslot changes — confirm against the
 * users in virt/kvm/kvm_main.c.
 */
struct gfn_to_hva_cache {
	u64 generation;			/* generation the cache was filled at */
	gpa_t gpa;			/* guest physical address being cached */
	unsigned long hva;		/* cached host virtual address */
	unsigned long len;		/* length of the cached range, in bytes */
	struct kvm_memory_slot *memslot; /* memslot backing @gpa */
};
83 
/*
 * Cached translation of a guest physical address to a host pfn, with a
 * kernel mapping (@khva) of the backing page.
 * NOTE(review): locking rules (@lock vs. @refresh_lock) and the exact
 * meaning of @active vs. @valid are defined by virt/kvm/pfncache.c —
 * verify there before relying on them.
 */
struct gfn_to_pfn_cache {
	u64 generation;			/* generation the cache was filled at */
	gpa_t gpa;			/* guest physical address being cached */
	unsigned long uhva;		/* userspace virtual address of the page */
	struct kvm_memory_slot *memslot; /* memslot backing @gpa */
	struct kvm *kvm;		/* VM this cache belongs to */
	struct list_head list;		/* linkage on a per-VM list */
	rwlock_t lock;			/* protects readers of the cached state */
	struct mutex refresh_lock;	/* serializes cache refresh operations */
	void *khva;			/* kernel mapping of the cached page */
	kvm_pfn_t pfn;			/* cached host page frame number */
	bool active;			/* cache has been initialized/activated */
	bool valid;			/* cached translation is currently usable */
};
98 
99 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
100 /*
101  * Memory caches are used to preallocate memory ahead of various MMU flows,
102  * e.g. page fault handlers.  Gracefully handling allocation failures deep in
103  * MMU flows is problematic, as is triggering reclaim, I/O, etc... while
104  * holding MMU locks.  Note, these caches act more like prefetch buffers than
105  * classical caches, i.e. objects are not returned to the cache on being freed.
106  *
107  * The @capacity field and @objects array are lazily initialized when the cache
108  * is topped up (__kvm_mmu_topup_memory_cache()).
109  */
struct kvm_mmu_memory_cache {
	gfp_t gfp_zero;			/* GFP flag(s) for zero-initialized allocs */
	gfp_t gfp_custom;		/* extra arch-specified GFP flags, if any */
	u64 init_value;			/* value to fill newly allocated objects with */
	struct kmem_cache *kmem_cache;	/* slab cache to allocate from; NULL
					 * presumably means the page allocator —
					 * confirm in virt/kvm/kvm_main.c */
	int capacity;			/* size of @objects, set on first topup */
	int nobjs;			/* number of objects currently cached */
	void **objects;			/* lazily allocated array of cached objects */
};
119 #endif
120 
/* Number of buckets in each halt-polling latency histogram below. */
#define HALT_POLL_HIST_COUNT			32

/* Per-VM statistics common to all architectures. */
struct kvm_vm_stat_generic {
	u64 remote_tlb_flush;		/* remote TLB flushes performed */
	u64 remote_tlb_flush_requests;	/* remote TLB flushes requested */
};
127 
/* Per-vCPU statistics common to all architectures (halt/halt-polling). */
struct kvm_vcpu_stat_generic {
	u64 halt_successful_poll;	/* polls that avoided a full block */
	u64 halt_attempted_poll;	/* halt-polling attempts */
	u64 halt_poll_invalid;		/* polls aborted as invalid */
	u64 halt_wakeup;		/* wakeups from a halted state */
	u64 halt_poll_success_ns;	/* total ns spent in successful polls */
	u64 halt_poll_fail_ns;		/* total ns spent in failed polls */
	u64 halt_wait_ns;		/* total ns spent blocked in halt */
	u64 halt_poll_success_hist[HALT_POLL_HIST_COUNT]; /* latency histogram */
	u64 halt_poll_fail_hist[HALT_POLL_HIST_COUNT];    /* latency histogram */
	u64 halt_wait_hist[HALT_POLL_HIST_COUNT];         /* latency histogram */
	u64 blocking;			/* nonzero while the vCPU is blocking */
};
141 
142 #define KVM_STATS_NAME_SIZE	48
143 #endif /* !__ASSEMBLER__ */
144 
145 #endif /* __KVM_TYPES_H__ */
146