#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

#ifndef KVM_MMIO_SIZE
#define KVM_MMIO_SIZE 8
#endif

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH          0
#define KVM_REQ_MIGRATE_TIMER      1
#define KVM_REQ_REPORT_TPR_ACCESS  2
#define KVM_REQ_MMU_RELOAD         3
#define KVM_REQ_TRIPLE_FAULT       4
#define KVM_REQ_PENDING_TIMER      5
#define KVM_REQ_UNHALT             6
#define KVM_REQ_MMU_SYNC           7
#define KVM_REQ_CLOCK_UPDATE       8
#define KVM_REQ_KICK               9
#define KVM_REQ_DEACTIVATE_FPU    10
#define KVM_REQ_EVENT             11
#define KVM_REQ_APF_HALT          12
#define KVM_REQ_STEAL_UPDATE      13
#define KVM_REQ_NMI               14
#define KVM_REQ_IMMEDIATE_EXIT    15
#define KVM_REQ_PMU               16
#define KVM_REQ_PMI               17

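/*
 * Illustrative sketch (not part of the original header): a request bit
 * above is raised with kvm_make_request() and consumed in the vcpu run
 * loop with kvm_check_request(), both defined at the bottom of this
 * file. flush_guest_tlb() below is a hypothetical arch handler:
 *
 *	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 *	...
 *	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
 *		flush_guest_tlb(vcpu);
 */
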
#define KVM_USERSPACE_IRQ_SOURCE_ID	0

struct kvm;
struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;

struct kvm_io_range {
	gpa_t addr;
	int len;
	struct kvm_io_device *dev;
};

struct kvm_io_bus {
	int                   dev_count;
#define NR_IOBUS_DEVS 300
	struct kvm_io_range range[NR_IOBUS_DEVS];
};

enum kvm_bus {
	KVM_MMIO_BUS,
	KVM_PIO_BUS,
	KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val);
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
		    void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev);

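/*
 * Registration sketch, assuming the caller holds slots_lock as the
 * implementation in virt/kvm/kvm_main.c expects; dev would be a
 * struct kvm_io_device embedded in the emulated device:
 *
 *	mutex_lock(&kvm->slots_lock);
 *	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, len, dev);
 *	mutex_unlock(&kvm->slots_lock);
 */
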
#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
	struct work_struct work;
	struct list_head link;
	struct list_head queue;
	struct kvm_vcpu *vcpu;
	struct mm_struct *mm;
	gva_t gva;
	unsigned long addr;
	struct kvm_arch_async_pf arch;
	struct page *page;
	bool done;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
		       struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

enum {
	OUTSIDE_GUEST_MODE,
	IN_GUEST_MODE,
	EXITING_GUEST_MODE
};

struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int cpu;
	int vcpu_id;
	int srcu_idx;
	int mode;
	unsigned long requests;
	unsigned long guest_debug;

	struct mutex mutex;
	struct kvm_run *run;

	int fpu_active;
	int guest_fpu_loaded, guest_xcr0_loaded;
	wait_queue_head_t wq;
	struct pid *pid;
	int sigset_active;
	sigset_t sigset;
	struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_size;
	int mmio_index;
	unsigned char mmio_data[KVM_MMIO_SIZE];
	gpa_t mmio_phys_addr;
#endif

#ifdef CONFIG_KVM_ASYNC_PF
	struct {
		u32 queued;
		struct list_head queue;
		struct list_head done;
		spinlock_t lock;
	} async_pf;
#endif

	struct kvm_vcpu_arch arch;
};

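/*
 * Atomically advance IN_GUEST_MODE to EXITING_GUEST_MODE and return the
 * previous mode; callers (cf. make_all_cpus_request() in kvm_main.c) use
 * the old value to decide whether the target CPU still needs an IPI.
 */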
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support arbitrarily long bitmaps.
 * This number must be chosen so that such limits are never exceeded.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_lpage_info {
	unsigned long rmap_pde;
	int write_count;
};

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long flags;
	unsigned long *rmap;
	unsigned long *dirty_bitmap;
	unsigned long *dirty_bitmap_head;
	unsigned long nr_dirty_pages;
	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
	unsigned long userspace_addr;
	int user_alloc;
	int id;
};

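/*
 * Worked example: with BITS_PER_LONG == 64, a 100-page memslot needs
 * ALIGN(100, 64) / 8 == 128 / 8 == 16 bytes of dirty bitmap.
 */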
static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}

struct kvm_kernel_irq_routing_entry {
	u32 gsi;
	u32 type;
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		   struct kvm *kvm, int irq_source_id, int level);
	union {
		struct {
			unsigned irqchip;
			unsigned pin;
		} irqchip;
		struct msi_msg msi;
	};
	struct hlist_node link;
};

#ifdef __KVM_HAVE_IOAPIC

struct kvm_irq_routing_table {
	int chip[KVM_NR_IRQCHIPS][KVM_IOAPIC_NUM_PINS];
	struct kvm_kernel_irq_routing_entry *rt_entries;
	u32 nr_rt_entries;
	/*
	 * Array indexed by gsi. Each entry contains a list of the irq
	 * chips the gsi is connected to.
	 */
	struct hlist_head map[0];
};

#else

struct kvm_irq_routing_table {};

#endif

#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif

/*
 * Note:
 * memslots are no longer sorted by id; use id_to_memslot()
 * to look up a memslot by its id.
 */
struct kvm_memslots {
	u64 generation;
	struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
	/* The mapping table from slot id to the index in memslots[]. */
	int id_to_index[KVM_MEM_SLOTS_NUM];
};

struct kvm {
	spinlock_t mmu_lock;
	struct mutex slots_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	struct kvm_memslots *memslots;
	struct srcu_struct srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	u32 bsp_vcpu_id;
#endif
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	atomic_t online_vcpus;
	int last_boosted_vcpu;
	struct list_head vm_list;
	struct mutex lock;
	struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
	struct {
		spinlock_t        lock;
		struct list_head  items;
	} irqfds;
	struct list_head ioeventfds;
#endif
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
	spinlock_t ring_lock;
	struct list_head coalesced_zones;
#endif

	struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	/*
	 * Update side is protected by irq_lock and,
	 * if configured, irqfds.lock.
	 */
	struct kvm_irq_routing_table __rcu *irq_routing;
	struct hlist_head mask_notifier_list;
	struct hlist_head irq_ack_notifier_list;
#endif

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
#endif
	long tlbs_dirty;
};

/* The guest did something we don't support. */
#define pr_unimpl(vcpu, fmt, ...)					\
	pr_err_ratelimited("kvm: %i: cpu%i " fmt,			\
			   current->tgid, (vcpu)->vcpu_id, ## __VA_ARGS__)

#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
	smp_rmb();
	return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
	for (idx = 0; \
	     idx < atomic_read(&kvm->online_vcpus) && \
	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
	     idx++)
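
/*
 * Usage sketch for the iterator above; kvm_vcpu_kick() is declared
 * later in this header:
 *
 *	struct kvm_vcpu *vcpu;
 *	int i;
 *
 *	kvm_for_each_vcpu(i, vcpu, kvm)
 *		kvm_vcpu_kick(vcpu);
 */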

#define kvm_for_each_memslot(memslot, slots)	\
	for (memslot = &slots->memslots[0];	\
	      memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
		memslot++)

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
		  struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new);

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return rcu_dereference_check(kvm->memslots,
			srcu_read_lock_held(&kvm->srcu)
			|| lockdep_is_held(&kvm->slots_lock));
}
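
/*
 * Readers bracket memslot access with SRCU; a minimal read-side sketch
 * using kvm_memslots() above and id_to_memslot() just below:
 *
 *	int idx = srcu_read_lock(&kvm->srcu);
 *	struct kvm_memory_slot *slot = id_to_memslot(kvm_memslots(kvm), id);
 *	...
 *	srcu_read_unlock(&kvm->srcu, idx);
 */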

static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
	int index = slots->id_to_index[id];
	struct kvm_memory_slot *slot;

	slot = &slots->memslots[index];

	WARN_ON(slot->id != id);
	return slot;
}

#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }

extern struct page *bad_page;
extern struct page *fault_page;

extern pfn_t bad_pfn;
extern pfn_t fault_pfn;

int is_error_page(struct page *page);
int is_error_pfn(pfn_t pfn);
int is_hwpoison_pfn(pfn_t pfn);
int is_fault_pfn(pfn_t pfn);
int is_noslot_pfn(pfn_t pfn);
int is_invalid_pfn(pfn_t pfn);
int kvm_is_error_hva(unsigned long addr);
int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc);
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				struct kvm_memory_slot old,
				struct kvm_userspace_memory_region *mem,
				int user_alloc);
void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc);
void kvm_disable_largepages(void);
void kvm_arch_flush_shadow(struct kvm *kvm);

int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
			    int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr);
pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
		       bool write_fault, bool *writable);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
		      bool *writable);
pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
			 struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_release_pfn_dirty(pfn_t pfn);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
			     gfn_t gfn);
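
/*
 * For repeated accesses to one guest location, the *_cached variants
 * above avoid a fresh gfn->hva translation per call. A minimal sketch,
 * assuming a caller-owned cache and an in-range gpa:
 *
 *	struct gfn_to_hva_cache ghc;
 *
 *	kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa);
 *	kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val));
 */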

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);

int kvm_dev_ioctl_check_extension(long ext);

int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				struct kvm_dirty_log *log);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				    struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
int kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);

void kvm_free_physmem(struct kvm *kvm);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	kfree(kvm);
}
#endif

int kvm_arch_init_vm(struct kvm *kvm);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_free_all_assigned_devices(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

int kvm_is_mmio_pfn(pfn_t pfn);

struct kvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned gsi;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

struct kvm_assigned_dev_kernel {
	struct kvm_irq_ack_notifier ack_notifier;
	struct list_head list;
	int assigned_dev_id;
	int host_segnr;
	int host_busnr;
	int host_devfn;
	unsigned int entries_nr;
	int host_irq;
	bool host_irq_disabled;
	struct msix_entry *host_msix_entries;
	int guest_irq;
	struct msix_entry *guest_msix_entries;
	unsigned long irq_requested_type;
	int irq_source_id;
	int flags;
	struct pci_dev *dev;
	struct kvm *kvm;
	spinlock_t intx_lock;
	char irq_name[32];
	struct pci_saved_state *pci_saved_state;
};

struct kvm_irq_mask_notifier {
	void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
	int irq;
	struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
			     bool mask);

#ifdef __KVM_HAVE_IOAPIC
void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
				   union kvm_ioapic_redirect_entry *entry,
				   unsigned long *deliver_bitmask);
#endif
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
		int irq_source_id, int level);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

/* For vcpu->arch.iommu_flags */
#define KVM_IOMMU_CACHE_COHERENCY	0x1

#ifdef CONFIG_IOMMU_API
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
		      struct kvm_assigned_dev_kernel *assigned_dev);
int kvm_deassign_device(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *assigned_dev);
#else /* CONFIG_IOMMU_API */
static inline int kvm_iommu_map_pages(struct kvm *kvm,
				      struct kvm_memory_slot *slot)
{
	return 0;
}

static inline int kvm_iommu_map_guest(struct kvm *kvm)
{
	return -ENODEV;
}

static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
{
	return 0;
}

static inline int kvm_assign_device(struct kvm *kvm,
		struct kvm_assigned_dev_kernel *assigned_dev)
{
	return 0;
}

static inline int kvm_deassign_device(struct kvm *kvm,
		struct kvm_assigned_dev_kernel *assigned_dev)
{
	return 0;
}
#endif /* CONFIG_IOMMU_API */

static inline void kvm_guest_enter(void)
{
	BUG_ON(preemptible());
	account_system_vtime(current);
	current->flags |= PF_VCPU;
	/* KVM does not hold any references to rcu protected data when it
	 * switches CPU into guest mode. In fact switching to guest mode
	 * is very similar to exiting to userspace from the rcu point of
	 * view. In addition the CPU may stay in guest mode for quite a
	 * long time (up to one time slice). Let's treat guest mode as a
	 * quiescent state, just like we do with user-mode execution.
	 */
	rcu_virt_note_context_switch(smp_processor_id());
}

static inline void kvm_guest_exit(void)
{
	account_system_vtime(current);
	current->flags &= ~PF_VCPU;
}
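
/*
 * Arch run loops bracket the world switch with these helpers, with
 * preemption already disabled (hence the BUG_ON in kvm_guest_enter());
 * roughly, per the x86 vcpu_enter_guest() pattern:
 *
 *	preempt_disable();
 *	kvm_guest_enter();
 *	... enter the guest ...
 *	kvm_guest_exit();
 *	preempt_enable();
 */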

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_memslot(kvm, gfn)->id;
}

static inline unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
					       gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}

static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
	set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	enum kvm_stat_kind kind;
	struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
{
	if (unlikely(vcpu->kvm->mmu_notifier_count))
		return 1;
	/*
	 * Both reads happen under the mmu_lock and both values are
	 * modified under mmu_lock, so there's no need for an smp_rmb()
	 * in between; otherwise mmu_notifier_count would have to be
	 * read before mmu_notifier_seq, see the
	 * mmu_notifier_invalidate_range_end() write side.
	 */
	if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
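
/*
 * Typical use in a fault path (sketch, cf. arch/x86/kvm/mmu.c): sample
 * mmu_notifier_seq before the possibly-sleeping gfn_to_pfn(), then
 * recheck under mmu_lock before installing the mapping:
 *
 *	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(vcpu->kvm, gfn);
 *	spin_lock(&vcpu->kvm->mmu_lock);
 *	if (mmu_notifier_retry(vcpu, mmu_seq))
 *		goto out_unlock;
 *	... install the mapping ...
 */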
#endif

#ifdef CONFIG_HAVE_KVM_IRQCHIP

#define KVM_MAX_IRQ_ROUTES 1024

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *entries,
			unsigned nr,
			unsigned flags);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm,
					  struct kvm_irq_routing_table *irq_rt)
{
	rcu_assign_pointer(kvm->irq_routing, irq_rt);
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}
#endif

#ifdef __KVM_HAVE_DEVICE_ASSIGNMENT

long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
				  unsigned long arg);

#else

static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
						unsigned long arg)
{
	return -ENOTTY;
}

#endif

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	set_bit(req, &vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
	if (test_bit(req, &vcpu->requests)) {
		clear_bit(req, &vcpu->requests);
		return true;
	} else {
		return false;
	}
}

#endif