xref: /src/sys/amd64/vmm/vmm.c (revision 002c50ea23b99b415b1b392d3dd5ea6223c3a74c)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2011 NetApp, Inc.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include "opt_bhyve_snapshot.h"
30 
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/kernel.h>
34 #include <sys/sysctl.h>
35 #include <sys/malloc.h>
36 #include <sys/pcpu.h>
37 #include <sys/lock.h>
38 #include <sys/mutex.h>
39 #include <sys/proc.h>
40 #include <sys/rwlock.h>
41 #include <sys/sched.h>
42 #include <sys/smp.h>
43 #include <sys/sx.h>
44 #include <sys/vnode.h>
45 
46 #include <vm/vm.h>
47 #include <vm/vm_param.h>
48 #include <vm/vm_extern.h>
49 #include <vm/vm_object.h>
50 #include <vm/vm_page.h>
51 #include <vm/pmap.h>
52 #include <vm/vm_map.h>
53 #include <vm/vm_pager.h>
54 #include <vm/vm_kern.h>
55 #include <vm/vnode_pager.h>
56 #include <vm/swap_pager.h>
57 #include <vm/uma.h>
58 
59 #include <machine/cpu.h>
60 #include <machine/pcb.h>
61 #include <machine/smp.h>
62 #include <machine/md_var.h>
63 #include <x86/psl.h>
64 #include <x86/apicreg.h>
65 #include <x86/ifunc.h>
66 
67 #include <machine/vmm.h>
68 #include <machine/vmm_instruction_emul.h>
69 #include <machine/vmm_snapshot.h>
70 
71 #include <dev/vmm/vmm_dev.h>
72 #include <dev/vmm/vmm_ktr.h>
73 #include <dev/vmm/vmm_mem.h>
74 #include <dev/vmm/vmm_vm.h>
75 
76 #include "vmm_ioport.h"
77 #include "vmm_host.h"
78 #include "vmm_mem.h"
79 #include "vmm_util.h"
80 #include "vatpic.h"
81 #include "vatpit.h"
82 #include "vhpet.h"
83 #include "vioapic.h"
84 #include "vlapic.h"
85 #include "vpmtmr.h"
86 #include "vrtc.h"
87 #include "vmm_stat.h"
88 #include "vmm_lapic.h"
89 
90 #include "io/ppt.h"
91 #include "io/iommu.h"
92 
93 struct vlapic;
94 
95 #define	VMM_CTR0(vcpu, format)						\
96 	VCPU_CTR0((vcpu)->vm, (vcpu)->vcpuid, format)
97 
98 #define	VMM_CTR1(vcpu, format, p1)					\
99 	VCPU_CTR1((vcpu)->vm, (vcpu)->vcpuid, format, p1)
100 
101 #define	VMM_CTR2(vcpu, format, p1, p2)					\
102 	VCPU_CTR2((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2)
103 
104 #define	VMM_CTR3(vcpu, format, p1, p2, p3)				\
105 	VCPU_CTR3((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2, p3)
106 
107 #define	VMM_CTR4(vcpu, format, p1, p2, p3, p4)				\
108 	VCPU_CTR4((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2, p3, p4)
109 
110 static void	vmmops_panic(void);
111 
112 static void
113 vmmops_panic(void)
114 {
115 	panic("vmm_ops func called when !vmm_is_intel() && !vmm_is_svm()");
116 }
117 
118 #define	DEFINE_VMMOPS_IFUNC(ret_type, opname, args)			\
119     DEFINE_IFUNC(, ret_type, vmmops_##opname, args)			\
120     {									\
121     	if (vmm_is_intel())						\
122     		return (vmm_ops_intel.opname);				\
123     	else if (vmm_is_svm())						\
124     		return (vmm_ops_amd.opname);				\
125     	else								\
126     		return ((ret_type (*)args)vmmops_panic);		\
127     }
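
/*
 * A note on the dispatch above: DEFINE_IFUNC, from <x86/ifunc.h>, emits a
 * resolver that the kernel linker runs once when it binds the symbol, so
 * the Intel (VT-x) vs. AMD (SVM) choice is made a single time and every
 * subsequent vmmops_*() call is a direct call into the selected backend,
 * with no per-call branch.
 */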
128 
129 DEFINE_VMMOPS_IFUNC(int, modinit, (int ipinum))
130 DEFINE_VMMOPS_IFUNC(int, modcleanup, (void))
131 DEFINE_VMMOPS_IFUNC(void, modsuspend, (void))
132 DEFINE_VMMOPS_IFUNC(void, modresume, (void))
133 DEFINE_VMMOPS_IFUNC(void *, init, (struct vm *vm, struct pmap *pmap))
134 DEFINE_VMMOPS_IFUNC(int, run, (void *vcpui, register_t rip, struct pmap *pmap,
135     struct vm_eventinfo *info))
136 DEFINE_VMMOPS_IFUNC(void, cleanup, (void *vmi))
137 DEFINE_VMMOPS_IFUNC(void *, vcpu_init, (void *vmi, struct vcpu *vcpu,
138     int vcpu_id))
139 DEFINE_VMMOPS_IFUNC(void, vcpu_cleanup, (void *vcpui))
140 DEFINE_VMMOPS_IFUNC(int, getreg, (void *vcpui, int num, uint64_t *retval))
141 DEFINE_VMMOPS_IFUNC(int, setreg, (void *vcpui, int num, uint64_t val))
142 DEFINE_VMMOPS_IFUNC(int, getdesc, (void *vcpui, int num, struct seg_desc *desc))
143 DEFINE_VMMOPS_IFUNC(int, setdesc, (void *vcpui, int num, struct seg_desc *desc))
144 DEFINE_VMMOPS_IFUNC(int, getcap, (void *vcpui, int num, int *retval))
145 DEFINE_VMMOPS_IFUNC(int, setcap, (void *vcpui, int num, int val))
146 DEFINE_VMMOPS_IFUNC(struct vmspace *, vmspace_alloc, (vm_offset_t min,
147     vm_offset_t max))
148 DEFINE_VMMOPS_IFUNC(void, vmspace_free, (struct vmspace *vmspace))
149 DEFINE_VMMOPS_IFUNC(struct vlapic *, vlapic_init, (void *vcpui))
150 DEFINE_VMMOPS_IFUNC(void, vlapic_cleanup, (struct vlapic *vlapic))
151 #ifdef BHYVE_SNAPSHOT
152 DEFINE_VMMOPS_IFUNC(int, vcpu_snapshot, (void *vcpui,
153     struct vm_snapshot_meta *meta))
154 DEFINE_VMMOPS_IFUNC(int, restore_tsc, (void *vcpui, uint64_t now))
155 #endif
156 
157 SDT_PROVIDER_DEFINE(vmm);
158 
159 static MALLOC_DEFINE(M_VM, "vm", "vm");
160 
161 /* statistics */
162 static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");
163 
164 SYSCTL_DECL(_hw_vmm);
165 
166 /*
167  * Halt the guest if all vcpus are executing a HLT instruction with
168  * interrupts disabled.
169  */
170 static int halt_detection_enabled = 1;
171 SYSCTL_INT(_hw_vmm, OID_AUTO, halt_detection, CTLFLAG_RDTUN,
172     &halt_detection_enabled, 0,
173     "Halt VM if all vcpus execute HLT with interrupts disabled");
174 
175 static int trace_guest_exceptions;
176 SYSCTL_INT(_hw_vmm, OID_AUTO, trace_guest_exceptions, CTLFLAG_RDTUN,
177     &trace_guest_exceptions, 0,
178     "Trap into hypervisor on all guest exceptions and reflect them back");
179 
180 static int trap_wbinvd;
181 SYSCTL_INT(_hw_vmm, OID_AUTO, trap_wbinvd, CTLFLAG_RDTUN, &trap_wbinvd, 0,
182     "WBINVD triggers a VM-exit");
183 
184 /* global statistics */
185 VMM_STAT(VCPU_MIGRATIONS, "vcpu migration across host cpus");
186 VMM_STAT(VMEXIT_COUNT, "total number of vm exits");
187 VMM_STAT(VMEXIT_EXTINT, "vm exits due to external interrupt");
188 VMM_STAT(VMEXIT_HLT, "number of times hlt was intercepted");
189 VMM_STAT(VMEXIT_CR_ACCESS, "number of times %cr access was intercepted");
190 VMM_STAT(VMEXIT_RDMSR, "number of times rdmsr was intercepted");
191 VMM_STAT(VMEXIT_WRMSR, "number of times wrmsr was intercepted");
192 VMM_STAT(VMEXIT_MTRAP, "number of monitor trap exits");
193 VMM_STAT(VMEXIT_PAUSE, "number of times pause was intercepted");
194 VMM_STAT(VMEXIT_INTR_WINDOW, "vm exits due to interrupt window opening");
195 VMM_STAT(VMEXIT_NMI_WINDOW, "vm exits due to nmi window opening");
196 VMM_STAT(VMEXIT_INOUT, "number of times in/out was intercepted");
197 VMM_STAT(VMEXIT_CPUID, "number of times cpuid was intercepted");
198 VMM_STAT(VMEXIT_NESTED_FAULT, "vm exits due to nested page fault");
199 VMM_STAT(VMEXIT_INST_EMUL, "vm exits for instruction emulation");
200 VMM_STAT(VMEXIT_UNKNOWN, "number of vm exits for unknown reason");
201 VMM_STAT(VMEXIT_ASTPENDING, "number of times astpending at exit");
202 VMM_STAT(VMEXIT_REQIDLE, "number of times idle requested at exit");
203 VMM_STAT(VMEXIT_USERSPACE, "number of vm exits handled in userspace");
204 VMM_STAT(VMEXIT_RENDEZVOUS, "number of times rendezvous pending at exit");
205 VMM_STAT(VMEXIT_EXCEPTION, "number of vm exits due to exceptions");
206 
207 static void
208 vcpu_cleanup(struct vcpu *vcpu, bool destroy)
209 {
210 	vmmops_vlapic_cleanup(vcpu->vlapic);
211 	vmmops_vcpu_cleanup(vcpu->cookie);
212 	vcpu->cookie = NULL;
213 	if (destroy) {
214 		vmm_stat_free(vcpu->stats);
215 		fpu_save_area_free(vcpu->guestfpu);
216 		vcpu_lock_destroy(vcpu);
217 		free(vcpu, M_VM);
218 	}
219 }
220 
221 static struct vcpu *
222 vcpu_alloc(struct vm *vm, int vcpu_id)
223 {
224 	struct vcpu *vcpu;
225 
226 	KASSERT(vcpu_id >= 0 && vcpu_id < vm->maxcpus,
227 	    ("vcpu_init: invalid vcpu %d", vcpu_id));
228 
229 	vcpu = malloc(sizeof(*vcpu), M_VM, M_WAITOK | M_ZERO);
230 	vcpu_lock_init(vcpu);
231 	vcpu->state = VCPU_IDLE;
232 	vcpu->hostcpu = NOCPU;
233 	vcpu->vcpuid = vcpu_id;
234 	vcpu->vm = vm;
235 	vcpu->guestfpu = fpu_save_area_alloc();
236 	vcpu->stats = vmm_stat_alloc();
237 	vcpu->tsc_offset = 0;
238 	return (vcpu);
239 }
240 
241 static void
242 vcpu_init(struct vcpu *vcpu)
243 {
244 	vcpu->cookie = vmmops_vcpu_init(vcpu->vm->cookie, vcpu, vcpu->vcpuid);
245 	vcpu->vlapic = vmmops_vlapic_init(vcpu->cookie);
246 	vm_set_x2apic_state(vcpu, X2APIC_DISABLED);
247 	vcpu->reqidle = 0;
248 	vcpu->exitintinfo = 0;
249 	vcpu->nmi_pending = 0;
250 	vcpu->extint_pending = 0;
251 	vcpu->exception_pending = 0;
252 	vcpu->guest_xcr0 = XFEATURE_ENABLED_X87;
253 	fpu_save_area_reset(vcpu->guestfpu);
254 	vmm_stat_init(vcpu->stats);
255 }
256 
257 int
258 vcpu_trace_exceptions(struct vcpu *vcpu)
259 {
260 	return (trace_guest_exceptions);
261 }
262 
263 int
264 vcpu_trap_wbinvd(struct vcpu *vcpu)
265 {
266 	return (trap_wbinvd);
267 }
268 
269 struct vm_exit *
270 vm_exitinfo(struct vcpu *vcpu)
271 {
272 	return (&vcpu->exitinfo);
273 }
274 
275 cpuset_t *
276 vm_exitinfo_cpuset(struct vcpu *vcpu)
277 {
278 	return (&vcpu->exitinfo_cpuset);
279 }
280 
281 int
282 vmm_modinit(void)
283 {
284 	if (!vmm_is_hw_supported())
285 		return (ENXIO);
286 
287 	vmm_host_state_init();
288 
289 	vmm_ipinum = lapic_ipi_alloc(pti ? &IDTVEC(justreturn1_pti) :
290 	    &IDTVEC(justreturn));
291 	if (vmm_ipinum < 0)
292 		vmm_ipinum = IPI_AST;
293 
294 	vmm_suspend_p = vmmops_modsuspend;
295 	vmm_resume_p = vmmops_modresume;
296 
297 	return (vmmops_modinit(vmm_ipinum));
298 }
299 
300 int
301 vmm_modcleanup(void)
302 {
303 	vmm_suspend_p = NULL;
304 	vmm_resume_p = NULL;
305 	iommu_cleanup();
306 	if (vmm_ipinum != IPI_AST)
307 		lapic_ipi_free(vmm_ipinum);
308 	return (vmmops_modcleanup());
309 }
310 
311 static void
312 vm_init(struct vm *vm, bool create)
313 {
314 	vm->cookie = vmmops_init(vm, vmspace_pmap(vm_vmspace(vm)));
315 	vm->iommu = NULL;
316 	vm->vioapic = vioapic_init(vm);
317 	vm->vhpet = vhpet_init(vm);
318 	vm->vatpic = vatpic_init(vm);
319 	vm->vatpit = vatpit_init(vm);
320 	vm->vpmtmr = vpmtmr_init(vm);
321 	if (create)
322 		vm->vrtc = vrtc_init(vm);
323 
324 	CPU_ZERO(&vm->active_cpus);
325 	CPU_ZERO(&vm->debug_cpus);
326 	CPU_ZERO(&vm->startup_cpus);
327 
328 	vm->suspend = 0;
329 	CPU_ZERO(&vm->suspended_cpus);
330 
331 	if (!create) {
332 		for (int i = 0; i < vm->maxcpus; i++) {
333 			if (vm->vcpu[i] != NULL)
334 				vcpu_init(vm->vcpu[i]);
335 		}
336 	}
337 }
338 
339 struct vcpu *
340 vm_alloc_vcpu(struct vm *vm, int vcpuid)
341 {
342 	struct vcpu *vcpu;
343 
344 	if (vcpuid < 0 || vcpuid >= vm_get_maxcpus(vm))
345 		return (NULL);
346 
347 	vcpu = (struct vcpu *)
348 	    atomic_load_acq_ptr((uintptr_t *)&vm->vcpu[vcpuid]);
349 	if (__predict_true(vcpu != NULL))
350 		return (vcpu);
351 
352 	sx_xlock(&vm->vcpus_init_lock);
353 	vcpu = vm->vcpu[vcpuid];
354 	if (vcpu == NULL && !vm->dying) {
355 		vcpu = vcpu_alloc(vm, vcpuid);
356 		vcpu_init(vcpu);
357 
358 		/*
359 		 * Ensure vCPU is fully created before updating pointer
360 		 * to permit unlocked reads above.
361 		 */
362 		atomic_store_rel_ptr((uintptr_t *)&vm->vcpu[vcpuid],
363 		    (uintptr_t)vcpu);
364 	}
365 	sx_xunlock(&vm->vcpus_init_lock);
366 	return (vcpu);
367 }
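
/*
 * A minimal caller sketch (hypothetical; not code from this file).  NULL
 * means the vcpuid was out of range or the VM is being destroyed:
 *
 *	struct vcpu *v = vm_alloc_vcpu(vm, vcpuid);
 *	if (v == NULL)
 *		return (EINVAL);
 */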
368 
369 int
370 vm_create(const char *name, struct vm **retvm)
371 {
372 	struct vm *vm;
373 	int error;
374 
375 	vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
376 	error = vm_mem_init(&vm->mem, 0, VM_MAXUSER_ADDRESS_LA48);
377 	if (error != 0) {
378 		free(vm, M_VM);
379 		return (error);
380 	}
381 	strcpy(vm->name, name);
382 	mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF);
383 	sx_init(&vm->vcpus_init_lock, "vm vcpus");
384 	vm->vcpu = malloc(sizeof(*vm->vcpu) * vm_maxcpu, M_VM, M_WAITOK |
385 	    M_ZERO);
386 
387 	vm->sockets = 1;
388 	vm->cores = 1;		/* XXX backwards compatibility */
389 	vm->threads = 1;	/* XXX backwards compatibility */
390 	vm->maxcpus = vm_maxcpu;
391 
392 	vm_init(vm, true);
393 
394 	*retvm = vm;
395 	return (0);
396 }
397 
398 static void
399 vm_cleanup(struct vm *vm, bool destroy)
400 {
401 	if (destroy)
402 		vm_xlock_memsegs(vm);
403 	else
404 		vm_assert_memseg_xlocked(vm);
405 
406 	ppt_unassign_all(vm);
407 
408 	if (vm->iommu != NULL)
409 		iommu_destroy_domain(vm->iommu);
410 
411 	if (destroy)
412 		vrtc_cleanup(vm->vrtc);
413 	else
414 		vrtc_reset(vm->vrtc);
415 	vpmtmr_cleanup(vm->vpmtmr);
416 	vatpit_cleanup(vm->vatpit);
417 	vhpet_cleanup(vm->vhpet);
418 	vatpic_cleanup(vm->vatpic);
419 	vioapic_cleanup(vm->vioapic);
420 
421 	for (int i = 0; i < vm->maxcpus; i++) {
422 		if (vm->vcpu[i] != NULL)
423 			vcpu_cleanup(vm->vcpu[i], destroy);
424 	}
425 
426 	vmmops_cleanup(vm->cookie);
427 
428 	vm_mem_cleanup(vm);
429 
430 	if (destroy) {
431 		vm_mem_destroy(vm);
432 
433 		free(vm->vcpu, M_VM);
434 		sx_destroy(&vm->vcpus_init_lock);
435 		mtx_destroy(&vm->rendezvous_mtx);
436 	}
437 }
438 
439 void
440 vm_destroy(struct vm *vm)
441 {
442 	vm_cleanup(vm, true);
443 	free(vm, M_VM);
444 }
445 
446 void
447 vm_reset(struct vm *vm)
448 {
449 	vm_cleanup(vm, false);
450 	vm_init(vm, false);
451 }
452 
453 int
454 vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
455 {
456 	return (vmm_mmio_alloc(vm_vmspace(vm), gpa, len, hpa));
457 }
458 
459 int
460 vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
461 {
462 
463 	vmm_mmio_free(vm_vmspace(vm), gpa, len);
464 	return (0);
465 }
466 
467 static int
468 vm_iommu_map(struct vm *vm)
469 {
470 	pmap_t pmap;
471 	vm_paddr_t gpa, hpa;
472 	struct vm_mem_map *mm;
473 	int error, i;
474 
475 	sx_assert(&vm->mem.mem_segs_lock, SX_LOCKED);
476 
477 	pmap = vmspace_pmap(vm_vmspace(vm));
478 	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
479 		mm = &vm->mem.mem_maps[i];
480 		if (!vm_memseg_sysmem(vm, mm->segid))
481 			continue;
482 
483 		KASSERT((mm->flags & VM_MEMMAP_F_IOMMU) == 0,
484 		    ("iommu map found invalid memmap %#lx/%#lx/%#x",
485 		    mm->gpa, mm->len, mm->flags));
486 		if ((mm->flags & VM_MEMMAP_F_WIRED) == 0)
487 			continue;
488 		mm->flags |= VM_MEMMAP_F_IOMMU;
489 
490 		for (gpa = mm->gpa; gpa < mm->gpa + mm->len; gpa += PAGE_SIZE) {
491 			hpa = pmap_extract(pmap, gpa);
492 
493 			/*
494 			 * All mappings in the vmm vmspace must be
495 			 * present since they are managed by vmm.
496 			 * Because we are in pass-through mode, the
497 			 * mappings must also be wired.  This implies
498 			 * that all pages must be mapped and wired,
499 			 * allowing us to use pmap_extract() and
500 			 * avoiding the need for vm_gpa_hold_global().
501 			 *
502 			 * This could change if/when we start
503 			 * supporting page faults on IOMMU maps.
504 			 */
505 			KASSERT(vm_page_wired(PHYS_TO_VM_PAGE(hpa)),
506 			    ("vm_iommu_map: vm %p gpa %jx hpa %jx not wired",
507 			    vm, (uintmax_t)gpa, (uintmax_t)hpa));
508 
509 			iommu_create_mapping(vm->iommu, gpa, hpa, PAGE_SIZE);
510 		}
511 	}
512 
513 	error = iommu_invalidate_tlb(iommu_host_domain());
514 	return (error);
515 }
516 
517 static int
518 vm_iommu_unmap(struct vm *vm)
519 {
520 	vm_paddr_t gpa;
521 	struct vm_mem_map *mm;
522 	int error, i;
523 
524 	sx_assert(&vm->mem.mem_segs_lock, SX_LOCKED);
525 
526 	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
527 		mm = &vm->mem.mem_maps[i];
528 		if (!vm_memseg_sysmem(vm, mm->segid))
529 			continue;
530 
531 		if ((mm->flags & VM_MEMMAP_F_IOMMU) == 0)
532 			continue;
533 		mm->flags &= ~VM_MEMMAP_F_IOMMU;
534 		KASSERT((mm->flags & VM_MEMMAP_F_WIRED) != 0,
535 		    ("iommu unmap found invalid memmap %#lx/%#lx/%#x",
536 		    mm->gpa, mm->len, mm->flags));
537 
538 		for (gpa = mm->gpa; gpa < mm->gpa + mm->len; gpa += PAGE_SIZE) {
539 			KASSERT(vm_page_wired(PHYS_TO_VM_PAGE(pmap_extract(
540 			    vmspace_pmap(vm_vmspace(vm)), gpa))),
541 			    ("vm_iommu_unmap: vm %p gpa %jx not wired",
542 			    vm, (uintmax_t)gpa));
543 			iommu_remove_mapping(vm->iommu, gpa, PAGE_SIZE);
544 		}
545 	}
546 
547 	/*
548 	 * Invalidate the cached translations associated with the domain
549 	 * from which pages were removed.
550 	 */
551 	error = iommu_invalidate_tlb(vm->iommu);
552 	return (error);
553 }
554 
555 int
556 vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func)
557 {
558 	int error;
559 
560 	error = ppt_unassign_device(vm, bus, slot, func);
561 	if (error)
562 		return (error);
563 
564 	if (ppt_assigned_devices(vm) == 0)
565 		error = vm_iommu_unmap(vm);
566 
567 	return (error);
568 }
569 
570 int
571 vm_assign_pptdev(struct vm *vm, int bus, int slot, int func)
572 {
573 	int error;
574 	vm_paddr_t maxaddr;
575 	bool map = false;
576 
577 	/* Set up the IOMMU to do the 'gpa' to 'hpa' translation */
578 	if (ppt_assigned_devices(vm) == 0) {
579 		KASSERT(vm->iommu == NULL,
580 		    ("vm_assign_pptdev: iommu must be NULL"));
581 		maxaddr = vmm_sysmem_maxaddr(vm);
582 		vm->iommu = iommu_create_domain(maxaddr);
583 		if (vm->iommu == NULL)
584 			return (ENXIO);
585 		map = true;
586 	}
587 
588 	error = ppt_assign_device(vm, bus, slot, func);
589 	if (error == 0 && map)
590 		error = vm_iommu_map(vm);
591 	return (error);
592 }
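
/*
 * Lifecycle note: the IOMMU domain is created lazily when the first
 * passthrough device is assigned, at which point vm_iommu_map() installs
 * gpa-to-hpa translations for every wired system-memory segment;
 * vm_unassign_pptdev() above removes those translations again once the
 * last device has been unassigned.
 */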
593 
594 int
595 vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval)
596 {
597 	/* Negative values represent VM control structure fields. */
598 	if (reg >= VM_REG_LAST)
599 		return (EINVAL);
600 
601 	return (vmmops_getreg(vcpu->cookie, reg, retval));
602 }
603 
604 int
605 vm_set_register(struct vcpu *vcpu, int reg, uint64_t val)
606 {
607 	int error;
608 
609 	/* Negative values represent VM control structure fields. */
610 	if (reg >= VM_REG_LAST)
611 		return (EINVAL);
612 
613 	error = vmmops_setreg(vcpu->cookie, reg, val);
614 	if (error || reg != VM_REG_GUEST_RIP)
615 		return (error);
616 
617 	/* Set 'nextrip' to match the value of %rip */
618 	VMM_CTR1(vcpu, "Setting nextrip to %#lx", val);
619 	vcpu->nextrip = val;
620 	return (0);
621 }
622 
623 static bool
624 is_descriptor_table(int reg)
625 {
626 
627 	switch (reg) {
628 	case VM_REG_GUEST_IDTR:
629 	case VM_REG_GUEST_GDTR:
630 		return (true);
631 	default:
632 		return (false);
633 	}
634 }
635 
636 static bool
637 is_segment_register(int reg)
638 {
639 
640 	switch (reg) {
641 	case VM_REG_GUEST_ES:
642 	case VM_REG_GUEST_CS:
643 	case VM_REG_GUEST_SS:
644 	case VM_REG_GUEST_DS:
645 	case VM_REG_GUEST_FS:
646 	case VM_REG_GUEST_GS:
647 	case VM_REG_GUEST_TR:
648 	case VM_REG_GUEST_LDTR:
649 		return (true);
650 	default:
651 		return (false);
652 	}
653 }
654 
655 int
656 vm_get_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *desc)
657 {
658 
659 	if (!is_segment_register(reg) && !is_descriptor_table(reg))
660 		return (EINVAL);
661 
662 	return (vmmops_getdesc(vcpu->cookie, reg, desc));
663 }
664 
665 int
666 vm_set_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *desc)
667 {
668 
669 	if (!is_segment_register(reg) && !is_descriptor_table(reg))
670 		return (EINVAL);
671 
672 	return (vmmops_setdesc(vcpu->cookie, reg, desc));
673 }
674 
675 static void
676 restore_guest_fpustate(struct vcpu *vcpu)
677 {
678 
679 	/* flush host state to the pcb */
680 	fpuexit(curthread);
681 
682 	/* restore guest FPU state */
683 	fpu_enable();
684 	fpurestore(vcpu->guestfpu);
685 
686 	/* restore guest XCR0 if XSAVE is enabled in the host */
687 	if (rcr4() & CR4_XSAVE)
688 		load_xcr(0, vcpu->guest_xcr0);
689 
690 	/*
691 	 * The FPU is now "dirty" with the guest's state so disable
692 	 * the FPU to trap any access by the host.
693 	 */
694 	fpu_disable();
695 }
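
/*
 * fpu_enable() and fpu_disable() work by toggling CR0.TS, so any stray FPU
 * use by the host while the guest's state is loaded traps with #NM rather
 * than silently corrupting guest registers; save_guest_fpustate() below
 * asserts this invariant by checking CR0_TS before touching the FPU.
 */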
696 
697 static void
698 save_guest_fpustate(struct vcpu *vcpu)
699 {
700 
701 	if ((rcr0() & CR0_TS) == 0)
702 		panic("fpu emulation not enabled in host!");
703 
704 	/* save guest XCR0 and restore host XCR0 */
705 	if (rcr4() & CR4_XSAVE) {
706 		vcpu->guest_xcr0 = rxcr(0);
707 		load_xcr(0, vmm_get_host_xcr0());
708 	}
709 
710 	/* save guest FPU state */
711 	fpu_enable();
712 	fpusave(vcpu->guestfpu);
713 	fpu_disable();
714 }
715 
716 static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");
717 
718 static void
719 vcpu_require_state(struct vcpu *vcpu, enum vcpu_state newstate)
720 {
721 	int error;
722 
723 	if ((error = vcpu_set_state(vcpu, newstate, false)) != 0)
724 		panic("Error %d setting state to %d\n", error, newstate);
725 }
726 
727 static void
728 vcpu_require_state_locked(struct vcpu *vcpu, enum vcpu_state newstate)
729 {
730 	int error;
731 
732 	if ((error = vcpu_set_state_locked(vcpu, newstate, false)) != 0)
733 		panic("Error %d setting state to %d", error, newstate);
734 }
735 
736 /*
737  * Emulate a guest 'hlt' by sleeping until the vcpu is ready to run.
738  */
739 static int
740 vm_handle_hlt(struct vcpu *vcpu, bool intr_disabled, bool *retu)
741 {
742 	struct vm *vm = vcpu->vm;
743 	const char *wmesg;
744 	struct thread *td;
745 	int error, t, vcpuid, vcpu_halted, vm_halted;
746 
747 	vcpuid = vcpu->vcpuid;
748 	vcpu_halted = 0;
749 	vm_halted = 0;
750 	error = 0;
751 	td = curthread;
752 
753 	KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted"));
754 
755 	vcpu_lock(vcpu);
756 	while (1) {
757 		/*
758 		 * Do a final check for pending NMI or interrupts before
759 		 * really putting this thread to sleep. Also check for
760 		 * software events that would cause this vcpu to wakeup.
761 		 *
762 		 * These interrupts/events could have happened after the
763 		 * vcpu returned from vmmops_run() and before it acquired the
764 		 * vcpu lock above.
765 		 */
766 		if (vm->rendezvous_func != NULL || vm->suspend || vcpu->reqidle)
767 			break;
768 		if (vm_nmi_pending(vcpu))
769 			break;
770 		if (!intr_disabled) {
771 			if (vm_extint_pending(vcpu) ||
772 			    vlapic_pending_intr(vcpu->vlapic, NULL)) {
773 				break;
774 			}
775 		}
776 
777 		/* Don't go to sleep if the vcpu thread needs to yield */
778 		if (vcpu_should_yield(vcpu))
779 			break;
780 
781 		if (vcpu_debugged(vcpu))
782 			break;
783 
784 		/*
785 		 * Some Linux guests implement "halt" by having all vcpus
786 		 * execute HLT with interrupts disabled. 'halted_cpus' keeps
787 		 * track of the vcpus that have entered this state. When all
788 		 * vcpus enter the halted state the virtual machine is halted.
789 		 */
790 		if (intr_disabled) {
791 			wmesg = "vmhalt";
792 			VMM_CTR0(vcpu, "Halted");
793 			if (!vcpu_halted && halt_detection_enabled) {
794 				vcpu_halted = 1;
795 				CPU_SET_ATOMIC(vcpuid, &vm->halted_cpus);
796 			}
797 			if (CPU_CMP(&vm->halted_cpus, &vm->active_cpus) == 0) {
798 				vm_halted = 1;
799 				break;
800 			}
801 		} else {
802 			wmesg = "vmidle";
803 		}
804 
805 		t = ticks;
806 		vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
807 		/*
808 		 * XXX msleep_spin() cannot be interrupted by signals so
809 		 * wake up periodically to check pending signals.
810 		 */
811 		msleep_spin(vcpu, &vcpu->mtx, wmesg, hz);
812 		vcpu_require_state_locked(vcpu, VCPU_FROZEN);
813 		vmm_stat_incr(vcpu, VCPU_IDLE_TICKS, ticks - t);
814 		if (td_ast_pending(td, TDA_SUSPEND)) {
815 			vcpu_unlock(vcpu);
816 			error = thread_check_susp(td, false);
817 			if (error != 0) {
818 				if (vcpu_halted) {
819 					CPU_CLR_ATOMIC(vcpuid,
820 					    &vm->halted_cpus);
821 				}
822 				return (error);
823 			}
824 			vcpu_lock(vcpu);
825 		}
826 	}
827 
828 	if (vcpu_halted)
829 		CPU_CLR_ATOMIC(vcpuid, &vm->halted_cpus);
830 
831 	vcpu_unlock(vcpu);
832 
833 	if (vm_halted)
834 		vm_suspend(vm, VM_SUSPEND_HALT);
835 
836 	return (0);
837 }
838 
839 static int
840 vm_handle_paging(struct vcpu *vcpu, bool *retu)
841 {
842 	struct vm *vm = vcpu->vm;
843 	int rv, ftype;
844 	struct vm_map *map;
845 	struct vm_exit *vme;
846 
847 	vme = &vcpu->exitinfo;
848 
849 	KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d",
850 	    __func__, vme->inst_length));
851 
852 	ftype = vme->u.paging.fault_type;
853 	KASSERT(ftype == VM_PROT_READ ||
854 	    ftype == VM_PROT_WRITE || ftype == VM_PROT_EXECUTE,
855 	    ("vm_handle_paging: invalid fault_type %d", ftype));
856 
857 	if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) {
858 		rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm_vmspace(vm)),
859 		    vme->u.paging.gpa, ftype);
860 		if (rv == 0) {
861 			VMM_CTR2(vcpu, "%s bit emulation for gpa %#lx",
862 			    ftype == VM_PROT_READ ? "accessed" : "dirty",
863 			    vme->u.paging.gpa);
864 			goto done;
865 		}
866 	}
867 
868 	map = &vm_vmspace(vm)->vm_map;
869 	rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL, NULL);
870 
871 	VMM_CTR3(vcpu, "vm_handle_paging rv = %d, gpa = %#lx, "
872 	    "ftype = %d", rv, vme->u.paging.gpa, ftype);
873 
874 	if (rv != KERN_SUCCESS)
875 		return (EFAULT);
876 done:
877 	return (0);
878 }
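
/*
 * A worked example of the path above: a guest store to a page the host has
 * paged out raises a nested page fault with fault_type VM_PROT_WRITE.
 * pmap_emulate_accessed_dirty() cannot resolve it, so vm_fault() brings the
 * page back in and installs the translation in the nested pmap; because
 * inst_length is zero, nextrip stays at the faulting %rip and the guest
 * simply retries the instruction on the next VM entry.
 */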
879 
880 static int
881 vm_handle_inst_emul(struct vcpu *vcpu, bool *retu)
882 {
883 	struct vie *vie;
884 	struct vm_exit *vme;
885 	uint64_t gla, gpa, cs_base;
886 	struct vm_guest_paging *paging;
887 	mem_region_read_t mread;
888 	mem_region_write_t mwrite;
889 	enum vm_cpu_mode cpu_mode;
890 	int cs_d, error, fault;
891 
892 	vme = &vcpu->exitinfo;
893 
894 	KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d",
895 	    __func__, vme->inst_length));
896 
897 	gla = vme->u.inst_emul.gla;
898 	gpa = vme->u.inst_emul.gpa;
899 	cs_base = vme->u.inst_emul.cs_base;
900 	cs_d = vme->u.inst_emul.cs_d;
901 	vie = &vme->u.inst_emul.vie;
902 	paging = &vme->u.inst_emul.paging;
903 	cpu_mode = paging->cpu_mode;
904 
905 	VMM_CTR1(vcpu, "inst_emul fault accessing gpa %#lx", gpa);
906 
907 	/* Fetch, decode and emulate the faulting instruction */
908 	if (vie->num_valid == 0) {
909 		error = vmm_fetch_instruction(vcpu, paging, vme->rip + cs_base,
910 		    VIE_INST_SIZE, vie, &fault);
911 	} else {
912 		/*
913 		 * The instruction bytes have already been copied into 'vie'
914 		 */
915 		error = fault = 0;
916 	}
917 	if (error || fault)
918 		return (error);
919 
920 	if (vmm_decode_instruction(vcpu, gla, cpu_mode, cs_d, vie) != 0) {
921 		VMM_CTR1(vcpu, "Error decoding instruction at %#lx",
922 		    vme->rip + cs_base);
923 		*retu = true;	    /* dump instruction bytes in userspace */
924 		return (0);
925 	}
926 
927 	/*
928 	 * Update 'nextrip' based on the length of the emulated instruction.
929 	 */
930 	vme->inst_length = vie->num_processed;
931 	vcpu->nextrip += vie->num_processed;
932 	VMM_CTR1(vcpu, "nextrip updated to %#lx after instruction decoding",
933 	    vcpu->nextrip);
934 
935 	/* return to userland unless this is an in-kernel emulated device */
936 	if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) {
937 		mread = lapic_mmio_read;
938 		mwrite = lapic_mmio_write;
939 	} else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) {
940 		mread = vioapic_mmio_read;
941 		mwrite = vioapic_mmio_write;
942 	} else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) {
943 		mread = vhpet_mmio_read;
944 		mwrite = vhpet_mmio_write;
945 	} else {
946 		*retu = true;
947 		return (0);
948 	}
949 
950 	error = vmm_emulate_instruction(vcpu, gpa, vie, paging, mread, mwrite,
951 	    retu);
952 
953 	return (error);
954 }
955 
956 static int
957 vm_handle_suspend(struct vcpu *vcpu, bool *retu)
958 {
959 	struct vm *vm = vcpu->vm;
960 	int error, i;
961 	struct thread *td;
962 
963 	error = 0;
964 	td = curthread;
965 
966 	CPU_SET_ATOMIC(vcpu->vcpuid, &vm->suspended_cpus);
967 
968 	/*
969 	 * Wait until all 'active_cpus' have suspended themselves.
970 	 *
971 	 * Since a VM may be suspended at any time, including when one or
972 	 * more vcpus are doing a rendezvous, we need to call the rendezvous
973 	 * handler while we are waiting in order to prevent a deadlock.
974 	 */
975 	vcpu_lock(vcpu);
976 	while (error == 0) {
977 		if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
978 			VMM_CTR0(vcpu, "All vcpus suspended");
979 			break;
980 		}
981 
982 		if (vm->rendezvous_func == NULL) {
983 			VMM_CTR0(vcpu, "Sleeping during suspend");
984 			vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
985 			msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz);
986 			vcpu_require_state_locked(vcpu, VCPU_FROZEN);
987 			if (td_ast_pending(td, TDA_SUSPEND)) {
988 				vcpu_unlock(vcpu);
989 				error = thread_check_susp(td, false);
990 				vcpu_lock(vcpu);
991 			}
992 		} else {
993 			VMM_CTR0(vcpu, "Rendezvous during suspend");
994 			vcpu_unlock(vcpu);
995 			error = vm_handle_rendezvous(vcpu);
996 			vcpu_lock(vcpu);
997 		}
998 	}
999 	vcpu_unlock(vcpu);
1000 
1001 	/*
1002 	 * Wakeup the other sleeping vcpus and return to userspace.
1003 	 */
1004 	for (i = 0; i < vm->maxcpus; i++) {
1005 		if (CPU_ISSET(i, &vm->suspended_cpus)) {
1006 			vcpu_notify_event(vm_vcpu(vm, i));
1007 		}
1008 	}
1009 
1010 	*retu = true;
1011 	return (error);
1012 }
1013 
1014 static int
1015 vm_handle_reqidle(struct vcpu *vcpu, bool *retu)
1016 {
1017 	vcpu_lock(vcpu);
1018 	KASSERT(vcpu->reqidle, ("invalid vcpu reqidle %d", vcpu->reqidle));
1019 	vcpu->reqidle = 0;
1020 	vcpu_unlock(vcpu);
1021 	*retu = true;
1022 	return (0);
1023 }
1024 
1025 static int
1026 vm_handle_db(struct vcpu *vcpu, struct vm_exit *vme, bool *retu)
1027 {
1028 	int error, fault;
1029 	uint64_t rsp;
1030 	uint64_t rflags;
1031 	struct vm_copyinfo copyinfo[2];
1032 
1033 	*retu = true;
1034 	if (!vme->u.dbg.pushf_intercept || vme->u.dbg.tf_shadow_val != 0) {
1035 		return (0);
1036 	}
1037 
1038 	vm_get_register(vcpu, VM_REG_GUEST_RSP, &rsp);
1039 	error = vm_copy_setup(vcpu, &vme->u.dbg.paging, rsp, sizeof(uint64_t),
1040 	    VM_PROT_RW, copyinfo, nitems(copyinfo), &fault);
1041 	if (error != 0 || fault != 0) {
1042 		*retu = false;
1043 		return (EINVAL);
1044 	}
1045 
1046 	/* Read pushed rflags value from top of stack. */
1047 	vm_copyin(copyinfo, &rflags, sizeof(uint64_t));
1048 
1049 	/* Clear TF bit. */
1050 	rflags &= ~(PSL_T);
1051 
1052 	/* Write updated value back to memory. */
1053 	vm_copyout(&rflags, copyinfo, sizeof(uint64_t));
1054 	vm_copy_teardown(copyinfo, nitems(copyinfo));
1055 
1056 	return (0);
1057 }
1058 
1059 void
1060 vm_exit_suspended(struct vcpu *vcpu, uint64_t rip)
1061 {
1062 	struct vm *vm = vcpu->vm;
1063 	struct vm_exit *vmexit;
1064 
1065 	KASSERT(vm->suspend > VM_SUSPEND_NONE && vm->suspend < VM_SUSPEND_LAST,
1066 	    ("vm_exit_suspended: invalid suspend type %d", vm->suspend));
1067 
1068 	vmexit = vm_exitinfo(vcpu);
1069 	vmexit->rip = rip;
1070 	vmexit->inst_length = 0;
1071 	vmexit->exitcode = VM_EXITCODE_SUSPENDED;
1072 	vmexit->u.suspended.how = vm->suspend;
1073 }
1074 
1075 void
1076 vm_exit_debug(struct vcpu *vcpu, uint64_t rip)
1077 {
1078 	struct vm_exit *vmexit;
1079 
1080 	vmexit = vm_exitinfo(vcpu);
1081 	vmexit->rip = rip;
1082 	vmexit->inst_length = 0;
1083 	vmexit->exitcode = VM_EXITCODE_DEBUG;
1084 }
1085 
1086 void
1087 vm_exit_rendezvous(struct vcpu *vcpu, uint64_t rip)
1088 {
1089 	struct vm_exit *vmexit;
1090 
1091 	vmexit = vm_exitinfo(vcpu);
1092 	vmexit->rip = rip;
1093 	vmexit->inst_length = 0;
1094 	vmexit->exitcode = VM_EXITCODE_RENDEZVOUS;
1095 	vmm_stat_incr(vcpu, VMEXIT_RENDEZVOUS, 1);
1096 }
1097 
1098 void
1099 vm_exit_reqidle(struct vcpu *vcpu, uint64_t rip)
1100 {
1101 	struct vm_exit *vmexit;
1102 
1103 	vmexit = vm_exitinfo(vcpu);
1104 	vmexit->rip = rip;
1105 	vmexit->inst_length = 0;
1106 	vmexit->exitcode = VM_EXITCODE_REQIDLE;
1107 	vmm_stat_incr(vcpu, VMEXIT_REQIDLE, 1);
1108 }
1109 
1110 void
1111 vm_exit_astpending(struct vcpu *vcpu, uint64_t rip)
1112 {
1113 	struct vm_exit *vmexit;
1114 
1115 	vmexit = vm_exitinfo(vcpu);
1116 	vmexit->rip = rip;
1117 	vmexit->inst_length = 0;
1118 	vmexit->exitcode = VM_EXITCODE_BOGUS;
1119 	vmm_stat_incr(vcpu, VMEXIT_ASTPENDING, 1);
1120 }
1121 
1122 int
1123 vm_run(struct vcpu *vcpu)
1124 {
1125 	struct vm *vm = vcpu->vm;
1126 	struct vm_eventinfo evinfo;
1127 	int error, vcpuid;
1128 	struct pcb *pcb;
1129 	uint64_t tscval;
1130 	struct vm_exit *vme;
1131 	bool retu, intr_disabled;
1132 	pmap_t pmap;
1133 
1134 	vcpuid = vcpu->vcpuid;
1135 
1136 	if (!CPU_ISSET(vcpuid, &vm->active_cpus))
1137 		return (EINVAL);
1138 
1139 	if (CPU_ISSET(vcpuid, &vm->suspended_cpus))
1140 		return (EINVAL);
1141 
1142 	pmap = vmspace_pmap(vm_vmspace(vm));
1143 	vme = &vcpu->exitinfo;
1144 	evinfo.rptr = &vm->rendezvous_req_cpus;
1145 	evinfo.sptr = &vm->suspend;
1146 	evinfo.iptr = &vcpu->reqidle;
1147 restart:
1148 	critical_enter();
1149 
1150 	KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active),
1151 	    ("vm_run: absurd pm_active"));
1152 
1153 	tscval = rdtsc();
1154 
1155 	pcb = PCPU_GET(curpcb);
1156 	set_pcb_flags(pcb, PCB_FULL_IRET);
1157 
1158 	restore_guest_fpustate(vcpu);
1159 
1160 	vcpu_require_state(vcpu, VCPU_RUNNING);
1161 	error = vmmops_run(vcpu->cookie, vcpu->nextrip, pmap, &evinfo);
1162 	vcpu_require_state(vcpu, VCPU_FROZEN);
1163 
1164 	save_guest_fpustate(vcpu);
1165 
1166 	vmm_stat_incr(vcpu, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);
1167 
1168 	critical_exit();
1169 
1170 	if (error == 0) {
1171 		retu = false;
1172 		vcpu->nextrip = vme->rip + vme->inst_length;
1173 		switch (vme->exitcode) {
1174 		case VM_EXITCODE_REQIDLE:
1175 			error = vm_handle_reqidle(vcpu, &retu);
1176 			break;
1177 		case VM_EXITCODE_SUSPENDED:
1178 			error = vm_handle_suspend(vcpu, &retu);
1179 			break;
1180 		case VM_EXITCODE_IOAPIC_EOI:
1181 			vioapic_process_eoi(vm, vme->u.ioapic_eoi.vector);
1182 			break;
1183 		case VM_EXITCODE_RENDEZVOUS:
1184 			error = vm_handle_rendezvous(vcpu);
1185 			break;
1186 		case VM_EXITCODE_HLT:
1187 			intr_disabled = ((vme->u.hlt.rflags & PSL_I) == 0);
1188 			error = vm_handle_hlt(vcpu, intr_disabled, &retu);
1189 			break;
1190 		case VM_EXITCODE_PAGING:
1191 			error = vm_handle_paging(vcpu, &retu);
1192 			break;
1193 		case VM_EXITCODE_INST_EMUL:
1194 			error = vm_handle_inst_emul(vcpu, &retu);
1195 			break;
1196 		case VM_EXITCODE_INOUT:
1197 		case VM_EXITCODE_INOUT_STR:
1198 			error = vm_handle_inout(vcpu, vme, &retu);
1199 			break;
1200 		case VM_EXITCODE_DB:
1201 			error = vm_handle_db(vcpu, vme, &retu);
1202 			break;
1203 		case VM_EXITCODE_MONITOR:
1204 		case VM_EXITCODE_MWAIT:
1205 		case VM_EXITCODE_VMINSN:
1206 			vm_inject_ud(vcpu);
1207 			break;
1208 		default:
1209 			retu = true;	/* handled in userland */
1210 			break;
1211 		}
1212 	}
1213 
1214 	/*
1215 	 * Handling VM_EXITCODE_INST_EMUL may access the local APIC, which can
1216 	 * transform the exit code into VM_EXITCODE_IPI.
1217 	 */
1218 	if (error == 0 && vme->exitcode == VM_EXITCODE_IPI)
1219 		error = vm_handle_ipi(vcpu, vme, &retu);
1220 
1221 	if (error == 0 && retu == false)
1222 		goto restart;
1223 
1224 	vmm_stat_incr(vcpu, VMEXIT_USERSPACE, 1);
1225 	VMM_CTR2(vcpu, "retu %d/%d", error, vme->exitcode);
1226 
1227 	return (error);
1228 }
1229 
1230 int
1231 vm_restart_instruction(struct vcpu *vcpu)
1232 {
1233 	enum vcpu_state state;
1234 	uint64_t rip;
1235 	int error __diagused;
1236 
1237 	state = vcpu_get_state(vcpu, NULL);
1238 	if (state == VCPU_RUNNING) {
1239 		/*
1240 		 * When a vcpu is "running" the next instruction is determined
1241 		 * by adding 'rip' and 'inst_length' in the vcpu's 'exitinfo'.
1242 		 * Thus setting 'inst_length' to zero will cause the current
1243 		 * instruction to be restarted.
1244 		 */
1245 		vcpu->exitinfo.inst_length = 0;
1246 		VMM_CTR1(vcpu, "restarting instruction at %#lx by "
1247 		    "setting inst_length to zero", vcpu->exitinfo.rip);
1248 	} else if (state == VCPU_FROZEN) {
1249 		/*
1250 		 * When a vcpu is "frozen" it is outside the critical section
1251 		 * around vmmops_run() and 'nextrip' points to the next
1252 		 * instruction. Thus instruction restart is achieved by setting
1253 		 * 'nextrip' to the vcpu's %rip.
1254 		 */
1255 		error = vm_get_register(vcpu, VM_REG_GUEST_RIP, &rip);
1256 		KASSERT(!error, ("%s: error %d getting rip", __func__, error));
1257 		VMM_CTR2(vcpu, "restarting instruction by updating "
1258 		    "nextrip from %#lx to %#lx", vcpu->nextrip, rip);
1259 		vcpu->nextrip = rip;
1260 	} else {
1261 		panic("%s: invalid state %d", __func__, state);
1262 	}
1263 	return (0);
1264 }
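
/*
 * For example, vm_inject_fault() later in this file passes
 * restart_instruction=1 to vm_inject_exception(), which in turn calls
 * vm_restart_instruction() so that the faulting instruction is re-executed
 * after the injected exception has been delivered to the guest.
 */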
1265 
1266 int
1267 vm_exit_intinfo(struct vcpu *vcpu, uint64_t info)
1268 {
1269 	int type, vector;
1270 
1271 	if (info & VM_INTINFO_VALID) {
1272 		type = info & VM_INTINFO_TYPE;
1273 		vector = info & 0xff;
1274 		if (type == VM_INTINFO_NMI && vector != IDT_NMI)
1275 			return (EINVAL);
1276 		if (type == VM_INTINFO_HWEXCEPTION && vector >= 32)
1277 			return (EINVAL);
1278 		if (info & VM_INTINFO_RSVD)
1279 			return (EINVAL);
1280 	} else {
1281 		info = 0;
1282 	}
1283 	VMM_CTR2(vcpu, "%s: info1(%#lx)", __func__, info);
1284 	vcpu->exitintinfo = info;
1285 	return (0);
1286 }
1287 
1288 enum exc_class {
1289 	EXC_BENIGN,
1290 	EXC_CONTRIBUTORY,
1291 	EXC_PAGEFAULT
1292 };
1293 
1294 #define	IDT_VE	20	/* Virtualization Exception (Intel specific) */
1295 
1296 static enum exc_class
1297 exception_class(uint64_t info)
1298 {
1299 	int type, vector;
1300 
1301 	KASSERT(info & VM_INTINFO_VALID, ("intinfo must be valid: %#lx", info));
1302 	type = info & VM_INTINFO_TYPE;
1303 	vector = info & 0xff;
1304 
1305 	/* Table 6-4, "Interrupt and Exception Classes", Intel SDM, Vol 3 */
1306 	switch (type) {
1307 	case VM_INTINFO_HWINTR:
1308 	case VM_INTINFO_SWINTR:
1309 	case VM_INTINFO_NMI:
1310 		return (EXC_BENIGN);
1311 	default:
1312 		/*
1313 		 * Hardware exception.
1314 		 *
1315 		 * SVM and VT-x use identical type values to represent NMI,
1316 		 * hardware interrupt and software interrupt.
1317 		 *
1318 		 * SVM uses type '3' for all exceptions. VT-x uses type '3'
1319 		 * for exceptions except #BP and #OF. #BP and #OF use a type
1320 		 * value of '5' or '6'. Therefore we don't check for explicit
1321 		 * values of 'type' to classify 'intinfo' into a hardware
1322 		 * exception.
1323 		 */
1324 		break;
1325 	}
1326 
1327 	switch (vector) {
1328 	case IDT_PF:
1329 	case IDT_VE:
1330 		return (EXC_PAGEFAULT);
1331 	case IDT_DE:
1332 	case IDT_TS:
1333 	case IDT_NP:
1334 	case IDT_SS:
1335 	case IDT_GP:
1336 		return (EXC_CONTRIBUTORY);
1337 	default:
1338 		return (EXC_BENIGN);
1339 	}
1340 }
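
/*
 * Worked examples of the classification above, following Table 6-5 of the
 * SDM (first event from exitintinfo, second is the new exception):
 *
 *	#NP while delivering #GP: contributory + contributory  -> #DF
 *	#PF while delivering #PF: page fault + page fault      -> #DF
 *	#PF while delivering #GP: contributory then page fault -> serial
 *	#DB while delivering #GP: second exception is benign   -> serial
 */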
1341 
1342 static int
1343 nested_fault(struct vcpu *vcpu, uint64_t info1, uint64_t info2,
1344     uint64_t *retinfo)
1345 {
1346 	enum exc_class exc1, exc2;
1347 	int type1, vector1;
1348 
1349 	KASSERT(info1 & VM_INTINFO_VALID, ("info1 %#lx is not valid", info1));
1350 	KASSERT(info2 & VM_INTINFO_VALID, ("info2 %#lx is not valid", info2));
1351 
1352 	/*
1353 	 * If an exception occurs while attempting to call the double-fault
1354 	 * handler the processor enters shutdown mode (aka triple fault).
1355 	 */
1356 	type1 = info1 & VM_INTINFO_TYPE;
1357 	vector1 = info1 & 0xff;
1358 	if (type1 == VM_INTINFO_HWEXCEPTION && vector1 == IDT_DF) {
1359 		VMM_CTR2(vcpu, "triple fault: info1(%#lx), info2(%#lx)",
1360 		    info1, info2);
1361 		vm_suspend(vcpu->vm, VM_SUSPEND_TRIPLEFAULT);
1362 		*retinfo = 0;
1363 		return (0);
1364 	}
1365 
1366 	/*
1367 	 * Table 6-5 "Conditions for Generating a Double Fault", Intel SDM, Vol3
1368 	 */
1369 	exc1 = exception_class(info1);
1370 	exc2 = exception_class(info2);
1371 	if ((exc1 == EXC_CONTRIBUTORY && exc2 == EXC_CONTRIBUTORY) ||
1372 	    (exc1 == EXC_PAGEFAULT && exc2 != EXC_BENIGN)) {
1373 		/* Convert nested fault into a double fault. */
1374 		*retinfo = IDT_DF;
1375 		*retinfo |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
1376 		*retinfo |= VM_INTINFO_DEL_ERRCODE;
1377 	} else {
1378 		/* Handle exceptions serially */
1379 		*retinfo = info2;
1380 	}
1381 	return (1);
1382 }
1383 
1384 static uint64_t
1385 vcpu_exception_intinfo(struct vcpu *vcpu)
1386 {
1387 	uint64_t info = 0;
1388 
1389 	if (vcpu->exception_pending) {
1390 		info = vcpu->exc_vector & 0xff;
1391 		info |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
1392 		if (vcpu->exc_errcode_valid) {
1393 			info |= VM_INTINFO_DEL_ERRCODE;
1394 			info |= (uint64_t)vcpu->exc_errcode << 32;
1395 		}
1396 	}
1397 	return (info);
1398 }
1399 
1400 int
1401 vm_entry_intinfo(struct vcpu *vcpu, uint64_t *retinfo)
1402 {
1403 	uint64_t info1, info2;
1404 	int valid;
1405 
1406 	info1 = vcpu->exitintinfo;
1407 	vcpu->exitintinfo = 0;
1408 
1409 	info2 = 0;
1410 	if (vcpu->exception_pending) {
1411 		info2 = vcpu_exception_intinfo(vcpu);
1412 		vcpu->exception_pending = 0;
1413 		VMM_CTR2(vcpu, "Exception %d delivered: %#lx",
1414 		    vcpu->exc_vector, info2);
1415 	}
1416 
1417 	if ((info1 & VM_INTINFO_VALID) && (info2 & VM_INTINFO_VALID)) {
1418 		valid = nested_fault(vcpu, info1, info2, retinfo);
1419 	} else if (info1 & VM_INTINFO_VALID) {
1420 		*retinfo = info1;
1421 		valid = 1;
1422 	} else if (info2 & VM_INTINFO_VALID) {
1423 		*retinfo = info2;
1424 		valid = 1;
1425 	} else {
1426 		valid = 0;
1427 	}
1428 
1429 	if (valid) {
1430 		VMM_CTR4(vcpu, "%s: info1(%#lx), info2(%#lx), "
1431 		    "retinfo(%#lx)", __func__, info1, info2, *retinfo);
1432 	}
1433 
1434 	return (valid);
1435 }
1436 
1437 int
1438 vm_get_intinfo(struct vcpu *vcpu, uint64_t *info1, uint64_t *info2)
1439 {
1440 	*info1 = vcpu->exitintinfo;
1441 	*info2 = vcpu_exception_intinfo(vcpu);
1442 	return (0);
1443 }
1444 
1445 int
1446 vm_inject_exception(struct vcpu *vcpu, int vector, int errcode_valid,
1447     uint32_t errcode, int restart_instruction)
1448 {
1449 	uint64_t regval;
1450 	int error __diagused;
1451 
1452 	if (vector < 0 || vector >= 32)
1453 		return (EINVAL);
1454 
1455 	/*
1456 	 * A double fault exception should never be injected directly into
1457 	 * the guest. It is a derived exception that results from specific
1458 	 * combinations of nested faults.
1459 	 */
1460 	if (vector == IDT_DF)
1461 		return (EINVAL);
1462 
1463 	if (vcpu->exception_pending) {
1464 		VMM_CTR2(vcpu, "Unable to inject exception %d due to "
1465 		    "pending exception %d", vector, vcpu->exc_vector);
1466 		return (EBUSY);
1467 	}
1468 
1469 	if (errcode_valid) {
1470 		/*
1471 		 * Exceptions don't deliver an error code in real mode.
1472 		 */
1473 		error = vm_get_register(vcpu, VM_REG_GUEST_CR0, &regval);
1474 		KASSERT(!error, ("%s: error %d getting CR0", __func__, error));
1475 		if (!(regval & CR0_PE))
1476 			errcode_valid = 0;
1477 	}
1478 
1479 	/*
1480 	 * From section 26.6.1 "Interruptibility State" in Intel SDM:
1481 	 *
1482 	 * Event blocking by "STI" or "MOV SS" is cleared after the guest
1483 	 * executes one instruction or incurs an exception.
1484 	 */
1485 	error = vm_set_register(vcpu, VM_REG_GUEST_INTR_SHADOW, 0);
1486 	KASSERT(error == 0, ("%s: error %d clearing interrupt shadow",
1487 	    __func__, error));
1488 
1489 	if (restart_instruction)
1490 		vm_restart_instruction(vcpu);
1491 
1492 	vcpu->exception_pending = 1;
1493 	vcpu->exc_vector = vector;
1494 	vcpu->exc_errcode = errcode;
1495 	vcpu->exc_errcode_valid = errcode_valid;
1496 	VMM_CTR1(vcpu, "Exception %d pending", vector);
1497 	return (0);
1498 }
1499 
1500 void
1501 vm_inject_fault(struct vcpu *vcpu, int vector, int errcode_valid, int errcode)
1502 {
1503 	int error __diagused, restart_instruction;
1504 
1505 	restart_instruction = 1;
1506 
1507 	error = vm_inject_exception(vcpu, vector, errcode_valid,
1508 	    errcode, restart_instruction);
1509 	KASSERT(error == 0, ("vm_inject_exception error %d", error));
1510 }
1511 
1512 void
1513 vm_inject_pf(struct vcpu *vcpu, int error_code, uint64_t cr2)
1514 {
1515 	int error __diagused;
1516 
1517 	VMM_CTR2(vcpu, "Injecting page fault: error_code %#x, cr2 %#lx",
1518 	    error_code, cr2);
1519 
1520 	error = vm_set_register(vcpu, VM_REG_GUEST_CR2, cr2);
1521 	KASSERT(error == 0, ("vm_set_register(cr2) error %d", error));
1522 
1523 	vm_inject_fault(vcpu, IDT_PF, 1, error_code);
1524 }
1525 
1526 static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu");
1527 
1528 int
1529 vm_inject_nmi(struct vcpu *vcpu)
1530 {
1531 
1532 	vcpu->nmi_pending = 1;
1533 	vcpu_notify_event(vcpu);
1534 	return (0);
1535 }
1536 
1537 int
1538 vm_nmi_pending(struct vcpu *vcpu)
1539 {
1540 	return (vcpu->nmi_pending);
1541 }
1542 
1543 void
1544 vm_nmi_clear(struct vcpu *vcpu)
1545 {
1546 	if (vcpu->nmi_pending == 0)
1547 		panic("vm_nmi_clear: inconsistent nmi_pending state");
1548 
1549 	vcpu->nmi_pending = 0;
1550 	vmm_stat_incr(vcpu, VCPU_NMI_COUNT, 1);
1551 }
1552 
1553 static VMM_STAT(VCPU_EXTINT_COUNT, "number of ExtINTs delivered to vcpu");
1554 
1555 int
1556 vm_inject_extint(struct vcpu *vcpu)
1557 {
1558 
1559 	vcpu->extint_pending = 1;
1560 	vcpu_notify_event(vcpu);
1561 	return (0);
1562 }
1563 
1564 int
1565 vm_extint_pending(struct vcpu *vcpu)
1566 {
1567 	return (vcpu->extint_pending);
1568 }
1569 
1570 void
1571 vm_extint_clear(struct vcpu *vcpu)
1572 {
1573 	if (vcpu->extint_pending == 0)
1574 		panic("vm_extint_clear: inconsistent extint_pending state");
1575 
1576 	vcpu->extint_pending = 0;
1577 	vmm_stat_incr(vcpu, VCPU_EXTINT_COUNT, 1);
1578 }
1579 
1580 int
1581 vm_get_capability(struct vcpu *vcpu, int type, int *retval)
1582 {
1583 	if (type < 0 || type >= VM_CAP_MAX)
1584 		return (EINVAL);
1585 
1586 	return (vmmops_getcap(vcpu->cookie, type, retval));
1587 }
1588 
1589 int
1590 vm_set_capability(struct vcpu *vcpu, int type, int val)
1591 {
1592 	if (type < 0 || type >= VM_CAP_MAX)
1593 		return (EINVAL);
1594 
1595 	return (vmmops_setcap(vcpu->cookie, type, val));
1596 }
1597 
1598 struct vlapic *
1599 vm_lapic(struct vcpu *vcpu)
1600 {
1601 	return (vcpu->vlapic);
1602 }
1603 
1604 struct vioapic *
1605 vm_ioapic(struct vm *vm)
1606 {
1607 
1608 	return (vm->vioapic);
1609 }
1610 
1611 struct vhpet *
1612 vm_hpet(struct vm *vm)
1613 {
1614 
1615 	return (vm->vhpet);
1616 }
1617 
1618 bool
1619 vmm_is_pptdev(int bus, int slot, int func)
1620 {
1621 	int b, f, i, n, s;
1622 	char *val, *cp, *cp2;
1623 	bool found;
1624 
1625 	/*
1626 	 * XXX
1627  * The length of an environment variable is limited to 128 bytes, which
1628 	 * puts an upper limit on the number of passthru devices that may be
1629 	 * specified using a single environment variable.
1630 	 *
1631 	 * Work around this by scanning multiple environment variable
1632 	 * names instead of a single one - yuck!
1633 	 */
1634 	const char *names[] = { "pptdevs", "pptdevs2", "pptdevs3", NULL };
1635 
1636 	/* set pptdevs="1/2/3 4/5/6 7/8/9 10/11/12" */
1637 	found = false;
1638 	for (i = 0; names[i] != NULL && !found; i++) {
1639 		cp = val = kern_getenv(names[i]);
1640 		while (cp != NULL && *cp != '\0') {
1641 			if ((cp2 = strchr(cp, ' ')) != NULL)
1642 				*cp2 = '\0';
1643 
1644 			n = sscanf(cp, "%d/%d/%d", &b, &s, &f);
1645 			if (n == 3 && bus == b && slot == s && func == f) {
1646 				found = true;
1647 				break;
1648 			}
1649 
1650 			if (cp2 != NULL)
1651 				*cp2++ = ' ';
1652 
1653 			cp = cp2;
1654 		}
1655 		freeenv(val);
1656 	}
1657 	return (found);
1658 }
1659 
1660 void *
1661 vm_iommu_domain(struct vm *vm)
1662 {
1663 
1664 	return (vm->iommu);
1665 }
1666 
1667 /*
1668  * Returns the subset of vCPUs in tostart that are awaiting startup.
1669  * These vCPUs are also marked as no longer awaiting startup.
1670  */
1671 cpuset_t
1672 vm_start_cpus(struct vm *vm, const cpuset_t *tostart)
1673 {
1674 	cpuset_t set;
1675 
1676 	mtx_lock(&vm->rendezvous_mtx);
1677 	CPU_AND(&set, &vm->startup_cpus, tostart);
1678 	CPU_ANDNOT(&vm->startup_cpus, &vm->startup_cpus, &set);
1679 	mtx_unlock(&vm->rendezvous_mtx);
1680 	return (set);
1681 }
1682 
1683 void
1684 vm_await_start(struct vm *vm, const cpuset_t *waiting)
1685 {
1686 	mtx_lock(&vm->rendezvous_mtx);
1687 	CPU_OR(&vm->startup_cpus, &vm->startup_cpus, waiting);
1688 	mtx_unlock(&vm->rendezvous_mtx);
1689 }
1690 
1691 int
1692 vm_get_x2apic_state(struct vcpu *vcpu, enum x2apic_state *state)
1693 {
1694 	*state = vcpu->x2apic_state;
1695 
1696 	return (0);
1697 }
1698 
1699 int
1700 vm_set_x2apic_state(struct vcpu *vcpu, enum x2apic_state state)
1701 {
1702 	if (state >= X2APIC_STATE_LAST)
1703 		return (EINVAL);
1704 
1705 	vcpu->x2apic_state = state;
1706 
1707 	vlapic_set_x2apic_state(vcpu, state);
1708 
1709 	return (0);
1710 }
1711 
1712 void
1713 vcpu_notify_lapic(struct vcpu *vcpu)
1714 {
1715 	vcpu_lock(vcpu);
1716 	if (vcpu->state == VCPU_RUNNING && vcpu->hostcpu != curcpu)
1717 		vlapic_post_intr(vcpu->vlapic, vcpu->hostcpu, vmm_ipinum);
1718 	else
1719 		vcpu_notify_event_locked(vcpu);
1720 	vcpu_unlock(vcpu);
1721 }
1722 
1723 int
1724 vm_apicid2vcpuid(struct vm *vm, int apicid)
1725 {
1726 	/*
1727 	 * XXX apic id is assumed to be numerically identical to vcpu id
1728 	 */
1729 	return (apicid);
1730 }
1731 
1732 int
1733 vm_smp_rendezvous(struct vcpu *vcpu, cpuset_t dest,
1734     vm_rendezvous_func_t func, void *arg)
1735 {
1736 	struct vm *vm = vcpu->vm;
1737 	int error, i;
1738 
1739 	/*
1740 	 * Enforce that this function is called without any locks
1741 	 */
1742 	WITNESS_WARN(WARN_PANIC, NULL, "vm_smp_rendezvous");
1743 
1744 restart:
1745 	mtx_lock(&vm->rendezvous_mtx);
1746 	if (vm->rendezvous_func != NULL) {
1747 		/*
1748 		 * If a rendezvous is already in progress then we need to
1749 		 * call the rendezvous handler in case this 'vcpu' is one
1750 		 * of the targets of the rendezvous.
1751 		 */
1752 		VMM_CTR0(vcpu, "Rendezvous already in progress");
1753 		mtx_unlock(&vm->rendezvous_mtx);
1754 		error = vm_handle_rendezvous(vcpu);
1755 		if (error != 0)
1756 			return (error);
1757 		goto restart;
1758 	}
1759 	KASSERT(vm->rendezvous_func == NULL, ("vm_smp_rendezvous: previous "
1760 	    "rendezvous is still in progress"));
1761 
1762 	VMM_CTR0(vcpu, "Initiating rendezvous");
1763 	vm->rendezvous_req_cpus = dest;
1764 	CPU_ZERO(&vm->rendezvous_done_cpus);
1765 	vm->rendezvous_arg = arg;
1766 	vm->rendezvous_func = func;
1767 	mtx_unlock(&vm->rendezvous_mtx);
1768 
1769 	/*
1770 	 * Wake up any sleeping vcpus and trigger a VM-exit in any running
1771 	 * vcpus so they handle the rendezvous as soon as possible.
1772 	 */
1773 	for (i = 0; i < vm->maxcpus; i++) {
1774 		if (CPU_ISSET(i, &dest))
1775 			vcpu_notify_event(vm_vcpu(vm, i));
1776 	}
1777 
1778 	return (vm_handle_rendezvous(vcpu));
1779 }
1780 
1781 struct vatpic *
1782 vm_atpic(struct vm *vm)
1783 {
1784 	return (vm->vatpic);
1785 }
1786 
1787 struct vatpit *
1788 vm_atpit(struct vm *vm)
1789 {
1790 	return (vm->vatpit);
1791 }
1792 
1793 struct vpmtmr *
1794 vm_pmtmr(struct vm *vm)
1795 {
1796 
1797 	return (vm->vpmtmr);
1798 }
1799 
1800 struct vrtc *
1801 vm_rtc(struct vm *vm)
1802 {
1803 
1804 	return (vm->vrtc);
1805 }
1806 
1807 enum vm_reg_name
1808 vm_segment_name(int seg)
1809 {
1810 	static enum vm_reg_name seg_names[] = {
1811 		VM_REG_GUEST_ES,
1812 		VM_REG_GUEST_CS,
1813 		VM_REG_GUEST_SS,
1814 		VM_REG_GUEST_DS,
1815 		VM_REG_GUEST_FS,
1816 		VM_REG_GUEST_GS
1817 	};
1818 
1819 	KASSERT(seg >= 0 && seg < nitems(seg_names),
1820 	    ("%s: invalid segment encoding %d", __func__, seg));
1821 	return (seg_names[seg]);
1822 }
1823 
1824 void
1825 vm_copy_teardown(struct vm_copyinfo *copyinfo, int num_copyinfo)
1826 {
1827 	int idx;
1828 
1829 	for (idx = 0; idx < num_copyinfo; idx++) {
1830 		if (copyinfo[idx].cookie != NULL)
1831 			vm_gpa_release(copyinfo[idx].cookie);
1832 	}
1833 	bzero(copyinfo, num_copyinfo * sizeof(struct vm_copyinfo));
1834 }
1835 
1836 int
1837 vm_copy_setup(struct vcpu *vcpu, struct vm_guest_paging *paging,
1838     uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo,
1839     int num_copyinfo, int *fault)
1840 {
1841 	int error, idx, nused;
1842 	size_t n, off, remaining;
1843 	void *hva, *cookie;
1844 	uint64_t gpa;
1845 
1846 	bzero(copyinfo, sizeof(struct vm_copyinfo) * num_copyinfo);
1847 
1848 	nused = 0;
1849 	remaining = len;
1850 	while (remaining > 0) {
1851 		if (nused >= num_copyinfo)
1852 			return (EFAULT);
1853 		error = vm_gla2gpa(vcpu, paging, gla, prot, &gpa, fault);
1854 		if (error || *fault)
1855 			return (error);
1856 		off = gpa & PAGE_MASK;
1857 		n = min(remaining, PAGE_SIZE - off);
1858 		copyinfo[nused].gpa = gpa;
1859 		copyinfo[nused].len = n;
1860 		remaining -= n;
1861 		gla += n;
1862 		nused++;
1863 	}
1864 
1865 	for (idx = 0; idx < nused; idx++) {
1866 		hva = vm_gpa_hold(vcpu, copyinfo[idx].gpa,
1867 		    copyinfo[idx].len, prot, &cookie);
1868 		if (hva == NULL)
1869 			break;
1870 		copyinfo[idx].hva = hva;
1871 		copyinfo[idx].cookie = cookie;
1872 	}
1873 
1874 	if (idx != nused) {
1875 		vm_copy_teardown(copyinfo, num_copyinfo);
1876 		return (EFAULT);
1877 	} else {
1878 		*fault = 0;
1879 		return (0);
1880 	}
1881 }
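
/*
 * Typical usage, modeled on vm_handle_db() above (a sketch with error and
 * fault handling trimmed): set up the per-page holds, copy, then release.
 *
 *	struct vm_copyinfo ci[2];
 *	uint64_t word;
 *	int error, fault;
 *
 *	error = vm_copy_setup(vcpu, paging, gla, sizeof(word),
 *	    VM_PROT_READ, ci, nitems(ci), &fault);
 *	if (error == 0 && fault == 0) {
 *		vm_copyin(ci, &word, sizeof(word));
 *		vm_copy_teardown(ci, nitems(ci));
 *	}
 */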
1882 
1883 void
1884 vm_copyin(struct vm_copyinfo *copyinfo, void *kaddr, size_t len)
1885 {
1886 	char *dst;
1887 	int idx;
1888 
1889 	dst = kaddr;
1890 	idx = 0;
1891 	while (len > 0) {
1892 		bcopy(copyinfo[idx].hva, dst, copyinfo[idx].len);
1893 		len -= copyinfo[idx].len;
1894 		dst += copyinfo[idx].len;
1895 		idx++;
1896 	}
1897 }
1898 
1899 void
1900 vm_copyout(const void *kaddr, struct vm_copyinfo *copyinfo, size_t len)
1901 {
1902 	const char *src;
1903 	int idx;
1904 
1905 	src = kaddr;
1906 	idx = 0;
1907 	while (len > 0) {
1908 		bcopy(src, copyinfo[idx].hva, copyinfo[idx].len);
1909 		len -= copyinfo[idx].len;
1910 		src += copyinfo[idx].len;
1911 		idx++;
1912 	}
1913 }
1914 
1915 /*
1916  * Return the amount of in-use and wired memory for the VM. Since
1917  * these are global stats, only return the values for vCPU 0.
1918  */
1919 VMM_STAT_DECLARE(VMM_MEM_RESIDENT);
1920 VMM_STAT_DECLARE(VMM_MEM_WIRED);
1921 
1922 static void
1923 vm_get_rescnt(struct vcpu *vcpu, struct vmm_stat_type *stat)
1924 {
1925 
1926 	if (vcpu->vcpuid == 0) {
1927 		vmm_stat_set(vcpu, VMM_MEM_RESIDENT, PAGE_SIZE *
1928 		    vmspace_resident_count(vm_vmspace(vcpu->vm)));
1929 	}
1930 }
1931 
1932 static void
1933 vm_get_wiredcnt(struct vcpu *vcpu, struct vmm_stat_type *stat)
1934 {
1935 
1936 	if (vcpu->vcpuid == 0) {
1937 		vmm_stat_set(vcpu, VMM_MEM_WIRED, PAGE_SIZE *
1938 		    pmap_wired_count(vmspace_pmap(vm_vmspace(vcpu->vm))));
1939 	}
1940 }
1941 
1942 VMM_STAT_FUNC(VMM_MEM_RESIDENT, "Resident memory", vm_get_rescnt);
1943 VMM_STAT_FUNC(VMM_MEM_WIRED, "Wired memory", vm_get_wiredcnt);
1944 
1945 #ifdef BHYVE_SNAPSHOT
1946 static int
1947 vm_snapshot_vcpus(struct vm *vm, struct vm_snapshot_meta *meta)
1948 {
1949 	uint64_t tsc, now;
1950 	int ret;
1951 	struct vcpu *vcpu;
1952 	uint16_t i, maxcpus;
1953 
1954 	now = rdtsc();
1955 	maxcpus = vm_get_maxcpus(vm);
1956 	for (i = 0; i < maxcpus; i++) {
1957 		vcpu = vm->vcpu[i];
1958 		if (vcpu == NULL)
1959 			continue;
1960 
1961 		SNAPSHOT_VAR_OR_LEAVE(vcpu->x2apic_state, meta, ret, done);
1962 		SNAPSHOT_VAR_OR_LEAVE(vcpu->exitintinfo, meta, ret, done);
1963 		SNAPSHOT_VAR_OR_LEAVE(vcpu->exc_vector, meta, ret, done);
1964 		SNAPSHOT_VAR_OR_LEAVE(vcpu->exc_errcode_valid, meta, ret, done);
1965 		SNAPSHOT_VAR_OR_LEAVE(vcpu->exc_errcode, meta, ret, done);
1966 		SNAPSHOT_VAR_OR_LEAVE(vcpu->guest_xcr0, meta, ret, done);
1967 		SNAPSHOT_VAR_OR_LEAVE(vcpu->exitinfo, meta, ret, done);
1968 		SNAPSHOT_VAR_OR_LEAVE(vcpu->nextrip, meta, ret, done);
1969 
1970 		/*
1971 		 * Save the absolute TSC value by adding now to tsc_offset.
1972 		 *
1973 		 * It will be turned back into an actual offset when the
1974 		 * TSC restore function is called.
1975 		 */
1976 		tsc = now + vcpu->tsc_offset;
1977 		SNAPSHOT_VAR_OR_LEAVE(tsc, meta, ret, done);
1978 		if (meta->op == VM_SNAPSHOT_RESTORE)
1979 			vcpu->tsc_offset = tsc;
1980 	}
1981 
1982 done:
1983 	return (ret);
1984 }
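
/*
 * On restore, vm_restore_time() below inverts this calculation: given the
 * saved absolute value 'tsc' and the current counter 'now', the per-vcpu
 * offset becomes tsc - now, which vmmops_restore_tsc() hands to the
 * backend as the new TSC offset.
 */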
1985 
1986 static int
1987 vm_snapshot_vm(struct vm *vm, struct vm_snapshot_meta *meta)
1988 {
1989 	int ret;
1990 
1991 	ret = vm_snapshot_vcpus(vm, meta);
1992 	if (ret != 0)
1993 		goto done;
1994 
1995 	SNAPSHOT_VAR_OR_LEAVE(vm->startup_cpus, meta, ret, done);
1996 done:
1997 	return (ret);
1998 }
1999 
2000 static int
2001 vm_snapshot_vcpu(struct vm *vm, struct vm_snapshot_meta *meta)
2002 {
2003 	int error;
2004 	struct vcpu *vcpu;
2005 	uint16_t i, maxcpus;
2006 
2007 	error = 0;
2008 
2009 	maxcpus = vm_get_maxcpus(vm);
2010 	for (i = 0; i < maxcpus; i++) {
2011 		vcpu = vm->vcpu[i];
2012 		if (vcpu == NULL)
2013 			continue;
2014 
2015 		error = vmmops_vcpu_snapshot(vcpu->cookie, meta);
2016 		if (error != 0) {
2017 			printf("%s: failed to snapshot vmcs/vmcb data for "
2018 			       "vCPU: %d; error: %d\n", __func__, i, error);
2019 			goto done;
2020 		}
2021 	}
2022 
2023 done:
2024 	return (error);
2025 }
2026 
2027 /*
2028  * Save kernel-side structures to user-space for snapshotting.
2029  */
2030 int
2031 vm_snapshot_req(struct vm *vm, struct vm_snapshot_meta *meta)
2032 {
2033 	int ret = 0;
2034 
2035 	switch (meta->dev_req) {
2036 	case STRUCT_VMCX:
2037 		ret = vm_snapshot_vcpu(vm, meta);
2038 		break;
2039 	case STRUCT_VM:
2040 		ret = vm_snapshot_vm(vm, meta);
2041 		break;
2042 	case STRUCT_VIOAPIC:
2043 		ret = vioapic_snapshot(vm_ioapic(vm), meta);
2044 		break;
2045 	case STRUCT_VLAPIC:
2046 		ret = vlapic_snapshot(vm, meta);
2047 		break;
2048 	case STRUCT_VHPET:
2049 		ret = vhpet_snapshot(vm_hpet(vm), meta);
2050 		break;
2051 	case STRUCT_VATPIC:
2052 		ret = vatpic_snapshot(vm_atpic(vm), meta);
2053 		break;
2054 	case STRUCT_VATPIT:
2055 		ret = vatpit_snapshot(vm_atpit(vm), meta);
2056 		break;
2057 	case STRUCT_VPMTMR:
2058 		ret = vpmtmr_snapshot(vm_pmtmr(vm), meta);
2059 		break;
2060 	case STRUCT_VRTC:
2061 		ret = vrtc_snapshot(vm_rtc(vm), meta);
2062 		break;
2063 	default:
2064 		printf("%s: failed to find the requested type %#x\n",
2065 		       __func__, meta->dev_req);
2066 		ret = (EINVAL);
2067 	}
2068 	return (ret);
2069 }
2070 
2071 void
2072 vm_set_tsc_offset(struct vcpu *vcpu, uint64_t offset)
2073 {
2074 	vcpu->tsc_offset = offset;
2075 }
2076 
2077 int
2078 vm_restore_time(struct vm *vm)
2079 {
2080 	int error;
2081 	uint64_t now;
2082 	struct vcpu *vcpu;
2083 	uint16_t i, maxcpus;
2084 
2085 	now = rdtsc();
2086 
2087 	error = vhpet_restore_time(vm_hpet(vm));
2088 	if (error)
2089 		return (error);
2090 
2091 	maxcpus = vm_get_maxcpus(vm);
2092 	for (i = 0; i < maxcpus; i++) {
2093 		vcpu = vm->vcpu[i];
2094 		if (vcpu == NULL)
2095 			continue;
2096 
2097 		error = vmmops_restore_tsc(vcpu->cookie,
2098 		    vcpu->tsc_offset - now);
2099 		if (error)
2100 			return (error);
2101 	}
2102 
2103 	return (0);
2104 }
2105 #endif
2106