/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *           Red Hat, Inc. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Glauber Costa     <gcosta@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <poll.h>

#include <linux/kvm.h>

#include "qemu/atomic.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/s390x/adapter.h"
#include "gdbstub/enums.h"
#include "system/kvm_int.h"
#include "system/runstate.h"
#include "system/cpus.h"
#include "system/accel-blocker.h"
#include "qemu/bswap.h"
#include "exec/tswap.h"
#include "system/memory.h"
#include "system/ram_addr.h"
#include "qemu/event_notifier.h"
#include "qemu/main-loop.h"
#include "trace.h"
#include "hw/irq.h"
#include "qapi/visitor.h"
#include "qapi/qapi-types-common.h"
#include "qapi/qapi-visit-common.h"
#include "system/reset.h"
#include "qemu/guest-random.h"
#include "system/hw_accel.h"
#include "kvm-cpus.h"
#include "system/dirtylimit.h"
#include "qemu/range.h"

#include "hw/boards.h"
#include "system/stats.h"

/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif

#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)
# define KVM_HAVE_MCE_INJECTION 1
#endif


/* KVM uses PAGE_SIZE in its definition of KVM_COALESCED_MMIO_MAX. We
 * need to use the real host PAGE_SIZE, as that's what KVM will use.
 */
#ifdef PAGE_SIZE
#undef PAGE_SIZE
#endif
#define PAGE_SIZE qemu_real_host_page_size()

#ifndef KVM_GUESTDBG_BLOCKIRQ
#define KVM_GUESTDBG_BLOCKIRQ 0
#endif

/* Default num of memslots to be allocated when VM starts */
#define KVM_MEMSLOTS_NR_ALLOC_DEFAULT 16
/* Default max allowed memslots if kernel reported nothing */
#define KVM_MEMSLOTS_NR_MAX_DEFAULT 32

struct KVMParkedVcpu {
    unsigned long vcpu_id;
    int kvm_fd;
    QLIST_ENTRY(KVMParkedVcpu) node;
};

KVMState *kvm_state;
bool kvm_kernel_irqchip;
bool kvm_split_irqchip;
bool kvm_async_interrupts_allowed;
bool kvm_halt_in_kernel_allowed;
bool kvm_resamplefds_allowed;
bool kvm_msi_via_irqfd_allowed;
bool kvm_gsi_routing_allowed;
bool kvm_gsi_direct_mapping;
bool kvm_allowed;
bool kvm_readonly_mem_allowed;
bool kvm_vm_attributes_allowed;
bool kvm_msi_use_devid;
bool kvm_pre_fault_memory_supported;
static bool kvm_has_guest_debug;
static int kvm_sstep_flags;
static bool kvm_immediate_exit;
static uint64_t kvm_supported_memory_attributes;
static bool kvm_guest_memfd_supported;
static hwaddr kvm_max_slot_size = ~0;

static const KVMCapabilityInfo kvm_required_capabilites[] = {
    KVM_CAP_INFO(USER_MEMORY),
    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
    KVM_CAP_INFO(JOIN_MEMORY_REGIONS_WORKS),
    KVM_CAP_INFO(INTERNAL_ERROR_DATA),
    KVM_CAP_INFO(IOEVENTFD),
    KVM_CAP_INFO(IOEVENTFD_ANY_LENGTH),
    KVM_CAP_LAST_INFO
};

static NotifierList kvm_irqchip_change_notifiers =
    NOTIFIER_LIST_INITIALIZER(kvm_irqchip_change_notifiers);

struct KVMResampleFd {
    int gsi;
    EventNotifier *resample_event;
    QLIST_ENTRY(KVMResampleFd) node;
};
typedef struct KVMResampleFd KVMResampleFd;

/*
 * Only used with split irqchip where we need to do the resample fd
 * kick for the kernel from userspace.
 */
static QLIST_HEAD(, KVMResampleFd) kvm_resample_fd_list =
    QLIST_HEAD_INITIALIZER(kvm_resample_fd_list);

static QemuMutex kml_slots_lock;

#define kvm_slots_lock()    qemu_mutex_lock(&kml_slots_lock)
#define kvm_slots_unlock()  qemu_mutex_unlock(&kml_slots_lock)

static void kvm_slot_init_dirty_bitmap(KVMSlot *mem);

static inline void kvm_resample_fd_remove(int gsi)
{
    KVMResampleFd *rfd;

    QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
        if (rfd->gsi == gsi) {
            QLIST_REMOVE(rfd, node);
            g_free(rfd);
            break;
        }
    }
}

static inline void kvm_resample_fd_insert(int gsi, EventNotifier *event)
{
    KVMResampleFd *rfd = g_new0(KVMResampleFd, 1);

    rfd->gsi = gsi;
    rfd->resample_event = event;

    QLIST_INSERT_HEAD(&kvm_resample_fd_list, rfd, node);
}

void kvm_resample_fd_notify(int gsi)
{
    KVMResampleFd *rfd;

    QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
        if (rfd->gsi == gsi) {
            event_notifier_set(rfd->resample_event);
            trace_kvm_resample_fd_notify(gsi);
            return;
        }
    }
}

/**
 * kvm_slots_grow(): Grow the slots[] array in the KVMMemoryListener
 *
 * @kml: The KVMMemoryListener* to grow the slots[] array
 * @nr_slots_new: The new size of slots[] array
 *
 * Returns: True if the array grows larger, false otherwise.
 */
static bool kvm_slots_grow(KVMMemoryListener *kml, unsigned int nr_slots_new)
{
    unsigned int i, cur = kml->nr_slots_allocated;
    KVMSlot *slots;

    if (nr_slots_new > kvm_state->nr_slots_max) {
        nr_slots_new = kvm_state->nr_slots_max;
    }

    if (cur >= nr_slots_new) {
        /* Big enough, no need to grow, or we reached max */
        return false;
    }

    if (cur == 0) {
        slots = g_new0(KVMSlot, nr_slots_new);
    } else {
        assert(kml->slots);
        slots = g_renew(KVMSlot, kml->slots, nr_slots_new);
        /*
         * g_renew() doesn't initialize the newly extended part of the
         * buffer; however, kvm memslots require their fields (pointers,
         * the memory_size field, etc.) to be zero-initialized.
         */
        memset(&slots[cur], 0x0, sizeof(slots[0]) * (nr_slots_new - cur));
    }

    for (i = cur; i < nr_slots_new; i++) {
        slots[i].slot = i;
    }

    kml->slots = slots;
    kml->nr_slots_allocated = nr_slots_new;
    trace_kvm_slots_grow(cur, nr_slots_new);

    return true;
}

static bool kvm_slots_double(KVMMemoryListener *kml)
{
    return kvm_slots_grow(kml, kml->nr_slots_allocated * 2);
}

unsigned int kvm_get_max_memslots(void)
{
    KVMState *s = KVM_STATE(current_accel());

    return s->nr_slots_max;
}

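/*
 * Free slots are computed conservatively: the usage of the busiest
 * address space is subtracted from the per-VM maximum.
 */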
unsigned int kvm_get_free_memslots(void)
{
    unsigned int used_slots = 0;
    KVMState *s = kvm_state;
    int i;

    kvm_slots_lock();
    for (i = 0; i < s->nr_as; i++) {
        if (!s->as[i].ml) {
            continue;
        }
        used_slots = MAX(used_slots, s->as[i].ml->nr_slots_used);
    }
    kvm_slots_unlock();

    return s->nr_slots_max - used_slots;
}

/* Called with KVMMemoryListener.slots_lock held */
static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
{
    unsigned int n;
    int i;

    for (i = 0; i < kml->nr_slots_allocated; i++) {
        if (kml->slots[i].memory_size == 0) {
            return &kml->slots[i];
        }
    }

    /*
     * If no free slots, try to grow first by doubling.  Cache the old size
     * here to avoid another round of search: if the grow succeeded, it
     * means slots[] now must have the existing "n" slots occupied,
     * followed by one or more free slots starting from slots[n].
     */
    n = kml->nr_slots_allocated;
    if (kvm_slots_double(kml)) {
        return &kml->slots[n];
    }

    return NULL;
}

/* Called with KVMMemoryListener.slots_lock held */
static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
{
    KVMSlot *slot = kvm_get_free_slot(kml);

    if (slot) {
        return slot;
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}

static KVMSlot *kvm_lookup_matching_slot(KVMMemoryListener *kml,
                                         hwaddr start_addr,
                                         hwaddr size)
{
    int i;

    for (i = 0; i < kml->nr_slots_allocated; i++) {
        KVMSlot *mem = &kml->slots[i];

        if (start_addr == mem->start_addr && size == mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}

/*
 * Calculate and align the start address and the size of the section.
 * Return the size. If the size is 0, the aligned section is empty.
 */
static hwaddr kvm_align_section(MemoryRegionSection *section,
                                hwaddr *start)
{
    hwaddr size = int128_get64(section->size);
    hwaddr delta, aligned;

    /* kvm works in page size chunks, but the function may be called
       with sub-page size and unaligned start address.  Round the start
       address up to the next page boundary and truncate the size down
       to the previous one. */
    aligned = ROUND_UP(section->offset_within_address_space,
                       qemu_real_host_page_size());
    delta = aligned - section->offset_within_address_space;
    *start = aligned;
    if (delta > size) {
        return 0;
    }

    return (size - delta) & qemu_real_host_page_mask();
}

int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
                                       hwaddr *phys_addr)
{
    KVMMemoryListener *kml = &s->memory_listener;
    int i, ret = 0;

    kvm_slots_lock();
    for (i = 0; i < kml->nr_slots_allocated; i++) {
        KVMSlot *mem = &kml->slots[i];

        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
            *phys_addr = mem->start_addr + (ram - mem->ram);
            ret = 1;
            break;
        }
    }
    kvm_slots_unlock();

    return ret;
}

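/*
 * Update one memslot in the kernel from @slot.  When guest_memfd is
 * supported, the _REGION2 ioctl is used so that the guest_memfd fields
 * are passed through; otherwise fall back to KVM_SET_USER_MEMORY_REGION.
 */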
static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot, bool new)
{
    KVMState *s = kvm_state;
    struct kvm_userspace_memory_region2 mem;
    int ret;

    mem.slot = slot->slot | (kml->as_id << 16);
    mem.guest_phys_addr = slot->start_addr;
    mem.userspace_addr = (unsigned long)slot->ram;
    mem.flags = slot->flags;
    mem.guest_memfd = slot->guest_memfd;
    mem.guest_memfd_offset = slot->guest_memfd_offset;

    if (slot->memory_size && !new && (mem.flags ^ slot->old_flags) & KVM_MEM_READONLY) {
        /* Set the slot size to 0 before setting the slot to the desired
         * value. This is needed based on KVM commit 75d61fbc. */
        mem.memory_size = 0;

        if (kvm_guest_memfd_supported) {
            ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION2, &mem);
        } else {
            ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
        }
        if (ret < 0) {
            goto err;
        }
    }
    mem.memory_size = slot->memory_size;
    if (kvm_guest_memfd_supported) {
        ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION2, &mem);
    } else {
        ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
    }
    slot->old_flags = mem.flags;
err:
    trace_kvm_set_user_memory(mem.slot >> 16, (uint16_t)mem.slot, mem.flags,
                              mem.guest_phys_addr, mem.memory_size,
                              mem.userspace_addr, mem.guest_memfd,
                              mem.guest_memfd_offset, ret);
    if (ret < 0) {
        if (kvm_guest_memfd_supported) {
            error_report("%s: KVM_SET_USER_MEMORY_REGION2 failed, slot=%d,"
                         " start=0x%" PRIx64 ", size=0x%" PRIx64 ","
                         " flags=0x%" PRIx32 ", guest_memfd=%" PRId32 ","
                         " guest_memfd_offset=0x%" PRIx64 ": %s",
                         __func__, mem.slot, slot->start_addr,
                         (uint64_t)mem.memory_size, mem.flags,
                         mem.guest_memfd, (uint64_t)mem.guest_memfd_offset,
                         strerror(errno));
        } else {
            error_report("%s: KVM_SET_USER_MEMORY_REGION failed, slot=%d,"
                         " start=0x%" PRIx64 ", size=0x%" PRIx64 ": %s",
                         __func__, mem.slot, slot->start_addr,
                         (uint64_t)mem.memory_size, strerror(errno));
        }
    }
    return ret;
}

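/*
 * "Parking" a vCPU keeps its kernel fd alive after the QEMU-side CPU
 * goes away, so that a later vCPU with the same id can reuse the fd
 * (see kvm_create_vcpu()) instead of creating a new kernel vCPU.
 */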
void kvm_park_vcpu(CPUState *cpu)
{
    struct KVMParkedVcpu *vcpu;

    trace_kvm_park_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));

    vcpu = g_malloc0(sizeof(*vcpu));
    vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
    vcpu->kvm_fd = cpu->kvm_fd;
    QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);
}

int kvm_unpark_vcpu(KVMState *s, unsigned long vcpu_id)
{
    struct KVMParkedVcpu *cpu;
    int kvm_fd = -ENOENT;

    QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
        if (cpu->vcpu_id == vcpu_id) {
            QLIST_REMOVE(cpu, node);
            kvm_fd = cpu->kvm_fd;
            g_free(cpu);
            break;
        }
    }

    trace_kvm_unpark_vcpu(vcpu_id, kvm_fd > 0 ? "unparked" : "!found parked");

    return kvm_fd;
}

static void kvm_reset_parked_vcpus(KVMState *s)
{
    struct KVMParkedVcpu *cpu;

    QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
        kvm_arch_reset_parked_vcpu(cpu->vcpu_id, cpu->kvm_fd);
    }
}

/**
 * kvm_create_vcpu - Gets a parked KVM vCPU or creates a KVM vCPU
 * @cpu: QOM CPUState object for which KVM vCPU has to be fetched/created.
 *
 * @returns: 0 on success, negative errno on failure.
 */
static int kvm_create_vcpu(CPUState *cpu)
{
    unsigned long vcpu_id = kvm_arch_vcpu_id(cpu);
    KVMState *s = kvm_state;
    int kvm_fd;

    /* check if the KVM vCPU already exists but is parked */
    kvm_fd = kvm_unpark_vcpu(s, vcpu_id);
    if (kvm_fd < 0) {
        /* vCPU not parked: create a new KVM vCPU */
        kvm_fd = kvm_vm_ioctl(s, KVM_CREATE_VCPU, vcpu_id);
        if (kvm_fd < 0) {
            error_report("KVM_CREATE_VCPU IOCTL failed for vCPU %lu", vcpu_id);
            return kvm_fd;
        }
    }

    cpu->kvm_fd = kvm_fd;
    cpu->kvm_state = s;
    if (!s->guest_state_protected) {
        cpu->vcpu_dirty = true;
    }
    cpu->dirty_pages = 0;
    cpu->throttle_us_per_full = 0;

    trace_kvm_create_vcpu(cpu->cpu_index, vcpu_id, kvm_fd);

    return 0;
}

int kvm_create_and_park_vcpu(CPUState *cpu)
{
    int ret = 0;

    ret = kvm_create_vcpu(cpu);
    if (!ret) {
        kvm_park_vcpu(cpu);
    }

    return ret;
}

static int do_kvm_destroy_vcpu(CPUState *cpu)
{
    KVMState *s = kvm_state;
    int mmap_size;
    int ret = 0;

    trace_kvm_destroy_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));

    ret = kvm_arch_destroy_vcpu(cpu);
    if (ret < 0) {
        goto err;
    }

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        trace_kvm_failed_get_vcpu_mmap_size();
        goto err;
    }

    /* If I am the CPU that created coalesced_mmio_ring, then discard it */
    if (s->coalesced_mmio_ring == (void *)cpu->kvm_run + PAGE_SIZE) {
        s->coalesced_mmio_ring = NULL;
    }

    ret = munmap(cpu->kvm_run, mmap_size);
    if (ret < 0) {
        goto err;
    }
    cpu->kvm_run = NULL;

    if (cpu->kvm_dirty_gfns) {
        ret = munmap(cpu->kvm_dirty_gfns, s->kvm_dirty_ring_bytes);
        if (ret < 0) {
            goto err;
        }
        cpu->kvm_dirty_gfns = NULL;
    }

    kvm_park_vcpu(cpu);
err:
    return ret;
}

void kvm_destroy_vcpu(CPUState *cpu)
{
    if (do_kvm_destroy_vcpu(cpu) < 0) {
        error_report("kvm_destroy_vcpu failed");
        exit(EXIT_FAILURE);
    }
}

int kvm_init_vcpu(CPUState *cpu, Error **errp)
{
    KVMState *s = kvm_state;
    int mmap_size;
    int ret;

    trace_kvm_init_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));

    ret = kvm_arch_pre_create_vcpu(cpu, errp);
    if (ret < 0) {
        goto err;
    }

    ret = kvm_create_vcpu(cpu);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "kvm_init_vcpu: kvm_create_vcpu failed (%lu)",
                         kvm_arch_vcpu_id(cpu));
        goto err;
    }

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        error_setg_errno(errp, -mmap_size,
                         "kvm_init_vcpu: KVM_GET_VCPU_MMAP_SIZE failed");
        goto err;
    }

    cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        cpu->kvm_fd, 0);
    if (cpu->kvm_run == MAP_FAILED) {
        ret = -errno;
        error_setg_errno(errp, ret,
                         "kvm_init_vcpu: mmap'ing vcpu state failed (%lu)",
                         kvm_arch_vcpu_id(cpu));
        goto err;
    }

    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
        s->coalesced_mmio_ring =
            (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE;
    }

    if (s->kvm_dirty_ring_size) {
        /* Use MAP_SHARED to share pages with the kernel */
        cpu->kvm_dirty_gfns = mmap(NULL, s->kvm_dirty_ring_bytes,
                                   PROT_READ | PROT_WRITE, MAP_SHARED,
                                   cpu->kvm_fd,
                                   PAGE_SIZE * KVM_DIRTY_LOG_PAGE_OFFSET);
        if (cpu->kvm_dirty_gfns == MAP_FAILED) {
            ret = -errno;
            goto err;
        }
    }

    ret = kvm_arch_init_vcpu(cpu);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "kvm_init_vcpu: kvm_arch_init_vcpu failed (%lu)",
                         kvm_arch_vcpu_id(cpu));
    }
    cpu->kvm_vcpu_stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL);

err:
    return ret;
}

void kvm_close(void)
{
    CPUState *cpu;

    if (!kvm_state || kvm_state->fd == -1) {
        return;
    }

    CPU_FOREACH(cpu) {
        cpu_remove_sync(cpu);
        close(cpu->kvm_fd);
        cpu->kvm_fd = -1;
        close(cpu->kvm_vcpu_stats_fd);
        cpu->kvm_vcpu_stats_fd = -1;
    }

    if (kvm_state && kvm_state->fd != -1) {
        close(kvm_state->vmfd);
        kvm_state->vmfd = -1;
        close(kvm_state->fd);
        kvm_state->fd = -1;
    }
    kvm_state = NULL;
}

/*
 * dirty pages logging control
 */

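/* Translate a MemoryRegion's properties into KVM_MEM_* slot flags. */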
static int kvm_mem_flags(MemoryRegion *mr)
{
    bool readonly = mr->readonly || memory_region_is_romd(mr);
    int flags = 0;

    if (memory_region_get_dirty_log_mask(mr) != 0) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    if (readonly && kvm_readonly_mem_allowed) {
        flags |= KVM_MEM_READONLY;
    }
    if (memory_region_has_guest_memfd(mr)) {
        assert(kvm_guest_memfd_supported);
        flags |= KVM_MEM_GUEST_MEMFD;
    }
    return flags;
}

/* Called with KVMMemoryListener.slots_lock held */
static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
                                 MemoryRegion *mr)
{
    mem->flags = kvm_mem_flags(mr);

    /* If nothing changed effectively, no need to issue ioctl */
    if (mem->flags == mem->old_flags) {
        return 0;
    }

    kvm_slot_init_dirty_bitmap(mem);
    return kvm_set_user_memory_region(kml, mem, false);
}

static int kvm_section_update_flags(KVMMemoryListener *kml,
                                    MemoryRegionSection *section)
{
    hwaddr start_addr, size, slot_size;
    KVMSlot *mem;
    int ret = 0;

    size = kvm_align_section(section, &start_addr);
    if (!size) {
        return 0;
    }

    kvm_slots_lock();

    while (size && !ret) {
        slot_size = MIN(kvm_max_slot_size, size);
        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
        if (!mem) {
            /* We don't have a slot if we want to trap every access. */
            goto out;
        }

        ret = kvm_slot_update_flags(kml, mem, section->mr);
        start_addr += slot_size;
        size -= slot_size;
    }

out:
    kvm_slots_unlock();
    return ret;
}

static void kvm_log_start(MemoryListener *listener,
                          MemoryRegionSection *section,
                          int old, int new)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    if (old != 0) {
        return;
    }

    r = kvm_section_update_flags(kml, section);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_stop(MemoryListener *listener,
                         MemoryRegionSection *section,
                         int old, int new)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    if (new != 0) {
        return;
    }

    r = kvm_section_update_flags(kml, section);
    if (r < 0) {
        abort();
    }
}

/* get kvm's dirty pages bitmap and update qemu's */
static void kvm_slot_sync_dirty_pages(KVMSlot *slot)
{
    ram_addr_t start = slot->ram_start_offset;
    ram_addr_t pages = slot->memory_size / qemu_real_host_page_size();

    cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages);
}

static void kvm_slot_reset_dirty_pages(KVMSlot *slot)
{
    memset(slot->dirty_bmap, 0, slot->dirty_bmap_size);
}

#define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))

/* Allocate the dirty bitmap for a slot */
static void kvm_slot_init_dirty_bitmap(KVMSlot *mem)
{
    if (!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) || mem->dirty_bmap) {
        return;
    }

    /*
     * XXX bad kernel interface alert
     * For dirty bitmap, kernel allocates array of size aligned to
     * bits-per-long.  But for case when the kernel is 64bits and
     * the userspace is 32bits, userspace can't align to the same
     * bits-per-long, since sizeof(long) is different between kernel
     * and user space.  This way, userspace will provide a buffer which
     * may be 4 bytes less than the kernel will use, resulting in
     * userspace memory corruption (which is not detectable by valgrind
     * either, in most cases).
     * So for now, let's align to 64 instead of HOST_LONG_BITS here, in
     * the hope that sizeof(long) won't become >8 any time soon.
     *
     * Note: the granule of kvm dirty log is qemu_real_host_page_size.
     * And mem->memory_size is aligned to it (otherwise this mem can't
     * be registered to KVM).
     */
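    /*
     * Illustrative sizing (not taken from the code below): with 4 KiB
     * host pages, a 1 GiB slot covers 262144 pages, so the bitmap is
     * ALIGN(262144, 64) / 8 = 32 KiB.
     */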
    hwaddr bitmap_size = ALIGN(mem->memory_size / qemu_real_host_page_size(),
                               /*HOST_LONG_BITS*/ 64) / 8;
    mem->dirty_bmap = g_malloc0(bitmap_size);
    mem->dirty_bmap_size = bitmap_size;
}

/*
 * Sync dirty bitmap from kernel to KVMSlot.dirty_bmap, return true if
 * succeeded, false otherwise
 */
static bool kvm_slot_get_dirty_log(KVMState *s, KVMSlot *slot)
{
    struct kvm_dirty_log d = {};
    int ret;

    d.dirty_bitmap = slot->dirty_bmap;
    d.slot = slot->slot | (slot->as_id << 16);
    ret = kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d);

    if (ret == -ENOENT) {
        /* kernel does not have dirty bitmap in this slot */
        ret = 0;
    }
    if (ret) {
        error_report_once("%s: KVM_GET_DIRTY_LOG failed with %d",
                          __func__, ret);
    }
    return ret == 0;
}

/* Should be with all slots_lock held for the address spaces. */
static void kvm_dirty_ring_mark_page(KVMState *s, uint32_t as_id,
                                     uint32_t slot_id, uint64_t offset)
{
    KVMMemoryListener *kml;
    KVMSlot *mem;

    if (as_id >= s->nr_as) {
        return;
    }

    kml = s->as[as_id].ml;
    mem = &kml->slots[slot_id];

    if (!mem->memory_size || offset >=
        (mem->memory_size / qemu_real_host_page_size())) {
        return;
    }

    set_bit(offset, mem->dirty_bmap);
}

static bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn)
{
    /*
     * Read the flags before the value.  Pairs with barrier in
     * KVM's kvm_dirty_ring_push() function.
     */
    return qatomic_load_acquire(&gfn->flags) == KVM_DIRTY_GFN_F_DIRTY;
}

static void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn)
{
    /*
     * Use a store-release so that the CPU that executes KVM_RESET_DIRTY_RINGS
     * sees the full content of the ring:
     *
     *     CPU0                     CPU1                         CPU2
     * ------------------------------------------------------------------------------
     *                                                           fill gfn0
     *                                                           store-rel flags for gfn0
     * load-acq flags for gfn0
     * store-rel RESET for gfn0
     *                          ioctl(RESET_RINGS)
     *                          load-acq flags for gfn0
     *                          check if flags have RESET
     *
     * The synchronization goes from CPU2 to CPU0 to CPU1.
     */
    qatomic_store_release(&gfn->flags, KVM_DIRTY_GFN_F_RESET);
}

/*
 * Should be with all slots_lock held for the address spaces.  Returns
 * the number of dirty pages collected from this vcpu's dirty ring.
 */
static uint32_t kvm_dirty_ring_reap_one(KVMState *s, CPUState *cpu)
{
    struct kvm_dirty_gfn *dirty_gfns = cpu->kvm_dirty_gfns, *cur;
    uint32_t ring_size = s->kvm_dirty_ring_size;
    uint32_t count = 0, fetch = cpu->kvm_fetch_index;

    /*
     * It's possible that we race with vcpu creation code where the vcpu is
     * put onto the vcpus list but has not yet initialized the dirty ring
     * structures.  If so, skip it.
     */
    if (!cpu->created) {
        return 0;
    }

    assert(dirty_gfns && ring_size);
    trace_kvm_dirty_ring_reap_vcpu(cpu->cpu_index);

    while (true) {
        cur = &dirty_gfns[fetch % ring_size];
        if (!dirty_gfn_is_dirtied(cur)) {
            break;
        }
        kvm_dirty_ring_mark_page(s, cur->slot >> 16, cur->slot & 0xffff,
                                 cur->offset);
        dirty_gfn_set_collected(cur);
        trace_kvm_dirty_ring_page(cpu->cpu_index, fetch, cur->offset);
        fetch++;
        count++;
    }
    cpu->kvm_fetch_index = fetch;
    cpu->dirty_pages += count;

    return count;
}

/* Must be with slots_lock held */
static uint64_t kvm_dirty_ring_reap_locked(KVMState *s, CPUState *cpu)
{
    int ret;
    uint64_t total = 0;
    int64_t stamp;

    stamp = get_clock();

    if (cpu) {
        total = kvm_dirty_ring_reap_one(s, cpu);
    } else {
        CPU_FOREACH(cpu) {
            total += kvm_dirty_ring_reap_one(s, cpu);
        }
    }

    if (total) {
        ret = kvm_vm_ioctl(s, KVM_RESET_DIRTY_RINGS);
        assert(ret == total);
    }

    stamp = get_clock() - stamp;

    if (total) {
        trace_kvm_dirty_ring_reap(total, stamp / 1000);
    }

    return total;
}

/*
 * Currently for simplicity, we must hold BQL before calling this.  We can
 * consider dropping the BQL once we're confident all the race conditions
 * are handled.
 */
static uint64_t kvm_dirty_ring_reap(KVMState *s, CPUState *cpu)
{
    uint64_t total;

    /*
     * We need to lock all kvm slots for all address spaces here,
     * because:
     *
     * (1) We need to mark dirty for dirty bitmaps in multiple slots
     *     and for tons of pages, so it's better to take the lock here
     *     once rather than once per page.  And more importantly,
     *
     * (2) We must _NOT_ publish dirty bits to the other threads
     *     (e.g., the migration thread) via the kvm memory slot dirty
     *     bitmaps before correctly re-protecting those dirtied pages.
     *     Otherwise we can have potential risk of data corruption if
     *     the page data is read in the other thread before we do
     *     reset below.
     */
    kvm_slots_lock();
    total = kvm_dirty_ring_reap_locked(s, cpu);
    kvm_slots_unlock();

    return total;
}

static void do_kvm_cpu_synchronize_kick(CPUState *cpu, run_on_cpu_data arg)
{
    /* No need to do anything */
}

/*
 * Kick all vcpus out in a synchronized way.  When returned, we
 * guarantee that every vcpu has been kicked and at least returned to
 * userspace once.
 */
static void kvm_cpu_synchronize_kick_all(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        run_on_cpu(cpu, do_kvm_cpu_synchronize_kick, RUN_ON_CPU_NULL);
    }
}

/*
 * Flush all the existing dirty pages to the KVM slot buffers.  When
 * this call returns, we guarantee that all the touched dirty pages
 * before calling this function have been put into the per-kvmslot
 * dirty bitmap.
 *
 * This function must be called with BQL held.
 */
static void kvm_dirty_ring_flush(void)
{
    trace_kvm_dirty_ring_flush(0);
    /*
     * The function needs to be serialized.  Since this function
     * should always be with BQL held, serialization is guaranteed.
     * However, let's be sure of it.
     */
    assert(bql_locked());
    /*
     * First make sure to flush the hardware buffers by kicking all
     * vcpus out in a synchronous way.
     */
    kvm_cpu_synchronize_kick_all();
    kvm_dirty_ring_reap(kvm_state, NULL);
    trace_kvm_dirty_ring_flush(1);
}

/**
 * kvm_physical_sync_dirty_bitmap - Sync dirty bitmap from kernel space
 *
 * This function will first try to fetch the dirty bitmap from the kernel,
 * and then update qemu's dirty bitmap.
 *
 * NOTE: caller must be with kml->slots_lock held.
 *
 * @kml: the KVM memory listener object
 * @section: the memory section to sync the dirty bitmap with
 */
static void kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
                                           MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    KVMSlot *mem;
    hwaddr start_addr, size;
    hwaddr slot_size;

    size = kvm_align_section(section, &start_addr);
    while (size) {
        slot_size = MIN(kvm_max_slot_size, size);
        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
        if (!mem) {
            /* We don't have a slot if we want to trap every access. */
            return;
        }
        if (kvm_slot_get_dirty_log(s, mem)) {
            kvm_slot_sync_dirty_pages(mem);
        }
        start_addr += slot_size;
        size -= slot_size;
    }
}

/* Alignment requirement for KVM_CLEAR_DIRTY_LOG - 64 pages */
#define KVM_CLEAR_LOG_SHIFT  6
#define KVM_CLEAR_LOG_ALIGN  (qemu_real_host_page_size() << KVM_CLEAR_LOG_SHIFT)
#define KVM_CLEAR_LOG_MASK   (-KVM_CLEAR_LOG_ALIGN)

static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start,
                                  uint64_t size)
{
    KVMState *s = kvm_state;
    uint64_t end, bmap_start, start_delta, bmap_npages;
    struct kvm_clear_dirty_log d;
    unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size();
    int ret;

    /*
     * We need to extend either the start or the size or both to
     * satisfy the KVM interface requirement.  First, align the start
     * address to a 64-host-page boundary.
     */
    bmap_start = start & KVM_CLEAR_LOG_MASK;
    start_delta = start - bmap_start;
    bmap_start /= psize;

    /*
     * The kernel interface has a restriction on the size too, that either:
     *
     * (1) the size is 64 host pages aligned (just like the start), or
     * (2) the size fills up until the end of the KVM memslot.
     */
    bmap_npages = DIV_ROUND_UP(size + start_delta, KVM_CLEAR_LOG_ALIGN)
        << KVM_CLEAR_LOG_SHIFT;
    end = mem->memory_size / psize;
    if (bmap_npages > end - bmap_start) {
        bmap_npages = end - bmap_start;
    }
    start_delta /= psize;

    /*
     * Prepare the bitmap to clear dirty bits.  Here we must guarantee
     * that we won't clear any unknown dirty bits otherwise we might
     * accidentally clear some set bits which are not yet synced from
     * the kernel into QEMU's bitmap, then we'll lose track of the
     * guest modifications upon those pages (which can directly lead
     * to guest data loss or panic after migration).
     *
     * Layout of the KVMSlot.dirty_bmap:
     *
     *                   |<-------- bmap_npages -----------..>|
     *                                                     [1]
     *                     start_delta         size
     *  |----------------|-------------|------------------|------------|
     *  ^                ^             ^                               ^
     *  |                |             |                               |
     * start          bmap_start     (start)                         end
     * of memslot                                             of memslot
     *
     * [1] bmap_npages can be aligned to either 64 pages or the end of slot
     */

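    /*
     * Worked example (illustrative, assuming 4 KiB host pages, so
     * KVM_CLEAR_LOG_ALIGN is 256 KiB, and a slot larger than 64 pages):
     * clearing start=0x1000 and size=0x2000 gives bmap_start=0,
     * start_delta=1 page and bmap_npages=64, which takes the slow
     * path below.
     */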
    assert(bmap_start % BITS_PER_LONG == 0);
    /* We should never do log_clear before log_sync */
    assert(mem->dirty_bmap);
    if (start_delta || bmap_npages - size / psize) {
        /* Slow path - we need to manipulate a temp bitmap */
        bmap_clear = bitmap_new(bmap_npages);
        bitmap_copy_with_src_offset(bmap_clear, mem->dirty_bmap,
                                    bmap_start, start_delta + size / psize);
        /*
         * We need to fill up the holes at the start because that was not
         * specified by the caller and we extended the bitmap only for
         * 64-page alignment.
         */
        bitmap_clear(bmap_clear, 0, start_delta);
        d.dirty_bitmap = bmap_clear;
    } else {
        /*
         * Fast path - both start and size align well with BITS_PER_LONG
         * (or the end of memory slot)
         */
        d.dirty_bitmap = mem->dirty_bmap + BIT_WORD(bmap_start);
    }

    d.first_page = bmap_start;
    /* It should never overflow.  If it happens, say something */
    assert(bmap_npages <= UINT32_MAX);
    d.num_pages = bmap_npages;
    d.slot = mem->slot | (as_id << 16);

    ret = kvm_vm_ioctl(s, KVM_CLEAR_DIRTY_LOG, &d);
    if (ret < 0 && ret != -ENOENT) {
        error_report("%s: KVM_CLEAR_DIRTY_LOG failed, slot=%d, "
                     "start=0x%"PRIx64", size=0x%"PRIx32", errno=%d",
                     __func__, d.slot, (uint64_t)d.first_page,
                     (uint32_t)d.num_pages, ret);
    } else {
        ret = 0;
        trace_kvm_clear_dirty_log(d.slot, d.first_page, d.num_pages);
    }

    /*
     * After we have updated the remote dirty bitmap, we update the
     * cached bitmap as well for the memslot, then if another user
     * clears the same region we know we shouldn't clear it again on
     * the remote otherwise it's data loss as well.
     */
    bitmap_clear(mem->dirty_bmap, bmap_start + start_delta,
                 size / psize);
    /* This handles the NULL case well */
    g_free(bmap_clear);
    return ret;
}


/**
 * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range
 *
 * NOTE: this will be a no-op if we haven't enabled manual dirty log
 * protection in the host kernel because in that case this operation
 * will be done within log_sync().
 *
 * @kml:     the kvm memory listener
 * @section: the memory range to clear dirty bitmap
 */
static int kvm_physical_log_clear(KVMMemoryListener *kml,
                                  MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    uint64_t start, size, offset, count;
    KVMSlot *mem;
    int ret = 0, i;

    if (!s->manual_dirty_log_protect) {
        /* No need to do explicit clear */
        return ret;
    }

    start = section->offset_within_address_space;
    size = int128_get64(section->size);

    if (!size) {
        /* Nothing more we can do... */
        return ret;
    }

    kvm_slots_lock();

    for (i = 0; i < kml->nr_slots_allocated; i++) {
        mem = &kml->slots[i];
        /* Discard slots that are empty or do not overlap the section */
        if (!mem->memory_size ||
            mem->start_addr > start + size - 1 ||
            start > mem->start_addr + mem->memory_size - 1) {
            continue;
        }

        if (start >= mem->start_addr) {
            /* The slot starts before section or is aligned to it.  */
            offset = start - mem->start_addr;
            count = MIN(mem->memory_size - offset, size);
        } else {
            /* The slot starts after section.  */
            offset = 0;
            count = MIN(mem->memory_size, size - (mem->start_addr - start));
        }
        ret = kvm_log_clear_one_slot(mem, kml->as_id, offset, count);
        if (ret < 0) {
            break;
        }
    }

    kvm_slots_unlock();

    return ret;
}

static void kvm_coalesce_mmio_region(MemoryListener *listener,
                                     MemoryRegionSection *section,
                                     hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
                                       MemoryRegionSection *section,
                                       hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_coalesce_pio_add(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_pio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pio = 1;

        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_coalesce_pio_del(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_pio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pio = 1;

        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
}

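/* Return the value reported by KVM_CHECK_EXTENSION, or 0 if it fails. */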
int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        ret = 0;
    }

    return ret;
}

int kvm_vm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        /* VM wide version not implemented, use global one instead */
        ret = kvm_check_extension(s, extension);
    }

    return ret;
}

/*
 * We track the poisoned pages to be able to:
 * - replace them on VM reset
 * - block a migration for a VM with a poisoned page
 */
typedef struct HWPoisonPage {
    ram_addr_t ram_addr;
    QLIST_ENTRY(HWPoisonPage) list;
} HWPoisonPage;

static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
    QLIST_HEAD_INITIALIZER(hwpoison_page_list);

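/*
 * Reset handler: remap every recorded poisoned page so the guest gets
 * fresh RAM, then forget about them.
 */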
static void kvm_unpoison_all(void *param)
{
    HWPoisonPage *page, *next_page;

    QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
        QLIST_REMOVE(page, list);
        qemu_ram_remap(page->ram_addr);
        g_free(page);
    }
}

void kvm_hwpoison_page_add(ram_addr_t ram_addr)
{
    HWPoisonPage *page;

    QLIST_FOREACH(page, &hwpoison_page_list, list) {
        if (page->ram_addr == ram_addr) {
            return;
        }
    }
    page = g_new(HWPoisonPage, 1);
    page->ram_addr = ram_addr;
    QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
}

bool kvm_hwpoisoned_mem(void)
{
    return !QLIST_EMPTY(&hwpoison_page_list);
}

static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size)
{
    if (target_needs_bswap()) {
        /*
         * The kernel expects ioeventfd values in HOST_BIG_ENDIAN
         * endianness, but the memory core hands them in target endianness.
         * For example, PPC is always treated as big-endian even if running
         * on KVM and on PPC64LE.  Correct here, swapping back.
         */
        switch (size) {
        case 2:
            val = bswap16(val);
            break;
        case 4:
            val = bswap32(val);
            break;
        }
    }
    return val;
}

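/*
 * Register (or, with assign=false, deregister) an eventfd for a guest
 * MMIO address via KVM_IOEVENTFD.
 */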
static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val,
                                  bool assign, uint32_t size, bool datamatch)
{
    int ret;
    struct kvm_ioeventfd iofd = {
        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
        .addr = addr,
        .len = size,
        .flags = 0,
        .fd = fd,
    };

    trace_kvm_set_ioeventfd_mmio(fd, (uint64_t)addr, val, assign, size,
                                 datamatch);
    if (!kvm_enabled()) {
        return -ENOSYS;
    }

    if (datamatch) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);

    if (ret < 0) {
        return -errno;
    }

    return 0;
}

static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
                                 bool assign, uint32_t size, bool datamatch)
{
    struct kvm_ioeventfd kick = {
        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
        .addr = addr,
        .flags = KVM_IOEVENTFD_FLAG_PIO,
        .len = size,
        .fd = fd,
    };
    int r;
    trace_kvm_set_ioeventfd_pio(fd, addr, val, assign, size, datamatch);
    if (!kvm_enabled()) {
        return -ENOSYS;
    }
    if (datamatch) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
    if (r < 0) {
        return r;
    }
    return 0;
}


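/* Return the first capability in @list missing from the host, or NULL. */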
static const KVMCapabilityInfo *
kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
{
    while (list->name) {
        if (!kvm_check_extension(s, list->value)) {
            return list;
        }
        list++;
    }
    return NULL;
}

void kvm_set_max_memslot_size(hwaddr max_slot_size)
{
    g_assert(
        ROUND_UP(max_slot_size, qemu_real_host_page_size()) == max_slot_size
    );
    kvm_max_slot_size = max_slot_size;
}

static int kvm_set_memory_attributes(hwaddr start, uint64_t size, uint64_t attr)
{
    struct kvm_memory_attributes attrs;
    int r;

    assert((attr & kvm_supported_memory_attributes) == attr);
    attrs.attributes = attr;
    attrs.address = start;
    attrs.size = size;
    attrs.flags = 0;

    r = kvm_vm_ioctl(kvm_state, KVM_SET_MEMORY_ATTRIBUTES, &attrs);
    if (r) {
        error_report("failed to set memory (0x%" HWADDR_PRIx "+0x%" PRIx64 ") "
                     "with attr 0x%" PRIx64 " error '%s'",
                     start, size, attr, strerror(errno));
    }
    return r;
}

int kvm_set_memory_attributes_private(hwaddr start, uint64_t size)
{
    return kvm_set_memory_attributes(start, size, KVM_MEMORY_ATTRIBUTE_PRIVATE);
}

int kvm_set_memory_attributes_shared(hwaddr start, uint64_t size)
{
    return kvm_set_memory_attributes(start, size, 0);
}

/* Called with KVMMemoryListener.slots_lock held */
static void kvm_set_phys_mem(KVMMemoryListener *kml,
                             MemoryRegionSection *section, bool add)
{
    KVMSlot *mem;
    int err;
    MemoryRegion *mr = section->mr;
    bool writable = !mr->readonly && !mr->rom_device;
    hwaddr start_addr, size, slot_size, mr_offset;
    ram_addr_t ram_start_offset;
    void *ram;

    if (!memory_region_is_ram(mr)) {
        if (writable || !kvm_readonly_mem_allowed) {
            return;
        } else if (!mr->romd_mode) {
            /* If the memory device is not in romd_mode, then we actually want
             * to remove the kvm memory slot so all accesses will trap. */
            add = false;
        }
    }

    size = kvm_align_section(section, &start_addr);
    if (!size) {
        return;
    }

    /* The offset of the kvmslot within the memory region */
    mr_offset = section->offset_within_region + start_addr -
        section->offset_within_address_space;

    /* use aligned delta to align the ram address and offset */
    ram = memory_region_get_ram_ptr(mr) + mr_offset;
    ram_start_offset = memory_region_get_ram_addr(mr) + mr_offset;

    if (!add) {
        do {
            slot_size = MIN(kvm_max_slot_size, size);
            mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
            if (!mem) {
                return;
            }
            if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
                /*
                 * NOTE: this is only a best-effort sync of the dirty
                 * bits.  No matter whether we're using dirty log or
                 * dirty ring, it ignores two facts:
                 *
                 * (1) dirty bits can reside in hardware buffers (PML)
                 *
                 * (2) after we collected dirty bits here, pages can be
                 * dirtied again before we do the final
                 * KVM_SET_USER_MEMORY_REGION to remove the slot.
                 *
                 * Not easy.  Let's cross our fingers until it's fixed.
                 */
                if (kvm_state->kvm_dirty_ring_size) {
                    kvm_dirty_ring_reap_locked(kvm_state, NULL);
                    if (kvm_state->kvm_dirty_ring_with_bitmap) {
                        kvm_slot_sync_dirty_pages(mem);
                        kvm_slot_get_dirty_log(kvm_state, mem);
                    }
                } else {
                    kvm_slot_get_dirty_log(kvm_state, mem);
                }
                kvm_slot_sync_dirty_pages(mem);
            }

            /* unregister the slot */
            g_free(mem->dirty_bmap);
            mem->dirty_bmap = NULL;
            mem->memory_size = 0;
            mem->flags = 0;
            err = kvm_set_user_memory_region(kml, mem, false);
            if (err) {
                fprintf(stderr, "%s: error unregistering slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
            start_addr += slot_size;
            size -= slot_size;
            kml->nr_slots_used--;
        } while (size);
        return;
    }

    /* register the new slot */
    do {
        slot_size = MIN(kvm_max_slot_size, size);
        mem = kvm_alloc_slot(kml);
        mem->as_id = kml->as_id;
        mem->memory_size = slot_size;
        mem->start_addr = start_addr;
        mem->ram_start_offset = ram_start_offset;
        mem->ram = ram;
        mem->flags = kvm_mem_flags(mr);
        mem->guest_memfd = mr->ram_block->guest_memfd;
        mem->guest_memfd_offset = (uint8_t *)ram - mr->ram_block->host;

        kvm_slot_init_dirty_bitmap(mem);
        err = kvm_set_user_memory_region(kml, mem, true);
        if (err) {
            fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                    strerror(-err));
            abort();
        }

        if (memory_region_has_guest_memfd(mr)) {
            err = kvm_set_memory_attributes_private(start_addr, slot_size);
            if (err) {
                error_report("%s: failed to set memory attribute private: %s",
                             __func__, strerror(-err));
                exit(1);
            }
        }

        start_addr += slot_size;
        ram_start_offset += slot_size;
        ram += slot_size;
        size -= slot_size;
        kml->nr_slots_used++;
    } while (size);
}

static void *kvm_dirty_ring_reaper_thread(void *data)
{
    KVMState *s = data;
    struct KVMDirtyRingReaper *r = &s->reaper;

    rcu_register_thread();

    trace_kvm_dirty_ring_reaper("init");

    while (true) {
        r->reaper_state = KVM_DIRTY_RING_REAPER_WAIT;
        trace_kvm_dirty_ring_reaper("wait");
        /*
         * TODO: provide a smarter timeout rather than a constant?
         */
        sleep(1);

        /* keep sleeping so that the reaper does not interfere with dirtylimit */
        if (dirtylimit_in_service()) {
            continue;
        }

        trace_kvm_dirty_ring_reaper("wakeup");
        r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING;

        bql_lock();
        kvm_dirty_ring_reap(s, NULL);
        bql_unlock();

        r->reaper_iteration++;
    }

    g_assert_not_reached();
}

static void kvm_dirty_ring_reaper_init(KVMState *s)
{
    struct KVMDirtyRingReaper *r = &s->reaper;

    qemu_thread_create(&r->reaper_thr, "kvm-reaper",
                       kvm_dirty_ring_reaper_thread,
                       s, QEMU_THREAD_JOINABLE);
}

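/*
 * Enable the per-vcpu dirty ring when a size was requested.  Returns 0
 * on success or when falling back to the dirty bitmap, and a negative
 * errno on hard failure.
 */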
static int kvm_dirty_ring_init(KVMState *s)
{
    uint32_t ring_size = s->kvm_dirty_ring_size;
    uint64_t ring_bytes = ring_size * sizeof(struct kvm_dirty_gfn);
    unsigned int capability = KVM_CAP_DIRTY_LOG_RING;
    int ret;

    s->kvm_dirty_ring_size = 0;
    s->kvm_dirty_ring_bytes = 0;

    /* Bail if the dirty ring size isn't specified */
    if (!ring_size) {
        return 0;
    }

    /*
     * Read the max supported pages.  Fall back to dirty logging mode
     * if the dirty ring isn't supported.
     */
    ret = kvm_vm_check_extension(s, capability);
    if (ret <= 0) {
        capability = KVM_CAP_DIRTY_LOG_RING_ACQ_REL;
        ret = kvm_vm_check_extension(s, capability);
    }

    if (ret <= 0) {
        warn_report("KVM dirty ring not available, using bitmap method");
        return 0;
    }

    if (ring_bytes > ret) {
        error_report("KVM dirty ring size %" PRIu32 " too big "
                     "(maximum is %ld).  Please use a smaller value.",
                     ring_size, (long)ret / sizeof(struct kvm_dirty_gfn));
        return -EINVAL;
    }

    ret = kvm_vm_enable_cap(s, capability, 0, ring_bytes);
    if (ret) {
        error_report("Enabling of KVM dirty ring failed: %s. "
                     "Suggested minimum value is 1024.", strerror(-ret));
        return -EIO;
    }

    /* Enable the backup bitmap if it is supported */
    ret = kvm_vm_check_extension(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP);
    if (ret > 0) {
        ret = kvm_vm_enable_cap(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP, 0);
        if (ret) {
            error_report("Enabling of KVM dirty ring's backup bitmap failed: "
                         "%s. ", strerror(-ret));
            return -EIO;
        }

        s->kvm_dirty_ring_with_bitmap = true;
    }

    s->kvm_dirty_ring_size = ring_size;
    s->kvm_dirty_ring_bytes = ring_bytes;

    return 0;
}

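/*
 * region_add/region_del only queue updates; the memslot changes are
 * applied in kvm_region_commit(), where overlapping del/add pairs can
 * be made effectively atomic by inhibiting KVM ioctls.
 */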
static void kvm_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    KVMMemoryUpdate *update;

    update = g_new0(KVMMemoryUpdate, 1);
    update->section = *section;

    QSIMPLEQ_INSERT_TAIL(&kml->transaction_add, update, next);
}

static void kvm_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    KVMMemoryUpdate *update;

    update = g_new0(KVMMemoryUpdate, 1);
    update->section = *section;

    QSIMPLEQ_INSERT_TAIL(&kml->transaction_del, update, next);
}

static void kvm_region_commit(MemoryListener *listener)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener,
                                          listener);
    KVMMemoryUpdate *u1, *u2;
    bool need_inhibit = false;

    if (QSIMPLEQ_EMPTY(&kml->transaction_add) &&
        QSIMPLEQ_EMPTY(&kml->transaction_del)) {
        return;
    }

    /*
     * We have to be careful when regions to add overlap with ranges to remove.
     * We have to simulate atomic KVM memslot updates by making sure no ioctl()
     * is currently active.
     *
     * The lists are ordered by address, so it's easy to find overlaps.
     */
    u1 = QSIMPLEQ_FIRST(&kml->transaction_del);
    u2 = QSIMPLEQ_FIRST(&kml->transaction_add);
    while (u1 && u2) {
        Range r1, r2;

        range_init_nofail(&r1, u1->section.offset_within_address_space,
                          int128_get64(u1->section.size));
        range_init_nofail(&r2, u2->section.offset_within_address_space,
                          int128_get64(u2->section.size));

        if (range_overlaps_range(&r1, &r2)) {
            need_inhibit = true;
            break;
        }
        if (range_lob(&r1) < range_lob(&r2)) {
            u1 = QSIMPLEQ_NEXT(u1, next);
        } else {
            u2 = QSIMPLEQ_NEXT(u2, next);
        }
    }

    kvm_slots_lock();
    if (need_inhibit) {
        accel_ioctl_inhibit_begin();
    }

    /* Remove all memslots before adding the new ones. */
    while (!QSIMPLEQ_EMPTY(&kml->transaction_del)) {
        u1 = QSIMPLEQ_FIRST(&kml->transaction_del);
        QSIMPLEQ_REMOVE_HEAD(&kml->transaction_del, next);

        kvm_set_phys_mem(kml, &u1->section, false);
        memory_region_unref(u1->section.mr);

        g_free(u1);
    }
    while (!QSIMPLEQ_EMPTY(&kml->transaction_add)) {
        u1 = QSIMPLEQ_FIRST(&kml->transaction_add);
        QSIMPLEQ_REMOVE_HEAD(&kml->transaction_add, next);

        memory_region_ref(u1->section.mr);
        kvm_set_phys_mem(kml, &u1->section, true);

        g_free(u1);
    }

    if (need_inhibit) {
        accel_ioctl_inhibit_end();
    }
    kvm_slots_unlock();
}

static void kvm_log_sync(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);

    kvm_slots_lock();
    kvm_physical_sync_dirty_bitmap(kml, section);
    kvm_slots_unlock();
}

static void kvm_log_sync_global(MemoryListener *l, bool last_stage)
{
    KVMMemoryListener *kml = container_of(l, KVMMemoryListener, listener);
    KVMState *s = kvm_state;
    KVMSlot *mem;
    int i;

    /* Flush all kernel dirty addresses into KVMSlot dirty bitmap */
    kvm_dirty_ring_flush();

    kvm_slots_lock();
    for (i = 0; i < kml->nr_slots_allocated; i++) {
        mem = &kml->slots[i];
        if (mem->memory_size && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
            kvm_slot_sync_dirty_pages(mem);

            if (s->kvm_dirty_ring_with_bitmap && last_stage &&
                kvm_slot_get_dirty_log(s, mem)) {
                kvm_slot_sync_dirty_pages(mem);
            }

            /*
             * This is not needed by KVM_GET_DIRTY_LOG because the
             * ioctl will unconditionally overwrite the whole region.
             * However kvm dirty ring has no such side effect.
             */
            kvm_slot_reset_dirty_pages(mem);
        }
    }
    kvm_slots_unlock();
}

static void kvm_log_clear(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    r = kvm_physical_log_clear(kml, section);
    if (r < 0) {
        error_report_once("%s: kvm log clear failed: mr=%s "
                          "offset=%"HWADDR_PRIx" size=%"PRIx64, __func__,
                          section->mr->name, section->offset_within_region,
                          int128_get64(section->size));
        abort();
    }
}
1883
kvm_mem_ioeventfd_add(MemoryListener * listener,MemoryRegionSection * section,bool match_data,uint64_t data,EventNotifier * e)1884 static void kvm_mem_ioeventfd_add(MemoryListener *listener,
1885 MemoryRegionSection *section,
1886 bool match_data, uint64_t data,
1887 EventNotifier *e)
1888 {
1889 int fd = event_notifier_get_fd(e);
1890 int r;
1891
1892 r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
1893 data, true, int128_get64(section->size),
1894 match_data);
1895 if (r < 0) {
1896 fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
1897 __func__, strerror(-r), -r);
1898 abort();
1899 }
1900 }
1901
1902 static void kvm_mem_ioeventfd_del(MemoryListener *listener,
1903 MemoryRegionSection *section,
1904 bool match_data, uint64_t data,
1905 EventNotifier *e)
1906 {
1907 int fd = event_notifier_get_fd(e);
1908 int r;
1909
1910 r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
1911 data, false, int128_get64(section->size),
1912 match_data);
1913 if (r < 0) {
1914 fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
1915 __func__, strerror(-r), -r);
1916 abort();
1917 }
1918 }
1919
1920 static void kvm_io_ioeventfd_add(MemoryListener *listener,
1921 MemoryRegionSection *section,
1922 bool match_data, uint64_t data,
1923 EventNotifier *e)
1924 {
1925 int fd = event_notifier_get_fd(e);
1926 int r;
1927
1928 r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
1929 data, true, int128_get64(section->size),
1930 match_data);
1931 if (r < 0) {
1932 fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
1933 __func__, strerror(-r), -r);
1934 abort();
1935 }
1936 }
1937
1938 static void kvm_io_ioeventfd_del(MemoryListener *listener,
1939 MemoryRegionSection *section,
1940 bool match_data, uint64_t data,
1941 EventNotifier *e)
1942
1943 {
1944 int fd = event_notifier_get_fd(e);
1945 int r;
1946
1947 r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
1948 data, false, int128_get64(section->size),
1949 match_data);
1950 if (r < 0) {
1951 fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
1952 __func__, strerror(-r), -r);
1953 abort();
1954 }
1955 }
1956
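/*
 * Register a KVMMemoryListener for the given address space: pre-allocate
 * the default number of memslots, wire up the listener callbacks (using
 * the dirty-ring or dirty-log sync variants as appropriate), and record
 * the address space in the first free KVMAs slot.
 */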
1957 void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
1958 AddressSpace *as, int as_id, const char *name)
1959 {
1960 int i;
1961
1962 kml->as_id = as_id;
1963
1964 kvm_slots_grow(kml, KVM_MEMSLOTS_NR_ALLOC_DEFAULT);
1965
1966 QSIMPLEQ_INIT(&kml->transaction_add);
1967 QSIMPLEQ_INIT(&kml->transaction_del);
1968
1969 kml->listener.region_add = kvm_region_add;
1970 kml->listener.region_del = kvm_region_del;
1971 kml->listener.commit = kvm_region_commit;
1972 kml->listener.log_start = kvm_log_start;
1973 kml->listener.log_stop = kvm_log_stop;
1974 kml->listener.priority = MEMORY_LISTENER_PRIORITY_ACCEL;
1975 kml->listener.name = name;
1976
1977 if (s->kvm_dirty_ring_size) {
1978 kml->listener.log_sync_global = kvm_log_sync_global;
1979 } else {
1980 kml->listener.log_sync = kvm_log_sync;
1981 kml->listener.log_clear = kvm_log_clear;
1982 }
1983
1984 memory_listener_register(&kml->listener, as);
1985
1986 for (i = 0; i < s->nr_as; ++i) {
1987 if (!s->as[i].as) {
1988 s->as[i].as = as;
1989 s->as[i].ml = kml;
1990 break;
1991 }
1992 }
1993 }
1994
1995 static MemoryListener kvm_io_listener = {
1996 .name = "kvm-io",
1997 .coalesced_io_add = kvm_coalesce_pio_add,
1998 .coalesced_io_del = kvm_coalesce_pio_del,
1999 .eventfd_add = kvm_io_ioeventfd_add,
2000 .eventfd_del = kvm_io_ioeventfd_del,
2001 .priority = MEMORY_LISTENER_PRIORITY_DEV_BACKEND,
2002 };
2003
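/*
 * Set the level of an in-kernel irqchip input line.  Returns 1 when the
 * kernel only supports KVM_IRQ_LINE, otherwise the delivery status
 * reported by KVM_IRQ_LINE_STATUS.
 */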
2004 int kvm_set_irq(KVMState *s, int irq, int level)
2005 {
2006 struct kvm_irq_level event;
2007 int ret;
2008
2009 assert(kvm_async_interrupts_enabled());
2010
2011 event.level = level;
2012 event.irq = irq;
2013 ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event);
2014 if (ret < 0) {
2015 perror("kvm_set_irq");
2016 abort();
2017 }
2018
2019 return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
2020 }
2021
2022 #ifdef KVM_CAP_IRQ_ROUTING
2023 typedef struct KVMMSIRoute {
2024 struct kvm_irq_routing_entry kroute;
2025 QTAILQ_ENTRY(KVMMSIRoute) entry;
2026 } KVMMSIRoute;
2027
2028 static void set_gsi(KVMState *s, unsigned int gsi)
2029 {
2030 set_bit(gsi, s->used_gsi_bitmap);
2031 }
2032
2033 static void clear_gsi(KVMState *s, unsigned int gsi)
2034 {
2035 clear_bit(gsi, s->used_gsi_bitmap);
2036 }
2037
2038 void kvm_init_irq_routing(KVMState *s)
2039 {
2040 int gsi_count;
2041
2042 gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1;
2043 if (gsi_count > 0) {
2044 /* Round up so we can search ints using ffs */
2045 s->used_gsi_bitmap = bitmap_new(gsi_count);
2046 s->gsi_count = gsi_count;
2047 }
2048
2049 s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
2050 s->nr_allocated_irq_routes = 0;
2051
2052 kvm_arch_init_irq_routing(s);
2053 }
2054
2055 void kvm_irqchip_commit_routes(KVMState *s)
2056 {
2057 int ret;
2058
2059 if (kvm_gsi_direct_mapping()) {
2060 return;
2061 }
2062
2063 if (!kvm_gsi_routing_enabled()) {
2064 return;
2065 }
2066
2067 s->irq_routes->flags = 0;
2068 trace_kvm_irqchip_commit_routes();
2069 ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
2070 assert(ret == 0);
2071 }
2072
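/*
 * Append an entry to the GSI routing table, growing the backing array
 * geometrically (doubling, with a floor of 64 entries) as needed, and
 * mark the entry's GSI as used.
 */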
2073 void kvm_add_routing_entry(KVMState *s,
2074 struct kvm_irq_routing_entry *entry)
2075 {
2076 struct kvm_irq_routing_entry *new;
2077 int n, size;
2078
2079 if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
2080 n = s->nr_allocated_irq_routes * 2;
2081 if (n < 64) {
2082 n = 64;
2083 }
2084 size = sizeof(struct kvm_irq_routing);
2085 size += n * sizeof(*new);
2086 s->irq_routes = g_realloc(s->irq_routes, size);
2087 s->nr_allocated_irq_routes = n;
2088 }
2089 n = s->irq_routes->nr++;
2090 new = &s->irq_routes->entries[n];
2091
2092 *new = *entry;
2093
2094 set_gsi(s, entry->gsi);
2095 }
2096
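/*
 * Replace the routing entry whose GSI matches @new_entry.  Returns 0 on
 * success (including the no-op case of an identical entry), or -ESRCH
 * if no entry with that GSI exists.
 */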
2097 static int kvm_update_routing_entry(KVMState *s,
2098 struct kvm_irq_routing_entry *new_entry)
2099 {
2100 struct kvm_irq_routing_entry *entry;
2101 int n;
2102
2103 for (n = 0; n < s->irq_routes->nr; n++) {
2104 entry = &s->irq_routes->entries[n];
2105 if (entry->gsi != new_entry->gsi) {
2106 continue;
2107 }
2108
2109         if (!memcmp(entry, new_entry, sizeof *entry)) {
2110 return 0;
2111 }
2112
2113 *entry = *new_entry;
2114
2115 return 0;
2116 }
2117
2118 return -ESRCH;
2119 }
2120
2121 void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
2122 {
2123 struct kvm_irq_routing_entry e = {};
2124
2125 assert(pin < s->gsi_count);
2126
2127 e.gsi = irq;
2128 e.type = KVM_IRQ_ROUTING_IRQCHIP;
2129 e.flags = 0;
2130 e.u.irqchip.irqchip = irqchip;
2131 e.u.irqchip.pin = pin;
2132 kvm_add_routing_entry(s, &e);
2133 }
2134
2135 void kvm_irqchip_release_virq(KVMState *s, int virq)
2136 {
2137 struct kvm_irq_routing_entry *e;
2138 int i;
2139
2140 if (kvm_gsi_direct_mapping()) {
2141 return;
2142 }
2143
2144 for (i = 0; i < s->irq_routes->nr; i++) {
2145 e = &s->irq_routes->entries[i];
2146 if (e->gsi == virq) {
2147 s->irq_routes->nr--;
2148 *e = s->irq_routes->entries[s->irq_routes->nr];
2149 }
2150 }
2151 clear_gsi(s, virq);
2152 kvm_arch_release_virq_post(virq);
2153 trace_kvm_irqchip_release_virq(virq);
2154 }
2155
2156 void kvm_irqchip_add_change_notifier(Notifier *n)
2157 {
2158 notifier_list_add(&kvm_irqchip_change_notifiers, n);
2159 }
2160
2161 void kvm_irqchip_remove_change_notifier(Notifier *n)
2162 {
2163 notifier_remove(n);
2164 }
2165
2166 void kvm_irqchip_change_notify(void)
2167 {
2168 notifier_list_notify(&kvm_irqchip_change_notifiers, NULL);
2169 }
2170
2171 int kvm_irqchip_get_virq(KVMState *s)
2172 {
2173 int next_virq;
2174
2175 /* Return the lowest unused GSI in the bitmap */
2176 next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count);
2177 if (next_virq >= s->gsi_count) {
2178 return -ENOSPC;
2179 } else {
2180 return next_virq;
2181 }
2182 }
2183
2184 int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
2185 {
2186 struct kvm_msi msi;
2187
2188 msi.address_lo = (uint32_t)msg.address;
2189 msi.address_hi = msg.address >> 32;
2190 msi.data = le32_to_cpu(msg.data);
2191 msi.flags = 0;
2192 memset(msi.pad, 0, sizeof(msi.pad));
2193
2194 return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
2195 }
2196
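/*
 * Allocate a virq and install an MSI routing entry for the given vector.
 * Returns the virq on success (or, with direct GSI mapping, the GSI
 * derived from the MSI data), and a negative errno when no GSI is free,
 * routing is unavailable, or the arch-specific fixup rejects the route.
 */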
2197 int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
2198 {
2199 struct kvm_irq_routing_entry kroute = {};
2200 int virq;
2201 KVMState *s = c->s;
2202 MSIMessage msg = {0, 0};
2203
2204 if (pci_available && dev) {
2205 msg = pci_get_msi_message(dev, vector);
2206 }
2207
2208 if (kvm_gsi_direct_mapping()) {
2209 return kvm_arch_msi_data_to_gsi(msg.data);
2210 }
2211
2212 if (!kvm_gsi_routing_enabled()) {
2213 return -ENOSYS;
2214 }
2215
2216 virq = kvm_irqchip_get_virq(s);
2217 if (virq < 0) {
2218 return virq;
2219 }
2220
2221 kroute.gsi = virq;
2222 kroute.type = KVM_IRQ_ROUTING_MSI;
2223 kroute.flags = 0;
2224 kroute.u.msi.address_lo = (uint32_t)msg.address;
2225 kroute.u.msi.address_hi = msg.address >> 32;
2226 kroute.u.msi.data = le32_to_cpu(msg.data);
2227 if (pci_available && kvm_msi_devid_required()) {
2228 kroute.flags = KVM_MSI_VALID_DEVID;
2229 kroute.u.msi.devid = pci_requester_id(dev);
2230 }
2231 if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
2232 kvm_irqchip_release_virq(s, virq);
2233 return -EINVAL;
2234 }
2235
2236 if (s->irq_routes->nr < s->gsi_count) {
2237 trace_kvm_irqchip_add_msi_route(dev ? dev->name : (char *)"N/A",
2238 vector, virq);
2239
2240 kvm_add_routing_entry(s, &kroute);
2241 kvm_arch_add_msi_route_post(&kroute, vector, dev);
2242 c->changes++;
2243 } else {
2244 kvm_irqchip_release_virq(s, virq);
2245 return -ENOSPC;
2246 }
2247
2248 return virq;
2249 }
2250
2251 int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
2252 PCIDevice *dev)
2253 {
2254 struct kvm_irq_routing_entry kroute = {};
2255
2256 if (kvm_gsi_direct_mapping()) {
2257 return 0;
2258 }
2259
2260 if (!kvm_irqchip_in_kernel()) {
2261 return -ENOSYS;
2262 }
2263
2264 kroute.gsi = virq;
2265 kroute.type = KVM_IRQ_ROUTING_MSI;
2266 kroute.flags = 0;
2267 kroute.u.msi.address_lo = (uint32_t)msg.address;
2268 kroute.u.msi.address_hi = msg.address >> 32;
2269 kroute.u.msi.data = le32_to_cpu(msg.data);
2270 if (pci_available && kvm_msi_devid_required()) {
2271 kroute.flags = KVM_MSI_VALID_DEVID;
2272 kroute.u.msi.devid = pci_requester_id(dev);
2273 }
2274 if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
2275 return -EINVAL;
2276 }
2277
2278 trace_kvm_irqchip_update_msi_route(virq);
2279
2280 return kvm_update_routing_entry(s, &kroute);
2281 }
2282
2283 static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
2284 EventNotifier *resample, int virq,
2285 bool assign)
2286 {
2287 int fd = event_notifier_get_fd(event);
2288 int rfd = resample ? event_notifier_get_fd(resample) : -1;
2289
2290 struct kvm_irqfd irqfd = {
2291 .fd = fd,
2292 .gsi = virq,
2293 .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
2294 };
2295
2296 if (rfd != -1) {
2297 assert(assign);
2298 if (kvm_irqchip_is_split()) {
2299 /*
2300 * When the slow irqchip (e.g. IOAPIC) is in the
2301 * userspace, KVM kernel resamplefd will not work because
2302 * the EOI of the interrupt will be delivered to userspace
2303 * instead, so the KVM kernel resamplefd kick will be
2304              * skipped.  The userspace here mimics what the kernel
2305              * provides with resamplefd: remember the resamplefd and
2306              * kick it when we receive the EOI of this IRQ.
2307 *
2308 * This is hackery because IOAPIC is mostly bypassed
2309 * (except EOI broadcasts) when irqfd is used. However
2310 * this can bring much performance back for split irqchip
2311 * with INTx IRQs (for VFIO, this gives 93% perf of the
2312              * full fast path, which is a 46% perf boost compared to
2313 * the INTx slow path).
2314 */
2315 kvm_resample_fd_insert(virq, resample);
2316 } else {
2317 irqfd.flags |= KVM_IRQFD_FLAG_RESAMPLE;
2318 irqfd.resamplefd = rfd;
2319 }
2320 } else if (!assign) {
2321 if (kvm_irqchip_is_split()) {
2322 kvm_resample_fd_remove(virq);
2323 }
2324 }
2325
2326 return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
2327 }
2328
2329 #else /* !KVM_CAP_IRQ_ROUTING */
2330
2331 void kvm_init_irq_routing(KVMState *s)
2332 {
2333 }
2334
2335 void kvm_irqchip_release_virq(KVMState *s, int virq)
2336 {
2337 }
2338
2339 int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
2340 {
2341 abort();
2342 }
2343
2344 int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
2345 {
2346 return -ENOSYS;
2347 }
2348
2349 int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
2350 {
2351 return -ENOSYS;
2352 }
2353
2354 int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
2355 {
2356 return -ENOSYS;
2357 }
2358
2359 static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
2360 EventNotifier *resample, int virq,
2361 bool assign)
2362 {
2363 abort();
2364 }
2365
2366 int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
2367 {
2368 return -ENOSYS;
2369 }
2370 #endif /* !KVM_CAP_IRQ_ROUTING */
2371
2372 int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
2373 EventNotifier *rn, int virq)
2374 {
2375 return kvm_irqchip_assign_irqfd(s, n, rn, virq, true);
2376 }
2377
2378 int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
2379 int virq)
2380 {
2381 return kvm_irqchip_assign_irqfd(s, n, NULL, virq, false);
2382 }
2383
2384 int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
2385 EventNotifier *rn, qemu_irq irq)
2386 {
2387 gpointer key, gsi;
2388 gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
2389
2390 if (!found) {
2391 return -ENXIO;
2392 }
2393 return kvm_irqchip_add_irqfd_notifier_gsi(s, n, rn, GPOINTER_TO_INT(gsi));
2394 }
2395
2396 int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n,
2397 qemu_irq irq)
2398 {
2399 gpointer key, gsi;
2400 gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
2401
2402 if (!found) {
2403 return -ENXIO;
2404 }
2405 return kvm_irqchip_remove_irqfd_notifier_gsi(s, n, GPOINTER_TO_INT(gsi));
2406 }
2407
2408 void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi)
2409 {
2410 g_hash_table_insert(s->gsimap, irq, GINT_TO_POINTER(gsi));
2411 }
2412
2413 static void kvm_irqchip_create(KVMState *s)
2414 {
2415 int ret;
2416
2417 assert(s->kernel_irqchip_split != ON_OFF_AUTO_AUTO);
2418 if (kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
2419 ;
2420 } else if (kvm_check_extension(s, KVM_CAP_S390_IRQCHIP)) {
2421 ret = kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0);
2422 if (ret < 0) {
2423 fprintf(stderr, "Enable kernel irqchip failed: %s\n", strerror(-ret));
2424 exit(1);
2425 }
2426 } else {
2427 return;
2428 }
2429
2430 if (kvm_check_extension(s, KVM_CAP_IRQFD) <= 0) {
2431 fprintf(stderr, "kvm: irqfd not implemented\n");
2432 exit(1);
2433 }
2434
2435     /* First probe and see if there's an arch-specific hook to create the
2436 * in-kernel irqchip for us */
2437 ret = kvm_arch_irqchip_create(s);
2438 if (ret == 0) {
2439 if (s->kernel_irqchip_split == ON_OFF_AUTO_ON) {
2440 error_report("Split IRQ chip mode not supported.");
2441 exit(1);
2442 } else {
2443 ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
2444 }
2445 }
2446 if (ret < 0) {
2447 fprintf(stderr, "Create kernel irqchip failed: %s\n", strerror(-ret));
2448 exit(1);
2449 }
2450
2451 kvm_kernel_irqchip = true;
2452 /* If we have an in-kernel IRQ chip then we must have asynchronous
2453 * interrupt delivery (though the reverse is not necessarily true)
2454 */
2455 kvm_async_interrupts_allowed = true;
2456 kvm_halt_in_kernel_allowed = true;
2457
2458 kvm_init_irq_routing(s);
2459
2460 s->gsimap = g_hash_table_new(g_direct_hash, g_direct_equal);
2461 }
2462
2463 /* Find number of supported CPUs using the recommended
2464 * procedure from the kernel API documentation to cope with
2465 * older kernels that may be missing capabilities.
2466 */
2467 static int kvm_recommended_vcpus(KVMState *s)
2468 {
2469 int ret = kvm_vm_check_extension(s, KVM_CAP_NR_VCPUS);
2470 return (ret) ? ret : 4;
2471 }
2472
2473 static int kvm_max_vcpus(KVMState *s)
2474 {
2475 int ret = kvm_vm_check_extension(s, KVM_CAP_MAX_VCPUS);
2476 return (ret) ? ret : kvm_recommended_vcpus(s);
2477 }
2478
2479 static int kvm_max_vcpu_id(KVMState *s)
2480 {
2481 int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPU_ID);
2482 return (ret) ? ret : kvm_max_vcpus(s);
2483 }
2484
2485 bool kvm_vcpu_id_is_valid(int vcpu_id)
2486 {
2487 KVMState *s = KVM_STATE(current_accel());
2488 return vcpu_id >= 0 && vcpu_id < kvm_max_vcpu_id(s);
2489 }
2490
2491 bool kvm_dirty_ring_enabled(void)
2492 {
2493 return kvm_state && kvm_state->kvm_dirty_ring_size;
2494 }
2495
2496 static void query_stats_cb(StatsResultList **result, StatsTarget target,
2497 strList *names, strList *targets, Error **errp);
2498 static void query_stats_schemas_cb(StatsSchemaList **result, Error **errp);
2499
2500 uint32_t kvm_dirty_ring_size(void)
2501 {
2502 return kvm_state->kvm_dirty_ring_size;
2503 }
2504
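/*
 * Issue KVM_CREATE_VM, retrying while the ioctl is interrupted, and
 * print arch-specific hints for the common -EINVAL failure modes on
 * s390x and PPC hosts.
 */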
2505 static int do_kvm_create_vm(KVMState *s, int type)
2506 {
2507 int ret;
2508
2509 do {
2510 ret = kvm_ioctl(s, KVM_CREATE_VM, type);
2511 } while (ret == -EINTR);
2512
2513 if (ret < 0) {
2514 error_report("ioctl(KVM_CREATE_VM) failed: %s", strerror(-ret));
2515
2516 #ifdef TARGET_S390X
2517 if (ret == -EINVAL) {
2518 error_printf("Host kernel setup problem detected."
2519 " Please verify:\n");
2520 error_printf("- for kernels supporting the"
2521 " switch_amode or user_mode parameters, whether");
2522 error_printf(" user space is running in primary address space\n");
2523 error_printf("- for kernels supporting the vm.allocate_pgste"
2524 " sysctl, whether it is enabled\n");
2525 }
2526 #elif defined(TARGET_PPC)
2527 if (ret == -EINVAL) {
2528 error_printf("PPC KVM module is not loaded. Try modprobe kvm_%s.\n",
2529 (type == 2) ? "pr" : "hv");
2530 }
2531 #endif
2532 }
2533
2534 return ret;
2535 }
2536
2537 static int find_kvm_machine_type(MachineState *ms)
2538 {
2539 MachineClass *mc = MACHINE_GET_CLASS(ms);
2540 int type;
2541
2542 if (object_property_find(OBJECT(current_machine), "kvm-type")) {
2543 g_autofree char *kvm_type;
2544 kvm_type = object_property_get_str(OBJECT(current_machine),
2545 "kvm-type",
2546 &error_abort);
2547 type = mc->kvm_type(ms, kvm_type);
2548 } else if (mc->kvm_type) {
2549 type = mc->kvm_type(ms, NULL);
2550 } else {
2551 type = kvm_arch_get_default_type(ms);
2552 }
2553 return type;
2554 }
2555
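/*
 * Prefer the dirty ring when the kernel supports it; otherwise fall
 * back to dirty logging, enabling KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
 * when available so dirty pages can be cleared manually.
 */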
2556 static int kvm_setup_dirty_ring(KVMState *s)
2557 {
2558 uint64_t dirty_log_manual_caps;
2559 int ret;
2560
2561 /*
2562 * Enable KVM dirty ring if supported, otherwise fall back to
2563 * dirty logging mode
2564 */
2565 ret = kvm_dirty_ring_init(s);
2566 if (ret < 0) {
2567 return ret;
2568 }
2569
2570 /*
2571 * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is not needed when dirty ring is
2572 * enabled. More importantly, KVM_DIRTY_LOG_INITIALLY_SET will assume no
2573      * page is wr-protected initially, which conflicts with how the kvm dirty
2574      * ring is used - the dirty ring requires all pages to be wr-protected at
2575      * the very beginning.  Enabling it with the dirty ring causes data corruption.
2576 *
2577 * TODO: Without KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 and kvm clear dirty log,
2578 * we may expect a higher stall time when starting the migration. In the
2579 * future we can enable KVM_CLEAR_DIRTY_LOG to work with dirty ring too:
2580 * instead of clearing dirty bit, it can be a way to explicitly wr-protect
2581 * guest pages.
2582 */
2583 if (!s->kvm_dirty_ring_size) {
2584 dirty_log_manual_caps =
2585 kvm_check_extension(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
2586 dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
2587 KVM_DIRTY_LOG_INITIALLY_SET);
2588 s->manual_dirty_log_protect = dirty_log_manual_caps;
2589 if (dirty_log_manual_caps) {
2590 ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0,
2591 dirty_log_manual_caps);
2592 if (ret) {
2593 warn_report("Trying to enable capability %"PRIu64" of "
2594 "KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 but failed. "
2595 "Falling back to the legacy mode. ",
2596 dirty_log_manual_caps);
2597 s->manual_dirty_log_protect = 0;
2598 }
2599 }
2600 }
2601
2602 return 0;
2603 }
2604
2605 static int kvm_init(AccelState *as, MachineState *ms)
2606 {
2607 MachineClass *mc = MACHINE_GET_CLASS(ms);
2608 static const char upgrade_note[] =
2609 "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
2610 "(see http://sourceforge.net/projects/kvm).\n";
2611 const struct {
2612 const char *name;
2613 int num;
2614 } num_cpus[] = {
2615 { "SMP", ms->smp.cpus },
2616 { "hotpluggable", ms->smp.max_cpus },
2617 { /* end of list */ }
2618 }, *nc = num_cpus;
2619 int soft_vcpus_limit, hard_vcpus_limit;
2620 KVMState *s = KVM_STATE(as);
2621 const KVMCapabilityInfo *missing_cap;
2622 int ret;
2623 int type;
2624
2625 qemu_mutex_init(&kml_slots_lock);
2626
2627 /*
2628 * On systems where the kernel can support different base page
2629 * sizes, host page size may be different from TARGET_PAGE_SIZE,
2630 * even with KVM. TARGET_PAGE_SIZE is assumed to be the minimum
2631 * page size for the system though.
2632 */
2633 assert(TARGET_PAGE_SIZE <= qemu_real_host_page_size());
2634
2635 s->sigmask_len = 8;
2636 accel_blocker_init();
2637
2638 #ifdef TARGET_KVM_HAVE_GUEST_DEBUG
2639 QTAILQ_INIT(&s->kvm_sw_breakpoints);
2640 #endif
2641 QLIST_INIT(&s->kvm_parked_vcpus);
2642 s->fd = qemu_open_old(s->device ?: "/dev/kvm", O_RDWR);
2643 if (s->fd == -1) {
2644 error_report("Could not access KVM kernel module: %m");
2645 ret = -errno;
2646 goto err;
2647 }
2648
2649 ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
2650 if (ret < KVM_API_VERSION) {
2651 if (ret >= 0) {
2652 ret = -EINVAL;
2653 }
2654 error_report("kvm version too old");
2655 goto err;
2656 }
2657
2658 if (ret > KVM_API_VERSION) {
2659 ret = -EINVAL;
2660 error_report("kvm version not supported");
2661 goto err;
2662 }
2663
2664 kvm_immediate_exit = kvm_check_extension(s, KVM_CAP_IMMEDIATE_EXIT);
2665 s->nr_slots_max = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);
2666
2667 /* If unspecified, use the default value */
2668 if (!s->nr_slots_max) {
2669 s->nr_slots_max = KVM_MEMSLOTS_NR_MAX_DEFAULT;
2670 }
2671
2672 type = find_kvm_machine_type(ms);
2673 if (type < 0) {
2674 ret = -EINVAL;
2675 goto err;
2676 }
2677
2678 ret = do_kvm_create_vm(s, type);
2679 if (ret < 0) {
2680 goto err;
2681 }
2682
2683 s->vmfd = ret;
2684
2685 s->nr_as = kvm_vm_check_extension(s, KVM_CAP_MULTI_ADDRESS_SPACE);
2686 if (s->nr_as <= 1) {
2687 s->nr_as = 1;
2688 }
2689 s->as = g_new0(struct KVMAs, s->nr_as);
2690
2691 /* check the vcpu limits */
2692 soft_vcpus_limit = kvm_recommended_vcpus(s);
2693 hard_vcpus_limit = kvm_max_vcpus(s);
2694
2695 while (nc->name) {
2696 if (nc->num > soft_vcpus_limit) {
2697 warn_report("Number of %s cpus requested (%d) exceeds "
2698 "the recommended cpus supported by KVM (%d)",
2699 nc->name, nc->num, soft_vcpus_limit);
2700
2701 if (nc->num > hard_vcpus_limit) {
2702 error_report("Number of %s cpus requested (%d) exceeds "
2703 "the maximum cpus supported by KVM (%d)",
2704 nc->name, nc->num, hard_vcpus_limit);
2705 exit(1);
2706 }
2707 }
2708 nc++;
2709 }
2710
2711 missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
2712 if (!missing_cap) {
2713 missing_cap =
2714 kvm_check_extension_list(s, kvm_arch_required_capabilities);
2715 }
2716 if (missing_cap) {
2717 ret = -EINVAL;
2718 error_report("kvm does not support %s", missing_cap->name);
2719 error_printf("%s", upgrade_note);
2720 goto err;
2721 }
2722
2723 s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
2724 s->coalesced_pio = s->coalesced_mmio &&
2725 kvm_check_extension(s, KVM_CAP_COALESCED_PIO);
2726
2727 ret = kvm_setup_dirty_ring(s);
2728 if (ret < 0) {
2729 goto err;
2730 }
2731
2732 #ifdef KVM_CAP_VCPU_EVENTS
2733 s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
2734 #endif
2735 s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE);
2736
2737 s->irq_set_ioctl = KVM_IRQ_LINE;
2738 if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
2739 s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
2740 }
2741
2742 kvm_readonly_mem_allowed =
2743 (kvm_vm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);
2744
2745 kvm_resamplefds_allowed =
2746 (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0);
2747
2748 kvm_vm_attributes_allowed =
2749 (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0);
2750
2751 #ifdef TARGET_KVM_HAVE_GUEST_DEBUG
2752 kvm_has_guest_debug =
2753 (kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG) > 0);
2754 #endif
2755
2756 kvm_sstep_flags = 0;
2757 if (kvm_has_guest_debug) {
2758 kvm_sstep_flags = SSTEP_ENABLE;
2759
2760 #if defined TARGET_KVM_HAVE_GUEST_DEBUG
2761 int guest_debug_flags =
2762 kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG2);
2763
2764 if (guest_debug_flags & KVM_GUESTDBG_BLOCKIRQ) {
2765 kvm_sstep_flags |= SSTEP_NOIRQ;
2766 }
2767 #endif
2768 }
2769
2770 kvm_state = s;
2771
2772 ret = kvm_arch_init(ms, s);
2773 if (ret < 0) {
2774 goto err;
2775 }
2776
2777 kvm_supported_memory_attributes = kvm_vm_check_extension(s, KVM_CAP_MEMORY_ATTRIBUTES);
2778 kvm_guest_memfd_supported =
2779 kvm_check_extension(s, KVM_CAP_GUEST_MEMFD) &&
2780 kvm_check_extension(s, KVM_CAP_USER_MEMORY2) &&
2781 (kvm_supported_memory_attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE);
2782 kvm_pre_fault_memory_supported = kvm_vm_check_extension(s, KVM_CAP_PRE_FAULT_MEMORY);
2783
2784 if (s->kernel_irqchip_split == ON_OFF_AUTO_AUTO) {
2785 s->kernel_irqchip_split = mc->default_kernel_irqchip_split ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
2786 }
2787
2788 qemu_register_reset(kvm_unpoison_all, NULL);
2789
2790 if (s->kernel_irqchip_allowed) {
2791 kvm_irqchip_create(s);
2792 }
2793
2794 s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
2795 s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
2796 s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region;
2797 s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region;
2798
2799 kvm_memory_listener_register(s, &s->memory_listener,
2800 &address_space_memory, 0, "kvm-memory");
2801 memory_listener_register(&kvm_io_listener,
2802 &address_space_io);
2803
2804 s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
2805 if (!s->sync_mmu) {
2806 ret = ram_block_discard_disable(true);
2807 assert(!ret);
2808 }
2809
2810 if (s->kvm_dirty_ring_size) {
2811 kvm_dirty_ring_reaper_init(s);
2812 }
2813
2814 if (kvm_check_extension(kvm_state, KVM_CAP_BINARY_STATS_FD)) {
2815 add_stats_callbacks(STATS_PROVIDER_KVM, query_stats_cb,
2816 query_stats_schemas_cb);
2817 }
2818
2819 return 0;
2820
2821 err:
2822 assert(ret < 0);
2823 if (s->vmfd >= 0) {
2824 close(s->vmfd);
2825 }
2826 if (s->fd != -1) {
2827 close(s->fd);
2828 }
2829 g_free(s->as);
2830 g_free(s->memory_listener.slots);
2831
2832 return ret;
2833 }
2834
2835 void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len)
2836 {
2837 s->sigmask_len = sigmask_len;
2838 }
2839
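/*
 * Complete a KVM_EXIT_IO exit by replaying each element of the guest's
 * PIO data buffer against the I/O address space.
 */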
2840 static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data, int direction,
2841 int size, uint32_t count)
2842 {
2843 int i;
2844 uint8_t *ptr = data;
2845
2846 for (i = 0; i < count; i++) {
2847 address_space_rw(&address_space_io, port, attrs,
2848 ptr, size,
2849 direction == KVM_EXIT_IO_OUT);
2850 ptr += size;
2851 }
2852 }
2853
2854 static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run)
2855 {
2856 int i;
2857
2858 fprintf(stderr, "KVM internal error. Suberror: %d\n",
2859 run->internal.suberror);
2860
2861 for (i = 0; i < run->internal.ndata; ++i) {
2862 fprintf(stderr, "extra data[%d]: 0x%016"PRIx64"\n",
2863 i, (uint64_t)run->internal.data[i]);
2864 }
2865 if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
2866 fprintf(stderr, "emulation failure\n");
2867 if (!kvm_arch_stop_on_emulation_error(cpu)) {
2868 cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
2869 return EXCP_INTERRUPT;
2870 }
2871 }
2872     /* FIXME: Should trigger a QMP message to let management know
2873      * something went wrong.
2874      */
2875 return -1;
2876 }
2877
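/*
 * Drain the coalesced MMIO/PIO ring shared with the kernel, replaying
 * every pending access against the proper address space.  Re-entrancy
 * is guarded with coalesced_flush_in_progress.
 */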
2878 void kvm_flush_coalesced_mmio_buffer(void)
2879 {
2880 KVMState *s = kvm_state;
2881
2882 if (!s || s->coalesced_flush_in_progress) {
2883 return;
2884 }
2885
2886 s->coalesced_flush_in_progress = true;
2887
2888 if (s->coalesced_mmio_ring) {
2889 struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
2890 while (ring->first != ring->last) {
2891 struct kvm_coalesced_mmio *ent;
2892
2893 ent = &ring->coalesced_mmio[ring->first];
2894
2895 if (ent->pio == 1) {
2896 address_space_write(&address_space_io, ent->phys_addr,
2897 MEMTXATTRS_UNSPECIFIED, ent->data,
2898 ent->len);
2899 } else {
2900 cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
2901 }
2902 smp_wmb();
2903 ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
2904 }
2905 }
2906
2907 s->coalesced_flush_in_progress = false;
2908 }
2909
2910 static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
2911 {
2912 if (!cpu->vcpu_dirty && !kvm_state->guest_state_protected) {
2913 Error *err = NULL;
2914 int ret = kvm_arch_get_registers(cpu, &err);
2915 if (ret) {
2916 if (err) {
2917 error_reportf_err(err, "Failed to synchronize CPU state: ");
2918 } else {
2919 error_report("Failed to get registers: %s", strerror(-ret));
2920 }
2921
2922 cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
2923 vm_stop(RUN_STATE_INTERNAL_ERROR);
2924 }
2925
2926 cpu->vcpu_dirty = true;
2927 }
2928 }
2929
2930 void kvm_cpu_synchronize_state(CPUState *cpu)
2931 {
2932 if (!cpu->vcpu_dirty && !kvm_state->guest_state_protected) {
2933 run_on_cpu(cpu, do_kvm_cpu_synchronize_state, RUN_ON_CPU_NULL);
2934 }
2935 }
2936
2937 static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
2938 {
2939 Error *err = NULL;
2940 int ret = kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE, &err);
2941 if (ret) {
2942 if (err) {
2943 error_reportf_err(err, "Restoring resisters after reset: ");
2944 } else {
2945 error_report("Failed to put registers after reset: %s",
2946 strerror(-ret));
2947 }
2948 cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
2949 vm_stop(RUN_STATE_INTERNAL_ERROR);
2950 }
2951
2952 cpu->vcpu_dirty = false;
2953 }
2954
2955 void kvm_cpu_synchronize_post_reset(CPUState *cpu)
2956 {
2957 run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
2958
2959 if (cpu == first_cpu) {
2960 kvm_reset_parked_vcpus(kvm_state);
2961 }
2962 }
2963
2964 static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
2965 {
2966 Error *err = NULL;
2967 int ret = kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE, &err);
2968 if (ret) {
2969 if (err) {
2970 error_reportf_err(err, "Putting registers after init: ");
2971 } else {
2972 error_report("Failed to put registers after init: %s",
2973 strerror(-ret));
2974 }
2975 exit(1);
2976 }
2977
2978 cpu->vcpu_dirty = false;
2979 }
2980
2981 void kvm_cpu_synchronize_post_init(CPUState *cpu)
2982 {
2983 if (!kvm_state->guest_state_protected) {
2984 /*
2985 * This runs before the machine_init_done notifiers, and is the last
2986 * opportunity to synchronize the state of confidential guests.
2987 */
2988 run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
2989 }
2990 }
2991
2992 static void do_kvm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
2993 {
2994 cpu->vcpu_dirty = true;
2995 }
2996
2997 void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu)
2998 {
2999 run_on_cpu(cpu, do_kvm_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
3000 }
3001
3002 #ifdef KVM_HAVE_MCE_INJECTION
3003 static __thread void *pending_sigbus_addr;
3004 static __thread int pending_sigbus_code;
3005 static __thread bool have_sigbus_pending;
3006 #endif
3007
3008 static void kvm_cpu_kick(CPUState *cpu)
3009 {
3010 qatomic_set(&cpu->kvm_run->immediate_exit, 1);
3011 }
3012
3013 static void kvm_cpu_kick_self(void)
3014 {
3015 if (kvm_immediate_exit) {
3016 kvm_cpu_kick(current_cpu);
3017 } else {
3018 qemu_cpu_kick_self();
3019 }
3020 }
3021
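/*
 * Consume any pending SIG_IPI so that a kick is not left pending across
 * the next KVM_RUN.  With KVM_CAP_IMMEDIATE_EXIT no signal is ever
 * sent, so clearing immediate_exit is all that is needed.
 */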
3022 static void kvm_eat_signals(CPUState *cpu)
3023 {
3024 struct timespec ts = { 0, 0 };
3025 siginfo_t siginfo;
3026 sigset_t waitset;
3027 sigset_t chkset;
3028 int r;
3029
3030 if (kvm_immediate_exit) {
3031 qatomic_set(&cpu->kvm_run->immediate_exit, 0);
3032 /* Write kvm_run->immediate_exit before the cpu->exit_request
3033 * write in kvm_cpu_exec.
3034 */
3035 smp_wmb();
3036 return;
3037 }
3038
3039 sigemptyset(&waitset);
3040 sigaddset(&waitset, SIG_IPI);
3041
3042 do {
3043 r = sigtimedwait(&waitset, &siginfo, &ts);
3044 if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
3045 perror("sigtimedwait");
3046 exit(1);
3047 }
3048
3049 r = sigpending(&chkset);
3050 if (r == -1) {
3051 perror("sigpending");
3052 exit(1);
3053 }
3054 } while (sigismember(&chkset, SIG_IPI));
3055 }
3056
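/*
 * Convert a guest-memfd backed range between private and shared,
 * updating the KVM memory attributes, notifying state-change listeners
 * and discarding the backing pages that are no longer in use.
 */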
3057 int kvm_convert_memory(hwaddr start, hwaddr size, bool to_private)
3058 {
3059 MemoryRegionSection section;
3060 ram_addr_t offset;
3061 MemoryRegion *mr;
3062 RAMBlock *rb;
3063 void *addr;
3064 int ret = -EINVAL;
3065
3066 trace_kvm_convert_memory(start, size, to_private ? "shared_to_private" : "private_to_shared");
3067
3068 if (!QEMU_PTR_IS_ALIGNED(start, qemu_real_host_page_size()) ||
3069 !QEMU_PTR_IS_ALIGNED(size, qemu_real_host_page_size())) {
3070 return ret;
3071 }
3072
3073 if (!size) {
3074 return ret;
3075 }
3076
3077 section = memory_region_find(get_system_memory(), start, size);
3078 mr = section.mr;
3079 if (!mr) {
3080 /*
3081 * Ignore converting non-assigned region to shared.
3082 *
3083 * TDX requires vMMIO region to be shared to inject #VE to guest.
3084 * OVMF issues conservatively MapGPA(shared) on 32bit PCI MMIO region,
3085 * and vIO-APIC 0xFEC00000 4K page.
3086 * OVMF assigns 32bit PCI MMIO region to
3087          * [top of low memory: typically 2GB=0x80000000, 0xFC000000)
3088 */
3089 if (!to_private) {
3090 return 0;
3091 }
3092 return ret;
3093 }
3094
3095 if (!memory_region_has_guest_memfd(mr)) {
3096 /*
3097 * Because vMMIO region must be shared, guest TD may convert vMMIO
3098 * region to shared explicitly. Don't complain such case. See
3099 * memory_region_type() for checking if the region is MMIO region.
3100 */
3101 if (!to_private &&
3102 !memory_region_is_ram(mr) &&
3103 !memory_region_is_ram_device(mr) &&
3104 !memory_region_is_rom(mr) &&
3105 !memory_region_is_romd(mr)) {
3106 ret = 0;
3107 } else {
3108 error_report("Convert non guest_memfd backed memory region "
3109 "(0x%"HWADDR_PRIx" ,+ 0x%"HWADDR_PRIx") to %s",
3110 start, size, to_private ? "private" : "shared");
3111 }
3112 goto out_unref;
3113 }
3114
3115 if (to_private) {
3116 ret = kvm_set_memory_attributes_private(start, size);
3117 } else {
3118 ret = kvm_set_memory_attributes_shared(start, size);
3119 }
3120 if (ret) {
3121 goto out_unref;
3122 }
3123
3124 addr = memory_region_get_ram_ptr(mr) + section.offset_within_region;
3125 rb = qemu_ram_block_from_host(addr, false, &offset);
3126
3127 ret = ram_block_attributes_state_change(RAM_BLOCK_ATTRIBUTES(mr->rdm),
3128 offset, size, to_private);
3129 if (ret) {
3130 error_report("Failed to notify the listener the state change of "
3131 "(0x%"HWADDR_PRIx" + 0x%"HWADDR_PRIx") to %s",
3132 start, size, to_private ? "private" : "shared");
3133 goto out_unref;
3134 }
3135
3136 if (to_private) {
3137 if (rb->page_size != qemu_real_host_page_size()) {
3138 /*
3139 * shared memory is backed by hugetlb, which is supposed to be
3140 * pre-allocated and doesn't need to be discarded
3141 */
3142 goto out_unref;
3143 }
3144 ret = ram_block_discard_range(rb, offset, size);
3145 } else {
3146 ret = ram_block_discard_guest_memfd_range(rb, offset, size);
3147 }
3148
3149 out_unref:
3150 memory_region_unref(mr);
3151 return ret;
3152 }
3153
3154 int kvm_cpu_exec(CPUState *cpu)
3155 {
3156 struct kvm_run *run = cpu->kvm_run;
3157 int ret, run_ret;
3158
3159 trace_kvm_cpu_exec();
3160
3161 if (kvm_arch_process_async_events(cpu)) {
3162 qatomic_set(&cpu->exit_request, 0);
3163 return EXCP_HLT;
3164 }
3165
3166 bql_unlock();
3167 cpu_exec_start(cpu);
3168
3169 do {
3170 MemTxAttrs attrs;
3171
3172 if (cpu->vcpu_dirty) {
3173 Error *err = NULL;
3174 ret = kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE, &err);
3175 if (ret) {
3176 if (err) {
3177 error_reportf_err(err, "Putting registers after init: ");
3178 } else {
3179 error_report("Failed to put registers after init: %s",
3180 strerror(-ret));
3181 }
3182 ret = -1;
3183 break;
3184 }
3185
3186 cpu->vcpu_dirty = false;
3187 }
3188
3189 kvm_arch_pre_run(cpu, run);
3190 if (qatomic_read(&cpu->exit_request)) {
3191 trace_kvm_interrupt_exit_request();
3192 /*
3193 * KVM requires us to reenter the kernel after IO exits to complete
3194 * instruction emulation. This self-signal will ensure that we
3195 * leave ASAP again.
3196 */
3197 kvm_cpu_kick_self();
3198 }
3199
3200 /* Read cpu->exit_request before KVM_RUN reads run->immediate_exit.
3201 * Matching barrier in kvm_eat_signals.
3202 */
3203 smp_rmb();
3204
3205 run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);
3206
3207 attrs = kvm_arch_post_run(cpu, run);
3208
3209 #ifdef KVM_HAVE_MCE_INJECTION
3210 if (unlikely(have_sigbus_pending)) {
3211 bql_lock();
3212 kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code,
3213 pending_sigbus_addr);
3214 have_sigbus_pending = false;
3215 bql_unlock();
3216 }
3217 #endif
3218
3219 if (run_ret < 0) {
3220 if (run_ret == -EINTR || run_ret == -EAGAIN) {
3221 trace_kvm_io_window_exit();
3222 kvm_eat_signals(cpu);
3223 ret = EXCP_INTERRUPT;
3224 break;
3225 }
3226 if (!(run_ret == -EFAULT && run->exit_reason == KVM_EXIT_MEMORY_FAULT)) {
3227 fprintf(stderr, "error: kvm run failed %s\n",
3228 strerror(-run_ret));
3229 #ifdef TARGET_PPC
3230 if (run_ret == -EBUSY) {
3231 fprintf(stderr,
3232 "This is probably because your SMT is enabled.\n"
3233 "VCPU can only run on primary threads with all "
3234 "secondary threads offline.\n");
3235 }
3236 #endif
3237 ret = -1;
3238 break;
3239 }
3240 }
3241
3242 trace_kvm_run_exit(cpu->cpu_index, run->exit_reason);
3243 switch (run->exit_reason) {
3244 case KVM_EXIT_IO:
3245 /* Called outside BQL */
3246 kvm_handle_io(run->io.port, attrs,
3247 (uint8_t *)run + run->io.data_offset,
3248 run->io.direction,
3249 run->io.size,
3250 run->io.count);
3251 ret = 0;
3252 break;
3253 case KVM_EXIT_MMIO:
3254 /* Called outside BQL */
3255 address_space_rw(&address_space_memory,
3256 run->mmio.phys_addr, attrs,
3257 run->mmio.data,
3258 run->mmio.len,
3259 run->mmio.is_write);
3260 ret = 0;
3261 break;
3262 case KVM_EXIT_IRQ_WINDOW_OPEN:
3263 ret = EXCP_INTERRUPT;
3264 break;
3265 case KVM_EXIT_SHUTDOWN:
3266 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
3267 ret = EXCP_INTERRUPT;
3268 break;
3269 case KVM_EXIT_UNKNOWN:
3270 fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
3271 (uint64_t)run->hw.hardware_exit_reason);
3272 ret = -1;
3273 break;
3274 case KVM_EXIT_INTERNAL_ERROR:
3275 ret = kvm_handle_internal_error(cpu, run);
3276 break;
3277 case KVM_EXIT_DIRTY_RING_FULL:
3278 /*
3279 * We shouldn't continue if the dirty ring of this vcpu is
3280 * still full. Got kicked by KVM_RESET_DIRTY_RINGS.
3281 */
3282 trace_kvm_dirty_ring_full(cpu->cpu_index);
3283 bql_lock();
3284 /*
3285          * We throttle the vCPU by making it sleep once it exits the
3286          * kernel due to a full dirty ring.  In the dirtylimit scenario,
3287          * reaping all vCPUs after a single vCPU's dirty ring fills up
3288          * would miss that sleep, so only reap the vCPU whose ring is full.
3289 */
3290 if (dirtylimit_in_service()) {
3291 kvm_dirty_ring_reap(kvm_state, cpu);
3292 } else {
3293 kvm_dirty_ring_reap(kvm_state, NULL);
3294 }
3295 bql_unlock();
3296 dirtylimit_vcpu_execute(cpu);
3297 ret = 0;
3298 break;
3299 case KVM_EXIT_SYSTEM_EVENT:
3300 trace_kvm_run_exit_system_event(cpu->cpu_index, run->system_event.type);
3301 switch (run->system_event.type) {
3302 case KVM_SYSTEM_EVENT_SHUTDOWN:
3303 qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
3304 ret = EXCP_INTERRUPT;
3305 break;
3306 case KVM_SYSTEM_EVENT_RESET:
3307 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
3308 ret = EXCP_INTERRUPT;
3309 break;
3310 case KVM_SYSTEM_EVENT_CRASH:
3311 kvm_cpu_synchronize_state(cpu);
3312 bql_lock();
3313 qemu_system_guest_panicked(cpu_get_crash_info(cpu));
3314 bql_unlock();
3315 ret = 0;
3316 break;
3317 default:
3318 ret = kvm_arch_handle_exit(cpu, run);
3319 break;
3320 }
3321 break;
3322 case KVM_EXIT_MEMORY_FAULT:
3323 trace_kvm_memory_fault(run->memory_fault.gpa,
3324 run->memory_fault.size,
3325 run->memory_fault.flags);
3326 if (run->memory_fault.flags & ~KVM_MEMORY_EXIT_FLAG_PRIVATE) {
3327 error_report("KVM_EXIT_MEMORY_FAULT: Unknown flag 0x%" PRIx64,
3328 (uint64_t)run->memory_fault.flags);
3329 ret = -1;
3330 break;
3331 }
3332 ret = kvm_convert_memory(run->memory_fault.gpa, run->memory_fault.size,
3333 run->memory_fault.flags & KVM_MEMORY_EXIT_FLAG_PRIVATE);
3334 break;
3335 default:
3336 ret = kvm_arch_handle_exit(cpu, run);
3337 break;
3338 }
3339 } while (ret == 0);
3340
3341 cpu_exec_end(cpu);
3342 bql_lock();
3343
3344 if (ret < 0) {
3345 cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
3346 vm_stop(RUN_STATE_INTERNAL_ERROR);
3347 }
3348
3349 qatomic_set(&cpu->exit_request, 0);
3350 return ret;
3351 }
3352
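/*
 * Thin ioctl() wrappers for the /dev/kvm, VM, vCPU and device file
 * descriptors follow; all of them return -errno on failure.  The VM,
 * vCPU and device variants additionally bracket the call with the
 * accel-blocker hooks so that ioctls can be inhibited during memslot
 * transactions.
 */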
3353 int kvm_ioctl(KVMState *s, unsigned long type, ...)
3354 {
3355 int ret;
3356 void *arg;
3357 va_list ap;
3358
3359 va_start(ap, type);
3360 arg = va_arg(ap, void *);
3361 va_end(ap);
3362
3363 trace_kvm_ioctl(type, arg);
3364 ret = ioctl(s->fd, type, arg);
3365 if (ret == -1) {
3366 ret = -errno;
3367 }
3368 return ret;
3369 }
3370
3371 int kvm_vm_ioctl(KVMState *s, unsigned long type, ...)
3372 {
3373 int ret;
3374 void *arg;
3375 va_list ap;
3376
3377 va_start(ap, type);
3378 arg = va_arg(ap, void *);
3379 va_end(ap);
3380
3381 trace_kvm_vm_ioctl(type, arg);
3382 accel_ioctl_begin();
3383 ret = ioctl(s->vmfd, type, arg);
3384 accel_ioctl_end();
3385 if (ret == -1) {
3386 ret = -errno;
3387 }
3388 return ret;
3389 }
3390
3391 int kvm_vcpu_ioctl(CPUState *cpu, unsigned long type, ...)
3392 {
3393 int ret;
3394 void *arg;
3395 va_list ap;
3396
3397 va_start(ap, type);
3398 arg = va_arg(ap, void *);
3399 va_end(ap);
3400
3401 trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg);
3402 accel_cpu_ioctl_begin(cpu);
3403 ret = ioctl(cpu->kvm_fd, type, arg);
3404 accel_cpu_ioctl_end(cpu);
3405 if (ret == -1) {
3406 ret = -errno;
3407 }
3408 return ret;
3409 }
3410
3411 int kvm_device_ioctl(int fd, unsigned long type, ...)
3412 {
3413 int ret;
3414 void *arg;
3415 va_list ap;
3416
3417 va_start(ap, type);
3418 arg = va_arg(ap, void *);
3419 va_end(ap);
3420
3421 trace_kvm_device_ioctl(fd, type, arg);
3422 accel_ioctl_begin();
3423 ret = ioctl(fd, type, arg);
3424 accel_ioctl_end();
3425 if (ret == -1) {
3426 ret = -errno;
3427 }
3428 return ret;
3429 }
3430
3431 int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr)
3432 {
3433 int ret;
3434 struct kvm_device_attr attribute = {
3435 .group = group,
3436 .attr = attr,
3437 };
3438
3439 if (!kvm_vm_attributes_allowed) {
3440 return 0;
3441 }
3442
3443 ret = kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attribute);
3444 /* kvm returns 0 on success for HAS_DEVICE_ATTR */
3445 return ret ? 0 : 1;
3446 }
3447
3448 int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
3449 {
3450 struct kvm_device_attr attribute = {
3451 .group = group,
3452 .attr = attr,
3453 .flags = 0,
3454 };
3455
3456 return kvm_device_ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute) ? 0 : 1;
3457 }
3458
3459 int kvm_device_access(int fd, int group, uint64_t attr,
3460 void *val, bool write, Error **errp)
3461 {
3462 struct kvm_device_attr kvmattr;
3463 int err;
3464
3465 kvmattr.flags = 0;
3466 kvmattr.group = group;
3467 kvmattr.attr = attr;
3468 kvmattr.addr = (uintptr_t)val;
3469
3470 err = kvm_device_ioctl(fd,
3471 write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR,
3472 &kvmattr);
3473 if (err < 0) {
3474 error_setg_errno(errp, -err,
3475 "KVM_%s_DEVICE_ATTR failed: Group %d "
3476 "attr 0x%016" PRIx64,
3477 write ? "SET" : "GET", group, attr);
3478 }
3479 return err;
3480 }
3481
3482 bool kvm_has_sync_mmu(void)
3483 {
3484 return kvm_state->sync_mmu;
3485 }
3486
3487 int kvm_has_vcpu_events(void)
3488 {
3489 return kvm_state->vcpu_events;
3490 }
3491
3492 int kvm_max_nested_state_length(void)
3493 {
3494 return kvm_state->max_nested_state_len;
3495 }
3496
3497 int kvm_has_gsi_routing(void)
3498 {
3499 #ifdef KVM_CAP_IRQ_ROUTING
3500 return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
3501 #else
3502 return false;
3503 #endif
3504 }
3505
3506 bool kvm_arm_supports_user_irq(void)
3507 {
3508 return kvm_check_extension(kvm_state, KVM_CAP_ARM_USER_IRQ);
3509 }
3510
3511 #ifdef TARGET_KVM_HAVE_GUEST_DEBUG
3512 struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu, vaddr pc)
3513 {
3514 struct kvm_sw_breakpoint *bp;
3515
3516 QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
3517 if (bp->pc == pc) {
3518 return bp;
3519 }
3520 }
3521 return NULL;
3522 }
3523
3524 int kvm_sw_breakpoints_active(CPUState *cpu)
3525 {
3526 return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
3527 }
3528
3529 struct kvm_set_guest_debug_data {
3530 struct kvm_guest_debug dbg;
3531 int err;
3532 };
3533
3534 static void kvm_invoke_set_guest_debug(CPUState *cpu, run_on_cpu_data data)
3535 {
3536 struct kvm_set_guest_debug_data *dbg_data =
3537 (struct kvm_set_guest_debug_data *) data.host_ptr;
3538
3539 dbg_data->err = kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG,
3540 &dbg_data->dbg);
3541 }
3542
3543 int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
3544 {
3545 struct kvm_set_guest_debug_data data;
3546
3547 data.dbg.control = reinject_trap;
3548
3549 if (cpu->singlestep_enabled) {
3550 data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
3551
3552 if (cpu->singlestep_enabled & SSTEP_NOIRQ) {
3553 data.dbg.control |= KVM_GUESTDBG_BLOCKIRQ;
3554 }
3555 }
3556 kvm_arch_update_guest_debug(cpu, &data.dbg);
3557
3558 run_on_cpu(cpu, kvm_invoke_set_guest_debug,
3559 RUN_ON_CPU_HOST_PTR(&data));
3560 return data.err;
3561 }
3562
3563 bool kvm_supports_guest_debug(void)
3564 {
3565 /* probed during kvm_init() */
3566 return kvm_has_guest_debug;
3567 }
3568
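/*
 * Insert a software or hardware breakpoint.  Software breakpoints are
 * refcounted per guest address; after any change the updated
 * guest-debug state is pushed to every vCPU.
 */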
3569 int kvm_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
3570 {
3571 struct kvm_sw_breakpoint *bp;
3572 int err;
3573
3574 if (type == GDB_BREAKPOINT_SW) {
3575 bp = kvm_find_sw_breakpoint(cpu, addr);
3576 if (bp) {
3577 bp->use_count++;
3578 return 0;
3579 }
3580
3581 bp = g_new(struct kvm_sw_breakpoint, 1);
3582 bp->pc = addr;
3583 bp->use_count = 1;
3584 err = kvm_arch_insert_sw_breakpoint(cpu, bp);
3585 if (err) {
3586 g_free(bp);
3587 return err;
3588 }
3589
3590 QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
3591 } else {
3592 err = kvm_arch_insert_hw_breakpoint(addr, len, type);
3593 if (err) {
3594 return err;
3595 }
3596 }
3597
3598 CPU_FOREACH(cpu) {
3599 err = kvm_update_guest_debug(cpu, 0);
3600 if (err) {
3601 return err;
3602 }
3603 }
3604 return 0;
3605 }
3606
3607 int kvm_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
3608 {
3609 struct kvm_sw_breakpoint *bp;
3610 int err;
3611
3612 if (type == GDB_BREAKPOINT_SW) {
3613 bp = kvm_find_sw_breakpoint(cpu, addr);
3614 if (!bp) {
3615 return -ENOENT;
3616 }
3617
3618 if (bp->use_count > 1) {
3619 bp->use_count--;
3620 return 0;
3621 }
3622
3623 err = kvm_arch_remove_sw_breakpoint(cpu, bp);
3624 if (err) {
3625 return err;
3626 }
3627
3628 QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
3629 g_free(bp);
3630 } else {
3631 err = kvm_arch_remove_hw_breakpoint(addr, len, type);
3632 if (err) {
3633 return err;
3634 }
3635 }
3636
3637 CPU_FOREACH(cpu) {
3638 err = kvm_update_guest_debug(cpu, 0);
3639 if (err) {
3640 return err;
3641 }
3642 }
3643 return 0;
3644 }
3645
3646 void kvm_remove_all_breakpoints(CPUState *cpu)
3647 {
3648 struct kvm_sw_breakpoint *bp, *next;
3649 KVMState *s = cpu->kvm_state;
3650 CPUState *tmpcpu;
3651
3652 QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
3653 if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) {
3654 /* Try harder to find a CPU that currently sees the breakpoint. */
3655 CPU_FOREACH(tmpcpu) {
3656 if (kvm_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
3657 break;
3658 }
3659 }
3660 }
3661 QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry);
3662 g_free(bp);
3663 }
3664 kvm_arch_remove_all_hw_breakpoints();
3665
3666 CPU_FOREACH(cpu) {
3667 kvm_update_guest_debug(cpu, 0);
3668 }
3669 }
3670
3671 #endif /* TARGET_KVM_HAVE_GUEST_DEBUG */
3672
3673 static int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset)
3674 {
3675 KVMState *s = kvm_state;
3676 struct kvm_signal_mask *sigmask;
3677 int r;
3678
3679 sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));
3680
3681 sigmask->len = s->sigmask_len;
3682 memcpy(sigmask->sigset, sigset, sizeof(*sigset));
3683 r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
3684 g_free(sigmask);
3685
3686 return r;
3687 }
3688
3689 static void kvm_ipi_signal(int sig)
3690 {
3691 if (current_cpu) {
3692 assert(kvm_immediate_exit);
3693 kvm_cpu_kick(current_cpu);
3694 }
3695 }
3696
3697 void kvm_init_cpu_signals(CPUState *cpu)
3698 {
3699 int r;
3700 sigset_t set;
3701 struct sigaction sigact;
3702
3703 memset(&sigact, 0, sizeof(sigact));
3704 sigact.sa_handler = kvm_ipi_signal;
3705 sigaction(SIG_IPI, &sigact, NULL);
3706
3707 pthread_sigmask(SIG_BLOCK, NULL, &set);
3708 #if defined KVM_HAVE_MCE_INJECTION
3709 sigdelset(&set, SIGBUS);
3710 pthread_sigmask(SIG_SETMASK, &set, NULL);
3711 #endif
3712 sigdelset(&set, SIG_IPI);
3713 if (kvm_immediate_exit) {
3714 r = pthread_sigmask(SIG_SETMASK, &set, NULL);
3715 } else {
3716 r = kvm_set_signal_mask(cpu, &set);
3717 }
3718 if (r) {
3719 fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
3720 exit(1);
3721 }
3722 }
3723
3724 /* Called asynchronously in VCPU thread. */
3725 int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
3726 {
3727 #ifdef KVM_HAVE_MCE_INJECTION
3728 if (have_sigbus_pending) {
3729 return 1;
3730 }
3731 have_sigbus_pending = true;
3732 pending_sigbus_addr = addr;
3733 pending_sigbus_code = code;
3734 qatomic_set(&cpu->exit_request, 1);
3735 return 0;
3736 #else
3737 return 1;
3738 #endif
3739 }
3740
3741 /* Called synchronously (via signalfd) in main thread. */
3742 int kvm_on_sigbus(int code, void *addr)
3743 {
3744 #ifdef KVM_HAVE_MCE_INJECTION
3745 /* Action required MCE kills the process if SIGBUS is blocked. Because
3746 * that's what happens in the I/O thread, where we handle MCE via signalfd,
3747 * we can only get action optional here.
3748 */
3749 assert(code != BUS_MCEERR_AR);
3750 kvm_arch_on_sigbus_vcpu(first_cpu, code, addr);
3751 return 0;
3752 #else
3753 return 1;
3754 #endif
3755 }
3756
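/*
 * Create an in-kernel device of the given type, or merely probe for
 * support when @test is set.  Returns the new device fd (0 for a
 * successful test) or a negative errno.
 */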
3757 int kvm_create_device(KVMState *s, uint64_t type, bool test)
3758 {
3759 int ret;
3760 struct kvm_create_device create_dev;
3761
3762 create_dev.type = type;
3763 create_dev.fd = -1;
3764 create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0;
3765
3766 if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) {
3767 return -ENOTSUP;
3768 }
3769
3770 ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &create_dev);
3771 if (ret) {
3772 return ret;
3773 }
3774
3775 return test ? 0 : create_dev.fd;
3776 }
3777
3778 bool kvm_device_supported(int vmfd, uint64_t type)
3779 {
3780 struct kvm_create_device create_dev = {
3781 .type = type,
3782 .fd = -1,
3783 .flags = KVM_CREATE_DEVICE_TEST,
3784 };
3785
3786 if (ioctl(vmfd, KVM_CHECK_EXTENSION, KVM_CAP_DEVICE_CTRL) <= 0) {
3787 return false;
3788 }
3789
3790 return (ioctl(vmfd, KVM_CREATE_DEVICE, &create_dev) >= 0);
3791 }
3792
3793 int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source)
3794 {
3795 struct kvm_one_reg reg;
3796 int r;
3797
3798 reg.id = id;
3799 reg.addr = (uintptr_t) source;
3800     r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
3801 if (r) {
3802 trace_kvm_failed_reg_set(id, strerror(-r));
3803 }
3804 return r;
3805 }
3806
3807 int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
3808 {
3809 struct kvm_one_reg reg;
3810 int r;
3811
3812 reg.id = id;
3813 reg.addr = (uintptr_t) target;
3814 r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
3815 if (r) {
3816 trace_kvm_failed_reg_get(id, strerror(-r));
3817 }
3818 return r;
3819 }
3820
static bool kvm_accel_has_memory(AccelState *accel, AddressSpace *as,
                                 hwaddr start_addr, hwaddr size)
{
    KVMState *kvm = KVM_STATE(accel);
    int i;

    for (i = 0; i < kvm->nr_as; ++i) {
        if (kvm->as[i].as == as && kvm->as[i].ml) {
            size = MIN(kvm_max_slot_size, size);
            return NULL != kvm_lookup_matching_slot(kvm->as[i].ml,
                                                    start_addr, size);
        }
    }

    return false;
}

static void kvm_get_kvm_shadow_mem(Object *obj, Visitor *v,
                                   const char *name, void *opaque,
                                   Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    int64_t value = s->kvm_shadow_mem;

    visit_type_int(v, name, &value, errp);
}

static void kvm_set_kvm_shadow_mem(Object *obj, Visitor *v,
                                   const char *name, void *opaque,
                                   Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    int64_t value;

    if (s->fd != -1) {
        error_setg(errp, "Cannot set properties after the accelerator has been initialized");
        return;
    }

    if (!visit_type_int(v, name, &value, errp)) {
        return;
    }

    s->kvm_shadow_mem = value;
}

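/*
 * Setter for the "kernel-irqchip" accelerator property, e.g.
 * "-accel kvm,kernel-irqchip=split".  Both "on" and "split" require an
 * in-kernel irqchip; "split" additionally requests the variant that
 * keeps part of the interrupt handling (IOAPIC/PIC on x86) in userspace.
 */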
static void kvm_set_kernel_irqchip(Object *obj, Visitor *v,
                                   const char *name, void *opaque,
                                   Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    OnOffSplit mode;

    if (s->fd != -1) {
        error_setg(errp, "Cannot set properties after the accelerator has been initialized");
        return;
    }

    if (!visit_type_OnOffSplit(v, name, &mode, errp)) {
        return;
    }
    switch (mode) {
    case ON_OFF_SPLIT_ON:
        s->kernel_irqchip_allowed = true;
        s->kernel_irqchip_required = true;
        s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
        break;
    case ON_OFF_SPLIT_OFF:
        s->kernel_irqchip_allowed = false;
        s->kernel_irqchip_required = false;
        s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
        break;
    case ON_OFF_SPLIT_SPLIT:
        s->kernel_irqchip_allowed = true;
        s->kernel_irqchip_required = true;
        s->kernel_irqchip_split = ON_OFF_AUTO_ON;
        break;
    default:
        /* The value was checked in visit_type_OnOffSplit() above.  If
         * we get here, then something is wrong in QEMU.
         */
        abort();
    }
}

bool kvm_kernel_irqchip_allowed(void)
{
    return kvm_state->kernel_irqchip_allowed;
}

bool kvm_kernel_irqchip_required(void)
{
    return kvm_state->kernel_irqchip_required;
}

bool kvm_kernel_irqchip_split(void)
{
    return kvm_state->kernel_irqchip_split == ON_OFF_AUTO_ON;
}

static void kvm_get_dirty_ring_size(Object *obj, Visitor *v,
                                    const char *name, void *opaque,
                                    Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    uint32_t value = s->kvm_dirty_ring_size;

    visit_type_uint32(v, name, &value, errp);
}

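/*
 * Setter for "dirty-ring-size", e.g. "-accel kvm,dirty-ring-size=4096".
 * The value is the number of dirty-ring entries per vCPU and must be a
 * power of two, per the KVM dirty ring ABI; 0 (the default) leaves the
 * ring disabled so that dirty tracking falls back to the bitmap.
 */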
static void kvm_set_dirty_ring_size(Object *obj, Visitor *v,
                                    const char *name, void *opaque,
                                    Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    uint32_t value;

    if (s->fd != -1) {
        error_setg(errp, "Cannot set properties after the accelerator has been initialized");
        return;
    }

    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }
    if (value & (value - 1)) {
        error_setg(errp, "dirty-ring-size must be a power of two.");
        return;
    }

    s->kvm_dirty_ring_size = value;
}

static char *kvm_get_device(Object *obj,
                            Error **errp G_GNUC_UNUSED)
{
    KVMState *s = KVM_STATE(obj);

    return g_strdup(s->device);
}

static void kvm_set_device(Object *obj,
                           const char *value,
                           Error **errp G_GNUC_UNUSED)
{
    KVMState *s = KVM_STATE(obj);

    g_free(s->device);
    s->device = g_strdup(value);
}

static void kvm_set_kvm_rapl(Object *obj, bool value, Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    s->msr_energy.enable = value;
}

static void kvm_set_kvm_rapl_socket_path(Object *obj,
                                         const char *str,
                                         Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    g_free(s->msr_energy.socket_path);
    s->msr_energy.socket_path = g_strdup(str);
}

static void kvm_accel_instance_init(Object *obj)
{
    KVMState *s = KVM_STATE(obj);

    s->fd = -1;
    s->vmfd = -1;
    s->kvm_shadow_mem = -1;
    s->kernel_irqchip_allowed = true;
    s->kernel_irqchip_split = ON_OFF_AUTO_AUTO;
    /* KVM dirty ring is by default off */
    s->kvm_dirty_ring_size = 0;
    s->kvm_dirty_ring_with_bitmap = false;
    s->kvm_eager_split_size = 0;
    s->notify_vmexit = NOTIFY_VMEXIT_OPTION_RUN;
    s->notify_window = 0;
    s->xen_version = 0;
    s->xen_gnttab_max_frames = 64;
    s->xen_evtchn_max_pirq = 256;
    s->device = NULL;
    s->msr_energy.enable = false;
}

/**
 * kvm_gdbstub_sstep_flags():
 *
 * Returns: SSTEP_* flags that KVM supports for guest debug.  The
 * support is probed during kvm_init().
 */
static int kvm_gdbstub_sstep_flags(AccelState *as)
{
    return kvm_sstep_flags;
}

static void kvm_accel_class_init(ObjectClass *oc, const void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);
    ac->name = "KVM";
    ac->init_machine = kvm_init;
    ac->has_memory = kvm_accel_has_memory;
    ac->allowed = &kvm_allowed;
    ac->gdbstub_supported_sstep_flags = kvm_gdbstub_sstep_flags;

    object_class_property_add(oc, "kernel-irqchip", "on|off|split",
        NULL, kvm_set_kernel_irqchip,
        NULL, NULL);
    object_class_property_set_description(oc, "kernel-irqchip",
        "Configure KVM in-kernel irqchip");

    object_class_property_add(oc, "kvm-shadow-mem", "int",
        kvm_get_kvm_shadow_mem, kvm_set_kvm_shadow_mem,
        NULL, NULL);
    object_class_property_set_description(oc, "kvm-shadow-mem",
        "KVM shadow MMU size");

    object_class_property_add(oc, "dirty-ring-size", "uint32",
        kvm_get_dirty_ring_size, kvm_set_dirty_ring_size,
        NULL, NULL);
    object_class_property_set_description(oc, "dirty-ring-size",
        "Size of KVM dirty page ring buffer (default: 0, i.e. use bitmap)");

    object_class_property_add_str(oc, "device", kvm_get_device, kvm_set_device);
    object_class_property_set_description(oc, "device",
        "Path to the device node to use (default: /dev/kvm)");

    object_class_property_add_bool(oc, "rapl",
                                   NULL,
                                   kvm_set_kvm_rapl);
    object_class_property_set_description(oc, "rapl",
        "Allow energy-related MSRs for the RAPL interface in the guest");

    object_class_property_add_str(oc, "rapl-helper-socket", NULL,
                                  kvm_set_kvm_rapl_socket_path);
    object_class_property_set_description(oc, "rapl-helper-socket",
        "Socket path for communicating with the Virtual MSR helper daemon");

    kvm_arch_accel_class_init(oc);
}

static const TypeInfo kvm_accel_type = {
    .name = TYPE_KVM_ACCEL,
    .parent = TYPE_ACCEL,
    .instance_init = kvm_accel_instance_init,
    .class_init = kvm_accel_class_init,
    .instance_size = sizeof(KVMState),
};

static void kvm_type_init(void)
{
    type_register_static(&kvm_accel_type);
}

type_init(kvm_type_init);

typedef struct StatsArgs {
    union StatsResultsType {
        StatsResultList **stats;
        StatsSchemaList **schema;
    } result;
    strList *names;
    Error **errp;
} StatsArgs;

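/*
 * Convert one binary-stats descriptor plus its payload into a Stats
 * entry prepended to @stats_list.  Descriptors whose type, unit or base
 * flags are unknown are silently skipped, so newer kernel stat flavours
 * degrade gracefully instead of failing the whole query.
 */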
static StatsList *add_kvmstat_entry(struct kvm_stats_desc *pdesc,
                                    uint64_t *stats_data,
                                    StatsList *stats_list,
                                    Error **errp)
{
    Stats *stats;
    uint64List *val_list = NULL;

    /* Only add stats that we understand. */
    switch (pdesc->flags & KVM_STATS_TYPE_MASK) {
    case KVM_STATS_TYPE_CUMULATIVE:
    case KVM_STATS_TYPE_INSTANT:
    case KVM_STATS_TYPE_PEAK:
    case KVM_STATS_TYPE_LINEAR_HIST:
    case KVM_STATS_TYPE_LOG_HIST:
        break;
    default:
        return stats_list;
    }

    switch (pdesc->flags & KVM_STATS_UNIT_MASK) {
    case KVM_STATS_UNIT_NONE:
    case KVM_STATS_UNIT_BYTES:
    case KVM_STATS_UNIT_CYCLES:
    case KVM_STATS_UNIT_SECONDS:
    case KVM_STATS_UNIT_BOOLEAN:
        break;
    default:
        return stats_list;
    }

    switch (pdesc->flags & KVM_STATS_BASE_MASK) {
    case KVM_STATS_BASE_POW10:
    case KVM_STATS_BASE_POW2:
        break;
    default:
        return stats_list;
    }

    /* Alloc and populate data list */
    stats = g_new0(Stats, 1);
    stats->name = g_strdup(pdesc->name);
    stats->value = g_new0(StatsValue, 1);

    if ((pdesc->flags & KVM_STATS_UNIT_MASK) == KVM_STATS_UNIT_BOOLEAN) {
        stats->value->u.boolean = *stats_data;
        stats->value->type = QTYPE_QBOOL;
    } else if (pdesc->size == 1) {
        stats->value->u.scalar = *stats_data;
        stats->value->type = QTYPE_QNUM;
    } else {
        int i;
        for (i = 0; i < pdesc->size; i++) {
            QAPI_LIST_PREPEND(val_list, stats_data[i]);
        }
        stats->value->u.list = val_list;
        stats->value->type = QTYPE_QLIST;
    }

    QAPI_LIST_PREPEND(stats_list, stats);
    return stats_list;
}

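/*
 * Translate one binary-stats descriptor into a StatsSchemaValue and
 * prepend it to @list; unknown type/unit/base flags drop the entry,
 * mirroring the filtering done in add_kvmstat_entry().
 */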
static StatsSchemaValueList *add_kvmschema_entry(struct kvm_stats_desc *pdesc,
                                                 StatsSchemaValueList *list,
                                                 Error **errp)
{
    StatsSchemaValueList *schema_entry = g_new0(StatsSchemaValueList, 1);
    schema_entry->value = g_new0(StatsSchemaValue, 1);

    switch (pdesc->flags & KVM_STATS_TYPE_MASK) {
    case KVM_STATS_TYPE_CUMULATIVE:
        schema_entry->value->type = STATS_TYPE_CUMULATIVE;
        break;
    case KVM_STATS_TYPE_INSTANT:
        schema_entry->value->type = STATS_TYPE_INSTANT;
        break;
    case KVM_STATS_TYPE_PEAK:
        schema_entry->value->type = STATS_TYPE_PEAK;
        break;
    case KVM_STATS_TYPE_LINEAR_HIST:
        schema_entry->value->type = STATS_TYPE_LINEAR_HISTOGRAM;
        schema_entry->value->bucket_size = pdesc->bucket_size;
        schema_entry->value->has_bucket_size = true;
        break;
    case KVM_STATS_TYPE_LOG_HIST:
        schema_entry->value->type = STATS_TYPE_LOG2_HISTOGRAM;
        break;
    default:
        goto exit;
    }

    switch (pdesc->flags & KVM_STATS_UNIT_MASK) {
    case KVM_STATS_UNIT_NONE:
        break;
    case KVM_STATS_UNIT_BOOLEAN:
        schema_entry->value->has_unit = true;
        schema_entry->value->unit = STATS_UNIT_BOOLEAN;
        break;
    case KVM_STATS_UNIT_BYTES:
        schema_entry->value->has_unit = true;
        schema_entry->value->unit = STATS_UNIT_BYTES;
        break;
    case KVM_STATS_UNIT_CYCLES:
        schema_entry->value->has_unit = true;
        schema_entry->value->unit = STATS_UNIT_CYCLES;
        break;
    case KVM_STATS_UNIT_SECONDS:
        schema_entry->value->has_unit = true;
        schema_entry->value->unit = STATS_UNIT_SECONDS;
        break;
    default:
        goto exit;
    }

    schema_entry->value->exponent = pdesc->exponent;
    if (pdesc->exponent) {
        switch (pdesc->flags & KVM_STATS_BASE_MASK) {
        case KVM_STATS_BASE_POW10:
            schema_entry->value->has_base = true;
            schema_entry->value->base = 10;
            break;
        case KVM_STATS_BASE_POW2:
            schema_entry->value->has_base = true;
            schema_entry->value->base = 2;
            break;
        default:
            goto exit;
        }
    }

    schema_entry->value->name = g_strdup(pdesc->name);
    schema_entry->next = list;
    return schema_entry;
exit:
    g_free(schema_entry->value);
    g_free(schema_entry);
    return list;
}

/* Cached stats descriptors */
typedef struct StatsDescriptors {
    const char *ident; /* cache key, currently the StatsTarget */
    struct kvm_stats_desc *kvm_stats_desc;
    struct kvm_stats_header kvm_stats_header;
    QTAILQ_ENTRY(StatsDescriptors) next;
} StatsDescriptors;

static QTAILQ_HEAD(, StatsDescriptors) stats_descriptors =
    QTAILQ_HEAD_INITIALIZER(stats_descriptors);

/*
 * Return the descriptors for 'target': either the cached copy from an
 * earlier read, or a fresh set retrieved from 'stats_fd'.
 */
static StatsDescriptors *find_stats_descriptors(StatsTarget target, int stats_fd,
                                                Error **errp)
{
    StatsDescriptors *descriptors;
    const char *ident;
    struct kvm_stats_desc *kvm_stats_desc;
    struct kvm_stats_header *kvm_stats_header;
    size_t size_desc;
    ssize_t ret;

    ident = StatsTarget_str(target);
    QTAILQ_FOREACH(descriptors, &stats_descriptors, next) {
        if (g_str_equal(descriptors->ident, ident)) {
            return descriptors;
        }
    }

    descriptors = g_new0(StatsDescriptors, 1);

    /* Read stats header */
    kvm_stats_header = &descriptors->kvm_stats_header;
    ret = pread(stats_fd, kvm_stats_header, sizeof(*kvm_stats_header), 0);
    if (ret != sizeof(*kvm_stats_header)) {
        error_setg(errp, "KVM stats: failed to read stats header: "
                   "expected %zu actual %zd",
                   sizeof(*kvm_stats_header), ret);
        g_free(descriptors);
        return NULL;
    }
    size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;

    /* Read stats descriptors */
    kvm_stats_desc = g_malloc0_n(kvm_stats_header->num_desc, size_desc);
    ret = pread(stats_fd, kvm_stats_desc,
                size_desc * kvm_stats_header->num_desc,
                kvm_stats_header->desc_offset);

    if (ret != size_desc * kvm_stats_header->num_desc) {
        error_setg(errp, "KVM stats: failed to read stats descriptors: "
                   "expected %zu actual %zd",
                   size_desc * kvm_stats_header->num_desc, ret);
        g_free(descriptors);
        g_free(kvm_stats_desc);
        return NULL;
    }
    descriptors->kvm_stats_desc = kvm_stats_desc;
    descriptors->ident = ident;
    QTAILQ_INSERT_TAIL(&stats_descriptors, descriptors, next);
    return descriptors;
}

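/*
 * Read the stats payload for @target from @stats_fd and convert it into
 * a StatsResultList entry.  The layout follows the KVM binary stats ABI:
 * a header, then an array of descriptors, then a data block that each
 * descriptor indexes via its offset field.
 */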
static void query_stats(StatsResultList **result, StatsTarget target,
                        strList *names, int stats_fd, CPUState *cpu,
                        Error **errp)
{
    struct kvm_stats_desc *kvm_stats_desc;
    struct kvm_stats_header *kvm_stats_header;
    StatsDescriptors *descriptors;
    g_autofree uint64_t *stats_data = NULL;
    struct kvm_stats_desc *pdesc;
    StatsList *stats_list = NULL;
    size_t size_desc, size_data = 0;
    ssize_t ret;
    int i;

    descriptors = find_stats_descriptors(target, stats_fd, errp);
    if (!descriptors) {
        return;
    }

    kvm_stats_header = &descriptors->kvm_stats_header;
    kvm_stats_desc = descriptors->kvm_stats_desc;
    size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;

    /* Tally the total data size */
    for (i = 0; i < kvm_stats_header->num_desc; ++i) {
        pdesc = (void *)kvm_stats_desc + i * size_desc;
        size_data += pdesc->size * sizeof(*stats_data);
    }

    stats_data = g_malloc0(size_data);
    ret = pread(stats_fd, stats_data, size_data, kvm_stats_header->data_offset);

    if (ret != size_data) {
        error_setg(errp, "KVM stats: failed to read data: "
                   "expected %zu actual %zd", size_data, ret);
        return;
    }

    for (i = 0; i < kvm_stats_header->num_desc; ++i) {
        uint64_t *stats;
        pdesc = (void *)kvm_stats_desc + i * size_desc;

        /* Add entry to the list */
        stats = (void *)stats_data + pdesc->offset;
        if (!apply_str_list_filter(pdesc->name, names)) {
            continue;
        }
        stats_list = add_kvmstat_entry(pdesc, stats, stats_list, errp);
    }

    if (!stats_list) {
        return;
    }

    switch (target) {
    case STATS_TARGET_VM:
        add_stats_entry(result, STATS_PROVIDER_KVM, NULL, stats_list);
        break;
    case STATS_TARGET_VCPU:
        add_stats_entry(result, STATS_PROVIDER_KVM,
                        cpu->parent_obj.canonical_path,
                        stats_list);
        break;
    default:
        g_assert_not_reached();
    }
}

static void query_stats_schema(StatsSchemaList **result, StatsTarget target,
                               int stats_fd, Error **errp)
{
    struct kvm_stats_desc *kvm_stats_desc;
    struct kvm_stats_header *kvm_stats_header;
    StatsDescriptors *descriptors;
    struct kvm_stats_desc *pdesc;
    StatsSchemaValueList *stats_list = NULL;
    size_t size_desc;
    int i;

    descriptors = find_stats_descriptors(target, stats_fd, errp);
    if (!descriptors) {
        return;
    }

    kvm_stats_header = &descriptors->kvm_stats_header;
    kvm_stats_desc = descriptors->kvm_stats_desc;
    size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;

    /* Walk the descriptors and convert each one to a schema entry */
    for (i = 0; i < kvm_stats_header->num_desc; ++i) {
        pdesc = (void *)kvm_stats_desc + i * size_desc;
        stats_list = add_kvmschema_entry(pdesc, stats_list, errp);
    }

    add_stats_schema(result, STATS_PROVIDER_KVM, target, stats_list);
}

static void query_stats_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
{
    int stats_fd = cpu->kvm_vcpu_stats_fd;
    Error *local_err = NULL;

    if (stats_fd == -1) {
        error_setg_errno(&local_err, errno, "KVM stats: ioctl failed");
        error_propagate(kvm_stats_args->errp, local_err);
        return;
    }
    query_stats(kvm_stats_args->result.stats, STATS_TARGET_VCPU,
                kvm_stats_args->names, stats_fd, cpu,
                kvm_stats_args->errp);
}

static void query_stats_schema_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
{
    int stats_fd = cpu->kvm_vcpu_stats_fd;
    Error *local_err = NULL;

    if (stats_fd == -1) {
        error_setg_errno(&local_err, errno, "KVM stats: ioctl failed");
        error_propagate(kvm_stats_args->errp, local_err);
        return;
    }
    query_stats_schema(kvm_stats_args->result.schema, STATS_TARGET_VCPU, stats_fd,
                       kvm_stats_args->errp);
}

static void query_stats_cb(StatsResultList **result, StatsTarget target,
                           strList *names, strList *targets, Error **errp)
{
    KVMState *s = kvm_state;
    CPUState *cpu;
    int stats_fd;

    switch (target) {
    case STATS_TARGET_VM:
    {
        stats_fd = kvm_vm_ioctl(s, KVM_GET_STATS_FD, NULL);
        if (stats_fd == -1) {
            error_setg_errno(errp, errno, "KVM stats: ioctl failed");
            return;
        }
        query_stats(result, target, names, stats_fd, NULL, errp);
        close(stats_fd);
        break;
    }
    case STATS_TARGET_VCPU:
    {
        StatsArgs stats_args;
        stats_args.result.stats = result;
        stats_args.names = names;
        stats_args.errp = errp;
        CPU_FOREACH(cpu) {
            if (!apply_str_list_filter(cpu->parent_obj.canonical_path, targets)) {
                continue;
            }
            query_stats_vcpu(cpu, &stats_args);
        }
        break;
    }
    default:
        break;
    }
}

void query_stats_schemas_cb(StatsSchemaList **result, Error **errp)
{
    StatsArgs stats_args;
    KVMState *s = kvm_state;
    int stats_fd;

    stats_fd = kvm_vm_ioctl(s, KVM_GET_STATS_FD, NULL);
    if (stats_fd == -1) {
        error_setg_errno(errp, errno, "KVM stats: ioctl failed");
        return;
    }
    query_stats_schema(result, STATS_TARGET_VM, stats_fd, errp);
    close(stats_fd);

    if (first_cpu) {
        stats_args.result.schema = result;
        stats_args.errp = errp;
        query_stats_schema_vcpu(first_cpu, &stats_args);
    }
}

void kvm_mark_guest_state_protected(void)
{
    kvm_state->guest_state_protected = true;
}

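/*
 * Create a guest_memfd via KVM_CREATE_GUEST_MEMFD, used to back the
 * private memory of confidential guests.  Returns the new fd, or -1 with
 * @errp set.  Hypothetical usage:
 *
 *     Error *err = NULL;
 *     int fd = kvm_create_guest_memfd(region_size, 0, &err);
 *     if (fd < 0) {
 *         error_report_err(err);
 *     }
 */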
int kvm_create_guest_memfd(uint64_t size, uint64_t flags, Error **errp)
{
    int fd;
    struct kvm_create_guest_memfd guest_memfd = {
        .size = size,
        .flags = flags,
    };

    if (!kvm_guest_memfd_supported) {
        error_setg(errp, "KVM does not support guest_memfd");
        return -1;
    }

    fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_GUEST_MEMFD, &guest_memfd);
    if (fd < 0) {
        error_setg_errno(errp, errno, "Error creating KVM guest_memfd");
        return -1;
    }

    return fd;
}