Lines Matching +full:value +full:- +full:start
12 * See the COPYING file in the top-level directory.
24 #include "qemu/config-file.h"
25 #include "qemu/error-report.h"
34 #include "system/accel-blocker.h"
40 #include "qemu/main-loop.h"
44 #include "qapi/qapi-types-common.h"
45 #include "qapi/qapi-visit-common.h"
47 #include "qemu/guest-random.h"
49 #include "kvm-cpus.h"
56 /* This check must be after config-host.h is included */
149 if (rfd->gsi == gsi) { in kvm_resample_fd_remove()
161 rfd->gsi = gsi; in kvm_resample_fd_insert()
162 rfd->resample_event = event; in kvm_resample_fd_insert()
172 if (rfd->gsi == gsi) { in kvm_resample_fd_notify()
173 event_notifier_set(rfd->resample_event); in kvm_resample_fd_notify()
190 unsigned int i, cur = kml->nr_slots_allocated; in kvm_slots_grow()
193 if (nr_slots_new > kvm_state->nr_slots_max) { in kvm_slots_grow()
194 nr_slots_new = kvm_state->nr_slots_max; in kvm_slots_grow()
205 assert(kml->slots); in kvm_slots_grow()
206 slots = g_renew(KVMSlot, kml->slots, nr_slots_new); in kvm_slots_grow()
209 * memslots require fields to be zero-initialized. E.g. pointers, in kvm_slots_grow()
212 memset(&slots[cur], 0x0, sizeof(slots[0]) * (nr_slots_new - cur)); in kvm_slots_grow()
219 kml->slots = slots; in kvm_slots_grow()
220 kml->nr_slots_allocated = nr_slots_new; in kvm_slots_grow()
228 return kvm_slots_grow(kml, kml->nr_slots_allocated * 2); in kvm_slots_double()
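Editorial note: the helper above grows the slot array geometrically, with kvm_slots_double() doubling nr_slots_allocated on each call and kvm_slots_grow() capping the result at nr_slots_max. As a worked example, reaching 1024 slots from an initial 16 takes only six reallocations (16, 32, 64, 128, 256, 512, 1024), and the memset() shown above keeps the newly appended tail zero-initialized as the comment requires.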
235 return s->nr_slots_max; in kvm_get_max_memslots()
245 for (i = 0; i < s->nr_as; i++) { in kvm_get_free_memslots()
246 if (!s->as[i].ml) { in kvm_get_free_memslots()
249 used_slots = MAX(used_slots, s->as[i].ml->nr_slots_used); in kvm_get_free_memslots()
253 return s->nr_slots_max - used_slots; in kvm_get_free_memslots()
262 for (i = 0; i < kml->nr_slots_allocated; i++) { in kvm_get_free_slot()
263 if (kml->slots[i].memory_size == 0) { in kvm_get_free_slot()
264 return &kml->slots[i]; in kvm_get_free_slot()
274 n = kml->nr_slots_allocated; in kvm_get_free_slot()
276 return &kml->slots[n]; in kvm_get_free_slot()
301 for (i = 0; i < kml->nr_slots_allocated; i++) { in kvm_lookup_matching_slot()
302 KVMSlot *mem = &kml->slots[i]; in kvm_lookup_matching_slot()
304 if (start_addr == mem->start_addr && size == mem->memory_size) { in kvm_lookup_matching_slot()
313 * Calculate and align the start address and the size of the section.
317 hwaddr *start) in kvm_align_section() argument
319 hwaddr size = int128_get64(section->size); in kvm_align_section()
323 with sub-page size and unaligned start address. Pad the start in kvm_align_section()
325 aligned = ROUND_UP(section->offset_within_address_space, in kvm_align_section()
327 delta = aligned - section->offset_within_address_space; in kvm_align_section()
328 *start = aligned; in kvm_align_section()
333 return (size - delta) & qemu_real_host_page_mask(); in kvm_align_section()
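Editorial note, a worked example of the padding above assuming a 4 KiB host page size: a section at offset_within_address_space 0x1800 with size 0x3000 gives aligned = 0x2000 and delta = 0x800, so *start becomes 0x2000 and the function returns (0x3000 - 0x800) & ~0xfff = 0x2000; the unaligned head and tail of the section are simply never handed to KVM as a memslot.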
339 KVMMemoryListener *kml = &s->memory_listener; in kvm_physical_memory_addr_from_host()
343 for (i = 0; i < kml->nr_slots_allocated; i++) { in kvm_physical_memory_addr_from_host()
344 KVMSlot *mem = &kml->slots[i]; in kvm_physical_memory_addr_from_host()
346 if (ram >= mem->ram && ram < mem->ram + mem->memory_size) { in kvm_physical_memory_addr_from_host()
347 *phys_addr = mem->start_addr + (ram - mem->ram); in kvm_physical_memory_addr_from_host()
363 mem.slot = slot->slot | (kml->as_id << 16); in kvm_set_user_memory_region()
364 mem.guest_phys_addr = slot->start_addr; in kvm_set_user_memory_region()
365 mem.userspace_addr = (unsigned long)slot->ram; in kvm_set_user_memory_region()
366 mem.flags = slot->flags; in kvm_set_user_memory_region()
367 mem.guest_memfd = slot->guest_memfd; in kvm_set_user_memory_region()
368 mem.guest_memfd_offset = slot->guest_memfd_offset; in kvm_set_user_memory_region()
370 if (slot->memory_size && !new && (mem.flags ^ slot->old_flags) & KVM_MEM_READONLY) { in kvm_set_user_memory_region()
372 * value. This is needed based on KVM commit 75d61fbc. */ in kvm_set_user_memory_region()
384 mem.memory_size = slot->memory_size; in kvm_set_user_memory_region()
390 slot->old_flags = mem.flags; in kvm_set_user_memory_region()
399 " start=0x%" PRIx64 ", size=0x%" PRIx64 "," in kvm_set_user_memory_region()
402 __func__, mem.slot, slot->start_addr, in kvm_set_user_memory_region()
408 " start=0x%" PRIx64 ", size=0x%" PRIx64 ": %s", in kvm_set_user_memory_region()
409 __func__, mem.slot, slot->start_addr, in kvm_set_user_memory_region()
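Editorial note: the slot number handed to the kernel above packs the address-space id into the upper 16 bits (mem.slot = slot->slot | (kml->as_id << 16)); kvm_dirty_ring_mark_page() and kvm_dirty_ring_reap_one() further down decode it with >> 16 and & 0xffff. A minimal standalone sketch of that packing, with hypothetical helper names:

    #include <assert.h>
    #include <stdint.h>

    /* Pack a 16-bit address-space id and a 16-bit slot index into the
     * 32-bit slot field used by the KVM memslot ioctls. */
    static uint32_t pack_slot(uint16_t as_id, uint16_t slot)
    {
        return (uint32_t)slot | ((uint32_t)as_id << 16);
    }

    int main(void)
    {
        uint32_t v = pack_slot(1, 42);
        assert((v >> 16) == 1 && (v & 0xffff) == 42);
        return 0;
    }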
420 trace_kvm_park_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu)); in kvm_park_vcpu()
423 vcpu->vcpu_id = kvm_arch_vcpu_id(cpu); in kvm_park_vcpu()
424 vcpu->kvm_fd = cpu->kvm_fd; in kvm_park_vcpu()
425 QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node); in kvm_park_vcpu()
431 int kvm_fd = -ENOENT; in kvm_unpark_vcpu()
433 QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) { in kvm_unpark_vcpu()
434 if (cpu->vcpu_id == vcpu_id) { in kvm_unpark_vcpu()
436 kvm_fd = cpu->kvm_fd; in kvm_unpark_vcpu()
451 QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) { in kvm_reset_parked_vcpus()
452 kvm_arch_reset_parked_vcpu(cpu->vcpu_id, cpu->kvm_fd); in kvm_reset_parked_vcpus()
473 cpu->kvm_fd = kvm_fd; in kvm_create_vcpu()
474 cpu->kvm_state = s; in kvm_create_vcpu()
475 if (!s->guest_state_protected) { in kvm_create_vcpu()
476 cpu->vcpu_dirty = true; in kvm_create_vcpu()
478 cpu->dirty_pages = 0; in kvm_create_vcpu()
479 cpu->throttle_us_per_full = 0; in kvm_create_vcpu()
481 trace_kvm_create_vcpu(cpu->cpu_index, vcpu_id, kvm_fd); in kvm_create_vcpu()
504 trace_kvm_destroy_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu)); in do_kvm_destroy_vcpu()
518 ret = munmap(cpu->kvm_run, mmap_size); in do_kvm_destroy_vcpu()
523 if (cpu->kvm_dirty_gfns) { in do_kvm_destroy_vcpu()
524 ret = munmap(cpu->kvm_dirty_gfns, s->kvm_dirty_ring_bytes); in do_kvm_destroy_vcpu()
549 trace_kvm_init_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu)); in kvm_init_vcpu()
558 error_setg_errno(errp, -ret, in kvm_init_vcpu()
567 error_setg_errno(errp, -mmap_size, in kvm_init_vcpu()
572 cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, in kvm_init_vcpu()
573 cpu->kvm_fd, 0); in kvm_init_vcpu()
574 if (cpu->kvm_run == MAP_FAILED) { in kvm_init_vcpu()
575 ret = -errno; in kvm_init_vcpu()
582 if (s->coalesced_mmio && !s->coalesced_mmio_ring) { in kvm_init_vcpu()
583 s->coalesced_mmio_ring = in kvm_init_vcpu()
584 (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE; in kvm_init_vcpu()
587 if (s->kvm_dirty_ring_size) { in kvm_init_vcpu()
589 cpu->kvm_dirty_gfns = mmap(NULL, s->kvm_dirty_ring_bytes, in kvm_init_vcpu()
591 cpu->kvm_fd, in kvm_init_vcpu()
593 if (cpu->kvm_dirty_gfns == MAP_FAILED) { in kvm_init_vcpu()
594 ret = -errno; in kvm_init_vcpu()
601 error_setg_errno(errp, -ret, in kvm_init_vcpu()
605 cpu->kvm_vcpu_stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL); in kvm_init_vcpu()
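Editorial note, for context on the mmap() calls above: the vcpu's kvm_run area is mapped from the vcpu fd at offset 0, with its size obtained via the KVM_GET_VCPU_MMAP_SIZE ioctl, and the dirty-GFN ring, when enabled, is mapped from the same vcpu fd at a KVM-defined offset. A minimal standalone sketch of the underlying kernel interface (not the QEMU code path, error handling omitted):

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/kvm.h>

    int main(void)
    {
        int kvm  = open("/dev/kvm", O_RDWR);
        int vm   = ioctl(kvm, KVM_CREATE_VM, 0);
        int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
        long sz  = ioctl(kvm, KVM_GET_VCPU_MMAP_SIZE, 0);
        struct kvm_run *run = mmap(NULL, sz, PROT_READ | PROT_WRITE,
                                   MAP_SHARED, vcpu, 0);
        return run == MAP_FAILED;   /* 0 on success */
    }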
617 bool readonly = mr->readonly || memory_region_is_romd(mr); in kvm_mem_flags()
637 mem->flags = kvm_mem_flags(mr); in kvm_slot_update_flags()
640 if (mem->flags == mem->old_flags) { in kvm_slot_update_flags()
670 ret = kvm_slot_update_flags(kml, mem, section->mr); in kvm_section_update_flags()
672 size -= slot_size; in kvm_section_update_flags()
717 ram_addr_t start = slot->ram_start_offset; in kvm_slot_sync_dirty_pages() local
718 ram_addr_t pages = slot->memory_size / qemu_real_host_page_size(); in kvm_slot_sync_dirty_pages()
720 cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages); in kvm_slot_sync_dirty_pages()
725 memset(slot->dirty_bmap, 0, slot->dirty_bmap_size); in kvm_slot_reset_dirty_pages()
728 #define ALIGN(x, y) (((x)+(y)-1) & ~((y)-1))
733 if (!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) || mem->dirty_bmap) { in kvm_slot_init_dirty_bitmap()
740 * bits-per-long. But for the case when the kernel is 64-bit and in kvm_slot_init_dirty_bitmap()

742 * bits-per-long, since sizeof(long) is different between kernel in kvm_slot_init_dirty_bitmap()
751 * And mem->memory_size is aligned to it (otherwise this mem can't in kvm_slot_init_dirty_bitmap()
754 hwaddr bitmap_size = ALIGN(mem->memory_size / qemu_real_host_page_size(), in kvm_slot_init_dirty_bitmap()
756 mem->dirty_bmap = g_malloc0(bitmap_size); in kvm_slot_init_dirty_bitmap()
757 mem->dirty_bmap_size = bitmap_size; in kvm_slot_init_dirty_bitmap()
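Editorial note, a concrete sizing example assuming 4 KiB host pages and 64-bit longs: a 4 GiB memslot spans 1,048,576 pages, which is already a multiple of 64, so the per-slot dirty bitmap allocated above is 1,048,576 bits, i.e. 128 KiB, one bit per host page.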
769 d.dirty_bitmap = slot->dirty_bmap; in kvm_slot_get_dirty_log()
770 d.slot = slot->slot | (slot->as_id << 16); in kvm_slot_get_dirty_log()
773 if (ret == -ENOENT) { in kvm_slot_get_dirty_log()
791 if (as_id >= s->nr_as) { in kvm_dirty_ring_mark_page()
795 kml = s->as[as_id].ml; in kvm_dirty_ring_mark_page()
796 mem = &kml->slots[slot_id]; in kvm_dirty_ring_mark_page()
798 if (!mem->memory_size || offset >= in kvm_dirty_ring_mark_page()
799 (mem->memory_size / qemu_real_host_page_size())) { in kvm_dirty_ring_mark_page()
803 set_bit(offset, mem->dirty_bmap); in kvm_dirty_ring_mark_page()
809 * Read the flags before the value. Pairs with barrier in in dirty_gfn_is_dirtied()
812 return qatomic_load_acquire(&gfn->flags) == KVM_DIRTY_GFN_F_DIRTY; in dirty_gfn_is_dirtied()
818 * Use a store-release so that the CPU that executes KVM_RESET_DIRTY_RINGS in dirty_gfn_set_collected()
822 * ------------------------------------------------------------------------------ in dirty_gfn_set_collected()
824 * store-rel flags for gfn0 in dirty_gfn_set_collected()
825 * load-acq flags for gfn0 in dirty_gfn_set_collected()
826 * store-rel RESET for gfn0 in dirty_gfn_set_collected()
828 * load-acq flags for gfn0 in dirty_gfn_set_collected()
833 qatomic_store_release(&gfn->flags, KVM_DIRTY_GFN_F_RESET); in dirty_gfn_set_collected()
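Editorial note, a standalone sketch of the same pairing using plain C11 atomics instead of QEMU's qatomic_* wrappers (names and flag values are made up for illustration): the load-acquire pairs with KVM's store-release when it publishes a dirty entry, and the final store-release orders the harvesting of slot/offset before the entry is marked for reset.

    #include <stdatomic.h>
    #include <stdint.h>

    struct gfn_entry {
        _Atomic uint32_t flags;       /* 1 = dirty, 2 = reset (illustrative) */
        uint32_t slot;
        uint64_t offset;
    };

    static int harvest(struct gfn_entry *e)
    {
        /* Pairs with the producer's store-release that published the entry. */
        if (atomic_load_explicit(&e->flags, memory_order_acquire) != 1) {
            return 0;                 /* nothing to collect yet */
        }
        uint32_t slot = e->slot;      /* safe to read after the acquire */
        uint64_t offset = e->offset;
        (void)slot; (void)offset;
        /* Everything above is ordered before the reset becomes visible. */
        atomic_store_explicit(&e->flags, 2, memory_order_release);
        return 1;
    }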
842 struct kvm_dirty_gfn *dirty_gfns = cpu->kvm_dirty_gfns, *cur; in kvm_dirty_ring_reap_one()
843 uint32_t ring_size = s->kvm_dirty_ring_size; in kvm_dirty_ring_reap_one()
844 uint32_t count = 0, fetch = cpu->kvm_fetch_index; in kvm_dirty_ring_reap_one()
851 if (!cpu->created) { in kvm_dirty_ring_reap_one()
856 trace_kvm_dirty_ring_reap_vcpu(cpu->cpu_index); in kvm_dirty_ring_reap_one()
863 kvm_dirty_ring_mark_page(s, cur->slot >> 16, cur->slot & 0xffff, in kvm_dirty_ring_reap_one()
864 cur->offset); in kvm_dirty_ring_reap_one()
866 trace_kvm_dirty_ring_page(cpu->cpu_index, fetch, cur->offset); in kvm_dirty_ring_reap_one()
870 cpu->kvm_fetch_index = fetch; in kvm_dirty_ring_reap_one()
871 cpu->dirty_pages += count; in kvm_dirty_ring_reap_one()
898 stamp = get_clock() - stamp; in kvm_dirty_ring_reap_locked()
925 * bitmaps before correctly re-protecting those dirtied pages. in kvm_dirty_ring_reap()
959 * before calling this function have been put into the per-kvmslot
983 * kvm_physical_sync_dirty_bitmap - Sync dirty bitmap from kernel space
988 * NOTE: the caller must hold kml->slots_lock.
1013 size -= slot_size; in kvm_physical_sync_dirty_bitmap()
1017 /* Alignment requirement for KVM_CLEAR_DIRTY_LOG - 64 pages */
1020 #define KVM_CLEAR_LOG_MASK (-KVM_CLEAR_LOG_ALIGN)
1022 static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start, in kvm_log_clear_one_slot() argument
1032 * We need to extend either the start or the size or both to in kvm_log_clear_one_slot()
1033 * satisfy the KVM interface requirement. Firstly, do the start in kvm_log_clear_one_slot()
1036 bmap_start = start & KVM_CLEAR_LOG_MASK; in kvm_log_clear_one_slot()
1037 start_delta = start - bmap_start; in kvm_log_clear_one_slot()
1043 * (1) the size is 64 host pages aligned (just like the start), or in kvm_log_clear_one_slot()
1048 end = mem->memory_size / psize; in kvm_log_clear_one_slot()
1049 if (bmap_npages > end - bmap_start) { in kvm_log_clear_one_slot()
1050 bmap_npages = end - bmap_start; in kvm_log_clear_one_slot()
1064 * |<-------- bmap_npages -----------..>| in kvm_log_clear_one_slot()
1067 * |----------------|-------------|------------------|------------| in kvm_log_clear_one_slot()
1070 * start bmap_start (start) end in kvm_log_clear_one_slot()
1078 assert(mem->dirty_bmap); in kvm_log_clear_one_slot()
1079 if (start_delta || bmap_npages - size / psize) { in kvm_log_clear_one_slot()
1080 /* Slow path - we need to manipulate a temp bitmap */ in kvm_log_clear_one_slot()
1082 bitmap_copy_with_src_offset(bmap_clear, mem->dirty_bmap, in kvm_log_clear_one_slot()
1085 * We need to fill the holes at start because that was not in kvm_log_clear_one_slot()
1093 * Fast path - both start and size align well with BITS_PER_LONG in kvm_log_clear_one_slot()
1096 d.dirty_bitmap = mem->dirty_bmap + BIT_WORD(bmap_start); in kvm_log_clear_one_slot()
1103 d.slot = mem->slot | (as_id << 16); in kvm_log_clear_one_slot()
1106 if (ret < 0 && ret != -ENOENT) { in kvm_log_clear_one_slot()
1108 "start=0x%"PRIx64", size=0x%"PRIx32", errno=%d", in kvm_log_clear_one_slot()
1122 bitmap_clear(mem->dirty_bmap, bmap_start + start_delta, in kvm_log_clear_one_slot()
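Editorial note, a worked example of the alignment above: with 4 KiB host pages, KVM_CLEAR_LOG_ALIGN is 64 pages (256 KiB) and KVM_CLEAR_LOG_MASK clears the low 18 bits. A clear request starting at byte offset 0x43000 into the slot therefore yields bmap_start = 0x43000 & ~0x3ffff = 0x40000 and start_delta = 0x3000 (three pages), and a start_delta that is not a multiple of BITS_PER_LONG is exactly what forces the slow path to copy into a temporary bitmap before issuing KVM_CLEAR_DIRTY_LOG.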
1131 * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range
1133 * NOTE: this will be a no-op if we haven't enabled manual dirty log
1144 uint64_t start, size, offset, count; in kvm_physical_log_clear() local
1148 if (!s->manual_dirty_log_protect) { in kvm_physical_log_clear()
1153 start = section->offset_within_address_space; in kvm_physical_log_clear()
1154 size = int128_get64(section->size); in kvm_physical_log_clear()
1163 for (i = 0; i < kml->nr_slots_allocated; i++) { in kvm_physical_log_clear()
1164 mem = &kml->slots[i]; in kvm_physical_log_clear()
1166 if (!mem->memory_size || in kvm_physical_log_clear()
1167 mem->start_addr > start + size - 1 || in kvm_physical_log_clear()
1168 start > mem->start_addr + mem->memory_size - 1) { in kvm_physical_log_clear()
1172 if (start >= mem->start_addr) { in kvm_physical_log_clear()
1174 offset = start - mem->start_addr; in kvm_physical_log_clear()
1175 count = MIN(mem->memory_size - offset, size); in kvm_physical_log_clear()
1179 count = MIN(mem->memory_size, size - (mem->start_addr - start)); in kvm_physical_log_clear()
1181 ret = kvm_log_clear_one_slot(mem, kml->as_id, offset, count); in kvm_physical_log_clear()
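Editorial note, a worked example of the intersection computed above: for a memslot with start_addr 0x100000 and memory_size 0x80000, a clear request covering [0x140000, 0x180000) takes the first branch, so offset = 0x40000 and count = MIN(0x80000 - 0x40000, 0x40000) = 0x40000; a request starting below the slot at 0xf0000 with size 0x40000 takes the second branch, so the cleared count is MIN(0x80000, 0x40000 - 0x10000) = 0x30000 starting at offset 0.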
1194 hwaddr start, hwaddr size) in kvm_coalesce_mmio_region() argument
1198 if (s->coalesced_mmio) { in kvm_coalesce_mmio_region()
1201 zone.addr = start; in kvm_coalesce_mmio_region()
1211 hwaddr start, hwaddr size) in kvm_uncoalesce_mmio_region() argument
1215 if (s->coalesced_mmio) { in kvm_uncoalesce_mmio_region()
1218 zone.addr = start; in kvm_uncoalesce_mmio_region()
1228 hwaddr start, hwaddr size) in kvm_coalesce_pio_add() argument
1232 if (s->coalesced_pio) { in kvm_coalesce_pio_add()
1235 zone.addr = start; in kvm_coalesce_pio_add()
1245 hwaddr start, hwaddr size) in kvm_coalesce_pio_del() argument
1249 if (s->coalesced_pio) { in kvm_coalesce_pio_del()
1252 zone.addr = start; in kvm_coalesce_pio_del()
1287 * - replace them on VM reset
1288 * - block a migration for a VM with a poisoned page
1304 qemu_ram_remap(page->ram_addr); in kvm_unpoison_all()
1314 if (page->ram_addr == ram_addr) { in kvm_hwpoison_page_add()
1319 page->ram_addr = ram_addr; in kvm_hwpoison_page_add()
1334 * For example, PPC is always treated as big-endian even if running in adjust_ioeventfd_endianness()
1364 return -ENOSYS; in kvm_set_ioeventfd_mmio()
1377 return -errno; in kvm_set_ioeventfd_mmio()
1396 return -ENOSYS; in kvm_set_ioeventfd_pio()
1415 while (list->name) { in kvm_check_extension_list()
1416 if (!kvm_check_extension(s, list->value)) { in kvm_check_extension_list()
1432 static int kvm_set_memory_attributes(hwaddr start, uint64_t size, uint64_t attr) in kvm_set_memory_attributes() argument
1439 attrs.address = start; in kvm_set_memory_attributes()
1447 start, size, attr, strerror(errno)); in kvm_set_memory_attributes()
1452 int kvm_set_memory_attributes_private(hwaddr start, uint64_t size) in kvm_set_memory_attributes_private() argument
1454 return kvm_set_memory_attributes(start, size, KVM_MEMORY_ATTRIBUTE_PRIVATE); in kvm_set_memory_attributes_private()
1457 int kvm_set_memory_attributes_shared(hwaddr start, uint64_t size) in kvm_set_memory_attributes_shared() argument
1459 return kvm_set_memory_attributes(start, size, 0); in kvm_set_memory_attributes_shared()
1468 MemoryRegion *mr = section->mr; in kvm_set_phys_mem()
1469 bool writable = !mr->readonly && !mr->rom_device; in kvm_set_phys_mem()
1477 } else if (!mr->romd_mode) { in kvm_set_phys_mem()
1490 mr_offset = section->offset_within_region + start_addr - in kvm_set_phys_mem()
1491 section->offset_within_address_space; in kvm_set_phys_mem()
1504 if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) { in kvm_set_phys_mem()
1518 if (kvm_state->kvm_dirty_ring_size) { in kvm_set_phys_mem()
1520 if (kvm_state->kvm_dirty_ring_with_bitmap) { in kvm_set_phys_mem()
1531 g_free(mem->dirty_bmap); in kvm_set_phys_mem()
1532 mem->dirty_bmap = NULL; in kvm_set_phys_mem()
1533 mem->memory_size = 0; in kvm_set_phys_mem()
1534 mem->flags = 0; in kvm_set_phys_mem()
1538 __func__, strerror(-err)); in kvm_set_phys_mem()
1542 size -= slot_size; in kvm_set_phys_mem()
1543 kml->nr_slots_used--; in kvm_set_phys_mem()
1552 mem->as_id = kml->as_id; in kvm_set_phys_mem()
1553 mem->memory_size = slot_size; in kvm_set_phys_mem()
1554 mem->start_addr = start_addr; in kvm_set_phys_mem()
1555 mem->ram_start_offset = ram_start_offset; in kvm_set_phys_mem()
1556 mem->ram = ram; in kvm_set_phys_mem()
1557 mem->flags = kvm_mem_flags(mr); in kvm_set_phys_mem()
1558 mem->guest_memfd = mr->ram_block->guest_memfd; in kvm_set_phys_mem()
1559 mem->guest_memfd_offset = (uint8_t*)ram - mr->ram_block->host; in kvm_set_phys_mem()
1565 strerror(-err)); in kvm_set_phys_mem()
1573 __func__, strerror(-err)); in kvm_set_phys_mem()
1581 size -= slot_size; in kvm_set_phys_mem()
1582 kml->nr_slots_used++; in kvm_set_phys_mem()
1589 struct KVMDirtyRingReaper *r = &s->reaper; in kvm_dirty_ring_reaper_thread()
1596 r->reaper_state = KVM_DIRTY_RING_REAPER_WAIT; in kvm_dirty_ring_reaper_thread()
1609 r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING; in kvm_dirty_ring_reaper_thread()
1615 r->reaper_iteration++; in kvm_dirty_ring_reaper_thread()
1623 struct KVMDirtyRingReaper *r = &s->reaper; in kvm_dirty_ring_reaper_init()
1625 qemu_thread_create(&r->reaper_thr, "kvm-reaper", in kvm_dirty_ring_reaper_init()
1632 uint32_t ring_size = s->kvm_dirty_ring_size; in kvm_dirty_ring_init()
1637 s->kvm_dirty_ring_size = 0; in kvm_dirty_ring_init()
1638 s->kvm_dirty_ring_bytes = 0; in kvm_dirty_ring_init()
1662 "(maximum is %ld). Please use a smaller value.", in kvm_dirty_ring_init()
1664 return -EINVAL; in kvm_dirty_ring_init()
1670 "Suggested minimum value is 1024.", strerror(-ret)); in kvm_dirty_ring_init()
1671 return -EIO; in kvm_dirty_ring_init()
1680 "%s. ", strerror(-ret)); in kvm_dirty_ring_init()
1681 return -EIO; in kvm_dirty_ring_init()
1684 s->kvm_dirty_ring_with_bitmap = true; in kvm_dirty_ring_init()
1687 s->kvm_dirty_ring_size = ring_size; in kvm_dirty_ring_init()
1688 s->kvm_dirty_ring_bytes = ring_bytes; in kvm_dirty_ring_init()
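Editorial note, for a rough sense of scale, assuming the 16-byte struct kvm_dirty_gfn from the kernel uapi: a dirty-ring-size of 4096 entries corresponds to roughly 64 KiB of ring memory mmap'd per vCPU, which is the kvm_dirty_ring_bytes figure stored above.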
1700 update->section = *section; in kvm_region_add()
1702 QSIMPLEQ_INSERT_TAIL(&kml->transaction_add, update, next); in kvm_region_add()
1712 update->section = *section; in kvm_region_del()
1714 QSIMPLEQ_INSERT_TAIL(&kml->transaction_del, update, next); in kvm_region_del()
1724 if (QSIMPLEQ_EMPTY(&kml->transaction_add) && in kvm_region_commit()
1725 QSIMPLEQ_EMPTY(&kml->transaction_del)) { in kvm_region_commit()
1736 u1 = QSIMPLEQ_FIRST(&kml->transaction_del); in kvm_region_commit()
1737 u2 = QSIMPLEQ_FIRST(&kml->transaction_add); in kvm_region_commit()
1741 range_init_nofail(&r1, u1->section.offset_within_address_space, in kvm_region_commit()
1742 int128_get64(u1->section.size)); in kvm_region_commit()
1743 range_init_nofail(&r2, u2->section.offset_within_address_space, in kvm_region_commit()
1744 int128_get64(u2->section.size)); in kvm_region_commit()
1763 while (!QSIMPLEQ_EMPTY(&kml->transaction_del)) { in kvm_region_commit()
1764 u1 = QSIMPLEQ_FIRST(&kml->transaction_del); in kvm_region_commit()
1765 QSIMPLEQ_REMOVE_HEAD(&kml->transaction_del, next); in kvm_region_commit()
1767 kvm_set_phys_mem(kml, &u1->section, false); in kvm_region_commit()
1768 memory_region_unref(u1->section.mr); in kvm_region_commit()
1772 while (!QSIMPLEQ_EMPTY(&kml->transaction_add)) { in kvm_region_commit()
1773 u1 = QSIMPLEQ_FIRST(&kml->transaction_add); in kvm_region_commit()
1774 QSIMPLEQ_REMOVE_HEAD(&kml->transaction_add, next); in kvm_region_commit()
1776 memory_region_ref(u1->section.mr); in kvm_region_commit()
1777 kvm_set_phys_mem(kml, &u1->section, true); in kvm_region_commit()
1809 for (i = 0; i < kml->nr_slots_allocated; i++) { in kvm_log_sync_global()
1810 mem = &kml->slots[i]; in kvm_log_sync_global()
1811 if (mem->memory_size && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) { in kvm_log_sync_global()
1814 if (s->kvm_dirty_ring_with_bitmap && last_stage && in kvm_log_sync_global()
1840 section->mr->name, section->offset_within_region, in kvm_log_clear()
1841 int128_get64(section->size)); in kvm_log_clear()
1854 r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space, in kvm_mem_ioeventfd_add()
1855 data, true, int128_get64(section->size), in kvm_mem_ioeventfd_add()
1859 __func__, strerror(-r), -r); in kvm_mem_ioeventfd_add()
1872 r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space, in kvm_mem_ioeventfd_del()
1873 data, false, int128_get64(section->size), in kvm_mem_ioeventfd_del()
1877 __func__, strerror(-r), -r); in kvm_mem_ioeventfd_del()
1890 r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space, in kvm_io_ioeventfd_add()
1891 data, true, int128_get64(section->size), in kvm_io_ioeventfd_add()
1895 __func__, strerror(-r), -r); in kvm_io_ioeventfd_add()
1909 r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space, in kvm_io_ioeventfd_del()
1910 data, false, int128_get64(section->size), in kvm_io_ioeventfd_del()
1914 __func__, strerror(-r), -r); in kvm_io_ioeventfd_del()
1924 kml->as_id = as_id; in kvm_memory_listener_register()
1928 QSIMPLEQ_INIT(&kml->transaction_add); in kvm_memory_listener_register()
1929 QSIMPLEQ_INIT(&kml->transaction_del); in kvm_memory_listener_register()
1931 kml->listener.region_add = kvm_region_add; in kvm_memory_listener_register()
1932 kml->listener.region_del = kvm_region_del; in kvm_memory_listener_register()
1933 kml->listener.commit = kvm_region_commit; in kvm_memory_listener_register()
1934 kml->listener.log_start = kvm_log_start; in kvm_memory_listener_register()
1935 kml->listener.log_stop = kvm_log_stop; in kvm_memory_listener_register()
1936 kml->listener.priority = MEMORY_LISTENER_PRIORITY_ACCEL; in kvm_memory_listener_register()
1937 kml->listener.name = name; in kvm_memory_listener_register()
1939 if (s->kvm_dirty_ring_size) { in kvm_memory_listener_register()
1940 kml->listener.log_sync_global = kvm_log_sync_global; in kvm_memory_listener_register()
1942 kml->listener.log_sync = kvm_log_sync; in kvm_memory_listener_register()
1943 kml->listener.log_clear = kvm_log_clear; in kvm_memory_listener_register()
1946 memory_listener_register(&kml->listener, as); in kvm_memory_listener_register()
1948 for (i = 0; i < s->nr_as; ++i) { in kvm_memory_listener_register()
1949 if (!s->as[i].as) { in kvm_memory_listener_register()
1950 s->as[i].as = as; in kvm_memory_listener_register()
1951 s->as[i].ml = kml; in kvm_memory_listener_register()
1958 .name = "kvm-io",
1975 ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event); in kvm_set_irq()
1981 return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status; in kvm_set_irq()
1992 set_bit(gsi, s->used_gsi_bitmap); in set_gsi()
1997 clear_bit(gsi, s->used_gsi_bitmap); in clear_gsi()
2004 gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1; in kvm_init_irq_routing()
2007 s->used_gsi_bitmap = bitmap_new(gsi_count); in kvm_init_irq_routing()
2008 s->gsi_count = gsi_count; in kvm_init_irq_routing()
2011 s->irq_routes = g_malloc0(sizeof(*s->irq_routes)); in kvm_init_irq_routing()
2012 s->nr_allocated_irq_routes = 0; in kvm_init_irq_routing()
2029 s->irq_routes->flags = 0; in kvm_irqchip_commit_routes()
2031 ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes); in kvm_irqchip_commit_routes()
2041 if (s->irq_routes->nr == s->nr_allocated_irq_routes) { in kvm_add_routing_entry()
2042 n = s->nr_allocated_irq_routes * 2; in kvm_add_routing_entry()
2048 s->irq_routes = g_realloc(s->irq_routes, size); in kvm_add_routing_entry()
2049 s->nr_allocated_irq_routes = n; in kvm_add_routing_entry()
2051 n = s->irq_routes->nr++; in kvm_add_routing_entry()
2052 new = &s->irq_routes->entries[n]; in kvm_add_routing_entry()
2056 set_gsi(s, entry->gsi); in kvm_add_routing_entry()
2065 for (n = 0; n < s->irq_routes->nr; n++) { in kvm_update_routing_entry()
2066 entry = &s->irq_routes->entries[n]; in kvm_update_routing_entry()
2067 if (entry->gsi != new_entry->gsi) { in kvm_update_routing_entry()
2080 return -ESRCH; in kvm_update_routing_entry()
2087 assert(pin < s->gsi_count); in kvm_irqchip_add_irq_route()
2106 for (i = 0; i < s->irq_routes->nr; i++) { in kvm_irqchip_release_virq()
2107 e = &s->irq_routes->entries[i]; in kvm_irqchip_release_virq()
2108 if (e->gsi == virq) { in kvm_irqchip_release_virq()
2109 s->irq_routes->nr--; in kvm_irqchip_release_virq()
2110 *e = s->irq_routes->entries[s->irq_routes->nr]; in kvm_irqchip_release_virq()
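Editorial note: releasing a route above uses the classic swap-with-last removal for an unordered array: nr is decremented and the final entry overwrites the freed slot, so no shifting is needed. A generic standalone sketch with hypothetical names:

    #include <stddef.h>

    /* O(1) removal from an unordered array: overwrite entries[i] with the
     * last element (this does not preserve ordering). */
    static void swap_remove(int *entries, size_t *nr, size_t i)
    {
        entries[i] = entries[--*nr];
    }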
2138 next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count); in kvm_irqchip_get_virq()
2139 if (next_virq >= s->gsi_count) { in kvm_irqchip_get_virq()
2140 return -ENOSPC; in kvm_irqchip_get_virq()
2163 KVMState *s = c->s; in kvm_irqchip_add_msi_route()
2175 return -ENOSYS; in kvm_irqchip_add_msi_route()
2195 return -EINVAL; in kvm_irqchip_add_msi_route()
2198 if (s->irq_routes->nr < s->gsi_count) { in kvm_irqchip_add_msi_route()
2199 trace_kvm_irqchip_add_msi_route(dev ? dev->name : (char *)"N/A", in kvm_irqchip_add_msi_route()
2204 c->changes++; in kvm_irqchip_add_msi_route()
2207 return -ENOSPC; in kvm_irqchip_add_msi_route()
2223 return -ENOSYS; in kvm_irqchip_update_msi_route()
2237 return -EINVAL; in kvm_irqchip_update_msi_route()
2250 int rfd = resample ? event_notifier_get_fd(resample) : -1; in kvm_irqchip_assign_irqfd()
2258 if (rfd != -1) { in kvm_irqchip_assign_irqfd()
2308 return -ENOSYS; in kvm_irqchip_add_msi_route()
2313 return -ENOSYS; in kvm_irqchip_add_adapter_route()
2318 return -ENOSYS; in kvm_irqchip_add_hv_sint_route()
2330 return -ENOSYS; in kvm_irqchip_update_msi_route()
2350 gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi); in kvm_irqchip_add_irqfd_notifier()
2353 return -ENXIO; in kvm_irqchip_add_irqfd_notifier()
2362 gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi); in kvm_irqchip_remove_irqfd_notifier()
2365 return -ENXIO; in kvm_irqchip_remove_irqfd_notifier()
2372 g_hash_table_insert(s->gsimap, irq, GINT_TO_POINTER(gsi)); in kvm_irqchip_set_qemuirq_gsi()
2379 assert(s->kernel_irqchip_split != ON_OFF_AUTO_AUTO); in kvm_irqchip_create()
2385 fprintf(stderr, "Enable kernel irqchip failed: %s\n", strerror(-ret)); in kvm_irqchip_create()
2397 /* First probe and see if there's an arch-specific hook to create the in kvm_irqchip_create()
2398 * in-kernel irqchip for us */ in kvm_irqchip_create()
2401 if (s->kernel_irqchip_split == ON_OFF_AUTO_ON) { in kvm_irqchip_create()
2409 fprintf(stderr, "Create kernel irqchip failed: %s\n", strerror(-ret)); in kvm_irqchip_create()
2414 /* If we have an in-kernel IRQ chip then we must have asynchronous in kvm_irqchip_create()
2422 s->gsimap = g_hash_table_new(g_direct_hash, g_direct_equal); in kvm_irqchip_create()
2455 return kvm_state && kvm_state->kvm_dirty_ring_size; in kvm_dirty_ring_enabled()
2464 return kvm_state->kvm_dirty_ring_size; in kvm_dirty_ring_size()
2472 s = KVM_STATE(ms->accelerator); in do_kvm_create_vm()
2476 } while (ret == -EINTR); in do_kvm_create_vm()
2479 error_report("ioctl(KVM_CREATE_VM) failed: %s", strerror(-ret)); in do_kvm_create_vm()
2482 if (ret == -EINVAL) { in do_kvm_create_vm()
2485 error_printf("- for kernels supporting the" in do_kvm_create_vm()
2488 error_printf("- for kernels supporting the vm.allocate_pgste" in do_kvm_create_vm()
2492 if (ret == -EINVAL) { in do_kvm_create_vm()
2507 if (object_property_find(OBJECT(current_machine), "kvm-type")) { in find_kvm_machine_type()
2510 "kvm-type", in find_kvm_machine_type()
2512 type = mc->kvm_type(ms, kvm_type); in find_kvm_machine_type()
2513 } else if (mc->kvm_type) { in find_kvm_machine_type()
2514 type = mc->kvm_type(ms, NULL); in find_kvm_machine_type()
2538 * page is wr-protected initially, which conflicts with how the kvm dirty ring is in kvm_setup_dirty_ring()
2539 * used - the kvm dirty ring requires all pages to be wr-protected at the very in kvm_setup_dirty_ring()
2545 * instead of clearing dirty bit, it can be a way to explicitly wr-protect in kvm_setup_dirty_ring()
2548 if (!s->kvm_dirty_ring_size) { in kvm_setup_dirty_ring()
2553 s->manual_dirty_log_protect = dirty_log_manual_caps; in kvm_setup_dirty_ring()
2562 s->manual_dirty_log_protect = 0; in kvm_setup_dirty_ring()
2574 "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n" in kvm_init()
2580 { "SMP", ms->smp.cpus }, in kvm_init()
2581 { "hotpluggable", ms->smp.max_cpus }, in kvm_init()
2592 s = KVM_STATE(ms->accelerator); in kvm_init()
2602 s->sigmask_len = 8; in kvm_init()
2606 QTAILQ_INIT(&s->kvm_sw_breakpoints); in kvm_init()
2608 QLIST_INIT(&s->kvm_parked_vcpus); in kvm_init()
2609 s->fd = qemu_open_old(s->device ?: "/dev/kvm", O_RDWR); in kvm_init()
2610 if (s->fd == -1) { in kvm_init()
2612 ret = -errno; in kvm_init()
2619 ret = -EINVAL; in kvm_init()
2626 ret = -EINVAL; in kvm_init()
2632 s->nr_slots_max = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS); in kvm_init()
2634 /* If unspecified, use the default value */ in kvm_init()
2635 if (!s->nr_slots_max) { in kvm_init()
2636 s->nr_slots_max = KVM_MEMSLOTS_NR_MAX_DEFAULT; in kvm_init()
2641 ret = -EINVAL; in kvm_init()
2650 s->vmfd = ret; in kvm_init()
2652 s->nr_as = kvm_vm_check_extension(s, KVM_CAP_MULTI_ADDRESS_SPACE); in kvm_init()
2653 if (s->nr_as <= 1) { in kvm_init()
2654 s->nr_as = 1; in kvm_init()
2656 s->as = g_new0(struct KVMAs, s->nr_as); in kvm_init()
2662 while (nc->name) { in kvm_init()
2663 if (nc->num > soft_vcpus_limit) { in kvm_init()
2666 nc->name, nc->num, soft_vcpus_limit); in kvm_init()
2668 if (nc->num > hard_vcpus_limit) { in kvm_init()
2671 nc->name, nc->num, hard_vcpus_limit); in kvm_init()
2684 ret = -EINVAL; in kvm_init()
2685 error_report("kvm does not support %s", missing_cap->name); in kvm_init()
2690 s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO); in kvm_init()
2691 s->coalesced_pio = s->coalesced_mmio && in kvm_init()
2700 s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS); in kvm_init()
2702 s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE); in kvm_init()
2704 s->irq_set_ioctl = KVM_IRQ_LINE; in kvm_init()
2706 s->irq_set_ioctl = KVM_IRQ_LINE_STATUS; in kvm_init()
2751 if (s->kernel_irqchip_split == ON_OFF_AUTO_AUTO) { in kvm_init()
2752 … s->kernel_irqchip_split = mc->default_kernel_irqchip_split ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; in kvm_init()
2757 if (s->kernel_irqchip_allowed) { in kvm_init()
2761 s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add; in kvm_init()
2762 s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del; in kvm_init()
2763 s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region; in kvm_init()
2764 s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region; in kvm_init()
2766 kvm_memory_listener_register(s, &s->memory_listener, in kvm_init()
2767 &address_space_memory, 0, "kvm-memory"); in kvm_init()
2771 s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU); in kvm_init()
2772 if (!s->sync_mmu) { in kvm_init()
2777 if (s->kvm_dirty_ring_size) { in kvm_init()
2790 if (s->vmfd >= 0) { in kvm_init()
2791 close(s->vmfd); in kvm_init()
2793 if (s->fd != -1) { in kvm_init()
2794 close(s->fd); in kvm_init()
2796 g_free(s->as); in kvm_init()
2797 g_free(s->memory_listener.slots); in kvm_init()
2804 s->sigmask_len = sigmask_len; in kvm_set_sigmask_len()
2826 run->internal.suberror); in kvm_handle_internal_error()
2828 for (i = 0; i < run->internal.ndata; ++i) { in kvm_handle_internal_error()
2830 i, (uint64_t)run->internal.data[i]); in kvm_handle_internal_error()
2832 if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) { in kvm_handle_internal_error()
2842 return -1; in kvm_handle_internal_error()
2849 if (!s || s->coalesced_flush_in_progress) { in kvm_flush_coalesced_mmio_buffer()
2853 s->coalesced_flush_in_progress = true; in kvm_flush_coalesced_mmio_buffer()
2855 if (s->coalesced_mmio_ring) { in kvm_flush_coalesced_mmio_buffer()
2856 struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring; in kvm_flush_coalesced_mmio_buffer()
2857 while (ring->first != ring->last) { in kvm_flush_coalesced_mmio_buffer()
2860 ent = &ring->coalesced_mmio[ring->first]; in kvm_flush_coalesced_mmio_buffer()
2862 if (ent->pio == 1) { in kvm_flush_coalesced_mmio_buffer()
2863 address_space_write(&address_space_io, ent->phys_addr, in kvm_flush_coalesced_mmio_buffer()
2864 MEMTXATTRS_UNSPECIFIED, ent->data, in kvm_flush_coalesced_mmio_buffer()
2865 ent->len); in kvm_flush_coalesced_mmio_buffer()
2867 cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len); in kvm_flush_coalesced_mmio_buffer()
2870 ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX; in kvm_flush_coalesced_mmio_buffer()
2874 s->coalesced_flush_in_progress = false; in kvm_flush_coalesced_mmio_buffer()
2879 if (!cpu->vcpu_dirty && !kvm_state->guest_state_protected) { in do_kvm_cpu_synchronize_state()
2886 error_report("Failed to get registers: %s", strerror(-ret)); in do_kvm_cpu_synchronize_state()
2893 cpu->vcpu_dirty = true; in do_kvm_cpu_synchronize_state()
2899 if (!cpu->vcpu_dirty && !kvm_state->guest_state_protected) { in kvm_cpu_synchronize_state()
2913 strerror(-ret)); in do_kvm_cpu_synchronize_post_reset()
2919 cpu->vcpu_dirty = false; in do_kvm_cpu_synchronize_post_reset()
2940 strerror(-ret)); in do_kvm_cpu_synchronize_post_init()
2945 cpu->vcpu_dirty = false; in do_kvm_cpu_synchronize_post_init()
2950 if (!kvm_state->guest_state_protected) { in kvm_cpu_synchronize_post_init()
2961 cpu->vcpu_dirty = true; in do_kvm_cpu_synchronize_pre_loadvm()
2977 qatomic_set(&cpu->kvm_run->immediate_exit, 1); in kvm_cpu_kick()
2998 qatomic_set(&cpu->kvm_run->immediate_exit, 0); in kvm_eat_signals()
2999 /* Write kvm_run->immediate_exit before the cpu->exit_request in kvm_eat_signals()
3011 if (r == -1 && !(errno == EAGAIN || errno == EINTR)) { in kvm_eat_signals()
3017 if (r == -1) { in kvm_eat_signals()
3024 int kvm_convert_memory(hwaddr start, hwaddr size, bool to_private) in kvm_convert_memory() argument
3031 int ret = -EINVAL; in kvm_convert_memory()
3033 trace_kvm_convert_memory(start, size, to_private ? "shared_to_private" : "private_to_shared"); in kvm_convert_memory()
3035 if (!QEMU_PTR_IS_ALIGNED(start, qemu_real_host_page_size()) || in kvm_convert_memory()
3044 section = memory_region_find(get_system_memory(), start, size); in kvm_convert_memory()
3048 * Ignore converting a non-assigned region to shared. in kvm_convert_memory()
3052 * and vIO-APIC 0xFEC00000 4K page. in kvm_convert_memory()
3077 start, size, to_private ? "private" : "shared"); in kvm_convert_memory()
3083 ret = kvm_set_memory_attributes_private(start, size); in kvm_convert_memory()
3085 ret = kvm_set_memory_attributes_shared(start, size); in kvm_convert_memory()
3095 if (rb->page_size != qemu_real_host_page_size()) { in kvm_convert_memory()
3098 * pre-allocated and doesn't need to be discarded in kvm_convert_memory()
3114 struct kvm_run *run = cpu->kvm_run; in kvm_cpu_exec()
3120 qatomic_set(&cpu->exit_request, 0); in kvm_cpu_exec()
3130 if (cpu->vcpu_dirty) { in kvm_cpu_exec()
3138 strerror(-ret)); in kvm_cpu_exec()
3140 ret = -1; in kvm_cpu_exec()
3144 cpu->vcpu_dirty = false; in kvm_cpu_exec()
3148 if (qatomic_read(&cpu->exit_request)) { in kvm_cpu_exec()
3152 * instruction emulation. This self-signal will ensure that we in kvm_cpu_exec()
3158 /* Read cpu->exit_request before KVM_RUN reads run->immediate_exit. in kvm_cpu_exec()
3178 if (run_ret == -EINTR || run_ret == -EAGAIN) { in kvm_cpu_exec()
3184 if (!(run_ret == -EFAULT && run->exit_reason == KVM_EXIT_MEMORY_FAULT)) { in kvm_cpu_exec()
3186 strerror(-run_ret)); in kvm_cpu_exec()
3188 if (run_ret == -EBUSY) { in kvm_cpu_exec()
3195 ret = -1; in kvm_cpu_exec()
3200 trace_kvm_run_exit(cpu->cpu_index, run->exit_reason); in kvm_cpu_exec()
3201 switch (run->exit_reason) { in kvm_cpu_exec()
3204 kvm_handle_io(run->io.port, attrs, in kvm_cpu_exec()
3205 (uint8_t *)run + run->io.data_offset, in kvm_cpu_exec()
3206 run->io.direction, in kvm_cpu_exec()
3207 run->io.size, in kvm_cpu_exec()
3208 run->io.count); in kvm_cpu_exec()
3214 run->mmio.phys_addr, attrs, in kvm_cpu_exec()
3215 run->mmio.data, in kvm_cpu_exec()
3216 run->mmio.len, in kvm_cpu_exec()
3217 run->mmio.is_write); in kvm_cpu_exec()
3229 (uint64_t)run->hw.hardware_exit_reason); in kvm_cpu_exec()
3230 ret = -1; in kvm_cpu_exec()
3240 trace_kvm_dirty_ring_full(cpu->cpu_index); in kvm_cpu_exec()
3246 * the miss of sleep, so just reap the ring-full vCPU. in kvm_cpu_exec()
3258 trace_kvm_run_exit_system_event(cpu->cpu_index, run->system_event.type); in kvm_cpu_exec()
3259 switch (run->system_event.type) { in kvm_cpu_exec()
3281 trace_kvm_memory_fault(run->memory_fault.gpa, in kvm_cpu_exec()
3282 run->memory_fault.size, in kvm_cpu_exec()
3283 run->memory_fault.flags); in kvm_cpu_exec()
3284 if (run->memory_fault.flags & ~KVM_MEMORY_EXIT_FLAG_PRIVATE) { in kvm_cpu_exec()
3286 (uint64_t)run->memory_fault.flags); in kvm_cpu_exec()
3287 ret = -1; in kvm_cpu_exec()
3290 ret = kvm_convert_memory(run->memory_fault.gpa, run->memory_fault.size, in kvm_cpu_exec()
3291 run->memory_fault.flags & KVM_MEMORY_EXIT_FLAG_PRIVATE); in kvm_cpu_exec()
3307 qatomic_set(&cpu->exit_request, 0); in kvm_cpu_exec()
3322 ret = ioctl(s->fd, type, arg); in kvm_ioctl()
3323 if (ret == -1) { in kvm_ioctl()
3324 ret = -errno; in kvm_ioctl()
3341 ret = ioctl(s->vmfd, type, arg); in kvm_vm_ioctl()
3343 if (ret == -1) { in kvm_vm_ioctl()
3344 ret = -errno; in kvm_vm_ioctl()
3359 trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg); in kvm_vcpu_ioctl()
3361 ret = ioctl(cpu->kvm_fd, type, arg); in kvm_vcpu_ioctl()
3363 if (ret == -1) { in kvm_vcpu_ioctl()
3364 ret = -errno; in kvm_vcpu_ioctl()
3383 if (ret == -1) { in kvm_device_ioctl()
3384 ret = -errno; in kvm_device_ioctl()
3432 error_setg_errno(errp, -err, in kvm_device_access()
3442 return kvm_state->sync_mmu; in kvm_has_sync_mmu()
3447 return kvm_state->vcpu_events; in kvm_has_vcpu_events()
3452 return kvm_state->max_nested_state_len; in kvm_max_nested_state_length()
3474 QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) { in kvm_find_sw_breakpoint()
3475 if (bp->pc == pc) { in kvm_find_sw_breakpoint()
3484 return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints); in kvm_sw_breakpoints_active()
3497 dbg_data->err = kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG, in kvm_invoke_set_guest_debug()
3498 &dbg_data->dbg); in kvm_invoke_set_guest_debug()
3507 if (cpu->singlestep_enabled) { in kvm_update_guest_debug()
3510 if (cpu->singlestep_enabled & SSTEP_NOIRQ) { in kvm_update_guest_debug()
3535 bp->use_count++; in kvm_insert_breakpoint()
3540 bp->pc = addr; in kvm_insert_breakpoint()
3541 bp->use_count = 1; in kvm_insert_breakpoint()
3548 QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry); in kvm_insert_breakpoint()
3573 return -ENOENT; in kvm_remove_breakpoint()
3576 if (bp->use_count > 1) { in kvm_remove_breakpoint()
3577 bp->use_count--; in kvm_remove_breakpoint()
3586 QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry); in kvm_remove_breakpoint()
3607 KVMState *s = cpu->kvm_state; in kvm_remove_all_breakpoints()
3610 QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) { in kvm_remove_all_breakpoints()
3619 QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry); in kvm_remove_all_breakpoints()
3639 sigmask->len = s->sigmask_len; in kvm_set_signal_mask()
3640 memcpy(sigmask->sigset, sigset, sizeof(*sigset)); in kvm_set_signal_mask()
3677 fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r)); in kvm_init_cpu_signals()
3692 qatomic_set(&cpu->exit_request, 1); in kvm_on_sigbus_vcpu()
3721 create_dev.fd = -1; in kvm_create_device()
3725 return -ENOTSUP; in kvm_create_device()
3740 .fd = -1, in kvm_device_supported()
3760 trace_kvm_failed_reg_set(id, strerror(-r)); in kvm_set_one_reg()
3774 trace_kvm_failed_reg_get(id, strerror(-r)); in kvm_get_one_reg()
3782 KVMState *kvm = KVM_STATE(ms->accelerator); in kvm_accel_has_memory()
3785 for (i = 0; i < kvm->nr_as; ++i) { in kvm_accel_has_memory()
3786 if (kvm->as[i].as == as && kvm->as[i].ml) { in kvm_accel_has_memory()
3788 return NULL != kvm_lookup_matching_slot(kvm->as[i].ml, in kvm_accel_has_memory()
3801 int64_t value = s->kvm_shadow_mem; in kvm_get_kvm_shadow_mem() local
3803 visit_type_int(v, name, &value, errp); in kvm_get_kvm_shadow_mem()
3811 int64_t value; in kvm_set_kvm_shadow_mem() local
3813 if (s->fd != -1) { in kvm_set_kvm_shadow_mem()
3818 if (!visit_type_int(v, name, &value, errp)) { in kvm_set_kvm_shadow_mem()
3822 s->kvm_shadow_mem = value; in kvm_set_kvm_shadow_mem()
3832 if (s->fd != -1) { in kvm_set_kernel_irqchip()
3842 s->kernel_irqchip_allowed = true; in kvm_set_kernel_irqchip()
3843 s->kernel_irqchip_required = true; in kvm_set_kernel_irqchip()
3844 s->kernel_irqchip_split = ON_OFF_AUTO_OFF; in kvm_set_kernel_irqchip()
3847 s->kernel_irqchip_allowed = false; in kvm_set_kernel_irqchip()
3848 s->kernel_irqchip_required = false; in kvm_set_kernel_irqchip()
3849 s->kernel_irqchip_split = ON_OFF_AUTO_OFF; in kvm_set_kernel_irqchip()
3852 s->kernel_irqchip_allowed = true; in kvm_set_kernel_irqchip()
3853 s->kernel_irqchip_required = true; in kvm_set_kernel_irqchip()
3854 s->kernel_irqchip_split = ON_OFF_AUTO_ON; in kvm_set_kernel_irqchip()
3857 /* The value was checked in visit_type_OnOffSplit() above. If in kvm_set_kernel_irqchip()
3866 return kvm_state->kernel_irqchip_allowed; in kvm_kernel_irqchip_allowed()
3871 return kvm_state->kernel_irqchip_required; in kvm_kernel_irqchip_required()
3876 return kvm_state->kernel_irqchip_split == ON_OFF_AUTO_ON; in kvm_kernel_irqchip_split()
3884 uint32_t value = s->kvm_dirty_ring_size; in kvm_get_dirty_ring_size() local
3886 visit_type_uint32(v, name, &value, errp); in kvm_get_dirty_ring_size()
3894 uint32_t value; in kvm_set_dirty_ring_size() local
3896 if (s->fd != -1) { in kvm_set_dirty_ring_size()
3901 if (!visit_type_uint32(v, name, &value, errp)) { in kvm_set_dirty_ring_size()
3904 if (value & (value - 1)) { in kvm_set_dirty_ring_size()
3905 error_setg(errp, "dirty-ring-size must be a power of two."); in kvm_set_dirty_ring_size()
3909 s->kvm_dirty_ring_size = value; in kvm_set_dirty_ring_size()
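Editorial note: the value & (value - 1) test above is the usual power-of-two check: 4096 & 4095 == 0, so 4096 is accepted, while 3000 & 2999 != 0 and trips the dirty-ring-size error.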
3917 return g_strdup(s->device); in kvm_get_device()
3921 const char *value, in kvm_set_device() argument
3926 g_free(s->device); in kvm_set_device()
3927 s->device = g_strdup(value); in kvm_set_device()
3930 static void kvm_set_kvm_rapl(Object *obj, bool value, Error **errp) in kvm_set_kvm_rapl() argument
3933 s->msr_energy.enable = value; in kvm_set_kvm_rapl()
3941 g_free(s->msr_energy.socket_path); in kvm_set_kvm_rapl_socket_path()
3942 s->msr_energy.socket_path = g_strdup(str); in kvm_set_kvm_rapl_socket_path()
3949 s->fd = -1; in kvm_accel_instance_init()
3950 s->vmfd = -1; in kvm_accel_instance_init()
3951 s->kvm_shadow_mem = -1; in kvm_accel_instance_init()
3952 s->kernel_irqchip_allowed = true; in kvm_accel_instance_init()
3953 s->kernel_irqchip_split = ON_OFF_AUTO_AUTO; in kvm_accel_instance_init()
3955 s->kvm_dirty_ring_size = 0; in kvm_accel_instance_init()
3956 s->kvm_dirty_ring_with_bitmap = false; in kvm_accel_instance_init()
3957 s->kvm_eager_split_size = 0; in kvm_accel_instance_init()
3958 s->notify_vmexit = NOTIFY_VMEXIT_OPTION_RUN; in kvm_accel_instance_init()
3959 s->notify_window = 0; in kvm_accel_instance_init()
3960 s->xen_version = 0; in kvm_accel_instance_init()
3961 s->xen_gnttab_max_frames = 64; in kvm_accel_instance_init()
3962 s->xen_evtchn_max_pirq = 256; in kvm_accel_instance_init()
3963 s->device = NULL; in kvm_accel_instance_init()
3964 s->msr_energy.enable = false; in kvm_accel_instance_init()
3981 ac->name = "KVM"; in kvm_accel_class_init()
3982 ac->init_machine = kvm_init; in kvm_accel_class_init()
3983 ac->has_memory = kvm_accel_has_memory; in kvm_accel_class_init()
3984 ac->allowed = &kvm_allowed; in kvm_accel_class_init()
3985 ac->gdbstub_supported_sstep_flags = kvm_gdbstub_sstep_flags; in kvm_accel_class_init()
3987 object_class_property_add(oc, "kernel-irqchip", "on|off|split", in kvm_accel_class_init()
3990 object_class_property_set_description(oc, "kernel-irqchip", in kvm_accel_class_init()
3991 "Configure KVM in-kernel irqchip"); in kvm_accel_class_init()
3993 object_class_property_add(oc, "kvm-shadow-mem", "int", in kvm_accel_class_init()
3996 object_class_property_set_description(oc, "kvm-shadow-mem", in kvm_accel_class_init()
3999 object_class_property_add(oc, "dirty-ring-size", "uint32", in kvm_accel_class_init()
4002 object_class_property_set_description(oc, "dirty-ring-size", in kvm_accel_class_init()
4015 object_class_property_add_str(oc, "rapl-helper-socket", NULL, in kvm_accel_class_init()
4017 object_class_property_set_description(oc, "rapl-helper-socket", in kvm_accel_class_init()
4057 switch (pdesc->flags & KVM_STATS_TYPE_MASK) { in add_kvmstat_entry()
4068 switch (pdesc->flags & KVM_STATS_UNIT_MASK) { in add_kvmstat_entry()
4079 switch (pdesc->flags & KVM_STATS_BASE_MASK) { in add_kvmstat_entry()
4089 stats->name = g_strdup(pdesc->name); in add_kvmstat_entry()
4090 stats->value = g_new0(StatsValue, 1); in add_kvmstat_entry()
4092 if ((pdesc->flags & KVM_STATS_UNIT_MASK) == KVM_STATS_UNIT_BOOLEAN) { in add_kvmstat_entry()
4093 stats->value->u.boolean = *stats_data; in add_kvmstat_entry()
4094 stats->value->type = QTYPE_QBOOL; in add_kvmstat_entry()
4095 } else if (pdesc->size == 1) { in add_kvmstat_entry()
4096 stats->value->u.scalar = *stats_data; in add_kvmstat_entry()
4097 stats->value->type = QTYPE_QNUM; in add_kvmstat_entry()
4100 for (i = 0; i < pdesc->size; i++) { in add_kvmstat_entry()
4103 stats->value->u.list = val_list; in add_kvmstat_entry()
4104 stats->value->type = QTYPE_QLIST; in add_kvmstat_entry()
4116 schema_entry->value = g_new0(StatsSchemaValue, 1); in add_kvmschema_entry()
4118 switch (pdesc->flags & KVM_STATS_TYPE_MASK) { in add_kvmschema_entry()
4120 schema_entry->value->type = STATS_TYPE_CUMULATIVE; in add_kvmschema_entry()
4123 schema_entry->value->type = STATS_TYPE_INSTANT; in add_kvmschema_entry()
4126 schema_entry->value->type = STATS_TYPE_PEAK; in add_kvmschema_entry()
4129 schema_entry->value->type = STATS_TYPE_LINEAR_HISTOGRAM; in add_kvmschema_entry()
4130 schema_entry->value->bucket_size = pdesc->bucket_size; in add_kvmschema_entry()
4131 schema_entry->value->has_bucket_size = true; in add_kvmschema_entry()
4134 schema_entry->value->type = STATS_TYPE_LOG2_HISTOGRAM; in add_kvmschema_entry()
4140 switch (pdesc->flags & KVM_STATS_UNIT_MASK) { in add_kvmschema_entry()
4144 schema_entry->value->has_unit = true; in add_kvmschema_entry()
4145 schema_entry->value->unit = STATS_UNIT_BOOLEAN; in add_kvmschema_entry()
4148 schema_entry->value->has_unit = true; in add_kvmschema_entry()
4149 schema_entry->value->unit = STATS_UNIT_BYTES; in add_kvmschema_entry()
4152 schema_entry->value->has_unit = true; in add_kvmschema_entry()
4153 schema_entry->value->unit = STATS_UNIT_CYCLES; in add_kvmschema_entry()
4156 schema_entry->value->has_unit = true; in add_kvmschema_entry()
4157 schema_entry->value->unit = STATS_UNIT_SECONDS; in add_kvmschema_entry()
4163 schema_entry->value->exponent = pdesc->exponent; in add_kvmschema_entry()
4164 if (pdesc->exponent) { in add_kvmschema_entry()
4165 switch (pdesc->flags & KVM_STATS_BASE_MASK) { in add_kvmschema_entry()
4167 schema_entry->value->has_base = true; in add_kvmschema_entry()
4168 schema_entry->value->base = 10; in add_kvmschema_entry()
4171 schema_entry->value->has_base = true; in add_kvmschema_entry()
4172 schema_entry->value->base = 2; in add_kvmschema_entry()
4179 schema_entry->value->name = g_strdup(pdesc->name); in add_kvmschema_entry()
4180 schema_entry->next = list; in add_kvmschema_entry()
4183 g_free(schema_entry->value); in add_kvmschema_entry()
4215 if (g_str_equal(descriptors->ident, ident)) { in find_stats_descriptors()
4223 kvm_stats_header = &descriptors->kvm_stats_header; in find_stats_descriptors()
4232 size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size; in find_stats_descriptors()
4235 kvm_stats_desc = g_malloc0_n(kvm_stats_header->num_desc, size_desc); in find_stats_descriptors()
4237 size_desc * kvm_stats_header->num_desc, in find_stats_descriptors()
4238 kvm_stats_header->desc_offset); in find_stats_descriptors()
4240 if (ret != size_desc * kvm_stats_header->num_desc) { in find_stats_descriptors()
4243 size_desc * kvm_stats_header->num_desc, ret); in find_stats_descriptors()
4248 descriptors->kvm_stats_desc = kvm_stats_desc; in find_stats_descriptors()
4249 descriptors->ident = ident; in find_stats_descriptors()
4273 kvm_stats_header = &descriptors->kvm_stats_header; in query_stats()
4274 kvm_stats_desc = descriptors->kvm_stats_desc; in query_stats()
4275 size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size; in query_stats()
4278 for (i = 0; i < kvm_stats_header->num_desc; ++i) { in query_stats()
4280 size_data += pdesc->size * sizeof(*stats_data); in query_stats()
4284 ret = pread(stats_fd, stats_data, size_data, kvm_stats_header->data_offset); in query_stats()
4292 for (i = 0; i < kvm_stats_header->num_desc; ++i) { in query_stats()
4297 stats = (void *)stats_data + pdesc->offset; in query_stats()
4298 if (!apply_str_list_filter(pdesc->name, names)) { in query_stats()
4314 cpu->parent_obj.canonical_path, in query_stats()
4338 kvm_stats_header = &descriptors->kvm_stats_header; in query_stats_schema()
4339 kvm_stats_desc = descriptors->kvm_stats_desc; in query_stats_schema()
4340 size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size; in query_stats_schema()
4343 for (i = 0; i < kvm_stats_header->num_desc; ++i) { in query_stats_schema()
4353 int stats_fd = cpu->kvm_vcpu_stats_fd; in query_stats_vcpu()
4356 if (stats_fd == -1) { in query_stats_vcpu()
4358 error_propagate(kvm_stats_args->errp, local_err); in query_stats_vcpu()
4361 query_stats(kvm_stats_args->result.stats, STATS_TARGET_VCPU, in query_stats_vcpu()
4362 kvm_stats_args->names, stats_fd, cpu, in query_stats_vcpu()
4363 kvm_stats_args->errp); in query_stats_vcpu()
4368 int stats_fd = cpu->kvm_vcpu_stats_fd; in query_stats_schema_vcpu()
4371 if (stats_fd == -1) { in query_stats_schema_vcpu()
4373 error_propagate(kvm_stats_args->errp, local_err); in query_stats_schema_vcpu()
4376 query_stats_schema(kvm_stats_args->result.schema, STATS_TARGET_VCPU, stats_fd, in query_stats_schema_vcpu()
4377 kvm_stats_args->errp); in query_stats_schema_vcpu()
4391 if (stats_fd == -1) { in query_stats_cb()
4406 if (!apply_str_list_filter(cpu->parent_obj.canonical_path, targets)) { in query_stats_cb()
4425 if (stats_fd == -1) { in query_stats_schemas_cb()
4441 kvm_state->guest_state_protected = true; in kvm_mark_guest_state_protected()
4454 return -1; in kvm_create_guest_memfd()
4460 return -1; in kvm_create_guest_memfd()