Lines matching full:mem (QEMU, accel/kvm/kvm-all.c)
142 static void kvm_slot_init_dirty_bitmap(KVMSlot *mem);
302 KVMSlot *mem = &kml->slots[i]; in kvm_lookup_matching_slot() local
304 if (start_addr == mem->start_addr && size == mem->memory_size) { in kvm_lookup_matching_slot()
305 return mem; in kvm_lookup_matching_slot()
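
The lookup above only succeeds on an exact match: both the guest-physical base and the size must equal the slot's. A minimal self-contained sketch of that scan (the ToySlot struct and nr_slots parameter are simplifications, not QEMU's real definitions):

#include <stddef.h>
#include <stdint.h>

typedef struct {
    uint64_t start_addr;     /* guest-physical base of the slot */
    uint64_t memory_size;
} ToySlot;

static ToySlot *toy_lookup_matching_slot(ToySlot *slots, size_t nr_slots,
                                         uint64_t start_addr, uint64_t size)
{
    for (size_t i = 0; i < nr_slots; i++) {
        ToySlot *mem = &slots[i];
        /* Both base and size must match exactly; a partial overlap
         * is not considered a match. */
        if (start_addr == mem->start_addr && size == mem->memory_size) {
            return mem;
        }
    }
    return NULL;
}
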
344 KVMSlot *mem = &kml->slots[i]; in kvm_physical_memory_addr_from_host() local
346 if (ram >= mem->ram && ram < mem->ram + mem->memory_size) { in kvm_physical_memory_addr_from_host()
347 *phys_addr = mem->start_addr + (ram - mem->ram); in kvm_physical_memory_addr_from_host()
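
This is the reverse translation: given a pointer into a slot's host mapping, recover the guest-physical address by scanning for the containing slot. A minimal sketch under the same simplified-slot assumption:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef struct {
    uint64_t start_addr;     /* guest-physical base */
    uint64_t memory_size;
    uint8_t *ram;            /* host userspace mapping of the slot */
} ToySlot;

static bool toy_phys_from_host(ToySlot *slots, size_t nr_slots,
                               uint8_t *ram, uint64_t *phys_addr)
{
    for (size_t i = 0; i < nr_slots; i++) {
        ToySlot *mem = &slots[i];
        /* Half-open interval check: [ram, ram + memory_size) */
        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
            *phys_addr = mem->start_addr + (uint64_t)(ram - mem->ram);
            return true;
        }
    }
    return false;
}
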
360 struct kvm_userspace_memory_region2 mem; in kvm_set_user_memory_region() local
363 mem.slot = slot->slot | (kml->as_id << 16); in kvm_set_user_memory_region()
364 mem.guest_phys_addr = slot->start_addr; in kvm_set_user_memory_region()
365 mem.userspace_addr = (unsigned long)slot->ram; in kvm_set_user_memory_region()
366 mem.flags = slot->flags; in kvm_set_user_memory_region()
367 mem.guest_memfd = slot->guest_memfd; in kvm_set_user_memory_region()
368 mem.guest_memfd_offset = slot->guest_memfd_offset; in kvm_set_user_memory_region()
370 if (slot->memory_size && !new && (mem.flags ^ slot->old_flags) & KVM_MEM_READONLY) { in kvm_set_user_memory_region()
373 mem.memory_size = 0; in kvm_set_user_memory_region()
376 ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION2, &mem); in kvm_set_user_memory_region()
378 ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem); in kvm_set_user_memory_region()
384 mem.memory_size = slot->memory_size; in kvm_set_user_memory_region()
386 ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION2, &mem); in kvm_set_user_memory_region()
388 ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem); in kvm_set_user_memory_region()
390 slot->old_flags = mem.flags; in kvm_set_user_memory_region()
392 trace_kvm_set_user_memory(mem.slot >> 16, (uint16_t)mem.slot, mem.flags, in kvm_set_user_memory_region()
393 mem.guest_phys_addr, mem.memory_size, in kvm_set_user_memory_region()
394 mem.userspace_addr, mem.guest_memfd, in kvm_set_user_memory_region()
395 mem.guest_memfd_offset, ret); in kvm_set_user_memory_region()
402 __func__, mem.slot, slot->start_addr, in kvm_set_user_memory_region()
403 (uint64_t)mem.memory_size, mem.flags, in kvm_set_user_memory_region()
404 mem.guest_memfd, (uint64_t)mem.guest_memfd_offset, in kvm_set_user_memory_region()
409 __func__, mem.slot, slot->start_addr, in kvm_set_user_memory_region()
410 (uint64_t)mem.memory_size, strerror(errno)); in kvm_set_user_memory_region()
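
The interesting wrinkle here is the KVM_MEM_READONLY toggle: KVM will not flip that flag on a live slot, so when it changes on an existing slot (i.e. !new), the slot is first deleted by registering it with memory_size == 0 and then re-created with the new flags. KVM_SET_USER_MEMORY_REGION2 is used when guest_memfd is in play, with the legacy KVM_SET_USER_MEMORY_REGION as fallback. A minimal sketch of the delete-then-recreate dance using the legacy ioctl (error handling trimmed; vm_fd comes from KVM_CREATE_VM, and the slot is assumed to already exist):

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>

static int toy_update_region(int vm_fd, struct kvm_userspace_memory_region *r,
                             uint32_t old_flags)
{
    if ((r->flags ^ old_flags) & KVM_MEM_READONLY) {
        /* KVM refuses to flip READONLY in place: delete the slot first
         * by registering it with memory_size == 0 ... */
        struct kvm_userspace_memory_region del = *r;
        del.memory_size = 0;
        if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &del) < 0) {
            return -1;
        }
    }
    /* ... then (re)create it with the new flags and full size. */
    return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, r);
}
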
634 static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem, in kvm_slot_update_flags() argument
637 mem->flags = kvm_mem_flags(mr); in kvm_slot_update_flags()
640 if (mem->flags == mem->old_flags) { in kvm_slot_update_flags()
644 kvm_slot_init_dirty_bitmap(mem); in kvm_slot_update_flags()
645 return kvm_set_user_memory_region(kml, mem, false); in kvm_slot_update_flags()
652 KVMSlot *mem; in kvm_section_update_flags() local
664 mem = kvm_lookup_matching_slot(kml, start_addr, slot_size); in kvm_section_update_flags()
665 if (!mem) { in kvm_section_update_flags()
670 ret = kvm_slot_update_flags(kml, mem, section->mr); in kvm_section_update_flags()
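
kvm_slot_update_flags() is a fast path: recompute the flags from the MemoryRegion and, if nothing changed, skip the ioctl entirely. A self-contained sketch of that shape (ToySlot and toy_set_region() are illustrative stand-ins, not QEMU's definitions):

#include <stdint.h>

typedef struct {
    uint32_t flags;
    uint32_t old_flags;
} ToySlot;

/* Stand-in for kvm_set_user_memory_region(); a real implementation
 * would issue the KVM ioctl here. */
static int toy_set_region(ToySlot *mem)
{
    mem->old_flags = mem->flags;
    return 0;
}

static int toy_slot_update_flags(ToySlot *mem, uint32_t new_flags)
{
    mem->flags = new_flags;
    /* Nothing changed: no ioctl, no bitmap work. */
    if (mem->flags == mem->old_flags) {
        return 0;
    }
    return toy_set_region(mem);
}
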
731 static void kvm_slot_init_dirty_bitmap(KVMSlot *mem) in kvm_slot_init_dirty_bitmap() argument
733 if (!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) || mem->dirty_bmap) { in kvm_slot_init_dirty_bitmap()
751 * And mem->memory_size is aligned to it (otherwise this mem can't in kvm_slot_init_dirty_bitmap()
754 hwaddr bitmap_size = ALIGN(mem->memory_size / qemu_real_host_page_size(), in kvm_slot_init_dirty_bitmap()
756 mem->dirty_bmap = g_malloc0(bitmap_size); in kvm_slot_init_dirty_bitmap()
757 mem->dirty_bmap_size = bitmap_size; in kvm_slot_init_dirty_bitmap()
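
The sizing rule: one bit per host page, rounded up to a multiple of 64 bits because the kernel fills the bitmap in long-sized words. A sketch of the same arithmetic, with the page size passed in rather than taken from qemu_real_host_page_size():

#include <stdint.h>
#include <stdlib.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

static uint8_t *toy_alloc_dirty_bitmap(uint64_t memory_size,
                                       uint64_t page_size,
                                       uint64_t *bitmap_bytes)
{
    /* One bit per page, padded to a whole number of 64-bit words,
     * then converted to bytes. */
    uint64_t bits = memory_size / page_size;
    *bitmap_bytes = ALIGN_UP(bits, 64) / 8;
    return calloc(1, *bitmap_bytes);
}

For example, a 1 GiB slot with 4 KiB pages needs 262144 bits, i.e. a 32768-byte bitmap.
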
789 KVMSlot *mem; in kvm_dirty_ring_mark_page() local
796 mem = &kml->slots[slot_id]; in kvm_dirty_ring_mark_page()
798 if (!mem->memory_size || offset >= in kvm_dirty_ring_mark_page()
799 (mem->memory_size / qemu_real_host_page_size())) { in kvm_dirty_ring_mark_page()
803 set_bit(offset, mem->dirty_bmap); in kvm_dirty_ring_mark_page()
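
A dirty-ring entry names a (slot, page offset) pair, and the offset is validated against the slot before the bit is set: a zero memory_size means the slot was deleted, and an out-of-range offset would scribble past the bitmap. A sketch with set_bit() open-coded on a uint64_t array (slot layout simplified):

#include <stdbool.h>
#include <stdint.h>

typedef struct {
    uint64_t memory_size;
    uint64_t page_size;
    uint64_t *dirty_bmap;
} ToySlot;

static bool toy_mark_page(ToySlot *mem, uint64_t offset /* in pages */)
{
    /* Reject stale entries: deleted slots have memory_size == 0, and
     * the offset must fall inside the slot. */
    if (!mem->memory_size || offset >= mem->memory_size / mem->page_size) {
        return false;
    }
    mem->dirty_bmap[offset / 64] |= UINT64_C(1) << (offset % 64);
    return true;
}
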
997 KVMSlot *mem; in kvm_physical_sync_dirty_bitmap() local
1004 mem = kvm_lookup_matching_slot(kml, start_addr, slot_size); in kvm_physical_sync_dirty_bitmap()
1005 if (!mem) { in kvm_physical_sync_dirty_bitmap()
1009 if (kvm_slot_get_dirty_log(s, mem)) { in kvm_physical_sync_dirty_bitmap()
1010 kvm_slot_sync_dirty_pages(mem); in kvm_physical_sync_dirty_bitmap()
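
kvm_slot_get_dirty_log() boils down to the KVM_GET_DIRTY_LOG ioctl, which copies the kernel's per-slot dirty bitmap into the pre-sized user buffer and resets the kernel side. A minimal sketch of that call (vm_fd, slot_id and bitmap are caller-supplied; error handling trimmed):

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <string.h>

static int toy_get_dirty_log(int vm_fd, int slot_id, void *bitmap)
{
    struct kvm_dirty_log d;

    memset(&d, 0, sizeof(d));
    d.slot = slot_id;        /* QEMU also ORs the as_id into bits 16+ */
    d.dirty_bitmap = bitmap; /* must be the pre-sized per-slot bitmap */
    return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &d);
}
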
1022 static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start, in kvm_log_clear_one_slot() argument
1048 end = mem->memory_size / psize; in kvm_log_clear_one_slot()
1078 assert(mem->dirty_bmap); in kvm_log_clear_one_slot()
1082 bitmap_copy_with_src_offset(bmap_clear, mem->dirty_bmap, in kvm_log_clear_one_slot()
1096 d.dirty_bitmap = mem->dirty_bmap + BIT_WORD(bmap_start); in kvm_log_clear_one_slot()
1103 d.slot = mem->slot | (as_id << 16); in kvm_log_clear_one_slot()
1122 bitmap_clear(mem->dirty_bmap, bmap_start + start_delta, in kvm_log_clear_one_slot()
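
KVM_CLEAR_DIRTY_LOG (available behind KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2) requires first_page to be 64-page aligned, which is why the code above distinguishes a fast path that points d.dirty_bitmap straight into the per-slot bitmap from the unaligned case that copies through a scratch bitmap with bitmap_copy_with_src_offset(). A sketch of the fast path only:

#include <assert.h>
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>
#include <string.h>

static int toy_clear_dirty_log(int vm_fd, uint32_t slot_id,
                               uint64_t first_page, uint32_t num_pages,
                               uint64_t *slot_bmap)
{
    struct kvm_clear_dirty_log d;

    assert(first_page % 64 == 0);   /* KVM requires this alignment */
    memset(&d, 0, sizeof(d));
    d.slot = slot_id;               /* QEMU ORs the as_id into bits 16+ */
    d.first_page = first_page;
    d.num_pages = num_pages;
    /* Point at the word holding bit 'first_page' of the slot bitmap. */
    d.dirty_bitmap = slot_bmap + first_page / 64;
    return ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &d);
}
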
1145 KVMSlot *mem; in kvm_physical_log_clear() local
1164 mem = &kml->slots[i]; in kvm_physical_log_clear()
1166 if (!mem->memory_size || in kvm_physical_log_clear()
1167 mem->start_addr > start + size - 1 || in kvm_physical_log_clear()
1168 start > mem->start_addr + mem->memory_size - 1) { in kvm_physical_log_clear()
1172 if (start >= mem->start_addr) { in kvm_physical_log_clear()
1174 offset = start - mem->start_addr; in kvm_physical_log_clear()
1175 count = MIN(mem->memory_size - offset, size); in kvm_physical_log_clear()
1179 count = MIN(mem->memory_size, size - (mem->start_addr - start)); in kvm_physical_log_clear()
1181 ret = kvm_log_clear_one_slot(mem, kml->as_id, offset, count); in kvm_physical_log_clear()
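
The loop above intersects the requested clear range [start, start+size) with each slot, written with "- 1" on both bounds so a range that ends at 2^64 does not overflow. The same computation, self-contained:

#include <stdbool.h>
#include <stdint.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

static bool toy_overlap(uint64_t slot_start, uint64_t slot_size,
                        uint64_t start, uint64_t size,
                        uint64_t *offset, uint64_t *count)
{
    /* Empty slot, or the two half-open ranges do not intersect. */
    if (!slot_size ||
        slot_start > start + size - 1 ||
        start > slot_start + slot_size - 1) {
        return false;
    }
    if (start >= slot_start) {
        /* Request begins inside the slot. */
        *offset = start - slot_start;
        *count = MIN(slot_size - *offset, size);
    } else {
        /* Request begins before the slot: clear from the slot head. */
        *offset = 0;
        *count = MIN(slot_size, size - (slot_start - start));
    }
    return true;
}
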
1466 KVMSlot *mem; in kvm_set_phys_mem() local
1500 mem = kvm_lookup_matching_slot(kml, start_addr, slot_size); in kvm_set_phys_mem()
1501 if (!mem) { in kvm_set_phys_mem()
1504 if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) { in kvm_set_phys_mem()
1521 kvm_slot_sync_dirty_pages(mem); in kvm_set_phys_mem()
1522 kvm_slot_get_dirty_log(kvm_state, mem); in kvm_set_phys_mem()
1525 kvm_slot_get_dirty_log(kvm_state, mem); in kvm_set_phys_mem()
1527 kvm_slot_sync_dirty_pages(mem); in kvm_set_phys_mem()
1531 g_free(mem->dirty_bmap); in kvm_set_phys_mem()
1532 mem->dirty_bmap = NULL; in kvm_set_phys_mem()
1533 mem->memory_size = 0; in kvm_set_phys_mem()
1534 mem->flags = 0; in kvm_set_phys_mem()
1535 err = kvm_set_user_memory_region(kml, mem, false); in kvm_set_phys_mem()
1551 mem = kvm_alloc_slot(kml); in kvm_set_phys_mem()
1552 mem->as_id = kml->as_id; in kvm_set_phys_mem()
1553 mem->memory_size = slot_size; in kvm_set_phys_mem()
1554 mem->start_addr = start_addr; in kvm_set_phys_mem()
1555 mem->ram_start_offset = ram_start_offset; in kvm_set_phys_mem()
1556 mem->ram = ram; in kvm_set_phys_mem()
1557 mem->flags = kvm_mem_flags(mr); in kvm_set_phys_mem()
1558 mem->guest_memfd = mr->ram_block->guest_memfd; in kvm_set_phys_mem()
1559 mem->guest_memfd_offset = (uint8_t*)ram - mr->ram_block->host; in kvm_set_phys_mem()
1561 kvm_slot_init_dirty_bitmap(mem); in kvm_set_phys_mem()
1562 err = kvm_set_user_memory_region(kml, mem, true); in kvm_set_phys_mem()
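
Two things matter in the removal half of this path: a slot is deleted by re-registering it with memory_size == 0, and any dirty-log state must be harvested before that, because deletion discards it in the kernel. A sketch of removal with the legacy ioctl (ToySlot is simplified, and toy_harvest_dirty_log() stands in for the KVM_GET_DIRTY_LOG call sketched earlier):

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
    uint32_t slot_id;
    uint32_t flags;
    uint64_t start_addr;
    uint64_t memory_size;
    void *ram;
    uint64_t *dirty_bmap;
} ToySlot;

/* Stand-in for the KVM_GET_DIRTY_LOG harvest sketched earlier. */
static void toy_harvest_dirty_log(int vm_fd, ToySlot *mem)
{
    (void)vm_fd;
    (void)mem;
}

static int toy_remove_slot(int vm_fd, ToySlot *mem)
{
    struct kvm_userspace_memory_region r;

    /* Deleting a slot discards its dirty-log state in the kernel,
     * so sync it first, as the listing does before unregistering. */
    if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
        toy_harvest_dirty_log(vm_fd, mem);
    }
    free(mem->dirty_bmap);
    mem->dirty_bmap = NULL;
    mem->memory_size = 0;
    mem->flags = 0;

    memset(&r, 0, sizeof(r));
    r.slot = mem->slot_id;
    r.guest_phys_addr = mem->start_addr;
    r.userspace_addr = (uintptr_t)mem->ram;
    r.memory_size = 0;              /* size 0 deletes the slot */
    return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &r);
}
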
1802 KVMSlot *mem; in kvm_log_sync_global() local
1810 mem = &kml->slots[i]; in kvm_log_sync_global()
1811 if (mem->memory_size && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) { in kvm_log_sync_global()
1812 kvm_slot_sync_dirty_pages(mem); in kvm_log_sync_global()
1815 kvm_slot_get_dirty_log(s, mem)) { in kvm_log_sync_global()
1816 kvm_slot_sync_dirty_pages(mem); in kvm_log_sync_global()
1824 kvm_slot_reset_dirty_pages(mem); in kvm_log_sync_global()
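
The global sync walks every slot and touches only the live ones with dirty logging enabled. The skeleton of that loop, with the per-slot sync step abstracted into a callback (the flag constant is a stand-in for KVM_MEM_LOG_DIRTY_PAGES):

#include <stddef.h>
#include <stdint.h>

#define TOY_MEM_LOG_DIRTY_PAGES (1u << 0)   /* stand-in for the KVM flag */

typedef struct {
    uint64_t memory_size;
    uint32_t flags;
} ToySlot;

static void toy_log_sync_global(ToySlot *slots, size_t nr_slots,
                                void (*sync)(ToySlot *))
{
    for (size_t i = 0; i < nr_slots; i++) {
        ToySlot *mem = &slots[i];
        /* Skip deleted slots and slots without dirty logging. */
        if (mem->memory_size && (mem->flags & TOY_MEM_LOG_DIRTY_PAGES)) {
            sync(mem);
        }
    }
}
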
4002 object_class_property_add(oc, "kvm-shadow-mem", "int", in kvm_accel_class_init()
4005 object_class_property_set_description(oc, "kvm-shadow-mem", in kvm_accel_class_init()
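
For context, the registration above pairs with visitor-based accessors. The getter/setter pair below follows the pattern in recent QEMU, but the header paths and the bool return of visit_type_int() vary across versions, so treat the exact signatures as an assumption to check against your tree:

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "sysemu/kvm_int.h"   /* "system/kvm_int.h" in newer trees */

/* KVM_STATE() and the kvm_shadow_mem field come from QEMU's KVM
 * headers; verify both against the QEMU version in use. */
static void kvm_get_kvm_shadow_mem(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    int64_t value = s->kvm_shadow_mem;

    visit_type_int(v, name, &value, errp);
}

static void kvm_set_kvm_shadow_mem(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    int64_t value;

    if (!visit_type_int(v, name, &value, errp)) {
        return;
    }
    s->kvm_shadow_mem = value;
}
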