Lines Matching +full:next +full:- +full:level +full:- +full:cache
21 #include "exec/page-vary.h"
31 #include "accel/tcg/cpu-ops.h"
36 #include "exec/page-protection.h"
38 #include "exec/translation-block.h"
39 #include "hw/qdev-core.h"
40 #include "hw/qdev-properties.h"
47 #include "qemu/config-file.h"
48 #include "qemu/error-report.h"
49 #include "qemu/qemu-print.h"
58 #include "system/xen-mapcache.h"
66 #include "qemu/main-loop.h"
73 #include "qapi/qapi-types-migration.h"
81 #include "qemu/mmap-alloc.h"
90 #include "memory-internal.h"
110 /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
124 #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
141 /* This is a multi-level map on the physical address space.
142 * The bottom level has pointers to MemoryRegionSections.
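For orientation, a minimal standalone sketch (assumed values for TARGET_PAGE_BITS and P_L2_BITS; illustrative only, not QEMU code) of how a physical address is split into per-level indices for this multi-level map, mirroring the (index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1) stepping seen in the hits below:

#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 12   /* assumed 4 KiB target pages */
#define P_L2_BITS        9    /* assumed 512 entries per node */
#define P_L2_SIZE        (1 << P_L2_BITS)
#define ADDR_SPACE_BITS  64
#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

int main(void)
{
    uint64_t addr = 0x12345678000ULL;
    uint64_t index = addr >> TARGET_PAGE_BITS;   /* page frame number */

    /* Walk from the top level down, as phys_page_find() does. */
    for (int level = P_L2_LEVELS - 1; level >= 0; level--) {
        unsigned slot = (index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1);
        printf("level %d -> slot %u\n", level, slot);
    }
    return 0;
}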
187 if (map->nodes_nb + nodes > map->nodes_nb_alloc) { in phys_map_node_reserve()
188 map->nodes_nb_alloc = MAX(alloc_hint, map->nodes_nb + nodes); in phys_map_node_reserve()
189 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc); in phys_map_node_reserve()
190 alloc_hint = map->nodes_nb_alloc; in phys_map_node_reserve()
201 ret = map->nodes_nb++; in phys_map_node_alloc()
202 p = map->nodes[ret]; in phys_map_node_alloc()
204 assert(ret != map->nodes_nb_alloc); in phys_map_node_alloc()
216 int level) in phys_page_set_level() argument
219 hwaddr step = (hwaddr)1 << (level * P_L2_BITS); in phys_page_set_level()
221 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) { in phys_page_set_level()
222 lp->ptr = phys_map_node_alloc(map, level == 0); in phys_page_set_level()
224 p = map->nodes[lp->ptr]; in phys_page_set_level()
225 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)]; in phys_page_set_level()
228 if ((*index & (step - 1)) == 0 && *nb >= step) { in phys_page_set_level()
229 lp->skip = 0; in phys_page_set_level()
230 lp->ptr = leaf; in phys_page_set_level()
232 *nb -= step; in phys_page_set_level()
234 phys_page_set_level(map, lp, index, nb, leaf, level - 1); in phys_page_set_level()
244 /* Wildly overreserve - it doesn't matter much. */ in phys_page_set()
245 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS); in phys_page_set()
247 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1); in phys_page_set()
260 if (lp->ptr == PHYS_MAP_NODE_NIL) { in phys_page_compact()
264 p = nodes[lp->ptr]; in phys_page_compact()
286 lp->skip + p[valid_ptr].skip >= (1 << 6)) { in phys_page_compact()
290 lp->ptr = p[valid_ptr].ptr; in phys_page_compact()
298 lp->skip = 0; in phys_page_compact()
300 lp->skip += p[valid_ptr].skip; in phys_page_compact()
306 if (d->phys_map.skip) { in address_space_dispatch_compact()
307 phys_page_compact(&d->phys_map, d->map.nodes); in address_space_dispatch_compact()
317 return int128_gethi(section->size) || in section_covers_addr()
318 range_covers_byte(section->offset_within_address_space, in section_covers_addr()
319 int128_getlo(section->size), addr); in section_covers_addr()
324 PhysPageEntry lp = d->phys_map, *p; in phys_page_find()
325 Node *nodes = d->map.nodes; in phys_page_find()
326 MemoryRegionSection *sections = d->map.sections; in phys_page_find()
330 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) { in phys_page_find()
335 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)]; in phys_page_find()
350 MemoryRegionSection *section = qatomic_read(&d->mru_section); in address_space_lookup_region()
353 if (!section || section == &d->map.sections[PHYS_SECTION_UNASSIGNED] || in address_space_lookup_region()
356 qatomic_set(&d->mru_section, section); in address_space_lookup_region()
358 if (resolve_subpage && section->mr->subpage) { in address_space_lookup_region()
359 subpage = container_of(section->mr, subpage_t, iomem); in address_space_lookup_region()
360 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]]; in address_space_lookup_region()
376 addr -= section->offset_within_address_space; in address_space_translate_internal()
379 *xlat = addr + section->offset_within_region; in address_space_translate_internal()
381 mr = section->mr; in address_space_translate_internal()
383 /* MMIO registers can be expected to perform full-width accesses based only in address_space_translate_internal()
395 diff = int128_sub(section->size, int128_make64(addr)); in address_space_translate_internal()
402 * address_space_translate_iommu - translate an address through an IOMMU
433 hwaddr page_mask = (hwaddr)-1; in address_space_translate_iommu()
441 if (imrc->attrs_to_index) { in address_space_translate_iommu()
442 iommu_idx = imrc->attrs_to_index(iommu_mr, attrs); in address_space_translate_iommu()
445 iotlb = imrc->translate(iommu_mr, addr, is_write ? in address_space_translate_iommu()
455 *plen_out = MIN(*plen_out, (addr | iotlb.addr_mask) - addr + 1); in address_space_translate_iommu()
462 iommu_mr = memory_region_get_iommu(section->mr); in address_space_translate_iommu()
475 * flatview_do_translate - translate an address in FlatView
506 hwaddr plen = (hwaddr)(-1); in flatview_do_translate()
516 iommu_mr = memory_region_get_iommu(section->mr); in flatview_do_translate()
552 xlat += section.offset_within_address_space - in address_space_get_iotlb_entry()
583 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr; in flatview_translate()
604 if (!notifier->active) { in tcg_iommu_unmap_notify()
607 tlb_flush(notifier->cpu); in tcg_iommu_unmap_notify()
608 notifier->active = false; in tcg_iommu_unmap_notify()
628 for (i = 0; i < cpu->iommu_notifiers->len; i++) { in tcg_register_iommu_notifier()
629 notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i); in tcg_register_iommu_notifier()
630 if (notifier->mr == mr && notifier->iommu_idx == iommu_idx) { in tcg_register_iommu_notifier()
634 if (i == cpu->iommu_notifiers->len) { in tcg_register_iommu_notifier()
636 cpu->iommu_notifiers = g_array_set_size(cpu->iommu_notifiers, i + 1); in tcg_register_iommu_notifier()
638 g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i) = notifier; in tcg_register_iommu_notifier()
640 notifier->mr = mr; in tcg_register_iommu_notifier()
641 notifier->iommu_idx = iommu_idx; in tcg_register_iommu_notifier()
642 notifier->cpu = cpu; in tcg_register_iommu_notifier()
649 iommu_notifier_init(&notifier->n, in tcg_register_iommu_notifier()
655 memory_region_register_iommu_notifier(notifier->mr, &notifier->n, in tcg_register_iommu_notifier()
659 if (!notifier->active) { in tcg_register_iommu_notifier()
660 notifier->active = true; in tcg_register_iommu_notifier()
670 for (i = 0; i < cpu->iommu_notifiers->len; i++) { in tcg_iommu_free_notifier_list()
671 notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i); in tcg_iommu_free_notifier_list()
672 memory_region_unregister_iommu_notifier(notifier->mr, &notifier->n); in tcg_iommu_free_notifier_list()
675 g_array_free(cpu->iommu_notifiers, true); in tcg_iommu_free_notifier_list()
680 cpu->iommu_notifiers = g_array_new(false, true, sizeof(TCGIOMMUNotifier *)); in tcg_iommu_init_notifier_list()
695 AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch; in address_space_translate_for_iotlb()
700 iommu_mr = memory_region_get_iommu(section->mr); in address_space_translate_for_iotlb()
707 iommu_idx = imrc->attrs_to_index(iommu_mr, attrs); in address_space_translate_for_iotlb()
710 * doesn't short-cut its translation table walk. in address_space_translate_for_iotlb()
712 iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, iommu_idx); in address_space_translate_for_iotlb()
733 assert(!memory_region_is_iommu(section->mr)); in address_space_translate_for_iotlb()
739 * We should be given a page-aligned address -- certainly in address_space_translate_for_iotlb()
748 return &d->map.sections[PHYS_SECTION_UNASSIGNED]; in address_space_translate_for_iotlb()
755 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx]; in iotlb_to_section()
756 AddressSpaceDispatch *d = cpuas->memory_dispatch; in iotlb_to_section()
760 assert(section_index < d->map.sections_nb); in iotlb_to_section()
761 ret = d->map.sections + section_index; in iotlb_to_section()
762 assert(ret->mr); in iotlb_to_section()
763 assert(ret->mr->ops); in iotlb_to_section()
772 AddressSpaceDispatch *d = flatview_to_dispatch(section->fv); in memory_region_section_get_iotlb()
773 return section - d->map.sections; in memory_region_section_get_iotlb()
786 as_name = g_strdup_printf("%s-%d", prefix, cpu->cpu_index); in cpu_address_space_init()
791 assert(asidx < cpu->num_ases); in cpu_address_space_init()
795 cpu->as = as; in cpu_address_space_init()
801 if (!cpu->cpu_ases) { in cpu_address_space_init()
802 cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases); in cpu_address_space_init()
803 cpu->cpu_ases_count = cpu->num_ases; in cpu_address_space_init()
806 newas = &cpu->cpu_ases[asidx]; in cpu_address_space_init()
807 newas->cpu = cpu; in cpu_address_space_init()
808 newas->as = as; in cpu_address_space_init()
810 newas->tcg_as_listener.log_global_after_sync = tcg_log_global_after_sync; in cpu_address_space_init()
811 newas->tcg_as_listener.commit = tcg_commit; in cpu_address_space_init()
812 newas->tcg_as_listener.name = "tcg"; in cpu_address_space_init()
813 memory_listener_register(&newas->tcg_as_listener, as); in cpu_address_space_init()
821 assert(cpu->cpu_ases); in cpu_address_space_destroy()
822 assert(asidx >= 0 && asidx < cpu->num_ases); in cpu_address_space_destroy()
826 cpuas = &cpu->cpu_ases[asidx]; in cpu_address_space_destroy()
828 memory_listener_unregister(&cpuas->tcg_as_listener); in cpu_address_space_destroy()
831 address_space_destroy(cpuas->as); in cpu_address_space_destroy()
832 g_free_rcu(cpuas->as, rcu); in cpu_address_space_destroy()
836 cpu->as = NULL; in cpu_address_space_destroy()
839 if (--cpu->cpu_ases_count == 0) { in cpu_address_space_destroy()
840 g_free(cpu->cpu_ases); in cpu_address_space_destroy()
841 cpu->cpu_ases = NULL; in cpu_address_space_destroy()
848 return cpu->cpu_ases[asidx].as; in cpu_get_address_space()
857 if (block && addr - block->offset < block->max_length) { in qemu_get_ram_block()
861 if (addr - block->offset < block->max_length) { in qemu_get_ram_block()
903 assert(block == qemu_get_ram_block(end - 1)); in tlb_reset_dirty_range_all()
904 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset); in tlb_reset_dirty_range_all()
933 assert(start >= ramblock->offset && in cpu_physical_memory_test_and_clear_dirty()
934 start + length <= ramblock->offset + ramblock->used_length); in cpu_physical_memory_test_and_clear_dirty()
939 unsigned long num = MIN(end - page, in cpu_physical_memory_test_and_clear_dirty()
940 DIRTY_MEMORY_BLOCK_SIZE - offset); in cpu_physical_memory_test_and_clear_dirty()
942 dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx], in cpu_physical_memory_test_and_clear_dirty()
947 mr_offset = (ram_addr_t)(start_page << TARGET_PAGE_BITS) - ramblock->offset; in cpu_physical_memory_test_and_clear_dirty()
948 mr_size = (end - start_page) << TARGET_PAGE_BITS; in cpu_physical_memory_test_and_clear_dirty()
949 memory_region_clear_dirty_bitmap(ramblock->mr, mr_offset, mr_size); in cpu_physical_memory_test_and_clear_dirty()
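As an aside, a small self-contained sketch (made-up BLOCK_SIZE; not the QEMU dirty-bitmap code) of the block-by-block test-and-clear stepping used above, where each iteration handles MIN(end - page, BLOCK_SIZE - offset) pages:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 64          /* assumed bits per bitmap block for the demo */
#define NUM_BLOCKS 4

static uint8_t blocks[NUM_BLOCKS][BLOCK_SIZE / 8];

static bool test_and_clear_range(unsigned long page, unsigned long end)
{
    bool dirty = false;

    while (page < end) {
        unsigned long idx = page / BLOCK_SIZE;
        unsigned long off = page % BLOCK_SIZE;
        unsigned long num = end - page < BLOCK_SIZE - off
                            ? end - page : BLOCK_SIZE - off;

        for (unsigned long i = 0; i < num; i++) {
            unsigned long bit = off + i;
            uint8_t mask = 1u << (bit % 8);
            if (blocks[idx][bit / 8] & mask) {
                dirty = true;
                blocks[idx][bit / 8] &= ~mask;
            }
        }
        page += num;
    }
    return dirty;
}

int main(void)
{
    memset(blocks, 0xff, sizeof(blocks));      /* mark everything dirty */
    printf("range dirty: %d\n", test_and_clear_range(10, 130));
    printf("again:       %d\n", test_and_clear_range(10, 130));
    return 0;
}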
977 ((last - first) >> (TARGET_PAGE_BITS + 3))); in cpu_physical_memory_snapshot_and_clear_dirty()
978 snap->start = first; in cpu_physical_memory_snapshot_and_clear_dirty()
979 snap->end = last; in cpu_physical_memory_snapshot_and_clear_dirty()
991 unsigned long num = MIN(end - page, in cpu_physical_memory_snapshot_and_clear_dirty()
992 DIRTY_MEMORY_BLOCK_SIZE - ofs); in cpu_physical_memory_snapshot_and_clear_dirty()
998 bitmap_copy_and_clear_atomic(snap->dirty + dest, in cpu_physical_memory_snapshot_and_clear_dirty()
999 blocks->blocks[idx] + ofs, in cpu_physical_memory_snapshot_and_clear_dirty()
1019 assert(start >= snap->start); in cpu_physical_memory_snapshot_get_dirty()
1020 assert(start + length <= snap->end); in cpu_physical_memory_snapshot_get_dirty()
1022 end = TARGET_PAGE_ALIGN(start + length - snap->start) >> TARGET_PAGE_BITS; in cpu_physical_memory_snapshot_get_dirty()
1023 page = (start - snap->start) >> TARGET_PAGE_BITS; in cpu_physical_memory_snapshot_get_dirty()
1026 if (test_bit(page, snap->dirty)) { in cpu_physical_memory_snapshot_get_dirty()
1041 /* The physical section number is ORed with a page-aligned in phys_section_add()
1043 * never overflow into the page-aligned value. in phys_section_add()
1045 assert(map->sections_nb < TARGET_PAGE_SIZE); in phys_section_add()
1047 if (map->sections_nb == map->sections_nb_alloc) { in phys_section_add()
1048 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16); in phys_section_add()
1049 map->sections = g_renew(MemoryRegionSection, map->sections, in phys_section_add()
1050 map->sections_nb_alloc); in phys_section_add()
1052 map->sections[map->sections_nb] = *section; in phys_section_add()
1053 memory_region_ref(section->mr); in phys_section_add()
1054 return map->sections_nb++; in phys_section_add()
1059 bool have_sub_page = mr->subpage; in phys_section_destroy()
1065 object_unref(OBJECT(&subpage->iomem)); in phys_section_destroy()
1072 while (map->sections_nb > 0) { in phys_sections_free()
1073 MemoryRegionSection *section = &map->sections[--map->sections_nb]; in phys_sections_free()
1074 phys_section_destroy(section->mr); in phys_sections_free()
1076 g_free(map->sections); in phys_sections_free()
1077 g_free(map->nodes); in phys_sections_free()
1084 hwaddr base = section->offset_within_address_space in register_subpage()
1093 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned); in register_subpage()
1095 if (!(existing->mr->subpage)) { in register_subpage()
1098 subsection.mr = &subpage->iomem; in register_subpage()
1100 phys_section_add(&d->map, &subsection)); in register_subpage()
1102 subpage = container_of(existing->mr, subpage_t, iomem); in register_subpage()
1104 start = section->offset_within_address_space & ~TARGET_PAGE_MASK; in register_subpage()
1105 end = start + int128_get64(section->size) - 1; in register_subpage()
1107 phys_section_add(&d->map, section)); in register_subpage()
1115 hwaddr start_addr = section->offset_within_address_space; in register_multipage()
1116 uint16_t section_index = phys_section_add(&d->map, section); in register_multipage()
1117 uint64_t num_pages = int128_get64(int128_rshift(section->size, in register_multipage()
1139 - remain.offset_within_address_space; in flatview_add_to_dispatch()
1197 psize = size_to_str(block->page_size); in ram_block_format()
1200 block->idstr, psize, in ram_block_format()
1201 (uint64_t)block->offset, in ram_block_format()
1202 (uint64_t)block->used_length, in ram_block_format()
1203 (uint64_t)block->max_length, in ram_block_format()
1204 (uint64_t)(uintptr_t)block->host, in ram_block_format()
1205 block->mr->readonly ? "ro" : "rw"); in ram_block_format()
1275 return -errno; in get_file_size()
1304 return -errno; in get_file_size()
1311 int64_t align = -1; in get_file_align()
1316 return -errno; in get_file_align()
1331 return -errno; in get_file_align()
1336 return -1; in get_file_align()
1360 int fd = -1; in file_ram_open()
1379 return -errno; in file_ram_open()
1382 return -EISDIR; in file_ram_open()
1391 return -ENOENT; in file_ram_open()
1422 return -errno; in file_ram_open()
1443 block->page_size = qemu_fd_getpagesize(fd); in file_ram_alloc()
1444 if (block->mr->align % block->page_size) { in file_ram_alloc()
1447 block->mr->align, block->page_size); in file_ram_alloc()
1449 } else if (block->mr->align && !is_power_of_2(block->mr->align)) { in file_ram_alloc()
1451 " must be a power of two", block->mr->align); in file_ram_alloc()
1453 } else if (offset % block->page_size) { in file_ram_alloc()
1456 offset, block->page_size); in file_ram_alloc()
1459 block->mr->align = MAX(block->page_size, block->mr->align); in file_ram_alloc()
1462 block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN); in file_ram_alloc()
1466 if (memory < block->page_size) { in file_ram_alloc()
1469 memory, block->page_size); in file_ram_alloc()
1473 memory = ROUND_UP(memory, block->page_size); in file_ram_alloc()
1481 * Do not truncate the non-empty backend file to avoid corrupting in file_ram_alloc()
1486 * those labels. Therefore, extending the non-empty backend file in file_ram_alloc()
1493 qemu_map_flags = (block->flags & RAM_READONLY) ? QEMU_MAP_READONLY : 0; in file_ram_alloc()
1494 qemu_map_flags |= (block->flags & RAM_SHARED) ? QEMU_MAP_SHARED : 0; in file_ram_alloc()
1495 qemu_map_flags |= (block->flags & RAM_PMEM) ? QEMU_MAP_SYNC : 0; in file_ram_alloc()
1496 qemu_map_flags |= (block->flags & RAM_NORESERVE) ? QEMU_MAP_NORESERVE : 0; in file_ram_alloc()
1497 area = qemu_ram_mmap(fd, memory, block->mr->align, qemu_map_flags, offset); in file_ram_alloc()
1504 block->fd = fd; in file_ram_alloc()
1505 block->fd_offset = offset; in file_ram_alloc()
1526 ram_addr_t candidate, next = RAM_ADDR_MAX; in find_ram_offset() local
1531 candidate = block->offset + block->max_length; in find_ram_offset()
1538 if (next_block->offset >= candidate) { in find_ram_offset()
1539 next = MIN(next, next_block->offset); in find_ram_offset()
1547 if (next - candidate >= size && next - candidate < mingap) { in find_ram_offset()
1549 mingap = next - candidate; in find_ram_offset()
1552 trace_find_ram_offset_loop(size, candidate, offset, next, mingap); in find_ram_offset()
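For illustration, a standalone best-fit sketch (made-up block layout; a simplified take on the gap search in the find_ram_offset() hits above, not the actual implementation) of choosing the smallest gap between existing blocks that still fits the requested size:

#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t offset, max_length; } Block;

static uint64_t find_offset(const Block *blocks, int n, uint64_t size)
{
    uint64_t best = UINT64_MAX, mingap = UINT64_MAX;

    for (int i = 0; i < n; i++) {
        uint64_t candidate = blocks[i].offset + blocks[i].max_length;
        uint64_t next = UINT64_MAX;

        /* Smallest block start at or above the candidate end. */
        for (int j = 0; j < n; j++) {
            if (blocks[j].offset >= candidate && blocks[j].offset < next) {
                next = blocks[j].offset;
            }
        }
        if (next - candidate >= size && next - candidate < mingap) {
            best = candidate;
            mingap = next - candidate;
        }
    }
    return best;
}

int main(void)
{
    Block blocks[] = { { 0, 0x1000 }, { 0x4000, 0x2000 }, { 0x8000, 0x1000 } };
    printf("offset: 0x%llx\n",
           (unsigned long long)find_offset(blocks, 3, 0x1000));
    return 0;
}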
1576 "but dump-guest-core=off specified\n"); in qemu_ram_setup_dump()
1583 return rb->idstr; in qemu_ram_get_idstr()
1588 return rb->host; in qemu_ram_get_host_addr()
1593 return rb->offset; in qemu_ram_get_offset()
1598 return rb->used_length; in qemu_ram_get_used_length()
1603 return rb->max_length; in qemu_ram_get_max_length()
1608 return rb->flags & RAM_SHARED; in qemu_ram_is_shared()
1613 return rb->flags & RAM_NORESERVE; in qemu_ram_is_noreserve()
1619 return rb->flags & RAM_UF_ZEROPAGE; in qemu_ram_is_uf_zeroable()
1624 rb->flags |= RAM_UF_ZEROPAGE; in qemu_ram_set_uf_zeroable()
1629 return rb->flags & RAM_MIGRATABLE; in qemu_ram_is_migratable()
1634 rb->flags |= RAM_MIGRATABLE; in qemu_ram_set_migratable()
1639 rb->flags &= ~RAM_MIGRATABLE; in qemu_ram_unset_migratable()
1644 return rb->flags & RAM_NAMED_FILE; in qemu_ram_is_named_file()
1649 return rb->fd; in qemu_ram_get_fd()
1658 assert(!new_block->idstr[0]); in qemu_ram_set_idstr()
1663 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id); in qemu_ram_set_idstr()
1667 pstrcat(new_block->idstr, sizeof(new_block->idstr), name); in qemu_ram_set_idstr()
1672 !strcmp(block->idstr, new_block->idstr)) { in qemu_ram_set_idstr()
1674 new_block->idstr); in qemu_ram_set_idstr()
1684 * migration. Ignore the problem since hot-unplug during migration in qemu_ram_unset_idstr()
1688 memset(block->idstr, 0, sizeof(block->idstr)); in qemu_ram_unset_idstr()
1695 g_autofree char *id = mr->dev ? qdev_get_dev_path(mr->dev) : NULL; in cpr_name()
1706 return rb->page_size; in qemu_ram_pagesize()
1742 const ram_addr_t oldsize = block->used_length; in qemu_ram_resize()
1750 if (block->used_length == newsize) { in qemu_ram_resize()
1755 if (unaligned_size != memory_region_size(block->mr)) { in qemu_ram_resize()
1756 memory_region_set_size(block->mr, unaligned_size); in qemu_ram_resize()
1757 if (block->resized) { in qemu_ram_resize()
1758 block->resized(block->idstr, unaligned_size, block->host); in qemu_ram_resize()
1764 if (!(block->flags & RAM_RESIZEABLE)) { in qemu_ram_resize()
1767 " != 0x" RAM_ADDR_FMT, block->idstr, in qemu_ram_resize()
1768 newsize, block->used_length); in qemu_ram_resize()
1769 return -EINVAL; in qemu_ram_resize()
1772 if (block->max_length < newsize) { in qemu_ram_resize()
1775 " > 0x" RAM_ADDR_FMT, block->idstr, in qemu_ram_resize()
1776 newsize, block->max_length); in qemu_ram_resize()
1777 return -EINVAL; in qemu_ram_resize()
1781 if (block->host) { in qemu_ram_resize()
1782 ram_block_notify_resize(block->host, oldsize, newsize); in qemu_ram_resize()
1785 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length); in qemu_ram_resize()
1786 block->used_length = newsize; in qemu_ram_resize()
1787 cpu_physical_memory_set_dirty_range(block->offset, block->used_length, in qemu_ram_resize()
1789 memory_region_set_size(block->mr, unaligned_size); in qemu_ram_resize()
1790 if (block->resized) { in qemu_ram_resize()
1791 block->resized(block->idstr, unaligned_size, block->host); in qemu_ram_resize()
1799 * Otherwise no-op.
1805 g_assert((start + length) <= block->used_length); in qemu_ram_msync()
1815 if (block->fd >= 0) { in qemu_ram_msync()
1818 * specified as persistent (or is not one) - use the msync. in qemu_ram_msync()
1822 if (qemu_msync(addr, length, block->fd)) { in qemu_ram_msync()
1850 sizeof(new_blocks->blocks[0]) * new_num_blocks); in dirty_memory_extend()
1853 memcpy(new_blocks->blocks, old_blocks->blocks, in dirty_memory_extend()
1854 old_num_blocks * sizeof(old_blocks->blocks[0])); in dirty_memory_extend()
1858 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE); in dirty_memory_extend()
1882 new_block->offset = find_ram_offset(new_block->max_length); in ram_block_add()
1884 if (!new_block->host) { in ram_block_add()
1886 xen_ram_alloc(new_block->offset, new_block->max_length, in ram_block_add()
1887 new_block->mr, &err); in ram_block_add()
1894 new_block->host = qemu_anon_ram_alloc(new_block->max_length, in ram_block_add()
1895 &new_block->mr->align, in ram_block_add()
1897 if (!new_block->host) { in ram_block_add()
1900 memory_region_name(new_block->mr)); in ram_block_add()
1904 memory_try_enable_merging(new_block->host, new_block->max_length); in ram_block_add()
1909 if (new_block->flags & RAM_GUEST_MEMFD) { in ram_block_add()
1914 object_get_typename(OBJECT(current_machine->cgs))); in ram_block_add()
1917 assert(new_block->guest_memfd < 0); in ram_block_add()
1921 error_setg_errno(errp, -ret, in ram_block_add()
1927 new_block->guest_memfd = kvm_create_guest_memfd(new_block->max_length, in ram_block_add()
1929 if (new_block->guest_memfd < 0) { in ram_block_add()
1943 new_block->attributes = ram_block_attributes_create(new_block); in ram_block_add()
1944 if (!new_block->attributes) { in ram_block_add()
1946 close(new_block->guest_memfd); in ram_block_add()
1957 error_setg(&new_block->cpr_blocker, in ram_block_add()
1960 memory_region_name(new_block->mr)); in ram_block_add()
1961 migrate_add_blocker_modes(&new_block->cpr_blocker, errp, in ram_block_add()
1962 MIG_MODE_CPR_TRANSFER, -1); in ram_block_add()
1966 ram_size = (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS; in ram_block_add()
1969 * QLIST (which has an RCU-friendly variant) does not have insertion at in ram_block_add()
1974 if (block->max_length < new_block->max_length) { in ram_block_add()
1979 QLIST_INSERT_BEFORE_RCU(block, new_block, next); in ram_block_add()
1981 QLIST_INSERT_AFTER_RCU(last_block, new_block, next); in ram_block_add()
1983 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next); in ram_block_add()
1992 cpu_physical_memory_set_dirty_range(new_block->offset, in ram_block_add()
1993 new_block->used_length, in ram_block_add()
1996 if (new_block->host) { in ram_block_add()
1997 qemu_ram_setup_dump(new_block->host, new_block->max_length); in ram_block_add()
1998 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE); in ram_block_add()
2005 qemu_madvise(new_block->host, new_block->max_length, in ram_block_add()
2008 ram_block_notify_add(new_block->host, new_block->used_length, in ram_block_add()
2009 new_block->max_length); in ram_block_add()
2015 qemu_anon_ram_free(new_block->host, new_block->max_length); in ram_block_add()
2016 new_block->host = NULL; in ram_block_add()
2044 error_setg(errp, "-mem-path not supported with Xen"); in qemu_ram_alloc_from_fd()
2050 "host lacks kvm mmu notifiers, -mem-path unsupported"); in qemu_ram_alloc_from_fd()
2070 if (file_align > 0 && file_align > mr->align) { in qemu_ram_alloc_from_fd()
2073 file_align, mr->align); in qemu_ram_alloc_from_fd()
2078 new_block->mr = mr; in qemu_ram_alloc_from_fd()
2079 new_block->used_length = size; in qemu_ram_alloc_from_fd()
2080 new_block->max_length = max_size; in qemu_ram_alloc_from_fd()
2081 new_block->resized = resized; in qemu_ram_alloc_from_fd()
2082 new_block->flags = ram_flags; in qemu_ram_alloc_from_fd()
2083 new_block->guest_memfd = -1; in qemu_ram_alloc_from_fd()
2084 new_block->host = file_ram_alloc(new_block, max_size, fd, in qemu_ram_alloc_from_fd()
2087 if (!new_block->host) { in qemu_ram_alloc_from_fd()
2114 error_setg_errno(errp, -fd, "can't open backing store %s for guest RAM", in qemu_ram_alloc_from_file()
2117 fd == -EACCES) { in qemu_ram_alloc_from_file()
2131 " read-only but still creating writable RAM using" in qemu_ram_alloc_from_file()
2132 " '-object memory-backend-file,readonly=on,rom=off...'" in qemu_ram_alloc_from_file()
2203 if (!share_flags && current_machine->aux_ram_share) { in qemu_ram_alloc_internal()
2216 mr->align = QEMU_VMALLOC_ALIGN; in qemu_ram_alloc_internal()
2224 * After cpr-transfer, new QEMU could create a memory region in qemu_ram_alloc_internal()
2232 trace_qemu_ram_alloc_shared(name, new_block->used_length, in qemu_ram_alloc_internal()
2233 new_block->max_length, fd, in qemu_ram_alloc_internal()
2234 new_block->host); in qemu_ram_alloc_internal()
2251 new_block->mr = mr; in qemu_ram_alloc_internal()
2252 new_block->resized = resized; in qemu_ram_alloc_internal()
2253 new_block->used_length = size; in qemu_ram_alloc_internal()
2254 new_block->max_length = max_size; in qemu_ram_alloc_internal()
2255 new_block->fd = -1; in qemu_ram_alloc_internal()
2256 new_block->guest_memfd = -1; in qemu_ram_alloc_internal()
2257 new_block->page_size = qemu_real_host_page_size(); in qemu_ram_alloc_internal()
2258 new_block->host = host; in qemu_ram_alloc_internal()
2259 new_block->flags = ram_flags; in qemu_ram_alloc_internal()
2294 if (block->flags & RAM_PREALLOC) { in reclaim_ramblock()
2297 xen_invalidate_map_cache_entry(block->host); in reclaim_ramblock()
2299 } else if (block->fd >= 0) { in reclaim_ramblock()
2300 qemu_ram_munmap(block->fd, block->host, block->max_length); in reclaim_ramblock()
2301 close(block->fd); in reclaim_ramblock()
2304 qemu_anon_ram_free(block->host, block->max_length); in reclaim_ramblock()
2307 if (block->guest_memfd >= 0) { in reclaim_ramblock()
2308 ram_block_attributes_destroy(block->attributes); in reclaim_ramblock()
2309 close(block->guest_memfd); in reclaim_ramblock()
2324 if (block->host) { in qemu_ram_free()
2325 ram_block_notify_remove(block->host, block->used_length, in qemu_ram_free()
2326 block->max_length); in qemu_ram_free()
2330 name = cpr_name(block->mr); in qemu_ram_free()
2332 QLIST_REMOVE_RCU(block, next); in qemu_ram_free()
2347 void *host_startaddr = block->host + start; in qemu_ram_remap_mmap()
2349 assert(block->fd < 0); in qemu_ram_remap_mmap()
2351 flags |= block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE; in qemu_ram_remap_mmap()
2352 flags |= block->flags & RAM_NORESERVE ? MAP_NORESERVE : 0; in qemu_ram_remap_mmap()
2354 prot |= block->flags & RAM_READONLY ? 0 : PROT_WRITE; in qemu_ram_remap_mmap()
2355 area = mmap(host_startaddr, length, prot, flags, -1, 0); in qemu_ram_remap_mmap()
2356 return area != host_startaddr ? -errno : 0; in qemu_ram_remap_mmap()
2360 * qemu_ram_remap - remap a single RAM page
2380 offset = addr - block->offset; in qemu_ram_remap()
2381 if (offset < block->max_length) { in qemu_ram_remap()
2387 if (block->flags & RAM_PREALLOC) { in qemu_ram_remap()
2399 if (block->fd >= 0) { in qemu_ram_remap()
2401 PRIx64 " +%zx", block->idstr, offset, in qemu_ram_remap()
2402 block->fd_offset, page_size); in qemu_ram_remap()
2407 block->idstr, offset, page_size); in qemu_ram_remap()
2430 * @lock: whether to lock the mapping in xen-mapcache until invalidated.
2431 * @is_write: hint whether to map RW or RO in the xen-mapcache.
2448 addr -= block->offset; in qemu_ram_ptr_length()
2451 *size = MIN(*size, block->max_length - addr); in qemu_ram_ptr_length()
2455 if (xen_enabled() && block->host == NULL) { in qemu_ram_ptr_length()
2460 if (xen_mr_is_memory(block->mr)) { in qemu_ram_ptr_length()
2461 return xen_map_cache(block->mr, block->offset + addr, in qemu_ram_ptr_length()
2462 len, block->offset, in qemu_ram_ptr_length()
2466 block->host = xen_map_cache(block->mr, block->offset, in qemu_ram_ptr_length()
2467 block->max_length, in qemu_ram_ptr_length()
2468 block->offset, in qemu_ram_ptr_length()
2491 ram_addr_t res = (uint8_t *)host - (uint8_t *)rb->host; in qemu_ram_block_host_offset()
2492 assert((uintptr_t)host >= (uintptr_t)rb->host); in qemu_ram_block_host_offset()
2493 assert(res < rb->max_length); in qemu_ram_block_host_offset()
2514 *offset = ram_addr - block->offset; in qemu_ram_block_from_host()
2521 if (block && block->host && host - block->host < block->max_length) { in qemu_ram_block_from_host()
2527 if (block->host == NULL) { in qemu_ram_block_from_host()
2530 if (host - block->host < block->max_length) { in qemu_ram_block_from_host()
2538 *offset = (host - block->host); in qemu_ram_block_from_host()
2557 if (!strcmp(name, block->idstr)) { in qemu_ram_block_by_name()
2579 return block->offset + offset; in qemu_ram_addr_from_host()
2612 res = flatview_read(subpage->fv, addr + subpage->base, attrs, buf, len); in subpage_read()
2632 return flatview_write(subpage->fv, addr + subpage->base, attrs, buf, len); in subpage_write()
2645 return flatview_access_valid(subpage->fv, addr + subpage->base, in subpage_accepts()
2666 return -1; in subpage_register()
2674 mmio->sub_section[idx] = section; in subpage_register()
2684 /* mmio->sub_section is set to PHYS_SECTION_UNASSIGNED with g_malloc0 */ in subpage_init()
2686 mmio->fv = fv; in subpage_init()
2687 mmio->base = base; in subpage_init()
2688 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio, in subpage_init()
2690 mmio->iomem.subpage = true; in subpage_init()
2724 n = dummy_section(&d->map, fv, &io_mem_unassigned); in address_space_dispatch_new()
2727 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 }; in address_space_dispatch_new()
2734 phys_sections_free(&d->map); in address_space_dispatch_free()
2750 * ---------------------- ------------------------- in tcg_log_global_after_sync()
2751 * TLB check -> slow path in tcg_log_global_after_sync()
2756 * TLB check -> fast path in tcg_log_global_after_sync()
2772 run_on_cpu(cpuas->cpu, do_nothing, RUN_ON_CPU_NULL); in tcg_log_global_after_sync()
2780 cpuas->memory_dispatch = address_space_to_dispatch(cpuas->as); in tcg_commit_cpu()
2790 /* since each CPU stores ram addresses in its TLB cache, we must in tcg_commit()
2793 cpu = cpuas->cpu; in tcg_commit()
2796 * Defer changes to as->memory_dispatch until the cpu is quiescent. in tcg_commit()
2805 * all of the tcg machinery for run-on is initialized: thus halt_cond. in tcg_commit()
2807 if (cpu->halt_cond) { in tcg_commit()
2857 tb_invalidate_phys_range(NULL, addr, addr + length - 1); in invalidate_and_set_dirty()
2878 unsigned access_size_max = mr->ops->valid.max_access_size; in memory_access_size()
2880 /* Regions are assumed to support 1-4 byte accesses unless in memory_access_size()
2887 if (!mr->ops->impl.unaligned) { in memory_access_size()
2888 unsigned align_size_max = addr & -addr; in memory_access_size()
2911 if (mr->flush_coalesced_mmio) { in prepare_mmio_access()
2939 "Invalid access to non-RAM device at " in flatview_access_allowed()
2985 uint8_t *ram_ptr = qemu_ram_ptr_length(mr->ram_block, mr_addr, l, in flatview_write_continue_step()
3009 len -= l; in flatview_write_continue()
3078 uint8_t *ram_ptr = qemu_ram_ptr_length(mr->ram_block, mr_addr, l, in flatview_read_continue_step()
3100 len -= l; in flatview_read_continue()
3185 len -= l; in address_space_set()
3226 ram_ptr = qemu_map_ram_ptr(mr->ram_block, addr1); in address_space_write_rom_internal()
3237 len -= l; in address_space_write_rom_internal()
3257 * triggered from within the guest. For TCG we are always cache coherent, in cpu_flush_icache_range()
3259 * the host's instruction cache at least. in cpu_flush_icache_range()
3295 while (!QLIST_EMPTY(&as->map_client_list)) { in address_space_notify_map_clients_locked()
3296 client = QLIST_FIRST(&as->map_client_list); in address_space_notify_map_clients_locked()
3297 qemu_bh_schedule(client->bh); in address_space_notify_map_clients_locked()
3306 QEMU_LOCK_GUARD(&as->map_client_list_lock); in address_space_register_map_client()
3307 client->bh = bh; in address_space_register_map_client()
3308 QLIST_INSERT_HEAD(&as->map_client_list, client, link); in address_space_register_map_client()
3311 if (qatomic_read(&as->bounce_buffer_size) < as->max_bounce_buffer_size) { in address_space_register_map_client()
3335 QEMU_LOCK_GUARD(&as->map_client_list_lock); in address_space_unregister_map_client()
3336 QLIST_FOREACH(client, &as->map_client_list, link) { in address_space_unregister_map_client()
3337 if (client->bh == bh) { in address_space_unregister_map_client()
3346 QEMU_LOCK_GUARD(&as->map_client_list_lock); in address_space_notify_map_clients()
3366 len -= l; in flatview_access_valid()
3394 target_len -= len; in flatview_extend_translation()
3413 * Use only for reads OR writes - not for read-modify-write operations.
3440 size_t used = qatomic_read(&as->bounce_buffer_size); in address_space_map()
3442 hwaddr alloc = MIN(as->max_bounce_buffer_size - used, l); in address_space_map()
3445 qatomic_cmpxchg(&as->bounce_buffer_size, used, new_size); in address_space_map()
3459 bounce->magic = BOUNCE_BUFFER_MAGIC; in address_space_map()
3461 bounce->mr = mr; in address_space_map()
3462 bounce->addr = addr; in address_space_map()
3463 bounce->len = l; in address_space_map()
3467 bounce->buffer, l); in address_space_map()
3471 return bounce->buffer; in address_space_map()
3478 return qemu_ram_ptr_length(mr->ram_block, xlat, plen, true, is_write); in address_space_map()
3505 assert(bounce->magic == BOUNCE_BUFFER_MAGIC); in address_space_unmap()
3508 address_space_write(as, bounce->addr, MEMTXATTRS_UNSPECIFIED, in address_space_unmap()
3509 bounce->buffer, access_len); in address_space_unmap()
3512 qatomic_sub(&as->bounce_buffer_size, bounce->len); in address_space_unmap()
3513 bounce->magic = ~BOUNCE_BUFFER_MAGIC; in address_space_unmap()
3514 memory_region_unref(bounce->mr); in address_space_unmap()
3543 int64_t address_space_cache_init(MemoryRegionCache *cache, in address_space_cache_init() argument
3557 cache->fv = address_space_get_flatview(as); in address_space_cache_init()
3558 d = flatview_to_dispatch(cache->fv); in address_space_cache_init()
3559 cache->mrs = *address_space_translate_internal(d, addr, &cache->xlat, &l, true); in address_space_cache_init()
3562 * cache->xlat is now relative to cache->mrs.mr, not to the section itself. in address_space_cache_init()
3564 * cache->xlat and the end of the section. in address_space_cache_init()
3566 diff = int128_sub(cache->mrs.size, in address_space_cache_init()
3567 int128_make64(cache->xlat - cache->mrs.offset_within_region)); in address_space_cache_init()
3570 mr = cache->mrs.mr; in address_space_cache_init()
3577 l = flatview_extend_translation(cache->fv, addr, len, mr, in address_space_cache_init()
3578 cache->xlat, l, is_write, in address_space_cache_init()
3580 cache->ptr = qemu_ram_ptr_length(mr->ram_block, cache->xlat, &l, true, in address_space_cache_init()
3583 cache->ptr = NULL; in address_space_cache_init()
3586 cache->len = l; in address_space_cache_init()
3587 cache->is_write = is_write; in address_space_cache_init()
3591 void address_space_cache_invalidate(MemoryRegionCache *cache, in address_space_cache_invalidate() argument
3595 assert(cache->is_write); in address_space_cache_invalidate()
3596 if (likely(cache->ptr)) { in address_space_cache_invalidate()
3597 invalidate_and_set_dirty(cache->mrs.mr, addr + cache->xlat, access_len); in address_space_cache_invalidate()
3601 void address_space_cache_destroy(MemoryRegionCache *cache) in address_space_cache_destroy() argument
3603 if (!cache->mrs.mr) { in address_space_cache_destroy()
3608 xen_invalidate_map_cache_entry(cache->ptr); in address_space_cache_destroy()
3610 memory_region_unref(cache->mrs.mr); in address_space_cache_destroy()
3611 flatview_unref(cache->fv); in address_space_cache_destroy()
3612 cache->mrs.mr = NULL; in address_space_cache_destroy()
3613 cache->fv = NULL; in address_space_cache_destroy()
3622 MemoryRegionCache *cache, hwaddr addr, hwaddr *xlat, in address_space_translate_cached() argument
3630 assert(!cache->ptr); in address_space_translate_cached()
3631 *xlat = addr + cache->xlat; in address_space_translate_cached()
3633 mr = cache->mrs.mr; in address_space_translate_cached()
3661 len -= l; in address_space_write_continue_cached()
3686 len -= l; in address_space_read_continue_cached()
3703 address_space_read_cached_slow(MemoryRegionCache *cache, hwaddr addr, in address_space_read_cached_slow() argument
3710 mr = address_space_translate_cached(cache, addr, &mr_addr, &l, false, in address_space_read_cached_slow()
3720 address_space_write_cached_slow(MemoryRegionCache *cache, hwaddr addr, in address_space_write_cached_slow() argument
3727 mr = address_space_translate_cached(cache, addr, &mr_addr, &l, true, in address_space_write_cached_slow()
3733 #define ARG1_DECL MemoryRegionCache *cache
3734 #define ARG1 cache
3736 #define TRANSLATE(...) address_space_translate_cached(cache, __VA_ARGS__)
3759 if (phys_addr == -1) in cpu_memory_rw_debug()
3760 return -1; in cpu_memory_rw_debug()
3761 l = (page + TARGET_PAGE_SIZE) - addr; in cpu_memory_rw_debug()
3765 res = address_space_rw(cpu->cpu_ases[asidx].as, phys_addr, attrs, buf, in cpu_memory_rw_debug()
3768 return -1; in cpu_memory_rw_debug()
3770 len -= l; in cpu_memory_rw_debug()
3810 * Returns: 0 on success, non-0 on failure
3815 int ret = -1; in ram_block_discard_range()
3817 uint8_t *host_startaddr = rb->host + start; in ram_block_discard_range()
3819 if (!QEMU_PTR_IS_ALIGNED(host_startaddr, rb->page_size)) { in ram_block_discard_range()
3825 if ((start + length) <= rb->max_length) { in ram_block_discard_range()
3827 if (!QEMU_IS_ALIGNED(length, rb->page_size)) { in ram_block_discard_range()
3839 need_madvise = (rb->page_size == qemu_real_host_page_size()); in ram_block_discard_range()
3840 need_fallocate = rb->fd != -1; in ram_block_discard_range()
3851 if (rb->flags & RAM_READONLY_FD) { in ram_block_discard_range()
3875 ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, in ram_block_discard_range()
3876 start + rb->fd_offset, length); in ram_block_discard_range()
3878 ret = -errno; in ram_block_discard_range()
3880 " +%zx (%d)", __func__, rb->idstr, start, in ram_block_discard_range()
3881 rb->fd_offset, length, ret); in ram_block_discard_range()
3885 ret = -ENOSYS; in ram_block_discard_range()
3888 rb->idstr, start, rb->fd_offset, length, ret); in ram_block_discard_range()
3899 if (qemu_ram_is_shared(rb) && rb->fd < 0) { in ram_block_discard_range()
3905 ret = -errno; in ram_block_discard_range()
3908 __func__, rb->idstr, start, length, ret); in ram_block_discard_range()
3912 ret = -ENOSYS; in ram_block_discard_range()
3914 __func__, rb->idstr, start, length, ret); in ram_block_discard_range()
3918 trace_ram_block_discard_range(rb->idstr, host_startaddr, length, in ram_block_discard_range()
3922 __func__, rb->idstr, start, length, rb->max_length); in ram_block_discard_range()
3932 int ret = -1; in ram_block_discard_guest_memfd_range()
3936 ret = fallocate(rb->guest_memfd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, in ram_block_discard_guest_memfd_range()
3940 ret = -errno; in ram_block_discard_guest_memfd_range()
3942 __func__, rb->idstr, start, length, ret); in ram_block_discard_guest_memfd_range()
3945 ret = -ENOSYS; in ram_block_discard_guest_memfd_range()
3947 __func__, rb->idstr, start, length, ret); in ram_block_discard_guest_memfd_range()
3955 return rb->flags & RAM_PMEM; in ramblock_is_pmem()
3960 if (start == end - 1) { in mtree_print_phys_entries()
3963 qemu_printf("\t%3d..%-3d ", start, end - 1); in mtree_print_phys_entries()
3986 for (i = 0; i < d->map.sections_nb; ++i) { in mtree_print_dispatch()
3987 MemoryRegionSection *s = d->map.sections + i; in mtree_print_dispatch()
3994 s->offset_within_address_space, in mtree_print_dispatch()
3995 s->offset_within_address_space + MR_SIZE(s->size), in mtree_print_dispatch()
3996 s->mr->name ? s->mr->name : "(noname)", in mtree_print_dispatch()
3998 s->mr == root ? " [ROOT]" : "", in mtree_print_dispatch()
3999 s == d->mru_section ? " [MRU]" : "", in mtree_print_dispatch()
4000 s->mr->is_iommu ? " [iommu]" : ""); in mtree_print_dispatch()
4002 if (s->mr->alias) { in mtree_print_dispatch()
4003 qemu_printf(" alias=%s", s->mr->alias->name ? in mtree_print_dispatch()
4004 s->mr->alias->name : "noname"); in mtree_print_dispatch()
4009 qemu_printf(" Nodes (%d bits per level, %d levels) ptr=[%d] skip=%d\n", in mtree_print_dispatch()
4010 P_L2_BITS, P_L2_LEVELS, d->phys_map.ptr, d->phys_map.skip); in mtree_print_dispatch()
4011 for (i = 0; i < d->map.nodes_nb; ++i) { in mtree_print_dispatch()
4014 Node *n = d->map.nodes + i; in mtree_print_dispatch()
4021 if (pe->ptr == prev.ptr && pe->skip == prev.skip) { in mtree_print_dispatch()
4069 ram_block_discard_disabled_cnt--; in ram_block_discard_disable()
4072 ret = -EBUSY; in ram_block_discard_disable()
4086 ram_block_uncoordinated_discard_disabled_cnt--; in ram_block_uncoordinated_discard_disable()
4088 ret = -EBUSY; in ram_block_uncoordinated_discard_disable()
4102 ram_block_discard_required_cnt--; in ram_block_discard_require()
4105 ret = -EBUSY; in ram_block_discard_require()
4119 ram_block_coordinated_discard_required_cnt--; in ram_block_coordinated_discard_require()
4121 ret = -EBUSY; in ram_block_coordinated_discard_require()
4147 MemoryRegion *mr = rb->mr; in ram_is_cpr_compatible()
4162 if (rb->fd >= 0 && qemu_ram_is_shared(rb)) { in ram_is_cpr_compatible()
4171 * called after we know that the block is migratable. Non-migratable blocks
4172 * are either re-created in new QEMU, or are handled specially, or are covered
4173 * by a device-level CPR blocker.
4183 error_setg(&rb->cpr_blocker, in ram_block_add_cpr_blocker()
4185 "required for memory-backend objects, and aux-ram-share=on is " in ram_block_add_cpr_blocker()
4186 "required.", memory_region_name(rb->mr)); in ram_block_add_cpr_blocker()
4187 migrate_add_blocker_modes(&rb->cpr_blocker, errp, MIG_MODE_CPR_TRANSFER, in ram_block_add_cpr_blocker()
4188 -1); in ram_block_add_cpr_blocker()
4193 migrate_del_blocker(&rb->cpr_blocker); in ram_block_del_cpr_blocker()