4 * Copyright (c) 2003-2008 Fabrice Bellard
5 * Copyright (c) 2011-2015 Red Hat Inc
34 #include "qemu/main-loop.h"
38 #include "migration-stats.h"
41 #include "qemu-file.h"
42 #include "postcopy-ram.h"
44 #include "qemu/error-report.h"
46 #include "qapi/qapi-types-migration.h"
47 #include "qapi/qapi-events-migration.h"
48 #include "qapi/qapi-commands-migration.h"
55 #include "system/cpu-throttle.h"
75 * mapped-ram migration supports O_DIRECT, so we need to make sure the
87 * When doing mapped-ram migration, this is the amount we read from
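The two comment fragments above concern the mapped-ram file layout and its O_DIRECT support, where file offsets and read sizes have to be alignment-friendly. As a rough illustration only (the alignment constant and names below are assumptions, not QEMU's values), rounding an offset up to a direct-I/O alignment boundary looks like this:

#include <stdint.h>

#define DIRECT_IO_ALIGNMENT 4096u   /* hypothetical alignment; O_DIRECT wants block-aligned offsets */

static uint64_t align_up_for_direct_io(uint64_t offset)
{
    /* round up to the next multiple of the (power-of-two) alignment */
    return (offset + DIRECT_IO_ALIGNMENT - 1) & ~(uint64_t)(DIRECT_IO_ALIGNMENT - 1);
}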
182 * Returns 0 for success or -1 for error
196 return -1; in xbzrle_cache_resize()
209 ret = -1; in xbzrle_cache_resize()
258 assert(!rb->receivedmap); in ramblock_recv_map_init()
259 rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits()); in ramblock_recv_map_init()
266 rb->receivedmap); in ramblock_recv_bitmap_test()
271 return test_bit(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap); in ramblock_recv_bitmap_test_byte_offset()
276 set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb), rb->receivedmap); in ramblock_recv_bitmap_set()
282 bitmap_set_atomic(rb->receivedmap, in ramblock_recv_bitmap_set_range()
289 set_bit_atomic(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap); in ramblock_recv_bitmap_set_offset()
307 return -1; in ramblock_recv_bitmap_send()
310 nbits = block->postcopy_length >> TARGET_PAGE_BITS; in ramblock_recv_bitmap_send()
324 bitmap_to_le(le_bitmap, block->receivedmap, nbits); in ramblock_recv_bitmap_send()
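The receivedmap lines above track, one bit per target page, which pages the destination has already received; a host byte offset is turned into a bit index by shifting out the page-size bits. A minimal sketch of that indexing, assuming a plain unsigned-long bitmap and a hypothetical 4 KiB target page (PAGE_BITS of 12):

#include <stdbool.h>
#include <stdint.h>

#define PAGE_BITS 12                            /* assumed 4 KiB target pages */
#define BITS_PER_WORD (8 * sizeof(unsigned long))

static void recv_bitmap_set(unsigned long *map, uint64_t byte_offset)
{
    uint64_t bit = byte_offset >> PAGE_BITS;    /* byte offset -> page index */
    map[bit / BITS_PER_WORD] |= 1UL << (bit % BITS_PER_WORD);
}

static bool recv_bitmap_test(const unsigned long *map, uint64_t byte_offset)
{
    uint64_t bit = byte_offset >> PAGE_BITS;
    return map[bit / BITS_PER_WORD] & (1UL << (bit % BITS_PER_WORD));
}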
372 /* UFFD file descriptor, used in 'write-tracking' migration */
378 /* Last dirty target page we have sent */
402 /* total handled target pages at the beginning of period */
404 /* total handled target pages since start */
410 * - dirty/clear bitmap
411 * - migration_dirty_pages
412 * - pss structures
445 return !QSIMPLEQ_EMPTY_ATOMIC(&rs->src_page_requests); in postcopy_has_request()
473 return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) : in ram_bytes_remaining()
493 pss->block = rb; in pss_init()
494 pss->page = page; in pss_init()
495 pss->complete_round = false; in pss_init()
504 return pss1->host_page_sending && pss2->host_page_sending && in pss_overlap()
505 (pss1->host_page_start == pss2->host_page_start); in pss_overlap()
524 bool same_block = (block == pss->last_sent_block); in save_page_header()
533 len = strlen(block->idstr); in save_page_header()
535 qemu_put_buffer(f, (uint8_t *)block->idstr, len); in save_page_header()
537 pss->last_sent_block = block; in save_page_header()
549 * fast and will not effectively converge, even with auto-converge.
572 cpu_now = 100 - throttle_now; in mig_throttle_guest_down()
575 throttle_inc = MIN(cpu_now - cpu_ideal, pct_increment); in mig_throttle_guest_down()
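The mig_throttle_guest_down() fragments implement auto-converge: when the guest dirties memory faster than pages can be transferred, guest CPU time is throttled in bounded steps. The sketch below shows only the increment step under assumed names; the real function also handles the initial-throttle case and reads its parameters from the migration settings.

static int next_throttle_pct(int throttle_now, int cpu_ideal,
                             int pct_increment, int pct_max)
{
    int cpu_now = 100 - throttle_now;        /* CPU share the guest still has */
    int inc = cpu_now - cpu_ideal;           /* how far above the target we are */

    if (inc > pct_increment) {
        inc = pct_increment;                 /* bounded step per sync period */
    }
    int next = throttle_now + inc;
    return next > pct_max ? pct_max : next;  /* never throttle past the cap */
}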
585 rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); in mig_throttle_counter_reset()
586 rs->num_dirty_pages_period = 0; in mig_throttle_counter_reset()
587 rs->bytes_xfer_prev = migration_transferred_bytes(); in mig_throttle_counter_reset()
596 * The important thing is that a stale (not-yet-0'd) page be replaced
616 * -1 means that xbzrle would be longer than normal
631 QEMUFile *file = pss->pss_channel; in save_xbzrle_page()
636 if (!rs->last_stage) { in save_xbzrle_page()
638 generation) == -1) { in save_xbzrle_page()
639 return -1; in save_xbzrle_page()
646 return -1; in save_xbzrle_page()
675 if (!rs->last_stage && encoded_len != 0) { in save_xbzrle_page()
688 } else if (encoded_len == -1) { in save_xbzrle_page()
692 return -1; in save_xbzrle_page()
696 bytes_xbzrle = save_page_header(pss, pss->pss_channel, block, in save_xbzrle_page()
706 xbzrle_counters.bytes += bytes_xbzrle - 8; in save_xbzrle_page()
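save_xbzrle_page() sends a delta against the cached previous copy of the page and falls back to a full page when encoding returns -1. The sketch below is not the real XBZRLE format; it is a simplified delta encoder, with hypothetical names, illustrating the "-1 if the encoded output would not fit" convention noted above.

#include <stdint.h>
#include <string.h>

static int delta_encode(const uint8_t *old_page, const uint8_t *new_page,
                        size_t page_size, uint8_t *out, size_t out_max)
{
    size_t out_len = 0;

    for (size_t i = 0; i < page_size; i++) {
        if (old_page[i] == new_page[i]) {
            continue;                               /* unchanged byte, emit nothing */
        }
        if (out_len + sizeof(uint32_t) + 1 > out_max) {
            return -1;                              /* delta would not fit: send the raw page */
        }
        uint32_t off = (uint32_t)i;
        memcpy(out + out_len, &off, sizeof(off));   /* byte offset of the change */
        out_len += sizeof(off);
        out[out_len++] = new_page[i];               /* new byte value */
    }
    return (int)out_len;
}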
715 * This function updates pss->page to point to the next dirty page index
717 * found. Note that when pss->host_page_sending==true it means we're
725 RAMBlock *rb = pss->block; in pss_find_next_dirty()
726 unsigned long size = rb->used_length >> TARGET_PAGE_BITS; in pss_find_next_dirty()
727 unsigned long *bitmap = rb->bmap; in pss_find_next_dirty()
731 pss->page = size; in pss_find_next_dirty()
739 if (pss->host_page_sending) { in pss_find_next_dirty()
740 assert(pss->host_page_end); in pss_find_next_dirty()
741 size = MIN(size, pss->host_page_end); in pss_find_next_dirty()
744 pss->page = find_next_bit(bitmap, size, pss->page); in pss_find_next_dirty()
753 if (!rb->clear_bmap || !clear_bmap_test_and_clear(rb, page)) { in migration_clear_memory_region_dirty_bitmap()
757 shift = rb->clear_bmap_shift; in migration_clear_memory_region_dirty_bitmap()
770 trace_migration_bitmap_clear_dirty(rb->idstr, start, size, page); in migration_clear_memory_region_dirty_bitmap()
771 memory_region_clear_dirty_bitmap(rb->mr, start, size); in migration_clear_memory_region_dirty_bitmap()
779 unsigned long i, chunk_pages = 1UL << rb->clear_bmap_shift; in migration_clear_memory_region_dirty_bitmap_range()
784 * Clear pages from start to start + npages - 1, so the end boundary is in migration_clear_memory_region_dirty_bitmap_range()
807 unsigned long size = rb->used_length >> TARGET_PAGE_BITS; in colo_bitmap_find_dirty()
808 unsigned long *bitmap = rb->bmap; in colo_bitmap_find_dirty()
823 *num = next - first; in colo_bitmap_find_dirty()
843 ret = test_and_clear_bit(page, rb->bmap); in migration_bitmap_clear_dirty()
845 rs->migration_dirty_pages--; in migration_bitmap_clear_dirty()
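migration_bitmap_clear_dirty() only decrements the global dirty-page counter when the bit was actually set, so counter and bitmap stay consistent. A minimal, non-atomic sketch of that pattern (the real code uses an atomic test_and_clear_bit under bitmap_mutex):

#include <stdbool.h>
#include <stdint.h>

static bool test_and_clear(uint64_t *word, unsigned bit)
{
    uint64_t mask = 1ULL << bit;
    bool was_set = (*word & mask) != 0;
    *word &= ~mask;
    return was_set;
}

static void clear_dirty(uint64_t *bmap_word, unsigned bit, uint64_t *dirty_pages)
{
    if (test_and_clear(bmap_word, bit)) {
        (*dirty_pages)--;   /* only count pages that really were dirty */
    }
}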
854 const hwaddr offset = section->offset_within_region; in dirty_bitmap_clear_section()
855 const hwaddr size = int128_get64(section->size); in dirty_bitmap_clear_section()
858 RAMBlock *rb = section->mr->ram_block; in dirty_bitmap_clear_section()
862 * We don't grab ram_state->bitmap_mutex because we expect to run in dirty_bitmap_clear_section()
869 *cleared_bits += bitmap_count_one_with_offset(rb->bmap, start, npages); in dirty_bitmap_clear_section()
870 bitmap_clear(rb->bmap, start, npages); in dirty_bitmap_clear_section()
874 * Exclude all dirty pages from migration that fall into a discarded range as
890 if (rb->mr && rb->bmap && memory_region_has_ram_discard_manager(rb->mr)) { in ramblock_dirty_bitmap_clear_discarded_pages()
891 RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr); in ramblock_dirty_bitmap_clear_discarded_pages()
893 .mr = rb->mr, in ramblock_dirty_bitmap_clear_discarded_pages()
906 * Check if a host-page aligned page falls into a discarded range as managed by
913 if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) { in ramblock_page_is_discarded()
914 RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr); in ramblock_page_is_discarded()
916 .mr = rb->mr, in ramblock_page_is_discarded()
930 cpu_physical_memory_sync_dirty_bitmap(rb, 0, rb->used_length); in ramblock_sync_dirty_bitmap()
932 rs->migration_dirty_pages += new_dirty_pages; in ramblock_sync_dirty_bitmap()
933 rs->num_dirty_pages_period += new_dirty_pages; in ramblock_sync_dirty_bitmap()
951 summary |= block->page_size; in ram_pagesize_summary()
966 uint64_t page_count = rs->target_page_count - rs->target_page_count_prev; in migration_update_rates()
970 rs->num_dirty_pages_period * 1000 / in migration_update_rates()
971 (end_time - rs->time_last_bitmap_sync)); in migration_update_rates()
980 xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss - in migration_update_rates()
981 rs->xbzrle_cache_miss_prev) / page_count; in migration_update_rates()
982 rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss; in migration_update_rates()
983 unencoded_size = (xbzrle_counters.pages - rs->xbzrle_pages_prev) * in migration_update_rates()
985 encoded_size = xbzrle_counters.bytes - rs->xbzrle_bytes_prev; in migration_update_rates()
986 if (xbzrle_counters.pages == rs->xbzrle_pages_prev || !encoded_size) { in migration_update_rates()
991 rs->xbzrle_pages_prev = xbzrle_counters.pages; in migration_update_rates()
992 rs->xbzrle_bytes_prev = xbzrle_counters.bytes; in migration_update_rates()
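migration_update_rates() derives per-period rates from the deltas of monotonically increasing counters, as in the cache-miss-rate line above. A worked example with hypothetical numbers: 600 new cache misses over 3000 pages handled in the period gives a miss rate of 0.20.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t miss_prev = 1000, miss_now = 1600;   /* cumulative cache misses */
    uint64_t page_count = 3000;                   /* pages handled this period */

    double miss_rate = (double)(miss_now - miss_prev) / page_count;
    printf("cache miss rate: %.2f\n", miss_rate); /* prints 0.20 */
    return 0;
}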
997 * Enable dirty-limit to throttle down the guest
1010 * vcpu-dirty-limit untouched. in migration_dirty_limit_guest()
1013 quota_dirtyrate == s->parameters.vcpu_dirty_limit) { in migration_dirty_limit_guest()
1017 quota_dirtyrate = s->parameters.vcpu_dirty_limit; in migration_dirty_limit_guest()
1023 qmp_set_vcpu_dirty_limit(false, -1, quota_dirtyrate, NULL); in migration_dirty_limit_guest()
1031 migration_transferred_bytes() - rs->bytes_xfer_prev; in migration_trigger_throttle()
1032 uint64_t bytes_dirty_period = rs->num_dirty_pages_period * TARGET_PAGE_SIZE; in migration_trigger_throttle()
1043 (++rs->dirty_rate_high_cnt >= 2)) { in migration_trigger_throttle()
1044 rs->dirty_rate_high_cnt = 0; in migration_trigger_throttle()
1062 if (!rs->time_last_bitmap_sync) { in migration_bitmap_sync()
1063 rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); in migration_bitmap_sync()
1069 WITH_QEMU_LOCK_GUARD(&rs->bitmap_mutex) { in migration_bitmap_sync()
1079 trace_migration_bitmap_sync_end(rs->num_dirty_pages_period); in migration_bitmap_sync()
1084 if (end_time > rs->time_last_bitmap_sync + 1000) { in migration_bitmap_sync()
1089 rs->target_page_count_prev = rs->target_page_count; in migration_bitmap_sync()
1092 rs->time_last_bitmap_sync = end_time; in migration_bitmap_sync()
1093 rs->num_dirty_pages_period = 0; in migration_bitmap_sync()
1094 rs->bytes_xfer_prev = migration_transferred_bytes(); in migration_bitmap_sync()
1144 uint8_t *p = pss->block->host + offset; in save_zero_page()
1145 QEMUFile *file = pss->pss_channel; in save_zero_page()
1159 /* zero pages are not transferred with mapped-ram */ in save_zero_page()
1160 clear_bit_atomic(offset >> TARGET_PAGE_BITS, pss->block->file_bmap); in save_zero_page()
1164 len += save_page_header(pss, file, pss->block, offset | RAM_SAVE_FLAG_ZERO); in save_zero_page()
1167 ram_release_page(pss->block->idstr, offset); in save_zero_page()
1174 if (rs->xbzrle_started) { in save_zero_page()
1176 xbzrle_cache_zero_page(pss->block->offset + offset); in save_zero_page()
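save_zero_page() detects all-zero pages on the source and transmits only a header flagged RAM_SAVE_FLAG_ZERO instead of page data. A naive (non-vectorised) zero check is sketched below; QEMU itself uses the optimised buffer_is_zero() helper.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool page_is_zero(const uint8_t *p, size_t size)
{
    for (size_t i = 0; i < size; i++) {
        if (p[i]) {
            return false;   /* first non-zero byte ends the scan */
        }
    }
    return true;
}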
1197 QEMUFile *file = pss->pss_channel; in save_normal_page()
1201 block->pages_offset + offset); in save_normal_page()
1202 set_bit(offset >> TARGET_PAGE_BITS, block->file_bmap); in save_normal_page()
1204 ram_transferred_add(save_page_header(pss, pss->pss_channel, block, in save_normal_page()
1223 * < 0 - error
1224 * >=0 - Number of pages written - this might legally be 0
1233 int pages = -1; in ram_save_page()
1236 RAMBlock *block = pss->block; in ram_save_page()
1237 ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS; in ram_save_page()
1238 ram_addr_t current_addr = block->offset + offset; in ram_save_page()
1240 p = block->host + offset; in ram_save_page()
1241 trace_ram_save_page(block->idstr, (uint64_t)offset, p); in ram_save_page()
1244 if (rs->xbzrle_started && !migration_in_postcopy()) { in ram_save_page()
1247 if (!rs->last_stage) { in ram_save_page()
1256 if (pages == -1) { in ram_save_page()
1268 return -1; in ram_save_multifd_page()
1294 /* Update pss->page for the next dirty bit in ramblock */ in find_dirty_block()
1297 if (pss->complete_round && pss->block == rs->last_seen_block && in find_dirty_block()
1298 pss->page >= rs->last_page) { in find_dirty_block()
1305 if (!offset_in_ramblock(pss->block, in find_dirty_block()
1306 ((ram_addr_t)pss->page) << TARGET_PAGE_BITS)) { in find_dirty_block()
1308 pss->page = 0; in find_dirty_block()
1309 pss->block = QLIST_NEXT_RCU(pss->block, next); in find_dirty_block()
1310 if (!pss->block) { in find_dirty_block()
1312 QEMUFile *f = rs->pss[RAM_CHANNEL_PRECOPY].pss_channel; in find_dirty_block()
1319 /* Hit the end of the list */ in find_dirty_block()
1320 pss->block = QLIST_FIRST_RCU(&ram_list.blocks); in find_dirty_block()
1322 pss->complete_round = true; in find_dirty_block()
1325 rs->xbzrle_started = true; in find_dirty_block()
1339 * Helper for 'get_queued_page' - gets a page off the queue
1355 QEMU_LOCK_GUARD(&rs->src_page_req_mutex); in unqueue_page()
1359 * should be taking anything off the request list other than us. in unqueue_page()
1363 entry = QSIMPLEQ_FIRST(&rs->src_page_requests); in unqueue_page()
1364 block = entry->rb; in unqueue_page()
1365 *offset = entry->offset; in unqueue_page()
1367 if (entry->len > TARGET_PAGE_SIZE) { in unqueue_page()
1368 entry->len -= TARGET_PAGE_SIZE; in unqueue_page()
1369 entry->offset += TARGET_PAGE_SIZE; in unqueue_page()
1371 memory_region_unref(block->mr); in unqueue_page()
1372 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req); in unqueue_page()
1402 res = uffd_read_events(rs->uffdio_fd, &uffd_msg, 1); in poll_fault_page()
1409 assert(block && (block->flags & RAM_UF_WRITEPROTECT) != 0); in poll_fault_page()
1418 * @pss: page-search-status structure
1419 * @start_page: index of the first page in the range relative to pss->block
1428 /* Check if page is from UFFD-managed region. */ in ram_save_release_protection()
1429 if (pss->block->flags & RAM_UF_WRITEPROTECT) { in ram_save_release_protection()
1430 void *page_address = pss->block->host + (start_page << TARGET_PAGE_BITS); in ram_save_release_protection()
1431 uint64_t run_length = (pss->page - start_page) << TARGET_PAGE_BITS; in ram_save_release_protection()
1433 /* Flush async buffers before un-protect. */ in ram_save_release_protection()
1434 qemu_fflush(pss->pss_channel); in ram_save_release_protection()
1435 /* Un-protect memory range. */ in ram_save_release_protection()
1436 res = uffd_change_protection(rs->uffdio_fd, page_address, run_length, in ram_save_release_protection()
1458 * compatible with 'write-tracking'
1480 /* Nothing to do with read-only and MMIO-writable regions */ in ram_write_tracking_compatible()
1481 if (block->mr->readonly || block->mr->rom_device) { in ram_write_tracking_compatible()
1484 /* Try to register block memory via UFFD-IO to track writes */ in ram_write_tracking_compatible()
1485 if (uffd_register_memory(uffd_fd, block->host, block->max_length, in ram_write_tracking_compatible()
1511 for (; offset < end; offset += block->page_size) { in populate_read_range()
1512 char tmp = *((char *)block->host + offset); in populate_read_range()
1522 const hwaddr size = int128_get64(section->size); in populate_read_section()
1523 hwaddr offset = section->offset_within_region; in populate_read_section()
1524 RAMBlock *block = section->mr->ram_block; in populate_read_section()
1547 * not be part of the migration stream either way -- see in ram_block_populate_read()
1552 if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) { in ram_block_populate_read()
1553 RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr); in ram_block_populate_read()
1555 .mr = rb->mr, in ram_block_populate_read()
1557 .size = rb->mr->size, in ram_block_populate_read()
1563 populate_read_range(rb, 0, rb->used_length); in ram_block_populate_read()
1568 * ram_write_tracking_prepare: prepare for UFFD-WP memory tracking
1577 /* Nothing to do with read-only and MMIO-writable regions */ in ram_write_tracking_prepare()
1578 if (block->mr->readonly || block->mr->rom_device) { in ram_write_tracking_prepare()
1597 const hwaddr size = int128_get64(section->size); in uffd_protect_section()
1598 const hwaddr offset = section->offset_within_region; in uffd_protect_section()
1599 RAMBlock *rb = section->mr->ram_block; in uffd_protect_section()
1602 return uffd_change_protection(uffd_fd, rb->host + offset, size, true, in uffd_protect_section()
1608 assert(rb->flags & RAM_UF_WRITEPROTECT); in ram_block_uffd_protect()
1611 if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) { in ram_block_uffd_protect()
1612 RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr); in ram_block_uffd_protect()
1614 .mr = rb->mr, in ram_block_uffd_protect()
1616 .size = rb->mr->size, in ram_block_uffd_protect()
1623 return uffd_change_protection(uffd_fd, rb->host, in ram_block_uffd_protect()
1624 rb->used_length, true, false); in ram_block_uffd_protect()
1628 * ram_write_tracking_start: start UFFD-WP memory tracking
1643 rs->uffdio_fd = uffd_fd; in ram_write_tracking_start()
1648 /* Nothing to do with read-only and MMIO-writable regions */ in ram_write_tracking_start()
1649 if (block->mr->readonly || block->mr->rom_device) { in ram_write_tracking_start()
1654 if (uffd_register_memory(rs->uffdio_fd, block->host, in ram_write_tracking_start()
1655 block->max_length, UFFDIO_REGISTER_MODE_WP, NULL)) { in ram_write_tracking_start()
1658 block->flags |= RAM_UF_WRITEPROTECT; in ram_write_tracking_start()
1659 memory_region_ref(block->mr); in ram_write_tracking_start()
1666 trace_ram_write_tracking_ramblock_start(block->idstr, block->page_size, in ram_write_tracking_start()
1667 block->host, block->max_length); in ram_write_tracking_start()
1676 if ((block->flags & RAM_UF_WRITEPROTECT) == 0) { in ram_write_tracking_start()
1679 uffd_unregister_memory(rs->uffdio_fd, block->host, block->max_length); in ram_write_tracking_start()
1681 block->flags &= ~RAM_UF_WRITEPROTECT; in ram_write_tracking_start()
1682 memory_region_unref(block->mr); in ram_write_tracking_start()
1686 rs->uffdio_fd = -1; in ram_write_tracking_start()
1687 return -1; in ram_write_tracking_start()
1691 * ram_write_tracking_stop: stop UFFD-WP memory tracking and remove protection
1701 if ((block->flags & RAM_UF_WRITEPROTECT) == 0) { in ram_write_tracking_stop()
1704 uffd_unregister_memory(rs->uffdio_fd, block->host, block->max_length); in ram_write_tracking_stop()
1706 trace_ram_write_tracking_ramblock_stop(block->idstr, block->page_size, in ram_write_tracking_stop()
1707 block->host, block->max_length); in ram_write_tracking_stop()
1710 block->flags &= ~RAM_UF_WRITEPROTECT; in ram_write_tracking_stop()
1711 memory_region_unref(block->mr); in ram_write_tracking_stop()
1715 uffd_close_fd(rs->uffdio_fd); in ram_write_tracking_stop()
1716 rs->uffdio_fd = -1; in ram_write_tracking_stop()
1720 /* No target OS support, stubs just fail or ignore */
1789 dirty = test_bit(page, block->bmap); in get_queued_page()
1791 trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset, in get_queued_page()
1794 trace_get_queued_page(block->idstr, (uint64_t)offset, page); in get_queued_page()
1814 pss->block = block; in get_queued_page()
1815 pss->page = offset >> TARGET_PAGE_BITS; in get_queued_page()
1821 pss->complete_round = false; in get_queued_page()
1838 /* This queue generally should be empty - but in the case of a failed in migration_page_queue_free()
1842 QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) { in migration_page_queue_free()
1843 memory_region_unref(mspr->rb->mr); in migration_page_queue_free()
1844 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req); in migration_page_queue_free()
1872 ramblock = rs->last_req_rb; in ram_save_queue_pages()
1880 return -1; in ram_save_queue_pages()
1886 /* We shouldn't be asked for a non-existent RAMBlock */ in ram_save_queue_pages()
1888 return -1; in ram_save_queue_pages()
1890 rs->last_req_rb = ramblock; in ram_save_queue_pages()
1892 trace_ram_save_queue_pages(ramblock->idstr, start, len); in ram_save_queue_pages()
1893 if (!offset_in_ramblock(ramblock, start + len - 1)) { in ram_save_queue_pages()
1897 start, len, ramblock->used_length); in ram_save_queue_pages()
1898 return -1; in ram_save_queue_pages()
1903 * rp-return thread. in ram_save_queue_pages()
1908 PageSearchStatus *pss = &ram_state->pss[RAM_CHANNEL_POSTCOPY]; in ram_save_queue_pages()
1911 qemu_mutex_lock(&rs->bitmap_mutex); in ram_save_queue_pages()
1916 * safe to access without lock, because when rp-thread is running in ram_save_queue_pages()
1919 pss->pss_channel = migrate_get_current()->postcopy_qemufile_src; in ram_save_queue_pages()
1920 assert(pss->pss_channel); in ram_save_queue_pages()
1931 ramblock->idstr, start); in ram_save_queue_pages()
1932 ret = -1; in ram_save_queue_pages()
1936 * NOTE: after ram_save_host_page_urgent() succeeded, pss->page in ram_save_queue_pages()
1944 len -= page_size; in ram_save_queue_pages()
1946 qemu_mutex_unlock(&rs->bitmap_mutex); in ram_save_queue_pages()
1953 new_entry->rb = ramblock; in ram_save_queue_pages()
1954 new_entry->offset = start; in ram_save_queue_pages()
1955 new_entry->len = len; in ram_save_queue_pages()
1957 memory_region_ref(ramblock->mr); in ram_save_queue_pages()
1958 qemu_mutex_lock(&rs->src_page_req_mutex); in ram_save_queue_pages()
1959 QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req); in ram_save_queue_pages()
1961 qemu_mutex_unlock(&rs->src_page_req_mutex); in ram_save_queue_pages()
1967 * ram_save_target_page: save one target page to the precopy thread
1975 ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS; in ram_save_target_page()
1980 res = rdma_control_save_page(pss->pss_channel, pss->block->offset, in ram_save_target_page()
1997 return ram_save_multifd_page(pss->block, offset); in ram_save_target_page()
2007 size_t guest_pfns = qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS; in pss_host_page_prepare()
2009 pss->host_page_sending = true; in pss_host_page_prepare()
2019 pss->host_page_start = pss->page; in pss_host_page_prepare()
2020 pss->host_page_end = pss->page + 1; in pss_host_page_prepare()
2026 pss->host_page_start = ROUND_DOWN(pss->page, guest_pfns); in pss_host_page_prepare()
2027 pss->host_page_end = ROUND_UP(pss->page + 1, guest_pfns); in pss_host_page_prepare()
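pss_host_page_prepare() widens the send range to the host page containing the current target page, so that a huge host page can be sent whole. A sketch of that rounding with a runnable example (2 MiB host pages over 4 KiB target pages, hypothetical names):

#include <stdio.h>

static void host_page_range(unsigned long page, unsigned long guest_pfns,
                            unsigned long *start, unsigned long *end)
{
    *start = (page / guest_pfns) * guest_pfns;                        /* ROUND_DOWN */
    *end = ((page + 1 + guest_pfns - 1) / guest_pfns) * guest_pfns;   /* ROUND_UP   */
}

int main(void)
{
    unsigned long start, end;

    host_page_range(517, 512, &start, &end);   /* 512 target pages per host page */
    printf("[%lu, %lu)\n", start, end);        /* prints [512, 1024) */
    return 0;
}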
2039 assert(pss->host_page_sending); in pss_within_range()
2041 /* Over host-page boundary? */ in pss_within_range()
2042 if (pss->page >= pss->host_page_end) { in pss_within_range()
2046 ram_addr = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS; in pss_within_range()
2048 return offset_in_ramblock(pss->block, ram_addr); in pss_within_range()
2053 pss->host_page_sending = false; in pss_host_page_finish()
2055 pss->host_page_start = pss->host_page_end = 0; in pss_host_page_finish()
2060 PageLocationHint *hint = &rs->page_hint; in ram_page_hint_update()
2063 if (hint->valid) { in ram_page_hint_update()
2068 hint->location.block = pss->block; in ram_page_hint_update()
2069 hint->location.offset = pss->page; in ram_page_hint_update()
2070 hint->valid = true; in ram_page_hint_update()
2085 trace_postcopy_preempt_send_host_page(pss->block->idstr, pss->page); in ram_save_host_page_urgent()
2093 if (pss_overlap(pss, &ram_state->pss[RAM_CHANNEL_PRECOPY])) { in ram_save_host_page_urgent()
2094 trace_postcopy_preempt_hit(pss->block->idstr, in ram_save_host_page_urgent()
2095 pss->page << TARGET_PAGE_BITS); in ram_save_host_page_urgent()
2100 page_dirty = migration_bitmap_clear_dirty(rs, pss->block, pss->page); in ram_save_host_page_urgent()
2106 ret = -1; in ram_save_host_page_urgent()
2117 qemu_fflush(pss->pss_channel); in ram_save_host_page_urgent()
2129 * Only dirty target pages are sent. Note that the host page size may
2149 qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS; in ram_save_host_page()
2150 unsigned long start_page = pss->page; in ram_save_host_page()
2153 if (migrate_ram_is_ignored(pss->block)) { in ram_save_host_page()
2154 error_report("block %s should not be migrated !", pss->block->idstr); in ram_save_host_page()
2162 page_dirty = migration_bitmap_clear_dirty(rs, pss->block, pss->page); in ram_save_host_page()
2168 * because both migration thread and rp-return thread can in ram_save_host_page()
2172 qemu_mutex_unlock(&rs->bitmap_mutex); in ram_save_host_page()
2186 qemu_mutex_lock(&rs->bitmap_mutex); in ram_save_host_page()
2213 return rs->page_hint.valid; in ram_page_hint_valid()
2219 PageLocationHint *hint = &rs->page_hint; in ram_page_hint_collect()
2221 assert(hint->valid); in ram_page_hint_collect()
2223 *block = hint->location.block; in ram_page_hint_collect()
2224 *page = hint->location.offset; in ram_page_hint_collect()
2227 hint->valid = false; in ram_page_hint_collect()
2240 * On systems where host-page-size > target-page-size it will send all the
2245 PageSearchStatus *pss = &rs->pss[RAM_CHANNEL_PRECOPY]; in ram_find_and_save_block()
2251 if (!rs->ram_bytes_total) { in ram_find_and_save_block()
2262 if (!rs->last_seen_block) { in ram_find_and_save_block()
2263 rs->last_seen_block = QLIST_FIRST_RCU(&ram_list.blocks); in ram_find_and_save_block()
2264 rs->last_page = 0; in ram_find_and_save_block()
2270 next_block = rs->last_seen_block; in ram_find_and_save_block()
2271 next_page = rs->last_page; in ram_find_and_save_block()
2297 rs->last_seen_block = pss->block; in ram_find_and_save_block()
2298 rs->last_page = pss->page; in ram_find_and_save_block()
2311 total += block->used_length; in ram_bytes_total_with_ignored()
2324 total += block->used_length; in ram_bytes_total()
2344 qemu_mutex_destroy(&(*rsp)->bitmap_mutex); in ram_state_cleanup()
2345 qemu_mutex_destroy(&(*rsp)->src_page_req_mutex); in ram_state_cleanup()
2372 g_free(block->clear_bmap); in ram_bitmaps_destroy()
2373 block->clear_bmap = NULL; in ram_bitmaps_destroy()
2374 g_free(block->bmap); in ram_bitmaps_destroy()
2375 block->bmap = NULL; in ram_bitmaps_destroy()
2376 g_free(block->file_bmap); in ram_bitmaps_destroy()
2377 block->file_bmap = NULL; in ram_bitmaps_destroy()
2409 hint->location.block = NULL; in ram_page_hint_reset()
2410 hint->location.offset = 0; in ram_page_hint_reset()
2411 hint->valid = false; in ram_page_hint_reset()
2419 rs->pss[i].last_sent_block = NULL; in ram_state_reset()
2422 rs->last_seen_block = NULL; in ram_state_reset()
2423 rs->last_page = 0; in ram_state_reset()
2424 rs->last_version = ram_list.version; in ram_state_reset()
2425 rs->xbzrle_started = false; in ram_state_reset()
2427 ram_page_hint_reset(&rs->page_hint); in ram_state_reset()
2439 unsigned long *bitmap = block->bmap; in ram_postcopy_migrated_memory_release()
2440 unsigned long range = block->used_length >> TARGET_PAGE_BITS; in ram_postcopy_migrated_memory_release()
2445 ram_discard_range(block->idstr, in ram_postcopy_migrated_memory_release()
2447 ((ram_addr_t)(run_end - run_start)) in ram_postcopy_migrated_memory_release()
2464 unsigned long end = block->used_length >> TARGET_PAGE_BITS; in postcopy_send_discard_bm_ram()
2466 unsigned long *bitmap = block->bmap; in postcopy_send_discard_bm_ram()
2479 discard_length = end - one; in postcopy_send_discard_bm_ram()
2481 discard_length = zero - one; in postcopy_send_discard_bm_ram()
2497 * which would mean postcopy code would have to deal with target page)
2506 postcopy_discard_send_init(ms, block->idstr); in postcopy_each_ram_send_discard()
2510 * host-page size chunks, mark any partially dirty host-page size in postcopy_each_ram_send_discard()
2511 * chunks as all dirty. In this case the host-page is the host-page in postcopy_each_ram_send_discard()
2519 * target page specific code. in postcopy_each_ram_send_discard()
2533 * Postcopy requires that all target pages in a hostpage are dirty or
2542 unsigned long *bitmap = block->bmap; in postcopy_chunk_hostpages_pass()
2543 unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE; in postcopy_chunk_hostpages_pass()
2544 unsigned long pages = block->used_length >> TARGET_PAGE_BITS; in postcopy_chunk_hostpages_pass()
2547 if (block->page_size == TARGET_PAGE_SIZE) { in postcopy_chunk_hostpages_pass()
2548 /* Easy case - TPS==HPS for a non-huge page RAMBlock */ in postcopy_chunk_hostpages_pass()
2584 rs->migration_dirty_pages += !test_and_set_bit(page, bitmap); in postcopy_chunk_hostpages_pass()
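postcopy_chunk_hostpages_pass() enforces the rule stated above: every target page within a host page must be either dirty or clean, so partially dirty host pages are rounded up to fully dirty, bumping migration_dirty_pages for each newly set bit. A simplified sketch using a byte-per-page array instead of a bitmap:

#include <stdbool.h>
#include <stddef.h>

static void chunk_hostpages_pass(bool *dirty, size_t pages, size_t host_ratio,
                                 size_t *dirty_pages)
{
    for (size_t run = 0; run < pages; run += host_ratio) {
        bool any_dirty = false;

        for (size_t i = run; i < run + host_ratio && i < pages; i++) {
            any_dirty |= dirty[i];
        }
        if (!any_dirty) {
            continue;                      /* fully clean host page, leave it alone */
        }
        for (size_t i = run; i < run + host_ratio && i < pages; i++) {
            if (!dirty[i]) {
                dirty[i] = true;           /* round partially dirty up to fully dirty */
                (*dirty_pages)++;          /* keep the global counter in sync */
            }
        }
    }
}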
2596 * Transmit the set of pages to be discarded after precopy to the target
2615 /* Easiest way to make sure we don't resume in the middle of a host-page */ in ram_postcopy_send_discard_bitmap()
2616 rs->pss[RAM_CHANNEL_PRECOPY].last_sent_block = NULL; in ram_postcopy_send_discard_bitmap()
2617 rs->last_seen_block = NULL; in ram_postcopy_send_discard_bitmap()
2618 rs->last_page = 0; in ram_postcopy_send_discard_bitmap()
2644 return -1; in ram_discard_range()
2651 if (rb->receivedmap) { in ram_discard_range()
2652 bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(), in ram_discard_range()
2722 qemu_mutex_init(&(*rsp)->bitmap_mutex); in ram_state_init()
2723 qemu_mutex_init(&(*rsp)->src_page_req_mutex); in ram_state_init()
2724 QSIMPLEQ_INIT(&(*rsp)->src_page_requests); in ram_state_init()
2725 (*rsp)->ram_bytes_total = ram_bytes_total(); in ram_state_init()
2732 (*rsp)->migration_dirty_pages = (*rsp)->ram_bytes_total >> TARGET_PAGE_BITS; in ram_state_init()
2747 shift = ms->clear_bitmap_shift; in ram_list_init_bitmaps()
2759 pages = block->max_length >> TARGET_PAGE_BITS; in ram_list_init_bitmaps()
2769 block->bmap = bitmap_new(pages); in ram_list_init_bitmaps()
2770 bitmap_set(block->bmap, 0, pages); in ram_list_init_bitmaps()
2772 block->file_bmap = bitmap_new(pages); in ram_list_init_bitmaps()
2774 block->clear_bmap_shift = shift; in ram_list_init_bitmaps()
2775 block->clear_bmap = bitmap_new(clear_bmap_size(pages, shift)); in ram_list_init_bitmaps()
2789 rs->migration_dirty_pages -= pages; in migration_bitmap_clear_discarded_pages()
2820 * containing all 1s to exclude any discarded pages from migration. in ram_init_bitmaps()
2829 return -1; in ram_init_all()
2834 return -1; in ram_init_all()
2838 return -1; in ram_init_all()
2856 pages += bitmap_count_one(block->bmap, in ram_state_resume_prepare()
2857 block->used_length >> TARGET_PAGE_BITS); in ram_state_resume_prepare()
2861 rs->migration_dirty_pages = pages; in ram_state_resume_prepare()
2866 rs->pss[RAM_CHANNEL_PRECOPY].pss_channel = out; in ram_state_resume_prepare()
2888 for (; len > 0; len -= used_len, addr += used_len) { in qemu_guest_free_page_hint()
2890 if (unlikely(!block || offset >= block->used_length)) { in qemu_guest_free_page_hint()
2900 if (len <= block->used_length - offset) { in qemu_guest_free_page_hint()
2903 used_len = block->used_length - offset; in qemu_guest_free_page_hint()
2909 qemu_mutex_lock(&ram_state->bitmap_mutex); in qemu_guest_free_page_hint()
2917 ram_state->migration_dirty_pages -= in qemu_guest_free_page_hint()
2918 bitmap_count_one_with_offset(block->bmap, start, npages); in qemu_guest_free_page_hint()
2919 bitmap_clear(block->bmap, start, npages); in qemu_guest_free_page_hint()
2920 qemu_mutex_unlock(&ram_state->bitmap_mutex); in qemu_guest_free_page_hint()
2928 * The target's page size, so we know how many pages are in the
2954 num_pages = block->used_length >> TARGET_PAGE_BITS; in mapped_ram_setup_ramblock()
2962 block->bitmap_offset = qemu_get_offset(file) + header_size; in mapped_ram_setup_ramblock()
2963 block->pages_offset = ROUND_UP(block->bitmap_offset + in mapped_ram_setup_ramblock()
2967 header->version = cpu_to_be32(MAPPED_RAM_HDR_VERSION); in mapped_ram_setup_ramblock()
2968 header->page_size = cpu_to_be64(TARGET_PAGE_SIZE); in mapped_ram_setup_ramblock()
2969 header->bitmap_offset = cpu_to_be64(block->bitmap_offset); in mapped_ram_setup_ramblock()
2970 header->pages_offset = cpu_to_be64(block->pages_offset); in mapped_ram_setup_ramblock()
2975 qemu_set_offset(file, block->pages_offset + block->used_length, SEEK_SET); in mapped_ram_setup_ramblock()
2985 error_setg(errp, "Could not read whole mapped-ram migration header " in mapped_ram_read_header()
2990 /* migration stream is big-endian */ in mapped_ram_read_header()
2991 header->version = be32_to_cpu(header->version); in mapped_ram_read_header()
2993 if (header->version > MAPPED_RAM_HDR_VERSION) { in mapped_ram_read_header()
2994 error_setg(errp, "Migration mapped-ram capability version not " in mapped_ram_read_header()
2996 header->version); in mapped_ram_read_header()
3000 header->page_size = be64_to_cpu(header->page_size); in mapped_ram_read_header()
3001 header->bitmap_offset = be64_to_cpu(header->bitmap_offset); in mapped_ram_read_header()
3002 header->pages_offset = be64_to_cpu(header->pages_offset); in mapped_ram_read_header()
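The mapped-ram header is stored big-endian on disk, so each field is byte-swapped on load as shown above. For illustration, a portable conversion that does not rely on QEMU's be64_to_cpu() helper:

#include <stdint.h>

static uint64_t be64_to_host(const uint8_t bytes[8])
{
    uint64_t value = 0;

    for (int i = 0; i < 8; i++) {
        value = (value << 8) | bytes[i];   /* most significant byte first */
    }
    return value;
}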
3009 * long-running RCU critical section. When rcu-reclaims in the code
3032 return -1; in ram_save_setup()
3035 (*rsp)->pss[RAM_CHANNEL_PRECOPY].pss_channel = f; in ram_save_setup()
3048 qemu_put_byte(f, strlen(block->idstr)); in ram_save_setup()
3049 qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr)); in ram_save_setup()
3050 qemu_put_be64(f, block->used_length); in ram_save_setup()
3052 block->page_size != max_hg_page_size) { in ram_save_setup()
3053 qemu_put_be64(f, block->page_size); in ram_save_setup()
3056 qemu_put_be64(f, block->mr->addr); in ram_save_setup()
3086 * For legacy QEMUs using per-section sync in ram_save_setup()
3090 * per-channel to work. in ram_save_setup()
3092 * For modern QEMUs using per-round sync in ram_save_setup()
3119 error_setg_errno(errp, -ret, "%s failed", __func__); in ram_save_setup()
3129 long num_pages = block->used_length >> TARGET_PAGE_BITS; in ram_save_file_bmap()
3132 qemu_put_buffer_at(f, (uint8_t *)block->file_bmap, bitmap_size, in ram_save_file_bmap()
3133 block->bitmap_offset); in ram_save_file_bmap()
3141 g_free(block->file_bmap); in ram_save_file_bmap()
3142 block->file_bmap = NULL; in ram_save_file_bmap()
3149 set_bit_atomic(offset >> TARGET_PAGE_BITS, block->file_bmap); in ramblock_set_file_bmap_atomic()
3151 clear_bit_atomic(offset >> TARGET_PAGE_BITS, block->file_bmap); in ramblock_set_file_bmap_atomic()
3179 WITH_QEMU_LOCK_GUARD(&rs->bitmap_mutex) { in ram_save_iterate()
3181 if (ram_list.version != rs->last_version) { in ram_save_iterate()
3216 rs->target_page_count += pages; in ram_save_iterate()
3225 uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / in ram_save_iterate()
3282 rs->last_stage = !migration_in_colo_state(); in ram_save_complete()
3298 qemu_mutex_lock(&rs->bitmap_mutex); in ram_save_complete()
3308 qemu_mutex_unlock(&rs->bitmap_mutex); in ram_save_complete()
3312 qemu_mutex_unlock(&rs->bitmap_mutex); in ram_save_complete()
3340 return -err; in ram_save_complete()
3354 uint64_t remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE; in ram_state_pending_estimate()
3379 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE; in ram_state_pending_exact()
3400 error_report("Failed to load XBZRLE page - wrong compression!"); in load_xbzrle()
3401 return -1; in load_xbzrle()
3405 error_report("Failed to load XBZRLE page - len overflow!"); in load_xbzrle()
3406 return -1; in load_xbzrle()
3415 TARGET_PAGE_SIZE) == -1) { in load_xbzrle()
3416 error_report("Failed to load XBZRLE page - decode error!"); in load_xbzrle()
3417 return -1; in load_xbzrle()
3428 * Returns a pointer from within the RCU-protected ram_list.
3439 RAMBlock *block = mis->last_recv_block[channel]; in ram_block_from_stream()
3466 mis->last_recv_block[channel] = block; in ram_block_from_stream()
3478 return block->host + offset; in host_from_ram_block_offset()
3485 return (void *)QEMU_ALIGN_DOWN((uintptr_t)(block->host + offset), in host_page_from_ram_block_offset()
3486 block->page_size); in host_page_from_ram_block_offset()
3492 return ((uintptr_t)block->host + offset) & (block->page_size - 1); in host_page_offset_from_ram_block_offset()
3497 qemu_mutex_lock(&ram_state->bitmap_mutex); in colo_record_bitmap()
3500 ram_state->migration_dirty_pages += !test_and_set_bit( in colo_record_bitmap()
3502 block->bmap); in colo_record_bitmap()
3504 qemu_mutex_unlock(&ram_state->bitmap_mutex); in colo_record_bitmap()
3513 if (!block->colo_cache) { in colo_cache_from_block_offset()
3515 __func__, block->idstr); in colo_cache_from_block_offset()
3527 return block->colo_cache + offset; in colo_cache_from_block_offset()
3567 block->colo_cache = qemu_anon_ram_alloc(block->used_length, in colo_init_ram_cache()
3569 if (!block->colo_cache) { in colo_init_ram_cache()
3571 "size 0x" RAM_ADDR_FMT, __func__, block->idstr, in colo_init_ram_cache()
3572 block->used_length); in colo_init_ram_cache()
3574 if (block->colo_cache) { in colo_init_ram_cache()
3575 qemu_anon_ram_free(block->colo_cache, block->used_length); in colo_init_ram_cache()
3576 block->colo_cache = NULL; in colo_init_ram_cache()
3579 return -errno; in colo_init_ram_cache()
3582 qemu_madvise(block->colo_cache, block->used_length, in colo_init_ram_cache()
3595 unsigned long pages = block->max_length >> TARGET_PAGE_BITS; in colo_init_ram_cache()
3596 block->bmap = bitmap_new(pages); in colo_init_ram_cache()
3619 bitmap_zero(block->bmap, block->max_length >> TARGET_PAGE_BITS); in colo_incoming_start_dirty_log()
3626 ram_state->migration_dirty_pages = 0; in colo_incoming_start_dirty_log()
3638 g_free(block->bmap); in colo_release_ram_cache()
3639 block->bmap = NULL; in colo_release_ram_cache()
3644 if (block->colo_cache) { in colo_release_ram_cache()
3645 qemu_anon_ram_free(block->colo_cache, block->used_length); in colo_release_ram_cache()
3646 block->colo_cache = NULL; in colo_release_ram_cache()
3681 g_free(rb->receivedmap); in ram_load_cleanup()
3682 rb->receivedmap = NULL; in ram_load_cleanup()
 3696 * postcopy-ram. postcopy-ram's similarly named
3707 * Returns 0 for success or -errno in case of error
3721 PostcopyTmpPage *tmp_page = &mis->postcopy_tmp_pages[channel]; in ram_load_postcopy()
3748 ret = -EINVAL; in ram_load_postcopy()
3755 * while in postcopy, which is fine - trying to place via in ram_load_postcopy()
3758 if (!block->host || addr >= block->postcopy_length) { in ram_load_postcopy()
3760 ret = -EINVAL; in ram_load_postcopy()
3763 tmp_page->target_pages++; in ram_load_postcopy()
3764 matches_target_page_size = block->page_size == TARGET_PAGE_SIZE; in ram_load_postcopy()
3771 * The migration protocol uses, possibly smaller, target-pages in ram_load_postcopy()
3775 page_buffer = tmp_page->tmp_huge_page + in ram_load_postcopy()
3778 if (tmp_page->target_pages == 1) { in ram_load_postcopy()
3779 tmp_page->host_addr = in ram_load_postcopy()
3781 } else if (tmp_page->host_addr != in ram_load_postcopy()
3784 error_report("Non-same host page detected on channel %d: " in ram_load_postcopy()
3785 "Target host page %p, received host page %p " in ram_load_postcopy()
3787 channel, tmp_page->host_addr, in ram_load_postcopy()
3789 block->idstr, addr, tmp_page->target_pages); in ram_load_postcopy()
3790 ret = -EINVAL; in ram_load_postcopy()
3798 if (tmp_page->target_pages == in ram_load_postcopy()
3799 (block->page_size / TARGET_PAGE_SIZE)) { in ram_load_postcopy()
3802 place_source = tmp_page->tmp_huge_page; in ram_load_postcopy()
3810 ret = -EINVAL; in ram_load_postcopy()
3815 * this is a zero page and (block->page_size == TARGET_PAGE_SIZE). in ram_load_postcopy()
3823 tmp_page->all_zero = false; in ram_load_postcopy()
3829 * For small pages that matches target page size, we in ram_load_postcopy()
3845 ret = -EINVAL; in ram_load_postcopy()
3855 if (tmp_page->all_zero) { in ram_load_postcopy()
3856 ret = postcopy_place_page_zero(mis, tmp_page->host_addr, block); in ram_load_postcopy()
3858 ret = postcopy_place_page(mis, tmp_page->host_addr, in ram_load_postcopy()
3887 qemu_mutex_lock(&ram_state->bitmap_mutex); in colo_flush_ram_cache()
3894 trace_colo_flush_ram_cache_begin(ram_state->migration_dirty_pages); in colo_flush_ram_cache()
3913 dst_host = block->host in colo_flush_ram_cache()
3915 src_host = block->colo_cache in colo_flush_ram_cache()
3922 qemu_mutex_unlock(&ram_state->bitmap_mutex); in colo_flush_ram_cache()
3931 data->opaque = host_addr; in ram_load_multifd_pages()
3932 data->file_offset = offset; in ram_load_multifd_pages()
3933 data->size = size; in ram_load_multifd_pages()
3958 unread = TARGET_PAGE_SIZE * (clear_bit_idx - set_bit_idx); in read_ramblock_mapped_ram()
3965 block->idstr); in read_ramblock_mapped_ram()
3973 block->pages_offset + offset); in read_ramblock_mapped_ram()
3976 block->pages_offset + offset); in read_ramblock_mapped_ram()
3983 unread -= read; in read_ramblock_mapped_ram()
3992 "from file offset %" PRIx64 ": ", block->idstr, offset, in read_ramblock_mapped_ram()
3993 block->pages_offset + offset); in read_ramblock_mapped_ram()
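read_ramblock_mapped_ram() walks the file bitmap as runs of consecutive set bits and reads each populated run from its fixed offset in the file. A self-contained sketch of that run iteration, with the bitmap simplified to a bool array and hypothetical names:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static size_t find_next(const bool *bits, size_t n, size_t from, bool value)
{
    while (from < n && bits[from] != value) {
        from++;
    }
    return from;
}

int main(void)
{
    bool populated[] = { 0, 1, 1, 1, 0, 0, 1, 1 };
    size_t n = sizeof(populated) / sizeof(populated[0]);

    for (size_t i = 0; i < n; ) {
        size_t first = find_next(populated, n, i, true);     /* start of a populated run */
        if (first == n) {
            break;
        }
        size_t last = find_next(populated, n, first, false); /* one past the run's end */
        printf("read pages [%zu, %zu)\n", first, last);
        i = last;
    }
    return 0;
}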
4009 block->pages_offset = header.pages_offset; in parse_ramblock_mapped_ram()
4017 if (!QEMU_IS_ALIGNED(block->pages_offset, TARGET_PAGE_SIZE)) { in parse_ramblock_mapped_ram()
4020 block->idstr); in parse_ramblock_mapped_ram()
4039 qemu_set_offset(f, block->pages_offset + length, SEEK_SET); in parse_ramblock_mapped_ram()
4056 return -EINVAL; in parse_ramblock()
4062 error_report("block %s should not be migrated !", block->idstr); in parse_ramblock()
4063 return -EINVAL; in parse_ramblock()
4066 if (length != block->used_length) { in parse_ramblock()
4082 block->page_size != max_hg_page_size) { in parse_ramblock()
4084 if (remote_page_size != block->page_size) { in parse_ramblock()
4086 "(local) %zd != %" PRId64, block->idstr, in parse_ramblock()
4087 block->page_size, remote_page_size); in parse_ramblock()
4088 return -EINVAL; in parse_ramblock()
4094 block->mr->addr != addr) { in parse_ramblock()
4096 "%" PRId64 "!= %" PRId64, block->idstr, in parse_ramblock()
4097 (uint64_t)addr, (uint64_t)block->mr->addr); in parse_ramblock()
4098 return -EINVAL; in parse_ramblock()
4101 ret = rdma_block_notification_handle(f, block->idstr); in parse_ramblock()
4113 /* Synchronize RAM block list */ in parse_ramblocks()
4130 ret = -EINVAL; in parse_ramblocks()
4132 total_ram_bytes -= length; in parse_ramblocks()
4141 * Returns 0 for success or -errno in case of error
4188 ret = -EINVAL; in ram_load_precopy()
 4203 * while we need to stop the VM, which is a time-consuming process. in ram_load_precopy()
 4204 * Here we optimize it with a trick: back up every page while in in ram_load_precopy()
 4207 * back up all of the SVM's memory in the COLO preparing stage. in ram_load_precopy()
4223 ret = -EINVAL; in ram_load_precopy()
4230 trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host); in ram_load_precopy()
4237 * For mapped-ram migration (to a file) using multifd, we sync in ram_load_precopy()
4252 ret = -EINVAL; in ram_load_precopy()
4266 ret = -EINVAL; in ram_load_precopy()
4278 * Mapped-ram migration flushes once and for all after in ram_load_precopy()
4293 ret = -EINVAL; in ram_load_precopy()
4319 return -EINVAL; in ram_load()
4352 "is not supported now!", rb->idstr, rb->host); in ram_has_postcopy()
4364 QEMUFile *file = s->to_dst_file; in ram_dirty_bitmap_sync_all()
4368 qatomic_set(&rs->postcopy_bmap_sync_requested, 0); in ram_dirty_bitmap_sync_all()
4370 qemu_savevm_send_recv_bitmap(file, block->idstr); in ram_dirty_bitmap_sync_all()
4371 trace_ram_dirty_bitmap_request(block->idstr); in ram_dirty_bitmap_sync_all()
4372 qatomic_inc(&rs->postcopy_bmap_sync_requested); in ram_dirty_bitmap_sync_all()
4378 while (qatomic_read(&rs->postcopy_bmap_sync_requested)) { in ram_dirty_bitmap_sync_all()
4380 return -1; in ram_dirty_bitmap_sync_all()
4399 QEMUFile *file = s->rp_state.from_dst_file; in ram_dirty_bitmap_reload()
4401 unsigned long nbits = block->used_length >> TARGET_PAGE_BITS; in ram_dirty_bitmap_reload()
4406 trace_ram_dirty_bitmap_reload_begin(block->idstr); in ram_dirty_bitmap_reload()
4408 if (s->state != MIGRATION_STATUS_POSTCOPY_RECOVER) { in ram_dirty_bitmap_reload()
4410 MigrationStatus_str(s->state)); in ram_dirty_bitmap_reload()
4428 " != 0x%"PRIx64")", block->idstr, size, local_size); in ram_dirty_bitmap_reload()
4438 block->idstr, local_size, size); in ram_dirty_bitmap_reload()
4444 block->idstr, end_mark); in ram_dirty_bitmap_reload()
4452 bitmap_from_le(block->bmap, le_bitmap, nbits); in ram_dirty_bitmap_reload()
4458 bitmap_complement(block->bmap, block->bmap, nbits); in ram_dirty_bitmap_reload()
4464 trace_ram_dirty_bitmap_reload_complete(block->idstr); in ram_dirty_bitmap_reload()
4466 qatomic_dec(&rs->postcopy_bmap_sync_requested); in ram_dirty_bitmap_reload()
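During postcopy recovery the destination sends back its received bitmap; inverting it (the bitmap_complement() call above) yields exactly the pages the source still has to send. A trivial sketch of that inversion over a word-array bitmap:

#include <stddef.h>
#include <stdint.h>

static void complement_bitmap(uint64_t *bmap, size_t nwords)
{
    for (size_t i = 0; i < nwords; i++) {
        bmap[i] = ~bmap[i];   /* received on the destination -> still dirty on the source */
    }
}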
4490 ram_state_resume_prepare(rs, s->to_dst_file); in ram_resume_prepare()
4506 * threads is still non-atomic, so the load cannot happen with vCPUs in ram_save_postcopy_prepare()
4533 qemu_put_be64(s->postcopy_qemufile_src, RAM_SAVE_FLAG_EOS); in postcopy_preempt_shutdown_file()
4534 qemu_fflush(s->postcopy_qemufile_src); in postcopy_preempt_shutdown_file()
4573 * changing at random points in time - especially after sending the in ram_mig_ram_block_resized()
4577 error_setg(&err, "RAM block '%s' resized during precopy.", rb->idstr); in ram_mig_ram_block_resized()
4587 * Update what ram_postcopy_incoming_init()->init_range() does at the in ram_mig_ram_block_resized()
4592 if (ram_discard_range(rb->idstr, old_size, new_size - old_size)) { in ram_mig_ram_block_resized()
4594 rb->idstr); in ram_mig_ram_block_resized()
4597 rb->postcopy_length = new_size; in ram_mig_ram_block_resized()
4610 rb->idstr, ps); in ram_mig_ram_block_resized()
4611 exit(-1); in ram_mig_ram_block_resized()