Lines Matching +full:stop +full:- +full:ack
4 * Copyright (c) 2003-2008 Fabrice Bellard
5 * Copyright (c) 2011-2015 Red Hat Inc
34 #include "qemu/main-loop.h"
38 #include "migration-stats.h"
41 #include "qemu-file.h"
42 #include "postcopy-ram.h"
44 #include "qemu/error-report.h"
46 #include "qapi/qapi-types-migration.h"
47 #include "qapi/qapi-events-migration.h"
48 #include "qapi/qapi-commands-migration.h"
55 #include "system/cpu-throttle.h"
75 * mapped-ram migration supports O_DIRECT, so we need to make sure the
87 * When doing mapped-ram migration, this is the amount we read from
182 * Returns 0 for success or -1 for error
196 return -1; in xbzrle_cache_resize()
209 ret = -1; in xbzrle_cache_resize()
258 assert(!rb->receivedmap); in ramblock_recv_map_init()
259 rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits()); in ramblock_recv_map_init()
266 rb->receivedmap); in ramblock_recv_bitmap_test()
271 return test_bit(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap); in ramblock_recv_bitmap_test_byte_offset()
276 set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb), rb->receivedmap); in ramblock_recv_bitmap_set()
282 bitmap_set_atomic(rb->receivedmap, in ramblock_recv_bitmap_set_range()
289 set_bit_atomic(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap); in ramblock_recv_bitmap_set_offset()
307 return -1; in ramblock_recv_bitmap_send()
310 nbits = block->postcopy_length >> TARGET_PAGE_BITS; in ramblock_recv_bitmap_send()
324 bitmap_to_le(le_bitmap, block->receivedmap, nbits); in ramblock_recv_bitmap_send()
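The fragments above belong to the receivedmap machinery: the destination keeps one bit per target page in each RAMBlock, and ramblock_recv_bitmap_send() serializes that bitmap in little-endian word order for the return path. A minimal standalone sketch of the idea follows; the 4 KiB page size and the helper names are assumptions for illustration, not the QEMU implementation.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                       /* assume 4 KiB target pages */
#define BITS_PER_WORD (8 * sizeof(uint64_t))

/* Mark the page containing 'byte_offset' as received. */
static void recv_bitmap_set(uint64_t *bmap, uint64_t byte_offset)
{
    uint64_t page = byte_offset >> PAGE_SHIFT;
    bmap[page / BITS_PER_WORD] |= UINT64_C(1) << (page % BITS_PER_WORD);
}

/* Serialize the bitmap as little-endian 64-bit words for the wire. */
static void recv_bitmap_to_le(uint8_t *out, const uint64_t *bmap,
                              uint64_t nwords)
{
    for (uint64_t i = 0; i < nwords; i++) {
        for (int b = 0; b < 8; b++) {
            out[i * 8 + b] = (uint8_t)(bmap[i] >> (8 * b));
        }
    }
}

int main(void)
{
    uint64_t bmap[2] = { 0 };
    uint8_t wire[16];

    recv_bitmap_set(bmap, 0x3000);          /* page 3  -> bit 3 of word 0 */
    recv_bitmap_set(bmap, 0x40000);         /* page 64 -> bit 0 of word 1 */
    recv_bitmap_to_le(wire, bmap, 2);
    printf("wire[0]=0x%02x wire[8]=0x%02x\n", wire[0], wire[8]);
    return 0;
}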
372 /* UFFD file descriptor, used in 'write-tracking' migration */
410 * - dirty/clear bitmap
411 * - migration_dirty_pages
412 * - pss structures
445 return !QSIMPLEQ_EMPTY_ATOMIC(&rs->src_page_requests); in postcopy_has_request()
473 return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) : in ram_bytes_remaining()
493 pss->block = rb; in pss_init()
494 pss->page = page; in pss_init()
495 pss->complete_round = false; in pss_init()
504 return pss1->host_page_sending && pss2->host_page_sending && in pss_overlap()
505 (pss1->host_page_start == pss2->host_page_start); in pss_overlap()
524 bool same_block = (block == pss->last_sent_block); in save_page_header()
533 len = strlen(block->idstr); in save_page_header()
535 qemu_put_buffer(f, (uint8_t *)block->idstr, len); in save_page_header()
537 pss->last_sent_block = block; in save_page_header()
549 * fast and will not effectively converge, even with auto-converge.
572 cpu_now = 100 - throttle_now; in mig_throttle_guest_down()
575 throttle_inc = MIN(cpu_now - cpu_ideal, pct_increment); in mig_throttle_guest_down()
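These throttle fragments implement auto-converge: when the guest dirties memory faster than it can be transferred, the CPU throttle is raised step by step. Below is a rough standalone sketch of that step calculation under assumed parameter names; it is a simplification, not the actual mig_throttle_guest_down() logic, which also handles the tail-slow mode and the initial activation of the throttle.

/* Compute the next throttle percentage: move the guest's remaining CPU
 * share toward an ideal value, capped by a per-period increment and an
 * overall maximum. */
static int next_throttle(int throttle_now, int cpu_ideal,
                         int pct_increment, int pct_max)
{
    int cpu_now = 100 - throttle_now;   /* CPU share the guest still gets */
    int inc = cpu_now - cpu_ideal;      /* distance above the target share */

    if (inc > pct_increment) {
        inc = pct_increment;            /* limit the step per sync period */
    }
    if (inc < 0) {
        inc = 0;                        /* never lower the throttle here */
    }
    if (throttle_now + inc > pct_max) {
        return pct_max;                 /* respect the configured ceiling */
    }
    return throttle_now + inc;
}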
585 rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); in mig_throttle_counter_reset()
586 rs->num_dirty_pages_period = 0; in mig_throttle_counter_reset()
587 rs->bytes_xfer_prev = migration_transferred_bytes(); in mig_throttle_counter_reset()
596 * The important thing is that a stale (not-yet-0'd) page be replaced
616 * -1 means that xbzrle would be longer than normal
631 QEMUFile *file = pss->pss_channel; in save_xbzrle_page()
636 if (!rs->last_stage) { in save_xbzrle_page()
638 generation) == -1) { in save_xbzrle_page()
639 return -1; in save_xbzrle_page()
646 return -1; in save_xbzrle_page()
675 if (!rs->last_stage && encoded_len != 0) { in save_xbzrle_page()
688 } else if (encoded_len == -1) { in save_xbzrle_page()
692 return -1; in save_xbzrle_page()
696 bytes_xbzrle = save_page_header(pss, pss->pss_channel, block, in save_xbzrle_page()
706 xbzrle_counters.bytes += bytes_xbzrle - 8; in save_xbzrle_page()
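save_xbzrle_page() transmits only the bytes that changed relative to a cached copy of the page, and falls back to sending the full page when the delta is not smaller (the encoded_len == -1 case above). As a purely illustrative sketch, a delta encoder in the same spirit could emit (offset, length, data) records for each modified run; this is not the real XBZRLE wire format.

#include <stdint.h>
#include <string.h>

/* Encode the runs of 'cur' that differ from 'old' as
 * 16-bit offset, 16-bit length, raw bytes.  Returns the number of bytes
 * written, or -1 if the result would not fit (i.e. not worth encoding). */
static int delta_encode(const uint8_t *old, const uint8_t *cur,
                        int page_size, uint8_t *out, int out_size)
{
    int i = 0, used = 0;

    while (i < page_size) {
        if (old[i] == cur[i]) {
            i++;                        /* skip an unchanged byte */
            continue;
        }
        int start = i;
        while (i < page_size && old[i] != cur[i]) {
            i++;                        /* measure the modified run */
        }
        int len = i - start;
        if (used + 4 + len > out_size) {
            return -1;                  /* caller sends the full page */
        }
        out[used++] = start >> 8;       /* record: offset (16-bit) */
        out[used++] = start & 0xff;
        out[used++] = len >> 8;         /*         length (16-bit) */
        out[used++] = len & 0xff;
        memcpy(out + used, cur + start, len);
        used += len;
    }
    return used;
}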
715 * This function updates pss->page to point to the next dirty page index
717 * found. Note that when pss->host_page_sending==true it means we're
725 RAMBlock *rb = pss->block; in pss_find_next_dirty()
726 unsigned long size = rb->used_length >> TARGET_PAGE_BITS; in pss_find_next_dirty()
727 unsigned long *bitmap = rb->bmap; in pss_find_next_dirty()
731 pss->page = size; in pss_find_next_dirty()
739 if (pss->host_page_sending) { in pss_find_next_dirty()
740 assert(pss->host_page_end); in pss_find_next_dirty()
741 size = MIN(size, pss->host_page_end); in pss_find_next_dirty()
744 pss->page = find_next_bit(bitmap, size, pss->page); in pss_find_next_dirty()
753 if (!rb->clear_bmap || !clear_bmap_test_and_clear(rb, page)) { in migration_clear_memory_region_dirty_bitmap()
757 shift = rb->clear_bmap_shift; in migration_clear_memory_region_dirty_bitmap()
770 trace_migration_bitmap_clear_dirty(rb->idstr, start, size, page); in migration_clear_memory_region_dirty_bitmap()
771 memory_region_clear_dirty_bitmap(rb->mr, start, size); in migration_clear_memory_region_dirty_bitmap()
779 unsigned long i, chunk_pages = 1UL << rb->clear_bmap_shift; in migration_clear_memory_region_dirty_bitmap_range()
784 * Clear pages from start to start + npages - 1, so the end boundary is in migration_clear_memory_region_dirty_bitmap_range()
807 unsigned long size = rb->used_length >> TARGET_PAGE_BITS; in colo_bitmap_find_dirty()
808 unsigned long *bitmap = rb->bmap; in colo_bitmap_find_dirty()
823 *num = next - first; in colo_bitmap_find_dirty()
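colo_bitmap_find_dirty() returns the start of the next dirty run plus its length (*num = next - first), i.e. it pairs a find-next-set scan with a find-next-zero scan. A small byte-per-bit sketch of that pattern, with hypothetical helper names in place of the kernel-style find_next_bit()/find_next_zero_bit():

#include <stddef.h>

static size_t find_next(const unsigned char *bmap, size_t nbits,
                        size_t start, int want_set)
{
    for (size_t i = start; i < nbits; i++) {
        int bit = (bmap[i / 8] >> (i % 8)) & 1;
        if (bit == want_set) {
            return i;
        }
    }
    return nbits;                        /* not found */
}

/* Returns the index of the first set bit at or after 'start'; *len is the
 * number of consecutive set bits, 0 if none were found. */
static size_t find_dirty_run(const unsigned char *bmap, size_t nbits,
                             size_t start, size_t *len)
{
    size_t first = find_next(bmap, nbits, start, 1);
    if (first >= nbits) {
        *len = 0;
        return nbits;
    }
    size_t next_clear = find_next(bmap, nbits, first + 1, 0);
    *len = next_clear - first;
    return first;
}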
839 if (!rs->last_stage) { in migration_bitmap_clear_dirty()
851 ret = test_and_clear_bit(page, rb->bmap); in migration_bitmap_clear_dirty()
853 rs->migration_dirty_pages--; in migration_bitmap_clear_dirty()
862 const hwaddr offset = section->offset_within_region; in dirty_bitmap_clear_section()
863 const hwaddr size = int128_get64(section->size); in dirty_bitmap_clear_section()
866 RAMBlock *rb = section->mr->ram_block; in dirty_bitmap_clear_section()
870 * We don't grab ram_state->bitmap_mutex because we expect to run in dirty_bitmap_clear_section()
877 *cleared_bits += bitmap_count_one_with_offset(rb->bmap, start, npages); in dirty_bitmap_clear_section()
878 bitmap_clear(rb->bmap, start, npages); in dirty_bitmap_clear_section()
899 if (rb->mr && rb->bmap && memory_region_has_ram_discard_manager(rb->mr)) { in ramblock_dirty_bitmap_clear_discarded_pages()
900 RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr); in ramblock_dirty_bitmap_clear_discarded_pages()
902 .mr = rb->mr, in ramblock_dirty_bitmap_clear_discarded_pages()
915 * Check if a host-page aligned page falls into a discarded range as managed by
922 if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) { in ramblock_page_is_discarded()
923 RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr); in ramblock_page_is_discarded()
925 .mr = rb->mr, in ramblock_page_is_discarded()
939 cpu_physical_memory_sync_dirty_bitmap(rb, 0, rb->used_length); in ramblock_sync_dirty_bitmap()
941 rs->migration_dirty_pages += new_dirty_pages; in ramblock_sync_dirty_bitmap()
942 rs->num_dirty_pages_period += new_dirty_pages; in ramblock_sync_dirty_bitmap()
960 summary |= block->page_size; in ram_pagesize_summary()
975 uint64_t page_count = rs->target_page_count - rs->target_page_count_prev; in migration_update_rates()
979 rs->num_dirty_pages_period * 1000 / in migration_update_rates()
980 (end_time - rs->time_last_bitmap_sync)); in migration_update_rates()
989 xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss - in migration_update_rates()
990 rs->xbzrle_cache_miss_prev) / page_count; in migration_update_rates()
991 rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss; in migration_update_rates()
992 unencoded_size = (xbzrle_counters.pages - rs->xbzrle_pages_prev) * in migration_update_rates()
994 encoded_size = xbzrle_counters.bytes - rs->xbzrle_bytes_prev; in migration_update_rates()
995 if (xbzrle_counters.pages == rs->xbzrle_pages_prev || !encoded_size) { in migration_update_rates()
1000 rs->xbzrle_pages_prev = xbzrle_counters.pages; in migration_update_rates()
1001 rs->xbzrle_bytes_prev = xbzrle_counters.bytes; in migration_update_rates()
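migration_update_rates() turns raw counters into rates once per sync period: dirty pages per second from num_dirty_pages_period and the elapsed milliseconds, plus XBZRLE cache-miss and overflow ratios from counter deltas. A back-of-the-envelope sketch of the dirty-rate part; the 4 KiB page size and the structure names are assumptions.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_SIZE 4096

/* Counters sampled at the start and end of one bitmap-sync period. */
struct period_sample {
    uint64_t dirty_pages;          /* pages dirtied during the period */
    uint64_t start_ms, end_ms;     /* period boundaries in milliseconds */
};

static void report_period(const struct period_sample *s)
{
    uint64_t elapsed_ms = s->end_ms - s->start_ms;

    if (!elapsed_ms) {
        return;                    /* avoid dividing by zero */
    }
    printf("dirty rate: %" PRIu64 " pages/s (%" PRIu64 " bytes/s)\n",
           s->dirty_pages * 1000 / elapsed_ms,
           s->dirty_pages * 1000 / elapsed_ms * TARGET_PAGE_SIZE);
}

Comparing that bytes/s figure against the available migration bandwidth is what decides whether throttling (or the dirty limit) needs to kick in.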
1006 * Enable dirty-limit to throttle down the guest
1019 * vcpu-dirty-limit untouched. in migration_dirty_limit_guest()
1022 quota_dirtyrate == s->parameters.vcpu_dirty_limit) { in migration_dirty_limit_guest()
1026 quota_dirtyrate = s->parameters.vcpu_dirty_limit; in migration_dirty_limit_guest()
1032 qmp_set_vcpu_dirty_limit(false, -1, quota_dirtyrate, NULL); in migration_dirty_limit_guest()
1040 migration_transferred_bytes() - rs->bytes_xfer_prev; in migration_trigger_throttle()
1041 uint64_t bytes_dirty_period = rs->num_dirty_pages_period * TARGET_PAGE_SIZE; in migration_trigger_throttle()
1052 (++rs->dirty_rate_high_cnt >= 2)) { in migration_trigger_throttle()
1053 rs->dirty_rate_high_cnt = 0; in migration_trigger_throttle()
1071 if (!rs->time_last_bitmap_sync) { in migration_bitmap_sync()
1072 rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); in migration_bitmap_sync()
1078 WITH_QEMU_LOCK_GUARD(&rs->bitmap_mutex) { in migration_bitmap_sync()
1088 trace_migration_bitmap_sync_end(rs->num_dirty_pages_period); in migration_bitmap_sync()
1093 if (end_time > rs->time_last_bitmap_sync + 1000) { in migration_bitmap_sync()
1098 rs->target_page_count_prev = rs->target_page_count; in migration_bitmap_sync()
1101 rs->time_last_bitmap_sync = end_time; in migration_bitmap_sync()
1102 rs->num_dirty_pages_period = 0; in migration_bitmap_sync()
1103 rs->bytes_xfer_prev = migration_transferred_bytes(); in migration_bitmap_sync()
1118 * don't stop the normal migration process in the error case. in migration_bitmap_sync_precopy()
1153 uint8_t *p = pss->block->host + offset; in save_zero_page()
1154 QEMUFile *file = pss->pss_channel; in save_zero_page()
1168 /* zero pages are not transferred with mapped-ram */ in save_zero_page()
1169 clear_bit_atomic(offset >> TARGET_PAGE_BITS, pss->block->file_bmap); in save_zero_page()
1173 len += save_page_header(pss, file, pss->block, offset | RAM_SAVE_FLAG_ZERO); in save_zero_page()
1176 ram_release_page(pss->block->idstr, offset); in save_zero_page()
1183 if (rs->xbzrle_started) { in save_zero_page()
1185 xbzrle_cache_zero_page(pss->block->offset + offset); in save_zero_page()
1206 QEMUFile *file = pss->pss_channel; in save_normal_page()
1210 block->pages_offset + offset); in save_normal_page()
1211 set_bit(offset >> TARGET_PAGE_BITS, block->file_bmap); in save_normal_page()
1213 ram_transferred_add(save_page_header(pss, pss->pss_channel, block, in save_normal_page()
1232 * < 0 - error
1233 * >=0 - Number of pages written - this might legally be 0
1242 int pages = -1; in ram_save_page()
1245 RAMBlock *block = pss->block; in ram_save_page()
1246 ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS; in ram_save_page()
1247 ram_addr_t current_addr = block->offset + offset; in ram_save_page()
1249 p = block->host + offset; in ram_save_page()
1250 trace_ram_save_page(block->idstr, (uint64_t)offset, p); in ram_save_page()
1253 if (rs->xbzrle_started && !migration_in_postcopy()) { in ram_save_page()
1256 if (!rs->last_stage) { in ram_save_page()
1265 if (pages == -1) { in ram_save_page()
1277 return -1; in ram_save_multifd_page()
1303 /* Update pss->page for the next dirty bit in ramblock */ in find_dirty_block()
1306 if (pss->complete_round && pss->block == rs->last_seen_block && in find_dirty_block()
1307 pss->page >= rs->last_page) { in find_dirty_block()
1314 if (!offset_in_ramblock(pss->block, in find_dirty_block()
1315 ((ram_addr_t)pss->page) << TARGET_PAGE_BITS)) { in find_dirty_block()
1317 pss->page = 0; in find_dirty_block()
1318 pss->block = QLIST_NEXT_RCU(pss->block, next); in find_dirty_block()
1319 if (!pss->block) { in find_dirty_block()
1321 QEMUFile *f = rs->pss[RAM_CHANNEL_PRECOPY].pss_channel; in find_dirty_block()
1329 pss->block = QLIST_FIRST_RCU(&ram_list.blocks); in find_dirty_block()
1331 pss->complete_round = true; in find_dirty_block()
1334 rs->xbzrle_started = true; in find_dirty_block()
1348 * Helper for 'get_queued_page' - gets a page off the queue
1364 QEMU_LOCK_GUARD(&rs->src_page_req_mutex); in unqueue_page()
1372 entry = QSIMPLEQ_FIRST(&rs->src_page_requests); in unqueue_page()
1373 block = entry->rb; in unqueue_page()
1374 *offset = entry->offset; in unqueue_page()
1376 if (entry->len > TARGET_PAGE_SIZE) { in unqueue_page()
1377 entry->len -= TARGET_PAGE_SIZE; in unqueue_page()
1378 entry->offset += TARGET_PAGE_SIZE; in unqueue_page()
1380 memory_region_unref(block->mr); in unqueue_page()
1381 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req); in unqueue_page()
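unqueue_page() pops one target page from the head of the postcopy request queue; if the head request covers more than one page it is shrunk in place rather than removed, exactly as the len/offset adjustments above show. A minimal sketch of that queue behaviour with an invented structure and helper name:

#include <stdint.h>
#include <stdlib.h>

#define TARGET_PAGE_SIZE 4096UL

struct page_req {
    uint64_t offset;              /* start of the requested range */
    uint64_t len;                 /* remaining length, page-size multiple */
    struct page_req *next;
};

/* Returns 1 and fills *offset if a page was dequeued, 0 if queue empty. */
static int unqueue_one_page(struct page_req **head, uint64_t *offset)
{
    struct page_req *req = *head;

    if (!req) {
        return 0;
    }
    *offset = req->offset;
    if (req->len > TARGET_PAGE_SIZE) {
        req->len -= TARGET_PAGE_SIZE;      /* keep the remainder queued */
        req->offset += TARGET_PAGE_SIZE;
    } else {
        *head = req->next;                 /* request fully consumed */
        free(req);                         /* assumes heap-allocated nodes */
    }
    return 1;
}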
1411 res = uffd_read_events(rs->uffdio_fd, &uffd_msg, 1); in poll_fault_page()
1418 assert(block && (block->flags & RAM_UF_WRITEPROTECT) != 0); in poll_fault_page()
1427 * @pss: page-search-status structure
1428 * @start_page: index of the first page in the range relative to pss->block
1437 /* Check if page is from UFFD-managed region. */ in ram_save_release_protection()
1438 if (pss->block->flags & RAM_UF_WRITEPROTECT) { in ram_save_release_protection()
1439 void *page_address = pss->block->host + (start_page << TARGET_PAGE_BITS); in ram_save_release_protection()
1440 uint64_t run_length = (pss->page - start_page) << TARGET_PAGE_BITS; in ram_save_release_protection()
1442 /* Flush async buffers before un-protect. */ in ram_save_release_protection()
1443 qemu_fflush(pss->pss_channel); in ram_save_release_protection()
1444 /* Un-protect memory range. */ in ram_save_release_protection()
1445 res = uffd_change_protection(rs->uffdio_fd, page_address, run_length, in ram_save_release_protection()
1467 * compatible with 'write-tracking'
1489 /* Nothing to do with read-only and MMIO-writable regions */ in ram_write_tracking_compatible()
1490 if (block->mr->readonly || block->mr->rom_device) { in ram_write_tracking_compatible()
1493 /* Try to register block memory via UFFD-IO to track writes */ in ram_write_tracking_compatible()
1494 if (uffd_register_memory(uffd_fd, block->host, block->max_length, in ram_write_tracking_compatible()
1520 for (; offset < end; offset += block->page_size) { in populate_read_range()
1521 char tmp = *((char *)block->host + offset); in populate_read_range()
1531 const hwaddr size = int128_get64(section->size); in populate_read_section()
1532 hwaddr offset = section->offset_within_region; in populate_read_section()
1533 RAMBlock *block = section->mr->ram_block; in populate_read_section()
1556 * not be part of the migration stream either way -- see in ram_block_populate_read()
1561 if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) { in ram_block_populate_read()
1562 RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr); in ram_block_populate_read()
1564 .mr = rb->mr, in ram_block_populate_read()
1566 .size = rb->mr->size, in ram_block_populate_read()
1572 populate_read_range(rb, 0, rb->used_length); in ram_block_populate_read()
1577 * ram_write_tracking_prepare: prepare for UFFD-WP memory tracking
1586 /* Nothing to do with read-only and MMIO-writable regions */ in ram_write_tracking_prepare()
1587 if (block->mr->readonly || block->mr->rom_device) { in ram_write_tracking_prepare()
1606 const hwaddr size = int128_get64(section->size); in uffd_protect_section()
1607 const hwaddr offset = section->offset_within_region; in uffd_protect_section()
1608 RAMBlock *rb = section->mr->ram_block; in uffd_protect_section()
1611 return uffd_change_protection(uffd_fd, rb->host + offset, size, true, in uffd_protect_section()
1617 assert(rb->flags & RAM_UF_WRITEPROTECT); in ram_block_uffd_protect()
1620 if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) { in ram_block_uffd_protect()
1621 RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr); in ram_block_uffd_protect()
1623 .mr = rb->mr, in ram_block_uffd_protect()
1625 .size = rb->mr->size, in ram_block_uffd_protect()
1632 return uffd_change_protection(uffd_fd, rb->host, in ram_block_uffd_protect()
1633 rb->used_length, true, false); in ram_block_uffd_protect()
1637 * ram_write_tracking_start: start UFFD-WP memory tracking
1652 rs->uffdio_fd = uffd_fd; in ram_write_tracking_start()
1657 /* Nothing to do with read-only and MMIO-writable regions */ in ram_write_tracking_start()
1658 if (block->mr->readonly || block->mr->rom_device) { in ram_write_tracking_start()
1663 if (uffd_register_memory(rs->uffdio_fd, block->host, in ram_write_tracking_start()
1664 block->max_length, UFFDIO_REGISTER_MODE_WP, NULL)) { in ram_write_tracking_start()
1667 block->flags |= RAM_UF_WRITEPROTECT; in ram_write_tracking_start()
1668 memory_region_ref(block->mr); in ram_write_tracking_start()
1675 trace_ram_write_tracking_ramblock_start(block->idstr, block->page_size, in ram_write_tracking_start()
1676 block->host, block->max_length); in ram_write_tracking_start()
1685 if ((block->flags & RAM_UF_WRITEPROTECT) == 0) { in ram_write_tracking_start()
1688 uffd_unregister_memory(rs->uffdio_fd, block->host, block->max_length); in ram_write_tracking_start()
1690 block->flags &= ~RAM_UF_WRITEPROTECT; in ram_write_tracking_start()
1691 memory_region_unref(block->mr); in ram_write_tracking_start()
1695 rs->uffdio_fd = -1; in ram_write_tracking_start()
1696 return -1; in ram_write_tracking_start()
1700 * ram_write_tracking_stop: stop UFFD-WP memory tracking and remove protection
1710 if ((block->flags & RAM_UF_WRITEPROTECT) == 0) { in ram_write_tracking_stop()
1713 uffd_unregister_memory(rs->uffdio_fd, block->host, block->max_length); in ram_write_tracking_stop()
1715 trace_ram_write_tracking_ramblock_stop(block->idstr, block->page_size, in ram_write_tracking_stop()
1716 block->host, block->max_length); in ram_write_tracking_stop()
1719 block->flags &= ~RAM_UF_WRITEPROTECT; in ram_write_tracking_stop()
1720 memory_region_unref(block->mr); in ram_write_tracking_stop()
1724 uffd_close_fd(rs->uffdio_fd); in ram_write_tracking_stop()
1725 rs->uffdio_fd = -1; in ram_write_tracking_stop()
1798 dirty = test_bit(page, block->bmap); in get_queued_page()
1800 trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset, in get_queued_page()
1803 trace_get_queued_page(block->idstr, (uint64_t)offset, page); in get_queued_page()
1823 pss->block = block; in get_queued_page()
1824 pss->page = offset >> TARGET_PAGE_BITS; in get_queued_page()
1830 pss->complete_round = false; in get_queued_page()
1847 /* This queue generally should be empty - but in the case of a failed in migration_page_queue_free()
1851 QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) { in migration_page_queue_free()
1852 memory_region_unref(mspr->rb->mr); in migration_page_queue_free()
1853 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req); in migration_page_queue_free()
1881 ramblock = rs->last_req_rb; in ram_save_queue_pages()
1889 return -1; in ram_save_queue_pages()
1895 /* We shouldn't be asked for a non-existent RAMBlock */ in ram_save_queue_pages()
1897 return -1; in ram_save_queue_pages()
1899 rs->last_req_rb = ramblock; in ram_save_queue_pages()
1901 trace_ram_save_queue_pages(ramblock->idstr, start, len); in ram_save_queue_pages()
1902 if (!offset_in_ramblock(ramblock, start + len - 1)) { in ram_save_queue_pages()
1906 start, len, ramblock->used_length); in ram_save_queue_pages()
1907 return -1; in ram_save_queue_pages()
1912 * rp-return thread. in ram_save_queue_pages()
1917 PageSearchStatus *pss = &ram_state->pss[RAM_CHANNEL_POSTCOPY]; in ram_save_queue_pages()
1920 qemu_mutex_lock(&rs->bitmap_mutex); in ram_save_queue_pages()
1925 * safe to access without lock, because when rp-thread is running in ram_save_queue_pages()
1928 pss->pss_channel = migrate_get_current()->postcopy_qemufile_src; in ram_save_queue_pages()
1929 assert(pss->pss_channel); in ram_save_queue_pages()
1940 ramblock->idstr, start); in ram_save_queue_pages()
1941 ret = -1; in ram_save_queue_pages()
1945 * NOTE: after ram_save_host_page_urgent() succeeded, pss->page in ram_save_queue_pages()
1953 len -= page_size; in ram_save_queue_pages()
1955 qemu_mutex_unlock(&rs->bitmap_mutex); in ram_save_queue_pages()
1962 new_entry->rb = ramblock; in ram_save_queue_pages()
1963 new_entry->offset = start; in ram_save_queue_pages()
1964 new_entry->len = len; in ram_save_queue_pages()
1966 memory_region_ref(ramblock->mr); in ram_save_queue_pages()
1967 qemu_mutex_lock(&rs->src_page_req_mutex); in ram_save_queue_pages()
1968 QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req); in ram_save_queue_pages()
1970 qemu_mutex_unlock(&rs->src_page_req_mutex); in ram_save_queue_pages()
1984 ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS; in ram_save_target_page()
1989 res = rdma_control_save_page(pss->pss_channel, pss->block->offset, in ram_save_target_page()
2006 return ram_save_multifd_page(pss->block, offset); in ram_save_target_page()
2016 size_t guest_pfns = qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS; in pss_host_page_prepare()
2018 pss->host_page_sending = true; in pss_host_page_prepare()
2028 pss->host_page_start = pss->page; in pss_host_page_prepare()
2029 pss->host_page_end = pss->page + 1; in pss_host_page_prepare()
2035 pss->host_page_start = ROUND_DOWN(pss->page, guest_pfns); in pss_host_page_prepare()
2036 pss->host_page_end = ROUND_UP(pss->page + 1, guest_pfns); in pss_host_page_prepare()
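pss_host_page_prepare() clamps the current target-page index to the enclosing host page with ROUND_DOWN/ROUND_UP when the host page is larger than the target page. A tiny standalone illustration of that arithmetic; the 2 MiB hugepage size is just an example value.

#include <stdio.h>

#define TARGET_PAGE_SIZE 4096UL
#define HOST_PAGE_SIZE   (2UL * 1024 * 1024)   /* e.g. a 2 MiB hugepage */

int main(void)
{
    unsigned long guest_pfns = HOST_PAGE_SIZE / TARGET_PAGE_SIZE; /* 512 */
    unsigned long page = 1000;                 /* arbitrary dirty page */

    unsigned long host_start = (page / guest_pfns) * guest_pfns;   /* 512 */
    unsigned long host_end   = host_start + guest_pfns;            /* 1024 */

    printf("target page %lu lives in host page [%lu, %lu)\n",
           page, host_start, host_end);
    return 0;
}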
2048 assert(pss->host_page_sending); in pss_within_range()
2050 /* Over host-page boundary? */ in pss_within_range()
2051 if (pss->page >= pss->host_page_end) { in pss_within_range()
2055 ram_addr = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS; in pss_within_range()
2057 return offset_in_ramblock(pss->block, ram_addr); in pss_within_range()
2062 pss->host_page_sending = false; in pss_host_page_finish()
2064 pss->host_page_start = pss->host_page_end = 0; in pss_host_page_finish()
2069 PageLocationHint *hint = &rs->page_hint; in ram_page_hint_update()
2072 if (hint->valid) { in ram_page_hint_update()
2077 hint->location.block = pss->block; in ram_page_hint_update()
2078 hint->location.offset = pss->page; in ram_page_hint_update()
2079 hint->valid = true; in ram_page_hint_update()
2094 trace_postcopy_preempt_send_host_page(pss->block->idstr, pss->page); in ram_save_host_page_urgent()
2102 if (pss_overlap(pss, &ram_state->pss[RAM_CHANNEL_PRECOPY])) { in ram_save_host_page_urgent()
2103 trace_postcopy_preempt_hit(pss->block->idstr, in ram_save_host_page_urgent()
2104 pss->page << TARGET_PAGE_BITS); in ram_save_host_page_urgent()
2109 page_dirty = migration_bitmap_clear_dirty(rs, pss->block, pss->page); in ram_save_host_page_urgent()
2115 ret = -1; in ram_save_host_page_urgent()
2126 qemu_fflush(pss->pss_channel); in ram_save_host_page_urgent()
2158 qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS; in ram_save_host_page()
2159 unsigned long start_page = pss->page; in ram_save_host_page()
2162 if (migrate_ram_is_ignored(pss->block)) { in ram_save_host_page()
2163 error_report("block %s should not be migrated !", pss->block->idstr); in ram_save_host_page()
2171 page_dirty = migration_bitmap_clear_dirty(rs, pss->block, pss->page); in ram_save_host_page()
2177 * because both migration thread and rp-return thread can in ram_save_host_page()
2181 qemu_mutex_unlock(&rs->bitmap_mutex); in ram_save_host_page()
2195 qemu_mutex_lock(&rs->bitmap_mutex); in ram_save_host_page()
2222 return rs->page_hint.valid; in ram_page_hint_valid()
2228 PageLocationHint *hint = &rs->page_hint; in ram_page_hint_collect()
2230 assert(hint->valid); in ram_page_hint_collect()
2232 *block = hint->location.block; in ram_page_hint_collect()
2233 *page = hint->location.offset; in ram_page_hint_collect()
2236 hint->valid = false; in ram_page_hint_collect()
2249 * On systems where host-page-size > target-page-size it will send all the
2254 PageSearchStatus *pss = &rs->pss[RAM_CHANNEL_PRECOPY]; in ram_find_and_save_block()
2260 if (!rs->ram_bytes_total) { in ram_find_and_save_block()
2271 if (!rs->last_seen_block) { in ram_find_and_save_block()
2272 rs->last_seen_block = QLIST_FIRST_RCU(&ram_list.blocks); in ram_find_and_save_block()
2273 rs->last_page = 0; in ram_find_and_save_block()
2279 next_block = rs->last_seen_block; in ram_find_and_save_block()
2280 next_page = rs->last_page; in ram_find_and_save_block()
2306 rs->last_seen_block = pss->block; in ram_find_and_save_block()
2307 rs->last_page = pss->page; in ram_find_and_save_block()
2320 total += block->used_length; in ram_bytes_total_with_ignored()
2333 total += block->used_length; in ram_bytes_total()
2353 qemu_mutex_destroy(&(*rsp)->bitmap_mutex); in ram_state_cleanup()
2354 qemu_mutex_destroy(&(*rsp)->src_page_req_mutex); in ram_state_cleanup()
2381 g_free(block->clear_bmap); in ram_bitmaps_destroy()
2382 block->clear_bmap = NULL; in ram_bitmaps_destroy()
2383 g_free(block->bmap); in ram_bitmaps_destroy()
2384 block->bmap = NULL; in ram_bitmaps_destroy()
2385 g_free(block->file_bmap); in ram_bitmaps_destroy()
2386 block->file_bmap = NULL; in ram_bitmaps_destroy()
2401 * do not stop dirty log without starting it, since in ram_save_cleanup()
2403 * memory_global_dirty_log_start/stop used in pairs in ram_save_cleanup()
2418 hint->location.block = NULL; in ram_page_hint_reset()
2419 hint->location.offset = 0; in ram_page_hint_reset()
2420 hint->valid = false; in ram_page_hint_reset()
2428 rs->pss[i].last_sent_block = NULL; in ram_state_reset()
2431 rs->last_seen_block = NULL; in ram_state_reset()
2432 rs->last_page = 0; in ram_state_reset()
2433 rs->last_version = ram_list.version; in ram_state_reset()
2434 rs->xbzrle_started = false; in ram_state_reset()
2436 ram_page_hint_reset(&rs->page_hint); in ram_state_reset()
2448 unsigned long *bitmap = block->bmap; in ram_postcopy_migrated_memory_release()
2449 unsigned long range = block->used_length >> TARGET_PAGE_BITS; in ram_postcopy_migrated_memory_release()
2454 ram_discard_range(block->idstr, in ram_postcopy_migrated_memory_release()
2456 ((ram_addr_t)(run_end - run_start)) in ram_postcopy_migrated_memory_release()
2473 unsigned long end = block->used_length >> TARGET_PAGE_BITS; in postcopy_send_discard_bm_ram()
2475 unsigned long *bitmap = block->bmap; in postcopy_send_discard_bm_ram()
2488 discard_length = end - one; in postcopy_send_discard_bm_ram()
2490 discard_length = zero - one; in postcopy_send_discard_bm_ram()
2515 postcopy_discard_send_init(ms, block->idstr); in postcopy_each_ram_send_discard()
2519 * host-page size chunks, mark any partially dirty host-page size in postcopy_each_ram_send_discard()
2520 * chunks as all dirty. In this case the host-page is the host-page in postcopy_each_ram_send_discard()
2551 unsigned long *bitmap = block->bmap; in postcopy_chunk_hostpages_pass()
2552 unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE; in postcopy_chunk_hostpages_pass()
2553 unsigned long pages = block->used_length >> TARGET_PAGE_BITS; in postcopy_chunk_hostpages_pass()
2556 if (block->page_size == TARGET_PAGE_SIZE) { in postcopy_chunk_hostpages_pass()
2557 /* Easy case - TPS==HPS for a non-huge page RAMBlock */ in postcopy_chunk_hostpages_pass()
2593 rs->migration_dirty_pages += !test_and_set_bit(page, bitmap); in postcopy_chunk_hostpages_pass()
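postcopy_chunk_hostpages_pass() widens partially dirty host pages so that each host page is either fully clean or fully dirty before discards are sent, with host_ratio target pages per host page. A byte-per-bit sketch of that pass, using an invented helper name:

#include <stddef.h>

static void chunk_to_host_pages(unsigned char *dirty, size_t pages,
                                size_t host_ratio)
{
    for (size_t hp = 0; hp < pages; hp += host_ratio) {
        int any_dirty = 0;

        for (size_t i = hp; i < hp + host_ratio && i < pages; i++) {
            any_dirty |= dirty[i];
        }
        if (!any_dirty) {
            continue;
        }
        for (size_t i = hp; i < hp + host_ratio && i < pages; i++) {
            dirty[i] = 1;          /* the whole host page must be (re)sent */
        }
    }
}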
2624 /* Easiest way to make sure we don't resume in the middle of a host-page */ in ram_postcopy_send_discard_bitmap()
2625 rs->pss[RAM_CHANNEL_PRECOPY].last_sent_block = NULL; in ram_postcopy_send_discard_bitmap()
2626 rs->last_seen_block = NULL; in ram_postcopy_send_discard_bitmap()
2627 rs->last_page = 0; in ram_postcopy_send_discard_bitmap()
2653 return -1; in ram_discard_range()
2660 if (rb->receivedmap) { in ram_discard_range()
2661 bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(), in ram_discard_range()
2731 qemu_mutex_init(&(*rsp)->bitmap_mutex); in ram_state_init()
2732 qemu_mutex_init(&(*rsp)->src_page_req_mutex); in ram_state_init()
2733 QSIMPLEQ_INIT(&(*rsp)->src_page_requests); in ram_state_init()
2734 (*rsp)->ram_bytes_total = ram_bytes_total(); in ram_state_init()
2741 (*rsp)->migration_dirty_pages = (*rsp)->ram_bytes_total >> TARGET_PAGE_BITS; in ram_state_init()
2756 shift = ms->clear_bitmap_shift; in ram_list_init_bitmaps()
2768 pages = block->max_length >> TARGET_PAGE_BITS; in ram_list_init_bitmaps()
2778 block->bmap = bitmap_new(pages); in ram_list_init_bitmaps()
2779 bitmap_set(block->bmap, 0, pages); in ram_list_init_bitmaps()
2781 block->file_bmap = bitmap_new(pages); in ram_list_init_bitmaps()
2783 block->clear_bmap_shift = shift; in ram_list_init_bitmaps()
2784 block->clear_bmap = bitmap_new(clear_bmap_size(pages, shift)); in ram_list_init_bitmaps()
2798 rs->migration_dirty_pages -= pages; in migration_bitmap_clear_discarded_pages()
2838 return -1; in ram_init_all()
2843 return -1; in ram_init_all()
2847 return -1; in ram_init_all()
2865 pages += bitmap_count_one(block->bmap, in ram_state_resume_prepare()
2866 block->used_length >> TARGET_PAGE_BITS); in ram_state_resume_prepare()
2870 rs->migration_dirty_pages = pages; in ram_state_resume_prepare()
2875 rs->pss[RAM_CHANNEL_PRECOPY].pss_channel = out; in ram_state_resume_prepare()
2897 for (; len > 0; len -= used_len, addr += used_len) { in qemu_guest_free_page_hint()
2899 if (unlikely(!block || offset >= block->used_length)) { in qemu_guest_free_page_hint()
2909 if (len <= block->used_length - offset) { in qemu_guest_free_page_hint()
2912 used_len = block->used_length - offset; in qemu_guest_free_page_hint()
2918 qemu_mutex_lock(&ram_state->bitmap_mutex); in qemu_guest_free_page_hint()
2926 ram_state->migration_dirty_pages -= in qemu_guest_free_page_hint()
2927 bitmap_count_one_with_offset(block->bmap, start, npages); in qemu_guest_free_page_hint()
2928 bitmap_clear(block->bmap, start, npages); in qemu_guest_free_page_hint()
2929 qemu_mutex_unlock(&ram_state->bitmap_mutex); in qemu_guest_free_page_hint()
2963 num_pages = block->used_length >> TARGET_PAGE_BITS; in mapped_ram_setup_ramblock()
2971 block->bitmap_offset = qemu_get_offset(file) + header_size; in mapped_ram_setup_ramblock()
2972 block->pages_offset = ROUND_UP(block->bitmap_offset + in mapped_ram_setup_ramblock()
2976 header->version = cpu_to_be32(MAPPED_RAM_HDR_VERSION); in mapped_ram_setup_ramblock()
2977 header->page_size = cpu_to_be64(TARGET_PAGE_SIZE); in mapped_ram_setup_ramblock()
2978 header->bitmap_offset = cpu_to_be64(block->bitmap_offset); in mapped_ram_setup_ramblock()
2979 header->pages_offset = cpu_to_be64(block->pages_offset); in mapped_ram_setup_ramblock()
2984 qemu_set_offset(file, block->pages_offset + block->used_length, SEEK_SET); in mapped_ram_setup_ramblock()
2994 error_setg(errp, "Could not read whole mapped-ram migration header " in mapped_ram_read_header()
2999 /* migration stream is big-endian */ in mapped_ram_read_header()
3000 header->version = be32_to_cpu(header->version); in mapped_ram_read_header()
3002 if (header->version > MAPPED_RAM_HDR_VERSION) { in mapped_ram_read_header()
3003 error_setg(errp, "Migration mapped-ram capability version not " in mapped_ram_read_header()
3005 header->version); in mapped_ram_read_header()
3009 header->page_size = be64_to_cpu(header->page_size); in mapped_ram_read_header()
3010 header->bitmap_offset = be64_to_cpu(header->bitmap_offset); in mapped_ram_read_header()
3011 header->pages_offset = be64_to_cpu(header->pages_offset); in mapped_ram_read_header()
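mapped_ram_read_header() reads the per-RAMBlock header of a mapped-ram file and byte-swaps every field, since the migration stream is big-endian. A sketch of parsing such a header from a raw buffer, assuming the four fields shown in the fragments are packed back to back in that order (the exact QEMU on-disk layout is not reproduced here):

#include <stdint.h>

struct mapped_hdr {
    uint32_t version;
    uint64_t page_size;
    uint64_t bitmap_offset;
    uint64_t pages_offset;
};

static uint32_t be32(const uint8_t *p)
{
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] << 8) | p[3];
}

static uint64_t be64(const uint8_t *p)
{
    return ((uint64_t)be32(p) << 32) | be32(p + 4);
}

/* 'buf' holds at least 28 bytes read from the stream. */
static void parse_mapped_hdr(const uint8_t *buf, struct mapped_hdr *h)
{
    h->version       = be32(buf);
    h->page_size     = be64(buf + 4);
    h->bitmap_offset = be64(buf + 12);
    h->pages_offset  = be64(buf + 20);
}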
3018 * long-running RCU critical section. When rcu-reclaims in the code
3041 return -1; in ram_save_setup()
3044 (*rsp)->pss[RAM_CHANNEL_PRECOPY].pss_channel = f; in ram_save_setup()
3057 qemu_put_byte(f, strlen(block->idstr)); in ram_save_setup()
3058 qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr)); in ram_save_setup()
3059 qemu_put_be64(f, block->used_length); in ram_save_setup()
3061 block->page_size != max_hg_page_size) { in ram_save_setup()
3062 qemu_put_be64(f, block->page_size); in ram_save_setup()
3065 qemu_put_be64(f, block->mr->addr); in ram_save_setup()
3083 error_setg(errp, "%s: failed to stop RDMA registration", __func__); in ram_save_setup()
3095 * For legacy QEMUs using per-section sync in ram_save_setup()
3099 * per-channel to work. in ram_save_setup()
3101 * For modern QEMUs using per-round sync in ram_save_setup()
3128 error_setg_errno(errp, -ret, "%s failed", __func__); in ram_save_setup()
3138 long num_pages = block->used_length >> TARGET_PAGE_BITS; in ram_save_file_bmap()
3141 qemu_put_buffer_at(f, (uint8_t *)block->file_bmap, bitmap_size, in ram_save_file_bmap()
3142 block->bitmap_offset); in ram_save_file_bmap()
3150 g_free(block->file_bmap); in ram_save_file_bmap()
3151 block->file_bmap = NULL; in ram_save_file_bmap()
3158 set_bit_atomic(offset >> TARGET_PAGE_BITS, block->file_bmap); in ramblock_set_file_bmap_atomic()
3160 clear_bit_atomic(offset >> TARGET_PAGE_BITS, block->file_bmap); in ramblock_set_file_bmap_atomic()
3188 WITH_QEMU_LOCK_GUARD(&rs->bitmap_mutex) { in ram_save_iterate()
3190 if (ram_list.version != rs->last_version) { in ram_save_iterate()
3225 rs->target_page_count += pages; in ram_save_iterate()
3234 uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / in ram_save_iterate()
3291 rs->last_stage = !migration_in_colo_state(); in ram_save_complete()
3307 qemu_mutex_lock(&rs->bitmap_mutex); in ram_save_complete()
3317 qemu_mutex_unlock(&rs->bitmap_mutex); in ram_save_complete()
3321 qemu_mutex_unlock(&rs->bitmap_mutex); in ram_save_complete()
3349 return -err; in ram_save_complete()
3363 uint64_t remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE; in ram_state_pending_estimate()
3388 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE; in ram_state_pending_exact()
3409 error_report("Failed to load XBZRLE page - wrong compression!"); in load_xbzrle()
3410 return -1; in load_xbzrle()
3414 error_report("Failed to load XBZRLE page - len overflow!"); in load_xbzrle()
3415 return -1; in load_xbzrle()
3424 TARGET_PAGE_SIZE) == -1) { in load_xbzrle()
3425 error_report("Failed to load XBZRLE page - decode error!"); in load_xbzrle()
3426 return -1; in load_xbzrle()
3437 * Returns a pointer from within the RCU-protected ram_list.
3448 RAMBlock *block = mis->last_recv_block[channel]; in ram_block_from_stream()
3454 error_report("Ack, bad migration stream!"); in ram_block_from_stream()
3475 mis->last_recv_block[channel] = block; in ram_block_from_stream()
3487 return block->host + offset; in host_from_ram_block_offset()
3494 return (void *)QEMU_ALIGN_DOWN((uintptr_t)(block->host + offset), in host_page_from_ram_block_offset()
3495 block->page_size); in host_page_from_ram_block_offset()
3501 return ((uintptr_t)block->host + offset) & (block->page_size - 1); in host_page_offset_from_ram_block_offset()
3506 qemu_mutex_lock(&ram_state->bitmap_mutex); in colo_record_bitmap()
3509 ram_state->migration_dirty_pages += !test_and_set_bit( in colo_record_bitmap()
3511 block->bmap); in colo_record_bitmap()
3513 qemu_mutex_unlock(&ram_state->bitmap_mutex); in colo_record_bitmap()
3522 if (!block->colo_cache) { in colo_cache_from_block_offset()
3524 __func__, block->idstr); in colo_cache_from_block_offset()
3536 return block->colo_cache + offset; in colo_cache_from_block_offset()
3576 block->colo_cache = qemu_anon_ram_alloc(block->used_length, in colo_init_ram_cache()
3578 if (!block->colo_cache) { in colo_init_ram_cache()
3580 "size 0x" RAM_ADDR_FMT, __func__, block->idstr, in colo_init_ram_cache()
3581 block->used_length); in colo_init_ram_cache()
3583 if (block->colo_cache) { in colo_init_ram_cache()
3584 qemu_anon_ram_free(block->colo_cache, block->used_length); in colo_init_ram_cache()
3585 block->colo_cache = NULL; in colo_init_ram_cache()
3588 return -errno; in colo_init_ram_cache()
3591 qemu_madvise(block->colo_cache, block->used_length, in colo_init_ram_cache()
3604 unsigned long pages = block->max_length >> TARGET_PAGE_BITS; in colo_init_ram_cache()
3605 block->bmap = bitmap_new(pages); in colo_init_ram_cache()
3628 bitmap_zero(block->bmap, block->max_length >> TARGET_PAGE_BITS); in colo_incoming_start_dirty_log()
3635 ram_state->migration_dirty_pages = 0; in colo_incoming_start_dirty_log()
3647 g_free(block->bmap); in colo_release_ram_cache()
3648 block->bmap = NULL; in colo_release_ram_cache()
3653 if (block->colo_cache) { in colo_release_ram_cache()
3654 qemu_anon_ram_free(block->colo_cache, block->used_length); in colo_release_ram_cache()
3655 block->colo_cache = NULL; in colo_release_ram_cache()
3684 if (memory_region_is_nonvolatile(rb->mr)) { in ram_load_cleanup()
3692 g_free(rb->receivedmap); in ram_load_cleanup()
3693 rb->receivedmap = NULL; in ram_load_cleanup()
3707 * postcopy-ram. postcopy-ram's similarly names
3718 * Returns 0 for success or -errno in case of error
3732 PostcopyTmpPage *tmp_page = &mis->postcopy_tmp_pages[channel]; in ram_load_postcopy()
3744 * If qemu file error, we should stop here, and then "addr" in ram_load_postcopy()
3759 ret = -EINVAL; in ram_load_postcopy()
3766 * while in postcopy, which is fine - trying to place via in ram_load_postcopy()
3769 if (!block->host || addr >= block->postcopy_length) { in ram_load_postcopy()
3771 ret = -EINVAL; in ram_load_postcopy()
3774 tmp_page->target_pages++; in ram_load_postcopy()
3775 matches_target_page_size = block->page_size == TARGET_PAGE_SIZE; in ram_load_postcopy()
3782 * The migration protocol uses, possibly smaller, target-pages in ram_load_postcopy()
3786 page_buffer = tmp_page->tmp_huge_page + in ram_load_postcopy()
3789 if (tmp_page->target_pages == 1) { in ram_load_postcopy()
3790 tmp_page->host_addr = in ram_load_postcopy()
3792 } else if (tmp_page->host_addr != in ram_load_postcopy()
3795 error_report("Non-same host page detected on channel %d: " in ram_load_postcopy()
3798 channel, tmp_page->host_addr, in ram_load_postcopy()
3800 block->idstr, addr, tmp_page->target_pages); in ram_load_postcopy()
3801 ret = -EINVAL; in ram_load_postcopy()
3809 if (tmp_page->target_pages == in ram_load_postcopy()
3810 (block->page_size / TARGET_PAGE_SIZE)) { in ram_load_postcopy()
3813 place_source = tmp_page->tmp_huge_page; in ram_load_postcopy()
3821 ret = -EINVAL; in ram_load_postcopy()
3826 * this is a zero page and (block->page_size == TARGET_PAGE_SIZE). in ram_load_postcopy()
3834 tmp_page->all_zero = false; in ram_load_postcopy()
3856 ret = -EINVAL; in ram_load_postcopy()
3866 if (tmp_page->all_zero) { in ram_load_postcopy()
3867 ret = postcopy_place_page_zero(mis, tmp_page->host_addr, block); in ram_load_postcopy()
3869 ret = postcopy_place_page(mis, tmp_page->host_addr, in ram_load_postcopy()
3898 qemu_mutex_lock(&ram_state->bitmap_mutex); in colo_flush_ram_cache()
3905 trace_colo_flush_ram_cache_begin(ram_state->migration_dirty_pages); in colo_flush_ram_cache()
3924 dst_host = block->host in colo_flush_ram_cache()
3926 src_host = block->colo_cache in colo_flush_ram_cache()
3933 qemu_mutex_unlock(&ram_state->bitmap_mutex); in colo_flush_ram_cache()
3942 data->opaque = host_addr; in ram_load_multifd_pages()
3943 data->file_offset = offset; in ram_load_multifd_pages()
3944 data->size = size; in ram_load_multifd_pages()
3969 unread = TARGET_PAGE_SIZE * (clear_bit_idx - set_bit_idx); in read_ramblock_mapped_ram()
3976 block->idstr); in read_ramblock_mapped_ram()
3984 block->pages_offset + offset); in read_ramblock_mapped_ram()
3987 block->pages_offset + offset); in read_ramblock_mapped_ram()
3994 unread -= read; in read_ramblock_mapped_ram()
4003 "from file offset %" PRIx64 ": ", block->idstr, offset, in read_ramblock_mapped_ram()
4004 block->pages_offset + offset); in read_ramblock_mapped_ram()
4020 block->pages_offset = header.pages_offset; in parse_ramblock_mapped_ram()
4028 if (!QEMU_IS_ALIGNED(block->pages_offset, TARGET_PAGE_SIZE)) { in parse_ramblock_mapped_ram()
4031 block->idstr); in parse_ramblock_mapped_ram()
4050 qemu_set_offset(f, block->pages_offset + length, SEEK_SET); in parse_ramblock_mapped_ram()
4067 return -EINVAL; in parse_ramblock()
4073 error_report("block %s should not be migrated !", block->idstr); in parse_ramblock()
4074 return -EINVAL; in parse_ramblock()
4077 if (length != block->used_length) { in parse_ramblock()
4093 block->page_size != max_hg_page_size) { in parse_ramblock()
4095 if (remote_page_size != block->page_size) { in parse_ramblock()
4097 "(local) %zd != %" PRId64, block->idstr, in parse_ramblock()
4098 block->page_size, remote_page_size); in parse_ramblock()
4099 return -EINVAL; in parse_ramblock()
4105 block->mr->addr != addr) { in parse_ramblock()
4107 "%" PRId64 "!= %" PRId64, block->idstr, in parse_ramblock()
4108 (uint64_t)addr, (uint64_t)block->mr->addr); in parse_ramblock()
4109 return -EINVAL; in parse_ramblock()
4112 ret = rdma_block_notification_handle(f, block->idstr); in parse_ramblock()
4141 ret = -EINVAL; in parse_ramblocks()
4143 total_ram_bytes -= length; in parse_ramblocks()
4152 * Returns 0 for success or -errno in case of error
4199 ret = -EINVAL; in ram_load_precopy()
4214 * while we need to stop VM, which is a time-consuming process. in ram_load_precopy()
4215 * Here we optimize it by a trick, back-up every page while in in ram_load_precopy()
4218 * back-up all SVM'S memory in COLO preparing stage. in ram_load_precopy()
4234 ret = -EINVAL; in ram_load_precopy()
4241 trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host); in ram_load_precopy()
4248 * For mapped-ram migration (to a file) using multifd, we sync in ram_load_precopy()
4263 ret = -EINVAL; in ram_load_precopy()
4277 ret = -EINVAL; in ram_load_precopy()
4289 * Mapped-ram migration flushes once and for all after in ram_load_precopy()
4304 ret = -EINVAL; in ram_load_precopy()
4330 return -EINVAL; in ram_load()
4363 "is not supported now!", rb->idstr, rb->host); in ram_has_postcopy()
4375 QEMUFile *file = s->to_dst_file; in ram_dirty_bitmap_sync_all()
4379 qatomic_set(&rs->postcopy_bmap_sync_requested, 0); in ram_dirty_bitmap_sync_all()
4381 qemu_savevm_send_recv_bitmap(file, block->idstr); in ram_dirty_bitmap_sync_all()
4382 trace_ram_dirty_bitmap_request(block->idstr); in ram_dirty_bitmap_sync_all()
4383 qatomic_inc(&rs->postcopy_bmap_sync_requested); in ram_dirty_bitmap_sync_all()
4389 while (qatomic_read(&rs->postcopy_bmap_sync_requested)) { in ram_dirty_bitmap_sync_all()
4391 return -1; in ram_dirty_bitmap_sync_all()
4410 QEMUFile *file = s->rp_state.from_dst_file; in ram_dirty_bitmap_reload()
4412 unsigned long nbits = block->used_length >> TARGET_PAGE_BITS; in ram_dirty_bitmap_reload()
4417 trace_ram_dirty_bitmap_reload_begin(block->idstr); in ram_dirty_bitmap_reload()
4419 if (s->state != MIGRATION_STATUS_POSTCOPY_RECOVER) { in ram_dirty_bitmap_reload()
4421 MigrationStatus_str(s->state)); in ram_dirty_bitmap_reload()
4439 " != 0x%"PRIx64")", block->idstr, size, local_size); in ram_dirty_bitmap_reload()
4449 block->idstr, local_size, size); in ram_dirty_bitmap_reload()
4455 block->idstr, end_mark); in ram_dirty_bitmap_reload()
4463 bitmap_from_le(block->bmap, le_bitmap, nbits); in ram_dirty_bitmap_reload()
4469 bitmap_complement(block->bmap, block->bmap, nbits); in ram_dirty_bitmap_reload()
4475 trace_ram_dirty_bitmap_reload_complete(block->idstr); in ram_dirty_bitmap_reload()
4477 qatomic_dec(&rs->postcopy_bmap_sync_requested); in ram_dirty_bitmap_reload()
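ram_dirty_bitmap_reload() pulls the destination's received bitmap over the return path during postcopy recovery, converts it from the little-endian wire layout (bitmap_from_le) and then inverts it (bitmap_complement), because a page the destination already holds no longer needs to be marked dirty on the source. A byte-per-bit sketch of that final inversion step:

#include <stddef.h>

static void recv_bitmap_to_dirty(unsigned char *dirty,
                                 const unsigned char *received,
                                 size_t nbits)
{
    for (size_t i = 0; i < nbits; i++) {
        dirty[i] = !received[i];   /* not yet received => still to send */
    }
}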
4501 ram_state_resume_prepare(rs, s->to_dst_file); in ram_resume_prepare()
4517 * threads is still non-atomic, so the load cannot happen with vCPUs in ram_save_postcopy_prepare()
4544 qemu_put_be64(s->postcopy_qemufile_src, RAM_SAVE_FLAG_EOS); in postcopy_preempt_shutdown_file()
4545 qemu_fflush(s->postcopy_qemufile_src); in postcopy_preempt_shutdown_file()
4584 * changing at random points in time - especially after sending the in ram_mig_ram_block_resized()
4588 error_setg(&err, "RAM block '%s' resized during precopy.", rb->idstr); in ram_mig_ram_block_resized()
4598 * Update what ram_postcopy_incoming_init()->init_range() does at the in ram_mig_ram_block_resized()
4603 if (ram_discard_range(rb->idstr, old_size, new_size - old_size)) { in ram_mig_ram_block_resized()
4605 rb->idstr); in ram_mig_ram_block_resized()
4608 rb->postcopy_length = new_size; in ram_mig_ram_block_resized()
4621 rb->idstr, ps); in ram_mig_ram_block_resized()
4622 exit(-1); in ram_mig_ram_block_resized()