/*
 * Postcopy migration for RAM
 *
 * Copyright 2013-2015 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Dave Gilbert <dgilbert@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

/*
 * Postcopy is a migration technique where the execution flips from the
 * source to the destination before all the data has been copied.
 */

#include "qemu/osdep.h"
#include "exec/target_page.h"
#include "migration.h"
#include "qemu-file.h"
#include "savevm.h"
#include "postcopy-ram.h"
#include "ram.h"
#include "qapi/error.h"
#include "qemu/notify.h"
#include "sysemu/sysemu.h"
#include "sysemu/balloon.h"
#include "qemu/error-report.h"
#include "trace.h"

/* Arbitrary limit on size of each discard command,
 * keeps them around ~200 bytes
 */
#define MAX_DISCARDS_PER_COMMAND 12

struct PostcopyDiscardState {
    const char *ramblock_name;
    uint16_t cur_entry;
    /*
     * Start and length of a discard range (bytes)
     */
    uint64_t start_list[MAX_DISCARDS_PER_COMMAND];
    uint64_t length_list[MAX_DISCARDS_PER_COMMAND];
    unsigned int nsentwords;
    unsigned int nsentcmds;
};

static NotifierWithReturnList postcopy_notifier_list;

void postcopy_infrastructure_init(void)
{
    notifier_with_return_list_init(&postcopy_notifier_list);
}

void postcopy_add_notifier(NotifierWithReturn *nn)
{
    notifier_with_return_list_add(&postcopy_notifier_list, nn);
}

void postcopy_remove_notifier(NotifierWithReturn *n)
{
    notifier_with_return_remove(n);
}

int postcopy_notify(enum PostcopyNotifyReason reason, Error **errp)
{
    struct PostcopyNotifyData pnd;
    pnd.reason = reason;
    pnd.errp = errp;

    return notifier_with_return_list_notify(&postcopy_notifier_list,
                                            &pnd);
}

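/*
 * Illustrative only: a device that wants to take part in (or veto) postcopy
 * registers a NotifierWithReturn with postcopy_add_notifier() and inspects
 * the PostcopyNotifyData it is passed.  A hypothetical handler might look
 * like this (my_dev_postcopy_notifier and my_dev_supports_postcopy are
 * made-up names used purely for illustration, not QEMU APIs):
 *
 *   static int my_dev_postcopy_notifier(NotifierWithReturn *n, void *data)
 *   {
 *       struct PostcopyNotifyData *pnd = data;
 *
 *       if (pnd->reason == POSTCOPY_NOTIFY_PROBE &&
 *           !my_dev_supports_postcopy()) {
 *           error_setg(pnd->errp, "my-dev: postcopy not supported");
 *           return -1;  // a non-zero return is reported back to the caller
 *       }
 *       return 0;
 *   }
 */
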
/* Postcopy needs to detect accesses to pages that haven't yet been copied
 * across, and efficiently map new pages in, the techniques for doing this
 * are target OS specific.
 */
#if defined(__linux__)

#include <poll.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <asm/types.h> /* for __u64 */
#endif

#if defined(__linux__) && defined(__NR_userfaultfd) && defined(CONFIG_EVENTFD)
#include <sys/eventfd.h>
#include <linux/userfaultfd.h>


/**
 * receive_ufd_features: check userfault fd features, to request only supported
 * features in the future.
 *
 * Returns: true on success
 *
 * __NR_userfaultfd availability must have been checked by the caller.
 * @features: out parameter, set to uffdio_api.features as provided by the
 *            kernel on success
 */
static bool receive_ufd_features(uint64_t *features)
{
    struct uffdio_api api_struct = {0};
    int ufd;
    bool ret = true;

    /* if we are here, __NR_userfaultfd should exist */
    ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: syscall __NR_userfaultfd failed: %s", __func__,
                     strerror(errno));
        return false;
    }

    /* ask features */
    api_struct.api = UFFD_API;
    api_struct.features = 0;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("%s: UFFDIO_API failed: %s", __func__,
                     strerror(errno));
        ret = false;
        goto release_ufd;
    }

    *features = api_struct.features;

release_ufd:
    close(ufd);
    return ret;
}

/**
 * request_ufd_features: this function should be called only once on a newly
 * opened ufd, subsequent calls will lead to error.
 *
 * Returns: true on success
 *
 * @ufd: fd obtained from userfaultfd syscall
 * @features: bit mask, see UFFD_API_FEATURES
 */
static bool request_ufd_features(int ufd, uint64_t features)
{
    struct uffdio_api api_struct = {0};
    uint64_t ioctl_mask;

    api_struct.api = UFFD_API;
    api_struct.features = features;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("%s failed: UFFDIO_API failed: %s", __func__,
                     strerror(errno));
        return false;
    }

    ioctl_mask = (__u64)1 << _UFFDIO_REGISTER |
                 (__u64)1 << _UFFDIO_UNREGISTER;
    if ((api_struct.ioctls & ioctl_mask) != ioctl_mask) {
        error_report("Missing userfault features: %" PRIx64,
                     (uint64_t)(~api_struct.ioctls & ioctl_mask));
        return false;
    }

    return true;
}

static bool ufd_check_and_apply(int ufd, MigrationIncomingState *mis)
{
    uint64_t asked_features = 0;
    static uint64_t supported_features;

    /*
     * UFFD_API can't be requested twice on the same fd, and the feature
     * set the kernel supports doesn't change, so query it once and cache
     * the result.
     */
    if (!supported_features) {
        if (!receive_ufd_features(&supported_features)) {
            error_report("%s failed", __func__);
            return false;
        }
    }

    /*
     * Request the features even if asked_features is 0; the kernel expects
     * UFFD_API to be issued on each userfault file descriptor before any
     * UFFDIO_REGISTER.
     */
    if (!request_ufd_features(ufd, asked_features)) {
        error_report("%s failed: features %" PRIu64, __func__,
                     asked_features);
        return false;
    }

    if (getpagesize() != ram_pagesize_summary()) {
        bool have_hp = false;
        /* We've got a huge page */
#ifdef UFFD_FEATURE_MISSING_HUGETLBFS
        have_hp = supported_features & UFFD_FEATURE_MISSING_HUGETLBFS;
#endif
        if (!have_hp) {
            error_report("Userfault on this host does not support huge pages");
            return false;
        }
    }
    return true;
}

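/*
 * For reference, the handshake the helpers above perform on a userfault fd
 * boils down to the following (error handling omitted; a sketch only, not a
 * drop-in replacement):
 *
 *   int ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
 *   struct uffdio_api api = { .api = UFFD_API, .features = 0 };
 *   ioctl(ufd, UFFDIO_API, &api);  // once per fd, before any UFFDIO_REGISTER
 *   // api.features now holds the kernel's supported feature bits, and
 *   // api.ioctls should include _UFFDIO_REGISTER and _UFFDIO_UNREGISTER.
 */
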
/* Callback from postcopy_ram_supported_by_host block iterator.
 */
static int test_ramblock_postcopiable(const char *block_name, void *host_addr,
                                      ram_addr_t offset, ram_addr_t length,
                                      void *opaque)
{
    RAMBlock *rb = qemu_ram_block_by_name(block_name);
    size_t pagesize = qemu_ram_pagesize(rb);

    if (qemu_ram_is_shared(rb)) {
        error_report("Postcopy on shared RAM (%s) is not yet supported",
                     block_name);
        return 1;
    }

    if (length % pagesize) {
        error_report("Postcopy requires RAM blocks to be a page size multiple,"
                     " block %s is 0x" RAM_ADDR_FMT " bytes with a "
                     "page size of 0x%zx", block_name, length, pagesize);
        return 1;
    }
    return 0;
}

/*
 * Note: This has the side effect of munlock'ing all of RAM, that's
 * normally fine since if the postcopy succeeds it gets turned back on at the
 * end.
 */
bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
{
    long pagesize = getpagesize();
    int ufd = -1;
    bool ret = false; /* Error unless we change it */
    void *testarea = NULL;
    struct uffdio_register reg_struct;
    struct uffdio_range range_struct;
    uint64_t feature_mask;
    Error *local_err = NULL;

    if (qemu_target_page_size() > pagesize) {
        error_report("Target page size bigger than host page size");
        goto out;
    }

    ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: userfaultfd not available: %s", __func__,
                     strerror(errno));
        goto out;
    }

    /* Give devices a chance to object */
    if (postcopy_notify(POSTCOPY_NOTIFY_PROBE, &local_err)) {
        error_report_err(local_err);
        goto out;
    }

    /* Version and features check */
    if (!ufd_check_and_apply(ufd, mis)) {
        goto out;
    }

    /* We don't support postcopy with shared RAM yet */
    if (qemu_ram_foreach_block(test_ramblock_postcopiable, NULL)) {
        goto out;
    }

    /*
     * userfault and mlock don't go together; we'll put it back later if
     * it was enabled.
     */
    if (munlockall()) {
        error_report("%s: munlockall: %s", __func__, strerror(errno));
        goto out;
    }

    /*
     * We need to check that the ops we need are supported on anon memory
     * To do that we need to register a chunk and see the flags that
     * are returned.
     */
    testarea = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE |
                    MAP_ANONYMOUS, -1, 0);
    if (testarea == MAP_FAILED) {
        error_report("%s: Failed to map test area: %s", __func__,
                     strerror(errno));
        goto out;
    }
    g_assert(((size_t)testarea & (pagesize - 1)) == 0);

    reg_struct.range.start = (uintptr_t)testarea;
    reg_struct.range.len = pagesize;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    if (ioctl(ufd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        goto out;
    }

    range_struct.start = (uintptr_t)testarea;
    range_struct.len = pagesize;
    if (ioctl(ufd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s userfault unregister: %s", __func__, strerror(errno));
        goto out;
    }

    feature_mask = (__u64)1 << _UFFDIO_WAKE |
                   (__u64)1 << _UFFDIO_COPY |
                   (__u64)1 << _UFFDIO_ZEROPAGE;
    if ((reg_struct.ioctls & feature_mask) != feature_mask) {
        error_report("Missing userfault map features: %" PRIx64,
                     (uint64_t)(~reg_struct.ioctls & feature_mask));
        goto out;
    }

    /* Success! */
    ret = true;
out:
    if (testarea) {
        munmap(testarea, pagesize);
    }
    if (ufd != -1) {
        close(ufd);
    }
    return ret;
}

/*
 * Setup an area of RAM so that it *can* be used for postcopy later; this
 * must be done right at the start prior to pre-copy.
 * opaque should be the MIS.
 */
static int init_range(const char *block_name, void *host_addr,
                      ram_addr_t offset, ram_addr_t length, void *opaque)
{
    trace_postcopy_init_range(block_name, host_addr, offset, length);

    /*
     * We need the whole of RAM to be truly empty for postcopy, so things
     * like ROMs and any data tables built during init must be zero'd
     * - we're going to get the copy from the source anyway.
     * (Precopy will just overwrite this data, so doesn't need the discard)
     */
    if (ram_discard_range(block_name, 0, length)) {
        return -1;
    }

    return 0;
}

/*
 * At the end of migration, undo the effects of init_range
 * opaque should be the MIS.
 */
static int cleanup_range(const char *block_name, void *host_addr,
                         ram_addr_t offset, ram_addr_t length, void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_range range_struct;
    trace_postcopy_cleanup_range(block_name, host_addr, offset, length);

    /*
     * We turned off hugepage for the precopy stage with postcopy enabled
     * we can turn it back on now.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_HUGEPAGE);

    /*
     * We can also turn off userfault now since we should have all the
     * pages.   It can be useful to leave it on to debug postcopy
     * if you're not sure it's always getting every page.
     */
    range_struct.start = (uintptr_t)host_addr;
    range_struct.len = length;

    if (ioctl(mis->userfault_fd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s: userfault unregister %s", __func__, strerror(errno));

        return -1;
    }

    return 0;
}

/*
 * Initialise postcopy-ram, setting the RAM to a state where we can go into
 * postcopy later; must be called prior to any precopy.
 * called from arch_init's similarly named ram_postcopy_incoming_init
 */
int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
{
    if (qemu_ram_foreach_block(init_range, NULL)) {
        return -1;
    }

    return 0;
}

/*
 * At the end of a migration where postcopy_ram_incoming_init was called.
 */
int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    trace_postcopy_ram_incoming_cleanup_entry();

    if (mis->have_fault_thread) {
        if (qemu_ram_foreach_block(cleanup_range, mis)) {
            return -1;
        }
        /* Let the fault thread quit */
        atomic_set(&mis->fault_thread_quit, 1);
        postcopy_fault_thread_notify(mis);
        trace_postcopy_ram_incoming_cleanup_join();
        qemu_thread_join(&mis->fault_thread);

        trace_postcopy_ram_incoming_cleanup_closeuf();
        close(mis->userfault_fd);
        close(mis->userfault_event_fd);
        mis->have_fault_thread = false;
    }

    qemu_balloon_inhibit(false);

    if (enable_mlock) {
        if (os_mlock() < 0) {
            error_report("mlock: %s", strerror(errno));
            /*
             * It doesn't feel right to fail at this point, we have a valid
             * VM state.
             */
        }
    }

    postcopy_state_set(POSTCOPY_INCOMING_END);

    if (mis->postcopy_tmp_page) {
        munmap(mis->postcopy_tmp_page, mis->largest_page_size);
        mis->postcopy_tmp_page = NULL;
    }
    if (mis->postcopy_tmp_zero_page) {
        munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size);
        mis->postcopy_tmp_zero_page = NULL;
    }
    trace_postcopy_ram_incoming_cleanup_exit();
    return 0;
}

/*
 * Disable huge pages on an area
 */
static int nhp_range(const char *block_name, void *host_addr,
                     ram_addr_t offset, ram_addr_t length, void *opaque)
{
    trace_postcopy_nhp_range(block_name, host_addr, offset, length);

    /*
     * Before we do discards we need to ensure those discards really
     * do delete areas of the page, even if THP thinks a hugepage would
     * be a good idea, so force hugepages off.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_NOHUGEPAGE);

    return 0;
}

/*
 * Userfault requires us to mark RAM as NOHUGEPAGE prior to discarding;
 * however, leaving it until after precopy means that most of the precopy
 * data is still THP backed.
 */
int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    if (qemu_ram_foreach_block(nhp_range, mis)) {
        return -1;
    }

    postcopy_state_set(POSTCOPY_INCOMING_DISCARD);

    return 0;
}

/*
 * Mark the given area of RAM as requiring notification of accesses to
 * unwritten areas.
 * Used as a callback on qemu_ram_foreach_block.
 *   host_addr: Base of area to mark
 *   offset: Offset in the whole ram arena
 *   length: Length of the section
 *   opaque: MigrationIncomingState pointer
 * Returns 0 on success
 */
static int ram_block_enable_notify(const char *block_name, void *host_addr,
                                   ram_addr_t offset, ram_addr_t length,
                                   void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_register reg_struct;

    reg_struct.range.start = (uintptr_t)host_addr;
    reg_struct.range.len = length;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    /* Now tell our userfault_fd that it's responsible for this area */
    if (ioctl(mis->userfault_fd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        return -1;
    }
    if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
        error_report("%s userfault: Region doesn't support COPY", __func__);
        return -1;
    }
    if (reg_struct.ioctls & ((__u64)1 << _UFFDIO_ZEROPAGE)) {
        RAMBlock *rb = qemu_ram_block_by_name(block_name);
        qemu_ram_set_uf_zeroable(rb);
    }

    return 0;
}

/*
 * Handle faults detected by the USERFAULT markings
 */
static void *postcopy_ram_fault_thread(void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffd_msg msg;
    int ret;
    size_t index;
    RAMBlock *rb = NULL;
    RAMBlock *last_rb = NULL; /* last RAMBlock we sent part of */

    trace_postcopy_ram_fault_thread_entry();
    qemu_sem_post(&mis->fault_thread_sem);

    struct pollfd *pfd;
    size_t pfd_len = 2 + mis->postcopy_remote_fds->len;

    pfd = g_new0(struct pollfd, pfd_len);

    pfd[0].fd = mis->userfault_fd;
    pfd[0].events = POLLIN;
    pfd[1].fd = mis->userfault_event_fd;
    pfd[1].events = POLLIN; /* Waiting for eventfd to go positive */
    trace_postcopy_ram_fault_thread_fds_core(pfd[0].fd, pfd[1].fd);
    for (index = 0; index < mis->postcopy_remote_fds->len; index++) {
        struct PostCopyFD *pcfd = &g_array_index(mis->postcopy_remote_fds,
                                                 struct PostCopyFD, index);
        pfd[2 + index].fd = pcfd->fd;
        pfd[2 + index].events = POLLIN;
        trace_postcopy_ram_fault_thread_fds_extra(2 + index, pcfd->idstr,
                                                  pcfd->fd);
    }

    while (true) {
        ram_addr_t rb_offset;
        int poll_result;

        /*
         * We're mainly waiting for the kernel to give us a faulting HVA,
         * however we can be told to quit via userfault_event_fd, which is
         * an eventfd
         */

        poll_result = poll(pfd, pfd_len, -1 /* Wait forever */);
        if (poll_result == -1) {
            error_report("%s: userfault poll: %s", __func__, strerror(errno));
            break;
        }

        if (pfd[1].revents) {
            uint64_t tmp64 = 0;

            /* Consume the signal */
            if (read(mis->userfault_event_fd, &tmp64, 8) != 8) {
                /* Nothing obviously nicer than posting this error. */
                error_report("%s: read() failed", __func__);
            }

            if (atomic_read(&mis->fault_thread_quit)) {
                trace_postcopy_ram_fault_thread_quit();
                break;
            }
        }

        if (pfd[0].revents) {
            poll_result--;
            ret = read(mis->userfault_fd, &msg, sizeof(msg));
            if (ret != sizeof(msg)) {
                if (errno == EAGAIN) {
                    /*
                     * if a wake up happens on the other thread just after
                     * the poll, there is nothing to read.
                     */
                    continue;
                }
                if (ret < 0) {
                    error_report("%s: Failed to read full userfault "
                                 "message: %s",
                                 __func__, strerror(errno));
                    break;
                } else {
                    error_report("%s: Read %d bytes from userfaultfd "
                                 "expected %zd",
                                 __func__, ret, sizeof(msg));
                    break; /* Lost alignment, don't know what we'd read next */
                }
            }
            if (msg.event != UFFD_EVENT_PAGEFAULT) {
                error_report("%s: Read unexpected event %ud from userfaultfd",
                             __func__, msg.event);
                continue; /* It's not a page fault, shouldn't happen */
            }

            rb = qemu_ram_block_from_host(
                     (void *)(uintptr_t)msg.arg.pagefault.address,
                     true, &rb_offset);
            if (!rb) {
                error_report("postcopy_ram_fault_thread: Fault outside guest: %"
                             PRIx64, (uint64_t)msg.arg.pagefault.address);
                break;
            }

            rb_offset &= ~(qemu_ram_pagesize(rb) - 1);
            trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address,
                                                    qemu_ram_get_idstr(rb),
                                                    rb_offset);
            /*
             * Send the request to the source - we want to request one
             * of our host page sizes (which is >= TPS)
             */
            if (rb != last_rb) {
                last_rb = rb;
                migrate_send_rp_req_pages(mis, qemu_ram_get_idstr(rb),
                                          rb_offset, qemu_ram_pagesize(rb));
            } else {
                /* Save some space */
                migrate_send_rp_req_pages(mis, NULL,
                                          rb_offset, qemu_ram_pagesize(rb));
            }
        }

        /* Now handle any requests from external processes on shared memory */
        /* TODO: May need to handle devices deregistering during postcopy */
        for (index = 2; index < pfd_len && poll_result; index++) {
            if (pfd[index].revents) {
                struct PostCopyFD *pcfd =
                    &g_array_index(mis->postcopy_remote_fds,
                                   struct PostCopyFD, index - 2);

                poll_result--;
                if (pfd[index].revents & POLLERR) {
                    error_report("%s: POLLERR on poll %zd fd=%d",
                                 __func__, index, pcfd->fd);
                    pfd[index].events = 0;
                    continue;
                }

                ret = read(pcfd->fd, &msg, sizeof(msg));
                if (ret != sizeof(msg)) {
                    if (errno == EAGAIN) {
                        /*
                         * if a wake up happens on the other thread just after
                         * the poll, there is nothing to read.
                         */
                        continue;
                    }
                    if (ret < 0) {
                        error_report("%s: Failed to read full userfault "
                                     "message: %s (shared) revents=%d",
                                     __func__, strerror(errno),
                                     pfd[index].revents);
                        /* TODO: Could just disable this sharer */
                        break;
                    } else {
                        error_report("%s: Read %d bytes from userfaultfd "
                                     "expected %zd (shared)",
                                     __func__, ret, sizeof(msg));
                        /* TODO: Could just disable this sharer */
                        break; /* Lost alignment, don't know what we'd read next */
                    }
                }
                if (msg.event != UFFD_EVENT_PAGEFAULT) {
                    error_report("%s: Read unexpected event %ud "
                                 "from userfaultfd (shared)",
                                 __func__, msg.event);
                    continue; /* It's not a page fault, shouldn't happen */
                }
                /* Call the device handler registered with us */
                ret = pcfd->handler(pcfd, &msg);
                if (ret) {
                    error_report("%s: Failed to resolve shared fault on %zd/%s",
                                 __func__, index, pcfd->idstr);
                    /* TODO: Fail? Disable this sharer? */
                }
            }
        }
    }
    trace_postcopy_ram_fault_thread_exit();
    return NULL;
}

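/*
 * To summarise the loop above: each userfault gives us a faulting host
 * virtual address; we round it down to the owning RAMBlock's page size and
 * ask the source for that page over the return path with
 * migrate_send_rp_req_pages().  When the page arrives it is installed with
 * postcopy_place_page()/postcopy_place_page_zero() below, and the
 * UFFDIO_COPY/UFFDIO_ZEROPAGE ioctl wakes the thread(s) stalled on it.
 */
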
int postcopy_ram_enable_notify(MigrationIncomingState *mis)
{
    /* Open the fd for the kernel to give us userfaults */
    mis->userfault_fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    if (mis->userfault_fd == -1) {
        error_report("%s: Failed to open userfault fd: %s", __func__,
                     strerror(errno));
        return -1;
    }

    /*
     * Although the host check already tested the API, we need to
     * do the check again as an ABI handshake on the new fd.
     */
    if (!ufd_check_and_apply(mis->userfault_fd, mis)) {
        return -1;
    }

    /* Now an eventfd we use to tell the fault-thread to quit */
    mis->userfault_event_fd = eventfd(0, EFD_CLOEXEC);
    if (mis->userfault_event_fd == -1) {
        error_report("%s: Opening userfault_event_fd: %s", __func__,
                     strerror(errno));
        close(mis->userfault_fd);
        return -1;
    }

    qemu_sem_init(&mis->fault_thread_sem, 0);
    qemu_thread_create(&mis->fault_thread, "postcopy/fault",
                       postcopy_ram_fault_thread, mis, QEMU_THREAD_JOINABLE);
    qemu_sem_wait(&mis->fault_thread_sem);
    qemu_sem_destroy(&mis->fault_thread_sem);
    mis->have_fault_thread = true;

    /* Mark so that we get notified of accesses to unwritten areas */
    if (qemu_ram_foreach_block(ram_block_enable_notify, mis)) {
        return -1;
    }

    /*
     * Ballooning can mark pages as absent while we're postcopying
     * that would cause false userfaults.
     */
    qemu_balloon_inhibit(true);

    trace_postcopy_ram_enable_notify();

    return 0;
}

static int qemu_ufd_copy_ioctl(int userfault_fd, void *host_addr,
                               void *from_addr, uint64_t pagesize, RAMBlock *rb)
{
    int ret;
    if (from_addr) {
        struct uffdio_copy copy_struct;
        copy_struct.dst = (uint64_t)(uintptr_t)host_addr;
        copy_struct.src = (uint64_t)(uintptr_t)from_addr;
        copy_struct.len = pagesize;
        copy_struct.mode = 0;
        ret = ioctl(userfault_fd, UFFDIO_COPY, &copy_struct);
    } else {
        struct uffdio_zeropage zero_struct;
        zero_struct.range.start = (uint64_t)(uintptr_t)host_addr;
        zero_struct.range.len = pagesize;
        zero_struct.mode = 0;
        ret = ioctl(userfault_fd, UFFDIO_ZEROPAGE, &zero_struct);
    }
    if (!ret) {
        ramblock_recv_bitmap_set_range(rb, host_addr,
                                       pagesize / qemu_target_page_size());
    }
    return ret;
}

/*
 * Place a host page (from) at (host) atomically
 * returns 0 on success
 */
int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
                        RAMBlock *rb)
{
    size_t pagesize = qemu_ram_pagesize(rb);

    /* copy also acks to the kernel waking the stalled thread up
     * TODO: We can inhibit that ack and only do it if it was requested
     * which would be slightly cheaper, but we'd have to be careful
     * of the order of updating our page state.
     */
    if (qemu_ufd_copy_ioctl(mis->userfault_fd, host, from, pagesize, rb)) {
        int e = errno;
        error_report("%s: %s copy host: %p from: %p (size: %zd)",
                     __func__, strerror(e), host, from, pagesize);

        return -e;
    }

    trace_postcopy_place_page(host);
    return 0;
}

/*
 * Place a zero page at (host) atomically
 * returns 0 on success
 */
int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
                             RAMBlock *rb)
{
    size_t pagesize = qemu_ram_pagesize(rb);
    trace_postcopy_place_page_zero(host);

    /* Normal RAMBlocks can zero a page using UFFDIO_ZEROPAGE
     * but it's not available for everything (e.g. hugetlbpages)
     */
    if (qemu_ram_is_uf_zeroable(rb)) {
        if (qemu_ufd_copy_ioctl(mis->userfault_fd, host, NULL, pagesize, rb)) {
            int e = errno;
            error_report("%s: %s zero host: %p",
                         __func__, strerror(e), host);

            return -e;
        }
    } else {
        /* The kernel can't use UFFDIO_ZEROPAGE for hugepages */
        if (!mis->postcopy_tmp_zero_page) {
            mis->postcopy_tmp_zero_page = mmap(NULL, mis->largest_page_size,
                                               PROT_READ | PROT_WRITE,
                                               MAP_PRIVATE | MAP_ANONYMOUS,
                                               -1, 0);
            if (mis->postcopy_tmp_zero_page == MAP_FAILED) {
                int e = errno;
                mis->postcopy_tmp_zero_page = NULL;
                error_report("%s: %s mapping large zero page",
                             __func__, strerror(e));
                return -e;
            }
            memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size);
        }
        return postcopy_place_page(mis, host, mis->postcopy_tmp_zero_page,
                                   rb);
    }

    return 0;
}

/*
 * Returns a target page of memory that can be mapped at a later point in time
 * using postcopy_place_page
 * The same address is used repeatedly, postcopy_place_page just takes the
 * backing page away.
 * Returns: Pointer to allocated page
 *
 */
void *postcopy_get_tmp_page(MigrationIncomingState *mis)
{
    if (!mis->postcopy_tmp_page) {
        mis->postcopy_tmp_page = mmap(NULL, mis->largest_page_size,
                                      PROT_READ | PROT_WRITE, MAP_PRIVATE |
                                      MAP_ANONYMOUS, -1, 0);
        if (mis->postcopy_tmp_page == MAP_FAILED) {
            mis->postcopy_tmp_page = NULL;
            error_report("%s: %s", __func__, strerror(errno));
            return NULL;
        }
    }

    return mis->postcopy_tmp_page;
}

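/*
 * Sketch of how the incoming side is expected to use the two helpers above
 * (the real caller lives in the RAM load path; this is not a literal copy):
 *
 *   void *tmp = postcopy_get_tmp_page(mis);
 *   ...read the incoming page data from the migration stream into tmp...
 *   postcopy_place_page(mis, host_addr, tmp, rb);
 *
 * The temporary page is reused for every page; UFFDIO_COPY moves its
 * contents into place atomically, so the guest never observes a partially
 * written page.
 */
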
#else
/* No target OS support, stubs just fail */
bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
{
    error_report("%s: No OS support", __func__);
    return false;
}

int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
{
    error_report("postcopy_ram_incoming_init: No OS support");
    return -1;
}

int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_ram_enable_notify(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
                        RAMBlock *rb)
{
    assert(0);
    return -1;
}

int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
                             RAMBlock *rb)
{
    assert(0);
    return -1;
}

void *postcopy_get_tmp_page(MigrationIncomingState *mis)
{
    assert(0);
    return NULL;
}

#endif

/* ------------------------------------------------------------------------- */

void postcopy_fault_thread_notify(MigrationIncomingState *mis)
{
    uint64_t tmp64 = 1;

    /*
     * Wakeup the fault_thread. It's an eventfd that should currently
     * be at 0, we're going to increment it to 1
     */
    if (write(mis->userfault_event_fd, &tmp64, 8) != 8) {
        /* Not much we can do here, but may as well report it */
        error_report("%s: incrementing failed: %s", __func__,
                     strerror(errno));
    }
}

/**
 * postcopy_discard_send_init: Called at the start of each RAMBlock before
 *   asking to discard individual ranges.
 *
 * @ms: The current migration state.
 * @name: RAMBlock that discards will operate on.
 *
 * returns: a new PDS.
 */
PostcopyDiscardState *postcopy_discard_send_init(MigrationState *ms,
                                                 const char *name)
{
    PostcopyDiscardState *res = g_malloc0(sizeof(PostcopyDiscardState));

    if (res) {
        res->ramblock_name = name;
    }

    return res;
}

/**
 * postcopy_discard_send_range: Called by the bitmap code for each chunk to
 *   discard. May send a discard message, may just leave it queued to
 *   be sent later.
 *
 * @ms: Current migration state.
 * @pds: Structure initialised by postcopy_discard_send_init().
 * @start,@length: a range of pages in the migration bitmap in the
 *   RAM block passed to postcopy_discard_send_init() (length=1 is one page)
 */
void postcopy_discard_send_range(MigrationState *ms, PostcopyDiscardState *pds,
                                 unsigned long start, unsigned long length)
{
    size_t tp_size = qemu_target_page_size();
    /* Convert to byte offsets within the RAM block */
    pds->start_list[pds->cur_entry] = start * tp_size;
    pds->length_list[pds->cur_entry] = length * tp_size;
    trace_postcopy_discard_send_range(pds->ramblock_name, start, length);
    pds->cur_entry++;
    pds->nsentwords++;

    if (pds->cur_entry == MAX_DISCARDS_PER_COMMAND) {
        /* Full set, ship it! */
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
                                              pds->ramblock_name,
                                              pds->cur_entry,
                                              pds->start_list,
                                              pds->length_list);
        pds->nsentcmds++;
        pds->cur_entry = 0;
    }
}

/**
 * postcopy_discard_send_finish: Called at the end of each RAMBlock by the
 *   bitmap code. Sends any outstanding discard messages, frees the PDS
 *
 * @ms: Current migration state.
 * @pds: Structure initialised by postcopy_discard_send_init().
 */
void postcopy_discard_send_finish(MigrationState *ms, PostcopyDiscardState *pds)
{
    /* Anything unsent? */
    if (pds->cur_entry) {
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
                                              pds->ramblock_name,
                                              pds->cur_entry,
                                              pds->start_list,
                                              pds->length_list);
        pds->nsentcmds++;
    }

    trace_postcopy_discard_send_finish(pds->ramblock_name, pds->nsentwords,
                                       pds->nsentcmds);

    g_free(pds);
}

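/*
 * Taken together, the expected calling pattern for the discard API above is,
 * per RAMBlock on the source side (a sketch of the convention, not a literal
 * caller):
 *
 *   PostcopyDiscardState *pds = postcopy_discard_send_init(ms, block_name);
 *   ...for each run of target pages to discard on the destination...
 *       postcopy_discard_send_range(ms, pds, start_page, nr_pages);
 *   postcopy_discard_send_finish(ms, pds);  // flush anything queued, free pds
 */
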
/*
 * Current state of incoming postcopy; note this is not part of
 * MigrationIncomingState since its state is used during cleanup
 * at the end as MIS is being freed.
 */
static PostcopyState incoming_postcopy_state;

PostcopyState postcopy_state_get(void)
{
    return atomic_mb_read(&incoming_postcopy_state);
}

/* Set the state and return the old state */
PostcopyState postcopy_state_set(PostcopyState new_state)
{
    return atomic_xchg(&incoming_postcopy_state, new_state);
}

/* Register a handler for external shared memory postcopy
 * called on the destination.
 */
void postcopy_register_shared_ufd(struct PostCopyFD *pcfd)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    mis->postcopy_remote_fds = g_array_append_val(mis->postcopy_remote_fds,
                                                  *pcfd);
}

/* Unregister a handler for external shared memory postcopy
 */
void postcopy_unregister_shared_ufd(struct PostCopyFD *pcfd)
{
    guint i;
    MigrationIncomingState *mis = migration_incoming_get_current();
    GArray *pcrfds = mis->postcopy_remote_fds;

    for (i = 0; i < pcrfds->len; i++) {
        struct PostCopyFD *cur = &g_array_index(pcrfds, struct PostCopyFD, i);
        if (cur->fd == pcfd->fd) {
            mis->postcopy_remote_fds = g_array_remove_index(pcrfds, i);
            return;
        }
    }
}
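
/*
 * Note on the shared-memory hooks above: an external back-end that maps
 * guest RAM (a vhost-user style client, for example) is expected to provide
 * its own userfaultfd wrapped in a struct PostCopyFD, with .fd, .idstr and
 * a .handler callback filled in, and to register it on the destination with
 * postcopy_register_shared_ufd(); the fault thread then polls that fd and
 * calls the handler for every message it reads.  This describes the intended
 * flow rather than giving a worked example.
 */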