/*
 * Postcopy migration for RAM
 *
 * Copyright 2013-2015 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Dave Gilbert  <dgilbert@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

/*
 * Postcopy is a migration technique where the execution flips from the
 * source to the destination before all the data has been copied.
 */

#include "qemu/osdep.h"
#include "exec/target_page.h"
#include "migration.h"
#include "qemu-file.h"
#include "savevm.h"
#include "postcopy-ram.h"
#include "ram.h"
#include "qapi/error.h"
#include "qemu/notify.h"
#include "sysemu/sysemu.h"
#include "sysemu/balloon.h"
#include "qemu/error-report.h"
#include "trace.h"

/* Arbitrary limit on size of each discard command,
 * keeps them around ~200 bytes
 */
David Alan Gilbert #define MAX_DISCARDS_PER_COMMAND 12 37e0b266f0SDr. David Alan Gilbert 38e0b266f0SDr. David Alan Gilbert struct PostcopyDiscardState { 39e0b266f0SDr. David Alan Gilbert const char *ramblock_name; 40e0b266f0SDr. David Alan Gilbert uint16_t cur_entry; 41e0b266f0SDr. David Alan Gilbert /* 42e0b266f0SDr. David Alan Gilbert * Start and length of a discard range (bytes) 43e0b266f0SDr. David Alan Gilbert */ 44e0b266f0SDr. David Alan Gilbert uint64_t start_list[MAX_DISCARDS_PER_COMMAND]; 45e0b266f0SDr. David Alan Gilbert uint64_t length_list[MAX_DISCARDS_PER_COMMAND]; 46e0b266f0SDr. David Alan Gilbert unsigned int nsentwords; 47e0b266f0SDr. David Alan Gilbert unsigned int nsentcmds; 48e0b266f0SDr. David Alan Gilbert }; 49e0b266f0SDr. David Alan Gilbert 50*1693c64cSDr. David Alan Gilbert static NotifierWithReturnList postcopy_notifier_list; 51*1693c64cSDr. David Alan Gilbert 52*1693c64cSDr. David Alan Gilbert void postcopy_infrastructure_init(void) 53*1693c64cSDr. David Alan Gilbert { 54*1693c64cSDr. David Alan Gilbert notifier_with_return_list_init(&postcopy_notifier_list); 55*1693c64cSDr. David Alan Gilbert } 56*1693c64cSDr. David Alan Gilbert 57*1693c64cSDr. David Alan Gilbert void postcopy_add_notifier(NotifierWithReturn *nn) 58*1693c64cSDr. David Alan Gilbert { 59*1693c64cSDr. David Alan Gilbert notifier_with_return_list_add(&postcopy_notifier_list, nn); 60*1693c64cSDr. David Alan Gilbert } 61*1693c64cSDr. David Alan Gilbert 62*1693c64cSDr. David Alan Gilbert void postcopy_remove_notifier(NotifierWithReturn *n) 63*1693c64cSDr. David Alan Gilbert { 64*1693c64cSDr. David Alan Gilbert notifier_with_return_remove(n); 65*1693c64cSDr. David Alan Gilbert } 66*1693c64cSDr. David Alan Gilbert 67*1693c64cSDr. David Alan Gilbert int postcopy_notify(enum PostcopyNotifyReason reason, Error **errp) 68*1693c64cSDr. David Alan Gilbert { 69*1693c64cSDr. David Alan Gilbert struct PostcopyNotifyData pnd; 70*1693c64cSDr. 
David Alan Gilbert pnd.reason = reason; 71*1693c64cSDr. David Alan Gilbert pnd.errp = errp; 72*1693c64cSDr. David Alan Gilbert 73*1693c64cSDr. David Alan Gilbert return notifier_with_return_list_notify(&postcopy_notifier_list, 74*1693c64cSDr. David Alan Gilbert &pnd); 75*1693c64cSDr. David Alan Gilbert } 76*1693c64cSDr. David Alan Gilbert 77eb59db53SDr. David Alan Gilbert /* Postcopy needs to detect accesses to pages that haven't yet been copied 78eb59db53SDr. David Alan Gilbert * across, and efficiently map new pages in, the techniques for doing this 79eb59db53SDr. David Alan Gilbert * are target OS specific. 80eb59db53SDr. David Alan Gilbert */ 81eb59db53SDr. David Alan Gilbert #if defined(__linux__) 82eb59db53SDr. David Alan Gilbert 83c4faeed2SDr. David Alan Gilbert #include <poll.h> 84eb59db53SDr. David Alan Gilbert #include <sys/ioctl.h> 85eb59db53SDr. David Alan Gilbert #include <sys/syscall.h> 86eb59db53SDr. David Alan Gilbert #include <asm/types.h> /* for __u64 */ 87eb59db53SDr. David Alan Gilbert #endif 88eb59db53SDr. David Alan Gilbert 89d8b9d771SMatthew Fortune #if defined(__linux__) && defined(__NR_userfaultfd) && defined(CONFIG_EVENTFD) 90d8b9d771SMatthew Fortune #include <sys/eventfd.h> 91eb59db53SDr. David Alan Gilbert #include <linux/userfaultfd.h> 92eb59db53SDr. David Alan Gilbert 93ca6011c2SAlexey Perevalov 9454ae0886SAlexey Perevalov /** 9554ae0886SAlexey Perevalov * receive_ufd_features: check userfault fd features, to request only supported 9654ae0886SAlexey Perevalov * features in the future. 
9754ae0886SAlexey Perevalov * 9854ae0886SAlexey Perevalov * Returns: true on success 9954ae0886SAlexey Perevalov * 10054ae0886SAlexey Perevalov * __NR_userfaultfd - should be checked before 10154ae0886SAlexey Perevalov * @features: out parameter will contain uffdio_api.features provided by kernel 10254ae0886SAlexey Perevalov * in case of success 10354ae0886SAlexey Perevalov */ 10454ae0886SAlexey Perevalov static bool receive_ufd_features(uint64_t *features) 10554ae0886SAlexey Perevalov { 10654ae0886SAlexey Perevalov struct uffdio_api api_struct = {0}; 10754ae0886SAlexey Perevalov int ufd; 10854ae0886SAlexey Perevalov bool ret = true; 10954ae0886SAlexey Perevalov 11054ae0886SAlexey Perevalov /* if we are here __NR_userfaultfd should exists */ 11154ae0886SAlexey Perevalov ufd = syscall(__NR_userfaultfd, O_CLOEXEC); 11254ae0886SAlexey Perevalov if (ufd == -1) { 11354ae0886SAlexey Perevalov error_report("%s: syscall __NR_userfaultfd failed: %s", __func__, 11454ae0886SAlexey Perevalov strerror(errno)); 11554ae0886SAlexey Perevalov return false; 11654ae0886SAlexey Perevalov } 11754ae0886SAlexey Perevalov 11854ae0886SAlexey Perevalov /* ask features */ 119eb59db53SDr. David Alan Gilbert api_struct.api = UFFD_API; 120eb59db53SDr. David Alan Gilbert api_struct.features = 0; 121eb59db53SDr. David Alan Gilbert if (ioctl(ufd, UFFDIO_API, &api_struct)) { 1225553499fSAlexey Perevalov error_report("%s: UFFDIO_API failed: %s", __func__, 123eb59db53SDr. 
David Alan Gilbert strerror(errno)); 12454ae0886SAlexey Perevalov ret = false; 12554ae0886SAlexey Perevalov goto release_ufd; 12654ae0886SAlexey Perevalov } 12754ae0886SAlexey Perevalov 12854ae0886SAlexey Perevalov *features = api_struct.features; 12954ae0886SAlexey Perevalov 13054ae0886SAlexey Perevalov release_ufd: 13154ae0886SAlexey Perevalov close(ufd); 13254ae0886SAlexey Perevalov return ret; 13354ae0886SAlexey Perevalov } 13454ae0886SAlexey Perevalov 13554ae0886SAlexey Perevalov /** 13654ae0886SAlexey Perevalov * request_ufd_features: this function should be called only once on a newly 13754ae0886SAlexey Perevalov * opened ufd, subsequent calls will lead to error. 13854ae0886SAlexey Perevalov * 13954ae0886SAlexey Perevalov * Returns: true on succes 14054ae0886SAlexey Perevalov * 14154ae0886SAlexey Perevalov * @ufd: fd obtained from userfaultfd syscall 14254ae0886SAlexey Perevalov * @features: bit mask see UFFD_API_FEATURES 14354ae0886SAlexey Perevalov */ 14454ae0886SAlexey Perevalov static bool request_ufd_features(int ufd, uint64_t features) 14554ae0886SAlexey Perevalov { 14654ae0886SAlexey Perevalov struct uffdio_api api_struct = {0}; 14754ae0886SAlexey Perevalov uint64_t ioctl_mask; 14854ae0886SAlexey Perevalov 14954ae0886SAlexey Perevalov api_struct.api = UFFD_API; 15054ae0886SAlexey Perevalov api_struct.features = features; 15154ae0886SAlexey Perevalov if (ioctl(ufd, UFFDIO_API, &api_struct)) { 15254ae0886SAlexey Perevalov error_report("%s failed: UFFDIO_API failed: %s", __func__, 15354ae0886SAlexey Perevalov strerror(errno)); 154eb59db53SDr. David Alan Gilbert return false; 155eb59db53SDr. David Alan Gilbert } 156eb59db53SDr. David Alan Gilbert 157eb59db53SDr. David Alan Gilbert ioctl_mask = (__u64)1 << _UFFDIO_REGISTER | 158eb59db53SDr. David Alan Gilbert (__u64)1 << _UFFDIO_UNREGISTER; 159eb59db53SDr. David Alan Gilbert if ((api_struct.ioctls & ioctl_mask) != ioctl_mask) { 160eb59db53SDr. 
David Alan Gilbert error_report("Missing userfault features: %" PRIx64, 161eb59db53SDr. David Alan Gilbert (uint64_t)(~api_struct.ioctls & ioctl_mask)); 162eb59db53SDr. David Alan Gilbert return false; 163eb59db53SDr. David Alan Gilbert } 164eb59db53SDr. David Alan Gilbert 16554ae0886SAlexey Perevalov return true; 16654ae0886SAlexey Perevalov } 16754ae0886SAlexey Perevalov 16854ae0886SAlexey Perevalov static bool ufd_check_and_apply(int ufd, MigrationIncomingState *mis) 16954ae0886SAlexey Perevalov { 17054ae0886SAlexey Perevalov uint64_t asked_features = 0; 17154ae0886SAlexey Perevalov static uint64_t supported_features; 17254ae0886SAlexey Perevalov 17354ae0886SAlexey Perevalov /* 17454ae0886SAlexey Perevalov * it's not possible to 17554ae0886SAlexey Perevalov * request UFFD_API twice per one fd 17654ae0886SAlexey Perevalov * userfault fd features is persistent 17754ae0886SAlexey Perevalov */ 17854ae0886SAlexey Perevalov if (!supported_features) { 17954ae0886SAlexey Perevalov if (!receive_ufd_features(&supported_features)) { 18054ae0886SAlexey Perevalov error_report("%s failed", __func__); 18154ae0886SAlexey Perevalov return false; 18254ae0886SAlexey Perevalov } 18354ae0886SAlexey Perevalov } 18454ae0886SAlexey Perevalov 18554ae0886SAlexey Perevalov /* 18654ae0886SAlexey Perevalov * request features, even if asked_features is 0, due to 18754ae0886SAlexey Perevalov * kernel expects UFFD_API before UFFDIO_REGISTER, per 18854ae0886SAlexey Perevalov * userfault file descriptor 18954ae0886SAlexey Perevalov */ 19054ae0886SAlexey Perevalov if (!request_ufd_features(ufd, asked_features)) { 19154ae0886SAlexey Perevalov error_report("%s failed: features %" PRIu64, __func__, 19254ae0886SAlexey Perevalov asked_features); 19354ae0886SAlexey Perevalov return false; 19454ae0886SAlexey Perevalov } 19554ae0886SAlexey Perevalov 1967e8cafb7SDr. David Alan Gilbert if (getpagesize() != ram_pagesize_summary()) { 1977e8cafb7SDr. David Alan Gilbert bool have_hp = false; 1987e8cafb7SDr. 
David Alan Gilbert /* We've got a huge page */ 1997e8cafb7SDr. David Alan Gilbert #ifdef UFFD_FEATURE_MISSING_HUGETLBFS 20054ae0886SAlexey Perevalov have_hp = supported_features & UFFD_FEATURE_MISSING_HUGETLBFS; 2017e8cafb7SDr. David Alan Gilbert #endif 2027e8cafb7SDr. David Alan Gilbert if (!have_hp) { 2037e8cafb7SDr. David Alan Gilbert error_report("Userfault on this host does not support huge pages"); 2047e8cafb7SDr. David Alan Gilbert return false; 2057e8cafb7SDr. David Alan Gilbert } 2067e8cafb7SDr. David Alan Gilbert } 207eb59db53SDr. David Alan Gilbert return true; 208eb59db53SDr. David Alan Gilbert } 209eb59db53SDr. David Alan Gilbert 2108679638bSDr. David Alan Gilbert /* Callback from postcopy_ram_supported_by_host block iterator. 2118679638bSDr. David Alan Gilbert */ 2125d214a92SDr. David Alan Gilbert static int test_ramblock_postcopiable(const char *block_name, void *host_addr, 2138679638bSDr. David Alan Gilbert ram_addr_t offset, ram_addr_t length, void *opaque) 2148679638bSDr. David Alan Gilbert { 2155d214a92SDr. David Alan Gilbert RAMBlock *rb = qemu_ram_block_by_name(block_name); 2165d214a92SDr. David Alan Gilbert size_t pagesize = qemu_ram_pagesize(rb); 2175d214a92SDr. David Alan Gilbert 2185d214a92SDr. David Alan Gilbert if (qemu_ram_is_shared(rb)) { 2198679638bSDr. David Alan Gilbert error_report("Postcopy on shared RAM (%s) is not yet supported", 2208679638bSDr. David Alan Gilbert block_name); 2218679638bSDr. David Alan Gilbert return 1; 2228679638bSDr. David Alan Gilbert } 2235d214a92SDr. David Alan Gilbert 2245d214a92SDr. David Alan Gilbert if (length % pagesize) { 2255d214a92SDr. David Alan Gilbert error_report("Postcopy requires RAM blocks to be a page size multiple," 2265d214a92SDr. David Alan Gilbert " block %s is 0x" RAM_ADDR_FMT " bytes with a " 2275d214a92SDr. David Alan Gilbert "page size of 0x%zx", block_name, length, pagesize); 2285d214a92SDr. David Alan Gilbert return 1; 2295d214a92SDr. David Alan Gilbert } 2308679638bSDr. 
David Alan Gilbert return 0; 2318679638bSDr. David Alan Gilbert } 2328679638bSDr. David Alan Gilbert 23358b7c17eSDr. David Alan Gilbert /* 23458b7c17eSDr. David Alan Gilbert * Note: This has the side effect of munlock'ing all of RAM, that's 23558b7c17eSDr. David Alan Gilbert * normally fine since if the postcopy succeeds it gets turned back on at the 23658b7c17eSDr. David Alan Gilbert * end. 23758b7c17eSDr. David Alan Gilbert */ 238d7651f15SAlexey Perevalov bool postcopy_ram_supported_by_host(MigrationIncomingState *mis) 239eb59db53SDr. David Alan Gilbert { 240eb59db53SDr. David Alan Gilbert long pagesize = getpagesize(); 241eb59db53SDr. David Alan Gilbert int ufd = -1; 242eb59db53SDr. David Alan Gilbert bool ret = false; /* Error unless we change it */ 243eb59db53SDr. David Alan Gilbert void *testarea = NULL; 244eb59db53SDr. David Alan Gilbert struct uffdio_register reg_struct; 245eb59db53SDr. David Alan Gilbert struct uffdio_range range_struct; 246eb59db53SDr. David Alan Gilbert uint64_t feature_mask; 247*1693c64cSDr. David Alan Gilbert Error *local_err = NULL; 248eb59db53SDr. David Alan Gilbert 24920afaed9SJuan Quintela if (qemu_target_page_size() > pagesize) { 250eb59db53SDr. David Alan Gilbert error_report("Target page size bigger than host page size"); 251eb59db53SDr. David Alan Gilbert goto out; 252eb59db53SDr. David Alan Gilbert } 253eb59db53SDr. David Alan Gilbert 254eb59db53SDr. David Alan Gilbert ufd = syscall(__NR_userfaultfd, O_CLOEXEC); 255eb59db53SDr. David Alan Gilbert if (ufd == -1) { 256eb59db53SDr. David Alan Gilbert error_report("%s: userfaultfd not available: %s", __func__, 257eb59db53SDr. David Alan Gilbert strerror(errno)); 258eb59db53SDr. David Alan Gilbert goto out; 259eb59db53SDr. David Alan Gilbert } 260eb59db53SDr. David Alan Gilbert 261*1693c64cSDr. David Alan Gilbert /* Give devices a chance to object */ 262*1693c64cSDr. David Alan Gilbert if (postcopy_notify(POSTCOPY_NOTIFY_PROBE, &local_err)) { 263*1693c64cSDr. 
David Alan Gilbert error_report_err(local_err); 264*1693c64cSDr. David Alan Gilbert goto out; 265*1693c64cSDr. David Alan Gilbert } 266*1693c64cSDr. David Alan Gilbert 267eb59db53SDr. David Alan Gilbert /* Version and features check */ 26854ae0886SAlexey Perevalov if (!ufd_check_and_apply(ufd, mis)) { 269eb59db53SDr. David Alan Gilbert goto out; 270eb59db53SDr. David Alan Gilbert } 271eb59db53SDr. David Alan Gilbert 2728679638bSDr. David Alan Gilbert /* We don't support postcopy with shared RAM yet */ 2735d214a92SDr. David Alan Gilbert if (qemu_ram_foreach_block(test_ramblock_postcopiable, NULL)) { 2748679638bSDr. David Alan Gilbert goto out; 2758679638bSDr. David Alan Gilbert } 2768679638bSDr. David Alan Gilbert 277eb59db53SDr. David Alan Gilbert /* 27858b7c17eSDr. David Alan Gilbert * userfault and mlock don't go together; we'll put it back later if 27958b7c17eSDr. David Alan Gilbert * it was enabled. 28058b7c17eSDr. David Alan Gilbert */ 28158b7c17eSDr. David Alan Gilbert if (munlockall()) { 28258b7c17eSDr. David Alan Gilbert error_report("%s: munlockall: %s", __func__, strerror(errno)); 28358b7c17eSDr. David Alan Gilbert return -1; 28458b7c17eSDr. David Alan Gilbert } 28558b7c17eSDr. David Alan Gilbert 28658b7c17eSDr. David Alan Gilbert /* 287eb59db53SDr. David Alan Gilbert * We need to check that the ops we need are supported on anon memory 288eb59db53SDr. David Alan Gilbert * To do that we need to register a chunk and see the flags that 289eb59db53SDr. David Alan Gilbert * are returned. 290eb59db53SDr. David Alan Gilbert */ 291eb59db53SDr. David Alan Gilbert testarea = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE | 292eb59db53SDr. David Alan Gilbert MAP_ANONYMOUS, -1, 0); 293eb59db53SDr. David Alan Gilbert if (testarea == MAP_FAILED) { 294eb59db53SDr. David Alan Gilbert error_report("%s: Failed to map test area: %s", __func__, 295eb59db53SDr. David Alan Gilbert strerror(errno)); 296eb59db53SDr. David Alan Gilbert goto out; 297eb59db53SDr. 
David Alan Gilbert } 298eb59db53SDr. David Alan Gilbert g_assert(((size_t)testarea & (pagesize-1)) == 0); 299eb59db53SDr. David Alan Gilbert 300eb59db53SDr. David Alan Gilbert reg_struct.range.start = (uintptr_t)testarea; 301eb59db53SDr. David Alan Gilbert reg_struct.range.len = pagesize; 302eb59db53SDr. David Alan Gilbert reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING; 303eb59db53SDr. David Alan Gilbert 304eb59db53SDr. David Alan Gilbert if (ioctl(ufd, UFFDIO_REGISTER, ®_struct)) { 305eb59db53SDr. David Alan Gilbert error_report("%s userfault register: %s", __func__, strerror(errno)); 306eb59db53SDr. David Alan Gilbert goto out; 307eb59db53SDr. David Alan Gilbert } 308eb59db53SDr. David Alan Gilbert 309eb59db53SDr. David Alan Gilbert range_struct.start = (uintptr_t)testarea; 310eb59db53SDr. David Alan Gilbert range_struct.len = pagesize; 311eb59db53SDr. David Alan Gilbert if (ioctl(ufd, UFFDIO_UNREGISTER, &range_struct)) { 312eb59db53SDr. David Alan Gilbert error_report("%s userfault unregister: %s", __func__, strerror(errno)); 313eb59db53SDr. David Alan Gilbert goto out; 314eb59db53SDr. David Alan Gilbert } 315eb59db53SDr. David Alan Gilbert 316eb59db53SDr. David Alan Gilbert feature_mask = (__u64)1 << _UFFDIO_WAKE | 317eb59db53SDr. David Alan Gilbert (__u64)1 << _UFFDIO_COPY | 318eb59db53SDr. David Alan Gilbert (__u64)1 << _UFFDIO_ZEROPAGE; 319eb59db53SDr. David Alan Gilbert if ((reg_struct.ioctls & feature_mask) != feature_mask) { 320eb59db53SDr. David Alan Gilbert error_report("Missing userfault map features: %" PRIx64, 321eb59db53SDr. David Alan Gilbert (uint64_t)(~reg_struct.ioctls & feature_mask)); 322eb59db53SDr. David Alan Gilbert goto out; 323eb59db53SDr. David Alan Gilbert } 324eb59db53SDr. David Alan Gilbert 325eb59db53SDr. David Alan Gilbert /* Success! */ 326eb59db53SDr. David Alan Gilbert ret = true; 327eb59db53SDr. David Alan Gilbert out: 328eb59db53SDr. David Alan Gilbert if (testarea) { 329eb59db53SDr. 
David Alan Gilbert munmap(testarea, pagesize); 330eb59db53SDr. David Alan Gilbert } 331eb59db53SDr. David Alan Gilbert if (ufd != -1) { 332eb59db53SDr. David Alan Gilbert close(ufd); 333eb59db53SDr. David Alan Gilbert } 334eb59db53SDr. David Alan Gilbert return ret; 335eb59db53SDr. David Alan Gilbert } 336eb59db53SDr. David Alan Gilbert 3371caddf8aSDr. David Alan Gilbert /* 3381caddf8aSDr. David Alan Gilbert * Setup an area of RAM so that it *can* be used for postcopy later; this 3391caddf8aSDr. David Alan Gilbert * must be done right at the start prior to pre-copy. 3401caddf8aSDr. David Alan Gilbert * opaque should be the MIS. 3411caddf8aSDr. David Alan Gilbert */ 3421caddf8aSDr. David Alan Gilbert static int init_range(const char *block_name, void *host_addr, 3431caddf8aSDr. David Alan Gilbert ram_addr_t offset, ram_addr_t length, void *opaque) 3441caddf8aSDr. David Alan Gilbert { 3451caddf8aSDr. David Alan Gilbert trace_postcopy_init_range(block_name, host_addr, offset, length); 3461caddf8aSDr. David Alan Gilbert 3471caddf8aSDr. David Alan Gilbert /* 3481caddf8aSDr. David Alan Gilbert * We need the whole of RAM to be truly empty for postcopy, so things 3491caddf8aSDr. David Alan Gilbert * like ROMs and any data tables built during init must be zero'd 3501caddf8aSDr. David Alan Gilbert * - we're going to get the copy from the source anyway. 3511caddf8aSDr. David Alan Gilbert * (Precopy will just overwrite this data, so doesn't need the discard) 3521caddf8aSDr. David Alan Gilbert */ 353aaa2064cSJuan Quintela if (ram_discard_range(block_name, 0, length)) { 3541caddf8aSDr. David Alan Gilbert return -1; 3551caddf8aSDr. David Alan Gilbert } 3561caddf8aSDr. David Alan Gilbert 3571caddf8aSDr. David Alan Gilbert return 0; 3581caddf8aSDr. David Alan Gilbert } 3591caddf8aSDr. David Alan Gilbert 3601caddf8aSDr. David Alan Gilbert /* 3611caddf8aSDr. David Alan Gilbert * At the end of migration, undo the effects of init_range 3621caddf8aSDr. 
David Alan Gilbert * opaque should be the MIS. 3631caddf8aSDr. David Alan Gilbert */ 3641caddf8aSDr. David Alan Gilbert static int cleanup_range(const char *block_name, void *host_addr, 3651caddf8aSDr. David Alan Gilbert ram_addr_t offset, ram_addr_t length, void *opaque) 3661caddf8aSDr. David Alan Gilbert { 3671caddf8aSDr. David Alan Gilbert MigrationIncomingState *mis = opaque; 3681caddf8aSDr. David Alan Gilbert struct uffdio_range range_struct; 3691caddf8aSDr. David Alan Gilbert trace_postcopy_cleanup_range(block_name, host_addr, offset, length); 3701caddf8aSDr. David Alan Gilbert 3711caddf8aSDr. David Alan Gilbert /* 3721caddf8aSDr. David Alan Gilbert * We turned off hugepage for the precopy stage with postcopy enabled 3731caddf8aSDr. David Alan Gilbert * we can turn it back on now. 3741caddf8aSDr. David Alan Gilbert */ 3751d741439SDr. David Alan Gilbert qemu_madvise(host_addr, length, QEMU_MADV_HUGEPAGE); 3761caddf8aSDr. David Alan Gilbert 3771caddf8aSDr. David Alan Gilbert /* 3781caddf8aSDr. David Alan Gilbert * We can also turn off userfault now since we should have all the 3791caddf8aSDr. David Alan Gilbert * pages. It can be useful to leave it on to debug postcopy 3801caddf8aSDr. David Alan Gilbert * if you're not sure it's always getting every page. 3811caddf8aSDr. David Alan Gilbert */ 3821caddf8aSDr. David Alan Gilbert range_struct.start = (uintptr_t)host_addr; 3831caddf8aSDr. David Alan Gilbert range_struct.len = length; 3841caddf8aSDr. David Alan Gilbert 3851caddf8aSDr. David Alan Gilbert if (ioctl(mis->userfault_fd, UFFDIO_UNREGISTER, &range_struct)) { 3861caddf8aSDr. David Alan Gilbert error_report("%s: userfault unregister %s", __func__, strerror(errno)); 3871caddf8aSDr. David Alan Gilbert 3881caddf8aSDr. David Alan Gilbert return -1; 3891caddf8aSDr. David Alan Gilbert } 3901caddf8aSDr. David Alan Gilbert 3911caddf8aSDr. David Alan Gilbert return 0; 3921caddf8aSDr. David Alan Gilbert } 3931caddf8aSDr. David Alan Gilbert 3941caddf8aSDr. 
David Alan Gilbert /* 3951caddf8aSDr. David Alan Gilbert * Initialise postcopy-ram, setting the RAM to a state where we can go into 3961caddf8aSDr. David Alan Gilbert * postcopy later; must be called prior to any precopy. 3971caddf8aSDr. David Alan Gilbert * called from arch_init's similarly named ram_postcopy_incoming_init 3981caddf8aSDr. David Alan Gilbert */ 3991caddf8aSDr. David Alan Gilbert int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages) 4001caddf8aSDr. David Alan Gilbert { 401aaa2064cSJuan Quintela if (qemu_ram_foreach_block(init_range, NULL)) { 4021caddf8aSDr. David Alan Gilbert return -1; 4031caddf8aSDr. David Alan Gilbert } 4041caddf8aSDr. David Alan Gilbert 4051caddf8aSDr. David Alan Gilbert return 0; 4061caddf8aSDr. David Alan Gilbert } 4071caddf8aSDr. David Alan Gilbert 4081caddf8aSDr. David Alan Gilbert /* 4091caddf8aSDr. David Alan Gilbert * At the end of a migration where postcopy_ram_incoming_init was called. 4101caddf8aSDr. David Alan Gilbert */ 4111caddf8aSDr. David Alan Gilbert int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis) 4121caddf8aSDr. David Alan Gilbert { 413c4faeed2SDr. David Alan Gilbert trace_postcopy_ram_incoming_cleanup_entry(); 414c4faeed2SDr. David Alan Gilbert 415c4faeed2SDr. David Alan Gilbert if (mis->have_fault_thread) { 4161caddf8aSDr. David Alan Gilbert if (qemu_ram_foreach_block(cleanup_range, mis)) { 4171caddf8aSDr. David Alan Gilbert return -1; 4181caddf8aSDr. David Alan Gilbert } 4199ab7ef9bSPeter Xu /* Let the fault thread quit */ 42064f615feSPeter Xu atomic_set(&mis->fault_thread_quit, 1); 4219ab7ef9bSPeter Xu postcopy_fault_thread_notify(mis); 422c4faeed2SDr. David Alan Gilbert trace_postcopy_ram_incoming_cleanup_join(); 423c4faeed2SDr. David Alan Gilbert qemu_thread_join(&mis->fault_thread); 4249ab7ef9bSPeter Xu 425c4faeed2SDr. David Alan Gilbert trace_postcopy_ram_incoming_cleanup_closeuf(); 426c4faeed2SDr. 
David Alan Gilbert close(mis->userfault_fd); 42764f615feSPeter Xu close(mis->userfault_event_fd); 428c4faeed2SDr. David Alan Gilbert mis->have_fault_thread = false; 429c4faeed2SDr. David Alan Gilbert } 430c4faeed2SDr. David Alan Gilbert 431371ff5a3SDr. David Alan Gilbert qemu_balloon_inhibit(false); 432371ff5a3SDr. David Alan Gilbert 43358b7c17eSDr. David Alan Gilbert if (enable_mlock) { 43458b7c17eSDr. David Alan Gilbert if (os_mlock() < 0) { 43558b7c17eSDr. David Alan Gilbert error_report("mlock: %s", strerror(errno)); 43658b7c17eSDr. David Alan Gilbert /* 43758b7c17eSDr. David Alan Gilbert * It doesn't feel right to fail at this point, we have a valid 43858b7c17eSDr. David Alan Gilbert * VM state. 43958b7c17eSDr. David Alan Gilbert */ 44058b7c17eSDr. David Alan Gilbert } 44158b7c17eSDr. David Alan Gilbert } 44258b7c17eSDr. David Alan Gilbert 443c4faeed2SDr. David Alan Gilbert postcopy_state_set(POSTCOPY_INCOMING_END); 4441caddf8aSDr. David Alan Gilbert 445696ed9a9SDr. David Alan Gilbert if (mis->postcopy_tmp_page) { 446df9ff5e1SDr. David Alan Gilbert munmap(mis->postcopy_tmp_page, mis->largest_page_size); 447696ed9a9SDr. David Alan Gilbert mis->postcopy_tmp_page = NULL; 448696ed9a9SDr. David Alan Gilbert } 44941d84210SDr. David Alan Gilbert if (mis->postcopy_tmp_zero_page) { 45041d84210SDr. David Alan Gilbert munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size); 45141d84210SDr. David Alan Gilbert mis->postcopy_tmp_zero_page = NULL; 45241d84210SDr. David Alan Gilbert } 453c4faeed2SDr. David Alan Gilbert trace_postcopy_ram_incoming_cleanup_exit(); 4541caddf8aSDr. David Alan Gilbert return 0; 4551caddf8aSDr. David Alan Gilbert } 4561caddf8aSDr. David Alan Gilbert 457f0a227adSDr. David Alan Gilbert /* 458f9527107SDr. David Alan Gilbert * Disable huge pages on an area 459f9527107SDr. David Alan Gilbert */ 460f9527107SDr. David Alan Gilbert static int nhp_range(const char *block_name, void *host_addr, 461f9527107SDr. 
David Alan Gilbert ram_addr_t offset, ram_addr_t length, void *opaque) 462f9527107SDr. David Alan Gilbert { 463f9527107SDr. David Alan Gilbert trace_postcopy_nhp_range(block_name, host_addr, offset, length); 464f9527107SDr. David Alan Gilbert 465f9527107SDr. David Alan Gilbert /* 466f9527107SDr. David Alan Gilbert * Before we do discards we need to ensure those discards really 467f9527107SDr. David Alan Gilbert * do delete areas of the page, even if THP thinks a hugepage would 468f9527107SDr. David Alan Gilbert * be a good idea, so force hugepages off. 469f9527107SDr. David Alan Gilbert */ 4701d741439SDr. David Alan Gilbert qemu_madvise(host_addr, length, QEMU_MADV_NOHUGEPAGE); 471f9527107SDr. David Alan Gilbert 472f9527107SDr. David Alan Gilbert return 0; 473f9527107SDr. David Alan Gilbert } 474f9527107SDr. David Alan Gilbert 475f9527107SDr. David Alan Gilbert /* 476f9527107SDr. David Alan Gilbert * Userfault requires us to mark RAM as NOHUGEPAGE prior to discard 477f9527107SDr. David Alan Gilbert * however leaving it until after precopy means that most of the precopy 478f9527107SDr. David Alan Gilbert * data is still THPd 479f9527107SDr. David Alan Gilbert */ 480f9527107SDr. David Alan Gilbert int postcopy_ram_prepare_discard(MigrationIncomingState *mis) 481f9527107SDr. David Alan Gilbert { 482f9527107SDr. David Alan Gilbert if (qemu_ram_foreach_block(nhp_range, mis)) { 483f9527107SDr. David Alan Gilbert return -1; 484f9527107SDr. David Alan Gilbert } 485f9527107SDr. David Alan Gilbert 486f9527107SDr. David Alan Gilbert postcopy_state_set(POSTCOPY_INCOMING_DISCARD); 487f9527107SDr. David Alan Gilbert 488f9527107SDr. David Alan Gilbert return 0; 489f9527107SDr. David Alan Gilbert } 490f9527107SDr. David Alan Gilbert 491f9527107SDr. David Alan Gilbert /* 492f0a227adSDr. David Alan Gilbert * Mark the given area of RAM as requiring notification to unwritten areas 493f0a227adSDr. David Alan Gilbert * Used as a callback on qemu_ram_foreach_block. 494f0a227adSDr. 
David Alan Gilbert * host_addr: Base of area to mark 495f0a227adSDr. David Alan Gilbert * offset: Offset in the whole ram arena 496f0a227adSDr. David Alan Gilbert * length: Length of the section 497f0a227adSDr. David Alan Gilbert * opaque: MigrationIncomingState pointer 498f0a227adSDr. David Alan Gilbert * Returns 0 on success 499f0a227adSDr. David Alan Gilbert */ 500f0a227adSDr. David Alan Gilbert static int ram_block_enable_notify(const char *block_name, void *host_addr, 501f0a227adSDr. David Alan Gilbert ram_addr_t offset, ram_addr_t length, 502f0a227adSDr. David Alan Gilbert void *opaque) 503f0a227adSDr. David Alan Gilbert { 504f0a227adSDr. David Alan Gilbert MigrationIncomingState *mis = opaque; 505f0a227adSDr. David Alan Gilbert struct uffdio_register reg_struct; 506f0a227adSDr. David Alan Gilbert 507f0a227adSDr. David Alan Gilbert reg_struct.range.start = (uintptr_t)host_addr; 508f0a227adSDr. David Alan Gilbert reg_struct.range.len = length; 509f0a227adSDr. David Alan Gilbert reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING; 510f0a227adSDr. David Alan Gilbert 511f0a227adSDr. David Alan Gilbert /* Now tell our userfault_fd that it's responsible for this area */ 512f0a227adSDr. David Alan Gilbert if (ioctl(mis->userfault_fd, UFFDIO_REGISTER, ®_struct)) { 513f0a227adSDr. David Alan Gilbert error_report("%s userfault register: %s", __func__, strerror(errno)); 514f0a227adSDr. David Alan Gilbert return -1; 515f0a227adSDr. David Alan Gilbert } 516665414adSDr. David Alan Gilbert if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) { 517665414adSDr. David Alan Gilbert error_report("%s userfault: Region doesn't support COPY", __func__); 518665414adSDr. David Alan Gilbert return -1; 519665414adSDr. David Alan Gilbert } 5202ce16640SDr. David Alan Gilbert if (reg_struct.ioctls & ((__u64)1 << _UFFDIO_ZEROPAGE)) { 5212ce16640SDr. David Alan Gilbert RAMBlock *rb = qemu_ram_block_by_name(block_name); 5222ce16640SDr. 
David Alan Gilbert qemu_ram_set_uf_zeroable(rb); 5232ce16640SDr. David Alan Gilbert } 524f0a227adSDr. David Alan Gilbert 525f0a227adSDr. David Alan Gilbert return 0; 526f0a227adSDr. David Alan Gilbert } 527f0a227adSDr. David Alan Gilbert 528f0a227adSDr. David Alan Gilbert /* 529f0a227adSDr. David Alan Gilbert * Handle faults detected by the USERFAULT markings 530f0a227adSDr. David Alan Gilbert */ 531f0a227adSDr. David Alan Gilbert static void *postcopy_ram_fault_thread(void *opaque) 532f0a227adSDr. David Alan Gilbert { 533f0a227adSDr. David Alan Gilbert MigrationIncomingState *mis = opaque; 534c4faeed2SDr. David Alan Gilbert struct uffd_msg msg; 535c4faeed2SDr. David Alan Gilbert int ret; 536c4faeed2SDr. David Alan Gilbert RAMBlock *rb = NULL; 537c4faeed2SDr. David Alan Gilbert RAMBlock *last_rb = NULL; /* last RAMBlock we sent part of */ 538f0a227adSDr. David Alan Gilbert 539c4faeed2SDr. David Alan Gilbert trace_postcopy_ram_fault_thread_entry(); 540f0a227adSDr. David Alan Gilbert qemu_sem_post(&mis->fault_thread_sem); 541c4faeed2SDr. David Alan Gilbert 542c4faeed2SDr. David Alan Gilbert while (true) { 543c4faeed2SDr. David Alan Gilbert ram_addr_t rb_offset; 544c4faeed2SDr. David Alan Gilbert struct pollfd pfd[2]; 545c4faeed2SDr. David Alan Gilbert 546c4faeed2SDr. David Alan Gilbert /* 547c4faeed2SDr. David Alan Gilbert * We're mainly waiting for the kernel to give us a faulting HVA, 548c4faeed2SDr. David Alan Gilbert * however we can be told to quit via userfault_quit_fd which is 549c4faeed2SDr. David Alan Gilbert * an eventfd 550c4faeed2SDr. David Alan Gilbert */ 551c4faeed2SDr. David Alan Gilbert pfd[0].fd = mis->userfault_fd; 552c4faeed2SDr. David Alan Gilbert pfd[0].events = POLLIN; 553c4faeed2SDr. David Alan Gilbert pfd[0].revents = 0; 55464f615feSPeter Xu pfd[1].fd = mis->userfault_event_fd; 555c4faeed2SDr. David Alan Gilbert pfd[1].events = POLLIN; /* Waiting for eventfd to go positive */ 556c4faeed2SDr. 
David Alan Gilbert pfd[1].revents = 0; 557c4faeed2SDr. David Alan Gilbert 558c4faeed2SDr. David Alan Gilbert if (poll(pfd, 2, -1 /* Wait forever */) == -1) { 559c4faeed2SDr. David Alan Gilbert error_report("%s: userfault poll: %s", __func__, strerror(errno)); 560c4faeed2SDr. David Alan Gilbert break; 561f0a227adSDr. David Alan Gilbert } 562f0a227adSDr. David Alan Gilbert 563c4faeed2SDr. David Alan Gilbert if (pfd[1].revents) { 56464f615feSPeter Xu uint64_t tmp64 = 0; 56564f615feSPeter Xu 56664f615feSPeter Xu /* Consume the signal */ 56764f615feSPeter Xu if (read(mis->userfault_event_fd, &tmp64, 8) != 8) { 56864f615feSPeter Xu /* Nothing obviously nicer than posting this error. */ 56964f615feSPeter Xu error_report("%s: read() failed", __func__); 57064f615feSPeter Xu } 57164f615feSPeter Xu 57264f615feSPeter Xu if (atomic_read(&mis->fault_thread_quit)) { 573c4faeed2SDr. David Alan Gilbert trace_postcopy_ram_fault_thread_quit(); 574c4faeed2SDr. David Alan Gilbert break; 575c4faeed2SDr. David Alan Gilbert } 57664f615feSPeter Xu } 577c4faeed2SDr. David Alan Gilbert 578c4faeed2SDr. David Alan Gilbert ret = read(mis->userfault_fd, &msg, sizeof(msg)); 579c4faeed2SDr. David Alan Gilbert if (ret != sizeof(msg)) { 580c4faeed2SDr. David Alan Gilbert if (errno == EAGAIN) { 581c4faeed2SDr. David Alan Gilbert /* 582c4faeed2SDr. David Alan Gilbert * if a wake up happens on the other thread just after 583c4faeed2SDr. David Alan Gilbert * the poll, there is nothing to read. 584c4faeed2SDr. David Alan Gilbert */ 585c4faeed2SDr. David Alan Gilbert continue; 586c4faeed2SDr. David Alan Gilbert } 587c4faeed2SDr. David Alan Gilbert if (ret < 0) { 588c4faeed2SDr. David Alan Gilbert error_report("%s: Failed to read full userfault message: %s", 589c4faeed2SDr. David Alan Gilbert __func__, strerror(errno)); 590c4faeed2SDr. David Alan Gilbert break; 591c4faeed2SDr. David Alan Gilbert } else { 592c4faeed2SDr. 
David Alan Gilbert error_report("%s: Read %d bytes from userfaultfd expected %zd", 593c4faeed2SDr. David Alan Gilbert __func__, ret, sizeof(msg)); 594c4faeed2SDr. David Alan Gilbert break; /* Lost alignment, don't know what we'd read next */ 595c4faeed2SDr. David Alan Gilbert } 596c4faeed2SDr. David Alan Gilbert } 597c4faeed2SDr. David Alan Gilbert if (msg.event != UFFD_EVENT_PAGEFAULT) { 598c4faeed2SDr. David Alan Gilbert error_report("%s: Read unexpected event %ud from userfaultfd", 599c4faeed2SDr. David Alan Gilbert __func__, msg.event); 600c4faeed2SDr. David Alan Gilbert continue; /* It's not a page fault, shouldn't happen */ 601c4faeed2SDr. David Alan Gilbert } 602c4faeed2SDr. David Alan Gilbert 603c4faeed2SDr. David Alan Gilbert rb = qemu_ram_block_from_host( 604c4faeed2SDr. David Alan Gilbert (void *)(uintptr_t)msg.arg.pagefault.address, 605f615f396SPaolo Bonzini true, &rb_offset); 606c4faeed2SDr. David Alan Gilbert if (!rb) { 607c4faeed2SDr. David Alan Gilbert error_report("postcopy_ram_fault_thread: Fault outside guest: %" 608c4faeed2SDr. David Alan Gilbert PRIx64, (uint64_t)msg.arg.pagefault.address); 609c4faeed2SDr. David Alan Gilbert break; 610c4faeed2SDr. David Alan Gilbert } 611c4faeed2SDr. David Alan Gilbert 612332847f0SDr. David Alan Gilbert rb_offset &= ~(qemu_ram_pagesize(rb) - 1); 613c4faeed2SDr. David Alan Gilbert trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address, 614c4faeed2SDr. David Alan Gilbert qemu_ram_get_idstr(rb), 615ee86981bSPeter Maydell rb_offset); 616c4faeed2SDr. David Alan Gilbert 617c4faeed2SDr. David Alan Gilbert /* 618c4faeed2SDr. David Alan Gilbert * Send the request to the source - we want to request one 619c4faeed2SDr. David Alan Gilbert * of our host page sizes (which is >= TPS) 620c4faeed2SDr. David Alan Gilbert */ 621c4faeed2SDr. David Alan Gilbert if (rb != last_rb) { 622c4faeed2SDr. David Alan Gilbert last_rb = rb; 623c4faeed2SDr. 
David Alan Gilbert migrate_send_rp_req_pages(mis, qemu_ram_get_idstr(rb), 624332847f0SDr. David Alan Gilbert rb_offset, qemu_ram_pagesize(rb)); 625c4faeed2SDr. David Alan Gilbert } else { 626c4faeed2SDr. David Alan Gilbert /* Save some space */ 627c4faeed2SDr. David Alan Gilbert migrate_send_rp_req_pages(mis, NULL, 628332847f0SDr. David Alan Gilbert rb_offset, qemu_ram_pagesize(rb)); 629c4faeed2SDr. David Alan Gilbert } 630c4faeed2SDr. David Alan Gilbert } 631c4faeed2SDr. David Alan Gilbert trace_postcopy_ram_fault_thread_exit(); 632f0a227adSDr. David Alan Gilbert return NULL; 633f0a227adSDr. David Alan Gilbert } 634f0a227adSDr. David Alan Gilbert 635f0a227adSDr. David Alan Gilbert int postcopy_ram_enable_notify(MigrationIncomingState *mis) 636f0a227adSDr. David Alan Gilbert { 637c4faeed2SDr. David Alan Gilbert /* Open the fd for the kernel to give us userfaults */ 638c4faeed2SDr. David Alan Gilbert mis->userfault_fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK); 639c4faeed2SDr. David Alan Gilbert if (mis->userfault_fd == -1) { 640c4faeed2SDr. David Alan Gilbert error_report("%s: Failed to open userfault fd: %s", __func__, 641c4faeed2SDr. David Alan Gilbert strerror(errno)); 642c4faeed2SDr. David Alan Gilbert return -1; 643c4faeed2SDr. David Alan Gilbert } 644c4faeed2SDr. David Alan Gilbert 645c4faeed2SDr. David Alan Gilbert /* 646c4faeed2SDr. David Alan Gilbert * Although the host check already tested the API, we need to 647c4faeed2SDr. David Alan Gilbert * do the check again as an ABI handshake on the new fd. 648c4faeed2SDr. David Alan Gilbert */ 64954ae0886SAlexey Perevalov if (!ufd_check_and_apply(mis->userfault_fd, mis)) { 650c4faeed2SDr. David Alan Gilbert return -1; 651c4faeed2SDr. David Alan Gilbert } 652c4faeed2SDr. David Alan Gilbert 653c4faeed2SDr. 
David Alan Gilbert /* Now an eventfd we use to tell the fault-thread to quit */ 65464f615feSPeter Xu mis->userfault_event_fd = eventfd(0, EFD_CLOEXEC); 65564f615feSPeter Xu if (mis->userfault_event_fd == -1) { 65664f615feSPeter Xu error_report("%s: Opening userfault_event_fd: %s", __func__, 657c4faeed2SDr. David Alan Gilbert strerror(errno)); 658c4faeed2SDr. David Alan Gilbert close(mis->userfault_fd); 659c4faeed2SDr. David Alan Gilbert return -1; 660c4faeed2SDr. David Alan Gilbert } 661c4faeed2SDr. David Alan Gilbert 662f0a227adSDr. David Alan Gilbert qemu_sem_init(&mis->fault_thread_sem, 0); 663f0a227adSDr. David Alan Gilbert qemu_thread_create(&mis->fault_thread, "postcopy/fault", 664f0a227adSDr. David Alan Gilbert postcopy_ram_fault_thread, mis, QEMU_THREAD_JOINABLE); 665f0a227adSDr. David Alan Gilbert qemu_sem_wait(&mis->fault_thread_sem); 666f0a227adSDr. David Alan Gilbert qemu_sem_destroy(&mis->fault_thread_sem); 667c4faeed2SDr. David Alan Gilbert mis->have_fault_thread = true; 668f0a227adSDr. David Alan Gilbert 669f0a227adSDr. David Alan Gilbert /* Mark so that we get notified of accesses to unwritten areas */ 670f0a227adSDr. David Alan Gilbert if (qemu_ram_foreach_block(ram_block_enable_notify, mis)) { 671f0a227adSDr. David Alan Gilbert return -1; 672f0a227adSDr. David Alan Gilbert } 673f0a227adSDr. David Alan Gilbert 674371ff5a3SDr. David Alan Gilbert /* 675371ff5a3SDr. David Alan Gilbert * Ballooning can mark pages as absent while we're postcopying 676371ff5a3SDr. David Alan Gilbert * that would cause false userfaults. 677371ff5a3SDr. David Alan Gilbert */ 678371ff5a3SDr. David Alan Gilbert qemu_balloon_inhibit(true); 679371ff5a3SDr. David Alan Gilbert 680c4faeed2SDr. David Alan Gilbert trace_postcopy_ram_enable_notify(); 681c4faeed2SDr. David Alan Gilbert 682f0a227adSDr. David Alan Gilbert return 0; 683f0a227adSDr. David Alan Gilbert } 684f0a227adSDr. 
David Alan Gilbert 685727b9d7eSAlexey Perevalov static int qemu_ufd_copy_ioctl(int userfault_fd, void *host_addr, 686f9494614SAlexey Perevalov void *from_addr, uint64_t pagesize, RAMBlock *rb) 687727b9d7eSAlexey Perevalov { 688f9494614SAlexey Perevalov int ret; 689727b9d7eSAlexey Perevalov if (from_addr) { 690727b9d7eSAlexey Perevalov struct uffdio_copy copy_struct; 691727b9d7eSAlexey Perevalov copy_struct.dst = (uint64_t)(uintptr_t)host_addr; 692727b9d7eSAlexey Perevalov copy_struct.src = (uint64_t)(uintptr_t)from_addr; 693727b9d7eSAlexey Perevalov copy_struct.len = pagesize; 694727b9d7eSAlexey Perevalov copy_struct.mode = 0; 695f9494614SAlexey Perevalov ret = ioctl(userfault_fd, UFFDIO_COPY, ©_struct); 696727b9d7eSAlexey Perevalov } else { 697727b9d7eSAlexey Perevalov struct uffdio_zeropage zero_struct; 698727b9d7eSAlexey Perevalov zero_struct.range.start = (uint64_t)(uintptr_t)host_addr; 699727b9d7eSAlexey Perevalov zero_struct.range.len = pagesize; 700727b9d7eSAlexey Perevalov zero_struct.mode = 0; 701f9494614SAlexey Perevalov ret = ioctl(userfault_fd, UFFDIO_ZEROPAGE, &zero_struct); 702727b9d7eSAlexey Perevalov } 703f9494614SAlexey Perevalov if (!ret) { 704f9494614SAlexey Perevalov ramblock_recv_bitmap_set_range(rb, host_addr, 705f9494614SAlexey Perevalov pagesize / qemu_target_page_size()); 706f9494614SAlexey Perevalov } 707f9494614SAlexey Perevalov return ret; 708727b9d7eSAlexey Perevalov } 709727b9d7eSAlexey Perevalov 710696ed9a9SDr. David Alan Gilbert /* 711696ed9a9SDr. David Alan Gilbert * Place a host page (from) at (host) atomically 712696ed9a9SDr. David Alan Gilbert * returns 0 on success 713696ed9a9SDr. David Alan Gilbert */ 714df9ff5e1SDr. David Alan Gilbert int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from, 7158be4620bSAlexey Perevalov RAMBlock *rb) 716696ed9a9SDr. David Alan Gilbert { 7178be4620bSAlexey Perevalov size_t pagesize = qemu_ram_pagesize(rb); 718696ed9a9SDr. David Alan Gilbert 719696ed9a9SDr. 
David Alan Gilbert /* copy also acks to the kernel waking the stalled thread up 720696ed9a9SDr. David Alan Gilbert * TODO: We can inhibit that ack and only do it if it was requested 721696ed9a9SDr. David Alan Gilbert * which would be slightly cheaper, but we'd have to be careful 722696ed9a9SDr. David Alan Gilbert * of the order of updating our page state. 723696ed9a9SDr. David Alan Gilbert */ 724f9494614SAlexey Perevalov if (qemu_ufd_copy_ioctl(mis->userfault_fd, host, from, pagesize, rb)) { 725696ed9a9SDr. David Alan Gilbert int e = errno; 726df9ff5e1SDr. David Alan Gilbert error_report("%s: %s copy host: %p from: %p (size: %zd)", 727df9ff5e1SDr. David Alan Gilbert __func__, strerror(e), host, from, pagesize); 728696ed9a9SDr. David Alan Gilbert 729696ed9a9SDr. David Alan Gilbert return -e; 730696ed9a9SDr. David Alan Gilbert } 731696ed9a9SDr. David Alan Gilbert 732696ed9a9SDr. David Alan Gilbert trace_postcopy_place_page(host); 733696ed9a9SDr. David Alan Gilbert return 0; 734696ed9a9SDr. David Alan Gilbert } 735696ed9a9SDr. David Alan Gilbert 736696ed9a9SDr. David Alan Gilbert /* 737696ed9a9SDr. David Alan Gilbert * Place a zero page at (host) atomically 738696ed9a9SDr. David Alan Gilbert * returns 0 on success 739696ed9a9SDr. David Alan Gilbert */ 740df9ff5e1SDr. David Alan Gilbert int postcopy_place_page_zero(MigrationIncomingState *mis, void *host, 7418be4620bSAlexey Perevalov RAMBlock *rb) 742696ed9a9SDr. David Alan Gilbert { 7432ce16640SDr. David Alan Gilbert size_t pagesize = qemu_ram_pagesize(rb); 744df9ff5e1SDr. David Alan Gilbert trace_postcopy_place_page_zero(host); 745696ed9a9SDr. David Alan Gilbert 7462ce16640SDr. David Alan Gilbert /* Normal RAMBlocks can zero a page using UFFDIO_ZEROPAGE 7472ce16640SDr. David Alan Gilbert * but it's not available for everything (e.g. hugetlbpages) 7482ce16640SDr. David Alan Gilbert */ 7492ce16640SDr. David Alan Gilbert if (qemu_ram_is_uf_zeroable(rb)) { 7502ce16640SDr. 
David Alan Gilbert if (qemu_ufd_copy_ioctl(mis->userfault_fd, host, NULL, pagesize, rb)) { 751696ed9a9SDr. David Alan Gilbert int e = errno; 752696ed9a9SDr. David Alan Gilbert error_report("%s: %s zero host: %p", 753696ed9a9SDr. David Alan Gilbert __func__, strerror(e), host); 754696ed9a9SDr. David Alan Gilbert 755696ed9a9SDr. David Alan Gilbert return -e; 756696ed9a9SDr. David Alan Gilbert } 757df9ff5e1SDr. David Alan Gilbert } else { 75841d84210SDr. David Alan Gilbert /* The kernel can't use UFFDIO_ZEROPAGE for hugepages */ 75941d84210SDr. David Alan Gilbert if (!mis->postcopy_tmp_zero_page) { 76041d84210SDr. David Alan Gilbert mis->postcopy_tmp_zero_page = mmap(NULL, mis->largest_page_size, 76141d84210SDr. David Alan Gilbert PROT_READ | PROT_WRITE, 76241d84210SDr. David Alan Gilbert MAP_PRIVATE | MAP_ANONYMOUS, 76341d84210SDr. David Alan Gilbert -1, 0); 76441d84210SDr. David Alan Gilbert if (mis->postcopy_tmp_zero_page == MAP_FAILED) { 76541d84210SDr. David Alan Gilbert int e = errno; 76641d84210SDr. David Alan Gilbert mis->postcopy_tmp_zero_page = NULL; 76741d84210SDr. David Alan Gilbert error_report("%s: %s mapping large zero page", 76841d84210SDr. David Alan Gilbert __func__, strerror(e)); 76941d84210SDr. David Alan Gilbert return -e; 77041d84210SDr. David Alan Gilbert } 77141d84210SDr. David Alan Gilbert memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size); 77241d84210SDr. David Alan Gilbert } 77341d84210SDr. David Alan Gilbert return postcopy_place_page(mis, host, mis->postcopy_tmp_zero_page, 7748be4620bSAlexey Perevalov rb); 775df9ff5e1SDr. David Alan Gilbert } 776696ed9a9SDr. David Alan Gilbert 777696ed9a9SDr. David Alan Gilbert return 0; 778696ed9a9SDr. David Alan Gilbert } 779696ed9a9SDr. David Alan Gilbert 780696ed9a9SDr. David Alan Gilbert /* 781696ed9a9SDr. David Alan Gilbert * Returns a target page of memory that can be mapped at a later point in time 782696ed9a9SDr. David Alan Gilbert * using postcopy_place_page 783696ed9a9SDr. 
David Alan Gilbert * The same address is used repeatedly, postcopy_place_page just takes the 784696ed9a9SDr. David Alan Gilbert * backing page away. 785696ed9a9SDr. David Alan Gilbert * Returns: Pointer to allocated page 786696ed9a9SDr. David Alan Gilbert * 787696ed9a9SDr. David Alan Gilbert */ 788696ed9a9SDr. David Alan Gilbert void *postcopy_get_tmp_page(MigrationIncomingState *mis) 789696ed9a9SDr. David Alan Gilbert { 790696ed9a9SDr. David Alan Gilbert if (!mis->postcopy_tmp_page) { 791df9ff5e1SDr. David Alan Gilbert mis->postcopy_tmp_page = mmap(NULL, mis->largest_page_size, 792696ed9a9SDr. David Alan Gilbert PROT_READ | PROT_WRITE, MAP_PRIVATE | 793696ed9a9SDr. David Alan Gilbert MAP_ANONYMOUS, -1, 0); 7940e8b3cdfSEvgeny Yakovlev if (mis->postcopy_tmp_page == MAP_FAILED) { 7950e8b3cdfSEvgeny Yakovlev mis->postcopy_tmp_page = NULL; 796696ed9a9SDr. David Alan Gilbert error_report("%s: %s", __func__, strerror(errno)); 797696ed9a9SDr. David Alan Gilbert return NULL; 798696ed9a9SDr. David Alan Gilbert } 799696ed9a9SDr. David Alan Gilbert } 800696ed9a9SDr. David Alan Gilbert 801696ed9a9SDr. David Alan Gilbert return mis->postcopy_tmp_page; 802696ed9a9SDr. David Alan Gilbert } 803696ed9a9SDr. David Alan Gilbert 804eb59db53SDr. David Alan Gilbert #else 805eb59db53SDr. David Alan Gilbert /* No target OS support, stubs just fail */ 806d7651f15SAlexey Perevalov bool postcopy_ram_supported_by_host(MigrationIncomingState *mis) 807eb59db53SDr. David Alan Gilbert { 808eb59db53SDr. David Alan Gilbert error_report("%s: No OS support", __func__); 809eb59db53SDr. David Alan Gilbert return false; 810eb59db53SDr. David Alan Gilbert } 811eb59db53SDr. David Alan Gilbert 8121caddf8aSDr. David Alan Gilbert int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages) 8131caddf8aSDr. David Alan Gilbert { 8141caddf8aSDr. David Alan Gilbert error_report("postcopy_ram_incoming_init: No OS support"); 8151caddf8aSDr. David Alan Gilbert return -1; 8161caddf8aSDr. 
David Alan Gilbert } 8171caddf8aSDr. David Alan Gilbert 8181caddf8aSDr. David Alan Gilbert int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis) 8191caddf8aSDr. David Alan Gilbert { 8201caddf8aSDr. David Alan Gilbert assert(0); 8211caddf8aSDr. David Alan Gilbert return -1; 8221caddf8aSDr. David Alan Gilbert } 8231caddf8aSDr. David Alan Gilbert 824f9527107SDr. David Alan Gilbert int postcopy_ram_prepare_discard(MigrationIncomingState *mis) 825f9527107SDr. David Alan Gilbert { 826f9527107SDr. David Alan Gilbert assert(0); 827f9527107SDr. David Alan Gilbert return -1; 828f9527107SDr. David Alan Gilbert } 829f9527107SDr. David Alan Gilbert 830f0a227adSDr. David Alan Gilbert int postcopy_ram_enable_notify(MigrationIncomingState *mis) 831f0a227adSDr. David Alan Gilbert { 832f0a227adSDr. David Alan Gilbert assert(0); 833f0a227adSDr. David Alan Gilbert return -1; 834f0a227adSDr. David Alan Gilbert } 835696ed9a9SDr. David Alan Gilbert 836df9ff5e1SDr. David Alan Gilbert int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from, 8378be4620bSAlexey Perevalov RAMBlock *rb) 838696ed9a9SDr. David Alan Gilbert { 839696ed9a9SDr. David Alan Gilbert assert(0); 840696ed9a9SDr. David Alan Gilbert return -1; 841696ed9a9SDr. David Alan Gilbert } 842696ed9a9SDr. David Alan Gilbert 843df9ff5e1SDr. David Alan Gilbert int postcopy_place_page_zero(MigrationIncomingState *mis, void *host, 8448be4620bSAlexey Perevalov RAMBlock *rb) 845696ed9a9SDr. David Alan Gilbert { 846696ed9a9SDr. David Alan Gilbert assert(0); 847696ed9a9SDr. David Alan Gilbert return -1; 848696ed9a9SDr. David Alan Gilbert } 849696ed9a9SDr. David Alan Gilbert 850696ed9a9SDr. David Alan Gilbert void *postcopy_get_tmp_page(MigrationIncomingState *mis) 851696ed9a9SDr. David Alan Gilbert { 852696ed9a9SDr. David Alan Gilbert assert(0); 853696ed9a9SDr. David Alan Gilbert return NULL; 854696ed9a9SDr. David Alan Gilbert } 855696ed9a9SDr. David Alan Gilbert 856eb59db53SDr. 
David Alan Gilbert #endif 857eb59db53SDr. David Alan Gilbert 858e0b266f0SDr. David Alan Gilbert /* ------------------------------------------------------------------------- */ 859e0b266f0SDr. David Alan Gilbert 8609ab7ef9bSPeter Xu void postcopy_fault_thread_notify(MigrationIncomingState *mis) 8619ab7ef9bSPeter Xu { 8629ab7ef9bSPeter Xu uint64_t tmp64 = 1; 8639ab7ef9bSPeter Xu 8649ab7ef9bSPeter Xu /* 8659ab7ef9bSPeter Xu * Wakeup the fault_thread. It's an eventfd that should currently 8669ab7ef9bSPeter Xu * be at 0, we're going to increment it to 1 8679ab7ef9bSPeter Xu */ 8689ab7ef9bSPeter Xu if (write(mis->userfault_event_fd, &tmp64, 8) != 8) { 8699ab7ef9bSPeter Xu /* Not much we can do here, but may as well report it */ 8709ab7ef9bSPeter Xu error_report("%s: incrementing failed: %s", __func__, 8719ab7ef9bSPeter Xu strerror(errno)); 8729ab7ef9bSPeter Xu } 8739ab7ef9bSPeter Xu } 8749ab7ef9bSPeter Xu 875e0b266f0SDr. David Alan Gilbert /** 876e0b266f0SDr. David Alan Gilbert * postcopy_discard_send_init: Called at the start of each RAMBlock before 877e0b266f0SDr. David Alan Gilbert * asking to discard individual ranges. 878e0b266f0SDr. David Alan Gilbert * 879e0b266f0SDr. David Alan Gilbert * @ms: The current migration state. 880e0b266f0SDr. David Alan Gilbert * @offset: the bitmap offset of the named RAMBlock in the migration 881e0b266f0SDr. David Alan Gilbert * bitmap. 882e0b266f0SDr. David Alan Gilbert * @name: RAMBlock that discards will operate on. 883e0b266f0SDr. David Alan Gilbert * 884e0b266f0SDr. David Alan Gilbert * returns: a new PDS. 885e0b266f0SDr. David Alan Gilbert */ 886e0b266f0SDr. David Alan Gilbert PostcopyDiscardState *postcopy_discard_send_init(MigrationState *ms, 887e0b266f0SDr. David Alan Gilbert const char *name) 888e0b266f0SDr. David Alan Gilbert { 889e0b266f0SDr. David Alan Gilbert PostcopyDiscardState *res = g_malloc0(sizeof(PostcopyDiscardState)); 890e0b266f0SDr. David Alan Gilbert 891e0b266f0SDr. 
David Alan Gilbert if (res) { 892e0b266f0SDr. David Alan Gilbert res->ramblock_name = name; 893e0b266f0SDr. David Alan Gilbert } 894e0b266f0SDr. David Alan Gilbert 895e0b266f0SDr. David Alan Gilbert return res; 896e0b266f0SDr. David Alan Gilbert } 897e0b266f0SDr. David Alan Gilbert 898e0b266f0SDr. David Alan Gilbert /** 899e0b266f0SDr. David Alan Gilbert * postcopy_discard_send_range: Called by the bitmap code for each chunk to 900e0b266f0SDr. David Alan Gilbert * discard. May send a discard message, may just leave it queued to 901e0b266f0SDr. David Alan Gilbert * be sent later. 902e0b266f0SDr. David Alan Gilbert * 903e0b266f0SDr. David Alan Gilbert * @ms: Current migration state. 904e0b266f0SDr. David Alan Gilbert * @pds: Structure initialised by postcopy_discard_send_init(). 905e0b266f0SDr. David Alan Gilbert * @start,@length: a range of pages in the migration bitmap in the 906e0b266f0SDr. David Alan Gilbert * RAM block passed to postcopy_discard_send_init() (length=1 is one page) 907e0b266f0SDr. David Alan Gilbert */ 908e0b266f0SDr. David Alan Gilbert void postcopy_discard_send_range(MigrationState *ms, PostcopyDiscardState *pds, 909e0b266f0SDr. David Alan Gilbert unsigned long start, unsigned long length) 910e0b266f0SDr. David Alan Gilbert { 91120afaed9SJuan Quintela size_t tp_size = qemu_target_page_size(); 912e0b266f0SDr. David Alan Gilbert /* Convert to byte offsets within the RAM block */ 9136b6712efSJuan Quintela pds->start_list[pds->cur_entry] = start * tp_size; 91420afaed9SJuan Quintela pds->length_list[pds->cur_entry] = length * tp_size; 915e0b266f0SDr. David Alan Gilbert trace_postcopy_discard_send_range(pds->ramblock_name, start, length); 916e0b266f0SDr. David Alan Gilbert pds->cur_entry++; 917e0b266f0SDr. David Alan Gilbert pds->nsentwords++; 918e0b266f0SDr. David Alan Gilbert 919e0b266f0SDr. David Alan Gilbert if (pds->cur_entry == MAX_DISCARDS_PER_COMMAND) { 920e0b266f0SDr. David Alan Gilbert /* Full set, ship it! 
*/ 92189a02a9fSzhanghailiang qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file, 92289a02a9fSzhanghailiang pds->ramblock_name, 923e0b266f0SDr. David Alan Gilbert pds->cur_entry, 924e0b266f0SDr. David Alan Gilbert pds->start_list, 925e0b266f0SDr. David Alan Gilbert pds->length_list); 926e0b266f0SDr. David Alan Gilbert pds->nsentcmds++; 927e0b266f0SDr. David Alan Gilbert pds->cur_entry = 0; 928e0b266f0SDr. David Alan Gilbert } 929e0b266f0SDr. David Alan Gilbert } 930e0b266f0SDr. David Alan Gilbert 931e0b266f0SDr. David Alan Gilbert /** 932e0b266f0SDr. David Alan Gilbert * postcopy_discard_send_finish: Called at the end of each RAMBlock by the 933e0b266f0SDr. David Alan Gilbert * bitmap code. Sends any outstanding discard messages, frees the PDS 934e0b266f0SDr. David Alan Gilbert * 935e0b266f0SDr. David Alan Gilbert * @ms: Current migration state. 936e0b266f0SDr. David Alan Gilbert * @pds: Structure initialised by postcopy_discard_send_init(). 937e0b266f0SDr. David Alan Gilbert */ 938e0b266f0SDr. David Alan Gilbert void postcopy_discard_send_finish(MigrationState *ms, PostcopyDiscardState *pds) 939e0b266f0SDr. David Alan Gilbert { 940e0b266f0SDr. David Alan Gilbert /* Anything unsent? */ 941e0b266f0SDr. David Alan Gilbert if (pds->cur_entry) { 94289a02a9fSzhanghailiang qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file, 94389a02a9fSzhanghailiang pds->ramblock_name, 944e0b266f0SDr. David Alan Gilbert pds->cur_entry, 945e0b266f0SDr. David Alan Gilbert pds->start_list, 946e0b266f0SDr. David Alan Gilbert pds->length_list); 947e0b266f0SDr. David Alan Gilbert pds->nsentcmds++; 948e0b266f0SDr. David Alan Gilbert } 949e0b266f0SDr. David Alan Gilbert 950e0b266f0SDr. David Alan Gilbert trace_postcopy_discard_send_finish(pds->ramblock_name, pds->nsentwords, 951e0b266f0SDr. David Alan Gilbert pds->nsentcmds); 952e0b266f0SDr. David Alan Gilbert 953e0b266f0SDr. David Alan Gilbert g_free(pds); 954e0b266f0SDr. 
David Alan Gilbert } 955bac3b212SJuan Quintela 956bac3b212SJuan Quintela /* 957bac3b212SJuan Quintela * Current state of incoming postcopy; note this is not part of 958bac3b212SJuan Quintela * MigrationIncomingState since it's state is used during cleanup 959bac3b212SJuan Quintela * at the end as MIS is being freed. 960bac3b212SJuan Quintela */ 961bac3b212SJuan Quintela static PostcopyState incoming_postcopy_state; 962bac3b212SJuan Quintela 963bac3b212SJuan Quintela PostcopyState postcopy_state_get(void) 964bac3b212SJuan Quintela { 965bac3b212SJuan Quintela return atomic_mb_read(&incoming_postcopy_state); 966bac3b212SJuan Quintela } 967bac3b212SJuan Quintela 968bac3b212SJuan Quintela /* Set the state and return the old state */ 969bac3b212SJuan Quintela PostcopyState postcopy_state_set(PostcopyState new_state) 970bac3b212SJuan Quintela { 971bac3b212SJuan Quintela return atomic_xchg(&incoming_postcopy_state, new_state); 972bac3b212SJuan Quintela } 973