/*
 * Postcopy migration for RAM
 *
 * Copyright 2013-2015 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Dave Gilbert <dgilbert@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

/*
 * Postcopy is a migration technique where the execution flips from the
 * source to the destination before all the data has been copied.
 */

#include "qemu/osdep.h"
#include "qemu/madvise.h"
#include "exec/target_page.h"
#include "migration.h"
#include "qemu-file.h"
#include "savevm.h"
#include "postcopy-ram.h"
#include "ram.h"
#include "qapi/error.h"
#include "qemu/notify.h"
#include "qemu/rcu.h"
#include "sysemu/sysemu.h"
#include "qemu/error-report.h"
#include "trace.h"
#include "hw/boards.h"
#include "exec/ramblock.h"
#include "socket.h"
#include "yank_functions.h"
#include "tls.h"
#include "qemu/userfaultfd.h"
#include "qemu/mmap-alloc.h"

/* Arbitrary limit on the number of ranges in each discard command; with
 * two 8-byte words per range this keeps each command around ~200 bytes
 */
#define MAX_DISCARDS_PER_COMMAND 12

struct PostcopyDiscardState {
    const char *ramblock_name;
    uint16_t cur_entry;
    /*
     * Start and length of a discard range (bytes)
     */
    uint64_t start_list[MAX_DISCARDS_PER_COMMAND];
    uint64_t length_list[MAX_DISCARDS_PER_COMMAND];
    unsigned int nsentwords;
    unsigned int nsentcmds;
};

static NotifierWithReturnList postcopy_notifier_list;

void postcopy_infrastructure_init(void)
{
    notifier_with_return_list_init(&postcopy_notifier_list);
}
void postcopy_add_notifier(NotifierWithReturn *nn)
{
    notifier_with_return_list_add(&postcopy_notifier_list, nn);
}

void postcopy_remove_notifier(NotifierWithReturn *n)
{
    notifier_with_return_remove(n);
}

int postcopy_notify(enum PostcopyNotifyReason reason, Error **errp)
{
    struct PostcopyNotifyData pnd;
    pnd.reason = reason;
    pnd.errp = errp;

    return notifier_with_return_list_notify(&postcopy_notifier_list,
                                            &pnd);
}

/*
 * NOTE: this routine is not thread safe; we can't call it concurrently. But
 * it should be good enough for migration's purposes.
 */
void postcopy_thread_create(MigrationIncomingState *mis,
                            QemuThread *thread, const char *name,
                            void *(*fn)(void *), int joinable)
{
    qemu_sem_init(&mis->thread_sync_sem, 0);
    qemu_thread_create(thread, name, fn, mis, joinable);
    qemu_sem_wait(&mis->thread_sync_sem);
    qemu_sem_destroy(&mis->thread_sync_sem);
}

/* Postcopy needs to detect accesses to pages that haven't yet been copied
 * across, and efficiently map new pages in; the techniques for doing this
 * are target OS specific.
 */
#if defined(__linux__)

#include <poll.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <asm/types.h> /* for __u64 */
#endif

#if defined(__linux__) && defined(__NR_userfaultfd) && defined(CONFIG_EVENTFD)
#include <sys/eventfd.h>
#include <linux/userfaultfd.h>
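
/*
 * Overview of the mechanism used below: userfaultfd gives us a file
 * descriptor that reports faults on registered address ranges as messages
 * readable from that fd, instead of letting the faulting thread run on.
 * The fault thread further down reads those messages, asks the source for
 * the missing page, and the receiving side resolves the fault by placing
 * the page with UFFDIO_COPY (or UFFDIO_ZEROPAGE) and waking the blocked
 * vCPU thread.
 */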

typedef struct PostcopyBlocktimeContext {
    /* time when page fault initiated per vCPU */
    uint32_t *page_fault_vcpu_time;
    /* page address per vCPU */
    uintptr_t *vcpu_addr;
    uint32_t total_blocktime;
    /* blocktime per vCPU */
    uint32_t *vcpu_blocktime;
    /* point in time when last page fault was initiated */
    uint32_t last_begin;
    /* number of vCPUs currently suspended */
    int smp_cpus_down;
    uint64_t start_time;

    /*
     * Handler for the exit event, needed for
     * releasing the whole blocktime_ctx
     */
    Notifier exit_notifier;
} PostcopyBlocktimeContext;

static void destroy_blocktime_context(struct PostcopyBlocktimeContext *ctx)
{
    g_free(ctx->page_fault_vcpu_time);
    g_free(ctx->vcpu_addr);
    g_free(ctx->vcpu_blocktime);
    g_free(ctx);
}

static void migration_exit_cb(Notifier *n, void *data)
{
    PostcopyBlocktimeContext *ctx = container_of(n, PostcopyBlocktimeContext,
                                                 exit_notifier);
    destroy_blocktime_context(ctx);
}

static struct PostcopyBlocktimeContext *blocktime_context_new(void)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int smp_cpus = ms->smp.cpus;
    PostcopyBlocktimeContext *ctx = g_new0(PostcopyBlocktimeContext, 1);
    ctx->page_fault_vcpu_time = g_new0(uint32_t, smp_cpus);
    ctx->vcpu_addr = g_new0(uintptr_t, smp_cpus);
    ctx->vcpu_blocktime = g_new0(uint32_t, smp_cpus);

    ctx->exit_notifier.notify = migration_exit_cb;
    ctx->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    qemu_add_exit_notifier(&ctx->exit_notifier);
    return ctx;
}

static uint32List *get_vcpu_blocktime_list(PostcopyBlocktimeContext *ctx)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    uint32List *list = NULL;
    int i;

    for (i = ms->smp.cpus - 1; i >= 0; i--) {
        QAPI_LIST_PREPEND(list, ctx->vcpu_blocktime[i]);
    }

    return list;
}

/*
 * Populate MigrationInfo from postcopy's blocktime context. It does
 * nothing unless the postcopy-blocktime capability was set.
 *
 * @info: pointer to MigrationInfo to populate
 */
void fill_destination_postcopy_migration_info(MigrationInfo *info)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *bc = mis->blocktime_ctx;

    if (!bc) {
        return;
    }

    info->has_postcopy_blocktime = true;
    info->postcopy_blocktime = bc->total_blocktime;
    info->has_postcopy_vcpu_blocktime = true;
    info->postcopy_vcpu_blocktime = get_vcpu_blocktime_list(bc);
}

static uint32_t get_postcopy_total_blocktime(void)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *bc = mis->blocktime_ctx;

    if (!bc) {
        return 0;
    }

    return bc->total_blocktime;
}

/**
 * receive_ufd_features: check userfault fd features, to request only supported
 * features in the future.
 *
 * Returns: true on success
 *
 * __NR_userfaultfd must have been checked before calling this.
 * @features: out parameter, will contain uffdio_api.features provided by the
 * kernel in case of success
 */
static bool receive_ufd_features(uint64_t *features)
{
    struct uffdio_api api_struct = {0};
    int ufd;
    bool ret = true;

    ufd = uffd_open(O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: uffd_open() failed: %s", __func__, strerror(errno));
        return false;
    }

    /* ask features */
    api_struct.api = UFFD_API;
    api_struct.features = 0;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("%s: UFFDIO_API failed: %s", __func__,
                     strerror(errno));
        ret = false;
        goto release_ufd;
    }

    *features = api_struct.features;

release_ufd:
    close(ufd);
    return ret;
}

/**
 * request_ufd_features: this function should be called only once on a newly
 * opened ufd; subsequent calls will lead to an error.
 *
 * Returns: true on success
 *
 * @ufd: fd obtained from the userfaultfd syscall
 * @features: feature bit mask, see UFFD_API_FEATURES
 */
static bool request_ufd_features(int ufd, uint64_t features)
{
    struct uffdio_api api_struct = {0};
    uint64_t ioctl_mask;

    api_struct.api = UFFD_API;
    api_struct.features = features;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("%s failed: UFFDIO_API failed: %s", __func__,
                     strerror(errno));
        return false;
    }

    ioctl_mask = (__u64)1 << _UFFDIO_REGISTER |
                 (__u64)1 << _UFFDIO_UNREGISTER;
    if ((api_struct.ioctls & ioctl_mask) != ioctl_mask) {
        error_report("Missing userfault features: %" PRIx64,
                     (uint64_t)(~api_struct.ioctls & ioctl_mask));
        return false;
    }

    return true;
}
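
/*
 * A minimal sketch of how the two helpers above combine (the caller below,
 * ufd_check_and_apply(), adds error handling and feature selection):
 *
 *     uint64_t supported;
 *     receive_ufd_features(&supported);       // probe on a short-lived fd
 *     request_ufd_features(ufd, supported & wanted);  // commit on real fd
 *
 * UFFDIO_API may only be issued once per fd, which is why the probe is
 * done on a separate, throwaway descriptor rather than on the fd that will
 * later be registered.
 */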

static bool ufd_check_and_apply(int ufd, MigrationIncomingState *mis)
{
    uint64_t asked_features = 0;
    static uint64_t supported_features;

    /*
     * It's not possible to request UFFD_API twice on one fd, and the
     * userfault fd features are persistent.
     */
    if (!supported_features) {
        if (!receive_ufd_features(&supported_features)) {
            error_report("%s failed", __func__);
            return false;
        }
    }

#ifdef UFFD_FEATURE_THREAD_ID
    if (UFFD_FEATURE_THREAD_ID & supported_features) {
        asked_features |= UFFD_FEATURE_THREAD_ID;
        if (migrate_postcopy_blocktime()) {
            if (!mis->blocktime_ctx) {
                mis->blocktime_ctx = blocktime_context_new();
            }
        }
    }
#endif

    /*
     * Request features even if asked_features is 0, because the kernel
     * expects UFFD_API before UFFDIO_REGISTER on each userfault file
     * descriptor.
     */
    if (!request_ufd_features(ufd, asked_features)) {
        error_report("%s failed: features %" PRIu64, __func__,
                     asked_features);
        return false;
    }

    if (qemu_real_host_page_size() != ram_pagesize_summary()) {
        bool have_hp = false;
        /* We've got a huge page */
#ifdef UFFD_FEATURE_MISSING_HUGETLBFS
        have_hp = supported_features & UFFD_FEATURE_MISSING_HUGETLBFS;
#endif
        if (!have_hp) {
            error_report("Userfault on this host does not support huge pages");
            return false;
        }
    }
    return true;
}

/* Callback from postcopy_ram_supported_by_host block iterator.
 */
static int test_ramblock_postcopiable(RAMBlock *rb)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    ram_addr_t length = qemu_ram_get_used_length(rb);
    size_t pagesize = qemu_ram_pagesize(rb);
    QemuFsType fs;

    if (length % pagesize) {
        error_report("Postcopy requires RAM blocks to be a page size multiple,"
                     " block %s is 0x" RAM_ADDR_FMT " bytes with a "
                     "page size of 0x%zx", block_name, length, pagesize);
        return 1;
    }

    if (rb->fd >= 0) {
        fs = qemu_fd_getfs(rb->fd);
        if (fs != QEMU_FS_TYPE_TMPFS && fs != QEMU_FS_TYPE_HUGETLBFS) {
            error_report("Host backend files need to be TMPFS or HUGETLBFS only");
            return 1;
        }
    }

    return 0;
}

/*
 * Note: this has the side effect of munlock'ing all of RAM; that's
 * normally fine since if the postcopy succeeds it gets turned back on at the
 * end.
 */
bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
{
    long pagesize = qemu_real_host_page_size();
    int ufd = -1;
    bool ret = false; /* Error unless we change it */
    void *testarea = NULL;
    struct uffdio_register reg_struct;
    struct uffdio_range range_struct;
    uint64_t feature_mask;
    Error *local_err = NULL;
    RAMBlock *block;

    if (qemu_target_page_size() > pagesize) {
        error_report("Target page size bigger than host page size");
        goto out;
    }

    ufd = uffd_open(O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: userfaultfd not available: %s", __func__,
                     strerror(errno));
        goto out;
    }

    /* Give devices a chance to object */
    if (postcopy_notify(POSTCOPY_NOTIFY_PROBE, &local_err)) {
        error_report_err(local_err);
        goto out;
    }

    /* Version and features check */
    if (!ufd_check_and_apply(ufd, mis)) {
        goto out;
    }

    /*
     * We don't support postcopy with some types of ramblocks.
     *
     * NOTE: we deliberately do not use ramblock_is_ignored(); instead we
     * check all possible ramblocks. This is because this function can be
     * called when creating the migration object, a phase during which
     * RAM_MIGRATABLE is not yet properly set for all the ramblocks.
     *
     * A side effect of this is that we'll also check against RAM_SHARED
     * ramblocks even if migrate_ignore_shared() is set (in which case
     * we'll never migrate RAM_SHARED at all), but normally this shouldn't
     * matter in practice; we can revisit if it does.
     */
    RAMBLOCK_FOREACH(block) {
        if (test_ramblock_postcopiable(block)) {
            goto out;
        }
    }

    /*
     * userfault and mlock don't go together; we'll put it back later if
     * it was enabled.
     */
    if (munlockall()) {
        error_report("%s: munlockall: %s", __func__, strerror(errno));
        goto out;
    }

    /*
     * We need to check that the ops we need are supported on anon memory.
     * To do that we need to register a chunk and see the flags that
     * are returned.
     */
    testarea = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE |
                    MAP_ANONYMOUS, -1, 0);
    if (testarea == MAP_FAILED) {
        error_report("%s: Failed to map test area: %s", __func__,
                     strerror(errno));
        goto out;
    }
    g_assert(QEMU_PTR_IS_ALIGNED(testarea, pagesize));

    reg_struct.range.start = (uintptr_t)testarea;
    reg_struct.range.len = pagesize;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    if (ioctl(ufd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        goto out;
    }

    range_struct.start = (uintptr_t)testarea;
    range_struct.len = pagesize;
    if (ioctl(ufd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s userfault unregister: %s", __func__, strerror(errno));
        goto out;
    }
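
    /*
     * UFFDIO_REGISTER reported, in reg_struct.ioctls, which ioctls the
     * kernel supports on the registered range.  The check below requires
     * all three that postcopy relies on: _UFFDIO_COPY to place received
     * pages, _UFFDIO_ZEROPAGE to place zero pages without transferring
     * them, and _UFFDIO_WAKE to restart threads blocked on a fault.
     */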

    feature_mask = (__u64)1 << _UFFDIO_WAKE |
                   (__u64)1 << _UFFDIO_COPY |
                   (__u64)1 << _UFFDIO_ZEROPAGE;
    if ((reg_struct.ioctls & feature_mask) != feature_mask) {
        error_report("Missing userfault map features: %" PRIx64,
                     (uint64_t)(~reg_struct.ioctls & feature_mask));
        goto out;
    }

    /* Success! */
    ret = true;
out:
    if (testarea) {
        munmap(testarea, pagesize);
    }
    if (ufd != -1) {
        close(ufd);
    }
    return ret;
}

/*
 * Setup an area of RAM so that it *can* be used for postcopy later; this
 * must be done right at the start prior to pre-copy.
 * opaque should be the MIS.
 */
static int init_range(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    void *host_addr = qemu_ram_get_host_addr(rb);
    ram_addr_t offset = qemu_ram_get_offset(rb);
    ram_addr_t length = qemu_ram_get_used_length(rb);
    trace_postcopy_init_range(block_name, host_addr, offset, length);

    /*
     * Save the used_length before running the guest. In case we have to
     * resize RAM blocks when syncing RAM block sizes from the source during
     * precopy, we'll update it manually via the ram block notifier.
     */
    rb->postcopy_length = length;

    /*
     * We need the whole of RAM to be truly empty for postcopy, so things
     * like ROMs and any data tables built during init must be zero'd
     * - we're going to get the copy from the source anyway.
     * (Precopy will just overwrite this data, so doesn't need the discard)
     */
    if (ram_discard_range(block_name, 0, length)) {
        return -1;
    }

    return 0;
}

/*
 * At the end of migration, undo the effects of init_range.
 * opaque should be the MIS.
 */
static int cleanup_range(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    void *host_addr = qemu_ram_get_host_addr(rb);
    ram_addr_t offset = qemu_ram_get_offset(rb);
    ram_addr_t length = rb->postcopy_length;
    MigrationIncomingState *mis = opaque;
    struct uffdio_range range_struct;
    trace_postcopy_cleanup_range(block_name, host_addr, offset, length);

    /*
     * We turned off hugepages for the precopy stage when postcopy was
     * enabled; we can turn them back on now.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_HUGEPAGE);

    /*
     * We can also turn off userfault now since we should have all the
     * pages.  It can be useful to leave it on to debug postcopy
     * if you're not sure it's always getting every page.
     */
    range_struct.start = (uintptr_t)host_addr;
    range_struct.len = length;

    if (ioctl(mis->userfault_fd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s: userfault unregister %s", __func__, strerror(errno));

        return -1;
    }

    return 0;
}

/*
 * Initialise postcopy-ram, setting the RAM to a state where we can go into
 * postcopy later; must be called prior to any precopy.
 * called from arch_init's similarly named ram_postcopy_incoming_init
 */
int postcopy_ram_incoming_init(MigrationIncomingState *mis)
{
    if (foreach_not_ignored_block(init_range, NULL)) {
        return -1;
    }

    return 0;
}
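
/*
 * Tear down the temporary pages used for placing incoming data; they are
 * allocated in the corresponding setup path on this (destination) side:
 * one tmp_huge_page per postcopy channel, plus one shared zero page.
 */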
static void postcopy_temp_pages_cleanup(MigrationIncomingState *mis)
{
    int i;

    if (mis->postcopy_tmp_pages) {
        for (i = 0; i < mis->postcopy_channels; i++) {
            if (mis->postcopy_tmp_pages[i].tmp_huge_page) {
                munmap(mis->postcopy_tmp_pages[i].tmp_huge_page,
                       mis->largest_page_size);
                mis->postcopy_tmp_pages[i].tmp_huge_page = NULL;
            }
        }
        g_free(mis->postcopy_tmp_pages);
        mis->postcopy_tmp_pages = NULL;
    }

    if (mis->postcopy_tmp_zero_page) {
        munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size);
        mis->postcopy_tmp_zero_page = NULL;
    }
}

/*
 * At the end of a migration where postcopy_ram_incoming_init was called.
 */
int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    trace_postcopy_ram_incoming_cleanup_entry();

    if (mis->preempt_thread_status == PREEMPT_THREAD_CREATED) {
        /* Notify the fast load thread to quit */
        mis->preempt_thread_status = PREEMPT_THREAD_QUIT;
        if (mis->postcopy_qemufile_dst) {
            qemu_file_shutdown(mis->postcopy_qemufile_dst);
        }
        qemu_thread_join(&mis->postcopy_prio_thread);
        mis->preempt_thread_status = PREEMPT_THREAD_NONE;
    }

    if (mis->have_fault_thread) {
        Error *local_err = NULL;

        /* Let the fault thread quit */
        qatomic_set(&mis->fault_thread_quit, 1);
        postcopy_fault_thread_notify(mis);
        trace_postcopy_ram_incoming_cleanup_join();
        qemu_thread_join(&mis->fault_thread);

        if (postcopy_notify(POSTCOPY_NOTIFY_INBOUND_END, &local_err)) {
            error_report_err(local_err);
            return -1;
        }

        if (foreach_not_ignored_block(cleanup_range, mis)) {
            return -1;
        }

        trace_postcopy_ram_incoming_cleanup_closeuf();
        close(mis->userfault_fd);
        close(mis->userfault_event_fd);
        mis->have_fault_thread = false;
    }

    if (enable_mlock) {
        if (os_mlock() < 0) {
            error_report("mlock: %s", strerror(errno));
            /*
             * It doesn't feel right to fail at this point, we have a valid
             * VM state.
             */
        }
    }

    postcopy_temp_pages_cleanup(mis);

    trace_postcopy_ram_incoming_cleanup_blocktime(
            get_postcopy_total_blocktime());

    trace_postcopy_ram_incoming_cleanup_exit();
    return 0;
}

/*
 * Disable huge pages on an area
 */
static int nhp_range(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    void *host_addr = qemu_ram_get_host_addr(rb);
    ram_addr_t offset = qemu_ram_get_offset(rb);
    ram_addr_t length = rb->postcopy_length;
    trace_postcopy_nhp_range(block_name, host_addr, offset, length);

    /*
     * Before we do discards we need to ensure those discards really
     * do delete areas of the page, even if THP thinks a hugepage would
     * be a good idea, so force hugepages off.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_NOHUGEPAGE);

    return 0;
}

/*
 * Userfault requires us to mark RAM as NOHUGEPAGE prior to discard;
 * however, leaving it until after precopy means that most of the precopy
 * data is still THPd.
 */
int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    if (foreach_not_ignored_block(nhp_range, mis)) {
        return -1;
    }

    postcopy_state_set(POSTCOPY_INCOMING_DISCARD);

    return 0;
}

/*
 * Mark the given area of RAM as requiring notification on writes to
 * not-yet-received areas.
 * Used as a callback on foreach_not_ignored_block.
 *   host_addr: Base of area to mark
 *   offset: Offset in the whole ram arena
 *   length: Length of the section
 *   opaque: MigrationIncomingState pointer
 * Returns 0 on success
 */
static int ram_block_enable_notify(RAMBlock *rb, void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_register reg_struct;

    reg_struct.range.start = (uintptr_t)qemu_ram_get_host_addr(rb);
    reg_struct.range.len = rb->postcopy_length;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    /* Now tell our userfault_fd that it's responsible for this area */
    if (ioctl(mis->userfault_fd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        return -1;
    }
    if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
        error_report("%s userfault: Region doesn't support COPY", __func__);
        return -1;
    }
    if (reg_struct.ioctls & ((__u64)1 << _UFFDIO_ZEROPAGE)) {
        qemu_ram_set_uf_zeroable(rb);
    }

    return 0;
}

int postcopy_wake_shared(struct PostCopyFD *pcfd,
                         uint64_t client_addr,
                         RAMBlock *rb)
{
    size_t pagesize = qemu_ram_pagesize(rb);
    struct uffdio_range range;
    int ret;
    trace_postcopy_wake_shared(client_addr, qemu_ram_get_idstr(rb));
    range.start = ROUND_DOWN(client_addr, pagesize);
    range.len = pagesize;
    ret = ioctl(pcfd->fd, UFFDIO_WAKE, &range);
    if (ret) {
        error_report("%s: Failed to wake: %zx in %s (%s)",
                     __func__, (size_t)client_addr, qemu_ram_get_idstr(rb),
                     strerror(errno));
    }
    return ret;
}
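
/*
 * Note that UFFDIO_WAKE (used above) resolves a fault without placing any
 * page: it simply restarts the threads blocked on the range.  That is the
 * right action when the page has already arrived, e.g. when the receive
 * bitmap check in postcopy_request_shared_page() below shows the page as
 * present.
 */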

static int postcopy_request_page(MigrationIncomingState *mis, RAMBlock *rb,
                                 ram_addr_t start, uint64_t haddr)
{
    void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb));

    /*
     * Discarded pages (via RamDiscardManager) are never migrated. On unlikely
     * access, place a zeropage, which will also set the relevant bits in the
     * recv_bitmap accordingly, so we won't try placing a zeropage twice.
     *
     * Checking a single bit is sufficient to handle pagesize > TPS as either
     * all relevant bits are set or not.
     */
    assert(QEMU_IS_ALIGNED(start, qemu_ram_pagesize(rb)));
    if (ramblock_page_is_discarded(rb, start)) {
        bool received = ramblock_recv_bitmap_test_byte_offset(rb, start);

        return received ? 0 : postcopy_place_page_zero(mis, aligned, rb);
    }

    return migrate_send_rp_req_pages(mis, rb, start, haddr);
}

/*
 * Callback from shared fault handlers to ask for a page;
 * the page must be specified by a RAMBlock and an offset in that rb.
 * Note: Only for use by shared fault handlers (in the fault thread)
 */
int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb,
                                 uint64_t client_addr, uint64_t rb_offset)
{
    uint64_t aligned_rbo = ROUND_DOWN(rb_offset, qemu_ram_pagesize(rb));
    MigrationIncomingState *mis = migration_incoming_get_current();

    trace_postcopy_request_shared_page(pcfd->idstr, qemu_ram_get_idstr(rb),
                                       rb_offset);
    if (ramblock_recv_bitmap_test_byte_offset(rb, aligned_rbo)) {
        trace_postcopy_request_shared_page_present(pcfd->idstr,
                                        qemu_ram_get_idstr(rb), rb_offset);
        return postcopy_wake_shared(pcfd, client_addr, rb);
    }
    postcopy_request_page(mis, rb, aligned_rbo, client_addr);
    return 0;
}

static int get_mem_fault_cpu_index(uint32_t pid)
{
    CPUState *cpu_iter;

    CPU_FOREACH(cpu_iter) {
        if (cpu_iter->thread_id == pid) {
            trace_get_mem_fault_cpu_index(cpu_iter->cpu_index, pid);
            return cpu_iter->cpu_index;
        }
    }
    trace_get_mem_fault_cpu_index(-1, pid);
    return -1;
}
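
/*
 * Blocktime timestamps are kept as 32-bit millisecond offsets from
 * start_time rather than as absolute times: the per-vCPU fields are
 * updated with atomic ops, and 0 doubles as the "no fault in flight"
 * sentinel, which is why the offset below is clamped to be at least 1.
 */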

static uint32_t get_low_time_offset(PostcopyBlocktimeContext *dc)
{
    int64_t start_time_offset = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) -
                                    dc->start_time;
    return start_time_offset < 1 ? 1 : start_time_offset & UINT32_MAX;
}

/*
 * This function is called when a page fault occurs. It tracks
 * vCPU blocking time.
 *
 * @addr: faulted host virtual address
 * @ptid: faulted process thread id
 * @rb: ramblock appropriate to addr
 */
static void mark_postcopy_blocktime_begin(uintptr_t addr, uint32_t ptid,
                                          RAMBlock *rb)
{
    int cpu, already_received;
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
    uint32_t low_time_offset;

    if (!dc || ptid == 0) {
        return;
    }
    cpu = get_mem_fault_cpu_index(ptid);
    if (cpu < 0) {
        return;
    }

    low_time_offset = get_low_time_offset(dc);
    if (dc->vcpu_addr[cpu] == 0) {
        qatomic_inc(&dc->smp_cpus_down);
    }

    qatomic_xchg(&dc->last_begin, low_time_offset);
    qatomic_xchg(&dc->page_fault_vcpu_time[cpu], low_time_offset);
    qatomic_xchg(&dc->vcpu_addr[cpu], addr);

    /*
     * Check it here, not at the beginning of the function, because this
     * check could occur earlier than bitmap_set in qemu_ufd_copy_ioctl.
     */
    already_received = ramblock_recv_bitmap_test(rb, (void *)addr);
    if (already_received) {
        qatomic_xchg(&dc->vcpu_addr[cpu], 0);
        qatomic_xchg(&dc->page_fault_vcpu_time[cpu], 0);
        qatomic_dec(&dc->smp_cpus_down);
    }
    trace_mark_postcopy_blocktime_begin(addr, dc, dc->page_fault_vcpu_time[cpu],
                                        cpu, already_received);
}

/*
 * This function just provides the calculated blocktime per cpu and traces it.
 * Total blocktime is calculated in mark_postcopy_blocktime_end.
 *
 *
 * Assume we have 3 CPUs:
 *
 *      S1        E1           S1               E1
 * -----***********------------xxx***************------------------------> CPU1
 *
 *             S2                E2
 * ------------****************xxx---------------------------------------> CPU2
 *
 *                         S3            E3
 * ------------------------****xxx********-------------------------------> CPU3
 *
 * We have the sequence S1,S2,E1,S3,S1,E2,E3,E1
 * S2,E1 - doesn't match the condition, because the sequence S1,S2,E1
 *         doesn't include CPU3
 * S3,S1,E2 - the sequence includes all CPUs; in this case the overlap will
 *            be S1,E2 - it's a part of total blocktime.
 * S1 - here is last_begin
 * Legend of the picture:
 *              * - means blocktime per vCPU
 *              x - means overlapped blocktime (total blocktime)
 *
 * @addr: host virtual address
 */
static void mark_postcopy_blocktime_end(uintptr_t addr)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int smp_cpus = ms->smp.cpus;
    int i, affected_cpu = 0;
    bool vcpu_total_blocktime = false;
    uint32_t read_vcpu_time, low_time_offset;

    if (!dc) {
        return;
    }

    low_time_offset = get_low_time_offset(dc);
    /*
     * Look up the cpu to clear it. This algorithm looks straightforward
     * but it's not optimal; a better one would keep a tree or hash where
     * the key is an address and the value is a list of vCPUs.
     */
    for (i = 0; i < smp_cpus; i++) {
        uint32_t vcpu_blocktime = 0;

        read_vcpu_time = qatomic_fetch_add(&dc->page_fault_vcpu_time[i], 0);
        if (qatomic_fetch_add(&dc->vcpu_addr[i], 0) != addr ||
            read_vcpu_time == 0) {
            continue;
        }
        qatomic_xchg(&dc->vcpu_addr[i], 0);
        vcpu_blocktime = low_time_offset - read_vcpu_time;
        affected_cpu += 1;
        /*
         * We need to know whether mark_postcopy_blocktime_end was called for
         * a faulted page; the other possible case is a prefetched page, and
         * in that case we shouldn't be here.
         */
        if (!vcpu_total_blocktime &&
            qatomic_fetch_add(&dc->smp_cpus_down, 0) == smp_cpus) {
            vcpu_total_blocktime = true;
        }
        /* Continue the loop, since one page could affect several vCPUs */
        dc->vcpu_blocktime[i] += vcpu_blocktime;
    }

    qatomic_sub(&dc->smp_cpus_down, affected_cpu);
    if (vcpu_total_blocktime) {
        dc->total_blocktime += low_time_offset - qatomic_fetch_add(
                &dc->last_begin, 0);
    }
    trace_mark_postcopy_blocktime_end(addr, dc, dc->total_blocktime,
                                      affected_cpu);
}

static void postcopy_pause_fault_thread(MigrationIncomingState *mis)
{
    trace_postcopy_pause_fault_thread();
    qemu_sem_wait(&mis->postcopy_pause_sem_fault);
    trace_postcopy_pause_fault_thread_continued();
}

/*
 * Handle faults detected by the USERFAULT markings
 */
static void *postcopy_ram_fault_thread(void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffd_msg msg;
    int ret;
    size_t index;
    RAMBlock *rb = NULL;

    trace_postcopy_ram_fault_thread_entry();
    rcu_register_thread();
    mis->last_rb = NULL; /* last RAMBlock we sent part of */
    qemu_sem_post(&mis->thread_sync_sem);

    struct pollfd *pfd;
    size_t pfd_len = 2 + mis->postcopy_remote_fds->len;

    pfd = g_new0(struct pollfd, pfd_len);

    pfd[0].fd = mis->userfault_fd;
    pfd[0].events = POLLIN;
    pfd[1].fd = mis->userfault_event_fd;
    pfd[1].events = POLLIN; /* Waiting for eventfd to go positive */
    trace_postcopy_ram_fault_thread_fds_core(pfd[0].fd, pfd[1].fd);
    for (index = 0; index < mis->postcopy_remote_fds->len; index++) {
        struct PostCopyFD *pcfd = &g_array_index(mis->postcopy_remote_fds,
                                                 struct PostCopyFD, index);
        pfd[2 + index].fd = pcfd->fd;
        pfd[2 + index].events = POLLIN;
        trace_postcopy_ram_fault_thread_fds_extra(2 + index, pcfd->idstr,
                                                  pcfd->fd);
    }
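
    /*
     * Poll set layout: pfd[0] is our own userfault_fd (guest RAM faults),
     * pfd[1] is the eventfd used to kick this thread (quit or pause/resume),
     * and pfd[2..] are the userfault fds registered by external processes
     * sharing memory with us (e.g. vhost-user backends).
     */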

    while (true) {
        ram_addr_t rb_offset;
        int poll_result;

        /*
         * We're mainly waiting for the kernel to give us a faulting HVA;
         * however, we can be told to quit via userfault_event_fd, which is
         * an eventfd
         */

        poll_result = poll(pfd, pfd_len, -1 /* Wait forever */);
        if (poll_result == -1) {
            error_report("%s: userfault poll: %s", __func__, strerror(errno));
            break;
        }

        if (!mis->to_src_file) {
            /*
             * Possibly someone has told us via the event that the return
             * path is already broken. We should hold until the channel is
             * rebuilt.
             */
            postcopy_pause_fault_thread(mis);
        }

        if (pfd[1].revents) {
            uint64_t tmp64 = 0;

            /* Consume the signal */
            if (read(mis->userfault_event_fd, &tmp64, 8) != 8) {
                /* Nothing obviously nicer than posting this error. */
                error_report("%s: read() failed", __func__);
            }

            if (qatomic_read(&mis->fault_thread_quit)) {
                trace_postcopy_ram_fault_thread_quit();
                break;
            }
        }

        if (pfd[0].revents) {
            poll_result--;
            ret = read(mis->userfault_fd, &msg, sizeof(msg));
            if (ret != sizeof(msg)) {
                if (errno == EAGAIN) {
                    /*
                     * if a wake up happens on the other thread just after
                     * the poll, there is nothing to read.
                     */
                    continue;
                }
                if (ret < 0) {
                    error_report("%s: Failed to read full userfault "
                                 "message: %s",
                                 __func__, strerror(errno));
                    break;
                } else {
                    error_report("%s: Read %d bytes from userfaultfd "
                                 "expected %zd",
                                 __func__, ret, sizeof(msg));
                    break; /* Lost alignment, don't know what we'd read next */
                }
            }
            if (msg.event != UFFD_EVENT_PAGEFAULT) {
                error_report("%s: Read unexpected event %ud from userfaultfd",
                             __func__, msg.event);
David Alan Gilbert continue; /* It's not a page fault, shouldn't happen */ 1040c4faeed2SDr. David Alan Gilbert } 1041c4faeed2SDr. David Alan Gilbert 1042c4faeed2SDr. David Alan Gilbert rb = qemu_ram_block_from_host( 1043c4faeed2SDr. David Alan Gilbert (void *)(uintptr_t)msg.arg.pagefault.address, 1044f615f396SPaolo Bonzini true, &rb_offset); 1045c4faeed2SDr. David Alan Gilbert if (!rb) { 1046c4faeed2SDr. David Alan Gilbert error_report("postcopy_ram_fault_thread: Fault outside guest: %" 1047c4faeed2SDr. David Alan Gilbert PRIx64, (uint64_t)msg.arg.pagefault.address); 1048c4faeed2SDr. David Alan Gilbert break; 1049c4faeed2SDr. David Alan Gilbert } 1050c4faeed2SDr. David Alan Gilbert 10517648297dSDavid Hildenbrand rb_offset = ROUND_DOWN(rb_offset, qemu_ram_pagesize(rb)); 1052c4faeed2SDr. David Alan Gilbert trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address, 1053c4faeed2SDr. David Alan Gilbert qemu_ram_get_idstr(rb), 1054575b0b33SAlexey Perevalov rb_offset, 1055575b0b33SAlexey Perevalov msg.arg.pagefault.feat.ptid); 1056575b0b33SAlexey Perevalov mark_postcopy_blocktime_begin( 1057575b0b33SAlexey Perevalov (uintptr_t)(msg.arg.pagefault.address), 1058575b0b33SAlexey Perevalov msg.arg.pagefault.feat.ptid, rb); 1059575b0b33SAlexey Perevalov 10603a7804c3SPeter Xu retry: 1061c4faeed2SDr. David Alan Gilbert /* 1062c4faeed2SDr. David Alan Gilbert * Send the request to the source - we want to request one 1063c4faeed2SDr. David Alan Gilbert * of our host page sizes (which is >= TPS) 1064c4faeed2SDr. David Alan Gilbert */ 10659470c5e0SDavid Hildenbrand ret = postcopy_request_page(mis, rb, rb_offset, 10668f8bfffcSPeter Xu msg.arg.pagefault.address); 10673a7804c3SPeter Xu if (ret) { 10683a7804c3SPeter Xu /* May be network failure, try to wait for recovery */ 106927dd21b4SPeter Xu postcopy_pause_fault_thread(mis); 10703a7804c3SPeter Xu goto retry; 1071c4faeed2SDr. David Alan Gilbert } 1072c4faeed2SDr. David Alan Gilbert } 107300fa4fc8SDr. David Alan Gilbert 107400fa4fc8SDr. David Alan Gilbert /* Now handle any requests from external processes on shared memory */ 107500fa4fc8SDr. David Alan Gilbert /* TODO: May need to handle devices deregistering during postcopy */ 107600fa4fc8SDr. David Alan Gilbert for (index = 2; index < pfd_len && poll_result; index++) { 107700fa4fc8SDr. David Alan Gilbert if (pfd[index].revents) { 107800fa4fc8SDr. David Alan Gilbert struct PostCopyFD *pcfd = 107900fa4fc8SDr. David Alan Gilbert &g_array_index(mis->postcopy_remote_fds, 108000fa4fc8SDr. David Alan Gilbert struct PostCopyFD, index - 2); 108100fa4fc8SDr. David Alan Gilbert 108200fa4fc8SDr. David Alan Gilbert poll_result--; 108300fa4fc8SDr. David Alan Gilbert if (pfd[index].revents & POLLERR) { 108400fa4fc8SDr. David Alan Gilbert error_report("%s: POLLERR on poll %zd fd=%d", 108500fa4fc8SDr. David Alan Gilbert __func__, index, pcfd->fd); 108600fa4fc8SDr. David Alan Gilbert pfd[index].events = 0; 108700fa4fc8SDr. David Alan Gilbert continue; 108800fa4fc8SDr. David Alan Gilbert } 108900fa4fc8SDr. David Alan Gilbert 109000fa4fc8SDr. David Alan Gilbert ret = read(pcfd->fd, &msg, sizeof(msg)); 109100fa4fc8SDr. David Alan Gilbert if (ret != sizeof(msg)) { 109200fa4fc8SDr. David Alan Gilbert if (errno == EAGAIN) { 109300fa4fc8SDr. David Alan Gilbert /* 109400fa4fc8SDr. David Alan Gilbert * if a wake up happens on the other thread just after 109500fa4fc8SDr. David Alan Gilbert * the poll, there is nothing to read. 109600fa4fc8SDr. David Alan Gilbert */ 109700fa4fc8SDr. David Alan Gilbert continue; 109800fa4fc8SDr. 
David Alan Gilbert } 109900fa4fc8SDr. David Alan Gilbert if (ret < 0) { 110000fa4fc8SDr. David Alan Gilbert error_report("%s: Failed to read full userfault " 110100fa4fc8SDr. David Alan Gilbert "message: %s (shared) revents=%d", 110200fa4fc8SDr. David Alan Gilbert __func__, strerror(errno), 110300fa4fc8SDr. David Alan Gilbert pfd[index].revents); 110400fa4fc8SDr. David Alan Gilbert /*TODO: Could just disable this sharer */ 110500fa4fc8SDr. David Alan Gilbert break; 110600fa4fc8SDr. David Alan Gilbert } else { 110700fa4fc8SDr. David Alan Gilbert error_report("%s: Read %d bytes from userfaultfd " 110800fa4fc8SDr. David Alan Gilbert "expected %zd (shared)", 110900fa4fc8SDr. David Alan Gilbert __func__, ret, sizeof(msg)); 111000fa4fc8SDr. David Alan Gilbert /*TODO: Could just disable this sharer */ 111100fa4fc8SDr. David Alan Gilbert break; /*Lost alignment,don't know what we'd read next*/ 111200fa4fc8SDr. David Alan Gilbert } 111300fa4fc8SDr. David Alan Gilbert } 111400fa4fc8SDr. David Alan Gilbert if (msg.event != UFFD_EVENT_PAGEFAULT) { 111500fa4fc8SDr. David Alan Gilbert error_report("%s: Read unexpected event %ud " 111600fa4fc8SDr. David Alan Gilbert "from userfaultfd (shared)", 111700fa4fc8SDr. David Alan Gilbert __func__, msg.event); 111800fa4fc8SDr. David Alan Gilbert continue; /* It's not a page fault, shouldn't happen */ 111900fa4fc8SDr. David Alan Gilbert } 112000fa4fc8SDr. David Alan Gilbert /* Call the device handler registered with us */ 112100fa4fc8SDr. David Alan Gilbert ret = pcfd->handler(pcfd, &msg); 112200fa4fc8SDr. David Alan Gilbert if (ret) { 112300fa4fc8SDr. David Alan Gilbert error_report("%s: Failed to resolve shared fault on %zd/%s", 112400fa4fc8SDr. David Alan Gilbert __func__, index, pcfd->idstr); 112500fa4fc8SDr. David Alan Gilbert /* TODO: Fail? Disable this sharer? */ 112600fa4fc8SDr. David Alan Gilbert } 112700fa4fc8SDr. David Alan Gilbert } 112800fa4fc8SDr. David Alan Gilbert } 112900fa4fc8SDr. David Alan Gilbert } 113074637e6fSLidong Chen rcu_unregister_thread(); 1131c4faeed2SDr. David Alan Gilbert trace_postcopy_ram_fault_thread_exit(); 1132fc6008f3SMarc-André Lureau g_free(pfd); 1133f0a227adSDr. David Alan Gilbert return NULL; 1134f0a227adSDr. David Alan Gilbert } 1135f0a227adSDr. 
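/*
 * Allocate the temporary pages used to assemble incoming host pages:
 * one per postcopy channel (two when the preempt channel is enabled),
 * plus one shared zero page for RAMBlocks where the kernel can't use
 * UFFDIO_ZEROPAGE.  Returns 0 on success, -errno on failure; partial
 * allocations are cleaned up later.
 */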
static int postcopy_temp_pages_setup(MigrationIncomingState *mis)
{
    PostcopyTmpPage *tmp_page;
    int err, i, channels;
    void *temp_page;

    if (migrate_postcopy_preempt()) {
        /* If preemption enabled, need extra channel for urgent requests */
        mis->postcopy_channels = RAM_CHANNEL_MAX;
    } else {
        /* Both precopy/postcopy on the same channel */
        mis->postcopy_channels = 1;
    }

    channels = mis->postcopy_channels;
    mis->postcopy_tmp_pages = g_malloc0_n(sizeof(PostcopyTmpPage), channels);

    for (i = 0; i < channels; i++) {
        tmp_page = &mis->postcopy_tmp_pages[i];
        temp_page = mmap(NULL, mis->largest_page_size, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (temp_page == MAP_FAILED) {
            err = errno;
            error_report("%s: Failed to map postcopy_tmp_pages[%d]: %s",
                         __func__, i, strerror(err));
            /* Clean up will be done later */
            return -err;
        }
        tmp_page->tmp_huge_page = temp_page;
        /* Initialize default states for each tmp page */
        postcopy_temp_page_reset(tmp_page);
    }

    /*
     * Map large zero page when kernel can't use UFFDIO_ZEROPAGE for hugepages
     */
    mis->postcopy_tmp_zero_page = mmap(NULL, mis->largest_page_size,
                                       PROT_READ | PROT_WRITE,
                                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (mis->postcopy_tmp_zero_page == MAP_FAILED) {
        err = errno;
        mis->postcopy_tmp_zero_page = NULL;
        error_report("%s: Failed to map large zero page %s",
                     __func__, strerror(err));
        return -err;
    }

    memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size);

    return 0;
}

int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
{
    /* Open the fd for the kernel to give us userfaults */
    mis->userfault_fd = uffd_open(O_CLOEXEC | O_NONBLOCK);
    if (mis->userfault_fd == -1) {
        error_report("%s: Failed to open userfault fd: %s", __func__,
                     strerror(errno));
        return -1;
    }

    /*
     * Although the host check already tested the API, we need to
     * do the check again as an ABI handshake on the new fd.
     */
    if (!ufd_check_and_apply(mis->userfault_fd, mis)) {
        return -1;
    }

    /* Now an eventfd we use to tell the fault-thread to quit */
    mis->userfault_event_fd = eventfd(0, EFD_CLOEXEC);
    if (mis->userfault_event_fd == -1) {
        error_report("%s: Opening userfault_event_fd: %s", __func__,
                     strerror(errno));
        close(mis->userfault_fd);
        return -1;
    }

    postcopy_thread_create(mis, &mis->fault_thread, "fault-default",
                           postcopy_ram_fault_thread, QEMU_THREAD_JOINABLE);
    mis->have_fault_thread = true;

    /* Mark so that we get notified of accesses to unwritten areas */
    if (foreach_not_ignored_block(ram_block_enable_notify, mis)) {
        error_report("ram_block_enable_notify failed");
        return -1;
    }

    if (postcopy_temp_pages_setup(mis)) {
        /* Error dumped in the sub-function */
        return -1;
    }

    if (migrate_postcopy_preempt()) {
        /*
         * This thread needs to be created after the temp pages because
         * it'll fetch RAM_CHANNEL_POSTCOPY PostcopyTmpPage immediately.
         */
        postcopy_thread_create(mis, &mis->postcopy_prio_thread, "fault-fast",
                               postcopy_preempt_thread, QEMU_THREAD_JOINABLE);
        mis->preempt_thread_status = PREEMPT_THREAD_CREATED;
    }

    trace_postcopy_ram_enable_notify();

    return 0;
}

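/*
 * Resolve one page atomically via userfaultfd: UFFDIO_COPY from from_addr
 * when source data is available, UFFDIO_ZEROPAGE otherwise.  On success
 * this also updates the receive bitmap and retires any matching entry from
 * the requested-page tree.  Returns 0 on success, otherwise the failing
 * ioctl's return value (with errno set).
 */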
static int qemu_ufd_copy_ioctl(MigrationIncomingState *mis, void *host_addr,
                               void *from_addr, uint64_t pagesize, RAMBlock *rb)
{
    int userfault_fd = mis->userfault_fd;
    int ret;

    if (from_addr) {
        struct uffdio_copy copy_struct;
        copy_struct.dst = (uint64_t)(uintptr_t)host_addr;
        copy_struct.src = (uint64_t)(uintptr_t)from_addr;
        copy_struct.len = pagesize;
        copy_struct.mode = 0;
        ret = ioctl(userfault_fd, UFFDIO_COPY, &copy_struct);
    } else {
        struct uffdio_zeropage zero_struct;
        zero_struct.range.start = (uint64_t)(uintptr_t)host_addr;
        zero_struct.range.len = pagesize;
        zero_struct.mode = 0;
        ret = ioctl(userfault_fd, UFFDIO_ZEROPAGE, &zero_struct);
    }
    if (!ret) {
        qemu_mutex_lock(&mis->page_request_mutex);
        ramblock_recv_bitmap_set_range(rb, host_addr,
                                       pagesize / qemu_target_page_size());
        /*
         * If this page resolves a page fault for a previously recorded
         * faulted address, take a special note to maintain the requested
         * page list.
         */
        if (g_tree_lookup(mis->page_requested, host_addr)) {
            g_tree_remove(mis->page_requested, host_addr);
            mis->page_requested_count--;
            trace_postcopy_page_req_del(host_addr, mis->page_requested_count);
        }
        qemu_mutex_unlock(&mis->page_request_mutex);
        mark_postcopy_blocktime_end((uintptr_t)host_addr);
    }
    return ret;
}

int postcopy_notify_shared_wake(RAMBlock *rb, uint64_t offset)
{
    int i;
    MigrationIncomingState *mis = migration_incoming_get_current();
    GArray *pcrfds = mis->postcopy_remote_fds;

    for (i = 0; i < pcrfds->len; i++) {
        struct PostCopyFD *cur = &g_array_index(pcrfds, struct PostCopyFD, i);
        int ret = cur->waker(cur, rb, offset);
        if (ret) {
            return ret;
        }
    }
    return 0;
}

/*
 * Place a host page (from) at (host) atomically;
 * returns 0 on success
 */
int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
                        RAMBlock *rb)
{
    size_t pagesize = qemu_ram_pagesize(rb);

    /* copy also acks to the kernel waking the stalled thread up
     * TODO: We can inhibit that ack and only do it if it was requested
     * which would be slightly cheaper, but we'd have to be careful
     * of the order of updating our page state.
     */
    if (qemu_ufd_copy_ioctl(mis, host, from, pagesize, rb)) {
        int e = errno;
        error_report("%s: %s copy host: %p from: %p (size: %zd)",
                     __func__, strerror(e), host, from, pagesize);

        return -e;
    }

    trace_postcopy_place_page(host);
    return postcopy_notify_shared_wake(rb,
                                       qemu_ram_block_host_offset(rb, host));
}

/*
 * Place a zero page at (host) atomically;
 * returns 0 on success
 */
int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
                             RAMBlock *rb)
{
    size_t pagesize = qemu_ram_pagesize(rb);
    trace_postcopy_place_page_zero(host);

    /* Normal RAMBlocks can zero a page using UFFDIO_ZEROPAGE
     * but it's not available for everything (e.g. hugetlbpages)
     */
    if (qemu_ram_is_uf_zeroable(rb)) {
        if (qemu_ufd_copy_ioctl(mis, host, NULL, pagesize, rb)) {
            int e = errno;
            error_report("%s: %s zero host: %p",
                         __func__, strerror(e), host);

            return -e;
        }
        return postcopy_notify_shared_wake(rb,
                                           qemu_ram_block_host_offset(rb,
                                                                      host));
    } else {
        return postcopy_place_page(mis, host, mis->postcopy_tmp_zero_page, rb);
    }
}

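/*
 * Caller's-eye sketch of the two placement helpers above (illustrative
 * only; the real consumer is ram_load_postcopy() in migration/ram.c):
 *
 *     if (page_is_all_zero) {
 *         ret = postcopy_place_page_zero(mis, host_addr, rb);
 *     } else {
 *         ret = postcopy_place_page(mis, host_addr, tmp_page, rb);
 *     }
 *
 * Either call atomically fills the faulting page and wakes any thread
 * stalled on it; a negative return is -errno from the underlying ioctl.
 * "page_is_all_zero", "host_addr" and "tmp_page" are stand-in names here.
 */
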
#else
/* No target OS support, stubs just fail */
void fill_destination_postcopy_migration_info(MigrationInfo *info)
{
}

bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
{
    error_report("%s: No OS support", __func__);
    return false;
}

int postcopy_ram_incoming_init(MigrationIncomingState *mis)
{
    error_report("postcopy_ram_incoming_init: No OS support");
    return -1;
}

int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb,
                                 uint64_t client_addr, uint64_t rb_offset)
{
    assert(0);
    return -1;
}

int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
                        RAMBlock *rb)
{
    assert(0);
    return -1;
}

int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
                             RAMBlock *rb)
{
    assert(0);
    return -1;
}

int postcopy_wake_shared(struct PostCopyFD *pcfd,
                         uint64_t client_addr,
                         RAMBlock *rb)
{
    assert(0);
    return -1;
}
#endif

/* ------------------------------------------------------------------------- */

void postcopy_temp_page_reset(PostcopyTmpPage *tmp_page)
{
    tmp_page->target_pages = 0;
    tmp_page->host_addr = NULL;
    /*
     * Set to true on reset; cleared as soon as we receive any non-zero
     * small page within this huge page.
     */
    tmp_page->all_zero = true;
}

void postcopy_fault_thread_notify(MigrationIncomingState *mis)
{
    uint64_t tmp64 = 1;

    /*
     * Wake up the fault_thread.  It's an eventfd that should currently
     * be at 0; we're going to increment it to 1.
     */
    if (write(mis->userfault_event_fd, &tmp64, 8) != 8) {
        /* Not much we can do here, but may as well report it */
        error_report("%s: incrementing failed: %s", __func__,
                     strerror(errno));
    }
}

static PostcopyDiscardState pds = {0};

/**
 * postcopy_discard_send_init: Called at the start of each RAMBlock before
 *   asking to discard individual ranges.
 *
 * @ms: The current migration state.
 * @name: RAMBlock that discards will operate on.
 */
void postcopy_discard_send_init(MigrationState *ms, const char *name)
{
    pds.ramblock_name = name;
    pds.cur_entry = 0;
    pds.nsentwords = 0;
    pds.nsentcmds = 0;
}

/**
 * postcopy_discard_send_range: Called by the bitmap code for each chunk to
 *   discard.  May send a discard message, may just leave it queued to
 *   be sent later.
 *
 * @ms: Current migration state.
 * @start,@length: a range of pages in the migration bitmap in the
 *   RAM block passed to postcopy_discard_send_init() (length=1 is one page)
 */
void postcopy_discard_send_range(MigrationState *ms, unsigned long start,
                                 unsigned long length)
{
    size_t tp_size = qemu_target_page_size();
    /* Convert to byte offsets within the RAM block */
    pds.start_list[pds.cur_entry] = start * tp_size;
    pds.length_list[pds.cur_entry] = length * tp_size;
    trace_postcopy_discard_send_range(pds.ramblock_name, start, length);
    pds.cur_entry++;
    pds.nsentwords++;

    if (pds.cur_entry == MAX_DISCARDS_PER_COMMAND) {
        /* Full set, ship it! */
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
                                              pds.ramblock_name,
                                              pds.cur_entry,
                                              pds.start_list,
                                              pds.length_list);
        pds.nsentcmds++;
        pds.cur_entry = 0;
    }
}

/**
 * postcopy_discard_send_finish: Called at the end of each RAMBlock by the
 *   bitmap code.  Sends any outstanding discard messages.
 *
 * @ms: Current migration state.
 */
void postcopy_discard_send_finish(MigrationState *ms)
{
    /* Anything unsent? */
    if (pds.cur_entry) {
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
                                              pds.ramblock_name,
                                              pds.cur_entry,
                                              pds.start_list,
                                              pds.length_list);
        pds.nsentcmds++;
    }

    trace_postcopy_discard_send_finish(pds.ramblock_name, pds.nsentwords,
                                       pds.nsentcmds);
}

/*
 * Current state of incoming postcopy; note this is not part of
 * MigrationIncomingState since its state is used during cleanup
 * at the end as MIS is being freed.
 */
static PostcopyState incoming_postcopy_state;

PostcopyState postcopy_state_get(void)
{
    return qatomic_load_acquire(&incoming_postcopy_state);
}

/* Set the state and return the old state */
PostcopyState postcopy_state_set(PostcopyState new_state)
{
    return qatomic_xchg(&incoming_postcopy_state, new_state);
}

/*
 * Register a handler for external shared memory postcopy;
 * called on the destination.
 */
void postcopy_register_shared_ufd(struct PostCopyFD *pcfd)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    mis->postcopy_remote_fds = g_array_append_val(mis->postcopy_remote_fds,
                                                  *pcfd);
}

/* Unregister a handler for external shared memory postcopy */
void postcopy_unregister_shared_ufd(struct PostCopyFD *pcfd)
{
    guint i;
    MigrationIncomingState *mis = migration_incoming_get_current();
    GArray *pcrfds = mis->postcopy_remote_fds;

    if (!pcrfds) {
        /* migration has already finished and freed the array */
        return;
    }
    for (i = 0; i < pcrfds->len; i++) {
        struct PostCopyFD *cur = &g_array_index(pcrfds, struct PostCopyFD, i);
        if (cur->fd == pcfd->fd) {
            mis->postcopy_remote_fds = g_array_remove_index(pcrfds, i);
            return;
        }
    }
}

void postcopy_preempt_new_channel(MigrationIncomingState *mis, QEMUFile *file)
{
    /*
     * The new loading channel has its own threads, so it needs to be
     * blocked too.  It's by default true, just be explicit.
     */
    qemu_file_set_blocking(file, true);
    mis->postcopy_qemufile_dst = file;
    qemu_sem_post(&mis->postcopy_qemufile_dst_done);
    trace_postcopy_preempt_new_channel();
}

/*
 * Set up the postcopy preempt channel with the IOC.  If ERROR is specified,
 * set up the error instead.  This helper will free the ERROR if specified.
 */
static void
postcopy_preempt_send_channel_done(MigrationState *s,
                                   QIOChannel *ioc, Error *local_err)
{
    if (local_err) {
        migrate_set_error(s, local_err);
        error_free(local_err);
    } else {
        migration_ioc_register_yank(ioc);
        s->postcopy_qemufile_src = qemu_file_new_output(ioc);
        trace_postcopy_preempt_new_channel();
    }

    /*
     * Kick the waiter in all cases.  The waiter should check
     * postcopy_qemufile_src to know whether it failed or not.
     */
    qemu_sem_post(&s->postcopy_qemufile_src_sem);
}

static void
postcopy_preempt_tls_handshake(QIOTask *task, gpointer opaque)
{
    g_autoptr(QIOChannel) ioc = QIO_CHANNEL(qio_task_get_source(task));
    MigrationState *s = opaque;
    Error *local_err = NULL;

    qio_task_propagate_error(task, &local_err);
    postcopy_preempt_send_channel_done(s, ioc, local_err);
}

static void
postcopy_preempt_send_channel_new(QIOTask *task, gpointer opaque)
{
    g_autoptr(QIOChannel) ioc = QIO_CHANNEL(qio_task_get_source(task));
    MigrationState *s = opaque;
    QIOChannelTLS *tioc;
    Error *local_err = NULL;

    if (qio_task_propagate_error(task, &local_err)) {
        goto out;
    }

    if (migrate_channel_requires_tls_upgrade(ioc)) {
        tioc = migration_tls_client_create(s, ioc, s->hostname, &local_err);
        if (!tioc) {
            goto out;
        }
        trace_postcopy_preempt_tls_handshake();
        qio_channel_set_name(QIO_CHANNEL(tioc), "migration-tls-preempt");
        qio_channel_tls_handshake(tioc, postcopy_preempt_tls_handshake,
                                  s, NULL, NULL);
        /* Setup the channel until TLS handshake finished */
        return;
    }

out:
    /* This handles both good and error cases */
    postcopy_preempt_send_channel_done(s, ioc, local_err);
}

/*
 * Kick off an async task to establish the preempt channel, and wait until
 * the connection setup has completed.  Returns 0 if the channel was
 * established, -1 on error.
 */
int postcopy_preempt_establish_channel(MigrationState *s)
{
    /* If preempt not enabled, no need to wait */
    if (!migrate_postcopy_preempt()) {
        return 0;
    }

    /*
     * Kick off async task to establish preempt channel.  Only do so with
     * 8.0+ machines, because 7.1/7.2 require the channel to be created in
     * the setup phase of migration (even if racy in an unreliable network).
     */
    if (!s->preempt_pre_7_2) {
        postcopy_preempt_setup(s);
    }

    /*
     * We need the postcopy preempt channel to be established before
     * we start doing anything.
     */
    qemu_sem_wait(&s->postcopy_qemufile_src_sem);

    return s->postcopy_qemufile_src ? 0 : -1;
}

void postcopy_preempt_setup(MigrationState *s)
{
    /* Kick an async task to connect */
    socket_send_channel_create(postcopy_preempt_send_channel_new, s);
}

static void postcopy_pause_ram_fast_load(MigrationIncomingState *mis)
{
    trace_postcopy_pause_fast_load();
    qemu_mutex_unlock(&mis->postcopy_prio_thread_mutex);
    qemu_sem_wait(&mis->postcopy_pause_sem_fast_load);
    qemu_mutex_lock(&mis->postcopy_prio_thread_mutex);
    trace_postcopy_pause_fast_load_continued();
}

static bool preempt_thread_should_run(MigrationIncomingState *mis)
{
    return mis->preempt_thread_status != PREEMPT_THREAD_QUIT;
}

void *postcopy_preempt_thread(void *opaque)
{
    MigrationIncomingState *mis = opaque;
    int ret;

    trace_postcopy_preempt_thread_entry();

    rcu_register_thread();

    qemu_sem_post(&mis->thread_sync_sem);

    /*
     * The preempt channel is established asynchronously.  Wait
     * for its completion.
     */
    qemu_sem_wait(&mis->postcopy_qemufile_dst_done);

    /* The source sends RAM_SAVE_FLAG_EOS to terminate this thread */
    qemu_mutex_lock(&mis->postcopy_prio_thread_mutex);
    while (preempt_thread_should_run(mis)) {
        ret = ram_load_postcopy(mis->postcopy_qemufile_dst,
                                RAM_CHANNEL_POSTCOPY);
        /* If an error happened, go into the recovery routine */
        if (ret && preempt_thread_should_run(mis)) {
            postcopy_pause_ram_fast_load(mis);
        } else {
            /* We're done */
            break;
        }
    }
    qemu_mutex_unlock(&mis->postcopy_prio_thread_mutex);

    rcu_unregister_thread();

    trace_postcopy_preempt_thread_exit();

    return NULL;
}
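
/*
 * Source-side flow of the preempt channel, as a sketch (the actual caller
 * of postcopy_preempt_establish_channel() lives in migration.c): before
 * switching the migration into postcopy, the migration thread calls
 * postcopy_preempt_establish_channel(), which (for 8.0+ machine types)
 * kicks off postcopy_preempt_setup() and then blocks on
 * postcopy_qemufile_src_sem until postcopy_preempt_send_channel_done()
 * posts it with either a connected channel or a recorded error.
 */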