/*
 * Postcopy migration for RAM
 *
 * Copyright 2013-2015 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Dave Gilbert  <dgilbert@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

/*
 * Postcopy is a migration technique where the execution flips from the
 * source to the destination before all the data has been copied.
 */

#include "qemu/osdep.h"
#include "qemu/madvise.h"
#include "exec/target_page.h"
#include "migration.h"
#include "qemu-file.h"
#include "savevm.h"
#include "postcopy-ram.h"
#include "ram.h"
#include "qapi/error.h"
#include "qemu/notify.h"
#include "qemu/rcu.h"
#include "sysemu/sysemu.h"
#include "qemu/error-report.h"
#include "trace.h"
#include "hw/boards.h"
#include "exec/ramblock.h"
#include "socket.h"
#include "yank_functions.h"
#include "tls.h"
#include "qemu/userfaultfd.h"
#include "qemu/mmap-alloc.h"
#include "options.h"

/* Arbitrary limit on size of each discard command,
 * keeps them around ~200 bytes
 */
#define MAX_DISCARDS_PER_COMMAND 12

struct PostcopyDiscardState {
    const char *ramblock_name;
    uint16_t cur_entry;
    /*
     * Start and length of a discard range (bytes)
     */
    uint64_t start_list[MAX_DISCARDS_PER_COMMAND];
    uint64_t length_list[MAX_DISCARDS_PER_COMMAND];
    unsigned int nsentwords;
    unsigned int nsentcmds;
};

static NotifierWithReturnList postcopy_notifier_list;

void postcopy_infrastructure_init(void)
{
    notifier_with_return_list_init(&postcopy_notifier_list);
}
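/*
 * Illustrative sketch (not part of the original file): a subsystem that
 * cannot operate across postcopy can veto it from the PROBE notification.
 * The "mydev" names below are hypothetical; real users such as vhost-user
 * follow the same shape.
 *
 *   static int mydev_postcopy_notify(NotifierWithReturn *notifier,
 *                                    void *opaque)
 *   {
 *       PostcopyNotifyData *pnd = opaque;
 *
 *       if (pnd->reason == POSTCOPY_NOTIFY_PROBE) {
 *           error_setg(pnd->errp, "mydev does not support postcopy");
 *           return -ENOENT;
 *       }
 *       return 0;
 *   }
 *
 *   static NotifierWithReturn mydev_notifier = {
 *       .notify = mydev_postcopy_notify,
 *   };
 *   ...
 *   postcopy_add_notifier(&mydev_notifier);
 */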
void postcopy_add_notifier(NotifierWithReturn *nn)
{
    notifier_with_return_list_add(&postcopy_notifier_list, nn);
}

void postcopy_remove_notifier(NotifierWithReturn *n)
{
    notifier_with_return_remove(n);
}

int postcopy_notify(enum PostcopyNotifyReason reason, Error **errp)
{
    struct PostcopyNotifyData pnd;
    pnd.reason = reason;
    pnd.errp = errp;

    return notifier_with_return_list_notify(&postcopy_notifier_list,
                                            &pnd);
}

/*
 * NOTE: this routine is not thread safe, we can't call it concurrently. But it
 * should be good enough for migration's purposes.
 */
void postcopy_thread_create(MigrationIncomingState *mis,
                            QemuThread *thread, const char *name,
                            void *(*fn)(void *), int joinable)
{
    qemu_sem_init(&mis->thread_sync_sem, 0);
    qemu_thread_create(thread, name, fn, mis, joinable);
    qemu_sem_wait(&mis->thread_sync_sem);
    qemu_sem_destroy(&mis->thread_sync_sem);
}

/* Postcopy needs to detect accesses to pages that haven't yet been copied
 * across, and efficiently map new pages in; the techniques for doing this
 * are target OS specific.
 */
#if defined(__linux__)

#include <poll.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <asm/types.h> /* for __u64 */
#endif

#if defined(__linux__) && defined(__NR_userfaultfd) && defined(CONFIG_EVENTFD)
#include <sys/eventfd.h>
#include <linux/userfaultfd.h>
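
/*
 * For readers new to userfaultfd, the bare kernel API used throughout this
 * file boils down to the following sketch (illustrative only; error
 * handling and QEMU wrappers such as uffd_open() omitted):
 *
 *   int uffd = syscall(__NR_userfaultfd, O_CLOEXEC);
 *
 *   struct uffdio_api api = { .api = UFFD_API, .features = 0 };
 *   ioctl(uffd, UFFDIO_API, &api);           handshake, once per fd
 *
 *   struct uffdio_register reg = {
 *       .range = { .start = (uintptr_t)area, .len = area_len },
 *       .mode  = UFFDIO_REGISTER_MODE_MISSING,
 *   };
 *   ioctl(uffd, UFFDIO_REGISTER, &reg);      watch for missing pages
 *
 *   struct uffd_msg msg;
 *   read(uffd, &msg, sizeof(msg));           returns when a fault arrives
 *
 *   struct uffdio_copy copy = {
 *       .dst = msg.arg.pagefault.address & ~(page_size - 1),
 *       .src = (uintptr_t)filled_page,
 *       .len = page_size,
 *   };
 *   ioctl(uffd, UFFDIO_COPY, &copy);         atomically place + wake
 */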
typedef struct PostcopyBlocktimeContext {
    /* time when page fault initiated per vCPU */
    uint32_t *page_fault_vcpu_time;
    /* page address per vCPU */
    uintptr_t *vcpu_addr;
    uint32_t total_blocktime;
    /* blocktime per vCPU */
    uint32_t *vcpu_blocktime;
    /* point in time when last page fault was initiated */
    uint32_t last_begin;
    /* number of vCPUs that are suspended */
    int smp_cpus_down;
    uint64_t start_time;

    /*
     * Handler for exit event, necessary for
     * releasing whole blocktime_ctx
     */
    Notifier exit_notifier;
} PostcopyBlocktimeContext;

static void destroy_blocktime_context(struct PostcopyBlocktimeContext *ctx)
{
    g_free(ctx->page_fault_vcpu_time);
    g_free(ctx->vcpu_addr);
    g_free(ctx->vcpu_blocktime);
    g_free(ctx);
}

static void migration_exit_cb(Notifier *n, void *data)
{
    PostcopyBlocktimeContext *ctx = container_of(n, PostcopyBlocktimeContext,
                                                 exit_notifier);
    destroy_blocktime_context(ctx);
}

static struct PostcopyBlocktimeContext *blocktime_context_new(void)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int smp_cpus = ms->smp.cpus;
    PostcopyBlocktimeContext *ctx = g_new0(PostcopyBlocktimeContext, 1);
    ctx->page_fault_vcpu_time = g_new0(uint32_t, smp_cpus);
    ctx->vcpu_addr = g_new0(uintptr_t, smp_cpus);
    ctx->vcpu_blocktime = g_new0(uint32_t, smp_cpus);

    ctx->exit_notifier.notify = migration_exit_cb;
    ctx->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    qemu_add_exit_notifier(&ctx->exit_notifier);
    return ctx;
}

static uint32List *get_vcpu_blocktime_list(PostcopyBlocktimeContext *ctx)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    uint32List *list = NULL;
    int i;

    for (i = ms->smp.cpus - 1; i >= 0; i--) {
        QAPI_LIST_PREPEND(list, ctx->vcpu_blocktime[i]);
    }

    return list;
}
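/*
 * The per-vCPU list built above surfaces in QMP roughly like this
 * (illustrative exchange; the field values are made up):
 *
 *   -> { "execute": "query-migrate" }
 *   <- { "return": { ...,
 *          "postcopy-blocktime": 1704,
 *          "postcopy-vcpu-blocktime": [ 230, 481, 0, 993 ] } }
 */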
/*
 * This function populates MigrationInfo from postcopy's blocktime
 * context. It does nothing unless the postcopy-blocktime capability
 * was set.
 *
 * @info: pointer to MigrationInfo to populate
 */
void fill_destination_postcopy_migration_info(MigrationInfo *info)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *bc = mis->blocktime_ctx;

    if (!bc) {
        return;
    }

    info->has_postcopy_blocktime = true;
    info->postcopy_blocktime = bc->total_blocktime;
    info->has_postcopy_vcpu_blocktime = true;
    info->postcopy_vcpu_blocktime = get_vcpu_blocktime_list(bc);
}

static uint32_t get_postcopy_total_blocktime(void)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *bc = mis->blocktime_ctx;

    if (!bc) {
        return 0;
    }

    return bc->total_blocktime;
}

/**
 * receive_ufd_features: check userfault fd features, to request only supported
 * features in the future.
 *
 * Returns: true on success
 *
 * __NR_userfaultfd - should be checked before
 * @features: out parameter will contain uffdio_api.features provided by kernel
 *            in case of success
 */
static bool receive_ufd_features(uint64_t *features)
{
    struct uffdio_api api_struct = {0};
    int ufd;
    bool ret = true;

    ufd = uffd_open(O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: uffd_open() failed: %s", __func__, strerror(errno));
        return false;
    }

    /* ask features */
    api_struct.api = UFFD_API;
    api_struct.features = 0;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("%s: UFFDIO_API failed: %s", __func__,
                     strerror(errno));
        ret = false;
        goto release_ufd;
    }

    *features = api_struct.features;

release_ufd:
    close(ufd);
    return ret;
}
/**
 * request_ufd_features: this function should be called only once on a newly
 * opened ufd, subsequent calls will lead to error.
 *
 * Returns: true on success
 *
 * @ufd: fd obtained from userfaultfd syscall
 * @features: bit mask, see UFFD_API_FEATURES
 */
static bool request_ufd_features(int ufd, uint64_t features)
{
    struct uffdio_api api_struct = {0};
    uint64_t ioctl_mask;

    api_struct.api = UFFD_API;
    api_struct.features = features;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("%s failed: UFFDIO_API failed: %s", __func__,
                     strerror(errno));
        return false;
    }

    ioctl_mask = (__u64)1 << _UFFDIO_REGISTER |
                 (__u64)1 << _UFFDIO_UNREGISTER;
    if ((api_struct.ioctls & ioctl_mask) != ioctl_mask) {
        error_report("Missing userfault features: %" PRIx64,
                     (uint64_t)(~api_struct.ioctls & ioctl_mask));
        return false;
    }

    return true;
}
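/*
 * Note on ordering (an assumption-level summary of the kernel contract):
 * a fresh userfaultfd answers UFFDIO_REGISTER only after the UFFDIO_API
 * handshake, and that handshake can happen just once per fd.  Hence the
 * split between the two helpers above: probe once on a throwaway fd, then
 * bind the negotiated feature set to the real fd, e.g.:
 *
 *   uint64_t feats;
 *   if (receive_ufd_features(&feats)) {
 *       request_ufd_features(ufd, feats & UFFD_FEATURE_THREAD_ID);
 *   }
 */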
static bool ufd_check_and_apply(int ufd, MigrationIncomingState *mis,
                                Error **errp)
{
    uint64_t asked_features = 0;
    static uint64_t supported_features;

    ERRP_GUARD();
    /*
     * It's not possible to request UFFD_API twice per fd, and the
     * userfault fd features are persistent.
     */
    if (!supported_features) {
        if (!receive_ufd_features(&supported_features)) {
            error_setg(errp, "Userfault feature detection failed");
            return false;
        }
    }

#ifdef UFFD_FEATURE_THREAD_ID
    if (UFFD_FEATURE_THREAD_ID & supported_features) {
        asked_features |= UFFD_FEATURE_THREAD_ID;
        if (migrate_postcopy_blocktime()) {
            if (!mis->blocktime_ctx) {
                mis->blocktime_ctx = blocktime_context_new();
            }
        }
    }
#endif

    /*
     * Request features even if asked_features is 0, because the kernel
     * expects UFFD_API before UFFDIO_REGISTER on each userfault file
     * descriptor.
     */
    if (!request_ufd_features(ufd, asked_features)) {
        error_setg(errp, "Failed features %" PRIu64, asked_features);
        return false;
    }

    if (qemu_real_host_page_size() != ram_pagesize_summary()) {
        bool have_hp = false;
        /* We've got a huge page */
#ifdef UFFD_FEATURE_MISSING_HUGETLBFS
        have_hp = supported_features & UFFD_FEATURE_MISSING_HUGETLBFS;
#endif
        if (!have_hp) {
            error_setg(errp,
                       "Userfault on this host does not support huge pages");
            return false;
        }
    }
    return true;
}

/* Callback from postcopy_ram_supported_by_host block iterator.
 */
static int test_ramblock_postcopiable(RAMBlock *rb, Error **errp)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    ram_addr_t length = qemu_ram_get_used_length(rb);
    size_t pagesize = qemu_ram_pagesize(rb);
    QemuFsType fs;

    if (length % pagesize) {
        error_setg(errp,
                   "Postcopy requires RAM blocks to be a page size multiple,"
                   " block %s is 0x" RAM_ADDR_FMT " bytes with a "
                   "page size of 0x%zx", block_name, length, pagesize);
        return 1;
    }

    if (rb->fd >= 0) {
        fs = qemu_fd_getfs(rb->fd);
        if (fs != QEMU_FS_TYPE_TMPFS && fs != QEMU_FS_TYPE_HUGETLBFS) {
            error_setg(errp,
                       "Host backend files need to be TMPFS or HUGETLBFS only");
            return 1;
        }
    }

    return 0;
}
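/*
 * For illustration, a file-backed guest that passes the check above could
 * be started with something like (hypothetical command line):
 *
 *   -object memory-backend-file,id=mem0,size=4G,\
 *           mem-path=/dev/hugepages,share=on \
 *   -numa node,memdev=mem0
 *
 * whereas a backend file on an ordinary disk filesystem (e.g. ext4) is
 * rejected, since of the file-backed cases only tmpfs and hugetlbfs
 * support userfaultfd missing-page faults.
 */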
/*
 * Note: This has the side effect of munlock'ing all of RAM; that's
 * normally fine since if the postcopy succeeds it gets turned back on at the
 * end.
 */
bool postcopy_ram_supported_by_host(MigrationIncomingState *mis, Error **errp)
{
    long pagesize = qemu_real_host_page_size();
    int ufd = -1;
    bool ret = false; /* Error unless we change it */
    void *testarea = NULL;
    struct uffdio_register reg_struct;
    struct uffdio_range range_struct;
    uint64_t feature_mask;
    RAMBlock *block;

    ERRP_GUARD();
    if (qemu_target_page_size() > pagesize) {
        error_setg(errp, "Target page size bigger than host page size");
        goto out;
    }

    ufd = uffd_open(O_CLOEXEC);
    if (ufd == -1) {
        error_setg(errp, "Userfaultfd not available: %s", strerror(errno));
        goto out;
    }

    /* Give devices a chance to object */
    if (postcopy_notify(POSTCOPY_NOTIFY_PROBE, errp)) {
        goto out;
    }

    /* Version and features check */
    if (!ufd_check_and_apply(ufd, mis, errp)) {
        goto out;
    }

    /*
     * We don't support postcopy with some types of ramblocks.
     *
     * NOTE: we deliberately do not use ramblock_is_ignored() here and
     * instead check all possible ramblocks.  This is because this function
     * can be called when creating the migration object, during which phase
     * RAM_MIGRATABLE is not even properly set for all the ramblocks.
     *
     * A side effect of this is that we'll also check against RAM_SHARED
     * ramblocks even if migrate_ignore_shared() is set (in which case
     * we'll never migrate RAM_SHARED at all), but normally this shouldn't
     * matter in practice; we can revisit if it does.
     */
    RAMBLOCK_FOREACH(block) {
        if (test_ramblock_postcopiable(block, errp)) {
            goto out;
        }
    }

    /*
     * userfault and mlock don't go together; we'll put it back later if
     * it was enabled.
     */
    if (munlockall()) {
        error_setg(errp, "munlockall() failed: %s", strerror(errno));
        goto out;
    }

    /*
     * We need to check that the ops we need are supported on anon memory.
     * To do that we need to register a chunk and see the flags that
     * are returned.
     */
    testarea = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE |
                                    MAP_ANONYMOUS, -1, 0);
    if (testarea == MAP_FAILED) {
        error_setg(errp, "Failed to map test area: %s", strerror(errno));
        goto out;
    }
    g_assert(QEMU_PTR_IS_ALIGNED(testarea, pagesize));

    reg_struct.range.start = (uintptr_t)testarea;
    reg_struct.range.len = pagesize;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    if (ioctl(ufd, UFFDIO_REGISTER, &reg_struct)) {
        error_setg(errp, "UFFDIO_REGISTER failed: %s", strerror(errno));
        goto out;
    }

    range_struct.start = (uintptr_t)testarea;
    range_struct.len = pagesize;
    if (ioctl(ufd, UFFDIO_UNREGISTER, &range_struct)) {
        error_setg(errp, "UFFDIO_UNREGISTER failed: %s", strerror(errno));
        goto out;
    }

    feature_mask = (__u64)1 << _UFFDIO_WAKE |
                   (__u64)1 << _UFFDIO_COPY |
                   (__u64)1 << _UFFDIO_ZEROPAGE;
    if ((reg_struct.ioctls & feature_mask) != feature_mask) {
        error_setg(errp, "Missing userfault map features: %" PRIx64,
                   (uint64_t)(~reg_struct.ioctls & feature_mask));
        goto out;
    }

    /* Success! */
    ret = true;
out:
    if (testarea) {
        munmap(testarea, pagesize);
    }
    if (ufd != -1) {
        close(ufd);
    }
    return ret;
}

/*
 * Setup an area of RAM so that it *can* be used for postcopy later; this
 * must be done right at the start prior to pre-copy.
 * opaque should be the MIS.
 */
static int init_range(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    void *host_addr = qemu_ram_get_host_addr(rb);
    ram_addr_t offset = qemu_ram_get_offset(rb);
    ram_addr_t length = qemu_ram_get_used_length(rb);
    trace_postcopy_init_range(block_name, host_addr, offset, length);

    /*
     * Save the used_length before running the guest. In case we have to
     * resize RAM blocks when syncing RAM block sizes from the source during
     * precopy, we'll update it manually via the ram block notifier.
     */
    rb->postcopy_length = length;

    /*
     * We need the whole of RAM to be truly empty for postcopy, so things
     * like ROMs and any data tables built during init must be zero'd
     * - we're going to get the copy from the source anyway.
     * (Precopy will just overwrite this data, so doesn't need the discard)
     */
    if (ram_discard_range(block_name, 0, length)) {
        return -1;
    }

    return 0;
}
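/*
 * (An assumption-level note on the discard above: ram_discard_range()
 * ultimately zaps the backing pages, e.g. madvise(MADV_DONTNEED) for
 * anonymous memory or fallocate(FALLOC_FL_PUNCH_HOLE) for file-backed
 * memory, so the next guest access faults and can be trapped by
 * userfaultfd once the block is registered.)
 */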
/*
 * At the end of migration, undo the effects of init_range
 * opaque should be the MIS.
 */
static int cleanup_range(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    void *host_addr = qemu_ram_get_host_addr(rb);
    ram_addr_t offset = qemu_ram_get_offset(rb);
    ram_addr_t length = rb->postcopy_length;
    MigrationIncomingState *mis = opaque;
    struct uffdio_range range_struct;
    trace_postcopy_cleanup_range(block_name, host_addr, offset, length);

    /*
     * We turned off hugepage for the precopy stage with postcopy enabled;
     * we can turn it back on now.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_HUGEPAGE);

    /*
     * We can also turn off userfault now since we should have all the
     * pages.   It can be useful to leave it on to debug postcopy
     * if you're not sure it's always getting every page.
     */
    range_struct.start = (uintptr_t)host_addr;
    range_struct.len = length;

    if (ioctl(mis->userfault_fd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s: userfault unregister %s", __func__, strerror(errno));

        return -1;
    }

    return 0;
}

/*
 * Initialise postcopy-ram, setting the RAM to a state where we can go into
 * postcopy later; must be called prior to any precopy.
 * called from arch_init's similarly named ram_postcopy_incoming_init
 */
int postcopy_ram_incoming_init(MigrationIncomingState *mis)
{
    if (foreach_not_ignored_block(init_range, NULL)) {
        return -1;
    }

    return 0;
}

static void postcopy_temp_pages_cleanup(MigrationIncomingState *mis)
{
    int i;

    if (mis->postcopy_tmp_pages) {
        for (i = 0; i < mis->postcopy_channels; i++) {
            if (mis->postcopy_tmp_pages[i].tmp_huge_page) {
                munmap(mis->postcopy_tmp_pages[i].tmp_huge_page,
                       mis->largest_page_size);
                mis->postcopy_tmp_pages[i].tmp_huge_page = NULL;
            }
        }
        g_free(mis->postcopy_tmp_pages);
        mis->postcopy_tmp_pages = NULL;
    }

    if (mis->postcopy_tmp_zero_page) {
        munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size);
        mis->postcopy_tmp_zero_page = NULL;
    }
}
/*
 * At the end of a migration where postcopy_ram_incoming_init was called.
 */
int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    trace_postcopy_ram_incoming_cleanup_entry();

    if (mis->preempt_thread_status == PREEMPT_THREAD_CREATED) {
        /* Notify the fast load thread to quit */
        mis->preempt_thread_status = PREEMPT_THREAD_QUIT;
        if (mis->postcopy_qemufile_dst) {
            qemu_file_shutdown(mis->postcopy_qemufile_dst);
        }
        qemu_thread_join(&mis->postcopy_prio_thread);
        mis->preempt_thread_status = PREEMPT_THREAD_NONE;
    }

    if (mis->have_fault_thread) {
        Error *local_err = NULL;

        /* Let the fault thread quit */
        qatomic_set(&mis->fault_thread_quit, 1);
        postcopy_fault_thread_notify(mis);
        trace_postcopy_ram_incoming_cleanup_join();
        qemu_thread_join(&mis->fault_thread);

        if (postcopy_notify(POSTCOPY_NOTIFY_INBOUND_END, &local_err)) {
            error_report_err(local_err);
            return -1;
        }

        if (foreach_not_ignored_block(cleanup_range, mis)) {
            return -1;
        }

        trace_postcopy_ram_incoming_cleanup_closeuf();
        close(mis->userfault_fd);
        close(mis->userfault_event_fd);
        mis->have_fault_thread = false;
    }

    if (enable_mlock) {
        if (os_mlock() < 0) {
            error_report("mlock: %s", strerror(errno));
            /*
             * It doesn't feel right to fail at this point, we have a valid
             * VM state.
             */
        }
    }

    postcopy_temp_pages_cleanup(mis);

    trace_postcopy_ram_incoming_cleanup_blocktime(
            get_postcopy_total_blocktime());

    trace_postcopy_ram_incoming_cleanup_exit();
    return 0;
}
/*
 * Disable huge pages on an area
 */
static int nhp_range(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    void *host_addr = qemu_ram_get_host_addr(rb);
    ram_addr_t offset = qemu_ram_get_offset(rb);
    ram_addr_t length = rb->postcopy_length;
    trace_postcopy_nhp_range(block_name, host_addr, offset, length);

    /*
     * Before we do discards we need to ensure those discards really
     * do delete areas of the page, even if THP thinks a hugepage would
     * be a good idea, so force hugepages off.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_NOHUGEPAGE);

    return 0;
}

/*
 * Userfault requires us to mark RAM as NOHUGEPAGE prior to discard;
 * however, leaving it until after precopy means that most of the precopy
 * data is still THPd.
 */
int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    if (foreach_not_ignored_block(nhp_range, mis)) {
        return -1;
    }

    postcopy_state_set(POSTCOPY_INCOMING_DISCARD);

    return 0;
}
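/*
 * Rough incoming-side ordering for reference (a sketch only; see savevm.c
 * and ram.c for the authoritative flow):
 *
 *   postcopy_ram_incoming_init()     discard all of RAM up front
 *   ... precopy iterations stream pages in ...
 *   postcopy_ram_prepare_discard()   force NOHUGEPAGE per block
 *   ... per-block discards of pages the source has re-dirtied ...
 *   ram_block_enable_notify()        UFFDIO_REGISTER each block
 *   ... guest starts running here; faults pull the remaining pages ...
 */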
/*
 * Mark the given area of RAM as requiring notification to unwritten areas
 * Used as a callback on foreach_not_ignored_block.
 *   host_addr: Base of area to mark
 *   offset: Offset in the whole ram arena
 *   length: Length of the section
 *   opaque: MigrationIncomingState pointer
 * Returns 0 on success
 */
static int ram_block_enable_notify(RAMBlock *rb, void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_register reg_struct;

    reg_struct.range.start = (uintptr_t)qemu_ram_get_host_addr(rb);
    reg_struct.range.len = rb->postcopy_length;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    /* Now tell our userfault_fd that it's responsible for this area */
    if (ioctl(mis->userfault_fd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        return -1;
    }
    if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
        error_report("%s userfault: Region doesn't support COPY", __func__);
        return -1;
    }
    if (reg_struct.ioctls & ((__u64)1 << _UFFDIO_ZEROPAGE)) {
        qemu_ram_set_uf_zeroable(rb);
    }

    return 0;
}

int postcopy_wake_shared(struct PostCopyFD *pcfd,
                         uint64_t client_addr,
                         RAMBlock *rb)
{
    size_t pagesize = qemu_ram_pagesize(rb);
    struct uffdio_range range;
    int ret;
    trace_postcopy_wake_shared(client_addr, qemu_ram_get_idstr(rb));
    range.start = ROUND_DOWN(client_addr, pagesize);
    range.len = pagesize;
    ret = ioctl(pcfd->fd, UFFDIO_WAKE, &range);
    if (ret) {
        error_report("%s: Failed to wake: %zx in %s (%s)",
                     __func__, (size_t)client_addr, qemu_ram_get_idstr(rb),
                     strerror(errno));
    }
    return ret;
}

static int postcopy_request_page(MigrationIncomingState *mis, RAMBlock *rb,
                                 ram_addr_t start, uint64_t haddr)
{
    void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb));

    /*
     * Discarded pages (via RamDiscardManager) are never migrated. On unlikely
     * access, place a zeropage, which will also set the relevant bits in the
     * recv_bitmap accordingly, so we won't try placing a zeropage twice.
     *
     * Checking a single bit is sufficient to handle pagesize > TPS as either
     * all relevant bits are set or not.
     */
    assert(QEMU_IS_ALIGNED(start, qemu_ram_pagesize(rb)));
    if (ramblock_page_is_discarded(rb, start)) {
        bool received = ramblock_recv_bitmap_test_byte_offset(rb, start);

        return received ? 0 : postcopy_place_page_zero(mis, aligned, rb);
    }

    return migrate_send_rp_req_pages(mis, rb, start, haddr);
}
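/*
 * (Example of the discard case above: memory logically unplugged by
 * virtio-mem is tracked by its RamDiscardManager; should the guest touch
 * such a page anyway, the destination fills in a local zero page rather
 * than asking the source for data it never sent.)
 */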
/*
 * Callback from shared fault handlers to ask for a page,
 * the page must be specified by a RAMBlock and an offset in that rb
 * Note: Only for use by shared fault handlers (in fault thread)
 */
int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb,
                                 uint64_t client_addr, uint64_t rb_offset)
{
    uint64_t aligned_rbo = ROUND_DOWN(rb_offset, qemu_ram_pagesize(rb));
    MigrationIncomingState *mis = migration_incoming_get_current();

    trace_postcopy_request_shared_page(pcfd->idstr, qemu_ram_get_idstr(rb),
                                       rb_offset);
    if (ramblock_recv_bitmap_test_byte_offset(rb, aligned_rbo)) {
        trace_postcopy_request_shared_page_present(pcfd->idstr,
                                        qemu_ram_get_idstr(rb), rb_offset);
        return postcopy_wake_shared(pcfd, client_addr, rb);
    }
    postcopy_request_page(mis, rb, aligned_rbo, client_addr);
    return 0;
}

static int get_mem_fault_cpu_index(uint32_t pid)
{
    CPUState *cpu_iter;

    CPU_FOREACH(cpu_iter) {
        if (cpu_iter->thread_id == pid) {
            trace_get_mem_fault_cpu_index(cpu_iter->cpu_index, pid);
            return cpu_iter->cpu_index;
        }
    }
    trace_get_mem_fault_cpu_index(-1, pid);
    return -1;
}

static uint32_t get_low_time_offset(PostcopyBlocktimeContext *dc)
{
    int64_t start_time_offset = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) -
                                    dc->start_time;
    return start_time_offset < 1 ? 1 : start_time_offset & UINT32_MAX;
}
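/*
 * (The blocktime code stores times as 32-bit millisecond offsets from
 * ctx->start_time, clamped to at least 1 by get_low_time_offset() above
 * so that 0 can serve as the "no fault in flight" sentinel in
 * page_fault_vcpu_time[] and vcpu_addr[].)
 */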
/*
 * This function is called when a page fault occurs. It tracks the time
 * the vCPU is blocked.
 *
 * @addr: faulted host virtual address
 * @ptid: faulted process thread id
 * @rb: ramblock appropriate to addr
 */
static void mark_postcopy_blocktime_begin(uintptr_t addr, uint32_t ptid,
                                          RAMBlock *rb)
{
    int cpu, already_received;
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
    uint32_t low_time_offset;

    if (!dc || ptid == 0) {
        return;
    }
    cpu = get_mem_fault_cpu_index(ptid);
    if (cpu < 0) {
        return;
    }

    low_time_offset = get_low_time_offset(dc);
    if (dc->vcpu_addr[cpu] == 0) {
        qatomic_inc(&dc->smp_cpus_down);
    }

    qatomic_xchg(&dc->last_begin, low_time_offset);
    qatomic_xchg(&dc->page_fault_vcpu_time[cpu], low_time_offset);
    qatomic_xchg(&dc->vcpu_addr[cpu], addr);

    /*
     * Check it here, not at the beginning of the function, because the
     * check could run earlier than bitmap_set in qemu_ufd_copy_ioctl.
     */
    already_received = ramblock_recv_bitmap_test(rb, (void *)addr);
    if (already_received) {
        qatomic_xchg(&dc->vcpu_addr[cpu], 0);
        qatomic_xchg(&dc->page_fault_vcpu_time[cpu], 0);
        qatomic_dec(&dc->smp_cpus_down);
    }
    trace_mark_postcopy_blocktime_begin(addr, dc, dc->page_fault_vcpu_time[cpu],
                                        cpu, already_received);
}
/*
 * This function provides the calculated blocktime per vCPU and traces it.
 * Total blocktime is calculated in mark_postcopy_blocktime_end.
 *
 * Assume we have 3 CPUs:
 *
 *      S1        E1           S1               E1
 * -----***********------------xxx***************------------------------> CPU1
 *
 *             S2                E2
 * ------------****************xxx---------------------------------------> CPU2
 *
 *                         S3            E3
 * ------------------------****xxx********-------------------------------> CPU3
 *
 * We have the sequence S1,S2,E1,S3,S1,E2,E3,E1
 * S2,E1 - doesn't satisfy the condition, because the sequence S1,S2,E1
 *         doesn't include CPU3
 * S3,S1,E2 - this sequence includes all CPUs; here the overlap is S1,E2 -
 *            it's a part of total blocktime.
 * S1 - here is last_begin
 * Legend of the picture is following:
 *              * - means blocktime per vCPU
 *              x - means overlapped blocktime (total blocktime)
 *
 * @addr: host virtual address
 */
static void mark_postcopy_blocktime_end(uintptr_t addr)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int smp_cpus = ms->smp.cpus;
    int i, affected_cpu = 0;
    bool vcpu_total_blocktime = false;
    uint32_t read_vcpu_time, low_time_offset;

    if (!dc) {
        return;
    }

    low_time_offset = get_low_time_offset(dc);
    /*
     * Look up the vCPU to clear it.  This algorithm looks straightforward
     * but it's not optimal; a better one would keep a tree or hash keyed
     * by address, whose value is the list of vCPUs blocked on it.
     */
    for (i = 0; i < smp_cpus; i++) {
        uint32_t vcpu_blocktime = 0;

        read_vcpu_time = qatomic_fetch_add(&dc->page_fault_vcpu_time[i], 0);
        if (qatomic_fetch_add(&dc->vcpu_addr[i], 0) != addr ||
            read_vcpu_time == 0) {
            continue;
        }
        qatomic_xchg(&dc->vcpu_addr[i], 0);
        vcpu_blocktime = low_time_offset - read_vcpu_time;
        affected_cpu += 1;
        /*
         * We need to know whether mark_postcopy_blocktime_end was called
         * for a faulted page; the other possibility is a prefetched page,
         * in which case we shouldn't be here.
         */
        if (!vcpu_total_blocktime &&
            qatomic_fetch_add(&dc->smp_cpus_down, 0) == smp_cpus) {
            vcpu_total_blocktime = true;
        }
        /* continue the cycle, because one page could affect several vCPUs */
        dc->vcpu_blocktime[i] += vcpu_blocktime;
    }

    qatomic_sub(&dc->smp_cpus_down, affected_cpu);
    if (vcpu_total_blocktime) {
        dc->total_blocktime += low_time_offset - qatomic_fetch_add(
                &dc->last_begin, 0);
    }
    trace_mark_postcopy_blocktime_end(addr, dc, dc->total_blocktime,
                                      affected_cpu);
}
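/*
 * Worked example of the accounting above (hypothetical numbers): with 2
 * vCPUs both faulting on the same page, vCPU0 at t=10ms and vCPU1 at
 * t=20ms, resolved at t=30ms: vcpu_blocktime becomes {20, 10}, and
 * total_blocktime grows by t - last_begin = 30 - 20 = 10ms, the interval
 * during which every vCPU was blocked simultaneously.
 */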
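static void postcopy_pause_fault_thread(MigrationIncomingState *mis)
{
    trace_postcopy_pause_fault_thread();
    qemu_sem_wait(&mis->postcopy_pause_sem_fault);
    trace_postcopy_pause_fault_thread_continued();
}

/*
 * Handle faults detected by the USERFAULT markings
 */
static void *postcopy_ram_fault_thread(void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffd_msg msg;
    int ret;
    size_t index;
    RAMBlock *rb = NULL;

    trace_postcopy_ram_fault_thread_entry();
    rcu_register_thread();
    mis->last_rb = NULL; /* last RAMBlock we sent part of */
    qemu_sem_post(&mis->thread_sync_sem);

    struct pollfd *pfd;
    size_t pfd_len = 2 + mis->postcopy_remote_fds->len;

    pfd = g_new0(struct pollfd, pfd_len);

    pfd[0].fd = mis->userfault_fd;
    pfd[0].events = POLLIN;
    pfd[1].fd = mis->userfault_event_fd;
    pfd[1].events = POLLIN; /* Waiting for eventfd to go positive */
    trace_postcopy_ram_fault_thread_fds_core(pfd[0].fd, pfd[1].fd);
    for (index = 0; index < mis->postcopy_remote_fds->len; index++) {
        struct PostCopyFD *pcfd = &g_array_index(mis->postcopy_remote_fds,
                                                 struct PostCopyFD, index);
        pfd[2 + index].fd = pcfd->fd;
        pfd[2 + index].events = POLLIN;
        trace_postcopy_ram_fault_thread_fds_extra(2 + index, pcfd->idstr,
                                                  pcfd->fd);
    }

    /*
     * (Layout of the poll set, for reference: pfd[0] is the main
     * userfaultfd for guest RAM, pfd[1] is the eventfd that
     * postcopy_fault_thread_notify() kicks to wake or quit this thread,
     * and pfd[2..] are userfaultfds registered by external sharers such
     * as vhost-user back-ends.)
     */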
    while (true) {
        ram_addr_t rb_offset;
        int poll_result;

        /*
         * We're mainly waiting for the kernel to give us a faulting HVA,
         * however we can be told to quit via userfault_quit_fd which is
         * an eventfd
         */

        poll_result = poll(pfd, pfd_len, -1 /* Wait forever */);
        if (poll_result == -1) {
            error_report("%s: userfault poll: %s", __func__, strerror(errno));
            break;
        }

        if (!mis->to_src_file) {
            /*
             * Possibly someone tells us that the return path is
             * broken already using the event. We should hold until
             * the channel is rebuilt.
             */
            postcopy_pause_fault_thread(mis);
        }

        if (pfd[1].revents) {
            uint64_t tmp64 = 0;

            /* Consume the signal */
            if (read(mis->userfault_event_fd, &tmp64, 8) != 8) {
                /* Nothing obviously nicer than posting this error. */
                error_report("%s: read() failed", __func__);
            }

            if (qatomic_read(&mis->fault_thread_quit)) {
                trace_postcopy_ram_fault_thread_quit();
                break;
            }
        }

        if (pfd[0].revents) {
            poll_result--;
            ret = read(mis->userfault_fd, &msg, sizeof(msg));
            if (ret != sizeof(msg)) {
                if (errno == EAGAIN) {
                    /*
                     * if a wake up happens on the other thread just after
                     * the poll, there is nothing to read.
                     */
                    continue;
                }
                if (ret < 0) {
                    error_report("%s: Failed to read full userfault "
                                 "message: %s",
                                 __func__, strerror(errno));
                    break;
                } else {
                    error_report("%s: Read %d bytes from userfaultfd "
                                 "expected %zd",
                                 __func__, ret, sizeof(msg));
                    break; /* Lost alignment, don't know what we'd read next */
                }
            }
            if (msg.event != UFFD_EVENT_PAGEFAULT) {
                error_report("%s: Read unexpected event %ud from userfaultfd",
                             __func__, msg.event);
David Alan Gilbert continue; /* It's not a page fault, shouldn't happen */ 1042c4faeed2SDr. David Alan Gilbert } 1043c4faeed2SDr. David Alan Gilbert 1044c4faeed2SDr. David Alan Gilbert rb = qemu_ram_block_from_host( 1045c4faeed2SDr. David Alan Gilbert (void *)(uintptr_t)msg.arg.pagefault.address, 1046f615f396SPaolo Bonzini true, &rb_offset); 1047c4faeed2SDr. David Alan Gilbert if (!rb) { 1048c4faeed2SDr. David Alan Gilbert error_report("postcopy_ram_fault_thread: Fault outside guest: %" 1049c4faeed2SDr. David Alan Gilbert PRIx64, (uint64_t)msg.arg.pagefault.address); 1050c4faeed2SDr. David Alan Gilbert break; 1051c4faeed2SDr. David Alan Gilbert } 1052c4faeed2SDr. David Alan Gilbert 10537648297dSDavid Hildenbrand rb_offset = ROUND_DOWN(rb_offset, qemu_ram_pagesize(rb)); 1054c4faeed2SDr. David Alan Gilbert trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address, 1055c4faeed2SDr. David Alan Gilbert qemu_ram_get_idstr(rb), 1056575b0b33SAlexey Perevalov rb_offset, 1057575b0b33SAlexey Perevalov msg.arg.pagefault.feat.ptid); 1058575b0b33SAlexey Perevalov mark_postcopy_blocktime_begin( 1059575b0b33SAlexey Perevalov (uintptr_t)(msg.arg.pagefault.address), 1060575b0b33SAlexey Perevalov msg.arg.pagefault.feat.ptid, rb); 1061575b0b33SAlexey Perevalov 10623a7804c3SPeter Xu retry: 1063c4faeed2SDr. David Alan Gilbert /* 1064c4faeed2SDr. David Alan Gilbert * Send the request to the source - we want to request one 1065c4faeed2SDr. David Alan Gilbert * of our host page sizes (which is >= TPS) 1066c4faeed2SDr. David Alan Gilbert */ 10679470c5e0SDavid Hildenbrand ret = postcopy_request_page(mis, rb, rb_offset, 10688f8bfffcSPeter Xu msg.arg.pagefault.address); 10693a7804c3SPeter Xu if (ret) { 10703a7804c3SPeter Xu /* May be network failure, try to wait for recovery */ 107127dd21b4SPeter Xu postcopy_pause_fault_thread(mis); 10723a7804c3SPeter Xu goto retry; 1073c4faeed2SDr. David Alan Gilbert } 1074c4faeed2SDr. David Alan Gilbert } 107500fa4fc8SDr. David Alan Gilbert 107600fa4fc8SDr. David Alan Gilbert /* Now handle any requests from external processes on shared memory */ 107700fa4fc8SDr. David Alan Gilbert /* TODO: May need to handle devices deregistering during postcopy */ 107800fa4fc8SDr. David Alan Gilbert for (index = 2; index < pfd_len && poll_result; index++) { 107900fa4fc8SDr. David Alan Gilbert if (pfd[index].revents) { 108000fa4fc8SDr. David Alan Gilbert struct PostCopyFD *pcfd = 108100fa4fc8SDr. David Alan Gilbert &g_array_index(mis->postcopy_remote_fds, 108200fa4fc8SDr. David Alan Gilbert struct PostCopyFD, index - 2); 108300fa4fc8SDr. David Alan Gilbert 108400fa4fc8SDr. David Alan Gilbert poll_result--; 108500fa4fc8SDr. David Alan Gilbert if (pfd[index].revents & POLLERR) { 108600fa4fc8SDr. David Alan Gilbert error_report("%s: POLLERR on poll %zd fd=%d", 108700fa4fc8SDr. David Alan Gilbert __func__, index, pcfd->fd); 108800fa4fc8SDr. David Alan Gilbert pfd[index].events = 0; 108900fa4fc8SDr. David Alan Gilbert continue; 109000fa4fc8SDr. David Alan Gilbert } 109100fa4fc8SDr. David Alan Gilbert 109200fa4fc8SDr. David Alan Gilbert ret = read(pcfd->fd, &msg, sizeof(msg)); 109300fa4fc8SDr. David Alan Gilbert if (ret != sizeof(msg)) { 109400fa4fc8SDr. David Alan Gilbert if (errno == EAGAIN) { 109500fa4fc8SDr. David Alan Gilbert /* 109600fa4fc8SDr. David Alan Gilbert * if a wake up happens on the other thread just after 109700fa4fc8SDr. David Alan Gilbert * the poll, there is nothing to read. 109800fa4fc8SDr. David Alan Gilbert */ 109900fa4fc8SDr. David Alan Gilbert continue; 110000fa4fc8SDr. 
David Alan Gilbert } 110100fa4fc8SDr. David Alan Gilbert if (ret < 0) { 110200fa4fc8SDr. David Alan Gilbert error_report("%s: Failed to read full userfault " 110300fa4fc8SDr. David Alan Gilbert "message: %s (shared) revents=%d", 110400fa4fc8SDr. David Alan Gilbert __func__, strerror(errno), 110500fa4fc8SDr. David Alan Gilbert pfd[index].revents); 110600fa4fc8SDr. David Alan Gilbert /*TODO: Could just disable this sharer */ 110700fa4fc8SDr. David Alan Gilbert break; 110800fa4fc8SDr. David Alan Gilbert } else { 110900fa4fc8SDr. David Alan Gilbert error_report("%s: Read %d bytes from userfaultfd " 111000fa4fc8SDr. David Alan Gilbert "expected %zd (shared)", 111100fa4fc8SDr. David Alan Gilbert __func__, ret, sizeof(msg)); 111200fa4fc8SDr. David Alan Gilbert /*TODO: Could just disable this sharer */ 111300fa4fc8SDr. David Alan Gilbert break; /*Lost alignment,don't know what we'd read next*/ 111400fa4fc8SDr. David Alan Gilbert } 111500fa4fc8SDr. David Alan Gilbert } 111600fa4fc8SDr. David Alan Gilbert if (msg.event != UFFD_EVENT_PAGEFAULT) { 111700fa4fc8SDr. David Alan Gilbert error_report("%s: Read unexpected event %ud " 111800fa4fc8SDr. David Alan Gilbert "from userfaultfd (shared)", 111900fa4fc8SDr. David Alan Gilbert __func__, msg.event); 112000fa4fc8SDr. David Alan Gilbert continue; /* It's not a page fault, shouldn't happen */ 112100fa4fc8SDr. David Alan Gilbert } 112200fa4fc8SDr. David Alan Gilbert /* Call the device handler registered with us */ 112300fa4fc8SDr. David Alan Gilbert ret = pcfd->handler(pcfd, &msg); 112400fa4fc8SDr. David Alan Gilbert if (ret) { 112500fa4fc8SDr. David Alan Gilbert error_report("%s: Failed to resolve shared fault on %zd/%s", 112600fa4fc8SDr. David Alan Gilbert __func__, index, pcfd->idstr); 112700fa4fc8SDr. David Alan Gilbert /* TODO: Fail? Disable this sharer? */ 112800fa4fc8SDr. David Alan Gilbert } 112900fa4fc8SDr. David Alan Gilbert } 113000fa4fc8SDr. David Alan Gilbert } 113100fa4fc8SDr. David Alan Gilbert } 113274637e6fSLidong Chen rcu_unregister_thread(); 1133c4faeed2SDr. David Alan Gilbert trace_postcopy_ram_fault_thread_exit(); 1134fc6008f3SMarc-André Lureau g_free(pfd); 1135f0a227adSDr. David Alan Gilbert return NULL; 1136f0a227adSDr. David Alan Gilbert } 1137f0a227adSDr. 
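
/*
 * For readers new to userfaultfd: a minimal sketch of the kernel protocol
 * that the fault thread above drives.  This is an illustration only, not
 * QEMU code; error handling is elided and "base"/"len" stand in for the
 * region to be monitored:
 *
 *     int uffd = syscall(__NR_userfaultfd, O_CLOEXEC);
 *
 *     struct uffdio_api api = { .api = UFFD_API };
 *     ioctl(uffd, UFFDIO_API, &api);                   ABI handshake
 *
 *     struct uffdio_register reg = {
 *         .range = { .start = (uint64_t)(uintptr_t)base, .len = len },
 *         .mode  = UFFDIO_REGISTER_MODE_MISSING,
 *     };
 *     ioctl(uffd, UFFDIO_REGISTER, &reg);              arm the region
 *
 *     struct uffd_msg msg;
 *     read(uffd, &msg, sizeof(msg));                   blocks until a fault
 *
 * msg.arg.pagefault.address then holds the faulting HVA; the fault is
 * resolved with UFFDIO_COPY or UFFDIO_ZEROPAGE (see qemu_ufd_copy_ioctl()
 * below), which atomically fills the page and wakes the faulting thread.
 */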

static int postcopy_temp_pages_setup(MigrationIncomingState *mis)
{
    PostcopyTmpPage *tmp_page;
    int err, i, channels;
    void *temp_page;

    if (migrate_postcopy_preempt()) {
        /* If preemption enabled, need extra channel for urgent requests */
        mis->postcopy_channels = RAM_CHANNEL_MAX;
    } else {
        /* Both precopy/postcopy on the same channel */
        mis->postcopy_channels = 1;
    }

    channels = mis->postcopy_channels;
    mis->postcopy_tmp_pages = g_malloc0_n(channels, sizeof(PostcopyTmpPage));

    for (i = 0; i < channels; i++) {
        tmp_page = &mis->postcopy_tmp_pages[i];
        temp_page = mmap(NULL, mis->largest_page_size, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (temp_page == MAP_FAILED) {
            err = errno;
            error_report("%s: Failed to map postcopy_tmp_pages[%d]: %s",
                         __func__, i, strerror(err));
            /* Clean up will be done later */
            return -err;
        }
        tmp_page->tmp_huge_page = temp_page;
        /* Initialize default states for each tmp page */
        postcopy_temp_page_reset(tmp_page);
    }

    /*
     * Map large zero page when kernel can't use UFFDIO_ZEROPAGE for hugepages
     */
    mis->postcopy_tmp_zero_page = mmap(NULL, mis->largest_page_size,
                                       PROT_READ | PROT_WRITE,
                                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (mis->postcopy_tmp_zero_page == MAP_FAILED) {
        err = errno;
        mis->postcopy_tmp_zero_page = NULL;
        error_report("%s: Failed to map large zero page %s",
                     __func__, strerror(err));
        return -err;
    }

    memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size);

    return 0;
}

int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
{
    Error *local_err = NULL;

    /* Open the fd for the kernel to give us userfaults */
    mis->userfault_fd = uffd_open(O_CLOEXEC | O_NONBLOCK);
    if (mis->userfault_fd == -1) {
        error_report("%s: Failed to open userfault fd: %s", __func__,
                     strerror(errno));
        return -1;
    }

    /*
     * Although the host check already tested the API, we need to
     * do the check again as an ABI handshake on the new fd.
     */
    if (!ufd_check_and_apply(mis->userfault_fd, mis, &local_err)) {
        error_report_err(local_err);
        return -1;
    }

    /* Now an eventfd we use to tell the fault-thread to quit */
    mis->userfault_event_fd = eventfd(0, EFD_CLOEXEC);
    if (mis->userfault_event_fd == -1) {
        error_report("%s: Opening userfault_event_fd: %s", __func__,
                     strerror(errno));
        close(mis->userfault_fd);
        return -1;
    }

    postcopy_thread_create(mis, &mis->fault_thread, "fault-default",
                           postcopy_ram_fault_thread, QEMU_THREAD_JOINABLE);
    mis->have_fault_thread = true;

    /* Mark so that we get notified of accesses to unwritten areas */
    if (foreach_not_ignored_block(ram_block_enable_notify, mis)) {
        error_report("ram_block_enable_notify failed");
        return -1;
    }

    if (postcopy_temp_pages_setup(mis)) {
        /* Error dumped in the sub-function */
        return -1;
    }

    if (migrate_postcopy_preempt()) {
        /*
         * This thread needs to be created after the temp pages because
         * it'll fetch RAM_CHANNEL_POSTCOPY PostcopyTmpPage immediately.
         */
        postcopy_thread_create(mis, &mis->postcopy_prio_thread, "fault-fast",
                               postcopy_preempt_thread, QEMU_THREAD_JOINABLE);
        mis->preempt_thread_status = PREEMPT_THREAD_CREATED;
    }

    trace_postcopy_ram_enable_notify();

    return 0;
}
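
/*
 * The teardown counterpart lives in postcopy_ram_incoming_cleanup() (not
 * shown in this part of the file).  Roughly, the fault thread is stopped
 * by raising the eventfd it polls and then joining it:
 *
 *     qatomic_set(&mis->fault_thread_quit, 1);
 *     postcopy_fault_thread_notify(mis);      writes 1 to userfault_event_fd
 *     qemu_thread_join(&mis->fault_thread);
 *
 * The quit flag must be set before the eventfd write so that the thread
 * observes it once poll() returns.
 */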

static int qemu_ufd_copy_ioctl(MigrationIncomingState *mis, void *host_addr,
                               void *from_addr, uint64_t pagesize, RAMBlock *rb)
{
    int userfault_fd = mis->userfault_fd;
    int ret;

    if (from_addr) {
        struct uffdio_copy copy_struct;
        copy_struct.dst = (uint64_t)(uintptr_t)host_addr;
        copy_struct.src = (uint64_t)(uintptr_t)from_addr;
        copy_struct.len = pagesize;
        copy_struct.mode = 0;
        ret = ioctl(userfault_fd, UFFDIO_COPY, &copy_struct);
    } else {
        struct uffdio_zeropage zero_struct;
        zero_struct.range.start = (uint64_t)(uintptr_t)host_addr;
        zero_struct.range.len = pagesize;
        zero_struct.mode = 0;
        ret = ioctl(userfault_fd, UFFDIO_ZEROPAGE, &zero_struct);
    }
    if (!ret) {
        qemu_mutex_lock(&mis->page_request_mutex);
        ramblock_recv_bitmap_set_range(rb, host_addr,
                                       pagesize / qemu_target_page_size());
        /*
         * If this page resolves a page fault for a previously recorded
         * faulted address, take a special note to maintain the requested
         * page list.
         */
        if (g_tree_lookup(mis->page_requested, host_addr)) {
            g_tree_remove(mis->page_requested, host_addr);
            mis->page_requested_count--;
            trace_postcopy_page_req_del(host_addr, mis->page_requested_count);
        }
        qemu_mutex_unlock(&mis->page_request_mutex);
        mark_postcopy_blocktime_end((uintptr_t)host_addr);
    }
    return ret;
}

int postcopy_notify_shared_wake(RAMBlock *rb, uint64_t offset)
{
    int i;
    MigrationIncomingState *mis = migration_incoming_get_current();
    GArray *pcrfds = mis->postcopy_remote_fds;

    for (i = 0; i < pcrfds->len; i++) {
        struct PostCopyFD *cur = &g_array_index(pcrfds, struct PostCopyFD, i);
        int ret = cur->waker(cur, rb, offset);
        if (ret) {
            return ret;
        }
    }
    return 0;
}

/*
 * Place a host page (from) at (host) atomically
 * returns 0 on success
 */
int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
                        RAMBlock *rb)
{
    size_t pagesize = qemu_ram_pagesize(rb);

    /* The copy also acks to the kernel, waking up the stalled thread.
     * TODO: We can inhibit that ack and only do it if it was requested
     * which would be slightly cheaper, but we'd have to be careful
     * of the order of updating our page state.
     */
    if (qemu_ufd_copy_ioctl(mis, host, from, pagesize, rb)) {
        int e = errno;
        error_report("%s: %s copy host: %p from: %p (size: %zu)",
                     __func__, strerror(e), host, from, pagesize);

        return -e;
    }

    trace_postcopy_place_page(host);
    return postcopy_notify_shared_wake(rb,
                                       qemu_ram_block_host_offset(rb, host));
}

/*
 * Place a zero page at (host) atomically
 * returns 0 on success
 */
int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
                             RAMBlock *rb)
{
    size_t pagesize = qemu_ram_pagesize(rb);
    trace_postcopy_place_page_zero(host);

    /* Normal RAMBlocks can zero a page using UFFDIO_ZEROPAGE
     * but it's not available for everything (e.g. hugetlbpages)
     */
    if (qemu_ram_is_uf_zeroable(rb)) {
        if (qemu_ufd_copy_ioctl(mis, host, NULL, pagesize, rb)) {
            int e = errno;
            error_report("%s: %s zero host: %p",
                         __func__, strerror(e), host);

            return -e;
        }
        return postcopy_notify_shared_wake(rb,
                                           qemu_ram_block_host_offset(rb,
                                                                      host));
    } else {
        return postcopy_place_page(mis, host, mis->postcopy_tmp_zero_page, rb);
    }
}

#else
/* No target OS support, stubs just fail */
void fill_destination_postcopy_migration_info(MigrationInfo *info)
{
}

bool postcopy_ram_supported_by_host(MigrationIncomingState *mis, Error **errp)
{
    error_report("%s: No OS support", __func__);
    return false;
}

int postcopy_ram_incoming_init(MigrationIncomingState *mis)
{
    error_report("postcopy_ram_incoming_init: No OS support");
    return -1;
}

int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb,
                                 uint64_t client_addr, uint64_t rb_offset)
{
    assert(0);
    return -1;
}

int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
                        RAMBlock *rb)
{
    assert(0);
    return -1;
}

int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
                             RAMBlock *rb)
{
    assert(0);
    return -1;
}

int postcopy_wake_shared(struct PostCopyFD *pcfd,
                         uint64_t client_addr,
                         RAMBlock *rb)
{
    assert(0);
    return -1;
}
#endif
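
/*
 * The discard helpers below batch up to MAX_DISCARDS_PER_COMMAND ranges
 * into each MIG_CMD_POSTCOPY_RAM_DISCARD command.  As a usage sketch (not
 * verbatim code; the actual caller is the bitmap walk in ram.c, see
 * postcopy_each_ram_send_discard()), the source side drives them per
 * RAMBlock roughly like this:
 *
 *     postcopy_discard_send_init(ms, block->idstr);
 *     for each run of pages to discard found in the bitmap:
 *         postcopy_discard_send_range(ms, start, length);
 *     postcopy_discard_send_finish(ms);
 */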

/* ------------------------------------------------------------------------- */

void postcopy_temp_page_reset(PostcopyTmpPage *tmp_page)
{
    tmp_page->target_pages = 0;
    tmp_page->host_addr = NULL;
    /*
     * This is set to true on reset, and cleared as soon as we receive any
     * non-zero small page within this huge page.
     */
    tmp_page->all_zero = true;
}

void postcopy_fault_thread_notify(MigrationIncomingState *mis)
{
    uint64_t tmp64 = 1;

    /*
     * Wake up the fault_thread. It's an eventfd that should currently
     * be at 0; we're going to increment it to 1.
     */
    if (write(mis->userfault_event_fd, &tmp64, 8) != 8) {
        /* Not much we can do here, but may as well report it */
        error_report("%s: incrementing failed: %s", __func__,
                     strerror(errno));
    }
}

static PostcopyDiscardState pds = {0};

/**
 * postcopy_discard_send_init: Called at the start of each RAMBlock before
 *  asking to discard individual ranges.
 *
 * @ms: The current migration state.
 * @name: RAMBlock that discards will operate on.
 */
void postcopy_discard_send_init(MigrationState *ms, const char *name)
{
    pds.ramblock_name = name;
    pds.cur_entry = 0;
    pds.nsentwords = 0;
    pds.nsentcmds = 0;
}

/**
 * postcopy_discard_send_range: Called by the bitmap code for each chunk to
 *  discard. May send a discard message, may just leave it queued to
 *  be sent later.
 *
 * @ms: Current migration state.
 * @start,@length: a range of pages in the migration bitmap in the
 *  RAM block passed to postcopy_discard_send_init() (length=1 is one page)
 */
void postcopy_discard_send_range(MigrationState *ms, unsigned long start,
                                 unsigned long length)
{
    size_t tp_size = qemu_target_page_size();
    /* Convert to byte offsets within the RAM block */
    pds.start_list[pds.cur_entry] = start * tp_size;
    pds.length_list[pds.cur_entry] = length * tp_size;
    trace_postcopy_discard_send_range(pds.ramblock_name, start, length);
    pds.cur_entry++;
    pds.nsentwords++;

    if (pds.cur_entry == MAX_DISCARDS_PER_COMMAND) {
        /* Full set, ship it! */
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
                                              pds.ramblock_name,
                                              pds.cur_entry,
                                              pds.start_list,
                                              pds.length_list);
        pds.nsentcmds++;
        pds.cur_entry = 0;
    }
}

/**
 * postcopy_discard_send_finish: Called at the end of each RAMBlock by the
 *  bitmap code. Sends any outstanding discard messages.
 *
 * @ms: Current migration state.
 */
void postcopy_discard_send_finish(MigrationState *ms)
{
    /* Anything unsent? */
    if (pds.cur_entry) {
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
                                              pds.ramblock_name,
                                              pds.cur_entry,
                                              pds.start_list,
                                              pds.length_list);
        pds.nsentcmds++;
    }

    trace_postcopy_discard_send_finish(pds.ramblock_name, pds.nsentwords,
                                       pds.nsentcmds);
}

/*
 * Current state of incoming postcopy; note this is not part of
 * MigrationIncomingState since its state is used during cleanup
 * at the end as MIS is being freed.
 */
static PostcopyState incoming_postcopy_state;

PostcopyState postcopy_state_get(void)
{
    return qatomic_load_acquire(&incoming_postcopy_state);
}

/* Set the state and return the old state */
PostcopyState postcopy_state_set(PostcopyState new_state)
{
    return qatomic_xchg(&incoming_postcopy_state, new_state);
}

/* Register a handler for external shared memory postcopy
 * called on the destination.
 */
void postcopy_register_shared_ufd(struct PostCopyFD *pcfd)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    mis->postcopy_remote_fds = g_array_append_val(mis->postcopy_remote_fds,
                                                  *pcfd);
}

/* Unregister a handler for external shared memory postcopy
 */
void postcopy_unregister_shared_ufd(struct PostCopyFD *pcfd)
{
    guint i;
    MigrationIncomingState *mis = migration_incoming_get_current();
    GArray *pcrfds = mis->postcopy_remote_fds;

    if (!pcrfds) {
        /* migration has already finished and freed the array */
        return;
    }
    for (i = 0; i < pcrfds->len; i++) {
        struct PostCopyFD *cur = &g_array_index(pcrfds, struct PostCopyFD, i);
        if (cur->fd == pcfd->fd) {
            mis->postcopy_remote_fds = g_array_remove_index(pcrfds, i);
            return;
        }
    }
}

void postcopy_preempt_new_channel(MigrationIncomingState *mis, QEMUFile *file)
{
    /*
     * The new loading channel has its own threads, so it needs to be
     * blocked too. It's by default true, just be explicit.
     */
    qemu_file_set_blocking(file, true);
    mis->postcopy_qemufile_dst = file;
    qemu_sem_post(&mis->postcopy_qemufile_dst_done);
    trace_postcopy_preempt_new_channel();
}

/*
 * Set up the postcopy preempt channel with the IOC. If ERROR is specified,
 * set up the error instead. This helper will free the ERROR if specified.
 */
static void
postcopy_preempt_send_channel_done(MigrationState *s,
                                   QIOChannel *ioc, Error *local_err)
{
    if (local_err) {
        migrate_set_error(s, local_err);
        error_free(local_err);
    } else {
        migration_ioc_register_yank(ioc);
        s->postcopy_qemufile_src = qemu_file_new_output(ioc);
        trace_postcopy_preempt_new_channel();
    }

    /*
     * Kick the waiter in all cases. The waiter should check upon
     * postcopy_qemufile_src to know whether it failed or not.
     */
    qemu_sem_post(&s->postcopy_qemufile_src_sem);
}

static void
postcopy_preempt_tls_handshake(QIOTask *task, gpointer opaque)
{
    g_autoptr(QIOChannel) ioc = QIO_CHANNEL(qio_task_get_source(task));
    MigrationState *s = opaque;
    Error *local_err = NULL;

    qio_task_propagate_error(task, &local_err);
    postcopy_preempt_send_channel_done(s, ioc, local_err);
}

static void
postcopy_preempt_send_channel_new(QIOTask *task, gpointer opaque)
{
    g_autoptr(QIOChannel) ioc = QIO_CHANNEL(qio_task_get_source(task));
    MigrationState *s = opaque;
    QIOChannelTLS *tioc;
    Error *local_err = NULL;

    if (qio_task_propagate_error(task, &local_err)) {
        goto out;
    }

    if (migrate_channel_requires_tls_upgrade(ioc)) {
        tioc = migration_tls_client_create(ioc, s->hostname, &local_err);
        if (!tioc) {
            goto out;
        }
        trace_postcopy_preempt_tls_handshake();
        qio_channel_set_name(QIO_CHANNEL(tioc), "migration-tls-preempt");
        qio_channel_tls_handshake(tioc, postcopy_preempt_tls_handshake,
                                  s, NULL, NULL);
        /* Keep the channel in setup until the TLS handshake has finished */
        return;
    }

out:
    /* This handles both good and error cases */
    postcopy_preempt_send_channel_done(s, ioc, local_err);
}

/*
 * This function will kick off an async task to establish the preempt
 * channel, and wait until the connection setup completed. Returns 0 if
 * channel established, -1 for error.
 */
int postcopy_preempt_establish_channel(MigrationState *s)
{
    /* If preempt not enabled, no need to wait */
    if (!migrate_postcopy_preempt()) {
        return 0;
    }

    /*
     * Kick off async task to establish preempt channel. Only do so with
     * 8.0+ machines, because 7.1/7.2 require the channel to be created in
     * setup phase of migration (even if racy in an unreliable network).
     */
    if (!s->preempt_pre_7_2) {
        postcopy_preempt_setup(s);
    }

    /*
     * We need the postcopy preempt channel to be established before
     * we start doing anything.
     */
    qemu_sem_wait(&s->postcopy_qemufile_src_sem);

    return s->postcopy_qemufile_src ? 0 : -1;
}

void postcopy_preempt_setup(MigrationState *s)
{
    /* Kick an async task to connect */
    socket_send_channel_create(postcopy_preempt_send_channel_new, s);
}

static void postcopy_pause_ram_fast_load(MigrationIncomingState *mis)
{
    trace_postcopy_pause_fast_load();
    qemu_mutex_unlock(&mis->postcopy_prio_thread_mutex);
    qemu_sem_wait(&mis->postcopy_pause_sem_fast_load);
    qemu_mutex_lock(&mis->postcopy_prio_thread_mutex);
    trace_postcopy_pause_fast_load_continued();
}

static bool preempt_thread_should_run(MigrationIncomingState *mis)
{
    return mis->preempt_thread_status != PREEMPT_THREAD_QUIT;
}

void *postcopy_preempt_thread(void *opaque)
{
    MigrationIncomingState *mis = opaque;
    int ret;

    trace_postcopy_preempt_thread_entry();

    rcu_register_thread();

    qemu_sem_post(&mis->thread_sync_sem);

    /*
     * The preempt channel is established in an asynchronous way. Wait
     * for its completion.
     */
    qemu_sem_wait(&mis->postcopy_qemufile_dst_done);

    /* Sending RAM_SAVE_FLAG_EOS to terminate this thread */
    qemu_mutex_lock(&mis->postcopy_prio_thread_mutex);
    while (preempt_thread_should_run(mis)) {
        ret = ram_load_postcopy(mis->postcopy_qemufile_dst,
                                RAM_CHANNEL_POSTCOPY);
        /* If an error happened, go into the recovery routine */
        if (ret && preempt_thread_should_run(mis)) {
            postcopy_pause_ram_fast_load(mis);
        } else {
            /* We're done */
            break;
        }
    }
    qemu_mutex_unlock(&mis->postcopy_prio_thread_mutex);

    rcu_unregister_thread();

    trace_postcopy_preempt_thread_exit();

    return NULL;
}
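
/*
 * For context, a sketch of how the source side consumes the helpers above
 * (simplified from postcopy_start() in migration.c; not verbatim code):
 *
 *     if (postcopy_preempt_establish_channel(s)) {
 *         fail the postcopy switchover;
 *     }
 *
 * postcopy_preempt_establish_channel() blocks on postcopy_qemufile_src_sem
 * until postcopy_preempt_send_channel_done() posts it, so by the time the
 * switchover proceeds the preempt channel is either usable or an error has
 * been recorded in the migration state.
 */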