/*
 * Postcopy migration for RAM
 *
 * Copyright 2013-2015 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Dave Gilbert <dgilbert@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

/*
 * Postcopy is a migration technique where the execution flips from the
 * source to the destination before all the data has been copied.
 */

#include "qemu/osdep.h"
#include "qemu/rcu.h"
#include "qemu/madvise.h"
#include "exec/target_page.h"
#include "migration.h"
#include "qemu-file.h"
#include "savevm.h"
#include "postcopy-ram.h"
#include "ram.h"
#include "qapi/error.h"
#include "qemu/notify.h"
#include "qemu/rcu.h"
#include "sysemu/sysemu.h"
#include "qemu/error-report.h"
#include "trace.h"
#include "hw/boards.h"
#include "exec/ramblock.h"

/* Arbitrary limit on size of each discard command,
 * keeps them around ~200 bytes
 */
#define MAX_DISCARDS_PER_COMMAND 12

struct PostcopyDiscardState {
    const char *ramblock_name;
    uint16_t cur_entry;
    /*
     * Start and length of a discard range (bytes)
     */
    uint64_t start_list[MAX_DISCARDS_PER_COMMAND];
    uint64_t length_list[MAX_DISCARDS_PER_COMMAND];
    unsigned int nsentwords;
    unsigned int nsentcmds;
};

static NotifierWithReturnList postcopy_notifier_list;

void postcopy_infrastructure_init(void)
{
    notifier_with_return_list_init(&postcopy_notifier_list);
}

void postcopy_add_notifier(NotifierWithReturn *nn)
{
    notifier_with_return_list_add(&postcopy_notifier_list, nn);
}

void postcopy_remove_notifier(NotifierWithReturn *n)
{
    notifier_with_return_remove(n);
}

int postcopy_notify(enum PostcopyNotifyReason reason, Error **errp)
{
    struct PostcopyNotifyData pnd;
    pnd.reason = reason;
    pnd.errp = errp;

    return notifier_with_return_list_notify(&postcopy_notifier_list,
                                            &pnd);
}

/* Postcopy needs to detect accesses to pages that haven't yet been copied
 * across, and efficiently map new pages in; the techniques for doing this
 * are target OS specific.
 */
#if defined(__linux__)

#include <poll.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <asm/types.h> /* for __u64 */
#endif

#if defined(__linux__) && defined(__NR_userfaultfd) && defined(CONFIG_EVENTFD)
#include <sys/eventfd.h>
#include <linux/userfaultfd.h>

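/*
 * Bookkeeping used to compute how long vCPUs were blocked by postcopy
 * page faults; updated by mark_postcopy_blocktime_begin/end() below and
 * reported through fill_destination_postcopy_migration_info() when the
 * postcopy-blocktime capability is enabled.
 */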
typedef struct PostcopyBlocktimeContext {
    /* time when page fault initiated per vCPU */
    uint32_t *page_fault_vcpu_time;
    /* page address per vCPU */
    uintptr_t *vcpu_addr;
    uint32_t total_blocktime;
    /* blocktime per vCPU */
    uint32_t *vcpu_blocktime;
    /* point in time when last page fault was initiated */
    uint32_t last_begin;
    /* number of vCPUs suspended */
    int smp_cpus_down;
    uint64_t start_time;

    /*
     * Handler for exit event, necessary for
     * releasing whole blocktime_ctx
     */
    Notifier exit_notifier;
} PostcopyBlocktimeContext;

static void destroy_blocktime_context(struct PostcopyBlocktimeContext *ctx)
{
    g_free(ctx->page_fault_vcpu_time);
    g_free(ctx->vcpu_addr);
    g_free(ctx->vcpu_blocktime);
    g_free(ctx);
}

static void migration_exit_cb(Notifier *n, void *data)
{
    PostcopyBlocktimeContext *ctx = container_of(n, PostcopyBlocktimeContext,
                                                 exit_notifier);
    destroy_blocktime_context(ctx);
}

static struct PostcopyBlocktimeContext *blocktime_context_new(void)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int smp_cpus = ms->smp.cpus;
    PostcopyBlocktimeContext *ctx = g_new0(PostcopyBlocktimeContext, 1);
    ctx->page_fault_vcpu_time = g_new0(uint32_t, smp_cpus);
    ctx->vcpu_addr = g_new0(uintptr_t, smp_cpus);
    ctx->vcpu_blocktime = g_new0(uint32_t, smp_cpus);

    ctx->exit_notifier.notify = migration_exit_cb;
    ctx->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    qemu_add_exit_notifier(&ctx->exit_notifier);
    return ctx;
}

static uint32List *get_vcpu_blocktime_list(PostcopyBlocktimeContext *ctx)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    uint32List *list = NULL;
    int i;

    for (i = ms->smp.cpus - 1; i >= 0; i--) {
        QAPI_LIST_PREPEND(list, ctx->vcpu_blocktime[i]);
    }

    return list;
}

/*
 * This function just populates MigrationInfo from postcopy's
 * blocktime context. It will not populate MigrationInfo
 * unless the postcopy-blocktime capability was set.
 *
 * @info: pointer to MigrationInfo to populate
 */
void fill_destination_postcopy_migration_info(MigrationInfo *info)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *bc = mis->blocktime_ctx;

    if (!bc) {
        return;
    }

    info->has_postcopy_blocktime = true;
    info->postcopy_blocktime = bc->total_blocktime;
    info->has_postcopy_vcpu_blocktime = true;
    info->postcopy_vcpu_blocktime = get_vcpu_blocktime_list(bc);
}

static uint32_t get_postcopy_total_blocktime(void)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *bc = mis->blocktime_ctx;

    if (!bc) {
        return 0;
    }

    return bc->total_blocktime;
}

/**
 * receive_ufd_features: check userfault fd features, to request only supported
 * features in the future.
 *
 * Returns: true on success
 *
 * __NR_userfaultfd - should be checked before
 * @features: out parameter will contain uffdio_api.features provided by kernel
 *            in case of success
 */
static bool receive_ufd_features(uint64_t *features)
{
    struct uffdio_api api_struct = {0};
    int ufd;
    bool ret = true;

    /* if we are here __NR_userfaultfd should exist */
    ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: syscall __NR_userfaultfd failed: %s", __func__,
                     strerror(errno));
        return false;
    }

    /* ask features */
    api_struct.api = UFFD_API;
    api_struct.features = 0;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("%s: UFFDIO_API failed: %s", __func__,
                     strerror(errno));
        ret = false;
        goto release_ufd;
    }

    *features = api_struct.features;

release_ufd:
    close(ufd);
    return ret;
}

/**
 * request_ufd_features: this function should be called only once on a newly
 * opened ufd, subsequent calls will lead to error.
 *
 * Returns: true on success
 *
 * @ufd: fd obtained from userfaultfd syscall
 * @features: bit mask see UFFD_API_FEATURES
 */
static bool request_ufd_features(int ufd, uint64_t features)
{
    struct uffdio_api api_struct = {0};
    uint64_t ioctl_mask;

    api_struct.api = UFFD_API;
    api_struct.features = features;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("%s failed: UFFDIO_API failed: %s", __func__,
                     strerror(errno));
        return false;
    }

    ioctl_mask = (__u64)1 << _UFFDIO_REGISTER |
                 (__u64)1 << _UFFDIO_UNREGISTER;
    if ((api_struct.ioctls & ioctl_mask) != ioctl_mask) {
        error_report("Missing userfault features: %" PRIx64,
                     (uint64_t)(~api_struct.ioctls & ioctl_mask));
        return false;
    }

    return true;
}

static bool ufd_check_and_apply(int ufd, MigrationIncomingState *mis)
{
    uint64_t asked_features = 0;
    static uint64_t supported_features;

    /*
     * It's not possible to request UFFD_API twice per fd;
     * userfault fd features are persistent.
     */
    if (!supported_features) {
        if (!receive_ufd_features(&supported_features)) {
            error_report("%s failed", __func__);
            return false;
        }
    }

#ifdef UFFD_FEATURE_THREAD_ID
    if (UFFD_FEATURE_THREAD_ID & supported_features) {
        asked_features |= UFFD_FEATURE_THREAD_ID;
        if (migrate_postcopy_blocktime()) {
            if (!mis->blocktime_ctx) {
                mis->blocktime_ctx = blocktime_context_new();
            }
        }
    }
#endif

    /*
     * Request features even if asked_features is 0, because the kernel
     * expects UFFD_API before UFFDIO_REGISTER on each userfault file
     * descriptor.
     */
    if (!request_ufd_features(ufd, asked_features)) {
        error_report("%s failed: features %" PRIu64, __func__,
                     asked_features);
        return false;
    }

    if (qemu_real_host_page_size != ram_pagesize_summary()) {
        bool have_hp = false;
        /* We've got a huge page */
#ifdef UFFD_FEATURE_MISSING_HUGETLBFS
        have_hp = supported_features & UFFD_FEATURE_MISSING_HUGETLBFS;
#endif
        if (!have_hp) {
            error_report("Userfault on this host does not support huge pages");
            return false;
        }
    }
    return true;
}

/* Callback from postcopy_ram_supported_by_host block iterator.
 */
static int test_ramblock_postcopiable(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    ram_addr_t length = qemu_ram_get_used_length(rb);
    size_t pagesize = qemu_ram_pagesize(rb);

    if (length % pagesize) {
        error_report("Postcopy requires RAM blocks to be a page size multiple,"
                     " block %s is 0x" RAM_ADDR_FMT " bytes with a "
                     "page size of 0x%zx", block_name, length, pagesize);
        return 1;
    }
    return 0;
}

/*
 * Note: This has the side effect of munlock'ing all of RAM, that's
 * normally fine since if the postcopy succeeds it gets turned back on at the
 * end.
 */
bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
{
    long pagesize = qemu_real_host_page_size;
    int ufd = -1;
    bool ret = false; /* Error unless we change it */
    void *testarea = NULL;
    struct uffdio_register reg_struct;
    struct uffdio_range range_struct;
    uint64_t feature_mask;
    Error *local_err = NULL;

    if (qemu_target_page_size() > pagesize) {
        error_report("Target page size bigger than host page size");
        goto out;
    }

    ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: userfaultfd not available: %s", __func__,
                     strerror(errno));
        goto out;
    }

    /* Give devices a chance to object */
    if (postcopy_notify(POSTCOPY_NOTIFY_PROBE, &local_err)) {
        error_report_err(local_err);
        goto out;
    }

    /* Version and features check */
    if (!ufd_check_and_apply(ufd, mis)) {
        goto out;
    }

    /* We don't support postcopy with shared RAM yet */
    if (foreach_not_ignored_block(test_ramblock_postcopiable, NULL)) {
        goto out;
    }

    /*
     * userfault and mlock don't go together; we'll put it back later if
     * it was enabled.
     */
    if (munlockall()) {
        error_report("%s: munlockall: %s", __func__, strerror(errno));
        goto out;
    }

    /*
     * We need to check that the ops we need are supported on anon memory
     * To do that we need to register a chunk and see the flags that
     * are returned.
     */
    testarea = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE |
                                    MAP_ANONYMOUS, -1, 0);
    if (testarea == MAP_FAILED) {
        error_report("%s: Failed to map test area: %s", __func__,
                     strerror(errno));
        goto out;
    }
    g_assert(QEMU_PTR_IS_ALIGNED(testarea, pagesize));

    reg_struct.range.start = (uintptr_t)testarea;
    reg_struct.range.len = pagesize;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    if (ioctl(ufd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        goto out;
    }

    range_struct.start = (uintptr_t)testarea;
    range_struct.len = pagesize;
    if (ioctl(ufd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s userfault unregister: %s", __func__, strerror(errno));
        goto out;
    }

    feature_mask = (__u64)1 << _UFFDIO_WAKE |
                   (__u64)1 << _UFFDIO_COPY |
                   (__u64)1 << _UFFDIO_ZEROPAGE;
    if ((reg_struct.ioctls & feature_mask) != feature_mask) {
        error_report("Missing userfault map features: %" PRIx64,
                     (uint64_t)(~reg_struct.ioctls & feature_mask));
        goto out;
    }

    /* Success! */
    ret = true;
out:
    if (testarea) {
        munmap(testarea, pagesize);
    }
    if (ufd != -1) {
        close(ufd);
    }
    return ret;
}

/*
 * Setup an area of RAM so that it *can* be used for postcopy later; this
 * must be done right at the start prior to pre-copy.
 * opaque should be the MIS.
 */
static int init_range(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    void *host_addr = qemu_ram_get_host_addr(rb);
    ram_addr_t offset = qemu_ram_get_offset(rb);
    ram_addr_t length = qemu_ram_get_used_length(rb);
    trace_postcopy_init_range(block_name, host_addr, offset, length);

    /*
     * Save the used_length before running the guest. In case we have to
     * resize RAM blocks when syncing RAM block sizes from the source during
     * precopy, we'll update it manually via the ram block notifier.
     */
    rb->postcopy_length = length;

    /*
     * We need the whole of RAM to be truly empty for postcopy, so things
     * like ROMs and any data tables built during init must be zero'd
     * - we're going to get the copy from the source anyway.
     * (Precopy will just overwrite this data, so doesn't need the discard)
     */
    if (ram_discard_range(block_name, 0, length)) {
        return -1;
    }

    return 0;
}

/*
 * At the end of migration, undo the effects of init_range
 * opaque should be the MIS.
 */
static int cleanup_range(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    void *host_addr = qemu_ram_get_host_addr(rb);
    ram_addr_t offset = qemu_ram_get_offset(rb);
    ram_addr_t length = rb->postcopy_length;
    MigrationIncomingState *mis = opaque;
    struct uffdio_range range_struct;
    trace_postcopy_cleanup_range(block_name, host_addr, offset, length);

    /*
     * We turned off hugepage for the precopy stage with postcopy enabled
     * we can turn it back on now.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_HUGEPAGE);

    /*
     * We can also turn off userfault now since we should have all the
     * pages.   It can be useful to leave it on to debug postcopy
     * if you're not sure it's always getting every page.
     */
    range_struct.start = (uintptr_t)host_addr;
    range_struct.len = length;

    if (ioctl(mis->userfault_fd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s: userfault unregister %s", __func__, strerror(errno));

        return -1;
    }

    return 0;
}

/*
 * Initialise postcopy-ram, setting the RAM to a state where we can go into
 * postcopy later; must be called prior to any precopy.
 * called from arch_init's similarly named ram_postcopy_incoming_init
 */
int postcopy_ram_incoming_init(MigrationIncomingState *mis)
{
    if (foreach_not_ignored_block(init_range, NULL)) {
        return -1;
    }

    return 0;
}

static void postcopy_temp_pages_cleanup(MigrationIncomingState *mis)
{
    int i;

    if (mis->postcopy_tmp_pages) {
        for (i = 0; i < mis->postcopy_channels; i++) {
            if (mis->postcopy_tmp_pages[i].tmp_huge_page) {
                munmap(mis->postcopy_tmp_pages[i].tmp_huge_page,
                       mis->largest_page_size);
                mis->postcopy_tmp_pages[i].tmp_huge_page = NULL;
            }
        }
        g_free(mis->postcopy_tmp_pages);
        mis->postcopy_tmp_pages = NULL;
    }

    if (mis->postcopy_tmp_zero_page) {
        munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size);
        mis->postcopy_tmp_zero_page = NULL;
    }
}

/*
 * At the end of a migration where postcopy_ram_incoming_init was called.
 */
int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    trace_postcopy_ram_incoming_cleanup_entry();

    if (mis->have_fault_thread) {
        Error *local_err = NULL;

        /* Let the fault thread quit */
        qatomic_set(&mis->fault_thread_quit, 1);
        postcopy_fault_thread_notify(mis);
        trace_postcopy_ram_incoming_cleanup_join();
        qemu_thread_join(&mis->fault_thread);

        if (postcopy_notify(POSTCOPY_NOTIFY_INBOUND_END, &local_err)) {
            error_report_err(local_err);
            return -1;
        }

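        /*
         * Unregister every RAM block from userfaultfd and re-enable THP
         * now that all pages should have been received; see cleanup_range()
         * above.
         */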
        if (foreach_not_ignored_block(cleanup_range, mis)) {
            return -1;
        }

        trace_postcopy_ram_incoming_cleanup_closeuf();
        close(mis->userfault_fd);
        close(mis->userfault_event_fd);
        mis->have_fault_thread = false;
    }

    if (enable_mlock) {
        if (os_mlock() < 0) {
            error_report("mlock: %s", strerror(errno));
            /*
             * It doesn't feel right to fail at this point, we have a valid
             * VM state.
             */
        }
    }

    postcopy_temp_pages_cleanup(mis);

    trace_postcopy_ram_incoming_cleanup_blocktime(
        get_postcopy_total_blocktime());

    trace_postcopy_ram_incoming_cleanup_exit();
    return 0;
}

/*
 * Disable huge pages on an area
 */
static int nhp_range(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    void *host_addr = qemu_ram_get_host_addr(rb);
    ram_addr_t offset = qemu_ram_get_offset(rb);
    ram_addr_t length = rb->postcopy_length;
    trace_postcopy_nhp_range(block_name, host_addr, offset, length);

    /*
     * Before we do discards we need to ensure those discards really
     * do delete areas of the page, even if THP thinks a hugepage would
     * be a good idea, so force hugepages off.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_NOHUGEPAGE);

    return 0;
}

/*
 * Userfault requires us to mark RAM as NOHUGEPAGE prior to discard
 * however leaving it until after precopy means that most of the precopy
 * data is still THPd
 */
int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    if (foreach_not_ignored_block(nhp_range, mis)) {
        return -1;
    }

    postcopy_state_set(POSTCOPY_INCOMING_DISCARD);

    return 0;
}

/*
 * Mark the given area of RAM as requiring notification to unwritten areas
 * Used as a callback on foreach_not_ignored_block.
 *   host_addr: Base of area to mark
 *   offset: Offset in the whole ram arena
 *   length: Length of the section
 *   opaque: MigrationIncomingState pointer
 * Returns 0 on success
 */
static int ram_block_enable_notify(RAMBlock *rb, void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_register reg_struct;

    reg_struct.range.start = (uintptr_t)qemu_ram_get_host_addr(rb);
    reg_struct.range.len = rb->postcopy_length;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    /* Now tell our userfault_fd that it's responsible for this area */
    if (ioctl(mis->userfault_fd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        return -1;
    }
    if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
        error_report("%s userfault: Region doesn't support COPY", __func__);
        return -1;
    }
    if (reg_struct.ioctls & ((__u64)1 << _UFFDIO_ZEROPAGE)) {
        qemu_ram_set_uf_zeroable(rb);
    }

    return 0;
}

int postcopy_wake_shared(struct PostCopyFD *pcfd,
                         uint64_t client_addr,
                         RAMBlock *rb)
{
    size_t pagesize = qemu_ram_pagesize(rb);
    struct uffdio_range range;
    int ret;
    trace_postcopy_wake_shared(client_addr, qemu_ram_get_idstr(rb));
    range.start = ROUND_DOWN(client_addr, pagesize);
    range.len = pagesize;
    ret = ioctl(pcfd->fd, UFFDIO_WAKE, &range);
    if (ret) {
        error_report("%s: Failed to wake: %zx in %s (%s)",
                     __func__, (size_t)client_addr, qemu_ram_get_idstr(rb),
                     strerror(errno));
    }
    return ret;
}

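/*
 * Request one page from the source: 'start' is the aligned offset within
 * 'rb' and 'haddr' the faulting host virtual address.  Pages that were
 * discarded via the RamDiscardManager are satisfied locally with a zero
 * page instead of being requested from the source.
 */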
static int postcopy_request_page(MigrationIncomingState *mis, RAMBlock *rb,
                                 ram_addr_t start, uint64_t haddr)
{
    void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb));

    /*
     * Discarded pages (via RamDiscardManager) are never migrated. On unlikely
     * access, place a zeropage, which will also set the relevant bits in the
     * recv_bitmap accordingly, so we won't try placing a zeropage twice.
     *
     * Checking a single bit is sufficient to handle pagesize > TPS as either
     * all relevant bits are set or not.
     */
    assert(QEMU_IS_ALIGNED(start, qemu_ram_pagesize(rb)));
    if (ramblock_page_is_discarded(rb, start)) {
        bool received = ramblock_recv_bitmap_test_byte_offset(rb, start);

        return received ? 0 : postcopy_place_page_zero(mis, aligned, rb);
    }

    return migrate_send_rp_req_pages(mis, rb, start, haddr);
}

/*
 * Callback from shared fault handlers to ask for a page,
 * the page must be specified by a RAMBlock and an offset in that rb
 * Note: Only for use by shared fault handlers (in fault thread)
 */
int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb,
                                 uint64_t client_addr, uint64_t rb_offset)
{
    uint64_t aligned_rbo = ROUND_DOWN(rb_offset, qemu_ram_pagesize(rb));
    MigrationIncomingState *mis = migration_incoming_get_current();

    trace_postcopy_request_shared_page(pcfd->idstr, qemu_ram_get_idstr(rb),
                                       rb_offset);
    if (ramblock_recv_bitmap_test_byte_offset(rb, aligned_rbo)) {
        trace_postcopy_request_shared_page_present(pcfd->idstr,
                                                   qemu_ram_get_idstr(rb),
                                                   rb_offset);
        return postcopy_wake_shared(pcfd, client_addr, rb);
    }
    postcopy_request_page(mis, rb, aligned_rbo, client_addr);
    return 0;
}

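/*
 * Map a faulting thread id (as reported by the kernel when
 * UFFD_FEATURE_THREAD_ID is enabled) back to a vCPU index; returns -1 if
 * the fault did not originate from a vCPU thread.
 */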
static int get_mem_fault_cpu_index(uint32_t pid)
{
    CPUState *cpu_iter;

    CPU_FOREACH(cpu_iter) {
        if (cpu_iter->thread_id == pid) {
            trace_get_mem_fault_cpu_index(cpu_iter->cpu_index, pid);
            return cpu_iter->cpu_index;
        }
    }
    trace_get_mem_fault_cpu_index(-1, pid);
    return -1;
}

static uint32_t get_low_time_offset(PostcopyBlocktimeContext *dc)
{
    int64_t start_time_offset = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) -
                                    dc->start_time;
    return start_time_offset < 1 ? 1 : start_time_offset & UINT32_MAX;
}

/*
 * This function is called when a page fault occurs. It tracks the
 * vCPU blocking time.
 *
 * @addr: faulted host virtual address
 * @ptid: faulted process thread id
 * @rb: ramblock appropriate to addr
 */
static void mark_postcopy_blocktime_begin(uintptr_t addr, uint32_t ptid,
                                          RAMBlock *rb)
{
    int cpu, already_received;
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
    uint32_t low_time_offset;

    if (!dc || ptid == 0) {
        return;
    }
    cpu = get_mem_fault_cpu_index(ptid);
    if (cpu < 0) {
        return;
    }

    low_time_offset = get_low_time_offset(dc);
    if (dc->vcpu_addr[cpu] == 0) {
        qatomic_inc(&dc->smp_cpus_down);
    }

    qatomic_xchg(&dc->last_begin, low_time_offset);
    qatomic_xchg(&dc->page_fault_vcpu_time[cpu], low_time_offset);
    qatomic_xchg(&dc->vcpu_addr[cpu], addr);

    /*
     * Check it here, not at the beginning of the function, because the
     * check could occur earlier than bitmap_set in qemu_ufd_copy_ioctl.
     */
    already_received = ramblock_recv_bitmap_test(rb, (void *)addr);
    if (already_received) {
        qatomic_xchg(&dc->vcpu_addr[cpu], 0);
        qatomic_xchg(&dc->page_fault_vcpu_time[cpu], 0);
        qatomic_dec(&dc->smp_cpus_down);
    }
    trace_mark_postcopy_blocktime_begin(addr, dc, dc->page_fault_vcpu_time[cpu],
                                        cpu, already_received);
}

/*
 * This function just provides the calculated blocktime per vCPU and traces it.
 * Total blocktime is calculated in mark_postcopy_blocktime_end.
 *
 *
 * Assume we have 3 CPUs
 *
 *      S1        E1           S1               E1
 * -----***********------------xxx***************------------------------> CPU1
 *
 *             S2                E2
 * ------------****************xxx---------------------------------------> CPU2
 *
 *                         S3            E3
 * ------------------------****xxx********-------------------------------> CPU3
 *
 * We have sequence S1,S2,E1,S3,S1,E2,E3,E1
 * S2,E1 - doesn't match condition due to sequence S1,S2,E1 doesn't include CPU3
 * S3,S1,E2 - sequence includes all CPUs, in this case overlap will be S1,E2 -
 *            it's a part of total blocktime.
 * S1 - here is last_begin
 * Legend of the picture is following:
 *              * - means blocktime per vCPU
 *              x - means overlapped blocktime (total blocktime)
 *
 * @addr: host virtual address
 */
static void mark_postcopy_blocktime_end(uintptr_t addr)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int smp_cpus = ms->smp.cpus;
    int i, affected_cpu = 0;
    bool vcpu_total_blocktime = false;
    uint32_t read_vcpu_time, low_time_offset;

    if (!dc) {
        return;
    }

    low_time_offset = get_low_time_offset(dc);
    /* lookup cpu, to clear it,
     * that algorithm looks straightforward, but it's not
     * optimal, more optimal algorithm is keeping tree or hash
     * where key is address value is a list of  */
    for (i = 0; i < smp_cpus; i++) {
        uint32_t vcpu_blocktime = 0;

        read_vcpu_time = qatomic_fetch_add(&dc->page_fault_vcpu_time[i], 0);
        if (qatomic_fetch_add(&dc->vcpu_addr[i], 0) != addr ||
            read_vcpu_time == 0) {
            continue;
        }
        qatomic_xchg(&dc->vcpu_addr[i], 0);
        vcpu_blocktime = low_time_offset -
                         read_vcpu_time;
        affected_cpu += 1;
        /*
         * We need to know whether mark_postcopy_end was due to a faulted
         * page; the other possible case is a prefetched page, and in that
         * case we shouldn't be here.
         */
        if (!vcpu_total_blocktime &&
            qatomic_fetch_add(&dc->smp_cpus_down, 0) == smp_cpus) {
            vcpu_total_blocktime = true;
        }
        /* continue cycle, due to one page could affect several vCPUs */
        dc->vcpu_blocktime[i] += vcpu_blocktime;
    }

    qatomic_sub(&dc->smp_cpus_down, affected_cpu);
    if (vcpu_total_blocktime) {
        dc->total_blocktime += low_time_offset - qatomic_fetch_add(
            &dc->last_begin, 0);
    }
    trace_mark_postcopy_blocktime_end(addr, dc, dc->total_blocktime,
                                      affected_cpu);
}

static bool postcopy_pause_fault_thread(MigrationIncomingState *mis)
{
    trace_postcopy_pause_fault_thread();

    qemu_sem_wait(&mis->postcopy_pause_sem_fault);

    trace_postcopy_pause_fault_thread_continued();

    return true;
}

/*
 * Handle faults detected by the USERFAULT markings
 */
static void *postcopy_ram_fault_thread(void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffd_msg msg;
    int ret;
    size_t index;
    RAMBlock *rb = NULL;

    trace_postcopy_ram_fault_thread_entry();
    rcu_register_thread();
    mis->last_rb = NULL; /* last RAMBlock we sent part of */
    qemu_sem_post(&mis->fault_thread_sem);

    struct pollfd *pfd;
    size_t pfd_len = 2 + mis->postcopy_remote_fds->len;

    pfd = g_new0(struct pollfd, pfd_len);

    pfd[0].fd = mis->userfault_fd;
    pfd[0].events = POLLIN;
    pfd[1].fd = mis->userfault_event_fd;
    pfd[1].events = POLLIN; /* Waiting for eventfd to go positive */
    trace_postcopy_ram_fault_thread_fds_core(pfd[0].fd, pfd[1].fd);
    for (index = 0; index < mis->postcopy_remote_fds->len; index++) {
        struct PostCopyFD *pcfd = &g_array_index(mis->postcopy_remote_fds,
                                                 struct PostCopyFD, index);
        pfd[2 + index].fd = pcfd->fd;
        pfd[2 + index].events = POLLIN;
        trace_postcopy_ram_fault_thread_fds_extra(2 + index, pcfd->idstr,
                                                  pcfd->fd);
    }

    while (true) {
        ram_addr_t rb_offset;
        int poll_result;

        /*
         * We're mainly waiting for the kernel to give us a faulting HVA,
         * however we can be told to quit via userfault_quit_fd which is
         * an eventfd
         */

        poll_result = poll(pfd, pfd_len, -1 /* Wait forever */);
        if (poll_result == -1) {
            error_report("%s: userfault poll: %s", __func__, strerror(errno));
            break;
        }

        if (!mis->to_src_file) {
            /*
             * Possibly someone tells us that the return path is
             * broken already using the event. We should hold until
             * the channel is rebuilt.
             */
            if (postcopy_pause_fault_thread(mis)) {
                /* Continue to read the userfaultfd */
            } else {
                error_report("%s: paused but don't allow to continue",
                             __func__);
                break;
            }
        }

        if (pfd[1].revents) {
            uint64_t tmp64 = 0;

            /* Consume the signal */
            if (read(mis->userfault_event_fd, &tmp64, 8) != 8) {
                /* Nothing obviously nicer than posting this error. */
                error_report("%s: read() failed", __func__);
            }

            if (qatomic_read(&mis->fault_thread_quit)) {
                trace_postcopy_ram_fault_thread_quit();
                break;
            }
        }

        if (pfd[0].revents) {
            poll_result--;
            ret = read(mis->userfault_fd, &msg, sizeof(msg));
            if (ret != sizeof(msg)) {
                if (errno == EAGAIN) {
                    /*
                     * if a wake up happens on the other thread just after
                     * the poll, there is nothing to read.
                     */
                    continue;
                }
                if (ret < 0) {
                    error_report("%s: Failed to read full userfault "
                                 "message: %s",
                                 __func__, strerror(errno));
                    break;
                } else {
                    error_report("%s: Read %d bytes from userfaultfd "
                                 "expected %zd",
                                 __func__, ret, sizeof(msg));
                    break; /* Lost alignment, don't know what we'd read next */
                }
            }
            if (msg.event != UFFD_EVENT_PAGEFAULT) {
                error_report("%s: Read unexpected event %ud from userfaultfd",
                             __func__, msg.event);
                continue; /* It's not a page fault, shouldn't happen */
            }

            rb = qemu_ram_block_from_host(
                     (void *)(uintptr_t)msg.arg.pagefault.address,
                     true, &rb_offset);
            if (!rb) {
                error_report("postcopy_ram_fault_thread: Fault outside guest: %"
                             PRIx64, (uint64_t)msg.arg.pagefault.address);
                break;
            }

            rb_offset = ROUND_DOWN(rb_offset, qemu_ram_pagesize(rb));
            trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address,
                                                    qemu_ram_get_idstr(rb),
                                                    rb_offset,
                                                    msg.arg.pagefault.feat.ptid);
            mark_postcopy_blocktime_begin(
                (uintptr_t)(msg.arg.pagefault.address),
                msg.arg.pagefault.feat.ptid, rb);

retry:
            /*
             * Send the request to the source - we want to request one
             * of our host page sizes (which is >= TPS)
             */
            ret = postcopy_request_page(mis, rb, rb_offset,
                                        msg.arg.pagefault.address);
            if (ret) {
                /* May be network failure, try to wait for recovery */
                if (ret == -EIO && postcopy_pause_fault_thread(mis)) {
                    /* We got reconnected somehow, try to continue */
                    goto retry;
                } else {
                    /* This is an unavoidable fault */
                    error_report("%s: postcopy_request_page() get %d",
                                 __func__, ret);
                    break;
                }
            }
        }

        /* Now handle any requests from external processes on shared memory */
        /* TODO: May need to handle devices deregistering during postcopy */
        for (index = 2; index < pfd_len && poll_result; index++) {
            if (pfd[index].revents) {
                struct PostCopyFD *pcfd =
David Alan Gilbert &g_array_index(mis->postcopy_remote_fds, 104600fa4fc8SDr. David Alan Gilbert struct PostCopyFD, index - 2); 104700fa4fc8SDr. David Alan Gilbert 104800fa4fc8SDr. David Alan Gilbert poll_result--; 104900fa4fc8SDr. David Alan Gilbert if (pfd[index].revents & POLLERR) { 105000fa4fc8SDr. David Alan Gilbert error_report("%s: POLLERR on poll %zd fd=%d", 105100fa4fc8SDr. David Alan Gilbert __func__, index, pcfd->fd); 105200fa4fc8SDr. David Alan Gilbert pfd[index].events = 0; 105300fa4fc8SDr. David Alan Gilbert continue; 105400fa4fc8SDr. David Alan Gilbert } 105500fa4fc8SDr. David Alan Gilbert 105600fa4fc8SDr. David Alan Gilbert ret = read(pcfd->fd, &msg, sizeof(msg)); 105700fa4fc8SDr. David Alan Gilbert if (ret != sizeof(msg)) { 105800fa4fc8SDr. David Alan Gilbert if (errno == EAGAIN) { 105900fa4fc8SDr. David Alan Gilbert /* 106000fa4fc8SDr. David Alan Gilbert * if a wake up happens on the other thread just after 106100fa4fc8SDr. David Alan Gilbert * the poll, there is nothing to read. 106200fa4fc8SDr. David Alan Gilbert */ 106300fa4fc8SDr. David Alan Gilbert continue; 106400fa4fc8SDr. David Alan Gilbert } 106500fa4fc8SDr. David Alan Gilbert if (ret < 0) { 106600fa4fc8SDr. David Alan Gilbert error_report("%s: Failed to read full userfault " 106700fa4fc8SDr. David Alan Gilbert "message: %s (shared) revents=%d", 106800fa4fc8SDr. David Alan Gilbert __func__, strerror(errno), 106900fa4fc8SDr. David Alan Gilbert pfd[index].revents); 107000fa4fc8SDr. David Alan Gilbert /*TODO: Could just disable this sharer */ 107100fa4fc8SDr. David Alan Gilbert break; 107200fa4fc8SDr. David Alan Gilbert } else { 107300fa4fc8SDr. David Alan Gilbert error_report("%s: Read %d bytes from userfaultfd " 107400fa4fc8SDr. David Alan Gilbert "expected %zd (shared)", 107500fa4fc8SDr. David Alan Gilbert __func__, ret, sizeof(msg)); 107600fa4fc8SDr. David Alan Gilbert /*TODO: Could just disable this sharer */ 107700fa4fc8SDr. David Alan Gilbert break; /*Lost alignment,don't know what we'd read next*/ 107800fa4fc8SDr. David Alan Gilbert } 107900fa4fc8SDr. David Alan Gilbert } 108000fa4fc8SDr. David Alan Gilbert if (msg.event != UFFD_EVENT_PAGEFAULT) { 108100fa4fc8SDr. David Alan Gilbert error_report("%s: Read unexpected event %ud " 108200fa4fc8SDr. David Alan Gilbert "from userfaultfd (shared)", 108300fa4fc8SDr. David Alan Gilbert __func__, msg.event); 108400fa4fc8SDr. David Alan Gilbert continue; /* It's not a page fault, shouldn't happen */ 108500fa4fc8SDr. David Alan Gilbert } 108600fa4fc8SDr. David Alan Gilbert /* Call the device handler registered with us */ 108700fa4fc8SDr. David Alan Gilbert ret = pcfd->handler(pcfd, &msg); 108800fa4fc8SDr. David Alan Gilbert if (ret) { 108900fa4fc8SDr. David Alan Gilbert error_report("%s: Failed to resolve shared fault on %zd/%s", 109000fa4fc8SDr. David Alan Gilbert __func__, index, pcfd->idstr); 109100fa4fc8SDr. David Alan Gilbert /* TODO: Fail? Disable this sharer? */ 109200fa4fc8SDr. David Alan Gilbert } 109300fa4fc8SDr. David Alan Gilbert } 109400fa4fc8SDr. David Alan Gilbert } 109500fa4fc8SDr. David Alan Gilbert } 109674637e6fSLidong Chen rcu_unregister_thread(); 1097c4faeed2SDr. David Alan Gilbert trace_postcopy_ram_fault_thread_exit(); 1098fc6008f3SMarc-André Lureau g_free(pfd); 1099f0a227adSDr. David Alan Gilbert return NULL; 1100f0a227adSDr. David Alan Gilbert } 1101f0a227adSDr. 
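/*
 * Illustrative sketch, not part of the original file: the bare userfaultfd
 * round trip that the fault thread above is built around.  A thread touching
 * a missing page in a registered range produces a UFFD_EVENT_PAGEFAULT
 * message on the userfaultfd, and UFFDIO_COPY atomically fills the page and
 * wakes the faulting thread (UFFDIO_ZEROPAGE plays the same role for zero
 * pages, as in qemu_ufd_copy_ioctl() below).  The function name and its
 * arguments are placeholders; "region" is assumed to be an anonymous private
 * mapping, error handling is trimmed, and the needed headers (<poll.h>,
 * <sys/ioctl.h>, <sys/syscall.h>, <linux/userfaultfd.h>) are already pulled
 * in by the Linux-only section near the top of this file.
 */
static int uffd_serve_one_fault(void *region, size_t pagesize,
                                const void *src_page)
{
    int ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    struct uffdio_api api = { .api = UFFD_API };
    struct uffdio_register reg = {
        .range = { .start = (uint64_t)(uintptr_t)region, .len = pagesize },
        .mode = UFFDIO_REGISTER_MODE_MISSING,
    };
    struct pollfd pfd = { .fd = ufd, .events = POLLIN };
    struct uffd_msg msg;
    struct uffdio_copy copy;

    if (ufd < 0 || ioctl(ufd, UFFDIO_API, &api) ||
        ioctl(ufd, UFFDIO_REGISTER, &reg)) {
        return -1;
    }
    /* Block until another thread faults on the registered region */
    if (poll(&pfd, 1, -1) <= 0 ||
        read(ufd, &msg, sizeof(msg)) != sizeof(msg) ||
        msg.event != UFFD_EVENT_PAGEFAULT) {
        close(ufd);
        return -1;
    }
    /* Resolve the fault: place src_page there and wake the faulter */
    copy.dst = msg.arg.pagefault.address & ~(uint64_t)(pagesize - 1);
    copy.src = (uint64_t)(uintptr_t)src_page;
    copy.len = pagesize;
    copy.mode = 0;
    return ioctl(ufd, UFFDIO_COPY, &copy);
}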
David Alan Gilbert 1102476ebf77SPeter Xu static int postcopy_temp_pages_setup(MigrationIncomingState *mis) 1103476ebf77SPeter Xu { 1104*77dadc3fSPeter Xu PostcopyTmpPage *tmp_page; 1105*77dadc3fSPeter Xu int err, i, channels; 1106*77dadc3fSPeter Xu void *temp_page; 1107476ebf77SPeter Xu 1108*77dadc3fSPeter Xu /* TODO: will be boosted when enable postcopy preemption */ 1109*77dadc3fSPeter Xu mis->postcopy_channels = 1; 1110*77dadc3fSPeter Xu 1111*77dadc3fSPeter Xu channels = mis->postcopy_channels; 1112*77dadc3fSPeter Xu mis->postcopy_tmp_pages = g_malloc0_n(sizeof(PostcopyTmpPage), channels); 1113*77dadc3fSPeter Xu 1114*77dadc3fSPeter Xu for (i = 0; i < channels; i++) { 1115*77dadc3fSPeter Xu tmp_page = &mis->postcopy_tmp_pages[i]; 1116*77dadc3fSPeter Xu temp_page = mmap(NULL, mis->largest_page_size, PROT_READ | PROT_WRITE, 1117476ebf77SPeter Xu MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 1118*77dadc3fSPeter Xu if (temp_page == MAP_FAILED) { 1119476ebf77SPeter Xu err = errno; 1120*77dadc3fSPeter Xu error_report("%s: Failed to map postcopy_tmp_pages[%d]: %s", 1121*77dadc3fSPeter Xu __func__, i, strerror(err)); 1122*77dadc3fSPeter Xu /* Clean up will be done later */ 1123476ebf77SPeter Xu return -err; 1124476ebf77SPeter Xu } 1125*77dadc3fSPeter Xu tmp_page->tmp_huge_page = temp_page; 1126*77dadc3fSPeter Xu /* Initialize default states for each tmp page */ 1127*77dadc3fSPeter Xu postcopy_temp_page_reset(tmp_page); 1128*77dadc3fSPeter Xu } 1129476ebf77SPeter Xu 1130476ebf77SPeter Xu /* 1131476ebf77SPeter Xu * Map large zero page when kernel can't use UFFDIO_ZEROPAGE for hugepages 1132476ebf77SPeter Xu */ 1133476ebf77SPeter Xu mis->postcopy_tmp_zero_page = mmap(NULL, mis->largest_page_size, 1134476ebf77SPeter Xu PROT_READ | PROT_WRITE, 1135476ebf77SPeter Xu MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 1136476ebf77SPeter Xu if (mis->postcopy_tmp_zero_page == MAP_FAILED) { 1137476ebf77SPeter Xu err = errno; 1138476ebf77SPeter Xu mis->postcopy_tmp_zero_page = NULL; 1139476ebf77SPeter Xu error_report("%s: Failed to map large zero page %s", 1140476ebf77SPeter Xu __func__, strerror(err)); 1141476ebf77SPeter Xu return -err; 1142476ebf77SPeter Xu } 1143476ebf77SPeter Xu 1144476ebf77SPeter Xu memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size); 1145476ebf77SPeter Xu 1146476ebf77SPeter Xu return 0; 1147476ebf77SPeter Xu } 1148476ebf77SPeter Xu 11492a7eb148SWei Yang int postcopy_ram_incoming_setup(MigrationIncomingState *mis) 1150f0a227adSDr. David Alan Gilbert { 1151c4faeed2SDr. David Alan Gilbert /* Open the fd for the kernel to give us userfaults */ 1152c4faeed2SDr. David Alan Gilbert mis->userfault_fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK); 1153c4faeed2SDr. David Alan Gilbert if (mis->userfault_fd == -1) { 1154c4faeed2SDr. David Alan Gilbert error_report("%s: Failed to open userfault fd: %s", __func__, 1155c4faeed2SDr. David Alan Gilbert strerror(errno)); 1156c4faeed2SDr. David Alan Gilbert return -1; 1157c4faeed2SDr. David Alan Gilbert } 1158c4faeed2SDr. David Alan Gilbert 1159c4faeed2SDr. David Alan Gilbert /* 1160c4faeed2SDr. David Alan Gilbert * Although the host check already tested the API, we need to 1161c4faeed2SDr. David Alan Gilbert * do the check again as an ABI handshake on the new fd. 1162c4faeed2SDr. David Alan Gilbert */ 116354ae0886SAlexey Perevalov if (!ufd_check_and_apply(mis->userfault_fd, mis)) { 1164c4faeed2SDr. David Alan Gilbert return -1; 1165c4faeed2SDr. David Alan Gilbert } 1166c4faeed2SDr. David Alan Gilbert 1167c4faeed2SDr. 
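    /*
     * A rough, simplified sketch of what that ABI handshake amounts to
     * (see ufd_check_and_apply() earlier in this file for the real checks):
     *
     *     struct uffdio_api api = { .api = UFFD_API, .features = 0 };
     *     if (ioctl(mis->userfault_fd, UFFDIO_API, &api)) {
     *         return -1;
     *     }
     *     // api.features now advertises optional kernel features such as
     *     // UFFD_FEATURE_MISSING_HUGETLBFS and UFFD_FEATURE_THREAD_ID,
     *     // which are verified before postcopy is allowed to start.
     */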
David Alan Gilbert /* Now an eventfd we use to tell the fault-thread to quit */
116864f615feSPeter Xu mis->userfault_event_fd = eventfd(0, EFD_CLOEXEC);
116964f615feSPeter Xu if (mis->userfault_event_fd == -1) {
117064f615feSPeter Xu error_report("%s: Opening userfault_event_fd: %s", __func__,
1171c4faeed2SDr. David Alan Gilbert strerror(errno));
1172c4faeed2SDr. David Alan Gilbert close(mis->userfault_fd);
1173c4faeed2SDr. David Alan Gilbert return -1;
1174c4faeed2SDr. David Alan Gilbert }
1175c4faeed2SDr. David Alan Gilbert 
1176f0a227adSDr. David Alan Gilbert qemu_sem_init(&mis->fault_thread_sem, 0);
1177f0a227adSDr. David Alan Gilbert qemu_thread_create(&mis->fault_thread, "postcopy/fault",
1178f0a227adSDr. David Alan Gilbert postcopy_ram_fault_thread, mis, QEMU_THREAD_JOINABLE);
1179f0a227adSDr. David Alan Gilbert qemu_sem_wait(&mis->fault_thread_sem);
1180f0a227adSDr. David Alan Gilbert qemu_sem_destroy(&mis->fault_thread_sem);
1181c4faeed2SDr. David Alan Gilbert mis->have_fault_thread = true;
1182f0a227adSDr. David Alan Gilbert 
1183f0a227adSDr. David Alan Gilbert /* Mark so that we get notified of accesses to unwritten areas */
1184fbd162e6SYury Kotov if (foreach_not_ignored_block(ram_block_enable_notify, mis)) {
118591b02dc7SFei Li error_report("ram_block_enable_notify failed");
1186f0a227adSDr. David Alan Gilbert return -1;
1187f0a227adSDr. David Alan Gilbert }
1188f0a227adSDr. David Alan Gilbert 
1189476ebf77SPeter Xu if (postcopy_temp_pages_setup(mis)) {
1190476ebf77SPeter Xu /* Error dumped in the sub-function */
11913414322aSWei Yang return -1;
11923414322aSWei Yang }
11933414322aSWei Yang 
1194c4faeed2SDr. David Alan Gilbert trace_postcopy_ram_enable_notify();
1195c4faeed2SDr. David Alan Gilbert 
1196f0a227adSDr. David Alan Gilbert return 0;
1197f0a227adSDr. David Alan Gilbert }
1198f0a227adSDr. David Alan Gilbert 
1199eef621c4SPeter Xu static int qemu_ufd_copy_ioctl(MigrationIncomingState *mis, void *host_addr,
1200f9494614SAlexey Perevalov void *from_addr, uint64_t pagesize, RAMBlock *rb)
1201727b9d7eSAlexey Perevalov {
1202eef621c4SPeter Xu int userfault_fd = mis->userfault_fd;
1203f9494614SAlexey Perevalov int ret;
1204eef621c4SPeter Xu 
1205727b9d7eSAlexey Perevalov if (from_addr) {
1206727b9d7eSAlexey Perevalov struct uffdio_copy copy_struct;
1207727b9d7eSAlexey Perevalov copy_struct.dst = (uint64_t)(uintptr_t)host_addr;
1208727b9d7eSAlexey Perevalov copy_struct.src = (uint64_t)(uintptr_t)from_addr;
1209727b9d7eSAlexey Perevalov copy_struct.len = pagesize;
1210727b9d7eSAlexey Perevalov copy_struct.mode = 0;
1211f9494614SAlexey Perevalov ret = ioctl(userfault_fd, UFFDIO_COPY, &copy_struct);
1212727b9d7eSAlexey Perevalov } else {
1213727b9d7eSAlexey Perevalov struct uffdio_zeropage zero_struct;
1214727b9d7eSAlexey Perevalov zero_struct.range.start = (uint64_t)(uintptr_t)host_addr;
1215727b9d7eSAlexey Perevalov zero_struct.range.len = pagesize;
1216727b9d7eSAlexey Perevalov zero_struct.mode = 0;
1217f9494614SAlexey Perevalov ret = ioctl(userfault_fd, UFFDIO_ZEROPAGE, &zero_struct);
1218727b9d7eSAlexey Perevalov }
1219f9494614SAlexey Perevalov if (!ret) {
12208f8bfffcSPeter Xu qemu_mutex_lock(&mis->page_request_mutex);
1221f9494614SAlexey Perevalov ramblock_recv_bitmap_set_range(rb, host_addr,
1222f9494614SAlexey Perevalov pagesize / qemu_target_page_size());
12238f8bfffcSPeter Xu /*
12248f8bfffcSPeter Xu * If this page resolves a page fault for a previously recorded faulted
12258f8bfffcSPeter Xu * address, take a special note to maintain the requested page list.
12268f8bfffcSPeter Xu */ 12278f8bfffcSPeter Xu if (g_tree_lookup(mis->page_requested, host_addr)) { 12288f8bfffcSPeter Xu g_tree_remove(mis->page_requested, host_addr); 12298f8bfffcSPeter Xu mis->page_requested_count--; 12308f8bfffcSPeter Xu trace_postcopy_page_req_del(host_addr, mis->page_requested_count); 12318f8bfffcSPeter Xu } 12328f8bfffcSPeter Xu qemu_mutex_unlock(&mis->page_request_mutex); 1233575b0b33SAlexey Perevalov mark_postcopy_blocktime_end((uintptr_t)host_addr); 1234f9494614SAlexey Perevalov } 1235f9494614SAlexey Perevalov return ret; 1236727b9d7eSAlexey Perevalov } 1237727b9d7eSAlexey Perevalov 1238d488b349SDr. David Alan Gilbert int postcopy_notify_shared_wake(RAMBlock *rb, uint64_t offset) 1239d488b349SDr. David Alan Gilbert { 1240d488b349SDr. David Alan Gilbert int i; 1241d488b349SDr. David Alan Gilbert MigrationIncomingState *mis = migration_incoming_get_current(); 1242d488b349SDr. David Alan Gilbert GArray *pcrfds = mis->postcopy_remote_fds; 1243d488b349SDr. David Alan Gilbert 1244d488b349SDr. David Alan Gilbert for (i = 0; i < pcrfds->len; i++) { 1245d488b349SDr. David Alan Gilbert struct PostCopyFD *cur = &g_array_index(pcrfds, struct PostCopyFD, i); 1246d488b349SDr. David Alan Gilbert int ret = cur->waker(cur, rb, offset); 1247d488b349SDr. David Alan Gilbert if (ret) { 1248d488b349SDr. David Alan Gilbert return ret; 1249d488b349SDr. David Alan Gilbert } 1250d488b349SDr. David Alan Gilbert } 1251d488b349SDr. David Alan Gilbert return 0; 1252d488b349SDr. David Alan Gilbert } 1253d488b349SDr. David Alan Gilbert 1254696ed9a9SDr. David Alan Gilbert /* 1255696ed9a9SDr. David Alan Gilbert * Place a host page (from) at (host) atomically 1256696ed9a9SDr. David Alan Gilbert * returns 0 on success 1257696ed9a9SDr. David Alan Gilbert */ 1258df9ff5e1SDr. David Alan Gilbert int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from, 12598be4620bSAlexey Perevalov RAMBlock *rb) 1260696ed9a9SDr. David Alan Gilbert { 12618be4620bSAlexey Perevalov size_t pagesize = qemu_ram_pagesize(rb); 1262696ed9a9SDr. David Alan Gilbert 1263696ed9a9SDr. David Alan Gilbert /* copy also acks to the kernel waking the stalled thread up 1264696ed9a9SDr. David Alan Gilbert * TODO: We can inhibit that ack and only do it if it was requested 1265696ed9a9SDr. David Alan Gilbert * which would be slightly cheaper, but we'd have to be careful 1266696ed9a9SDr. David Alan Gilbert * of the order of updating our page state. 1267696ed9a9SDr. David Alan Gilbert */ 1268eef621c4SPeter Xu if (qemu_ufd_copy_ioctl(mis, host, from, pagesize, rb)) { 1269696ed9a9SDr. David Alan Gilbert int e = errno; 1270df9ff5e1SDr. David Alan Gilbert error_report("%s: %s copy host: %p from: %p (size: %zd)", 1271df9ff5e1SDr. David Alan Gilbert __func__, strerror(e), host, from, pagesize); 1272696ed9a9SDr. David Alan Gilbert 1273696ed9a9SDr. David Alan Gilbert return -e; 1274696ed9a9SDr. David Alan Gilbert } 1275696ed9a9SDr. David Alan Gilbert 1276696ed9a9SDr. David Alan Gilbert trace_postcopy_place_page(host); 1277dedfb4b2SDr. David Alan Gilbert return postcopy_notify_shared_wake(rb, 1278dedfb4b2SDr. David Alan Gilbert qemu_ram_block_host_offset(rb, host)); 1279696ed9a9SDr. David Alan Gilbert } 1280696ed9a9SDr. David Alan Gilbert 1281696ed9a9SDr. David Alan Gilbert /* 1282696ed9a9SDr. David Alan Gilbert * Place a zero page at (host) atomically 1283696ed9a9SDr. David Alan Gilbert * returns 0 on success 1284696ed9a9SDr. David Alan Gilbert */ 1285df9ff5e1SDr. 
David Alan Gilbert int postcopy_place_page_zero(MigrationIncomingState *mis, void *host, 12868be4620bSAlexey Perevalov RAMBlock *rb) 1287696ed9a9SDr. David Alan Gilbert { 12882ce16640SDr. David Alan Gilbert size_t pagesize = qemu_ram_pagesize(rb); 1289df9ff5e1SDr. David Alan Gilbert trace_postcopy_place_page_zero(host); 1290696ed9a9SDr. David Alan Gilbert 12912ce16640SDr. David Alan Gilbert /* Normal RAMBlocks can zero a page using UFFDIO_ZEROPAGE 12922ce16640SDr. David Alan Gilbert * but it's not available for everything (e.g. hugetlbpages) 12932ce16640SDr. David Alan Gilbert */ 12942ce16640SDr. David Alan Gilbert if (qemu_ram_is_uf_zeroable(rb)) { 1295eef621c4SPeter Xu if (qemu_ufd_copy_ioctl(mis, host, NULL, pagesize, rb)) { 1296696ed9a9SDr. David Alan Gilbert int e = errno; 1297696ed9a9SDr. David Alan Gilbert error_report("%s: %s zero host: %p", 1298696ed9a9SDr. David Alan Gilbert __func__, strerror(e), host); 1299696ed9a9SDr. David Alan Gilbert 1300696ed9a9SDr. David Alan Gilbert return -e; 1301696ed9a9SDr. David Alan Gilbert } 1302dedfb4b2SDr. David Alan Gilbert return postcopy_notify_shared_wake(rb, 1303dedfb4b2SDr. David Alan Gilbert qemu_ram_block_host_offset(rb, 1304dedfb4b2SDr. David Alan Gilbert host)); 1305df9ff5e1SDr. David Alan Gilbert } else { 13066629890dSWei Yang return postcopy_place_page(mis, host, mis->postcopy_tmp_zero_page, rb); 1307df9ff5e1SDr. David Alan Gilbert } 1308696ed9a9SDr. David Alan Gilbert } 1309696ed9a9SDr. David Alan Gilbert 1310eb59db53SDr. David Alan Gilbert #else 1311eb59db53SDr. David Alan Gilbert /* No target OS support, stubs just fail */ 131265ace060SAlexey Perevalov void fill_destination_postcopy_migration_info(MigrationInfo *info) 131365ace060SAlexey Perevalov { 131465ace060SAlexey Perevalov } 131565ace060SAlexey Perevalov 1316d7651f15SAlexey Perevalov bool postcopy_ram_supported_by_host(MigrationIncomingState *mis) 1317eb59db53SDr. David Alan Gilbert { 1318eb59db53SDr. David Alan Gilbert error_report("%s: No OS support", __func__); 1319eb59db53SDr. David Alan Gilbert return false; 1320eb59db53SDr. David Alan Gilbert } 1321eb59db53SDr. David Alan Gilbert 1322c136180cSDavid Hildenbrand int postcopy_ram_incoming_init(MigrationIncomingState *mis) 13231caddf8aSDr. David Alan Gilbert { 13241caddf8aSDr. David Alan Gilbert error_report("postcopy_ram_incoming_init: No OS support"); 13251caddf8aSDr. David Alan Gilbert return -1; 13261caddf8aSDr. David Alan Gilbert } 13271caddf8aSDr. David Alan Gilbert 13281caddf8aSDr. David Alan Gilbert int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis) 13291caddf8aSDr. David Alan Gilbert { 13301caddf8aSDr. David Alan Gilbert assert(0); 13311caddf8aSDr. David Alan Gilbert return -1; 13321caddf8aSDr. David Alan Gilbert } 13331caddf8aSDr. David Alan Gilbert 1334f9527107SDr. David Alan Gilbert int postcopy_ram_prepare_discard(MigrationIncomingState *mis) 1335f9527107SDr. David Alan Gilbert { 1336f9527107SDr. David Alan Gilbert assert(0); 1337f9527107SDr. David Alan Gilbert return -1; 1338f9527107SDr. David Alan Gilbert } 1339f9527107SDr. David Alan Gilbert 1340c188c539SMichael S. Tsirkin int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb, 1341c188c539SMichael S. Tsirkin uint64_t client_addr, uint64_t rb_offset) 1342c188c539SMichael S. Tsirkin { 1343c188c539SMichael S. Tsirkin assert(0); 1344c188c539SMichael S. Tsirkin return -1; 1345c188c539SMichael S. Tsirkin } 1346c188c539SMichael S. Tsirkin 13472a7eb148SWei Yang int postcopy_ram_incoming_setup(MigrationIncomingState *mis) 1348f0a227adSDr. 
David Alan Gilbert { 1349f0a227adSDr. David Alan Gilbert assert(0); 1350f0a227adSDr. David Alan Gilbert return -1; 1351f0a227adSDr. David Alan Gilbert } 1352696ed9a9SDr. David Alan Gilbert 1353df9ff5e1SDr. David Alan Gilbert int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from, 13548be4620bSAlexey Perevalov RAMBlock *rb) 1355696ed9a9SDr. David Alan Gilbert { 1356696ed9a9SDr. David Alan Gilbert assert(0); 1357696ed9a9SDr. David Alan Gilbert return -1; 1358696ed9a9SDr. David Alan Gilbert } 1359696ed9a9SDr. David Alan Gilbert 1360df9ff5e1SDr. David Alan Gilbert int postcopy_place_page_zero(MigrationIncomingState *mis, void *host, 13618be4620bSAlexey Perevalov RAMBlock *rb) 1362696ed9a9SDr. David Alan Gilbert { 1363696ed9a9SDr. David Alan Gilbert assert(0); 1364696ed9a9SDr. David Alan Gilbert return -1; 1365696ed9a9SDr. David Alan Gilbert } 1366696ed9a9SDr. David Alan Gilbert 13675efc3564SDr. David Alan Gilbert int postcopy_wake_shared(struct PostCopyFD *pcfd, 13685efc3564SDr. David Alan Gilbert uint64_t client_addr, 13695efc3564SDr. David Alan Gilbert RAMBlock *rb) 13705efc3564SDr. David Alan Gilbert { 13715efc3564SDr. David Alan Gilbert assert(0); 13725efc3564SDr. David Alan Gilbert return -1; 13735efc3564SDr. David Alan Gilbert } 1374eb59db53SDr. David Alan Gilbert #endif 1375eb59db53SDr. David Alan Gilbert 1376e0b266f0SDr. David Alan Gilbert /* ------------------------------------------------------------------------- */ 1377*77dadc3fSPeter Xu void postcopy_temp_page_reset(PostcopyTmpPage *tmp_page) 1378*77dadc3fSPeter Xu { 1379*77dadc3fSPeter Xu tmp_page->target_pages = 0; 1380*77dadc3fSPeter Xu tmp_page->host_addr = NULL; 1381*77dadc3fSPeter Xu /* 1382*77dadc3fSPeter Xu * This is set to true when reset, and cleared as long as we received any 1383*77dadc3fSPeter Xu * of the non-zero small page within this huge page. 1384*77dadc3fSPeter Xu */ 1385*77dadc3fSPeter Xu tmp_page->all_zero = true; 1386*77dadc3fSPeter Xu } 1387e0b266f0SDr. David Alan Gilbert 13889ab7ef9bSPeter Xu void postcopy_fault_thread_notify(MigrationIncomingState *mis) 13899ab7ef9bSPeter Xu { 13909ab7ef9bSPeter Xu uint64_t tmp64 = 1; 13919ab7ef9bSPeter Xu 13929ab7ef9bSPeter Xu /* 13939ab7ef9bSPeter Xu * Wakeup the fault_thread. It's an eventfd that should currently 13949ab7ef9bSPeter Xu * be at 0, we're going to increment it to 1 13959ab7ef9bSPeter Xu */ 13969ab7ef9bSPeter Xu if (write(mis->userfault_event_fd, &tmp64, 8) != 8) { 13979ab7ef9bSPeter Xu /* Not much we can do here, but may as well report it */ 13989ab7ef9bSPeter Xu error_report("%s: incrementing failed: %s", __func__, 13999ab7ef9bSPeter Xu strerror(errno)); 14009ab7ef9bSPeter Xu } 14019ab7ef9bSPeter Xu } 14029ab7ef9bSPeter Xu 1403e0b266f0SDr. David Alan Gilbert /** 1404e0b266f0SDr. David Alan Gilbert * postcopy_discard_send_init: Called at the start of each RAMBlock before 1405e0b266f0SDr. David Alan Gilbert * asking to discard individual ranges. 1406e0b266f0SDr. David Alan Gilbert * 1407e0b266f0SDr. David Alan Gilbert * @ms: The current migration state. 1408810cf2bbSWei Yang * @offset: the bitmap offset of the named RAMBlock in the migration bitmap. 1409e0b266f0SDr. David Alan Gilbert * @name: RAMBlock that discards will operate on. 1410e0b266f0SDr. David Alan Gilbert */ 1411810cf2bbSWei Yang static PostcopyDiscardState pds = {0}; 1412810cf2bbSWei Yang void postcopy_discard_send_init(MigrationState *ms, const char *name) 1413e0b266f0SDr. 
David Alan Gilbert { 1414810cf2bbSWei Yang pds.ramblock_name = name; 1415810cf2bbSWei Yang pds.cur_entry = 0; 1416810cf2bbSWei Yang pds.nsentwords = 0; 1417810cf2bbSWei Yang pds.nsentcmds = 0; 1418e0b266f0SDr. David Alan Gilbert } 1419e0b266f0SDr. David Alan Gilbert 1420e0b266f0SDr. David Alan Gilbert /** 1421e0b266f0SDr. David Alan Gilbert * postcopy_discard_send_range: Called by the bitmap code for each chunk to 1422e0b266f0SDr. David Alan Gilbert * discard. May send a discard message, may just leave it queued to 1423e0b266f0SDr. David Alan Gilbert * be sent later. 1424e0b266f0SDr. David Alan Gilbert * 1425e0b266f0SDr. David Alan Gilbert * @ms: Current migration state. 1426e0b266f0SDr. David Alan Gilbert * @start,@length: a range of pages in the migration bitmap in the 1427e0b266f0SDr. David Alan Gilbert * RAM block passed to postcopy_discard_send_init() (length=1 is one page) 1428e0b266f0SDr. David Alan Gilbert */ 1429810cf2bbSWei Yang void postcopy_discard_send_range(MigrationState *ms, unsigned long start, 1430810cf2bbSWei Yang unsigned long length) 1431e0b266f0SDr. David Alan Gilbert { 143220afaed9SJuan Quintela size_t tp_size = qemu_target_page_size(); 1433e0b266f0SDr. David Alan Gilbert /* Convert to byte offsets within the RAM block */ 1434810cf2bbSWei Yang pds.start_list[pds.cur_entry] = start * tp_size; 1435810cf2bbSWei Yang pds.length_list[pds.cur_entry] = length * tp_size; 1436810cf2bbSWei Yang trace_postcopy_discard_send_range(pds.ramblock_name, start, length); 1437810cf2bbSWei Yang pds.cur_entry++; 1438810cf2bbSWei Yang pds.nsentwords++; 1439e0b266f0SDr. David Alan Gilbert 1440810cf2bbSWei Yang if (pds.cur_entry == MAX_DISCARDS_PER_COMMAND) { 1441e0b266f0SDr. David Alan Gilbert /* Full set, ship it! */ 144289a02a9fSzhanghailiang qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file, 1443810cf2bbSWei Yang pds.ramblock_name, 1444810cf2bbSWei Yang pds.cur_entry, 1445810cf2bbSWei Yang pds.start_list, 1446810cf2bbSWei Yang pds.length_list); 1447810cf2bbSWei Yang pds.nsentcmds++; 1448810cf2bbSWei Yang pds.cur_entry = 0; 1449e0b266f0SDr. David Alan Gilbert } 1450e0b266f0SDr. David Alan Gilbert } 1451e0b266f0SDr. David Alan Gilbert 1452e0b266f0SDr. David Alan Gilbert /** 1453e0b266f0SDr. David Alan Gilbert * postcopy_discard_send_finish: Called at the end of each RAMBlock by the 1454e0b266f0SDr. David Alan Gilbert * bitmap code. Sends any outstanding discard messages, frees the PDS 1455e0b266f0SDr. David Alan Gilbert * 1456e0b266f0SDr. David Alan Gilbert * @ms: Current migration state. 1457e0b266f0SDr. David Alan Gilbert */ 1458810cf2bbSWei Yang void postcopy_discard_send_finish(MigrationState *ms) 1459e0b266f0SDr. David Alan Gilbert { 1460e0b266f0SDr. David Alan Gilbert /* Anything unsent? */ 1461810cf2bbSWei Yang if (pds.cur_entry) { 146289a02a9fSzhanghailiang qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file, 1463810cf2bbSWei Yang pds.ramblock_name, 1464810cf2bbSWei Yang pds.cur_entry, 1465810cf2bbSWei Yang pds.start_list, 1466810cf2bbSWei Yang pds.length_list); 1467810cf2bbSWei Yang pds.nsentcmds++; 1468e0b266f0SDr. David Alan Gilbert } 1469e0b266f0SDr. David Alan Gilbert 1470810cf2bbSWei Yang trace_postcopy_discard_send_finish(pds.ramblock_name, pds.nsentwords, 1471810cf2bbSWei Yang pds.nsentcmds); 1472e0b266f0SDr. 
David Alan Gilbert }
1473bac3b212SJuan Quintela 
1474bac3b212SJuan Quintela /*
1475bac3b212SJuan Quintela * Current state of incoming postcopy; note this is not part of
1476bac3b212SJuan Quintela * MigrationIncomingState since its state is used during cleanup
1477bac3b212SJuan Quintela * at the end as MIS is being freed.
1478bac3b212SJuan Quintela */
1479bac3b212SJuan Quintela static PostcopyState incoming_postcopy_state;
1480bac3b212SJuan Quintela 
1481bac3b212SJuan Quintela PostcopyState postcopy_state_get(void)
1482bac3b212SJuan Quintela {
1483d73415a3SStefan Hajnoczi return qatomic_mb_read(&incoming_postcopy_state);
1484bac3b212SJuan Quintela }
1485bac3b212SJuan Quintela 
1486bac3b212SJuan Quintela /* Set the state and return the old state */
1487bac3b212SJuan Quintela PostcopyState postcopy_state_set(PostcopyState new_state)
1488bac3b212SJuan Quintela {
1489d73415a3SStefan Hajnoczi return qatomic_xchg(&incoming_postcopy_state, new_state);
1490bac3b212SJuan Quintela }
149100fa4fc8SDr. David Alan Gilbert 
149200fa4fc8SDr. David Alan Gilbert /* Register a handler for external shared memory postcopy
149300fa4fc8SDr. David Alan Gilbert * called on the destination.
149400fa4fc8SDr. David Alan Gilbert */
149500fa4fc8SDr. David Alan Gilbert void postcopy_register_shared_ufd(struct PostCopyFD *pcfd)
149600fa4fc8SDr. David Alan Gilbert {
149700fa4fc8SDr. David Alan Gilbert MigrationIncomingState *mis = migration_incoming_get_current();
149800fa4fc8SDr. David Alan Gilbert 
149900fa4fc8SDr. David Alan Gilbert mis->postcopy_remote_fds = g_array_append_val(mis->postcopy_remote_fds,
150000fa4fc8SDr. David Alan Gilbert *pcfd);
150100fa4fc8SDr. David Alan Gilbert }
150200fa4fc8SDr. David Alan Gilbert 
150300fa4fc8SDr. David Alan Gilbert /* Unregister a handler for external shared memory postcopy
150400fa4fc8SDr. David Alan Gilbert */
150500fa4fc8SDr. David Alan Gilbert void postcopy_unregister_shared_ufd(struct PostCopyFD *pcfd)
150600fa4fc8SDr. David Alan Gilbert {
150700fa4fc8SDr. David Alan Gilbert guint i;
150800fa4fc8SDr. David Alan Gilbert MigrationIncomingState *mis = migration_incoming_get_current();
150900fa4fc8SDr. David Alan Gilbert GArray *pcrfds = mis->postcopy_remote_fds;
151000fa4fc8SDr. David Alan Gilbert 
151156559980SJuan Quintela if (!pcrfds) {
151256559980SJuan Quintela /* migration has already finished and freed the array */
151356559980SJuan Quintela return;
151456559980SJuan Quintela }
151500fa4fc8SDr. David Alan Gilbert for (i = 0; i < pcrfds->len; i++) {
151600fa4fc8SDr. David Alan Gilbert struct PostCopyFD *cur = &g_array_index(pcrfds, struct PostCopyFD, i);
151700fa4fc8SDr. David Alan Gilbert if (cur->fd == pcfd->fd) {
151800fa4fc8SDr. David Alan Gilbert mis->postcopy_remote_fds = g_array_remove_index(pcrfds, i);
151900fa4fc8SDr. David Alan Gilbert return;
152000fa4fc8SDr. David Alan Gilbert }
152100fa4fc8SDr. David Alan Gilbert }
152200fa4fc8SDr. David Alan Gilbert }
1523
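/*
 * Illustrative sketch, not part of the original file: how a source-side
 * caller is expected to drive the discard batching helpers above.  The real
 * driver lives in ram.c; send_discards_for_block(), "bitmap" and "nr_pages"
 * are placeholders for that logic, and the run-finding assumes the
 * find_next_bit()/find_next_zero_bit() helpers from "qemu/bitops.h".
 * Each contiguous run of pages to discard becomes one entry;
 * postcopy_discard_send_range() ships a command automatically every
 * MAX_DISCARDS_PER_COMMAND entries and postcopy_discard_send_finish()
 * flushes whatever is still queued.
 */
static void send_discards_for_block(MigrationState *ms, const char *name,
                                    unsigned long *bitmap,
                                    unsigned long nr_pages)
{
    unsigned long run_start = find_next_bit(bitmap, nr_pages, 0);

    postcopy_discard_send_init(ms, name);

    while (run_start < nr_pages) {
        unsigned long run_end = find_next_zero_bit(bitmap, nr_pages,
                                                   run_start + 1);

        /* One entry per contiguous run of to-be-discarded pages */
        postcopy_discard_send_range(ms, run_start, run_end - run_start);
        run_start = find_next_bit(bitmap, nr_pages, run_end);
    }

    postcopy_discard_send_finish(ms);
}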