/*
 * Postcopy migration for RAM
 *
 * Copyright 2013-2015 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Dave Gilbert  <dgilbert@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

/*
 * Postcopy is a migration technique where the execution flips from the
 * source to the destination before all the data has been copied.
 */

#include "qemu/osdep.h"
#include "exec/target_page.h"
#include "migration.h"
#include "qemu-file.h"
#include "savevm.h"
#include "postcopy-ram.h"
#include "ram.h"
#include "qapi/error.h"
#include "qemu/notify.h"
#include "qemu/rcu.h"
#include "sysemu/sysemu.h"
#include "sysemu/balloon.h"
#include "qemu/error-report.h"
#include "trace.h"
#include "hw/boards.h"

/* Arbitrary limit on size of each discard command,
 * keeps them around ~200 bytes
 */
#define MAX_DISCARDS_PER_COMMAND 12
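
/*
 * Size sketch (back-of-the-envelope, not from the original source): each
 * discard entry below is a 64-bit start plus a 64-bit length, so 12 entries
 * give 12 * 16 = 192 bytes of payload before the command header and the
 * RAMBlock name, which is roughly where the "~200 bytes" above comes from.
 */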

struct PostcopyDiscardState {
    const char *ramblock_name;
    uint16_t cur_entry;
    /*
     * Start and length of a discard range (bytes)
     */
    uint64_t start_list[MAX_DISCARDS_PER_COMMAND];
    uint64_t length_list[MAX_DISCARDS_PER_COMMAND];
    unsigned int nsentwords;
    unsigned int nsentcmds;
};

static NotifierWithReturnList postcopy_notifier_list;

void postcopy_infrastructure_init(void)
{
    notifier_with_return_list_init(&postcopy_notifier_list);
}

void postcopy_add_notifier(NotifierWithReturn *nn)
{
    notifier_with_return_list_add(&postcopy_notifier_list, nn);
}

void postcopy_remove_notifier(NotifierWithReturn *n)
{
    notifier_with_return_remove(n);
}

int postcopy_notify(enum PostcopyNotifyReason reason, Error **errp)
{
    struct PostcopyNotifyData pnd;
    pnd.reason = reason;
    pnd.errp = errp;

    return notifier_with_return_list_notify(&postcopy_notifier_list,
                                            &pnd);
}
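
/*
 * Usage sketch (assumption, not taken from this file): a device that cannot
 * cope with postcopy can register a NotifierWithReturn and veto the probe
 * stage.  The callback receives the PostcopyNotifyData built above:
 *
 *   static int mydev_postcopy_notifier(NotifierWithReturn *n, void *opaque)
 *   {
 *       struct PostcopyNotifyData *pnd = opaque;
 *
 *       if (pnd->reason == POSTCOPY_NOTIFY_PROBE && !mydev_can_postcopy()) {
 *           error_setg(pnd->errp, "mydev: postcopy is not supported");
 *           return -ENOENT;
 *       }
 *       return 0;
 *   }
 *   ...
 *   postcopy_add_notifier(&mydev->postcopy_notifier);
 *
 * The mydev_* names are hypothetical; a non-zero return aborts the chain.
 */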

/* Postcopy needs to detect accesses to pages that haven't yet been copied
 * across, and efficiently map new pages in; the techniques for doing this
 * are target OS specific.
 */
#if defined(__linux__)

#include <poll.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <asm/types.h> /* for __u64 */
#endif

#if defined(__linux__) && defined(__NR_userfaultfd) && defined(CONFIG_EVENTFD)
#include <sys/eventfd.h>
#include <linux/userfaultfd.h>

typedef struct PostcopyBlocktimeContext {
    /* time when the page fault was initiated, per vCPU */
    uint32_t *page_fault_vcpu_time;
    /* page address per vCPU */
    uintptr_t *vcpu_addr;
    uint32_t total_blocktime;
    /* blocktime per vCPU */
    uint32_t *vcpu_blocktime;
    /* point in time when the last page fault was initiated */
    uint32_t last_begin;
    /* number of vCPUs currently suspended */
    int smp_cpus_down;
    uint64_t start_time;

    /*
     * Handler for the exit event, needed to release
     * the whole blocktime_ctx
     */
    Notifier exit_notifier;
} PostcopyBlocktimeContext;

static void destroy_blocktime_context(struct PostcopyBlocktimeContext *ctx)
{
    g_free(ctx->page_fault_vcpu_time);
    g_free(ctx->vcpu_addr);
    g_free(ctx->vcpu_blocktime);
    g_free(ctx);
}

static void migration_exit_cb(Notifier *n, void *data)
{
    PostcopyBlocktimeContext *ctx = container_of(n, PostcopyBlocktimeContext,
                                                 exit_notifier);
    destroy_blocktime_context(ctx);
}

static struct PostcopyBlocktimeContext *blocktime_context_new(void)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int smp_cpus = ms->smp.cpus;
    PostcopyBlocktimeContext *ctx = g_new0(PostcopyBlocktimeContext, 1);
    ctx->page_fault_vcpu_time = g_new0(uint32_t, smp_cpus);
    ctx->vcpu_addr = g_new0(uintptr_t, smp_cpus);
    ctx->vcpu_blocktime = g_new0(uint32_t, smp_cpus);

    ctx->exit_notifier.notify = migration_exit_cb;
    ctx->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    qemu_add_exit_notifier(&ctx->exit_notifier);
    return ctx;
}

static uint32List *get_vcpu_blocktime_list(PostcopyBlocktimeContext *ctx)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    uint32List *list = NULL, *entry = NULL;
    int i;

    for (i = ms->smp.cpus - 1; i >= 0; i--) {
        entry = g_new0(uint32List, 1);
        entry->value = ctx->vcpu_blocktime[i];
        entry->next = list;
        list = entry;
    }

    return list;
}

/*
 * This function populates MigrationInfo from postcopy's blocktime
 * context. It does nothing unless the postcopy-blocktime capability
 * was set (and hence a blocktime context exists).
 *
 * @info: pointer to MigrationInfo to populate
 */
void fill_destination_postcopy_migration_info(MigrationInfo *info)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *bc = mis->blocktime_ctx;

    if (!bc) {
        return;
    }

    info->has_postcopy_blocktime = true;
    info->postcopy_blocktime = bc->total_blocktime;
    info->has_postcopy_vcpu_blocktime = true;
    info->postcopy_vcpu_blocktime = get_vcpu_blocktime_list(bc);
}
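
/*
 * Illustrative result (values are made up): with the postcopy-blocktime
 * capability enabled, query-migrate on the destination then reports e.g.
 *
 *   "postcopy-blocktime": 1230,
 *   "postcopy-vcpu-blocktime": [12, 0, 340, 95]
 *
 * i.e. the total blocktime in milliseconds plus one entry per vCPU.
 */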

static uint32_t get_postcopy_total_blocktime(void)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *bc = mis->blocktime_ctx;

    if (!bc) {
        return 0;
    }

    return bc->total_blocktime;
}

/**
 * receive_ufd_features: check userfaultfd features, so that only supported
 * features are requested later on.
 *
 * Returns: true on success
 *
 * __NR_userfaultfd - availability should have been checked beforehand
 * @features: out parameter that will contain the uffdio_api.features
 *            provided by the kernel on success
 */
static bool receive_ufd_features(uint64_t *features)
{
    struct uffdio_api api_struct = {0};
    int ufd;
    bool ret = true;

    /* if we are here __NR_userfaultfd should exist */
    ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: syscall __NR_userfaultfd failed: %s", __func__,
                     strerror(errno));
        return false;
    }

    /* ask features */
    api_struct.api = UFFD_API;
    api_struct.features = 0;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("%s: UFFDIO_API failed: %s", __func__,
                     strerror(errno));
        ret = false;
        goto release_ufd;
    }

    *features = api_struct.features;

release_ufd:
    close(ufd);
    return ret;
}

/**
 * request_ufd_features: this function should be called only once on a newly
 * opened ufd; subsequent calls will lead to an error.
 *
 * Returns: true on success
 *
 * @ufd: fd obtained from the userfaultfd syscall
 * @features: bit mask, see UFFD_API_FEATURES
 */
static bool request_ufd_features(int ufd, uint64_t features)
{
    struct uffdio_api api_struct = {0};
    uint64_t ioctl_mask;

    api_struct.api = UFFD_API;
    api_struct.features = features;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("%s failed: UFFDIO_API failed: %s", __func__,
                     strerror(errno));
        return false;
    }

    ioctl_mask = (__u64)1 << _UFFDIO_REGISTER |
                 (__u64)1 << _UFFDIO_UNREGISTER;
    if ((api_struct.ioctls & ioctl_mask) != ioctl_mask) {
        error_report("Missing userfault features: %" PRIx64,
                     (uint64_t)(~api_struct.ioctls & ioctl_mask));
        return false;
    }

    return true;
}

static bool ufd_check_and_apply(int ufd, MigrationIncomingState *mis)
{
    uint64_t asked_features = 0;
    static uint64_t supported_features;

    /*
     * It's not possible to request UFFD_API twice per fd;
     * userfaultfd features are persistent.
     */
    if (!supported_features) {
        if (!receive_ufd_features(&supported_features)) {
            error_report("%s failed", __func__);
            return false;
        }
    }

#ifdef UFFD_FEATURE_THREAD_ID
    if (migrate_postcopy_blocktime() && mis &&
        UFFD_FEATURE_THREAD_ID & supported_features) {
        /* kernel supports that feature */
        /* don't create blocktime_context if it exists */
        if (!mis->blocktime_ctx) {
            mis->blocktime_ctx = blocktime_context_new();
        }

        asked_features |= UFFD_FEATURE_THREAD_ID;
    }
#endif

    /*
     * Request the features, even if asked_features is 0, because the
     * kernel expects UFFD_API before UFFDIO_REGISTER on each userfault
     * file descriptor.
     */
    if (!request_ufd_features(ufd, asked_features)) {
        error_report("%s failed: features %" PRIu64, __func__,
                     asked_features);
        return false;
    }

    if (qemu_real_host_page_size != ram_pagesize_summary()) {
        bool have_hp = false;
        /* We've got a huge page */
#ifdef UFFD_FEATURE_MISSING_HUGETLBFS
        have_hp = supported_features & UFFD_FEATURE_MISSING_HUGETLBFS;
#endif
        if (!have_hp) {
            error_report("Userfault on this host does not support huge pages");
            return false;
        }
    }
    return true;
}

/* Callback from postcopy_ram_supported_by_host block iterator.
 */
static int test_ramblock_postcopiable(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    ram_addr_t length = qemu_ram_get_used_length(rb);
    size_t pagesize = qemu_ram_pagesize(rb);

    if (length % pagesize) {
        error_report("Postcopy requires RAM blocks to be a page size multiple,"
                     " block %s is 0x" RAM_ADDR_FMT " bytes with a "
                     "page size of 0x%zx", block_name, length, pagesize);
        return 1;
    }
    return 0;
}

/*
 * Note: This has the side effect of munlock'ing all of RAM; that's
 * normally fine since if the postcopy succeeds it gets turned back on at the
 * end.
 */
bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
{
    long pagesize = qemu_real_host_page_size;
    int ufd = -1;
    bool ret = false; /* Error unless we change it */
    void *testarea = NULL;
    struct uffdio_register reg_struct;
    struct uffdio_range range_struct;
    uint64_t feature_mask;
    Error *local_err = NULL;

    if (qemu_target_page_size() > pagesize) {
        error_report("Target page size bigger than host page size");
        goto out;
    }

    ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: userfaultfd not available: %s", __func__,
                     strerror(errno));
        goto out;
    }

    /* Give devices a chance to object */
    if (postcopy_notify(POSTCOPY_NOTIFY_PROBE, &local_err)) {
        error_report_err(local_err);
        goto out;
    }

    /* Version and features check */
    if (!ufd_check_and_apply(ufd, mis)) {
        goto out;
    }

    /* We don't support postcopy with shared RAM yet */
    if (foreach_not_ignored_block(test_ramblock_postcopiable, NULL)) {
        goto out;
    }

    /*
     * userfault and mlock don't go together; we'll put it back later if
     * it was enabled.
     */
    if (munlockall()) {
        error_report("%s: munlockall: %s", __func__,  strerror(errno));
        goto out;
    }

    /*
     *  We need to check that the ops we need are supported on anon memory.
     *  To do that we need to register a chunk and see the flags that
     *  are returned.
     */
    testarea = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE |
                                    MAP_ANONYMOUS, -1, 0);
    if (testarea == MAP_FAILED) {
        error_report("%s: Failed to map test area: %s", __func__,
                     strerror(errno));
        goto out;
    }
    g_assert(((size_t)testarea & (pagesize-1)) == 0);

    reg_struct.range.start = (uintptr_t)testarea;
    reg_struct.range.len = pagesize;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    if (ioctl(ufd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        goto out;
    }

    range_struct.start = (uintptr_t)testarea;
    range_struct.len = pagesize;
    if (ioctl(ufd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s userfault unregister: %s", __func__, strerror(errno));
        goto out;
    }

    feature_mask = (__u64)1 << _UFFDIO_WAKE |
                   (__u64)1 << _UFFDIO_COPY |
                   (__u64)1 << _UFFDIO_ZEROPAGE;
    if ((reg_struct.ioctls & feature_mask) != feature_mask) {
        error_report("Missing userfault map features: %" PRIx64,
                     (uint64_t)(~reg_struct.ioctls & feature_mask));
        goto out;
    }

    /* Success! */
    ret = true;
out:
    if (testarea) {
        munmap(testarea, pagesize);
    }
    if (ufd != -1) {
        close(ufd);
    }
    return ret;
}

/*
 * Set up an area of RAM so that it *can* be used for postcopy later; this
 * must be done right at the start prior to pre-copy.
 * opaque should be the MIS.
 */
static int init_range(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    void *host_addr = qemu_ram_get_host_addr(rb);
    ram_addr_t offset = qemu_ram_get_offset(rb);
    ram_addr_t length = qemu_ram_get_used_length(rb);
    trace_postcopy_init_range(block_name, host_addr, offset, length);

    /*
     * We need the whole of RAM to be truly empty for postcopy, so things
     * like ROMs and any data tables built during init must be zero'd
     * - we're going to get the copy from the source anyway.
     * (Precopy will just overwrite this data, so doesn't need the discard)
     */
    if (ram_discard_range(block_name, 0, length)) {
        return -1;
    }

    return 0;
}

/*
 * At the end of migration, undo the effects of init_range.
 * opaque should be the MIS.
 */
static int cleanup_range(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    void *host_addr = qemu_ram_get_host_addr(rb);
    ram_addr_t offset = qemu_ram_get_offset(rb);
    ram_addr_t length = qemu_ram_get_used_length(rb);
    MigrationIncomingState *mis = opaque;
    struct uffdio_range range_struct;
    trace_postcopy_cleanup_range(block_name, host_addr, offset, length);

    /*
     * We turned off hugepages for the precopy stage with postcopy enabled;
     * we can turn them back on now.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_HUGEPAGE);

    /*
     * We can also turn off userfault now since we should have all the
     * pages.   It can be useful to leave it on to debug postcopy
     * if you're not sure it's always getting every page.
     */
    range_struct.start = (uintptr_t)host_addr;
    range_struct.len = length;

    if (ioctl(mis->userfault_fd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s: userfault unregister %s", __func__, strerror(errno));

        return -1;
    }

    return 0;
}

/*
 * Initialise postcopy-ram, setting the RAM to a state where we can go into
 * postcopy later; must be called prior to any precopy.
 * Called from arch_init's similarly named ram_postcopy_incoming_init.
 */
int postcopy_ram_incoming_init(MigrationIncomingState *mis)
{
    if (foreach_not_ignored_block(init_range, NULL)) {
        return -1;
    }

    return 0;
}

/*
 * Manage a single vote to the QEMU balloon inhibitor for all postcopy usage;
 * the last caller wins.
 */
static void postcopy_balloon_inhibit(bool state)
{
    static bool cur_state = false;

    if (state != cur_state) {
        qemu_balloon_inhibit(state);
        cur_state = state;
    }
}

/*
 * At the end of a migration where postcopy_ram_incoming_init was called.
 */
int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    trace_postcopy_ram_incoming_cleanup_entry();

    if (mis->have_fault_thread) {
        Error *local_err = NULL;

        /* Let the fault thread quit */
        atomic_set(&mis->fault_thread_quit, 1);
        postcopy_fault_thread_notify(mis);
        trace_postcopy_ram_incoming_cleanup_join();
        qemu_thread_join(&mis->fault_thread);

        if (postcopy_notify(POSTCOPY_NOTIFY_INBOUND_END, &local_err)) {
            error_report_err(local_err);
            return -1;
        }

        if (foreach_not_ignored_block(cleanup_range, mis)) {
            return -1;
        }

        trace_postcopy_ram_incoming_cleanup_closeuf();
        close(mis->userfault_fd);
        close(mis->userfault_event_fd);
        mis->have_fault_thread = false;
    }

    postcopy_balloon_inhibit(false);

    if (enable_mlock) {
        if (os_mlock() < 0) {
            error_report("mlock: %s", strerror(errno));
            /*
             * It doesn't feel right to fail at this point, we have a valid
             * VM state.
             */
        }
    }

    if (mis->postcopy_tmp_page) {
        munmap(mis->postcopy_tmp_page, mis->largest_page_size);
        mis->postcopy_tmp_page = NULL;
    }
    if (mis->postcopy_tmp_zero_page) {
        munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size);
        mis->postcopy_tmp_zero_page = NULL;
    }
    trace_postcopy_ram_incoming_cleanup_blocktime(
            get_postcopy_total_blocktime());

    trace_postcopy_ram_incoming_cleanup_exit();
    return 0;
}

/*
 * Disable huge pages on an area
 */
static int nhp_range(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    void *host_addr = qemu_ram_get_host_addr(rb);
    ram_addr_t offset = qemu_ram_get_offset(rb);
    ram_addr_t length = qemu_ram_get_used_length(rb);
    trace_postcopy_nhp_range(block_name, host_addr, offset, length);

    /*
     * Before we do discards we need to ensure those discards really
     * do delete areas of the page, even if THP thinks a hugepage would
     * be a good idea, so force hugepages off.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_NOHUGEPAGE);

    return 0;
}

/*
 * Userfault requires us to mark RAM as NOHUGEPAGE prior to discard;
 * however, leaving it until after precopy means that most of the precopy
 * data is still THP'd.
 */
int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    if (foreach_not_ignored_block(nhp_range, mis)) {
        return -1;
    }

    postcopy_state_set(POSTCOPY_INCOMING_DISCARD);

    return 0;
}

/*
 * Mark the given area of RAM so that accesses to not-yet-written pages are
 * notified to the userfault fd.
 * Used as a callback on foreach_not_ignored_block.
 *   rb: RAMBlock to register
 *   opaque: MigrationIncomingState pointer
 * Returns 0 on success
 */
static int ram_block_enable_notify(RAMBlock *rb, void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_register reg_struct;

    reg_struct.range.start = (uintptr_t)qemu_ram_get_host_addr(rb);
    reg_struct.range.len = qemu_ram_get_used_length(rb);
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    /* Now tell our userfault_fd that it's responsible for this area */
    if (ioctl(mis->userfault_fd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        return -1;
    }
    if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
        error_report("%s userfault: Region doesn't support COPY", __func__);
        return -1;
    }
    if (reg_struct.ioctls & ((__u64)1 << _UFFDIO_ZEROPAGE)) {
        qemu_ram_set_uf_zeroable(rb);
    }

    return 0;
}

int postcopy_wake_shared(struct PostCopyFD *pcfd,
                         uint64_t client_addr,
                         RAMBlock *rb)
{
    size_t pagesize = qemu_ram_pagesize(rb);
    struct uffdio_range range;
    int ret;
    trace_postcopy_wake_shared(client_addr, qemu_ram_get_idstr(rb));
    range.start = client_addr & ~(pagesize - 1);
    range.len = pagesize;
    ret = ioctl(pcfd->fd, UFFDIO_WAKE, &range);
    if (ret) {
        error_report("%s: Failed to wake: %zx in %s (%s)",
                     __func__, (size_t)client_addr, qemu_ram_get_idstr(rb),
                     strerror(errno));
    }
    return ret;
}

/*
 * Callback from shared fault handlers to ask for a page; the page must be
 * specified by a RAMBlock and an offset in that rb.
 * Note: Only for use by shared fault handlers (in the fault thread)
 */
int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb,
                                 uint64_t client_addr, uint64_t rb_offset)
{
    size_t pagesize = qemu_ram_pagesize(rb);
    uint64_t aligned_rbo = rb_offset & ~(pagesize - 1);
    MigrationIncomingState *mis = migration_incoming_get_current();

    trace_postcopy_request_shared_page(pcfd->idstr, qemu_ram_get_idstr(rb),
                                       rb_offset);
    if (ramblock_recv_bitmap_test_byte_offset(rb, aligned_rbo)) {
        trace_postcopy_request_shared_page_present(pcfd->idstr,
                                        qemu_ram_get_idstr(rb), rb_offset);
        return postcopy_wake_shared(pcfd, client_addr, rb);
    }
    if (rb != mis->last_rb) {
        mis->last_rb = rb;
        migrate_send_rp_req_pages(mis, qemu_ram_get_idstr(rb),
                                  aligned_rbo, pagesize);
    } else {
        /* Save some space */
        migrate_send_rp_req_pages(mis, NULL, aligned_rbo, pagesize);
    }
    return 0;
}

static int get_mem_fault_cpu_index(uint32_t pid)
{
    CPUState *cpu_iter;

    CPU_FOREACH(cpu_iter) {
        if (cpu_iter->thread_id == pid) {
            trace_get_mem_fault_cpu_index(cpu_iter->cpu_index, pid);
            return cpu_iter->cpu_index;
        }
    }
    trace_get_mem_fault_cpu_index(-1, pid);
    return -1;
}

static uint32_t get_low_time_offset(PostcopyBlocktimeContext *dc)
{
    int64_t start_time_offset = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) -
                                    dc->start_time;
    return start_time_offset < 1 ? 1 : start_time_offset & UINT32_MAX;
}
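
/*
 * Worked example (illustrative numbers): with start_time recorded at
 * blocktime context creation, a fault happening 5000ms later is stored
 * as 5000.  Only the low 32 bits of the millisecond offset are kept, and
 * the result is clamped to a minimum of 1 because 0 is used elsewhere to
 * mean "no page fault outstanding" for a vCPU.
 */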

/*
 * This function is called when a page fault occurs. It tracks down the
 * time a vCPU spends blocked.
 *
 * @addr: faulted host virtual address
 * @ptid: faulted process thread id
 * @rb: ramblock appropriate to addr
 */
static void mark_postcopy_blocktime_begin(uintptr_t addr, uint32_t ptid,
                                          RAMBlock *rb)
{
    int cpu, already_received;
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
    uint32_t low_time_offset;

    if (!dc || ptid == 0) {
        return;
    }
    cpu = get_mem_fault_cpu_index(ptid);
    if (cpu < 0) {
        return;
    }

    low_time_offset = get_low_time_offset(dc);
    if (dc->vcpu_addr[cpu] == 0) {
        atomic_inc(&dc->smp_cpus_down);
    }

    atomic_xchg(&dc->last_begin, low_time_offset);
    atomic_xchg(&dc->page_fault_vcpu_time[cpu], low_time_offset);
    atomic_xchg(&dc->vcpu_addr[cpu], addr);

    /*
     * Check it here, not at the beginning of the function, because this
     * check could otherwise happen earlier than the bitmap_set in
     * qemu_ufd_copy_ioctl.
     */
    already_received = ramblock_recv_bitmap_test(rb, (void *)addr);
    if (already_received) {
        atomic_xchg(&dc->vcpu_addr[cpu], 0);
        atomic_xchg(&dc->page_fault_vcpu_time[cpu], 0);
        atomic_dec(&dc->smp_cpus_down);
    }
    trace_mark_postcopy_blocktime_begin(addr, dc, dc->page_fault_vcpu_time[cpu],
                                        cpu, already_received);
}

/*
 * This function calculates the blocktime per vCPU and traces it, and also
 * accumulates the total (overlapped) blocktime.
 *
 * Assume we have 3 CPUs:
 *
 *      S1        E1           S1               E1
 * -----***********------------xxx***************------------------------> CPU1
 *
 *             S2                E2
 * ------------****************xxx---------------------------------------> CPU2
 *
 *                         S3            E3
 * ------------------------****xxx********-------------------------------> CPU3
 *
 * We have the sequence S1,S2,E1,S3,S1,E2,E3,E1.
 * S2,E1 - doesn't match the condition, because the sequence S1,S2,E1 doesn't
 *         include CPU3.
 * S3,S1,E2 - this sequence includes all CPUs, so the overlap S1,E2 is
 *            part of the total blocktime.
 * S1 - here is last_begin
 * Legend:
 *              * - blocktime per vCPU
 *              x - overlapped blocktime (total blocktime)
 *
 * @addr: host virtual address
 */
static void mark_postcopy_blocktime_end(uintptr_t addr)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int smp_cpus = ms->smp.cpus;
    int i, affected_cpu = 0;
    bool vcpu_total_blocktime = false;
    uint32_t read_vcpu_time, low_time_offset;

    if (!dc) {
        return;
    }

    low_time_offset = get_low_time_offset(dc);
    /* Look up the cpu in order to clear it.
     * This algorithm looks straightforward but it's not optimal; a better
     * algorithm would keep a tree or hash where the key is the address and
     * the value is a list of vCPUs.
     */
    for (i = 0; i < smp_cpus; i++) {
        uint32_t vcpu_blocktime = 0;

        read_vcpu_time = atomic_fetch_add(&dc->page_fault_vcpu_time[i], 0);
        if (atomic_fetch_add(&dc->vcpu_addr[i], 0) != addr ||
            read_vcpu_time == 0) {
            continue;
        }
        atomic_xchg(&dc->vcpu_addr[i], 0);
        vcpu_blocktime = low_time_offset - read_vcpu_time;
        affected_cpu += 1;
        /* We need to know whether mark_postcopy_blocktime_end was due to a
         * faulted page; another possible case is a prefetched page, and in
         * that case we shouldn't be here.
         */
        if (!vcpu_total_blocktime &&
            atomic_fetch_add(&dc->smp_cpus_down, 0) == smp_cpus) {
            vcpu_total_blocktime = true;
        }
        /* Continue the loop, since one page could affect several vCPUs */
        dc->vcpu_blocktime[i] += vcpu_blocktime;
    }

    atomic_sub(&dc->smp_cpus_down, affected_cpu);
    if (vcpu_total_blocktime) {
        dc->total_blocktime += low_time_offset - atomic_fetch_add(
                &dc->last_begin, 0);
    }
    trace_mark_postcopy_blocktime_end(addr, dc, dc->total_blocktime,
                                      affected_cpu);
}

static bool postcopy_pause_fault_thread(MigrationIncomingState *mis)
{
    trace_postcopy_pause_fault_thread();

    qemu_sem_wait(&mis->postcopy_pause_sem_fault);

    trace_postcopy_pause_fault_thread_continued();

    return true;
}

872096bf4c8SDr. David Alan Gilbert /*
873f0a227adSDr. David Alan Gilbert  * Handle faults detected by the USERFAULT markings
874f0a227adSDr. David Alan Gilbert  */
875f0a227adSDr. David Alan Gilbert static void *postcopy_ram_fault_thread(void *opaque)
876f0a227adSDr. David Alan Gilbert {
877f0a227adSDr. David Alan Gilbert     MigrationIncomingState *mis = opaque;
878c4faeed2SDr. David Alan Gilbert     struct uffd_msg msg;
879c4faeed2SDr. David Alan Gilbert     int ret;
88000fa4fc8SDr. David Alan Gilbert     size_t index;
881c4faeed2SDr. David Alan Gilbert     RAMBlock *rb = NULL;
882f0a227adSDr. David Alan Gilbert 
883c4faeed2SDr. David Alan Gilbert     trace_postcopy_ram_fault_thread_entry();
88474637e6fSLidong Chen     rcu_register_thread();
885096bf4c8SDr. David Alan Gilbert     mis->last_rb = NULL; /* last RAMBlock we sent part of */
886f0a227adSDr. David Alan Gilbert     qemu_sem_post(&mis->fault_thread_sem);
887c4faeed2SDr. David Alan Gilbert 
88800fa4fc8SDr. David Alan Gilbert     struct pollfd *pfd;
88900fa4fc8SDr. David Alan Gilbert     size_t pfd_len = 2 + mis->postcopy_remote_fds->len;
89000fa4fc8SDr. David Alan Gilbert 
89100fa4fc8SDr. David Alan Gilbert     pfd = g_new0(struct pollfd, pfd_len);
89200fa4fc8SDr. David Alan Gilbert 
89300fa4fc8SDr. David Alan Gilbert     pfd[0].fd = mis->userfault_fd;
89400fa4fc8SDr. David Alan Gilbert     pfd[0].events = POLLIN;
89500fa4fc8SDr. David Alan Gilbert     pfd[1].fd = mis->userfault_event_fd;
89600fa4fc8SDr. David Alan Gilbert     pfd[1].events = POLLIN; /* Waiting for eventfd to go positive */
89700fa4fc8SDr. David Alan Gilbert     trace_postcopy_ram_fault_thread_fds_core(pfd[0].fd, pfd[1].fd);
89800fa4fc8SDr. David Alan Gilbert     for (index = 0; index < mis->postcopy_remote_fds->len; index++) {
89900fa4fc8SDr. David Alan Gilbert         struct PostCopyFD *pcfd = &g_array_index(mis->postcopy_remote_fds,
90000fa4fc8SDr. David Alan Gilbert                                                  struct PostCopyFD, index);
90100fa4fc8SDr. David Alan Gilbert         pfd[2 + index].fd = pcfd->fd;
90200fa4fc8SDr. David Alan Gilbert         pfd[2 + index].events = POLLIN;
90300fa4fc8SDr. David Alan Gilbert         trace_postcopy_ram_fault_thread_fds_extra(2 + index, pcfd->idstr,
90400fa4fc8SDr. David Alan Gilbert                                                   pcfd->fd);
90500fa4fc8SDr. David Alan Gilbert     }
90600fa4fc8SDr. David Alan Gilbert 
907c4faeed2SDr. David Alan Gilbert     while (true) {
908c4faeed2SDr. David Alan Gilbert         ram_addr_t rb_offset;
90900fa4fc8SDr. David Alan Gilbert         int poll_result;
910c4faeed2SDr. David Alan Gilbert 
911c4faeed2SDr. David Alan Gilbert         /*
912c4faeed2SDr. David Alan Gilbert          * We're mainly waiting for the kernel to give us a faulting HVA,
913c4faeed2SDr. David Alan Gilbert          * however we can be told to quit via userfault_quit_fd which is
914c4faeed2SDr. David Alan Gilbert          * an eventfd
915c4faeed2SDr. David Alan Gilbert          */
916c4faeed2SDr. David Alan Gilbert 
91700fa4fc8SDr. David Alan Gilbert         poll_result = poll(pfd, pfd_len, -1 /* Wait forever */);
91800fa4fc8SDr. David Alan Gilbert         if (poll_result == -1) {
919c4faeed2SDr. David Alan Gilbert             error_report("%s: userfault poll: %s", __func__, strerror(errno));
920c4faeed2SDr. David Alan Gilbert             break;
921f0a227adSDr. David Alan Gilbert         }
922f0a227adSDr. David Alan Gilbert 
9233a7804c3SPeter Xu         if (!mis->to_src_file) {
9243a7804c3SPeter Xu             /*
9253a7804c3SPeter Xu              * Someone may have told us via the event that the return
9263a7804c3SPeter Xu              * path is already broken. We should hold until the channel
9273a7804c3SPeter Xu              * is rebuilt.
9283a7804c3SPeter Xu              */
9293a7804c3SPeter Xu             if (postcopy_pause_fault_thread(mis)) {
9303a7804c3SPeter Xu                 mis->last_rb = NULL;
9313a7804c3SPeter Xu                 /* Continue to read the userfaultfd */
9323a7804c3SPeter Xu             } else {
9333a7804c3SPeter Xu                 error_report("%s: paused but not allowed to continue",
9343a7804c3SPeter Xu                              __func__);
9353a7804c3SPeter Xu                 break;
9363a7804c3SPeter Xu             }
9373a7804c3SPeter Xu         }
9383a7804c3SPeter Xu 
939c4faeed2SDr. David Alan Gilbert         if (pfd[1].revents) {
94064f615feSPeter Xu             uint64_t tmp64 = 0;
94164f615feSPeter Xu 
94264f615feSPeter Xu             /* Consume the signal */
94364f615feSPeter Xu             if (read(mis->userfault_event_fd, &tmp64, 8) != 8) {
94464f615feSPeter Xu                 /* Nothing obviously nicer than posting this error. */
94564f615feSPeter Xu                 error_report("%s: read() failed", __func__);
94664f615feSPeter Xu             }
94764f615feSPeter Xu 
94864f615feSPeter Xu             if (atomic_read(&mis->fault_thread_quit)) {
949c4faeed2SDr. David Alan Gilbert                 trace_postcopy_ram_fault_thread_quit();
950c4faeed2SDr. David Alan Gilbert                 break;
951c4faeed2SDr. David Alan Gilbert             }
95264f615feSPeter Xu         }
953c4faeed2SDr. David Alan Gilbert 
95400fa4fc8SDr. David Alan Gilbert         if (pfd[0].revents) {
95500fa4fc8SDr. David Alan Gilbert             poll_result--;
956c4faeed2SDr. David Alan Gilbert             ret = read(mis->userfault_fd, &msg, sizeof(msg));
957c4faeed2SDr. David Alan Gilbert             if (ret != sizeof(msg)) {
958c4faeed2SDr. David Alan Gilbert                 if (errno == EAGAIN) {
959c4faeed2SDr. David Alan Gilbert                     /*
960c4faeed2SDr. David Alan Gilbert                      * if a wake up happens on the other thread just after
961c4faeed2SDr. David Alan Gilbert                      * the poll, there is nothing to read.
962c4faeed2SDr. David Alan Gilbert                      */
963c4faeed2SDr. David Alan Gilbert                     continue;
964c4faeed2SDr. David Alan Gilbert                 }
965c4faeed2SDr. David Alan Gilbert                 if (ret < 0) {
96600fa4fc8SDr. David Alan Gilbert                     error_report("%s: Failed to read full userfault "
96700fa4fc8SDr. David Alan Gilbert                                  "message: %s",
968c4faeed2SDr. David Alan Gilbert                                  __func__, strerror(errno));
969c4faeed2SDr. David Alan Gilbert                     break;
970c4faeed2SDr. David Alan Gilbert                 } else {
97100fa4fc8SDr. David Alan Gilbert                     error_report("%s: Read %d bytes from userfaultfd "
97200fa4fc8SDr. David Alan Gilbert                                  "expected %zd",
973c4faeed2SDr. David Alan Gilbert                                  __func__, ret, sizeof(msg));
974c4faeed2SDr. David Alan Gilbert                     break; /* Lost alignment, don't know what we'd read next */
975c4faeed2SDr. David Alan Gilbert                 }
976c4faeed2SDr. David Alan Gilbert             }
977c4faeed2SDr. David Alan Gilbert             if (msg.event != UFFD_EVENT_PAGEFAULT) {
978c4faeed2SDr. David Alan Gilbert                 error_report("%s: Read unexpected event %u from userfaultfd",
979c4faeed2SDr. David Alan Gilbert                              __func__, msg.event);
980c4faeed2SDr. David Alan Gilbert                 continue; /* It's not a page fault, shouldn't happen */
981c4faeed2SDr. David Alan Gilbert             }
982c4faeed2SDr. David Alan Gilbert 
983c4faeed2SDr. David Alan Gilbert             rb = qemu_ram_block_from_host(
984c4faeed2SDr. David Alan Gilbert                      (void *)(uintptr_t)msg.arg.pagefault.address,
985f615f396SPaolo Bonzini                      true, &rb_offset);
986c4faeed2SDr. David Alan Gilbert             if (!rb) {
987c4faeed2SDr. David Alan Gilbert                 error_report("postcopy_ram_fault_thread: Fault outside guest: %"
988c4faeed2SDr. David Alan Gilbert                              PRIx64, (uint64_t)msg.arg.pagefault.address);
989c4faeed2SDr. David Alan Gilbert                 break;
990c4faeed2SDr. David Alan Gilbert             }
991c4faeed2SDr. David Alan Gilbert 
992332847f0SDr. David Alan Gilbert             rb_offset &= ~(qemu_ram_pagesize(rb) - 1);
993c4faeed2SDr. David Alan Gilbert             trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address,
994c4faeed2SDr. David Alan Gilbert                                                 qemu_ram_get_idstr(rb),
995575b0b33SAlexey Perevalov                                                 rb_offset,
996575b0b33SAlexey Perevalov                                                 msg.arg.pagefault.feat.ptid);
997575b0b33SAlexey Perevalov             mark_postcopy_blocktime_begin(
998575b0b33SAlexey Perevalov                     (uintptr_t)(msg.arg.pagefault.address),
999575b0b33SAlexey Perevalov                                 msg.arg.pagefault.feat.ptid, rb);
1000575b0b33SAlexey Perevalov 
10013a7804c3SPeter Xu retry:
1002c4faeed2SDr. David Alan Gilbert             /*
1003c4faeed2SDr. David Alan Gilbert              * Send the request to the source - we want to request one
1004c4faeed2SDr. David Alan Gilbert              * of our host page sizes (which is >= TPS)
1005c4faeed2SDr. David Alan Gilbert              */
1006096bf4c8SDr. David Alan Gilbert             if (rb != mis->last_rb) {
1007096bf4c8SDr. David Alan Gilbert                 mis->last_rb = rb;
10083a7804c3SPeter Xu                 ret = migrate_send_rp_req_pages(mis,
10093a7804c3SPeter Xu                                                 qemu_ram_get_idstr(rb),
10103a7804c3SPeter Xu                                                 rb_offset,
10113a7804c3SPeter Xu                                                 qemu_ram_pagesize(rb));
1012c4faeed2SDr. David Alan Gilbert             } else {
1013c4faeed2SDr. David Alan Gilbert                 /* Save some space */
10143a7804c3SPeter Xu                 ret = migrate_send_rp_req_pages(mis,
10153a7804c3SPeter Xu                                                 NULL,
10163a7804c3SPeter Xu                                                 rb_offset,
10173a7804c3SPeter Xu                                                 qemu_ram_pagesize(rb));
10183a7804c3SPeter Xu             }
10193a7804c3SPeter Xu 
10203a7804c3SPeter Xu             if (ret) {
10213a7804c3SPeter Xu                 /* May be network failure, try to wait for recovery */
10223a7804c3SPeter Xu                 if (ret == -EIO && postcopy_pause_fault_thread(mis)) {
10233a7804c3SPeter Xu                     /* We got reconnected somehow, try to continue */
10243a7804c3SPeter Xu                     mis->last_rb = NULL;
10253a7804c3SPeter Xu                     goto retry;
10263a7804c3SPeter Xu                 } else {
10273a7804c3SPeter Xu                     /* This is an unavoidable fault */
10283a7804c3SPeter Xu                     error_report("%s: migrate_send_rp_req_pages() returned %d",
10293a7804c3SPeter Xu                                  __func__, ret);
10303a7804c3SPeter Xu                     break;
10313a7804c3SPeter Xu                 }
1032c4faeed2SDr. David Alan Gilbert             }
1033c4faeed2SDr. David Alan Gilbert         }
103400fa4fc8SDr. David Alan Gilbert 
103500fa4fc8SDr. David Alan Gilbert         /* Now handle any requests from external processes on shared memory */
103600fa4fc8SDr. David Alan Gilbert         /* TODO: May need to handle devices deregistering during postcopy */
103700fa4fc8SDr. David Alan Gilbert         for (index = 2; index < pfd_len && poll_result; index++) {
103800fa4fc8SDr. David Alan Gilbert             if (pfd[index].revents) {
103900fa4fc8SDr. David Alan Gilbert                 struct PostCopyFD *pcfd =
104000fa4fc8SDr. David Alan Gilbert                     &g_array_index(mis->postcopy_remote_fds,
104100fa4fc8SDr. David Alan Gilbert                                    struct PostCopyFD, index - 2);
104200fa4fc8SDr. David Alan Gilbert 
104300fa4fc8SDr. David Alan Gilbert                 poll_result--;
104400fa4fc8SDr. David Alan Gilbert                 if (pfd[index].revents & POLLERR) {
104500fa4fc8SDr. David Alan Gilbert                     error_report("%s: POLLERR on poll %zd fd=%d",
104600fa4fc8SDr. David Alan Gilbert                                  __func__, index, pcfd->fd);
104700fa4fc8SDr. David Alan Gilbert                     pfd[index].events = 0;
104800fa4fc8SDr. David Alan Gilbert                     continue;
104900fa4fc8SDr. David Alan Gilbert                 }
105000fa4fc8SDr. David Alan Gilbert 
105100fa4fc8SDr. David Alan Gilbert                 ret = read(pcfd->fd, &msg, sizeof(msg));
105200fa4fc8SDr. David Alan Gilbert                 if (ret != sizeof(msg)) {
105300fa4fc8SDr. David Alan Gilbert                     if (errno == EAGAIN) {
105400fa4fc8SDr. David Alan Gilbert                         /*
105500fa4fc8SDr. David Alan Gilbert                          * if a wake up happens on the other thread just after
105600fa4fc8SDr. David Alan Gilbert                          * the poll, there is nothing to read.
105700fa4fc8SDr. David Alan Gilbert                          */
105800fa4fc8SDr. David Alan Gilbert                         continue;
105900fa4fc8SDr. David Alan Gilbert                     }
106000fa4fc8SDr. David Alan Gilbert                     if (ret < 0) {
106100fa4fc8SDr. David Alan Gilbert                         error_report("%s: Failed to read full userfault "
106200fa4fc8SDr. David Alan Gilbert                                      "message: %s (shared) revents=%d",
106300fa4fc8SDr. David Alan Gilbert                                      __func__, strerror(errno),
106400fa4fc8SDr. David Alan Gilbert                                      pfd[index].revents);
106500fa4fc8SDr. David Alan Gilbert                         /* TODO: Could just disable this sharer */
106600fa4fc8SDr. David Alan Gilbert                         break;
106700fa4fc8SDr. David Alan Gilbert                     } else {
106800fa4fc8SDr. David Alan Gilbert                         error_report("%s: Read %d bytes from userfaultfd "
106900fa4fc8SDr. David Alan Gilbert                                      "expected %zd (shared)",
107000fa4fc8SDr. David Alan Gilbert                                      __func__, ret, sizeof(msg));
107100fa4fc8SDr. David Alan Gilbert                         /* TODO: Could just disable this sharer */
107200fa4fc8SDr. David Alan Gilbert                         break; /* Lost alignment, don't know what we'd read next */
107300fa4fc8SDr. David Alan Gilbert                     }
107400fa4fc8SDr. David Alan Gilbert                 }
107500fa4fc8SDr. David Alan Gilbert                 if (msg.event != UFFD_EVENT_PAGEFAULT) {
107600fa4fc8SDr. David Alan Gilbert                     error_report("%s: Read unexpected event %u "
107700fa4fc8SDr. David Alan Gilbert                                  "from userfaultfd (shared)",
107800fa4fc8SDr. David Alan Gilbert                                  __func__, msg.event);
107900fa4fc8SDr. David Alan Gilbert                     continue; /* It's not a page fault, shouldn't happen */
108000fa4fc8SDr. David Alan Gilbert                 }
108100fa4fc8SDr. David Alan Gilbert                 /* Call the device handler registered with us */
108200fa4fc8SDr. David Alan Gilbert                 ret = pcfd->handler(pcfd, &msg);
108300fa4fc8SDr. David Alan Gilbert                 if (ret) {
108400fa4fc8SDr. David Alan Gilbert                     error_report("%s: Failed to resolve shared fault on %zd/%s",
108500fa4fc8SDr. David Alan Gilbert                                  __func__, index, pcfd->idstr);
108600fa4fc8SDr. David Alan Gilbert                     /* TODO: Fail? Disable this sharer? */
108700fa4fc8SDr. David Alan Gilbert                 }
108800fa4fc8SDr. David Alan Gilbert             }
108900fa4fc8SDr. David Alan Gilbert         }
109000fa4fc8SDr. David Alan Gilbert     }
109174637e6fSLidong Chen     rcu_unregister_thread();
1092c4faeed2SDr. David Alan Gilbert     trace_postcopy_ram_fault_thread_exit();
1093fc6008f3SMarc-André Lureau     g_free(pfd);
1094f0a227adSDr. David Alan Gilbert     return NULL;
1095f0a227adSDr. David Alan Gilbert }
1096f0a227adSDr. David Alan Gilbert 
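/*
 * External users of shared memory (e.g. vhost-user backends) hook into
 * the loop above by registering a PostCopyFD whose callbacks resolve
 * faults on their region; see postcopy_register_shared_ufd() below.  A
 * hedged sketch of such a registration (callback names and the exact
 * handler prototype are illustrative, the real one is in postcopy-ram.h):
 *
 *     static int my_shared_fault_handler(struct PostCopyFD *pcfd,
 *                                        void *ufd_msg)
 *     {
 *         // decode the uffd_msg, request/place the missing page
 *         return 0;                    // 0 => fault resolved
 *     }
 *
 *     struct PostCopyFD pcfd = {
 *         .fd      = shared_uffd,      // userfaultfd for the shared region
 *         .handler = my_shared_fault_handler,
 *         .idstr   = "my-shared-region",
 *     };
 *     postcopy_register_shared_ufd(&pcfd);
 *
 * The waker callback is invoked from postcopy_notify_shared_wake() once a
 * page has been placed, so the sharer can wake its own stalled threads.
 */
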
10972a7eb148SWei Yang int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
1098f0a227adSDr. David Alan Gilbert {
1099c4faeed2SDr. David Alan Gilbert     /* Open the fd for the kernel to give us userfaults */
1100c4faeed2SDr. David Alan Gilbert     mis->userfault_fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
1101c4faeed2SDr. David Alan Gilbert     if (mis->userfault_fd == -1) {
1102c4faeed2SDr. David Alan Gilbert         error_report("%s: Failed to open userfault fd: %s", __func__,
1103c4faeed2SDr. David Alan Gilbert                      strerror(errno));
1104c4faeed2SDr. David Alan Gilbert         return -1;
1105c4faeed2SDr. David Alan Gilbert     }
1106c4faeed2SDr. David Alan Gilbert 
1107c4faeed2SDr. David Alan Gilbert     /*
1108c4faeed2SDr. David Alan Gilbert      * Although the host check already tested the API, we need to
1109c4faeed2SDr. David Alan Gilbert      * do the check again as an ABI handshake on the new fd.
1110c4faeed2SDr. David Alan Gilbert      */
111154ae0886SAlexey Perevalov     if (!ufd_check_and_apply(mis->userfault_fd, mis)) {
1112c4faeed2SDr. David Alan Gilbert         return -1;
1113c4faeed2SDr. David Alan Gilbert     }
1114c4faeed2SDr. David Alan Gilbert 
1115c4faeed2SDr. David Alan Gilbert     /* Now an eventfd we use to tell the fault-thread to quit */
111664f615feSPeter Xu     mis->userfault_event_fd = eventfd(0, EFD_CLOEXEC);
111764f615feSPeter Xu     if (mis->userfault_event_fd == -1) {
111864f615feSPeter Xu         error_report("%s: Opening userfault_event_fd: %s", __func__,
1119c4faeed2SDr. David Alan Gilbert                      strerror(errno));
1120c4faeed2SDr. David Alan Gilbert         close(mis->userfault_fd);
1121c4faeed2SDr. David Alan Gilbert         return -1;
1122c4faeed2SDr. David Alan Gilbert     }
1123c4faeed2SDr. David Alan Gilbert 
1124f0a227adSDr. David Alan Gilbert     qemu_sem_init(&mis->fault_thread_sem, 0);
1125f0a227adSDr. David Alan Gilbert     qemu_thread_create(&mis->fault_thread, "postcopy/fault",
1126f0a227adSDr. David Alan Gilbert                        postcopy_ram_fault_thread, mis, QEMU_THREAD_JOINABLE);
1127f0a227adSDr. David Alan Gilbert     qemu_sem_wait(&mis->fault_thread_sem);
1128f0a227adSDr. David Alan Gilbert     qemu_sem_destroy(&mis->fault_thread_sem);
1129c4faeed2SDr. David Alan Gilbert     mis->have_fault_thread = true;
1130f0a227adSDr. David Alan Gilbert 
1131f0a227adSDr. David Alan Gilbert     /* Mark so that we get notified of accesses to unwritten areas */
1132fbd162e6SYury Kotov     if (foreach_not_ignored_block(ram_block_enable_notify, mis)) {
113391b02dc7SFei Li         error_report("ram_block_enable_notify failed");
1134f0a227adSDr. David Alan Gilbert         return -1;
1135f0a227adSDr. David Alan Gilbert     }
1136f0a227adSDr. David Alan Gilbert 
11373414322aSWei Yang     mis->postcopy_tmp_page = mmap(NULL, mis->largest_page_size,
11383414322aSWei Yang                                   PROT_READ | PROT_WRITE, MAP_PRIVATE |
11393414322aSWei Yang                                   MAP_ANONYMOUS, -1, 0);
11403414322aSWei Yang     if (mis->postcopy_tmp_page == MAP_FAILED) {
11413414322aSWei Yang         mis->postcopy_tmp_page = NULL;
11423414322aSWei Yang         error_report("%s: Failed to map postcopy_tmp_page %s",
11433414322aSWei Yang                      __func__, strerror(errno));
11443414322aSWei Yang         return -1;
11453414322aSWei Yang     }
11463414322aSWei Yang 
1147371ff5a3SDr. David Alan Gilbert     /*
11486629890dSWei Yang      * Map large zero page when kernel can't use UFFDIO_ZEROPAGE for hugepages
11496629890dSWei Yang      */
11506629890dSWei Yang     mis->postcopy_tmp_zero_page = mmap(NULL, mis->largest_page_size,
11516629890dSWei Yang                                        PROT_READ | PROT_WRITE,
11526629890dSWei Yang                                        MAP_PRIVATE | MAP_ANONYMOUS,
11536629890dSWei Yang                                        -1, 0);
11546629890dSWei Yang     if (mis->postcopy_tmp_zero_page == MAP_FAILED) {
11556629890dSWei Yang         int e = errno;
11566629890dSWei Yang         mis->postcopy_tmp_zero_page = NULL;
11576629890dSWei Yang         error_report("%s: Failed to map large zero page %s",
11586629890dSWei Yang                      __func__, strerror(e));
11596629890dSWei Yang         return -e;
11606629890dSWei Yang     }
11616629890dSWei Yang     memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size);
11626629890dSWei Yang 
11636629890dSWei Yang     /*
1164371ff5a3SDr. David Alan Gilbert      * Ballooning can mark pages as absent while we're postcopying,
1165371ff5a3SDr. David Alan Gilbert      * which would cause false userfaults.
1166371ff5a3SDr. David Alan Gilbert      */
1167154304cdSAlex Williamson     postcopy_balloon_inhibit(true);
1168371ff5a3SDr. David Alan Gilbert 
1169c4faeed2SDr. David Alan Gilbert     trace_postcopy_ram_enable_notify();
1170c4faeed2SDr. David Alan Gilbert 
1171f0a227adSDr. David Alan Gilbert     return 0;
1172f0a227adSDr. David Alan Gilbert }
1173f0a227adSDr. David Alan Gilbert 
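/*
 * The ABI handshake done by ufd_check_and_apply() on the fresh fd boils
 * down to the standard userfaultfd UFFDIO_API negotiation.  A stripped
 * down sketch, with error handling and the feature/ioctl mask checks
 * omitted (only the ioctl and struct are kernel ABI, the rest is
 * illustrative):
 *
 *     struct uffdio_api api_struct = {
 *         .api      = UFFD_API,
 *         .features = 0,           // optional features are requested here
 *     };
 *     if (ioctl(ufd, UFFDIO_API, &api_struct)) {
 *         return false;            // kernel rejected the API/features
 *     }
 *     // api_struct.ioctls now advertises the supported UFFDIO_* requests
 */
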
1174727b9d7eSAlexey Perevalov static int qemu_ufd_copy_ioctl(int userfault_fd, void *host_addr,
1175f9494614SAlexey Perevalov                                void *from_addr, uint64_t pagesize, RAMBlock *rb)
1176727b9d7eSAlexey Perevalov {
1177f9494614SAlexey Perevalov     int ret;
1178727b9d7eSAlexey Perevalov     if (from_addr) {
1179727b9d7eSAlexey Perevalov         struct uffdio_copy copy_struct;
1180727b9d7eSAlexey Perevalov         copy_struct.dst = (uint64_t)(uintptr_t)host_addr;
1181727b9d7eSAlexey Perevalov         copy_struct.src = (uint64_t)(uintptr_t)from_addr;
1182727b9d7eSAlexey Perevalov         copy_struct.len = pagesize;
1183727b9d7eSAlexey Perevalov         copy_struct.mode = 0;
1184f9494614SAlexey Perevalov         ret = ioctl(userfault_fd, UFFDIO_COPY, &copy_struct);
1185727b9d7eSAlexey Perevalov     } else {
1186727b9d7eSAlexey Perevalov         struct uffdio_zeropage zero_struct;
1187727b9d7eSAlexey Perevalov         zero_struct.range.start = (uint64_t)(uintptr_t)host_addr;
1188727b9d7eSAlexey Perevalov         zero_struct.range.len = pagesize;
1189727b9d7eSAlexey Perevalov         zero_struct.mode = 0;
1190f9494614SAlexey Perevalov         ret = ioctl(userfault_fd, UFFDIO_ZEROPAGE, &zero_struct);
1191727b9d7eSAlexey Perevalov     }
1192f9494614SAlexey Perevalov     if (!ret) {
1193f9494614SAlexey Perevalov         ramblock_recv_bitmap_set_range(rb, host_addr,
1194f9494614SAlexey Perevalov                                        pagesize / qemu_target_page_size());
1195575b0b33SAlexey Perevalov         mark_postcopy_blocktime_end((uintptr_t)host_addr);
1196575b0b33SAlexey Perevalov 
1197f9494614SAlexey Perevalov     }
1198f9494614SAlexey Perevalov     return ret;
1199727b9d7eSAlexey Perevalov }
1200727b9d7eSAlexey Perevalov 
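/*
 * With .mode left at 0, both UFFDIO_COPY and UFFDIO_ZEROPAGE install the
 * page atomically and implicitly wake any thread stalled on that address,
 * so no separate UFFDIO_WAKE is needed on this path.  A hedged sketch of
 * how a received page is typically resolved (names are illustrative, the
 * real caller lives in the incoming RAM code):
 *
 *     // 1. assemble the incoming data in a private temporary buffer
 *     memcpy(mis->postcopy_tmp_page, incoming_data, pagesize);
 *     // 2. atomically map it at the faulting address and wake waiters
 *     if (qemu_ufd_copy_ioctl(mis->userfault_fd, faulting_host_addr,
 *                             mis->postcopy_tmp_page, pagesize, rb)) {
 *         // placement failed, errno describes why
 *     }
 */
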
1201d488b349SDr. David Alan Gilbert int postcopy_notify_shared_wake(RAMBlock *rb, uint64_t offset)
1202d488b349SDr. David Alan Gilbert {
1203d488b349SDr. David Alan Gilbert     int i;
1204d488b349SDr. David Alan Gilbert     MigrationIncomingState *mis = migration_incoming_get_current();
1205d488b349SDr. David Alan Gilbert     GArray *pcrfds = mis->postcopy_remote_fds;
1206d488b349SDr. David Alan Gilbert 
1207d488b349SDr. David Alan Gilbert     for (i = 0; i < pcrfds->len; i++) {
1208d488b349SDr. David Alan Gilbert         struct PostCopyFD *cur = &g_array_index(pcrfds, struct PostCopyFD, i);
1209d488b349SDr. David Alan Gilbert         int ret = cur->waker(cur, rb, offset);
1210d488b349SDr. David Alan Gilbert         if (ret) {
1211d488b349SDr. David Alan Gilbert             return ret;
1212d488b349SDr. David Alan Gilbert         }
1213d488b349SDr. David Alan Gilbert     }
1214d488b349SDr. David Alan Gilbert     return 0;
1215d488b349SDr. David Alan Gilbert }
1216d488b349SDr. David Alan Gilbert 
1217696ed9a9SDr. David Alan Gilbert /*
1218696ed9a9SDr. David Alan Gilbert  * Place a host page (from) at (host) atomically
1219696ed9a9SDr. David Alan Gilbert  * returns 0 on success
1220696ed9a9SDr. David Alan Gilbert  */
1221df9ff5e1SDr. David Alan Gilbert int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
12228be4620bSAlexey Perevalov                         RAMBlock *rb)
1223696ed9a9SDr. David Alan Gilbert {
12248be4620bSAlexey Perevalov     size_t pagesize = qemu_ram_pagesize(rb);
1225696ed9a9SDr. David Alan Gilbert 
1226696ed9a9SDr. David Alan Gilbert     /* The copy also acts as an ack to the kernel, waking the stalled thread up.
1227696ed9a9SDr. David Alan Gilbert      * TODO: We can inhibit that ack and only do it if it was requested
1228696ed9a9SDr. David Alan Gilbert      * which would be slightly cheaper, but we'd have to be careful
1229696ed9a9SDr. David Alan Gilbert      * of the order of updating our page state.
1230696ed9a9SDr. David Alan Gilbert      */
1231f9494614SAlexey Perevalov     if (qemu_ufd_copy_ioctl(mis->userfault_fd, host, from, pagesize, rb)) {
1232696ed9a9SDr. David Alan Gilbert         int e = errno;
1233df9ff5e1SDr. David Alan Gilbert         error_report("%s: %s copy host: %p from: %p (size: %zd)",
1234df9ff5e1SDr. David Alan Gilbert                      __func__, strerror(e), host, from, pagesize);
1235696ed9a9SDr. David Alan Gilbert 
1236696ed9a9SDr. David Alan Gilbert         return -e;
1237696ed9a9SDr. David Alan Gilbert     }
1238696ed9a9SDr. David Alan Gilbert 
1239696ed9a9SDr. David Alan Gilbert     trace_postcopy_place_page(host);
1240dedfb4b2SDr. David Alan Gilbert     return postcopy_notify_shared_wake(rb,
1241dedfb4b2SDr. David Alan Gilbert                                        qemu_ram_block_host_offset(rb, host));
1242696ed9a9SDr. David Alan Gilbert }
1243696ed9a9SDr. David Alan Gilbert 
1244696ed9a9SDr. David Alan Gilbert /*
1245696ed9a9SDr. David Alan Gilbert  * Place a zero page at (host) atomically
1246696ed9a9SDr. David Alan Gilbert  * returns 0 on success
1247696ed9a9SDr. David Alan Gilbert  */
1248df9ff5e1SDr. David Alan Gilbert int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
12498be4620bSAlexey Perevalov                              RAMBlock *rb)
1250696ed9a9SDr. David Alan Gilbert {
12512ce16640SDr. David Alan Gilbert     size_t pagesize = qemu_ram_pagesize(rb);
1252df9ff5e1SDr. David Alan Gilbert     trace_postcopy_place_page_zero(host);
1253696ed9a9SDr. David Alan Gilbert 
12542ce16640SDr. David Alan Gilbert     /* Normal RAMBlocks can zero a page using UFFDIO_ZEROPAGE
12552ce16640SDr. David Alan Gilbert      * but it's not available for everything (e.g. hugetlbpages)
12562ce16640SDr. David Alan Gilbert      */
12572ce16640SDr. David Alan Gilbert     if (qemu_ram_is_uf_zeroable(rb)) {
12582ce16640SDr. David Alan Gilbert         if (qemu_ufd_copy_ioctl(mis->userfault_fd, host, NULL, pagesize, rb)) {
1259696ed9a9SDr. David Alan Gilbert             int e = errno;
1260696ed9a9SDr. David Alan Gilbert             error_report("%s: %s zero host: %p",
1261696ed9a9SDr. David Alan Gilbert                          __func__, strerror(e), host);
1262696ed9a9SDr. David Alan Gilbert 
1263696ed9a9SDr. David Alan Gilbert             return -e;
1264696ed9a9SDr. David Alan Gilbert         }
1265dedfb4b2SDr. David Alan Gilbert         return postcopy_notify_shared_wake(rb,
1266dedfb4b2SDr. David Alan Gilbert                                            qemu_ram_block_host_offset(rb,
1267dedfb4b2SDr. David Alan Gilbert                                                                       host));
1268df9ff5e1SDr. David Alan Gilbert     } else {
12696629890dSWei Yang         return postcopy_place_page(mis, host, mis->postcopy_tmp_zero_page, rb);
1270df9ff5e1SDr. David Alan Gilbert     }
1271696ed9a9SDr. David Alan Gilbert }
1272696ed9a9SDr. David Alan Gilbert 
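/*
 * Usage sketch for the two placement helpers above (hypothetical caller;
 * roughly what the incoming RAM path does once a whole host page has
 * been assembled):
 *
 *     if (all_zero) {
 *         ret = postcopy_place_page_zero(mis, host_page_addr, rb);
 *     } else {
 *         ret = postcopy_place_page(mis, host_page_addr,
 *                                   mis->postcopy_tmp_page, rb);
 *     }
 *     if (ret) {
 *         // the page could not be placed; the fault stays unresolved
 *     }
 */
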
1273eb59db53SDr. David Alan Gilbert #else
1274eb59db53SDr. David Alan Gilbert /* No target OS support, stubs just fail */
127565ace060SAlexey Perevalov void fill_destination_postcopy_migration_info(MigrationInfo *info)
127665ace060SAlexey Perevalov {
127765ace060SAlexey Perevalov }
127865ace060SAlexey Perevalov 
1279d7651f15SAlexey Perevalov bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
1280eb59db53SDr. David Alan Gilbert {
1281eb59db53SDr. David Alan Gilbert     error_report("%s: No OS support", __func__);
1282eb59db53SDr. David Alan Gilbert     return false;
1283eb59db53SDr. David Alan Gilbert }
1284eb59db53SDr. David Alan Gilbert 
1285c136180cSDavid Hildenbrand int postcopy_ram_incoming_init(MigrationIncomingState *mis)
12861caddf8aSDr. David Alan Gilbert {
12871caddf8aSDr. David Alan Gilbert     error_report("postcopy_ram_incoming_init: No OS support");
12881caddf8aSDr. David Alan Gilbert     return -1;
12891caddf8aSDr. David Alan Gilbert }
12901caddf8aSDr. David Alan Gilbert 
12911caddf8aSDr. David Alan Gilbert int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
12921caddf8aSDr. David Alan Gilbert {
12931caddf8aSDr. David Alan Gilbert     assert(0);
12941caddf8aSDr. David Alan Gilbert     return -1;
12951caddf8aSDr. David Alan Gilbert }
12961caddf8aSDr. David Alan Gilbert 
1297f9527107SDr. David Alan Gilbert int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
1298f9527107SDr. David Alan Gilbert {
1299f9527107SDr. David Alan Gilbert     assert(0);
1300f9527107SDr. David Alan Gilbert     return -1;
1301f9527107SDr. David Alan Gilbert }
1302f9527107SDr. David Alan Gilbert 
1303c188c539SMichael S. Tsirkin int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb,
1304c188c539SMichael S. Tsirkin                                  uint64_t client_addr, uint64_t rb_offset)
1305c188c539SMichael S. Tsirkin {
1306c188c539SMichael S. Tsirkin     assert(0);
1307c188c539SMichael S. Tsirkin     return -1;
1308c188c539SMichael S. Tsirkin }
1309c188c539SMichael S. Tsirkin 
13102a7eb148SWei Yang int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
1311f0a227adSDr. David Alan Gilbert {
1312f0a227adSDr. David Alan Gilbert     assert(0);
1313f0a227adSDr. David Alan Gilbert     return -1;
1314f0a227adSDr. David Alan Gilbert }
1315696ed9a9SDr. David Alan Gilbert 
1316df9ff5e1SDr. David Alan Gilbert int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
13178be4620bSAlexey Perevalov                         RAMBlock *rb)
1318696ed9a9SDr. David Alan Gilbert {
1319696ed9a9SDr. David Alan Gilbert     assert(0);
1320696ed9a9SDr. David Alan Gilbert     return -1;
1321696ed9a9SDr. David Alan Gilbert }
1322696ed9a9SDr. David Alan Gilbert 
1323df9ff5e1SDr. David Alan Gilbert int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
13248be4620bSAlexey Perevalov                         RAMBlock *rb)
1325696ed9a9SDr. David Alan Gilbert {
1326696ed9a9SDr. David Alan Gilbert     assert(0);
1327696ed9a9SDr. David Alan Gilbert     return -1;
1328696ed9a9SDr. David Alan Gilbert }
1329696ed9a9SDr. David Alan Gilbert 
13305efc3564SDr. David Alan Gilbert int postcopy_wake_shared(struct PostCopyFD *pcfd,
13315efc3564SDr. David Alan Gilbert                          uint64_t client_addr,
13325efc3564SDr. David Alan Gilbert                          RAMBlock *rb)
13335efc3564SDr. David Alan Gilbert {
13345efc3564SDr. David Alan Gilbert     assert(0);
13355efc3564SDr. David Alan Gilbert     return -1;
13365efc3564SDr. David Alan Gilbert }
1337eb59db53SDr. David Alan Gilbert #endif
1338eb59db53SDr. David Alan Gilbert 
1339e0b266f0SDr. David Alan Gilbert /* ------------------------------------------------------------------------- */
1340e0b266f0SDr. David Alan Gilbert 
13419ab7ef9bSPeter Xu void postcopy_fault_thread_notify(MigrationIncomingState *mis)
13429ab7ef9bSPeter Xu {
13439ab7ef9bSPeter Xu     uint64_t tmp64 = 1;
13449ab7ef9bSPeter Xu 
13459ab7ef9bSPeter Xu     /*
13469ab7ef9bSPeter Xu      * Wake up the fault_thread.  It's an eventfd that should currently
13479ab7ef9bSPeter Xu      * be at 0; we're going to increment it to 1.
13489ab7ef9bSPeter Xu      */
13499ab7ef9bSPeter Xu     if (write(mis->userfault_event_fd, &tmp64, 8) != 8) {
13509ab7ef9bSPeter Xu         /* Not much we can do here, but may as well report it */
13519ab7ef9bSPeter Xu         error_report("%s: incrementing failed: %s", __func__,
13529ab7ef9bSPeter Xu                      strerror(errno));
13539ab7ef9bSPeter Xu     }
13549ab7ef9bSPeter Xu }
13559ab7ef9bSPeter Xu 
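/*
 * A hedged sketch of how this notification is used to stop the fault
 * thread (roughly what the incoming cleanup path does; the exact code is
 * elsewhere in this file):
 *
 *     atomic_set(&mis->fault_thread_quit, 1);
 *     postcopy_fault_thread_notify(mis);    // kick the poll() loop
 *     qemu_thread_join(&mis->fault_thread);
 *
 * Posting the eventfd without setting fault_thread_quit merely wakes the
 * thread, e.g. so it can notice a broken return path and pause.
 */
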
1356e0b266f0SDr. David Alan Gilbert /**
1357e0b266f0SDr. David Alan Gilbert  * postcopy_discard_send_init: Called at the start of each RAMBlock before
1358e0b266f0SDr. David Alan Gilbert  *   asking to discard individual ranges.
1359e0b266f0SDr. David Alan Gilbert  *
1360e0b266f0SDr. David Alan Gilbert  * @ms: The current migration state.
1362e0b266f0SDr. David Alan Gilbert  * @name: RAMBlock that discards will operate on.
1363e0b266f0SDr. David Alan Gilbert  */
1364810cf2bbSWei Yang static PostcopyDiscardState pds = {0};
1365810cf2bbSWei Yang void postcopy_discard_send_init(MigrationState *ms, const char *name)
1366e0b266f0SDr. David Alan Gilbert {
1367810cf2bbSWei Yang     pds.ramblock_name = name;
1368810cf2bbSWei Yang     pds.cur_entry = 0;
1369810cf2bbSWei Yang     pds.nsentwords = 0;
1370810cf2bbSWei Yang     pds.nsentcmds = 0;
1371e0b266f0SDr. David Alan Gilbert }
1372e0b266f0SDr. David Alan Gilbert 
1373e0b266f0SDr. David Alan Gilbert /**
1374e0b266f0SDr. David Alan Gilbert  * postcopy_discard_send_range: Called by the bitmap code for each chunk to
1375e0b266f0SDr. David Alan Gilbert  *   discard. May send a discard message, or may just leave it queued to
1376e0b266f0SDr. David Alan Gilbert  *   be sent later.
1377e0b266f0SDr. David Alan Gilbert  *
1378e0b266f0SDr. David Alan Gilbert  * @ms: Current migration state.
1379e0b266f0SDr. David Alan Gilbert  * @start,@length: a range of pages in the migration bitmap in the
1380e0b266f0SDr. David Alan Gilbert  *   RAM block passed to postcopy_discard_send_init() (length=1 is one page)
1381e0b266f0SDr. David Alan Gilbert  */
1382810cf2bbSWei Yang void postcopy_discard_send_range(MigrationState *ms, unsigned long start,
1383810cf2bbSWei Yang                                  unsigned long length)
1384e0b266f0SDr. David Alan Gilbert {
138520afaed9SJuan Quintela     size_t tp_size = qemu_target_page_size();
1386e0b266f0SDr. David Alan Gilbert     /* Convert to byte offsets within the RAM block */
1387810cf2bbSWei Yang     pds.start_list[pds.cur_entry] = start  * tp_size;
1388810cf2bbSWei Yang     pds.length_list[pds.cur_entry] = length * tp_size;
1389810cf2bbSWei Yang     trace_postcopy_discard_send_range(pds.ramblock_name, start, length);
1390810cf2bbSWei Yang     pds.cur_entry++;
1391810cf2bbSWei Yang     pds.nsentwords++;
1392e0b266f0SDr. David Alan Gilbert 
1393810cf2bbSWei Yang     if (pds.cur_entry == MAX_DISCARDS_PER_COMMAND) {
1394e0b266f0SDr. David Alan Gilbert         /* Full set, ship it! */
139589a02a9fSzhanghailiang         qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
1396810cf2bbSWei Yang                                               pds.ramblock_name,
1397810cf2bbSWei Yang                                               pds.cur_entry,
1398810cf2bbSWei Yang                                               pds.start_list,
1399810cf2bbSWei Yang                                               pds.length_list);
1400810cf2bbSWei Yang         pds.nsentcmds++;
1401810cf2bbSWei Yang         pds.cur_entry = 0;
1402e0b266f0SDr. David Alan Gilbert     }
1403e0b266f0SDr. David Alan Gilbert }
1404e0b266f0SDr. David Alan Gilbert 
1405e0b266f0SDr. David Alan Gilbert /**
1406e0b266f0SDr. David Alan Gilbert  * postcopy_discard_send_finish: Called at the end of each RAMBlock by the
1407e0b266f0SDr. David Alan Gilbert  * bitmap code. Sends any outstanding discard messages.
1408e0b266f0SDr. David Alan Gilbert  *
1409e0b266f0SDr. David Alan Gilbert  * @ms: Current migration state.
1410e0b266f0SDr. David Alan Gilbert  */
1411810cf2bbSWei Yang void postcopy_discard_send_finish(MigrationState *ms)
1412e0b266f0SDr. David Alan Gilbert {
1413e0b266f0SDr. David Alan Gilbert     /* Anything unsent? */
1414810cf2bbSWei Yang     if (pds.cur_entry) {
141589a02a9fSzhanghailiang         qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
1416810cf2bbSWei Yang                                               pds.ramblock_name,
1417810cf2bbSWei Yang                                               pds.cur_entry,
1418810cf2bbSWei Yang                                               pds.start_list,
1419810cf2bbSWei Yang                                               pds.length_list);
1420810cf2bbSWei Yang         pds.nsentcmds++;
1421e0b266f0SDr. David Alan Gilbert     }
1422e0b266f0SDr. David Alan Gilbert 
1423810cf2bbSWei Yang     trace_postcopy_discard_send_finish(pds.ramblock_name, pds.nsentwords,
1424810cf2bbSWei Yang                                        pds.nsentcmds);
1425e0b266f0SDr. David Alan Gilbert }
1426bac3b212SJuan Quintela 
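/*
 * Usage sketch for the three discard helpers above (hypothetical caller;
 * the real driver is the dirty bitmap walk on the source side in ram.c):
 *
 *     postcopy_discard_send_init(ms, block->idstr);
 *     // for each run of target pages that must be discarded on the
 *     // destination, expressed as (start, length) in pages:
 *     postcopy_discard_send_range(ms, start, length);
 *     ...
 *     postcopy_discard_send_finish(ms);    // flush any partial batch
 */
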
1427bac3b212SJuan Quintela /*
1428bac3b212SJuan Quintela  * Current state of incoming postcopy; note this is not part of
1429bac3b212SJuan Quintela  * MigrationIncomingState since its state is used during cleanup
1430bac3b212SJuan Quintela  * at the end as MIS is being freed.
1431bac3b212SJuan Quintela  */
1432bac3b212SJuan Quintela static PostcopyState incoming_postcopy_state;
1433bac3b212SJuan Quintela 
1434bac3b212SJuan Quintela PostcopyState  postcopy_state_get(void)
1435bac3b212SJuan Quintela {
1436bac3b212SJuan Quintela     return atomic_mb_read(&incoming_postcopy_state);
1437bac3b212SJuan Quintela }
1438bac3b212SJuan Quintela 
1439bac3b212SJuan Quintela /* Set the state and return the old state */
1440bac3b212SJuan Quintela PostcopyState postcopy_state_set(PostcopyState new_state)
1441bac3b212SJuan Quintela {
1442bac3b212SJuan Quintela     return atomic_xchg(&incoming_postcopy_state, new_state);
1443bac3b212SJuan Quintela }
144400fa4fc8SDr. David Alan Gilbert 
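/*
 * A hedged example of how the accessors above are used around a state
 * transition (illustrative only; the PostcopyState values are defined in
 * the migration headers):
 *
 *     PostcopyState old = postcopy_state_set(POSTCOPY_INCOMING_LISTENING);
 *     if (old != POSTCOPY_INCOMING_ADVISE &&
 *         old != POSTCOPY_INCOMING_DISCARD) {
 *         // unexpected transition, report it
 *     }
 */
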
144500fa4fc8SDr. David Alan Gilbert /* Register a handler for external shared memory postcopy
144600fa4fc8SDr. David Alan Gilbert  * called on the destination.
144700fa4fc8SDr. David Alan Gilbert  */
144800fa4fc8SDr. David Alan Gilbert void postcopy_register_shared_ufd(struct PostCopyFD *pcfd)
144900fa4fc8SDr. David Alan Gilbert {
145000fa4fc8SDr. David Alan Gilbert     MigrationIncomingState *mis = migration_incoming_get_current();
145100fa4fc8SDr. David Alan Gilbert 
145200fa4fc8SDr. David Alan Gilbert     mis->postcopy_remote_fds = g_array_append_val(mis->postcopy_remote_fds,
145300fa4fc8SDr. David Alan Gilbert                                                   *pcfd);
145400fa4fc8SDr. David Alan Gilbert }
145500fa4fc8SDr. David Alan Gilbert 
145600fa4fc8SDr. David Alan Gilbert /* Unregister a handler for external shared memory postcopy
145700fa4fc8SDr. David Alan Gilbert  */
145800fa4fc8SDr. David Alan Gilbert void postcopy_unregister_shared_ufd(struct PostCopyFD *pcfd)
145900fa4fc8SDr. David Alan Gilbert {
146000fa4fc8SDr. David Alan Gilbert     guint i;
146100fa4fc8SDr. David Alan Gilbert     MigrationIncomingState *mis = migration_incoming_get_current();
146200fa4fc8SDr. David Alan Gilbert     GArray *pcrfds = mis->postcopy_remote_fds;
146300fa4fc8SDr. David Alan Gilbert 
146400fa4fc8SDr. David Alan Gilbert     for (i = 0; i < pcrfds->len; i++) {
146500fa4fc8SDr. David Alan Gilbert         struct PostCopyFD *cur = &g_array_index(pcrfds, struct PostCopyFD, i);
146600fa4fc8SDr. David Alan Gilbert         if (cur->fd == pcfd->fd) {
146700fa4fc8SDr. David Alan Gilbert             mis->postcopy_remote_fds = g_array_remove_index(pcrfds, i);
146800fa4fc8SDr. David Alan Gilbert             return;
146900fa4fc8SDr. David Alan Gilbert         }
147000fa4fc8SDr. David Alan Gilbert     }
147100fa4fc8SDr. David Alan Gilbert }
1472