xref: /qemu/migration/postcopy-ram.c (revision 810cf2bbd4c5c1417bda8bec49caf0ababc22860)
/*
 * Postcopy migration for RAM
 *
 * Copyright 2013-2015 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Dave Gilbert  <dgilbert@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

/*
 * Postcopy is a migration technique where the execution flips from the
 * source to the destination before all the data has been copied.
 */

#include "qemu/osdep.h"
#include "exec/target_page.h"
#include "migration.h"
#include "qemu-file.h"
#include "savevm.h"
#include "postcopy-ram.h"
#include "ram.h"
#include "qapi/error.h"
#include "qemu/notify.h"
#include "sysemu/sysemu.h"
#include "sysemu/balloon.h"
#include "qemu/error-report.h"
#include "trace.h"
#include "hw/boards.h"

/* Arbitrary limit on size of each discard command,
 * keeps them around ~200 bytes
 */
#define MAX_DISCARDS_PER_COMMAND 12

struct PostcopyDiscardState {
    const char *ramblock_name;
    uint16_t cur_entry;
    /*
     * Start and length of a discard range (bytes)
     */
    uint64_t start_list[MAX_DISCARDS_PER_COMMAND];
    uint64_t length_list[MAX_DISCARDS_PER_COMMAND];
    unsigned int nsentwords;
    unsigned int nsentcmds;
};

static NotifierWithReturnList postcopy_notifier_list;

void postcopy_infrastructure_init(void)
{
    notifier_with_return_list_init(&postcopy_notifier_list);
}

void postcopy_add_notifier(NotifierWithReturn *nn)
{
    notifier_with_return_list_add(&postcopy_notifier_list, nn);
}

void postcopy_remove_notifier(NotifierWithReturn *n)
{
    notifier_with_return_remove(n);
}

int postcopy_notify(enum PostcopyNotifyReason reason, Error **errp)
{
    struct PostcopyNotifyData pnd;
    pnd.reason = reason;
    pnd.errp = errp;

    return notifier_with_return_list_notify(&postcopy_notifier_list,
                                            &pnd);
}

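/*
 * Illustrative sketch (not part of the original file): a device that cannot
 * cope with postcopy could veto it from a notifier registered with
 * postcopy_add_notifier().  The "mydev" names below are hypothetical;
 * PostcopyNotifyData and the reason values come from postcopy-ram.h.
 *
 *   static int mydev_postcopy_notifier(NotifierWithReturn *n, void *opaque)
 *   {
 *       struct PostcopyNotifyData *pnd = opaque;
 *
 *       if (pnd->reason == POSTCOPY_NOTIFY_PROBE) {
 *           error_setg(pnd->errp, "mydev does not support postcopy");
 *           return -ENOTSUP;
 *       }
 *       return 0;
 *   }
 *
 *   mydev->postcopy_notifier.notify = mydev_postcopy_notifier;
 *   postcopy_add_notifier(&mydev->postcopy_notifier);
 *
 * A non-zero return from any notifier stops the notification and is
 * propagated back to the caller of postcopy_notify().
 */
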
/* Postcopy needs to detect accesses to pages that haven't yet been copied
 * across, and efficiently map new pages in; the techniques for doing this
 * are target OS specific.
 */
#if defined(__linux__)

#include <poll.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <asm/types.h> /* for __u64 */
#endif

#if defined(__linux__) && defined(__NR_userfaultfd) && defined(CONFIG_EVENTFD)
#include <sys/eventfd.h>
#include <linux/userfaultfd.h>

typedef struct PostcopyBlocktimeContext {
    /* time when page fault initiated per vCPU */
    uint32_t *page_fault_vcpu_time;
    /* page address per vCPU */
    uintptr_t *vcpu_addr;
    uint32_t total_blocktime;
    /* blocktime per vCPU */
    uint32_t *vcpu_blocktime;
    /* point in time when last page fault was initiated */
    uint32_t last_begin;
    /* number of vCPUs currently suspended */
    int smp_cpus_down;
    uint64_t start_time;

    /*
     * Handler for exit event, necessary for
     * releasing whole blocktime_ctx
     */
    Notifier exit_notifier;
} PostcopyBlocktimeContext;

static void destroy_blocktime_context(struct PostcopyBlocktimeContext *ctx)
{
    g_free(ctx->page_fault_vcpu_time);
    g_free(ctx->vcpu_addr);
    g_free(ctx->vcpu_blocktime);
    g_free(ctx);
}

static void migration_exit_cb(Notifier *n, void *data)
{
    PostcopyBlocktimeContext *ctx = container_of(n, PostcopyBlocktimeContext,
                                                 exit_notifier);
    destroy_blocktime_context(ctx);
}

static struct PostcopyBlocktimeContext *blocktime_context_new(void)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int smp_cpus = ms->smp.cpus;
    PostcopyBlocktimeContext *ctx = g_new0(PostcopyBlocktimeContext, 1);
    ctx->page_fault_vcpu_time = g_new0(uint32_t, smp_cpus);
    ctx->vcpu_addr = g_new0(uintptr_t, smp_cpus);
    ctx->vcpu_blocktime = g_new0(uint32_t, smp_cpus);

    ctx->exit_notifier.notify = migration_exit_cb;
    ctx->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    qemu_add_exit_notifier(&ctx->exit_notifier);
    return ctx;
}

static uint32List *get_vcpu_blocktime_list(PostcopyBlocktimeContext *ctx)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    uint32List *list = NULL, *entry = NULL;
    int i;

    for (i = ms->smp.cpus - 1; i >= 0; i--) {
        entry = g_new0(uint32List, 1);
        entry->value = ctx->vcpu_blocktime[i];
        entry->next = list;
        list = entry;
    }

    return list;
}

/*
 * This function just populates MigrationInfo from postcopy's
 * blocktime context. It will not populate MigrationInfo
 * unless the postcopy-blocktime capability was set.
 *
 * @info: pointer to MigrationInfo to populate
 */
void fill_destination_postcopy_migration_info(MigrationInfo *info)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *bc = mis->blocktime_ctx;

    if (!bc) {
        return;
    }

    info->has_postcopy_blocktime = true;
    info->postcopy_blocktime = bc->total_blocktime;
    info->has_postcopy_vcpu_blocktime = true;
    info->postcopy_vcpu_blocktime = get_vcpu_blocktime_list(bc);
}

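/*
 * For illustration only (not in the original source): with the
 * postcopy-blocktime capability enabled on the destination, the fields
 * filled in above surface in the reply to the QMP "query-migrate"
 * command, roughly like:
 *
 *   { "execute": "query-migrate" }
 *   { "return": { "status": "completed",
 *                 "postcopy-blocktime": 327,
 *                 "postcopy-vcpu-blocktime": [ 112, 53, 98, 64 ],
 *                 ... } }
 *
 * The numeric values are made up; times are in milliseconds.
 */
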
static uint32_t get_postcopy_total_blocktime(void)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *bc = mis->blocktime_ctx;

    if (!bc) {
        return 0;
    }

    return bc->total_blocktime;
}

/**
 * receive_ufd_features: check userfault fd features, to request only supported
 * features in the future.
 *
 * Returns: true on success
 *
 * __NR_userfaultfd - should be checked before calling this function
 *  @features: out parameter; on success contains the uffdio_api.features
 *             provided by the kernel
 */
static bool receive_ufd_features(uint64_t *features)
{
    struct uffdio_api api_struct = {0};
    int ufd;
    bool ret = true;

    /* if we are here, __NR_userfaultfd should exist */
    ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: syscall __NR_userfaultfd failed: %s", __func__,
                     strerror(errno));
        return false;
    }

    /* ask features */
    api_struct.api = UFFD_API;
    api_struct.features = 0;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("%s: UFFDIO_API failed: %s", __func__,
                     strerror(errno));
        ret = false;
        goto release_ufd;
    }

    *features = api_struct.features;

release_ufd:
    close(ufd);
    return ret;
}

/**
 * request_ufd_features: this function should be called only once on a newly
 * opened ufd, subsequent calls will lead to error.
 *
 * Returns: true on success
 *
 * @ufd: fd obtained from userfaultfd syscall
 * @features: bit mask see UFFD_API_FEATURES
 */
static bool request_ufd_features(int ufd, uint64_t features)
{
    struct uffdio_api api_struct = {0};
    uint64_t ioctl_mask;

    api_struct.api = UFFD_API;
    api_struct.features = features;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("%s failed: UFFDIO_API failed: %s", __func__,
                     strerror(errno));
        return false;
    }

    ioctl_mask = (__u64)1 << _UFFDIO_REGISTER |
                 (__u64)1 << _UFFDIO_UNREGISTER;
    if ((api_struct.ioctls & ioctl_mask) != ioctl_mask) {
        error_report("Missing userfault features: %" PRIx64,
                     (uint64_t)(~api_struct.ioctls & ioctl_mask));
        return false;
    }

    return true;
}

static bool ufd_check_and_apply(int ufd, MigrationIncomingState *mis)
{
    uint64_t asked_features = 0;
    static uint64_t supported_features;

    /*
     * It's not possible to request UFFD_API twice on the same fd, and the
     * userfault fd feature set is persistent, so only query the supported
     * features once.
     */
    if (!supported_features) {
        if (!receive_ufd_features(&supported_features)) {
            error_report("%s failed", __func__);
            return false;
        }
    }

#ifdef UFFD_FEATURE_THREAD_ID
    if (migrate_postcopy_blocktime() && mis &&
        UFFD_FEATURE_THREAD_ID & supported_features) {
        /* kernel supports that feature */
        /* don't create blocktime_context if it exists */
        if (!mis->blocktime_ctx) {
            mis->blocktime_ctx = blocktime_context_new();
        }

        asked_features |= UFFD_FEATURE_THREAD_ID;
    }
#endif

    /*
     * Request the features, even if asked_features is 0, because the
     * kernel expects UFFD_API before UFFDIO_REGISTER on each userfault
     * file descriptor.
     */
    if (!request_ufd_features(ufd, asked_features)) {
        error_report("%s failed: features %" PRIu64, __func__,
                     asked_features);
        return false;
    }

    if (getpagesize() != ram_pagesize_summary()) {
        bool have_hp = false;
        /* We've got a huge page */
#ifdef UFFD_FEATURE_MISSING_HUGETLBFS
        have_hp = supported_features & UFFD_FEATURE_MISSING_HUGETLBFS;
#endif
        if (!have_hp) {
            error_report("Userfault on this host does not support huge pages");
            return false;
        }
    }
    return true;
}

/* Callback from postcopy_ram_supported_by_host block iterator.
 */
static int test_ramblock_postcopiable(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    ram_addr_t length = qemu_ram_get_used_length(rb);
    size_t pagesize = qemu_ram_pagesize(rb);

    if (length % pagesize) {
        error_report("Postcopy requires RAM blocks to be a page size multiple,"
                     " block %s is 0x" RAM_ADDR_FMT " bytes with a "
                     "page size of 0x%zx", block_name, length, pagesize);
        return 1;
    }
    return 0;
}

/*
 * Note: This has the side effect of munlock'ing all of RAM; that's
 * normally fine since if the postcopy succeeds it gets turned back on at the
 * end.
 */
bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
{
    long pagesize = getpagesize();
    int ufd = -1;
    bool ret = false; /* Error unless we change it */
    void *testarea = NULL;
    struct uffdio_register reg_struct;
    struct uffdio_range range_struct;
    uint64_t feature_mask;
    Error *local_err = NULL;

    if (qemu_target_page_size() > pagesize) {
        error_report("Target page size bigger than host page size");
        goto out;
    }

    ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: userfaultfd not available: %s", __func__,
                     strerror(errno));
        goto out;
    }

    /* Give devices a chance to object */
    if (postcopy_notify(POSTCOPY_NOTIFY_PROBE, &local_err)) {
        error_report_err(local_err);
        goto out;
    }

    /* Version and features check */
    if (!ufd_check_and_apply(ufd, mis)) {
        goto out;
    }

    /* We don't support postcopy with shared RAM yet */
    if (foreach_not_ignored_block(test_ramblock_postcopiable, NULL)) {
        goto out;
    }

    /*
     * userfault and mlock don't go together; we'll put it back later if
     * it was enabled.
     */
    if (munlockall()) {
        error_report("%s: munlockall: %s", __func__,  strerror(errno));
        goto out;
    }

    /*
     *  We need to check that the ops we need are supported on anon memory
     *  To do that we need to register a chunk and see the flags that
     *  are returned.
     */
    testarea = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE |
                                    MAP_ANONYMOUS, -1, 0);
    if (testarea == MAP_FAILED) {
        error_report("%s: Failed to map test area: %s", __func__,
                     strerror(errno));
        goto out;
    }
    g_assert(((size_t)testarea & (pagesize-1)) == 0);

    reg_struct.range.start = (uintptr_t)testarea;
    reg_struct.range.len = pagesize;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    if (ioctl(ufd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        goto out;
    }

    range_struct.start = (uintptr_t)testarea;
    range_struct.len = pagesize;
    if (ioctl(ufd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s userfault unregister: %s", __func__, strerror(errno));
        goto out;
    }

    feature_mask = (__u64)1 << _UFFDIO_WAKE |
                   (__u64)1 << _UFFDIO_COPY |
                   (__u64)1 << _UFFDIO_ZEROPAGE;
    if ((reg_struct.ioctls & feature_mask) != feature_mask) {
        error_report("Missing userfault map features: %" PRIx64,
                     (uint64_t)(~reg_struct.ioctls & feature_mask));
        goto out;
    }

    /* Success! */
    ret = true;
out:
    if (testarea) {
        munmap(testarea, pagesize);
    }
    if (ufd != -1) {
        close(ufd);
    }
    return ret;
}

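/*
 * Illustrative sketch (an assumption, not code from this file): the probe
 * above is typically run when the user asks for the postcopy-ram capability
 * on the incoming side, along the lines of:
 *
 *   if (new_caps[MIGRATION_CAPABILITY_POSTCOPY_RAM]) {
 *       if (!postcopy_ram_supported_by_host(mis)) {
 *           error_setg(errp, "Postcopy is not supported");
 *           return false;
 *       }
 *   }
 *
 * i.e. a failed probe simply refuses the capability rather than failing a
 * migration that is already in flight.
 */
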
/*
 * Setup an area of RAM so that it *can* be used for postcopy later; this
 * must be done right at the start prior to pre-copy.
 * opaque should be the MIS.
 */
static int init_range(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    void *host_addr = qemu_ram_get_host_addr(rb);
    ram_addr_t offset = qemu_ram_get_offset(rb);
    ram_addr_t length = qemu_ram_get_used_length(rb);
    trace_postcopy_init_range(block_name, host_addr, offset, length);

    /*
     * We need the whole of RAM to be truly empty for postcopy, so things
     * like ROMs and any data tables built during init must be zero'd
     * - we're going to get the copy from the source anyway.
     * (Precopy will just overwrite this data, so doesn't need the discard)
     */
    if (ram_discard_range(block_name, 0, length)) {
        return -1;
    }

    return 0;
}

/*
 * At the end of migration, undo the effects of init_range
 * opaque should be the MIS.
 */
static int cleanup_range(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    void *host_addr = qemu_ram_get_host_addr(rb);
    ram_addr_t offset = qemu_ram_get_offset(rb);
    ram_addr_t length = qemu_ram_get_used_length(rb);
    MigrationIncomingState *mis = opaque;
    struct uffdio_range range_struct;
    trace_postcopy_cleanup_range(block_name, host_addr, offset, length);

    /*
     * We turned off hugepage for the precopy stage with postcopy enabled;
     * we can turn it back on now.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_HUGEPAGE);

    /*
     * We can also turn off userfault now since we should have all the
     * pages.   It can be useful to leave it on to debug postcopy
     * if you're not sure it's always getting every page.
     */
    range_struct.start = (uintptr_t)host_addr;
    range_struct.len = length;

    if (ioctl(mis->userfault_fd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s: userfault unregister %s", __func__, strerror(errno));

        return -1;
    }

    return 0;
}

/*
 * Initialise postcopy-ram, setting the RAM to a state where we can go into
 * postcopy later; must be called prior to any precopy.
 * called from arch_init's similarly named ram_postcopy_incoming_init
 */
int postcopy_ram_incoming_init(MigrationIncomingState *mis)
{
    if (foreach_not_ignored_block(init_range, NULL)) {
        return -1;
    }

    return 0;
}

/*
 * Manage a single vote to the QEMU balloon inhibitor for all postcopy usage,
 * last caller wins.
 */
static void postcopy_balloon_inhibit(bool state)
{
    static bool cur_state = false;

    if (state != cur_state) {
        qemu_balloon_inhibit(state);
        cur_state = state;
    }
}

/*
 * At the end of a migration where postcopy_ram_incoming_init was called.
 */
int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    trace_postcopy_ram_incoming_cleanup_entry();

    if (mis->have_fault_thread) {
        Error *local_err = NULL;

        /* Let the fault thread quit */
        atomic_set(&mis->fault_thread_quit, 1);
        postcopy_fault_thread_notify(mis);
        trace_postcopy_ram_incoming_cleanup_join();
        qemu_thread_join(&mis->fault_thread);

        if (postcopy_notify(POSTCOPY_NOTIFY_INBOUND_END, &local_err)) {
            error_report_err(local_err);
            return -1;
        }

        if (foreach_not_ignored_block(cleanup_range, mis)) {
            return -1;
        }

        trace_postcopy_ram_incoming_cleanup_closeuf();
        close(mis->userfault_fd);
        close(mis->userfault_event_fd);
        mis->have_fault_thread = false;
    }

    postcopy_balloon_inhibit(false);

    if (enable_mlock) {
        if (os_mlock() < 0) {
            error_report("mlock: %s", strerror(errno));
            /*
             * It doesn't feel right to fail at this point, we have a valid
             * VM state.
             */
        }
    }

    postcopy_state_set(POSTCOPY_INCOMING_END);

    if (mis->postcopy_tmp_page) {
        munmap(mis->postcopy_tmp_page, mis->largest_page_size);
        mis->postcopy_tmp_page = NULL;
    }
    if (mis->postcopy_tmp_zero_page) {
        munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size);
        mis->postcopy_tmp_zero_page = NULL;
    }
    trace_postcopy_ram_incoming_cleanup_blocktime(
            get_postcopy_total_blocktime());

    trace_postcopy_ram_incoming_cleanup_exit();
    return 0;
}

/*
 * Disable huge pages on an area
 */
static int nhp_range(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    void *host_addr = qemu_ram_get_host_addr(rb);
    ram_addr_t offset = qemu_ram_get_offset(rb);
    ram_addr_t length = qemu_ram_get_used_length(rb);
    trace_postcopy_nhp_range(block_name, host_addr, offset, length);

    /*
     * Before we do discards we need to ensure those discards really
     * do delete areas of the page, even if THP thinks a hugepage would
     * be a good idea, so force hugepages off.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_NOHUGEPAGE);

    return 0;
}

/*
 * Userfault requires us to mark RAM as NOHUGEPAGE prior to discard;
 * however, leaving it until after precopy means that most of the precopy
 * data is still THPd
 */
int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    if (foreach_not_ignored_block(nhp_range, mis)) {
        return -1;
    }

    postcopy_state_set(POSTCOPY_INCOMING_DISCARD);

    return 0;
}

/*
 * Mark the given RAMBlock as requiring notification when unwritten areas
 * are accessed.  Used as a callback on foreach_not_ignored_block.
 *   rb: RAMBlock to register with the userfault fd
 *   opaque: MigrationIncomingState pointer
 * Returns 0 on success
 */
static int ram_block_enable_notify(RAMBlock *rb, void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_register reg_struct;

    reg_struct.range.start = (uintptr_t)qemu_ram_get_host_addr(rb);
    reg_struct.range.len = qemu_ram_get_used_length(rb);
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    /* Now tell our userfault_fd that it's responsible for this area */
    if (ioctl(mis->userfault_fd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        return -1;
    }
    if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
        error_report("%s userfault: Region doesn't support COPY", __func__);
        return -1;
    }
    if (reg_struct.ioctls & ((__u64)1 << _UFFDIO_ZEROPAGE)) {
        qemu_ram_set_uf_zeroable(rb);
    }

    return 0;
}

int postcopy_wake_shared(struct PostCopyFD *pcfd,
                         uint64_t client_addr,
                         RAMBlock *rb)
{
    size_t pagesize = qemu_ram_pagesize(rb);
    struct uffdio_range range;
    int ret;
    trace_postcopy_wake_shared(client_addr, qemu_ram_get_idstr(rb));
    range.start = client_addr & ~(pagesize - 1);
    range.len = pagesize;
    ret = ioctl(pcfd->fd, UFFDIO_WAKE, &range);
    if (ret) {
        error_report("%s: Failed to wake: %zx in %s (%s)",
                     __func__, (size_t)client_addr, qemu_ram_get_idstr(rb),
                     strerror(errno));
    }
    return ret;
}

/*
 * Callback from shared fault handlers to ask for a page.
 * The page must be specified by a RAMBlock and an offset in that rb.
 * Note: Only for use by shared fault handlers (in fault thread)
 */
int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb,
                                 uint64_t client_addr, uint64_t rb_offset)
{
    size_t pagesize = qemu_ram_pagesize(rb);
    uint64_t aligned_rbo = rb_offset & ~(pagesize - 1);
    MigrationIncomingState *mis = migration_incoming_get_current();

    trace_postcopy_request_shared_page(pcfd->idstr, qemu_ram_get_idstr(rb),
                                       rb_offset);
    if (ramblock_recv_bitmap_test_byte_offset(rb, aligned_rbo)) {
        trace_postcopy_request_shared_page_present(pcfd->idstr,
                                        qemu_ram_get_idstr(rb), rb_offset);
        return postcopy_wake_shared(pcfd, client_addr, rb);
    }
    if (rb != mis->last_rb) {
        mis->last_rb = rb;
        migrate_send_rp_req_pages(mis, qemu_ram_get_idstr(rb),
                                  aligned_rbo, pagesize);
    } else {
        /* Save some space */
        migrate_send_rp_req_pages(mis, NULL, aligned_rbo, pagesize);
    }
    return 0;
}

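/*
 * Illustrative sketch (an assumption, not from this file): a device backend
 * that shares guest memory with another process (vhost-user is the in-tree
 * user) hands that process's userfaultfd to the fault thread as a
 * struct PostCopyFD.  When the fd reports a missing page, the registered
 * handler maps the client address back to a RAMBlock/offset and asks for
 * the page, roughly:
 *
 *   static int mydev_pcfd_handler(struct PostCopyFD *pcfd, void *ufd_msg)
 *   {
 *       struct uffd_msg *msg = ufd_msg;
 *       uint64_t client_addr = msg->arg.pagefault.address;
 *       RAMBlock *rb;
 *       uint64_t rb_offset;
 *
 *       (translate client_addr to rb and rb_offset using the device's own
 *        region table)
 *       return postcopy_request_shared_page(pcfd, rb, client_addr,
 *                                           rb_offset);
 *   }
 *
 * The "mydev" names are hypothetical; once the page lands,
 * postcopy_wake_shared() (above) wakes the faulting process.
 */
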
static int get_mem_fault_cpu_index(uint32_t pid)
{
    CPUState *cpu_iter;

    CPU_FOREACH(cpu_iter) {
        if (cpu_iter->thread_id == pid) {
            trace_get_mem_fault_cpu_index(cpu_iter->cpu_index, pid);
            return cpu_iter->cpu_index;
        }
    }
    trace_get_mem_fault_cpu_index(-1, pid);
    return -1;
}

static uint32_t get_low_time_offset(PostcopyBlocktimeContext *dc)
{
    int64_t start_time_offset = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) -
                                    dc->start_time;
    return start_time_offset < 1 ? 1 : start_time_offset & UINT32_MAX;
}

/*
 * This function is called when a page fault occurs. It tracks how long
 * vCPUs are blocked waiting for pages.
 *
 * @addr: faulted host virtual address
 * @ptid: faulted process thread id
 * @rb: ramblock appropriate to addr
 */
static void mark_postcopy_blocktime_begin(uintptr_t addr, uint32_t ptid,
                                          RAMBlock *rb)
{
    int cpu, already_received;
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
    uint32_t low_time_offset;

    if (!dc || ptid == 0) {
        return;
    }
    cpu = get_mem_fault_cpu_index(ptid);
    if (cpu < 0) {
        return;
    }

    low_time_offset = get_low_time_offset(dc);
    if (dc->vcpu_addr[cpu] == 0) {
        atomic_inc(&dc->smp_cpus_down);
    }

    atomic_xchg(&dc->last_begin, low_time_offset);
    atomic_xchg(&dc->page_fault_vcpu_time[cpu], low_time_offset);
    atomic_xchg(&dc->vcpu_addr[cpu], addr);

    /*
     * Check it here, not at the beginning of the function, because the
     * check could occur earlier than bitmap_set in qemu_ufd_copy_ioctl.
     */
    already_received = ramblock_recv_bitmap_test(rb, (void *)addr);
    if (already_received) {
        atomic_xchg(&dc->vcpu_addr[cpu], 0);
        atomic_xchg(&dc->page_fault_vcpu_time[cpu], 0);
        atomic_dec(&dc->smp_cpus_down);
    }
    trace_mark_postcopy_blocktime_begin(addr, dc, dc->page_fault_vcpu_time[cpu],
                                        cpu, already_received);
}

/*
 *  This function just provides the calculated blocktime per vCPU and
 *  traces it.  Total blocktime is calculated in mark_postcopy_blocktime_end.
 *
 * Assume we have 3 CPUs
 *
 *      S1        E1           S1               E1
 * -----***********------------xxx***************------------------------> CPU1
 *
 *             S2                E2
 * ------------****************xxx---------------------------------------> CPU2
 *
 *                         S3            E3
 * ------------------------****xxx********-------------------------------> CPU3
 *
 * We have the sequence S1,S2,E1,S3,S1,E2,E3,E1
 * S2,E1 - doesn't match the condition, because the sequence S1,S2,E1 doesn't
 *         include CPU3
 * S3,S1,E2 - sequence includes all CPUs, in this case the overlap is S1,E2 -
 *            it's a part of total blocktime.
 * S1 - here is last_begin
 * Legend of the picture is following:
 *              * - means blocktime per vCPU
 *              x - means overlapped blocktime (total blocktime)
 *
 * @addr: host virtual address
 */
static void mark_postcopy_blocktime_end(uintptr_t addr)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int smp_cpus = ms->smp.cpus;
    int i, affected_cpu = 0;
    bool vcpu_total_blocktime = false;
    uint32_t read_vcpu_time, low_time_offset;

    if (!dc) {
        return;
    }

    low_time_offset = get_low_time_offset(dc);
    /*
     * Look up the cpu in order to clear it.  This algorithm looks
     * straightforward, but it's not optimal; a better algorithm would keep
     * a tree or hash where the key is the address and the value is a list
     * of vCPUs blocked on it.
     */
    for (i = 0; i < smp_cpus; i++) {
        uint32_t vcpu_blocktime = 0;

        read_vcpu_time = atomic_fetch_add(&dc->page_fault_vcpu_time[i], 0);
        if (atomic_fetch_add(&dc->vcpu_addr[i], 0) != addr ||
            read_vcpu_time == 0) {
            continue;
        }
        atomic_xchg(&dc->vcpu_addr[i], 0);
        vcpu_blocktime = low_time_offset - read_vcpu_time;
        affected_cpu += 1;
        /*
         * We need to know whether mark_postcopy_blocktime_end was called for
         * a faulted page; the other possibility is a prefetched page, in
         * which case we shouldn't be here.
         */
        if (!vcpu_total_blocktime &&
            atomic_fetch_add(&dc->smp_cpus_down, 0) == smp_cpus) {
            vcpu_total_blocktime = true;
        }
        /* continue the loop, since one page could affect several vCPUs */
        dc->vcpu_blocktime[i] += vcpu_blocktime;
    }

    atomic_sub(&dc->smp_cpus_down, affected_cpu);
    if (vcpu_total_blocktime) {
        dc->total_blocktime += low_time_offset - atomic_fetch_add(
                &dc->last_begin, 0);
    }
    trace_mark_postcopy_blocktime_end(addr, dc, dc->total_blocktime,
                                      affected_cpu);
}

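/*
 * Worked example (added for illustration, not part of the original file),
 * with 2 vCPUs and times in ms since start_time:
 *
 *   t=10  CPU0 faults on page A: smp_cpus_down = 1, last_begin = 10
 *   t=12  CPU1 faults on page B: smp_cpus_down = 2, last_begin = 12
 *   t=20  page B arrives: vcpu_blocktime[1] += 20 - 12 = 8; since every
 *         vCPU was down at that moment, total_blocktime += 20 - 12 = 8
 *   t=25  page A arrives: vcpu_blocktime[0] += 25 - 10 = 15; only one vCPU
 *         was still down, so total_blocktime is unchanged
 *
 * Result: vcpu_blocktime = [15, 8], total_blocktime = 8, i.e. the guest as
 * a whole was fully stalled only during the 12..20 overlap.
 */
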
static bool postcopy_pause_fault_thread(MigrationIncomingState *mis)
{
    trace_postcopy_pause_fault_thread();

    qemu_sem_wait(&mis->postcopy_pause_sem_fault);

    trace_postcopy_pause_fault_thread_continued();

    return true;
}

871096bf4c8SDr. David Alan Gilbert /*
872f0a227adSDr. David Alan Gilbert  * Handle faults detected by the USERFAULT markings
873f0a227adSDr. David Alan Gilbert  */
874f0a227adSDr. David Alan Gilbert static void *postcopy_ram_fault_thread(void *opaque)
875f0a227adSDr. David Alan Gilbert {
876f0a227adSDr. David Alan Gilbert     MigrationIncomingState *mis = opaque;
877c4faeed2SDr. David Alan Gilbert     struct uffd_msg msg;
878c4faeed2SDr. David Alan Gilbert     int ret;
87900fa4fc8SDr. David Alan Gilbert     size_t index;
880c4faeed2SDr. David Alan Gilbert     RAMBlock *rb = NULL;
881f0a227adSDr. David Alan Gilbert 
882c4faeed2SDr. David Alan Gilbert     trace_postcopy_ram_fault_thread_entry();
88374637e6fSLidong Chen     rcu_register_thread();
884096bf4c8SDr. David Alan Gilbert     mis->last_rb = NULL; /* last RAMBlock we sent part of */
885f0a227adSDr. David Alan Gilbert     qemu_sem_post(&mis->fault_thread_sem);
886c4faeed2SDr. David Alan Gilbert 
88700fa4fc8SDr. David Alan Gilbert     struct pollfd *pfd;
88800fa4fc8SDr. David Alan Gilbert     size_t pfd_len = 2 + mis->postcopy_remote_fds->len;
88900fa4fc8SDr. David Alan Gilbert 
89000fa4fc8SDr. David Alan Gilbert     pfd = g_new0(struct pollfd, pfd_len);
89100fa4fc8SDr. David Alan Gilbert 
89200fa4fc8SDr. David Alan Gilbert     pfd[0].fd = mis->userfault_fd;
89300fa4fc8SDr. David Alan Gilbert     pfd[0].events = POLLIN;
89400fa4fc8SDr. David Alan Gilbert     pfd[1].fd = mis->userfault_event_fd;
89500fa4fc8SDr. David Alan Gilbert     pfd[1].events = POLLIN; /* Waiting for eventfd to go positive */
89600fa4fc8SDr. David Alan Gilbert     trace_postcopy_ram_fault_thread_fds_core(pfd[0].fd, pfd[1].fd);
89700fa4fc8SDr. David Alan Gilbert     for (index = 0; index < mis->postcopy_remote_fds->len; index++) {
89800fa4fc8SDr. David Alan Gilbert         struct PostCopyFD *pcfd = &g_array_index(mis->postcopy_remote_fds,
89900fa4fc8SDr. David Alan Gilbert                                                  struct PostCopyFD, index);
90000fa4fc8SDr. David Alan Gilbert         pfd[2 + index].fd = pcfd->fd;
90100fa4fc8SDr. David Alan Gilbert         pfd[2 + index].events = POLLIN;
90200fa4fc8SDr. David Alan Gilbert         trace_postcopy_ram_fault_thread_fds_extra(2 + index, pcfd->idstr,
90300fa4fc8SDr. David Alan Gilbert                                                   pcfd->fd);
90400fa4fc8SDr. David Alan Gilbert     }
90500fa4fc8SDr. David Alan Gilbert 
906c4faeed2SDr. David Alan Gilbert     while (true) {
907c4faeed2SDr. David Alan Gilbert         ram_addr_t rb_offset;
90800fa4fc8SDr. David Alan Gilbert         int poll_result;
909c4faeed2SDr. David Alan Gilbert 
910c4faeed2SDr. David Alan Gilbert         /*
911c4faeed2SDr. David Alan Gilbert          * We're mainly waiting for the kernel to give us a faulting HVA,
912c4faeed2SDr. David Alan Gilbert          * however we can be told to quit via userfault_quit_fd which is
913c4faeed2SDr. David Alan Gilbert          * an eventfd
914c4faeed2SDr. David Alan Gilbert          */
915c4faeed2SDr. David Alan Gilbert 
91600fa4fc8SDr. David Alan Gilbert         poll_result = poll(pfd, pfd_len, -1 /* Wait forever */);
91700fa4fc8SDr. David Alan Gilbert         if (poll_result == -1) {
918c4faeed2SDr. David Alan Gilbert             error_report("%s: userfault poll: %s", __func__, strerror(errno));
919c4faeed2SDr. David Alan Gilbert             break;
920f0a227adSDr. David Alan Gilbert         }
921f0a227adSDr. David Alan Gilbert 
9223a7804c3SPeter Xu         if (!mis->to_src_file) {
9233a7804c3SPeter Xu             /*
9243a7804c3SPeter Xu              * Possibly someone has told us, via the event, that the
9253a7804c3SPeter Xu              * return path is already broken. Hold here until the
9263a7804c3SPeter Xu              * channel has been rebuilt.
9273a7804c3SPeter Xu              */
9283a7804c3SPeter Xu             if (postcopy_pause_fault_thread(mis)) {
9293a7804c3SPeter Xu                 mis->last_rb = NULL;
9303a7804c3SPeter Xu                 /* Continue to read the userfaultfd */
9313a7804c3SPeter Xu             } else {
9323a7804c3SPeter Xu                 error_report("%s: paused, but not allowed to continue",
9333a7804c3SPeter Xu                              __func__);
9343a7804c3SPeter Xu                 break;
9353a7804c3SPeter Xu             }
9363a7804c3SPeter Xu         }
9373a7804c3SPeter Xu 
938c4faeed2SDr. David Alan Gilbert         if (pfd[1].revents) {
93964f615feSPeter Xu             uint64_t tmp64 = 0;
94064f615feSPeter Xu 
94164f615feSPeter Xu             /* Consume the signal */
94264f615feSPeter Xu             if (read(mis->userfault_event_fd, &tmp64, 8) != 8) {
94364f615feSPeter Xu                 /* Nothing obviously nicer than posting this error. */
94464f615feSPeter Xu                 error_report("%s: read() failed", __func__);
94564f615feSPeter Xu             }
94664f615feSPeter Xu 
94764f615feSPeter Xu             if (atomic_read(&mis->fault_thread_quit)) {
948c4faeed2SDr. David Alan Gilbert                 trace_postcopy_ram_fault_thread_quit();
949c4faeed2SDr. David Alan Gilbert                 break;
950c4faeed2SDr. David Alan Gilbert             }
95164f615feSPeter Xu         }
952c4faeed2SDr. David Alan Gilbert 
95300fa4fc8SDr. David Alan Gilbert         if (pfd[0].revents) {
95400fa4fc8SDr. David Alan Gilbert             poll_result--;
955c4faeed2SDr. David Alan Gilbert             ret = read(mis->userfault_fd, &msg, sizeof(msg));
956c4faeed2SDr. David Alan Gilbert             if (ret != sizeof(msg)) {
957c4faeed2SDr. David Alan Gilbert                 if (errno == EAGAIN) {
958c4faeed2SDr. David Alan Gilbert                     /*
959c4faeed2SDr. David Alan Gilbert                      * if a wake up happens on the other thread just after
960c4faeed2SDr. David Alan Gilbert                      * the poll, there is nothing to read.
961c4faeed2SDr. David Alan Gilbert                      */
962c4faeed2SDr. David Alan Gilbert                     continue;
963c4faeed2SDr. David Alan Gilbert                 }
964c4faeed2SDr. David Alan Gilbert                 if (ret < 0) {
96500fa4fc8SDr. David Alan Gilbert                     error_report("%s: Failed to read full userfault "
96600fa4fc8SDr. David Alan Gilbert                                  "message: %s",
967c4faeed2SDr. David Alan Gilbert                                  __func__, strerror(errno));
968c4faeed2SDr. David Alan Gilbert                     break;
969c4faeed2SDr. David Alan Gilbert                 } else {
97000fa4fc8SDr. David Alan Gilbert                     error_report("%s: Read %d bytes from userfaultfd "
97100fa4fc8SDr. David Alan Gilbert                                  "expected %zu",
972c4faeed2SDr. David Alan Gilbert                                  __func__, ret, sizeof(msg));
973c4faeed2SDr. David Alan Gilbert                     break; /* Lost alignment, don't know what we'd read next */
974c4faeed2SDr. David Alan Gilbert                 }
975c4faeed2SDr. David Alan Gilbert             }
976c4faeed2SDr. David Alan Gilbert             if (msg.event != UFFD_EVENT_PAGEFAULT) {
977c4faeed2SDr. David Alan Gilbert                 error_report("%s: Read unexpected event %u from userfaultfd",
978c4faeed2SDr. David Alan Gilbert                              __func__, msg.event);
979c4faeed2SDr. David Alan Gilbert                 continue; /* It's not a page fault, shouldn't happen */
980c4faeed2SDr. David Alan Gilbert             }
981c4faeed2SDr. David Alan Gilbert 
982c4faeed2SDr. David Alan Gilbert             rb = qemu_ram_block_from_host(
983c4faeed2SDr. David Alan Gilbert                      (void *)(uintptr_t)msg.arg.pagefault.address,
984f615f396SPaolo Bonzini                      true, &rb_offset);
985c4faeed2SDr. David Alan Gilbert             if (!rb) {
986c4faeed2SDr. David Alan Gilbert                 error_report("postcopy_ram_fault_thread: Fault outside guest: %"
987c4faeed2SDr. David Alan Gilbert                              PRIx64, (uint64_t)msg.arg.pagefault.address);
988c4faeed2SDr. David Alan Gilbert                 break;
989c4faeed2SDr. David Alan Gilbert             }
990c4faeed2SDr. David Alan Gilbert 
991332847f0SDr. David Alan Gilbert             rb_offset &= ~(qemu_ram_pagesize(rb) - 1);
992c4faeed2SDr. David Alan Gilbert             trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address,
993c4faeed2SDr. David Alan Gilbert                                                 qemu_ram_get_idstr(rb),
994575b0b33SAlexey Perevalov                                                 rb_offset,
995575b0b33SAlexey Perevalov                                                 msg.arg.pagefault.feat.ptid);
996575b0b33SAlexey Perevalov             mark_postcopy_blocktime_begin(
997575b0b33SAlexey Perevalov                     (uintptr_t)(msg.arg.pagefault.address),
998575b0b33SAlexey Perevalov                                 msg.arg.pagefault.feat.ptid, rb);
999575b0b33SAlexey Perevalov 
10003a7804c3SPeter Xu retry:
1001c4faeed2SDr. David Alan Gilbert             /*
1002c4faeed2SDr. David Alan Gilbert              * Send the request to the source - we want to request one
1003c4faeed2SDr. David Alan Gilbert              * of our host page sizes (which is >= TPS, the target page size)
1004c4faeed2SDr. David Alan Gilbert              */
1005096bf4c8SDr. David Alan Gilbert             if (rb != mis->last_rb) {
1006096bf4c8SDr. David Alan Gilbert                 mis->last_rb = rb;
10073a7804c3SPeter Xu                 ret = migrate_send_rp_req_pages(mis,
10083a7804c3SPeter Xu                                                 qemu_ram_get_idstr(rb),
10093a7804c3SPeter Xu                                                 rb_offset,
10103a7804c3SPeter Xu                                                 qemu_ram_pagesize(rb));
1011c4faeed2SDr. David Alan Gilbert             } else {
1012c4faeed2SDr. David Alan Gilbert                 /* Save some space */
10133a7804c3SPeter Xu                 ret = migrate_send_rp_req_pages(mis,
10143a7804c3SPeter Xu                                                 NULL,
10153a7804c3SPeter Xu                                                 rb_offset,
10163a7804c3SPeter Xu                                                 qemu_ram_pagesize(rb));
10173a7804c3SPeter Xu             }
10183a7804c3SPeter Xu 
10193a7804c3SPeter Xu             if (ret) {
10203a7804c3SPeter Xu                 /* May be a network failure; try to wait for recovery */
10213a7804c3SPeter Xu                 if (ret == -EIO && postcopy_pause_fault_thread(mis)) {
10223a7804c3SPeter Xu                     /* We got reconnected somehow, try to continue */
10233a7804c3SPeter Xu                     mis->last_rb = NULL;
10243a7804c3SPeter Xu                     goto retry;
10253a7804c3SPeter Xu                 } else {
10263a7804c3SPeter Xu                     /* This is an unavoidable fault */
10273a7804c3SPeter Xu                     error_report("%s: migrate_send_rp_req_pages() returned %d",
10283a7804c3SPeter Xu                                  __func__, ret);
10293a7804c3SPeter Xu                     break;
10303a7804c3SPeter Xu                 }
1031c4faeed2SDr. David Alan Gilbert             }
1032c4faeed2SDr. David Alan Gilbert         }
103300fa4fc8SDr. David Alan Gilbert 
103400fa4fc8SDr. David Alan Gilbert         /* Now handle any requests from external processes on shared memory */
103500fa4fc8SDr. David Alan Gilbert         /* TODO: May need to handle devices deregistering during postcopy */
103600fa4fc8SDr. David Alan Gilbert         for (index = 2; index < pfd_len && poll_result; index++) {
103700fa4fc8SDr. David Alan Gilbert             if (pfd[index].revents) {
103800fa4fc8SDr. David Alan Gilbert                 struct PostCopyFD *pcfd =
103900fa4fc8SDr. David Alan Gilbert                     &g_array_index(mis->postcopy_remote_fds,
104000fa4fc8SDr. David Alan Gilbert                                    struct PostCopyFD, index - 2);
104100fa4fc8SDr. David Alan Gilbert 
104200fa4fc8SDr. David Alan Gilbert                 poll_result--;
104300fa4fc8SDr. David Alan Gilbert                 if (pfd[index].revents & POLLERR) {
104400fa4fc8SDr. David Alan Gilbert                     error_report("%s: POLLERR on poll %zd fd=%d",
104500fa4fc8SDr. David Alan Gilbert                                  __func__, index, pcfd->fd);
104600fa4fc8SDr. David Alan Gilbert                     pfd[index].events = 0;
104700fa4fc8SDr. David Alan Gilbert                     continue;
104800fa4fc8SDr. David Alan Gilbert                 }
104900fa4fc8SDr. David Alan Gilbert 
105000fa4fc8SDr. David Alan Gilbert                 ret = read(pcfd->fd, &msg, sizeof(msg));
105100fa4fc8SDr. David Alan Gilbert                 if (ret != sizeof(msg)) {
105200fa4fc8SDr. David Alan Gilbert                     if (errno == EAGAIN) {
105300fa4fc8SDr. David Alan Gilbert                         /*
105400fa4fc8SDr. David Alan Gilbert                          * if a wake up happens on the other thread just after
105500fa4fc8SDr. David Alan Gilbert                          * the poll, there is nothing to read.
105600fa4fc8SDr. David Alan Gilbert                          */
105700fa4fc8SDr. David Alan Gilbert                         continue;
105800fa4fc8SDr. David Alan Gilbert                     }
105900fa4fc8SDr. David Alan Gilbert                     if (ret < 0) {
106000fa4fc8SDr. David Alan Gilbert                         error_report("%s: Failed to read full userfault "
106100fa4fc8SDr. David Alan Gilbert                                      "message: %s (shared) revents=%d",
106200fa4fc8SDr. David Alan Gilbert                                      __func__, strerror(errno),
106300fa4fc8SDr. David Alan Gilbert                                      pfd[index].revents);
106400fa4fc8SDr. David Alan Gilbert                         /* TODO: Could just disable this sharer */
106500fa4fc8SDr. David Alan Gilbert                         break;
106600fa4fc8SDr. David Alan Gilbert                     } else {
106700fa4fc8SDr. David Alan Gilbert                         error_report("%s: Read %d bytes from userfaultfd "
106800fa4fc8SDr. David Alan Gilbert                                      "expected %zu (shared)",
106900fa4fc8SDr. David Alan Gilbert                                      __func__, ret, sizeof(msg));
107000fa4fc8SDr. David Alan Gilbert                         /* TODO: Could just disable this sharer */
107100fa4fc8SDr. David Alan Gilbert                         break; /*Lost alignment,don't know what we'd read next*/
107200fa4fc8SDr. David Alan Gilbert                     }
107300fa4fc8SDr. David Alan Gilbert                 }
107400fa4fc8SDr. David Alan Gilbert                 if (msg.event != UFFD_EVENT_PAGEFAULT) {
107500fa4fc8SDr. David Alan Gilbert                     error_report("%s: Read unexpected event %u "
107600fa4fc8SDr. David Alan Gilbert                                  "from userfaultfd (shared)",
107700fa4fc8SDr. David Alan Gilbert                                  __func__, msg.event);
107800fa4fc8SDr. David Alan Gilbert                     continue; /* It's not a page fault, shouldn't happen */
107900fa4fc8SDr. David Alan Gilbert                 }
108000fa4fc8SDr. David Alan Gilbert                 /* Call the device handler registered with us */
108100fa4fc8SDr. David Alan Gilbert                 ret = pcfd->handler(pcfd, &msg);
108200fa4fc8SDr. David Alan Gilbert                 if (ret) {
108300fa4fc8SDr. David Alan Gilbert                     error_report("%s: Failed to resolve shared fault on %zd/%s",
108400fa4fc8SDr. David Alan Gilbert                                  __func__, index, pcfd->idstr);
108500fa4fc8SDr. David Alan Gilbert                     /* TODO: Fail? Disable this sharer? */
108600fa4fc8SDr. David Alan Gilbert                 }
108700fa4fc8SDr. David Alan Gilbert             }
108800fa4fc8SDr. David Alan Gilbert         }
108900fa4fc8SDr. David Alan Gilbert     }
109074637e6fSLidong Chen     rcu_unregister_thread();
1091c4faeed2SDr. David Alan Gilbert     trace_postcopy_ram_fault_thread_exit();
1092fc6008f3SMarc-André Lureau     g_free(pfd);
1093f0a227adSDr. David Alan Gilbert     return NULL;
1094f0a227adSDr. David Alan Gilbert }
1095f0a227adSDr. David Alan Gilbert 
1096f0a227adSDr. David Alan Gilbert int postcopy_ram_enable_notify(MigrationIncomingState *mis)
1097f0a227adSDr. David Alan Gilbert {
1098c4faeed2SDr. David Alan Gilbert     /* Open the fd for the kernel to give us userfaults */
1099c4faeed2SDr. David Alan Gilbert     mis->userfault_fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
1100c4faeed2SDr. David Alan Gilbert     if (mis->userfault_fd == -1) {
1101c4faeed2SDr. David Alan Gilbert         error_report("%s: Failed to open userfault fd: %s", __func__,
1102c4faeed2SDr. David Alan Gilbert                      strerror(errno));
1103c4faeed2SDr. David Alan Gilbert         return -1;
1104c4faeed2SDr. David Alan Gilbert     }
1105c4faeed2SDr. David Alan Gilbert 
1106c4faeed2SDr. David Alan Gilbert     /*
1107c4faeed2SDr. David Alan Gilbert      * Although the host check already tested the API, we need to
1108c4faeed2SDr. David Alan Gilbert      * do the check again as an ABI handshake on the new fd.
1109c4faeed2SDr. David Alan Gilbert      */
111054ae0886SAlexey Perevalov     if (!ufd_check_and_apply(mis->userfault_fd, mis)) {
1111c4faeed2SDr. David Alan Gilbert         /* Don't leak the fd we just opened */
1111c4faeed2SDr. David Alan Gilbert         close(mis->userfault_fd);
1111c4faeed2SDr. David Alan Gilbert         return -1;
1112c4faeed2SDr. David Alan Gilbert     }
1113c4faeed2SDr. David Alan Gilbert 
1114c4faeed2SDr. David Alan Gilbert     /* Now an eventfd we use to tell the fault-thread to quit */
111564f615feSPeter Xu     mis->userfault_event_fd = eventfd(0, EFD_CLOEXEC);
111664f615feSPeter Xu     if (mis->userfault_event_fd == -1) {
111764f615feSPeter Xu         error_report("%s: Opening userfault_event_fd: %s", __func__,
1118c4faeed2SDr. David Alan Gilbert                      strerror(errno));
1119c4faeed2SDr. David Alan Gilbert         close(mis->userfault_fd);
1120c4faeed2SDr. David Alan Gilbert         return -1;
1121c4faeed2SDr. David Alan Gilbert     }
1122c4faeed2SDr. David Alan Gilbert 
1123f0a227adSDr. David Alan Gilbert     qemu_sem_init(&mis->fault_thread_sem, 0);
1124f0a227adSDr. David Alan Gilbert     qemu_thread_create(&mis->fault_thread, "postcopy/fault",
1125f0a227adSDr. David Alan Gilbert                        postcopy_ram_fault_thread, mis, QEMU_THREAD_JOINABLE);
1126f0a227adSDr. David Alan Gilbert     qemu_sem_wait(&mis->fault_thread_sem);
1127f0a227adSDr. David Alan Gilbert     qemu_sem_destroy(&mis->fault_thread_sem);
1128c4faeed2SDr. David Alan Gilbert     mis->have_fault_thread = true;
1129f0a227adSDr. David Alan Gilbert 
1130f0a227adSDr. David Alan Gilbert     /* Mark so that we get notified of accesses to unwritten areas */
1131fbd162e6SYury Kotov     if (foreach_not_ignored_block(ram_block_enable_notify, mis)) {
113291b02dc7SFei Li         error_report("ram_block_enable_notify failed");
1133f0a227adSDr. David Alan Gilbert         return -1;
1134f0a227adSDr. David Alan Gilbert     }
1135f0a227adSDr. David Alan Gilbert 
1136371ff5a3SDr. David Alan Gilbert     /*
1137371ff5a3SDr. David Alan Gilbert      * Ballooning can mark pages as absent while we're postcopying,
1138371ff5a3SDr. David Alan Gilbert      * which would cause false userfaults.
1139371ff5a3SDr. David Alan Gilbert      */
1140154304cdSAlex Williamson     postcopy_balloon_inhibit(true);
1141371ff5a3SDr. David Alan Gilbert 
1142c4faeed2SDr. David Alan Gilbert     trace_postcopy_ram_enable_notify();
1143c4faeed2SDr. David Alan Gilbert 
1144f0a227adSDr. David Alan Gilbert     return 0;
1145f0a227adSDr. David Alan Gilbert }
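
/*
 * Illustrative sketch, not part of the original file: ram_block_enable_notify()
 * (defined earlier in this file) is the per-RAMBlock callback handed to
 * foreach_not_ignored_block() above.  Its job is essentially to register each
 * block's host mapping with the userfaultfd so that missing-page accesses
 * raise faults, roughly:
 *
 *     struct uffdio_register reg = {
 *         .range.start = (uintptr_t)host_addr,        // block's host mapping
 *         .range.len   = length,                      // block's used length
 *         .mode        = UFFDIO_REGISTER_MODE_MISSING,
 *     };
 *     if (ioctl(mis->userfault_fd, UFFDIO_REGISTER, &reg)) {
 *         // Registration failed; postcopy cannot service this block.
 *     }
 *
 * How host_addr/length are obtained is left out here; treat this as a sketch
 * of the kernel API, not the exact QEMU code.
 */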
1146f0a227adSDr. David Alan Gilbert 
1147727b9d7eSAlexey Perevalov static int qemu_ufd_copy_ioctl(int userfault_fd, void *host_addr,
1148f9494614SAlexey Perevalov                                void *from_addr, uint64_t pagesize, RAMBlock *rb)
1149727b9d7eSAlexey Perevalov {
1150f9494614SAlexey Perevalov     int ret;
1151727b9d7eSAlexey Perevalov     if (from_addr) {
1152727b9d7eSAlexey Perevalov         struct uffdio_copy copy_struct;
1153727b9d7eSAlexey Perevalov         copy_struct.dst = (uint64_t)(uintptr_t)host_addr;
1154727b9d7eSAlexey Perevalov         copy_struct.src = (uint64_t)(uintptr_t)from_addr;
1155727b9d7eSAlexey Perevalov         copy_struct.len = pagesize;
1156727b9d7eSAlexey Perevalov         copy_struct.mode = 0;
1157f9494614SAlexey Perevalov         ret = ioctl(userfault_fd, UFFDIO_COPY, &copy_struct);
1158727b9d7eSAlexey Perevalov     } else {
1159727b9d7eSAlexey Perevalov         struct uffdio_zeropage zero_struct;
1160727b9d7eSAlexey Perevalov         zero_struct.range.start = (uint64_t)(uintptr_t)host_addr;
1161727b9d7eSAlexey Perevalov         zero_struct.range.len = pagesize;
1162727b9d7eSAlexey Perevalov         zero_struct.mode = 0;
1163f9494614SAlexey Perevalov         ret = ioctl(userfault_fd, UFFDIO_ZEROPAGE, &zero_struct);
1164727b9d7eSAlexey Perevalov     }
1165f9494614SAlexey Perevalov     if (!ret) {
1166f9494614SAlexey Perevalov         ramblock_recv_bitmap_set_range(rb, host_addr,
1167f9494614SAlexey Perevalov                                        pagesize / qemu_target_page_size());
1168575b0b33SAlexey Perevalov         mark_postcopy_blocktime_end((uintptr_t)host_addr);
1169575b0b33SAlexey Perevalov 
1170f9494614SAlexey Perevalov     }
1171f9494614SAlexey Perevalov     return ret;
1172727b9d7eSAlexey Perevalov }
1173727b9d7eSAlexey Perevalov 
1174d488b349SDr. David Alan Gilbert int postcopy_notify_shared_wake(RAMBlock *rb, uint64_t offset)
1175d488b349SDr. David Alan Gilbert {
1176d488b349SDr. David Alan Gilbert     int i;
1177d488b349SDr. David Alan Gilbert     MigrationIncomingState *mis = migration_incoming_get_current();
1178d488b349SDr. David Alan Gilbert     GArray *pcrfds = mis->postcopy_remote_fds;
1179d488b349SDr. David Alan Gilbert 
1180d488b349SDr. David Alan Gilbert     for (i = 0; i < pcrfds->len; i++) {
1181d488b349SDr. David Alan Gilbert         struct PostCopyFD *cur = &g_array_index(pcrfds, struct PostCopyFD, i);
1182d488b349SDr. David Alan Gilbert         int ret = cur->waker(cur, rb, offset);
1183d488b349SDr. David Alan Gilbert         if (ret) {
1184d488b349SDr. David Alan Gilbert             return ret;
1185d488b349SDr. David Alan Gilbert         }
1186d488b349SDr. David Alan Gilbert     }
1187d488b349SDr. David Alan Gilbert     return 0;
1188d488b349SDr. David Alan Gilbert }
1189d488b349SDr. David Alan Gilbert 
1190696ed9a9SDr. David Alan Gilbert /*
1191696ed9a9SDr. David Alan Gilbert  * Place a host page (from) at (host) atomically
1192696ed9a9SDr. David Alan Gilbert  * returns 0 on success
1193696ed9a9SDr. David Alan Gilbert  */
1194df9ff5e1SDr. David Alan Gilbert int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
11958be4620bSAlexey Perevalov                         RAMBlock *rb)
1196696ed9a9SDr. David Alan Gilbert {
11978be4620bSAlexey Perevalov     size_t pagesize = qemu_ram_pagesize(rb);
1198696ed9a9SDr. David Alan Gilbert 
1199696ed9a9SDr. David Alan Gilbert     /* The copy also acks to the kernel, waking up any stalled thread.
1200696ed9a9SDr. David Alan Gilbert      * TODO: We can inhibit that ack and only do it if it was requested
1201696ed9a9SDr. David Alan Gilbert      * which would be slightly cheaper, but we'd have to be careful
1202696ed9a9SDr. David Alan Gilbert      * of the order of updating our page state.
1203696ed9a9SDr. David Alan Gilbert      */
1204f9494614SAlexey Perevalov     if (qemu_ufd_copy_ioctl(mis->userfault_fd, host, from, pagesize, rb)) {
1205696ed9a9SDr. David Alan Gilbert         int e = errno;
1206df9ff5e1SDr. David Alan Gilbert         error_report("%s: %s copy host: %p from: %p (size: %zd)",
1207df9ff5e1SDr. David Alan Gilbert                      __func__, strerror(e), host, from, pagesize);
1208696ed9a9SDr. David Alan Gilbert 
1209696ed9a9SDr. David Alan Gilbert         return -e;
1210696ed9a9SDr. David Alan Gilbert     }
1211696ed9a9SDr. David Alan Gilbert 
1212696ed9a9SDr. David Alan Gilbert     trace_postcopy_place_page(host);
1213dedfb4b2SDr. David Alan Gilbert     return postcopy_notify_shared_wake(rb,
1214dedfb4b2SDr. David Alan Gilbert                                        qemu_ram_block_host_offset(rb, host));
1215696ed9a9SDr. David Alan Gilbert }
1216696ed9a9SDr. David Alan Gilbert 
1217696ed9a9SDr. David Alan Gilbert /*
1218696ed9a9SDr. David Alan Gilbert  * Place a zero page at (host) atomically
1219696ed9a9SDr. David Alan Gilbert  * returns 0 on success
1220696ed9a9SDr. David Alan Gilbert  */
1221df9ff5e1SDr. David Alan Gilbert int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
12228be4620bSAlexey Perevalov                              RAMBlock *rb)
1223696ed9a9SDr. David Alan Gilbert {
12242ce16640SDr. David Alan Gilbert     size_t pagesize = qemu_ram_pagesize(rb);
1225df9ff5e1SDr. David Alan Gilbert     trace_postcopy_place_page_zero(host);
1226696ed9a9SDr. David Alan Gilbert 
12272ce16640SDr. David Alan Gilbert     /* Normal RAMBlocks can zero a page using UFFDIO_ZEROPAGE
12282ce16640SDr. David Alan Gilbert      * but it's not available for everything (e.g. hugetlbpages)
12292ce16640SDr. David Alan Gilbert      */
12302ce16640SDr. David Alan Gilbert     if (qemu_ram_is_uf_zeroable(rb)) {
12312ce16640SDr. David Alan Gilbert         if (qemu_ufd_copy_ioctl(mis->userfault_fd, host, NULL, pagesize, rb)) {
1232696ed9a9SDr. David Alan Gilbert             int e = errno;
1233696ed9a9SDr. David Alan Gilbert             error_report("%s: %s zero host: %p",
1234696ed9a9SDr. David Alan Gilbert                          __func__, strerror(e), host);
1235696ed9a9SDr. David Alan Gilbert 
1236696ed9a9SDr. David Alan Gilbert             return -e;
1237696ed9a9SDr. David Alan Gilbert         }
1238dedfb4b2SDr. David Alan Gilbert         return postcopy_notify_shared_wake(rb,
1239dedfb4b2SDr. David Alan Gilbert                                            qemu_ram_block_host_offset(rb,
1240dedfb4b2SDr. David Alan Gilbert                                                                       host));
1241df9ff5e1SDr. David Alan Gilbert     } else {
124241d84210SDr. David Alan Gilbert         /* The kernel can't use UFFDIO_ZEROPAGE for hugepages */
124341d84210SDr. David Alan Gilbert         if (!mis->postcopy_tmp_zero_page) {
124441d84210SDr. David Alan Gilbert             mis->postcopy_tmp_zero_page = mmap(NULL, mis->largest_page_size,
124541d84210SDr. David Alan Gilbert                                                PROT_READ | PROT_WRITE,
124641d84210SDr. David Alan Gilbert                                                MAP_PRIVATE | MAP_ANONYMOUS,
124741d84210SDr. David Alan Gilbert                                                -1, 0);
124841d84210SDr. David Alan Gilbert             if (mis->postcopy_tmp_zero_page == MAP_FAILED) {
124941d84210SDr. David Alan Gilbert                 int e = errno;
125041d84210SDr. David Alan Gilbert                 mis->postcopy_tmp_zero_page = NULL;
125141d84210SDr. David Alan Gilbert                 error_report("%s: %s mapping large zero page",
125241d84210SDr. David Alan Gilbert                              __func__, strerror(e));
125341d84210SDr. David Alan Gilbert                 return -e;
125441d84210SDr. David Alan Gilbert             }
125541d84210SDr. David Alan Gilbert             memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size);
125641d84210SDr. David Alan Gilbert         }
125741d84210SDr. David Alan Gilbert         return postcopy_place_page(mis, host, mis->postcopy_tmp_zero_page,
12588be4620bSAlexey Perevalov                                    rb);
1259df9ff5e1SDr. David Alan Gilbert     }
1260696ed9a9SDr. David Alan Gilbert }
1261696ed9a9SDr. David Alan Gilbert 
1262696ed9a9SDr. David Alan Gilbert /*
1263696ed9a9SDr. David Alan Gilbert  * Returns a target page of memory that can be mapped at a later point in time
1264696ed9a9SDr. David Alan Gilbert  * using postcopy_place_page
1265696ed9a9SDr. David Alan Gilbert  * The same address is used repeatedly, postcopy_place_page just takes the
1266696ed9a9SDr. David Alan Gilbert  * backing page away.
1267696ed9a9SDr. David Alan Gilbert  * Returns: Pointer to allocated page
1268696ed9a9SDr. David Alan Gilbert  *
1269696ed9a9SDr. David Alan Gilbert  */
1270696ed9a9SDr. David Alan Gilbert void *postcopy_get_tmp_page(MigrationIncomingState *mis)
1271696ed9a9SDr. David Alan Gilbert {
1272696ed9a9SDr. David Alan Gilbert     if (!mis->postcopy_tmp_page) {
1273df9ff5e1SDr. David Alan Gilbert         mis->postcopy_tmp_page = mmap(NULL, mis->largest_page_size,
1274696ed9a9SDr. David Alan Gilbert                              PROT_READ | PROT_WRITE, MAP_PRIVATE |
1275696ed9a9SDr. David Alan Gilbert                              MAP_ANONYMOUS, -1, 0);
12760e8b3cdfSEvgeny Yakovlev         if (mis->postcopy_tmp_page == MAP_FAILED) {
12770e8b3cdfSEvgeny Yakovlev             mis->postcopy_tmp_page = NULL;
1278696ed9a9SDr. David Alan Gilbert             error_report("%s: %s", __func__, strerror(errno));
1279696ed9a9SDr. David Alan Gilbert             return NULL;
1280696ed9a9SDr. David Alan Gilbert         }
1281696ed9a9SDr. David Alan Gilbert     }
1282696ed9a9SDr. David Alan Gilbert 
1283696ed9a9SDr. David Alan Gilbert     return mis->postcopy_tmp_page;
1284696ed9a9SDr. David Alan Gilbert }
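
/*
 * Illustrative sketch, not part of the original file: roughly how the incoming
 * RAM code is expected to pair postcopy_get_tmp_page() with
 * postcopy_place_page().  The read step and the 'host' destination address
 * below are placeholders for whatever the caller (e.g. ram.c) actually uses.
 *
 *     void *tmp = postcopy_get_tmp_page(mis);              // shared scratch page
 *     if (tmp) {
 *         qemu_get_buffer(f, tmp, qemu_ram_pagesize(rb));  // fill it from the stream
 *         // Atomically map the filled page at 'host'; UFFDIO_COPY also wakes
 *         // any vCPU thread stalled on the fault for that address.
 *         postcopy_place_page(mis, host, tmp, rb);
 *     }
 */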
1285696ed9a9SDr. David Alan Gilbert 
1286eb59db53SDr. David Alan Gilbert #else
1287eb59db53SDr. David Alan Gilbert /* No target OS support, stubs just fail */
128865ace060SAlexey Perevalov void fill_destination_postcopy_migration_info(MigrationInfo *info)
128965ace060SAlexey Perevalov {
129065ace060SAlexey Perevalov }
129165ace060SAlexey Perevalov 
1292d7651f15SAlexey Perevalov bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
1293eb59db53SDr. David Alan Gilbert {
1294eb59db53SDr. David Alan Gilbert     error_report("%s: No OS support", __func__);
1295eb59db53SDr. David Alan Gilbert     return false;
1296eb59db53SDr. David Alan Gilbert }
1297eb59db53SDr. David Alan Gilbert 
1298c136180cSDavid Hildenbrand int postcopy_ram_incoming_init(MigrationIncomingState *mis)
12991caddf8aSDr. David Alan Gilbert {
13001caddf8aSDr. David Alan Gilbert     error_report("postcopy_ram_incoming_init: No OS support");
13011caddf8aSDr. David Alan Gilbert     return -1;
13021caddf8aSDr. David Alan Gilbert }
13031caddf8aSDr. David Alan Gilbert 
13041caddf8aSDr. David Alan Gilbert int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
13051caddf8aSDr. David Alan Gilbert {
13061caddf8aSDr. David Alan Gilbert     assert(0);
13071caddf8aSDr. David Alan Gilbert     return -1;
13081caddf8aSDr. David Alan Gilbert }
13091caddf8aSDr. David Alan Gilbert 
1310f9527107SDr. David Alan Gilbert int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
1311f9527107SDr. David Alan Gilbert {
1312f9527107SDr. David Alan Gilbert     assert(0);
1313f9527107SDr. David Alan Gilbert     return -1;
1314f9527107SDr. David Alan Gilbert }
1315f9527107SDr. David Alan Gilbert 
1316c188c539SMichael S. Tsirkin int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb,
1317c188c539SMichael S. Tsirkin                                  uint64_t client_addr, uint64_t rb_offset)
1318c188c539SMichael S. Tsirkin {
1319c188c539SMichael S. Tsirkin     assert(0);
1320c188c539SMichael S. Tsirkin     return -1;
1321c188c539SMichael S. Tsirkin }
1322c188c539SMichael S. Tsirkin 
1323f0a227adSDr. David Alan Gilbert int postcopy_ram_enable_notify(MigrationIncomingState *mis)
1324f0a227adSDr. David Alan Gilbert {
1325f0a227adSDr. David Alan Gilbert     assert(0);
1326f0a227adSDr. David Alan Gilbert     return -1;
1327f0a227adSDr. David Alan Gilbert }
1328696ed9a9SDr. David Alan Gilbert 
1329df9ff5e1SDr. David Alan Gilbert int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
13308be4620bSAlexey Perevalov                         RAMBlock *rb)
1331696ed9a9SDr. David Alan Gilbert {
1332696ed9a9SDr. David Alan Gilbert     assert(0);
1333696ed9a9SDr. David Alan Gilbert     return -1;
1334696ed9a9SDr. David Alan Gilbert }
1335696ed9a9SDr. David Alan Gilbert 
1336df9ff5e1SDr. David Alan Gilbert int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
13378be4620bSAlexey Perevalov                         RAMBlock *rb)
1338696ed9a9SDr. David Alan Gilbert {
1339696ed9a9SDr. David Alan Gilbert     assert(0);
1340696ed9a9SDr. David Alan Gilbert     return -1;
1341696ed9a9SDr. David Alan Gilbert }
1342696ed9a9SDr. David Alan Gilbert 
1343696ed9a9SDr. David Alan Gilbert void *postcopy_get_tmp_page(MigrationIncomingState *mis)
1344696ed9a9SDr. David Alan Gilbert {
1345696ed9a9SDr. David Alan Gilbert     assert(0);
1346696ed9a9SDr. David Alan Gilbert     return NULL;
1347696ed9a9SDr. David Alan Gilbert }
1348696ed9a9SDr. David Alan Gilbert 
13495efc3564SDr. David Alan Gilbert int postcopy_wake_shared(struct PostCopyFD *pcfd,
13505efc3564SDr. David Alan Gilbert                          uint64_t client_addr,
13515efc3564SDr. David Alan Gilbert                          RAMBlock *rb)
13525efc3564SDr. David Alan Gilbert {
13535efc3564SDr. David Alan Gilbert     assert(0);
13545efc3564SDr. David Alan Gilbert     return -1;
13555efc3564SDr. David Alan Gilbert }
1356eb59db53SDr. David Alan Gilbert #endif
1357eb59db53SDr. David Alan Gilbert 
1358e0b266f0SDr. David Alan Gilbert /* ------------------------------------------------------------------------- */
1359e0b266f0SDr. David Alan Gilbert 
13609ab7ef9bSPeter Xu void postcopy_fault_thread_notify(MigrationIncomingState *mis)
13619ab7ef9bSPeter Xu {
13629ab7ef9bSPeter Xu     uint64_t tmp64 = 1;
13639ab7ef9bSPeter Xu 
13649ab7ef9bSPeter Xu     /*
13659ab7ef9bSPeter Xu      * Wakeup the fault_thread.  It's an eventfd that should currently
13669ab7ef9bSPeter Xu      * be at 0, we're going to increment it to 1
13679ab7ef9bSPeter Xu      */
13689ab7ef9bSPeter Xu     if (write(mis->userfault_event_fd, &tmp64, 8) != 8) {
13699ab7ef9bSPeter Xu         /* Not much we can do here, but may as well report it */
13709ab7ef9bSPeter Xu         error_report("%s: incrementing failed: %s", __func__,
13719ab7ef9bSPeter Xu                      strerror(errno));
13729ab7ef9bSPeter Xu     }
13739ab7ef9bSPeter Xu }
13749ab7ef9bSPeter Xu 
1375e0b266f0SDr. David Alan Gilbert /**
1376e0b266f0SDr. David Alan Gilbert  * postcopy_discard_send_init: Called at the start of each RAMBlock before
1377e0b266f0SDr. David Alan Gilbert  *   asking to discard individual ranges.
1378e0b266f0SDr. David Alan Gilbert  *
1379e0b266f0SDr. David Alan Gilbert  * @ms: The current migration state.
1381e0b266f0SDr. David Alan Gilbert  * @name: RAMBlock that discards will operate on.
1382e0b266f0SDr. David Alan Gilbert  */
1383*810cf2bbSWei Yang static PostcopyDiscardState pds = {0};
1384*810cf2bbSWei Yang void postcopy_discard_send_init(MigrationState *ms, const char *name)
1385e0b266f0SDr. David Alan Gilbert {
1386*810cf2bbSWei Yang     pds.ramblock_name = name;
1387*810cf2bbSWei Yang     pds.cur_entry = 0;
1388*810cf2bbSWei Yang     pds.nsentwords = 0;
1389*810cf2bbSWei Yang     pds.nsentcmds = 0;
1390e0b266f0SDr. David Alan Gilbert }
1391e0b266f0SDr. David Alan Gilbert 
1392e0b266f0SDr. David Alan Gilbert /**
1393e0b266f0SDr. David Alan Gilbert  * postcopy_discard_send_range: Called by the bitmap code for each chunk to
1394e0b266f0SDr. David Alan Gilbert  *   discard. May send a discard message, may just leave it queued to
1395e0b266f0SDr. David Alan Gilbert  *   be sent later.
1396e0b266f0SDr. David Alan Gilbert  *
1397e0b266f0SDr. David Alan Gilbert  * @ms: Current migration state.
1398e0b266f0SDr. David Alan Gilbert  * @start,@length: a range of pages in the migration bitmap in the
1399e0b266f0SDr. David Alan Gilbert  *   RAM block passed to postcopy_discard_send_init() (length=1 is one page)
1400e0b266f0SDr. David Alan Gilbert  */
1401*810cf2bbSWei Yang void postcopy_discard_send_range(MigrationState *ms, unsigned long start,
1402*810cf2bbSWei Yang                                  unsigned long length)
1403e0b266f0SDr. David Alan Gilbert {
140420afaed9SJuan Quintela     size_t tp_size = qemu_target_page_size();
1405e0b266f0SDr. David Alan Gilbert     /* Convert to byte offsets within the RAM block */
1406*810cf2bbSWei Yang     pds.start_list[pds.cur_entry] = start  * tp_size;
1407*810cf2bbSWei Yang     pds.length_list[pds.cur_entry] = length * tp_size;
1408*810cf2bbSWei Yang     trace_postcopy_discard_send_range(pds.ramblock_name, start, length);
1409*810cf2bbSWei Yang     pds.cur_entry++;
1410*810cf2bbSWei Yang     pds.nsentwords++;
1411e0b266f0SDr. David Alan Gilbert 
1412*810cf2bbSWei Yang     if (pds.cur_entry == MAX_DISCARDS_PER_COMMAND) {
1413e0b266f0SDr. David Alan Gilbert         /* Full set, ship it! */
141489a02a9fSzhanghailiang         qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
1415*810cf2bbSWei Yang                                               pds.ramblock_name,
1416*810cf2bbSWei Yang                                               pds.cur_entry,
1417*810cf2bbSWei Yang                                               pds.start_list,
1418*810cf2bbSWei Yang                                               pds.length_list);
1419*810cf2bbSWei Yang         pds.nsentcmds++;
1420*810cf2bbSWei Yang         pds.cur_entry = 0;
1421e0b266f0SDr. David Alan Gilbert     }
1422e0b266f0SDr. David Alan Gilbert }
1423e0b266f0SDr. David Alan Gilbert 
1424e0b266f0SDr. David Alan Gilbert /**
1425e0b266f0SDr. David Alan Gilbert  * postcopy_discard_send_finish: Called at the end of each RAMBlock by the
1426e0b266f0SDr. David Alan Gilbert  * bitmap code. Sends any outstanding discard messages.
1427e0b266f0SDr. David Alan Gilbert  *
1428e0b266f0SDr. David Alan Gilbert  * @ms: Current migration state.
1429e0b266f0SDr. David Alan Gilbert  */
1430*810cf2bbSWei Yang void postcopy_discard_send_finish(MigrationState *ms)
1431e0b266f0SDr. David Alan Gilbert {
1432e0b266f0SDr. David Alan Gilbert     /* Anything unsent? */
1433*810cf2bbSWei Yang     if (pds.cur_entry) {
143489a02a9fSzhanghailiang         qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
1435*810cf2bbSWei Yang                                               pds.ramblock_name,
1436*810cf2bbSWei Yang                                               pds.cur_entry,
1437*810cf2bbSWei Yang                                               pds.start_list,
1438*810cf2bbSWei Yang                                               pds.length_list);
1439*810cf2bbSWei Yang         pds.nsentcmds++;
1440e0b266f0SDr. David Alan Gilbert     }
1441e0b266f0SDr. David Alan Gilbert 
1442*810cf2bbSWei Yang     trace_postcopy_discard_send_finish(pds.ramblock_name, pds.nsentwords,
1443*810cf2bbSWei Yang                                        pds.nsentcmds);
1444e0b266f0SDr. David Alan Gilbert }
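
/*
 * Illustrative sketch, not part of the original file: the expected per-RAMBlock
 * call sequence for the discard helpers above, driven on the source side by a
 * walk of the migration dirty bitmap.  'start'/'length' are placeholders for
 * whatever page ranges that walk produces.
 *
 *     postcopy_discard_send_init(ms, qemu_ram_get_idstr(block));
 *     while (another range of pages needs discarding) {
 *         postcopy_discard_send_range(ms, start, length);   // queued/batched
 *     }
 *     postcopy_discard_send_finish(ms);                     // flush any partial batch
 */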
1445bac3b212SJuan Quintela 
1446bac3b212SJuan Quintela /*
1447bac3b212SJuan Quintela  * Current state of incoming postcopy; note this is not part of
1448bac3b212SJuan Quintela  * MigrationIncomingState since its state is used during cleanup
1449bac3b212SJuan Quintela  * at the end as MIS is being freed.
1450bac3b212SJuan Quintela  */
1451bac3b212SJuan Quintela static PostcopyState incoming_postcopy_state;
1452bac3b212SJuan Quintela 
1453bac3b212SJuan Quintela PostcopyState  postcopy_state_get(void)
1454bac3b212SJuan Quintela {
1455bac3b212SJuan Quintela     return atomic_mb_read(&incoming_postcopy_state);
1456bac3b212SJuan Quintela }
1457bac3b212SJuan Quintela 
1458bac3b212SJuan Quintela /* Set the state and return the old state */
1459bac3b212SJuan Quintela PostcopyState postcopy_state_set(PostcopyState new_state)
1460bac3b212SJuan Quintela {
1461bac3b212SJuan Quintela     return atomic_xchg(&incoming_postcopy_state, new_state);
1462bac3b212SJuan Quintela }
146300fa4fc8SDr. David Alan Gilbert 
146400fa4fc8SDr. David Alan Gilbert /* Register a handler for external shared memory postcopy
146500fa4fc8SDr. David Alan Gilbert  * called on the destination.
146600fa4fc8SDr. David Alan Gilbert  */
146700fa4fc8SDr. David Alan Gilbert void postcopy_register_shared_ufd(struct PostCopyFD *pcfd)
146800fa4fc8SDr. David Alan Gilbert {
146900fa4fc8SDr. David Alan Gilbert     MigrationIncomingState *mis = migration_incoming_get_current();
147000fa4fc8SDr. David Alan Gilbert 
147100fa4fc8SDr. David Alan Gilbert     mis->postcopy_remote_fds = g_array_append_val(mis->postcopy_remote_fds,
147200fa4fc8SDr. David Alan Gilbert                                                   *pcfd);
147300fa4fc8SDr. David Alan Gilbert }
147400fa4fc8SDr. David Alan Gilbert 
147500fa4fc8SDr. David Alan Gilbert /* Unregister a handler for external shared memory postcopy
147600fa4fc8SDr. David Alan Gilbert  */
147700fa4fc8SDr. David Alan Gilbert void postcopy_unregister_shared_ufd(struct PostCopyFD *pcfd)
147800fa4fc8SDr. David Alan Gilbert {
147900fa4fc8SDr. David Alan Gilbert     guint i;
148000fa4fc8SDr. David Alan Gilbert     MigrationIncomingState *mis = migration_incoming_get_current();
148100fa4fc8SDr. David Alan Gilbert     GArray *pcrfds = mis->postcopy_remote_fds;
148200fa4fc8SDr. David Alan Gilbert 
148300fa4fc8SDr. David Alan Gilbert     for (i = 0; i < pcrfds->len; i++) {
148400fa4fc8SDr. David Alan Gilbert         struct PostCopyFD *cur = &g_array_index(pcrfds, struct PostCopyFD, i);
148500fa4fc8SDr. David Alan Gilbert         if (cur->fd == pcfd->fd) {
148600fa4fc8SDr. David Alan Gilbert             mis->postcopy_remote_fds = g_array_remove_index(pcrfds, i);
148700fa4fc8SDr. David Alan Gilbert             return;
148800fa4fc8SDr. David Alan Gilbert         }
148900fa4fc8SDr. David Alan Gilbert     }
149000fa4fc8SDr. David Alan Gilbert }
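
/*
 * Illustrative sketch, not part of the original file: how an external
 * shared-memory client (e.g. a vhost-user backend) might plug into the fault
 * thread via the two helpers above.  Only the fd/idstr/handler/waker members
 * used elsewhere in this file are assumed; the callback names are made up.
 *
 *     struct PostCopyFD pcfd = {
 *         .fd      = client_uffd,         // the client's own userfaultfd
 *         .idstr   = "my-shared-region",  // used in traces/error messages
 *         .handler = my_fault_handler,    // given each uffd_msg that arrives
 *         .waker   = my_waker,            // wakes the client once a page lands
 *     };
 *     postcopy_register_shared_ufd(&pcfd);
 *     ...
 *     postcopy_unregister_shared_ufd(&pcfd);   // matched by fd on removal
 */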
1491