xref: /qemu/migration/postcopy-ram.c (revision 7648297d40649bbffb296b2d8f2f388f19868759)
/*
 * Postcopy migration for RAM
 *
 * Copyright 2013-2015 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Dave Gilbert  <dgilbert@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

/*
 * Postcopy is a migration technique where the execution flips from the
 * source to the destination before all the data has been copied.
 */

#include "qemu/osdep.h"
#include "qemu/rcu.h"
#include "exec/target_page.h"
#include "migration.h"
#include "qemu-file.h"
#include "savevm.h"
#include "postcopy-ram.h"
#include "ram.h"
#include "qapi/error.h"
#include "qemu/notify.h"
#include "qemu/rcu.h"
#include "sysemu/sysemu.h"
#include "qemu/error-report.h"
#include "trace.h"
#include "hw/boards.h"
#include "exec/ramblock.h"

/* Arbitrary limit on size of each discard command,
 * keeps them around ~200 bytes
 */
#define MAX_DISCARDS_PER_COMMAND 12

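/*
 * Collects up to MAX_DISCARDS_PER_COMMAND (start, length) ranges for one
 * RAMBlock before they are sent as a single discard command;
 * nsentwords/nsentcmds track what has already been sent.
 */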
struct PostcopyDiscardState {
    const char *ramblock_name;
    uint16_t cur_entry;
    /*
     * Start and length of a discard range (bytes)
     */
    uint64_t start_list[MAX_DISCARDS_PER_COMMAND];
    uint64_t length_list[MAX_DISCARDS_PER_COMMAND];
    unsigned int nsentwords;
    unsigned int nsentcmds;
};

static NotifierWithReturnList postcopy_notifier_list;

void postcopy_infrastructure_init(void)
{
    notifier_with_return_list_init(&postcopy_notifier_list);
}

void postcopy_add_notifier(NotifierWithReturn *nn)
{
    notifier_with_return_list_add(&postcopy_notifier_list, nn);
}

void postcopy_remove_notifier(NotifierWithReturn *n)
{
    notifier_with_return_remove(n);
}

int postcopy_notify(enum PostcopyNotifyReason reason, Error **errp)
{
    struct PostcopyNotifyData pnd;
    pnd.reason = reason;
    pnd.errp = errp;

    return notifier_with_return_list_notify(&postcopy_notifier_list,
                                            &pnd);
}

/* Postcopy needs to detect accesses to pages that haven't yet been copied
 * across, and efficiently map new pages in; the techniques for doing this
 * are target OS specific.
 */
#if defined(__linux__)

#include <poll.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <asm/types.h> /* for __u64 */
#endif

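/*
 * The userfaultfd-based implementation below is only compiled in when the
 * userfaultfd syscall number and eventfd support are available.
 */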
#if defined(__linux__) && defined(__NR_userfaultfd) && defined(CONFIG_EVENTFD)
#include <sys/eventfd.h>
#include <linux/userfaultfd.h>

typedef struct PostcopyBlocktimeContext {
    /* time when page fault initiated per vCPU */
    uint32_t *page_fault_vcpu_time;
    /* page address per vCPU */
    uintptr_t *vcpu_addr;
    uint32_t total_blocktime;
    /* blocktime per vCPU */
    uint32_t *vcpu_blocktime;
    /* point in time when last page fault was initiated */
    uint32_t last_begin;
    /* number of vCPUs currently suspended */
    int smp_cpus_down;
    uint64_t start_time;

    /*
     * Handler for exit event, necessary for
     * releasing whole blocktime_ctx
     */
    Notifier exit_notifier;
} PostcopyBlocktimeContext;

static void destroy_blocktime_context(struct PostcopyBlocktimeContext *ctx)
{
    g_free(ctx->page_fault_vcpu_time);
    g_free(ctx->vcpu_addr);
    g_free(ctx->vcpu_blocktime);
    g_free(ctx);
}

static void migration_exit_cb(Notifier *n, void *data)
{
    PostcopyBlocktimeContext *ctx = container_of(n, PostcopyBlocktimeContext,
                                                 exit_notifier);
    destroy_blocktime_context(ctx);
}

static struct PostcopyBlocktimeContext *blocktime_context_new(void)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int smp_cpus = ms->smp.cpus;
    PostcopyBlocktimeContext *ctx = g_new0(PostcopyBlocktimeContext, 1);
    ctx->page_fault_vcpu_time = g_new0(uint32_t, smp_cpus);
    ctx->vcpu_addr = g_new0(uintptr_t, smp_cpus);
    ctx->vcpu_blocktime = g_new0(uint32_t, smp_cpus);

    ctx->exit_notifier.notify = migration_exit_cb;
    ctx->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    qemu_add_exit_notifier(&ctx->exit_notifier);
    return ctx;
}

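/*
 * Build a QAPI uint32List of the per-vCPU blocktime values; iterating from
 * the last vCPU down and prepending keeps the result ordered by cpu index.
 */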
static uint32List *get_vcpu_blocktime_list(PostcopyBlocktimeContext *ctx)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    uint32List *list = NULL;
    int i;

    for (i = ms->smp.cpus - 1; i >= 0; i--) {
        QAPI_LIST_PREPEND(list, ctx->vcpu_blocktime[i]);
    }

    return list;
}

/*
 * Populate MigrationInfo from postcopy's blocktime context. Does nothing
 * unless the postcopy-blocktime capability was set (and hence the blocktime
 * context was allocated).
 *
 * @info: pointer to MigrationInfo to populate
 */
void fill_destination_postcopy_migration_info(MigrationInfo *info)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *bc = mis->blocktime_ctx;

    if (!bc) {
        return;
    }

    info->has_postcopy_blocktime = true;
    info->postcopy_blocktime = bc->total_blocktime;
    info->has_postcopy_vcpu_blocktime = true;
    info->postcopy_vcpu_blocktime = get_vcpu_blocktime_list(bc);
}

static uint32_t get_postcopy_total_blocktime(void)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *bc = mis->blocktime_ctx;

    if (!bc) {
        return 0;
    }

    return bc->total_blocktime;
}

/**
 * receive_ufd_features: query the userfault fd features so that only
 * supported features are requested later.
 *
 * Returns: true on success
 *
 * The availability of __NR_userfaultfd must have been checked already.
 *  @features: out parameter; on success it contains the uffdio_api.features
 *             reported by the kernel
 */
static bool receive_ufd_features(uint64_t *features)
{
    struct uffdio_api api_struct = {0};
    int ufd;
    bool ret = true;

    /* if we get here, __NR_userfaultfd should exist */
    ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: syscall __NR_userfaultfd failed: %s", __func__,
                     strerror(errno));
        return false;
    }

    /* ask features */
    api_struct.api = UFFD_API;
    api_struct.features = 0;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("%s: UFFDIO_API failed: %s", __func__,
                     strerror(errno));
        ret = false;
        goto release_ufd;
    }

    *features = api_struct.features;

release_ufd:
    close(ufd);
    return ret;
}

/**
 * request_ufd_features: must be called only once on a newly opened ufd;
 * subsequent calls will fail.
 *
 * Returns: true on success
 *
 * @ufd: fd obtained from the userfaultfd syscall
 * @features: bit mask, see UFFD_API_FEATURES
 */
static bool request_ufd_features(int ufd, uint64_t features)
{
    struct uffdio_api api_struct = {0};
    uint64_t ioctl_mask;

    api_struct.api = UFFD_API;
    api_struct.features = features;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("%s failed: UFFDIO_API failed: %s", __func__,
                     strerror(errno));
        return false;
    }

    ioctl_mask = (__u64)1 << _UFFDIO_REGISTER |
                 (__u64)1 << _UFFDIO_UNREGISTER;
    if ((api_struct.ioctls & ioctl_mask) != ioctl_mask) {
        error_report("Missing userfault features: %" PRIx64,
                     (uint64_t)(~api_struct.ioctls & ioctl_mask));
        return false;
    }

    return true;
}

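/*
 * Probe the userfaultfd features once (they are the same for every fd),
 * request the ones we need, and check that the host can handle userfaults
 * on huge pages when any RAM block uses them.
 */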
static bool ufd_check_and_apply(int ufd, MigrationIncomingState *mis)
{
    uint64_t asked_features = 0;
    static uint64_t supported_features;

    /*
     * It's not possible to request UFFD_API twice on the same fd, and the
     * userfault fd features are the same for every fd, so query them once
     * and cache the result.
     */
    if (!supported_features) {
        if (!receive_ufd_features(&supported_features)) {
            error_report("%s failed", __func__);
            return false;
        }
    }

#ifdef UFFD_FEATURE_THREAD_ID
    if (migrate_postcopy_blocktime() && mis &&
        UFFD_FEATURE_THREAD_ID & supported_features) {
        /* kernel supports that feature */
        /* don't create blocktime_context if it exists */
        if (!mis->blocktime_ctx) {
            mis->blocktime_ctx = blocktime_context_new();
        }

        asked_features |= UFFD_FEATURE_THREAD_ID;
    }
#endif

    /*
     * Request the features even if asked_features is 0, because the kernel
     * expects UFFD_API before UFFDIO_REGISTER on every userfault file
     * descriptor.
     */
    if (!request_ufd_features(ufd, asked_features)) {
        error_report("%s failed: features %" PRIu64, __func__,
                     asked_features);
        return false;
    }

    if (qemu_real_host_page_size != ram_pagesize_summary()) {
        bool have_hp = false;
        /* We've got a huge page */
#ifdef UFFD_FEATURE_MISSING_HUGETLBFS
        have_hp = supported_features & UFFD_FEATURE_MISSING_HUGETLBFS;
#endif
        if (!have_hp) {
            error_report("Userfault on this host does not support huge pages");
            return false;
        }
    }
    return true;
}

/* Callback from postcopy_ram_supported_by_host block iterator.
 */
static int test_ramblock_postcopiable(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    ram_addr_t length = qemu_ram_get_used_length(rb);
    size_t pagesize = qemu_ram_pagesize(rb);

    if (length % pagesize) {
        error_report("Postcopy requires RAM blocks to be a page size multiple,"
                     " block %s is 0x" RAM_ADDR_FMT " bytes with a "
                     "page size of 0x%zx", block_name, length, pagesize);
        return 1;
    }
    return 0;
}

/*
 * Note: This has the side effect of munlock'ing all of RAM; that's
 * normally fine since if the postcopy succeeds it gets turned back on at the
 * end.
 */
bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
{
    long pagesize = qemu_real_host_page_size;
    int ufd = -1;
    bool ret = false; /* Error unless we change it */
    void *testarea = NULL;
    struct uffdio_register reg_struct;
    struct uffdio_range range_struct;
    uint64_t feature_mask;
    Error *local_err = NULL;

    if (qemu_target_page_size() > pagesize) {
        error_report("Target page size bigger than host page size");
        goto out;
    }

    ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: userfaultfd not available: %s", __func__,
                     strerror(errno));
        goto out;
    }

    /* Give devices a chance to object */
    if (postcopy_notify(POSTCOPY_NOTIFY_PROBE, &local_err)) {
        error_report_err(local_err);
        goto out;
    }

    /* Version and features check */
    if (!ufd_check_and_apply(ufd, mis)) {
        goto out;
    }

    /* We don't support postcopy with shared RAM yet */
    if (foreach_not_ignored_block(test_ramblock_postcopiable, NULL)) {
        goto out;
    }

    /*
     * userfault and mlock don't go together; we'll put it back later if
     * it was enabled.
     */
    if (munlockall()) {
        error_report("%s: munlockall: %s", __func__,  strerror(errno));
        goto out;
    }

    /*
     *  We need to check that the ops we need are supported on anon memory
     *  To do that we need to register a chunk and see the flags that
     *  are returned.
     */
    testarea = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE |
                                    MAP_ANONYMOUS, -1, 0);
    if (testarea == MAP_FAILED) {
        error_report("%s: Failed to map test area: %s", __func__,
                     strerror(errno));
        goto out;
    }
    g_assert(QEMU_PTR_IS_ALIGNED(testarea, pagesize));

    reg_struct.range.start = (uintptr_t)testarea;
    reg_struct.range.len = pagesize;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    if (ioctl(ufd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        goto out;
    }

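    /*
     * Unregister the test area again; registering it was only needed to
     * learn which ioctls the kernel supports on anonymous memory, which is
     * checked below via reg_struct.ioctls.
     */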
    range_struct.start = (uintptr_t)testarea;
    range_struct.len = pagesize;
    if (ioctl(ufd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s userfault unregister: %s", __func__, strerror(errno));
        goto out;
    }

    feature_mask = (__u64)1 << _UFFDIO_WAKE |
                   (__u64)1 << _UFFDIO_COPY |
                   (__u64)1 << _UFFDIO_ZEROPAGE;
    if ((reg_struct.ioctls & feature_mask) != feature_mask) {
        error_report("Missing userfault map features: %" PRIx64,
                     (uint64_t)(~reg_struct.ioctls & feature_mask));
        goto out;
    }

    /* Success! */
    ret = true;
out:
    if (testarea) {
        munmap(testarea, pagesize);
    }
    if (ufd != -1) {
        close(ufd);
    }
    return ret;
}

/*
 * Set up an area of RAM so that it *can* be used for postcopy later; this
 * must be done right at the start prior to pre-copy.
 * opaque should be the MIS.
 */
static int init_range(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    void *host_addr = qemu_ram_get_host_addr(rb);
    ram_addr_t offset = qemu_ram_get_offset(rb);
    ram_addr_t length = qemu_ram_get_used_length(rb);
    trace_postcopy_init_range(block_name, host_addr, offset, length);

    /*
     * Save the used_length before running the guest. In case we have to
     * resize RAM blocks when syncing RAM block sizes from the source during
     * precopy, we'll update it manually via the ram block notifier.
     */
    rb->postcopy_length = length;

    /*
     * We need the whole of RAM to be truly empty for postcopy, so things
     * like ROMs and any data tables built during init must be zero'd
     * - we're going to get the copy from the source anyway.
     * (Precopy will just overwrite this data, so doesn't need the discard)
     */
    if (ram_discard_range(block_name, 0, length)) {
        return -1;
    }

    return 0;
}

/*
 * At the end of migration, undo the effects of init_range
 * opaque should be the MIS.
 */
static int cleanup_range(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    void *host_addr = qemu_ram_get_host_addr(rb);
    ram_addr_t offset = qemu_ram_get_offset(rb);
    ram_addr_t length = rb->postcopy_length;
    MigrationIncomingState *mis = opaque;
    struct uffdio_range range_struct;
    trace_postcopy_cleanup_range(block_name, host_addr, offset, length);

    /*
     * We turned off hugepage for the precopy stage with postcopy enabled;
     * we can turn it back on now.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_HUGEPAGE);

    /*
     * We can also turn off userfault now since we should have all the
     * pages.   It can be useful to leave it on to debug postcopy
     * if you're not sure it's always getting every page.
     */
    range_struct.start = (uintptr_t)host_addr;
    range_struct.len = length;

    if (ioctl(mis->userfault_fd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s: userfault unregister %s", __func__, strerror(errno));

        return -1;
    }

    return 0;
}

/*
 * Initialise postcopy-ram, setting the RAM to a state where we can go into
 * postcopy later; must be called prior to any precopy.
 * called from arch_init's similarly named ram_postcopy_incoming_init
 */
int postcopy_ram_incoming_init(MigrationIncomingState *mis)
{
    if (foreach_not_ignored_block(init_range, NULL)) {
        return -1;
    }

    return 0;
}

/*
 * At the end of a migration where postcopy_ram_incoming_init was called.
 */
int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    trace_postcopy_ram_incoming_cleanup_entry();

    if (mis->have_fault_thread) {
        Error *local_err = NULL;

        /* Let the fault thread quit */
        qatomic_set(&mis->fault_thread_quit, 1);
        postcopy_fault_thread_notify(mis);
        trace_postcopy_ram_incoming_cleanup_join();
        qemu_thread_join(&mis->fault_thread);

        if (postcopy_notify(POSTCOPY_NOTIFY_INBOUND_END, &local_err)) {
            error_report_err(local_err);
            return -1;
        }

        if (foreach_not_ignored_block(cleanup_range, mis)) {
            return -1;
        }

        trace_postcopy_ram_incoming_cleanup_closeuf();
        close(mis->userfault_fd);
        close(mis->userfault_event_fd);
        mis->have_fault_thread = false;
    }

    if (enable_mlock) {
        if (os_mlock() < 0) {
            error_report("mlock: %s", strerror(errno));
            /*
             * It doesn't feel right to fail at this point, we have a valid
             * VM state.
             */
        }
    }

    if (mis->postcopy_tmp_page) {
        munmap(mis->postcopy_tmp_page, mis->largest_page_size);
        mis->postcopy_tmp_page = NULL;
    }
    if (mis->postcopy_tmp_zero_page) {
        munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size);
        mis->postcopy_tmp_zero_page = NULL;
    }
    trace_postcopy_ram_incoming_cleanup_blocktime(
            get_postcopy_total_blocktime());

    trace_postcopy_ram_incoming_cleanup_exit();
    return 0;
}

/*
 * Disable huge pages on an area
 */
static int nhp_range(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    void *host_addr = qemu_ram_get_host_addr(rb);
    ram_addr_t offset = qemu_ram_get_offset(rb);
    ram_addr_t length = rb->postcopy_length;
    trace_postcopy_nhp_range(block_name, host_addr, offset, length);

    /*
     * Before we do discards we need to ensure those discards really
     * do delete areas of the page, even if THP thinks a hugepage would
     * be a good idea, so force hugepages off.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_NOHUGEPAGE);

    return 0;
}

/*
 * Userfault requires us to mark RAM as NOHUGEPAGE prior to discard;
 * however, leaving it until after precopy means that most of the precopy
 * data is still THP'd.
 */
int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    if (foreach_not_ignored_block(nhp_range, mis)) {
        return -1;
    }

    postcopy_state_set(POSTCOPY_INCOMING_DISCARD);

    return 0;
}

/*
 * Mark the given area of RAM as requiring notification of accesses to
 * not-yet-written areas.
 * Used as a callback on foreach_not_ignored_block.
 *   rb: RAMBlock to register with userfault
 *   opaque: MigrationIncomingState pointer
 * Returns 0 on success
 */
static int ram_block_enable_notify(RAMBlock *rb, void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_register reg_struct;

    reg_struct.range.start = (uintptr_t)qemu_ram_get_host_addr(rb);
    reg_struct.range.len = rb->postcopy_length;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    /* Now tell our userfault_fd that it's responsible for this area */
    if (ioctl(mis->userfault_fd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        return -1;
    }
    if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
        error_report("%s userfault: Region doesn't support COPY", __func__);
        return -1;
    }
    if (reg_struct.ioctls & ((__u64)1 << _UFFDIO_ZEROPAGE)) {
        qemu_ram_set_uf_zeroable(rb);
    }

    return 0;
}

int postcopy_wake_shared(struct PostCopyFD *pcfd,
                         uint64_t client_addr,
                         RAMBlock *rb)
{
    size_t pagesize = qemu_ram_pagesize(rb);
    struct uffdio_range range;
    int ret;
    trace_postcopy_wake_shared(client_addr, qemu_ram_get_idstr(rb));
    range.start = ROUND_DOWN(client_addr, pagesize);
    range.len = pagesize;
    ret = ioctl(pcfd->fd, UFFDIO_WAKE, &range);
    if (ret) {
        error_report("%s: Failed to wake: %zx in %s (%s)",
                     __func__, (size_t)client_addr, qemu_ram_get_idstr(rb),
                     strerror(errno));
    }
    return ret;
}

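/*
 * Ask the source for the page backing @haddr, or place a zero page locally
 * when the page was discarded via a RamDiscardManager and thus will never
 * be migrated.
 */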
static int postcopy_request_page(MigrationIncomingState *mis, RAMBlock *rb,
                                 ram_addr_t start, uint64_t haddr)
{
    void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb));

    /*
     * Discarded pages (via RamDiscardManager) are never migrated. On unlikely
     * access, place a zeropage, which will also set the relevant bits in the
     * recv_bitmap accordingly, so we won't try placing a zeropage twice.
     *
     * Checking a single bit is sufficient to handle pagesize > TPS as either
     * all relevant bits are set or not.
     */
    assert(QEMU_IS_ALIGNED(start, qemu_ram_pagesize(rb)));
    if (ramblock_page_is_discarded(rb, start)) {
        bool received = ramblock_recv_bitmap_test_byte_offset(rb, start);

        return received ? 0 : postcopy_place_page_zero(mis, aligned, rb);
    }

    return migrate_send_rp_req_pages(mis, rb, start, haddr);
}

/*
 * Callback from shared fault handlers to ask for a page; the page must be
 * specified by a RAMBlock and an offset in that rb.
 * Note: Only for use by shared fault handlers (in fault thread)
 */
int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb,
                                 uint64_t client_addr, uint64_t rb_offset)
{
    uint64_t aligned_rbo = ROUND_DOWN(rb_offset, qemu_ram_pagesize(rb));
    MigrationIncomingState *mis = migration_incoming_get_current();

    trace_postcopy_request_shared_page(pcfd->idstr, qemu_ram_get_idstr(rb),
                                       rb_offset);
    if (ramblock_recv_bitmap_test_byte_offset(rb, aligned_rbo)) {
        trace_postcopy_request_shared_page_present(pcfd->idstr,
                                        qemu_ram_get_idstr(rb), rb_offset);
        return postcopy_wake_shared(pcfd, client_addr, rb);
    }
    postcopy_request_page(mis, rb, aligned_rbo, client_addr);
    return 0;
}

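/*
 * Map a faulting thread id (reported by the kernel when
 * UFFD_FEATURE_THREAD_ID is enabled) to its vCPU index, or -1 if the fault
 * did not come from a vCPU thread.
 */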
static int get_mem_fault_cpu_index(uint32_t pid)
{
    CPUState *cpu_iter;

    CPU_FOREACH(cpu_iter) {
        if (cpu_iter->thread_id == pid) {
            trace_get_mem_fault_cpu_index(cpu_iter->cpu_index, pid);
            return cpu_iter->cpu_index;
        }
    }
    trace_get_mem_fault_cpu_index(-1, pid);
    return -1;
}

static uint32_t get_low_time_offset(PostcopyBlocktimeContext *dc)
{
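    /*
     * Milliseconds since the blocktime context was created, clamped to a
     * minimum of 1 and truncated to 32 bits to fit the uint32_t timestamps
     * stored per vCPU.
     */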
    int64_t start_time_offset = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) -
                                    dc->start_time;
    return start_time_offset < 1 ? 1 : start_time_offset & UINT32_MAX;
}

/*
 * This function is called when a page fault occurs. It tracks the time
 * a vCPU is blocked waiting for the page.
 *
 * @addr: faulted host virtual address
 * @ptid: faulted process thread id
 * @rb: ramblock appropriate to addr
 */
static void mark_postcopy_blocktime_begin(uintptr_t addr, uint32_t ptid,
                                          RAMBlock *rb)
{
    int cpu, already_received;
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
    uint32_t low_time_offset;

    if (!dc || ptid == 0) {
        return;
    }
    cpu = get_mem_fault_cpu_index(ptid);
    if (cpu < 0) {
        return;
    }

    low_time_offset = get_low_time_offset(dc);
    if (dc->vcpu_addr[cpu] == 0) {
        qatomic_inc(&dc->smp_cpus_down);
    }

    qatomic_xchg(&dc->last_begin, low_time_offset);
    qatomic_xchg(&dc->page_fault_vcpu_time[cpu], low_time_offset);
    qatomic_xchg(&dc->vcpu_addr[cpu], addr);

    /*
     * Check it here rather than at the beginning of the function, because
     * the check could otherwise happen earlier than the bitmap_set in
     * qemu_ufd_copy_ioctl.
     */
    already_received = ramblock_recv_bitmap_test(rb, (void *)addr);
    if (already_received) {
        qatomic_xchg(&dc->vcpu_addr[cpu], 0);
        qatomic_xchg(&dc->page_fault_vcpu_time[cpu], 0);
        qatomic_dec(&dc->smp_cpus_down);
    }
    trace_mark_postcopy_blocktime_begin(addr, dc, dc->page_fault_vcpu_time[cpu],
                                        cpu, already_received);
}

/*
 *  This function just provides the calculated blocktime per vCPU and traces
 *  it. Total blocktime is calculated in mark_postcopy_blocktime_end.
 *
 *
 * Assume we have 3 CPUs
 *
 *      S1        E1           S1               E1
 * -----***********------------xxx***************------------------------> CPU1
 *
 *             S2                E2
 * ------------****************xxx---------------------------------------> CPU2
 *
 *                         S3            E3
 * ------------------------****xxx********-------------------------------> CPU3
 *
 * We have the sequence S1,S2,E1,S3,S1,E2,E3,E1
 * S2,E1 - doesn't match the condition because the sequence S1,S2,E1 doesn't
 *         include CPU3
 * S3,S1,E2 - the sequence includes all CPUs, in this case the overlap is
 *            S1,E2 - it's a part of total blocktime.
 * S1 - here is last_begin
 * Legend of the picture:
 *              * - blocktime per vCPU
 *              x - overlapped blocktime (total blocktime)
 *
 * @addr: host virtual address
 */
static void mark_postcopy_blocktime_end(uintptr_t addr)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int smp_cpus = ms->smp.cpus;
    int i, affected_cpu = 0;
    bool vcpu_total_blocktime = false;
    uint32_t read_vcpu_time, low_time_offset;

    if (!dc) {
        return;
    }

    low_time_offset = get_low_time_offset(dc);
    /*
     * Look up the vCPU(s) blocked on this address so we can clear them.
     * This linear scan is straightforward but not optimal; a better
     * algorithm would keep a tree or hash keyed by address, with a list of
     * blocked vCPUs as the value.
     */
    for (i = 0; i < smp_cpus; i++) {
        uint32_t vcpu_blocktime = 0;

        read_vcpu_time = qatomic_fetch_add(&dc->page_fault_vcpu_time[i], 0);
        if (qatomic_fetch_add(&dc->vcpu_addr[i], 0) != addr ||
            read_vcpu_time == 0) {
            continue;
        }
        qatomic_xchg(&dc->vcpu_addr[i], 0);
        vcpu_blocktime = low_time_offset - read_vcpu_time;
        affected_cpu += 1;
        /* We need to know whether this mark_postcopy_blocktime_end call was
         * due to a faulted page; the other possibility is a prefetched
         * page, and in that case we shouldn't be here */
        if (!vcpu_total_blocktime &&
            qatomic_fetch_add(&dc->smp_cpus_down, 0) == smp_cpus) {
            vcpu_total_blocktime = true;
        }
        /* continue the loop, since one page could affect several vCPUs */
        dc->vcpu_blocktime[i] += vcpu_blocktime;
    }

    qatomic_sub(&dc->smp_cpus_down, affected_cpu);
    if (vcpu_total_blocktime) {
        dc->total_blocktime += low_time_offset - qatomic_fetch_add(
                &dc->last_begin, 0);
    }
    trace_mark_postcopy_blocktime_end(addr, dc, dc->total_blocktime,
                                      affected_cpu);
}

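/*
 * Block the fault thread on postcopy_pause_sem_fault until it is told to
 * continue, e.g. after a paused postcopy migration has been recovered.
 */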
static bool postcopy_pause_fault_thread(MigrationIncomingState *mis)
{
    trace_postcopy_pause_fault_thread();

    qemu_sem_wait(&mis->postcopy_pause_sem_fault);

    trace_postcopy_pause_fault_thread_continued();

    return true;
}

/*
 * Handle faults detected by the USERFAULT markings
 */
static void *postcopy_ram_fault_thread(void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffd_msg msg;
    int ret;
    size_t index;
    RAMBlock *rb = NULL;

    trace_postcopy_ram_fault_thread_entry();
    rcu_register_thread();
    mis->last_rb = NULL; /* last RAMBlock we sent part of */
    qemu_sem_post(&mis->fault_thread_sem);

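    /*
     * Poll set layout: slot 0 is our userfaultfd, slot 1 is the eventfd used
     * to wake or quit the thread, and slots 2+ are one per shared (remote)
     * userfault fd in mis->postcopy_remote_fds.
     */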
    struct pollfd *pfd;
    size_t pfd_len = 2 + mis->postcopy_remote_fds->len;

    pfd = g_new0(struct pollfd, pfd_len);

    pfd[0].fd = mis->userfault_fd;
    pfd[0].events = POLLIN;
    pfd[1].fd = mis->userfault_event_fd;
    pfd[1].events = POLLIN; /* Waiting for eventfd to go positive */
    trace_postcopy_ram_fault_thread_fds_core(pfd[0].fd, pfd[1].fd);
    for (index = 0; index < mis->postcopy_remote_fds->len; index++) {
        struct PostCopyFD *pcfd = &g_array_index(mis->postcopy_remote_fds,
                                                 struct PostCopyFD, index);
        pfd[2 + index].fd = pcfd->fd;
        pfd[2 + index].events = POLLIN;
        trace_postcopy_ram_fault_thread_fds_extra(2 + index, pcfd->idstr,
                                                  pcfd->fd);
    }

    while (true) {
        ram_addr_t rb_offset;
        int poll_result;

915c4faeed2SDr. David Alan Gilbert         /*
916c4faeed2SDr. David Alan Gilbert          * We're mainly waiting for the kernel to give us a faulting HVA;
917c4faeed2SDr. David Alan Gilbert          * however, we can also be told to quit via userfault_event_fd,
918c4faeed2SDr. David Alan Gilbert          * which is an eventfd.
919c4faeed2SDr. David Alan Gilbert          */
920c4faeed2SDr. David Alan Gilbert 
92100fa4fc8SDr. David Alan Gilbert         poll_result = poll(pfd, pfd_len, -1 /* Wait forever */);
92200fa4fc8SDr. David Alan Gilbert         if (poll_result == -1) {
923c4faeed2SDr. David Alan Gilbert             error_report("%s: userfault poll: %s", __func__, strerror(errno));
924c4faeed2SDr. David Alan Gilbert             break;
925f0a227adSDr. David Alan Gilbert         }
926f0a227adSDr. David Alan Gilbert 
9273a7804c3SPeter Xu         if (!mis->to_src_file) {
9283a7804c3SPeter Xu             /*
9293a7804c3SPeter Xu              * Someone may have told us via the event that the return
9303a7804c3SPeter Xu              * path is already broken.  Hold here until the channel is
9313a7804c3SPeter Xu              * rebuilt.
9323a7804c3SPeter Xu              */
9333a7804c3SPeter Xu             if (postcopy_pause_fault_thread(mis)) {
9343a7804c3SPeter Xu                 /* Continue to read the userfaultfd */
9353a7804c3SPeter Xu             } else {
9363a7804c3SPeter Xu                 error_report("%s: paused, but not allowed to continue",
9373a7804c3SPeter Xu                              __func__);
9383a7804c3SPeter Xu                 break;
9393a7804c3SPeter Xu             }
9403a7804c3SPeter Xu         }
9413a7804c3SPeter Xu 
942c4faeed2SDr. David Alan Gilbert         if (pfd[1].revents) {
94364f615feSPeter Xu             uint64_t tmp64 = 0;
94464f615feSPeter Xu 
94564f615feSPeter Xu             /* Consume the signal */
94664f615feSPeter Xu             if (read(mis->userfault_event_fd, &tmp64, 8) != 8) {
94764f615feSPeter Xu                 /* Nothing obviously nicer than posting this error. */
94864f615feSPeter Xu                 error_report("%s: read() failed", __func__);
94964f615feSPeter Xu             }
95064f615feSPeter Xu 
951d73415a3SStefan Hajnoczi             if (qatomic_read(&mis->fault_thread_quit)) {
952c4faeed2SDr. David Alan Gilbert                 trace_postcopy_ram_fault_thread_quit();
953c4faeed2SDr. David Alan Gilbert                 break;
954c4faeed2SDr. David Alan Gilbert             }
95564f615feSPeter Xu         }
956c4faeed2SDr. David Alan Gilbert 
95700fa4fc8SDr. David Alan Gilbert         if (pfd[0].revents) {
95800fa4fc8SDr. David Alan Gilbert             poll_result--;
959c4faeed2SDr. David Alan Gilbert             ret = read(mis->userfault_fd, &msg, sizeof(msg));
960c4faeed2SDr. David Alan Gilbert             if (ret != sizeof(msg)) {
961c4faeed2SDr. David Alan Gilbert                 if (errno == EAGAIN) {
962c4faeed2SDr. David Alan Gilbert                     /*
963c4faeed2SDr. David Alan Gilbert                      * If a wakeup happens on the other thread just after
964c4faeed2SDr. David Alan Gilbert                      * the poll, there is nothing to read.
965c4faeed2SDr. David Alan Gilbert                      */
966c4faeed2SDr. David Alan Gilbert                     continue;
967c4faeed2SDr. David Alan Gilbert                 }
968c4faeed2SDr. David Alan Gilbert                 if (ret < 0) {
96900fa4fc8SDr. David Alan Gilbert                     error_report("%s: Failed to read full userfault "
97000fa4fc8SDr. David Alan Gilbert                                  "message: %s",
971c4faeed2SDr. David Alan Gilbert                                  __func__, strerror(errno));
972c4faeed2SDr. David Alan Gilbert                     break;
973c4faeed2SDr. David Alan Gilbert                 } else {
97400fa4fc8SDr. David Alan Gilbert                     error_report("%s: Read %d bytes from userfaultfd "
97500fa4fc8SDr. David Alan Gilbert                                  "expected %zd",
976c4faeed2SDr. David Alan Gilbert                                  __func__, ret, sizeof(msg));
977c4faeed2SDr. David Alan Gilbert                     break; /* Lost alignment, don't know what we'd read next */
978c4faeed2SDr. David Alan Gilbert                 }
979c4faeed2SDr. David Alan Gilbert             }
980c4faeed2SDr. David Alan Gilbert             if (msg.event != UFFD_EVENT_PAGEFAULT) {
981c4faeed2SDr. David Alan Gilbert                 error_report("%s: Read unexpected event %u from userfaultfd",
982c4faeed2SDr. David Alan Gilbert                              __func__, msg.event);
983c4faeed2SDr. David Alan Gilbert                 continue; /* It's not a page fault, shouldn't happen */
984c4faeed2SDr. David Alan Gilbert             }
985c4faeed2SDr. David Alan Gilbert 
986c4faeed2SDr. David Alan Gilbert             rb = qemu_ram_block_from_host(
987c4faeed2SDr. David Alan Gilbert                      (void *)(uintptr_t)msg.arg.pagefault.address,
988f615f396SPaolo Bonzini                      true, &rb_offset);
989c4faeed2SDr. David Alan Gilbert             if (!rb) {
990c4faeed2SDr. David Alan Gilbert                 error_report("postcopy_ram_fault_thread: Fault outside guest: %"
991c4faeed2SDr. David Alan Gilbert                              PRIx64, (uint64_t)msg.arg.pagefault.address);
992c4faeed2SDr. David Alan Gilbert                 break;
993c4faeed2SDr. David Alan Gilbert             }
994c4faeed2SDr. David Alan Gilbert 
995*7648297dSDavid Hildenbrand             rb_offset = ROUND_DOWN(rb_offset, qemu_ram_pagesize(rb));
996c4faeed2SDr. David Alan Gilbert             trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address,
997c4faeed2SDr. David Alan Gilbert                                                 qemu_ram_get_idstr(rb),
998575b0b33SAlexey Perevalov                                                 rb_offset,
999575b0b33SAlexey Perevalov                                                 msg.arg.pagefault.feat.ptid);
1000575b0b33SAlexey Perevalov             mark_postcopy_blocktime_begin(
1001575b0b33SAlexey Perevalov                     (uintptr_t)(msg.arg.pagefault.address),
1002575b0b33SAlexey Perevalov                                 msg.arg.pagefault.feat.ptid, rb);
1003575b0b33SAlexey Perevalov 
10043a7804c3SPeter Xu retry:
1005c4faeed2SDr. David Alan Gilbert             /*
1006c4faeed2SDr. David Alan Gilbert              * Send the request to the source - we want to request one
1007c4faeed2SDr. David Alan Gilbert              * of our host page sizes (which is >= TPS)
1008c4faeed2SDr. David Alan Gilbert              */
10099470c5e0SDavid Hildenbrand             ret = postcopy_request_page(mis, rb, rb_offset,
10108f8bfffcSPeter Xu                                         msg.arg.pagefault.address);
10113a7804c3SPeter Xu             if (ret) {
10123a7804c3SPeter Xu                 /* Maybe a network failure; try to wait for recovery */
10133a7804c3SPeter Xu                 if (ret == -EIO && postcopy_pause_fault_thread(mis)) {
10143a7804c3SPeter Xu                     /* We got reconnected somehow, try to continue */
10153a7804c3SPeter Xu                     goto retry;
10163a7804c3SPeter Xu                 } else {
10173a7804c3SPeter Xu                     /* This is an unrecoverable failure */
10189470c5e0SDavid Hildenbrand             error_report("%s: postcopy_request_page() returned %d",
10193a7804c3SPeter Xu                                  __func__, ret);
10203a7804c3SPeter Xu                     break;
10213a7804c3SPeter Xu                 }
1022c4faeed2SDr. David Alan Gilbert             }
1023c4faeed2SDr. David Alan Gilbert         }
102400fa4fc8SDr. David Alan Gilbert 
102500fa4fc8SDr. David Alan Gilbert         /* Now handle any requests from external processes on shared memory */
102600fa4fc8SDr. David Alan Gilbert         /* TODO: May need to handle devices deregistering during postcopy */
102700fa4fc8SDr. David Alan Gilbert         for (index = 2; index < pfd_len && poll_result; index++) {
102800fa4fc8SDr. David Alan Gilbert             if (pfd[index].revents) {
102900fa4fc8SDr. David Alan Gilbert                 struct PostCopyFD *pcfd =
103000fa4fc8SDr. David Alan Gilbert                     &g_array_index(mis->postcopy_remote_fds,
103100fa4fc8SDr. David Alan Gilbert                                    struct PostCopyFD, index - 2);
103200fa4fc8SDr. David Alan Gilbert 
103300fa4fc8SDr. David Alan Gilbert                 poll_result--;
103400fa4fc8SDr. David Alan Gilbert                 if (pfd[index].revents & POLLERR) {
103500fa4fc8SDr. David Alan Gilbert                     error_report("%s: POLLERR on poll %zd fd=%d",
103600fa4fc8SDr. David Alan Gilbert                                  __func__, index, pcfd->fd);
103700fa4fc8SDr. David Alan Gilbert                     pfd[index].events = 0;
103800fa4fc8SDr. David Alan Gilbert                     continue;
103900fa4fc8SDr. David Alan Gilbert                 }
104000fa4fc8SDr. David Alan Gilbert 
104100fa4fc8SDr. David Alan Gilbert                 ret = read(pcfd->fd, &msg, sizeof(msg));
104200fa4fc8SDr. David Alan Gilbert                 if (ret != sizeof(msg)) {
104300fa4fc8SDr. David Alan Gilbert                     if (errno == EAGAIN) {
104400fa4fc8SDr. David Alan Gilbert                         /*
104500fa4fc8SDr. David Alan Gilbert                          * If a wakeup happens on the other thread just after
104600fa4fc8SDr. David Alan Gilbert                          * the poll, there is nothing to read.
104700fa4fc8SDr. David Alan Gilbert                          */
104800fa4fc8SDr. David Alan Gilbert                         continue;
104900fa4fc8SDr. David Alan Gilbert                     }
105000fa4fc8SDr. David Alan Gilbert                     if (ret < 0) {
105100fa4fc8SDr. David Alan Gilbert                         error_report("%s: Failed to read full userfault "
105200fa4fc8SDr. David Alan Gilbert                                      "message: %s (shared) revents=%d",
105300fa4fc8SDr. David Alan Gilbert                                      __func__, strerror(errno),
105400fa4fc8SDr. David Alan Gilbert                                      pfd[index].revents);
105500fa4fc8SDr. David Alan Gilbert                         /* TODO: Could just disable this sharer */
105600fa4fc8SDr. David Alan Gilbert                         break;
105700fa4fc8SDr. David Alan Gilbert                     } else {
105800fa4fc8SDr. David Alan Gilbert                         error_report("%s: Read %d bytes from userfaultfd "
105900fa4fc8SDr. David Alan Gilbert                                      "expected %zd (shared)",
106000fa4fc8SDr. David Alan Gilbert                                      __func__, ret, sizeof(msg));
106100fa4fc8SDr. David Alan Gilbert                         /* TODO: Could just disable this sharer */
106200fa4fc8SDr. David Alan Gilbert                         break; /* Lost alignment, don't know what we'd read next */
106300fa4fc8SDr. David Alan Gilbert                     }
106400fa4fc8SDr. David Alan Gilbert                 }
106500fa4fc8SDr. David Alan Gilbert                 if (msg.event != UFFD_EVENT_PAGEFAULT) {
106600fa4fc8SDr. David Alan Gilbert                     error_report("%s: Read unexpected event %u "
106700fa4fc8SDr. David Alan Gilbert                                  "from userfaultfd (shared)",
106800fa4fc8SDr. David Alan Gilbert                                  __func__, msg.event);
106900fa4fc8SDr. David Alan Gilbert                     continue; /* It's not a page fault, shouldn't happen */
107000fa4fc8SDr. David Alan Gilbert                 }
107100fa4fc8SDr. David Alan Gilbert                 /* Call the device handler registered with us */
107200fa4fc8SDr. David Alan Gilbert                 ret = pcfd->handler(pcfd, &msg);
107300fa4fc8SDr. David Alan Gilbert                 if (ret) {
107400fa4fc8SDr. David Alan Gilbert                     error_report("%s: Failed to resolve shared fault on %zd/%s",
107500fa4fc8SDr. David Alan Gilbert                                  __func__, index, pcfd->idstr);
107600fa4fc8SDr. David Alan Gilbert                     /* TODO: Fail? Disable this sharer? */
107700fa4fc8SDr. David Alan Gilbert                 }
107800fa4fc8SDr. David Alan Gilbert             }
107900fa4fc8SDr. David Alan Gilbert         }
108000fa4fc8SDr. David Alan Gilbert     }
108174637e6fSLidong Chen     rcu_unregister_thread();
1082c4faeed2SDr. David Alan Gilbert     trace_postcopy_ram_fault_thread_exit();
1083fc6008f3SMarc-André Lureau     g_free(pfd);
1084f0a227adSDr. David Alan Gilbert     return NULL;
1085f0a227adSDr. David Alan Gilbert }
1086f0a227adSDr. David Alan Gilbert 
10872a7eb148SWei Yang int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
1088f0a227adSDr. David Alan Gilbert {
1089c4faeed2SDr. David Alan Gilbert     /* Open the fd for the kernel to give us userfaults */
1090c4faeed2SDr. David Alan Gilbert     mis->userfault_fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
1091c4faeed2SDr. David Alan Gilbert     if (mis->userfault_fd == -1) {
1092c4faeed2SDr. David Alan Gilbert         error_report("%s: Failed to open userfault fd: %s", __func__,
1093c4faeed2SDr. David Alan Gilbert                      strerror(errno));
1094c4faeed2SDr. David Alan Gilbert         return -1;
1095c4faeed2SDr. David Alan Gilbert     }
1096c4faeed2SDr. David Alan Gilbert 
1097c4faeed2SDr. David Alan Gilbert     /*
1098c4faeed2SDr. David Alan Gilbert      * Although the host check already tested the API, we need to
1099c4faeed2SDr. David Alan Gilbert      * do the check again as an ABI handshake on the new fd.
1100c4faeed2SDr. David Alan Gilbert      */
110154ae0886SAlexey Perevalov     if (!ufd_check_and_apply(mis->userfault_fd, mis)) {
1102c4faeed2SDr. David Alan Gilbert         return -1;
1103c4faeed2SDr. David Alan Gilbert     }
1104c4faeed2SDr. David Alan Gilbert 
1105c4faeed2SDr. David Alan Gilbert     /* Now an eventfd we use to tell the fault-thread to quit */
110664f615feSPeter Xu     mis->userfault_event_fd = eventfd(0, EFD_CLOEXEC);
110764f615feSPeter Xu     if (mis->userfault_event_fd == -1) {
110864f615feSPeter Xu         error_report("%s: Opening userfault_event_fd: %s", __func__,
1109c4faeed2SDr. David Alan Gilbert                      strerror(errno));
1110c4faeed2SDr. David Alan Gilbert         close(mis->userfault_fd);
1111c4faeed2SDr. David Alan Gilbert         return -1;
1112c4faeed2SDr. David Alan Gilbert     }
1113c4faeed2SDr. David Alan Gilbert 
1114f0a227adSDr. David Alan Gilbert     qemu_sem_init(&mis->fault_thread_sem, 0);
1115f0a227adSDr. David Alan Gilbert     qemu_thread_create(&mis->fault_thread, "postcopy/fault",
1116f0a227adSDr. David Alan Gilbert                        postcopy_ram_fault_thread, mis, QEMU_THREAD_JOINABLE);
1117f0a227adSDr. David Alan Gilbert     qemu_sem_wait(&mis->fault_thread_sem);
1118f0a227adSDr. David Alan Gilbert     qemu_sem_destroy(&mis->fault_thread_sem);
1119c4faeed2SDr. David Alan Gilbert     mis->have_fault_thread = true;
1120f0a227adSDr. David Alan Gilbert 
1121f0a227adSDr. David Alan Gilbert     /* Mark so that we get notified of accesses to unwritten areas */
1122fbd162e6SYury Kotov     if (foreach_not_ignored_block(ram_block_enable_notify, mis)) {
112391b02dc7SFei Li         error_report("ram_block_enable_notify failed");
1124f0a227adSDr. David Alan Gilbert         return -1;
1125f0a227adSDr. David Alan Gilbert     }
1126f0a227adSDr. David Alan Gilbert 
11273414322aSWei Yang     mis->postcopy_tmp_page = mmap(NULL, mis->largest_page_size,
11283414322aSWei Yang                                   PROT_READ | PROT_WRITE, MAP_PRIVATE |
11293414322aSWei Yang                                   MAP_ANONYMOUS, -1, 0);
11303414322aSWei Yang     if (mis->postcopy_tmp_page == MAP_FAILED) {
11313414322aSWei Yang         mis->postcopy_tmp_page = NULL;
11323414322aSWei Yang         error_report("%s: Failed to map postcopy_tmp_page %s",
11333414322aSWei Yang                      __func__, strerror(errno));
11343414322aSWei Yang         return -1;
11353414322aSWei Yang     }
11363414322aSWei Yang 
1137371ff5a3SDr. David Alan Gilbert     /*
11386629890dSWei Yang      * Map large zero page when kernel can't use UFFDIO_ZEROPAGE for hugepages
11396629890dSWei Yang      */
11406629890dSWei Yang     mis->postcopy_tmp_zero_page = mmap(NULL, mis->largest_page_size,
11416629890dSWei Yang                                        PROT_READ | PROT_WRITE,
11426629890dSWei Yang                                        MAP_PRIVATE | MAP_ANONYMOUS,
11436629890dSWei Yang                                        -1, 0);
11446629890dSWei Yang     if (mis->postcopy_tmp_zero_page == MAP_FAILED) {
11456629890dSWei Yang         int e = errno;
11466629890dSWei Yang         mis->postcopy_tmp_zero_page = NULL;
11476629890dSWei Yang         error_report("%s: Failed to map large zero page %s",
11486629890dSWei Yang                      __func__, strerror(e));
11496629890dSWei Yang         return -e;
11506629890dSWei Yang     }
11516629890dSWei Yang     memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size);
11526629890dSWei Yang 
1153c4faeed2SDr. David Alan Gilbert     trace_postcopy_ram_enable_notify();
1154c4faeed2SDr. David Alan Gilbert 
1155f0a227adSDr. David Alan Gilbert     return 0;
1156f0a227adSDr. David Alan Gilbert }
1157f0a227adSDr. David Alan Gilbert 
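/*
 * Illustrative sketch of the ABI handshake mentioned above: at its core it is
 * a UFFDIO_API ioctl on the freshly opened userfaultfd.  The real checks live
 * in ufd_check_and_apply() earlier in this file and look at more features
 * than this minimal outline does; the function name below is hypothetical.
 */
static bool example_uffd_api_handshake(int ufd)
{
    struct uffdio_api api_struct = {
        .api = UFFD_API,
        .features = 0,          /* no optional features requested here */
    };

    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("%s: UFFDIO_API failed: %s", __func__, strerror(errno));
        return false;
    }
    /* api_struct.features now reports what this kernel can offer */
    return true;
}
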
1158eef621c4SPeter Xu static int qemu_ufd_copy_ioctl(MigrationIncomingState *mis, void *host_addr,
1159f9494614SAlexey Perevalov                                void *from_addr, uint64_t pagesize, RAMBlock *rb)
1160727b9d7eSAlexey Perevalov {
1161eef621c4SPeter Xu     int userfault_fd = mis->userfault_fd;
1162f9494614SAlexey Perevalov     int ret;
1163eef621c4SPeter Xu 
1164727b9d7eSAlexey Perevalov     if (from_addr) {
1165727b9d7eSAlexey Perevalov         struct uffdio_copy copy_struct;
1166727b9d7eSAlexey Perevalov         copy_struct.dst = (uint64_t)(uintptr_t)host_addr;
1167727b9d7eSAlexey Perevalov         copy_struct.src = (uint64_t)(uintptr_t)from_addr;
1168727b9d7eSAlexey Perevalov         copy_struct.len = pagesize;
1169727b9d7eSAlexey Perevalov         copy_struct.mode = 0;
1170f9494614SAlexey Perevalov         ret = ioctl(userfault_fd, UFFDIO_COPY, &copy_struct);
1171727b9d7eSAlexey Perevalov     } else {
1172727b9d7eSAlexey Perevalov         struct uffdio_zeropage zero_struct;
1173727b9d7eSAlexey Perevalov         zero_struct.range.start = (uint64_t)(uintptr_t)host_addr;
1174727b9d7eSAlexey Perevalov         zero_struct.range.len = pagesize;
1175727b9d7eSAlexey Perevalov         zero_struct.mode = 0;
1176f9494614SAlexey Perevalov         ret = ioctl(userfault_fd, UFFDIO_ZEROPAGE, &zero_struct);
1177727b9d7eSAlexey Perevalov     }
1178f9494614SAlexey Perevalov     if (!ret) {
11798f8bfffcSPeter Xu         qemu_mutex_lock(&mis->page_request_mutex);
1180f9494614SAlexey Perevalov         ramblock_recv_bitmap_set_range(rb, host_addr,
1181f9494614SAlexey Perevalov                                        pagesize / qemu_target_page_size());
11828f8bfffcSPeter Xu         /*
11838f8bfffcSPeter Xu          * If this page resolves a page fault for a previous recorded faulted
11848f8bfffcSPeter Xu          * address, take a special note to maintain the requested page list.
11858f8bfffcSPeter Xu          */
11868f8bfffcSPeter Xu         if (g_tree_lookup(mis->page_requested, host_addr)) {
11878f8bfffcSPeter Xu             g_tree_remove(mis->page_requested, host_addr);
11888f8bfffcSPeter Xu             mis->page_requested_count--;
11898f8bfffcSPeter Xu             trace_postcopy_page_req_del(host_addr, mis->page_requested_count);
11908f8bfffcSPeter Xu         }
11918f8bfffcSPeter Xu         qemu_mutex_unlock(&mis->page_request_mutex);
1192575b0b33SAlexey Perevalov         mark_postcopy_blocktime_end((uintptr_t)host_addr);
1193f9494614SAlexey Perevalov     }
1194f9494614SAlexey Perevalov     return ret;
1195727b9d7eSAlexey Perevalov }
1196727b9d7eSAlexey Perevalov 
1197d488b349SDr. David Alan Gilbert int postcopy_notify_shared_wake(RAMBlock *rb, uint64_t offset)
1198d488b349SDr. David Alan Gilbert {
1199d488b349SDr. David Alan Gilbert     int i;
1200d488b349SDr. David Alan Gilbert     MigrationIncomingState *mis = migration_incoming_get_current();
1201d488b349SDr. David Alan Gilbert     GArray *pcrfds = mis->postcopy_remote_fds;
1202d488b349SDr. David Alan Gilbert 
1203d488b349SDr. David Alan Gilbert     for (i = 0; i < pcrfds->len; i++) {
1204d488b349SDr. David Alan Gilbert         struct PostCopyFD *cur = &g_array_index(pcrfds, struct PostCopyFD, i);
1205d488b349SDr. David Alan Gilbert         int ret = cur->waker(cur, rb, offset);
1206d488b349SDr. David Alan Gilbert         if (ret) {
1207d488b349SDr. David Alan Gilbert             return ret;
1208d488b349SDr. David Alan Gilbert         }
1209d488b349SDr. David Alan Gilbert     }
1210d488b349SDr. David Alan Gilbert     return 0;
1211d488b349SDr. David Alan Gilbert }
1212d488b349SDr. David Alan Gilbert 
1213696ed9a9SDr. David Alan Gilbert /*
1214696ed9a9SDr. David Alan Gilbert  * Place a host page (from) at (host) atomically
1215696ed9a9SDr. David Alan Gilbert  * returns 0 on success
1216696ed9a9SDr. David Alan Gilbert  */
1217df9ff5e1SDr. David Alan Gilbert int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
12188be4620bSAlexey Perevalov                         RAMBlock *rb)
1219696ed9a9SDr. David Alan Gilbert {
12208be4620bSAlexey Perevalov     size_t pagesize = qemu_ram_pagesize(rb);
1221696ed9a9SDr. David Alan Gilbert 
1222696ed9a9SDr. David Alan Gilbert     /* The copy also acks to the kernel, waking the stalled thread up.
1223696ed9a9SDr. David Alan Gilbert      * TODO: We could inhibit that ack and only issue it when requested
1224696ed9a9SDr. David Alan Gilbert      * (slightly cheaper), but we'd have to be careful about the order of
1225696ed9a9SDr. David Alan Gilbert      * updating our page state; see the illustrative sketch after this function.
1226696ed9a9SDr. David Alan Gilbert      */
1227eef621c4SPeter Xu     if (qemu_ufd_copy_ioctl(mis, host, from, pagesize, rb)) {
1228696ed9a9SDr. David Alan Gilbert         int e = errno;
1229df9ff5e1SDr. David Alan Gilbert         error_report("%s: %s copy host: %p from: %p (size: %zd)",
1230df9ff5e1SDr. David Alan Gilbert                      __func__, strerror(e), host, from, pagesize);
1231696ed9a9SDr. David Alan Gilbert 
1232696ed9a9SDr. David Alan Gilbert         return -e;
1233696ed9a9SDr. David Alan Gilbert     }
1234696ed9a9SDr. David Alan Gilbert 
1235696ed9a9SDr. David Alan Gilbert     trace_postcopy_place_page(host);
1236dedfb4b2SDr. David Alan Gilbert     return postcopy_notify_shared_wake(rb,
1237dedfb4b2SDr. David Alan Gilbert                                        qemu_ram_block_host_offset(rb, host));
1238696ed9a9SDr. David Alan Gilbert }
1239696ed9a9SDr. David Alan Gilbert 
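/*
 * Illustrative sketch of the TODO above: UFFDIO_COPY can be asked not to wake
 * the faulting thread (UFFDIO_COPY_MODE_DONTWAKE), with the wake issued
 * separately via UFFDIO_WAKE once our own bookkeeping is done.  This is a
 * hypothetical variant, not what postcopy_place_page() does today, and the
 * function name is made up for illustration.
 */
static int example_copy_then_wake(int ufd, void *host, void *from,
                                  uint64_t pagesize)
{
    struct uffdio_copy copy = {
        .dst = (uint64_t)(uintptr_t)host,
        .src = (uint64_t)(uintptr_t)from,
        .len = pagesize,
        .mode = UFFDIO_COPY_MODE_DONTWAKE,  /* place the page, don't wake */
    };
    struct uffdio_range range = {
        .start = (uint64_t)(uintptr_t)host,
        .len = pagesize,
    };

    if (ioctl(ufd, UFFDIO_COPY, &copy)) {
        return -errno;
    }
    /* ... update receive bitmap / page-request state here ... */
    return ioctl(ufd, UFFDIO_WAKE, &range) ? -errno : 0;
}
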
1240696ed9a9SDr. David Alan Gilbert /*
1241696ed9a9SDr. David Alan Gilbert  * Place a zero page at (host) atomically
1242696ed9a9SDr. David Alan Gilbert  * returns 0 on success
1243696ed9a9SDr. David Alan Gilbert  */
1244df9ff5e1SDr. David Alan Gilbert int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
12458be4620bSAlexey Perevalov                              RAMBlock *rb)
1246696ed9a9SDr. David Alan Gilbert {
12472ce16640SDr. David Alan Gilbert     size_t pagesize = qemu_ram_pagesize(rb);
1248df9ff5e1SDr. David Alan Gilbert     trace_postcopy_place_page_zero(host);
1249696ed9a9SDr. David Alan Gilbert 
12502ce16640SDr. David Alan Gilbert     /* Normal RAMBlocks can zero a page using UFFDIO_ZEROPAGE
12512ce16640SDr. David Alan Gilbert      * but it's not available for everything (e.g. hugetlbpages)
12522ce16640SDr. David Alan Gilbert      */
12532ce16640SDr. David Alan Gilbert     if (qemu_ram_is_uf_zeroable(rb)) {
1254eef621c4SPeter Xu         if (qemu_ufd_copy_ioctl(mis, host, NULL, pagesize, rb)) {
1255696ed9a9SDr. David Alan Gilbert             int e = errno;
1256696ed9a9SDr. David Alan Gilbert             error_report("%s: %s zero host: %p",
1257696ed9a9SDr. David Alan Gilbert                          __func__, strerror(e), host);
1258696ed9a9SDr. David Alan Gilbert 
1259696ed9a9SDr. David Alan Gilbert             return -e;
1260696ed9a9SDr. David Alan Gilbert         }
1261dedfb4b2SDr. David Alan Gilbert         return postcopy_notify_shared_wake(rb,
1262dedfb4b2SDr. David Alan Gilbert                                            qemu_ram_block_host_offset(rb,
1263dedfb4b2SDr. David Alan Gilbert                                                                       host));
1264df9ff5e1SDr. David Alan Gilbert     } else {
12656629890dSWei Yang         return postcopy_place_page(mis, host, mis->postcopy_tmp_zero_page, rb);
1266df9ff5e1SDr. David Alan Gilbert     }
1267696ed9a9SDr. David Alan Gilbert }
1268696ed9a9SDr. David Alan Gilbert 
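/*
 * Hypothetical caller sketch (not lifted from ram.c): once the incoming side
 * has assembled a full host page in mis->postcopy_tmp_page it can be placed
 * atomically, and an all-zero page can take the cheaper zero path.  The zero
 * detection shown (buffer_is_zero(), from qemu/cutils.h) and the function
 * name are assumptions made purely for illustration.
 */
static int example_place_received_page(MigrationIncomingState *mis,
                                       RAMBlock *rb, void *host_addr)
{
    if (buffer_is_zero(mis->postcopy_tmp_page, qemu_ram_pagesize(rb))) {
        return postcopy_place_page_zero(mis, host_addr, rb);
    }
    return postcopy_place_page(mis, host_addr, mis->postcopy_tmp_page, rb);
}
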
1269eb59db53SDr. David Alan Gilbert #else
1270eb59db53SDr. David Alan Gilbert /* No target OS support, stubs just fail */
127165ace060SAlexey Perevalov void fill_destination_postcopy_migration_info(MigrationInfo *info)
127265ace060SAlexey Perevalov {
127365ace060SAlexey Perevalov }
127465ace060SAlexey Perevalov 
1275d7651f15SAlexey Perevalov bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
1276eb59db53SDr. David Alan Gilbert {
1277eb59db53SDr. David Alan Gilbert     error_report("%s: No OS support", __func__);
1278eb59db53SDr. David Alan Gilbert     return false;
1279eb59db53SDr. David Alan Gilbert }
1280eb59db53SDr. David Alan Gilbert 
1281c136180cSDavid Hildenbrand int postcopy_ram_incoming_init(MigrationIncomingState *mis)
12821caddf8aSDr. David Alan Gilbert {
12831caddf8aSDr. David Alan Gilbert     error_report("postcopy_ram_incoming_init: No OS support");
12841caddf8aSDr. David Alan Gilbert     return -1;
12851caddf8aSDr. David Alan Gilbert }
12861caddf8aSDr. David Alan Gilbert 
12871caddf8aSDr. David Alan Gilbert int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
12881caddf8aSDr. David Alan Gilbert {
12891caddf8aSDr. David Alan Gilbert     assert(0);
12901caddf8aSDr. David Alan Gilbert     return -1;
12911caddf8aSDr. David Alan Gilbert }
12921caddf8aSDr. David Alan Gilbert 
1293f9527107SDr. David Alan Gilbert int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
1294f9527107SDr. David Alan Gilbert {
1295f9527107SDr. David Alan Gilbert     assert(0);
1296f9527107SDr. David Alan Gilbert     return -1;
1297f9527107SDr. David Alan Gilbert }
1298f9527107SDr. David Alan Gilbert 
1299c188c539SMichael S. Tsirkin int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb,
1300c188c539SMichael S. Tsirkin                                  uint64_t client_addr, uint64_t rb_offset)
1301c188c539SMichael S. Tsirkin {
1302c188c539SMichael S. Tsirkin     assert(0);
1303c188c539SMichael S. Tsirkin     return -1;
1304c188c539SMichael S. Tsirkin }
1305c188c539SMichael S. Tsirkin 
13062a7eb148SWei Yang int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
1307f0a227adSDr. David Alan Gilbert {
1308f0a227adSDr. David Alan Gilbert     assert(0);
1309f0a227adSDr. David Alan Gilbert     return -1;
1310f0a227adSDr. David Alan Gilbert }
1311696ed9a9SDr. David Alan Gilbert 
1312df9ff5e1SDr. David Alan Gilbert int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
13138be4620bSAlexey Perevalov                         RAMBlock *rb)
1314696ed9a9SDr. David Alan Gilbert {
1315696ed9a9SDr. David Alan Gilbert     assert(0);
1316696ed9a9SDr. David Alan Gilbert     return -1;
1317696ed9a9SDr. David Alan Gilbert }
1318696ed9a9SDr. David Alan Gilbert 
1319df9ff5e1SDr. David Alan Gilbert int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
13208be4620bSAlexey Perevalov                         RAMBlock *rb)
1321696ed9a9SDr. David Alan Gilbert {
1322696ed9a9SDr. David Alan Gilbert     assert(0);
1323696ed9a9SDr. David Alan Gilbert     return -1;
1324696ed9a9SDr. David Alan Gilbert }
1325696ed9a9SDr. David Alan Gilbert 
13265efc3564SDr. David Alan Gilbert int postcopy_wake_shared(struct PostCopyFD *pcfd,
13275efc3564SDr. David Alan Gilbert                          uint64_t client_addr,
13285efc3564SDr. David Alan Gilbert                          RAMBlock *rb)
13295efc3564SDr. David Alan Gilbert {
13305efc3564SDr. David Alan Gilbert     assert(0);
13315efc3564SDr. David Alan Gilbert     return -1;
13325efc3564SDr. David Alan Gilbert }
1333eb59db53SDr. David Alan Gilbert #endif
1334eb59db53SDr. David Alan Gilbert 
1335e0b266f0SDr. David Alan Gilbert /* ------------------------------------------------------------------------- */
1336e0b266f0SDr. David Alan Gilbert 
13379ab7ef9bSPeter Xu void postcopy_fault_thread_notify(MigrationIncomingState *mis)
13389ab7ef9bSPeter Xu {
13399ab7ef9bSPeter Xu     uint64_t tmp64 = 1;
13409ab7ef9bSPeter Xu 
13419ab7ef9bSPeter Xu     /*
13429ab7ef9bSPeter Xu      * Wake up the fault_thread.  It's an eventfd that should currently
13439ab7ef9bSPeter Xu      * be at 0; we're going to increment it to 1.
13449ab7ef9bSPeter Xu      */
13459ab7ef9bSPeter Xu     if (write(mis->userfault_event_fd, &tmp64, 8) != 8) {
13469ab7ef9bSPeter Xu         /* Not much we can do here, but may as well report it */
13479ab7ef9bSPeter Xu         error_report("%s: incrementing failed: %s", __func__,
13489ab7ef9bSPeter Xu                      strerror(errno));
13499ab7ef9bSPeter Xu     }
13509ab7ef9bSPeter Xu }
13519ab7ef9bSPeter Xu 
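/*
 * Sketch of the shutdown handshake with the fault thread: set the quit flag
 * first, then poke the eventfd so the thread's poll() returns and it notices
 * the flag.  The real teardown (postcopy_ram_incoming_cleanup(), earlier in
 * this file) does this before joining the thread; the helper name here is
 * hypothetical.
 */
static void example_ask_fault_thread_to_quit(MigrationIncomingState *mis)
{
    qatomic_set(&mis->fault_thread_quit, 1);    /* flag first ... */
    postcopy_fault_thread_notify(mis);          /* ... then wake the poll() */
}
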
1352e0b266f0SDr. David Alan Gilbert /**
1353e0b266f0SDr. David Alan Gilbert  * postcopy_discard_send_init: Called at the start of each RAMBlock before
1354e0b266f0SDr. David Alan Gilbert  *   asking to discard individual ranges.
1355e0b266f0SDr. David Alan Gilbert  *
1356e0b266f0SDr. David Alan Gilbert  * @ms: The current migration state.
1358e0b266f0SDr. David Alan Gilbert  * @name: RAMBlock that discards will operate on.
1359e0b266f0SDr. David Alan Gilbert  */
1360810cf2bbSWei Yang static PostcopyDiscardState pds = {0};
1361810cf2bbSWei Yang void postcopy_discard_send_init(MigrationState *ms, const char *name)
1362e0b266f0SDr. David Alan Gilbert {
1363810cf2bbSWei Yang     pds.ramblock_name = name;
1364810cf2bbSWei Yang     pds.cur_entry = 0;
1365810cf2bbSWei Yang     pds.nsentwords = 0;
1366810cf2bbSWei Yang     pds.nsentcmds = 0;
1367e0b266f0SDr. David Alan Gilbert }
1368e0b266f0SDr. David Alan Gilbert 
1369e0b266f0SDr. David Alan Gilbert /**
1370e0b266f0SDr. David Alan Gilbert  * postcopy_discard_send_range: Called by the bitmap code for each chunk to
1371e0b266f0SDr. David Alan Gilbert  *   discard. May send a discard message, may just leave it queued to
1372e0b266f0SDr. David Alan Gilbert  *   be sent later.
1373e0b266f0SDr. David Alan Gilbert  *
1374e0b266f0SDr. David Alan Gilbert  * @ms: Current migration state.
1375e0b266f0SDr. David Alan Gilbert  * @start,@length: a range of pages in the migration bitmap in the
1376e0b266f0SDr. David Alan Gilbert  *   RAM block passed to postcopy_discard_send_init() (length=1 is one page)
1377e0b266f0SDr. David Alan Gilbert  */
1378810cf2bbSWei Yang void postcopy_discard_send_range(MigrationState *ms, unsigned long start,
1379810cf2bbSWei Yang                                  unsigned long length)
1380e0b266f0SDr. David Alan Gilbert {
138120afaed9SJuan Quintela     size_t tp_size = qemu_target_page_size();
1382e0b266f0SDr. David Alan Gilbert     /* Convert to byte offsets within the RAM block */
1383810cf2bbSWei Yang     pds.start_list[pds.cur_entry] = start  * tp_size;
1384810cf2bbSWei Yang     pds.length_list[pds.cur_entry] = length * tp_size;
1385810cf2bbSWei Yang     trace_postcopy_discard_send_range(pds.ramblock_name, start, length);
1386810cf2bbSWei Yang     pds.cur_entry++;
1387810cf2bbSWei Yang     pds.nsentwords++;
1388e0b266f0SDr. David Alan Gilbert 
1389810cf2bbSWei Yang     if (pds.cur_entry == MAX_DISCARDS_PER_COMMAND) {
1390e0b266f0SDr. David Alan Gilbert         /* Full set, ship it! */
139189a02a9fSzhanghailiang         qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
1392810cf2bbSWei Yang                                               pds.ramblock_name,
1393810cf2bbSWei Yang                                               pds.cur_entry,
1394810cf2bbSWei Yang                                               pds.start_list,
1395810cf2bbSWei Yang                                               pds.length_list);
1396810cf2bbSWei Yang         pds.nsentcmds++;
1397810cf2bbSWei Yang         pds.cur_entry = 0;
1398e0b266f0SDr. David Alan Gilbert     }
1399e0b266f0SDr. David Alan Gilbert }
1400e0b266f0SDr. David Alan Gilbert 
1401e0b266f0SDr. David Alan Gilbert /**
1402e0b266f0SDr. David Alan Gilbert  * postcopy_discard_send_finish: Called at the end of each RAMBlock by the
1403e0b266f0SDr. David Alan Gilbert  * bitmap code. Sends any outstanding discard messages.
1404e0b266f0SDr. David Alan Gilbert  *
1405e0b266f0SDr. David Alan Gilbert  * @ms: Current migration state.
1406e0b266f0SDr. David Alan Gilbert  */
1407810cf2bbSWei Yang void postcopy_discard_send_finish(MigrationState *ms)
1408e0b266f0SDr. David Alan Gilbert {
1409e0b266f0SDr. David Alan Gilbert     /* Anything unsent? */
1410810cf2bbSWei Yang     if (pds.cur_entry) {
141189a02a9fSzhanghailiang         qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
1412810cf2bbSWei Yang                                               pds.ramblock_name,
1413810cf2bbSWei Yang                                               pds.cur_entry,
1414810cf2bbSWei Yang                                               pds.start_list,
1415810cf2bbSWei Yang                                               pds.length_list);
1416810cf2bbSWei Yang         pds.nsentcmds++;
1417e0b266f0SDr. David Alan Gilbert     }
1418e0b266f0SDr. David Alan Gilbert 
1419810cf2bbSWei Yang     trace_postcopy_discard_send_finish(pds.ramblock_name, pds.nsentwords,
1420810cf2bbSWei Yang                                        pds.nsentcmds);
1421e0b266f0SDr. David Alan Gilbert }
1422bac3b212SJuan Quintela 
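/*
 * Hypothetical caller sketch: roughly how the dirty-bitmap walk on the source
 * side drives the three helpers above for one RAMBlock.  The bitmap iteration
 * itself is elided and the function name is made up; 'start' and 'npages' are
 * in target pages, as postcopy_discard_send_range() expects.
 */
static void example_send_discards_for_block(MigrationState *ms,
                                            const char *block_name,
                                            unsigned long start,
                                            unsigned long npages)
{
    postcopy_discard_send_init(ms, block_name);
    /* One call per run of pages that must be discarded on the destination */
    postcopy_discard_send_range(ms, start, npages);
    /* ... then flush anything still queued */
    postcopy_discard_send_finish(ms);
}
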
1423bac3b212SJuan Quintela /*
1424bac3b212SJuan Quintela  * Current state of incoming postcopy; note this is not part of
1425bac3b212SJuan Quintela  * MigrationIncomingState since its state is used during cleanup
1426bac3b212SJuan Quintela  * at the end as MIS is being freed.
1427bac3b212SJuan Quintela  */
1428bac3b212SJuan Quintela static PostcopyState incoming_postcopy_state;
1429bac3b212SJuan Quintela 
1430bac3b212SJuan Quintela PostcopyState  postcopy_state_get(void)
1431bac3b212SJuan Quintela {
1432d73415a3SStefan Hajnoczi     return qatomic_mb_read(&incoming_postcopy_state);
1433bac3b212SJuan Quintela }
1434bac3b212SJuan Quintela 
1435bac3b212SJuan Quintela /* Set the state and return the old state */
1436bac3b212SJuan Quintela PostcopyState postcopy_state_set(PostcopyState new_state)
1437bac3b212SJuan Quintela {
1438d73415a3SStefan Hajnoczi     return qatomic_xchg(&incoming_postcopy_state, new_state);
1439bac3b212SJuan Quintela }
144000fa4fc8SDr. David Alan Gilbert 
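/*
 * Illustrative use of the state accessors above, loosely modelled on the
 * destination's loadvm handlers in savevm.c: advance the state machine and
 * sanity-check the previous state.  This is a simplified outline, not the
 * actual savevm.c code; the POSTCOPY_INCOMING_* values are the states
 * declared in migration.h and the function name is made up.
 */
static bool example_advance_to_listening(void)
{
    PostcopyState old = postcopy_state_set(POSTCOPY_INCOMING_LISTENING);

    if (old != POSTCOPY_INCOMING_ADVISE && old != POSTCOPY_INCOMING_DISCARD) {
        error_report("Unexpected postcopy state transition from %d", old);
        return false;
    }
    return true;
}
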
144100fa4fc8SDr. David Alan Gilbert /* Register a handler for external shared memory postcopy;
144200fa4fc8SDr. David Alan Gilbert  * called on the destination.
144300fa4fc8SDr. David Alan Gilbert  */
144400fa4fc8SDr. David Alan Gilbert void postcopy_register_shared_ufd(struct PostCopyFD *pcfd)
144500fa4fc8SDr. David Alan Gilbert {
144600fa4fc8SDr. David Alan Gilbert     MigrationIncomingState *mis = migration_incoming_get_current();
144700fa4fc8SDr. David Alan Gilbert 
144800fa4fc8SDr. David Alan Gilbert     mis->postcopy_remote_fds = g_array_append_val(mis->postcopy_remote_fds,
144900fa4fc8SDr. David Alan Gilbert                                                   *pcfd);
145000fa4fc8SDr. David Alan Gilbert }
145100fa4fc8SDr. David Alan Gilbert 
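/*
 * Hypothetical registration sketch for an external shared-memory client (e.g.
 * a vhost-user backend).  The field names (.fd, .idstr, .handler, .waker)
 * follow the uses earlier in this file; the callback parameter types are an
 * assumption based on postcopy-ram.h, and every function name here is made up
 * purely for illustration.
 */
static int example_shared_fault_handler(struct PostCopyFD *pcfd, void *ufd)
{
    /*
     * 'ufd' points at the raw uffd_msg; resolve the fault, e.g. by asking
     * the source for the page backing the shared region.
     */
    return 0;
}

static int example_shared_waker(struct PostCopyFD *pcfd, RAMBlock *rb,
                                uint64_t offset)
{
    /* Wake whatever faulted on rb + offset inside the external process */
    return 0;
}

static void example_register_shared_region(int uffd, const char *id)
{
    struct PostCopyFD pcfd = {
        .fd      = uffd,
        .idstr   = id,
        .handler = example_shared_fault_handler,
        .waker   = example_shared_waker,
    };

    postcopy_register_shared_ufd(&pcfd);
}
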
145200fa4fc8SDr. David Alan Gilbert /* Unregister a handler for external shared memory postcopy
145300fa4fc8SDr. David Alan Gilbert  */
145400fa4fc8SDr. David Alan Gilbert void postcopy_unregister_shared_ufd(struct PostCopyFD *pcfd)
145500fa4fc8SDr. David Alan Gilbert {
145600fa4fc8SDr. David Alan Gilbert     guint i;
145700fa4fc8SDr. David Alan Gilbert     MigrationIncomingState *mis = migration_incoming_get_current();
145800fa4fc8SDr. David Alan Gilbert     GArray *pcrfds = mis->postcopy_remote_fds;
145900fa4fc8SDr. David Alan Gilbert 
146000fa4fc8SDr. David Alan Gilbert     for (i = 0; i < pcrfds->len; i++) {
146100fa4fc8SDr. David Alan Gilbert         struct PostCopyFD *cur = &g_array_index(pcrfds, struct PostCopyFD, i);
146200fa4fc8SDr. David Alan Gilbert         if (cur->fd == pcfd->fd) {
146300fa4fc8SDr. David Alan Gilbert             mis->postcopy_remote_fds = g_array_remove_index(pcrfds, i);
146400fa4fc8SDr. David Alan Gilbert             return;
146500fa4fc8SDr. David Alan Gilbert         }
146600fa4fc8SDr. David Alan Gilbert     }
146700fa4fc8SDr. David Alan Gilbert }
1468