xref: /qemu/migration/postcopy-ram.c (revision 60bb3c5871a7f7b7cfff5d0a30a035e30cce8e42)
1eb59db53SDr. David Alan Gilbert /*
2eb59db53SDr. David Alan Gilbert  * Postcopy migration for RAM
3eb59db53SDr. David Alan Gilbert  *
4eb59db53SDr. David Alan Gilbert  * Copyright 2013-2015 Red Hat, Inc. and/or its affiliates
5eb59db53SDr. David Alan Gilbert  *
6eb59db53SDr. David Alan Gilbert  * Authors:
7eb59db53SDr. David Alan Gilbert  *  Dave Gilbert  <dgilbert@redhat.com>
8eb59db53SDr. David Alan Gilbert  *
9eb59db53SDr. David Alan Gilbert  * This work is licensed under the terms of the GNU GPL, version 2 or later.
10eb59db53SDr. David Alan Gilbert  * See the COPYING file in the top-level directory.
11eb59db53SDr. David Alan Gilbert  *
12eb59db53SDr. David Alan Gilbert  */
13eb59db53SDr. David Alan Gilbert 
14eb59db53SDr. David Alan Gilbert /*
15eb59db53SDr. David Alan Gilbert  * Postcopy is a migration technique where the execution flips from the
16eb59db53SDr. David Alan Gilbert  * source to the destination before all the data has been copied.
17eb59db53SDr. David Alan Gilbert  */
18eb59db53SDr. David Alan Gilbert 
191393a485SPeter Maydell #include "qemu/osdep.h"
20898ba906SDavid Hildenbrand #include "qemu/rcu.h"
21b85ea5faSPeter Maydell #include "qemu/madvise.h"
2251180423SJuan Quintela #include "exec/target_page.h"
236666c96aSJuan Quintela #include "migration.h"
2408a0aee1SJuan Quintela #include "qemu-file.h"
2520a519a0SJuan Quintela #include "savevm.h"
26be07b0acSJuan Quintela #include "postcopy-ram.h"
277b1e1a22SJuan Quintela #include "ram.h"
281693c64cSDr. David Alan Gilbert #include "qapi/error.h"
291693c64cSDr. David Alan Gilbert #include "qemu/notify.h"
30d4842052SMarkus Armbruster #include "qemu/rcu.h"
31eb59db53SDr. David Alan Gilbert #include "sysemu/sysemu.h"
32eb59db53SDr. David Alan Gilbert #include "qemu/error-report.h"
33eb59db53SDr. David Alan Gilbert #include "trace.h"
345cc8767dSLike Xu #include "hw/boards.h"
35898ba906SDavid Hildenbrand #include "exec/ramblock.h"
3636f62f11SPeter Xu #include "socket.h"
3736f62f11SPeter Xu #include "qemu-file.h"
3836f62f11SPeter Xu #include "yank_functions.h"
39eb59db53SDr. David Alan Gilbert 
40e0b266f0SDr. David Alan Gilbert /* Arbitrary limit on the size of each discard command,
41e0b266f0SDr. David Alan Gilbert  * which keeps each command around ~200 bytes
42e0b266f0SDr. David Alan Gilbert  */
43e0b266f0SDr. David Alan Gilbert #define MAX_DISCARDS_PER_COMMAND 12
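/*
 * Rough arithmetic behind the ~200 byte figure above: 12 discard entries,
 * each a 64-bit start plus a 64-bit length, is 192 bytes of payload per
 * command, before the command header and RAMBlock name are added.
 */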
44e0b266f0SDr. David Alan Gilbert 
45e0b266f0SDr. David Alan Gilbert struct PostcopyDiscardState {
46e0b266f0SDr. David Alan Gilbert     const char *ramblock_name;
47e0b266f0SDr. David Alan Gilbert     uint16_t cur_entry;
48e0b266f0SDr. David Alan Gilbert     /*
49e0b266f0SDr. David Alan Gilbert      * Start and length of a discard range (bytes)
50e0b266f0SDr. David Alan Gilbert      */
51e0b266f0SDr. David Alan Gilbert     uint64_t start_list[MAX_DISCARDS_PER_COMMAND];
52e0b266f0SDr. David Alan Gilbert     uint64_t length_list[MAX_DISCARDS_PER_COMMAND];
53e0b266f0SDr. David Alan Gilbert     unsigned int nsentwords;
54e0b266f0SDr. David Alan Gilbert     unsigned int nsentcmds;
55e0b266f0SDr. David Alan Gilbert };
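/*
 * Roughly how this is used (see the postcopy_discard_send_* helpers later
 * in this file): ranges accumulate in start_list[]/length_list[] until
 * cur_entry reaches MAX_DISCARDS_PER_COMMAND, then one discard command is
 * flushed to the wire; nsentwords/nsentcmds only track totals for tracing.
 */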
56e0b266f0SDr. David Alan Gilbert 
571693c64cSDr. David Alan Gilbert static NotifierWithReturnList postcopy_notifier_list;
581693c64cSDr. David Alan Gilbert 
591693c64cSDr. David Alan Gilbert void postcopy_infrastructure_init(void)
601693c64cSDr. David Alan Gilbert {
611693c64cSDr. David Alan Gilbert     notifier_with_return_list_init(&postcopy_notifier_list);
621693c64cSDr. David Alan Gilbert }
631693c64cSDr. David Alan Gilbert 
641693c64cSDr. David Alan Gilbert void postcopy_add_notifier(NotifierWithReturn *nn)
651693c64cSDr. David Alan Gilbert {
661693c64cSDr. David Alan Gilbert     notifier_with_return_list_add(&postcopy_notifier_list, nn);
671693c64cSDr. David Alan Gilbert }
681693c64cSDr. David Alan Gilbert 
691693c64cSDr. David Alan Gilbert void postcopy_remove_notifier(NotifierWithReturn *n)
701693c64cSDr. David Alan Gilbert {
711693c64cSDr. David Alan Gilbert     notifier_with_return_remove(n);
721693c64cSDr. David Alan Gilbert }
731693c64cSDr. David Alan Gilbert 
741693c64cSDr. David Alan Gilbert int postcopy_notify(enum PostcopyNotifyReason reason, Error **errp)
751693c64cSDr. David Alan Gilbert {
761693c64cSDr. David Alan Gilbert     struct PostcopyNotifyData pnd;
771693c64cSDr. David Alan Gilbert     pnd.reason = reason;
781693c64cSDr. David Alan Gilbert     pnd.errp = errp;
791693c64cSDr. David Alan Gilbert 
801693c64cSDr. David Alan Gilbert     return notifier_with_return_list_notify(&postcopy_notifier_list,
811693c64cSDr. David Alan Gilbert                                             &pnd);
821693c64cSDr. David Alan Gilbert }
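/*
 * Hedged usage sketch (not taken from this file; the handler and device
 * names are made up): a subsystem that cannot cope with postcopy can veto
 * it from its notifier when probed.
 *
 *     static int my_postcopy_notifier(NotifierWithReturn *n, void *opaque)
 *     {
 *         struct PostcopyNotifyData *pnd = opaque;
 *
 *         if (pnd->reason == POSTCOPY_NOTIFY_PROBE) {
 *             error_setg(pnd->errp, "mydev: postcopy not supported");
 *             return -EINVAL;
 *         }
 *         return 0;
 *     }
 *
 *     my_notifier.notify = my_postcopy_notifier;
 *     postcopy_add_notifier(&my_notifier);
 */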
831693c64cSDr. David Alan Gilbert 
84095c12a4SPeter Xu /*
85095c12a4SPeter Xu  * NOTE: this routine is not thread safe; we can't call it concurrently. But it
86095c12a4SPeter Xu  * should be good enough for migration's purposes.
87095c12a4SPeter Xu  */
88095c12a4SPeter Xu void postcopy_thread_create(MigrationIncomingState *mis,
89095c12a4SPeter Xu                             QemuThread *thread, const char *name,
90095c12a4SPeter Xu                             void *(*fn)(void *), int joinable)
91095c12a4SPeter Xu {
92095c12a4SPeter Xu     qemu_sem_init(&mis->thread_sync_sem, 0);
93095c12a4SPeter Xu     qemu_thread_create(thread, name, fn, mis, joinable);
94095c12a4SPeter Xu     qemu_sem_wait(&mis->thread_sync_sem);
95095c12a4SPeter Xu     qemu_sem_destroy(&mis->thread_sync_sem);
96095c12a4SPeter Xu }
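/*
 * Usage sketch (this mirrors what postcopy_ram_fault_thread() below does):
 * the created thread must post mis->thread_sync_sem once its early setup is
 * finished, otherwise postcopy_thread_create() blocks forever.  The thread
 * function and name here are illustrative only.
 *
 *     static void *example_thread(void *opaque)
 *     {
 *         MigrationIncomingState *mis = opaque;
 *
 *         ... early setup the creator must wait for ...
 *         qemu_sem_post(&mis->thread_sync_sem);
 *         ... main loop ...
 *         return NULL;
 *     }
 *
 *     postcopy_thread_create(mis, &mis->fault_thread, "fault-example",
 *                            example_thread, QEMU_THREAD_JOINABLE);
 */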
97095c12a4SPeter Xu 
98eb59db53SDr. David Alan Gilbert /* Postcopy needs to detect accesses to pages that haven't yet been copied
99eb59db53SDr. David Alan Gilbert  * across, and efficiently map new pages in; the techniques for doing this
100eb59db53SDr. David Alan Gilbert  * are target OS specific.
101eb59db53SDr. David Alan Gilbert  */
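/*
 * On Linux the mechanism used below is userfaultfd(2): guest RAM is
 * registered with UFFDIO_REGISTER in MISSING mode, the fault thread reads
 * fault events from the userfault fd and requests the missing page from the
 * source, and the receiving side resolves the fault with UFFDIO_COPY or
 * UFFDIO_ZEROPAGE and wakes the blocked vCPU (UFFDIO_WAKE).
 */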
102eb59db53SDr. David Alan Gilbert #if defined(__linux__)
103eb59db53SDr. David Alan Gilbert 
104c4faeed2SDr. David Alan Gilbert #include <poll.h>
105eb59db53SDr. David Alan Gilbert #include <sys/ioctl.h>
106eb59db53SDr. David Alan Gilbert #include <sys/syscall.h>
107eb59db53SDr. David Alan Gilbert #include <asm/types.h> /* for __u64 */
108eb59db53SDr. David Alan Gilbert #endif
109eb59db53SDr. David Alan Gilbert 
110d8b9d771SMatthew Fortune #if defined(__linux__) && defined(__NR_userfaultfd) && defined(CONFIG_EVENTFD)
111d8b9d771SMatthew Fortune #include <sys/eventfd.h>
112eb59db53SDr. David Alan Gilbert #include <linux/userfaultfd.h>
113eb59db53SDr. David Alan Gilbert 
1142a4c42f1SAlexey Perevalov typedef struct PostcopyBlocktimeContext {
1152a4c42f1SAlexey Perevalov     /* time when the current page fault was initiated, per vCPU */
1162a4c42f1SAlexey Perevalov     uint32_t *page_fault_vcpu_time;
1172a4c42f1SAlexey Perevalov     /* page address per vCPU */
1182a4c42f1SAlexey Perevalov     uintptr_t *vcpu_addr;
1192a4c42f1SAlexey Perevalov     uint32_t total_blocktime;
1202a4c42f1SAlexey Perevalov     /* blocktime per vCPU */
1212a4c42f1SAlexey Perevalov     uint32_t *vcpu_blocktime;
1222a4c42f1SAlexey Perevalov     /* point in time when last page fault was initiated */
1232a4c42f1SAlexey Perevalov     uint32_t last_begin;
1242a4c42f1SAlexey Perevalov     /* number of vCPUs currently suspended */
1252a4c42f1SAlexey Perevalov     int smp_cpus_down;
1262a4c42f1SAlexey Perevalov     uint64_t start_time;
1272a4c42f1SAlexey Perevalov 
1282a4c42f1SAlexey Perevalov     /*
1292a4c42f1SAlexey Perevalov      * Handler for the exit event, needed to release
1302a4c42f1SAlexey Perevalov      * the whole blocktime_ctx
1312a4c42f1SAlexey Perevalov      */
1322a4c42f1SAlexey Perevalov     Notifier exit_notifier;
1332a4c42f1SAlexey Perevalov } PostcopyBlocktimeContext;
1342a4c42f1SAlexey Perevalov 
1352a4c42f1SAlexey Perevalov static void destroy_blocktime_context(struct PostcopyBlocktimeContext *ctx)
1362a4c42f1SAlexey Perevalov {
1372a4c42f1SAlexey Perevalov     g_free(ctx->page_fault_vcpu_time);
1382a4c42f1SAlexey Perevalov     g_free(ctx->vcpu_addr);
1392a4c42f1SAlexey Perevalov     g_free(ctx->vcpu_blocktime);
1402a4c42f1SAlexey Perevalov     g_free(ctx);
1412a4c42f1SAlexey Perevalov }
1422a4c42f1SAlexey Perevalov 
1432a4c42f1SAlexey Perevalov static void migration_exit_cb(Notifier *n, void *data)
1442a4c42f1SAlexey Perevalov {
1452a4c42f1SAlexey Perevalov     PostcopyBlocktimeContext *ctx = container_of(n, PostcopyBlocktimeContext,
1462a4c42f1SAlexey Perevalov                                                  exit_notifier);
1472a4c42f1SAlexey Perevalov     destroy_blocktime_context(ctx);
1482a4c42f1SAlexey Perevalov }
1492a4c42f1SAlexey Perevalov 
1502a4c42f1SAlexey Perevalov static struct PostcopyBlocktimeContext *blocktime_context_new(void)
1512a4c42f1SAlexey Perevalov {
1525cc8767dSLike Xu     MachineState *ms = MACHINE(qdev_get_machine());
1535cc8767dSLike Xu     unsigned int smp_cpus = ms->smp.cpus;
1542a4c42f1SAlexey Perevalov     PostcopyBlocktimeContext *ctx = g_new0(PostcopyBlocktimeContext, 1);
1552a4c42f1SAlexey Perevalov     ctx->page_fault_vcpu_time = g_new0(uint32_t, smp_cpus);
1562a4c42f1SAlexey Perevalov     ctx->vcpu_addr = g_new0(uintptr_t, smp_cpus);
1572a4c42f1SAlexey Perevalov     ctx->vcpu_blocktime = g_new0(uint32_t, smp_cpus);
1582a4c42f1SAlexey Perevalov 
1592a4c42f1SAlexey Perevalov     ctx->exit_notifier.notify = migration_exit_cb;
1602a4c42f1SAlexey Perevalov     ctx->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1612a4c42f1SAlexey Perevalov     qemu_add_exit_notifier(&ctx->exit_notifier);
1622a4c42f1SAlexey Perevalov     return ctx;
1632a4c42f1SAlexey Perevalov }
164ca6011c2SAlexey Perevalov 
16565ace060SAlexey Perevalov static uint32List *get_vcpu_blocktime_list(PostcopyBlocktimeContext *ctx)
16665ace060SAlexey Perevalov {
1675cc8767dSLike Xu     MachineState *ms = MACHINE(qdev_get_machine());
16854aa3de7SEric Blake     uint32List *list = NULL;
16965ace060SAlexey Perevalov     int i;
17065ace060SAlexey Perevalov 
1715cc8767dSLike Xu     for (i = ms->smp.cpus - 1; i >= 0; i--) {
17254aa3de7SEric Blake         QAPI_LIST_PREPEND(list, ctx->vcpu_blocktime[i]);
17365ace060SAlexey Perevalov     }
17465ace060SAlexey Perevalov 
17565ace060SAlexey Perevalov     return list;
17665ace060SAlexey Perevalov }
17765ace060SAlexey Perevalov 
17865ace060SAlexey Perevalov /*
17965ace060SAlexey Perevalov  * This function populates MigrationInfo from postcopy's
18065ace060SAlexey Perevalov  * blocktime context. It does nothing unless the
18165ace060SAlexey Perevalov  * postcopy-blocktime capability was set.
18265ace060SAlexey Perevalov  *
18365ace060SAlexey Perevalov  * @info: pointer to MigrationInfo to populate
18465ace060SAlexey Perevalov  */
18565ace060SAlexey Perevalov void fill_destination_postcopy_migration_info(MigrationInfo *info)
18665ace060SAlexey Perevalov {
18765ace060SAlexey Perevalov     MigrationIncomingState *mis = migration_incoming_get_current();
18865ace060SAlexey Perevalov     PostcopyBlocktimeContext *bc = mis->blocktime_ctx;
18965ace060SAlexey Perevalov 
19065ace060SAlexey Perevalov     if (!bc) {
19165ace060SAlexey Perevalov         return;
19265ace060SAlexey Perevalov     }
19365ace060SAlexey Perevalov 
19465ace060SAlexey Perevalov     info->has_postcopy_blocktime = true;
19565ace060SAlexey Perevalov     info->postcopy_blocktime = bc->total_blocktime;
19665ace060SAlexey Perevalov     info->has_postcopy_vcpu_blocktime = true;
19765ace060SAlexey Perevalov     info->postcopy_vcpu_blocktime = get_vcpu_blocktime_list(bc);
19865ace060SAlexey Perevalov }
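/*
 * These fields surface on the destination via QMP's query-migrate, e.g.
 * (values invented for illustration):
 *
 *     "postcopy-blocktime": 1704,
 *     "postcopy-vcpu-blocktime": [ 320, 451, 0, 933 ]
 */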
19965ace060SAlexey Perevalov 
20065ace060SAlexey Perevalov static uint32_t get_postcopy_total_blocktime(void)
20165ace060SAlexey Perevalov {
20265ace060SAlexey Perevalov     MigrationIncomingState *mis = migration_incoming_get_current();
20365ace060SAlexey Perevalov     PostcopyBlocktimeContext *bc = mis->blocktime_ctx;
20465ace060SAlexey Perevalov 
20565ace060SAlexey Perevalov     if (!bc) {
20665ace060SAlexey Perevalov         return 0;
20765ace060SAlexey Perevalov     }
20865ace060SAlexey Perevalov 
20965ace060SAlexey Perevalov     return bc->total_blocktime;
21065ace060SAlexey Perevalov }
21165ace060SAlexey Perevalov 
21254ae0886SAlexey Perevalov /**
21354ae0886SAlexey Perevalov  * receive_ufd_features: query the userfault fd features so that only
21454ae0886SAlexey Perevalov  * supported features are requested later on.
21554ae0886SAlexey Perevalov  *
21654ae0886SAlexey Perevalov  * Returns: true on success
21754ae0886SAlexey Perevalov  *
21854ae0886SAlexey Perevalov  * __NR_userfaultfd must already have been checked to exist.
21954ae0886SAlexey Perevalov  * @features: out parameter; on success it contains the uffdio_api.features
22054ae0886SAlexey Perevalov  *            reported by the kernel
22154ae0886SAlexey Perevalov  */
22254ae0886SAlexey Perevalov static bool receive_ufd_features(uint64_t *features)
22354ae0886SAlexey Perevalov {
22454ae0886SAlexey Perevalov     struct uffdio_api api_struct = {0};
22554ae0886SAlexey Perevalov     int ufd;
22654ae0886SAlexey Perevalov     bool ret = true;
22754ae0886SAlexey Perevalov 
22854ae0886SAlexey Perevalov     /* if we got here, __NR_userfaultfd should exist */
22954ae0886SAlexey Perevalov     ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
23054ae0886SAlexey Perevalov     if (ufd == -1) {
23154ae0886SAlexey Perevalov         error_report("%s: syscall __NR_userfaultfd failed: %s", __func__,
23254ae0886SAlexey Perevalov                      strerror(errno));
23354ae0886SAlexey Perevalov         return false;
23454ae0886SAlexey Perevalov     }
23554ae0886SAlexey Perevalov 
23654ae0886SAlexey Perevalov     /* ask features */
237eb59db53SDr. David Alan Gilbert     api_struct.api = UFFD_API;
238eb59db53SDr. David Alan Gilbert     api_struct.features = 0;
239eb59db53SDr. David Alan Gilbert     if (ioctl(ufd, UFFDIO_API, &api_struct)) {
2405553499fSAlexey Perevalov         error_report("%s: UFFDIO_API failed: %s", __func__,
241eb59db53SDr. David Alan Gilbert                      strerror(errno));
24254ae0886SAlexey Perevalov         ret = false;
24354ae0886SAlexey Perevalov         goto release_ufd;
24454ae0886SAlexey Perevalov     }
24554ae0886SAlexey Perevalov 
24654ae0886SAlexey Perevalov     *features = api_struct.features;
24754ae0886SAlexey Perevalov 
24854ae0886SAlexey Perevalov release_ufd:
24954ae0886SAlexey Perevalov     close(ufd);
25054ae0886SAlexey Perevalov     return ret;
25154ae0886SAlexey Perevalov }
25254ae0886SAlexey Perevalov 
25354ae0886SAlexey Perevalov /**
25454ae0886SAlexey Perevalov  * request_ufd_features: this function should be called only once on a newly
25554ae0886SAlexey Perevalov  * opened ufd; subsequent calls will lead to an error.
25654ae0886SAlexey Perevalov  *
2573a4452d8Szhaolichang  * Returns: true on success
25854ae0886SAlexey Perevalov  *
25954ae0886SAlexey Perevalov  * @ufd: fd obtained from userfaultfd syscall
26054ae0886SAlexey Perevalov  * @features: bit mask see UFFD_API_FEATURES
26154ae0886SAlexey Perevalov  */
26254ae0886SAlexey Perevalov static bool request_ufd_features(int ufd, uint64_t features)
26354ae0886SAlexey Perevalov {
26454ae0886SAlexey Perevalov     struct uffdio_api api_struct = {0};
26554ae0886SAlexey Perevalov     uint64_t ioctl_mask;
26654ae0886SAlexey Perevalov 
26754ae0886SAlexey Perevalov     api_struct.api = UFFD_API;
26854ae0886SAlexey Perevalov     api_struct.features = features;
26954ae0886SAlexey Perevalov     if (ioctl(ufd, UFFDIO_API, &api_struct)) {
27054ae0886SAlexey Perevalov         error_report("%s failed: UFFDIO_API failed: %s", __func__,
27154ae0886SAlexey Perevalov                      strerror(errno));
272eb59db53SDr. David Alan Gilbert         return false;
273eb59db53SDr. David Alan Gilbert     }
274eb59db53SDr. David Alan Gilbert 
275eb59db53SDr. David Alan Gilbert     ioctl_mask = (__u64)1 << _UFFDIO_REGISTER |
276eb59db53SDr. David Alan Gilbert                  (__u64)1 << _UFFDIO_UNREGISTER;
277eb59db53SDr. David Alan Gilbert     if ((api_struct.ioctls & ioctl_mask) != ioctl_mask) {
278eb59db53SDr. David Alan Gilbert         error_report("Missing userfault features: %" PRIx64,
279eb59db53SDr. David Alan Gilbert                      (uint64_t)(~api_struct.ioctls & ioctl_mask));
280eb59db53SDr. David Alan Gilbert         return false;
281eb59db53SDr. David Alan Gilbert     }
282eb59db53SDr. David Alan Gilbert 
28354ae0886SAlexey Perevalov     return true;
28454ae0886SAlexey Perevalov }
28554ae0886SAlexey Perevalov 
28654ae0886SAlexey Perevalov static bool ufd_check_and_apply(int ufd, MigrationIncomingState *mis)
28754ae0886SAlexey Perevalov {
28854ae0886SAlexey Perevalov     uint64_t asked_features = 0;
28954ae0886SAlexey Perevalov     static uint64_t supported_features;
29054ae0886SAlexey Perevalov 
29154ae0886SAlexey Perevalov     /*
29254ae0886SAlexey Perevalov      * It's not possible to
29354ae0886SAlexey Perevalov      * request UFFD_API twice on one fd;
29454ae0886SAlexey Perevalov      * userfault fd features are persistent
29554ae0886SAlexey Perevalov      */
29654ae0886SAlexey Perevalov     if (!supported_features) {
29754ae0886SAlexey Perevalov         if (!receive_ufd_features(&supported_features)) {
29854ae0886SAlexey Perevalov             error_report("%s failed", __func__);
29954ae0886SAlexey Perevalov             return false;
30054ae0886SAlexey Perevalov         }
30154ae0886SAlexey Perevalov     }
30254ae0886SAlexey Perevalov 
3032a4c42f1SAlexey Perevalov #ifdef UFFD_FEATURE_THREAD_ID
3042d1c37c6SPeter Xu     if (UFFD_FEATURE_THREAD_ID & supported_features) {
3052d1c37c6SPeter Xu         asked_features |= UFFD_FEATURE_THREAD_ID;
3062d1c37c6SPeter Xu         if (migrate_postcopy_blocktime()) {
3072a4c42f1SAlexey Perevalov             if (!mis->blocktime_ctx) {
3082a4c42f1SAlexey Perevalov                 mis->blocktime_ctx = blocktime_context_new();
3092a4c42f1SAlexey Perevalov             }
3102d1c37c6SPeter Xu         }
3112a4c42f1SAlexey Perevalov     }
3122a4c42f1SAlexey Perevalov #endif
3132a4c42f1SAlexey Perevalov 
31454ae0886SAlexey Perevalov     /*
31554ae0886SAlexey Perevalov      * Request features even if asked_features is 0, because the
31654ae0886SAlexey Perevalov      * kernel expects UFFD_API before UFFDIO_REGISTER, once per
31754ae0886SAlexey Perevalov      * userfault file descriptor
31854ae0886SAlexey Perevalov      */
31954ae0886SAlexey Perevalov     if (!request_ufd_features(ufd, asked_features)) {
32054ae0886SAlexey Perevalov         error_report("%s failed: features %" PRIu64, __func__,
32154ae0886SAlexey Perevalov                      asked_features);
32254ae0886SAlexey Perevalov         return false;
32354ae0886SAlexey Perevalov     }
32454ae0886SAlexey Perevalov 
3258e3b0cbbSMarc-André Lureau     if (qemu_real_host_page_size() != ram_pagesize_summary()) {
3267e8cafb7SDr. David Alan Gilbert         bool have_hp = false;
3277e8cafb7SDr. David Alan Gilbert         /* We've got a huge page */
3287e8cafb7SDr. David Alan Gilbert #ifdef UFFD_FEATURE_MISSING_HUGETLBFS
32954ae0886SAlexey Perevalov         have_hp = supported_features & UFFD_FEATURE_MISSING_HUGETLBFS;
3307e8cafb7SDr. David Alan Gilbert #endif
3317e8cafb7SDr. David Alan Gilbert         if (!have_hp) {
3327e8cafb7SDr. David Alan Gilbert             error_report("Userfault on this host does not support huge pages");
3337e8cafb7SDr. David Alan Gilbert             return false;
3347e8cafb7SDr. David Alan Gilbert         }
3357e8cafb7SDr. David Alan Gilbert     }
336eb59db53SDr. David Alan Gilbert     return true;
337eb59db53SDr. David Alan Gilbert }
338eb59db53SDr. David Alan Gilbert 
3398679638bSDr. David Alan Gilbert /* Callback from postcopy_ram_supported_by_host block iterator.
3408679638bSDr. David Alan Gilbert  */
341754cb9c0SYury Kotov static int test_ramblock_postcopiable(RAMBlock *rb, void *opaque)
3428679638bSDr. David Alan Gilbert {
343754cb9c0SYury Kotov     const char *block_name = qemu_ram_get_idstr(rb);
344754cb9c0SYury Kotov     ram_addr_t length = qemu_ram_get_used_length(rb);
3455d214a92SDr. David Alan Gilbert     size_t pagesize = qemu_ram_pagesize(rb);
3465d214a92SDr. David Alan Gilbert 
3475d214a92SDr. David Alan Gilbert     if (length % pagesize) {
3485d214a92SDr. David Alan Gilbert         error_report("Postcopy requires RAM blocks to be a page size multiple,"
3495d214a92SDr. David Alan Gilbert                      " block %s is 0x" RAM_ADDR_FMT " bytes with a "
3505d214a92SDr. David Alan Gilbert                      "page size of 0x%zx", block_name, length, pagesize);
3515d214a92SDr. David Alan Gilbert         return 1;
3525d214a92SDr. David Alan Gilbert     }
3538679638bSDr. David Alan Gilbert     return 0;
3548679638bSDr. David Alan Gilbert }
3558679638bSDr. David Alan Gilbert 
35658b7c17eSDr. David Alan Gilbert /*
35758b7c17eSDr. David Alan Gilbert  * Note: This has the side effect of munlock'ing all of RAM; that's
35858b7c17eSDr. David Alan Gilbert  * normally fine since, if postcopy succeeds, mlock gets turned back on at
35958b7c17eSDr. David Alan Gilbert  * the end.
36058b7c17eSDr. David Alan Gilbert  */
361d7651f15SAlexey Perevalov bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
362eb59db53SDr. David Alan Gilbert {
3638e3b0cbbSMarc-André Lureau     long pagesize = qemu_real_host_page_size();
364eb59db53SDr. David Alan Gilbert     int ufd = -1;
365eb59db53SDr. David Alan Gilbert     bool ret = false; /* Error unless we change it */
366eb59db53SDr. David Alan Gilbert     void *testarea = NULL;
367eb59db53SDr. David Alan Gilbert     struct uffdio_register reg_struct;
368eb59db53SDr. David Alan Gilbert     struct uffdio_range range_struct;
369eb59db53SDr. David Alan Gilbert     uint64_t feature_mask;
3701693c64cSDr. David Alan Gilbert     Error *local_err = NULL;
371eb59db53SDr. David Alan Gilbert 
37220afaed9SJuan Quintela     if (qemu_target_page_size() > pagesize) {
373eb59db53SDr. David Alan Gilbert         error_report("Target page size bigger than host page size");
374eb59db53SDr. David Alan Gilbert         goto out;
375eb59db53SDr. David Alan Gilbert     }
376eb59db53SDr. David Alan Gilbert 
377eb59db53SDr. David Alan Gilbert     ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
378eb59db53SDr. David Alan Gilbert     if (ufd == -1) {
379eb59db53SDr. David Alan Gilbert         error_report("%s: userfaultfd not available: %s", __func__,
380eb59db53SDr. David Alan Gilbert                      strerror(errno));
381eb59db53SDr. David Alan Gilbert         goto out;
382eb59db53SDr. David Alan Gilbert     }
383eb59db53SDr. David Alan Gilbert 
3841693c64cSDr. David Alan Gilbert     /* Give devices a chance to object */
3851693c64cSDr. David Alan Gilbert     if (postcopy_notify(POSTCOPY_NOTIFY_PROBE, &local_err)) {
3861693c64cSDr. David Alan Gilbert         error_report_err(local_err);
3871693c64cSDr. David Alan Gilbert         goto out;
3881693c64cSDr. David Alan Gilbert     }
3891693c64cSDr. David Alan Gilbert 
390eb59db53SDr. David Alan Gilbert     /* Version and features check */
39154ae0886SAlexey Perevalov     if (!ufd_check_and_apply(ufd, mis)) {
392eb59db53SDr. David Alan Gilbert         goto out;
393eb59db53SDr. David Alan Gilbert     }
394eb59db53SDr. David Alan Gilbert 
3958679638bSDr. David Alan Gilbert     /* We don't support postcopy with shared RAM yet */
396fbd162e6SYury Kotov     if (foreach_not_ignored_block(test_ramblock_postcopiable, NULL)) {
3978679638bSDr. David Alan Gilbert         goto out;
3988679638bSDr. David Alan Gilbert     }
3998679638bSDr. David Alan Gilbert 
400eb59db53SDr. David Alan Gilbert     /*
40158b7c17eSDr. David Alan Gilbert      * userfault and mlock don't go together; we'll put it back later if
40258b7c17eSDr. David Alan Gilbert      * it was enabled.
40358b7c17eSDr. David Alan Gilbert      */
40458b7c17eSDr. David Alan Gilbert     if (munlockall()) {
40558b7c17eSDr. David Alan Gilbert         error_report("%s: munlockall: %s", __func__,  strerror(errno));
406617a32f5SDr. David Alan Gilbert         goto out;
40758b7c17eSDr. David Alan Gilbert     }
40858b7c17eSDr. David Alan Gilbert 
40958b7c17eSDr. David Alan Gilbert     /*
410eb59db53SDr. David Alan Gilbert      *  We need to check that the ops we need are supported on anonymous
411eb59db53SDr. David Alan Gilbert      *  memory. To do that we register a chunk and check the flags that
412eb59db53SDr. David Alan Gilbert      *  are returned.
413eb59db53SDr. David Alan Gilbert      */
414eb59db53SDr. David Alan Gilbert     testarea = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE |
415eb59db53SDr. David Alan Gilbert                                     MAP_ANONYMOUS, -1, 0);
416eb59db53SDr. David Alan Gilbert     if (testarea == MAP_FAILED) {
417eb59db53SDr. David Alan Gilbert         error_report("%s: Failed to map test area: %s", __func__,
418eb59db53SDr. David Alan Gilbert                      strerror(errno));
419eb59db53SDr. David Alan Gilbert         goto out;
420eb59db53SDr. David Alan Gilbert     }
4217648297dSDavid Hildenbrand     g_assert(QEMU_PTR_IS_ALIGNED(testarea, pagesize));
422eb59db53SDr. David Alan Gilbert 
423eb59db53SDr. David Alan Gilbert     reg_struct.range.start = (uintptr_t)testarea;
424eb59db53SDr. David Alan Gilbert     reg_struct.range.len = pagesize;
425eb59db53SDr. David Alan Gilbert     reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;
426eb59db53SDr. David Alan Gilbert 
427eb59db53SDr. David Alan Gilbert     if (ioctl(ufd, UFFDIO_REGISTER, &reg_struct)) {
428eb59db53SDr. David Alan Gilbert         error_report("%s userfault register: %s", __func__, strerror(errno));
429eb59db53SDr. David Alan Gilbert         goto out;
430eb59db53SDr. David Alan Gilbert     }
431eb59db53SDr. David Alan Gilbert 
432eb59db53SDr. David Alan Gilbert     range_struct.start = (uintptr_t)testarea;
433eb59db53SDr. David Alan Gilbert     range_struct.len = pagesize;
434eb59db53SDr. David Alan Gilbert     if (ioctl(ufd, UFFDIO_UNREGISTER, &range_struct)) {
435eb59db53SDr. David Alan Gilbert         error_report("%s userfault unregister: %s", __func__, strerror(errno));
436eb59db53SDr. David Alan Gilbert         goto out;
437eb59db53SDr. David Alan Gilbert     }
438eb59db53SDr. David Alan Gilbert 
439eb59db53SDr. David Alan Gilbert     feature_mask = (__u64)1 << _UFFDIO_WAKE |
440eb59db53SDr. David Alan Gilbert                    (__u64)1 << _UFFDIO_COPY |
441eb59db53SDr. David Alan Gilbert                    (__u64)1 << _UFFDIO_ZEROPAGE;
442eb59db53SDr. David Alan Gilbert     if ((reg_struct.ioctls & feature_mask) != feature_mask) {
443eb59db53SDr. David Alan Gilbert         error_report("Missing userfault map features: %" PRIx64,
444eb59db53SDr. David Alan Gilbert                      (uint64_t)(~reg_struct.ioctls & feature_mask));
445eb59db53SDr. David Alan Gilbert         goto out;
446eb59db53SDr. David Alan Gilbert     }
447eb59db53SDr. David Alan Gilbert 
448eb59db53SDr. David Alan Gilbert     /* Success! */
449eb59db53SDr. David Alan Gilbert     ret = true;
450eb59db53SDr. David Alan Gilbert out:
451eb59db53SDr. David Alan Gilbert     if (testarea) {
452eb59db53SDr. David Alan Gilbert         munmap(testarea, pagesize);
453eb59db53SDr. David Alan Gilbert     }
454eb59db53SDr. David Alan Gilbert     if (ufd != -1) {
455eb59db53SDr. David Alan Gilbert         close(ufd);
456eb59db53SDr. David Alan Gilbert     }
457eb59db53SDr. David Alan Gilbert     return ret;
458eb59db53SDr. David Alan Gilbert }
459eb59db53SDr. David Alan Gilbert 
4601caddf8aSDr. David Alan Gilbert /*
4611caddf8aSDr. David Alan Gilbert  * Set up an area of RAM so that it *can* be used for postcopy later; this
4621caddf8aSDr. David Alan Gilbert  * must be done right at the start prior to pre-copy.
4631caddf8aSDr. David Alan Gilbert  * opaque should be the MIS.
4641caddf8aSDr. David Alan Gilbert  */
465754cb9c0SYury Kotov static int init_range(RAMBlock *rb, void *opaque)
4661caddf8aSDr. David Alan Gilbert {
467754cb9c0SYury Kotov     const char *block_name = qemu_ram_get_idstr(rb);
468754cb9c0SYury Kotov     void *host_addr = qemu_ram_get_host_addr(rb);
469754cb9c0SYury Kotov     ram_addr_t offset = qemu_ram_get_offset(rb);
470754cb9c0SYury Kotov     ram_addr_t length = qemu_ram_get_used_length(rb);
4711caddf8aSDr. David Alan Gilbert     trace_postcopy_init_range(block_name, host_addr, offset, length);
4721caddf8aSDr. David Alan Gilbert 
4731caddf8aSDr. David Alan Gilbert     /*
474898ba906SDavid Hildenbrand      * Save the used_length before running the guest. In case we have to
475898ba906SDavid Hildenbrand      * resize RAM blocks when syncing RAM block sizes from the source during
476898ba906SDavid Hildenbrand      * precopy, we'll update it manually via the ram block notifier.
477898ba906SDavid Hildenbrand      */
478898ba906SDavid Hildenbrand     rb->postcopy_length = length;
479898ba906SDavid Hildenbrand 
480898ba906SDavid Hildenbrand     /*
4811caddf8aSDr. David Alan Gilbert      * We need the whole of RAM to be truly empty for postcopy, so things
4821caddf8aSDr. David Alan Gilbert      * like ROMs and any data tables built during init must be zero'd
4831caddf8aSDr. David Alan Gilbert      * - we're going to get the copy from the source anyway.
4841caddf8aSDr. David Alan Gilbert      * (Precopy will just overwrite this data, so doesn't need the discard)
4851caddf8aSDr. David Alan Gilbert      */
486aaa2064cSJuan Quintela     if (ram_discard_range(block_name, 0, length)) {
4871caddf8aSDr. David Alan Gilbert         return -1;
4881caddf8aSDr. David Alan Gilbert     }
4891caddf8aSDr. David Alan Gilbert 
4901caddf8aSDr. David Alan Gilbert     return 0;
4911caddf8aSDr. David Alan Gilbert }
4921caddf8aSDr. David Alan Gilbert 
4931caddf8aSDr. David Alan Gilbert /*
4941caddf8aSDr. David Alan Gilbert  * At the end of migration, undo the effects of init_range.
4951caddf8aSDr. David Alan Gilbert  * opaque should be the MIS.
4961caddf8aSDr. David Alan Gilbert  */
497754cb9c0SYury Kotov static int cleanup_range(RAMBlock *rb, void *opaque)
4981caddf8aSDr. David Alan Gilbert {
499754cb9c0SYury Kotov     const char *block_name = qemu_ram_get_idstr(rb);
500754cb9c0SYury Kotov     void *host_addr = qemu_ram_get_host_addr(rb);
501754cb9c0SYury Kotov     ram_addr_t offset = qemu_ram_get_offset(rb);
502898ba906SDavid Hildenbrand     ram_addr_t length = rb->postcopy_length;
5031caddf8aSDr. David Alan Gilbert     MigrationIncomingState *mis = opaque;
5041caddf8aSDr. David Alan Gilbert     struct uffdio_range range_struct;
5051caddf8aSDr. David Alan Gilbert     trace_postcopy_cleanup_range(block_name, host_addr, offset, length);
5061caddf8aSDr. David Alan Gilbert 
5071caddf8aSDr. David Alan Gilbert     /*
5081caddf8aSDr. David Alan Gilbert      * We turned off hugepage for the precopy stage with postcopy enabled;
5091caddf8aSDr. David Alan Gilbert      * we can turn it back on now.
5101caddf8aSDr. David Alan Gilbert      */
5111d741439SDr. David Alan Gilbert     qemu_madvise(host_addr, length, QEMU_MADV_HUGEPAGE);
5121caddf8aSDr. David Alan Gilbert 
5131caddf8aSDr. David Alan Gilbert     /*
5141caddf8aSDr. David Alan Gilbert      * We can also turn off userfault now since we should have all the
5151caddf8aSDr. David Alan Gilbert      * pages.   It can be useful to leave it on to debug postcopy
5161caddf8aSDr. David Alan Gilbert      * if you're not sure it's always getting every page.
5171caddf8aSDr. David Alan Gilbert      */
5181caddf8aSDr. David Alan Gilbert     range_struct.start = (uintptr_t)host_addr;
5191caddf8aSDr. David Alan Gilbert     range_struct.len = length;
5201caddf8aSDr. David Alan Gilbert 
5211caddf8aSDr. David Alan Gilbert     if (ioctl(mis->userfault_fd, UFFDIO_UNREGISTER, &range_struct)) {
5221caddf8aSDr. David Alan Gilbert         error_report("%s: userfault unregister %s", __func__, strerror(errno));
5231caddf8aSDr. David Alan Gilbert 
5241caddf8aSDr. David Alan Gilbert         return -1;
5251caddf8aSDr. David Alan Gilbert     }
5261caddf8aSDr. David Alan Gilbert 
5271caddf8aSDr. David Alan Gilbert     return 0;
5281caddf8aSDr. David Alan Gilbert }
5291caddf8aSDr. David Alan Gilbert 
5301caddf8aSDr. David Alan Gilbert /*
5311caddf8aSDr. David Alan Gilbert  * Initialise postcopy-ram, setting the RAM to a state where we can go into
5321caddf8aSDr. David Alan Gilbert  * postcopy later; must be called prior to any precopy.
5331caddf8aSDr. David Alan Gilbert  * Called from arch_init's similarly named ram_postcopy_incoming_init.
5341caddf8aSDr. David Alan Gilbert  */
535c136180cSDavid Hildenbrand int postcopy_ram_incoming_init(MigrationIncomingState *mis)
5361caddf8aSDr. David Alan Gilbert {
537fbd162e6SYury Kotov     if (foreach_not_ignored_block(init_range, NULL)) {
5381caddf8aSDr. David Alan Gilbert         return -1;
5391caddf8aSDr. David Alan Gilbert     }
5401caddf8aSDr. David Alan Gilbert 
5411caddf8aSDr. David Alan Gilbert     return 0;
5421caddf8aSDr. David Alan Gilbert }
5431caddf8aSDr. David Alan Gilbert 
544476ebf77SPeter Xu static void postcopy_temp_pages_cleanup(MigrationIncomingState *mis)
545476ebf77SPeter Xu {
54677dadc3fSPeter Xu     int i;
54777dadc3fSPeter Xu 
54877dadc3fSPeter Xu     if (mis->postcopy_tmp_pages) {
54977dadc3fSPeter Xu         for (i = 0; i < mis->postcopy_channels; i++) {
55077dadc3fSPeter Xu             if (mis->postcopy_tmp_pages[i].tmp_huge_page) {
55177dadc3fSPeter Xu                 munmap(mis->postcopy_tmp_pages[i].tmp_huge_page,
55277dadc3fSPeter Xu                        mis->largest_page_size);
55377dadc3fSPeter Xu                 mis->postcopy_tmp_pages[i].tmp_huge_page = NULL;
55477dadc3fSPeter Xu             }
55577dadc3fSPeter Xu         }
55677dadc3fSPeter Xu         g_free(mis->postcopy_tmp_pages);
55777dadc3fSPeter Xu         mis->postcopy_tmp_pages = NULL;
558476ebf77SPeter Xu     }
559476ebf77SPeter Xu 
560476ebf77SPeter Xu     if (mis->postcopy_tmp_zero_page) {
561476ebf77SPeter Xu         munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size);
562476ebf77SPeter Xu         mis->postcopy_tmp_zero_page = NULL;
563476ebf77SPeter Xu     }
564476ebf77SPeter Xu }
565476ebf77SPeter Xu 
5661caddf8aSDr. David Alan Gilbert /*
5671caddf8aSDr. David Alan Gilbert  * At the end of a migration where postcopy_ram_incoming_init was called.
5681caddf8aSDr. David Alan Gilbert  */
5691caddf8aSDr. David Alan Gilbert int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
5701caddf8aSDr. David Alan Gilbert {
571c4faeed2SDr. David Alan Gilbert     trace_postcopy_ram_incoming_cleanup_entry();
572c4faeed2SDr. David Alan Gilbert 
57336f62f11SPeter Xu     if (mis->postcopy_prio_thread_created) {
57436f62f11SPeter Xu         qemu_thread_join(&mis->postcopy_prio_thread);
57536f62f11SPeter Xu         mis->postcopy_prio_thread_created = false;
57636f62f11SPeter Xu     }
57736f62f11SPeter Xu 
578c4faeed2SDr. David Alan Gilbert     if (mis->have_fault_thread) {
57946343570SDr. David Alan Gilbert         Error *local_err = NULL;
58046343570SDr. David Alan Gilbert 
58155d0fe82SIlya Maximets         /* Let the fault thread quit */
582d73415a3SStefan Hajnoczi         qatomic_set(&mis->fault_thread_quit, 1);
58355d0fe82SIlya Maximets         postcopy_fault_thread_notify(mis);
58455d0fe82SIlya Maximets         trace_postcopy_ram_incoming_cleanup_join();
58555d0fe82SIlya Maximets         qemu_thread_join(&mis->fault_thread);
58655d0fe82SIlya Maximets 
58746343570SDr. David Alan Gilbert         if (postcopy_notify(POSTCOPY_NOTIFY_INBOUND_END, &local_err)) {
58846343570SDr. David Alan Gilbert             error_report_err(local_err);
58946343570SDr. David Alan Gilbert             return -1;
59046343570SDr. David Alan Gilbert         }
59146343570SDr. David Alan Gilbert 
592fbd162e6SYury Kotov         if (foreach_not_ignored_block(cleanup_range, mis)) {
5931caddf8aSDr. David Alan Gilbert             return -1;
5941caddf8aSDr. David Alan Gilbert         }
5959ab7ef9bSPeter Xu 
596c4faeed2SDr. David Alan Gilbert         trace_postcopy_ram_incoming_cleanup_closeuf();
597c4faeed2SDr. David Alan Gilbert         close(mis->userfault_fd);
59864f615feSPeter Xu         close(mis->userfault_event_fd);
599c4faeed2SDr. David Alan Gilbert         mis->have_fault_thread = false;
600c4faeed2SDr. David Alan Gilbert     }
601c4faeed2SDr. David Alan Gilbert 
60258b7c17eSDr. David Alan Gilbert     if (enable_mlock) {
60358b7c17eSDr. David Alan Gilbert         if (os_mlock() < 0) {
60458b7c17eSDr. David Alan Gilbert             error_report("mlock: %s", strerror(errno));
60558b7c17eSDr. David Alan Gilbert             /*
60658b7c17eSDr. David Alan Gilbert              * It doesn't feel right to fail at this point, we have a valid
60758b7c17eSDr. David Alan Gilbert              * VM state.
60858b7c17eSDr. David Alan Gilbert              */
60958b7c17eSDr. David Alan Gilbert         }
61058b7c17eSDr. David Alan Gilbert     }
61158b7c17eSDr. David Alan Gilbert 
612476ebf77SPeter Xu     postcopy_temp_pages_cleanup(mis);
613476ebf77SPeter Xu 
61465ace060SAlexey Perevalov     trace_postcopy_ram_incoming_cleanup_blocktime(
61565ace060SAlexey Perevalov             get_postcopy_total_blocktime());
61665ace060SAlexey Perevalov 
617c4faeed2SDr. David Alan Gilbert     trace_postcopy_ram_incoming_cleanup_exit();
6181caddf8aSDr. David Alan Gilbert     return 0;
6191caddf8aSDr. David Alan Gilbert }
6201caddf8aSDr. David Alan Gilbert 
621f0a227adSDr. David Alan Gilbert /*
622f9527107SDr. David Alan Gilbert  * Disable huge pages on an area
623f9527107SDr. David Alan Gilbert  */
624754cb9c0SYury Kotov static int nhp_range(RAMBlock *rb, void *opaque)
625f9527107SDr. David Alan Gilbert {
626754cb9c0SYury Kotov     const char *block_name = qemu_ram_get_idstr(rb);
627754cb9c0SYury Kotov     void *host_addr = qemu_ram_get_host_addr(rb);
628754cb9c0SYury Kotov     ram_addr_t offset = qemu_ram_get_offset(rb);
629898ba906SDavid Hildenbrand     ram_addr_t length = rb->postcopy_length;
630f9527107SDr. David Alan Gilbert     trace_postcopy_nhp_range(block_name, host_addr, offset, length);
631f9527107SDr. David Alan Gilbert 
632f9527107SDr. David Alan Gilbert     /*
633f9527107SDr. David Alan Gilbert      * Before we do discards we need to ensure those discards really
634f9527107SDr. David Alan Gilbert      * do delete areas of the page, even if THP thinks a hugepage would
635f9527107SDr. David Alan Gilbert      * be a good idea, so force hugepages off.
636f9527107SDr. David Alan Gilbert      */
6371d741439SDr. David Alan Gilbert     qemu_madvise(host_addr, length, QEMU_MADV_NOHUGEPAGE);
638f9527107SDr. David Alan Gilbert 
639f9527107SDr. David Alan Gilbert     return 0;
640f9527107SDr. David Alan Gilbert }
641f9527107SDr. David Alan Gilbert 
642f9527107SDr. David Alan Gilbert /*
643f9527107SDr. David Alan Gilbert  * Userfault requires us to mark RAM as NOHUGEPAGE prior to discarding;
644f9527107SDr. David Alan Gilbert  * however, leaving it until after precopy means that most of the precopy
645f9527107SDr. David Alan Gilbert  * data is still THP'd
646f9527107SDr. David Alan Gilbert  */
647f9527107SDr. David Alan Gilbert int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
648f9527107SDr. David Alan Gilbert {
649fbd162e6SYury Kotov     if (foreach_not_ignored_block(nhp_range, mis)) {
650f9527107SDr. David Alan Gilbert         return -1;
651f9527107SDr. David Alan Gilbert     }
652f9527107SDr. David Alan Gilbert 
653f9527107SDr. David Alan Gilbert     postcopy_state_set(POSTCOPY_INCOMING_DISCARD);
654f9527107SDr. David Alan Gilbert 
655f9527107SDr. David Alan Gilbert     return 0;
656f9527107SDr. David Alan Gilbert }
657f9527107SDr. David Alan Gilbert 
658f9527107SDr. David Alan Gilbert /*
659f0a227adSDr. David Alan Gilbert  * Mark the given area of RAM as requiring userfault notification for
660fbd162e6SYury Kotov  * accesses to pages that have not yet been received.
661f0a227adSDr. David Alan Gilbert  * Used as a callback on foreach_not_ignored_block.
662f0a227adSDr. David Alan Gilbert  *   rb: RAMBlock to register
663f0a227adSDr. David Alan Gilbert  *   opaque: MigrationIncomingState pointer
665f0a227adSDr. David Alan Gilbert  * Returns 0 on success
666f0a227adSDr. David Alan Gilbert  */
667754cb9c0SYury Kotov static int ram_block_enable_notify(RAMBlock *rb, void *opaque)
668f0a227adSDr. David Alan Gilbert {
669f0a227adSDr. David Alan Gilbert     MigrationIncomingState *mis = opaque;
670f0a227adSDr. David Alan Gilbert     struct uffdio_register reg_struct;
671f0a227adSDr. David Alan Gilbert 
672754cb9c0SYury Kotov     reg_struct.range.start = (uintptr_t)qemu_ram_get_host_addr(rb);
673898ba906SDavid Hildenbrand     reg_struct.range.len = rb->postcopy_length;
674f0a227adSDr. David Alan Gilbert     reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;
675f0a227adSDr. David Alan Gilbert 
676f0a227adSDr. David Alan Gilbert     /* Now tell our userfault_fd that it's responsible for this area */
677f0a227adSDr. David Alan Gilbert     if (ioctl(mis->userfault_fd, UFFDIO_REGISTER, &reg_struct)) {
678f0a227adSDr. David Alan Gilbert         error_report("%s userfault register: %s", __func__, strerror(errno));
679f0a227adSDr. David Alan Gilbert         return -1;
680f0a227adSDr. David Alan Gilbert     }
681665414adSDr. David Alan Gilbert     if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
682665414adSDr. David Alan Gilbert         error_report("%s userfault: Region doesn't support COPY", __func__);
683665414adSDr. David Alan Gilbert         return -1;
684665414adSDr. David Alan Gilbert     }
6852ce16640SDr. David Alan Gilbert     if (reg_struct.ioctls & ((__u64)1 << _UFFDIO_ZEROPAGE)) {
6862ce16640SDr. David Alan Gilbert         qemu_ram_set_uf_zeroable(rb);
6872ce16640SDr. David Alan Gilbert     }
688f0a227adSDr. David Alan Gilbert 
689f0a227adSDr. David Alan Gilbert     return 0;
690f0a227adSDr. David Alan Gilbert }
691f0a227adSDr. David Alan Gilbert 
6925efc3564SDr. David Alan Gilbert int postcopy_wake_shared(struct PostCopyFD *pcfd,
6935efc3564SDr. David Alan Gilbert                          uint64_t client_addr,
6945efc3564SDr. David Alan Gilbert                          RAMBlock *rb)
6955efc3564SDr. David Alan Gilbert {
6965efc3564SDr. David Alan Gilbert     size_t pagesize = qemu_ram_pagesize(rb);
6975efc3564SDr. David Alan Gilbert     struct uffdio_range range;
6985efc3564SDr. David Alan Gilbert     int ret;
6995efc3564SDr. David Alan Gilbert     trace_postcopy_wake_shared(client_addr, qemu_ram_get_idstr(rb));
7007648297dSDavid Hildenbrand     range.start = ROUND_DOWN(client_addr, pagesize);
7015efc3564SDr. David Alan Gilbert     range.len = pagesize;
7025efc3564SDr. David Alan Gilbert     ret = ioctl(pcfd->fd, UFFDIO_WAKE, &range);
7035efc3564SDr. David Alan Gilbert     if (ret) {
7045efc3564SDr. David Alan Gilbert         error_report("%s: Failed to wake: %zx in %s (%s)",
7055efc3564SDr. David Alan Gilbert                      __func__, (size_t)client_addr, qemu_ram_get_idstr(rb),
7065efc3564SDr. David Alan Gilbert                      strerror(errno));
7075efc3564SDr. David Alan Gilbert     }
7085efc3564SDr. David Alan Gilbert     return ret;
7095efc3564SDr. David Alan Gilbert }
7105efc3564SDr. David Alan Gilbert 
7119470c5e0SDavid Hildenbrand static int postcopy_request_page(MigrationIncomingState *mis, RAMBlock *rb,
7129470c5e0SDavid Hildenbrand                                  ram_addr_t start, uint64_t haddr)
7139470c5e0SDavid Hildenbrand {
7149470c5e0SDavid Hildenbrand     void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb));
7159470c5e0SDavid Hildenbrand 
7169470c5e0SDavid Hildenbrand     /*
7179470c5e0SDavid Hildenbrand      * Discarded pages (via RamDiscardManager) are never migrated. On unlikely
7189470c5e0SDavid Hildenbrand      * access, place a zeropage, which will also set the relevant bits in the
7199470c5e0SDavid Hildenbrand      * recv_bitmap accordingly, so we won't try placing a zeropage twice.
7209470c5e0SDavid Hildenbrand      *
7219470c5e0SDavid Hildenbrand      * Checking a single bit is sufficient to handle pagesize > TPS as either
7229470c5e0SDavid Hildenbrand      * all relevant bits are set or not.
7239470c5e0SDavid Hildenbrand      */
7249470c5e0SDavid Hildenbrand     assert(QEMU_IS_ALIGNED(start, qemu_ram_pagesize(rb)));
7259470c5e0SDavid Hildenbrand     if (ramblock_page_is_discarded(rb, start)) {
7269470c5e0SDavid Hildenbrand         bool received = ramblock_recv_bitmap_test_byte_offset(rb, start);
7279470c5e0SDavid Hildenbrand 
7289470c5e0SDavid Hildenbrand         return received ? 0 : postcopy_place_page_zero(mis, aligned, rb);
7299470c5e0SDavid Hildenbrand     }
7309470c5e0SDavid Hildenbrand 
7319470c5e0SDavid Hildenbrand     return migrate_send_rp_req_pages(mis, rb, start, haddr);
7329470c5e0SDavid Hildenbrand }
7339470c5e0SDavid Hildenbrand 
734f0a227adSDr. David Alan Gilbert /*
735096bf4c8SDr. David Alan Gilbert  * Callback from shared fault handlers to ask for a page; the page must be
736096bf4c8SDr. David Alan Gilbert  * specified by a RAMBlock and an offset within that RAMBlock
737096bf4c8SDr. David Alan Gilbert  * Note: Only for use by shared fault handlers (in fault thread)
738096bf4c8SDr. David Alan Gilbert  */
739096bf4c8SDr. David Alan Gilbert int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb,
740096bf4c8SDr. David Alan Gilbert                                  uint64_t client_addr, uint64_t rb_offset)
741096bf4c8SDr. David Alan Gilbert {
7427648297dSDavid Hildenbrand     uint64_t aligned_rbo = ROUND_DOWN(rb_offset, qemu_ram_pagesize(rb));
743096bf4c8SDr. David Alan Gilbert     MigrationIncomingState *mis = migration_incoming_get_current();
744096bf4c8SDr. David Alan Gilbert 
745096bf4c8SDr. David Alan Gilbert     trace_postcopy_request_shared_page(pcfd->idstr, qemu_ram_get_idstr(rb),
746096bf4c8SDr. David Alan Gilbert                                        rb_offset);
747dedfb4b2SDr. David Alan Gilbert     if (ramblock_recv_bitmap_test_byte_offset(rb, aligned_rbo)) {
748dedfb4b2SDr. David Alan Gilbert         trace_postcopy_request_shared_page_present(pcfd->idstr,
749dedfb4b2SDr. David Alan Gilbert                                         qemu_ram_get_idstr(rb), rb_offset);
750dedfb4b2SDr. David Alan Gilbert         return postcopy_wake_shared(pcfd, client_addr, rb);
751dedfb4b2SDr. David Alan Gilbert     }
7529470c5e0SDavid Hildenbrand     postcopy_request_page(mis, rb, aligned_rbo, client_addr);
753096bf4c8SDr. David Alan Gilbert     return 0;
754096bf4c8SDr. David Alan Gilbert }
755096bf4c8SDr. David Alan Gilbert 
756575b0b33SAlexey Perevalov static int get_mem_fault_cpu_index(uint32_t pid)
757575b0b33SAlexey Perevalov {
758575b0b33SAlexey Perevalov     CPUState *cpu_iter;
759575b0b33SAlexey Perevalov 
760575b0b33SAlexey Perevalov     CPU_FOREACH(cpu_iter) {
761575b0b33SAlexey Perevalov         if (cpu_iter->thread_id == pid) {
762575b0b33SAlexey Perevalov             trace_get_mem_fault_cpu_index(cpu_iter->cpu_index, pid);
763575b0b33SAlexey Perevalov             return cpu_iter->cpu_index;
764575b0b33SAlexey Perevalov         }
765575b0b33SAlexey Perevalov     }
766575b0b33SAlexey Perevalov     trace_get_mem_fault_cpu_index(-1, pid);
767575b0b33SAlexey Perevalov     return -1;
768575b0b33SAlexey Perevalov }
769575b0b33SAlexey Perevalov 
770575b0b33SAlexey Perevalov static uint32_t get_low_time_offset(PostcopyBlocktimeContext *dc)
771575b0b33SAlexey Perevalov {
772575b0b33SAlexey Perevalov     int64_t start_time_offset = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) -
773575b0b33SAlexey Perevalov                                     dc->start_time;
774575b0b33SAlexey Perevalov     return start_time_offset < 1 ? 1 : start_time_offset & UINT32_MAX;
775575b0b33SAlexey Perevalov }
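/*
 * Worked example: with ctx->start_time sampled at T, a fault at T + 5000ms
 * yields 5000.  The result is clamped to at least 1 and truncated to 32
 * bits, so 0 stays free to mean "no fault recorded" in vcpu_addr[] and
 * page_fault_vcpu_time[].
 */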
776575b0b33SAlexey Perevalov 
777575b0b33SAlexey Perevalov /*
778575b0b33SAlexey Perevalov  * This function is called when a page fault occurs. It
779575b0b33SAlexey Perevalov  * tracks down vCPU blocking time.
780575b0b33SAlexey Perevalov  *
781575b0b33SAlexey Perevalov  * @addr: faulted host virtual address
782575b0b33SAlexey Perevalov  * @ptid: faulted process thread id
783575b0b33SAlexey Perevalov  * @rb: ramblock appropriate to addr
784575b0b33SAlexey Perevalov  */
785575b0b33SAlexey Perevalov static void mark_postcopy_blocktime_begin(uintptr_t addr, uint32_t ptid,
786575b0b33SAlexey Perevalov                                           RAMBlock *rb)
787575b0b33SAlexey Perevalov {
788575b0b33SAlexey Perevalov     int cpu, already_received;
789575b0b33SAlexey Perevalov     MigrationIncomingState *mis = migration_incoming_get_current();
790575b0b33SAlexey Perevalov     PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
791575b0b33SAlexey Perevalov     uint32_t low_time_offset;
792575b0b33SAlexey Perevalov 
793575b0b33SAlexey Perevalov     if (!dc || ptid == 0) {
794575b0b33SAlexey Perevalov         return;
795575b0b33SAlexey Perevalov     }
796575b0b33SAlexey Perevalov     cpu = get_mem_fault_cpu_index(ptid);
797575b0b33SAlexey Perevalov     if (cpu < 0) {
798575b0b33SAlexey Perevalov         return;
799575b0b33SAlexey Perevalov     }
800575b0b33SAlexey Perevalov 
801575b0b33SAlexey Perevalov     low_time_offset = get_low_time_offset(dc);
802575b0b33SAlexey Perevalov     if (dc->vcpu_addr[cpu] == 0) {
803d73415a3SStefan Hajnoczi         qatomic_inc(&dc->smp_cpus_down);
804575b0b33SAlexey Perevalov     }
805575b0b33SAlexey Perevalov 
806d73415a3SStefan Hajnoczi     qatomic_xchg(&dc->last_begin, low_time_offset);
807d73415a3SStefan Hajnoczi     qatomic_xchg(&dc->page_fault_vcpu_time[cpu], low_time_offset);
808d73415a3SStefan Hajnoczi     qatomic_xchg(&dc->vcpu_addr[cpu], addr);
809575b0b33SAlexey Perevalov 
810da1725d3SWei Yang     /*
811da1725d3SWei Yang      * Check it here, not at the beginning of the function, because
812da1725d3SWei Yang      * the check could otherwise happen earlier than the bitmap_set in
813da1725d3SWei Yang      * qemu_ufd_copy_ioctl
814da1725d3SWei Yang      */
815575b0b33SAlexey Perevalov     already_received = ramblock_recv_bitmap_test(rb, (void *)addr);
816575b0b33SAlexey Perevalov     if (already_received) {
817d73415a3SStefan Hajnoczi         qatomic_xchg(&dc->vcpu_addr[cpu], 0);
818d73415a3SStefan Hajnoczi         qatomic_xchg(&dc->page_fault_vcpu_time[cpu], 0);
819d73415a3SStefan Hajnoczi         qatomic_dec(&dc->smp_cpus_down);
820575b0b33SAlexey Perevalov     }
821575b0b33SAlexey Perevalov     trace_mark_postcopy_blocktime_begin(addr, dc, dc->page_fault_vcpu_time[cpu],
822575b0b33SAlexey Perevalov                                         cpu, already_received);
823575b0b33SAlexey Perevalov }
824575b0b33SAlexey Perevalov 
825575b0b33SAlexey Perevalov /*
826575b0b33SAlexey Perevalov  *  This function just provides the calculated blocktime per vCPU and traces it.
827575b0b33SAlexey Perevalov  *  Total blocktime is calculated in mark_postcopy_blocktime_end.
828575b0b33SAlexey Perevalov  *
829575b0b33SAlexey Perevalov  *
830575b0b33SAlexey Perevalov  * Assume we have 3 CPUs
831575b0b33SAlexey Perevalov  *
832575b0b33SAlexey Perevalov  *      S1        E1           S1               E1
833575b0b33SAlexey Perevalov  * -----***********------------xxx***************------------------------> CPU1
834575b0b33SAlexey Perevalov  *
835575b0b33SAlexey Perevalov  *             S2                E2
836575b0b33SAlexey Perevalov  * ------------****************xxx---------------------------------------> CPU2
837575b0b33SAlexey Perevalov  *
838575b0b33SAlexey Perevalov  *                         S3            E3
839575b0b33SAlexey Perevalov  * ------------------------****xxx********-------------------------------> CPU3
840575b0b33SAlexey Perevalov  *
841575b0b33SAlexey Perevalov  * We have sequence S1,S2,E1,S3,S1,E2,E3,E1
842575b0b33SAlexey Perevalov  * S2,E1 - doesn't match the condition, because the sequence S1,S2,E1 doesn't include CPU3
843575b0b33SAlexey Perevalov  * S3,S1,E2 - sequence includes all CPUs, in this case overlap will be S1,E2 -
844575b0b33SAlexey Perevalov  *            it's a part of total blocktime.
845575b0b33SAlexey Perevalov  * S1 - here is last_begin
846575b0b33SAlexey Perevalov  * Legend of the picture:
847575b0b33SAlexey Perevalov  *              * - means blocktime per vCPU
848575b0b33SAlexey Perevalov  *              x - means overlapped blocktime (total blocktime)
849575b0b33SAlexey Perevalov  *
850575b0b33SAlexey Perevalov  * @addr: host virtual address
851575b0b33SAlexey Perevalov  */
852575b0b33SAlexey Perevalov static void mark_postcopy_blocktime_end(uintptr_t addr)
853575b0b33SAlexey Perevalov {
854575b0b33SAlexey Perevalov     MigrationIncomingState *mis = migration_incoming_get_current();
855575b0b33SAlexey Perevalov     PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
8565cc8767dSLike Xu     MachineState *ms = MACHINE(qdev_get_machine());
8575cc8767dSLike Xu     unsigned int smp_cpus = ms->smp.cpus;
858575b0b33SAlexey Perevalov     int i, affected_cpu = 0;
859575b0b33SAlexey Perevalov     bool vcpu_total_blocktime = false;
860575b0b33SAlexey Perevalov     uint32_t read_vcpu_time, low_time_offset;
861575b0b33SAlexey Perevalov 
862575b0b33SAlexey Perevalov     if (!dc) {
863575b0b33SAlexey Perevalov         return;
864575b0b33SAlexey Perevalov     }
865575b0b33SAlexey Perevalov 
866575b0b33SAlexey Perevalov     low_time_offset = get_low_time_offset(dc);
867575b0b33SAlexey Perevalov     /* Look up the cpu in order to clear it;
8683a4452d8Szhaolichang      * this algorithm looks straightforward, but it's not
869575b0b33SAlexey Perevalov      * optimal: a more optimal algorithm would keep a tree or hash
870575b0b33SAlexey Perevalov      * where the key is the address and the value is a list of  */
871575b0b33SAlexey Perevalov     for (i = 0; i < smp_cpus; i++) {
872575b0b33SAlexey Perevalov         uint32_t vcpu_blocktime = 0;
873575b0b33SAlexey Perevalov 
874d73415a3SStefan Hajnoczi         read_vcpu_time = qatomic_fetch_add(&dc->page_fault_vcpu_time[i], 0);
875d73415a3SStefan Hajnoczi         if (qatomic_fetch_add(&dc->vcpu_addr[i], 0) != addr ||
876575b0b33SAlexey Perevalov             read_vcpu_time == 0) {
877575b0b33SAlexey Perevalov             continue;
878575b0b33SAlexey Perevalov         }
879d73415a3SStefan Hajnoczi         qatomic_xchg(&dc->vcpu_addr[i], 0);
880575b0b33SAlexey Perevalov         vcpu_blocktime = low_time_offset - read_vcpu_time;
881575b0b33SAlexey Perevalov         affected_cpu += 1;
882575b0b33SAlexey Perevalov         /* We need to know whether this end event was due to a faulted
883575b0b33SAlexey Perevalov          * page; the other possible case is a prefetched page, and in
884575b0b33SAlexey Perevalov          * that case we shouldn't be here */
885575b0b33SAlexey Perevalov         if (!vcpu_total_blocktime &&
886d73415a3SStefan Hajnoczi             qatomic_fetch_add(&dc->smp_cpus_down, 0) == smp_cpus) {
887575b0b33SAlexey Perevalov             vcpu_total_blocktime = true;
888575b0b33SAlexey Perevalov         }
889575b0b33SAlexey Perevalov         /* continue the loop, since one page could affect several vCPUs */
890575b0b33SAlexey Perevalov         dc->vcpu_blocktime[i] += vcpu_blocktime;
891575b0b33SAlexey Perevalov     }
892575b0b33SAlexey Perevalov 
893d73415a3SStefan Hajnoczi     qatomic_sub(&dc->smp_cpus_down, affected_cpu);
894575b0b33SAlexey Perevalov     if (vcpu_total_blocktime) {
895d73415a3SStefan Hajnoczi         dc->total_blocktime += low_time_offset - qatomic_fetch_add(
896575b0b33SAlexey Perevalov                 &dc->last_begin, 0);
897575b0b33SAlexey Perevalov     }
898575b0b33SAlexey Perevalov     trace_mark_postcopy_blocktime_end(addr, dc, dc->total_blocktime,
899575b0b33SAlexey Perevalov                                       affected_cpu);
900575b0b33SAlexey Perevalov }
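/*
 * Worked example (numbers invented), 2 vCPUs faulting on the same missing
 * page: CPU0 faults at offset 10ms, CPU1 at 12ms, and the page is placed at
 * 20ms.  Then vcpu_blocktime[0] += 10 and vcpu_blocktime[1] += 8; because
 * both vCPUs were down, the overlap last_begin(12)..20 adds 8 to
 * total_blocktime.
 */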
901575b0b33SAlexey Perevalov 
90227dd21b4SPeter Xu static void postcopy_pause_fault_thread(MigrationIncomingState *mis)
9033a7804c3SPeter Xu {
9043a7804c3SPeter Xu     trace_postcopy_pause_fault_thread();
9053a7804c3SPeter Xu     qemu_sem_wait(&mis->postcopy_pause_sem_fault);
9063a7804c3SPeter Xu     trace_postcopy_pause_fault_thread_continued();
9073a7804c3SPeter Xu }
9083a7804c3SPeter Xu 
909096bf4c8SDr. David Alan Gilbert /*
910f0a227adSDr. David Alan Gilbert  * Handle faults detected by the USERFAULT markings
911f0a227adSDr. David Alan Gilbert  */
912f0a227adSDr. David Alan Gilbert static void *postcopy_ram_fault_thread(void *opaque)
913f0a227adSDr. David Alan Gilbert {
914f0a227adSDr. David Alan Gilbert     MigrationIncomingState *mis = opaque;
915c4faeed2SDr. David Alan Gilbert     struct uffd_msg msg;
916c4faeed2SDr. David Alan Gilbert     int ret;
91700fa4fc8SDr. David Alan Gilbert     size_t index;
918c4faeed2SDr. David Alan Gilbert     RAMBlock *rb = NULL;
919f0a227adSDr. David Alan Gilbert 
920c4faeed2SDr. David Alan Gilbert     trace_postcopy_ram_fault_thread_entry();
92174637e6fSLidong Chen     rcu_register_thread();
922096bf4c8SDr. David Alan Gilbert     mis->last_rb = NULL; /* last RAMBlock we sent part of */
923095c12a4SPeter Xu     qemu_sem_post(&mis->thread_sync_sem);
924c4faeed2SDr. David Alan Gilbert 
92500fa4fc8SDr. David Alan Gilbert     struct pollfd *pfd;
92600fa4fc8SDr. David Alan Gilbert     size_t pfd_len = 2 + mis->postcopy_remote_fds->len;
92700fa4fc8SDr. David Alan Gilbert 
92800fa4fc8SDr. David Alan Gilbert     pfd = g_new0(struct pollfd, pfd_len);
92900fa4fc8SDr. David Alan Gilbert 
93000fa4fc8SDr. David Alan Gilbert     pfd[0].fd = mis->userfault_fd;
93100fa4fc8SDr. David Alan Gilbert     pfd[0].events = POLLIN;
93200fa4fc8SDr. David Alan Gilbert     pfd[1].fd = mis->userfault_event_fd;
93300fa4fc8SDr. David Alan Gilbert     pfd[1].events = POLLIN; /* Waiting for eventfd to go positive */
93400fa4fc8SDr. David Alan Gilbert     trace_postcopy_ram_fault_thread_fds_core(pfd[0].fd, pfd[1].fd);
93500fa4fc8SDr. David Alan Gilbert     for (index = 0; index < mis->postcopy_remote_fds->len; index++) {
93600fa4fc8SDr. David Alan Gilbert         struct PostCopyFD *pcfd = &g_array_index(mis->postcopy_remote_fds,
93700fa4fc8SDr. David Alan Gilbert                                                  struct PostCopyFD, index);
93800fa4fc8SDr. David Alan Gilbert         pfd[2 + index].fd = pcfd->fd;
93900fa4fc8SDr. David Alan Gilbert         pfd[2 + index].events = POLLIN;
94000fa4fc8SDr. David Alan Gilbert         trace_postcopy_ram_fault_thread_fds_extra(2 + index, pcfd->idstr,
94100fa4fc8SDr. David Alan Gilbert                                                   pcfd->fd);
94200fa4fc8SDr. David Alan Gilbert     }
94300fa4fc8SDr. David Alan Gilbert 
944c4faeed2SDr. David Alan Gilbert     while (true) {
945c4faeed2SDr. David Alan Gilbert         ram_addr_t rb_offset;
94600fa4fc8SDr. David Alan Gilbert         int poll_result;
947c4faeed2SDr. David Alan Gilbert 
948c4faeed2SDr. David Alan Gilbert         /*
949c4faeed2SDr. David Alan Gilbert          * We're mainly waiting for the kernel to give us a faulting HVA,
950c4faeed2SDr. David Alan Gilbert          * however we can be told to quit via userfault_event_fd, which is
951c4faeed2SDr. David Alan Gilbert          * an eventfd
952c4faeed2SDr. David Alan Gilbert          */
953c4faeed2SDr. David Alan Gilbert 
95400fa4fc8SDr. David Alan Gilbert         poll_result = poll(pfd, pfd_len, -1 /* Wait forever */);
95500fa4fc8SDr. David Alan Gilbert         if (poll_result == -1) {
956c4faeed2SDr. David Alan Gilbert             error_report("%s: userfault poll: %s", __func__, strerror(errno));
957c4faeed2SDr. David Alan Gilbert             break;
958f0a227adSDr. David Alan Gilbert         }
959f0a227adSDr. David Alan Gilbert 
9603a7804c3SPeter Xu         if (!mis->to_src_file) {
9613a7804c3SPeter Xu             /*
9623a7804c3SPeter Xu              * Someone may have told us via the event that the return
9633a7804c3SPeter Xu              * path is already broken.  We should hold until the channel
9643a7804c3SPeter Xu              * is rebuilt.
9653a7804c3SPeter Xu              */
96627dd21b4SPeter Xu             postcopy_pause_fault_thread(mis);
9673a7804c3SPeter Xu         }
9683a7804c3SPeter Xu 
969c4faeed2SDr. David Alan Gilbert         if (pfd[1].revents) {
97064f615feSPeter Xu             uint64_t tmp64 = 0;
97164f615feSPeter Xu 
97264f615feSPeter Xu             /* Consume the signal */
97364f615feSPeter Xu             if (read(mis->userfault_event_fd, &tmp64, 8) != 8) {
97464f615feSPeter Xu                 /* Nothing obviously nicer than posting this error. */
97564f615feSPeter Xu                 error_report("%s: read() failed", __func__);
97664f615feSPeter Xu             }
97764f615feSPeter Xu 
978d73415a3SStefan Hajnoczi             if (qatomic_read(&mis->fault_thread_quit)) {
979c4faeed2SDr. David Alan Gilbert                 trace_postcopy_ram_fault_thread_quit();
980c4faeed2SDr. David Alan Gilbert                 break;
981c4faeed2SDr. David Alan Gilbert             }
98264f615feSPeter Xu         }
983c4faeed2SDr. David Alan Gilbert 
98400fa4fc8SDr. David Alan Gilbert         if (pfd[0].revents) {
98500fa4fc8SDr. David Alan Gilbert             poll_result--;
986c4faeed2SDr. David Alan Gilbert             ret = read(mis->userfault_fd, &msg, sizeof(msg));
987c4faeed2SDr. David Alan Gilbert             if (ret != sizeof(msg)) {
988c4faeed2SDr. David Alan Gilbert                 if (errno == EAGAIN) {
989c4faeed2SDr. David Alan Gilbert                     /*
990c4faeed2SDr. David Alan Gilbert                      * if a wake up happens on the other thread just after
991c4faeed2SDr. David Alan Gilbert                      * the poll, there is nothing to read.
992c4faeed2SDr. David Alan Gilbert                      */
993c4faeed2SDr. David Alan Gilbert                     continue;
994c4faeed2SDr. David Alan Gilbert                 }
995c4faeed2SDr. David Alan Gilbert                 if (ret < 0) {
99600fa4fc8SDr. David Alan Gilbert                     error_report("%s: Failed to read full userfault "
99700fa4fc8SDr. David Alan Gilbert                                  "message: %s",
998c4faeed2SDr. David Alan Gilbert                                  __func__, strerror(errno));
999c4faeed2SDr. David Alan Gilbert                     break;
1000c4faeed2SDr. David Alan Gilbert                 } else {
100100fa4fc8SDr. David Alan Gilbert                     error_report("%s: Read %d bytes from userfaultfd "
100200fa4fc8SDr. David Alan Gilbert                                  "expected %zd",
1003c4faeed2SDr. David Alan Gilbert                                  __func__, ret, sizeof(msg));
1004c4faeed2SDr. David Alan Gilbert                     break; /* Lost alignment, don't know what we'd read next */
1005c4faeed2SDr. David Alan Gilbert                 }
1006c4faeed2SDr. David Alan Gilbert             }
1007c4faeed2SDr. David Alan Gilbert             if (msg.event != UFFD_EVENT_PAGEFAULT) {
1008c4faeed2SDr. David Alan Gilbert                 error_report("%s: Read unexpected event %u from userfaultfd",
1009c4faeed2SDr. David Alan Gilbert                              __func__, msg.event);
1010c4faeed2SDr. David Alan Gilbert                 continue; /* It's not a page fault, shouldn't happen */
1011c4faeed2SDr. David Alan Gilbert             }
1012c4faeed2SDr. David Alan Gilbert 
1013c4faeed2SDr. David Alan Gilbert             rb = qemu_ram_block_from_host(
1014c4faeed2SDr. David Alan Gilbert                      (void *)(uintptr_t)msg.arg.pagefault.address,
1015f615f396SPaolo Bonzini                      true, &rb_offset);
1016c4faeed2SDr. David Alan Gilbert             if (!rb) {
1017c4faeed2SDr. David Alan Gilbert                 error_report("postcopy_ram_fault_thread: Fault outside guest: %"
1018c4faeed2SDr. David Alan Gilbert                              PRIx64, (uint64_t)msg.arg.pagefault.address);
1019c4faeed2SDr. David Alan Gilbert                 break;
1020c4faeed2SDr. David Alan Gilbert             }
1021c4faeed2SDr. David Alan Gilbert 
10227648297dSDavid Hildenbrand             rb_offset = ROUND_DOWN(rb_offset, qemu_ram_pagesize(rb));
1023c4faeed2SDr. David Alan Gilbert             trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address,
1024c4faeed2SDr. David Alan Gilbert                                                 qemu_ram_get_idstr(rb),
1025575b0b33SAlexey Perevalov                                                 rb_offset,
1026575b0b33SAlexey Perevalov                                                 msg.arg.pagefault.feat.ptid);
1027575b0b33SAlexey Perevalov             mark_postcopy_blocktime_begin(
1028575b0b33SAlexey Perevalov                     (uintptr_t)(msg.arg.pagefault.address),
1029575b0b33SAlexey Perevalov                                 msg.arg.pagefault.feat.ptid, rb);
1030575b0b33SAlexey Perevalov 
10313a7804c3SPeter Xu retry:
1032c4faeed2SDr. David Alan Gilbert             /*
1033c4faeed2SDr. David Alan Gilbert              * Send the request to the source - we want to request one
1034c4faeed2SDr. David Alan Gilbert              * of our host page sizes (which is >= TPS)
1035c4faeed2SDr. David Alan Gilbert              */
10369470c5e0SDavid Hildenbrand             ret = postcopy_request_page(mis, rb, rb_offset,
10378f8bfffcSPeter Xu                                         msg.arg.pagefault.address);
10383a7804c3SPeter Xu             if (ret) {
10393a7804c3SPeter Xu                 /* May be network failure, try to wait for recovery */
104027dd21b4SPeter Xu                 postcopy_pause_fault_thread(mis);
10413a7804c3SPeter Xu                 goto retry;
1042c4faeed2SDr. David Alan Gilbert             }
1043c4faeed2SDr. David Alan Gilbert         }
104400fa4fc8SDr. David Alan Gilbert 
104500fa4fc8SDr. David Alan Gilbert         /* Now handle any requests from external processes on shared memory */
104600fa4fc8SDr. David Alan Gilbert         /* TODO: May need to handle devices deregistering during postcopy */
104700fa4fc8SDr. David Alan Gilbert         for (index = 2; index < pfd_len && poll_result; index++) {
104800fa4fc8SDr. David Alan Gilbert             if (pfd[index].revents) {
104900fa4fc8SDr. David Alan Gilbert                 struct PostCopyFD *pcfd =
105000fa4fc8SDr. David Alan Gilbert                     &g_array_index(mis->postcopy_remote_fds,
105100fa4fc8SDr. David Alan Gilbert                                    struct PostCopyFD, index - 2);
105200fa4fc8SDr. David Alan Gilbert 
105300fa4fc8SDr. David Alan Gilbert                 poll_result--;
105400fa4fc8SDr. David Alan Gilbert                 if (pfd[index].revents & POLLERR) {
105500fa4fc8SDr. David Alan Gilbert                     error_report("%s: POLLERR on poll %zd fd=%d",
105600fa4fc8SDr. David Alan Gilbert                                  __func__, index, pcfd->fd);
105700fa4fc8SDr. David Alan Gilbert                     pfd[index].events = 0;
105800fa4fc8SDr. David Alan Gilbert                     continue;
105900fa4fc8SDr. David Alan Gilbert                 }
106000fa4fc8SDr. David Alan Gilbert 
106100fa4fc8SDr. David Alan Gilbert                 ret = read(pcfd->fd, &msg, sizeof(msg));
106200fa4fc8SDr. David Alan Gilbert                 if (ret != sizeof(msg)) {
106300fa4fc8SDr. David Alan Gilbert                     if (errno == EAGAIN) {
106400fa4fc8SDr. David Alan Gilbert                         /*
106500fa4fc8SDr. David Alan Gilbert                          * if a wake up happens on the other thread just after
106600fa4fc8SDr. David Alan Gilbert                          * the poll, there is nothing to read.
106700fa4fc8SDr. David Alan Gilbert                          */
106800fa4fc8SDr. David Alan Gilbert                         continue;
106900fa4fc8SDr. David Alan Gilbert                     }
107000fa4fc8SDr. David Alan Gilbert                     if (ret < 0) {
107100fa4fc8SDr. David Alan Gilbert                         error_report("%s: Failed to read full userfault "
107200fa4fc8SDr. David Alan Gilbert                                      "message: %s (shared) revents=%d",
107300fa4fc8SDr. David Alan Gilbert                                      __func__, strerror(errno),
107400fa4fc8SDr. David Alan Gilbert                                      pfd[index].revents);
107500fa4fc8SDr. David Alan Gilbert                         /*TODO: Could just disable this sharer */
107600fa4fc8SDr. David Alan Gilbert                         break;
107700fa4fc8SDr. David Alan Gilbert                     } else {
107800fa4fc8SDr. David Alan Gilbert                         error_report("%s: Read %d bytes from userfaultfd "
107900fa4fc8SDr. David Alan Gilbert                                      "expected %zd (shared)",
108000fa4fc8SDr. David Alan Gilbert                                      __func__, ret, sizeof(msg));
108100fa4fc8SDr. David Alan Gilbert                         /*TODO: Could just disable this sharer */
108200fa4fc8SDr. David Alan Gilbert                         break; /* Lost alignment, don't know what we'd read next */
108300fa4fc8SDr. David Alan Gilbert                     }
108400fa4fc8SDr. David Alan Gilbert                 }
108500fa4fc8SDr. David Alan Gilbert                 if (msg.event != UFFD_EVENT_PAGEFAULT) {
108600fa4fc8SDr. David Alan Gilbert                     error_report("%s: Read unexpected event %u "
108700fa4fc8SDr. David Alan Gilbert                                  "from userfaultfd (shared)",
108800fa4fc8SDr. David Alan Gilbert                                  __func__, msg.event);
108900fa4fc8SDr. David Alan Gilbert                     continue; /* It's not a page fault, shouldn't happen */
109000fa4fc8SDr. David Alan Gilbert                 }
109100fa4fc8SDr. David Alan Gilbert                 /* Call the device handler registered with us */
109200fa4fc8SDr. David Alan Gilbert                 ret = pcfd->handler(pcfd, &msg);
109300fa4fc8SDr. David Alan Gilbert                 if (ret) {
109400fa4fc8SDr. David Alan Gilbert                     error_report("%s: Failed to resolve shared fault on %zd/%s",
109500fa4fc8SDr. David Alan Gilbert                                  __func__, index, pcfd->idstr);
109600fa4fc8SDr. David Alan Gilbert                     /* TODO: Fail? Disable this sharer? */
109700fa4fc8SDr. David Alan Gilbert                 }
109800fa4fc8SDr. David Alan Gilbert             }
109900fa4fc8SDr. David Alan Gilbert         }
110000fa4fc8SDr. David Alan Gilbert     }
110174637e6fSLidong Chen     rcu_unregister_thread();
1102c4faeed2SDr. David Alan Gilbert     trace_postcopy_ram_fault_thread_exit();
1103fc6008f3SMarc-André Lureau     g_free(pfd);
1104f0a227adSDr. David Alan Gilbert     return NULL;
1105f0a227adSDr. David Alan Gilbert }
1106f0a227adSDr. David Alan Gilbert 
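/*
 * Illustrative, non-built sketch of how the loop above is asked to quit:
 * the caller raises fault_thread_quit and then kicks userfault_event_fd
 * (via postcopy_fault_thread_notify() below) so the poll() returns.  This
 * is a hedged reconstruction of the shutdown sequence, not a copy of the
 * actual cleanup code.
 */
#if 0
static void fault_thread_request_quit(MigrationIncomingState *mis)
{
    qatomic_set(&mis->fault_thread_quit, 1);
    postcopy_fault_thread_notify(mis);  /* writes 1 to userfault_event_fd */
    qemu_thread_join(&mis->fault_thread);
}
#endif
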
1107476ebf77SPeter Xu static int postcopy_temp_pages_setup(MigrationIncomingState *mis)
1108476ebf77SPeter Xu {
110977dadc3fSPeter Xu     PostcopyTmpPage *tmp_page;
111077dadc3fSPeter Xu     int err, i, channels;
111177dadc3fSPeter Xu     void *temp_page;
1112476ebf77SPeter Xu 
111336f62f11SPeter Xu     if (migrate_postcopy_preempt()) {
111436f62f11SPeter Xu         /* If preemption enabled, need extra channel for urgent requests */
111536f62f11SPeter Xu         mis->postcopy_channels = RAM_CHANNEL_MAX;
111636f62f11SPeter Xu     } else {
111736f62f11SPeter Xu         /* Both precopy/postcopy on the same channel */
111877dadc3fSPeter Xu         mis->postcopy_channels = 1;
111936f62f11SPeter Xu     }
112077dadc3fSPeter Xu 
112177dadc3fSPeter Xu     channels = mis->postcopy_channels;
112277dadc3fSPeter Xu     mis->postcopy_tmp_pages = g_malloc0_n(sizeof(PostcopyTmpPage), channels);
112377dadc3fSPeter Xu 
112477dadc3fSPeter Xu     for (i = 0; i < channels; i++) {
112577dadc3fSPeter Xu         tmp_page = &mis->postcopy_tmp_pages[i];
112677dadc3fSPeter Xu         temp_page = mmap(NULL, mis->largest_page_size, PROT_READ | PROT_WRITE,
1127476ebf77SPeter Xu                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
112877dadc3fSPeter Xu         if (temp_page == MAP_FAILED) {
1129476ebf77SPeter Xu             err = errno;
113077dadc3fSPeter Xu             error_report("%s: Failed to map postcopy_tmp_pages[%d]: %s",
113177dadc3fSPeter Xu                          __func__, i, strerror(err));
113277dadc3fSPeter Xu             /* Clean up will be done later */
1133476ebf77SPeter Xu             return -err;
1134476ebf77SPeter Xu         }
113577dadc3fSPeter Xu         tmp_page->tmp_huge_page = temp_page;
113677dadc3fSPeter Xu         /* Initialize default states for each tmp page */
113777dadc3fSPeter Xu         postcopy_temp_page_reset(tmp_page);
113877dadc3fSPeter Xu     }
1139476ebf77SPeter Xu 
1140476ebf77SPeter Xu     /*
1141476ebf77SPeter Xu      * Map large zero page when kernel can't use UFFDIO_ZEROPAGE for hugepages
1142476ebf77SPeter Xu      */
1143476ebf77SPeter Xu     mis->postcopy_tmp_zero_page = mmap(NULL, mis->largest_page_size,
1144476ebf77SPeter Xu                                        PROT_READ | PROT_WRITE,
1145476ebf77SPeter Xu                                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1146476ebf77SPeter Xu     if (mis->postcopy_tmp_zero_page == MAP_FAILED) {
1147476ebf77SPeter Xu         err = errno;
1148476ebf77SPeter Xu         mis->postcopy_tmp_zero_page = NULL;
1149476ebf77SPeter Xu         error_report("%s: Failed to map large zero page %s",
1150476ebf77SPeter Xu                      __func__, strerror(err));
1151476ebf77SPeter Xu         return -err;
1152476ebf77SPeter Xu     }
1153476ebf77SPeter Xu 
1154476ebf77SPeter Xu     memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size);
1155476ebf77SPeter Xu 
1156476ebf77SPeter Xu     return 0;
1157476ebf77SPeter Xu }
1158476ebf77SPeter Xu 
11592a7eb148SWei Yang int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
1160f0a227adSDr. David Alan Gilbert {
1161c4faeed2SDr. David Alan Gilbert     /* Open the fd for the kernel to give us userfaults */
1162c4faeed2SDr. David Alan Gilbert     mis->userfault_fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
1163c4faeed2SDr. David Alan Gilbert     if (mis->userfault_fd == -1) {
1164c4faeed2SDr. David Alan Gilbert         error_report("%s: Failed to open userfault fd: %s", __func__,
1165c4faeed2SDr. David Alan Gilbert                      strerror(errno));
1166c4faeed2SDr. David Alan Gilbert         return -1;
1167c4faeed2SDr. David Alan Gilbert     }
1168c4faeed2SDr. David Alan Gilbert 
1169c4faeed2SDr. David Alan Gilbert     /*
1170c4faeed2SDr. David Alan Gilbert      * Although the host check already tested the API, we need to
1171c4faeed2SDr. David Alan Gilbert      * do the check again as an ABI handshake on the new fd.
1172c4faeed2SDr. David Alan Gilbert      */
117354ae0886SAlexey Perevalov     if (!ufd_check_and_apply(mis->userfault_fd, mis)) {
1174c4faeed2SDr. David Alan Gilbert         return -1;
1175c4faeed2SDr. David Alan Gilbert     }
1176c4faeed2SDr. David Alan Gilbert 
1177c4faeed2SDr. David Alan Gilbert     /* Now an eventfd we use to tell the fault-thread to quit */
117864f615feSPeter Xu     mis->userfault_event_fd = eventfd(0, EFD_CLOEXEC);
117964f615feSPeter Xu     if (mis->userfault_event_fd == -1) {
118064f615feSPeter Xu         error_report("%s: Opening userfault_event_fd: %s", __func__,
1181c4faeed2SDr. David Alan Gilbert                      strerror(errno));
1182c4faeed2SDr. David Alan Gilbert         close(mis->userfault_fd);
1183c4faeed2SDr. David Alan Gilbert         return -1;
1184c4faeed2SDr. David Alan Gilbert     }
1185c4faeed2SDr. David Alan Gilbert 
118636f62f11SPeter Xu     postcopy_thread_create(mis, &mis->fault_thread, "fault-default",
1187095c12a4SPeter Xu                            postcopy_ram_fault_thread, QEMU_THREAD_JOINABLE);
1188c4faeed2SDr. David Alan Gilbert     mis->have_fault_thread = true;
1189f0a227adSDr. David Alan Gilbert 
1190f0a227adSDr. David Alan Gilbert     /* Mark so that we get notified of accesses to unwritten areas */
1191fbd162e6SYury Kotov     if (foreach_not_ignored_block(ram_block_enable_notify, mis)) {
119291b02dc7SFei Li         error_report("ram_block_enable_notify failed");
1193f0a227adSDr. David Alan Gilbert         return -1;
1194f0a227adSDr. David Alan Gilbert     }
1195f0a227adSDr. David Alan Gilbert 
1196476ebf77SPeter Xu     if (postcopy_temp_pages_setup(mis)) {
1197476ebf77SPeter Xu         /* Error dumped in the sub-function */
11983414322aSWei Yang         return -1;
11993414322aSWei Yang     }
12003414322aSWei Yang 
120136f62f11SPeter Xu     if (migrate_postcopy_preempt()) {
120236f62f11SPeter Xu         /*
120336f62f11SPeter Xu          * This thread needs to be created after the temp pages because
120436f62f11SPeter Xu          * it'll fetch the RAM_CHANNEL_POSTCOPY PostcopyTmpPage immediately.
120536f62f11SPeter Xu          */
120636f62f11SPeter Xu         postcopy_thread_create(mis, &mis->postcopy_prio_thread, "fault-fast",
120736f62f11SPeter Xu                                postcopy_preempt_thread, QEMU_THREAD_JOINABLE);
120836f62f11SPeter Xu         mis->postcopy_prio_thread_created = true;
120936f62f11SPeter Xu     }
121036f62f11SPeter Xu 
1211c4faeed2SDr. David Alan Gilbert     trace_postcopy_ram_enable_notify();
1212c4faeed2SDr. David Alan Gilbert 
1213f0a227adSDr. David Alan Gilbert     return 0;
1214f0a227adSDr. David Alan Gilbert }
1215f0a227adSDr. David Alan Gilbert 
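/*
 * Illustrative, non-built sketch of the ABI handshake mentioned in the
 * function above: ufd_check_and_apply() performs (roughly) a UFFDIO_API
 * ioctl like this on the freshly opened fd.  The feature and ioctl bits
 * shown are examples only, not the exact set QEMU negotiates.
 */
#if 0
static bool uffd_api_handshake(int ufd)
{
    struct uffdio_api api_struct = {
        .api = UFFD_API,
        .features = 0,      /* optional UFFD_FEATURE_* bits requested here */
    };

    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("UFFDIO_API failed: %s", strerror(errno));
        return false;
    }
    /* api_struct.ioctls now tells us which userfaultfd ioctls we may use */
    return (api_struct.ioctls & ((__u64)1 << _UFFDIO_REGISTER)) != 0;
}
#endif
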
1216eef621c4SPeter Xu static int qemu_ufd_copy_ioctl(MigrationIncomingState *mis, void *host_addr,
1217f9494614SAlexey Perevalov                                void *from_addr, uint64_t pagesize, RAMBlock *rb)
1218727b9d7eSAlexey Perevalov {
1219eef621c4SPeter Xu     int userfault_fd = mis->userfault_fd;
1220f9494614SAlexey Perevalov     int ret;
1221eef621c4SPeter Xu 
1222727b9d7eSAlexey Perevalov     if (from_addr) {
1223727b9d7eSAlexey Perevalov         struct uffdio_copy copy_struct;
1224727b9d7eSAlexey Perevalov         copy_struct.dst = (uint64_t)(uintptr_t)host_addr;
1225727b9d7eSAlexey Perevalov         copy_struct.src = (uint64_t)(uintptr_t)from_addr;
1226727b9d7eSAlexey Perevalov         copy_struct.len = pagesize;
1227727b9d7eSAlexey Perevalov         copy_struct.mode = 0;
1228f9494614SAlexey Perevalov         ret = ioctl(userfault_fd, UFFDIO_COPY, &copy_struct);
1229727b9d7eSAlexey Perevalov     } else {
1230727b9d7eSAlexey Perevalov         struct uffdio_zeropage zero_struct;
1231727b9d7eSAlexey Perevalov         zero_struct.range.start = (uint64_t)(uintptr_t)host_addr;
1232727b9d7eSAlexey Perevalov         zero_struct.range.len = pagesize;
1233727b9d7eSAlexey Perevalov         zero_struct.mode = 0;
1234f9494614SAlexey Perevalov         ret = ioctl(userfault_fd, UFFDIO_ZEROPAGE, &zero_struct);
1235727b9d7eSAlexey Perevalov     }
1236f9494614SAlexey Perevalov     if (!ret) {
12378f8bfffcSPeter Xu         qemu_mutex_lock(&mis->page_request_mutex);
1238f9494614SAlexey Perevalov         ramblock_recv_bitmap_set_range(rb, host_addr,
1239f9494614SAlexey Perevalov                                        pagesize / qemu_target_page_size());
12408f8bfffcSPeter Xu         /*
12418f8bfffcSPeter Xu          * If this page resolves a page fault for a previous recorded faulted
12428f8bfffcSPeter Xu          * address, take a special note to maintain the requested page list.
12438f8bfffcSPeter Xu          */
12448f8bfffcSPeter Xu         if (g_tree_lookup(mis->page_requested, host_addr)) {
12458f8bfffcSPeter Xu             g_tree_remove(mis->page_requested, host_addr);
12468f8bfffcSPeter Xu             mis->page_requested_count--;
12478f8bfffcSPeter Xu             trace_postcopy_page_req_del(host_addr, mis->page_requested_count);
12488f8bfffcSPeter Xu         }
12498f8bfffcSPeter Xu         qemu_mutex_unlock(&mis->page_request_mutex);
1250575b0b33SAlexey Perevalov         mark_postcopy_blocktime_end((uintptr_t)host_addr);
1251f9494614SAlexey Perevalov     }
1252f9494614SAlexey Perevalov     return ret;
1253727b9d7eSAlexey Perevalov }
1254727b9d7eSAlexey Perevalov 
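/*
 * Illustrative, non-built sketch of the other half of the page_requested
 * bookkeeping seen above: when a fault is forwarded to the source, the
 * faulted address is recorded under the same page_request_mutex so that
 * qemu_ufd_copy_ioctl() can later pair the incoming page with the request.
 * This is a hedged reconstruction; the real recording happens in the
 * page-request path, not in this file's copy ioctl.
 */
#if 0
static void record_page_request(MigrationIncomingState *mis, void *host_addr)
{
    qemu_mutex_lock(&mis->page_request_mutex);
    if (!g_tree_lookup(mis->page_requested, host_addr)) {
        g_tree_insert(mis->page_requested, host_addr, (gpointer)1);
        mis->page_requested_count++;
    }
    qemu_mutex_unlock(&mis->page_request_mutex);
}
#endif
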
1255d488b349SDr. David Alan Gilbert int postcopy_notify_shared_wake(RAMBlock *rb, uint64_t offset)
1256d488b349SDr. David Alan Gilbert {
1257d488b349SDr. David Alan Gilbert     int i;
1258d488b349SDr. David Alan Gilbert     MigrationIncomingState *mis = migration_incoming_get_current();
1259d488b349SDr. David Alan Gilbert     GArray *pcrfds = mis->postcopy_remote_fds;
1260d488b349SDr. David Alan Gilbert 
1261d488b349SDr. David Alan Gilbert     for (i = 0; i < pcrfds->len; i++) {
1262d488b349SDr. David Alan Gilbert         struct PostCopyFD *cur = &g_array_index(pcrfds, struct PostCopyFD, i);
1263d488b349SDr. David Alan Gilbert         int ret = cur->waker(cur, rb, offset);
1264d488b349SDr. David Alan Gilbert         if (ret) {
1265d488b349SDr. David Alan Gilbert             return ret;
1266d488b349SDr. David Alan Gilbert         }
1267d488b349SDr. David Alan Gilbert     }
1268d488b349SDr. David Alan Gilbert     return 0;
1269d488b349SDr. David Alan Gilbert }
1270d488b349SDr. David Alan Gilbert 
1271696ed9a9SDr. David Alan Gilbert /*
1272696ed9a9SDr. David Alan Gilbert  * Place a host page (from) at (host) atomically
1273696ed9a9SDr. David Alan Gilbert  * returns 0 on success
1274696ed9a9SDr. David Alan Gilbert  */
1275df9ff5e1SDr. David Alan Gilbert int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
12768be4620bSAlexey Perevalov                         RAMBlock *rb)
1277696ed9a9SDr. David Alan Gilbert {
12788be4620bSAlexey Perevalov     size_t pagesize = qemu_ram_pagesize(rb);
1279696ed9a9SDr. David Alan Gilbert 
1280696ed9a9SDr. David Alan Gilbert     /* The copy also acks to the kernel, waking the stalled thread up.
1281696ed9a9SDr. David Alan Gilbert      * TODO: we could inhibit that ack and only do it if it was requested,
1282696ed9a9SDr. David Alan Gilbert      * which would be slightly cheaper, but we'd have to be careful
1283696ed9a9SDr. David Alan Gilbert      * of the order of updating our page state.
1284696ed9a9SDr. David Alan Gilbert      */
1285eef621c4SPeter Xu     if (qemu_ufd_copy_ioctl(mis, host, from, pagesize, rb)) {
1286696ed9a9SDr. David Alan Gilbert         int e = errno;
1287df9ff5e1SDr. David Alan Gilbert         error_report("%s: %s copy host: %p from: %p (size: %zd)",
1288df9ff5e1SDr. David Alan Gilbert                      __func__, strerror(e), host, from, pagesize);
1289696ed9a9SDr. David Alan Gilbert 
1290696ed9a9SDr. David Alan Gilbert         return -e;
1291696ed9a9SDr. David Alan Gilbert     }
1292696ed9a9SDr. David Alan Gilbert 
1293696ed9a9SDr. David Alan Gilbert     trace_postcopy_place_page(host);
1294dedfb4b2SDr. David Alan Gilbert     return postcopy_notify_shared_wake(rb,
1295dedfb4b2SDr. David Alan Gilbert                                        qemu_ram_block_host_offset(rb, host));
1296696ed9a9SDr. David Alan Gilbert }
1297696ed9a9SDr. David Alan Gilbert 
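/*
 * Illustrative, non-built sketch of the TODO in the function above: the
 * kernel lets UFFDIO_COPY be issued with UFFDIO_COPY_MODE_DONTWAKE and the
 * waiter woken later with an explicit UFFDIO_WAKE over the same range, so
 * the ack could be limited to pages that were actually requested.  This is
 * only a sketch of that alternative, not what QEMU does today.
 */
#if 0
static int place_page_without_wake(int ufd, void *host, void *from,
                                   size_t pagesize)
{
    struct uffdio_copy copy = {
        .dst = (uint64_t)(uintptr_t)host,
        .src = (uint64_t)(uintptr_t)from,
        .len = pagesize,
        .mode = UFFDIO_COPY_MODE_DONTWAKE,
    };

    return ioctl(ufd, UFFDIO_COPY, &copy);
}

static int wake_page_waiters(int ufd, void *host, size_t pagesize)
{
    struct uffdio_range range = {
        .start = (uint64_t)(uintptr_t)host,
        .len = pagesize,
    };

    return ioctl(ufd, UFFDIO_WAKE, &range);
}
#endif
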
1298696ed9a9SDr. David Alan Gilbert /*
1299696ed9a9SDr. David Alan Gilbert  * Place a zero page at (host) atomically
1300696ed9a9SDr. David Alan Gilbert  * returns 0 on success
1301696ed9a9SDr. David Alan Gilbert  */
1302df9ff5e1SDr. David Alan Gilbert int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
13038be4620bSAlexey Perevalov                              RAMBlock *rb)
1304696ed9a9SDr. David Alan Gilbert {
13052ce16640SDr. David Alan Gilbert     size_t pagesize = qemu_ram_pagesize(rb);
1306df9ff5e1SDr. David Alan Gilbert     trace_postcopy_place_page_zero(host);
1307696ed9a9SDr. David Alan Gilbert 
13082ce16640SDr. David Alan Gilbert     /* Normal RAMBlocks can zero a page using UFFDIO_ZEROPAGE
13092ce16640SDr. David Alan Gilbert      * but it's not available for everything (e.g. hugetlbpages)
13102ce16640SDr. David Alan Gilbert      */
13112ce16640SDr. David Alan Gilbert     if (qemu_ram_is_uf_zeroable(rb)) {
1312eef621c4SPeter Xu         if (qemu_ufd_copy_ioctl(mis, host, NULL, pagesize, rb)) {
1313696ed9a9SDr. David Alan Gilbert             int e = errno;
1314696ed9a9SDr. David Alan Gilbert             error_report("%s: %s zero host: %p",
1315696ed9a9SDr. David Alan Gilbert                          __func__, strerror(e), host);
1316696ed9a9SDr. David Alan Gilbert 
1317696ed9a9SDr. David Alan Gilbert             return -e;
1318696ed9a9SDr. David Alan Gilbert         }
1319dedfb4b2SDr. David Alan Gilbert         return postcopy_notify_shared_wake(rb,
1320dedfb4b2SDr. David Alan Gilbert                                            qemu_ram_block_host_offset(rb,
1321dedfb4b2SDr. David Alan Gilbert                                                                       host));
1322df9ff5e1SDr. David Alan Gilbert     } else {
13236629890dSWei Yang         return postcopy_place_page(mis, host, mis->postcopy_tmp_zero_page, rb);
1324df9ff5e1SDr. David Alan Gilbert     }
1325696ed9a9SDr. David Alan Gilbert }
1326696ed9a9SDr. David Alan Gilbert 
1327eb59db53SDr. David Alan Gilbert #else
1328eb59db53SDr. David Alan Gilbert /* No target OS support, stubs just fail */
132965ace060SAlexey Perevalov void fill_destination_postcopy_migration_info(MigrationInfo *info)
133065ace060SAlexey Perevalov {
133165ace060SAlexey Perevalov }
133265ace060SAlexey Perevalov 
1333d7651f15SAlexey Perevalov bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
1334eb59db53SDr. David Alan Gilbert {
1335eb59db53SDr. David Alan Gilbert     error_report("%s: No OS support", __func__);
1336eb59db53SDr. David Alan Gilbert     return false;
1337eb59db53SDr. David Alan Gilbert }
1338eb59db53SDr. David Alan Gilbert 
1339c136180cSDavid Hildenbrand int postcopy_ram_incoming_init(MigrationIncomingState *mis)
13401caddf8aSDr. David Alan Gilbert {
13411caddf8aSDr. David Alan Gilbert     error_report("postcopy_ram_incoming_init: No OS support");
13421caddf8aSDr. David Alan Gilbert     return -1;
13431caddf8aSDr. David Alan Gilbert }
13441caddf8aSDr. David Alan Gilbert 
13451caddf8aSDr. David Alan Gilbert int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
13461caddf8aSDr. David Alan Gilbert {
13471caddf8aSDr. David Alan Gilbert     assert(0);
13481caddf8aSDr. David Alan Gilbert     return -1;
13491caddf8aSDr. David Alan Gilbert }
13501caddf8aSDr. David Alan Gilbert 
1351f9527107SDr. David Alan Gilbert int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
1352f9527107SDr. David Alan Gilbert {
1353f9527107SDr. David Alan Gilbert     assert(0);
1354f9527107SDr. David Alan Gilbert     return -1;
1355f9527107SDr. David Alan Gilbert }
1356f9527107SDr. David Alan Gilbert 
1357c188c539SMichael S. Tsirkin int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb,
1358c188c539SMichael S. Tsirkin                                  uint64_t client_addr, uint64_t rb_offset)
1359c188c539SMichael S. Tsirkin {
1360c188c539SMichael S. Tsirkin     assert(0);
1361c188c539SMichael S. Tsirkin     return -1;
1362c188c539SMichael S. Tsirkin }
1363c188c539SMichael S. Tsirkin 
13642a7eb148SWei Yang int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
1365f0a227adSDr. David Alan Gilbert {
1366f0a227adSDr. David Alan Gilbert     assert(0);
1367f0a227adSDr. David Alan Gilbert     return -1;
1368f0a227adSDr. David Alan Gilbert }
1369696ed9a9SDr. David Alan Gilbert 
1370df9ff5e1SDr. David Alan Gilbert int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
13718be4620bSAlexey Perevalov                         RAMBlock *rb)
1372696ed9a9SDr. David Alan Gilbert {
1373696ed9a9SDr. David Alan Gilbert     assert(0);
1374696ed9a9SDr. David Alan Gilbert     return -1;
1375696ed9a9SDr. David Alan Gilbert }
1376696ed9a9SDr. David Alan Gilbert 
1377df9ff5e1SDr. David Alan Gilbert int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
13788be4620bSAlexey Perevalov                         RAMBlock *rb)
1379696ed9a9SDr. David Alan Gilbert {
1380696ed9a9SDr. David Alan Gilbert     assert(0);
1381696ed9a9SDr. David Alan Gilbert     return -1;
1382696ed9a9SDr. David Alan Gilbert }
1383696ed9a9SDr. David Alan Gilbert 
13845efc3564SDr. David Alan Gilbert int postcopy_wake_shared(struct PostCopyFD *pcfd,
13855efc3564SDr. David Alan Gilbert                          uint64_t client_addr,
13865efc3564SDr. David Alan Gilbert                          RAMBlock *rb)
13875efc3564SDr. David Alan Gilbert {
13885efc3564SDr. David Alan Gilbert     assert(0);
13895efc3564SDr. David Alan Gilbert     return -1;
13905efc3564SDr. David Alan Gilbert }
1391eb59db53SDr. David Alan Gilbert #endif
1392eb59db53SDr. David Alan Gilbert 
1393e0b266f0SDr. David Alan Gilbert /* ------------------------------------------------------------------------- */
139477dadc3fSPeter Xu void postcopy_temp_page_reset(PostcopyTmpPage *tmp_page)
139577dadc3fSPeter Xu {
139677dadc3fSPeter Xu     tmp_page->target_pages = 0;
139777dadc3fSPeter Xu     tmp_page->host_addr = NULL;
139877dadc3fSPeter Xu     /*
139977dadc3fSPeter Xu      * This is set to true on reset, and cleared as soon as we receive any
140077dadc3fSPeter Xu      * non-zero small page within this huge page.
140177dadc3fSPeter Xu      */
140277dadc3fSPeter Xu     tmp_page->all_zero = true;
140377dadc3fSPeter Xu }
1404e0b266f0SDr. David Alan Gilbert 
14059ab7ef9bSPeter Xu void postcopy_fault_thread_notify(MigrationIncomingState *mis)
14069ab7ef9bSPeter Xu {
14079ab7ef9bSPeter Xu     uint64_t tmp64 = 1;
14089ab7ef9bSPeter Xu 
14099ab7ef9bSPeter Xu     /*
14109ab7ef9bSPeter Xu      * Wake up the fault_thread.  It's an eventfd that should currently
14119ab7ef9bSPeter Xu      * be at 0; we're going to increment it to 1.
14129ab7ef9bSPeter Xu      */
14139ab7ef9bSPeter Xu     if (write(mis->userfault_event_fd, &tmp64, 8) != 8) {
14149ab7ef9bSPeter Xu         /* Not much we can do here, but may as well report it */
14159ab7ef9bSPeter Xu         error_report("%s: incrementing failed: %s", __func__,
14169ab7ef9bSPeter Xu                      strerror(errno));
14179ab7ef9bSPeter Xu     }
14189ab7ef9bSPeter Xu }
14199ab7ef9bSPeter Xu 
1420e0b266f0SDr. David Alan Gilbert /**
1421e0b266f0SDr. David Alan Gilbert  * postcopy_discard_send_init: Called at the start of each RAMBlock before
1422e0b266f0SDr. David Alan Gilbert  *   asking to discard individual ranges.
1423e0b266f0SDr. David Alan Gilbert  *
1424e0b266f0SDr. David Alan Gilbert  * @ms: The current migration state.
1426e0b266f0SDr. David Alan Gilbert  * @name: RAMBlock that discards will operate on.
1427e0b266f0SDr. David Alan Gilbert  */
1428810cf2bbSWei Yang static PostcopyDiscardState pds = {0};
1429810cf2bbSWei Yang void postcopy_discard_send_init(MigrationState *ms, const char *name)
1430e0b266f0SDr. David Alan Gilbert {
1431810cf2bbSWei Yang     pds.ramblock_name = name;
1432810cf2bbSWei Yang     pds.cur_entry = 0;
1433810cf2bbSWei Yang     pds.nsentwords = 0;
1434810cf2bbSWei Yang     pds.nsentcmds = 0;
1435e0b266f0SDr. David Alan Gilbert }
1436e0b266f0SDr. David Alan Gilbert 
1437e0b266f0SDr. David Alan Gilbert /**
1438e0b266f0SDr. David Alan Gilbert  * postcopy_discard_send_range: Called by the bitmap code for each chunk to
1439e0b266f0SDr. David Alan Gilbert  *   discard. May send a discard message, may just leave it queued to
1440e0b266f0SDr. David Alan Gilbert  *   be sent later.
1441e0b266f0SDr. David Alan Gilbert  *
1442e0b266f0SDr. David Alan Gilbert  * @ms: Current migration state.
1443e0b266f0SDr. David Alan Gilbert  * @start,@length: a range of pages in the migration bitmap in the
1444e0b266f0SDr. David Alan Gilbert  *   RAM block passed to postcopy_discard_send_init() (length=1 is one page)
1445e0b266f0SDr. David Alan Gilbert  */
1446810cf2bbSWei Yang void postcopy_discard_send_range(MigrationState *ms, unsigned long start,
1447810cf2bbSWei Yang                                  unsigned long length)
1448e0b266f0SDr. David Alan Gilbert {
144920afaed9SJuan Quintela     size_t tp_size = qemu_target_page_size();
1450e0b266f0SDr. David Alan Gilbert     /* Convert to byte offsets within the RAM block */
1451810cf2bbSWei Yang     pds.start_list[pds.cur_entry] = start  * tp_size;
1452810cf2bbSWei Yang     pds.length_list[pds.cur_entry] = length * tp_size;
1453810cf2bbSWei Yang     trace_postcopy_discard_send_range(pds.ramblock_name, start, length);
1454810cf2bbSWei Yang     pds.cur_entry++;
1455810cf2bbSWei Yang     pds.nsentwords++;
1456e0b266f0SDr. David Alan Gilbert 
1457810cf2bbSWei Yang     if (pds.cur_entry == MAX_DISCARDS_PER_COMMAND) {
1458e0b266f0SDr. David Alan Gilbert         /* Full set, ship it! */
145989a02a9fSzhanghailiang         qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
1460810cf2bbSWei Yang                                               pds.ramblock_name,
1461810cf2bbSWei Yang                                               pds.cur_entry,
1462810cf2bbSWei Yang                                               pds.start_list,
1463810cf2bbSWei Yang                                               pds.length_list);
1464810cf2bbSWei Yang         pds.nsentcmds++;
1465810cf2bbSWei Yang         pds.cur_entry = 0;
1466e0b266f0SDr. David Alan Gilbert     }
1467e0b266f0SDr. David Alan Gilbert }
1468e0b266f0SDr. David Alan Gilbert 
1469e0b266f0SDr. David Alan Gilbert /**
1470e0b266f0SDr. David Alan Gilbert  * postcopy_discard_send_finish: Called at the end of each RAMBlock by the
1471e0b266f0SDr. David Alan Gilbert  * bitmap code.  Sends any outstanding discard messages.
1472e0b266f0SDr. David Alan Gilbert  *
1473e0b266f0SDr. David Alan Gilbert  * @ms: Current migration state.
1474e0b266f0SDr. David Alan Gilbert  */
1475810cf2bbSWei Yang void postcopy_discard_send_finish(MigrationState *ms)
1476e0b266f0SDr. David Alan Gilbert {
1477e0b266f0SDr. David Alan Gilbert     /* Anything unsent? */
1478810cf2bbSWei Yang     if (pds.cur_entry) {
147989a02a9fSzhanghailiang         qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
1480810cf2bbSWei Yang                                               pds.ramblock_name,
1481810cf2bbSWei Yang                                               pds.cur_entry,
1482810cf2bbSWei Yang                                               pds.start_list,
1483810cf2bbSWei Yang                                               pds.length_list);
1484810cf2bbSWei Yang         pds.nsentcmds++;
1485e0b266f0SDr. David Alan Gilbert     }
1486e0b266f0SDr. David Alan Gilbert 
1487810cf2bbSWei Yang     trace_postcopy_discard_send_finish(pds.ramblock_name, pds.nsentwords,
1488810cf2bbSWei Yang                                        pds.nsentcmds);
1489e0b266f0SDr. David Alan Gilbert }
1490bac3b212SJuan Quintela 
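/*
 * Illustrative, non-built sketch of how the three discard helpers above
 * are used together by the bitmap code on the source, one RAMBlock at a
 * time.  next_discard_run() is a hypothetical stand-in for the bitmap
 * iteration; only the postcopy_discard_send_* calls are real.
 */
#if 0
static void send_discards_for_block(MigrationState *ms, RAMBlock *rb)
{
    unsigned long start, length;

    postcopy_discard_send_init(ms, qemu_ram_get_idstr(rb));
    while (next_discard_run(rb, &start, &length)) {
        /* Batched into commands of up to MAX_DISCARDS_PER_COMMAND ranges */
        postcopy_discard_send_range(ms, start, length);
    }
    postcopy_discard_send_finish(ms);
}
#endif
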
1491bac3b212SJuan Quintela /*
1492bac3b212SJuan Quintela  * Current state of incoming postcopy; note this is not part of
1493bac3b212SJuan Quintela  * MigrationIncomingState since its state is used during cleanup
1494bac3b212SJuan Quintela  * at the end as MIS is being freed.
1495bac3b212SJuan Quintela  */
1496bac3b212SJuan Quintela static PostcopyState incoming_postcopy_state;
1497bac3b212SJuan Quintela 
1498bac3b212SJuan Quintela PostcopyState  postcopy_state_get(void)
1499bac3b212SJuan Quintela {
1500d73415a3SStefan Hajnoczi     return qatomic_mb_read(&incoming_postcopy_state);
1501bac3b212SJuan Quintela }
1502bac3b212SJuan Quintela 
1503bac3b212SJuan Quintela /* Set the state and return the old state */
1504bac3b212SJuan Quintela PostcopyState postcopy_state_set(PostcopyState new_state)
1505bac3b212SJuan Quintela {
1506d73415a3SStefan Hajnoczi     return qatomic_xchg(&incoming_postcopy_state, new_state);
1507bac3b212SJuan Quintela }
150800fa4fc8SDr. David Alan Gilbert 
150900fa4fc8SDr. David Alan Gilbert /* Register a handler for external shared memory postcopy
151000fa4fc8SDr. David Alan Gilbert  * called on the destination.
151100fa4fc8SDr. David Alan Gilbert  */
151200fa4fc8SDr. David Alan Gilbert void postcopy_register_shared_ufd(struct PostCopyFD *pcfd)
151300fa4fc8SDr. David Alan Gilbert {
151400fa4fc8SDr. David Alan Gilbert     MigrationIncomingState *mis = migration_incoming_get_current();
151500fa4fc8SDr. David Alan Gilbert 
151600fa4fc8SDr. David Alan Gilbert     mis->postcopy_remote_fds = g_array_append_val(mis->postcopy_remote_fds,
151700fa4fc8SDr. David Alan Gilbert                                                   *pcfd);
151800fa4fc8SDr. David Alan Gilbert }
151900fa4fc8SDr. David Alan Gilbert 
152000fa4fc8SDr. David Alan Gilbert /* Unregister a handler for external shared memory postcopy
152100fa4fc8SDr. David Alan Gilbert  */
152200fa4fc8SDr. David Alan Gilbert void postcopy_unregister_shared_ufd(struct PostCopyFD *pcfd)
152300fa4fc8SDr. David Alan Gilbert {
152400fa4fc8SDr. David Alan Gilbert     guint i;
152500fa4fc8SDr. David Alan Gilbert     MigrationIncomingState *mis = migration_incoming_get_current();
152600fa4fc8SDr. David Alan Gilbert     GArray *pcrfds = mis->postcopy_remote_fds;
152700fa4fc8SDr. David Alan Gilbert 
152856559980SJuan Quintela     if (!pcrfds) {
152956559980SJuan Quintela         /* migration has already finished and freed the array */
153056559980SJuan Quintela         return;
153156559980SJuan Quintela     }
153200fa4fc8SDr. David Alan Gilbert     for (i = 0; i < pcrfds->len; i++) {
153300fa4fc8SDr. David Alan Gilbert         struct PostCopyFD *cur = &g_array_index(pcrfds, struct PostCopyFD, i);
153400fa4fc8SDr. David Alan Gilbert         if (cur->fd == pcfd->fd) {
153500fa4fc8SDr. David Alan Gilbert             mis->postcopy_remote_fds = g_array_remove_index(pcrfds, i);
153600fa4fc8SDr. David Alan Gilbert             return;
153700fa4fc8SDr. David Alan Gilbert         }
153800fa4fc8SDr. David Alan Gilbert     }
153900fa4fc8SDr. David Alan Gilbert }
154036f62f11SPeter Xu 
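/*
 * Illustrative, non-built sketch of how an external sharer (for example a
 * vhost-user backend with its own userfaultfd) would plug into the fault
 * thread via the registration helpers above.  The two callbacks are
 * hypothetical stand-ins; only the fields referenced elsewhere in this
 * file (fd, idstr, handler, waker) are assumed to exist.
 */
#if 0
static void example_register_shared_region(int region_uffd)
{
    struct PostCopyFD pcfd = {
        .fd = region_uffd,
        .idstr = "example-shared-region",
        .handler = example_fault_handler, /* resolves UFFD_EVENT_PAGEFAULT */
        .waker = example_waker,           /* wakes the faulting client */
    };

    /* The structure is copied into mis->postcopy_remote_fds */
    postcopy_register_shared_ufd(&pcfd);
}
#endif
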
154136f62f11SPeter Xu bool postcopy_preempt_new_channel(MigrationIncomingState *mis, QEMUFile *file)
154236f62f11SPeter Xu {
154336f62f11SPeter Xu     /*
154436f62f11SPeter Xu      * The new loading channel has its own threads, so it needs to be
154536f62f11SPeter Xu      * blocked too.  Blocking is already the default; just be explicit.
154636f62f11SPeter Xu      */
154736f62f11SPeter Xu     qemu_file_set_blocking(file, true);
154836f62f11SPeter Xu     mis->postcopy_qemufile_dst = file;
154936f62f11SPeter Xu     trace_postcopy_preempt_new_channel();
155036f62f11SPeter Xu 
155136f62f11SPeter Xu     /* Start the migration immediately */
155236f62f11SPeter Xu     return true;
155336f62f11SPeter Xu }
155436f62f11SPeter Xu 
155536f62f11SPeter Xu int postcopy_preempt_setup(MigrationState *s, Error **errp)
155636f62f11SPeter Xu {
155736f62f11SPeter Xu     QIOChannel *ioc;
155836f62f11SPeter Xu 
155936f62f11SPeter Xu     if (!migrate_postcopy_preempt()) {
156036f62f11SPeter Xu         return 0;
156136f62f11SPeter Xu     }
156236f62f11SPeter Xu 
156336f62f11SPeter Xu     if (!migrate_multi_channels_is_allowed()) {
156436f62f11SPeter Xu         error_setg(errp, "Postcopy preempt is not supported because the "
156536f62f11SPeter Xu                    "current migration stream does not support multiple channels.");
156636f62f11SPeter Xu         return -1;
156736f62f11SPeter Xu     }
156836f62f11SPeter Xu 
156936f62f11SPeter Xu     ioc = socket_send_channel_create_sync(errp);
157036f62f11SPeter Xu 
157136f62f11SPeter Xu     if (ioc == NULL) {
157236f62f11SPeter Xu         return -1;
157336f62f11SPeter Xu     }
157436f62f11SPeter Xu 
157536f62f11SPeter Xu     migration_ioc_register_yank(ioc);
157636f62f11SPeter Xu     s->postcopy_qemufile_src = qemu_file_new_output(ioc);
157736f62f11SPeter Xu 
157836f62f11SPeter Xu     trace_postcopy_preempt_new_channel();
157936f62f11SPeter Xu 
158036f62f11SPeter Xu     return 0;
158136f62f11SPeter Xu }
158236f62f11SPeter Xu 
1583*60bb3c58SPeter Xu static void postcopy_pause_ram_fast_load(MigrationIncomingState *mis)
1584*60bb3c58SPeter Xu {
1585*60bb3c58SPeter Xu     trace_postcopy_pause_fast_load();
1586*60bb3c58SPeter Xu     qemu_mutex_unlock(&mis->postcopy_prio_thread_mutex);
1587*60bb3c58SPeter Xu     qemu_sem_wait(&mis->postcopy_pause_sem_fast_load);
1588*60bb3c58SPeter Xu     qemu_mutex_lock(&mis->postcopy_prio_thread_mutex);
1589*60bb3c58SPeter Xu     trace_postcopy_pause_fast_load_continued();
1590*60bb3c58SPeter Xu }
1591*60bb3c58SPeter Xu 
159236f62f11SPeter Xu void *postcopy_preempt_thread(void *opaque)
159336f62f11SPeter Xu {
159436f62f11SPeter Xu     MigrationIncomingState *mis = opaque;
159536f62f11SPeter Xu     int ret;
159636f62f11SPeter Xu 
159736f62f11SPeter Xu     trace_postcopy_preempt_thread_entry();
159836f62f11SPeter Xu 
159936f62f11SPeter Xu     rcu_register_thread();
160036f62f11SPeter Xu 
160136f62f11SPeter Xu     qemu_sem_post(&mis->thread_sync_sem);
160236f62f11SPeter Xu 
160336f62f11SPeter Xu     /* The source sends RAM_SAVE_FLAG_EOS to terminate this thread */
1604*60bb3c58SPeter Xu     qemu_mutex_lock(&mis->postcopy_prio_thread_mutex);
1605*60bb3c58SPeter Xu     while (1) {
1606*60bb3c58SPeter Xu         ret = ram_load_postcopy(mis->postcopy_qemufile_dst,
1607*60bb3c58SPeter Xu                                 RAM_CHANNEL_POSTCOPY);
1608*60bb3c58SPeter Xu         /* If error happened, go into recovery routine */
1609*60bb3c58SPeter Xu         if (ret) {
1610*60bb3c58SPeter Xu             postcopy_pause_ram_fast_load(mis);
1611*60bb3c58SPeter Xu         } else {
1612*60bb3c58SPeter Xu             /* We're done */
1613*60bb3c58SPeter Xu             break;
1614*60bb3c58SPeter Xu         }
1615*60bb3c58SPeter Xu     }
1616*60bb3c58SPeter Xu     qemu_mutex_unlock(&mis->postcopy_prio_thread_mutex);
161736f62f11SPeter Xu 
161836f62f11SPeter Xu     rcu_unregister_thread();
161936f62f11SPeter Xu 
162036f62f11SPeter Xu     trace_postcopy_preempt_thread_exit();
162136f62f11SPeter Xu 
1622*60bb3c58SPeter Xu     return NULL;
162336f62f11SPeter Xu }
1624