/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2009-2015 Red Hat Inc
 *
 * Authors:
 *  Juan Quintela <quintela@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/boards.h"
#include "net/net.h"
#include "migration.h"
#include "migration/snapshot.h"
#include "migration-stats.h"
#include "migration/vmstate.h"
#include "migration/misc.h"
#include "migration/register.h"
#include "migration/global_state.h"
#include "migration/channel-block.h"
#include "multifd.h"
#include "ram.h"
#include "qemu-file.h"
#include "savevm.h"
#include "postcopy-ram.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-migration.h"
#include "qapi/clone-visitor.h"
#include "qapi/qapi-builtin-visit.h"
#include "qemu/error-report.h"
#include "system/cpus.h"
#include "system/memory.h"
#include "exec/target_page.h"
#include "exec/page-vary.h"
#include "trace.h"
#include "qemu/iov.h"
#include "qemu/job.h"
#include "qemu/main-loop.h"
#include "block/snapshot.h"
#include "block/thread-pool.h"
#include "qemu/cutils.h"
#include "io/channel-buffer.h"
#include "io/channel-file.h"
#include "system/replay.h"
#include "system/runstate.h"
#include "system/system.h"
#include "system/xen.h"
#include "migration/colo.h"
#include "qemu/bitmap.h"
#include "net/announce.h"
#include "qemu/yank.h"
#include "yank_functions.h"
#include "system/qtest.h"
#include "options.h"

const unsigned int postcopy_ram_discard_version;

/* Subcommands for QEMU_VM_COMMAND */
enum qemu_vm_cmd {
    MIG_CMD_INVALID = 0,       /* Must be 0 */
    MIG_CMD_OPEN_RETURN_PATH,  /* Tell the dest to open the Return path */
    MIG_CMD_PING,              /* Request a PONG on the RP */

    MIG_CMD_POSTCOPY_ADVISE,       /* Prior to any page transfers, just
                                      warn we might want to do PC */
    MIG_CMD_POSTCOPY_LISTEN,       /* Start listening for incoming
                                      pages as it's running. */
    MIG_CMD_POSTCOPY_RUN,          /* Start execution */

    MIG_CMD_POSTCOPY_RAM_DISCARD,  /* A list of pages to discard that
                                      were previously sent during
                                      precopy but are dirty. */
    MIG_CMD_PACKAGED,          /* Send a wrapped stream within this stream */
    MIG_CMD_ENABLE_COLO,       /* Enable COLO */
    MIG_CMD_POSTCOPY_RESUME,   /* resume postcopy on dest */
    MIG_CMD_RECV_BITMAP,       /* Request for recved bitmap on dst */
    MIG_CMD_SWITCHOVER_START,  /* Switchover start notification */
    MIG_CMD_MAX
};

#define MAX_VM_CMD_PACKAGED_SIZE UINT32_MAX
static struct mig_cmd_args {
    ssize_t len; /* -1 = variable */
    const char *name;
} mig_cmd_args[] = {
    [MIG_CMD_INVALID]          = { .len = -1, .name = "INVALID" },
    [MIG_CMD_OPEN_RETURN_PATH] = { .len =  0, .name = "OPEN_RETURN_PATH" },
    [MIG_CMD_PING]             = { .len = sizeof(uint32_t), .name = "PING" },
    [MIG_CMD_POSTCOPY_ADVISE]  = { .len = -1, .name = "POSTCOPY_ADVISE" },
    [MIG_CMD_POSTCOPY_LISTEN]  = { .len =  0, .name = "POSTCOPY_LISTEN" },
    [MIG_CMD_POSTCOPY_RUN]     = { .len =  0, .name = "POSTCOPY_RUN" },
    [MIG_CMD_POSTCOPY_RAM_DISCARD] = {
                                   .len = -1, .name = "POSTCOPY_RAM_DISCARD" },
    [MIG_CMD_POSTCOPY_RESUME]  = { .len =  0, .name = "POSTCOPY_RESUME" },
    [MIG_CMD_PACKAGED]         = { .len =  4, .name = "PACKAGED" },
    [MIG_CMD_RECV_BITMAP]      = { .len = -1, .name = "RECV_BITMAP" },
    [MIG_CMD_SWITCHOVER_START] = { .len =  0, .name = "SWITCHOVER_START" },
    [MIG_CMD_MAX]              = { .len = -1, .name = "MAX" },
};

/* Note for MIG_CMD_POSTCOPY_ADVISE:
 * The format of the arguments depends on the postcopy mode:
 * - postcopy RAM only
 *   uint64_t host page size
 *   uint64_t target page size
 *
 * - postcopy RAM and postcopy dirty bitmaps
 *   format is the same as for postcopy RAM only
 *
 * - postcopy dirty bitmaps only
 *   Nothing. Command length field is 0.
 *
 * Be careful: adding a new postcopy entity with some other parameters should
 * not break format self-description ability. A good way is to introduce some
 * generic extendable format with an exception for the two old entities.
 */
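/*
 * For illustration only (see qemu_savevm_send_postcopy_advise() below for
 * the authoritative encoding): with postcopy RAM enabled the ADVISE payload
 * is two big-endian 64-bit values, e.g. on a 4k-page host/guest pair
 * something like
 *
 *     be64  0x0000000000001000    ram_pagesize_summary()
 *     be64  0x0000000000001000    qemu_target_page_size()
 *
 * With only dirty-bitmap postcopy the command is sent with len == 0 and no
 * payload at all, which is how the destination tells the two modes apart.
 */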
/***********************************************************/
/* Optional load threads pool support */

static void qemu_loadvm_thread_pool_create(MigrationIncomingState *mis)
{
    assert(!mis->load_threads);
    mis->load_threads = thread_pool_new();
    mis->load_threads_abort = false;
}

static void qemu_loadvm_thread_pool_destroy(MigrationIncomingState *mis)
{
    qatomic_set(&mis->load_threads_abort, true);

    bql_unlock(); /* Load threads might be waiting for BQL */
    g_clear_pointer(&mis->load_threads, thread_pool_free);
    bql_lock();
}

static bool qemu_loadvm_thread_pool_wait(MigrationState *s,
                                         MigrationIncomingState *mis)
{
    bql_unlock(); /* Let load threads do work requiring BQL */
    thread_pool_wait(mis->load_threads);
    bql_lock();

    return !migrate_has_error(s);
}

/***********************************************************/
/* savevm/loadvm support */

static QEMUFile *qemu_fopen_bdrv(BlockDriverState *bs, int is_writable)
{
    if (is_writable) {
        return qemu_file_new_output(QIO_CHANNEL(qio_channel_block_new(bs)));
    } else {
        return qemu_file_new_input(QIO_CHANNEL(qio_channel_block_new(bs)));
    }
}


/* QEMUFile timer support.
 * Not in qemu-file.c so as not to add qemu-timer.c as a dependency
 * of qemu-file.c.
 */

void timer_put(QEMUFile *f, QEMUTimer *ts)
{
    uint64_t expire_time;

    expire_time = timer_expire_time_ns(ts);
    qemu_put_be64(f, expire_time);
}

void timer_get(QEMUFile *f, QEMUTimer *ts)
{
    uint64_t expire_time;

    expire_time = qemu_get_be64(f);
    if (expire_time != -1) {
        timer_mod_ns(ts, expire_time);
    } else {
        timer_del(ts);
    }
}
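/*
 * A sketch of the resulting on-stream encoding (assuming, as elsewhere in
 * QEMU, that timer_expire_time_ns() returns -1 for a timer that is not
 * pending): a timer is always exactly one big-endian 64-bit value - either
 * the absolute expire time in nanoseconds, or UINT64_MAX as the "no timer
 * pending" marker that makes timer_get() delete the timer instead of
 * re-arming it.
 */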
/* VMState timer support.
 * Not in vmstate.c so as not to add qemu-timer.c as a dependency
 * of vmstate.c.
 */

static int get_timer(QEMUFile *f, void *pv, size_t size,
                     const VMStateField *field)
{
    QEMUTimer *v = pv;
    timer_get(f, v);
    return 0;
}

static int put_timer(QEMUFile *f, void *pv, size_t size,
                     const VMStateField *field, JSONWriter *vmdesc)
{
    QEMUTimer *v = pv;
    timer_put(f, v);

    return 0;
}

const VMStateInfo vmstate_info_timer = {
    .name = "timer",
    .get  = get_timer,
    .put  = put_timer,
};


typedef struct CompatEntry {
    char idstr[256];
    int instance_id;
} CompatEntry;

typedef struct SaveStateEntry {
    QTAILQ_ENTRY(SaveStateEntry) entry;
    char idstr[256];
    uint32_t instance_id;
    int alias_id;
    int version_id;
    /* version id read from the stream */
    int load_version_id;
    int section_id;
    /* section id read from the stream */
    int load_section_id;
    const SaveVMHandlers *ops;
    const VMStateDescription *vmsd;
    void *opaque;
    CompatEntry *compat;
    int is_ram;
} SaveStateEntry;

typedef struct SaveState {
    QTAILQ_HEAD(, SaveStateEntry) handlers;
    SaveStateEntry *handler_pri_head[MIG_PRI_MAX + 1];
    int global_section_id;
    uint32_t len;
    const char *name;
    uint32_t target_page_bits;
    uint32_t caps_count;
    MigrationCapability *capabilities;
    QemuUUID uuid;
} SaveState;

static SaveState savevm_state = {
    .handlers = QTAILQ_HEAD_INITIALIZER(savevm_state.handlers),
    .handler_pri_head = { [0 ... MIG_PRI_MAX] = NULL },
    .global_section_id = 0,
};

static SaveStateEntry *find_se(const char *idstr, uint32_t instance_id);

static bool should_validate_capability(int capability)
{
    assert(capability >= 0 && capability < MIGRATION_CAPABILITY__MAX);
    /* Validate only new capabilities to keep compatibility. */
    switch (capability) {
    case MIGRATION_CAPABILITY_X_IGNORE_SHARED:
    case MIGRATION_CAPABILITY_MAPPED_RAM:
        return true;
    default:
        return false;
    }
}

static uint32_t get_validatable_capabilities_count(void)
{
    MigrationState *s = migrate_get_current();
    uint32_t result = 0;
    int i;
    for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
        if (should_validate_capability(i) && s->capabilities[i]) {
            result++;
        }
    }
    return result;
}

static int configuration_pre_save(void *opaque)
{
    SaveState *state = opaque;
    const char *current_name = MACHINE_GET_CLASS(current_machine)->name;
    MigrationState *s = migrate_get_current();
    int i, j;

    state->len = strlen(current_name);
    state->name = current_name;
    state->target_page_bits = qemu_target_page_bits();

    state->caps_count = get_validatable_capabilities_count();
    state->capabilities = g_renew(MigrationCapability, state->capabilities,
                                  state->caps_count);
    for (i = j = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
        if (should_validate_capability(i) && s->capabilities[i]) {
            state->capabilities[j++] = i;
        }
    }
    state->uuid = qemu_uuid;

    return 0;
}

static int configuration_post_save(void *opaque)
{
    SaveState *state = opaque;

    g_free(state->capabilities);
    state->capabilities = NULL;
    state->caps_count = 0;
    return 0;
}

static int configuration_pre_load(void *opaque)
{
    SaveState *state = opaque;

    /* If there is no target-page-bits subsection it means the source
     * predates the variable-target-page-bits support and is using the
     * minimum possible value for this CPU.
     */
    state->target_page_bits = migration_legacy_page_bits();
    return 0;
}

static bool configuration_validate_capabilities(SaveState *state)
{
    bool ret = true;
    MigrationState *s = migrate_get_current();
    unsigned long *source_caps_bm;
    int i;

    source_caps_bm = bitmap_new(MIGRATION_CAPABILITY__MAX);
    for (i = 0; i < state->caps_count; i++) {
        MigrationCapability capability = state->capabilities[i];
        set_bit(capability, source_caps_bm);
    }

    for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
        bool source_state, target_state;
        if (!should_validate_capability(i)) {
            continue;
        }
        source_state = test_bit(i, source_caps_bm);
        target_state = s->capabilities[i];
        if (source_state != target_state) {
            error_report("Capability %s is %s, but received capability is %s",
                         MigrationCapability_str(i),
                         target_state ? "on" : "off",
                         source_state ? "on" : "off");
            ret = false;
            /* Don't break here to report all failed capabilities */
        }
    }

    g_free(source_caps_bm);
    return ret;
}
"on" : "off"); 372 ret = false; 373 /* Don't break here to report all failed capabilities */ 374 } 375 } 376 377 g_free(source_caps_bm); 378 return ret; 379 } 380 381 static int configuration_post_load(void *opaque, int version_id) 382 { 383 SaveState *state = opaque; 384 const char *current_name = MACHINE_GET_CLASS(current_machine)->name; 385 int ret = 0; 386 387 if (strncmp(state->name, current_name, state->len) != 0) { 388 error_report("Machine type received is '%.*s' and local is '%s'", 389 (int) state->len, state->name, current_name); 390 ret = -EINVAL; 391 goto out; 392 } 393 394 if (state->target_page_bits != qemu_target_page_bits()) { 395 error_report("Received TARGET_PAGE_BITS is %d but local is %d", 396 state->target_page_bits, qemu_target_page_bits()); 397 ret = -EINVAL; 398 goto out; 399 } 400 401 if (!configuration_validate_capabilities(state)) { 402 ret = -EINVAL; 403 goto out; 404 } 405 406 out: 407 g_free((void *)state->name); 408 state->name = NULL; 409 state->len = 0; 410 g_free(state->capabilities); 411 state->capabilities = NULL; 412 state->caps_count = 0; 413 414 return ret; 415 } 416 417 static int get_capability(QEMUFile *f, void *pv, size_t size, 418 const VMStateField *field) 419 { 420 MigrationCapability *capability = pv; 421 char capability_str[UINT8_MAX + 1]; 422 uint8_t len; 423 int i; 424 425 len = qemu_get_byte(f); 426 qemu_get_buffer(f, (uint8_t *)capability_str, len); 427 capability_str[len] = '\0'; 428 for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) { 429 if (!strcmp(MigrationCapability_str(i), capability_str)) { 430 *capability = i; 431 return 0; 432 } 433 } 434 error_report("Received unknown capability %s", capability_str); 435 return -EINVAL; 436 } 437 438 static int put_capability(QEMUFile *f, void *pv, size_t size, 439 const VMStateField *field, JSONWriter *vmdesc) 440 { 441 MigrationCapability *capability = pv; 442 const char *capability_str = MigrationCapability_str(*capability); 443 size_t len = strlen(capability_str); 444 assert(len <= UINT8_MAX); 445 446 qemu_put_byte(f, len); 447 qemu_put_buffer(f, (uint8_t *)capability_str, len); 448 return 0; 449 } 450 451 static const VMStateInfo vmstate_info_capability = { 452 .name = "capability", 453 .get = get_capability, 454 .put = put_capability, 455 }; 456 457 /* The target-page-bits subsection is present only if the 458 * target page size is not the same as the default (ie the 459 * minimum page size for a variable-page-size guest CPU). 460 * If it is present then it contains the actual target page 461 * bits for the machine, and migration will fail if the 462 * two ends don't agree about it. 
463 */ 464 static bool vmstate_target_page_bits_needed(void *opaque) 465 { 466 return qemu_target_page_bits() > migration_legacy_page_bits(); 467 } 468 469 static const VMStateDescription vmstate_target_page_bits = { 470 .name = "configuration/target-page-bits", 471 .version_id = 1, 472 .minimum_version_id = 1, 473 .needed = vmstate_target_page_bits_needed, 474 .fields = (const VMStateField[]) { 475 VMSTATE_UINT32(target_page_bits, SaveState), 476 VMSTATE_END_OF_LIST() 477 } 478 }; 479 480 static bool vmstate_capabilites_needed(void *opaque) 481 { 482 return get_validatable_capabilities_count() > 0; 483 } 484 485 static const VMStateDescription vmstate_capabilites = { 486 .name = "configuration/capabilities", 487 .version_id = 1, 488 .minimum_version_id = 1, 489 .needed = vmstate_capabilites_needed, 490 .fields = (const VMStateField[]) { 491 VMSTATE_UINT32_V(caps_count, SaveState, 1), 492 VMSTATE_VARRAY_UINT32_ALLOC(capabilities, SaveState, caps_count, 1, 493 vmstate_info_capability, 494 MigrationCapability), 495 VMSTATE_END_OF_LIST() 496 } 497 }; 498 499 static bool vmstate_uuid_needed(void *opaque) 500 { 501 return qemu_uuid_set && migrate_validate_uuid(); 502 } 503 504 static int vmstate_uuid_post_load(void *opaque, int version_id) 505 { 506 SaveState *state = opaque; 507 char uuid_src[UUID_STR_LEN]; 508 char uuid_dst[UUID_STR_LEN]; 509 510 if (!qemu_uuid_set) { 511 /* 512 * It's warning because user might not know UUID in some cases, 513 * e.g. load an old snapshot 514 */ 515 qemu_uuid_unparse(&state->uuid, uuid_src); 516 warn_report("UUID is received %s, but local uuid isn't set", 517 uuid_src); 518 return 0; 519 } 520 if (!qemu_uuid_is_equal(&state->uuid, &qemu_uuid)) { 521 qemu_uuid_unparse(&state->uuid, uuid_src); 522 qemu_uuid_unparse(&qemu_uuid, uuid_dst); 523 error_report("UUID received is %s and local is %s", uuid_src, uuid_dst); 524 return -EINVAL; 525 } 526 return 0; 527 } 528 529 static const VMStateDescription vmstate_uuid = { 530 .name = "configuration/uuid", 531 .version_id = 1, 532 .minimum_version_id = 1, 533 .needed = vmstate_uuid_needed, 534 .post_load = vmstate_uuid_post_load, 535 .fields = (const VMStateField[]) { 536 VMSTATE_UINT8_ARRAY_V(uuid.data, SaveState, sizeof(QemuUUID), 1), 537 VMSTATE_END_OF_LIST() 538 } 539 }; 540 541 static const VMStateDescription vmstate_configuration = { 542 .name = "configuration", 543 .version_id = 1, 544 .pre_load = configuration_pre_load, 545 .post_load = configuration_post_load, 546 .pre_save = configuration_pre_save, 547 .post_save = configuration_post_save, 548 .fields = (const VMStateField[]) { 549 VMSTATE_UINT32(len, SaveState), 550 VMSTATE_VBUFFER_ALLOC_UINT32(name, SaveState, 0, NULL, len), 551 VMSTATE_END_OF_LIST() 552 }, 553 .subsections = (const VMStateDescription * const []) { 554 &vmstate_target_page_bits, 555 &vmstate_capabilites, 556 &vmstate_uuid, 557 NULL 558 } 559 }; 560 561 static void dump_vmstate_vmsd(FILE *out_file, 562 const VMStateDescription *vmsd, int indent, 563 bool is_subsection); 564 565 static void dump_vmstate_vmsf(FILE *out_file, const VMStateField *field, 566 int indent) 567 { 568 fprintf(out_file, "%*s{\n", indent, ""); 569 indent += 2; 570 fprintf(out_file, "%*s\"field\": \"%s\",\n", indent, "", field->name); 571 fprintf(out_file, "%*s\"version_id\": %d,\n", indent, "", 572 field->version_id); 573 fprintf(out_file, "%*s\"field_exists\": %s,\n", indent, "", 574 field->field_exists ? 
"true" : "false"); 575 if (field->flags & VMS_ARRAY) { 576 fprintf(out_file, "%*s\"num\": %d,\n", indent, "", field->num); 577 } 578 fprintf(out_file, "%*s\"size\": %zu", indent, "", field->size); 579 if (field->vmsd != NULL) { 580 fprintf(out_file, ",\n"); 581 dump_vmstate_vmsd(out_file, field->vmsd, indent, false); 582 } 583 fprintf(out_file, "\n%*s}", indent - 2, ""); 584 } 585 586 static void dump_vmstate_vmss(FILE *out_file, 587 const VMStateDescription *subsection, 588 int indent) 589 { 590 if (subsection != NULL) { 591 dump_vmstate_vmsd(out_file, subsection, indent, true); 592 } 593 } 594 595 static void dump_vmstate_vmsd(FILE *out_file, 596 const VMStateDescription *vmsd, int indent, 597 bool is_subsection) 598 { 599 if (is_subsection) { 600 fprintf(out_file, "%*s{\n", indent, ""); 601 } else { 602 fprintf(out_file, "%*s\"%s\": {\n", indent, "", "Description"); 603 } 604 indent += 2; 605 fprintf(out_file, "%*s\"name\": \"%s\",\n", indent, "", vmsd->name); 606 fprintf(out_file, "%*s\"version_id\": %d,\n", indent, "", 607 vmsd->version_id); 608 fprintf(out_file, "%*s\"minimum_version_id\": %d", indent, "", 609 vmsd->minimum_version_id); 610 if (vmsd->fields != NULL) { 611 const VMStateField *field = vmsd->fields; 612 bool first; 613 614 fprintf(out_file, ",\n%*s\"Fields\": [\n", indent, ""); 615 first = true; 616 while (field->name != NULL) { 617 if (field->flags & VMS_MUST_EXIST) { 618 /* Ignore VMSTATE_VALIDATE bits; these don't get migrated */ 619 field++; 620 continue; 621 } 622 if (!first) { 623 fprintf(out_file, ",\n"); 624 } 625 dump_vmstate_vmsf(out_file, field, indent + 2); 626 field++; 627 first = false; 628 } 629 assert(field->flags == VMS_END); 630 fprintf(out_file, "\n%*s]", indent, ""); 631 } 632 if (vmsd->subsections != NULL) { 633 const VMStateDescription * const *subsection = vmsd->subsections; 634 bool first; 635 636 fprintf(out_file, ",\n%*s\"Subsections\": [\n", indent, ""); 637 first = true; 638 while (*subsection != NULL) { 639 if (!first) { 640 fprintf(out_file, ",\n"); 641 } 642 dump_vmstate_vmss(out_file, *subsection, indent + 2); 643 subsection++; 644 first = false; 645 } 646 fprintf(out_file, "\n%*s]", indent, ""); 647 } 648 fprintf(out_file, "\n%*s}", indent - 2, ""); 649 } 650 651 static void dump_machine_type(FILE *out_file) 652 { 653 MachineClass *mc; 654 655 mc = MACHINE_GET_CLASS(current_machine); 656 657 fprintf(out_file, " \"vmschkmachine\": {\n"); 658 fprintf(out_file, " \"Name\": \"%s\"\n", mc->name); 659 fprintf(out_file, " },\n"); 660 } 661 662 void dump_vmstate_json_to_file(FILE *out_file) 663 { 664 GSList *list, *elt; 665 bool first; 666 667 fprintf(out_file, "{\n"); 668 dump_machine_type(out_file); 669 670 first = true; 671 list = object_class_get_list(TYPE_DEVICE, true); 672 for (elt = list; elt; elt = elt->next) { 673 DeviceClass *dc = OBJECT_CLASS_CHECK(DeviceClass, elt->data, 674 TYPE_DEVICE); 675 const char *name; 676 int indent = 2; 677 678 if (!dc->vmsd) { 679 continue; 680 } 681 682 if (!first) { 683 fprintf(out_file, ",\n"); 684 } 685 name = object_class_get_name(OBJECT_CLASS(dc)); 686 fprintf(out_file, "%*s\"%s\": {\n", indent, "", name); 687 indent += 2; 688 fprintf(out_file, "%*s\"Name\": \"%s\",\n", indent, "", name); 689 fprintf(out_file, "%*s\"version_id\": %d,\n", indent, "", 690 dc->vmsd->version_id); 691 fprintf(out_file, "%*s\"minimum_version_id\": %d,\n", indent, "", 692 dc->vmsd->minimum_version_id); 693 694 dump_vmstate_vmsd(out_file, dc->vmsd, indent, false); 695 696 fprintf(out_file, "\n%*s}", indent - 2, ""); 697 
        first = false;
    }
    fprintf(out_file, "\n}\n");
    fclose(out_file);
    g_slist_free(list);
}

static uint32_t calculate_new_instance_id(const char *idstr)
{
    SaveStateEntry *se;
    uint32_t instance_id = 0;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (strcmp(idstr, se->idstr) == 0
            && instance_id <= se->instance_id) {
            instance_id = se->instance_id + 1;
        }
    }
    /* Make sure we never wrap around without it being noticed */
    assert(instance_id != VMSTATE_INSTANCE_ID_ANY);
    return instance_id;
}
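/*
 * For example (illustrative idstrs only): if handlers for idstr "ram" are
 * already registered with instance ids 0 and 1, a further
 * VMSTATE_INSTANCE_ID_ANY registration of "ram" gets instance id 2, while
 * the first registration of some other idstr still gets 0 - the counter is
 * per idstr, not global.
 */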
static int calculate_compat_instance_id(const char *idstr)
{
    SaveStateEntry *se;
    int instance_id = 0;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (!se->compat) {
            continue;
        }

        if (strcmp(idstr, se->compat->idstr) == 0
            && instance_id <= se->compat->instance_id) {
            instance_id = se->compat->instance_id + 1;
        }
    }
    return instance_id;
}

static inline MigrationPriority save_state_priority(SaveStateEntry *se)
{
    if (se->vmsd && se->vmsd->priority) {
        return se->vmsd->priority;
    }
    return MIG_PRI_DEFAULT;
}

static void savevm_state_handler_insert(SaveStateEntry *nse)
{
    MigrationPriority priority = save_state_priority(nse);
    SaveStateEntry *se;
    int i;

    assert(priority <= MIG_PRI_MAX);

    /*
     * This should never happen; otherwise migration will probably fail
     * silently somewhere, because we could be wrongly applying one
     * object's properties to another one. Bail out ASAP.
     */
    if (find_se(nse->idstr, nse->instance_id)) {
        error_report("%s: Detected duplicate SaveStateEntry: "
                     "id=%s, instance_id=0x%"PRIx32, __func__,
                     nse->idstr, nse->instance_id);
        exit(EXIT_FAILURE);
    }

    for (i = priority - 1; i >= 0; i--) {
        se = savevm_state.handler_pri_head[i];
        if (se != NULL) {
            assert(save_state_priority(se) < priority);
            break;
        }
    }

    if (i >= 0) {
        QTAILQ_INSERT_BEFORE(se, nse, entry);
    } else {
        QTAILQ_INSERT_TAIL(&savevm_state.handlers, nse, entry);
    }

    if (savevm_state.handler_pri_head[priority] == NULL) {
        savevm_state.handler_pri_head[priority] = nse;
    }
}

static void savevm_state_handler_remove(SaveStateEntry *se)
{
    SaveStateEntry *next;
    MigrationPriority priority = save_state_priority(se);

    if (se == savevm_state.handler_pri_head[priority]) {
        next = QTAILQ_NEXT(se, entry);
        if (next != NULL && save_state_priority(next) == priority) {
            savevm_state.handler_pri_head[priority] = next;
        } else {
            savevm_state.handler_pri_head[priority] = NULL;
        }
    }
    QTAILQ_REMOVE(&savevm_state.handlers, se, entry);
}

/* TODO: Individual devices generally have very little idea about the rest
   of the system, so instance_id should be removed/replaced.
   Meanwhile pass -1 as instance_id if you do not already have a clearly
   distinguishing id for all instances of your device class. */
int register_savevm_live(const char *idstr,
                         uint32_t instance_id,
                         int version_id,
                         const SaveVMHandlers *ops,
                         void *opaque)
{
    SaveStateEntry *se;

    se = g_new0(SaveStateEntry, 1);
    se->version_id = version_id;
    se->section_id = savevm_state.global_section_id++;
    se->ops = ops;
    se->opaque = opaque;
    se->vmsd = NULL;
    /* if this is a live_savevm handler then set is_ram */
    if (ops->save_setup != NULL) {
        se->is_ram = 1;
    }

    pstrcat(se->idstr, sizeof(se->idstr), idstr);

    if (instance_id == VMSTATE_INSTANCE_ID_ANY) {
        se->instance_id = calculate_new_instance_id(se->idstr);
    } else {
        se->instance_id = instance_id;
    }
    assert(!se->compat || se->instance_id == 0);
    savevm_state_handler_insert(se);
    return 0;
}

void unregister_savevm(VMStateIf *obj, const char *idstr, void *opaque)
{
    SaveStateEntry *se, *new_se;
    char id[256] = "";

    if (obj) {
        char *oid = vmstate_if_get_id(obj);
        if (oid) {
            pstrcpy(id, sizeof(id), oid);
            pstrcat(id, sizeof(id), "/");
            g_free(oid);
        }
    }
    pstrcat(id, sizeof(id), idstr);

    QTAILQ_FOREACH_SAFE(se, &savevm_state.handlers, entry, new_se) {
        if (strcmp(se->idstr, id) == 0 && se->opaque == opaque) {
            savevm_state_handler_remove(se);
            g_free(se->compat);
            g_free(se);
        }
    }
}
/*
 * Perform some basic checks on vmsds at registration
 * time.
 */
static void vmstate_check(const VMStateDescription *vmsd)
{
    const VMStateField *field = vmsd->fields;
    const VMStateDescription * const *subsection = vmsd->subsections;

    if (field) {
        while (field->name) {
            if (field->flags & (VMS_STRUCT | VMS_VSTRUCT)) {
                /* Recurse to sub structures */
                vmstate_check(field->vmsd);
            }
            /* Carry on */
            field++;
        }
        /* Check for the end of field list canary */
        if (field->flags != VMS_END) {
            error_report("VMSTATE not ending with VMS_END: %s", vmsd->name);
            g_assert_not_reached();
        }
    }

    while (subsection && *subsection) {
        /*
         * The name of a subsection should start with the name of the
         * current object.
         */
        assert(!strncmp(vmsd->name, (*subsection)->name, strlen(vmsd->name)));
        vmstate_check(*subsection);
        subsection++;
    }
}


int vmstate_register_with_alias_id(VMStateIf *obj, uint32_t instance_id,
                                   const VMStateDescription *vmsd,
                                   void *opaque, int alias_id,
                                   int required_for_version,
                                   Error **errp)
{
    SaveStateEntry *se;

    /* If this triggers, alias support can be dropped for the vmsd. */
    assert(alias_id == -1 || required_for_version >= vmsd->minimum_version_id);

    se = g_new0(SaveStateEntry, 1);
    se->version_id = vmsd->version_id;
    se->section_id = savevm_state.global_section_id++;
    se->opaque = opaque;
    se->vmsd = vmsd;
    se->alias_id = alias_id;

    if (obj) {
        char *id = vmstate_if_get_id(obj);
        if (id) {
            if (snprintf(se->idstr, sizeof(se->idstr), "%s/", id) >=
                sizeof(se->idstr)) {
                error_setg(errp, "Path too long for VMState (%s)", id);
                g_free(id);
                g_free(se);

                return -1;
            }
            g_free(id);

            se->compat = g_new0(CompatEntry, 1);
            pstrcpy(se->compat->idstr, sizeof(se->compat->idstr), vmsd->name);
            se->compat->instance_id = instance_id == VMSTATE_INSTANCE_ID_ANY ?
                        calculate_compat_instance_id(vmsd->name) : instance_id;
            instance_id = VMSTATE_INSTANCE_ID_ANY;
        }
    }
    pstrcat(se->idstr, sizeof(se->idstr), vmsd->name);

    if (instance_id == VMSTATE_INSTANCE_ID_ANY) {
        se->instance_id = calculate_new_instance_id(se->idstr);
    } else {
        se->instance_id = instance_id;
    }

    /* Perform a recursive sanity check during the test runs */
    if (qtest_enabled()) {
        vmstate_check(vmsd);
    }
    assert(!se->compat || se->instance_id == 0);
    savevm_state_handler_insert(se);
    return 0;
}

void vmstate_unregister(VMStateIf *obj, const VMStateDescription *vmsd,
                        void *opaque)
{
    SaveStateEntry *se, *new_se;

    QTAILQ_FOREACH_SAFE(se, &savevm_state.handlers, entry, new_se) {
        if (se->vmsd == vmsd && se->opaque == opaque) {
            savevm_state_handler_remove(se);
            g_free(se->compat);
            g_free(se);
        }
    }
}

static int vmstate_load(QEMUFile *f, SaveStateEntry *se)
{
    trace_vmstate_load(se->idstr, se->vmsd ? se->vmsd->name : "(old)");
    if (!se->vmsd) {         /* Old style */
        return se->ops->load_state(f, se->opaque, se->load_version_id);
    }
    return vmstate_load_state(f, se->vmsd, se->opaque, se->load_version_id);
}

static void vmstate_save_old_style(QEMUFile *f, SaveStateEntry *se,
                                   JSONWriter *vmdesc)
{
    uint64_t old_offset = qemu_file_transferred(f);
    se->ops->save_state(f, se->opaque);
    uint64_t size = qemu_file_transferred(f) - old_offset;

    if (vmdesc) {
        json_writer_int64(vmdesc, "size", size);
        json_writer_start_array(vmdesc, "fields");
        json_writer_start_object(vmdesc, NULL);
        json_writer_str(vmdesc, "name", "data");
        json_writer_int64(vmdesc, "size", size);
        json_writer_str(vmdesc, "type", "buffer");
        json_writer_end_object(vmdesc);
        json_writer_end_array(vmdesc);
    }
}

/*
 * Write the header for a device section (QEMU_VM_SECTION START/END/PART/FULL)
 */
static void save_section_header(QEMUFile *f, SaveStateEntry *se,
                                uint8_t section_type)
{
    qemu_put_byte(f, section_type);
    qemu_put_be32(f, se->section_id);

    if (section_type == QEMU_VM_SECTION_FULL ||
        section_type == QEMU_VM_SECTION_START) {
        /* ID string */
        size_t len = strlen(se->idstr);
        qemu_put_byte(f, len);
        qemu_put_buffer(f, (uint8_t *)se->idstr, len);

        qemu_put_be32(f, se->instance_id);
        qemu_put_be32(f, se->version_id);
    }
}
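/*
 * A sketch of what save_section_header() emits for a SECTION_FULL/START
 * (field names informal):
 *
 *     byte       section_type    e.g. QEMU_VM_SECTION_FULL
 *     be32       section_id
 *     byte       len(idstr)
 *     n x byte   idstr           not NUL-terminated
 *     be32       instance_id
 *     be32       version_id
 *
 * For SECTION_PART/END only the first two fields are written; the
 * destination is expected to remember the section_id -> device mapping from
 * the earlier START/FULL header.
 */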
1018 */ 1019 static void save_section_footer(QEMUFile *f, SaveStateEntry *se) 1020 { 1021 if (migrate_get_current()->send_section_footer) { 1022 qemu_put_byte(f, QEMU_VM_SECTION_FOOTER); 1023 qemu_put_be32(f, se->section_id); 1024 } 1025 } 1026 1027 static int vmstate_save(QEMUFile *f, SaveStateEntry *se, JSONWriter *vmdesc, 1028 Error **errp) 1029 { 1030 int ret; 1031 1032 if ((!se->ops || !se->ops->save_state) && !se->vmsd) { 1033 return 0; 1034 } 1035 if (se->vmsd && !vmstate_section_needed(se->vmsd, se->opaque)) { 1036 trace_savevm_section_skip(se->idstr, se->section_id); 1037 return 0; 1038 } 1039 1040 trace_savevm_section_start(se->idstr, se->section_id); 1041 save_section_header(f, se, QEMU_VM_SECTION_FULL); 1042 if (vmdesc) { 1043 json_writer_start_object(vmdesc, NULL); 1044 json_writer_str(vmdesc, "name", se->idstr); 1045 json_writer_int64(vmdesc, "instance_id", se->instance_id); 1046 } 1047 1048 trace_vmstate_save(se->idstr, se->vmsd ? se->vmsd->name : "(old)"); 1049 if (!se->vmsd) { 1050 vmstate_save_old_style(f, se, vmdesc); 1051 } else { 1052 ret = vmstate_save_state_with_err(f, se->vmsd, se->opaque, vmdesc, 1053 errp); 1054 if (ret) { 1055 return ret; 1056 } 1057 } 1058 1059 trace_savevm_section_end(se->idstr, se->section_id, 0); 1060 save_section_footer(f, se); 1061 if (vmdesc) { 1062 json_writer_end_object(vmdesc); 1063 } 1064 return 0; 1065 } 1066 /** 1067 * qemu_savevm_command_send: Send a 'QEMU_VM_COMMAND' type element with the 1068 * command and associated data. 1069 * 1070 * @f: File to send command on 1071 * @command: Command type to send 1072 * @len: Length of associated data 1073 * @data: Data associated with command. 1074 */ 1075 static void qemu_savevm_command_send(QEMUFile *f, 1076 enum qemu_vm_cmd command, 1077 uint16_t len, 1078 uint8_t *data) 1079 { 1080 trace_savevm_command_send(command, len); 1081 qemu_put_byte(f, QEMU_VM_COMMAND); 1082 qemu_put_be16(f, (uint16_t)command); 1083 qemu_put_be16(f, len); 1084 qemu_put_buffer(f, data, len); 1085 qemu_fflush(f); 1086 } 1087 1088 void qemu_savevm_send_colo_enable(QEMUFile *f) 1089 { 1090 trace_savevm_send_colo_enable(); 1091 qemu_savevm_command_send(f, MIG_CMD_ENABLE_COLO, 0, NULL); 1092 } 1093 1094 void qemu_savevm_send_ping(QEMUFile *f, uint32_t value) 1095 { 1096 uint32_t buf; 1097 1098 trace_savevm_send_ping(value); 1099 buf = cpu_to_be32(value); 1100 qemu_savevm_command_send(f, MIG_CMD_PING, sizeof(value), (uint8_t *)&buf); 1101 } 1102 1103 void qemu_savevm_send_open_return_path(QEMUFile *f) 1104 { 1105 trace_savevm_send_open_return_path(); 1106 qemu_savevm_command_send(f, MIG_CMD_OPEN_RETURN_PATH, 0, NULL); 1107 } 1108 1109 /* We have a buffer of data to send; we don't want that all to be loaded 1110 * by the command itself, so the command contains just the length of the 1111 * extra buffer that we then send straight after it. 
void qemu_savevm_send_colo_enable(QEMUFile *f)
{
    trace_savevm_send_colo_enable();
    qemu_savevm_command_send(f, MIG_CMD_ENABLE_COLO, 0, NULL);
}

void qemu_savevm_send_ping(QEMUFile *f, uint32_t value)
{
    uint32_t buf;

    trace_savevm_send_ping(value);
    buf = cpu_to_be32(value);
    qemu_savevm_command_send(f, MIG_CMD_PING, sizeof(value), (uint8_t *)&buf);
}

void qemu_savevm_send_open_return_path(QEMUFile *f)
{
    trace_savevm_send_open_return_path();
    qemu_savevm_command_send(f, MIG_CMD_OPEN_RETURN_PATH, 0, NULL);
}

/* We have a buffer of data to send; we don't want all of it to be loaded
 * by the command itself, so the command contains just the length of the
 * extra buffer that we then send straight after it.
 * TODO: There must be a better way to organise this.
 *
 * Returns:
 *    0 on success
 *    -ve on error
 */
int qemu_savevm_send_packaged(QEMUFile *f, const uint8_t *buf, size_t len)
{
    uint32_t tmp;
    MigrationState *ms = migrate_get_current();
    Error *local_err = NULL;

    if (len > MAX_VM_CMD_PACKAGED_SIZE) {
        error_setg(&local_err, "%s: Unreasonably large packaged state: %zu",
                   __func__, len);
        migrate_set_error(ms, local_err);
        error_report_err(local_err);
        return -1;
    }

    tmp = cpu_to_be32(len);

    trace_qemu_savevm_send_packaged();
    qemu_savevm_command_send(f, MIG_CMD_PACKAGED, 4, (uint8_t *)&tmp);

    qemu_put_buffer(f, buf, len);

    return 0;
}

/* Send prior to any postcopy transfer */
void qemu_savevm_send_postcopy_advise(QEMUFile *f)
{
    if (migrate_postcopy_ram()) {
        uint64_t tmp[2];
        tmp[0] = cpu_to_be64(ram_pagesize_summary());
        tmp[1] = cpu_to_be64(qemu_target_page_size());

        trace_qemu_savevm_send_postcopy_advise();
        qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_ADVISE,
                                 16, (uint8_t *)tmp);
    } else {
        qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_ADVISE, 0, NULL);
    }
}

/* Sent prior to starting the destination running in postcopy; discards pages
 * that have already been sent but were redirtied on the source.
 * CMD_POSTCOPY_RAM_DISCARD consists of:
 *      byte   version (0)
 *      byte   Length of name field (not including 0)
 *  n x byte   RAM block name
 *      byte   0 terminator (just for safety)
 *  n x        Byte ranges within the named RAMBlock
 *      be64   Start of the range
 *      be64   Length
 *
 *  name:  RAMBlock name that these entries are part of
 *  len: Number of page entries
 *  start_list: 'len' addresses
 *  length_list: 'len' addresses
 *
 */
void qemu_savevm_send_postcopy_ram_discard(QEMUFile *f, const char *name,
                                           uint16_t len,
                                           uint64_t *start_list,
                                           uint64_t *length_list)
{
    uint8_t *buf;
    uint16_t tmplen;
    uint16_t t;
    size_t name_len = strlen(name);

    trace_qemu_savevm_send_postcopy_ram_discard(name, len);
    assert(name_len < 256);
    buf = g_malloc0(1 + 1 + name_len + 1 + (8 + 8) * len);
    buf[0] = postcopy_ram_discard_version;
    buf[1] = name_len;
    memcpy(buf + 2, name, name_len);
    tmplen = 2 + name_len;
    buf[tmplen++] = '\0';

    for (t = 0; t < len; t++) {
        stq_be_p(buf + tmplen, start_list[t]);
        tmplen += 8;
        stq_be_p(buf + tmplen, length_list[t]);
        tmplen += 8;
    }
    qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_RAM_DISCARD, tmplen, buf);
    g_free(buf);
}
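/*
 * Worked example (illustrative values; "pc.ram" is just a typical RAMBlock
 * name): discarding two ranges in a block named "pc.ram" produces
 *
 *     byte      0x00            postcopy_ram_discard_version
 *     byte      0x06            strlen("pc.ram")
 *     6 x byte  "pc.ram"
 *     byte      0x00            terminator
 *     be64 start0, be64 len0
 *     be64 start1, be64 len1
 *
 * i.e. a payload of 1 + 1 + 6 + 1 + 2 * 16 = 41 bytes.
 */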
/* Get the destination into a state where it can receive postcopy data. */
void qemu_savevm_send_postcopy_listen(QEMUFile *f)
{
    trace_savevm_send_postcopy_listen();
    qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_LISTEN, 0, NULL);
}

/* Kick the destination into running */
void qemu_savevm_send_postcopy_run(QEMUFile *f)
{
    trace_savevm_send_postcopy_run();
    qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_RUN, 0, NULL);
}

void qemu_savevm_send_postcopy_resume(QEMUFile *f)
{
    trace_savevm_send_postcopy_resume();
    qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_RESUME, 0, NULL);
}

void qemu_savevm_send_recv_bitmap(QEMUFile *f, char *block_name)
{
    size_t len;
    char buf[256];

    trace_savevm_send_recv_bitmap(block_name);

    buf[0] = len = strlen(block_name);
    memcpy(buf + 1, block_name, len);

    qemu_savevm_command_send(f, MIG_CMD_RECV_BITMAP, len + 1, (uint8_t *)buf);
}

static void qemu_savevm_send_switchover_start(QEMUFile *f)
{
    trace_savevm_send_switchover_start();
    qemu_savevm_command_send(f, MIG_CMD_SWITCHOVER_START, 0, NULL);
}

void qemu_savevm_maybe_send_switchover_start(QEMUFile *f)
{
    if (migrate_send_switchover_start()) {
        qemu_savevm_send_switchover_start(f);
    }
}

bool qemu_savevm_state_blocked(Error **errp)
{
    SaveStateEntry *se;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (se->vmsd && se->vmsd->unmigratable) {
            error_setg(errp, "State blocked by non-migratable device '%s'",
                       se->idstr);
            return true;
        }
    }
    return false;
}

void qemu_savevm_non_migratable_list(strList **reasons)
{
    SaveStateEntry *se;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (se->vmsd && se->vmsd->unmigratable) {
            QAPI_LIST_PREPEND(*reasons,
                              g_strdup_printf("non-migratable device: %s",
                                              se->idstr));
        }
    }
}

void qemu_savevm_state_header(QEMUFile *f)
{
    MigrationState *s = migrate_get_current();
    JSONWriter *vmdesc = s->vmdesc;

    trace_savevm_state_header();
    qemu_put_be32(f, QEMU_VM_FILE_MAGIC);
    qemu_put_be32(f, QEMU_VM_FILE_VERSION);

    if (s->send_configuration) {
        qemu_put_byte(f, QEMU_VM_CONFIGURATION);

        if (vmdesc) {
            /*
             * This starts the main json object and is paired with the
             * json_writer_end_object in
             * qemu_savevm_state_complete_precopy_non_iterable
             */
            json_writer_start_object(vmdesc, NULL);
            json_writer_start_object(vmdesc, "configuration");
        }

        vmstate_save_state(f, &vmstate_configuration, &savevm_state, vmdesc);

        if (vmdesc) {
            json_writer_end_object(vmdesc);
        }
    }
}

bool qemu_savevm_state_guest_unplug_pending(void)
{
    SaveStateEntry *se;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (se->vmsd && se->vmsd->dev_unplug_pending &&
            se->vmsd->dev_unplug_pending(se->opaque)) {
            return true;
        }
    }

    return false;
}

int qemu_savevm_state_prepare(Error **errp)
{
    SaveStateEntry *se;
    int ret;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (!se->ops || !se->ops->save_prepare) {
            continue;
        }
        if (se->ops->is_active) {
            if (!se->ops->is_active(se->opaque)) {
                continue;
            }
        }

        ret = se->ops->save_prepare(se->opaque, errp);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}

int qemu_savevm_state_setup(QEMUFile *f, Error **errp)
{
    ERRP_GUARD();
    MigrationState *ms = migrate_get_current();
    JSONWriter *vmdesc = ms->vmdesc;
    SaveStateEntry *se;
    int ret = 0;

    if (vmdesc) {
        json_writer_int64(vmdesc, "page_size", qemu_target_page_size());
        json_writer_start_array(vmdesc, "devices");
    }

    trace_savevm_state_setup();
    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (se->vmsd && se->vmsd->early_setup) {
            ret = vmstate_save(f, se, vmdesc, errp);
            if (ret) {
                migrate_set_error(ms, *errp);
                qemu_file_set_error(f, ret);
                break;
            }
            continue;
        }

        if (!se->ops || !se->ops->save_setup) {
            continue;
        }
        if (se->ops->is_active) {
            if (!se->ops->is_active(se->opaque)) {
                continue;
            }
        }
        save_section_header(f, se, QEMU_VM_SECTION_START);

        ret = se->ops->save_setup(f, se->opaque, errp);
        save_section_footer(f, se);
        if (ret < 0) {
            qemu_file_set_error(f, ret);
            break;
        }
    }

    if (ret) {
        return ret;
    }

    /* TODO: Should we check that errp is set in case of failure? */
    return precopy_notify(PRECOPY_NOTIFY_SETUP, errp);
}

int qemu_savevm_state_resume_prepare(MigrationState *s)
{
    SaveStateEntry *se;
    int ret;

    trace_savevm_state_resume_prepare();

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (!se->ops || !se->ops->resume_prepare) {
            continue;
        }
        if (se->ops->is_active) {
            if (!se->ops->is_active(se->opaque)) {
                continue;
            }
        }
        ret = se->ops->resume_prepare(s, se->opaque);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}

/*
 * This function has three return values:
 *   negative: there was an error, and we have -errno.
 *   0 : We haven't finished, the caller has to go again.
 *   1 : We have finished, we can go to the completion phase.
 */
int qemu_savevm_state_iterate(QEMUFile *f, bool postcopy)
{
    SaveStateEntry *se;
    bool all_finished = true;
    int ret;

    trace_savevm_state_iterate();
    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (!se->ops || !se->ops->save_live_iterate) {
            continue;
        }
        if (se->ops->is_active &&
            !se->ops->is_active(se->opaque)) {
            continue;
        }
        if (se->ops->is_active_iterate &&
            !se->ops->is_active_iterate(se->opaque)) {
            continue;
        }
        /*
         * In the postcopy phase, any device that doesn't know how to
         * do postcopy should have saved its state in the _complete
         * call that has already run; it might get confused if we call
         * iterate afterwards.
         */
        if (postcopy &&
            !(se->ops->has_postcopy && se->ops->has_postcopy(se->opaque))) {
            continue;
        }
        if (migration_rate_exceeded(f)) {
            return 0;
        }
        trace_savevm_section_start(se->idstr, se->section_id);

        save_section_header(f, se, QEMU_VM_SECTION_PART);

        ret = se->ops->save_live_iterate(f, se->opaque);
        trace_savevm_section_end(se->idstr, se->section_id, ret);
        save_section_footer(f, se);

        if (ret < 0) {
            error_report("failed to save SaveStateEntry with id(name): "
                         "%d(%s): %d",
                         se->section_id, se->idstr, ret);
            qemu_file_set_error(f, ret);
            return ret;
        } else if (!ret) {
            all_finished = false;
        }
    }
    return all_finished;
}
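/*
 * A sketch of how a caller is expected to drive the tri-state return value
 * (this mirrors what qemu_savevm_state() below does):
 *
 *     while (qemu_file_get_error(f) == 0) {
 *         if (qemu_savevm_state_iterate(f, false) > 0) {
 *             break;              // > 0: finished, move to completion
 *         }                       // == 0: not done yet, iterate again
 *     }                           // < 0 also lands in qemu_file_get_error()
 *                                 //     because iterate sets the file error
 */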
1451 */ 1452 if (postcopy && 1453 !(se->ops->has_postcopy && se->ops->has_postcopy(se->opaque))) { 1454 continue; 1455 } 1456 if (migration_rate_exceeded(f)) { 1457 return 0; 1458 } 1459 trace_savevm_section_start(se->idstr, se->section_id); 1460 1461 save_section_header(f, se, QEMU_VM_SECTION_PART); 1462 1463 ret = se->ops->save_live_iterate(f, se->opaque); 1464 trace_savevm_section_end(se->idstr, se->section_id, ret); 1465 save_section_footer(f, se); 1466 1467 if (ret < 0) { 1468 error_report("failed to save SaveStateEntry with id(name): " 1469 "%d(%s): %d", 1470 se->section_id, se->idstr, ret); 1471 qemu_file_set_error(f, ret); 1472 return ret; 1473 } else if (!ret) { 1474 all_finished = false; 1475 } 1476 } 1477 return all_finished; 1478 } 1479 1480 bool should_send_vmdesc(void) 1481 { 1482 MachineState *machine = MACHINE(qdev_get_machine()); 1483 1484 return !machine->suppress_vmdesc; 1485 } 1486 1487 /* 1488 * Calls the save_live_complete_postcopy methods 1489 * causing the last few pages to be sent immediately and doing any associated 1490 * cleanup. 1491 * Note postcopy also calls qemu_savevm_state_complete_precopy to complete 1492 * all the other devices, but that happens at the point we switch to postcopy. 1493 */ 1494 void qemu_savevm_state_complete_postcopy(QEMUFile *f) 1495 { 1496 SaveStateEntry *se; 1497 int ret; 1498 1499 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 1500 if (!se->ops || !se->ops->save_live_complete_postcopy) { 1501 continue; 1502 } 1503 if (se->ops->is_active) { 1504 if (!se->ops->is_active(se->opaque)) { 1505 continue; 1506 } 1507 } 1508 trace_savevm_section_start(se->idstr, se->section_id); 1509 /* Section type */ 1510 qemu_put_byte(f, QEMU_VM_SECTION_END); 1511 qemu_put_be32(f, se->section_id); 1512 1513 ret = se->ops->save_live_complete_postcopy(f, se->opaque); 1514 trace_savevm_section_end(se->idstr, se->section_id, ret); 1515 save_section_footer(f, se); 1516 if (ret < 0) { 1517 qemu_file_set_error(f, ret); 1518 return; 1519 } 1520 } 1521 1522 qemu_put_byte(f, QEMU_VM_EOF); 1523 qemu_fflush(f); 1524 } 1525 1526 bool qemu_savevm_state_postcopy_prepare(QEMUFile *f, Error **errp) 1527 { 1528 SaveStateEntry *se; 1529 bool ret; 1530 1531 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 1532 if (!se->ops || !se->ops->save_postcopy_prepare) { 1533 continue; 1534 } 1535 1536 if (se->ops->is_active) { 1537 if (!se->ops->is_active(se->opaque)) { 1538 continue; 1539 } 1540 } 1541 1542 trace_savevm_section_start(se->idstr, se->section_id); 1543 1544 save_section_header(f, se, QEMU_VM_SECTION_PART); 1545 ret = se->ops->save_postcopy_prepare(f, se->opaque, errp); 1546 save_section_footer(f, se); 1547 1548 trace_savevm_section_end(se->idstr, se->section_id, ret); 1549 1550 if (!ret) { 1551 assert(*errp); 1552 return false; 1553 } 1554 } 1555 1556 return true; 1557 } 1558 1559 int qemu_savevm_state_complete_precopy_iterable(QEMUFile *f, bool in_postcopy) 1560 { 1561 int64_t start_ts_each, end_ts_each; 1562 SaveStateEntry *se; 1563 int ret; 1564 bool multifd_device_state = multifd_device_state_supported(); 1565 1566 if (multifd_device_state) { 1567 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 1568 SaveLiveCompletePrecopyThreadHandler hdlr; 1569 1570 if (!se->ops || (in_postcopy && se->ops->has_postcopy && 1571 se->ops->has_postcopy(se->opaque)) || 1572 !se->ops->save_live_complete_precopy_thread) { 1573 continue; 1574 } 1575 1576 hdlr = se->ops->save_live_complete_precopy_thread; 1577 multifd_spawn_device_state_save_thread(hdlr, 1578 se->idstr, 
int qemu_savevm_state_complete_precopy_iterable(QEMUFile *f, bool in_postcopy)
{
    int64_t start_ts_each, end_ts_each;
    SaveStateEntry *se;
    int ret;
    bool multifd_device_state = multifd_device_state_supported();

    if (multifd_device_state) {
        QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
            SaveLiveCompletePrecopyThreadHandler hdlr;

            if (!se->ops || (in_postcopy && se->ops->has_postcopy &&
                             se->ops->has_postcopy(se->opaque)) ||
                !se->ops->save_live_complete_precopy_thread) {
                continue;
            }

            hdlr = se->ops->save_live_complete_precopy_thread;
            multifd_spawn_device_state_save_thread(hdlr,
                                                   se->idstr, se->instance_id,
                                                   se->opaque);
        }
    }

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (!se->ops ||
            (in_postcopy && se->ops->has_postcopy &&
             se->ops->has_postcopy(se->opaque)) ||
            !se->ops->save_live_complete_precopy) {
            continue;
        }

        if (se->ops->is_active) {
            if (!se->ops->is_active(se->opaque)) {
                continue;
            }
        }

        start_ts_each = qemu_clock_get_us(QEMU_CLOCK_REALTIME);
        trace_savevm_section_start(se->idstr, se->section_id);

        save_section_header(f, se, QEMU_VM_SECTION_END);

        ret = se->ops->save_live_complete_precopy(f, se->opaque);
        trace_savevm_section_end(se->idstr, se->section_id, ret);
        save_section_footer(f, se);
        if (ret < 0) {
            qemu_file_set_error(f, ret);
            goto ret_fail_abort_threads;
        }
        end_ts_each = qemu_clock_get_us(QEMU_CLOCK_REALTIME);
        trace_vmstate_downtime_save("iterable", se->idstr, se->instance_id,
                                    end_ts_each - start_ts_each);
    }

    if (multifd_device_state) {
        if (migrate_has_error(migrate_get_current())) {
            multifd_abort_device_state_save_threads();
        }

        if (!multifd_join_device_state_save_threads()) {
            qemu_file_set_error(f, -EINVAL);
            return -1;
        }
    }

    trace_vmstate_downtime_checkpoint("src-iterable-saved");

    return 0;

ret_fail_abort_threads:
    if (multifd_device_state) {
        multifd_abort_device_state_save_threads();
        multifd_join_device_state_save_threads();
    }

    return -1;
}

int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f,
                                                    bool in_postcopy)
{
    MigrationState *ms = migrate_get_current();
    int64_t start_ts_each, end_ts_each;
    JSONWriter *vmdesc = ms->vmdesc;
    int vmdesc_len;
    SaveStateEntry *se;
    Error *local_err = NULL;
    int ret;

    /* Make sure CPU states are synchronized before saving non-iterable state */
    cpu_synchronize_all_states();

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (se->vmsd && se->vmsd->early_setup) {
            /* Already saved during qemu_savevm_state_setup(). */
            continue;
        }

        start_ts_each = qemu_clock_get_us(QEMU_CLOCK_REALTIME);

        ret = vmstate_save(f, se, vmdesc, &local_err);
        if (ret) {
            migrate_set_error(ms, local_err);
            error_report_err(local_err);
            qemu_file_set_error(f, ret);
            return ret;
        }

        end_ts_each = qemu_clock_get_us(QEMU_CLOCK_REALTIME);
        trace_vmstate_downtime_save("non-iterable", se->idstr, se->instance_id,
                                    end_ts_each - start_ts_each);
    }

    if (!in_postcopy) {
        /* Postcopy stream will still be going */
        qemu_put_byte(f, QEMU_VM_EOF);

        if (vmdesc) {
            json_writer_end_array(vmdesc);
            json_writer_end_object(vmdesc);
            vmdesc_len = strlen(json_writer_get(vmdesc));

            qemu_put_byte(f, QEMU_VM_VMDESCRIPTION);
            qemu_put_be32(f, vmdesc_len);
            qemu_put_buffer(f, (uint8_t *)json_writer_get(vmdesc), vmdesc_len);
        }
    }

    trace_vmstate_downtime_checkpoint("src-non-iterable-saved");

    return 0;
}
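/*
 * For illustration, the QEMU_VM_VMDESCRIPTION blob written above is a JSON
 * document shaped roughly like this (abridged; it is built piecemeal by
 * qemu_savevm_state_header(), qemu_savevm_state_setup() and vmstate_save()):
 *
 *     { "configuration": { ... },
 *       "page_size": 4096,
 *       "devices": [
 *           { "name": "ram", "instance_id": 0, ... },
 *           ...
 *       ] }
 *
 * It is descriptive only - analysis tooling can use it to decode the
 * stream, but the destination does not need it to load the state.
 */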
int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only)
{
    int ret;

    ret = qemu_savevm_state_complete_precopy_iterable(f, false);
    if (ret) {
        return ret;
    }

    if (!iterable_only) {
        ret = qemu_savevm_state_complete_precopy_non_iterable(f, false);
        if (ret) {
            return ret;
        }
    }

    return qemu_fflush(f);
}

/* Give an estimate of the amount left to be transferred;
 * the result is split into the amount for units that can and
 * for units that can't do postcopy.
 */
void qemu_savevm_state_pending_estimate(uint64_t *must_precopy,
                                        uint64_t *can_postcopy)
{
    SaveStateEntry *se;

    *must_precopy = 0;
    *can_postcopy = 0;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (!se->ops || !se->ops->state_pending_estimate) {
            continue;
        }
        if (se->ops->is_active) {
            if (!se->ops->is_active(se->opaque)) {
                continue;
            }
        }
        se->ops->state_pending_estimate(se->opaque, must_precopy, can_postcopy);
    }
}

void qemu_savevm_state_pending_exact(uint64_t *must_precopy,
                                     uint64_t *can_postcopy)
{
    SaveStateEntry *se;

    *must_precopy = 0;
    *can_postcopy = 0;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (!se->ops || !se->ops->state_pending_exact) {
            continue;
        }
        if (se->ops->is_active) {
            if (!se->ops->is_active(se->opaque)) {
                continue;
            }
        }
        se->ops->state_pending_exact(se->opaque, must_precopy, can_postcopy);
    }
}

void qemu_savevm_state_cleanup(void)
{
    SaveStateEntry *se;
    Error *local_err = NULL;

    if (precopy_notify(PRECOPY_NOTIFY_CLEANUP, &local_err)) {
        error_report_err(local_err);
    }

    trace_savevm_state_cleanup();
    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (se->ops && se->ops->save_cleanup) {
            se->ops->save_cleanup(se->opaque);
        }
    }
}

static int qemu_savevm_state(QEMUFile *f, Error **errp)
{
    int ret;
    MigrationState *ms = migrate_get_current();
    MigrationStatus status;

    if (migration_is_running()) {
        error_setg(errp, "There's a migration process in progress");
        return -EINVAL;
    }

    ret = migrate_init(ms, errp);
    if (ret) {
        return ret;
    }
    ms->to_dst_file = f;

    qemu_savevm_state_header(f);
    ret = qemu_savevm_state_setup(f, errp);
    if (ret) {
        goto cleanup;
    }

    while (qemu_file_get_error(f) == 0) {
        if (qemu_savevm_state_iterate(f, false) > 0) {
            break;
        }
    }

    ret = qemu_file_get_error(f);
    if (ret == 0) {
        qemu_savevm_maybe_send_switchover_start(f);
        qemu_savevm_state_complete_precopy(f, false);
        ret = qemu_file_get_error(f);
    }
    if (ret != 0) {
        error_setg_errno(errp, -ret, "Error while writing VM state");
    }
cleanup:
    qemu_savevm_state_cleanup();

    if (ret != 0) {
        status = MIGRATION_STATUS_FAILED;
    } else {
        status = MIGRATION_STATUS_COMPLETED;
    }
    migrate_set_state(&ms->state, MIGRATION_STATUS_SETUP, status);

    /* f is an outer parameter; it should not stay in the global migration
     * state after this function finishes */
    ms->to_dst_file = NULL;

    return ret;
}

void qemu_savevm_live_state(QEMUFile *f)
{
    /* save QEMU_VM_SECTION_END section */
    qemu_savevm_state_complete_precopy(f, true);
    qemu_put_byte(f, QEMU_VM_EOF);
}

int qemu_save_device_state(QEMUFile *f)
{
    MigrationState *ms = migrate_get_current();
    Error *local_err = NULL;
    SaveStateEntry *se;

    if (!migration_in_colo_state()) {
        qemu_put_be32(f, QEMU_VM_FILE_MAGIC);
        qemu_put_be32(f, QEMU_VM_FILE_VERSION);
    }
    cpu_synchronize_all_states();

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        int ret;

        if (se->is_ram) {
            continue;
        }
        ret = vmstate_save(f, se, NULL, &local_err);
        if (ret) {
            migrate_set_error(ms, local_err);
            error_report_err(local_err);
            return ret;
        }
    }

    qemu_put_byte(f, QEMU_VM_EOF);

    return qemu_file_get_error(f);
}

static SaveStateEntry *find_se(const char *idstr, uint32_t instance_id)
{
    SaveStateEntry *se;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (!strcmp(se->idstr, idstr) &&
            (instance_id == se->instance_id ||
             instance_id == se->alias_id))
            return se;
        /* Migrating from an older version? */
        if (strstr(se->idstr, idstr) && se->compat) {
            if (!strcmp(se->compat->idstr, idstr) &&
                (instance_id == se->compat->instance_id ||
                 instance_id == se->alias_id))
                return se;
        }
    }
    return NULL;
}

enum LoadVMExitCodes {
    /* Allow a command to quit all layers of nested loadvm loops */
    LOADVM_QUIT     =  1,
};

/* ------ incoming postcopy messages ------ */
/* 'advise' arrives before any transfers just to tell us that a postcopy
 * *might* happen - it might be skipped if precopy transferred everything
 * quickly.
 */
1897 */ 1898 static int loadvm_postcopy_handle_advise(MigrationIncomingState *mis, 1899 uint16_t len) 1900 { 1901 PostcopyState ps = postcopy_state_set(POSTCOPY_INCOMING_ADVISE); 1902 uint64_t remote_pagesize_summary, local_pagesize_summary, remote_tps; 1903 size_t page_size = qemu_target_page_size(); 1904 Error *local_err = NULL; 1905 1906 trace_loadvm_postcopy_handle_advise(); 1907 if (ps != POSTCOPY_INCOMING_NONE) { 1908 error_report("CMD_POSTCOPY_ADVISE in wrong postcopy state (%d)", ps); 1909 return -1; 1910 } 1911 1912 switch (len) { 1913 case 0: 1914 if (migrate_postcopy_ram()) { 1915 error_report("RAM postcopy is enabled but have 0 byte advise"); 1916 return -EINVAL; 1917 } 1918 return 0; 1919 case 8 + 8: 1920 if (!migrate_postcopy_ram()) { 1921 error_report("RAM postcopy is disabled but have 16 byte advise"); 1922 return -EINVAL; 1923 } 1924 break; 1925 default: 1926 error_report("CMD_POSTCOPY_ADVISE invalid length (%d)", len); 1927 return -EINVAL; 1928 } 1929 1930 if (!postcopy_ram_supported_by_host(mis, &local_err)) { 1931 error_report_err(local_err); 1932 postcopy_state_set(POSTCOPY_INCOMING_NONE); 1933 return -1; 1934 } 1935 1936 remote_pagesize_summary = qemu_get_be64(mis->from_src_file); 1937 local_pagesize_summary = ram_pagesize_summary(); 1938 1939 if (remote_pagesize_summary != local_pagesize_summary) { 1940 /* 1941 * This detects two potential causes of mismatch: 1942 * a) A mismatch in host page sizes 1943 * Some combinations of mismatch are probably possible but it gets 1944 * a bit more complicated. In particular we need to place whole 1945 * host pages on the dest at once, and we need to ensure that we 1946 * handle dirtying to make sure we never end up sending part of 1947 * a hostpage on it's own. 1948 * b) The use of different huge page sizes on source/destination 1949 * a more fine grain test is performed during RAM block migration 1950 * but this test here causes a nice early clear failure, and 1951 * also fails when passed to an older qemu that doesn't 1952 * do huge pages. 1953 */ 1954 error_report("Postcopy needs matching RAM page sizes (s=%" PRIx64 1955 " d=%" PRIx64 ")", 1956 remote_pagesize_summary, local_pagesize_summary); 1957 return -1; 1958 } 1959 1960 remote_tps = qemu_get_be64(mis->from_src_file); 1961 if (remote_tps != page_size) { 1962 /* 1963 * Again, some differences could be dealt with, but for now keep it 1964 * simple. 1965 */ 1966 error_report("Postcopy needs matching target page sizes (s=%d d=%zd)", 1967 (int)remote_tps, page_size); 1968 return -1; 1969 } 1970 1971 if (postcopy_notify(POSTCOPY_NOTIFY_INBOUND_ADVISE, &local_err)) { 1972 error_report_err(local_err); 1973 return -1; 1974 } 1975 1976 if (ram_postcopy_incoming_init(mis)) { 1977 return -1; 1978 } 1979 1980 return 0; 1981 } 1982 1983 /* After postcopy we will be told to throw some pages away since they're 1984 * dirty and will have to be demand fetched. Must happen before CPU is 1985 * started. 1986 * There can be 0..many of these messages, each encoding multiple pages. 
 */
static int loadvm_postcopy_ram_handle_discard(MigrationIncomingState *mis,
                                              uint16_t len)
{
    int tmp;
    char ramid[256];
    PostcopyState ps = postcopy_state_get();

    trace_loadvm_postcopy_ram_handle_discard();

    switch (ps) {
    case POSTCOPY_INCOMING_ADVISE:
        /* 1st discard */
        tmp = postcopy_ram_prepare_discard(mis);
        if (tmp) {
            return tmp;
        }
        break;

    case POSTCOPY_INCOMING_DISCARD:
        /* Expected state */
        break;

    default:
        error_report("CMD_POSTCOPY_RAM_DISCARD in wrong postcopy state (%d)",
                     ps);
        return -1;
    }
    /* We're expecting:
     *    a version byte (0)
     *    a RAM ID string (length byte, name, 0 terminator)
     *    then at least one 16-byte chunk
     */
    if (len < (1 + 1 + 1 + 1 + 2 * 8)) {
        error_report("CMD_POSTCOPY_RAM_DISCARD invalid length (%d)", len);
        return -1;
    }

    tmp = qemu_get_byte(mis->from_src_file);
    if (tmp != postcopy_ram_discard_version) {
        error_report("CMD_POSTCOPY_RAM_DISCARD invalid version (%d)", tmp);
        return -1;
    }

    if (!qemu_get_counted_string(mis->from_src_file, ramid)) {
        error_report("CMD_POSTCOPY_RAM_DISCARD Failed to read RAMBlock ID");
        return -1;
    }
    tmp = qemu_get_byte(mis->from_src_file);
    if (tmp != 0) {
        error_report("CMD_POSTCOPY_RAM_DISCARD missing nil (%d)", tmp);
        return -1;
    }

    len -= 3 + strlen(ramid);
    if (len % 16) {
        error_report("CMD_POSTCOPY_RAM_DISCARD invalid length (%d)", len);
        return -1;
    }
    trace_loadvm_postcopy_ram_handle_discard_header(ramid, len);
    while (len) {
        uint64_t start_addr, block_length;
        start_addr = qemu_get_be64(mis->from_src_file);
        block_length = qemu_get_be64(mis->from_src_file);

        len -= 16;
        int ret = ram_discard_range(ramid, start_addr, block_length);
        if (ret) {
            return ret;
        }
    }
    trace_loadvm_postcopy_ram_handle_discard_end();

    return 0;
}

/*
 * Triggered by a postcopy_listen command; this thread takes over reading
 * the input stream, leaving the main thread free to carry on loading the rest
 * of the device state (from RAM).
 * (TODO: This could do with being in a postcopy file - but there again it's
 * just another input loop, not that postcopy specific)
 */
static void *postcopy_ram_listen_thread(void *opaque)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    QEMUFile *f = mis->from_src_file;
    int load_res;
    MigrationState *migr = migrate_get_current();

    object_ref(OBJECT(migr));

    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_POSTCOPY_ACTIVE);
    qemu_event_set(&mis->thread_sync_event);
    trace_postcopy_ram_listen_thread_start();

    rcu_register_thread();
    /*
     * Because we're a thread and not a coroutine we can't yield
     * in qemu_file, and thus we must be blocking now.
     */
    qemu_file_set_blocking(f, true);

    /* TODO: sanity check that only postcopiable data will be loaded here */
    load_res = qemu_loadvm_state_main(f, mis);

    /*
     * This is tricky: mis->from_src_file can change after
     * qemu_loadvm_state_main() returns, if postcopy recovery happened.  In
     * the future we may want a wrapper for the QEMUFile handle.
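     * (Re-reading mis->from_src_file below picks up any replacement channel
     * installed by the recovery path.)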
2098 */ 2099 f = mis->from_src_file; 2100 2101 /* And non-blocking again so we don't block in any cleanup */ 2102 qemu_file_set_blocking(f, false); 2103 2104 trace_postcopy_ram_listen_thread_exit(); 2105 if (load_res < 0) { 2106 qemu_file_set_error(f, load_res); 2107 dirty_bitmap_mig_cancel_incoming(); 2108 if (postcopy_state_get() == POSTCOPY_INCOMING_RUNNING && 2109 !migrate_postcopy_ram() && migrate_dirty_bitmaps()) 2110 { 2111 error_report("%s: loadvm failed during postcopy: %d. All states " 2112 "are migrated except dirty bitmaps. Some dirty " 2113 "bitmaps may be lost, and present migrated dirty " 2114 "bitmaps are correctly migrated and valid.", 2115 __func__, load_res); 2116 load_res = 0; /* prevent further exit() */ 2117 } else { 2118 error_report("%s: loadvm failed: %d", __func__, load_res); 2119 migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_ACTIVE, 2120 MIGRATION_STATUS_FAILED); 2121 } 2122 } 2123 if (load_res >= 0) { 2124 /* 2125 * This looks good, but it's possible that the device loading in the 2126 * main thread hasn't finished yet, and so we might not be in 'RUN' 2127 * state yet; wait for the end of the main thread. 2128 */ 2129 qemu_event_wait(&mis->main_thread_load_event); 2130 } 2131 postcopy_ram_incoming_cleanup(mis); 2132 2133 if (load_res < 0) { 2134 /* 2135 * If something went wrong then we have a bad state so exit; 2136 * depending how far we got it might be possible at this point 2137 * to leave the guest running and fire MCEs for pages that never 2138 * arrived as a desperate recovery step. 2139 */ 2140 rcu_unregister_thread(); 2141 exit(EXIT_FAILURE); 2142 } 2143 2144 migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_ACTIVE, 2145 MIGRATION_STATUS_COMPLETED); 2146 /* 2147 * If everything has worked fine, then the main thread has waited 2148 * for us to start, and we're the last use of the mis. 2149 * (If something broke then qemu will have to exit anyway since it's 2150 * got a bad migration state). 2151 */ 2152 bql_lock(); 2153 migration_incoming_state_destroy(); 2154 bql_unlock(); 2155 2156 rcu_unregister_thread(); 2157 mis->have_listen_thread = false; 2158 postcopy_state_set(POSTCOPY_INCOMING_END); 2159 2160 object_unref(OBJECT(migr)); 2161 2162 return NULL; 2163 } 2164 2165 /* After this message we must be able to immediately receive postcopy data */ 2166 static int loadvm_postcopy_handle_listen(MigrationIncomingState *mis) 2167 { 2168 PostcopyState ps = postcopy_state_set(POSTCOPY_INCOMING_LISTENING); 2169 Error *local_err = NULL; 2170 2171 trace_loadvm_postcopy_handle_listen("enter"); 2172 2173 if (ps != POSTCOPY_INCOMING_ADVISE && ps != POSTCOPY_INCOMING_DISCARD) { 2174 error_report("CMD_POSTCOPY_LISTEN in wrong postcopy state (%d)", ps); 2175 return -1; 2176 } 2177 if (ps == POSTCOPY_INCOMING_ADVISE) { 2178 /* 2179 * A rare case, we entered listen without having to do any discards, 2180 * so do the setup that's normally done at the time of the 1st discard. 
2181 */ 2182 if (migrate_postcopy_ram()) { 2183 postcopy_ram_prepare_discard(mis); 2184 } 2185 } 2186 2187 trace_loadvm_postcopy_handle_listen("after discard"); 2188 2189 /* 2190 * Sensitise RAM - can now generate requests for blocks that don't exist 2191 * However, at this point the CPU shouldn't be running, and the IO 2192 * shouldn't be doing anything yet so don't actually expect requests 2193 */ 2194 if (migrate_postcopy_ram()) { 2195 if (postcopy_ram_incoming_setup(mis)) { 2196 postcopy_ram_incoming_cleanup(mis); 2197 return -1; 2198 } 2199 } 2200 2201 trace_loadvm_postcopy_handle_listen("after uffd"); 2202 2203 if (postcopy_notify(POSTCOPY_NOTIFY_INBOUND_LISTEN, &local_err)) { 2204 error_report_err(local_err); 2205 return -1; 2206 } 2207 2208 mis->have_listen_thread = true; 2209 postcopy_thread_create(mis, &mis->listen_thread, 2210 MIGRATION_THREAD_DST_LISTEN, 2211 postcopy_ram_listen_thread, QEMU_THREAD_DETACHED); 2212 trace_loadvm_postcopy_handle_listen("return"); 2213 2214 return 0; 2215 } 2216 2217 static void loadvm_postcopy_handle_run_bh(void *opaque) 2218 { 2219 MigrationIncomingState *mis = opaque; 2220 2221 trace_vmstate_downtime_checkpoint("dst-postcopy-bh-enter"); 2222 2223 /* TODO we should move all of this lot into postcopy_ram.c or a shared code 2224 * in migration.c 2225 */ 2226 cpu_synchronize_all_post_init(); 2227 2228 trace_vmstate_downtime_checkpoint("dst-postcopy-bh-cpu-synced"); 2229 2230 qemu_announce_self(&mis->announce_timer, migrate_announce_params()); 2231 2232 trace_vmstate_downtime_checkpoint("dst-postcopy-bh-announced"); 2233 2234 dirty_bitmap_mig_before_vm_start(); 2235 2236 if (autostart) { 2237 /* 2238 * Make sure all file formats throw away their mutable metadata. 2239 * If we get an error here, just don't restart the VM yet. 2240 */ 2241 bool success = migration_block_activate(NULL); 2242 2243 trace_vmstate_downtime_checkpoint("dst-postcopy-bh-cache-invalidated"); 2244 2245 if (success) { 2246 vm_start(); 2247 } 2248 } else { 2249 /* leave it paused and let management decide when to start the CPU */ 2250 runstate_set(RUN_STATE_PAUSED); 2251 } 2252 2253 trace_vmstate_downtime_checkpoint("dst-postcopy-bh-vm-started"); 2254 } 2255 2256 /* After all discards we can start running and asking for pages */ 2257 static int loadvm_postcopy_handle_run(MigrationIncomingState *mis) 2258 { 2259 PostcopyState ps = postcopy_state_get(); 2260 2261 trace_loadvm_postcopy_handle_run(); 2262 if (ps != POSTCOPY_INCOMING_LISTENING) { 2263 error_report("CMD_POSTCOPY_RUN in wrong postcopy state (%d)", ps); 2264 return -1; 2265 } 2266 2267 postcopy_state_set(POSTCOPY_INCOMING_RUNNING); 2268 migration_bh_schedule(loadvm_postcopy_handle_run_bh, mis); 2269 2270 /* We need to finish reading the stream from the package 2271 * and also stop reading anything more from the stream that loaded the 2272 * package (since it's now being read by the listener thread). 2273 * LOADVM_QUIT will quit all the layers of nested loadvm loops. 2274 */ 2275 return LOADVM_QUIT; 2276 } 2277 2278 /* We must be with page_request_mutex held */ 2279 static gboolean postcopy_sync_page_req(gpointer key, gpointer value, 2280 gpointer data) 2281 { 2282 MigrationIncomingState *mis = data; 2283 void *host_addr = (void *) key; 2284 ram_addr_t rb_offset; 2285 RAMBlock *rb; 2286 int ret; 2287 2288 rb = qemu_ram_block_from_host(host_addr, true, &rb_offset); 2289 if (!rb) { 2290 /* 2291 * This should _never_ happen. However be nice for a migrating VM to 2292 * not crash/assert. 
         * Post an error (note: intentionally not using *_once because we do
         * want to see all the illegal addresses; and this can never be
         * triggered by the guest so we're safe) and move on to the next
         * entry.
         */
        error_report("%s: illegal host addr %p", __func__, host_addr);
        /* Try the next entry */
        return FALSE;
    }

    ret = migrate_send_rp_message_req_pages(mis, rb, rb_offset);
    if (ret) {
        /* Please refer to above comment. */
        error_report("%s: send rp message failed for addr %p",
                     __func__, host_addr);
        return FALSE;
    }

    trace_postcopy_page_req_sync(host_addr);

    return FALSE;
}

static void migrate_send_rp_req_pages_pending(MigrationIncomingState *mis)
{
    WITH_QEMU_LOCK_GUARD(&mis->page_request_mutex) {
        g_tree_foreach(mis->page_requested, postcopy_sync_page_req, mis);
    }
}

static int loadvm_postcopy_handle_resume(MigrationIncomingState *mis)
{
    if (mis->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
        error_report("%s: illegal resume received", __func__);
        /* Don't fail the whole load just because of this */
        return 0;
    }

    /*
     * Reset the last_rb before we resend any page req to source again, since
     * the source should have it reset already.
     */
    mis->last_rb = NULL;

    /*
     * This means the source VM is ready to resume the postcopy migration.
     */
    migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_RECOVER,
                      MIGRATION_STATUS_POSTCOPY_ACTIVE);

    trace_loadvm_postcopy_handle_resume();

    /* Tell source that "we are ready" */
    migrate_send_rp_resume_ack(mis, MIGRATION_RESUME_ACK_VALUE);

    /*
     * After a postcopy recovery, the source should have lost the postcopy
     * queue, or potentially the requested pages could have been lost during
     * the network down phase.  Let's re-sync with the source VM by re-sending
     * all the pending pages that we eagerly need, so these threads won't get
     * blocked too long due to the recovery.
     *
     * Without this procedure, the faulted destination VM threads (waiting for
     * page requests right before the postcopy is interrupted) can keep
     * hanging until the pages are sent by the source during the background
     * copying of pages, or until another thread accidentally faults on the
     * same address.
     */
    migrate_send_rp_req_pages_pending(mis);

    /*
     * It's time to switch state and release the fault thread to continue
     * servicing page faults.  Note that this should be explicitly after the
     * above call to migrate_send_rp_req_pages_pending().  In short:
     * migrate_send_rp_message_req_pages() is not thread safe, yet.
     */
    qemu_sem_post(&mis->postcopy_pause_sem_fault);

    if (migrate_postcopy_preempt()) {
        /*
         * The preempt channel will be created asynchronously, so let's
         * wait for it and make sure it's created.
         */
        qemu_sem_wait(&mis->postcopy_qemufile_dst_done);
        assert(mis->postcopy_qemufile_dst);
        /* Kick the fast ram load thread too */
        qemu_sem_post(&mis->postcopy_pause_sem_fast_load);
    }

    return 0;
}

/**
 * Immediately following this command is a blob of data containing an embedded
 * chunk of migration stream; read it and load it.
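 *
 * The packaged data is preceded on the wire by a 32-bit big-endian length,
 * which is bounds-checked against MAX_VM_CMD_PACKAGED_SIZE.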
2385 * 2386 * @mis: Incoming state 2387 * @length: Length of packaged data to read 2388 * 2389 * Returns: Negative values on error 2390 * 2391 */ 2392 static int loadvm_handle_cmd_packaged(MigrationIncomingState *mis) 2393 { 2394 int ret; 2395 size_t length; 2396 QIOChannelBuffer *bioc; 2397 2398 length = qemu_get_be32(mis->from_src_file); 2399 trace_loadvm_handle_cmd_packaged(length); 2400 2401 if (length > MAX_VM_CMD_PACKAGED_SIZE) { 2402 error_report("Unreasonably large packaged state: %zu", length); 2403 return -1; 2404 } 2405 2406 bioc = qio_channel_buffer_new(length); 2407 qio_channel_set_name(QIO_CHANNEL(bioc), "migration-loadvm-buffer"); 2408 ret = qemu_get_buffer(mis->from_src_file, 2409 bioc->data, 2410 length); 2411 if (ret != length) { 2412 object_unref(OBJECT(bioc)); 2413 error_report("CMD_PACKAGED: Buffer receive fail ret=%d length=%zu", 2414 ret, length); 2415 return (ret < 0) ? ret : -EAGAIN; 2416 } 2417 bioc->usage += length; 2418 trace_loadvm_handle_cmd_packaged_received(ret); 2419 2420 QEMUFile *packf = qemu_file_new_input(QIO_CHANNEL(bioc)); 2421 2422 /* 2423 * Before loading the guest states, ensure that the preempt channel has 2424 * been ready to use, as some of the states (e.g. via virtio_load) might 2425 * trigger page faults that will be handled through the preempt channel. 2426 * So yield to the main thread in the case that the channel create event 2427 * hasn't been dispatched. 2428 * 2429 * TODO: if we can move migration loadvm out of main thread, then we 2430 * won't block main thread from polling the accept() fds. We can drop 2431 * this as a whole when that is done. 2432 */ 2433 do { 2434 if (!migrate_postcopy_preempt() || !qemu_in_coroutine() || 2435 mis->postcopy_qemufile_dst) { 2436 break; 2437 } 2438 2439 aio_co_schedule(qemu_get_current_aio_context(), qemu_coroutine_self()); 2440 qemu_coroutine_yield(); 2441 } while (1); 2442 2443 ret = qemu_loadvm_state_main(packf, mis); 2444 trace_loadvm_handle_cmd_packaged_main(ret); 2445 qemu_fclose(packf); 2446 object_unref(OBJECT(bioc)); 2447 2448 return ret; 2449 } 2450 2451 /* 2452 * Handle request that source requests for recved_bitmap on 2453 * destination. 
 *
 * Payload format:
 *
 * len (1 byte) + ramblock_name (<255 bytes)
 */
static int loadvm_handle_recv_bitmap(MigrationIncomingState *mis,
                                     uint16_t len)
{
    QEMUFile *file = mis->from_src_file;
    RAMBlock *rb;
    char block_name[256];
    size_t cnt;

    cnt = qemu_get_counted_string(file, block_name);
    if (!cnt) {
        error_report("%s: failed to read block name", __func__);
        return -EINVAL;
    }

    /* Validate before using the data */
    if (qemu_file_get_error(file)) {
        return qemu_file_get_error(file);
    }

    if (len != cnt + 1) {
        error_report("%s: invalid payload length (%d)", __func__, len);
        return -EINVAL;
    }

    rb = qemu_ram_block_by_name(block_name);
    if (!rb) {
        error_report("%s: block '%s' not found", __func__, block_name);
        return -EINVAL;
    }

    migrate_send_rp_recv_bitmap(mis, block_name);

    trace_loadvm_handle_recv_bitmap(block_name);

    return 0;
}

static int loadvm_process_enable_colo(MigrationIncomingState *mis)
{
    int ret = migration_incoming_enable_colo();

    if (!ret) {
        ret = colo_init_ram_cache();
        if (ret) {
            migration_incoming_disable_colo();
        }
    }
    return ret;
}

static int loadvm_postcopy_handle_switchover_start(void)
{
    SaveStateEntry *se;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        int ret;

        if (!se->ops || !se->ops->switchover_start) {
            continue;
        }

        ret = se->ops->switchover_start(se->opaque);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}

/*
 * Process an incoming 'QEMU_VM_COMMAND'
 * 0           just a normal return
 * LOADVM_QUIT All good, but exit the loop
 * <0          Error
 */
static int loadvm_process_command(QEMUFile *f)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    uint16_t cmd;
    uint16_t len;
    uint32_t tmp32;

    cmd = qemu_get_be16(f);
    len = qemu_get_be16(f);

    /* Check validity before continuing to process the commands */
    if (qemu_file_get_error(f)) {
        return qemu_file_get_error(f);
    }

    if (cmd >= MIG_CMD_MAX || cmd == MIG_CMD_INVALID) {
        error_report("MIG_CMD 0x%x unknown (len 0x%x)", cmd, len);
        return -EINVAL;
    }

    trace_loadvm_process_command(mig_cmd_args[cmd].name, len);

    if (mig_cmd_args[cmd].len != -1 && mig_cmd_args[cmd].len != len) {
        error_report("%s received with bad length - expecting %zu, got %d",
                     mig_cmd_args[cmd].name,
                     (size_t)mig_cmd_args[cmd].len, len);
        return -ERANGE;
    }

    switch (cmd) {
    case MIG_CMD_OPEN_RETURN_PATH:
        if (mis->to_src_file) {
            error_report("CMD_OPEN_RETURN_PATH called when RP already open");
            /* Not really a problem, so don't give up */
            return 0;
        }
        mis->to_src_file = qemu_file_get_return_path(f);
        if (!mis->to_src_file) {
            error_report("CMD_OPEN_RETURN_PATH failed");
            return -1;
        }

        /*
         * Switchover ack is enabled but no device uses it, so send an ACK to
         * the source that it's OK to switchover.  Do it here, after the
         * return path has been created.
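         * (If some device does have an ack pending, the ack is instead sent
         * from qemu_loadvm_approve_switchover() once the pending count drops
         * to zero.)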
2579 */ 2580 if (migrate_switchover_ack() && !mis->switchover_ack_pending_num) { 2581 int ret = migrate_send_rp_switchover_ack(mis); 2582 if (ret) { 2583 error_report( 2584 "Could not send switchover ack RP MSG, err %d (%s)", ret, 2585 strerror(-ret)); 2586 return ret; 2587 } 2588 } 2589 break; 2590 2591 case MIG_CMD_PING: 2592 tmp32 = qemu_get_be32(f); 2593 trace_loadvm_process_command_ping(tmp32); 2594 if (!mis->to_src_file) { 2595 error_report("CMD_PING (0x%x) received with no return path", 2596 tmp32); 2597 return -1; 2598 } 2599 migrate_send_rp_pong(mis, tmp32); 2600 break; 2601 2602 case MIG_CMD_PACKAGED: 2603 return loadvm_handle_cmd_packaged(mis); 2604 2605 case MIG_CMD_POSTCOPY_ADVISE: 2606 return loadvm_postcopy_handle_advise(mis, len); 2607 2608 case MIG_CMD_POSTCOPY_LISTEN: 2609 return loadvm_postcopy_handle_listen(mis); 2610 2611 case MIG_CMD_POSTCOPY_RUN: 2612 return loadvm_postcopy_handle_run(mis); 2613 2614 case MIG_CMD_POSTCOPY_RAM_DISCARD: 2615 return loadvm_postcopy_ram_handle_discard(mis, len); 2616 2617 case MIG_CMD_POSTCOPY_RESUME: 2618 return loadvm_postcopy_handle_resume(mis); 2619 2620 case MIG_CMD_RECV_BITMAP: 2621 return loadvm_handle_recv_bitmap(mis, len); 2622 2623 case MIG_CMD_ENABLE_COLO: 2624 return loadvm_process_enable_colo(mis); 2625 2626 case MIG_CMD_SWITCHOVER_START: 2627 return loadvm_postcopy_handle_switchover_start(); 2628 } 2629 2630 return 0; 2631 } 2632 2633 /* 2634 * Read a footer off the wire and check that it matches the expected section 2635 * 2636 * Returns: true if the footer was good 2637 * false if there is a problem (and calls error_report to say why) 2638 */ 2639 static bool check_section_footer(QEMUFile *f, SaveStateEntry *se) 2640 { 2641 int ret; 2642 uint8_t read_mark; 2643 uint32_t read_section_id; 2644 2645 if (!migrate_get_current()->send_section_footer) { 2646 /* No footer to check */ 2647 return true; 2648 } 2649 2650 read_mark = qemu_get_byte(f); 2651 2652 ret = qemu_file_get_error(f); 2653 if (ret) { 2654 error_report("%s: Read section footer failed: %d", 2655 __func__, ret); 2656 return false; 2657 } 2658 2659 if (read_mark != QEMU_VM_SECTION_FOOTER) { 2660 error_report("Missing section footer for %s", se->idstr); 2661 return false; 2662 } 2663 2664 read_section_id = qemu_get_be32(f); 2665 if (read_section_id != se->load_section_id) { 2666 error_report("Mismatched section id in footer for %s -" 2667 " read 0x%x expected 0x%x", 2668 se->idstr, read_section_id, se->load_section_id); 2669 return false; 2670 } 2671 2672 /* All good */ 2673 return true; 2674 } 2675 2676 static int 2677 qemu_loadvm_section_start_full(QEMUFile *f, uint8_t type) 2678 { 2679 bool trace_downtime = (type == QEMU_VM_SECTION_FULL); 2680 uint32_t instance_id, version_id, section_id; 2681 int64_t start_ts, end_ts; 2682 SaveStateEntry *se; 2683 char idstr[256]; 2684 int ret; 2685 2686 /* Read section start */ 2687 section_id = qemu_get_be32(f); 2688 if (!qemu_get_counted_string(f, idstr)) { 2689 error_report("Unable to read ID string for section %u", 2690 section_id); 2691 return -EINVAL; 2692 } 2693 instance_id = qemu_get_be32(f); 2694 version_id = qemu_get_be32(f); 2695 2696 ret = qemu_file_get_error(f); 2697 if (ret) { 2698 error_report("%s: Failed to read instance/version ID: %d", 2699 __func__, ret); 2700 return ret; 2701 } 2702 2703 trace_qemu_loadvm_state_section_startfull(section_id, idstr, 2704 instance_id, version_id); 2705 /* Find savevm section */ 2706 se = find_se(idstr, instance_id); 2707 if (se == NULL) { 2708 error_report("Unknown savevm 
                     "Make sure that your current VM setup matches your "
                     "saved VM setup, including any hotplugged devices",
                     idstr, instance_id);
        return -EINVAL;
    }

    /* Validate version */
    if (version_id > se->version_id) {
        error_report("savevm: unsupported version %d for '%s' v%d",
                     version_id, idstr, se->version_id);
        return -EINVAL;
    }
    se->load_version_id = version_id;
    se->load_section_id = section_id;

    /* Validate that it is not a RAM section; on Xen, RAM is loaded
     * separately by the toolstack */
    if (xen_enabled() && se->is_ram) {
        error_report("loadvm: %s RAM loading not allowed on Xen", idstr);
        return -EINVAL;
    }

    if (trace_downtime) {
        start_ts = qemu_clock_get_us(QEMU_CLOCK_REALTIME);
    }

    ret = vmstate_load(f, se);
    if (ret < 0) {
        error_report("error while loading state for instance 0x%"PRIx32" of"
                     " device '%s'", instance_id, idstr);
        return ret;
    }

    if (trace_downtime) {
        end_ts = qemu_clock_get_us(QEMU_CLOCK_REALTIME);
        trace_vmstate_downtime_load("non-iterable", se->idstr,
                                    se->instance_id, end_ts - start_ts);
    }

    if (!check_section_footer(f, se)) {
        return -EINVAL;
    }

    return 0;
}

static int
qemu_loadvm_section_part_end(QEMUFile *f, uint8_t type)
{
    bool trace_downtime = (type == QEMU_VM_SECTION_END);
    int64_t start_ts, end_ts;
    uint32_t section_id;
    SaveStateEntry *se;
    int ret;

    section_id = qemu_get_be32(f);

    ret = qemu_file_get_error(f);
    if (ret) {
        error_report("%s: Failed to read section ID: %d",
                     __func__, ret);
        return ret;
    }

    trace_qemu_loadvm_state_section_partend(section_id);
    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (se->load_section_id == section_id) {
            break;
        }
    }
    if (se == NULL) {
        error_report("Unknown savevm section %d", section_id);
        return -EINVAL;
    }

    if (trace_downtime) {
        start_ts = qemu_clock_get_us(QEMU_CLOCK_REALTIME);
    }

    ret = vmstate_load(f, se);
    if (ret < 0) {
        error_report("error while loading state section id %d(%s)",
                     section_id, se->idstr);
        return ret;
    }

    if (trace_downtime) {
        end_ts = qemu_clock_get_us(QEMU_CLOCK_REALTIME);
        trace_vmstate_downtime_load("iterable", se->idstr,
                                    se->instance_id, end_ts - start_ts);
    }

    if (!check_section_footer(f, se)) {
        return -EINVAL;
    }

    return 0;
}

static int qemu_loadvm_state_header(QEMUFile *f)
{
    unsigned int v;
    int ret;

    v = qemu_get_be32(f);
    if (v != QEMU_VM_FILE_MAGIC) {
        error_report("Not a migration stream");
        return -EINVAL;
    }

    v = qemu_get_be32(f);
    if (v == QEMU_VM_FILE_VERSION_COMPAT) {
        error_report("SaveVM v2 format is obsolete and doesn't work anymore");
        return -ENOTSUP;
    }
    if (v != QEMU_VM_FILE_VERSION) {
        error_report("Unsupported migration stream version");
        return -ENOTSUP;
    }

    if (migrate_get_current()->send_configuration) {
        if (qemu_get_byte(f) != QEMU_VM_CONFIGURATION) {
            error_report("Configuration section missing");
            return -EINVAL;
        }
        ret = vmstate_load_state(f, &vmstate_configuration, &savevm_state, 0);

        if (ret) {
            return ret;
        }
    }
    return 0;
}

static void qemu_loadvm_state_switchover_ack_needed(MigrationIncomingState *mis)
{
    SaveStateEntry *se;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (!se->ops || !se->ops->switchover_ack_needed) {
            continue;
        }

        if (se->ops->switchover_ack_needed(se->opaque)) {
            mis->switchover_ack_pending_num++;
        }
    }

    trace_loadvm_state_switchover_ack_needed(mis->switchover_ack_pending_num);
}

static int qemu_loadvm_state_setup(QEMUFile *f, Error **errp)
{
    ERRP_GUARD();
    SaveStateEntry *se;
    int ret;

    trace_loadvm_state_setup();
    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (!se->ops || !se->ops->load_setup) {
            continue;
        }
        if (se->ops->is_active) {
            if (!se->ops->is_active(se->opaque)) {
                continue;
            }
        }

        ret = se->ops->load_setup(f, se->opaque, errp);
        if (ret < 0) {
            error_prepend(errp, "Load state of device %s failed: ",
                          se->idstr);
            qemu_file_set_error(f, ret);
            return ret;
        }
    }
    return 0;
}

struct LoadThreadData {
    MigrationLoadThread function;
    void *opaque;
};

static int qemu_loadvm_load_thread(void *thread_opaque)
{
    struct LoadThreadData *data = thread_opaque;
    MigrationIncomingState *mis = migration_incoming_get_current();
    g_autoptr(Error) local_err = NULL;

    if (!data->function(data->opaque, &mis->load_threads_abort, &local_err)) {
        MigrationState *s = migrate_get_current();

        /*
         * Can't set load_threads_abort here since processing of main migration
         * channel data could still be happening, resulting in launching of new
         * load threads.
         */

        assert(local_err);

        /*
         * If multiple load threads fail, which thread's error ends up being
         * set is purely arbitrary.
         */
        migrate_set_error(s, local_err);
    }

    return 0;
}

void qemu_loadvm_start_load_thread(MigrationLoadThread function,
                                   void *opaque)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    struct LoadThreadData *data;

    /* We only set it from this thread so it's okay to read it directly */
    assert(!mis->load_threads_abort);

    data = g_new(struct LoadThreadData, 1);
    data->function = function;
    data->opaque = opaque;

    thread_pool_submit_immediate(mis->load_threads, qemu_loadvm_load_thread,
                                 data, g_free);
}

void qemu_loadvm_state_cleanup(MigrationIncomingState *mis)
{
    SaveStateEntry *se;

    trace_loadvm_state_cleanup();

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (se->ops && se->ops->load_cleanup) {
            se->ops->load_cleanup(se->opaque);
        }
    }

    qemu_loadvm_thread_pool_destroy(mis);
}

/* Return true if we should continue the migration, or false if it failed.
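 * On pause we tear down the from/to src channels and then block on
 * postcopy_pause_sem_dst until the state leaves POSTCOPY_PAUSED.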
 */
static bool postcopy_pause_incoming(MigrationIncomingState *mis)
{
    int i;

    trace_postcopy_pause_incoming();

    assert(migrate_postcopy_ram());

    /*
     * Unregistering yank with either the from/to src file would work, since
     * the ioc behind them is the same.
     */
    migration_ioc_unregister_yank_from_file(mis->from_src_file);

    assert(mis->from_src_file);
    qemu_file_shutdown(mis->from_src_file);
    qemu_fclose(mis->from_src_file);
    mis->from_src_file = NULL;

    assert(mis->to_src_file);
    qemu_file_shutdown(mis->to_src_file);
    qemu_mutex_lock(&mis->rp_mutex);
    qemu_fclose(mis->to_src_file);
    mis->to_src_file = NULL;
    qemu_mutex_unlock(&mis->rp_mutex);

    /*
     * NOTE: this must happen before resetting the PostcopyTmpPages below;
     * otherwise resetting those fields races with the fast load thread,
     * which may be accessing them in parallel.
     */
    if (mis->postcopy_qemufile_dst) {
        qemu_file_shutdown(mis->postcopy_qemufile_dst);
        /* Take the mutex to make sure the fast ram load thread halted */
        qemu_mutex_lock(&mis->postcopy_prio_thread_mutex);
        migration_ioc_unregister_yank_from_file(mis->postcopy_qemufile_dst);
        qemu_fclose(mis->postcopy_qemufile_dst);
        mis->postcopy_qemufile_dst = NULL;
        qemu_mutex_unlock(&mis->postcopy_prio_thread_mutex);
    }

    /* Current state can be either ACTIVE or RECOVER */
    migrate_set_state(&mis->state, mis->state,
                      MIGRATION_STATUS_POSTCOPY_PAUSED);

    /* Notify the fault thread for the invalidated file handle */
    postcopy_fault_thread_notify(mis);

    /*
     * If the network is interrupted, any temp pages we received will be
     * useless because we didn't mark them as "received" in receivedmap.
     * After a proper recovery later (which will sync the src dirty bitmap
     * with receivedmap on dest) these cached small pages will be resent
     * again.
     */
    for (i = 0; i < mis->postcopy_channels; i++) {
        postcopy_temp_page_reset(&mis->postcopy_tmp_pages[i]);
    }

    error_report("Detected IO failure for postcopy. "
                 "Migration paused.");

    do {
        qemu_sem_wait(&mis->postcopy_pause_sem_dst);
    } while (postcopy_is_paused(mis->state));

    trace_postcopy_pause_incoming_continued();

    return true;
}

int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis)
{
    uint8_t section_type;
    int ret = 0;

retry:
    while (true) {
        section_type = qemu_get_byte(f);

        ret = qemu_file_get_error_obj_any(f, mis->postcopy_qemufile_dst, NULL);
        if (ret) {
            break;
        }

        trace_qemu_loadvm_state_section(section_type);
        switch (section_type) {
        case QEMU_VM_SECTION_START:
        case QEMU_VM_SECTION_FULL:
            ret = qemu_loadvm_section_start_full(f, section_type);
            if (ret < 0) {
                goto out;
            }
            break;
        case QEMU_VM_SECTION_PART:
        case QEMU_VM_SECTION_END:
            ret = qemu_loadvm_section_part_end(f, section_type);
            if (ret < 0) {
                goto out;
            }
            break;
        case QEMU_VM_COMMAND:
            ret = loadvm_process_command(f);
            trace_qemu_loadvm_state_section_command(ret);
            if ((ret < 0) || (ret == LOADVM_QUIT)) {
                goto out;
            }
            break;
        case QEMU_VM_EOF:
            /* This is the end of migration */
            goto out;
        default:
            error_report("Unknown savevm section type %d", section_type);
            ret = -EINVAL;
            goto out;
        }
    }

out:
    if (ret < 0) {
        qemu_file_set_error(f, ret);

        /* Cancel bitmaps incoming regardless of recovery */
        dirty_bitmap_mig_cancel_incoming();

        /*
         * If we are in an active postcopy, then we pause instead of bailing
         * out, to at least keep the VM's dirty data.  Note that the
         * POSTCOPY_INCOMING_LISTENING stage is still not enough, since
         * during it we're still receiving device states and haven't yet
         * started the VM on the destination.
         *
         * Only RAM postcopy supports recovery.  Still, if RAM postcopy is
         * enabled, cancelling the bitmaps postcopy will not affect the RAM
         * postcopy recovery.
         */
        if (postcopy_state_get() == POSTCOPY_INCOMING_RUNNING &&
            migrate_postcopy_ram() && postcopy_pause_incoming(mis)) {
            /* Reset f to point to the newly created channel */
            f = mis->from_src_file;
            goto retry;
        }
    }
    return ret;
}

int qemu_loadvm_state(QEMUFile *f)
{
    MigrationState *s = migrate_get_current();
    MigrationIncomingState *mis = migration_incoming_get_current();
    Error *local_err = NULL;
    int ret;

    if (qemu_savevm_state_blocked(&local_err)) {
        error_report_err(local_err);
        return -EINVAL;
    }

    qemu_loadvm_thread_pool_create(mis);

    ret = qemu_loadvm_state_header(f);
    if (ret) {
        return ret;
    }

    if (qemu_loadvm_state_setup(f, &local_err) != 0) {
        error_report_err(local_err);
        return -EINVAL;
    }

    if (migrate_switchover_ack()) {
        qemu_loadvm_state_switchover_ack_needed(mis);
    }

    cpu_synchronize_all_pre_loadvm();

    ret = qemu_loadvm_state_main(f, mis);
    qemu_event_set(&mis->main_thread_load_event);

    trace_qemu_loadvm_state_post_main(ret);

    if (mis->have_listen_thread) {
        /*
         * Postcopy listen thread still going, don't synchronize the
         * cpus yet.
3136 */ 3137 return ret; 3138 } 3139 3140 /* When reaching here, it must be precopy */ 3141 if (ret == 0) { 3142 if (migrate_has_error(migrate_get_current()) || 3143 !qemu_loadvm_thread_pool_wait(s, mis)) { 3144 ret = -EINVAL; 3145 } else { 3146 ret = qemu_file_get_error(f); 3147 } 3148 } 3149 /* 3150 * Set this flag unconditionally so we'll catch further attempts to 3151 * start additional threads via an appropriate assert() 3152 */ 3153 qatomic_set(&mis->load_threads_abort, true); 3154 3155 /* 3156 * Try to read in the VMDESC section as well, so that dumping tools that 3157 * intercept our migration stream have the chance to see it. 3158 */ 3159 3160 /* We've got to be careful; if we don't read the data and just shut the fd 3161 * then the sender can error if we close while it's still sending. 3162 * We also mustn't read data that isn't there; some transports (RDMA) 3163 * will stall waiting for that data when the source has already closed. 3164 */ 3165 if (ret == 0 && should_send_vmdesc()) { 3166 uint8_t *buf; 3167 uint32_t size; 3168 uint8_t section_type = qemu_get_byte(f); 3169 3170 if (section_type != QEMU_VM_VMDESCRIPTION) { 3171 error_report("Expected vmdescription section, but got %d", 3172 section_type); 3173 /* 3174 * It doesn't seem worth failing at this point since 3175 * we apparently have an otherwise valid VM state 3176 */ 3177 } else { 3178 buf = g_malloc(0x1000); 3179 size = qemu_get_be32(f); 3180 3181 while (size > 0) { 3182 uint32_t read_chunk = MIN(size, 0x1000); 3183 qemu_get_buffer(f, buf, read_chunk); 3184 size -= read_chunk; 3185 } 3186 g_free(buf); 3187 } 3188 } 3189 3190 cpu_synchronize_all_post_init(); 3191 3192 return ret; 3193 } 3194 3195 int qemu_load_device_state(QEMUFile *f) 3196 { 3197 MigrationIncomingState *mis = migration_incoming_get_current(); 3198 int ret; 3199 3200 /* Load QEMU_VM_SECTION_FULL section */ 3201 ret = qemu_loadvm_state_main(f, mis); 3202 if (ret < 0) { 3203 error_report("Failed to load device state: %d", ret); 3204 return ret; 3205 } 3206 3207 cpu_synchronize_all_post_init(); 3208 return 0; 3209 } 3210 3211 int qemu_loadvm_approve_switchover(void) 3212 { 3213 MigrationIncomingState *mis = migration_incoming_get_current(); 3214 3215 if (!mis->switchover_ack_pending_num) { 3216 return -EINVAL; 3217 } 3218 3219 mis->switchover_ack_pending_num--; 3220 trace_loadvm_approve_switchover(mis->switchover_ack_pending_num); 3221 3222 if (mis->switchover_ack_pending_num) { 3223 return 0; 3224 } 3225 3226 return migrate_send_rp_switchover_ack(mis); 3227 } 3228 3229 bool qemu_loadvm_load_state_buffer(const char *idstr, uint32_t instance_id, 3230 char *buf, size_t len, Error **errp) 3231 { 3232 SaveStateEntry *se; 3233 3234 se = find_se(idstr, instance_id); 3235 if (!se) { 3236 error_setg(errp, 3237 "Unknown idstr %s or instance id %u for load state buffer", 3238 idstr, instance_id); 3239 return false; 3240 } 3241 3242 if (!se->ops || !se->ops->load_state_buffer) { 3243 error_setg(errp, 3244 "idstr %s / instance %u has no load state buffer operation", 3245 idstr, instance_id); 3246 return false; 3247 } 3248 3249 return se->ops->load_state_buffer(se->opaque, buf, len, errp); 3250 } 3251 3252 bool save_snapshot(const char *name, bool overwrite, const char *vmstate, 3253 bool has_devices, strList *devices, Error **errp) 3254 { 3255 BlockDriverState *bs; 3256 QEMUSnapshotInfo sn1, *sn = &sn1; 3257 int ret = -1, ret2; 3258 QEMUFile *f; 3259 RunState saved_state = runstate_get(); 3260 uint64_t vm_state_size; 3261 g_autoptr(GDateTime) now = 

    GLOBAL_STATE_CODE();

    if (migration_is_blocked(errp)) {
        return false;
    }

    if (!replay_can_snapshot()) {
        error_setg(errp, "Record/replay does not allow taking a snapshot "
                   "right now. Try again later.");
        return false;
    }

    if (!bdrv_all_can_snapshot(has_devices, devices, errp)) {
        return false;
    }

    /* Delete old snapshots of the same name */
    if (name) {
        if (overwrite) {
            if (bdrv_all_delete_snapshot(name, has_devices,
                                         devices, errp) < 0) {
                return false;
            }
        } else {
            ret2 = bdrv_all_has_snapshot(name, has_devices, devices, errp);
            if (ret2 < 0) {
                return false;
            }
            if (ret2 == 1) {
                error_setg(errp,
                           "Snapshot '%s' already exists in one or more devices",
                           name);
                return false;
            }
        }
    }

    bs = bdrv_all_find_vmstate_bs(vmstate, has_devices, devices, errp);
    if (bs == NULL) {
        return false;
    }

    global_state_store();
    vm_stop(RUN_STATE_SAVE_VM);

    bdrv_drain_all_begin();

    memset(sn, 0, sizeof(*sn));

    /* fill auxiliary fields */
    sn->date_sec = g_date_time_to_unix(now);
    sn->date_nsec = g_date_time_get_microsecond(now) * 1000;
    sn->vm_clock_nsec = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    if (replay_mode != REPLAY_MODE_NONE) {
        sn->icount = replay_get_current_icount();
    } else {
        sn->icount = -1ULL;
    }

    if (name) {
        pstrcpy(sn->name, sizeof(sn->name), name);
    } else {
        g_autofree char *autoname = g_date_time_format(now, "vm-%Y%m%d%H%M%S");
        pstrcpy(sn->name, sizeof(sn->name), autoname);
    }

    /* save the VM state */
    f = qemu_fopen_bdrv(bs, 1);
    if (!f) {
        error_setg(errp, "Could not open VM state file");
        goto the_end;
    }
    ret = qemu_savevm_state(f, errp);
    vm_state_size = qemu_file_transferred(f);
    ret2 = qemu_fclose(f);
    if (ret < 0) {
        goto the_end;
    }
    if (ret2 < 0) {
        ret = ret2;
        goto the_end;
    }

    ret = bdrv_all_create_snapshot(sn, bs, vm_state_size,
                                   has_devices, devices, errp);
    if (ret < 0) {
        bdrv_all_delete_snapshot(sn->name, has_devices, devices, NULL);
        goto the_end;
    }

    ret = 0;

the_end:
    bdrv_drain_all_end();

    vm_resume(saved_state);
    return ret == 0;
}

void qmp_xen_save_devices_state(const char *filename, bool has_live, bool live,
                                Error **errp)
{
    QEMUFile *f;
    QIOChannelFile *ioc;
    int saved_vm_running;
    int ret;

    if (!has_live) {
        /* live defaults to true so that old versions of the Xen tool stack
         * can have a successful live migration */
        live = true;
    }

    saved_vm_running = runstate_is_running();
    vm_stop(RUN_STATE_SAVE_VM);
    global_state_store_running();

    ioc = qio_channel_file_new_path(filename, O_WRONLY | O_CREAT | O_TRUNC,
                                    0660, errp);
    if (!ioc) {
        goto the_end;
    }
    qio_channel_set_name(QIO_CHANNEL(ioc), "migration-xen-save-state");
    f = qemu_file_new_output(QIO_CHANNEL(ioc));
    object_unref(OBJECT(ioc));
    ret = qemu_save_device_state(f);
    if (ret < 0 || qemu_fclose(f) < 0) {
        error_setg(errp, "saving Xen device state failed");
    } else {
        /* libxl calls the QMP command "stop" before calling
         * "xen-save-devices-state" and in case of migration failure, libxl
         * would call "cont".
         * So call bdrv_inactivate_all (release locks) here to let the other
         * side of the migration take control of the images.
         */
        if (live && !saved_vm_running) {
            migration_block_inactivate();
        }
    }

the_end:
    if (saved_vm_running) {
        vm_start();
    }
}

void qmp_xen_load_devices_state(const char *filename, Error **errp)
{
    QEMUFile *f;
    QIOChannelFile *ioc;
    int ret;

    /* Guest must be paused before loading the device state; the RAM state
     * will already have been loaded by xc.
     */
    if (runstate_is_running()) {
        error_setg(errp, "Cannot update device state while vm is running");
        return;
    }
    vm_stop(RUN_STATE_RESTORE_VM);

    ioc = qio_channel_file_new_path(filename, O_RDONLY | O_BINARY, 0, errp);
    if (!ioc) {
        return;
    }
    qio_channel_set_name(QIO_CHANNEL(ioc), "migration-xen-load-state");
    f = qemu_file_new_input(QIO_CHANNEL(ioc));
    object_unref(OBJECT(ioc));

    ret = qemu_loadvm_state(f);
    qemu_fclose(f);
    if (ret < 0) {
        error_setg(errp, "loading Xen device state failed");
    }
    migration_incoming_state_destroy();
}

bool load_snapshot(const char *name, const char *vmstate,
                   bool has_devices, strList *devices, Error **errp)
{
    BlockDriverState *bs_vm_state;
    QEMUSnapshotInfo sn;
    QEMUFile *f;
    int ret;
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (!bdrv_all_can_snapshot(has_devices, devices, errp)) {
        return false;
    }
    ret = bdrv_all_has_snapshot(name, has_devices, devices, errp);
    if (ret < 0) {
        return false;
    }
    if (ret == 0) {
        error_setg(errp, "Snapshot '%s' does not exist in one or more devices",
                   name);
        return false;
    }

    bs_vm_state = bdrv_all_find_vmstate_bs(vmstate, has_devices, devices, errp);
    if (!bs_vm_state) {
        return false;
    }

    /* Don't even try to load empty VM states */
    ret = bdrv_snapshot_find(bs_vm_state, &sn, name);
    if (ret < 0) {
        error_setg(errp, "Snapshot cannot be found");
        return false;
    } else if (sn.vm_state_size == 0) {
        error_setg(errp, "This is a disk-only snapshot. Revert to it "
                   "offline using qemu-img");
        return false;
    }

    /*
     * Flush the record/replay queue.  The VM state is about to change;
     * therefore we don't need to preserve its consistency.
     */
    replay_flush_events();

    /* Flush all IO requests so they don't interfere with the new state.
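     * (bdrv_drain_all_begin() below quiesces all block devices until the
     * matching bdrv_drain_all_end().)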
     */
    bdrv_drain_all_begin();

    ret = bdrv_all_goto_snapshot(name, has_devices, devices, errp);
    if (ret < 0) {
        goto err_drain;
    }

    /* restore the VM state */
    f = qemu_fopen_bdrv(bs_vm_state, 0);
    if (!f) {
        error_setg(errp, "Could not open VM state file");
        goto err_drain;
    }

    qemu_system_reset(SHUTDOWN_CAUSE_SNAPSHOT_LOAD);
    mis->from_src_file = f;

    if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) {
        ret = -EINVAL;
        goto err_drain;
    }
    ret = qemu_loadvm_state(f);
    migration_incoming_state_destroy();

    bdrv_drain_all_end();

    if (ret < 0) {
        error_setg(errp, "Error %d while loading VM state", ret);
        return false;
    }

    return true;

err_drain:
    bdrv_drain_all_end();
    return false;
}

void load_snapshot_resume(RunState state)
{
    vm_resume(state);
    if (state == RUN_STATE_RUNNING && runstate_get() == RUN_STATE_SUSPENDED) {
        qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, &error_abort);
    }
}

bool delete_snapshot(const char *name, bool has_devices,
                     strList *devices, Error **errp)
{
    if (!bdrv_all_can_snapshot(has_devices, devices, errp)) {
        return false;
    }

    if (bdrv_all_delete_snapshot(name, has_devices, devices, errp) < 0) {
        return false;
    }

    return true;
}

void vmstate_register_ram(MemoryRegion *mr, DeviceState *dev)
{
    qemu_ram_set_idstr(mr->ram_block,
                       memory_region_name(mr), dev);
    qemu_ram_set_migratable(mr->ram_block);
    ram_block_add_cpr_blocker(mr->ram_block, &error_fatal);
}

void vmstate_unregister_ram(MemoryRegion *mr, DeviceState *dev)
{
    qemu_ram_unset_idstr(mr->ram_block);
    qemu_ram_unset_migratable(mr->ram_block);
    ram_block_del_cpr_blocker(mr->ram_block);
}

void vmstate_register_ram_global(MemoryRegion *mr)
{
    vmstate_register_ram(mr, NULL);
}

bool vmstate_check_only_migratable(const VMStateDescription *vmsd)
{
    /* check needed if --only-migratable is specified */
    if (!only_migratable) {
        return true;
    }

    return !(vmsd && vmsd->unmigratable);
}

typedef struct SnapshotJob {
    Job common;
    char *tag;
    char *vmstate;
    strList *devices;
    Coroutine *co;
    Error **errp;
    bool ret;
} SnapshotJob;

static void qmp_snapshot_job_free(SnapshotJob *s)
{
    g_free(s->tag);
    g_free(s->vmstate);
    qapi_free_strList(s->devices);
}


static void snapshot_load_job_bh(void *opaque)
{
    Job *job = opaque;
    SnapshotJob *s = container_of(job, SnapshotJob, common);
    RunState orig_state = runstate_get();

    job_progress_set_remaining(&s->common, 1);

    vm_stop(RUN_STATE_RESTORE_VM);

    s->ret = load_snapshot(s->tag, s->vmstate, true, s->devices, s->errp);
    if (s->ret) {
        load_snapshot_resume(orig_state);
    }

    job_progress_update(&s->common, 1);

    qmp_snapshot_job_free(s);
    aio_co_wake(s->co);
}

static void snapshot_save_job_bh(void *opaque)
{
    Job *job = opaque;
    SnapshotJob *s = container_of(job, SnapshotJob, common);

    job_progress_set_remaining(&s->common, 1);
    s->ret = save_snapshot(s->tag, false, s->vmstate,
                           true, s->devices, s->errp);
    job_progress_update(&s->common, 1);

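    /* Free the job's parameters and wake the coroutine that scheduled us */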
    qmp_snapshot_job_free(s);
    aio_co_wake(s->co);
}

static void snapshot_delete_job_bh(void *opaque)
{
    Job *job = opaque;
    SnapshotJob *s = container_of(job, SnapshotJob, common);

    job_progress_set_remaining(&s->common, 1);
    s->ret = delete_snapshot(s->tag, true, s->devices, s->errp);
    job_progress_update(&s->common, 1);

    qmp_snapshot_job_free(s);
    aio_co_wake(s->co);
}

static int coroutine_fn snapshot_save_job_run(Job *job, Error **errp)
{
    SnapshotJob *s = container_of(job, SnapshotJob, common);
    s->errp = errp;
    s->co = qemu_coroutine_self();
    aio_bh_schedule_oneshot(qemu_get_aio_context(),
                            snapshot_save_job_bh, job);
    qemu_coroutine_yield();
    return s->ret ? 0 : -1;
}

static int coroutine_fn snapshot_load_job_run(Job *job, Error **errp)
{
    SnapshotJob *s = container_of(job, SnapshotJob, common);
    s->errp = errp;
    s->co = qemu_coroutine_self();
    aio_bh_schedule_oneshot(qemu_get_aio_context(),
                            snapshot_load_job_bh, job);
    qemu_coroutine_yield();
    return s->ret ? 0 : -1;
}

static int coroutine_fn snapshot_delete_job_run(Job *job, Error **errp)
{
    SnapshotJob *s = container_of(job, SnapshotJob, common);
    s->errp = errp;
    s->co = qemu_coroutine_self();
    aio_bh_schedule_oneshot(qemu_get_aio_context(),
                            snapshot_delete_job_bh, job);
    qemu_coroutine_yield();
    return s->ret ? 0 : -1;
}


static const JobDriver snapshot_load_job_driver = {
    .instance_size = sizeof(SnapshotJob),
    .job_type = JOB_TYPE_SNAPSHOT_LOAD,
    .run = snapshot_load_job_run,
};

static const JobDriver snapshot_save_job_driver = {
    .instance_size = sizeof(SnapshotJob),
    .job_type = JOB_TYPE_SNAPSHOT_SAVE,
    .run = snapshot_save_job_run,
};

static const JobDriver snapshot_delete_job_driver = {
    .instance_size = sizeof(SnapshotJob),
    .job_type = JOB_TYPE_SNAPSHOT_DELETE,
    .run = snapshot_delete_job_run,
};


void qmp_snapshot_save(const char *job_id,
                       const char *tag,
                       const char *vmstate,
                       strList *devices,
                       Error **errp)
{
    SnapshotJob *s;

    s = job_create(job_id, &snapshot_save_job_driver, NULL,
                   qemu_get_aio_context(), JOB_MANUAL_DISMISS,
                   NULL, NULL, errp);
    if (!s) {
        return;
    }

    s->tag = g_strdup(tag);
    s->vmstate = g_strdup(vmstate);
    s->devices = QAPI_CLONE(strList, devices);

    job_start(&s->common);
}

void qmp_snapshot_load(const char *job_id,
                       const char *tag,
                       const char *vmstate,
                       strList *devices,
                       Error **errp)
{
    SnapshotJob *s;

    s = job_create(job_id, &snapshot_load_job_driver, NULL,
                   qemu_get_aio_context(), JOB_MANUAL_DISMISS,
                   NULL, NULL, errp);
    if (!s) {
        return;
    }

    s->tag = g_strdup(tag);
    s->vmstate = g_strdup(vmstate);
    s->devices = QAPI_CLONE(strList, devices);

    job_start(&s->common);
}

void qmp_snapshot_delete(const char *job_id,
                         const char *tag,
                         strList *devices,
                         Error **errp)
{
    SnapshotJob *s;

    s = job_create(job_id, &snapshot_delete_job_driver, NULL,
                   qemu_get_aio_context(), JOB_MANUAL_DISMISS,
                   NULL, NULL, errp);
    if (!s) {
        return;
    }

    s->tag = g_strdup(tag);
    s->devices = QAPI_CLONE(strList, devices);

    job_start(&s->common);
}