/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <stdint.h>
#include <stdarg.h>
#include <stdlib.h>
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include "config.h"
#include "monitor/monitor.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "sysemu/arch_init.h"
#include "audio/audio.h"
#include "hw/i386/pc.h"
#include "hw/pci/pci.h"
#include "hw/audio/audio.h"
#include "sysemu/kvm.h"
#include "migration/migration.h"
#include "hw/i386/smbios.h"
#include "exec/address-spaces.h"
#include "hw/audio/pcspk.h"
#include "migration/page_cache.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qmp-commands.h"
#include "trace.h"
#include "exec/cpu-all.h"
#include "exec/ram_addr.h"
#include "hw/acpi/acpi.h"
#include "qemu/host-utils.h"

#ifdef DEBUG_ARCH_INIT
#define DPRINTF(fmt, ...) \
    do { fprintf(stdout, "arch_init: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
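/*
 * Note: both DPRINTF variants expand to a do { } while (0) statement so
 * that the macro behaves like a single statement and a trailing semicolon
 * parses correctly, e.g. inside an if/else without braces.
 */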
#ifdef TARGET_SPARC
int graphic_width = 1024;
int graphic_height = 768;
int graphic_depth = 8;
#else
int graphic_width = 800;
int graphic_height = 600;
int graphic_depth = 32;
#endif


#if defined(TARGET_ALPHA)
#define QEMU_ARCH QEMU_ARCH_ALPHA
#elif defined(TARGET_ARM)
#define QEMU_ARCH QEMU_ARCH_ARM
#elif defined(TARGET_CRIS)
#define QEMU_ARCH QEMU_ARCH_CRIS
#elif defined(TARGET_I386)
#define QEMU_ARCH QEMU_ARCH_I386
#elif defined(TARGET_M68K)
#define QEMU_ARCH QEMU_ARCH_M68K
#elif defined(TARGET_LM32)
#define QEMU_ARCH QEMU_ARCH_LM32
#elif defined(TARGET_MICROBLAZE)
#define QEMU_ARCH QEMU_ARCH_MICROBLAZE
#elif defined(TARGET_MIPS)
#define QEMU_ARCH QEMU_ARCH_MIPS
#elif defined(TARGET_MOXIE)
#define QEMU_ARCH QEMU_ARCH_MOXIE
#elif defined(TARGET_OPENRISC)
#define QEMU_ARCH QEMU_ARCH_OPENRISC
#elif defined(TARGET_PPC)
#define QEMU_ARCH QEMU_ARCH_PPC
#elif defined(TARGET_S390X)
#define QEMU_ARCH QEMU_ARCH_S390X
#elif defined(TARGET_SH4)
#define QEMU_ARCH QEMU_ARCH_SH4
#elif defined(TARGET_SPARC)
#define QEMU_ARCH QEMU_ARCH_SPARC
#elif defined(TARGET_XTENSA)
#define QEMU_ARCH QEMU_ARCH_XTENSA
#elif defined(TARGET_UNICORE32)
#define QEMU_ARCH QEMU_ARCH_UNICORE32
#elif defined(TARGET_TRICORE)
#define QEMU_ARCH QEMU_ARCH_TRICORE
#endif

const uint32_t arch_type = QEMU_ARCH;
static bool mig_throttle_on;
static int dirty_rate_high_cnt;
static void check_guest_throttling(void);

static uint64_t bitmap_sync_count;

/***********************************************************/
/* ram save/restore */

#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
#define RAM_SAVE_FLAG_COMPRESS 0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
/* 0x80 is reserved in migration.h; start with 0x100 next */

static struct defconfig_file {
    const char *filename;
    /* Indicates whether it is a user config file (disabled by
       -no-user-config) */
    bool userconfig;
} default_config_files[] = {
    { CONFIG_QEMU_CONFDIR "/qemu.conf",                   true },
    { CONFIG_QEMU_CONFDIR "/target-" TARGET_NAME ".conf", true },
    { NULL }, /* end of list */
};

static const uint8_t ZERO_TARGET_PAGE[TARGET_PAGE_SIZE];

int qemu_read_default_config_files(bool userconfig)
{
    int ret;
    struct defconfig_file *f;

    for (f = default_config_files; f->filename; f++) {
        if (!userconfig && f->userconfig) {
            continue;
        }
        ret = qemu_read_config_file(f->filename);
        if (ret < 0 && ret != -ENOENT) {
            return ret;
        }
    }

    return 0;
}

static inline bool is_zero_range(uint8_t *p, uint64_t size)
{
    return buffer_find_nonzero_offset(p, size) == size;
}

/* This struct contains the XBZRLE cache and a static page
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* Cache for XBZRLE, protected by lock. */
    PageCache *cache;
    QemuMutex lock;
} XBZRLE;
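/*
 * Background for readers: XBZRLE (Xor Based Zero Run Length Encoding)
 * transmits only the difference between the current page and the copy
 * of it kept in the cache above.  A sketch of the scheme, given a
 * cached page P_old and a new page P_new:
 *
 *     delta   = P_old XOR P_new   (mostly zero bytes for small updates)
 *     encoded = RLE(delta)        (runs of zero bytes collapse to a count)
 *
 * so a page where only a few bytes changed costs a few bytes on the
 * wire instead of TARGET_PAGE_SIZE.
 */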
/* buffer used for XBZRLE decoding */
static uint8_t *xbzrle_decoded_buf;

static void XBZRLE_cache_lock(void)
{
    if (migrate_use_xbzrle()) {
        qemu_mutex_lock(&XBZRLE.lock);
    }
}

static void XBZRLE_cache_unlock(void)
{
    if (migrate_use_xbzrle()) {
        qemu_mutex_unlock(&XBZRLE.lock);
    }
}

/*
 * Called from qmp_migrate_set_cache_size in the main thread, possibly
 * while a migration is in progress.  A running migration may be using
 * the cache and might finish during this call, hence changes to the
 * cache are protected by XBZRLE.lock.
 */
int64_t xbzrle_cache_resize(int64_t new_size)
{
    PageCache *new_cache;
    int64_t ret;

    if (new_size < TARGET_PAGE_SIZE) {
        return -1;
    }

    XBZRLE_cache_lock();

    if (XBZRLE.cache != NULL) {
        if (pow2floor(new_size) == migrate_xbzrle_cache_size()) {
            goto out_new_size;
        }
        new_cache = cache_init(new_size / TARGET_PAGE_SIZE,
                               TARGET_PAGE_SIZE);
        if (!new_cache) {
            error_report("Error creating cache");
            ret = -1;
            goto out;
        }

        cache_fini(XBZRLE.cache);
        XBZRLE.cache = new_cache;
    }

out_new_size:
    ret = pow2floor(new_size);
out:
    XBZRLE_cache_unlock();
    return ret;
}

/* accounting for migration statistics */
typedef struct AccountingInfo {
    uint64_t dup_pages;
    uint64_t skipped_pages;
    uint64_t norm_pages;
    uint64_t iterations;
    uint64_t xbzrle_bytes;
    uint64_t xbzrle_pages;
    uint64_t xbzrle_cache_miss;
    double xbzrle_cache_miss_rate;
    uint64_t xbzrle_overflows;
} AccountingInfo;

static AccountingInfo acct_info;

static void acct_clear(void)
{
    memset(&acct_info, 0, sizeof(acct_info));
}

uint64_t dup_mig_bytes_transferred(void)
{
    return acct_info.dup_pages * TARGET_PAGE_SIZE;
}

uint64_t dup_mig_pages_transferred(void)
{
    return acct_info.dup_pages;
}

uint64_t skipped_mig_bytes_transferred(void)
{
    return acct_info.skipped_pages * TARGET_PAGE_SIZE;
}

uint64_t skipped_mig_pages_transferred(void)
{
    return acct_info.skipped_pages;
}

uint64_t norm_mig_bytes_transferred(void)
{
    return acct_info.norm_pages * TARGET_PAGE_SIZE;
}

uint64_t norm_mig_pages_transferred(void)
{
    return acct_info.norm_pages;
}

uint64_t xbzrle_mig_bytes_transferred(void)
{
    return acct_info.xbzrle_bytes;
}

uint64_t xbzrle_mig_pages_transferred(void)
{
    return acct_info.xbzrle_pages;
}

uint64_t xbzrle_mig_pages_cache_miss(void)
{
    return acct_info.xbzrle_cache_miss;
}

double xbzrle_mig_cache_miss_rate(void)
{
    return acct_info.xbzrle_cache_miss_rate;
}

uint64_t xbzrle_mig_pages_overflow(void)
{
    return acct_info.xbzrle_overflows;
}

static size_t save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
                             int cont, int flag)
{
    size_t size;

    qemu_put_be64(f, offset | cont | flag);
    size = 8;

    if (!cont) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr,
                        strlen(block->idstr));
        size += 1 + strlen(block->idstr);
    }
    return size;
}
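/*
 * For reference, the header emitted by save_block_hdr above looks like
 * this on the wire (multi-byte fields big-endian):
 *
 *     8 bytes : page offset within the block, OR'ed with the
 *               RAM_SAVE_FLAG_* bits (offsets are page aligned, so the
 *               low bits are free to carry flags)
 *     1 byte  : length of the block idstr   \  only when the CONTINUE
 *     n bytes : the block idstr itself      /  flag is clear
 *
 * so a page sent to the same block as the previous one costs only the
 * 8-byte header plus its payload.
 */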
/* This is the last block that we have visited searching for dirty pages
 */
static RAMBlock *last_seen_block;
/* This is the last block from where we have sent data */
static RAMBlock *last_sent_block;
static ram_addr_t last_offset;
static unsigned long *migration_bitmap;
static uint64_t migration_dirty_pages;
static uint32_t last_version;
static bool ram_bulk_stage;

/* Update the xbzrle cache to reflect a page that's been sent as all 0.
 * The important thing is that a stale (not-yet-0'd) page be replaced
 * by the new data.
 * As a bonus, if the page wasn't in the cache it gets added so that
 * when a small write is made into the 0'd page it gets XBZRLE sent.
 */
static void xbzrle_cache_zero_page(ram_addr_t current_addr)
{
    if (ram_bulk_stage || !migrate_use_xbzrle()) {
        return;
    }

    /* We don't care if this fails to allocate a new cache page
     * as long as it updated an old one */
    cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE,
                 bitmap_sync_count);
}

#define ENCODING_FLAG_XBZRLE 0x1

static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data,
                            ram_addr_t current_addr, RAMBlock *block,
                            ram_addr_t offset, int cont, bool last_stage)
{
    int encoded_len = 0, bytes_sent = -1;
    uint8_t *prev_cached_page;

    if (!cache_is_cached(XBZRLE.cache, current_addr, bitmap_sync_count)) {
        acct_info.xbzrle_cache_miss++;
        if (!last_stage) {
            if (cache_insert(XBZRLE.cache, current_addr, *current_data,
                             bitmap_sync_count) == -1) {
                return -1;
            } else {
                /* update *current_data when the page has been
                   inserted into cache */
                *current_data = get_cached_data(XBZRLE.cache, current_addr);
            }
        }
        return -1;
    }

    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save current buffer into memory */
    memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);
    if (encoded_len == 0) {
        DPRINTF("Skipping unmodified page\n");
        return 0;
    } else if (encoded_len == -1) {
        DPRINTF("Overflow\n");
        acct_info.xbzrle_overflows++;
        /* update data in the cache */
        if (!last_stage) {
            memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
            *current_data = prev_cached_page;
        }
        return -1;
    }

    /* we need to update the data in the cache, in order to get the same data */
    if (!last_stage) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
    }

    /* Send XBZRLE based compressed page */
    bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(f, encoded_len);
    qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
    bytes_sent += encoded_len + 1 + 2;
    acct_info.xbzrle_pages++;
    acct_info.xbzrle_bytes += bytes_sent;

    return bytes_sent;
}
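/*
 * For reference, an XBZRLE page record as emitted above is:
 *
 *     block header (save_block_hdr, with RAM_SAVE_FLAG_XBZRLE set)
 *     1 byte  : ENCODING_FLAG_XBZRLE
 *     2 bytes : encoded_len (big-endian)
 *     encoded_len bytes of encoded data
 *
 * which is where the "encoded_len + 1 + 2" accounting above comes from.
 */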
static inline
ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
                                                 ram_addr_t start)
{
    unsigned long base = mr->ram_addr >> TARGET_PAGE_BITS;
    unsigned long nr = base + (start >> TARGET_PAGE_BITS);
    uint64_t mr_size = TARGET_PAGE_ALIGN(memory_region_size(mr));
    unsigned long size = base + (mr_size >> TARGET_PAGE_BITS);

    unsigned long next;

    if (ram_bulk_stage && nr > base) {
        next = nr + 1;
    } else {
        next = find_next_bit(migration_bitmap, size, nr);
    }

    if (next < size) {
        clear_bit(next, migration_bitmap);
        migration_dirty_pages--;
    }
    return (next - base) << TARGET_PAGE_BITS;
}

static inline bool migration_bitmap_set_dirty(ram_addr_t addr)
{
    bool ret;
    int nr = addr >> TARGET_PAGE_BITS;

    ret = test_and_set_bit(nr, migration_bitmap);

    if (!ret) {
        migration_dirty_pages++;
    }
    return ret;
}

static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t addr;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* start address is aligned at the start of a word? */
    if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];

        for (k = page; k < page + nr; k++) {
            if (src[k]) {
                unsigned long new_dirty;
                new_dirty = ~migration_bitmap[k];
                migration_bitmap[k] |= src[k];
                new_dirty &= src[k];
                migration_dirty_pages += ctpopl(new_dirty);
                src[k] = 0;
            }
        }
    } else {
        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_get_dirty(start + addr,
                                              TARGET_PAGE_SIZE,
                                              DIRTY_MEMORY_MIGRATION)) {
                cpu_physical_memory_reset_dirty(start + addr,
                                                TARGET_PAGE_SIZE,
                                                DIRTY_MEMORY_MIGRATION);
                migration_bitmap_set_dirty(start + addr);
            }
        }
    }
}


/* Needs iothread lock! */
/* Fix me: there are too many global variables used in the migration process. */
static int64_t start_time;
static int64_t bytes_xfer_prev;
static int64_t num_dirty_pages_period;

static void migration_bitmap_sync_init(void)
{
    start_time = 0;
    bytes_xfer_prev = 0;
    num_dirty_pages_period = 0;
}
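/*
 * A worked example of the rate computed in migration_bitmap_sync below:
 * if 25600 pages were dirtied over a 1000 ms period, dirty_pages_rate is
 * 25600 * 1000 / 1000 = 25600 pages/sec, i.e. roughly 100 MiB/s of dirty
 * memory with 4 KiB target pages.  mig_throttle_on is latched when the
 * bytes dirtied per period repeatedly exceed half the bytes transferred
 * in that period.
 */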
static void migration_bitmap_sync(void)
{
    RAMBlock *block;
    uint64_t num_dirty_pages_init = migration_dirty_pages;
    MigrationState *s = migrate_get_current();
    int64_t end_time;
    int64_t bytes_xfer_now;
    static uint64_t xbzrle_cache_miss_prev;
    static uint64_t iterations_prev;

    bitmap_sync_count++;

    if (!bytes_xfer_prev) {
        bytes_xfer_prev = ram_bytes_transferred();
    }

    if (!start_time) {
        start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    }

    trace_migration_bitmap_sync_start();
    address_space_sync_dirty_bitmap(&address_space_memory);

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        migration_bitmap_sync_range(block->mr->ram_addr, block->used_length);
    }
    trace_migration_bitmap_sync_end(migration_dirty_pages
                                    - num_dirty_pages_init);
    num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > start_time + 1000) {
        if (migrate_auto_converge()) {
            /* The following detection logic can be refined later. For now:
               Check to see if the bytes dirtied are more than 50% of the
               approximate number of bytes that just got transferred since
               the last time we were in this routine. If that happens more
               than N times (for now N == 4) we turn on the throttle-down
               logic */
            bytes_xfer_now = ram_bytes_transferred();
            if (s->dirty_pages_rate &&
                (num_dirty_pages_period * TARGET_PAGE_SIZE >
                 (bytes_xfer_now - bytes_xfer_prev) / 2) &&
                (dirty_rate_high_cnt++ > 4)) {
                trace_migration_throttle();
                mig_throttle_on = true;
                dirty_rate_high_cnt = 0;
            }
            bytes_xfer_prev = bytes_xfer_now;
        } else {
            mig_throttle_on = false;
        }
        if (migrate_use_xbzrle()) {
            if (iterations_prev != 0) {
                acct_info.xbzrle_cache_miss_rate =
                    (double)(acct_info.xbzrle_cache_miss -
                             xbzrle_cache_miss_prev) /
                    (acct_info.iterations - iterations_prev);
            }
            iterations_prev = acct_info.iterations;
            xbzrle_cache_miss_prev = acct_info.xbzrle_cache_miss;
        }
        s->dirty_pages_rate = num_dirty_pages_period * 1000
            / (end_time - start_time);
        s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
        start_time = end_time;
        num_dirty_pages_period = 0;
        s->dirty_sync_count = bitmap_sync_count;
    }
}

/*
 * ram_save_page: Send the given page to the stream
 *
 * Returns: Number of bytes written.
 */
static int ram_save_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
                         bool last_stage)
{
    int bytes_sent;
    int cont;
    ram_addr_t current_addr;
    MemoryRegion *mr = block->mr;
    uint8_t *p;
    int ret;
    bool send_async = true;

    cont = (block == last_sent_block) ? RAM_SAVE_FLAG_CONTINUE : 0;

    p = memory_region_get_ram_ptr(mr) + offset;

    /* When in doubt, send the page as normal */
    bytes_sent = -1;
    ret = ram_control_save_page(f, block->offset,
                                offset, TARGET_PAGE_SIZE, &bytes_sent);

    XBZRLE_cache_lock();

    current_addr = block->offset + offset;
    if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
        if (ret != RAM_SAVE_CONTROL_DELAYED) {
            if (bytes_sent > 0) {
                acct_info.norm_pages++;
            } else if (bytes_sent == 0) {
                acct_info.dup_pages++;
            }
        }
    } else if (is_zero_range(p, TARGET_PAGE_SIZE)) {
        acct_info.dup_pages++;
        bytes_sent = save_block_hdr(f, block, offset, cont,
                                    RAM_SAVE_FLAG_COMPRESS);
        qemu_put_byte(f, 0);
        bytes_sent++;
        /* Must let xbzrle know, otherwise a previous (now 0'd) cached
         * page would be stale
         */
        xbzrle_cache_zero_page(current_addr);
    } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
        bytes_sent = save_xbzrle_page(f, &p, current_addr, block,
                                      offset, cont, last_stage);
        if (!last_stage) {
            /* Can't send this cached data async, since the cache page
             * might get updated before it gets to the wire
             */
            send_async = false;
        }
    }

    /* XBZRLE overflow or normal page */
    if (bytes_sent == -1) {
        bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_PAGE);
        if (send_async) {
            qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
        } else {
            qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
        }
        bytes_sent += TARGET_PAGE_SIZE;
        acct_info.norm_pages++;
    }

    XBZRLE_cache_unlock();

    return bytes_sent;
}
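/*
 * Rough cost comparison for the three send paths in ram_save_page above,
 * assuming a 4 KiB target page and the minimal 8-byte block header:
 *
 *   zero page   : header + 1 fill byte, ~9 bytes
 *   XBZRLE page : header + 3 bytes + encoded delta
 *   normal page : header + 4096 bytes
 *
 * which is why the zero-page and XBZRLE paths are tried first.
 */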
/*
 * ram_find_and_save_block: Finds a page to send and sends it to f
 *
 * Returns: The number of bytes written.
 *          0 means no dirty pages
 */

static int ram_find_and_save_block(QEMUFile *f, bool last_stage)
{
    RAMBlock *block = last_seen_block;
    ram_addr_t offset = last_offset;
    bool complete_round = false;
    int bytes_sent = 0;
    MemoryRegion *mr;

    if (!block) {
        block = QTAILQ_FIRST(&ram_list.blocks);
    }

    while (true) {
        mr = block->mr;
        offset = migration_bitmap_find_and_reset_dirty(mr, offset);
        if (complete_round && block == last_seen_block &&
            offset >= last_offset) {
            break;
        }
        if (offset >= block->used_length) {
            offset = 0;
            block = QTAILQ_NEXT(block, next);
            if (!block) {
                block = QTAILQ_FIRST(&ram_list.blocks);
                complete_round = true;
                ram_bulk_stage = false;
            }
        } else {
            bytes_sent = ram_save_page(f, block, offset, last_stage);

            /* if page is unmodified, continue to the next */
            if (bytes_sent > 0) {
                last_sent_block = block;
                break;
            }
        }
    }
    last_seen_block = block;
    last_offset = offset;

    return bytes_sent;
}

static uint64_t bytes_transferred;

void acct_update_position(QEMUFile *f, size_t size, bool zero)
{
    uint64_t pages = size / TARGET_PAGE_SIZE;
    if (zero) {
        acct_info.dup_pages += pages;
    } else {
        acct_info.norm_pages += pages;
        bytes_transferred += size;
        qemu_update_position(f, size);
    }
}

static ram_addr_t ram_save_remaining(void)
{
    return migration_dirty_pages;
}

uint64_t ram_bytes_remaining(void)
{
    return ram_save_remaining() * TARGET_PAGE_SIZE;
}

uint64_t ram_bytes_transferred(void)
{
    return bytes_transferred;
}

uint64_t ram_bytes_total(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        total += block->used_length;
    }

    return total;
}

void free_xbzrle_decoded_buf(void)
{
    g_free(xbzrle_decoded_buf);
    xbzrle_decoded_buf = NULL;
}

static void migration_end(void)
{
    if (migration_bitmap) {
        memory_global_dirty_log_stop();
        g_free(migration_bitmap);
        migration_bitmap = NULL;
    }

    XBZRLE_cache_lock();
    if (XBZRLE.cache) {
        cache_fini(XBZRLE.cache);
        g_free(XBZRLE.encoded_buf);
        g_free(XBZRLE.current_buf);
        XBZRLE.cache = NULL;
        XBZRLE.encoded_buf = NULL;
        XBZRLE.current_buf = NULL;
    }
    XBZRLE_cache_unlock();
}

static void ram_migration_cancel(void *opaque)
{
    migration_end();
}

static void reset_ram_globals(void)
{
    last_seen_block = NULL;
    last_sent_block = NULL;
    last_offset = 0;
    last_version = ram_list.version;
    ram_bulk_stage = true;
}

#define MAX_WAIT 50 /* ms, half buffered_file limit */
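/*
 * Rough sizing for the migration_bitmap allocated in ram_save_setup
 * below: one bit per target page, so a 4 GiB guest with 4 KiB pages
 * needs 1Mi bits, i.e. a 128 KiB bitmap.
 */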
static int ram_save_setup(QEMUFile *f, void *opaque)
{
    RAMBlock *block;
    int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */

    mig_throttle_on = false;
    dirty_rate_high_cnt = 0;
    bitmap_sync_count = 0;
    migration_bitmap_sync_init();

    if (migrate_use_xbzrle()) {
        XBZRLE_cache_lock();
        XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
                                  TARGET_PAGE_SIZE,
                                  TARGET_PAGE_SIZE);
        if (!XBZRLE.cache) {
            XBZRLE_cache_unlock();
            error_report("Error creating cache");
            return -1;
        }
        XBZRLE_cache_unlock();

        /* We prefer not to abort if there is no memory */
        XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
        if (!XBZRLE.encoded_buf) {
            error_report("Error allocating encoded_buf");
            return -1;
        }

        XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
        if (!XBZRLE.current_buf) {
            error_report("Error allocating current_buf");
            g_free(XBZRLE.encoded_buf);
            XBZRLE.encoded_buf = NULL;
            return -1;
        }

        acct_clear();
    }

    qemu_mutex_lock_iothread();
    qemu_mutex_lock_ramlist();
    bytes_transferred = 0;
    reset_ram_globals();

    ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS;
    migration_bitmap = bitmap_new(ram_bitmap_pages);
    bitmap_set(migration_bitmap, 0, ram_bitmap_pages);

    /*
     * Count the total number of pages used by ram blocks not including any
     * gaps due to alignment or unplugs.
     */
    migration_dirty_pages = 0;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        uint64_t block_pages;

        block_pages = block->used_length >> TARGET_PAGE_BITS;
        migration_dirty_pages += block_pages;
    }

    memory_global_dirty_log_start();
    migration_bitmap_sync();
    qemu_mutex_unlock_iothread();

    qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
        qemu_put_be64(f, block->used_length);
    }

    qemu_mutex_unlock_ramlist();

    ram_control_before_iterate(f, RAM_CONTROL_SETUP);
    ram_control_after_iterate(f, RAM_CONTROL_SETUP);

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}

static int ram_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int i;
    int64_t t0;
    int total_sent = 0;

    qemu_mutex_lock_ramlist();

    if (ram_list.version != last_version) {
        reset_ram_globals();
    }

    ram_control_before_iterate(f, RAM_CONTROL_ROUND);

    t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    i = 0;
    while ((ret = qemu_file_rate_limit(f)) == 0) {
        int bytes_sent;

        bytes_sent = ram_find_and_save_block(f, false);
        /* no more blocks to send */
        if (bytes_sent == 0) {
            break;
        }
        total_sent += bytes_sent;
        acct_info.iterations++;
        check_guest_throttling();
        /* we want to check in the 1st loop, just in case it was the 1st time
           and we had to sync the dirty bitmap.
           qemu_get_clock_ns() is a bit expensive, so we only check once
           every few iterations
        */
        if ((i & 63) == 0) {
            uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
            if (t1 > MAX_WAIT) {
                DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
                        t1, i);
                break;
            }
        }
        i++;
    }

    qemu_mutex_unlock_ramlist();

    /*
     * Must occur before EOS (or any QEMUFile operation)
     * because of RDMA protocol.
     */
    ram_control_after_iterate(f, RAM_CONTROL_ROUND);

    bytes_transferred += total_sent;

    /*
     * Do not count these 8 bytes into total_sent, so that we can
     * return 0 if no page had been dirtied.
     */
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    bytes_transferred += 8;

    ret = qemu_file_get_error(f);
    if (ret < 0) {
        return ret;
    }

    return total_sent;
}
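/*
 * Overall stream framing produced by the handlers above and below,
 * sketched for one migration:
 *
 *   setup:    be64(total_ram | MEM_SIZE), per-block (idstr, used_length),
 *             EOS marker
 *   iterate:  any number of page records (COMPRESS/PAGE/XBZRLE), EOS
 *   complete: the remaining page records, EOS
 *
 * ram_load consumes exactly this sequence, dispatching on the flag bits
 * carried in each record's 8-byte header.
 */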
static int ram_save_complete(QEMUFile *f, void *opaque)
{
    qemu_mutex_lock_ramlist();
    migration_bitmap_sync();

    ram_control_before_iterate(f, RAM_CONTROL_FINISH);

    /* try transferring iterative blocks of memory */

    /* flush all remaining blocks regardless of rate limiting */
    while (true) {
        int bytes_sent;

        bytes_sent = ram_find_and_save_block(f, true);
        /* no more blocks to send */
        if (bytes_sent == 0) {
            break;
        }
        bytes_transferred += bytes_sent;
    }

    ram_control_after_iterate(f, RAM_CONTROL_FINISH);
    migration_end();

    qemu_mutex_unlock_ramlist();
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}

static uint64_t ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size)
{
    uint64_t remaining_size;

    remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;

    if (remaining_size < max_size) {
        qemu_mutex_lock_iothread();
        migration_bitmap_sync();
        qemu_mutex_unlock_iothread();
        remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
    }
    return remaining_size;
}

static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
{
    unsigned int xh_len;
    int xh_flags;

    if (!xbzrle_decoded_buf) {
        xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
    }

    /* extract RLE header */
    xh_flags = qemu_get_byte(f);
    xh_len = qemu_get_be16(f);

    if (xh_flags != ENCODING_FLAG_XBZRLE) {
        error_report("Failed to load XBZRLE page - wrong compression!");
        return -1;
    }

    if (xh_len > TARGET_PAGE_SIZE) {
        error_report("Failed to load XBZRLE page - len overflow!");
        return -1;
    }
    /* load data and decode */
    qemu_get_buffer(f, xbzrle_decoded_buf, xh_len);

    /* decode RLE */
    if (xbzrle_decode_buffer(xbzrle_decoded_buf, xh_len, host,
                             TARGET_PAGE_SIZE) == -1) {
        error_report("Failed to load XBZRLE page - decode error!");
        return -1;
    }

    return 0;
}

static inline void *host_from_stream_offset(QEMUFile *f,
                                            ram_addr_t offset,
                                            int flags)
{
    static RAMBlock *block = NULL;
    char id[256];
    uint8_t len;

    if (flags & RAM_SAVE_FLAG_CONTINUE) {
        if (!block || block->max_length <= offset) {
            error_report("Ack, bad migration stream!");
            return NULL;
        }

        return memory_region_get_ram_ptr(block->mr) + offset;
    }

    len = qemu_get_byte(f);
    qemu_get_buffer(f, (uint8_t *)id, len);
    id[len] = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (!strncmp(id, block->idstr, sizeof(id)) &&
            block->max_length > offset) {
            return memory_region_get_ram_ptr(block->mr) + offset;
        }
    }

    error_report("Can't find block %s!", id);
    return NULL;
}
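/*
 * Note on the static 'block' above: RAM_SAVE_FLAG_CONTINUE means "same
 * block as the previous record", so the receiver caches the last block
 * it looked up by name and skips the idstr parse entirely for runs of
 * pages from one block - mirroring the 'cont' optimization on the save
 * side.
 */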
/*
 * If a page (or a whole RDMA chunk) has been
 * determined to be zero, then zap it.
 */
void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
{
    if (ch != 0 || !is_zero_range(host, size)) {
        memset(host, ch, size);
    }
}

static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    int flags = 0, ret = 0;
    static uint64_t seq_iter;

    seq_iter++;

    if (version_id != 4) {
        ret = -EINVAL;
    }

    while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
        ram_addr_t addr, total_ram_bytes;
        void *host;
        uint8_t ch;

        addr = qemu_get_be64(f);
        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
        case RAM_SAVE_FLAG_MEM_SIZE:
            /* Synchronize RAM block list */
            total_ram_bytes = addr;
            while (!ret && total_ram_bytes) {
                RAMBlock *block;
                uint8_t len;
                char id[256];
                ram_addr_t length;

                len = qemu_get_byte(f);
                qemu_get_buffer(f, (uint8_t *)id, len);
                id[len] = 0;
                length = qemu_get_be64(f);

                QTAILQ_FOREACH(block, &ram_list.blocks, next) {
                    if (!strncmp(id, block->idstr, sizeof(id))) {
                        if (length != block->used_length) {
                            Error *local_err = NULL;

                            ret = qemu_ram_resize(block->offset, length, &local_err);
                            if (local_err) {
                                error_report_err(local_err);
                            }
                        }
                        break;
                    }
                }

                if (!block) {
                    error_report("Unknown ramblock \"%s\", cannot "
                                 "accept migration", id);
                    ret = -EINVAL;
                }

                total_ram_bytes -= length;
            }
            break;
        case RAM_SAVE_FLAG_COMPRESS:
            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }

            ch = qemu_get_byte(f);
            ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
            break;
        case RAM_SAVE_FLAG_PAGE:
            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }

            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
            break;
        case RAM_SAVE_FLAG_XBZRLE:
            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }

            if (load_xbzrle(f, addr, host) < 0) {
                error_report("Failed to decompress XBZRLE page at "
                             RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            break;
        case RAM_SAVE_FLAG_EOS:
            /* normal exit */
            break;
        default:
            if (flags & RAM_SAVE_FLAG_HOOK) {
                ram_control_load_hook(f, flags);
            } else {
                error_report("Unknown combination of migration flags: %#x",
                             flags);
                ret = -EINVAL;
            }
        }
        if (!ret) {
            ret = qemu_file_get_error(f);
        }
    }

    DPRINTF("Completed load of VM with exit code %d seq iteration "
            "%" PRIu64 "\n", ret, seq_iter);
    return ret;
}

static SaveVMHandlers savevm_ram_handlers = {
    .save_live_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete = ram_save_complete,
    .save_live_pending = ram_save_pending,
    .load_state = ram_load,
    .cancel = ram_migration_cancel,
};

void ram_mig_init(void)
{
    qemu_mutex_init(&XBZRLE.lock);
    register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, NULL);
}
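/*
 * Note: the section version registered above (4) is the same number
 * ram_load checks, so a stream produced by an incompatible layout of
 * this section is rejected with -EINVAL rather than misparsed.
 */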
struct soundhw {
    const char *name;
    const char *descr;
    int enabled;
    int isa;
    union {
        int (*init_isa) (ISABus *bus);
        int (*init_pci) (PCIBus *bus);
    } init;
};

static struct soundhw soundhw[9];
static int soundhw_count;

void isa_register_soundhw(const char *name, const char *descr,
                          int (*init_isa)(ISABus *bus))
{
    assert(soundhw_count < ARRAY_SIZE(soundhw) - 1);
    soundhw[soundhw_count].name = name;
    soundhw[soundhw_count].descr = descr;
    soundhw[soundhw_count].isa = 1;
    soundhw[soundhw_count].init.init_isa = init_isa;
    soundhw_count++;
}

void pci_register_soundhw(const char *name, const char *descr,
                          int (*init_pci)(PCIBus *bus))
{
    assert(soundhw_count < ARRAY_SIZE(soundhw) - 1);
    soundhw[soundhw_count].name = name;
    soundhw[soundhw_count].descr = descr;
    soundhw[soundhw_count].isa = 0;
    soundhw[soundhw_count].init.init_pci = init_pci;
    soundhw_count++;
}
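/*
 * A card would register itself from its own init code roughly like this
 * (the names below are illustrative, not taken from this file):
 *
 *     static int mycard_init(ISABus *bus) { ... }
 *     isa_register_soundhw("mycard", "My example sound card", mycard_init);
 *
 * The asserts above keep one slot free so the table stays
 * NULL-terminated for the c->name loops that walk it.
 */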
void select_soundhw(const char *optarg)
{
    struct soundhw *c;

    if (is_help_option(optarg)) {
    show_valid_cards:

        if (soundhw_count) {
            printf("Valid sound card names (comma separated):\n");
            for (c = soundhw; c->name; ++c) {
                printf("%-11s %s\n", c->name, c->descr);
            }
            printf("\n-soundhw all will enable all of the above\n");
        } else {
            printf("Machine has no user-selectable audio hardware "
                   "(it may or may not have always-present audio hardware).\n");
        }
        exit(!is_help_option(optarg));
    } else {
        size_t l;
        const char *p;
        char *e;
        int bad_card = 0;

        if (!strcmp(optarg, "all")) {
            for (c = soundhw; c->name; ++c) {
                c->enabled = 1;
            }
            return;
        }

        p = optarg;
        while (*p) {
            e = strchr(p, ',');
            l = !e ? strlen(p) : (size_t) (e - p);

            for (c = soundhw; c->name; ++c) {
                if (!strncmp(c->name, p, l) && !c->name[l]) {
                    c->enabled = 1;
                    break;
                }
            }

            if (!c->name) {
                if (l > 80) {
                    error_report("Unknown sound card name (too big to show)");
                } else {
                    error_report("Unknown sound card name `%.*s'",
                                 (int) l, p);
                }
                bad_card = 1;
            }
            p += l + (e != NULL);
        }

        if (bad_card) {
            goto show_valid_cards;
        }
    }
}

void audio_init(void)
{
    struct soundhw *c;
    ISABus *isa_bus = (ISABus *) object_resolve_path_type("", TYPE_ISA_BUS, NULL);
    PCIBus *pci_bus = (PCIBus *) object_resolve_path_type("", TYPE_PCI_BUS, NULL);

    for (c = soundhw; c->name; ++c) {
        if (c->enabled) {
            if (c->isa) {
                if (!isa_bus) {
                    error_report("ISA bus not available for %s", c->name);
                    exit(1);
                }
                c->init.init_isa(isa_bus);
            } else {
                if (!pci_bus) {
                    error_report("PCI bus not available for %s", c->name);
                    exit(1);
                }
                c->init.init_pci(pci_bus);
            }
        }
    }
}

int qemu_uuid_parse(const char *str, uint8_t *uuid)
{
    int ret;

    if (strlen(str) != 36) {
        return -1;
    }

    ret = sscanf(str, UUID_FMT, &uuid[0], &uuid[1], &uuid[2], &uuid[3],
                 &uuid[4], &uuid[5], &uuid[6], &uuid[7], &uuid[8], &uuid[9],
                 &uuid[10], &uuid[11], &uuid[12], &uuid[13], &uuid[14],
                 &uuid[15]);

    if (ret != 16) {
        return -1;
    }
    return 0;
}

void do_acpitable_option(const QemuOpts *opts)
{
#ifdef TARGET_I386
    Error *err = NULL;

    acpi_table_add(opts, &err);
    if (err) {
        error_report("Wrong acpi table provided: %s",
                     error_get_pretty(err));
        error_free(err);
        exit(1);
    }
#endif
}

void do_smbios_option(QemuOpts *opts)
{
#ifdef TARGET_I386
    smbios_entry_add(opts);
#endif
}

void cpudef_init(void)
{
#if defined(cpudef_setup)
    cpudef_setup(); /* parse cpu definitions in target config file */
#endif
}

int kvm_available(void)
{
#ifdef CONFIG_KVM
    return 1;
#else
    return 0;
#endif
}

int xen_available(void)
{
#ifdef CONFIG_XEN
    return 1;
#else
    return 0;
#endif
}


TargetInfo *qmp_query_target(Error **errp)
{
    TargetInfo *info = g_malloc0(sizeof(*info));

    info->arch = g_strdup(TARGET_NAME);

    return info;
}

/* Stub function that gets run on the vcpu when it is brought out of the
   VM to run inside qemu via async_run_on_cpu() */
static void mig_sleep_cpu(void *opq)
{
    qemu_mutex_unlock_iothread();
    g_usleep(30 * 1000);
    qemu_mutex_lock_iothread();
}
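/*
 * Back-of-the-envelope effect of the throttling below: each trigger
 * makes every vcpu sleep for 30 ms, and check_guest_throttling
 * re-triggers at most once every 40 ms, so a continuously throttled
 * guest runs its vcpus for only about a quarter of wall-clock time.
 */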
/* To reduce the dirty rate explicitly disallow the VCPUs from spending
   much time in the VM. The migration thread will try to catch up.
   Workload will experience a performance drop.
 */
static void mig_throttle_guest_down(void)
{
    CPUState *cpu;

    qemu_mutex_lock_iothread();
    CPU_FOREACH(cpu) {
        async_run_on_cpu(cpu, mig_sleep_cpu, NULL);
    }
    qemu_mutex_unlock_iothread();
}

static void check_guest_throttling(void)
{
    static int64_t t0;
    int64_t t1;

    if (!mig_throttle_on) {
        return;
    }

    if (!t0) {
        t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        return;
    }

    t1 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    /* If it has been more than 40 ms since the last time the guest
     * was throttled then do it again.
     */
    if (40 < (t1 - t0) / 1000000) {
        mig_throttle_guest_down();
        t0 = t1;
    }
}