/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <stdint.h>
#include <stdarg.h>
#include <stdlib.h>
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include "config.h"
#include "monitor.h"
#include "sysemu.h"
#include "arch_init.h"
#include "audio/audio.h"
#include "hw/pc.h"
#include "hw/pci.h"
#include "hw/audiodev.h"
#include "kvm.h"
#include "migration.h"
#include "net.h"
#include "gdbstub.h"
#include "hw/smbios.h"
#include "exec-memory.h"
#include "hw/pcspk.h"
#include "qemu/page_cache.h"

#ifdef DEBUG_ARCH_INIT
#define DPRINTF(fmt, ...) \
    do { fprintf(stdout, "arch_init: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#ifdef TARGET_SPARC
int graphic_width = 1024;
int graphic_height = 768;
int graphic_depth = 8;
#else
int graphic_width = 800;
int graphic_height = 600;
int graphic_depth = 15;
#endif


#if defined(TARGET_ALPHA)
#define QEMU_ARCH QEMU_ARCH_ALPHA
#elif defined(TARGET_ARM)
#define QEMU_ARCH QEMU_ARCH_ARM
#elif defined(TARGET_CRIS)
#define QEMU_ARCH QEMU_ARCH_CRIS
#elif defined(TARGET_I386)
#define QEMU_ARCH QEMU_ARCH_I386
#elif defined(TARGET_M68K)
#define QEMU_ARCH QEMU_ARCH_M68K
#elif defined(TARGET_LM32)
#define QEMU_ARCH QEMU_ARCH_LM32
#elif defined(TARGET_MICROBLAZE)
#define QEMU_ARCH QEMU_ARCH_MICROBLAZE
#elif defined(TARGET_MIPS)
#define QEMU_ARCH QEMU_ARCH_MIPS
#elif defined(TARGET_OPENRISC)
#define QEMU_ARCH QEMU_ARCH_OPENRISC
#elif defined(TARGET_PPC)
#define QEMU_ARCH QEMU_ARCH_PPC
#elif defined(TARGET_S390X)
#define QEMU_ARCH QEMU_ARCH_S390X
#elif defined(TARGET_SH4)
#define QEMU_ARCH QEMU_ARCH_SH4
#elif defined(TARGET_SPARC)
#define QEMU_ARCH QEMU_ARCH_SPARC
#elif defined(TARGET_XTENSA)
#define QEMU_ARCH QEMU_ARCH_XTENSA
#endif

const uint32_t arch_type = QEMU_ARCH;

/***********************************************************/
/* ram save/restore */

#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
#define RAM_SAVE_FLAG_COMPRESS 0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40

#ifdef __ALTIVEC__
#include <altivec.h>
#define VECTYPE        vector unsigned char
#define SPLAT(p)       vec_splat(vec_ld(0, p), 0)
#define ALL_EQ(v1, v2) vec_all_eq(v1, v2)
/* altivec.h may redefine the bool macro as vector type.
 * Reset it to POSIX semantics.
 */
#undef bool
#define bool _Bool
#elif defined __SSE2__
#include <emmintrin.h>
#define VECTYPE        __m128i
#define SPLAT(p)       _mm_set1_epi8(*(p))
#define ALL_EQ(v1, v2) (_mm_movemask_epi8(_mm_cmpeq_epi8(v1, v2)) == 0xFFFF)
#else
#define VECTYPE        unsigned long
#define SPLAT(p)       (*(p) * (~0UL / 255))
#define ALL_EQ(v1, v2) ((v1) == (v2))
#endif


static struct defconfig_file {
    const char *filename;
    /* Indicates it is a user config file (disabled by -no-user-config) */
    bool userconfig;
} default_config_files[] = {
    { CONFIG_QEMU_DATADIR "/cpus-" TARGET_ARCH ".conf", false },
    { CONFIG_QEMU_CONFDIR "/qemu.conf", true },
    { CONFIG_QEMU_CONFDIR "/target-" TARGET_ARCH ".conf", true },
    { NULL }, /* end of list */
};


int qemu_read_default_config_files(bool userconfig)
{
    int ret;
    struct defconfig_file *f;

    for (f = default_config_files; f->filename; f++) {
        if (!userconfig && f->userconfig) {
            continue;
        }
        ret = qemu_read_config_file(f->filename);
        if (ret < 0 && ret != -ENOENT) {
            return ret;
        }
    }

    return 0;
}

static int is_dup_page(uint8_t *page)
{
    VECTYPE *p = (VECTYPE *)page;
    VECTYPE val = SPLAT(page);
    int i;

    for (i = 0; i < TARGET_PAGE_SIZE / sizeof(VECTYPE); i++) {
        if (!ALL_EQ(val, p[i])) {
            return 0;
        }
    }

    return 1;
}

/* struct contains the XBZRLE cache and the static buffers
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* buffer used for XBZRLE decoding */
    uint8_t *decoded_buf;
    /* Cache for XBZRLE */
    PageCache *cache;
} XBZRLE = {
    .encoded_buf = NULL,
    .current_buf = NULL,
    .decoded_buf = NULL,
    .cache = NULL,
};


int64_t xbzrle_cache_resize(int64_t new_size)
{
    if (XBZRLE.cache != NULL) {
        return cache_resize(XBZRLE.cache, new_size / TARGET_PAGE_SIZE) *
            TARGET_PAGE_SIZE;
    }
    return pow2floor(new_size);
}

/* accounting for migration statistics */
typedef struct AccountingInfo {
    uint64_t dup_pages;
    uint64_t norm_pages;
    uint64_t iterations;
} AccountingInfo;

static AccountingInfo acct_info;

static void acct_clear(void)
{
    memset(&acct_info, 0, sizeof(acct_info));
}

uint64_t dup_mig_bytes_transferred(void)
{
    return acct_info.dup_pages * TARGET_PAGE_SIZE;
}

uint64_t dup_mig_pages_transferred(void)
{
    return acct_info.dup_pages;
}

uint64_t norm_mig_bytes_transferred(void)
{
    return acct_info.norm_pages * TARGET_PAGE_SIZE;
}

uint64_t norm_mig_pages_transferred(void)
{
    return acct_info.norm_pages;
}

static void save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
                           int cont, int flag)
{
    qemu_put_be64(f, offset | cont | flag);
    if (!cont) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr,
                        strlen(block->idstr));
    }

}

#define ENCODING_FLAG_XBZRLE 0x1

static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data,
                            ram_addr_t current_addr, RAMBlock *block,
                            ram_addr_t offset, int cont)
{
    int encoded_len = 0, bytes_sent = -1;
    uint8_t *prev_cached_page;

    if (!cache_is_cached(XBZRLE.cache, current_addr)) {
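        /* page is not in the XBZRLE cache yet: store a copy of it and
         * return -1 so the caller falls back to sending the full page */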
        cache_insert(XBZRLE.cache, current_addr,
                     g_memdup(current_data, TARGET_PAGE_SIZE));
        return -1;
    }

    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save current buffer into memory */
    memcpy(XBZRLE.current_buf, current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);
    if (encoded_len == 0) {
        DPRINTF("Skipping unmodified page\n");
        return 0;
    } else if (encoded_len == -1) {
        DPRINTF("Overflow\n");
        /* update data in the cache */
        memcpy(prev_cached_page, current_data, TARGET_PAGE_SIZE);
        return -1;
    }

    /* we need to update the data in the cache, in order to get the same data
       as the destination */
    memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);

    /* Send XBZRLE based compressed page */
    save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(f, encoded_len);
    qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
    bytes_sent = encoded_len + 1 + 2;

    return bytes_sent;
}

static RAMBlock *last_block;
static ram_addr_t last_offset;

/*
 * ram_save_block: Writes a page of memory to the stream f
 *
 * Returns:  0: if the page hasn't changed
 *          -1: if there are no more dirty pages
 *           n: the number of bytes written otherwise
 */

static int ram_save_block(QEMUFile *f)
{
    RAMBlock *block = last_block;
    ram_addr_t offset = last_offset;
    int bytes_sent = -1;
    MemoryRegion *mr;
    ram_addr_t current_addr;

    if (!block) {
        block = QLIST_FIRST(&ram_list.blocks);
    }

    do {
        mr = block->mr;
        if (memory_region_get_dirty(mr, offset, TARGET_PAGE_SIZE,
                                    DIRTY_MEMORY_MIGRATION)) {
            uint8_t *p;
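            /* RAM_SAVE_FLAG_CONTINUE marks a page from the same RAMBlock as
             * the previous one, so the block id string is not resent */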
            int cont = (block == last_block) ? RAM_SAVE_FLAG_CONTINUE : 0;

            memory_region_reset_dirty(mr, offset, TARGET_PAGE_SIZE,
                                      DIRTY_MEMORY_MIGRATION);

            p = memory_region_get_ram_ptr(mr) + offset;

            if (is_dup_page(p)) {
                acct_info.dup_pages++;
                save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_COMPRESS);
                qemu_put_byte(f, *p);
                bytes_sent = 1;
            } else if (migrate_use_xbzrle()) {
                current_addr = block->offset + offset;
                bytes_sent = save_xbzrle_page(f, p, current_addr, block,
                                              offset, cont);
                p = get_cached_data(XBZRLE.cache, current_addr);
            }

            /* if we haven't sent this page yet (e.g. XBZRLE cache miss or
               overflow), send it as a normal page */
            if (bytes_sent == -1) {
                save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_PAGE);
                qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
                bytes_sent = TARGET_PAGE_SIZE;
                acct_info.norm_pages++;
            }

            /* if page is unmodified, continue to the next */
            if (bytes_sent != 0) {
                break;
            }
        }

        offset += TARGET_PAGE_SIZE;
        if (offset >= block->length) {
            offset = 0;
            block = QLIST_NEXT(block, next);
            if (!block) {
                block = QLIST_FIRST(&ram_list.blocks);
            }
        }
    } while (block != last_block || offset != last_offset);

    last_block = block;
    last_offset = offset;

    return bytes_sent;
}

static uint64_t bytes_transferred;

static ram_addr_t ram_save_remaining(void)
{
    return ram_list.dirty_pages;
}

uint64_t ram_bytes_remaining(void)
{
    return ram_save_remaining() * TARGET_PAGE_SIZE;
}

uint64_t ram_bytes_transferred(void)
{
    return bytes_transferred;
}

uint64_t ram_bytes_total(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        total += block->length;
    }

    return total;
}

static int block_compar(const void *a, const void *b)
{
    RAMBlock * const *ablock = a;
    RAMBlock * const *bblock = b;

    return strcmp((*ablock)->idstr, (*bblock)->idstr);
}

static void sort_ram_list(void)
{
    RAMBlock *block, *nblock, **blocks;
    int n;
    n = 0;
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ++n;
    }
    blocks = g_malloc(n * sizeof *blocks);
    n = 0;
    QLIST_FOREACH_SAFE(block, &ram_list.blocks, next, nblock) {
        blocks[n++] = block;
        QLIST_REMOVE(block, next);
    }
    qsort(blocks, n, sizeof *blocks, block_compar);
    while (--n >= 0) {
        QLIST_INSERT_HEAD(&ram_list.blocks, blocks[n], next);
    }
    g_free(blocks);
}

static void migration_end(void)
{
    memory_global_dirty_log_stop();

    if (migrate_use_xbzrle()) {
        cache_fini(XBZRLE.cache);
        g_free(XBZRLE.cache);
        g_free(XBZRLE.encoded_buf);
        g_free(XBZRLE.current_buf);
        g_free(XBZRLE.decoded_buf);
        XBZRLE.cache = NULL;
    }
}

static void ram_migration_cancel(void *opaque)
{
    migration_end();
}

#define MAX_WAIT 50 /* ms, half buffered_file limit */

static int ram_save_setup(QEMUFile *f, void *opaque)
{
    ram_addr_t addr;
    RAMBlock *block;

    bytes_transferred = 0;
    last_block = NULL;
    last_offset = 0;
    sort_ram_list();

    if (migrate_use_xbzrle()) {
        XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
                                  TARGET_PAGE_SIZE,
                                  TARGET_PAGE_SIZE);
        if (!XBZRLE.cache) {
            DPRINTF("Error creating cache\n");
            return -1;
        }
        XBZRLE.encoded_buf = g_malloc0(TARGET_PAGE_SIZE);
        XBZRLE.current_buf = g_malloc(TARGET_PAGE_SIZE);
        acct_clear();
    }

    /* Make sure all dirty bits are set */
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        for (addr = 0; addr < block->length; addr += TARGET_PAGE_SIZE) {
            if (!memory_region_get_dirty(block->mr, addr, TARGET_PAGE_SIZE,
                                         DIRTY_MEMORY_MIGRATION)) {
                memory_region_set_dirty(block->mr, addr, TARGET_PAGE_SIZE);
            }
        }
    }

    memory_global_dirty_log_start();

    qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
        qemu_put_be64(f, block->length);
    }

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}

static int ram_save_iterate(QEMUFile *f, void *opaque)
{
    uint64_t bytes_transferred_last;
    double bwidth = 0;
    int ret;
    int i;
    uint64_t expected_time;

    bytes_transferred_last = bytes_transferred;
    bwidth = qemu_get_clock_ns(rt_clock);

    i = 0;
    while ((ret = qemu_file_rate_limit(f)) == 0) {
        int bytes_sent;

        bytes_sent = ram_save_block(f);
        /* no more blocks to send */
        if (bytes_sent < 0) {
            break;
        }
        bytes_transferred += bytes_sent;
        acct_info.iterations++;
        /* we want to check in the 1st loop, just in case it was the 1st time
           and we had to sync the dirty bitmap.
           qemu_get_clock_ns() is a bit expensive, so we only check once
           every 64 iterations
        */
        if ((i & 63) == 0) {
            uint64_t t1 = (qemu_get_clock_ns(rt_clock) - bwidth) / 1000000;
            if (t1 > MAX_WAIT) {
                DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
                        t1, i);
                break;
            }
        }
        i++;
    }

    if (ret < 0) {
        return ret;
    }

    bwidth = qemu_get_clock_ns(rt_clock) - bwidth;
    bwidth = (bytes_transferred - bytes_transferred_last) / bwidth;

    /* if we haven't transferred anything this round, force expected_time to
     * a very high value, but without crashing */
    if (bwidth == 0) {
        bwidth = 0.000001;
    }

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    expected_time = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth;

    DPRINTF("ram_save_live: expected(%" PRIu64 ") <= max(%" PRIu64 ")?\n",
            expected_time, migrate_max_downtime());

    if (expected_time <= migrate_max_downtime()) {
        memory_global_sync_dirty_bitmap(get_system_memory());
        expected_time = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth;

        return expected_time <= migrate_max_downtime();
    }
    return 0;
}

static int ram_save_complete(QEMUFile *f, void *opaque)
{
    memory_global_sync_dirty_bitmap(get_system_memory());

    /* try transferring iterative blocks of memory */

    /* flush all remaining blocks regardless of rate limiting */
    while (true) {
        int bytes_sent;

        bytes_sent = ram_save_block(f);
        /* no more blocks to send */
        if (bytes_sent < 0) {
            break;
        }
        bytes_transferred += bytes_sent;
    }
    memory_global_dirty_log_stop();

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}

static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
{
    int ret, rc = 0;
    unsigned int xh_len;
    int xh_flags;

    if (!XBZRLE.decoded_buf) {
        XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
    }

    /* extract RLE header */
    xh_flags = qemu_get_byte(f);
    xh_len = qemu_get_be16(f);

    if (xh_flags != ENCODING_FLAG_XBZRLE) {
"Failed to load XBZRLE page - wrong compression!\n"); 605 return -1; 606 } 607 608 if (xh_len > TARGET_PAGE_SIZE) { 609 fprintf(stderr, "Failed to load XBZRLE page - len overflow!\n"); 610 return -1; 611 } 612 /* load data and decode */ 613 qemu_get_buffer(f, XBZRLE.decoded_buf, xh_len); 614 615 /* decode RLE */ 616 ret = xbzrle_decode_buffer(XBZRLE.decoded_buf, xh_len, host, 617 TARGET_PAGE_SIZE); 618 if (ret == -1) { 619 fprintf(stderr, "Failed to load XBZRLE page - decode error!\n"); 620 rc = -1; 621 } else if (ret > TARGET_PAGE_SIZE) { 622 fprintf(stderr, "Failed to load XBZRLE page - size %d exceeds %d!\n", 623 ret, TARGET_PAGE_SIZE); 624 abort(); 625 } 626 627 return rc; 628 } 629 630 static inline void *host_from_stream_offset(QEMUFile *f, 631 ram_addr_t offset, 632 int flags) 633 { 634 static RAMBlock *block = NULL; 635 char id[256]; 636 uint8_t len; 637 638 if (flags & RAM_SAVE_FLAG_CONTINUE) { 639 if (!block) { 640 fprintf(stderr, "Ack, bad migration stream!\n"); 641 return NULL; 642 } 643 644 return memory_region_get_ram_ptr(block->mr) + offset; 645 } 646 647 len = qemu_get_byte(f); 648 qemu_get_buffer(f, (uint8_t *)id, len); 649 id[len] = 0; 650 651 QLIST_FOREACH(block, &ram_list.blocks, next) { 652 if (!strncmp(id, block->idstr, sizeof(id))) 653 return memory_region_get_ram_ptr(block->mr) + offset; 654 } 655 656 fprintf(stderr, "Can't find block %s!\n", id); 657 return NULL; 658 } 659 660 static int ram_load(QEMUFile *f, void *opaque, int version_id) 661 { 662 ram_addr_t addr; 663 int flags, ret = 0; 664 int error; 665 static uint64_t seq_iter; 666 667 seq_iter++; 668 669 if (version_id < 4 || version_id > 4) { 670 return -EINVAL; 671 } 672 673 do { 674 addr = qemu_get_be64(f); 675 676 flags = addr & ~TARGET_PAGE_MASK; 677 addr &= TARGET_PAGE_MASK; 678 679 if (flags & RAM_SAVE_FLAG_MEM_SIZE) { 680 if (version_id == 4) { 681 /* Synchronize RAM block list */ 682 char id[256]; 683 ram_addr_t length; 684 ram_addr_t total_ram_bytes = addr; 685 686 while (total_ram_bytes) { 687 RAMBlock *block; 688 uint8_t len; 689 690 len = qemu_get_byte(f); 691 qemu_get_buffer(f, (uint8_t *)id, len); 692 id[len] = 0; 693 length = qemu_get_be64(f); 694 695 QLIST_FOREACH(block, &ram_list.blocks, next) { 696 if (!strncmp(id, block->idstr, sizeof(id))) { 697 if (block->length != length) { 698 ret = -EINVAL; 699 goto done; 700 } 701 break; 702 } 703 } 704 705 if (!block) { 706 fprintf(stderr, "Unknown ramblock \"%s\", cannot " 707 "accept migration\n", id); 708 ret = -EINVAL; 709 goto done; 710 } 711 712 total_ram_bytes -= length; 713 } 714 } 715 } 716 717 if (flags & RAM_SAVE_FLAG_COMPRESS) { 718 void *host; 719 uint8_t ch; 720 721 host = host_from_stream_offset(f, addr, flags); 722 if (!host) { 723 return -EINVAL; 724 } 725 726 ch = qemu_get_byte(f); 727 memset(host, ch, TARGET_PAGE_SIZE); 728 #ifndef _WIN32 729 if (ch == 0 && 730 (!kvm_enabled() || kvm_has_sync_mmu())) { 731 qemu_madvise(host, TARGET_PAGE_SIZE, QEMU_MADV_DONTNEED); 732 } 733 #endif 734 } else if (flags & RAM_SAVE_FLAG_PAGE) { 735 void *host; 736 737 host = host_from_stream_offset(f, addr, flags); 738 if (!host) { 739 return -EINVAL; 740 } 741 742 qemu_get_buffer(f, host, TARGET_PAGE_SIZE); 743 } else if (flags & RAM_SAVE_FLAG_XBZRLE) { 744 if (!migrate_use_xbzrle()) { 745 return -EINVAL; 746 } 747 void *host = host_from_stream_offset(f, addr, flags); 748 if (!host) { 749 return -EINVAL; 750 } 751 752 if (load_xbzrle(f, addr, host) < 0) { 753 ret = -EINVAL; 754 goto done; 755 } 756 } 757 error = qemu_file_get_error(f); 758 if (error) 
        if (error) {
            ret = error;
            goto done;
        }
    } while (!(flags & RAM_SAVE_FLAG_EOS));

done:
    DPRINTF("Completed load of VM with exit code %d seq iteration "
            "%" PRIu64 "\n", ret, seq_iter);
    return ret;
}

SaveVMHandlers savevm_ram_handlers = {
    .save_live_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete = ram_save_complete,
    .load_state = ram_load,
    .cancel = ram_migration_cancel,
};

#ifdef HAS_AUDIO
struct soundhw {
    const char *name;
    const char *descr;
    int enabled;
    int isa;
    union {
        int (*init_isa) (ISABus *bus);
        int (*init_pci) (PCIBus *bus);
    } init;
};

static struct soundhw soundhw[] = {
#ifdef HAS_AUDIO_CHOICE
#ifdef CONFIG_PCSPK
    {
        "pcspk",
        "PC speaker",
        0,
        1,
        { .init_isa = pcspk_audio_init }
    },
#endif

#ifdef CONFIG_SB16
    {
        "sb16",
        "Creative Sound Blaster 16",
        0,
        1,
        { .init_isa = SB16_init }
    },
#endif

#ifdef CONFIG_CS4231A
    {
        "cs4231a",
        "CS4231A",
        0,
        1,
        { .init_isa = cs4231a_init }
    },
#endif

#ifdef CONFIG_ADLIB
    {
        "adlib",
#ifdef HAS_YMF262
        "Yamaha YMF262 (OPL3)",
#else
        "Yamaha YM3812 (OPL2)",
#endif
        0,
        1,
        { .init_isa = Adlib_init }
    },
#endif

#ifdef CONFIG_GUS
    {
        "gus",
        "Gravis Ultrasound GF1",
        0,
        1,
        { .init_isa = GUS_init }
    },
#endif

#ifdef CONFIG_AC97
    {
        "ac97",
        "Intel 82801AA AC97 Audio",
        0,
        0,
        { .init_pci = ac97_init }
    },
#endif

#ifdef CONFIG_ES1370
    {
        "es1370",
        "ENSONIQ AudioPCI ES1370",
        0,
        0,
        { .init_pci = es1370_init }
    },
#endif

#ifdef CONFIG_HDA
    {
        "hda",
        "Intel HD Audio",
        0,
        0,
        { .init_pci = intel_hda_and_codec_init }
    },
#endif

#endif /* HAS_AUDIO_CHOICE */

    { NULL, NULL, 0, 0, { NULL } }
};

void select_soundhw(const char *optarg)
{
    struct soundhw *c;

    if (is_help_option(optarg)) {
    show_valid_cards:

        printf("Valid sound card names (comma separated):\n");
        for (c = soundhw; c->name; ++c) {
            printf("%-11s %s\n", c->name, c->descr);
        }
        printf("\n-soundhw all will enable all of the above\n");
        exit(!is_help_option(optarg));
    } else {
        size_t l;
        const char *p;
        char *e;
        int bad_card = 0;

        if (!strcmp(optarg, "all")) {
            for (c = soundhw; c->name; ++c) {
                c->enabled = 1;
            }
            return;
        }

        p = optarg;
        while (*p) {
            e = strchr(p, ',');
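            /* length of the current card name, up to the next comma or the
             * end of the option string */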
            l = !e ? strlen(p) : (size_t)(e - p);

            for (c = soundhw; c->name; ++c) {
                if (!strncmp(c->name, p, l) && !c->name[l]) {
                    c->enabled = 1;
                    break;
                }
            }

            if (!c->name) {
                if (l > 80) {
                    fprintf(stderr,
                            "Unknown sound card name (too big to show)\n");
                } else {
                    fprintf(stderr, "Unknown sound card name `%.*s'\n",
                            (int) l, p);
                }
                bad_card = 1;
            }
            p += l + (e != NULL);
        }

        if (bad_card) {
            goto show_valid_cards;
        }
    }
}

void audio_init(ISABus *isa_bus, PCIBus *pci_bus)
{
    struct soundhw *c;

    for (c = soundhw; c->name; ++c) {
        if (c->enabled) {
            if (c->isa) {
                if (isa_bus) {
                    c->init.init_isa(isa_bus);
                }
            } else {
                if (pci_bus) {
                    c->init.init_pci(pci_bus);
                }
            }
        }
    }
}
#else
void select_soundhw(const char *optarg)
{
}
void audio_init(ISABus *isa_bus, PCIBus *pci_bus)
{
}
#endif

int qemu_uuid_parse(const char *str, uint8_t *uuid)
{
    int ret;

    if (strlen(str) != 36) {
        return -1;
    }

    ret = sscanf(str, UUID_FMT, &uuid[0], &uuid[1], &uuid[2], &uuid[3],
                 &uuid[4], &uuid[5], &uuid[6], &uuid[7], &uuid[8], &uuid[9],
                 &uuid[10], &uuid[11], &uuid[12], &uuid[13], &uuid[14],
                 &uuid[15]);

    if (ret != 16) {
        return -1;
    }
#ifdef TARGET_I386
    smbios_add_field(1, offsetof(struct smbios_type_1, uuid), 16, uuid);
#endif
    return 0;
}

void do_acpitable_option(const char *optarg)
{
#ifdef TARGET_I386
    if (acpi_table_add(optarg) < 0) {
        fprintf(stderr, "Wrong acpi table provided\n");
        exit(1);
    }
#endif
}

void do_smbios_option(const char *optarg)
{
#ifdef TARGET_I386
    if (smbios_entry_add(optarg) < 0) {
        fprintf(stderr, "Wrong smbios provided\n");
        exit(1);
    }
#endif
}

void cpudef_init(void)
{
#if defined(cpudef_setup)
    cpudef_setup(); /* parse cpu definitions in target config file */
#endif
}

int audio_available(void)
{
#ifdef HAS_AUDIO
    return 1;
#else
    return 0;
#endif
}

int tcg_available(void)
{
    return 1;
}

int kvm_available(void)
{
#ifdef CONFIG_KVM
    return 1;
#else
    return 0;
#endif
}

int xen_available(void)
{
#ifdef CONFIG_XEN
    return 1;
#else
    return 0;
#endif
}