xref: /qemu/system/arch_init.c (revision 0445259ba686f9ddf395f700c7d5b1ac400a451c)
1 /*
2  * QEMU System Emulator
3  *
4  * Copyright (c) 2003-2008 Fabrice Bellard
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 #include <stdint.h>
25 #include <stdarg.h>
26 #include <stdlib.h>
27 #ifndef _WIN32
28 #include <sys/types.h>
29 #include <sys/mman.h>
30 #endif
31 #include "config.h"
32 #include "monitor/monitor.h"
33 #include "sysemu/sysemu.h"
34 #include "qemu/bitops.h"
35 #include "qemu/bitmap.h"
36 #include "sysemu/arch_init.h"
37 #include "audio/audio.h"
38 #include "hw/i386/pc.h"
39 #include "hw/pci/pci.h"
40 #include "hw/audio/audio.h"
41 #include "sysemu/kvm.h"
42 #include "migration/migration.h"
43 #include "exec/gdbstub.h"
44 #include "hw/i386/smbios.h"
45 #include "exec/address-spaces.h"
46 #include "hw/audio/pcspk.h"
47 #include "migration/page_cache.h"
48 #include "qemu/config-file.h"
49 #include "qmp-commands.h"
50 #include "trace.h"
51 #include "exec/cpu-all.h"
52 #include "hw/acpi/acpi.h"
53 
54 #ifdef DEBUG_ARCH_INIT
55 #define DPRINTF(fmt, ...) \
56     do { fprintf(stdout, "arch_init: " fmt, ## __VA_ARGS__); } while (0)
57 #else
58 #define DPRINTF(fmt, ...) \
59     do { } while (0)
60 #endif
61 
62 #ifdef TARGET_SPARC
63 int graphic_width = 1024;
64 int graphic_height = 768;
65 int graphic_depth = 8;
66 #else
67 int graphic_width = 800;
68 int graphic_height = 600;
69 int graphic_depth = 15;
70 #endif
71 
72 
73 #if defined(TARGET_ALPHA)
74 #define QEMU_ARCH QEMU_ARCH_ALPHA
75 #elif defined(TARGET_ARM)
76 #define QEMU_ARCH QEMU_ARCH_ARM
77 #elif defined(TARGET_CRIS)
78 #define QEMU_ARCH QEMU_ARCH_CRIS
79 #elif defined(TARGET_I386)
80 #define QEMU_ARCH QEMU_ARCH_I386
81 #elif defined(TARGET_M68K)
82 #define QEMU_ARCH QEMU_ARCH_M68K
83 #elif defined(TARGET_LM32)
84 #define QEMU_ARCH QEMU_ARCH_LM32
85 #elif defined(TARGET_MICROBLAZE)
86 #define QEMU_ARCH QEMU_ARCH_MICROBLAZE
87 #elif defined(TARGET_MIPS)
88 #define QEMU_ARCH QEMU_ARCH_MIPS
89 #elif defined(TARGET_MOXIE)
90 #define QEMU_ARCH QEMU_ARCH_MOXIE
91 #elif defined(TARGET_OPENRISC)
92 #define QEMU_ARCH QEMU_ARCH_OPENRISC
93 #elif defined(TARGET_PPC)
94 #define QEMU_ARCH QEMU_ARCH_PPC
95 #elif defined(TARGET_S390X)
96 #define QEMU_ARCH QEMU_ARCH_S390X
97 #elif defined(TARGET_SH4)
98 #define QEMU_ARCH QEMU_ARCH_SH4
99 #elif defined(TARGET_SPARC)
100 #define QEMU_ARCH QEMU_ARCH_SPARC
101 #elif defined(TARGET_XTENSA)
102 #define QEMU_ARCH QEMU_ARCH_XTENSA
103 #elif defined(TARGET_UNICORE32)
104 #define QEMU_ARCH QEMU_ARCH_UNICORE32
105 #endif
106 
107 const uint32_t arch_type = QEMU_ARCH;
108 
109 /***********************************************************/
110 /* ram save/restore */
111 
112 #define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
113 #define RAM_SAVE_FLAG_COMPRESS 0x02
114 #define RAM_SAVE_FLAG_MEM_SIZE 0x04
115 #define RAM_SAVE_FLAG_PAGE     0x08
116 #define RAM_SAVE_FLAG_EOS      0x10
117 #define RAM_SAVE_FLAG_CONTINUE 0x20
118 #define RAM_SAVE_FLAG_XBZRLE   0x40
119 
120 
121 static struct defconfig_file {
122     const char *filename;
123     /* Indicates it is a user config file (disabled by -no-user-config) */
124     bool userconfig;
125 } default_config_files[] = {
126     { CONFIG_QEMU_CONFDIR "/qemu.conf",                   true },
127     { CONFIG_QEMU_CONFDIR "/target-" TARGET_ARCH ".conf", true },
128     { NULL }, /* end of list */
129 };
130 
131 
132 int qemu_read_default_config_files(bool userconfig)
133 {
134     int ret;
135     struct defconfig_file *f;
136 
137     for (f = default_config_files; f->filename; f++) {
138         if (!userconfig && f->userconfig) {
139             continue;
140         }
141         ret = qemu_read_config_file(f->filename);
142         if (ret < 0 && ret != -ENOENT) {
143             return ret;
144         }
145     }
146 
147     return 0;
148 }
149 
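/* Returns true if the whole page consists of zero bytes. */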
150 static inline bool is_zero_page(uint8_t *p)
151 {
152     return buffer_find_nonzero_offset(p, TARGET_PAGE_SIZE) ==
153         TARGET_PAGE_SIZE;
154 }
155 
156 /* This struct holds the XBZRLE cache and the static buffers
157    used for compression and decompression */
158 static struct {
159     /* buffer used for XBZRLE encoding */
160     uint8_t *encoded_buf;
161     /* buffer for storing page content */
162     uint8_t *current_buf;
163     /* buffer used for XBZRLE decoding */
164     uint8_t *decoded_buf;
165     /* Cache for XBZRLE */
166     PageCache *cache;
167 } XBZRLE = {
168     .encoded_buf = NULL,
169     .current_buf = NULL,
170     .decoded_buf = NULL,
171     .cache = NULL,
172 };
173 
174 
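/* Resize the XBZRLE page cache.  Returns the resulting cache size in bytes;
 * if the cache has not been allocated yet, just report the requested size
 * rounded down to a power of two. */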
175 int64_t xbzrle_cache_resize(int64_t new_size)
176 {
177     if (XBZRLE.cache != NULL) {
178         return cache_resize(XBZRLE.cache, new_size / TARGET_PAGE_SIZE) *
179             TARGET_PAGE_SIZE;
180     }
181     return pow2floor(new_size);
182 }
183 
184 /* accounting for migration statistics */
185 typedef struct AccountingInfo {
186     uint64_t dup_pages;
187     uint64_t skipped_pages;
188     uint64_t norm_pages;
189     uint64_t iterations;
190     uint64_t xbzrle_bytes;
191     uint64_t xbzrle_pages;
192     uint64_t xbzrle_cache_miss;
193     uint64_t xbzrle_overflows;
194 } AccountingInfo;
195 
196 static AccountingInfo acct_info;
197 
198 static void acct_clear(void)
199 {
200     memset(&acct_info, 0, sizeof(acct_info));
201 }
202 
203 uint64_t dup_mig_bytes_transferred(void)
204 {
205     return acct_info.dup_pages * TARGET_PAGE_SIZE;
206 }
207 
208 uint64_t dup_mig_pages_transferred(void)
209 {
210     return acct_info.dup_pages;
211 }
212 
213 uint64_t skipped_mig_bytes_transferred(void)
214 {
215     return acct_info.skipped_pages * TARGET_PAGE_SIZE;
216 }
217 
218 uint64_t skipped_mig_pages_transferred(void)
219 {
220     return acct_info.skipped_pages;
221 }
222 
223 uint64_t norm_mig_bytes_transferred(void)
224 {
225     return acct_info.norm_pages * TARGET_PAGE_SIZE;
226 }
227 
228 uint64_t norm_mig_pages_transferred(void)
229 {
230     return acct_info.norm_pages;
231 }
232 
233 uint64_t xbzrle_mig_bytes_transferred(void)
234 {
235     return acct_info.xbzrle_bytes;
236 }
237 
238 uint64_t xbzrle_mig_pages_transferred(void)
239 {
240     return acct_info.xbzrle_pages;
241 }
242 
243 uint64_t xbzrle_mig_pages_cache_miss(void)
244 {
245     return acct_info.xbzrle_cache_miss;
246 }
247 
248 uint64_t xbzrle_mig_pages_overflow(void)
249 {
250     return acct_info.xbzrle_overflows;
251 }
252 
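/* Write the header for one RAM page: the offset within its block OR-ed with
 * the flags, plus the block idstr unless RAM_SAVE_FLAG_CONTINUE indicates the
 * same block as the previous page.  Returns the number of bytes written. */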
253 static size_t save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
254                              int cont, int flag)
255 {
256     size_t size;
257 
258     qemu_put_be64(f, offset | cont | flag);
259     size = 8;
260 
261     if (!cont) {
262         qemu_put_byte(f, strlen(block->idstr));
263         qemu_put_buffer(f, (uint8_t *)block->idstr,
264                         strlen(block->idstr));
265         size += 1 + strlen(block->idstr);
266     }
267     return size;
268 }
269 
270 #define ENCODING_FLAG_XBZRLE 0x1
271 
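/* Try to send the page as an XBZRLE delta against its cached copy.
 * Returns -1 on a cache miss or encoding overflow (the caller then sends the
 * page as a normal page), 0 if the page is unchanged, or the number of bytes
 * written to the stream. */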
272 static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data,
273                             ram_addr_t current_addr, RAMBlock *block,
274                             ram_addr_t offset, int cont, bool last_stage)
275 {
276     int encoded_len = 0, bytes_sent = -1;
277     uint8_t *prev_cached_page;
278 
279     if (!cache_is_cached(XBZRLE.cache, current_addr)) {
280         if (!last_stage) {
281             cache_insert(XBZRLE.cache, current_addr, current_data);
282         }
283         acct_info.xbzrle_cache_miss++;
284         return -1;
285     }
286 
287     prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);
288 
289     /* take a stable copy of the current page for encoding */
290     memcpy(XBZRLE.current_buf, current_data, TARGET_PAGE_SIZE);
291 
292     /* XBZRLE encoding (if there is no overflow) */
293     encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
294                                        TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
295                                        TARGET_PAGE_SIZE);
296     if (encoded_len == 0) {
297         DPRINTF("Skipping unmodified page\n");
298         return 0;
299     } else if (encoded_len == -1) {
300         DPRINTF("Overflow\n");
301         acct_info.xbzrle_overflows++;
302         /* update data in the cache */
303         memcpy(prev_cached_page, current_data, TARGET_PAGE_SIZE);
304         return -1;
305     }
306 
307     /* update the cache so it matches the data the destination will now have */
308     if (!last_stage) {
309         memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
310     }
311 
312     /* Send XBZRLE based compressed page */
313     bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_XBZRLE);
314     qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
315     qemu_put_be16(f, encoded_len);
316     qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
317     bytes_sent += encoded_len + 1 + 2;
318     acct_info.xbzrle_pages++;
319     acct_info.xbzrle_bytes += bytes_sent;
320 
321     return bytes_sent;
322 }
323 
324 
325 /* This is the last block that we have visited searching for dirty pages
326  */
327 static RAMBlock *last_seen_block;
328 /* This is the last block from where we have sent data */
329 static RAMBlock *last_sent_block;
330 static ram_addr_t last_offset;
331 static unsigned long *migration_bitmap;
332 static uint64_t migration_dirty_pages;
333 static uint32_t last_version;
334 static bool ram_bulk_stage;
335 
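/* Return the offset within 'mr' of the next dirty page, starting the search
 * at 'start', and clear its bit in the migration bitmap.  During the bulk
 * stage the scan can be skipped because every page is still dirty.  A result
 * at or beyond the region size means no dirty page was left. */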
336 static inline
337 ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
338                                                  ram_addr_t start)
339 {
340     unsigned long base = mr->ram_addr >> TARGET_PAGE_BITS;
341     unsigned long nr = base + (start >> TARGET_PAGE_BITS);
342     unsigned long size = base + (int128_get64(mr->size) >> TARGET_PAGE_BITS);
343 
344     unsigned long next;
345 
346     if (ram_bulk_stage && nr > base) {
347         next = nr + 1;
348     } else {
349         next = find_next_bit(migration_bitmap, size, nr);
350     }
351 
352     if (next < size) {
353         clear_bit(next, migration_bitmap);
354         migration_dirty_pages--;
355     }
356     return (next - base) << TARGET_PAGE_BITS;
357 }
358 
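/* Mark the page at 'offset' within 'mr' as dirty in the migration bitmap;
 * returns the previous state of the bit so callers can tell whether the page
 * was already dirty. */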
359 static inline bool migration_bitmap_set_dirty(MemoryRegion *mr,
360                                               ram_addr_t offset)
361 {
362     bool ret;
363     int nr = (mr->ram_addr + offset) >> TARGET_PAGE_BITS;
364 
365     ret = test_and_set_bit(nr, migration_bitmap);
366 
367     if (!ret) {
368         migration_dirty_pages++;
369     }
370     return ret;
371 }
372 
373 /* Needs iothread lock! */
374 
375 static void migration_bitmap_sync(void)
376 {
377     RAMBlock *block;
378     ram_addr_t addr;
379     uint64_t num_dirty_pages_init = migration_dirty_pages;
380     MigrationState *s = migrate_get_current();
381     static int64_t start_time;
382     static int64_t num_dirty_pages_period;
383     int64_t end_time;
384 
385     if (!start_time) {
386         start_time = qemu_get_clock_ms(rt_clock);
387     }
388 
389     trace_migration_bitmap_sync_start();
390     memory_global_sync_dirty_bitmap(get_system_memory());
391 
392     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
393         for (addr = 0; addr < block->length; addr += TARGET_PAGE_SIZE) {
394             if (memory_region_test_and_clear_dirty(block->mr,
395                                                    addr, TARGET_PAGE_SIZE,
396                                                    DIRTY_MEMORY_MIGRATION)) {
397                 migration_bitmap_set_dirty(block->mr, addr);
398             }
399         }
400     }
401     trace_migration_bitmap_sync_end(migration_dirty_pages
402                                     - num_dirty_pages_init);
403     num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
404     end_time = qemu_get_clock_ms(rt_clock);
405 
406     /* more than 1 second = 1000 milliseconds */
407     if (end_time > start_time + 1000) {
408         s->dirty_pages_rate = num_dirty_pages_period * 1000
409             / (end_time - start_time);
410         s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
411         start_time = end_time;
412         num_dirty_pages_period = 0;
413     }
414 }
415 
416 /*
417  * ram_save_block: Writes a page of memory to the stream f
418  *
419  * Returns:  The number of bytes written.
420  *           0 means no dirty pages
421  */
422 
423 static int ram_save_block(QEMUFile *f, bool last_stage)
424 {
425     RAMBlock *block = last_seen_block;
426     ram_addr_t offset = last_offset;
427     bool complete_round = false;
428     int bytes_sent = 0;
429     MemoryRegion *mr;
430     ram_addr_t current_addr;
431 
432     if (!block) {
433         block = QTAILQ_FIRST(&ram_list.blocks);
434     }
435     while (true) {
436         mr = block->mr;
437         offset = migration_bitmap_find_and_reset_dirty(mr, offset);
438         if (complete_round && block == last_seen_block &&
439             offset >= last_offset) {
440             break;
441         }
442         if (offset >= block->length) {
443             offset = 0;
444             block = QTAILQ_NEXT(block, next);
445             if (!block) {
446                 block = QTAILQ_FIRST(&ram_list.blocks);
447                 complete_round = true;
448                 ram_bulk_stage = false;
449             }
450         } else {
451             uint8_t *p;
452             int cont = (block == last_sent_block) ?
453                 RAM_SAVE_FLAG_CONTINUE : 0;
454 
455             p = memory_region_get_ram_ptr(mr) + offset;
456 
457             /* When in doubt, send the page as a normal page */
458             bytes_sent = -1;
459             if (is_zero_page(p)) {
460                 acct_info.dup_pages++;
461                 if (!ram_bulk_stage) {
462                     bytes_sent = save_block_hdr(f, block, offset, cont,
463                                                 RAM_SAVE_FLAG_COMPRESS);
464                     qemu_put_byte(f, 0);
465                     bytes_sent++;
466                 } else {
467                     acct_info.skipped_pages++;
468                     bytes_sent = 0;
469                 }
470             } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
471                 current_addr = block->offset + offset;
472                 bytes_sent = save_xbzrle_page(f, p, current_addr, block,
473                                               offset, cont, last_stage);
474                 if (!last_stage) {
475                     p = get_cached_data(XBZRLE.cache, current_addr);
476                 }
477             }
478 
479             /* XBZRLE overflow or normal page */
480             if (bytes_sent == -1) {
481                 bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_PAGE);
482                 qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
483                 bytes_sent += TARGET_PAGE_SIZE;
484                 acct_info.norm_pages++;
485             }
486 
487             /* if page is unmodified, continue to the next */
488             if (bytes_sent > 0) {
489                 last_sent_block = block;
490                 break;
491             }
492         }
493     }
494     last_seen_block = block;
495     last_offset = offset;
496 
497     return bytes_sent;
498 }
499 
500 static uint64_t bytes_transferred;
501 
502 static ram_addr_t ram_save_remaining(void)
503 {
504     return migration_dirty_pages;
505 }
506 
507 uint64_t ram_bytes_remaining(void)
508 {
509     return ram_save_remaining() * TARGET_PAGE_SIZE;
510 }
511 
512 uint64_t ram_bytes_transferred(void)
513 {
514     return bytes_transferred;
515 }
516 
517 uint64_t ram_bytes_total(void)
518 {
519     RAMBlock *block;
520     uint64_t total = 0;
521 
522     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
523         total += block->length;
524     }
525     return total;
526 }
527 
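/* Tear down migration state: stop dirty memory logging, free the migration
 * bitmap and release the XBZRLE cache and its buffers. */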
528 static void migration_end(void)
529 {
530     if (migration_bitmap) {
531         memory_global_dirty_log_stop();
532         g_free(migration_bitmap);
533         migration_bitmap = NULL;
534     }
535 
536     if (XBZRLE.cache) {
537         cache_fini(XBZRLE.cache);
538         g_free(XBZRLE.cache);
539         g_free(XBZRLE.encoded_buf);
540         g_free(XBZRLE.current_buf);
541         g_free(XBZRLE.decoded_buf);
542         XBZRLE.cache = NULL;
543     }
544 }
545 
546 static void ram_migration_cancel(void *opaque)
547 {
548     migration_end();
549 }
550 
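/* Reset the RAM walk state so the next pass starts again from the first
 * block, in bulk mode. */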
551 static void reset_ram_globals(void)
552 {
553     last_seen_block = NULL;
554     last_sent_block = NULL;
555     last_offset = 0;
556     last_version = ram_list.version;
557     ram_bulk_stage = true;
558 }
559 
560 #define MAX_WAIT 50 /* ms, half buffered_file limit */
561 
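/* Setup stage of RAM live migration: allocate the migration bitmap with all
 * pages marked dirty, create the XBZRLE cache if enabled, start dirty
 * logging and send the list of RAM blocks (idstr and length) to the
 * destination. */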
562 static int ram_save_setup(QEMUFile *f, void *opaque)
563 {
564     RAMBlock *block;
565     int64_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;
566 
567     migration_bitmap = bitmap_new(ram_pages);
568     bitmap_set(migration_bitmap, 0, ram_pages);
569     migration_dirty_pages = ram_pages;
570 
571     if (migrate_use_xbzrle()) {
572         XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
573                                   TARGET_PAGE_SIZE,
574                                   TARGET_PAGE_SIZE);
575         if (!XBZRLE.cache) {
576             DPRINTF("Error creating cache\n");
577             return -1;
578         }
579         XBZRLE.encoded_buf = g_malloc0(TARGET_PAGE_SIZE);
580         XBZRLE.current_buf = g_malloc(TARGET_PAGE_SIZE);
581         acct_clear();
582     }
583 
584     qemu_mutex_lock_iothread();
585     qemu_mutex_lock_ramlist();
586     bytes_transferred = 0;
587     reset_ram_globals();
588 
589     memory_global_dirty_log_start();
590     migration_bitmap_sync();
591     qemu_mutex_unlock_iothread();
592 
593     qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
594 
595     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
596         qemu_put_byte(f, strlen(block->idstr));
597         qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
598         qemu_put_be64(f, block->length);
599     }
600 
601     qemu_mutex_unlock_ramlist();
602     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
603 
604     return 0;
605 }
606 
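/* Iterative stage: keep sending dirty pages until the rate limit kicks in or
 * more than MAX_WAIT ms have been spent in this call. */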
607 static int ram_save_iterate(QEMUFile *f, void *opaque)
608 {
609     int ret;
610     int i;
611     int64_t t0;
612     int total_sent = 0;
613 
614     qemu_mutex_lock_ramlist();
615 
616     if (ram_list.version != last_version) {
617         reset_ram_globals();
618     }
619 
620     t0 = qemu_get_clock_ns(rt_clock);
621     i = 0;
622     while ((ret = qemu_file_rate_limit(f)) == 0) {
623         int bytes_sent;
624 
625         bytes_sent = ram_save_block(f, false);
626         /* no more blocks to send */
627         if (bytes_sent == 0) {
628             break;
629         }
630         total_sent += bytes_sent;
631         acct_info.iterations++;
632         /* we want to check in the 1st loop, just in case it was the 1st time
633            and we had to sync the dirty bitmap.
634            qemu_get_clock_ns() is a bit expensive, so we only check once
635            every few iterations
636         */
637         if ((i & 63) == 0) {
638             uint64_t t1 = (qemu_get_clock_ns(rt_clock) - t0) / 1000000;
639             if (t1 > MAX_WAIT) {
640                 DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
641                         t1, i);
642                 break;
643             }
644         }
645         i++;
646     }
647 
648     qemu_mutex_unlock_ramlist();
649 
650     if (ret < 0) {
651         bytes_transferred += total_sent;
652         return ret;
653     }
654 
655     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
656     total_sent += 8;
657     bytes_transferred += total_sent;
658 
659     return total_sent;
660 }
661 
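/* Final stage, called with the guest stopped: sync the dirty bitmap once
 * more and flush every remaining dirty page, ignoring rate limiting. */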
662 static int ram_save_complete(QEMUFile *f, void *opaque)
663 {
664     qemu_mutex_lock_ramlist();
665     migration_bitmap_sync();
666 
667     /* try transferring iterative blocks of memory */
668 
669     /* flush all remaining blocks regardless of rate limiting */
670     while (true) {
671         int bytes_sent;
672 
673         bytes_sent = ram_save_block(f, true);
674         /* no more blocks to send */
675         if (bytes_sent == 0) {
676             break;
677         }
678         bytes_transferred += bytes_sent;
679     }
680     migration_end();
681 
682     qemu_mutex_unlock_ramlist();
683     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
684 
685     return 0;
686 }
687 
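/* Report an estimate of the bytes still to be sent.  If the estimate falls
 * below 'max_size', resync the dirty bitmap (under the iothread lock) and
 * recompute it before returning. */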
688 static uint64_t ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size)
689 {
690     uint64_t remaining_size;
691 
692     remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
693 
694     if (remaining_size < max_size) {
695         qemu_mutex_lock_iothread();
696         migration_bitmap_sync();
697         qemu_mutex_unlock_iothread();
698         remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
699     }
700     return remaining_size;
701 }
702 
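/* Decode one XBZRLE-compressed page from the stream into 'host'.  Returns 0
 * on success and -1 on a malformed header or decode error. */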
703 static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
704 {
705     int ret, rc = 0;
706     unsigned int xh_len;
707     int xh_flags;
708 
709     if (!XBZRLE.decoded_buf) {
710         XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
711     }
712 
713     /* extract RLE header */
714     xh_flags = qemu_get_byte(f);
715     xh_len = qemu_get_be16(f);
716 
717     if (xh_flags != ENCODING_FLAG_XBZRLE) {
718         fprintf(stderr, "Failed to load XBZRLE page - wrong compression!\n");
719         return -1;
720     }
721 
722     if (xh_len > TARGET_PAGE_SIZE) {
723         fprintf(stderr, "Failed to load XBZRLE page - len overflow!\n");
724         return -1;
725     }
726     /* load data and decode */
727     qemu_get_buffer(f, XBZRLE.decoded_buf, xh_len);
728 
729     /* decode RLE */
730     ret = xbzrle_decode_buffer(XBZRLE.decoded_buf, xh_len, host,
731                                TARGET_PAGE_SIZE);
732     if (ret == -1) {
733         fprintf(stderr, "Failed to load XBZRLE page - decode error!\n");
734         rc = -1;
735     } else if (ret > TARGET_PAGE_SIZE) {
736         fprintf(stderr, "Failed to load XBZRLE page - size %d exceeds %d!\n",
737                 ret, TARGET_PAGE_SIZE);
738         abort();
739     }
740 
741     return rc;
742 }
743 
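/* Translate a (block, offset) reference read from the stream into a host
 * pointer.  RAM_SAVE_FLAG_CONTINUE means the page belongs to the same block
 * as the previous one, so no idstr is read. */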
744 static inline void *host_from_stream_offset(QEMUFile *f,
745                                             ram_addr_t offset,
746                                             int flags)
747 {
748     static RAMBlock *block = NULL;
749     char id[256];
750     uint8_t len;
751 
752     if (flags & RAM_SAVE_FLAG_CONTINUE) {
753         if (!block) {
754             fprintf(stderr, "Ack, bad migration stream!\n");
755             return NULL;
756         }
757 
758         return memory_region_get_ram_ptr(block->mr) + offset;
759     }
760 
761     len = qemu_get_byte(f);
762     qemu_get_buffer(f, (uint8_t *)id, len);
763     id[len] = 0;
764 
765     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
766         if (!strncmp(id, block->idstr, sizeof(id))) {
767             return memory_region_get_ram_ptr(block->mr) + offset;
768         }
769     }
770     fprintf(stderr, "Can't find block %s!\n", id);
771     return NULL;
772 }
773 
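/* Incoming side: parse the RAM section of the migration stream, checking the
 * RAM block list against the local one and filling guest memory page by page
 * until RAM_SAVE_FLAG_EOS is seen. */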
774 static int ram_load(QEMUFile *f, void *opaque, int version_id)
775 {
776     ram_addr_t addr;
777     int flags, ret = 0;
778     int error;
779     static uint64_t seq_iter;
780 
781     seq_iter++;
782 
783     if (version_id != 4) {
784         return -EINVAL;
785     }
786 
787     do {
788         addr = qemu_get_be64(f);
789 
790         flags = addr & ~TARGET_PAGE_MASK;
791         addr &= TARGET_PAGE_MASK;
792 
793         if (flags & RAM_SAVE_FLAG_MEM_SIZE) {
794             if (version_id == 4) {
795                 /* Synchronize RAM block list */
796                 char id[256];
797                 ram_addr_t length;
798                 ram_addr_t total_ram_bytes = addr;
799 
800                 while (total_ram_bytes) {
801                     RAMBlock *block;
802                     uint8_t len;
803 
804                     len = qemu_get_byte(f);
805                     qemu_get_buffer(f, (uint8_t *)id, len);
806                     id[len] = 0;
807                     length = qemu_get_be64(f);
808 
809                     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
810                         if (!strncmp(id, block->idstr, sizeof(id))) {
811                             if (block->length != length) {
812                                 ret = -EINVAL;
813                                 goto done;
814                             }
815                             break;
816                         }
817                     }
818 
819                     if (!block) {
820                         fprintf(stderr, "Unknown ramblock \"%s\", cannot "
821                                 "accept migration\n", id);
822                         ret = -EINVAL;
823                         goto done;
824                     }
825 
826                     total_ram_bytes -= length;
827                 }
828             }
829         }
830 
831         if (flags & RAM_SAVE_FLAG_COMPRESS) {
832             void *host;
833             uint8_t ch;
834 
835             host = host_from_stream_offset(f, addr, flags);
836             if (!host) {
837                 return -EINVAL;
838             }
839 
840             ch = qemu_get_byte(f);
841             memset(host, ch, TARGET_PAGE_SIZE);
842 #ifndef _WIN32
843             if (ch == 0 &&
844                 (!kvm_enabled() || kvm_has_sync_mmu()) &&
845                 getpagesize() <= TARGET_PAGE_SIZE) {
846                 qemu_madvise(host, TARGET_PAGE_SIZE, QEMU_MADV_DONTNEED);
847             }
848 #endif
849         } else if (flags & RAM_SAVE_FLAG_PAGE) {
850             void *host;
851 
852             host = host_from_stream_offset(f, addr, flags);
853             if (!host) {
854                 return -EINVAL;
855             }
856 
857             qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
858         } else if (flags & RAM_SAVE_FLAG_XBZRLE) {
859             void *host = host_from_stream_offset(f, addr, flags);
860             if (!host) {
861                 return -EINVAL;
862             }
863 
864             if (load_xbzrle(f, addr, host) < 0) {
865                 ret = -EINVAL;
866                 goto done;
867             }
868         }
869         error = qemu_file_get_error(f);
870         if (error) {
871             ret = error;
872             goto done;
873         }
874     } while (!(flags & RAM_SAVE_FLAG_EOS));
875 
876 done:
877     DPRINTF("Completed load of VM with exit code %d seq iteration "
878             "%" PRIu64 "\n", ret, seq_iter);
879     return ret;
880 }
881 
882 SaveVMHandlers savevm_ram_handlers = {
883     .save_live_setup = ram_save_setup,
884     .save_live_iterate = ram_save_iterate,
885     .save_live_complete = ram_save_complete,
886     .save_live_pending = ram_save_pending,
887     .load_state = ram_load,
888     .cancel = ram_migration_cancel,
889 };
890 
891 #ifdef HAS_AUDIO
892 struct soundhw {
893     const char *name;
894     const char *descr;
895     int enabled;
896     int isa;
897     union {
898         int (*init_isa) (ISABus *bus);
899         int (*init_pci) (PCIBus *bus);
900     } init;
901 };
902 
903 static struct soundhw soundhw[] = {
904 #ifdef HAS_AUDIO_CHOICE
905 #ifdef CONFIG_PCSPK
906     {
907         "pcspk",
908         "PC speaker",
909         0,
910         1,
911         { .init_isa = pcspk_audio_init }
912     },
913 #endif
914 
915 #ifdef CONFIG_SB16
916     {
917         "sb16",
918         "Creative Sound Blaster 16",
919         0,
920         1,
921         { .init_isa = SB16_init }
922     },
923 #endif
924 
925 #ifdef CONFIG_CS4231A
926     {
927         "cs4231a",
928         "CS4231A",
929         0,
930         1,
931         { .init_isa = cs4231a_init }
932     },
933 #endif
934 
935 #ifdef CONFIG_ADLIB
936     {
937         "adlib",
938 #ifdef HAS_YMF262
939         "Yamaha YMF262 (OPL3)",
940 #else
941         "Yamaha YM3812 (OPL2)",
942 #endif
943         0,
944         1,
945         { .init_isa = Adlib_init }
946     },
947 #endif
948 
949 #ifdef CONFIG_GUS
950     {
951         "gus",
952         "Gravis Ultrasound GF1",
953         0,
954         1,
955         { .init_isa = GUS_init }
956     },
957 #endif
958 
959 #ifdef CONFIG_AC97
960     {
961         "ac97",
962         "Intel 82801AA AC97 Audio",
963         0,
964         0,
965         { .init_pci = ac97_init }
966     },
967 #endif
968 
969 #ifdef CONFIG_ES1370
970     {
971         "es1370",
972         "ENSONIQ AudioPCI ES1370",
973         0,
974         0,
975         { .init_pci = es1370_init }
976     },
977 #endif
978 
979 #ifdef CONFIG_HDA
980     {
981         "hda",
982         "Intel HD Audio",
983         0,
984         0,
985         { .init_pci = intel_hda_and_codec_init }
986     },
987 #endif
988 
989 #endif /* HAS_AUDIO_CHOICE */
990 
991     { NULL, NULL, 0, 0, { NULL } }
992 };
993 
994 void select_soundhw(const char *optarg)
995 {
996     struct soundhw *c;
997 
998     if (is_help_option(optarg)) {
999     show_valid_cards:
1000 
1001 #ifdef HAS_AUDIO_CHOICE
1002         printf("Valid sound card names (comma separated):\n");
1003         for (c = soundhw; c->name; ++c) {
1004             printf("%-11s %s\n", c->name, c->descr);
1005         }
1006         printf("\n-soundhw all will enable all of the above\n");
1007 #else
1008         printf("Machine has no user-selectable audio hardware "
1009                "(it may or may not have always-present audio hardware).\n");
1010 #endif
1011         exit(!is_help_option(optarg));
1012     }
1013     else {
1014         size_t l;
1015         const char *p;
1016         char *e;
1017         int bad_card = 0;
1018 
1019         if (!strcmp(optarg, "all")) {
1020             for (c = soundhw; c->name; ++c) {
1021                 c->enabled = 1;
1022             }
1023             return;
1024         }
1025 
1026         p = optarg;
1027         while (*p) {
1028             e = strchr(p, ',');
1029             l = !e ? strlen(p) : (size_t) (e - p);
1030 
1031             for (c = soundhw; c->name; ++c) {
1032                 if (!strncmp(c->name, p, l) && !c->name[l]) {
1033                     c->enabled = 1;
1034                     break;
1035                 }
1036             }
1037 
1038             if (!c->name) {
1039                 if (l > 80) {
1040                     fprintf(stderr,
1041                             "Unknown sound card name (too big to show)\n");
1042                 }
1043                 else {
1044                     fprintf(stderr, "Unknown sound card name `%.*s'\n",
1045                             (int) l, p);
1046                 }
1047                 bad_card = 1;
1048             }
1049             p += l + (e != NULL);
1050         }
1051 
1052         if (bad_card) {
1053             goto show_valid_cards;
1054         }
1055     }
1056 }
1057 
1058 void audio_init(ISABus *isa_bus, PCIBus *pci_bus)
1059 {
1060     struct soundhw *c;
1061 
1062     for (c = soundhw; c->name; ++c) {
1063         if (c->enabled) {
1064             if (c->isa) {
1065                 if (isa_bus) {
1066                     c->init.init_isa(isa_bus);
1067                 }
1068             } else {
1069                 if (pci_bus) {
1070                     c->init.init_pci(pci_bus);
1071                 }
1072             }
1073         }
1074     }
1075 }
1076 #else
1077 void select_soundhw(const char *optarg)
1078 {
1079 }
1080 void audio_init(ISABus *isa_bus, PCIBus *pci_bus)
1081 {
1082 }
1083 #endif
1084 
1085 int qemu_uuid_parse(const char *str, uint8_t *uuid)
1086 {
1087     int ret;
1088 
1089     if (strlen(str) != 36) {
1090         return -1;
1091     }
1092 
1093     ret = sscanf(str, UUID_FMT, &uuid[0], &uuid[1], &uuid[2], &uuid[3],
1094                  &uuid[4], &uuid[5], &uuid[6], &uuid[7], &uuid[8], &uuid[9],
1095                  &uuid[10], &uuid[11], &uuid[12], &uuid[13], &uuid[14],
1096                  &uuid[15]);
1097 
1098     if (ret != 16) {
1099         return -1;
1100     }
1101 #ifdef TARGET_I386
1102     smbios_add_field(1, offsetof(struct smbios_type_1, uuid), 16, uuid);
1103 #endif
1104     return 0;
1105 }
1106 
1107 void do_acpitable_option(const QemuOpts *opts)
1108 {
1109 #ifdef TARGET_I386
1110     Error *err = NULL;
1111 
1112     acpi_table_add(opts, &err);
1113     if (err) {
1114         fprintf(stderr, "Wrong acpi table provided: %s\n",
1115                 error_get_pretty(err));
1116         error_free(err);
1117         exit(1);
1118     }
1119 #endif
1120 }
1121 
1122 void do_smbios_option(const char *optarg)
1123 {
1124 #ifdef TARGET_I386
1125     if (smbios_entry_add(optarg) < 0) {
1126         fprintf(stderr, "Wrong smbios provided\n");
1127         exit(1);
1128     }
1129 #endif
1130 }
1131 
1132 void cpudef_init(void)
1133 {
1134 #if defined(cpudef_setup)
1135     cpudef_setup(); /* parse cpu definitions in target config file */
1136 #endif
1137 }
1138 
1139 int audio_available(void)
1140 {
1141 #ifdef HAS_AUDIO
1142     return 1;
1143 #else
1144     return 0;
1145 #endif
1146 }
1147 
1148 int tcg_available(void)
1149 {
1150     return 1;
1151 }
1152 
1153 int kvm_available(void)
1154 {
1155 #ifdef CONFIG_KVM
1156     return 1;
1157 #else
1158     return 0;
1159 #endif
1160 }
1161 
1162 int xen_available(void)
1163 {
1164 #ifdef CONFIG_XEN
1165     return 1;
1166 #else
1167     return 0;
1168 #endif
1169 }
1170 
1171 
1172 TargetInfo *qmp_query_target(Error **errp)
1173 {
1174     TargetInfo *info = g_malloc0(sizeof(*info));
1175 
1176     info->arch = TARGET_TYPE;
1177 
1178     return info;
1179 }
1180