xref: /qemu/system/arch_init.c (revision f6f14c58d542b306b6a0ff207db793d0aba62aa1)
/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <stdint.h>
#include <stdarg.h>
#include <stdlib.h>
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include "config.h"
#include "monitor/monitor.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "sysemu/arch_init.h"
#include "audio/audio.h"
#include "hw/i386/pc.h"
#include "hw/pci/pci.h"
#include "hw/audio/audio.h"
#include "sysemu/kvm.h"
#include "migration/migration.h"
#include "hw/i386/smbios.h"
#include "exec/address-spaces.h"
#include "hw/audio/pcspk.h"
#include "migration/page_cache.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qmp-commands.h"
#include "trace.h"
#include "exec/cpu-all.h"
#include "exec/ram_addr.h"
#include "hw/acpi/acpi.h"
#include "qemu/host-utils.h"
#include "qemu/rcu_queue.h"

#ifdef DEBUG_ARCH_INIT
#define DPRINTF(fmt, ...) \
    do { fprintf(stdout, "arch_init: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#ifdef TARGET_SPARC
int graphic_width = 1024;
int graphic_height = 768;
int graphic_depth = 8;
#else
int graphic_width = 800;
int graphic_height = 600;
int graphic_depth = 32;
#endif

#if defined(TARGET_ALPHA)
#define QEMU_ARCH QEMU_ARCH_ALPHA
#elif defined(TARGET_ARM)
#define QEMU_ARCH QEMU_ARCH_ARM
#elif defined(TARGET_CRIS)
#define QEMU_ARCH QEMU_ARCH_CRIS
#elif defined(TARGET_I386)
#define QEMU_ARCH QEMU_ARCH_I386
#elif defined(TARGET_M68K)
#define QEMU_ARCH QEMU_ARCH_M68K
#elif defined(TARGET_LM32)
#define QEMU_ARCH QEMU_ARCH_LM32
#elif defined(TARGET_MICROBLAZE)
#define QEMU_ARCH QEMU_ARCH_MICROBLAZE
#elif defined(TARGET_MIPS)
#define QEMU_ARCH QEMU_ARCH_MIPS
#elif defined(TARGET_MOXIE)
#define QEMU_ARCH QEMU_ARCH_MOXIE
#elif defined(TARGET_OPENRISC)
#define QEMU_ARCH QEMU_ARCH_OPENRISC
#elif defined(TARGET_PPC)
#define QEMU_ARCH QEMU_ARCH_PPC
#elif defined(TARGET_S390X)
#define QEMU_ARCH QEMU_ARCH_S390X
#elif defined(TARGET_SH4)
#define QEMU_ARCH QEMU_ARCH_SH4
#elif defined(TARGET_SPARC)
#define QEMU_ARCH QEMU_ARCH_SPARC
#elif defined(TARGET_XTENSA)
#define QEMU_ARCH QEMU_ARCH_XTENSA
#elif defined(TARGET_UNICORE32)
#define QEMU_ARCH QEMU_ARCH_UNICORE32
#elif defined(TARGET_TRICORE)
#define QEMU_ARCH QEMU_ARCH_TRICORE
#endif

const uint32_t arch_type = QEMU_ARCH;
static bool mig_throttle_on;
static int dirty_rate_high_cnt;
static void check_guest_throttling(void);

static uint64_t bitmap_sync_count;

/***********************************************************/
/* ram save/restore */

#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
#define RAM_SAVE_FLAG_COMPRESS 0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
/* 0x80 is reserved in migration.h; start with 0x100 for the next flag */

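/*
 * Illustrative sketch of the stream these flags produce (see
 * save_page_header() and ram_load() below): the stream is a sequence of
 * records, each starting with a be64 whose low bits carry the flags above
 * and whose upper bits carry the page offset within its RAMBlock, e.g.
 *
 *   be64 offset|RAM_SAVE_FLAG_PAGE    [idstr on block change]  page data
 *   be64 offset|RAM_SAVE_FLAG_XBZRLE  [idstr on block change]  encoded page
 *   be64 RAM_SAVE_FLAG_EOS            end of this section
 */
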
static struct defconfig_file {
    const char *filename;
    /* Indicates it is a user config file (disabled by -no-user-config) */
    bool userconfig;
} default_config_files[] = {
    { CONFIG_QEMU_CONFDIR "/qemu.conf",                   true },
    { CONFIG_QEMU_CONFDIR "/target-" TARGET_NAME ".conf", true },
    { NULL }, /* end of list */
};

static const uint8_t ZERO_TARGET_PAGE[TARGET_PAGE_SIZE];

int qemu_read_default_config_files(bool userconfig)
{
    int ret;
    struct defconfig_file *f;

    for (f = default_config_files; f->filename; f++) {
        if (!userconfig && f->userconfig) {
            continue;
        }
        ret = qemu_read_config_file(f->filename);
        if (ret < 0 && ret != -ENOENT) {
            return ret;
        }
    }

    return 0;
}

static inline bool is_zero_range(uint8_t *p, uint64_t size)
{
    return buffer_find_nonzero_offset(p, size) == size;
}

/* This struct contains the XBZRLE cache and the static buffers
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* Cache for XBZRLE, protected by lock. */
    PageCache *cache;
    QemuMutex lock;
} XBZRLE;

/* buffer used for XBZRLE decoding */
static uint8_t *xbzrle_decoded_buf;

static void XBZRLE_cache_lock(void)
{
    if (migrate_use_xbzrle()) {
        qemu_mutex_lock(&XBZRLE.lock);
    }
}

static void XBZRLE_cache_unlock(void)
{
    if (migrate_use_xbzrle()) {
        qemu_mutex_unlock(&XBZRLE.lock);
    }
}

/*
 * Called from qmp_migrate_set_cache_size in the main thread, possibly
 * while a migration is in progress.
 * A running migration may be using the cache and might finish during this
 * call, hence changes to the cache are protected by XBZRLE.lock.
 */
int64_t xbzrle_cache_resize(int64_t new_size)
{
    PageCache *new_cache;
    int64_t ret;

    if (new_size < TARGET_PAGE_SIZE) {
        return -1;
    }

    XBZRLE_cache_lock();

    if (XBZRLE.cache != NULL) {
        if (pow2floor(new_size) == migrate_xbzrle_cache_size()) {
            goto out_new_size;
        }
        new_cache = cache_init(new_size / TARGET_PAGE_SIZE,
                               TARGET_PAGE_SIZE);
        if (!new_cache) {
            error_report("Error creating cache");
            ret = -1;
            goto out;
        }

        cache_fini(XBZRLE.cache);
        XBZRLE.cache = new_cache;
    }

out_new_size:
    ret = pow2floor(new_size);
out:
    XBZRLE_cache_unlock();
    return ret;
}

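/*
 * Worked example (illustrative): a request for 5 MiB succeeds, but the
 * value handed back to the caller is pow2floor(5 MiB) = 4 MiB, i.e. the
 * effective cache size is always rounded down to a power of two.
 */
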
/* accounting for migration statistics */
typedef struct AccountingInfo {
    uint64_t dup_pages;
    uint64_t skipped_pages;
    uint64_t norm_pages;
    uint64_t iterations;
    uint64_t xbzrle_bytes;
    uint64_t xbzrle_pages;
    uint64_t xbzrle_cache_miss;
    double xbzrle_cache_miss_rate;
    uint64_t xbzrle_overflows;
} AccountingInfo;

static AccountingInfo acct_info;

static void acct_clear(void)
{
    memset(&acct_info, 0, sizeof(acct_info));
}

uint64_t dup_mig_bytes_transferred(void)
{
    return acct_info.dup_pages * TARGET_PAGE_SIZE;
}

uint64_t dup_mig_pages_transferred(void)
{
    return acct_info.dup_pages;
}

uint64_t skipped_mig_bytes_transferred(void)
{
    return acct_info.skipped_pages * TARGET_PAGE_SIZE;
}

uint64_t skipped_mig_pages_transferred(void)
{
    return acct_info.skipped_pages;
}

uint64_t norm_mig_bytes_transferred(void)
{
    return acct_info.norm_pages * TARGET_PAGE_SIZE;
}

uint64_t norm_mig_pages_transferred(void)
{
    return acct_info.norm_pages;
}

uint64_t xbzrle_mig_bytes_transferred(void)
{
    return acct_info.xbzrle_bytes;
}

uint64_t xbzrle_mig_pages_transferred(void)
{
    return acct_info.xbzrle_pages;
}

uint64_t xbzrle_mig_pages_cache_miss(void)
{
    return acct_info.xbzrle_cache_miss;
}

double xbzrle_mig_cache_miss_rate(void)
{
    return acct_info.xbzrle_cache_miss_rate;
}

uint64_t xbzrle_mig_pages_overflow(void)
{
    return acct_info.xbzrle_overflows;
}

/* This is the last block that we have visited while searching for
 * dirty pages
 */
static RAMBlock *last_seen_block;
/* This is the last block from which we have sent data */
static RAMBlock *last_sent_block;
static ram_addr_t last_offset;
static unsigned long *migration_bitmap;
static uint64_t migration_dirty_pages;
static uint32_t last_version;
static bool ram_bulk_stage;

/**
 * save_page_header: Write the page header to the wire
 *
 * If this page starts a new block (i.e. the block differs from the last
 * one sent), the block identification is written as well.
 *
 * Returns: Number of bytes written
 *
 * @f: QEMUFile where to send the data
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page;
 *          the lower bits contain flags
 */
static size_t save_page_header(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
{
    size_t size;

    if (block == last_sent_block) {
        offset |= RAM_SAVE_FLAG_CONTINUE;
    }

    qemu_put_be64(f, offset);
    size = 8;

    if (block != last_sent_block) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr,
                        strlen(block->idstr));
        size += 1 + strlen(block->idstr);
        last_sent_block = block;
    }
    return size;
}

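/*
 * Byte for byte, the header above is:
 *
 *   8 bytes   be64: offset-within-block | flags
 *   1 byte    strlen(block->idstr)        (only on a block change)
 *   N bytes   block->idstr, no terminator (only on a block change)
 *
 * Pages that continue the previous block set RAM_SAVE_FLAG_CONTINUE and
 * cost only the 8-byte header.
 */
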
/* Update the xbzrle cache to reflect a page that's been sent as all 0.
 * The important thing is that a stale (not-yet-0'd) page be replaced
 * by the new data.
 * As a bonus, if the page wasn't in the cache it gets added, so that
 * when a small write is later made into the 0'd page it can be sent
 * with XBZRLE.
 */
static void xbzrle_cache_zero_page(ram_addr_t current_addr)
{
    if (ram_bulk_stage || !migrate_use_xbzrle()) {
        return;
    }

    /* We don't care if this fails to allocate a new cache page
     * as long as it updated an old one */
    cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE,
                 bitmap_sync_count);
}

#define ENCODING_FLAG_XBZRLE 0x1

/**
 * save_xbzrle_page: compress and send the current page
 *
 * Returns: 1 means that we wrote the page
 *          0 means that the page is identical to the one already sent
 *          -1 means that xbzrle would be longer than a normal page (or
 *          that the page was not yet cached)
 *
 * @f: QEMUFile where to send the data
 * @current_data: pointer to the page's address; may be updated to point
 *                at the cached copy
 * @current_addr: ram address of the page (used as the cache key)
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @last_stage: if we are at the completion stage
 * @bytes_transferred: increase it with the number of transferred bytes
 */
static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data,
                            ram_addr_t current_addr, RAMBlock *block,
                            ram_addr_t offset, bool last_stage,
                            uint64_t *bytes_transferred)
{
    int encoded_len = 0, bytes_xbzrle;
    uint8_t *prev_cached_page;

    if (!cache_is_cached(XBZRLE.cache, current_addr, bitmap_sync_count)) {
        acct_info.xbzrle_cache_miss++;
        if (!last_stage) {
            if (cache_insert(XBZRLE.cache, current_addr, *current_data,
                             bitmap_sync_count) == -1) {
                return -1;
            } else {
                /* update *current_data when the page has been
                   inserted into the cache */
                *current_data = get_cached_data(XBZRLE.cache, current_addr);
            }
        }
        return -1;
    }

    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save the current buffer into memory */
    memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);
    if (encoded_len == 0) {
        DPRINTF("Skipping unmodified page\n");
        return 0;
    } else if (encoded_len == -1) {
        DPRINTF("Overflow\n");
        acct_info.xbzrle_overflows++;
        /* update data in the cache */
        if (!last_stage) {
            memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
            *current_data = prev_cached_page;
        }
        return -1;
    }

    /* we need to update the data in the cache, in order to get the same data */
    if (!last_stage) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
    }

    /* Send the XBZRLE-compressed page */
    bytes_xbzrle = save_page_header(f, block, offset | RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(f, encoded_len);
    qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
    bytes_xbzrle += encoded_len + 1 + 2;
    acct_info.xbzrle_pages++;
    acct_info.xbzrle_bytes += bytes_xbzrle;
    *bytes_transferred += bytes_xbzrle;

    return 1;
}

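/*
 * On the wire, the XBZRLE record written above is:
 *
 *   save_page_header(...)   block/offset header with RAM_SAVE_FLAG_XBZRLE
 *   1 byte                  ENCODING_FLAG_XBZRLE
 *   2 bytes                 be16 encoded_len
 *   encoded_len bytes       the encoded page
 *
 * which is exactly what load_xbzrle() parses on the destination.
 */
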
static inline
ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
                                                 ram_addr_t start)
{
    unsigned long base = mr->ram_addr >> TARGET_PAGE_BITS;
    unsigned long nr = base + (start >> TARGET_PAGE_BITS);
    uint64_t mr_size = TARGET_PAGE_ALIGN(memory_region_size(mr));
    unsigned long size = base + (mr_size >> TARGET_PAGE_BITS);

    unsigned long next;

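    /* During the bulk stage the whole bitmap is still set, so for any page
     * other than the first one of a block we can skip the search and just
     * step to the next page. */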
    if (ram_bulk_stage && nr > base) {
        next = nr + 1;
    } else {
        next = find_next_bit(migration_bitmap, size, nr);
    }

    if (next < size) {
        clear_bit(next, migration_bitmap);
        migration_dirty_pages--;
    }
    return (next - base) << TARGET_PAGE_BITS;
}

static inline bool migration_bitmap_set_dirty(ram_addr_t addr)
{
    bool ret;
    int nr = addr >> TARGET_PAGE_BITS;

    ret = test_and_set_bit(nr, migration_bitmap);

    if (!ret) {
        migration_dirty_pages++;
    }
    return ret;
}

static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t addr;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* is the start address aligned at the start of a word? */
    if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];

        for (k = page; k < page + nr; k++) {
            if (src[k]) {
                unsigned long new_dirty;
                new_dirty = ~migration_bitmap[k];
                migration_bitmap[k] |= src[k];
                new_dirty &= src[k];
                migration_dirty_pages += ctpopl(new_dirty);
                src[k] = 0;
            }
        }
    } else {
        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_get_dirty(start + addr,
                                              TARGET_PAGE_SIZE,
                                              DIRTY_MEMORY_MIGRATION)) {
                cpu_physical_memory_reset_dirty(start + addr,
                                                TARGET_PAGE_SIZE,
                                                DIRTY_MEMORY_MIGRATION);
                migration_bitmap_set_dirty(start + addr);
            }
        }
    }
}


/* Fix me: there are too many global variables used in the migration process. */
static int64_t start_time;
static int64_t bytes_xfer_prev;
static int64_t num_dirty_pages_period;

static void migration_bitmap_sync_init(void)
{
    start_time = 0;
    bytes_xfer_prev = 0;
    num_dirty_pages_period = 0;
}

/* Called with the iothread lock held, to protect ram_list.dirty_memory[] */
static void migration_bitmap_sync(void)
{
    RAMBlock *block;
    uint64_t num_dirty_pages_init = migration_dirty_pages;
    MigrationState *s = migrate_get_current();
    int64_t end_time;
    int64_t bytes_xfer_now;
    static uint64_t xbzrle_cache_miss_prev;
    static uint64_t iterations_prev;

    bitmap_sync_count++;

    if (!bytes_xfer_prev) {
        bytes_xfer_prev = ram_bytes_transferred();
    }

    if (!start_time) {
        start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    }

    trace_migration_bitmap_sync_start();
    address_space_sync_dirty_bitmap(&address_space_memory);

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        migration_bitmap_sync_range(block->mr->ram_addr, block->used_length);
    }
    rcu_read_unlock();

    trace_migration_bitmap_sync_end(migration_dirty_pages
                                    - num_dirty_pages_init);
    num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > start_time + 1000) {
        if (migrate_auto_converge()) {
            /* The following detection logic can be refined later. For now:
               Check to see if the dirtied bytes are 50% more than the approx.
               amount of bytes that just got transferred since the last time we
               were in this routine. If that happens >N times (for now N==4)
               we turn on the throttle down logic */
            bytes_xfer_now = ram_bytes_transferred();
            if (s->dirty_pages_rate &&
                (num_dirty_pages_period * TARGET_PAGE_SIZE >
                    (bytes_xfer_now - bytes_xfer_prev) / 2) &&
                (dirty_rate_high_cnt++ > 4)) {
                trace_migration_throttle();
                mig_throttle_on = true;
                dirty_rate_high_cnt = 0;
            }
            bytes_xfer_prev = bytes_xfer_now;
        } else {
            mig_throttle_on = false;
        }
        if (migrate_use_xbzrle()) {
            if (iterations_prev != 0) {
                acct_info.xbzrle_cache_miss_rate =
                    (double)(acct_info.xbzrle_cache_miss -
                             xbzrle_cache_miss_prev) /
                    (acct_info.iterations - iterations_prev);
            }
            iterations_prev = acct_info.iterations;
            xbzrle_cache_miss_prev = acct_info.xbzrle_cache_miss;
        }
        s->dirty_pages_rate = num_dirty_pages_period * 1000
            / (end_time - start_time);
        s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
        start_time = end_time;
        num_dirty_pages_period = 0;
        s->dirty_sync_count = bitmap_sync_count;
    }
}

/**
 * ram_save_page: Send the given page to the stream
 *
 * Returns: Number of pages written.
 *
 * @f: QEMUFile where to send the data
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @last_stage: if we are at the completion stage
 * @bytes_transferred: increase it with the number of transferred bytes
 */
static int ram_save_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
                         bool last_stage, uint64_t *bytes_transferred)
{
    int pages = -1;
    uint64_t bytes_xmit;
    ram_addr_t current_addr;
    MemoryRegion *mr = block->mr;
    uint8_t *p;
    int ret;
    bool send_async = true;

    p = memory_region_get_ram_ptr(mr) + offset;

    /* When in doubt, send the page as a normal page */
    bytes_xmit = 0;
    ret = ram_control_save_page(f, block->offset,
                                offset, TARGET_PAGE_SIZE, &bytes_xmit);
    if (bytes_xmit) {
        *bytes_transferred += bytes_xmit;
        pages = 1;
    }

    XBZRLE_cache_lock();

    current_addr = block->offset + offset;
    if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
        if (ret != RAM_SAVE_CONTROL_DELAYED) {
            if (bytes_xmit > 0) {
                acct_info.norm_pages++;
            } else if (bytes_xmit == 0) {
                acct_info.dup_pages++;
            }
        }
    } else if (is_zero_range(p, TARGET_PAGE_SIZE)) {
        acct_info.dup_pages++;
        *bytes_transferred += save_page_header(f, block,
                                               offset | RAM_SAVE_FLAG_COMPRESS);
        qemu_put_byte(f, 0);
        *bytes_transferred += 1;
        pages = 1;
        /* Must let xbzrle know, otherwise a previous (now 0'd) cached
         * page would be stale
         */
        xbzrle_cache_zero_page(current_addr);
    } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
        pages = save_xbzrle_page(f, &p, current_addr, block,
                                 offset, last_stage, bytes_transferred);
        if (!last_stage) {
            /* Can't send this cached data async, since the cache page
             * might get updated before it gets to the wire
             */
            send_async = false;
        }
    }

    /* XBZRLE overflow or normal page */
    if (pages == -1) {
        *bytes_transferred += save_page_header(f, block,
                                               offset | RAM_SAVE_FLAG_PAGE);
        if (send_async) {
            qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
        } else {
            qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
        }
        *bytes_transferred += TARGET_PAGE_SIZE;
        pages = 1;
        acct_info.norm_pages++;
    }

    XBZRLE_cache_unlock();

    return pages;
}

/**
 * ram_find_and_save_block: Finds a dirty page and sends it to f
 *
 * Called within an RCU critical section.
 *
 * Returns:  The number of pages written
 *           0 means no dirty pages
 *
 * @f: QEMUFile where to send the data
 * @last_stage: if we are at the completion stage
 * @bytes_transferred: increase it with the number of transferred bytes
 */
static int ram_find_and_save_block(QEMUFile *f, bool last_stage,
                                   uint64_t *bytes_transferred)
{
    RAMBlock *block = last_seen_block;
    ram_addr_t offset = last_offset;
    bool complete_round = false;
    int pages = 0;
    MemoryRegion *mr;

    if (!block) {
        block = QLIST_FIRST_RCU(&ram_list.blocks);
    }

    while (true) {
        mr = block->mr;
        offset = migration_bitmap_find_and_reset_dirty(mr, offset);
        if (complete_round && block == last_seen_block &&
            offset >= last_offset) {
            break;
        }
        if (offset >= block->used_length) {
            offset = 0;
            block = QLIST_NEXT_RCU(block, next);
            if (!block) {
                block = QLIST_FIRST_RCU(&ram_list.blocks);
                complete_round = true;
                ram_bulk_stage = false;
            }
        } else {
            pages = ram_save_page(f, block, offset, last_stage,
                                  bytes_transferred);

            /* if the page is unmodified, continue to the next one */
            if (pages > 0) {
                break;
            }
        }
    }

    last_seen_block = block;
    last_offset = offset;

    return pages;
}

static uint64_t bytes_transferred;

void acct_update_position(QEMUFile *f, size_t size, bool zero)
{
    uint64_t pages = size / TARGET_PAGE_SIZE;
    if (zero) {
        acct_info.dup_pages += pages;
    } else {
        acct_info.norm_pages += pages;
        bytes_transferred += size;
        qemu_update_position(f, size);
    }
}

static ram_addr_t ram_save_remaining(void)
{
    return migration_dirty_pages;
}

uint64_t ram_bytes_remaining(void)
{
    return ram_save_remaining() * TARGET_PAGE_SIZE;
}

uint64_t ram_bytes_transferred(void)
{
    return bytes_transferred;
}

783 {
784     RAMBlock *block;
785     uint64_t total = 0;
786 
787     rcu_read_lock();
788     QLIST_FOREACH_RCU(block, &ram_list.blocks, next)
789         total += block->used_length;
790     rcu_read_unlock();
791     return total;
792 }
793 
794 void free_xbzrle_decoded_buf(void)
795 {
796     g_free(xbzrle_decoded_buf);
797     xbzrle_decoded_buf = NULL;
798 }
799 
800 static void migration_end(void)
801 {
802     if (migration_bitmap) {
803         memory_global_dirty_log_stop();
804         g_free(migration_bitmap);
805         migration_bitmap = NULL;
806     }
807 
808     XBZRLE_cache_lock();
809     if (XBZRLE.cache) {
810         cache_fini(XBZRLE.cache);
811         g_free(XBZRLE.encoded_buf);
812         g_free(XBZRLE.current_buf);
813         XBZRLE.cache = NULL;
814         XBZRLE.encoded_buf = NULL;
815         XBZRLE.current_buf = NULL;
816     }
817     XBZRLE_cache_unlock();
818 }
819 
820 static void ram_migration_cancel(void *opaque)
821 {
822     migration_end();
823 }
824 
825 static void reset_ram_globals(void)
826 {
827     last_seen_block = NULL;
828     last_sent_block = NULL;
829     last_offset = 0;
830     last_version = ram_list.version;
831     ram_bulk_stage = true;
832 }
833 
#define MAX_WAIT 50 /* ms, half the buffered_file limit */

/* Each of ram_save_setup, ram_save_iterate and ram_save_complete has
 * a long-running RCU critical section.  When RCU reclaims in the code
 * start to become numerous it will be necessary to reduce the
 * granularity of these critical sections.
 */

static int ram_save_setup(QEMUFile *f, void *opaque)
{
    RAMBlock *block;
    int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */

    mig_throttle_on = false;
    dirty_rate_high_cnt = 0;
    bitmap_sync_count = 0;
    migration_bitmap_sync_init();

    if (migrate_use_xbzrle()) {
        XBZRLE_cache_lock();
        XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
                                  TARGET_PAGE_SIZE,
                                  TARGET_PAGE_SIZE);
        if (!XBZRLE.cache) {
            XBZRLE_cache_unlock();
            error_report("Error creating cache");
            return -1;
        }
        XBZRLE_cache_unlock();

        /* We prefer not to abort if there is no memory */
        XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
        if (!XBZRLE.encoded_buf) {
            error_report("Error allocating encoded_buf");
            return -1;
        }

        XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
        if (!XBZRLE.current_buf) {
            error_report("Error allocating current_buf");
            g_free(XBZRLE.encoded_buf);
            XBZRLE.encoded_buf = NULL;
            return -1;
        }

        acct_clear();
    }

    /* iothread lock needed for ram_list.dirty_memory[] */
    qemu_mutex_lock_iothread();
    qemu_mutex_lock_ramlist();
    rcu_read_lock();
    bytes_transferred = 0;
    reset_ram_globals();

    ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS;
    migration_bitmap = bitmap_new(ram_bitmap_pages);
    bitmap_set(migration_bitmap, 0, ram_bitmap_pages);

    /*
     * Count the total number of pages used by ram blocks not including any
     * gaps due to alignment or unplugs.
     */
    migration_dirty_pages = 0;
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        uint64_t block_pages;

        block_pages = block->used_length >> TARGET_PAGE_BITS;
        migration_dirty_pages += block_pages;
    }

    memory_global_dirty_log_start();
    migration_bitmap_sync();
    qemu_mutex_unlock_ramlist();
    qemu_mutex_unlock_iothread();

    qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
        qemu_put_be64(f, block->used_length);
    }

    rcu_read_unlock();

    ram_control_before_iterate(f, RAM_CONTROL_SETUP);
    ram_control_after_iterate(f, RAM_CONTROL_SETUP);

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}

static int ram_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int i;
    int64_t t0;
    int pages_sent = 0;

    rcu_read_lock();
    if (ram_list.version != last_version) {
        reset_ram_globals();
    }

    /* Read version before ram_list.blocks */
    smp_rmb();

    ram_control_before_iterate(f, RAM_CONTROL_ROUND);

    t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    i = 0;
    while ((ret = qemu_file_rate_limit(f)) == 0) {
        int pages;

        pages = ram_find_and_save_block(f, false, &bytes_transferred);
        /* no more pages to send */
        if (pages == 0) {
            break;
        }
        pages_sent += pages;
        acct_info.iterations++;
        check_guest_throttling();
        /* we want to check in the 1st loop, just in case it was the 1st time
           and we had to sync the dirty bitmap.
           qemu_clock_get_ns() is a bit expensive, so we only check once
           every few iterations
        */
        if ((i & 63) == 0) {
            uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
            if (t1 > MAX_WAIT) {
                DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
                        t1, i);
                break;
            }
        }
        i++;
    }
    rcu_read_unlock();

    /*
     * Must occur before EOS (or any QEMUFile operation)
     * because of RDMA protocol.
     */
    ram_control_after_iterate(f, RAM_CONTROL_ROUND);

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    bytes_transferred += 8;

    ret = qemu_file_get_error(f);
    if (ret < 0) {
        return ret;
    }

    return pages_sent;
}

/* Called with the iothread lock held */
static int ram_save_complete(QEMUFile *f, void *opaque)
{
    rcu_read_lock();

    migration_bitmap_sync();

    ram_control_before_iterate(f, RAM_CONTROL_FINISH);

    /* try transferring iterative blocks of memory */

    /* flush all remaining blocks regardless of rate limiting */
    while (true) {
        int pages;

        pages = ram_find_and_save_block(f, true, &bytes_transferred);
        /* no more blocks to send */
        if (pages == 0) {
            break;
        }
    }

    ram_control_after_iterate(f, RAM_CONTROL_FINISH);
    migration_end();

    rcu_read_unlock();
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}

static uint64_t ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size)
{
    uint64_t remaining_size;

    remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;

    if (remaining_size < max_size) {
        qemu_mutex_lock_iothread();
        rcu_read_lock();
        migration_bitmap_sync();
        rcu_read_unlock();
        qemu_mutex_unlock_iothread();
        remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
    }
    return remaining_size;
}

static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
{
    unsigned int xh_len;
    int xh_flags;

    if (!xbzrle_decoded_buf) {
        xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
    }

    /* extract RLE header */
    xh_flags = qemu_get_byte(f);
    xh_len = qemu_get_be16(f);

    if (xh_flags != ENCODING_FLAG_XBZRLE) {
        error_report("Failed to load XBZRLE page - wrong compression!");
        return -1;
    }

    if (xh_len > TARGET_PAGE_SIZE) {
        error_report("Failed to load XBZRLE page - len overflow!");
        return -1;
    }
    /* load data and decode */
    qemu_get_buffer(f, xbzrle_decoded_buf, xh_len);

    /* decode RLE */
    if (xbzrle_decode_buffer(xbzrle_decoded_buf, xh_len, host,
                             TARGET_PAGE_SIZE) == -1) {
        error_report("Failed to load XBZRLE page - decode error!");
        return -1;
    }

    return 0;
}

/* Must be called from within an RCU critical section.
 * Returns a pointer from within the RCU-protected ram_list.
 */
static inline void *host_from_stream_offset(QEMUFile *f,
                                            ram_addr_t offset,
                                            int flags)
{
    static RAMBlock *block = NULL;
    char id[256];
    uint8_t len;

    if (flags & RAM_SAVE_FLAG_CONTINUE) {
        if (!block || block->max_length <= offset) {
            error_report("Ack, bad migration stream!");
            return NULL;
        }

        return memory_region_get_ram_ptr(block->mr) + offset;
    }

    len = qemu_get_byte(f);
    qemu_get_buffer(f, (uint8_t *)id, len);
    id[len] = 0;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (!strncmp(id, block->idstr, sizeof(id)) &&
            block->max_length > offset) {
            return memory_region_get_ram_ptr(block->mr) + offset;
        }
    }

    error_report("Can't find block %s!", id);
    return NULL;
}

/*
 * If a page (or a whole RDMA chunk) has been
 * determined to be zero, then zap it.
 */
void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
{
    if (ch != 0 || !is_zero_range(host, size)) {
        memset(host, ch, size);
    }
}

static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    int flags = 0, ret = 0;
    static uint64_t seq_iter;

    seq_iter++;

    if (version_id != 4) {
        ret = -EINVAL;
    }

    /* This RCU critical section can be very long running.
     * When RCU reclaims in the code start to become numerous,
     * it will be necessary to reduce the granularity of this
     * critical section.
     */
    rcu_read_lock();
    while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
        ram_addr_t addr, total_ram_bytes;
        void *host;
        uint8_t ch;

        addr = qemu_get_be64(f);
        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
        case RAM_SAVE_FLAG_MEM_SIZE:
            /* Synchronize RAM block list */
            total_ram_bytes = addr;
            while (!ret && total_ram_bytes) {
                RAMBlock *block;
                uint8_t len;
                char id[256];
                ram_addr_t length;

                len = qemu_get_byte(f);
                qemu_get_buffer(f, (uint8_t *)id, len);
                id[len] = 0;
                length = qemu_get_be64(f);

                QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
                    if (!strncmp(id, block->idstr, sizeof(id))) {
                        if (length != block->used_length) {
                            Error *local_err = NULL;

                            ret = qemu_ram_resize(block->offset, length,
                                                  &local_err);
                            if (local_err) {
                                error_report_err(local_err);
                            }
                        }
                        break;
                    }
                }

                if (!block) {
                    error_report("Unknown ramblock \"%s\", cannot "
                                 "accept migration", id);
                    ret = -EINVAL;
                }

                total_ram_bytes -= length;
            }
            break;
        case RAM_SAVE_FLAG_COMPRESS:
            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            ch = qemu_get_byte(f);
            ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
            break;
        case RAM_SAVE_FLAG_PAGE:
            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
            break;
        case RAM_SAVE_FLAG_XBZRLE:
            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            if (load_xbzrle(f, addr, host) < 0) {
                error_report("Failed to decompress XBZRLE page at "
                             RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            break;
        case RAM_SAVE_FLAG_EOS:
            /* normal exit */
            break;
        default:
            if (flags & RAM_SAVE_FLAG_HOOK) {
                ram_control_load_hook(f, flags);
            } else {
                error_report("Unknown combination of migration flags: %#x",
                             flags);
                ret = -EINVAL;
            }
        }
        if (!ret) {
            ret = qemu_file_get_error(f);
        }
    }

    rcu_read_unlock();
    DPRINTF("Completed load of VM with exit code %d seq iteration "
            "%" PRIu64 "\n", ret, seq_iter);
    return ret;
}

static SaveVMHandlers savevm_ram_handlers = {
    .save_live_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete = ram_save_complete,
    .save_live_pending = ram_save_pending,
    .load_state = ram_load,
    .cancel = ram_migration_cancel,
};

void ram_mig_init(void)
{
    qemu_mutex_init(&XBZRLE.lock);
    register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, NULL);
}

struct soundhw {
    const char *name;
    const char *descr;
    int enabled;
    int isa;
    union {
        int (*init_isa) (ISABus *bus);
        int (*init_pci) (PCIBus *bus);
    } init;
};

static struct soundhw soundhw[9];
static int soundhw_count;

void isa_register_soundhw(const char *name, const char *descr,
                          int (*init_isa)(ISABus *bus))
{
    assert(soundhw_count < ARRAY_SIZE(soundhw) - 1);
    soundhw[soundhw_count].name = name;
    soundhw[soundhw_count].descr = descr;
    soundhw[soundhw_count].isa = 1;
    soundhw[soundhw_count].init.init_isa = init_isa;
    soundhw_count++;
}

void pci_register_soundhw(const char *name, const char *descr,
                          int (*init_pci)(PCIBus *bus))
{
    assert(soundhw_count < ARRAY_SIZE(soundhw) - 1);
    soundhw[soundhw_count].name = name;
    soundhw[soundhw_count].descr = descr;
    soundhw[soundhw_count].isa = 0;
    soundhw[soundhw_count].init.init_pci = init_pci;
    soundhw_count++;
}

void select_soundhw(const char *optarg)
{
    struct soundhw *c;

    if (is_help_option(optarg)) {
    show_valid_cards:

        if (soundhw_count) {
            printf("Valid sound card names (comma separated):\n");
            for (c = soundhw; c->name; ++c) {
                printf("%-11s %s\n", c->name, c->descr);
            }
            printf("\n-soundhw all will enable all of the above\n");
        } else {
            printf("Machine has no user-selectable audio hardware "
                   "(it may or may not have always-present audio hardware).\n");
        }
        exit(!is_help_option(optarg));
    } else {
        size_t l;
        const char *p;
        char *e;
        int bad_card = 0;

        if (!strcmp(optarg, "all")) {
            for (c = soundhw; c->name; ++c) {
                c->enabled = 1;
            }
            return;
        }

        p = optarg;
        while (*p) {
            e = strchr(p, ',');
            l = !e ? strlen(p) : (size_t) (e - p);

            for (c = soundhw; c->name; ++c) {
                if (!strncmp(c->name, p, l) && !c->name[l]) {
                    c->enabled = 1;
                    break;
                }
            }

            if (!c->name) {
                if (l > 80) {
                    error_report("Unknown sound card name (too big to show)");
                } else {
                    error_report("Unknown sound card name `%.*s'",
                                 (int) l, p);
                }
                bad_card = 1;
            }
            p += l + (e != NULL);
        }

        if (bad_card) {
            goto show_valid_cards;
        }
    }
}

void audio_init(void)
{
    struct soundhw *c;
    ISABus *isa_bus = (ISABus *) object_resolve_path_type("", TYPE_ISA_BUS,
                                                          NULL);
    PCIBus *pci_bus = (PCIBus *) object_resolve_path_type("", TYPE_PCI_BUS,
                                                          NULL);

    for (c = soundhw; c->name; ++c) {
        if (c->enabled) {
            if (c->isa) {
                if (!isa_bus) {
                    error_report("ISA bus not available for %s", c->name);
                    exit(1);
                }
                c->init.init_isa(isa_bus);
            } else {
                if (!pci_bus) {
                    error_report("PCI bus not available for %s", c->name);
                    exit(1);
                }
                c->init.init_pci(pci_bus);
            }
        }
    }
}

int qemu_uuid_parse(const char *str, uint8_t *uuid)
{
    int ret;

    if (strlen(str) != 36) {
        return -1;
    }

    ret = sscanf(str, UUID_FMT, &uuid[0], &uuid[1], &uuid[2], &uuid[3],
                 &uuid[4], &uuid[5], &uuid[6], &uuid[7], &uuid[8], &uuid[9],
                 &uuid[10], &uuid[11], &uuid[12], &uuid[13], &uuid[14],
                 &uuid[15]);

    if (ret != 16) {
        return -1;
    }
    return 0;
}

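/*
 * Example: the 36-character string "550e8400-e29b-41d4-a716-446655440000"
 * fills uuid[0..15] with { 0x55, 0x0e, 0x84, 0x00, 0xe2, 0x9b, ... }.
 */
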
void do_acpitable_option(const QemuOpts *opts)
{
#ifdef TARGET_I386
    Error *err = NULL;

    acpi_table_add(opts, &err);
    if (err) {
        error_report("Wrong acpi table provided: %s",
                     error_get_pretty(err));
        error_free(err);
        exit(1);
    }
#endif
}

void do_smbios_option(QemuOpts *opts)
{
#ifdef TARGET_I386
    smbios_entry_add(opts);
#endif
}

void cpudef_init(void)
{
#if defined(cpudef_setup)
    cpudef_setup(); /* parse cpu definitions in target config file */
#endif
}

int kvm_available(void)
{
#ifdef CONFIG_KVM
    return 1;
#else
    return 0;
#endif
}

int xen_available(void)
{
#ifdef CONFIG_XEN
    return 1;
#else
    return 0;
#endif
}

TargetInfo *qmp_query_target(Error **errp)
{
    TargetInfo *info = g_malloc0(sizeof(*info));

    info->arch = g_strdup(TARGET_NAME);

    return info;
}

/* Stub function that gets run on the vcpu when it is brought out of the
   VM to run inside qemu via async_run_on_cpu() */
static void mig_sleep_cpu(void *opq)
{
    qemu_mutex_unlock_iothread();
    g_usleep(30 * 1000);
    qemu_mutex_lock_iothread();
}

/* To reduce the dirty rate, explicitly disallow the VCPUs from spending
   much time in the VM. The migration thread will try to catch up.
   The workload will experience a performance drop.
*/
static void mig_throttle_guest_down(void)
{
    CPUState *cpu;

    qemu_mutex_lock_iothread();
    CPU_FOREACH(cpu) {
        async_run_on_cpu(cpu, mig_sleep_cpu, NULL);
    }
    qemu_mutex_unlock_iothread();
}

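/*
 * Rough arithmetic (an estimate, not a guarantee): check_guest_throttling()
 * below fires at most once per 40 ms, and each firing puts every vCPU to
 * sleep for 30 ms, so a throttled guest is left with very roughly a quarter
 * of its normal CPU time.
 */
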
static void check_guest_throttling(void)
{
    static int64_t t0;
    int64_t        t1;

    if (!mig_throttle_on) {
        return;
    }

    if (!t0) {
        t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        return;
    }

    t1 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    /* If it has been more than 40 ms since the last time the guest
     * was throttled then do it again.
     */
    if (40 < (t1 - t0) / 1000000) {
        mig_throttle_guest_down();
        t0 = t1;
    }
}