xref: /qemu/system/arch_init.c (revision 6e1dea46b89e137ee1593ded5566d5371a61d304)
1 /*
2  * QEMU System Emulator
3  *
4  * Copyright (c) 2003-2008 Fabrice Bellard
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 #include <stdint.h>
25 #include <stdarg.h>
26 #include <stdlib.h>
27 #ifndef _WIN32
28 #include <sys/types.h>
29 #include <sys/mman.h>
30 #endif
31 #include "config.h"
32 #include "monitor/monitor.h"
33 #include "sysemu/sysemu.h"
34 #include "qemu/bitops.h"
35 #include "qemu/bitmap.h"
36 #include "sysemu/arch_init.h"
37 #include "audio/audio.h"
38 #include "hw/i386/pc.h"
39 #include "hw/pci/pci.h"
40 #include "hw/audio/audio.h"
41 #include "sysemu/kvm.h"
42 #include "migration/migration.h"
43 #include "hw/i386/smbios.h"
44 #include "exec/address-spaces.h"
45 #include "hw/audio/pcspk.h"
46 #include "migration/page_cache.h"
47 #include "qemu/config-file.h"
48 #include "qemu/error-report.h"
49 #include "qmp-commands.h"
50 #include "trace.h"
51 #include "exec/cpu-all.h"
52 #include "exec/ram_addr.h"
53 #include "hw/acpi/acpi.h"
54 #include "qemu/host-utils.h"
55 #include "qemu/rcu_queue.h"
56 
57 #ifdef DEBUG_ARCH_INIT
58 #define DPRINTF(fmt, ...) \
59     do { fprintf(stdout, "arch_init: " fmt, ## __VA_ARGS__); } while (0)
60 #else
61 #define DPRINTF(fmt, ...) \
62     do { } while (0)
63 #endif
64 
65 #ifdef TARGET_SPARC
66 int graphic_width = 1024;
67 int graphic_height = 768;
68 int graphic_depth = 8;
69 #else
70 int graphic_width = 800;
71 int graphic_height = 600;
72 int graphic_depth = 32;
73 #endif
74 
75 
76 #if defined(TARGET_ALPHA)
77 #define QEMU_ARCH QEMU_ARCH_ALPHA
78 #elif defined(TARGET_ARM)
79 #define QEMU_ARCH QEMU_ARCH_ARM
80 #elif defined(TARGET_CRIS)
81 #define QEMU_ARCH QEMU_ARCH_CRIS
82 #elif defined(TARGET_I386)
83 #define QEMU_ARCH QEMU_ARCH_I386
84 #elif defined(TARGET_M68K)
85 #define QEMU_ARCH QEMU_ARCH_M68K
86 #elif defined(TARGET_LM32)
87 #define QEMU_ARCH QEMU_ARCH_LM32
88 #elif defined(TARGET_MICROBLAZE)
89 #define QEMU_ARCH QEMU_ARCH_MICROBLAZE
90 #elif defined(TARGET_MIPS)
91 #define QEMU_ARCH QEMU_ARCH_MIPS
92 #elif defined(TARGET_MOXIE)
93 #define QEMU_ARCH QEMU_ARCH_MOXIE
94 #elif defined(TARGET_OPENRISC)
95 #define QEMU_ARCH QEMU_ARCH_OPENRISC
96 #elif defined(TARGET_PPC)
97 #define QEMU_ARCH QEMU_ARCH_PPC
98 #elif defined(TARGET_S390X)
99 #define QEMU_ARCH QEMU_ARCH_S390X
100 #elif defined(TARGET_SH4)
101 #define QEMU_ARCH QEMU_ARCH_SH4
102 #elif defined(TARGET_SPARC)
103 #define QEMU_ARCH QEMU_ARCH_SPARC
104 #elif defined(TARGET_XTENSA)
105 #define QEMU_ARCH QEMU_ARCH_XTENSA
106 #elif defined(TARGET_UNICORE32)
107 #define QEMU_ARCH QEMU_ARCH_UNICORE32
108 #elif defined(TARGET_TRICORE)
109 #define QEMU_ARCH QEMU_ARCH_TRICORE
110 #endif
111 
112 const uint32_t arch_type = QEMU_ARCH;
113 static bool mig_throttle_on;
114 static int dirty_rate_high_cnt;
115 static void check_guest_throttling(void);
116 
117 static uint64_t bitmap_sync_count;
118 
119 /***********************************************************/
120 /* ram save/restore */
121 
122 #define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
123 #define RAM_SAVE_FLAG_COMPRESS 0x02
124 #define RAM_SAVE_FLAG_MEM_SIZE 0x04
125 #define RAM_SAVE_FLAG_PAGE     0x08
126 #define RAM_SAVE_FLAG_EOS      0x10
127 #define RAM_SAVE_FLAG_CONTINUE 0x20
128 #define RAM_SAVE_FLAG_XBZRLE   0x40
129 /* 0x80 is reserved in migration.h; start with 0x100 for the next new flag */
130 
131 static struct defconfig_file {
132     const char *filename;
133     /* Indicates it is a user config file (disabled by -no-user-config) */
134     bool userconfig;
135 } default_config_files[] = {
136     { CONFIG_QEMU_CONFDIR "/qemu.conf",                   true },
137     { CONFIG_QEMU_CONFDIR "/target-" TARGET_NAME ".conf", true },
138     { NULL }, /* end of list */
139 };
140 
141 static const uint8_t ZERO_TARGET_PAGE[TARGET_PAGE_SIZE];
142 
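/* Read each default config file in turn; files flagged as user config are
 * skipped when -no-user-config was given.  A missing file (-ENOENT) is not
 * an error; any other failure aborts and is returned to the caller.
 */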
143 int qemu_read_default_config_files(bool userconfig)
144 {
145     int ret;
146     struct defconfig_file *f;
147 
148     for (f = default_config_files; f->filename; f++) {
149         if (!userconfig && f->userconfig) {
150             continue;
151         }
152         ret = qemu_read_config_file(f->filename);
153         if (ret < 0 && ret != -ENOENT) {
154             return ret;
155         }
156     }
157 
158     return 0;
159 }
160 
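/* Returns true iff the 'size' bytes starting at 'p' are all zero:
 * buffer_find_nonzero_offset() returns 'size' when no nonzero byte is found.
 */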
161 static inline bool is_zero_range(uint8_t *p, uint64_t size)
162 {
163     return buffer_find_nonzero_offset(p, size) == size;
164 }
165 
166 /* This struct contains the XBZRLE cache and a static page
167    used by the compression */
168 static struct {
169     /* buffer used for XBZRLE encoding */
170     uint8_t *encoded_buf;
171     /* buffer for storing page content */
172     uint8_t *current_buf;
173     /* Cache for XBZRLE, Protected by lock. */
174     PageCache *cache;
175     QemuMutex lock;
176 } XBZRLE;
177 
178 /* buffer used for XBZRLE decoding */
179 static uint8_t *xbzrle_decoded_buf;
180 
181 static void XBZRLE_cache_lock(void)
182 {
183     if (migrate_use_xbzrle()) {
184         qemu_mutex_lock(&XBZRLE.lock);
    }
185 }
186 
187 static void XBZRLE_cache_unlock(void)
188 {
189     if (migrate_use_xbzrle()) {
190         qemu_mutex_unlock(&XBZRLE.lock);
    }
191 }
192 
193 /*
194  * Called from qmp_migrate_set_cache_size in the main thread, possibly while
195  * a migration is in progress.
196  * A running migration may be using the cache and might finish during this
197  * call, hence changes to the cache are protected by the XBZRLE.lock mutex.
198  */
199 int64_t xbzrle_cache_resize(int64_t new_size)
200 {
201     PageCache *new_cache;
202     int64_t ret;
203 
204     if (new_size < TARGET_PAGE_SIZE) {
205         return -1;
206     }
207 
208     XBZRLE_cache_lock();
209 
210     if (XBZRLE.cache != NULL) {
211         if (pow2floor(new_size) == migrate_xbzrle_cache_size()) {
212             goto out_new_size;
213         }
214         new_cache = cache_init(new_size / TARGET_PAGE_SIZE,
215                                TARGET_PAGE_SIZE);
216         if (!new_cache) {
217             error_report("Error creating cache");
218             ret = -1;
219             goto out;
220         }
221 
222         cache_fini(XBZRLE.cache);
223         XBZRLE.cache = new_cache;
224     }
225 
226 out_new_size:
227     ret = pow2floor(new_size);
228 out:
229     XBZRLE_cache_unlock();
230     return ret;
231 }
232 
233 /* accounting for migration statistics */
234 typedef struct AccountingInfo {
235     uint64_t dup_pages;
236     uint64_t skipped_pages;
237     uint64_t norm_pages;
238     uint64_t iterations;
239     uint64_t xbzrle_bytes;
240     uint64_t xbzrle_pages;
241     uint64_t xbzrle_cache_miss;
242     double xbzrle_cache_miss_rate;
243     uint64_t xbzrle_overflows;
244 } AccountingInfo;
245 
246 static AccountingInfo acct_info;
247 
248 static void acct_clear(void)
249 {
250     memset(&acct_info, 0, sizeof(acct_info));
251 }
252 
253 uint64_t dup_mig_bytes_transferred(void)
254 {
255     return acct_info.dup_pages * TARGET_PAGE_SIZE;
256 }
257 
258 uint64_t dup_mig_pages_transferred(void)
259 {
260     return acct_info.dup_pages;
261 }
262 
263 uint64_t skipped_mig_bytes_transferred(void)
264 {
265     return acct_info.skipped_pages * TARGET_PAGE_SIZE;
266 }
267 
268 uint64_t skipped_mig_pages_transferred(void)
269 {
270     return acct_info.skipped_pages;
271 }
272 
273 uint64_t norm_mig_bytes_transferred(void)
274 {
275     return acct_info.norm_pages * TARGET_PAGE_SIZE;
276 }
277 
278 uint64_t norm_mig_pages_transferred(void)
279 {
280     return acct_info.norm_pages;
281 }
282 
283 uint64_t xbzrle_mig_bytes_transferred(void)
284 {
285     return acct_info.xbzrle_bytes;
286 }
287 
288 uint64_t xbzrle_mig_pages_transferred(void)
289 {
290     return acct_info.xbzrle_pages;
291 }
292 
293 uint64_t xbzrle_mig_pages_cache_miss(void)
294 {
295     return acct_info.xbzrle_cache_miss;
296 }
297 
298 double xbzrle_mig_cache_miss_rate(void)
299 {
300     return acct_info.xbzrle_cache_miss_rate;
301 }
302 
303 uint64_t xbzrle_mig_pages_overflow(void)
304 {
305     return acct_info.xbzrle_overflows;
306 }
307 
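/* Write the page header to the stream: a be64 carrying the page offset
 * OR'd with the RAM_SAVE_FLAG_* bits, followed (unless this is a
 * continuation within the same block) by the block's idstr as a
 * length-prefixed string.  Returns the number of header bytes written.
 */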
308 static size_t save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
309                              int cont, int flag)
310 {
311     size_t size;
312 
313     qemu_put_be64(f, offset | cont | flag);
314     size = 8;
315 
316     if (!cont) {
317         qemu_put_byte(f, strlen(block->idstr));
318         qemu_put_buffer(f, (uint8_t *)block->idstr,
319                         strlen(block->idstr));
320         size += 1 + strlen(block->idstr);
321     }
322     return size;
323 }
324 
325 /* This is the last block that we have visited searching for dirty pages
326  */
327 static RAMBlock *last_seen_block;
328 /* This is the last block from where we have sent data */
329 static RAMBlock *last_sent_block;
330 static ram_addr_t last_offset;
331 static unsigned long *migration_bitmap;
332 static uint64_t migration_dirty_pages;
333 static uint32_t last_version;
334 static bool ram_bulk_stage;
335 
336 /* Update the xbzrle cache to reflect a page that's been sent as all 0.
337  * The important thing is that a stale (not-yet-0'd) page be replaced
338  * by the new data.
339  * As a bonus, if the page wasn't in the cache it gets added so that
340  * when a small write is made into the 0'd page, it gets XBZRLE sent.
341  */
342 static void xbzrle_cache_zero_page(ram_addr_t current_addr)
343 {
344     if (ram_bulk_stage || !migrate_use_xbzrle()) {
345         return;
346     }
347 
348     /* We don't care if this fails to allocate a new cache page
349      * as long as it updated an old one */
350     cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE,
351                  bitmap_sync_count);
352 }
353 
354 #define ENCODING_FLAG_XBZRLE 0x1
355 
356 static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data,
357                             ram_addr_t current_addr, RAMBlock *block,
358                             ram_addr_t offset, int cont, bool last_stage)
359 {
360     int encoded_len = 0, bytes_sent = -1;
361     uint8_t *prev_cached_page;
362 
363     if (!cache_is_cached(XBZRLE.cache, current_addr, bitmap_sync_count)) {
364         acct_info.xbzrle_cache_miss++;
365         if (!last_stage) {
366             if (cache_insert(XBZRLE.cache, current_addr, *current_data,
367                              bitmap_sync_count) == -1) {
368                 return -1;
369             } else {
370                 /* update *current_data when the page has been
371                    inserted into cache */
372                 *current_data = get_cached_data(XBZRLE.cache, current_addr);
373             }
374         }
375         return -1;
376     }
377 
378     prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);
379 
380     /* save current buffer into memory */
381     memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);
382 
383     /* XBZRLE encoding (if there is no overflow) */
384     encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
385                                        TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
386                                        TARGET_PAGE_SIZE);
387     if (encoded_len == 0) {
388         DPRINTF("Skipping unmodified page\n");
389         return 0;
390     } else if (encoded_len == -1) {
391         DPRINTF("Overflow\n");
392         acct_info.xbzrle_overflows++;
393         /* update data in the cache */
394         if (!last_stage) {
395             memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
396             *current_data = prev_cached_page;
397         }
398         return -1;
399     }
400 
401     /* Update the cache so it matches the page the destination will now hold */
402     if (!last_stage) {
403         memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
404     }
405 
406     /* Send XBZRLE based compressed page */
407     bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_XBZRLE);
408     qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
409     qemu_put_be16(f, encoded_len);
410     qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
411     bytes_sent += encoded_len + 1 + 2;
412     acct_info.xbzrle_pages++;
413     acct_info.xbzrle_bytes += bytes_sent;
414 
415     return bytes_sent;
416 }
417 
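/* Find the next dirty page in 'mr' at or after 'start', clear its bit in
 * the migration bitmap and return its offset within the block.  During the
 * bulk stage every page is assumed dirty, so we simply advance to the next
 * page instead of scanning the bitmap.  The returned offset is at or beyond
 * the region size when there is nothing left to send.
 */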
418 static inline
419 ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
420                                                  ram_addr_t start)
421 {
422     unsigned long base = mr->ram_addr >> TARGET_PAGE_BITS;
423     unsigned long nr = base + (start >> TARGET_PAGE_BITS);
424     uint64_t mr_size = TARGET_PAGE_ALIGN(memory_region_size(mr));
425     unsigned long size = base + (mr_size >> TARGET_PAGE_BITS);
426 
427     unsigned long next;
428 
429     if (ram_bulk_stage && nr > base) {
430         next = nr + 1;
431     } else {
432         next = find_next_bit(migration_bitmap, size, nr);
433     }
434 
435     if (next < size) {
436         clear_bit(next, migration_bitmap);
437         migration_dirty_pages--;
438     }
439     return (next - base) << TARGET_PAGE_BITS;
440 }
441 
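/* Mark 'addr' dirty in the migration bitmap, bumping the dirty-page count
 * only when the bit was not already set.  Returns the previous bit value.
 */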
442 static inline bool migration_bitmap_set_dirty(ram_addr_t addr)
443 {
444     bool ret;
445     int nr = addr >> TARGET_PAGE_BITS;
446 
447     ret = test_and_set_bit(nr, migration_bitmap);
448 
449     if (!ret) {
450         migration_dirty_pages++;
451     }
452     return ret;
453 }
454 
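/* Pull dirty bits for [start, start + length) out of the ram_list dirty
 * memory bitmap and into the migration bitmap.  When 'start' is aligned to
 * a bitmap word we OR whole words at a time and count the newly dirtied
 * pages with ctpopl(); otherwise we fall back to testing and resetting one
 * page at a time.
 */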
455 static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
456 {
457     ram_addr_t addr;
458     unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
459 
460     /* Is the start address aligned to a bitmap word boundary? */
461     if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
462         int k;
463         int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
464         unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];
465 
466         for (k = page; k < page + nr; k++) {
467             if (src[k]) {
468                 unsigned long new_dirty;
469                 new_dirty = ~migration_bitmap[k];
470                 migration_bitmap[k] |= src[k];
471                 new_dirty &= src[k];
472                 migration_dirty_pages += ctpopl(new_dirty);
473                 src[k] = 0;
474             }
475         }
476     } else {
477         for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
478             if (cpu_physical_memory_get_dirty(start + addr,
479                                               TARGET_PAGE_SIZE,
480                                               DIRTY_MEMORY_MIGRATION)) {
481                 cpu_physical_memory_reset_dirty(start + addr,
482                                                 TARGET_PAGE_SIZE,
483                                                 DIRTY_MEMORY_MIGRATION);
484                 migration_bitmap_set_dirty(start + addr);
485             }
486         }
487     }
488 }
489 
490 
491 /* Fix me: there are too many global variables used in the migration process. */
492 static int64_t start_time;
493 static int64_t bytes_xfer_prev;
494 static int64_t num_dirty_pages_period;
495 
496 static void migration_bitmap_sync_init(void)
497 {
498     start_time = 0;
499     bytes_xfer_prev = 0;
500     num_dirty_pages_period = 0;
501 }
502 
503 /* Called with iothread lock held, to protect ram_list.dirty_memory[] */
504 static void migration_bitmap_sync(void)
505 {
506     RAMBlock *block;
507     uint64_t num_dirty_pages_init = migration_dirty_pages;
508     MigrationState *s = migrate_get_current();
509     int64_t end_time;
510     int64_t bytes_xfer_now;
511     static uint64_t xbzrle_cache_miss_prev;
512     static uint64_t iterations_prev;
513 
514     bitmap_sync_count++;
515 
516     if (!bytes_xfer_prev) {
517         bytes_xfer_prev = ram_bytes_transferred();
518     }
519 
520     if (!start_time) {
521         start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
522     }
523 
524     trace_migration_bitmap_sync_start();
525     address_space_sync_dirty_bitmap(&address_space_memory);
526 
527     rcu_read_lock();
528     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
529         migration_bitmap_sync_range(block->mr->ram_addr, block->used_length);
530     }
531     rcu_read_unlock();
532 
533     trace_migration_bitmap_sync_end(migration_dirty_pages
534                                     - num_dirty_pages_init);
535     num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
536     end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
537 
538     /* more than 1 second = 1000 milliseconds */
539     if (end_time > start_time + 1000) {
540         if (migrate_auto_converge()) {
541             /* The following detection logic can be refined later. For now:
542            Check to see if the dirtied bytes exceed 50% of the approximate
543            number of bytes that just got transferred since the last time we
544            were in this routine. If that happens >N times (for now N==4)
545            we turn on the throttle-down logic */
546             bytes_xfer_now = ram_bytes_transferred();
547             if (s->dirty_pages_rate &&
548                (num_dirty_pages_period * TARGET_PAGE_SIZE >
549                    (bytes_xfer_now - bytes_xfer_prev)/2) &&
550                (dirty_rate_high_cnt++ > 4)) {
551                     trace_migration_throttle();
552                     mig_throttle_on = true;
553                     dirty_rate_high_cnt = 0;
554              }
555              bytes_xfer_prev = bytes_xfer_now;
556         } else {
557              mig_throttle_on = false;
558         }
559         if (migrate_use_xbzrle()) {
560             if (iterations_prev != 0) {
561                 acct_info.xbzrle_cache_miss_rate =
562                    (double)(acct_info.xbzrle_cache_miss -
563                             xbzrle_cache_miss_prev) /
564                    (acct_info.iterations - iterations_prev);
565             }
566             iterations_prev = acct_info.iterations;
567             xbzrle_cache_miss_prev = acct_info.xbzrle_cache_miss;
568         }
569         s->dirty_pages_rate = num_dirty_pages_period * 1000
570             / (end_time - start_time);
571         s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
572         start_time = end_time;
573         num_dirty_pages_period = 0;
574         s->dirty_sync_count = bitmap_sync_count;
575     }
576 }
577 
578 /*
579  * ram_save_page: Send the given page to the stream
580  *
581  * Returns: Number of bytes written.
582  */
583 static int ram_save_page(QEMUFile *f, RAMBlock* block, ram_addr_t offset,
584                          bool last_stage)
585 {
586     int bytes_sent;
587     uint64_t bytes_xmit;
588     int cont;
589     ram_addr_t current_addr;
590     MemoryRegion *mr = block->mr;
591     uint8_t *p;
592     int ret;
593     bool send_async = true;
594 
595     cont = (block == last_sent_block) ? RAM_SAVE_FLAG_CONTINUE : 0;
596 
597     p = memory_region_get_ram_ptr(mr) + offset;
598 
599     /* When in doubt, send the page as a normal page */
600     bytes_sent = -1;
601     bytes_xmit = 0;
602     ret = ram_control_save_page(f, block->offset,
603                                 offset, TARGET_PAGE_SIZE, &bytes_xmit);
604     if (bytes_xmit) {
605         bytes_sent = bytes_xmit;
606     }
607 
608     XBZRLE_cache_lock();
609 
610     current_addr = block->offset + offset;
611     if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
612         if (ret != RAM_SAVE_CONTROL_DELAYED) {
613             if (bytes_xmit > 0) {
614                 acct_info.norm_pages++;
615             } else if (bytes_xmit == 0) {
616                 acct_info.dup_pages++;
617             }
618         }
619     } else if (is_zero_range(p, TARGET_PAGE_SIZE)) {
620         acct_info.dup_pages++;
621         bytes_sent = save_block_hdr(f, block, offset, cont,
622                                     RAM_SAVE_FLAG_COMPRESS);
623         qemu_put_byte(f, 0);
624         bytes_sent++;
625         /* Must let xbzrle know, otherwise a previous (now 0'd) cached
626          * page would be stale
627          */
628         xbzrle_cache_zero_page(current_addr);
629     } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
630         bytes_sent = save_xbzrle_page(f, &p, current_addr, block,
631                                       offset, cont, last_stage);
632         if (!last_stage) {
633             /* Can't send this cached data async, since the cache page
634              * might get updated before it gets to the wire
635              */
636             send_async = false;
637         }
638     }
639 
640     /* XBZRLE overflow or normal page */
641     if (bytes_sent == -1) {
642         bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_PAGE);
643         if (send_async) {
644             qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
645         } else {
646             qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
647         }
648         bytes_sent += TARGET_PAGE_SIZE;
649         acct_info.norm_pages++;
650     }
651 
652     XBZRLE_cache_unlock();
653 
654     return bytes_sent;
655 }
656 
657 /*
658  * ram_find_and_save_block: Finds a page to send and sends it to f
659  *
660  * Called within an RCU critical section.
661  *
662  * Returns:  The number of bytes written.
663  *           0 means no dirty pages
664  */
665 
666 static int ram_find_and_save_block(QEMUFile *f, bool last_stage)
667 {
668     RAMBlock *block = last_seen_block;
669     ram_addr_t offset = last_offset;
670     bool complete_round = false;
671     int bytes_sent = 0;
672     MemoryRegion *mr;
673 
674     if (!block) {
675         block = QLIST_FIRST_RCU(&ram_list.blocks);
    }
676 
677     while (true) {
678         mr = block->mr;
679         offset = migration_bitmap_find_and_reset_dirty(mr, offset);
680         if (complete_round && block == last_seen_block &&
681             offset >= last_offset) {
682             break;
683         }
684         if (offset >= block->used_length) {
685             offset = 0;
686             block = QLIST_NEXT_RCU(block, next);
687             if (!block) {
688                 block = QLIST_FIRST_RCU(&ram_list.blocks);
689                 complete_round = true;
690                 ram_bulk_stage = false;
691             }
692         } else {
693             bytes_sent = ram_save_page(f, block, offset, last_stage);
694 
695             /* if page is unmodified, continue to the next */
696             if (bytes_sent > 0) {
697                 last_sent_block = block;
698                 break;
699             }
700         }
701     }
702 
703     last_seen_block = block;
704     last_offset = offset;
705     return bytes_sent;
706 }
707 
708 static uint64_t bytes_transferred;
709 
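/* Account for pages that were transferred (or found zero) outside the
 * normal save path -- presumably by a ram_control backend such as RDMA --
 * and advance the QEMUFile position accordingly.
 */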
710 void acct_update_position(QEMUFile *f, size_t size, bool zero)
711 {
712     uint64_t pages = size / TARGET_PAGE_SIZE;
713     if (zero) {
714         acct_info.dup_pages += pages;
715     } else {
716         acct_info.norm_pages += pages;
717         bytes_transferred += size;
718         qemu_update_position(f, size);
719     }
720 }
721 
722 static ram_addr_t ram_save_remaining(void)
723 {
724     return migration_dirty_pages;
725 }
726 
727 uint64_t ram_bytes_remaining(void)
728 {
729     return ram_save_remaining() * TARGET_PAGE_SIZE;
730 }
731 
732 uint64_t ram_bytes_transferred(void)
733 {
734     return bytes_transferred;
735 }
736 
737 uint64_t ram_bytes_total(void)
738 {
739     RAMBlock *block;
740     uint64_t total = 0;
741 
742     rcu_read_lock();
743     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
744         total += block->used_length;
    }
745     rcu_read_unlock();
746     return total;
747 }
748 
749 void free_xbzrle_decoded_buf(void)
750 {
751     g_free(xbzrle_decoded_buf);
752     xbzrle_decoded_buf = NULL;
753 }
754 
755 static void migration_end(void)
756 {
757     if (migration_bitmap) {
758         memory_global_dirty_log_stop();
759         g_free(migration_bitmap);
760         migration_bitmap = NULL;
761     }
762 
763     XBZRLE_cache_lock();
764     if (XBZRLE.cache) {
765         cache_fini(XBZRLE.cache);
766         g_free(XBZRLE.encoded_buf);
767         g_free(XBZRLE.current_buf);
768         XBZRLE.cache = NULL;
769         XBZRLE.encoded_buf = NULL;
770         XBZRLE.current_buf = NULL;
771     }
772     XBZRLE_cache_unlock();
773 }
774 
775 static void ram_migration_cancel(void *opaque)
776 {
777     migration_end();
778 }
779 
780 static void reset_ram_globals(void)
781 {
782     last_seen_block = NULL;
783     last_sent_block = NULL;
784     last_offset = 0;
785     last_version = ram_list.version;
786     ram_bulk_stage = true;
787 }
788 
789 #define MAX_WAIT 50 /* ms, half buffered_file limit */
790 
791 
792 /* Each of ram_save_setup, ram_save_iterate and ram_save_complete has
793  * a long-running RCU critical section.  When RCU reclaims in the code
794  * start to become numerous, it will be necessary to reduce the
795  * granularity of these critical sections.
796  */
797 
798 static int ram_save_setup(QEMUFile *f, void *opaque)
799 {
800     RAMBlock *block;
801     int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */
802 
803     mig_throttle_on = false;
804     dirty_rate_high_cnt = 0;
805     bitmap_sync_count = 0;
806     migration_bitmap_sync_init();
807 
808     if (migrate_use_xbzrle()) {
809         XBZRLE_cache_lock();
810         XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
811                                   TARGET_PAGE_SIZE,
812                                   TARGET_PAGE_SIZE);
813         if (!XBZRLE.cache) {
814             XBZRLE_cache_unlock();
815             error_report("Error creating cache");
816             return -1;
817         }
818         XBZRLE_cache_unlock();
819 
820         /* We prefer not to abort if there is no memory */
821         XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
822         if (!XBZRLE.encoded_buf) {
823             error_report("Error allocating encoded_buf");
824             return -1;
825         }
826 
827         XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
828         if (!XBZRLE.current_buf) {
829             error_report("Error allocating current_buf");
830             g_free(XBZRLE.encoded_buf);
831             XBZRLE.encoded_buf = NULL;
832             return -1;
833         }
834 
835         acct_clear();
836     }
837 
838     /* iothread lock needed for ram_list.dirty_memory[] */
839     qemu_mutex_lock_iothread();
840     qemu_mutex_lock_ramlist();
841     rcu_read_lock();
842     bytes_transferred = 0;
843     reset_ram_globals();
844 
845     ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS;
846     migration_bitmap = bitmap_new(ram_bitmap_pages);
847     bitmap_set(migration_bitmap, 0, ram_bitmap_pages);
848 
849     /*
850      * Count the total number of pages used by ram blocks not including any
851      * gaps due to alignment or unplugs.
852      */
853     migration_dirty_pages = 0;
854     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
855         uint64_t block_pages;
856 
857         block_pages = block->used_length >> TARGET_PAGE_BITS;
858         migration_dirty_pages += block_pages;
859     }
860 
861     memory_global_dirty_log_start();
862     migration_bitmap_sync();
863     qemu_mutex_unlock_ramlist();
864     qemu_mutex_unlock_iothread();
865 
866     qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
867 
868     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
869         qemu_put_byte(f, strlen(block->idstr));
870         qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
871         qemu_put_be64(f, block->used_length);
872     }
873 
874     rcu_read_unlock();
875 
876     ram_control_before_iterate(f, RAM_CONTROL_SETUP);
877     ram_control_after_iterate(f, RAM_CONTROL_SETUP);
878 
879     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
880 
881     return 0;
882 }
883 
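/* One iteration of the live phase: keep sending dirty pages until the
 * rate limit trips, no dirty page is left, or we have spent more than
 * MAX_WAIT ms in the loop.  Returns the number of page bytes sent (0 when
 * nothing was dirty) or a negative error from the stream.
 */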
884 static int ram_save_iterate(QEMUFile *f, void *opaque)
885 {
886     int ret;
887     int i;
888     int64_t t0;
889     int total_sent = 0;
890 
891     rcu_read_lock();
892     if (ram_list.version != last_version) {
893         reset_ram_globals();
894     }
895 
896     /* Read version before ram_list.blocks */
897     smp_rmb();
898 
899     ram_control_before_iterate(f, RAM_CONTROL_ROUND);
900 
901     t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
902     i = 0;
903     while ((ret = qemu_file_rate_limit(f)) == 0) {
904         int bytes_sent;
905 
906         bytes_sent = ram_find_and_save_block(f, false);
907         /* no more blocks to send */
908         if (bytes_sent == 0) {
909             break;
910         }
911         total_sent += bytes_sent;
912         acct_info.iterations++;
913         check_guest_throttling();
914         /* we want to check in the 1st loop, just in case it was the 1st time
915            and we had to sync the dirty bitmap.
916            qemu_clock_get_ns() is a bit expensive, so we only check once
917            every few iterations.
918         */
919         if ((i & 63) == 0) {
920             uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
921             if (t1 > MAX_WAIT) {
922                 DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
923                         t1, i);
924                 break;
925             }
926         }
927         i++;
928     }
929     rcu_read_unlock();
930 
931     /*
932      * Must occur before EOS (or any QEMUFile operation)
933      * because of the RDMA protocol.
934      */
935     ram_control_after_iterate(f, RAM_CONTROL_ROUND);
936 
937     bytes_transferred += total_sent;
938 
939     /*
940      * Do not count these 8 bytes into total_sent, so that we can
941      * return 0 if no page had been dirtied.
942      */
943     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
944     bytes_transferred += 8;
945 
946     ret = qemu_file_get_error(f);
947     if (ret < 0) {
948         return ret;
949     }
950 
951     return total_sent;
952 }
953 
954 /* Called with iothread lock */
955 static int ram_save_complete(QEMUFile *f, void *opaque)
956 {
957     rcu_read_lock();
958 
959     migration_bitmap_sync();
960 
961     ram_control_before_iterate(f, RAM_CONTROL_FINISH);
962 
963     /* try transferring iterative blocks of memory */
964 
965     /* flush all remaining blocks regardless of rate limiting */
966     while (true) {
967         int bytes_sent;
968 
969         bytes_sent = ram_find_and_save_block(f, true);
970         /* no more blocks to send */
971         if (bytes_sent == 0) {
972             break;
973         }
974         bytes_transferred += bytes_sent;
975     }
976 
977     ram_control_after_iterate(f, RAM_CONTROL_FINISH);
978     migration_end();
979 
980     rcu_read_unlock();
981     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
982 
983     return 0;
984 }
985 
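/* Estimate how many bytes are still to be sent.  If the estimate has
 * dropped below 'max_size' we are close to converging, so re-sync the
 * dirty bitmap (under the iothread lock) to get an up-to-date figure.
 */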
986 static uint64_t ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size)
987 {
988     uint64_t remaining_size;
989 
990     remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
991 
992     if (remaining_size < max_size) {
993         qemu_mutex_lock_iothread();
994         rcu_read_lock();
995         migration_bitmap_sync();
996         rcu_read_unlock();
997         qemu_mutex_unlock_iothread();
998         remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
999     }
1000     return remaining_size;
1001 }
1002 
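/* Load one XBZRLE-encoded page: a flags byte (must be
 * ENCODING_FLAG_XBZRLE), a be16 encoded length, then the encoded data,
 * which is decoded over the current contents of 'host'.
 * Returns 0 on success, -1 on any malformed input.
 */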
1003 static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
1004 {
1005     unsigned int xh_len;
1006     int xh_flags;
1007 
1008     if (!xbzrle_decoded_buf) {
1009         xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
1010     }
1011 
1012     /* extract RLE header */
1013     xh_flags = qemu_get_byte(f);
1014     xh_len = qemu_get_be16(f);
1015 
1016     if (xh_flags != ENCODING_FLAG_XBZRLE) {
1017         error_report("Failed to load XBZRLE page - wrong compression!");
1018         return -1;
1019     }
1020 
1021     if (xh_len > TARGET_PAGE_SIZE) {
1022         error_report("Failed to load XBZRLE page - len overflow!");
1023         return -1;
1024     }
1025     /* load data and decode */
1026     qemu_get_buffer(f, xbzrle_decoded_buf, xh_len);
1027 
1028     /* decode RLE */
1029     if (xbzrle_decode_buffer(xbzrle_decoded_buf, xh_len, host,
1030                              TARGET_PAGE_SIZE) == -1) {
1031         error_report("Failed to load XBZRLE page - decode error!");
1032         return -1;
1033     }
1034 
1035     return 0;
1036 }
1037 
1038 /* Must be called from within an RCU critical section.
1039  * Returns a pointer from within the RCU-protected ram_list.
1040  */
1041 static inline void *host_from_stream_offset(QEMUFile *f,
1042                                             ram_addr_t offset,
1043                                             int flags)
1044 {
1045     static RAMBlock *block = NULL;
1046     char id[256];
1047     uint8_t len;
1048 
1049     if (flags & RAM_SAVE_FLAG_CONTINUE) {
1050         if (!block || block->max_length <= offset) {
1051             error_report("Ack, bad migration stream!");
1052             return NULL;
1053         }
1054 
1055         return memory_region_get_ram_ptr(block->mr) + offset;
1056     }
1057 
1058     len = qemu_get_byte(f);
1059     qemu_get_buffer(f, (uint8_t *)id, len);
1060     id[len] = 0;
1061 
1062     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1063         if (!strncmp(id, block->idstr, sizeof(id)) &&
1064             block->max_length > offset) {
1065             return memory_region_get_ram_ptr(block->mr) + offset;
1066         }
1067     }
1068 
1069     error_report("Can't find block %s!", id);
1070     return NULL;
1071 }
1072 
1073 /*
1074  * If a page (or a whole RDMA chunk) has been
1075  * determined to be zero, then zap it.
1076  */
1077 void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
1078 {
1079     if (ch != 0 || !is_zero_range(host, size)) {
1080         memset(host, ch, size);
1081     }
1082 }
1083 
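/* Incoming side: consume records from the stream until EOS.  Each record
 * starts with a be64 whose bits below the page size carry the
 * RAM_SAVE_FLAG_* flags and whose page-aligned part is the guest address.
 */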
1084 static int ram_load(QEMUFile *f, void *opaque, int version_id)
1085 {
1086     int flags = 0, ret = 0;
1087     static uint64_t seq_iter;
1088 
1089     seq_iter++;
1090 
1091     if (version_id != 4) {
1092         ret = -EINVAL;
1093     }
1094 
1095     /* This RCU critical section can be very long running.
1096      * When RCU reclaims in the code start to become numerous,
1097      * it will be necessary to reduce the granularity of this
1098      * critical section.
1099      */
1100     rcu_read_lock();
1101     while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
1102         ram_addr_t addr, total_ram_bytes;
1103         void *host;
1104         uint8_t ch;
1105 
1106         addr = qemu_get_be64(f);
1107         flags = addr & ~TARGET_PAGE_MASK;
1108         addr &= TARGET_PAGE_MASK;
1109 
1110         switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
1111         case RAM_SAVE_FLAG_MEM_SIZE:
1112             /* Synchronize RAM block list */
1113             total_ram_bytes = addr;
1114             while (!ret && total_ram_bytes) {
1115                 RAMBlock *block;
1116                 uint8_t len;
1117                 char id[256];
1118                 ram_addr_t length;
1119 
1120                 len = qemu_get_byte(f);
1121                 qemu_get_buffer(f, (uint8_t *)id, len);
1122                 id[len] = 0;
1123                 length = qemu_get_be64(f);
1124 
1125                 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1126                     if (!strncmp(id, block->idstr, sizeof(id))) {
1127                         if (length != block->used_length) {
1128                             Error *local_err = NULL;
1129 
1130                             ret = qemu_ram_resize(block->offset, length, &local_err);
1131                             if (local_err) {
1132                                 error_report_err(local_err);
1133                             }
1134                         }
1135                         break;
1136                     }
1137                 }
1138 
1139                 if (!block) {
1140                     error_report("Unknown ramblock \"%s\", cannot "
1141                                  "accept migration", id);
1142                     ret = -EINVAL;
1143                 }
1144 
1145                 total_ram_bytes -= length;
1146             }
1147             break;
1148         case RAM_SAVE_FLAG_COMPRESS:
1149             host = host_from_stream_offset(f, addr, flags);
1150             if (!host) {
1151                 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
1152                 ret = -EINVAL;
1153                 break;
1154             }
1155             ch = qemu_get_byte(f);
1156             ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
1157             break;
1158         case RAM_SAVE_FLAG_PAGE:
1159             host = host_from_stream_offset(f, addr, flags);
1160             if (!host) {
1161                 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
1162                 ret = -EINVAL;
1163                 break;
1164             }
1165             qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
1166             break;
1167         case RAM_SAVE_FLAG_XBZRLE:
1168             host = host_from_stream_offset(f, addr, flags);
1169             if (!host) {
1170                 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
1171                 ret = -EINVAL;
1172                 break;
1173             }
1174             if (load_xbzrle(f, addr, host) < 0) {
1175                 error_report("Failed to decompress XBZRLE page at "
1176                              RAM_ADDR_FMT, addr);
1177                 ret = -EINVAL;
1178                 break;
1179             }
1180             break;
1181         case RAM_SAVE_FLAG_EOS:
1182             /* normal exit */
1183             break;
1184         default:
1185             if (flags & RAM_SAVE_FLAG_HOOK) {
1186                 ram_control_load_hook(f, flags);
1187             } else {
1188                 error_report("Unknown combination of migration flags: %#x",
1189                              flags);
1190                 ret = -EINVAL;
1191             }
1192         }
1193         if (!ret) {
1194             ret = qemu_file_get_error(f);
1195         }
1196     }
1197 
1198     rcu_read_unlock();
1199     DPRINTF("Completed load of VM with exit code %d seq iteration "
1200             "%" PRIu64 "\n", ret, seq_iter);
1201     return ret;
1202 }
1203 
1204 static SaveVMHandlers savevm_ram_handlers = {
1205     .save_live_setup = ram_save_setup,
1206     .save_live_iterate = ram_save_iterate,
1207     .save_live_complete = ram_save_complete,
1208     .save_live_pending = ram_save_pending,
1209     .load_state = ram_load,
1210     .cancel = ram_migration_cancel,
1211 };
1212 
1213 void ram_mig_init(void)
1214 {
1215     qemu_mutex_init(&XBZRLE.lock);
1216     register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, NULL);
1217 }
1218 
1219 struct soundhw {
1220     const char *name;
1221     const char *descr;
1222     int enabled;
1223     int isa;
1224     union {
1225         int (*init_isa) (ISABus *bus);
1226         int (*init_pci) (PCIBus *bus);
1227     } init;
1228 };
1229 
1230 static struct soundhw soundhw[9];
1231 static int soundhw_count;
1232 
1233 void isa_register_soundhw(const char *name, const char *descr,
1234                           int (*init_isa)(ISABus *bus))
1235 {
1236     assert(soundhw_count < ARRAY_SIZE(soundhw) - 1);
1237     soundhw[soundhw_count].name = name;
1238     soundhw[soundhw_count].descr = descr;
1239     soundhw[soundhw_count].isa = 1;
1240     soundhw[soundhw_count].init.init_isa = init_isa;
1241     soundhw_count++;
1242 }
1243 
1244 void pci_register_soundhw(const char *name, const char *descr,
1245                           int (*init_pci)(PCIBus *bus))
1246 {
1247     assert(soundhw_count < ARRAY_SIZE(soundhw) - 1);
1248     soundhw[soundhw_count].name = name;
1249     soundhw[soundhw_count].descr = descr;
1250     soundhw[soundhw_count].isa = 0;
1251     soundhw[soundhw_count].init.init_pci = init_pci;
1252     soundhw_count++;
1253 }
1254 
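/* Parse the -soundhw argument: a comma-separated list of card names, or
 * the special value "all".  A help option lists the registered cards and
 * exits; unknown names also produce the list and a failure exit.
 */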
1255 void select_soundhw(const char *optarg)
1256 {
1257     struct soundhw *c;
1258 
1259     if (is_help_option(optarg)) {
1260     show_valid_cards:
1261 
1262         if (soundhw_count) {
1263              printf("Valid sound card names (comma separated):\n");
1264              for (c = soundhw; c->name; ++c) {
1265                  printf ("%-11s %s\n", c->name, c->descr);
1266              }
1267              printf("\n-soundhw all will enable all of the above\n");
1268         } else {
1269              printf("Machine has no user-selectable audio hardware "
1270                     "(it may or may not have always-present audio hardware).\n");
1271         }
1272         exit(!is_help_option(optarg));
1273     } else {
1275         size_t l;
1276         const char *p;
1277         char *e;
1278         int bad_card = 0;
1279 
1280         if (!strcmp(optarg, "all")) {
1281             for (c = soundhw; c->name; ++c) {
1282                 c->enabled = 1;
1283             }
1284             return;
1285         }
1286 
1287         p = optarg;
1288         while (*p) {
1289             e = strchr(p, ',');
1290             l = !e ? strlen(p) : (size_t) (e - p);
1291 
1292             for (c = soundhw; c->name; ++c) {
1293                 if (!strncmp(c->name, p, l) && !c->name[l]) {
1294                     c->enabled = 1;
1295                     break;
1296                 }
1297             }
1298 
1299             if (!c->name) {
1300                 if (l > 80) {
1301                     error_report("Unknown sound card name (too big to show)");
1302                 } else {
1304                     error_report("Unknown sound card name `%.*s'",
1305                                  (int) l, p);
1306                 }
1307                 bad_card = 1;
1308             }
1309             p += l + (e != NULL);
1310         }
1311 
1312         if (bad_card) {
1313             goto show_valid_cards;
1314         }
1315     }
1316 }
1317 
1318 void audio_init(void)
1319 {
1320     struct soundhw *c;
1321     ISABus *isa_bus = (ISABus *) object_resolve_path_type("", TYPE_ISA_BUS, NULL);
1322     PCIBus *pci_bus = (PCIBus *) object_resolve_path_type("", TYPE_PCI_BUS, NULL);
1323 
1324     for (c = soundhw; c->name; ++c) {
1325         if (c->enabled) {
1326             if (c->isa) {
1327                 if (!isa_bus) {
1328                     error_report("ISA bus not available for %s", c->name);
1329                     exit(1);
1330                 }
1331                 c->init.init_isa(isa_bus);
1332             } else {
1333                 if (!pci_bus) {
1334                     error_report("PCI bus not available for %s", c->name);
1335                     exit(1);
1336                 }
1337                 c->init.init_pci(pci_bus);
1338             }
1339         }
1340     }
1341 }
1342 
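/* Parse a 36-character UUID string of the form expected by UUID_FMT into
 * the 16 bytes at 'uuid'.  Returns 0 on success, -1 on malformed input.
 */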
1343 int qemu_uuid_parse(const char *str, uint8_t *uuid)
1344 {
1345     int ret;
1346 
1347     if (strlen(str) != 36) {
1348         return -1;
1349     }
1350 
1351     ret = sscanf(str, UUID_FMT, &uuid[0], &uuid[1], &uuid[2], &uuid[3],
1352                  &uuid[4], &uuid[5], &uuid[6], &uuid[7], &uuid[8], &uuid[9],
1353                  &uuid[10], &uuid[11], &uuid[12], &uuid[13], &uuid[14],
1354                  &uuid[15]);
1355 
1356     if (ret != 16) {
1357         return -1;
1358     }
1359     return 0;
1360 }
1361 
1362 void do_acpitable_option(const QemuOpts *opts)
1363 {
1364 #ifdef TARGET_I386
1365     Error *err = NULL;
1366 
1367     acpi_table_add(opts, &err);
1368     if (err) {
1369         error_report("Wrong acpi table provided: %s",
1370                      error_get_pretty(err));
1371         error_free(err);
1372         exit(1);
1373     }
1374 #endif
1375 }
1376 
1377 void do_smbios_option(QemuOpts *opts)
1378 {
1379 #ifdef TARGET_I386
1380     smbios_entry_add(opts);
1381 #endif
1382 }
1383 
1384 void cpudef_init(void)
1385 {
1386 #if defined(cpudef_setup)
1387     cpudef_setup(); /* parse cpu definitions in target config file */
1388 #endif
1389 }
1390 
1391 int kvm_available(void)
1392 {
1393 #ifdef CONFIG_KVM
1394     return 1;
1395 #else
1396     return 0;
1397 #endif
1398 }
1399 
1400 int xen_available(void)
1401 {
1402 #ifdef CONFIG_XEN
1403     return 1;
1404 #else
1405     return 0;
1406 #endif
1407 }
1408 
1409 
1410 TargetInfo *qmp_query_target(Error **errp)
1411 {
1412     TargetInfo *info = g_malloc0(sizeof(*info));
1413 
1414     info->arch = g_strdup(TARGET_NAME);
1415 
1416     return info;
1417 }
1418 
1419 /* Stub function that gets run on the vcpu when it's brought out of the
1420    VM to run inside qemu via async_run_on_cpu() */
1421 static void mig_sleep_cpu(void *opq)
1422 {
1423     qemu_mutex_unlock_iothread();
1424     g_usleep(30*1000);
1425     qemu_mutex_lock_iothread();
1426 }
1427 
1428 /* To reduce the dirty rate, explicitly disallow the VCPUs from spending
1429    much time in the VM. The migration thread will try to catch up.
1430    The workload will experience a performance drop.
1431 */
1432 static void mig_throttle_guest_down(void)
1433 {
1434     CPUState *cpu;
1435 
1436     qemu_mutex_lock_iothread();
1437     CPU_FOREACH(cpu) {
1438         async_run_on_cpu(cpu, mig_sleep_cpu, NULL);
1439     }
1440     qemu_mutex_unlock_iothread();
1441 }
1442 
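/* Called from the ram_save_iterate() loop: when throttling is enabled,
 * put the vcpus back to sleep if more than 40 ms have passed since the
 * last time the guest was throttled.
 */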
1443 static void check_guest_throttling(void)
1444 {
1445     static int64_t t0;
1446     int64_t        t1;
1447 
1448     if (!mig_throttle_on) {
1449         return;
1450     }
1451 
1452     if (!t0)  {
1453         t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
1454         return;
1455     }
1456 
1457     t1 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
1458 
1459     /* If it has been more than 40 ms since the last time the guest
1460      * was throttled then do it again.
1461      */
1462     if (40 < (t1-t0)/1000000) {
1463         mig_throttle_guest_down();
1464         t0 = t1;
1465     }
1466 }
1467