/*
 * Copyright (C) 2010 Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-migration.h"
#include "trace.h"

#include "hw/hw.h"
#include "hw/i386/pc.h"
#include "hw/irq.h"
#include "hw/i386/apic-msidef.h"
#include "hw/xen/xen-x86.h"
#include "qemu/range.h"

#include "hw/xen/xen-hvm-common.h"
#include "hw/xen/arch_hvm.h"
#include <xen/hvm/e820.h>
#include "exec/target_page.h"
#include "target/i386/cpu.h"
#include "system/runstate.h"
#include "system/xen-mapcache.h"
#include "system/xen.h"
static MemoryRegion ram_640k, ram_lo, ram_hi;
static MemoryRegion *framebuffer;
static bool xen_in_migration;

/* Compatibility with older versions */

/*
 * This allows QEMU to build on a system that has Xen 4.5 or earlier installed.
 * This is here (not in hw/xen/xen_native.h) because xen/hvm/ioreq.h needs to
 * be included before this block and hw/xen/xen_native.h needs to be included
 * before xen/hvm/ioreq.h
 */
#ifndef IOREQ_TYPE_VMWARE_PORT
#define IOREQ_TYPE_VMWARE_PORT 3
struct vmware_regs {
    uint32_t esi;
    uint32_t edi;
    uint32_t ebx;
    uint32_t ecx;
    uint32_t edx;
};
typedef struct vmware_regs vmware_regs_t;

struct shared_vmport_iopage {
    struct vmware_regs vcpu_vmport_regs[1];
};
typedef struct shared_vmport_iopage shared_vmport_iopage_t;
#endif

static shared_vmport_iopage_t *shared_vmport_page;

static QLIST_HEAD(, XenPhysmap) xen_physmap;
static const XenPhysmap *log_for_dirtybit;
/* Buffer used by xen_sync_dirty_bitmap */
static unsigned long *dirty_bitmap;
static Notifier suspend;
static Notifier wakeup;

/* Xen specific function for piix pci */

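/*
 * Map a PCI INTx pin to a PIRQ: each slot gets a contiguous block of
 * four IRQ lines, one per pin.
 */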
int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
{
    return irq_num + (PCI_SLOT(pci_dev->devfn) << 2);
}

void xen_intx_set_irq(void *opaque, int irq_num, int level)
{
    xen_set_pci_intx_level(xen_domid, 0, 0, irq_num >> 2,
                           irq_num & 3, level);
}

int xen_set_pci_link_route(uint8_t link, uint8_t irq)
{
    return xendevicemodel_set_pci_link_route(xen_dmod, xen_domid, link, irq);
}

int xen_is_pirq_msi(uint32_t msi_data)
{
    /*
     * If vector is 0, the MSI is remapped into a PIRQ, passed as
     * dest_id.
     */
    return ((msi_data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT) == 0;
}

void xen_hvm_inject_msi(uint64_t addr, uint32_t data)
{
    xen_inject_msi(xen_domid, addr, data);
}

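/* On suspend, ask Xen to put the guest into ACPI sleep state S3. */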
static void xen_suspend_notifier(Notifier *notifier, void *data)
{
    xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 3);
}

/* Xen Interrupt Controller */

static void xen_set_irq(void *opaque, int irq, int level)
{
    xen_set_isa_irq_level(xen_domid, irq, level);
}

qemu_irq *xen_interrupt_controller_init(void)
{
    return qemu_allocate_irqs(xen_set_irq, NULL, 16);
}

/* Memory Ops */

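/*
 * Allocate guest RAM as one "xen.ram" block and expose it through
 * aliases: the 640k base memory, the low memory below 4G minus the
 * VGA hole, and (if any) the memory above 4G.
 */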
static void xen_ram_init(PCMachineState *pcms,
                         ram_addr_t ram_size, MemoryRegion **ram_memory_p)
{
    X86MachineState *x86ms = X86_MACHINE(pcms);
    MemoryRegion *sysmem = get_system_memory();
    ram_addr_t block_len;
    uint64_t user_lowmem =
        object_property_get_uint(qdev_get_machine(),
                                 PC_MACHINE_MAX_RAM_BELOW_4G,
                                 &error_abort);

    /*
     * Handle the machine opt max-ram-below-4g.  It is basically doing
     * min(xen limit, user limit).
     */
    if (!user_lowmem) {
        user_lowmem = HVM_BELOW_4G_RAM_END; /* default */
    }
    if (HVM_BELOW_4G_RAM_END <= user_lowmem) {
        user_lowmem = HVM_BELOW_4G_RAM_END;
    }

    if (ram_size >= user_lowmem) {
        x86ms->above_4g_mem_size = ram_size - user_lowmem;
        x86ms->below_4g_mem_size = user_lowmem;
    } else {
        x86ms->above_4g_mem_size = 0;
        x86ms->below_4g_mem_size = ram_size;
    }
    if (!x86ms->above_4g_mem_size) {
        block_len = ram_size;
    } else {
        /*
         * Xen does not allocate the memory contiguously, it keeps a
         * hole of the size computed above or passed in.
         */
        block_len = (4 * GiB) + x86ms->above_4g_mem_size;
    }
    memory_region_init_ram(&xen_memory, NULL, "xen.ram", block_len,
                           &error_fatal);
    *ram_memory_p = &xen_memory;

    memory_region_init_alias(&ram_640k, NULL, "xen.ram.640k",
                             &xen_memory, 0, 0xa0000);
    memory_region_add_subregion(sysmem, 0, &ram_640k);
    /*
     * Skip the VGA I/O memory space; it will be registered later by the
     * emulated VGA device.
     *
     * The area between 0xc0000 and 0x100000 will be used by SeaBIOS to load
     * option ROMs, so it is registered here as RAM.
     */
    memory_region_init_alias(&ram_lo, NULL, "xen.ram.lo",
                             &xen_memory, 0xc0000,
                             x86ms->below_4g_mem_size - 0xc0000);
    memory_region_add_subregion(sysmem, 0xc0000, &ram_lo);
    if (x86ms->above_4g_mem_size > 0) {
        memory_region_init_alias(&ram_hi, NULL, "xen.ram.hi",
                                 &xen_memory, 0x100000000ULL,
                                 x86ms->above_4g_mem_size);
        memory_region_add_subregion(sysmem, 0x100000000ULL, &ram_hi);
    }
}

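/* Find the physmap entry covering the given guest physical address. */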
static XenPhysmap *get_physmapping(hwaddr start_addr, ram_addr_t size,
                                   int page_mask)
{
    XenPhysmap *physmap = NULL;

    start_addr &= page_mask;

    QLIST_FOREACH(physmap, &xen_physmap, list) {
        if (range_covers_byte(physmap->start_addr, physmap->size, start_addr)) {
            return physmap;
        }
    }
    return NULL;
}

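/*
 * Translate a RAM block offset back to the guest physical address it
 * was remapped to, or return it unchanged if no physmap entry covers it.
 */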
static hwaddr xen_phys_offset_to_gaddr(hwaddr phys_offset, ram_addr_t size,
                                       int page_mask)
{
    hwaddr addr = phys_offset & page_mask;
    XenPhysmap *physmap = NULL;

    QLIST_FOREACH(physmap, &xen_physmap, list) {
        if (range_covers_byte(physmap->phys_offset, physmap->size, addr)) {
            return physmap->start_addr + (phys_offset - physmap->phys_offset);
        }
    }

    return phys_offset;
}

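/*
 * Persist a physmap entry under this domain's device-model path in
 * xenstore so that a later incarnation of the device model can restore
 * it; a no-op when XEN_COMPAT_PHYSMAP is not defined.
 */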
#ifdef XEN_COMPAT_PHYSMAP
static int xen_save_physmap(XenIOState *state, XenPhysmap *physmap)
{
    char path[80], value[17];

    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap/%"PRIx64"/start_addr",
             xen_domid, (uint64_t)physmap->phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)physmap->start_addr);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap/%"PRIx64"/size",
             xen_domid, (uint64_t)physmap->phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)physmap->size);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    if (physmap->name) {
        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%"PRIx64"/name",
                 xen_domid, (uint64_t)physmap->phys_offset);
        if (!xs_write(state->xenstore, 0, path,
                      physmap->name, strlen(physmap->name))) {
            return -1;
        }
    }
    return 0;
}
#else
static int xen_save_physmap(XenIOState *state, XenPhysmap *physmap)
{
    return 0;
}
#endif

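/*
 * Relocate a RAM region (currently only the linear framebuffer) into
 * the guest physical address space and record the mapping in the
 * physmap list so it can later be undone and persisted for migration.
 */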
static int xen_add_to_physmap(XenIOState *state,
                              hwaddr start_addr,
                              ram_addr_t size,
                              MemoryRegion *mr,
                              hwaddr offset_within_region)
{
    unsigned target_page_bits = qemu_target_page_bits();
    int page_size = qemu_target_page_size();
    int page_mask = -page_size;
    unsigned long nr_pages;
    int rc = 0;
    XenPhysmap *physmap = NULL;
    hwaddr pfn, start_gpfn;
    hwaddr phys_offset = memory_region_get_ram_addr(mr);
    const char *mr_name;

    if (get_physmapping(start_addr, size, page_mask)) {
        return 0;
    }
    if (size <= 0) {
        return -1;
    }

    /*
     * Xen can only handle a single dirty log region for now and we want
     * the linear framebuffer to be that region.
     * Avoid tracking any regions that are not videoram, and avoid tracking
     * the legacy VGA region.
     */
    if (mr == framebuffer && start_addr > 0xbffff) {
        goto go_physmap;
    }
    return -1;

go_physmap:
    DPRINTF("mapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx"\n",
            start_addr, start_addr + size);

    mr_name = memory_region_name(mr);

    physmap = g_new(XenPhysmap, 1);

    physmap->start_addr = start_addr;
    physmap->size = size;
    physmap->name = mr_name;
    physmap->phys_offset = phys_offset;

    QLIST_INSERT_HEAD(&xen_physmap, physmap, list);

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        /*
         * Now that we have a physmap entry, we can replace the dummy
         * mapping with a real mapping of foreign guest memory.
         */
        uint8_t *p = xen_replace_cache_entry(phys_offset, start_addr, size);
        assert(p && p == memory_region_get_ram_ptr(mr));

        return 0;
    }

    pfn = phys_offset >> target_page_bits;
    start_gpfn = start_addr >> target_page_bits;
    nr_pages = size >> target_page_bits;
    rc = xendevicemodel_relocate_memory(xen_dmod, xen_domid, nr_pages, pfn,
                                        start_gpfn);
    if (rc) {
        int saved_errno = errno;

        error_report("relocate_memory %lu pages from GFN %"HWADDR_PRIx
                     " to GFN %"HWADDR_PRIx" failed: %s",
                     nr_pages, pfn, start_gpfn, strerror(saved_errno));
        errno = saved_errno;
        return -1;
    }

    rc = xendevicemodel_pin_memory_cacheattr(xen_dmod, xen_domid,
                                   start_addr >> target_page_bits,
                                   (start_addr + size - 1) >> target_page_bits,
                                   XEN_DOMCTL_MEM_CACHEATTR_WB);
    if (rc) {
        error_report("pin_memory_cacheattr failed: %s", strerror(errno));
    }
    return xen_save_physmap(state, physmap);
}

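/* Undo xen_add_to_physmap(): move the pages back to their RAM address. */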
static int xen_remove_from_physmap(XenIOState *state,
                                   hwaddr start_addr,
                                   ram_addr_t size)
{
    unsigned target_page_bits = qemu_target_page_bits();
    int page_size = qemu_target_page_size();
    int page_mask = -page_size;
    int rc = 0;
    XenPhysmap *physmap = NULL;
    hwaddr phys_offset = 0;

    physmap = get_physmapping(start_addr, size, page_mask);
    if (physmap == NULL) {
        return -1;
    }

    phys_offset = physmap->phys_offset;
    size = physmap->size;

    DPRINTF("unmapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx", at "
            "%"HWADDR_PRIx"\n", start_addr, start_addr + size, phys_offset);

    size >>= target_page_bits;
    start_addr >>= target_page_bits;
    phys_offset >>= target_page_bits;
    rc = xendevicemodel_relocate_memory(xen_dmod, xen_domid, size, start_addr,
                                        phys_offset);
    if (rc) {
        int saved_errno = errno;

        error_report("relocate_memory "RAM_ADDR_FMT" pages"
                     " from GFN %"HWADDR_PRIx
                     " to GFN %"HWADDR_PRIx" failed: %s",
                     size, start_addr, phys_offset, strerror(saved_errno));
        errno = saved_errno;
        return -1;
    }

    QLIST_REMOVE(physmap, list);
    if (log_for_dirtybit == physmap) {
        log_for_dirtybit = NULL;
        g_free(dirty_bitmap);
        dirty_bitmap = NULL;
    }
    g_free(physmap);

    return 0;
}

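/*
 * Query Xen's dirty-VRAM log for the tracked region and propagate any
 * dirty pages into QEMU's dirty memory tracking for the framebuffer.
 */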
static void xen_sync_dirty_bitmap(XenIOState *state,
                                  hwaddr start_addr,
                                  ram_addr_t size)
{
    unsigned target_page_bits = qemu_target_page_bits();
    int page_size = qemu_target_page_size();
    int page_mask = -page_size;
    hwaddr npages = size >> target_page_bits;
    const int width = sizeof(unsigned long) * 8;
    size_t bitmap_size = DIV_ROUND_UP(npages, width);
    int rc, i, j;
    const XenPhysmap *physmap = NULL;

    physmap = get_physmapping(start_addr, size, page_mask);
    if (physmap == NULL) {
        /* not handled */
        return;
    }

    if (log_for_dirtybit == NULL) {
        log_for_dirtybit = physmap;
        dirty_bitmap = g_new(unsigned long, bitmap_size);
    } else if (log_for_dirtybit != physmap) {
        /* Only one range for dirty bitmap can be tracked. */
        return;
    }

    rc = xen_track_dirty_vram(xen_domid, start_addr >> target_page_bits,
                              npages, dirty_bitmap);
    if (rc < 0) {
#ifndef ENODATA
#define ENODATA ENOENT
#endif
        if (errno == ENODATA) {
            memory_region_set_dirty(framebuffer, 0, size);
            DPRINTF("xen: track_dirty_vram failed (0x" HWADDR_FMT_plx
                    ", 0x" HWADDR_FMT_plx "): %s\n",
                    start_addr, start_addr + size, strerror(errno));
        }
        return;
    }

    for (i = 0; i < bitmap_size; i++) {
        unsigned long map = dirty_bitmap[i];
        while (map != 0) {
            j = ctzl(map);
            map &= ~(1ul << j);
            memory_region_set_dirty(framebuffer,
                                    (i * width + j) * page_size, page_size);
        }
    }
}

static void xen_log_start(MemoryListener *listener,
                          MemoryRegionSection *section,
                          int old, int new)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    if (new & ~old & (1 << DIRTY_MEMORY_VGA)) {
        xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                              int128_get64(section->size));
    }
}

static void xen_log_stop(MemoryListener *listener, MemoryRegionSection *section,
                         int old, int new)
{
    if (old & ~new & (1 << DIRTY_MEMORY_VGA)) {
        log_for_dirtybit = NULL;
        g_free(dirty_bitmap);
        dirty_bitmap = NULL;
        /* Disable dirty bit tracking */
        xen_track_dirty_vram(xen_domid, 0, 0, NULL);
    }
}

static void xen_log_sync(MemoryListener *listener, MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                          int128_get64(section->size));
}

static bool xen_log_global_start(MemoryListener *listener, Error **errp)
{
    if (xen_enabled()) {
        xen_in_migration = true;
    }
    return true;
}

static void xen_log_global_stop(MemoryListener *listener)
{
    xen_in_migration = false;
}

static const MemoryListener xen_memory_listener = {
    .name = "xen-memory",
    .region_add = xen_region_add,
    .region_del = xen_region_del,
    .log_start = xen_log_start,
    .log_stop = xen_log_stop,
    .log_sync = xen_log_sync,
    .log_global_start = xen_log_global_start,
    .log_global_stop = xen_log_global_stop,
    .priority = MEMORY_LISTENER_PRIORITY_ACCEL,
};

static void regs_to_cpu(vmware_regs_t *vmport_regs, ioreq_t *req)
{
    X86CPU *cpu;
    CPUX86State *env;

    cpu = X86_CPU(current_cpu);
    env = &cpu->env;
    env->regs[R_EAX] = req->data;
    env->regs[R_EBX] = vmport_regs->ebx;
    env->regs[R_ECX] = vmport_regs->ecx;
    env->regs[R_EDX] = vmport_regs->edx;
    env->regs[R_ESI] = vmport_regs->esi;
    env->regs[R_EDI] = vmport_regs->edi;
}

static void regs_from_cpu(vmware_regs_t *vmport_regs)
{
    X86CPU *cpu = X86_CPU(current_cpu);
    CPUX86State *env = &cpu->env;

    vmport_regs->ebx = env->regs[R_EBX];
    vmport_regs->ecx = env->regs[R_ECX];
    vmport_regs->edx = env->regs[R_EDX];
    vmport_regs->esi = env->regs[R_ESI];
    vmport_regs->edi = env->regs[R_EDI];
}

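/*
 * Handle a VMware-port ioreq: load the vCPU's registers from the shared
 * vmport page, perform the port I/O, then copy the registers back.
 */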
static void handle_vmport_ioreq(XenIOState *state, ioreq_t *req)
{
    vmware_regs_t *vmport_regs;

    assert(shared_vmport_page);
    vmport_regs =
        &shared_vmport_page->vcpu_vmport_regs[state->send_vcpu];
    QEMU_BUILD_BUG_ON(sizeof(*req) < sizeof(*vmport_regs));

    current_cpu = state->cpu_by_vcpu_id[state->send_vcpu];
    regs_to_cpu(vmport_regs, req);
    cpu_ioreq_pio(req);
    regs_from_cpu(vmport_regs);
    current_cpu = NULL;
}

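/*
 * Rebuild the physmap list from the entries saved in xenstore by a
 * previous incarnation of the device model (compat builds only).
 */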
#ifdef XEN_COMPAT_PHYSMAP
static void xen_read_physmap(XenIOState *state)
{
    XenPhysmap *physmap = NULL;
    unsigned int len, num, i;
    char path[80], *value = NULL;
    char **entries = NULL;

    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap", xen_domid);
    entries = xs_directory(state->xenstore, 0, path, &num);
    if (entries == NULL) {
        return;
    }

    for (i = 0; i < num; i++) {
        physmap = g_new(XenPhysmap, 1);
        physmap->phys_offset = strtoull(entries[i], NULL, 16);
        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/start_addr",
                 xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->start_addr = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/size",
                 xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->size = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/name",
                 xen_domid, entries[i]);
        physmap->name = xs_read(state->xenstore, 0, path, &len);

        QLIST_INSERT_HEAD(&xen_physmap, physmap, list);
    }
    free(entries);
}
#else
static void xen_read_physmap(XenIOState *state)
{
}
#endif

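/* On wakeup, return the guest's ACPI state to S0 (running). */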
static void xen_wakeup_notifier(Notifier *notifier, void *data)
{
    xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 0);
}

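/*
 * Check whether this device model runs in a stub domain, i.e. whether
 * xenstore records a device-model domid other than dom0.
 */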
static bool xen_check_stubdomain(struct xs_handle *xsh)
{
    char *dm_path = g_strdup_printf(
        "/local/domain/%d/image/device-model-domid", xen_domid);
    char *val;
    int32_t dm_domid;
    bool is_stubdom = false;

    val = xs_read(xsh, 0, dm_path, NULL);
    if (val) {
        if (sscanf(val, "%d", &dm_domid) == 1) {
            is_stubdom = dm_domid != 0;
        }
        free(val);
    }

    g_free(dm_path);
    return is_stubdom;
}

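/*
 * Entry point for Xen HVM support on the PC machine: registers the
 * ioreq server, restores any saved physmap, hooks up the suspend and
 * wakeup notifiers, maps the shared vmport page if the hypervisor
 * provides one, and initialises guest RAM.
 */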
void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory)
{
    MachineState *ms = MACHINE(pcms);
    unsigned int max_cpus = ms->smp.max_cpus;
    int rc;
    xen_pfn_t ioreq_pfn;
    XenIOState *state;

    state = g_new0(XenIOState, 1);

    xen_register_ioreq(state, max_cpus,
                       HVM_IOREQSRV_BUFIOREQ_ATOMIC,
                       &xen_memory_listener);

    xen_is_stubdomain = xen_check_stubdomain(state->xenstore);

    QLIST_INIT(&xen_physmap);
    xen_read_physmap(state);

    suspend.notify = xen_suspend_notifier;
    qemu_register_suspend_notifier(&suspend);

    wakeup.notify = xen_wakeup_notifier;
    qemu_register_wakeup_notifier(&wakeup);

    rc = xen_get_vmport_regs_pfn(xen_xc, xen_domid, &ioreq_pfn);
    if (!rc) {
        DPRINTF("shared vmport page at pfn %lx\n", ioreq_pfn);
        shared_vmport_page =
            xenforeignmemory_map(xen_fmem, xen_domid, PROT_READ|PROT_WRITE,
                                 1, &ioreq_pfn, NULL);
        if (shared_vmport_page == NULL) {
            error_report("map shared vmport IO page returned error %d handle=%p",
                         errno, xen_xc);
            goto err;
        }
    } else if (rc != -ENOSYS) {
        error_report("get vmport regs pfn returned error %d, rc=%d",
                     errno, rc);
        goto err;
    }

    xen_ram_init(pcms, ms->ram_size, ram_memory);

    /* Disable ACPI build because Xen handles it */
    pcms->acpi_build_enabled = false;

    return;

err:
    error_report("xen hardware virtual machine initialisation failed");
    exit(1);
}

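/* Record the framebuffer region used for dirty-VRAM tracking. */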
void xen_register_framebuffer(MemoryRegion *mr)
{
    framebuffer = mr;
}

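/*
 * During migration, tell Xen which guest pages QEMU itself has
 * modified, so that they are marked dirty for the ongoing save.
 */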
void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length)
{
    unsigned target_page_bits = qemu_target_page_bits();
    int page_size = qemu_target_page_size();
    int page_mask = -page_size;

    if (unlikely(xen_in_migration)) {
        int rc;
        ram_addr_t start_pfn, nb_pages;

        start = xen_phys_offset_to_gaddr(start, length, page_mask);

        if (length == 0) {
            length = page_size;
        }
        start_pfn = start >> target_page_bits;
        nb_pages = ((start + length + page_size - 1) >> target_page_bits)
            - start_pfn;
        rc = xen_modified_memory(xen_domid, start_pfn, nb_pages);
        if (rc) {
            fprintf(stderr,
                    "%s failed for "RAM_ADDR_FMT" ("RAM_ADDR_FMT"): %i, %s\n",
                    __func__, start, nb_pages, errno, strerror(errno));
        }
    }
}

void qmp_xen_set_global_dirty_log(bool enable, Error **errp)
{
    if (enable) {
        memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION, errp);
    } else {
        memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION);
    }
}

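/*
 * Arch hook for the memory listener: on add, remap RAM sections into
 * the physmap (or mark ROM sections read-only); on removal, restore
 * the previous mapping.
 */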
void arch_xen_set_memory(XenIOState *state, MemoryRegionSection *section,
                         bool add)
{
    unsigned target_page_bits = qemu_target_page_bits();
    int page_size = qemu_target_page_size();
    int page_mask = -page_size;
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty = memory_region_is_logging(section->mr, DIRTY_MEMORY_VGA);
    hvmmem_type_t mem_type;

    if (!memory_region_is_ram(section->mr)) {
        return;
    }

    if (log_dirty != add) {
        return;
    }

    trace_xen_client_set_memory(start_addr, size, log_dirty);

    start_addr &= page_mask;
    size = ROUND_UP(size, page_size);

    if (add) {
        if (!memory_region_is_rom(section->mr)) {
            xen_add_to_physmap(state, start_addr, size,
                               section->mr, section->offset_within_region);
        } else {
            mem_type = HVMMEM_ram_ro;
            if (xen_set_mem_type(xen_domid, mem_type,
                                 start_addr >> target_page_bits,
                                 size >> target_page_bits)) {
                DPRINTF("xen_set_mem_type error, addr: "HWADDR_FMT_plx"\n",
                        start_addr);
            }
        }
    } else {
        if (xen_remove_from_physmap(state, start_addr, size) < 0) {
            DPRINTF("physmapping does not exist at "HWADDR_FMT_plx"\n",
                    start_addr);
        }
    }
}

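/* Dispatch x86-specific ioreq types; only the VMware port is handled here. */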
void arch_handle_ioreq(XenIOState *state, ioreq_t *req)
{
    switch (req->type) {
    case IOREQ_TYPE_VMWARE_PORT:
        handle_vmport_ioreq(state, req);
        break;
    default:
        hw_error("Invalid ioreq type 0x%x\n", req->type);
    }
}