xref: /qemu/target/ppc/kvm.c (revision a84f71793aab5d06b5798e78e1cee82cc3e4b3e2)
1d76d1650Saurel32 /*
2d76d1650Saurel32  * PowerPC implementation of KVM hooks
3d76d1650Saurel32  *
4d76d1650Saurel32  * Copyright IBM Corp. 2007
590dc8812SScott Wood  * Copyright (C) 2011 Freescale Semiconductor, Inc.
6d76d1650Saurel32  *
7d76d1650Saurel32  * Authors:
8d76d1650Saurel32  *  Jerone Young <jyoung5@us.ibm.com>
9d76d1650Saurel32  *  Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
10d76d1650Saurel32  *  Hollis Blanchard <hollisb@us.ibm.com>
11d76d1650Saurel32  *
12d76d1650Saurel32  * This work is licensed under the terms of the GNU GPL, version 2 or later.
13d76d1650Saurel32  * See the COPYING file in the top-level directory.
14d76d1650Saurel32  *
15d76d1650Saurel32  */
16d76d1650Saurel32 
170d75590dSPeter Maydell #include "qemu/osdep.h"
18eadaada1SAlexander Graf #include <dirent.h>
19d76d1650Saurel32 #include <sys/ioctl.h>
204656e1f0SBenjamin Herrenschmidt #include <sys/vfs.h>
21d76d1650Saurel32 
22d76d1650Saurel32 #include <linux/kvm.h>
23d76d1650Saurel32 
24d76d1650Saurel32 #include "qemu-common.h"
2530f4b05bSDavid Gibson #include "qapi/error.h"
26072ed5f2SThomas Huth #include "qemu/error-report.h"
2733c11879SPaolo Bonzini #include "cpu.h"
28715d4b96SThomas Huth #include "cpu-models.h"
291de7afc9SPaolo Bonzini #include "qemu/timer.h"
309c17d615SPaolo Bonzini #include "sysemu/sysemu.h"
31b3946626SVincent Palatin #include "sysemu/hw_accel.h"
32d76d1650Saurel32 #include "kvm_ppc.h"
339c17d615SPaolo Bonzini #include "sysemu/cpus.h"
349c17d615SPaolo Bonzini #include "sysemu/device_tree.h"
35d5aea6f3SDavid Gibson #include "mmu-hash64.h"
36d76d1650Saurel32 
37f61b4bedSAlexander Graf #include "hw/sysbus.h"
380d09e41aSPaolo Bonzini #include "hw/ppc/spapr.h"
390d09e41aSPaolo Bonzini #include "hw/ppc/spapr_vio.h"
407ebaf795SBharata B Rao #include "hw/ppc/spapr_cpu_core.h"
4198a8b524SAlexey Kardashevskiy #include "hw/ppc/ppc.h"
4231f2cb8fSBharat Bhushan #include "sysemu/watchdog.h"
43b36f100eSAlexey Kardashevskiy #include "trace.h"
4488365d17SBharat Bhushan #include "exec/gdbstub.h"
454c663752SPaolo Bonzini #include "exec/memattrs.h"
469c607668SAlexey Kardashevskiy #include "exec/ram_addr.h"
472d103aaeSMichael Roth #include "sysemu/hostmem.h"
48f348b6d1SVeronia Bahaa #include "qemu/cutils.h"
499c607668SAlexey Kardashevskiy #include "qemu/mmap-alloc.h"
50f3d9f303SSam Bobroff #include "elf.h"
51c64abd1fSSam Bobroff #include "sysemu/kvm_int.h"
52f61b4bedSAlexander Graf 
53d76d1650Saurel32 //#define DEBUG_KVM
54d76d1650Saurel32 
55d76d1650Saurel32 #ifdef DEBUG_KVM
56da56ff91SPeter Maydell #define DPRINTF(fmt, ...) \
57d76d1650Saurel32     do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
58d76d1650Saurel32 #else
59da56ff91SPeter Maydell #define DPRINTF(fmt, ...) \
60d76d1650Saurel32     do { } while (0)
61d76d1650Saurel32 #endif
62d76d1650Saurel32 
63eadaada1SAlexander Graf #define PROC_DEVTREE_CPU      "/proc/device-tree/cpus/"
64eadaada1SAlexander Graf 
6594a8d39aSJan Kiszka const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
6694a8d39aSJan Kiszka     KVM_CAP_LAST_INFO
6794a8d39aSJan Kiszka };
6894a8d39aSJan Kiszka 
69fc87e185SAlexander Graf static int cap_interrupt_unset = false;
70fc87e185SAlexander Graf static int cap_interrupt_level = false;
7190dc8812SScott Wood static int cap_segstate;
7290dc8812SScott Wood static int cap_booke_sregs;
73e97c3636SDavid Gibson static int cap_ppc_smt;
74fa98fbfcSSam Bobroff static int cap_ppc_smt_possible;
750f5cb298SDavid Gibson static int cap_spapr_tce;
76d6ee2a7cSAlexey Kardashevskiy static int cap_spapr_tce_64;
77da95324eSAlexey Kardashevskiy static int cap_spapr_multitce;
789bb62a07SAlexey Kardashevskiy static int cap_spapr_vfio;
79f1af19d7SDavid Gibson static int cap_hior;
80d67d40eaSDavid Gibson static int cap_one_reg;
813b961124SStuart Yoder static int cap_epr;
8231f2cb8fSBharat Bhushan static int cap_ppc_watchdog;
839b00ea49SDavid Gibson static int cap_papr;
84e68cb8b4SAlexey Kardashevskiy static int cap_htab_fd;
8587a91de6SAlexander Graf static int cap_fixup_hcalls;
86bac3bf28SThomas Huth static int cap_htm;             /* Hardware transactional memory support */
87cf1c4cceSSam Bobroff static int cap_mmu_radix;
88cf1c4cceSSam Bobroff static int cap_mmu_hash_v3;
89b55d295eSDavid Gibson static int cap_resize_hpt;
90c363a37aSDaniel Henrique Barboza static int cap_ppc_pvr_compat;
918acc2ae5SSuraj Jitindar Singh static int cap_ppc_safe_cache;
928acc2ae5SSuraj Jitindar Singh static int cap_ppc_safe_bounds_check;
938acc2ae5SSuraj Jitindar Singh static int cap_ppc_safe_indirect_branch;
94fc87e185SAlexander Graf 
953c902d44SBharat Bhushan static uint32_t debug_inst_opcode;
963c902d44SBharat Bhushan 
97c821c2bdSAlexander Graf /* XXX We have a race condition where we actually have a level triggered
98c821c2bdSAlexander Graf  *     interrupt, but the infrastructure can't expose that yet, so the guest
99c821c2bdSAlexander Graf  *     takes but ignores it, goes to sleep and never gets notified that there's
100c821c2bdSAlexander Graf  *     still an interrupt pending.
101c6a94ba5SAlexander Graf  *
102c821c2bdSAlexander Graf  *     As a quick workaround, let's just wake up again 20 ms after we injected
103c821c2bdSAlexander Graf  *     an interrupt. That way we can assure that we're always reinjecting
104c821c2bdSAlexander Graf  *     interrupts in case the guest swallowed them.
105c6a94ba5SAlexander Graf  */
106c6a94ba5SAlexander Graf static QEMUTimer *idle_timer;
107c6a94ba5SAlexander Graf 
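/*
 * kvm_kick_cpu() below is the callback attached to idle_timer in
 * kvm_arch_init_vcpu(): it simply kicks the vCPU, so an interrupt the
 * guest may have swallowed gets another chance to be noticed.
 */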
108d5a68146SAndreas Färber static void kvm_kick_cpu(void *opaque)
109c6a94ba5SAlexander Graf {
110d5a68146SAndreas Färber     PowerPCCPU *cpu = opaque;
111d5a68146SAndreas Färber 
112c08d7424SAndreas Färber     qemu_cpu_kick(CPU(cpu));
113c6a94ba5SAlexander Graf }
114c6a94ba5SAlexander Graf 
11596c9cff0SThomas Huth /* Check whether we are running with KVM-PR (instead of KVM-HV).  This
11696c9cff0SThomas Huth  * should only be used for fallback tests - generally we should use
11796c9cff0SThomas Huth  * explicit capabilities for the features we want, rather than
11896c9cff0SThomas Huth  * assuming what is/isn't available depending on the KVM variant. */
11996c9cff0SThomas Huth static bool kvmppc_is_pr(KVMState *ks)
12096c9cff0SThomas Huth {
12196c9cff0SThomas Huth     /* Assume KVM-PR if the GET_PVINFO capability is available */
12270a0c19eSGreg Kurz     return kvm_vm_check_extension(ks, KVM_CAP_PPC_GET_PVINFO) != 0;
12396c9cff0SThomas Huth }
12496c9cff0SThomas Huth 
1252e9c10ebSIgor Mammedov static int kvm_ppc_register_host_cpu_type(MachineState *ms);
1268acc2ae5SSuraj Jitindar Singh static void kvmppc_get_cpu_characteristics(KVMState *s);
1275ba4576bSAndreas Färber 
128b16565b3SMarcel Apfelbaum int kvm_arch_init(MachineState *ms, KVMState *s)
129d76d1650Saurel32 {
130fc87e185SAlexander Graf     cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);
131fc87e185SAlexander Graf     cap_interrupt_level = kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL);
13290dc8812SScott Wood     cap_segstate = kvm_check_extension(s, KVM_CAP_PPC_SEGSTATE);
13390dc8812SScott Wood     cap_booke_sregs = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_SREGS);
1346977afdaSGreg Kurz     cap_ppc_smt_possible = kvm_vm_check_extension(s, KVM_CAP_PPC_SMT_POSSIBLE);
1350f5cb298SDavid Gibson     cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);
136d6ee2a7cSAlexey Kardashevskiy     cap_spapr_tce_64 = kvm_check_extension(s, KVM_CAP_SPAPR_TCE_64);
137da95324eSAlexey Kardashevskiy     cap_spapr_multitce = kvm_check_extension(s, KVM_CAP_SPAPR_MULTITCE);
1389ded780cSAlexey Kardashevskiy     cap_spapr_vfio = kvm_vm_check_extension(s, KVM_CAP_SPAPR_TCE_VFIO);
139d67d40eaSDavid Gibson     cap_one_reg = kvm_check_extension(s, KVM_CAP_ONE_REG);
140f1af19d7SDavid Gibson     cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);
1413b961124SStuart Yoder     cap_epr = kvm_check_extension(s, KVM_CAP_PPC_EPR);
14231f2cb8fSBharat Bhushan     cap_ppc_watchdog = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_WATCHDOG);
1439b00ea49SDavid Gibson     /* Note: we don't set cap_papr here, because this capability is
1449b00ea49SDavid Gibson      * only activated after this by kvmppc_set_papr() */
1456977afdaSGreg Kurz     cap_htab_fd = kvm_vm_check_extension(s, KVM_CAP_PPC_HTAB_FD);
14687a91de6SAlexander Graf     cap_fixup_hcalls = kvm_check_extension(s, KVM_CAP_PPC_FIXUP_HCALL);
147fa98fbfcSSam Bobroff     cap_ppc_smt = kvm_vm_check_extension(s, KVM_CAP_PPC_SMT);
148bac3bf28SThomas Huth     cap_htm = kvm_vm_check_extension(s, KVM_CAP_PPC_HTM);
149cf1c4cceSSam Bobroff     cap_mmu_radix = kvm_vm_check_extension(s, KVM_CAP_PPC_MMU_RADIX);
150cf1c4cceSSam Bobroff     cap_mmu_hash_v3 = kvm_vm_check_extension(s, KVM_CAP_PPC_MMU_HASH_V3);
151b55d295eSDavid Gibson     cap_resize_hpt = kvm_vm_check_extension(s, KVM_CAP_SPAPR_RESIZE_HPT);
1528acc2ae5SSuraj Jitindar Singh     kvmppc_get_cpu_characteristics(s);
153c363a37aSDaniel Henrique Barboza     /*
154c363a37aSDaniel Henrique Barboza      * Note: setting it to false because there is no such capability
155c363a37aSDaniel Henrique Barboza      * in KVM at this moment.
156c363a37aSDaniel Henrique Barboza      *
157c363a37aSDaniel Henrique Barboza      * TODO: call kvm_vm_check_extension() with the right capability
158c363a37aSDaniel Henrique Barboza      * after the kernel starts implementing it.*/
159c363a37aSDaniel Henrique Barboza     cap_ppc_pvr_compat = false;
160fc87e185SAlexander Graf 
161fc87e185SAlexander Graf     if (!cap_interrupt_level) {
162fc87e185SAlexander Graf         fprintf(stderr, "KVM: Couldn't find level irq capability. Expect the "
163fc87e185SAlexander Graf                         "VM to stall at times!\n");
164fc87e185SAlexander Graf     }
165fc87e185SAlexander Graf 
1662e9c10ebSIgor Mammedov     kvm_ppc_register_host_cpu_type(ms);
1675ba4576bSAndreas Färber 
168d76d1650Saurel32     return 0;
169d76d1650Saurel32 }
170d76d1650Saurel32 
171d525ffabSPaolo Bonzini int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
172d525ffabSPaolo Bonzini {
173d525ffabSPaolo Bonzini     return 0;
174d525ffabSPaolo Bonzini }
175d525ffabSPaolo Bonzini 
1761bc22652SAndreas Färber static int kvm_arch_sync_sregs(PowerPCCPU *cpu)
177d76d1650Saurel32 {
1781bc22652SAndreas Färber     CPUPPCState *cenv = &cpu->env;
1791bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
180861bbc80SAlexander Graf     struct kvm_sregs sregs;
1815666ca4aSScott Wood     int ret;
1825666ca4aSScott Wood 
1835666ca4aSScott Wood     if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
18464e07be5SAlexander Graf         /* What we're really trying to say is "if we're on BookE, we use
18564e07be5SAlexander Graf            the native PVR for now". This is the only sane way to check
18664e07be5SAlexander Graf            it though, so we potentially confuse users into thinking they can run
18764e07be5SAlexander Graf            BookE guests on BookS. Let's hope nobody dares enough :) */
1885666ca4aSScott Wood         return 0;
1895666ca4aSScott Wood     } else {
19090dc8812SScott Wood         if (!cap_segstate) {
19164e07be5SAlexander Graf             fprintf(stderr, "kvm error: missing PVR setting capability\n");
19264e07be5SAlexander Graf             return -ENOSYS;
1935666ca4aSScott Wood         }
1945666ca4aSScott Wood     }
1955666ca4aSScott Wood 
1961bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
1975666ca4aSScott Wood     if (ret) {
1985666ca4aSScott Wood         return ret;
1995666ca4aSScott Wood     }
200861bbc80SAlexander Graf 
201861bbc80SAlexander Graf     sregs.pvr = cenv->spr[SPR_PVR];
2021bc22652SAndreas Färber     return kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
2035666ca4aSScott Wood }
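/*
 * Note that kvm_arch_sync_sregs() only updates the PVR (the remaining
 * sregs are read back from KVM and written out unchanged); the full
 * book3s sregs state (SDR1, SLB, SRs, BATs) is pushed by
 * kvmppc_put_books_sregs() further down in this file.
 */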
2045666ca4aSScott Wood 
20593dd5e85SScott Wood /* Set up a shared TLB array with KVM */
2061bc22652SAndreas Färber static int kvm_booke206_tlb_init(PowerPCCPU *cpu)
20793dd5e85SScott Wood {
2081bc22652SAndreas Färber     CPUPPCState *env = &cpu->env;
2091bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
21093dd5e85SScott Wood     struct kvm_book3e_206_tlb_params params = {};
21193dd5e85SScott Wood     struct kvm_config_tlb cfg = {};
21293dd5e85SScott Wood     unsigned int entries = 0;
21393dd5e85SScott Wood     int ret, i;
21493dd5e85SScott Wood 
21593dd5e85SScott Wood     if (!kvm_enabled() ||
216a60f24b5SAndreas Färber         !kvm_check_extension(cs->kvm_state, KVM_CAP_SW_TLB)) {
21793dd5e85SScott Wood         return 0;
21893dd5e85SScott Wood     }
21993dd5e85SScott Wood 
22093dd5e85SScott Wood     assert(ARRAY_SIZE(params.tlb_sizes) == BOOKE206_MAX_TLBN);
22193dd5e85SScott Wood 
22293dd5e85SScott Wood     for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
22393dd5e85SScott Wood         params.tlb_sizes[i] = booke206_tlb_size(env, i);
22493dd5e85SScott Wood         params.tlb_ways[i] = booke206_tlb_ways(env, i);
22593dd5e85SScott Wood         entries += params.tlb_sizes[i];
22693dd5e85SScott Wood     }
22793dd5e85SScott Wood 
22893dd5e85SScott Wood     assert(entries == env->nb_tlb);
22993dd5e85SScott Wood     assert(sizeof(struct kvm_book3e_206_tlb_entry) == sizeof(ppcmas_tlb_t));
23093dd5e85SScott Wood 
23193dd5e85SScott Wood     env->tlb_dirty = true;
23293dd5e85SScott Wood 
23393dd5e85SScott Wood     cfg.array = (uintptr_t)env->tlb.tlbm;
23493dd5e85SScott Wood     cfg.array_len = sizeof(ppcmas_tlb_t) * entries;
23593dd5e85SScott Wood     cfg.params = (uintptr_t)&params;
23693dd5e85SScott Wood     cfg.mmu_type = KVM_MMU_FSL_BOOKE_NOHV;
23793dd5e85SScott Wood 
23848add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_SW_TLB, 0, (uintptr_t)&cfg);
23993dd5e85SScott Wood     if (ret < 0) {
24093dd5e85SScott Wood         fprintf(stderr, "%s: couldn't enable KVM_CAP_SW_TLB: %s\n",
24193dd5e85SScott Wood                 __func__, strerror(-ret));
24293dd5e85SScott Wood         return ret;
24393dd5e85SScott Wood     }
24493dd5e85SScott Wood 
24593dd5e85SScott Wood     env->kvm_sw_tlb = true;
24693dd5e85SScott Wood     return 0;
24793dd5e85SScott Wood }
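/*
 * Once KVM_CAP_SW_TLB has been enabled above, cfg.array points straight
 * at QEMU's own env->tlb.tlbm, i.e. KVM and QEMU share one TLB array;
 * QEMU pushes its modifications back with kvm_sw_tlb_put() below.
 */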
24893dd5e85SScott Wood 
2494656e1f0SBenjamin Herrenschmidt 
2504656e1f0SBenjamin Herrenschmidt #if defined(TARGET_PPC64)
251ab256960SGreg Kurz static void kvm_get_smmu_info(struct kvm_ppc_smmu_info *info, Error **errp)
2524656e1f0SBenjamin Herrenschmidt {
2534656e1f0SBenjamin Herrenschmidt     int ret;
2544656e1f0SBenjamin Herrenschmidt 
255ab256960SGreg Kurz     assert(kvm_state != NULL);
256ab256960SGreg Kurz 
257ab256960SGreg Kurz     if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) {
25871d0f1eaSGreg Kurz         error_setg(errp, "KVM doesn't expose the MMU features it supports");
25971d0f1eaSGreg Kurz         error_append_hint(errp, "Consider switching to a newer KVM\n");
26071d0f1eaSGreg Kurz         return;
26171d0f1eaSGreg Kurz     }
26271d0f1eaSGreg Kurz 
263ab256960SGreg Kurz     ret = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_SMMU_INFO, info);
2644656e1f0SBenjamin Herrenschmidt     if (ret == 0) {
2654656e1f0SBenjamin Herrenschmidt         return;
2664656e1f0SBenjamin Herrenschmidt     }
2674656e1f0SBenjamin Herrenschmidt 
26871d0f1eaSGreg Kurz     error_setg_errno(errp, -ret,
26971d0f1eaSGreg Kurz                      "KVM failed to provide the MMU features it supports");
2704656e1f0SBenjamin Herrenschmidt }
2714656e1f0SBenjamin Herrenschmidt 
272c64abd1fSSam Bobroff struct ppc_radix_page_info *kvm_get_radix_page_info(void)
273c64abd1fSSam Bobroff {
274c64abd1fSSam Bobroff     KVMState *s = KVM_STATE(current_machine->accelerator);
275c64abd1fSSam Bobroff     struct ppc_radix_page_info *radix_page_info;
276c64abd1fSSam Bobroff     struct kvm_ppc_rmmu_info rmmu_info;
277c64abd1fSSam Bobroff     int i;
278c64abd1fSSam Bobroff 
279c64abd1fSSam Bobroff     if (!kvm_check_extension(s, KVM_CAP_PPC_MMU_RADIX)) {
280c64abd1fSSam Bobroff         return NULL;
281c64abd1fSSam Bobroff     }
282c64abd1fSSam Bobroff     if (kvm_vm_ioctl(s, KVM_PPC_GET_RMMU_INFO, &rmmu_info)) {
283c64abd1fSSam Bobroff         return NULL;
284c64abd1fSSam Bobroff     }
285c64abd1fSSam Bobroff     radix_page_info = g_malloc0(sizeof(*radix_page_info));
286c64abd1fSSam Bobroff     radix_page_info->count = 0;
287c64abd1fSSam Bobroff     for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
288c64abd1fSSam Bobroff         if (rmmu_info.ap_encodings[i]) {
289c64abd1fSSam Bobroff             radix_page_info->entries[i] = rmmu_info.ap_encodings[i];
290c64abd1fSSam Bobroff             radix_page_info->count++;
291c64abd1fSSam Bobroff         }
292c64abd1fSSam Bobroff     }
293c64abd1fSSam Bobroff     return radix_page_info;
294c64abd1fSSam Bobroff }
295c64abd1fSSam Bobroff 
296b4db5413SSuraj Jitindar Singh target_ulong kvmppc_configure_v3_mmu(PowerPCCPU *cpu,
297b4db5413SSuraj Jitindar Singh                                      bool radix, bool gtse,
298b4db5413SSuraj Jitindar Singh                                      uint64_t proc_tbl)
299b4db5413SSuraj Jitindar Singh {
300b4db5413SSuraj Jitindar Singh     CPUState *cs = CPU(cpu);
301b4db5413SSuraj Jitindar Singh     int ret;
302b4db5413SSuraj Jitindar Singh     uint64_t flags = 0;
303b4db5413SSuraj Jitindar Singh     struct kvm_ppc_mmuv3_cfg cfg = {
304b4db5413SSuraj Jitindar Singh         .process_table = proc_tbl,
305b4db5413SSuraj Jitindar Singh     };
306b4db5413SSuraj Jitindar Singh 
307b4db5413SSuraj Jitindar Singh     if (radix) {
308b4db5413SSuraj Jitindar Singh         flags |= KVM_PPC_MMUV3_RADIX;
309b4db5413SSuraj Jitindar Singh     }
310b4db5413SSuraj Jitindar Singh     if (gtse) {
311b4db5413SSuraj Jitindar Singh         flags |= KVM_PPC_MMUV3_GTSE;
312b4db5413SSuraj Jitindar Singh     }
313b4db5413SSuraj Jitindar Singh     cfg.flags = flags;
314b4db5413SSuraj Jitindar Singh     ret = kvm_vm_ioctl(cs->kvm_state, KVM_PPC_CONFIGURE_V3_MMU, &cfg);
315b4db5413SSuraj Jitindar Singh     switch (ret) {
316b4db5413SSuraj Jitindar Singh     case 0:
317b4db5413SSuraj Jitindar Singh         return H_SUCCESS;
318b4db5413SSuraj Jitindar Singh     case -EINVAL:
319b4db5413SSuraj Jitindar Singh         return H_PARAMETER;
320b4db5413SSuraj Jitindar Singh     case -ENODEV:
321b4db5413SSuraj Jitindar Singh         return H_NOT_AVAILABLE;
322b4db5413SSuraj Jitindar Singh     default:
323b4db5413SSuraj Jitindar Singh         return H_HARDWARE;
324b4db5413SSuraj Jitindar Singh     }
325b4db5413SSuraj Jitindar Singh }
326b4db5413SSuraj Jitindar Singh 
32724c6863cSDavid Gibson bool kvmppc_hpt_needs_host_contiguous_pages(void)
32824c6863cSDavid Gibson {
32924c6863cSDavid Gibson     static struct kvm_ppc_smmu_info smmu_info;
33024c6863cSDavid Gibson 
33124c6863cSDavid Gibson     if (!kvm_enabled()) {
33224c6863cSDavid Gibson         return false;
33324c6863cSDavid Gibson     }
33424c6863cSDavid Gibson 
335ab256960SGreg Kurz     kvm_get_smmu_info(&smmu_info, &error_fatal);
33624c6863cSDavid Gibson     return !!(smmu_info.flags & KVM_PPC_PAGE_SIZES_REAL);
33724c6863cSDavid Gibson }
33824c6863cSDavid Gibson 
339e5ca28ecSDavid Gibson void kvm_check_mmu(PowerPCCPU *cpu, Error **errp)
3404656e1f0SBenjamin Herrenschmidt {
341e5ca28ecSDavid Gibson     struct kvm_ppc_smmu_info smmu_info;
3424656e1f0SBenjamin Herrenschmidt     int iq, ik, jq, jk;
34371d0f1eaSGreg Kurz     Error *local_err = NULL;
3444656e1f0SBenjamin Herrenschmidt 
345e5ca28ecSDavid Gibson     /* For now, we only have anything to check on hash64 MMUs */
346e5ca28ecSDavid Gibson     if (!cpu->hash64_opts || !kvm_enabled()) {
3474656e1f0SBenjamin Herrenschmidt         return;
3484656e1f0SBenjamin Herrenschmidt     }
3494656e1f0SBenjamin Herrenschmidt 
350ab256960SGreg Kurz     kvm_get_smmu_info(&smmu_info, &local_err);
35171d0f1eaSGreg Kurz     if (local_err) {
35271d0f1eaSGreg Kurz         error_propagate(errp, local_err);
35371d0f1eaSGreg Kurz         return;
35471d0f1eaSGreg Kurz     }
355e5ca28ecSDavid Gibson 
356e5ca28ecSDavid Gibson     if (ppc_hash64_has(cpu, PPC_HASH64_1TSEG)
357e5ca28ecSDavid Gibson         && !(smmu_info.flags & KVM_PPC_1T_SEGMENTS)) {
358e5ca28ecSDavid Gibson         error_setg(errp,
359e5ca28ecSDavid Gibson                    "KVM does not support 1TiB segments which guest expects");
360e5ca28ecSDavid Gibson         return;
3614656e1f0SBenjamin Herrenschmidt     }
3624656e1f0SBenjamin Herrenschmidt 
363e5ca28ecSDavid Gibson     if (smmu_info.slb_size < cpu->hash64_opts->slb_size) {
364e5ca28ecSDavid Gibson         error_setg(errp, "KVM only supports %u SLB entries, but guest needs %u",
365e5ca28ecSDavid Gibson                    smmu_info.slb_size, cpu->hash64_opts->slb_size);
366e5ca28ecSDavid Gibson         return;
36790da0d5aSBenjamin Herrenschmidt     }
36890da0d5aSBenjamin Herrenschmidt 
36908215d8fSAlexander Graf     /*
370e5ca28ecSDavid Gibson      * Verify that every pagesize supported by the cpu model is
371e5ca28ecSDavid Gibson      * supported by KVM with the same encodings
37208215d8fSAlexander Graf      */
373e5ca28ecSDavid Gibson     for (iq = 0; iq < ARRAY_SIZE(cpu->hash64_opts->sps); iq++) {
374b07c59f7SDavid Gibson         PPCHash64SegmentPageSizes *qsps = &cpu->hash64_opts->sps[iq];
375e5ca28ecSDavid Gibson         struct kvm_ppc_one_seg_page_size *ksps;
3764656e1f0SBenjamin Herrenschmidt 
377e5ca28ecSDavid Gibson         for (ik = 0; ik < ARRAY_SIZE(smmu_info.sps); ik++) {
378e5ca28ecSDavid Gibson             if (qsps->page_shift == smmu_info.sps[ik].page_shift) {
3794656e1f0SBenjamin Herrenschmidt                 break;
3804656e1f0SBenjamin Herrenschmidt             }
3814656e1f0SBenjamin Herrenschmidt         }
382e5ca28ecSDavid Gibson         if (ik >= ARRAY_SIZE(smmu_info.sps)) {
383e5ca28ecSDavid Gibson             error_setg(errp, "KVM doesn't support base page shift %u",
384e5ca28ecSDavid Gibson                        qsps->page_shift);
385e5ca28ecSDavid Gibson             return;
386e5ca28ecSDavid Gibson         }
387e5ca28ecSDavid Gibson 
388e5ca28ecSDavid Gibson         ksps = &smmu_info.sps[ik];
389e5ca28ecSDavid Gibson         if (ksps->slb_enc != qsps->slb_enc) {
390e5ca28ecSDavid Gibson             error_setg(errp,
391e5ca28ecSDavid Gibson "KVM uses SLB encoding 0x%x for page shift %u, but guest expects 0x%x",
392e5ca28ecSDavid Gibson                        ksps->slb_enc, ksps->page_shift, qsps->slb_enc);
393e5ca28ecSDavid Gibson             return;
394e5ca28ecSDavid Gibson         }
395e5ca28ecSDavid Gibson 
396e5ca28ecSDavid Gibson         for (jq = 0; jq < ARRAY_SIZE(qsps->enc); jq++) {
397e5ca28ecSDavid Gibson             for (jk = 0; jk < ARRAY_SIZE(ksps->enc); jk++) {
398e5ca28ecSDavid Gibson                 if (qsps->enc[jq].page_shift == ksps->enc[jk].page_shift) {
3994656e1f0SBenjamin Herrenschmidt                     break;
4004656e1f0SBenjamin Herrenschmidt                 }
4014656e1f0SBenjamin Herrenschmidt             }
4024656e1f0SBenjamin Herrenschmidt 
403e5ca28ecSDavid Gibson             if (jk >= ARRAY_SIZE(ksps->enc)) {
404e5ca28ecSDavid Gibson                 error_setg(errp, "KVM doesn't support page shift %u/%u",
405e5ca28ecSDavid Gibson                            qsps->enc[jq].page_shift, qsps->page_shift);
406e5ca28ecSDavid Gibson                 return;
407e5ca28ecSDavid Gibson             }
408e5ca28ecSDavid Gibson             if (qsps->enc[jq].pte_enc != ksps->enc[jk].pte_enc) {
409e5ca28ecSDavid Gibson                 error_setg(errp,
410e5ca28ecSDavid Gibson "KVM uses PTE encoding 0x%x for page shift %u/%u, but guest expects 0x%x",
411e5ca28ecSDavid Gibson                            ksps->enc[jk].pte_enc, qsps->enc[jq].page_shift,
412e5ca28ecSDavid Gibson                            qsps->page_shift, qsps->enc[jq].pte_enc);
413e5ca28ecSDavid Gibson                 return;
414e5ca28ecSDavid Gibson             }
415e5ca28ecSDavid Gibson         }
4164656e1f0SBenjamin Herrenschmidt     }
4174656e1f0SBenjamin Herrenschmidt 
418e5ca28ecSDavid Gibson     if (ppc_hash64_has(cpu, PPC_HASH64_CI_LARGEPAGE)) {
419e5ca28ecSDavid Gibson         /* Mostly, the guest pagesizes we can use are determined by the
420e5ca28ecSDavid Gibson          * host pages used to map guest RAM, which is handled in the
421e5ca28ecSDavid Gibson          * platform code. Cache-Inhibited largepages (64k) however are
422e5ca28ecSDavid Gibson          * used for I/O, so if they're mapped to the host at all it
423e5ca28ecSDavid Gibson          * will be a normal mapping, not a special hugepage one used
424e5ca28ecSDavid Gibson          * for RAM. */
425e5ca28ecSDavid Gibson         if (getpagesize() < 0x10000) {
426e5ca28ecSDavid Gibson             error_setg(errp,
427e5ca28ecSDavid Gibson                        "KVM can't supply 64kiB CI pages, which guest expects");
428e5ca28ecSDavid Gibson         }
429e5ca28ecSDavid Gibson     }
430e5ca28ecSDavid Gibson }
4314656e1f0SBenjamin Herrenschmidt #endif /* defined (TARGET_PPC64) */
4324656e1f0SBenjamin Herrenschmidt 
433b164e48eSEduardo Habkost unsigned long kvm_arch_vcpu_id(CPUState *cpu)
434b164e48eSEduardo Habkost {
4352e886fb3SSam Bobroff     return POWERPC_CPU(cpu)->vcpu_id;
436b164e48eSEduardo Habkost }
437b164e48eSEduardo Habkost 
43888365d17SBharat Bhushan /* e500 supports 2 h/w breakpoints and 2 watchpoints.
43988365d17SBharat Bhushan  * book3s supports only 1 watchpoint, so array size
44088365d17SBharat Bhushan  * of 4 is sufficient for now.
44188365d17SBharat Bhushan  */
44288365d17SBharat Bhushan #define MAX_HW_BKPTS 4
44388365d17SBharat Bhushan 
44488365d17SBharat Bhushan static struct HWBreakpoint {
44588365d17SBharat Bhushan     target_ulong addr;
44688365d17SBharat Bhushan     int type;
44788365d17SBharat Bhushan } hw_debug_points[MAX_HW_BKPTS];
44888365d17SBharat Bhushan 
44988365d17SBharat Bhushan static CPUWatchpoint hw_watchpoint;
45088365d17SBharat Bhushan 
45188365d17SBharat Bhushan /* By default no breakpoints or watchpoints are supported */
45288365d17SBharat Bhushan static int max_hw_breakpoint;
45388365d17SBharat Bhushan static int max_hw_watchpoint;
45488365d17SBharat Bhushan static int nb_hw_breakpoint;
45588365d17SBharat Bhushan static int nb_hw_watchpoint;
45688365d17SBharat Bhushan 
45788365d17SBharat Bhushan static void kvmppc_hw_debug_points_init(CPUPPCState *cenv)
45888365d17SBharat Bhushan {
45988365d17SBharat Bhushan     if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
46088365d17SBharat Bhushan         max_hw_breakpoint = 2;
46188365d17SBharat Bhushan         max_hw_watchpoint = 2;
46288365d17SBharat Bhushan     }
46388365d17SBharat Bhushan 
46488365d17SBharat Bhushan     if ((max_hw_breakpoint + max_hw_watchpoint) > MAX_HW_BKPTS) {
46588365d17SBharat Bhushan         fprintf(stderr, "Error initializing h/w breakpoints\n");
46688365d17SBharat Bhushan         return;
46788365d17SBharat Bhushan     }
46888365d17SBharat Bhushan }
46988365d17SBharat Bhushan 
47020d695a9SAndreas Färber int kvm_arch_init_vcpu(CPUState *cs)
4715666ca4aSScott Wood {
47220d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
47320d695a9SAndreas Färber     CPUPPCState *cenv = &cpu->env;
4745666ca4aSScott Wood     int ret;
4755666ca4aSScott Wood 
4764656e1f0SBenjamin Herrenschmidt     /* Synchronize sregs with kvm */
4771bc22652SAndreas Färber     ret = kvm_arch_sync_sregs(cpu);
4785666ca4aSScott Wood     if (ret) {
479388e47c7SThomas Huth         if (ret == -EINVAL) {
480388e47c7SThomas Huth             error_report("Register sync failed... If you're using kvm-hv.ko,"
481388e47c7SThomas Huth                          " only \"-cpu host\" is possible");
482388e47c7SThomas Huth         }
4835666ca4aSScott Wood         return ret;
4845666ca4aSScott Wood     }
485861bbc80SAlexander Graf 
486bc72ad67SAlex Bligh     idle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, kvm_kick_cpu, cpu);
487c821c2bdSAlexander Graf 
48893dd5e85SScott Wood     switch (cenv->mmu_model) {
48993dd5e85SScott Wood     case POWERPC_MMU_BOOKE206:
4907f516c96SThomas Huth         /* This target supports access to KVM's guest TLB */
4911bc22652SAndreas Färber         ret = kvm_booke206_tlb_init(cpu);
49293dd5e85SScott Wood         break;
4937f516c96SThomas Huth     case POWERPC_MMU_2_07:
4947f516c96SThomas Huth         if (!cap_htm && !kvmppc_is_pr(cs->kvm_state)) {
4957f516c96SThomas Huth             /* KVM-HV has transactional memory on POWER8 even without the
496f3d9f303SSam Bobroff              * KVM_CAP_PPC_HTM extension, so enable it here instead as
497f3d9f303SSam Bobroff              * long as it's available to userspace on the host. */
498f3d9f303SSam Bobroff             if (qemu_getauxval(AT_HWCAP2) & PPC_FEATURE2_HAS_HTM) {
4997f516c96SThomas Huth                 cap_htm = true;
5007f516c96SThomas Huth             }
501f3d9f303SSam Bobroff         }
5027f516c96SThomas Huth         break;
50393dd5e85SScott Wood     default:
50493dd5e85SScott Wood         break;
50593dd5e85SScott Wood     }
50693dd5e85SScott Wood 
5073c902d44SBharat Bhushan     kvm_get_one_reg(cs, KVM_REG_PPC_DEBUG_INST, &debug_inst_opcode);
50888365d17SBharat Bhushan     kvmppc_hw_debug_points_init(cenv);
5093c902d44SBharat Bhushan 
510861bbc80SAlexander Graf     return ret;
511d76d1650Saurel32 }
512d76d1650Saurel32 
5131bc22652SAndreas Färber static void kvm_sw_tlb_put(PowerPCCPU *cpu)
51493dd5e85SScott Wood {
5151bc22652SAndreas Färber     CPUPPCState *env = &cpu->env;
5161bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
51793dd5e85SScott Wood     struct kvm_dirty_tlb dirty_tlb;
51893dd5e85SScott Wood     unsigned char *bitmap;
51993dd5e85SScott Wood     int ret;
52093dd5e85SScott Wood 
52193dd5e85SScott Wood     if (!env->kvm_sw_tlb) {
52293dd5e85SScott Wood         return;
52393dd5e85SScott Wood     }
52493dd5e85SScott Wood 
52593dd5e85SScott Wood     bitmap = g_malloc((env->nb_tlb + 7) / 8);
52693dd5e85SScott Wood     memset(bitmap, 0xFF, (env->nb_tlb + 7) / 8);
52793dd5e85SScott Wood 
52893dd5e85SScott Wood     dirty_tlb.bitmap = (uintptr_t)bitmap;
52993dd5e85SScott Wood     dirty_tlb.num_dirty = env->nb_tlb;
53093dd5e85SScott Wood 
5311bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_DIRTY_TLB, &dirty_tlb);
53293dd5e85SScott Wood     if (ret) {
53393dd5e85SScott Wood         fprintf(stderr, "%s: KVM_DIRTY_TLB: %s\n",
53493dd5e85SScott Wood                 __func__, strerror(-ret));
53593dd5e85SScott Wood     }
53693dd5e85SScott Wood 
53793dd5e85SScott Wood     g_free(bitmap);
53893dd5e85SScott Wood }
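/*
 * The all-ones bitmap above marks every entry dirty, i.e. the whole
 * software TLB is handed back to KVM in one go rather than tracking
 * individual dirty entries.
 */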
53993dd5e85SScott Wood 
540d67d40eaSDavid Gibson static void kvm_get_one_spr(CPUState *cs, uint64_t id, int spr)
541d67d40eaSDavid Gibson {
542d67d40eaSDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
543d67d40eaSDavid Gibson     CPUPPCState *env = &cpu->env;
544d67d40eaSDavid Gibson     union {
545d67d40eaSDavid Gibson         uint32_t u32;
546d67d40eaSDavid Gibson         uint64_t u64;
547d67d40eaSDavid Gibson     } val;
548d67d40eaSDavid Gibson     struct kvm_one_reg reg = {
549d67d40eaSDavid Gibson         .id = id,
550d67d40eaSDavid Gibson         .addr = (uintptr_t) &val,
551d67d40eaSDavid Gibson     };
552d67d40eaSDavid Gibson     int ret;
553d67d40eaSDavid Gibson 
554d67d40eaSDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
555d67d40eaSDavid Gibson     if (ret != 0) {
556b36f100eSAlexey Kardashevskiy         trace_kvm_failed_spr_get(spr, strerror(errno));
557d67d40eaSDavid Gibson     } else {
558d67d40eaSDavid Gibson         switch (id & KVM_REG_SIZE_MASK) {
559d67d40eaSDavid Gibson         case KVM_REG_SIZE_U32:
560d67d40eaSDavid Gibson             env->spr[spr] = val.u32;
561d67d40eaSDavid Gibson             break;
562d67d40eaSDavid Gibson 
563d67d40eaSDavid Gibson         case KVM_REG_SIZE_U64:
564d67d40eaSDavid Gibson             env->spr[spr] = val.u64;
565d67d40eaSDavid Gibson             break;
566d67d40eaSDavid Gibson 
567d67d40eaSDavid Gibson         default:
568d67d40eaSDavid Gibson             /* Don't handle this size yet */
569d67d40eaSDavid Gibson             abort();
570d67d40eaSDavid Gibson         }
571d67d40eaSDavid Gibson     }
572d67d40eaSDavid Gibson }
573d67d40eaSDavid Gibson 
574d67d40eaSDavid Gibson static void kvm_put_one_spr(CPUState *cs, uint64_t id, int spr)
575d67d40eaSDavid Gibson {
576d67d40eaSDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
577d67d40eaSDavid Gibson     CPUPPCState *env = &cpu->env;
578d67d40eaSDavid Gibson     union {
579d67d40eaSDavid Gibson         uint32_t u32;
580d67d40eaSDavid Gibson         uint64_t u64;
581d67d40eaSDavid Gibson     } val;
582d67d40eaSDavid Gibson     struct kvm_one_reg reg = {
583d67d40eaSDavid Gibson         .id = id,
584d67d40eaSDavid Gibson         .addr = (uintptr_t) &val,
585d67d40eaSDavid Gibson     };
586d67d40eaSDavid Gibson     int ret;
587d67d40eaSDavid Gibson 
588d67d40eaSDavid Gibson     switch (id & KVM_REG_SIZE_MASK) {
589d67d40eaSDavid Gibson     case KVM_REG_SIZE_U32:
590d67d40eaSDavid Gibson         val.u32 = env->spr[spr];
591d67d40eaSDavid Gibson         break;
592d67d40eaSDavid Gibson 
593d67d40eaSDavid Gibson     case KVM_REG_SIZE_U64:
594d67d40eaSDavid Gibson         val.u64 = env->spr[spr];
595d67d40eaSDavid Gibson         break;
596d67d40eaSDavid Gibson 
597d67d40eaSDavid Gibson     default:
598d67d40eaSDavid Gibson         /* Don't handle this size yet */
599d67d40eaSDavid Gibson         abort();
600d67d40eaSDavid Gibson     }
601d67d40eaSDavid Gibson 
602d67d40eaSDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
603d67d40eaSDavid Gibson     if (ret != 0) {
604b36f100eSAlexey Kardashevskiy         trace_kvm_failed_spr_set(spr, strerror(errno));
605d67d40eaSDavid Gibson     }
606d67d40eaSDavid Gibson }
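/*
 * kvm_get_one_spr() and kvm_put_one_spr() derive the transfer width
 * from KVM_REG_SIZE_MASK in the ONE_REG id; SPRs that are neither
 * 32 nor 64 bits wide are not handled and abort().
 */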
607d67d40eaSDavid Gibson 
60870b79849SDavid Gibson static int kvm_put_fp(CPUState *cs)
60970b79849SDavid Gibson {
61070b79849SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
61170b79849SDavid Gibson     CPUPPCState *env = &cpu->env;
61270b79849SDavid Gibson     struct kvm_one_reg reg;
61370b79849SDavid Gibson     int i;
61470b79849SDavid Gibson     int ret;
61570b79849SDavid Gibson 
61670b79849SDavid Gibson     if (env->insns_flags & PPC_FLOAT) {
61770b79849SDavid Gibson         uint64_t fpscr = env->fpscr;
61870b79849SDavid Gibson         bool vsx = !!(env->insns_flags2 & PPC2_VSX);
61970b79849SDavid Gibson 
62070b79849SDavid Gibson         reg.id = KVM_REG_PPC_FPSCR;
62170b79849SDavid Gibson         reg.addr = (uintptr_t)&fpscr;
62270b79849SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
62370b79849SDavid Gibson         if (ret < 0) {
624da56ff91SPeter Maydell             DPRINTF("Unable to set FPSCR to KVM: %s\n", strerror(errno));
62570b79849SDavid Gibson             return ret;
62670b79849SDavid Gibson         }
62770b79849SDavid Gibson 
62870b79849SDavid Gibson         for (i = 0; i < 32; i++) {
62970b79849SDavid Gibson             uint64_t vsr[2];
63070b79849SDavid Gibson 
6313a4b791bSGreg Kurz #ifdef HOST_WORDS_BIGENDIAN
63270b79849SDavid Gibson             vsr[0] = float64_val(env->fpr[i]);
63370b79849SDavid Gibson             vsr[1] = env->vsr[i];
6343a4b791bSGreg Kurz #else
6353a4b791bSGreg Kurz             vsr[0] = env->vsr[i];
6363a4b791bSGreg Kurz             vsr[1] = float64_val(env->fpr[i]);
6373a4b791bSGreg Kurz #endif
63870b79849SDavid Gibson             reg.addr = (uintptr_t) &vsr;
63970b79849SDavid Gibson             reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);
64070b79849SDavid Gibson 
64170b79849SDavid Gibson             ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
64270b79849SDavid Gibson             if (ret < 0) {
643da56ff91SPeter Maydell                 DPRINTF("Unable to set %s%d to KVM: %s\n", vsx ? "VSR" : "FPR",
64470b79849SDavid Gibson                         i, strerror(errno));
64570b79849SDavid Gibson                 return ret;
64670b79849SDavid Gibson             }
64770b79849SDavid Gibson         }
64870b79849SDavid Gibson     }
64970b79849SDavid Gibson 
65070b79849SDavid Gibson     if (env->insns_flags & PPC_ALTIVEC) {
65170b79849SDavid Gibson         reg.id = KVM_REG_PPC_VSCR;
65270b79849SDavid Gibson         reg.addr = (uintptr_t)&env->vscr;
65370b79849SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
65470b79849SDavid Gibson         if (ret < 0) {
655da56ff91SPeter Maydell             DPRINTF("Unable to set VSCR to KVM: %s\n", strerror(errno));
65670b79849SDavid Gibson             return ret;
65770b79849SDavid Gibson         }
65870b79849SDavid Gibson 
65970b79849SDavid Gibson         for (i = 0; i < 32; i++) {
66070b79849SDavid Gibson             reg.id = KVM_REG_PPC_VR(i);
66170b79849SDavid Gibson             reg.addr = (uintptr_t)&env->avr[i];
66270b79849SDavid Gibson             ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
66370b79849SDavid Gibson             if (ret < 0) {
664da56ff91SPeter Maydell                 DPRINTF("Unable to set VR%d to KVM: %s\n", i, strerror(errno));
66570b79849SDavid Gibson                 return ret;
66670b79849SDavid Gibson             }
66770b79849SDavid Gibson         }
66870b79849SDavid Gibson     }
66970b79849SDavid Gibson 
67070b79849SDavid Gibson     return 0;
67170b79849SDavid Gibson }
67270b79849SDavid Gibson 
67370b79849SDavid Gibson static int kvm_get_fp(CPUState *cs)
67470b79849SDavid Gibson {
67570b79849SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
67670b79849SDavid Gibson     CPUPPCState *env = &cpu->env;
67770b79849SDavid Gibson     struct kvm_one_reg reg;
67870b79849SDavid Gibson     int i;
67970b79849SDavid Gibson     int ret;
68070b79849SDavid Gibson 
68170b79849SDavid Gibson     if (env->insns_flags & PPC_FLOAT) {
68270b79849SDavid Gibson         uint64_t fpscr;
68370b79849SDavid Gibson         bool vsx = !!(env->insns_flags2 & PPC2_VSX);
68470b79849SDavid Gibson 
68570b79849SDavid Gibson         reg.id = KVM_REG_PPC_FPSCR;
68670b79849SDavid Gibson         reg.addr = (uintptr_t)&fpscr;
68770b79849SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
68870b79849SDavid Gibson         if (ret < 0) {
689da56ff91SPeter Maydell             DPRINTF("Unable to get FPSCR from KVM: %s\n", strerror(errno));
69070b79849SDavid Gibson             return ret;
69170b79849SDavid Gibson         } else {
69270b79849SDavid Gibson             env->fpscr = fpscr;
69370b79849SDavid Gibson         }
69470b79849SDavid Gibson 
69570b79849SDavid Gibson         for (i = 0; i < 32; i++) {
69670b79849SDavid Gibson             uint64_t vsr[2];
69770b79849SDavid Gibson 
69870b79849SDavid Gibson             reg.addr = (uintptr_t) &vsr;
69970b79849SDavid Gibson             reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);
70070b79849SDavid Gibson 
70170b79849SDavid Gibson             ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
70270b79849SDavid Gibson             if (ret < 0) {
703da56ff91SPeter Maydell                 DPRINTF("Unable to get %s%d from KVM: %s\n",
70470b79849SDavid Gibson                         vsx ? "VSR" : "FPR", i, strerror(errno));
70570b79849SDavid Gibson                 return ret;
70670b79849SDavid Gibson             } else {
7073a4b791bSGreg Kurz #ifdef HOST_WORDS_BIGENDIAN
70870b79849SDavid Gibson                 env->fpr[i] = vsr[0];
70970b79849SDavid Gibson                 if (vsx) {
71070b79849SDavid Gibson                     env->vsr[i] = vsr[1];
71170b79849SDavid Gibson                 }
7123a4b791bSGreg Kurz #else
7133a4b791bSGreg Kurz                 env->fpr[i] = vsr[1];
7143a4b791bSGreg Kurz                 if (vsx) {
7153a4b791bSGreg Kurz                     env->vsr[i] = vsr[0];
7163a4b791bSGreg Kurz                 }
7173a4b791bSGreg Kurz #endif
71870b79849SDavid Gibson             }
71970b79849SDavid Gibson         }
72070b79849SDavid Gibson     }
72170b79849SDavid Gibson 
72270b79849SDavid Gibson     if (env->insns_flags & PPC_ALTIVEC) {
72370b79849SDavid Gibson         reg.id = KVM_REG_PPC_VSCR;
72470b79849SDavid Gibson         reg.addr = (uintptr_t)&env->vscr;
72570b79849SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
72670b79849SDavid Gibson         if (ret < 0) {
727da56ff91SPeter Maydell             DPRINTF("Unable to get VSCR from KVM: %s\n", strerror(errno));
72870b79849SDavid Gibson             return ret;
72970b79849SDavid Gibson         }
73070b79849SDavid Gibson 
73170b79849SDavid Gibson         for (i = 0; i < 32; i++) {
73270b79849SDavid Gibson             reg.id = KVM_REG_PPC_VR(i);
73370b79849SDavid Gibson             reg.addr = (uintptr_t)&env->avr[i];
73470b79849SDavid Gibson             ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
73570b79849SDavid Gibson             if (ret < 0) {
736da56ff91SPeter Maydell                 DPRINTF("Unable to get VR%d from KVM: %s\n",
73770b79849SDavid Gibson                         i, strerror(errno));
73870b79849SDavid Gibson                 return ret;
73970b79849SDavid Gibson             }
74070b79849SDavid Gibson         }
74170b79849SDavid Gibson     }
74270b79849SDavid Gibson 
74370b79849SDavid Gibson     return 0;
74470b79849SDavid Gibson }
74570b79849SDavid Gibson 
7469b00ea49SDavid Gibson #if defined(TARGET_PPC64)
7479b00ea49SDavid Gibson static int kvm_get_vpa(CPUState *cs)
7489b00ea49SDavid Gibson {
7499b00ea49SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
7507388efafSDavid Gibson     sPAPRCPUState *spapr_cpu = spapr_cpu_state(cpu);
7519b00ea49SDavid Gibson     struct kvm_one_reg reg;
7529b00ea49SDavid Gibson     int ret;
7539b00ea49SDavid Gibson 
7549b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_ADDR;
7557388efafSDavid Gibson     reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
7569b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
7579b00ea49SDavid Gibson     if (ret < 0) {
758da56ff91SPeter Maydell         DPRINTF("Unable to get VPA address from KVM: %s\n", strerror(errno));
7599b00ea49SDavid Gibson         return ret;
7609b00ea49SDavid Gibson     }
7619b00ea49SDavid Gibson 
7627388efafSDavid Gibson     assert((uintptr_t)&spapr_cpu->slb_shadow_size
7637388efafSDavid Gibson            == ((uintptr_t)&spapr_cpu->slb_shadow_addr + 8));
7649b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_SLB;
7657388efafSDavid Gibson     reg.addr = (uintptr_t)&spapr_cpu->slb_shadow_addr;
7669b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
7679b00ea49SDavid Gibson     if (ret < 0) {
768da56ff91SPeter Maydell         DPRINTF("Unable to get SLB shadow state from KVM: %s\n",
7699b00ea49SDavid Gibson                 strerror(errno));
7709b00ea49SDavid Gibson         return ret;
7719b00ea49SDavid Gibson     }
7729b00ea49SDavid Gibson 
7737388efafSDavid Gibson     assert((uintptr_t)&spapr_cpu->dtl_size
7747388efafSDavid Gibson            == ((uintptr_t)&spapr_cpu->dtl_addr + 8));
7759b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_DTL;
7767388efafSDavid Gibson     reg.addr = (uintptr_t)&spapr_cpu->dtl_addr;
7779b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
7789b00ea49SDavid Gibson     if (ret < 0) {
779da56ff91SPeter Maydell         DPRINTF("Unable to get dispatch trace log state from KVM: %s\n",
7809b00ea49SDavid Gibson                 strerror(errno));
7819b00ea49SDavid Gibson         return ret;
7829b00ea49SDavid Gibson     }
7839b00ea49SDavid Gibson 
7849b00ea49SDavid Gibson     return 0;
7859b00ea49SDavid Gibson }
7869b00ea49SDavid Gibson 
7879b00ea49SDavid Gibson static int kvm_put_vpa(CPUState *cs)
7889b00ea49SDavid Gibson {
7899b00ea49SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
7907388efafSDavid Gibson     sPAPRCPUState *spapr_cpu = spapr_cpu_state(cpu);
7919b00ea49SDavid Gibson     struct kvm_one_reg reg;
7929b00ea49SDavid Gibson     int ret;
7939b00ea49SDavid Gibson 
7949b00ea49SDavid Gibson     /* SLB shadow or DTL can't be registered unless a master VPA is
7959b00ea49SDavid Gibson      * registered.  That means when restoring state, if a VPA *is*
7969b00ea49SDavid Gibson      * registered, we need to set that up first.  If not, we need to
7979b00ea49SDavid Gibson      * deregister the others before deregistering the master VPA */
7987388efafSDavid Gibson     assert(spapr_cpu->vpa_addr
7997388efafSDavid Gibson            || !(spapr_cpu->slb_shadow_addr || spapr_cpu->dtl_addr));
8009b00ea49SDavid Gibson 
8017388efafSDavid Gibson     if (spapr_cpu->vpa_addr) {
8029b00ea49SDavid Gibson         reg.id = KVM_REG_PPC_VPA_ADDR;
8037388efafSDavid Gibson         reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
8049b00ea49SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
8059b00ea49SDavid Gibson         if (ret < 0) {
806da56ff91SPeter Maydell             DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno));
8079b00ea49SDavid Gibson             return ret;
8089b00ea49SDavid Gibson         }
8099b00ea49SDavid Gibson     }
8109b00ea49SDavid Gibson 
8117388efafSDavid Gibson     assert((uintptr_t)&spapr_cpu->slb_shadow_size
8127388efafSDavid Gibson            == ((uintptr_t)&spapr_cpu->slb_shadow_addr + 8));
8139b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_SLB;
8147388efafSDavid Gibson     reg.addr = (uintptr_t)&spapr_cpu->slb_shadow_addr;
8159b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
8169b00ea49SDavid Gibson     if (ret < 0) {
817da56ff91SPeter Maydell         DPRINTF("Unable to set SLB shadow state to KVM: %s\n", strerror(errno));
8189b00ea49SDavid Gibson         return ret;
8199b00ea49SDavid Gibson     }
8209b00ea49SDavid Gibson 
8217388efafSDavid Gibson     assert((uintptr_t)&spapr_cpu->dtl_size
8227388efafSDavid Gibson            == ((uintptr_t)&spapr_cpu->dtl_addr + 8));
8239b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_DTL;
8247388efafSDavid Gibson     reg.addr = (uintptr_t)&spapr_cpu->dtl_addr;
8259b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
8269b00ea49SDavid Gibson     if (ret < 0) {
827da56ff91SPeter Maydell         DPRINTF("Unable to set dispatch trace log state to KVM: %s\n",
8289b00ea49SDavid Gibson                 strerror(errno));
8299b00ea49SDavid Gibson         return ret;
8309b00ea49SDavid Gibson     }
8319b00ea49SDavid Gibson 
8327388efafSDavid Gibson     if (!spapr_cpu->vpa_addr) {
8339b00ea49SDavid Gibson         reg.id = KVM_REG_PPC_VPA_ADDR;
8347388efafSDavid Gibson         reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
8359b00ea49SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
8369b00ea49SDavid Gibson         if (ret < 0) {
837da56ff91SPeter Maydell             DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno));
8389b00ea49SDavid Gibson             return ret;
8399b00ea49SDavid Gibson         }
8409b00ea49SDavid Gibson     }
8419b00ea49SDavid Gibson 
8429b00ea49SDavid Gibson     return 0;
8439b00ea49SDavid Gibson }
8449b00ea49SDavid Gibson #endif /* TARGET_PPC64 */
8459b00ea49SDavid Gibson 
846e5c0d3ceSDavid Gibson int kvmppc_put_books_sregs(PowerPCCPU *cpu)
847a7a00a72SDavid Gibson {
848a7a00a72SDavid Gibson     CPUPPCState *env = &cpu->env;
849a7a00a72SDavid Gibson     struct kvm_sregs sregs;
850a7a00a72SDavid Gibson     int i;
851a7a00a72SDavid Gibson 
852a7a00a72SDavid Gibson     sregs.pvr = env->spr[SPR_PVR];
853a7a00a72SDavid Gibson 
8541ec26c75SGreg Kurz     if (cpu->vhyp) {
8551ec26c75SGreg Kurz         PPCVirtualHypervisorClass *vhc =
8561ec26c75SGreg Kurz             PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
8571ec26c75SGreg Kurz         sregs.u.s.sdr1 = vhc->encode_hpt_for_kvm_pr(cpu->vhyp);
8581ec26c75SGreg Kurz     } else {
859a7a00a72SDavid Gibson         sregs.u.s.sdr1 = env->spr[SPR_SDR1];
8601ec26c75SGreg Kurz     }
861a7a00a72SDavid Gibson 
862a7a00a72SDavid Gibson     /* Sync SLB */
863a7a00a72SDavid Gibson #ifdef TARGET_PPC64
864a7a00a72SDavid Gibson     for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
865a7a00a72SDavid Gibson         sregs.u.s.ppc64.slb[i].slbe = env->slb[i].esid;
866a7a00a72SDavid Gibson         if (env->slb[i].esid & SLB_ESID_V) {
867a7a00a72SDavid Gibson             sregs.u.s.ppc64.slb[i].slbe |= i;
868a7a00a72SDavid Gibson         }
869a7a00a72SDavid Gibson         sregs.u.s.ppc64.slb[i].slbv = env->slb[i].vsid;
870a7a00a72SDavid Gibson     }
871a7a00a72SDavid Gibson #endif
872a7a00a72SDavid Gibson 
873a7a00a72SDavid Gibson     /* Sync SRs */
874a7a00a72SDavid Gibson     for (i = 0; i < 16; i++) {
875a7a00a72SDavid Gibson         sregs.u.s.ppc32.sr[i] = env->sr[i];
876a7a00a72SDavid Gibson     }
877a7a00a72SDavid Gibson 
878a7a00a72SDavid Gibson     /* Sync BATs */
879a7a00a72SDavid Gibson     for (i = 0; i < 8; i++) {
880a7a00a72SDavid Gibson         /* Beware. We have to swap upper and lower bits here */
881a7a00a72SDavid Gibson         sregs.u.s.ppc32.dbat[i] = ((uint64_t)env->DBAT[0][i] << 32)
882a7a00a72SDavid Gibson             | env->DBAT[1][i];
883a7a00a72SDavid Gibson         sregs.u.s.ppc32.ibat[i] = ((uint64_t)env->IBAT[0][i] << 32)
884a7a00a72SDavid Gibson             | env->IBAT[1][i];
885a7a00a72SDavid Gibson     }
886a7a00a72SDavid Gibson 
887a7a00a72SDavid Gibson     return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
888a7a00a72SDavid Gibson }
889a7a00a72SDavid Gibson 
89020d695a9SAndreas Färber int kvm_arch_put_registers(CPUState *cs, int level)
891d76d1650Saurel32 {
89220d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
89320d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
894d76d1650Saurel32     struct kvm_regs regs;
895d76d1650Saurel32     int ret;
896d76d1650Saurel32     int i;
897d76d1650Saurel32 
8981bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
8991bc22652SAndreas Färber     if (ret < 0) {
900d76d1650Saurel32         return ret;
9011bc22652SAndreas Färber     }
902d76d1650Saurel32 
903d76d1650Saurel32     regs.ctr = env->ctr;
904d76d1650Saurel32     regs.lr  = env->lr;
905da91a00fSRichard Henderson     regs.xer = cpu_read_xer(env);
906d76d1650Saurel32     regs.msr = env->msr;
907d76d1650Saurel32     regs.pc = env->nip;
908d76d1650Saurel32 
909d76d1650Saurel32     regs.srr0 = env->spr[SPR_SRR0];
910d76d1650Saurel32     regs.srr1 = env->spr[SPR_SRR1];
911d76d1650Saurel32 
912d76d1650Saurel32     regs.sprg0 = env->spr[SPR_SPRG0];
913d76d1650Saurel32     regs.sprg1 = env->spr[SPR_SPRG1];
914d76d1650Saurel32     regs.sprg2 = env->spr[SPR_SPRG2];
915d76d1650Saurel32     regs.sprg3 = env->spr[SPR_SPRG3];
916d76d1650Saurel32     regs.sprg4 = env->spr[SPR_SPRG4];
917d76d1650Saurel32     regs.sprg5 = env->spr[SPR_SPRG5];
918d76d1650Saurel32     regs.sprg6 = env->spr[SPR_SPRG6];
919d76d1650Saurel32     regs.sprg7 = env->spr[SPR_SPRG7];
920d76d1650Saurel32 
92190dc8812SScott Wood     regs.pid = env->spr[SPR_BOOKE_PID];
92290dc8812SScott Wood 
923d76d1650Saurel32     for (i = 0; i < 32; i++)
924d76d1650Saurel32         regs.gpr[i] = env->gpr[i];
925d76d1650Saurel32 
9264bddaf55SAlexey Kardashevskiy     regs.cr = 0;
9274bddaf55SAlexey Kardashevskiy     for (i = 0; i < 8; i++) {
9284bddaf55SAlexey Kardashevskiy         regs.cr |= (env->crf[i] & 15) << (4 * (7 - i));
9294bddaf55SAlexey Kardashevskiy     }
9304bddaf55SAlexey Kardashevskiy 
9311bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
932d76d1650Saurel32     if (ret < 0)
933d76d1650Saurel32         return ret;
934d76d1650Saurel32 
93570b79849SDavid Gibson     kvm_put_fp(cs);
93670b79849SDavid Gibson 
93793dd5e85SScott Wood     if (env->tlb_dirty) {
9381bc22652SAndreas Färber         kvm_sw_tlb_put(cpu);
93993dd5e85SScott Wood         env->tlb_dirty = false;
94093dd5e85SScott Wood     }
94193dd5e85SScott Wood 
942f1af19d7SDavid Gibson     if (cap_segstate && (level >= KVM_PUT_RESET_STATE)) {
943a7a00a72SDavid Gibson         ret = kvmppc_put_books_sregs(cpu);
944a7a00a72SDavid Gibson         if (ret < 0) {
945f1af19d7SDavid Gibson             return ret;
946f1af19d7SDavid Gibson         }
947f1af19d7SDavid Gibson     }
948f1af19d7SDavid Gibson 
949f1af19d7SDavid Gibson     if (cap_hior && (level >= KVM_PUT_RESET_STATE)) {
950d67d40eaSDavid Gibson         kvm_put_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
951d67d40eaSDavid Gibson     }
952f1af19d7SDavid Gibson 
953d67d40eaSDavid Gibson     if (cap_one_reg) {
954d67d40eaSDavid Gibson         int i;
955d67d40eaSDavid Gibson 
956d67d40eaSDavid Gibson         /* We deliberately ignore errors here: for kernels which have
957d67d40eaSDavid Gibson          * the ONE_REG calls but don't support the specific
958d67d40eaSDavid Gibson          * registers, there's a reasonable chance things will still
959d67d40eaSDavid Gibson          * work, at least until we try to migrate. */
960d67d40eaSDavid Gibson         for (i = 0; i < 1024; i++) {
961d67d40eaSDavid Gibson             uint64_t id = env->spr_cb[i].one_reg_id;
962d67d40eaSDavid Gibson 
963d67d40eaSDavid Gibson             if (id != 0) {
964d67d40eaSDavid Gibson                 kvm_put_one_spr(cs, id, i);
965d67d40eaSDavid Gibson             }
966f1af19d7SDavid Gibson         }
9679b00ea49SDavid Gibson 
9689b00ea49SDavid Gibson #ifdef TARGET_PPC64
96980b3f79bSAlexey Kardashevskiy         if (msr_ts) {
97080b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
97180b3f79bSAlexey Kardashevskiy                 kvm_set_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
97280b3f79bSAlexey Kardashevskiy             }
97380b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
97480b3f79bSAlexey Kardashevskiy                 kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
97580b3f79bSAlexey Kardashevskiy             }
97680b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
97780b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
97880b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
97980b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
98080b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
98180b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
98280b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
98380b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
98480b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
98580b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
98680b3f79bSAlexey Kardashevskiy         }
98780b3f79bSAlexey Kardashevskiy 
9889b00ea49SDavid Gibson         if (cap_papr) {
9899b00ea49SDavid Gibson             if (kvm_put_vpa(cs) < 0) {
990da56ff91SPeter Maydell                 DPRINTF("Warning: Unable to set VPA information to KVM\n");
9919b00ea49SDavid Gibson             }
9929b00ea49SDavid Gibson         }
99398a8b524SAlexey Kardashevskiy 
99498a8b524SAlexey Kardashevskiy         kvm_set_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
9959b00ea49SDavid Gibson #endif /* TARGET_PPC64 */
996f1af19d7SDavid Gibson     }
997f1af19d7SDavid Gibson 
998d76d1650Saurel32     return ret;
999d76d1650Saurel32 }
1000d76d1650Saurel32 
1001c371c2e3SBharat Bhushan static void kvm_sync_excp(CPUPPCState *env, int vector, int ivor)
1002c371c2e3SBharat Bhushan {
1003c371c2e3SBharat Bhushan      env->excp_vectors[vector] = env->spr[ivor] + env->spr[SPR_BOOKE_IVPR];
1004c371c2e3SBharat Bhushan }
1005c371c2e3SBharat Bhushan 
1006a7a00a72SDavid Gibson static int kvmppc_get_booke_sregs(PowerPCCPU *cpu)
1007d76d1650Saurel32 {
100820d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
1009ba5e5090SAlexander Graf     struct kvm_sregs sregs;
1010a7a00a72SDavid Gibson     int ret;
1011d76d1650Saurel32 
1012a7a00a72SDavid Gibson     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
101390dc8812SScott Wood     if (ret < 0) {
101490dc8812SScott Wood         return ret;
101590dc8812SScott Wood     }
101690dc8812SScott Wood 
101790dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_BASE) {
101890dc8812SScott Wood         env->spr[SPR_BOOKE_CSRR0] = sregs.u.e.csrr0;
101990dc8812SScott Wood         env->spr[SPR_BOOKE_CSRR1] = sregs.u.e.csrr1;
102090dc8812SScott Wood         env->spr[SPR_BOOKE_ESR] = sregs.u.e.esr;
102190dc8812SScott Wood         env->spr[SPR_BOOKE_DEAR] = sregs.u.e.dear;
102290dc8812SScott Wood         env->spr[SPR_BOOKE_MCSR] = sregs.u.e.mcsr;
102390dc8812SScott Wood         env->spr[SPR_BOOKE_TSR] = sregs.u.e.tsr;
102490dc8812SScott Wood         env->spr[SPR_BOOKE_TCR] = sregs.u.e.tcr;
102590dc8812SScott Wood         env->spr[SPR_DECR] = sregs.u.e.dec;
102690dc8812SScott Wood         env->spr[SPR_TBL] = sregs.u.e.tb & 0xffffffff;
102790dc8812SScott Wood         env->spr[SPR_TBU] = sregs.u.e.tb >> 32;
102890dc8812SScott Wood         env->spr[SPR_VRSAVE] = sregs.u.e.vrsave;
102990dc8812SScott Wood     }
103090dc8812SScott Wood 
103190dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_ARCH206) {
103290dc8812SScott Wood         env->spr[SPR_BOOKE_PIR] = sregs.u.e.pir;
103390dc8812SScott Wood         env->spr[SPR_BOOKE_MCSRR0] = sregs.u.e.mcsrr0;
103490dc8812SScott Wood         env->spr[SPR_BOOKE_MCSRR1] = sregs.u.e.mcsrr1;
103590dc8812SScott Wood         env->spr[SPR_BOOKE_DECAR] = sregs.u.e.decar;
103690dc8812SScott Wood         env->spr[SPR_BOOKE_IVPR] = sregs.u.e.ivpr;
103790dc8812SScott Wood     }
103890dc8812SScott Wood 
103990dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_64) {
104090dc8812SScott Wood         env->spr[SPR_BOOKE_EPCR] = sregs.u.e.epcr;
104190dc8812SScott Wood     }
104290dc8812SScott Wood 
104390dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_SPRG8) {
104490dc8812SScott Wood         env->spr[SPR_BOOKE_SPRG8] = sregs.u.e.sprg8;
104590dc8812SScott Wood     }
104690dc8812SScott Wood 
104790dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_IVOR) {
104890dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0];
1049c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_CRITICAL,  SPR_BOOKE_IVOR0);
105090dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1];
1051c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_MCHECK,  SPR_BOOKE_IVOR1);
105290dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2];
1053c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DSI,  SPR_BOOKE_IVOR2);
105490dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3];
1055c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_ISI,  SPR_BOOKE_IVOR3);
105690dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4];
1057c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_EXTERNAL,  SPR_BOOKE_IVOR4);
105890dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5];
1059c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_ALIGN,  SPR_BOOKE_IVOR5);
106090dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6];
1061c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_PROGRAM,  SPR_BOOKE_IVOR6);
106290dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7];
1063c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_FPU,  SPR_BOOKE_IVOR7);
106490dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8];
1065c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_SYSCALL,  SPR_BOOKE_IVOR8);
106690dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9];
1067c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_APU,  SPR_BOOKE_IVOR9);
106890dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10];
1069c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DECR,  SPR_BOOKE_IVOR10);
107090dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11];
1071c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_FIT,  SPR_BOOKE_IVOR11);
107290dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12];
1073c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_WDT,  SPR_BOOKE_IVOR12);
107490dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13];
1075c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DTLB,  SPR_BOOKE_IVOR13);
107690dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14];
1077c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_ITLB,  SPR_BOOKE_IVOR14);
107890dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15];
1079c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DEBUG,  SPR_BOOKE_IVOR15);
108090dc8812SScott Wood 
108190dc8812SScott Wood         if (sregs.u.e.features & KVM_SREGS_E_SPE) {
108290dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0];
1083c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_SPEU,  SPR_BOOKE_IVOR32);
108490dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1];
1085c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_EFPDI,  SPR_BOOKE_IVOR33);
108690dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2];
1087c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_EFPRI,  SPR_BOOKE_IVOR34);
108890dc8812SScott Wood         }
108990dc8812SScott Wood 
109090dc8812SScott Wood         if (sregs.u.e.features & KVM_SREGS_E_PM) {
109190dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3];
1092c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_EPERFM,  SPR_BOOKE_IVOR35);
109390dc8812SScott Wood         }
109490dc8812SScott Wood 
109590dc8812SScott Wood         if (sregs.u.e.features & KVM_SREGS_E_PC) {
109690dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4];
1097c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_DOORI,  SPR_BOOKE_IVOR36);
109890dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5];
1099c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_DOORCI, SPR_BOOKE_IVOR37);
110090dc8812SScott Wood         }
110190dc8812SScott Wood     }
110290dc8812SScott Wood 
110390dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_ARCH206_MMU) {
110490dc8812SScott Wood         env->spr[SPR_BOOKE_MAS0] = sregs.u.e.mas0;
110590dc8812SScott Wood         env->spr[SPR_BOOKE_MAS1] = sregs.u.e.mas1;
110690dc8812SScott Wood         env->spr[SPR_BOOKE_MAS2] = sregs.u.e.mas2;
110790dc8812SScott Wood         env->spr[SPR_BOOKE_MAS3] = sregs.u.e.mas7_3 & 0xffffffff;
110890dc8812SScott Wood         env->spr[SPR_BOOKE_MAS4] = sregs.u.e.mas4;
110990dc8812SScott Wood         env->spr[SPR_BOOKE_MAS6] = sregs.u.e.mas6;
111090dc8812SScott Wood         env->spr[SPR_BOOKE_MAS7] = sregs.u.e.mas7_3 >> 32;
111190dc8812SScott Wood         env->spr[SPR_MMUCFG] = sregs.u.e.mmucfg;
111290dc8812SScott Wood         env->spr[SPR_BOOKE_TLB0CFG] = sregs.u.e.tlbcfg[0];
111390dc8812SScott Wood         env->spr[SPR_BOOKE_TLB1CFG] = sregs.u.e.tlbcfg[1];
111490dc8812SScott Wood     }
111590dc8812SScott Wood 
111690dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_EXP) {
111790dc8812SScott Wood         env->spr[SPR_BOOKE_EPR] = sregs.u.e.epr;
111890dc8812SScott Wood     }
111990dc8812SScott Wood 
112090dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_PD) {
112190dc8812SScott Wood         env->spr[SPR_BOOKE_EPLC] = sregs.u.e.eplc;
112290dc8812SScott Wood         env->spr[SPR_BOOKE_EPSC] = sregs.u.e.epsc;
112390dc8812SScott Wood     }
112490dc8812SScott Wood 
112590dc8812SScott Wood     if (sregs.u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
112690dc8812SScott Wood         env->spr[SPR_E500_SVR] = sregs.u.e.impl.fsl.svr;
112790dc8812SScott Wood         env->spr[SPR_Exxx_MCAR] = sregs.u.e.impl.fsl.mcar;
112890dc8812SScott Wood         env->spr[SPR_HID0] = sregs.u.e.impl.fsl.hid0;
112990dc8812SScott Wood 
113090dc8812SScott Wood         if (sregs.u.e.impl.fsl.features & KVM_SREGS_E_FSL_PIDn) {
113190dc8812SScott Wood             env->spr[SPR_BOOKE_PID1] = sregs.u.e.impl.fsl.pid1;
113290dc8812SScott Wood             env->spr[SPR_BOOKE_PID2] = sregs.u.e.impl.fsl.pid2;
113390dc8812SScott Wood         }
113490dc8812SScott Wood     }
1135a7a00a72SDavid Gibson 
1136a7a00a72SDavid Gibson     return 0;
1137fafc0b6aSAlexander Graf }
113890dc8812SScott Wood 
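/* Fetch the Book3S-style storage registers (SDR1, the SLB, segment
 * registers and BATs) from KVM and update QEMU's copies. */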
1139a7a00a72SDavid Gibson static int kvmppc_get_books_sregs(PowerPCCPU *cpu)
1140a7a00a72SDavid Gibson {
1141a7a00a72SDavid Gibson     CPUPPCState *env = &cpu->env;
1142a7a00a72SDavid Gibson     struct kvm_sregs sregs;
1143a7a00a72SDavid Gibson     int ret;
1144a7a00a72SDavid Gibson     int i;
1145a7a00a72SDavid Gibson 
1146a7a00a72SDavid Gibson     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
114790dc8812SScott Wood     if (ret < 0) {
114890dc8812SScott Wood         return ret;
114990dc8812SScott Wood     }
115090dc8812SScott Wood 
1151e57ca75cSDavid Gibson     if (!cpu->vhyp) {
1152bb593904SDavid Gibson         ppc_store_sdr1(env, sregs.u.s.sdr1);
1153f3c75d42SAneesh Kumar K.V     }
1154ba5e5090SAlexander Graf 
1155ba5e5090SAlexander Graf     /* Sync SLB */
115682c09f2fSAlexander Graf #ifdef TARGET_PPC64
11574b4d4a21SAneesh Kumar K.V     /*
11584b4d4a21SAneesh Kumar K.V      * The packed SLB array we get from KVM_GET_SREGS only contains
1159a7a00a72SDavid Gibson      * information about valid entries. So we flush our internal copy
1160a7a00a72SDavid Gibson      * to get rid of stale ones, then put all valid SLB entries back
1161a7a00a72SDavid Gibson      * in.
11624b4d4a21SAneesh Kumar K.V      */
11634b4d4a21SAneesh Kumar K.V     memset(env->slb, 0, sizeof(env->slb));
1164d83af167SAneesh Kumar K.V     for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
11654b4d4a21SAneesh Kumar K.V         target_ulong rb = sregs.u.s.ppc64.slb[i].slbe;
11664b4d4a21SAneesh Kumar K.V         target_ulong rs = sregs.u.s.ppc64.slb[i].slbv;
11674b4d4a21SAneesh Kumar K.V         /*
11684b4d4a21SAneesh Kumar K.V          * Only restore valid entries
11694b4d4a21SAneesh Kumar K.V          */
11704b4d4a21SAneesh Kumar K.V         if (rb & SLB_ESID_V) {
1171bcd81230SDavid Gibson             ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs);
11724b4d4a21SAneesh Kumar K.V         }
1173ba5e5090SAlexander Graf     }
117482c09f2fSAlexander Graf #endif
1175ba5e5090SAlexander Graf 
1176ba5e5090SAlexander Graf     /* Sync SRs */
1177ba5e5090SAlexander Graf     for (i = 0; i < 16; i++) {
1178ba5e5090SAlexander Graf         env->sr[i] = sregs.u.s.ppc32.sr[i];
1179ba5e5090SAlexander Graf     }
1180ba5e5090SAlexander Graf 
1181ba5e5090SAlexander Graf     /* Sync BATs */
1182ba5e5090SAlexander Graf     for (i = 0; i < 8; i++) {
1183ba5e5090SAlexander Graf         env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff;
1184ba5e5090SAlexander Graf         env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32;
1185ba5e5090SAlexander Graf         env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff;
1186ba5e5090SAlexander Graf         env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32;
1187ba5e5090SAlexander Graf     }
1188a7a00a72SDavid Gibson 
1189a7a00a72SDavid Gibson     return 0;
1190a7a00a72SDavid Gibson }
1191a7a00a72SDavid Gibson 
1192a7a00a72SDavid Gibson int kvm_arch_get_registers(CPUState *cs)
1193a7a00a72SDavid Gibson {
1194a7a00a72SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
1195a7a00a72SDavid Gibson     CPUPPCState *env = &cpu->env;
1196a7a00a72SDavid Gibson     struct kvm_regs regs;
1197a7a00a72SDavid Gibson     uint32_t cr;
1198a7a00a72SDavid Gibson     int i, ret;
1199a7a00a72SDavid Gibson 
1200a7a00a72SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
1201a7a00a72SDavid Gibson     if (ret < 0) {
1202a7a00a72SDavid Gibson         return ret;
    }
1203a7a00a72SDavid Gibson 
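    /* KVM returns CR as one packed 32-bit word; unpack it into the eight
     * 4-bit fields, crf[0] holding the most significant nibble. */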
1204a7a00a72SDavid Gibson     cr = regs.cr;
1205a7a00a72SDavid Gibson     for (i = 7; i >= 0; i--) {
1206a7a00a72SDavid Gibson         env->crf[i] = cr & 15;
1207a7a00a72SDavid Gibson         cr >>= 4;
1208a7a00a72SDavid Gibson     }
1209a7a00a72SDavid Gibson 
1210a7a00a72SDavid Gibson     env->ctr = regs.ctr;
1211a7a00a72SDavid Gibson     env->lr = regs.lr;
1212a7a00a72SDavid Gibson     cpu_write_xer(env, regs.xer);
1213a7a00a72SDavid Gibson     env->msr = regs.msr;
1214a7a00a72SDavid Gibson     env->nip = regs.pc;
1215a7a00a72SDavid Gibson 
1216a7a00a72SDavid Gibson     env->spr[SPR_SRR0] = regs.srr0;
1217a7a00a72SDavid Gibson     env->spr[SPR_SRR1] = regs.srr1;
1218a7a00a72SDavid Gibson 
1219a7a00a72SDavid Gibson     env->spr[SPR_SPRG0] = regs.sprg0;
1220a7a00a72SDavid Gibson     env->spr[SPR_SPRG1] = regs.sprg1;
1221a7a00a72SDavid Gibson     env->spr[SPR_SPRG2] = regs.sprg2;
1222a7a00a72SDavid Gibson     env->spr[SPR_SPRG3] = regs.sprg3;
1223a7a00a72SDavid Gibson     env->spr[SPR_SPRG4] = regs.sprg4;
1224a7a00a72SDavid Gibson     env->spr[SPR_SPRG5] = regs.sprg5;
1225a7a00a72SDavid Gibson     env->spr[SPR_SPRG6] = regs.sprg6;
1226a7a00a72SDavid Gibson     env->spr[SPR_SPRG7] = regs.sprg7;
1227a7a00a72SDavid Gibson 
1228a7a00a72SDavid Gibson     env->spr[SPR_BOOKE_PID] = regs.pid;
1229a7a00a72SDavid Gibson 
1230a7a00a72SDavid Gibson     for (i = 0; i < 32; i++) {
1231a7a00a72SDavid Gibson         env->gpr[i] = regs.gpr[i];
    }
1232a7a00a72SDavid Gibson 
1233a7a00a72SDavid Gibson     kvm_get_fp(cs);
1234a7a00a72SDavid Gibson 
1235a7a00a72SDavid Gibson     if (cap_booke_sregs) {
1236a7a00a72SDavid Gibson         ret = kvmppc_get_booke_sregs(cpu);
1237a7a00a72SDavid Gibson         if (ret < 0) {
1238a7a00a72SDavid Gibson             return ret;
1239a7a00a72SDavid Gibson         }
1240a7a00a72SDavid Gibson     }
1241a7a00a72SDavid Gibson 
1242a7a00a72SDavid Gibson     if (cap_segstate) {
1243a7a00a72SDavid Gibson         ret = kvmppc_get_books_sregs(cpu);
1244a7a00a72SDavid Gibson         if (ret < 0) {
1245a7a00a72SDavid Gibson             return ret;
1246a7a00a72SDavid Gibson         }
1247fafc0b6aSAlexander Graf     }
1248ba5e5090SAlexander Graf 
1249d67d40eaSDavid Gibson     if (cap_hior) {
1250d67d40eaSDavid Gibson         kvm_get_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
1251d67d40eaSDavid Gibson     }
1252d67d40eaSDavid Gibson 
1253d67d40eaSDavid Gibson     if (cap_one_reg) {
1254d67d40eaSDavid Gibson         int i;
1255d67d40eaSDavid Gibson 
1256d67d40eaSDavid Gibson         /* We deliberately ignore errors here: for kernels which have
1257d67d40eaSDavid Gibson          * the ONE_REG calls but don't support the specific
1258d67d40eaSDavid Gibson          * registers, there's a reasonable chance things will still
1259d67d40eaSDavid Gibson          * work, at least until we try to migrate. */
1260d67d40eaSDavid Gibson         for (i = 0; i < 1024; i++) {
1261d67d40eaSDavid Gibson             uint64_t id = env->spr_cb[i].one_reg_id;
1262d67d40eaSDavid Gibson 
1263d67d40eaSDavid Gibson             if (id != 0) {
1264d67d40eaSDavid Gibson                 kvm_get_one_spr(cs, id, i);
1265d67d40eaSDavid Gibson             }
1266d67d40eaSDavid Gibson         }
12679b00ea49SDavid Gibson 
12689b00ea49SDavid Gibson #ifdef TARGET_PPC64
126980b3f79bSAlexey Kardashevskiy         if (msr_ts) {
127080b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
127180b3f79bSAlexey Kardashevskiy                 kvm_get_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
127280b3f79bSAlexey Kardashevskiy             }
127380b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
127480b3f79bSAlexey Kardashevskiy                 kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
127580b3f79bSAlexey Kardashevskiy             }
127680b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
127780b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
127880b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
127980b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
128080b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
128180b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
128280b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
128380b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
128480b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
128580b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
128680b3f79bSAlexey Kardashevskiy         }
128780b3f79bSAlexey Kardashevskiy 
12889b00ea49SDavid Gibson         if (cap_papr) {
12899b00ea49SDavid Gibson             if (kvm_get_vpa(cs) < 0) {
1290da56ff91SPeter Maydell                 DPRINTF("Warning: Unable to get VPA information from KVM\n");
12919b00ea49SDavid Gibson             }
12929b00ea49SDavid Gibson         }
129398a8b524SAlexey Kardashevskiy 
129498a8b524SAlexey Kardashevskiy         kvm_get_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
12959b00ea49SDavid Gibson #endif
1296d67d40eaSDavid Gibson     }
1297d67d40eaSDavid Gibson 
1298d76d1650Saurel32     return 0;
1299d76d1650Saurel32 }
1300d76d1650Saurel32 
13011bc22652SAndreas Färber int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
1302fc87e185SAlexander Graf {
1303fc87e185SAlexander Graf     unsigned virq = level ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;
1304fc87e185SAlexander Graf 
1305fc87e185SAlexander Graf     if (irq != PPC_INTERRUPT_EXT) {
1306fc87e185SAlexander Graf         return 0;
1307fc87e185SAlexander Graf     }
1308fc87e185SAlexander Graf 
1309fc87e185SAlexander Graf     if (!kvm_enabled() || !cap_interrupt_unset || !cap_interrupt_level) {
1310fc87e185SAlexander Graf         return 0;
1311fc87e185SAlexander Graf     }
1312fc87e185SAlexander Graf 
13131bc22652SAndreas Färber     kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq);
1314fc87e185SAlexander Graf 
1315fc87e185SAlexander Graf     return 0;
1316fc87e185SAlexander Graf }
1317fc87e185SAlexander Graf 
1318a69dc537SThomas Huth #if defined(TARGET_PPC64)
131916415335SAlexander Graf #define PPC_INPUT_INT PPC970_INPUT_INT
132016415335SAlexander Graf #else
132116415335SAlexander Graf #define PPC_INPUT_INT PPC6xx_INPUT_INT
132216415335SAlexander Graf #endif
132316415335SAlexander Graf 
132420d695a9SAndreas Färber void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
1325d76d1650Saurel32 {
132620d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
132720d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
1328d76d1650Saurel32     int r;
1329d76d1650Saurel32     unsigned irq;
1330d76d1650Saurel32 
13314b8523eeSJan Kiszka     qemu_mutex_lock_iothread();
13324b8523eeSJan Kiszka 
13335cbdb3a3SStefan Weil     /* PowerPC QEMU tracks the various core input pins (interrupt, critical
1334d76d1650Saurel32      * interrupt, reset, etc) in PPC-specific env->irq_input_state. */
1335fc87e185SAlexander Graf     if (!cap_interrupt_level &&
1336fc87e185SAlexander Graf         run->ready_for_interrupt_injection &&
1337259186a7SAndreas Färber         (cs->interrupt_request & CPU_INTERRUPT_HARD) &&
133816415335SAlexander Graf         (env->irq_input_state & (1<<PPC_INPUT_INT)))
1339d76d1650Saurel32     {
1340d76d1650Saurel32         /* For now KVM disregards the 'irq' argument. However, in the
1341d76d1650Saurel32          * future KVM could cache it in-kernel to avoid a heavyweight exit
1342d76d1650Saurel32          * when reading the UIC.
1343d76d1650Saurel32          */
1344fc87e185SAlexander Graf         irq = KVM_INTERRUPT_SET;
1345d76d1650Saurel32 
1346da56ff91SPeter Maydell         DPRINTF("injected interrupt %d\n", irq);
13471bc22652SAndreas Färber         r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &irq);
134855e5c285SAndreas Färber         if (r < 0) {
134955e5c285SAndreas Färber             printf("cpu %d fail inject %x\n", cs->cpu_index, irq);
135055e5c285SAndreas Färber         }
1351c821c2bdSAlexander Graf 
1352c821c2bdSAlexander Graf         /* Always wake up soon in case the interrupt was level based */
1353bc72ad67SAlex Bligh         timer_mod(idle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
135473bcb24dSRutuja Shah                        (NANOSECONDS_PER_SECOND / 50));
1355d76d1650Saurel32     }
1356d76d1650Saurel32 
1357d76d1650Saurel32     /* We don't know if there are more interrupts pending after this. However,
1358d76d1650Saurel32      * the guest will return to userspace in the course of handling this one
1359d76d1650Saurel32      * anyway, so we will get a chance to deliver the rest. */
13604b8523eeSJan Kiszka 
13614b8523eeSJan Kiszka     qemu_mutex_unlock_iothread();
1362d76d1650Saurel32 }
1363d76d1650Saurel32 
13644c663752SPaolo Bonzini MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
1365d76d1650Saurel32 {
13664c663752SPaolo Bonzini     return MEMTXATTRS_UNSPECIFIED;
1367d76d1650Saurel32 }
1368d76d1650Saurel32 
136920d695a9SAndreas Färber int kvm_arch_process_async_events(CPUState *cs)
13700af691d7SMarcelo Tosatti {
1371259186a7SAndreas Färber     return cs->halted;
13720af691d7SMarcelo Tosatti }
13730af691d7SMarcelo Tosatti 
1374259186a7SAndreas Färber static int kvmppc_handle_halt(PowerPCCPU *cpu)
1375d76d1650Saurel32 {
1376259186a7SAndreas Färber     CPUState *cs = CPU(cpu);
1377259186a7SAndreas Färber     CPUPPCState *env = &cpu->env;
1378259186a7SAndreas Färber 
1379259186a7SAndreas Färber     if (!(cs->interrupt_request & CPU_INTERRUPT_HARD) && (msr_ee)) {
1380259186a7SAndreas Färber         cs->halted = 1;
138127103424SAndreas Färber         cs->exception_index = EXCP_HLT;
1382d76d1650Saurel32     }
1383d76d1650Saurel32 
1384bb4ea393SJan Kiszka     return 0;
1385d76d1650Saurel32 }
1386d76d1650Saurel32 
1387d76d1650Saurel32 /* map dcr access to existing qemu dcr emulation */
13881328c2bfSAndreas Färber static int kvmppc_handle_dcr_read(CPUPPCState *env, uint32_t dcrn, uint32_t *data)
1389d76d1650Saurel32 {
1390d76d1650Saurel32     if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0) {
1391d76d1650Saurel32         fprintf(stderr, "Read from unhandled DCR (0x%x)\n", dcrn);
    }
1392d76d1650Saurel32 
1393bb4ea393SJan Kiszka     return 0;
1394d76d1650Saurel32 }
1395d76d1650Saurel32 
13961328c2bfSAndreas Färber static int kvmppc_handle_dcr_write(CPUPPCState *env, uint32_t dcrn, uint32_t data)
1397d76d1650Saurel32 {
1398d76d1650Saurel32     if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0) {
1399d76d1650Saurel32         fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);
    }
1400d76d1650Saurel32 
1401bb4ea393SJan Kiszka     return 0;
1402d76d1650Saurel32 }
1403d76d1650Saurel32 
14048a0548f9SBharat Bhushan int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
14058a0548f9SBharat Bhushan {
14068a0548f9SBharat Bhushan     /* Mixed endian case is not handled */
14078a0548f9SBharat Bhushan     uint32_t sc = debug_inst_opcode;
14088a0548f9SBharat Bhushan 
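    /* Save the original instruction at bp->pc, then patch in the debug
     * instruction opcode in its place. */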
14098a0548f9SBharat Bhushan     if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
14108a0548f9SBharat Bhushan                             sizeof(sc), 0) ||
14118a0548f9SBharat Bhushan         cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 1)) {
14128a0548f9SBharat Bhushan         return -EINVAL;
14138a0548f9SBharat Bhushan     }
14148a0548f9SBharat Bhushan 
14158a0548f9SBharat Bhushan     return 0;
14168a0548f9SBharat Bhushan }
14178a0548f9SBharat Bhushan 
14188a0548f9SBharat Bhushan int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
14198a0548f9SBharat Bhushan {
14208a0548f9SBharat Bhushan     uint32_t sc;
14218a0548f9SBharat Bhushan 
14228a0548f9SBharat Bhushan     if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 0) ||
14238a0548f9SBharat Bhushan         sc != debug_inst_opcode ||
14248a0548f9SBharat Bhushan         cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
14258a0548f9SBharat Bhushan                             sizeof(sc), 1)) {
14268a0548f9SBharat Bhushan         return -EINVAL;
14278a0548f9SBharat Bhushan     }
14288a0548f9SBharat Bhushan 
14298a0548f9SBharat Bhushan     return 0;
14308a0548f9SBharat Bhushan }
14318a0548f9SBharat Bhushan 
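/*
 * hw_debug_points[] holds hardware breakpoints and watchpoints together in
 * insertion order; nb_hw_breakpoint and nb_hw_watchpoint only record how
 * many entries of each kind are currently in use.
 */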
143288365d17SBharat Bhushan static int find_hw_breakpoint(target_ulong addr, int type)
143388365d17SBharat Bhushan {
143488365d17SBharat Bhushan     int n;
143588365d17SBharat Bhushan 
143688365d17SBharat Bhushan     assert((nb_hw_breakpoint + nb_hw_watchpoint)
143788365d17SBharat Bhushan            <= ARRAY_SIZE(hw_debug_points));
143888365d17SBharat Bhushan 
143988365d17SBharat Bhushan     for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
144088365d17SBharat Bhushan         if (hw_debug_points[n].addr == addr &&
144188365d17SBharat Bhushan              hw_debug_points[n].type == type) {
144288365d17SBharat Bhushan             return n;
144388365d17SBharat Bhushan         }
144488365d17SBharat Bhushan     }
144588365d17SBharat Bhushan 
144688365d17SBharat Bhushan     return -1;
144788365d17SBharat Bhushan }
144888365d17SBharat Bhushan 
144988365d17SBharat Bhushan static int find_hw_watchpoint(target_ulong addr, int *flag)
145088365d17SBharat Bhushan {
145188365d17SBharat Bhushan     int n;
145288365d17SBharat Bhushan 
145388365d17SBharat Bhushan     n = find_hw_breakpoint(addr, GDB_WATCHPOINT_ACCESS);
145488365d17SBharat Bhushan     if (n >= 0) {
145588365d17SBharat Bhushan         *flag = BP_MEM_ACCESS;
145688365d17SBharat Bhushan         return n;
145788365d17SBharat Bhushan     }
145888365d17SBharat Bhushan 
145988365d17SBharat Bhushan     n = find_hw_breakpoint(addr, GDB_WATCHPOINT_WRITE);
146088365d17SBharat Bhushan     if (n >= 0) {
146188365d17SBharat Bhushan         *flag = BP_MEM_WRITE;
146288365d17SBharat Bhushan         return n;
146388365d17SBharat Bhushan     }
146488365d17SBharat Bhushan 
146588365d17SBharat Bhushan     n = find_hw_breakpoint(addr, GDB_WATCHPOINT_READ);
146688365d17SBharat Bhushan     if (n >= 0) {
146788365d17SBharat Bhushan         *flag = BP_MEM_READ;
146888365d17SBharat Bhushan         return n;
146988365d17SBharat Bhushan     }
147088365d17SBharat Bhushan 
147188365d17SBharat Bhushan     return -1;
147288365d17SBharat Bhushan }
147388365d17SBharat Bhushan 
147488365d17SBharat Bhushan int kvm_arch_insert_hw_breakpoint(target_ulong addr,
147588365d17SBharat Bhushan                                   target_ulong len, int type)
147688365d17SBharat Bhushan {
147788365d17SBharat Bhushan     if ((nb_hw_breakpoint + nb_hw_watchpoint) >= ARRAY_SIZE(hw_debug_points)) {
147888365d17SBharat Bhushan         return -ENOBUFS;
147988365d17SBharat Bhushan     }
148088365d17SBharat Bhushan 
148188365d17SBharat Bhushan     hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].addr = addr;
148288365d17SBharat Bhushan     hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].type = type;
148388365d17SBharat Bhushan 
148488365d17SBharat Bhushan     switch (type) {
148588365d17SBharat Bhushan     case GDB_BREAKPOINT_HW:
148688365d17SBharat Bhushan         if (nb_hw_breakpoint >= max_hw_breakpoint) {
148788365d17SBharat Bhushan             return -ENOBUFS;
148888365d17SBharat Bhushan         }
148988365d17SBharat Bhushan 
149088365d17SBharat Bhushan         if (find_hw_breakpoint(addr, type) >= 0) {
149188365d17SBharat Bhushan             return -EEXIST;
149288365d17SBharat Bhushan         }
149388365d17SBharat Bhushan 
149488365d17SBharat Bhushan         nb_hw_breakpoint++;
149588365d17SBharat Bhushan         break;
149688365d17SBharat Bhushan 
149788365d17SBharat Bhushan     case GDB_WATCHPOINT_WRITE:
149888365d17SBharat Bhushan     case GDB_WATCHPOINT_READ:
149988365d17SBharat Bhushan     case GDB_WATCHPOINT_ACCESS:
150088365d17SBharat Bhushan         if (nb_hw_watchpoint >= max_hw_watchpoint) {
150188365d17SBharat Bhushan             return -ENOBUFS;
150288365d17SBharat Bhushan         }
150388365d17SBharat Bhushan 
150488365d17SBharat Bhushan         if (find_hw_breakpoint(addr, type) >= 0) {
150588365d17SBharat Bhushan             return -EEXIST;
150688365d17SBharat Bhushan         }
150788365d17SBharat Bhushan 
150888365d17SBharat Bhushan         nb_hw_watchpoint++;
150988365d17SBharat Bhushan         break;
151088365d17SBharat Bhushan 
151188365d17SBharat Bhushan     default:
151288365d17SBharat Bhushan         return -ENOSYS;
151388365d17SBharat Bhushan     }
151488365d17SBharat Bhushan 
151588365d17SBharat Bhushan     return 0;
151688365d17SBharat Bhushan }
151788365d17SBharat Bhushan 
151888365d17SBharat Bhushan int kvm_arch_remove_hw_breakpoint(target_ulong addr,
151988365d17SBharat Bhushan                                   target_ulong len, int type)
152088365d17SBharat Bhushan {
152188365d17SBharat Bhushan     int n;
152288365d17SBharat Bhushan 
152388365d17SBharat Bhushan     n = find_hw_breakpoint(addr, type);
152488365d17SBharat Bhushan     if (n < 0) {
152588365d17SBharat Bhushan         return -ENOENT;
152688365d17SBharat Bhushan     }
152788365d17SBharat Bhushan 
152888365d17SBharat Bhushan     switch (type) {
152988365d17SBharat Bhushan     case GDB_BREAKPOINT_HW:
153088365d17SBharat Bhushan         nb_hw_breakpoint--;
153188365d17SBharat Bhushan         break;
153288365d17SBharat Bhushan 
153388365d17SBharat Bhushan     case GDB_WATCHPOINT_WRITE:
153488365d17SBharat Bhushan     case GDB_WATCHPOINT_READ:
153588365d17SBharat Bhushan     case GDB_WATCHPOINT_ACCESS:
153688365d17SBharat Bhushan         nb_hw_watchpoint--;
153788365d17SBharat Bhushan         break;
153888365d17SBharat Bhushan 
153988365d17SBharat Bhushan     default:
154088365d17SBharat Bhushan         return -ENOSYS;
154188365d17SBharat Bhushan     }
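    /* Keep the array dense: move the (former) last entry into the slot
     * that has just been freed. */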
154288365d17SBharat Bhushan     hw_debug_points[n] = hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint];
154388365d17SBharat Bhushan 
154488365d17SBharat Bhushan     return 0;
154588365d17SBharat Bhushan }
154688365d17SBharat Bhushan 
154788365d17SBharat Bhushan void kvm_arch_remove_all_hw_breakpoints(void)
154888365d17SBharat Bhushan {
154988365d17SBharat Bhushan     nb_hw_breakpoint = nb_hw_watchpoint = 0;
155088365d17SBharat Bhushan }
155188365d17SBharat Bhushan 
15528a0548f9SBharat Bhushan void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
15538a0548f9SBharat Bhushan {
155488365d17SBharat Bhushan     int n;
155588365d17SBharat Bhushan 
15568a0548f9SBharat Bhushan     /* Software Breakpoint updates */
15578a0548f9SBharat Bhushan     if (kvm_sw_breakpoints_active(cs)) {
15588a0548f9SBharat Bhushan         dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
15598a0548f9SBharat Bhushan     }
156088365d17SBharat Bhushan 
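    /* Hardware breakpoint and watchpoint updates */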
156188365d17SBharat Bhushan     assert((nb_hw_breakpoint + nb_hw_watchpoint)
156288365d17SBharat Bhushan            <= ARRAY_SIZE(hw_debug_points));
156388365d17SBharat Bhushan     assert((nb_hw_breakpoint + nb_hw_watchpoint) <= ARRAY_SIZE(dbg->arch.bp));
156488365d17SBharat Bhushan 
156588365d17SBharat Bhushan     if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
156688365d17SBharat Bhushan         dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
156788365d17SBharat Bhushan         memset(dbg->arch.bp, 0, sizeof(dbg->arch.bp));
156888365d17SBharat Bhushan         for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
156988365d17SBharat Bhushan             switch (hw_debug_points[n].type) {
157088365d17SBharat Bhushan             case GDB_BREAKPOINT_HW:
157188365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_BREAKPOINT;
157288365d17SBharat Bhushan                 break;
157388365d17SBharat Bhushan             case GDB_WATCHPOINT_WRITE:
157488365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE;
157588365d17SBharat Bhushan                 break;
157688365d17SBharat Bhushan             case GDB_WATCHPOINT_READ:
157788365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_READ;
157888365d17SBharat Bhushan                 break;
157988365d17SBharat Bhushan             case GDB_WATCHPOINT_ACCESS:
158088365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE |
158188365d17SBharat Bhushan                                         KVMPPC_DEBUG_WATCH_READ;
158288365d17SBharat Bhushan                 break;
158388365d17SBharat Bhushan             default:
158488365d17SBharat Bhushan                 cpu_abort(cs, "Unsupported breakpoint type\n");
158588365d17SBharat Bhushan             }
158688365d17SBharat Bhushan             dbg->arch.bp[n].addr = hw_debug_points[n].addr;
158788365d17SBharat Bhushan         }
158888365d17SBharat Bhushan     }
15898a0548f9SBharat Bhushan }
15908a0548f9SBharat Bhushan 
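/*
 * Decide whether this KVM_EXIT_DEBUG was caused by something QEMU set up
 * (single-stepping, a hardware break/watchpoint or a software breakpoint).
 * If not, the exception came from the guest itself and is reflected back as
 * a program interrupt, as explained in the comment further down.
 */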
15918a0548f9SBharat Bhushan static int kvm_handle_debug(PowerPCCPU *cpu, struct kvm_run *run)
15928a0548f9SBharat Bhushan {
15938a0548f9SBharat Bhushan     CPUState *cs = CPU(cpu);
15948a0548f9SBharat Bhushan     CPUPPCState *env = &cpu->env;
15958a0548f9SBharat Bhushan     struct kvm_debug_exit_arch *arch_info = &run->debug.arch;
15968a0548f9SBharat Bhushan     int handle = 0;
159788365d17SBharat Bhushan     int n;
159888365d17SBharat Bhushan     int flag = 0;
15998a0548f9SBharat Bhushan 
160088365d17SBharat Bhushan     if (cs->singlestep_enabled) {
160188365d17SBharat Bhushan         handle = 1;
160288365d17SBharat Bhushan     } else if (arch_info->status) {
160388365d17SBharat Bhushan         if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
160488365d17SBharat Bhushan             if (arch_info->status & KVMPPC_DEBUG_BREAKPOINT) {
160588365d17SBharat Bhushan                 n = find_hw_breakpoint(arch_info->address, GDB_BREAKPOINT_HW);
160688365d17SBharat Bhushan                 if (n >= 0) {
160788365d17SBharat Bhushan                     handle = 1;
160888365d17SBharat Bhushan                 }
160988365d17SBharat Bhushan             } else if (arch_info->status & (KVMPPC_DEBUG_WATCH_READ |
161088365d17SBharat Bhushan                                             KVMPPC_DEBUG_WATCH_WRITE)) {
161188365d17SBharat Bhushan                 n = find_hw_watchpoint(arch_info->address,  &flag);
161288365d17SBharat Bhushan                 if (n >= 0) {
161388365d17SBharat Bhushan                     handle = 1;
161488365d17SBharat Bhushan                     cs->watchpoint_hit = &hw_watchpoint;
161588365d17SBharat Bhushan                     hw_watchpoint.vaddr = hw_debug_points[n].addr;
161688365d17SBharat Bhushan                     hw_watchpoint.flags = flag;
161788365d17SBharat Bhushan                 }
161888365d17SBharat Bhushan             }
161988365d17SBharat Bhushan         }
162088365d17SBharat Bhushan     } else if (kvm_find_sw_breakpoint(cs, arch_info->address)) {
16218a0548f9SBharat Bhushan         handle = 1;
16228a0548f9SBharat Bhushan     } else {
16238a0548f9SBharat Bhushan         /* QEMU is not able to handle this debug exception, so inject
16248a0548f9SBharat Bhushan          * a program exception into the guest;
16258a0548f9SBharat Bhushan          * yes, a program exception, NOT a debug exception !!
162688365d17SBharat Bhushan          * When QEMU is using the debug resources then the debug exception
162788365d17SBharat Bhushan          * must always be enabled. To achieve this we set MSR_DE and also
162888365d17SBharat Bhushan          * set MSRP_DEP so the guest cannot change MSR_DE.
162988365d17SBharat Bhushan          * When emulating debug resources for the guest we want the guest
163088365d17SBharat Bhushan          * to control MSR_DE (enable/disable the debug interrupt on demand).
163188365d17SBharat Bhushan          * Supporting both configurations at the same time is NOT possible,
163288365d17SBharat Bhushan          * so we cannot share the debug resources
163388365d17SBharat Bhushan          * between QEMU and the guest on the BookE architecture.
163488365d17SBharat Bhushan          * In the current design QEMU gets priority over the guest:
163588365d17SBharat Bhushan          * if QEMU is using the debug resources then the guest
163688365d17SBharat Bhushan          * cannot use them.
16378a0548f9SBharat Bhushan          * For software breakpoints QEMU uses a privileged instruction,
16388a0548f9SBharat Bhushan          * so the guest cannot legitimately have raised this debug
16398a0548f9SBharat Bhushan          * exception itself; the only possibility is that the guest
16408a0548f9SBharat Bhushan          * executed a privileged / illegal instruction, and that is why
16418a0548f9SBharat Bhushan          * we are injecting a program interrupt.
16428a0548f9SBharat Bhushan          */
16438a0548f9SBharat Bhushan 
16448a0548f9SBharat Bhushan         cpu_synchronize_state(cs);
16458a0548f9SBharat Bhushan         /* env->nip is PC, so increment this by 4 to use
16468a0548f9SBharat Bhushan          * ppc_cpu_do_interrupt(), which sets srr0 = env->nip - 4.
16478a0548f9SBharat Bhushan          */
16488a0548f9SBharat Bhushan         env->nip += 4;
16498a0548f9SBharat Bhushan         cs->exception_index = POWERPC_EXCP_PROGRAM;
16508a0548f9SBharat Bhushan         env->error_code = POWERPC_EXCP_INVAL;
16518a0548f9SBharat Bhushan         ppc_cpu_do_interrupt(cs);
16528a0548f9SBharat Bhushan     }
16538a0548f9SBharat Bhushan 
16548a0548f9SBharat Bhushan     return handle;
16558a0548f9SBharat Bhushan }
16568a0548f9SBharat Bhushan 
165720d695a9SAndreas Färber int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
1658d76d1650Saurel32 {
165920d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
166020d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
1661bb4ea393SJan Kiszka     int ret;
1662d76d1650Saurel32 
16634b8523eeSJan Kiszka     qemu_mutex_lock_iothread();
16644b8523eeSJan Kiszka 
1665d76d1650Saurel32     switch (run->exit_reason) {
1666d76d1650Saurel32     case KVM_EXIT_DCR:
1667d76d1650Saurel32         if (run->dcr.is_write) {
1668da56ff91SPeter Maydell             DPRINTF("handle dcr write\n");
1669d76d1650Saurel32             ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data);
1670d76d1650Saurel32         } else {
1671da56ff91SPeter Maydell             DPRINTF("handle dcr read\n");
1672d76d1650Saurel32             ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data);
1673d76d1650Saurel32         }
1674d76d1650Saurel32         break;
1675d76d1650Saurel32     case KVM_EXIT_HLT:
1676da56ff91SPeter Maydell         DPRINTF("handle halt\n");
1677259186a7SAndreas Färber         ret = kvmppc_handle_halt(cpu);
1678d76d1650Saurel32         break;
1679c6304a4aSDavid Gibson #if defined(TARGET_PPC64)
1680f61b4bedSAlexander Graf     case KVM_EXIT_PAPR_HCALL:
1681da56ff91SPeter Maydell         DPRINTF("handle PAPR hypercall\n");
168220d695a9SAndreas Färber         run->papr_hcall.ret = spapr_hypercall(cpu,
1683aa100fa4SAndreas Färber                                               run->papr_hcall.nr,
1684f61b4bedSAlexander Graf                                               run->papr_hcall.args);
168578e8fde2SDavid Gibson         ret = 0;
1686f61b4bedSAlexander Graf         break;
1687f61b4bedSAlexander Graf #endif
16885b95b8b9SAlexander Graf     case KVM_EXIT_EPR:
1689da56ff91SPeter Maydell         DPRINTF("handle epr\n");
1690933b19eaSAlexander Graf         run->epr.epr = ldl_phys(cs->as, env->mpic_iack);
16915b95b8b9SAlexander Graf         ret = 0;
16925b95b8b9SAlexander Graf         break;
169331f2cb8fSBharat Bhushan     case KVM_EXIT_WATCHDOG:
1694da56ff91SPeter Maydell         DPRINTF("handle watchdog expiry\n");
169531f2cb8fSBharat Bhushan         watchdog_perform_action();
169631f2cb8fSBharat Bhushan         ret = 0;
169731f2cb8fSBharat Bhushan         break;
169831f2cb8fSBharat Bhushan 
16998a0548f9SBharat Bhushan     case KVM_EXIT_DEBUG:
17008a0548f9SBharat Bhushan         DPRINTF("handle debug exception\n");
17018a0548f9SBharat Bhushan         if (kvm_handle_debug(cpu, run)) {
17028a0548f9SBharat Bhushan             ret = EXCP_DEBUG;
17038a0548f9SBharat Bhushan             break;
17048a0548f9SBharat Bhushan         }
17058a0548f9SBharat Bhushan         /* re-enter, this exception was guest-internal */
17068a0548f9SBharat Bhushan         ret = 0;
17078a0548f9SBharat Bhushan         break;
17088a0548f9SBharat Bhushan 
170973aaec4aSJan Kiszka     default:
171073aaec4aSJan Kiszka         fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
171173aaec4aSJan Kiszka         ret = -1;
171273aaec4aSJan Kiszka         break;
1713d76d1650Saurel32     }
1714d76d1650Saurel32 
17154b8523eeSJan Kiszka     qemu_mutex_unlock_iothread();
1716d76d1650Saurel32     return ret;
1717d76d1650Saurel32 }
1718d76d1650Saurel32 
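/* OR the given bits into the BookE Timer Status Register via the
 * KVM_REG_PPC_OR_TSR pseudo-register, so the update is applied in the
 * kernel rather than via a read-modify-write from userspace. */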
171931f2cb8fSBharat Bhushan int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
172031f2cb8fSBharat Bhushan {
172131f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
172231f2cb8fSBharat Bhushan     uint32_t bits = tsr_bits;
172331f2cb8fSBharat Bhushan     struct kvm_one_reg reg = {
172431f2cb8fSBharat Bhushan         .id = KVM_REG_PPC_OR_TSR,
172531f2cb8fSBharat Bhushan         .addr = (uintptr_t) &bits,
172631f2cb8fSBharat Bhushan     };
172731f2cb8fSBharat Bhushan 
172831f2cb8fSBharat Bhushan     return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
172931f2cb8fSBharat Bhushan }
173031f2cb8fSBharat Bhushan 
173131f2cb8fSBharat Bhushan int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
173231f2cb8fSBharat Bhushan {
173331f2cb8fSBharat Bhushan 
173431f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
173531f2cb8fSBharat Bhushan     uint32_t bits = tsr_bits;
173631f2cb8fSBharat Bhushan     struct kvm_one_reg reg = {
173731f2cb8fSBharat Bhushan         .id = KVM_REG_PPC_CLEAR_TSR,
173831f2cb8fSBharat Bhushan         .addr = (uintptr_t) &bits,
173931f2cb8fSBharat Bhushan     };
174031f2cb8fSBharat Bhushan 
174131f2cb8fSBharat Bhushan     return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
174231f2cb8fSBharat Bhushan }
174331f2cb8fSBharat Bhushan 
174431f2cb8fSBharat Bhushan int kvmppc_set_tcr(PowerPCCPU *cpu)
174531f2cb8fSBharat Bhushan {
174631f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
174731f2cb8fSBharat Bhushan     CPUPPCState *env = &cpu->env;
174831f2cb8fSBharat Bhushan     uint32_t tcr = env->spr[SPR_BOOKE_TCR];
174931f2cb8fSBharat Bhushan 
175031f2cb8fSBharat Bhushan     struct kvm_one_reg reg = {
175131f2cb8fSBharat Bhushan         .id = KVM_REG_PPC_TCR,
175231f2cb8fSBharat Bhushan         .addr = (uintptr_t) &tcr,
175331f2cb8fSBharat Bhushan     };
175431f2cb8fSBharat Bhushan 
175531f2cb8fSBharat Bhushan     return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
175631f2cb8fSBharat Bhushan }
175731f2cb8fSBharat Bhushan 
175831f2cb8fSBharat Bhushan int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu)
175931f2cb8fSBharat Bhushan {
176031f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
176131f2cb8fSBharat Bhushan     int ret;
176231f2cb8fSBharat Bhushan 
176331f2cb8fSBharat Bhushan     if (!kvm_enabled()) {
176431f2cb8fSBharat Bhushan         return -1;
176531f2cb8fSBharat Bhushan     }
176631f2cb8fSBharat Bhushan 
176731f2cb8fSBharat Bhushan     if (!cap_ppc_watchdog) {
176831f2cb8fSBharat Bhushan         printf("warning: KVM does not support watchdog\n");
176931f2cb8fSBharat Bhushan         return -1;
177031f2cb8fSBharat Bhushan     }
177131f2cb8fSBharat Bhushan 
177248add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_BOOKE_WATCHDOG, 0);
177331f2cb8fSBharat Bhushan     if (ret < 0) {
177431f2cb8fSBharat Bhushan         fprintf(stderr, "%s: couldn't enable KVM_CAP_PPC_BOOKE_WATCHDOG: %s\n",
177531f2cb8fSBharat Bhushan                 __func__, strerror(-ret));
177631f2cb8fSBharat Bhushan         return ret;
177731f2cb8fSBharat Bhushan     }
177831f2cb8fSBharat Bhushan 
177931f2cb8fSBharat Bhushan     return ret;
178031f2cb8fSBharat Bhushan }
178131f2cb8fSBharat Bhushan 
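/* Copy the /proc/cpuinfo line starting with 'field' (including the field
 * name itself) into 'value'.  Returns 0 on success, -1 if the field is not
 * found or the file cannot be opened. */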
1782dc333cd6SAlexander Graf static int read_cpuinfo(const char *field, char *value, int len)
1783dc333cd6SAlexander Graf {
1784dc333cd6SAlexander Graf     FILE *f;
1785dc333cd6SAlexander Graf     int ret = -1;
1786dc333cd6SAlexander Graf     int field_len = strlen(field);
1787dc333cd6SAlexander Graf     char line[512];
1788dc333cd6SAlexander Graf 
1789dc333cd6SAlexander Graf     f = fopen("/proc/cpuinfo", "r");
1790dc333cd6SAlexander Graf     if (!f) {
1791dc333cd6SAlexander Graf         return -1;
1792dc333cd6SAlexander Graf     }
1793dc333cd6SAlexander Graf 
1794dc333cd6SAlexander Graf     do {
1795dc333cd6SAlexander Graf         if (!fgets(line, sizeof(line), f)) {
1796dc333cd6SAlexander Graf             break;
1797dc333cd6SAlexander Graf         }
1798dc333cd6SAlexander Graf         if (!strncmp(line, field, field_len)) {
1799ae215068SJim Meyering             pstrcpy(value, len, line);
1800dc333cd6SAlexander Graf             ret = 0;
1801dc333cd6SAlexander Graf             break;
1802dc333cd6SAlexander Graf         }
1803dc333cd6SAlexander Graf     } while (*line);
1804dc333cd6SAlexander Graf 
1805dc333cd6SAlexander Graf     fclose(f);
1806dc333cd6SAlexander Graf 
1807dc333cd6SAlexander Graf     return ret;
1808dc333cd6SAlexander Graf }
1809dc333cd6SAlexander Graf 
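/* Host timebase frequency, parsed from the "timebase" line of
 * /proc/cpuinfo; falls back to 1 GHz if the line is missing or malformed. */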
1810dc333cd6SAlexander Graf uint32_t kvmppc_get_tbfreq(void)
1811dc333cd6SAlexander Graf {
1812dc333cd6SAlexander Graf     char line[512];
1813dc333cd6SAlexander Graf     char *ns;
181473bcb24dSRutuja Shah     uint32_t retval = NANOSECONDS_PER_SECOND;
1815dc333cd6SAlexander Graf 
1816dc333cd6SAlexander Graf     if (read_cpuinfo("timebase", line, sizeof(line))) {
1817dc333cd6SAlexander Graf         return retval;
1818dc333cd6SAlexander Graf     }
1819dc333cd6SAlexander Graf 
1820dc333cd6SAlexander Graf     if (!(ns = strchr(line, ':'))) {
1821dc333cd6SAlexander Graf         return retval;
1822dc333cd6SAlexander Graf     }
1823dc333cd6SAlexander Graf 
1824dc333cd6SAlexander Graf     ns++;
1825dc333cd6SAlexander Graf 
1826f9b8e7f6SShraddha Barke     return atoi(ns);
1827ef951443SNikunj A Dadhania }
1828ef951443SNikunj A Dadhania 
1829ef951443SNikunj A Dadhania bool kvmppc_get_host_serial(char **value)
1830ef951443SNikunj A Dadhania {
1831ef951443SNikunj A Dadhania     return g_file_get_contents("/proc/device-tree/system-id", value, NULL,
1832ef951443SNikunj A Dadhania                                NULL);
1833ef951443SNikunj A Dadhania }
1834ef951443SNikunj A Dadhania 
1835ef951443SNikunj A Dadhania bool kvmppc_get_host_model(char **value)
1836ef951443SNikunj A Dadhania {
1837ef951443SNikunj A Dadhania     return g_file_get_contents("/proc/device-tree/model", value, NULL, NULL);
1838dc333cd6SAlexander Graf }
18394513d923SGleb Natapov 
1840eadaada1SAlexander Graf /* Try to find a device tree node for a CPU with clock-frequency property */
1841eadaada1SAlexander Graf static int kvmppc_find_cpu_dt(char *buf, int buf_len)
1842eadaada1SAlexander Graf {
1843eadaada1SAlexander Graf     struct dirent *dirp;
1844eadaada1SAlexander Graf     DIR *dp;
1845eadaada1SAlexander Graf 
1846eadaada1SAlexander Graf     if ((dp = opendir(PROC_DEVTREE_CPU)) == NULL) {
1847eadaada1SAlexander Graf         printf("Can't open directory " PROC_DEVTREE_CPU "\n");
1848eadaada1SAlexander Graf         return -1;
1849eadaada1SAlexander Graf     }
1850eadaada1SAlexander Graf 
1851eadaada1SAlexander Graf     buf[0] = '\0';
1852eadaada1SAlexander Graf     while ((dirp = readdir(dp)) != NULL) {
1853eadaada1SAlexander Graf         FILE *f;
1854eadaada1SAlexander Graf         snprintf(buf, buf_len, "%s%s/clock-frequency", PROC_DEVTREE_CPU,
1855eadaada1SAlexander Graf                  dirp->d_name);
1856eadaada1SAlexander Graf         f = fopen(buf, "r");
1857eadaada1SAlexander Graf         if (f) {
1858eadaada1SAlexander Graf             snprintf(buf, buf_len, "%s%s", PROC_DEVTREE_CPU, dirp->d_name);
1859eadaada1SAlexander Graf             fclose(f);
1860eadaada1SAlexander Graf             break;
1861eadaada1SAlexander Graf         }
1862eadaada1SAlexander Graf         buf[0] = '\0';
1863eadaada1SAlexander Graf     }
1864eadaada1SAlexander Graf     closedir(dp);
1865eadaada1SAlexander Graf     if (buf[0] == '\0') {
1866eadaada1SAlexander Graf         printf("Unknown host!\n");
1867eadaada1SAlexander Graf         return -1;
1868eadaada1SAlexander Graf     }
1869eadaada1SAlexander Graf 
1870eadaada1SAlexander Graf     return 0;
1871eadaada1SAlexander Graf }
1872eadaada1SAlexander Graf 
18737d94a30bSSukadev Bhattiprolu static uint64_t kvmppc_read_int_dt(const char *filename)
1874eadaada1SAlexander Graf {
18759bc884b7SDavid Gibson     union {
18769bc884b7SDavid Gibson         uint32_t v32;
18779bc884b7SDavid Gibson         uint64_t v64;
18789bc884b7SDavid Gibson     } u;
1879eadaada1SAlexander Graf     FILE *f;
1880eadaada1SAlexander Graf     int len;
1881eadaada1SAlexander Graf 
18827d94a30bSSukadev Bhattiprolu     f = fopen(filename, "rb");
1883eadaada1SAlexander Graf     if (!f) {
1884eadaada1SAlexander Graf         return -1;
1885eadaada1SAlexander Graf     }
1886eadaada1SAlexander Graf 
18879bc884b7SDavid Gibson     len = fread(&u, 1, sizeof(u), f);
1888eadaada1SAlexander Graf     fclose(f);
1889eadaada1SAlexander Graf     switch (len) {
18909bc884b7SDavid Gibson     case 4:
18919bc884b7SDavid Gibson         /* property is a 32-bit quantity */
18929bc884b7SDavid Gibson         return be32_to_cpu(u.v32);
18939bc884b7SDavid Gibson     case 8:
18949bc884b7SDavid Gibson         return be64_to_cpu(u.v64);
1895eadaada1SAlexander Graf     }
1896eadaada1SAlexander Graf 
1897eadaada1SAlexander Graf     return 0;
1898eadaada1SAlexander Graf }
1899eadaada1SAlexander Graf 
19007d94a30bSSukadev Bhattiprolu /* Read a CPU node property from the host device tree that's a single
19017d94a30bSSukadev Bhattiprolu  * integer (32-bit or 64-bit).  Returns -1 (as an unsigned value) if the
19027d94a30bSSukadev Bhattiprolu  * property can't be found or opened, and 0 if its format isn't
19037d94a30bSSukadev Bhattiprolu  * understood. */
19047d94a30bSSukadev Bhattiprolu static uint64_t kvmppc_read_int_cpu_dt(const char *propname)
19057d94a30bSSukadev Bhattiprolu {
19067d94a30bSSukadev Bhattiprolu     char buf[PATH_MAX], *tmp;
19077d94a30bSSukadev Bhattiprolu     uint64_t val;
19087d94a30bSSukadev Bhattiprolu 
19097d94a30bSSukadev Bhattiprolu     if (kvmppc_find_cpu_dt(buf, sizeof(buf))) {
19107d94a30bSSukadev Bhattiprolu         return -1;
19117d94a30bSSukadev Bhattiprolu     }
19127d94a30bSSukadev Bhattiprolu 
19137d94a30bSSukadev Bhattiprolu     tmp = g_strdup_printf("%s/%s", buf, propname);
19147d94a30bSSukadev Bhattiprolu     val = kvmppc_read_int_dt(tmp);
19157d94a30bSSukadev Bhattiprolu     g_free(tmp);
19167d94a30bSSukadev Bhattiprolu 
19177d94a30bSSukadev Bhattiprolu     return val;
19187d94a30bSSukadev Bhattiprolu }
19197d94a30bSSukadev Bhattiprolu 
19209bc884b7SDavid Gibson uint64_t kvmppc_get_clockfreq(void)
19219bc884b7SDavid Gibson {
19229bc884b7SDavid Gibson     return kvmppc_read_int_cpu_dt("clock-frequency");
19239bc884b7SDavid Gibson }
19249bc884b7SDavid Gibson 
19251a61a9aeSStuart Yoder static int kvmppc_get_pvinfo(CPUPPCState *env, struct kvm_ppc_pvinfo *pvinfo)
192645024f09SAlexander Graf {
1927a60f24b5SAndreas Färber     PowerPCCPU *cpu = ppc_env_get_cpu(env);
1928a60f24b5SAndreas Färber     CPUState *cs = CPU(cpu);
192945024f09SAlexander Graf 
19306fd33a75SAlexander Graf     if (kvm_vm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
19311a61a9aeSStuart Yoder         !kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_PVINFO, pvinfo)) {
19321a61a9aeSStuart Yoder         return 0;
19331a61a9aeSStuart Yoder     }
193445024f09SAlexander Graf 
19351a61a9aeSStuart Yoder     return 1;
19361a61a9aeSStuart Yoder }
19371a61a9aeSStuart Yoder 
19381a61a9aeSStuart Yoder int kvmppc_get_hasidle(CPUPPCState *env)
19391a61a9aeSStuart Yoder {
19401a61a9aeSStuart Yoder     struct kvm_ppc_pvinfo pvinfo;
19411a61a9aeSStuart Yoder 
19421a61a9aeSStuart Yoder     if (!kvmppc_get_pvinfo(env, &pvinfo) &&
19431a61a9aeSStuart Yoder         (pvinfo.flags & KVM_PPC_PVINFO_FLAGS_EV_IDLE)) {
19441a61a9aeSStuart Yoder         return 1;
19451a61a9aeSStuart Yoder     }
19461a61a9aeSStuart Yoder 
19471a61a9aeSStuart Yoder     return 0;
19481a61a9aeSStuart Yoder }
19491a61a9aeSStuart Yoder 
19501a61a9aeSStuart Yoder int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
19511a61a9aeSStuart Yoder {
19521a61a9aeSStuart Yoder     uint32_t *hc = (uint32_t*)buf;
19531a61a9aeSStuart Yoder     struct kvm_ppc_pvinfo pvinfo;
19541a61a9aeSStuart Yoder 
19551a61a9aeSStuart Yoder     if (!kvmppc_get_pvinfo(env, &pvinfo)) {
19561a61a9aeSStuart Yoder         memcpy(buf, pvinfo.hcall, buf_len);
195745024f09SAlexander Graf         return 0;
195845024f09SAlexander Graf     }
195945024f09SAlexander Graf 
196045024f09SAlexander Graf     /*
1961d13fc32eSAlexander Graf      * Fall back to always-failing hypercalls regardless of endianness:
196245024f09SAlexander Graf      *
1963d13fc32eSAlexander Graf      *     tdi 0,r0,72 (becomes b .+8 in wrong endian, nop in good endian)
196445024f09SAlexander Graf      *     li r3, -1
1965d13fc32eSAlexander Graf      *     b .+8       (becomes nop in wrong endian)
1966d13fc32eSAlexander Graf      *     bswap32(li r3, -1)
196745024f09SAlexander Graf      */
196845024f09SAlexander Graf 
1969d13fc32eSAlexander Graf     hc[0] = cpu_to_be32(0x08000048);
1970d13fc32eSAlexander Graf     hc[1] = cpu_to_be32(0x3860ffff);
1971d13fc32eSAlexander Graf     hc[2] = cpu_to_be32(0x48000008);
1972d13fc32eSAlexander Graf     hc[3] = cpu_to_be32(bswap32(0x3860ffff));
197345024f09SAlexander Graf 
19740ddbd053SAlexey Kardashevskiy     return 1;
197545024f09SAlexander Graf }
197645024f09SAlexander Graf 
1977026bfd89SDavid Gibson static inline int kvmppc_enable_hcall(KVMState *s, target_ulong hcall)
1978026bfd89SDavid Gibson {
1979026bfd89SDavid Gibson     return kvm_vm_enable_cap(s, KVM_CAP_PPC_ENABLE_HCALL, 0, hcall, 1);
1980026bfd89SDavid Gibson }
1981026bfd89SDavid Gibson 
1982026bfd89SDavid Gibson void kvmppc_enable_logical_ci_hcalls(void)
1983026bfd89SDavid Gibson {
1984026bfd89SDavid Gibson     /*
1985026bfd89SDavid Gibson      * FIXME: it would be nice if we could detect the cases where
1986026bfd89SDavid Gibson      * we're using a device which requires the in kernel
1987026bfd89SDavid Gibson      * implementation of these hcalls, but the kernel lacks them and
1988026bfd89SDavid Gibson      * produce a warning.
1989026bfd89SDavid Gibson      */
1990026bfd89SDavid Gibson     kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_LOAD);
1991026bfd89SDavid Gibson     kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_STORE);
1992026bfd89SDavid Gibson }
1993026bfd89SDavid Gibson 
1994ef9971ddSAlexey Kardashevskiy void kvmppc_enable_set_mode_hcall(void)
1995ef9971ddSAlexey Kardashevskiy {
1996ef9971ddSAlexey Kardashevskiy     kvmppc_enable_hcall(kvm_state, H_SET_MODE);
1997ef9971ddSAlexey Kardashevskiy }
1998ef9971ddSAlexey Kardashevskiy 
19995145ad4fSNathan Whitehorn void kvmppc_enable_clear_ref_mod_hcalls(void)
20005145ad4fSNathan Whitehorn {
20015145ad4fSNathan Whitehorn     kvmppc_enable_hcall(kvm_state, H_CLEAR_REF);
20025145ad4fSNathan Whitehorn     kvmppc_enable_hcall(kvm_state, H_CLEAR_MOD);
20035145ad4fSNathan Whitehorn }
20045145ad4fSNathan Whitehorn 
20051bc22652SAndreas Färber void kvmppc_set_papr(PowerPCCPU *cpu)
2006f61b4bedSAlexander Graf {
20071bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
2008f61b4bedSAlexander Graf     int ret;
2009f61b4bedSAlexander Graf 
2010da20aed1SDavid Gibson     if (!kvm_enabled()) {
2011da20aed1SDavid Gibson         return;
2012da20aed1SDavid Gibson     }
2013da20aed1SDavid Gibson 
201448add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_PAPR, 0);
2015f61b4bedSAlexander Graf     if (ret) {
2016072ed5f2SThomas Huth         error_report("This vCPU type or KVM version does not support PAPR");
2017072ed5f2SThomas Huth         exit(1);
2018f61b4bedSAlexander Graf     }
20199b00ea49SDavid Gibson 
20209b00ea49SDavid Gibson     /* Update the capability flag so we sync the right information
20219b00ea49SDavid Gibson      * with kvm */
20229b00ea49SDavid Gibson     cap_papr = 1;
2023f1af19d7SDavid Gibson }
2024f61b4bedSAlexander Graf 
2025d6e166c0SDavid Gibson int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr)
20266db5bb0fSAlexey Kardashevskiy {
2027d6e166c0SDavid Gibson     return kvm_set_one_reg(CPU(cpu), KVM_REG_PPC_ARCH_COMPAT, &compat_pvr);
20286db5bb0fSAlexey Kardashevskiy }
20296db5bb0fSAlexey Kardashevskiy 
20305b95b8b9SAlexander Graf void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy)
20315b95b8b9SAlexander Graf {
20325b95b8b9SAlexander Graf     CPUState *cs = CPU(cpu);
20335b95b8b9SAlexander Graf     int ret;
20345b95b8b9SAlexander Graf 
203548add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_EPR, 0, mpic_proxy);
20365b95b8b9SAlexander Graf     if (ret && mpic_proxy) {
2037072ed5f2SThomas Huth         error_report("This KVM version does not support EPR");
2038072ed5f2SThomas Huth         exit(1);
20395b95b8b9SAlexander Graf     }
20405b95b8b9SAlexander Graf }
20415b95b8b9SAlexander Graf 
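/* Number of SMT threads per core that KVM reports; assume 1 if the
 * capability was not advertised. */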
2042e97c3636SDavid Gibson int kvmppc_smt_threads(void)
2043e97c3636SDavid Gibson {
2044e97c3636SDavid Gibson     return cap_ppc_smt ? cap_ppc_smt : 1;
2045e97c3636SDavid Gibson }
2046e97c3636SDavid Gibson 
2047fa98fbfcSSam Bobroff int kvmppc_set_smt_threads(int smt)
2048fa98fbfcSSam Bobroff {
2049fa98fbfcSSam Bobroff     int ret;
2050fa98fbfcSSam Bobroff 
2051fa98fbfcSSam Bobroff     ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_SMT, 0, smt, 0);
2052fa98fbfcSSam Bobroff     if (!ret) {
2053fa98fbfcSSam Bobroff         cap_ppc_smt = smt;
2054fa98fbfcSSam Bobroff     }
2055fa98fbfcSSam Bobroff     return ret;
2056fa98fbfcSSam Bobroff }
2057fa98fbfcSSam Bobroff 
2058fa98fbfcSSam Bobroff void kvmppc_hint_smt_possible(Error **errp)
2059fa98fbfcSSam Bobroff {
2060fa98fbfcSSam Bobroff     int i;
2061fa98fbfcSSam Bobroff     GString *g;
2062fa98fbfcSSam Bobroff     char *s;
2063fa98fbfcSSam Bobroff 
2064fa98fbfcSSam Bobroff     assert(kvm_enabled());
2065fa98fbfcSSam Bobroff     if (cap_ppc_smt_possible) {
2066fa98fbfcSSam Bobroff         g = g_string_new("Available VSMT modes:");
2067fa98fbfcSSam Bobroff         for (i = 63; i >= 0; i--) {
2068fa98fbfcSSam Bobroff             if ((1UL << i) & cap_ppc_smt_possible) {
2069fa98fbfcSSam Bobroff                 g_string_append_printf(g, " %lu", (1UL << i));
2070fa98fbfcSSam Bobroff             }
2071fa98fbfcSSam Bobroff         }
2072fa98fbfcSSam Bobroff         s = g_string_free(g, false);
2073fa98fbfcSSam Bobroff         error_append_hint(errp, "%s.\n", s);
2074fa98fbfcSSam Bobroff         g_free(s);
2075fa98fbfcSSam Bobroff     } else {
2076fa98fbfcSSam Bobroff         error_append_hint(errp,
2077fa98fbfcSSam Bobroff                           "This KVM seems to be too old to support VSMT.\n");
2078fa98fbfcSSam Bobroff     }
2079fa98fbfcSSam Bobroff }
2080fa98fbfcSSam Bobroff 
2081fa98fbfcSSam Bobroff 
20827f763a5dSDavid Gibson #ifdef TARGET_PPC64
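/*
 * Clamp the RMA size to what the host can support: pick the largest host
 * page size no bigger than the backing page size of guest RAM and derive
 * the limit from that shift together with the hash table shift.
 */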
20837f763a5dSDavid Gibson uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift)
20847f763a5dSDavid Gibson {
2085f36951c1SDavid Gibson     struct kvm_ppc_smmu_info info;
2086f36951c1SDavid Gibson     long rampagesize, best_page_shift;
2087f36951c1SDavid Gibson     int i;
2088f36951c1SDavid Gibson 
2089f36951c1SDavid Gibson     /* Find the largest hardware supported page size that's less than
2090f36951c1SDavid Gibson      * or equal to the (logical) backing page size of guest RAM */
2091ab256960SGreg Kurz     kvm_get_smmu_info(&info, &error_fatal);
20929c607668SAlexey Kardashevskiy     rampagesize = qemu_getrampagesize();
2093f36951c1SDavid Gibson     best_page_shift = 0;
2094f36951c1SDavid Gibson 
2095f36951c1SDavid Gibson     for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
2096f36951c1SDavid Gibson         struct kvm_ppc_one_seg_page_size *sps = &info.sps[i];
2097f36951c1SDavid Gibson 
2098f36951c1SDavid Gibson         if (!sps->page_shift) {
2099f36951c1SDavid Gibson             continue;
2100f36951c1SDavid Gibson         }
2101f36951c1SDavid Gibson 
2102f36951c1SDavid Gibson         if ((sps->page_shift > best_page_shift)
2103f36951c1SDavid Gibson             && ((1UL << sps->page_shift) <= rampagesize)) {
2104f36951c1SDavid Gibson             best_page_shift = sps->page_shift;
2105f36951c1SDavid Gibson         }
2106f36951c1SDavid Gibson     }
2107f36951c1SDavid Gibson 
21087f763a5dSDavid Gibson     return MIN(current_size,
2109f36951c1SDavid Gibson                1ULL << (best_page_shift + hash_shift - 7));
21107f763a5dSDavid Gibson }
21117f763a5dSDavid Gibson #endif
21127f763a5dSDavid Gibson 
2113da95324eSAlexey Kardashevskiy bool kvmppc_spapr_use_multitce(void)
2114da95324eSAlexey Kardashevskiy {
2115da95324eSAlexey Kardashevskiy     return cap_spapr_multitce;
2116da95324eSAlexey Kardashevskiy }
2117da95324eSAlexey Kardashevskiy 
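/* Enable the in-kernel handlers for H_PUT_TCE_INDIRECT and H_STUFF_TCE. */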
21183dc410aeSAlexey Kardashevskiy int kvmppc_spapr_enable_inkernel_multitce(void)
21193dc410aeSAlexey Kardashevskiy {
21203dc410aeSAlexey Kardashevskiy     int ret;
21213dc410aeSAlexey Kardashevskiy 
21223dc410aeSAlexey Kardashevskiy     ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_ENABLE_HCALL, 0,
21233dc410aeSAlexey Kardashevskiy                             H_PUT_TCE_INDIRECT, 1);
21243dc410aeSAlexey Kardashevskiy     if (!ret) {
21253dc410aeSAlexey Kardashevskiy         ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_ENABLE_HCALL, 0,
21263dc410aeSAlexey Kardashevskiy                                 H_STUFF_TCE, 1);
21273dc410aeSAlexey Kardashevskiy     }
21283dc410aeSAlexey Kardashevskiy 
21293dc410aeSAlexey Kardashevskiy     return ret;
21303dc410aeSAlexey Kardashevskiy }
21313dc410aeSAlexey Kardashevskiy 
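/*
 * Create an in-kernel TCE table, preferring the 64-bit ioctl when available,
 * and mmap() it back so QEMU can read the entries.  Returns the mapping and
 * the fd via *pfd, or NULL on failure.
 */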
2132d6ee2a7cSAlexey Kardashevskiy void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift,
2133d6ee2a7cSAlexey Kardashevskiy                               uint64_t bus_offset, uint32_t nb_table,
2134d6ee2a7cSAlexey Kardashevskiy                               int *pfd, bool need_vfio)
21350f5cb298SDavid Gibson {
21360f5cb298SDavid Gibson     long len;
21370f5cb298SDavid Gibson     int fd;
21380f5cb298SDavid Gibson     void *table;
21390f5cb298SDavid Gibson 
2140b5aec396SDavid Gibson     /* Must set fd to -1 so we don't try to munmap when called for
2141b5aec396SDavid Gibson      * destroying the table, which the upper layers -will- do
2142b5aec396SDavid Gibson      */
2143b5aec396SDavid Gibson     *pfd = -1;
21446a81dd17SDavid Gibson     if (!cap_spapr_tce || (need_vfio && !cap_spapr_vfio)) {
21450f5cb298SDavid Gibson         return NULL;
21460f5cb298SDavid Gibson     }
21470f5cb298SDavid Gibson 
2148d6ee2a7cSAlexey Kardashevskiy     if (cap_spapr_tce_64) {
2149d6ee2a7cSAlexey Kardashevskiy         struct kvm_create_spapr_tce_64 args = {
2150d6ee2a7cSAlexey Kardashevskiy             .liobn = liobn,
2151d6ee2a7cSAlexey Kardashevskiy             .page_shift = page_shift,
2152d6ee2a7cSAlexey Kardashevskiy             .offset = bus_offset >> page_shift,
2153d6ee2a7cSAlexey Kardashevskiy             .size = nb_table,
2154d6ee2a7cSAlexey Kardashevskiy             .flags = 0
2155d6ee2a7cSAlexey Kardashevskiy         };
2156d6ee2a7cSAlexey Kardashevskiy         fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE_64, &args);
2157d6ee2a7cSAlexey Kardashevskiy         if (fd < 0) {
2158d6ee2a7cSAlexey Kardashevskiy             fprintf(stderr,
2159d6ee2a7cSAlexey Kardashevskiy                     "KVM: Failed to create TCE64 table for liobn 0x%x\n",
2160d6ee2a7cSAlexey Kardashevskiy                     liobn);
2161d6ee2a7cSAlexey Kardashevskiy             return NULL;
2162d6ee2a7cSAlexey Kardashevskiy         }
2163d6ee2a7cSAlexey Kardashevskiy     } else if (cap_spapr_tce) {
2164d6ee2a7cSAlexey Kardashevskiy         uint64_t window_size = (uint64_t) nb_table << page_shift;
2165d6ee2a7cSAlexey Kardashevskiy         struct kvm_create_spapr_tce args = {
2166d6ee2a7cSAlexey Kardashevskiy             .liobn = liobn,
2167d6ee2a7cSAlexey Kardashevskiy             .window_size = window_size,
2168d6ee2a7cSAlexey Kardashevskiy         };
2169d6ee2a7cSAlexey Kardashevskiy         if ((window_size != args.window_size) || bus_offset) {
2170d6ee2a7cSAlexey Kardashevskiy             return NULL;
2171d6ee2a7cSAlexey Kardashevskiy         }
21720f5cb298SDavid Gibson         fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE, &args);
21730f5cb298SDavid Gibson         if (fd < 0) {
2174b5aec396SDavid Gibson             fprintf(stderr, "KVM: Failed to create TCE table for liobn 0x%x\n",
2175b5aec396SDavid Gibson                     liobn);
21760f5cb298SDavid Gibson             return NULL;
21770f5cb298SDavid Gibson         }
2178d6ee2a7cSAlexey Kardashevskiy     } else {
2179d6ee2a7cSAlexey Kardashevskiy         return NULL;
2180d6ee2a7cSAlexey Kardashevskiy     }
21810f5cb298SDavid Gibson 
2182d6ee2a7cSAlexey Kardashevskiy     len = nb_table * sizeof(uint64_t);
21830f5cb298SDavid Gibson     /* FIXME: round this up to page size */
21840f5cb298SDavid Gibson 
218574b41e56SDavid Gibson     table = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
21860f5cb298SDavid Gibson     if (table == MAP_FAILED) {
2187b5aec396SDavid Gibson         fprintf(stderr, "KVM: Failed to map TCE table for liobn 0x%x\n",
2188b5aec396SDavid Gibson                 liobn);
21890f5cb298SDavid Gibson         close(fd);
21900f5cb298SDavid Gibson         return NULL;
21910f5cb298SDavid Gibson     }
21920f5cb298SDavid Gibson 
21930f5cb298SDavid Gibson     *pfd = fd;
21940f5cb298SDavid Gibson     return table;
21950f5cb298SDavid Gibson }
21960f5cb298SDavid Gibson 
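/* Unmap a TCE table created above and close its fd. */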
2197523e7b8aSAlexey Kardashevskiy int kvmppc_remove_spapr_tce(void *table, int fd, uint32_t nb_table)
21980f5cb298SDavid Gibson {
21990f5cb298SDavid Gibson     long len;
22000f5cb298SDavid Gibson 
22010f5cb298SDavid Gibson     if (fd < 0) {
22020f5cb298SDavid Gibson         return -1;
22030f5cb298SDavid Gibson     }
22040f5cb298SDavid Gibson 
2205523e7b8aSAlexey Kardashevskiy     len = nb_table * sizeof(uint64_t);
22060f5cb298SDavid Gibson     if ((munmap(table, len) < 0) ||
22070f5cb298SDavid Gibson         (close(fd) < 0)) {
2208b5aec396SDavid Gibson         fprintf(stderr, "KVM: Unexpected error removing TCE table: %s",
2209b5aec396SDavid Gibson                 strerror(errno));
22100f5cb298SDavid Gibson         /* Leak the table */
22110f5cb298SDavid Gibson     }
22120f5cb298SDavid Gibson 
22130f5cb298SDavid Gibson     return 0;
22140f5cb298SDavid Gibson }
22150f5cb298SDavid Gibson 
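/*
 * Ask the kernel to allocate/reset the HPT.  Returns the hash table shift
 * actually in use, 0 if QEMU should allocate the htab itself, or a
 * negative error.
 */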
22167f763a5dSDavid Gibson int kvmppc_reset_htab(int shift_hint)
22177f763a5dSDavid Gibson {
22187f763a5dSDavid Gibson     uint32_t shift = shift_hint;
22197f763a5dSDavid Gibson 
2220ace9a2cbSDavid Gibson     if (!kvm_enabled()) {
2221ace9a2cbSDavid Gibson         /* Full emulation, tell caller to allocate htab itself */
2222ace9a2cbSDavid Gibson         return 0;
2223ace9a2cbSDavid Gibson     }
22246977afdaSGreg Kurz     if (kvm_vm_check_extension(kvm_state, KVM_CAP_PPC_ALLOC_HTAB)) {
22257f763a5dSDavid Gibson         int ret;
22267f763a5dSDavid Gibson         ret = kvm_vm_ioctl(kvm_state, KVM_PPC_ALLOCATE_HTAB, &shift);
2227ace9a2cbSDavid Gibson         if (ret == -ENOTTY) {
2228ace9a2cbSDavid Gibson             /* At least some versions of PR KVM advertise the
2229ace9a2cbSDavid Gibson              * capability, but don't implement the ioctl().  Oops.
2230ace9a2cbSDavid Gibson              * Return 0 so that we allocate the htab in qemu, as is
2231ace9a2cbSDavid Gibson              * correct for PR. */
2232ace9a2cbSDavid Gibson             return 0;
2233ace9a2cbSDavid Gibson         } else if (ret < 0) {
22347f763a5dSDavid Gibson             return ret;
22357f763a5dSDavid Gibson         }
22367f763a5dSDavid Gibson         return shift;
22377f763a5dSDavid Gibson     }
22387f763a5dSDavid Gibson 
2239ace9a2cbSDavid Gibson     /* We have a kernel that predates the htab reset calls.  For PR
2240ace9a2cbSDavid Gibson      * KVM, we need to allocate the htab ourselves; an HV KVM of this
224196c9cff0SThomas Huth      * era has already allocated a fixed 16MB hash table. */
224296c9cff0SThomas Huth     if (kvmppc_is_pr(kvm_state)) {
2243ace9a2cbSDavid Gibson         /* PR - tell caller to allocate htab */
22447f763a5dSDavid Gibson         return 0;
2245ace9a2cbSDavid Gibson     } else {
2246ace9a2cbSDavid Gibson         /* HV - assume 16MB kernel allocated htab */
2247ace9a2cbSDavid Gibson         return 24;
2248ace9a2cbSDavid Gibson     }
22497f763a5dSDavid Gibson }
22507f763a5dSDavid Gibson 
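/* Read the host Processor Version Register. */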
2251a1e98583SDavid Gibson static inline uint32_t mfpvr(void)
2252a1e98583SDavid Gibson {
2253a1e98583SDavid Gibson     uint32_t pvr;
2254a1e98583SDavid Gibson 
2255a1e98583SDavid Gibson     asm ("mfpvr %0"
2256a1e98583SDavid Gibson          : "=r"(pvr));
2257a1e98583SDavid Gibson     return pvr;
2258a1e98583SDavid Gibson }
2259a1e98583SDavid Gibson 
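/* Set or clear instruction-set flag bits depending on 'on'. */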
2260a7342588SDavid Gibson static void alter_insns(uint64_t *word, uint64_t flags, bool on)
2261a7342588SDavid Gibson {
2262a7342588SDavid Gibson     if (on) {
2263a7342588SDavid Gibson         *word |= flags;
2264a7342588SDavid Gibson     } else {
2265a7342588SDavid Gibson         *word &= ~flags;
2266a7342588SDavid Gibson     }
2267a7342588SDavid Gibson }
2268a7342588SDavid Gibson 
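/*
 * Initialise the "host" CPU class from the host PVR, AT_HWCAP bits and
 * the cache sizes found in the device tree.
 */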
22692985b86bSAndreas Färber static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data)
22702985b86bSAndreas Färber {
22712985b86bSAndreas Färber     PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
22720cbad81fSDavid Gibson     uint32_t dcache_size = kvmppc_read_int_cpu_dt("d-cache-size");
22730cbad81fSDavid Gibson     uint32_t icache_size = kvmppc_read_int_cpu_dt("i-cache-size");
2274a1e98583SDavid Gibson 
2275cfe34f44SAndreas Färber     /* Now fix up the class with information we can query from the host */
22763bc9ccc0SAlexey Kardashevskiy     pcc->pvr = mfpvr();
2277a7342588SDavid Gibson 
22783f2ca480SDavid Gibson     alter_insns(&pcc->insns_flags, PPC_ALTIVEC,
22793f2ca480SDavid Gibson                 qemu_getauxval(AT_HWCAP) & PPC_FEATURE_HAS_ALTIVEC);
22803f2ca480SDavid Gibson     alter_insns(&pcc->insns_flags2, PPC2_VSX,
22813f2ca480SDavid Gibson                 qemu_getauxval(AT_HWCAP) & PPC_FEATURE_HAS_VSX);
22823f2ca480SDavid Gibson     alter_insns(&pcc->insns_flags2, PPC2_DFP,
22833f2ca480SDavid Gibson                 qemu_getauxval(AT_HWCAP) & PPC_FEATURE_HAS_DFP);
22840cbad81fSDavid Gibson 
22850cbad81fSDavid Gibson     if (dcache_size != -1) {
22860cbad81fSDavid Gibson         pcc->l1_dcache_size = dcache_size;
22870cbad81fSDavid Gibson     }
22880cbad81fSDavid Gibson 
22890cbad81fSDavid Gibson     if (icache_size != -1) {
22900cbad81fSDavid Gibson         pcc->l1_icache_size = icache_size;
22910cbad81fSDavid Gibson     }
2292c64abd1fSSam Bobroff 
2293c64abd1fSSam Bobroff #if defined(TARGET_PPC64)
2294c64abd1fSSam Bobroff     pcc->radix_page_info = kvm_get_radix_page_info();
22955f3066d8SDavid Gibson 
22965f3066d8SDavid Gibson     if ((pcc->pvr & 0xffffff00) == CPU_POWERPC_POWER9_DD1) {
22975f3066d8SDavid Gibson         /*
22985f3066d8SDavid Gibson          * POWER9 DD1 has some bugs which make it not really ISA 3.00
22995f3066d8SDavid Gibson          * compliant.  More importantly, advertising ISA 3.00
23005f3066d8SDavid Gibson          * architected mode may prevent guests from activating
23015f3066d8SDavid Gibson          * necessary DD1 workarounds.
23025f3066d8SDavid Gibson          */
23035f3066d8SDavid Gibson         pcc->pcr_supported &= ~(PCR_COMPAT_3_00 | PCR_COMPAT_2_07
23045f3066d8SDavid Gibson                                 | PCR_COMPAT_2_06 | PCR_COMPAT_2_05);
23055f3066d8SDavid Gibson     }
2306c64abd1fSSam Bobroff #endif /* defined(TARGET_PPC64) */
2307a1e98583SDavid Gibson }
2308a1e98583SDavid Gibson 
23093b961124SStuart Yoder bool kvmppc_has_cap_epr(void)
23103b961124SStuart Yoder {
23113b961124SStuart Yoder     return cap_epr;
23123b961124SStuart Yoder }
23133b961124SStuart Yoder 
231487a91de6SAlexander Graf bool kvmppc_has_cap_fixup_hcalls(void)
231587a91de6SAlexander Graf {
231687a91de6SAlexander Graf     return cap_fixup_hcalls;
231787a91de6SAlexander Graf }
231887a91de6SAlexander Graf 
2319bac3bf28SThomas Huth bool kvmppc_has_cap_htm(void)
2320bac3bf28SThomas Huth {
2321bac3bf28SThomas Huth     return cap_htm;
2322bac3bf28SThomas Huth }
2323bac3bf28SThomas Huth 
2324cf1c4cceSSam Bobroff bool kvmppc_has_cap_mmu_radix(void)
2325cf1c4cceSSam Bobroff {
2326cf1c4cceSSam Bobroff     return cap_mmu_radix;
2327cf1c4cceSSam Bobroff }
2328cf1c4cceSSam Bobroff 
2329cf1c4cceSSam Bobroff bool kvmppc_has_cap_mmu_hash_v3(void)
2330cf1c4cceSSam Bobroff {
2331cf1c4cceSSam Bobroff     return cap_mmu_hash_v3;
2332cf1c4cceSSam Bobroff }
2333cf1c4cceSSam Bobroff 
2334072f416aSSuraj Jitindar Singh static bool kvmppc_power8_host(void)
2335072f416aSSuraj Jitindar Singh {
2336072f416aSSuraj Jitindar Singh     bool ret = false;
2337072f416aSSuraj Jitindar Singh #ifdef TARGET_PPC64
2338072f416aSSuraj Jitindar Singh     {
2339072f416aSSuraj Jitindar Singh         uint32_t base_pvr = CPU_POWERPC_POWER_SERVER_MASK & mfpvr();
2340072f416aSSuraj Jitindar Singh         ret = (base_pvr == CPU_POWERPC_POWER8E_BASE) ||
2341072f416aSSuraj Jitindar Singh               (base_pvr == CPU_POWERPC_POWER8NVL_BASE) ||
2342072f416aSSuraj Jitindar Singh               (base_pvr == CPU_POWERPC_POWER8_BASE);
2343072f416aSSuraj Jitindar Singh     }
2344072f416aSSuraj Jitindar Singh #endif /* TARGET_PPC64 */
2345072f416aSSuraj Jitindar Singh     return ret;
2346072f416aSSuraj Jitindar Singh }
2347072f416aSSuraj Jitindar Singh 
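/*
 * The parse_cap_ppc_safe_* helpers below map the KVM_PPC_GET_CPU_CHAR
 * character/behaviour bits onto spapr capability levels
 * (broken / workaround / fixed).
 */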
23488fea7044SSuraj Jitindar Singh static int parse_cap_ppc_safe_cache(struct kvm_ppc_cpu_char c)
23498fea7044SSuraj Jitindar Singh {
2350072f416aSSuraj Jitindar Singh     bool l1d_thread_priv_req = !kvmppc_power8_host();
2351072f416aSSuraj Jitindar Singh 
23528fea7044SSuraj Jitindar Singh     if (~c.behaviour & c.behaviour_mask & H_CPU_BEHAV_L1D_FLUSH_PR) {
23538fea7044SSuraj Jitindar Singh         return 2;
2354072f416aSSuraj Jitindar Singh     } else if ((!l1d_thread_priv_req ||
2355072f416aSSuraj Jitindar Singh                 c.character & c.character_mask & H_CPU_CHAR_L1D_THREAD_PRIV) &&
23568fea7044SSuraj Jitindar Singh                (c.character & c.character_mask
23578fea7044SSuraj Jitindar Singh                 & (H_CPU_CHAR_L1D_FLUSH_ORI30 | H_CPU_CHAR_L1D_FLUSH_TRIG2))) {
23588fea7044SSuraj Jitindar Singh         return 1;
23598fea7044SSuraj Jitindar Singh     }
23608fea7044SSuraj Jitindar Singh 
23618fea7044SSuraj Jitindar Singh     return 0;
23628fea7044SSuraj Jitindar Singh }
23638fea7044SSuraj Jitindar Singh 
23648fea7044SSuraj Jitindar Singh static int parse_cap_ppc_safe_bounds_check(struct kvm_ppc_cpu_char c)
23658fea7044SSuraj Jitindar Singh {
23668fea7044SSuraj Jitindar Singh     if (~c.behaviour & c.behaviour_mask & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR) {
23678fea7044SSuraj Jitindar Singh         return 2;
23688fea7044SSuraj Jitindar Singh     } else if (c.character & c.character_mask & H_CPU_CHAR_SPEC_BAR_ORI31) {
23698fea7044SSuraj Jitindar Singh         return 1;
23708fea7044SSuraj Jitindar Singh     }
23718fea7044SSuraj Jitindar Singh 
23728fea7044SSuraj Jitindar Singh     return 0;
23738fea7044SSuraj Jitindar Singh }
23748fea7044SSuraj Jitindar Singh 
23758fea7044SSuraj Jitindar Singh static int parse_cap_ppc_safe_indirect_branch(struct kvm_ppc_cpu_char c)
23768fea7044SSuraj Jitindar Singh {
23778fea7044SSuraj Jitindar Singh     if (c.character & c.character_mask & H_CPU_CHAR_CACHE_COUNT_DIS) {
23788fea7044SSuraj Jitindar Singh         return  SPAPR_CAP_FIXED_CCD;
23798fea7044SSuraj Jitindar Singh     } else if (c.character & c.character_mask & H_CPU_CHAR_BCCTRL_SERIALISED) {
23808fea7044SSuraj Jitindar Singh         return SPAPR_CAP_FIXED_IBS;
23818fea7044SSuraj Jitindar Singh     }
23828fea7044SSuraj Jitindar Singh 
23838fea7044SSuraj Jitindar Singh     return 0;
23848fea7044SSuraj Jitindar Singh }
23858fea7044SSuraj Jitindar Singh 
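/* Query KVM_PPC_GET_CPU_CHAR once and cache the derived capability levels. */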
23868acc2ae5SSuraj Jitindar Singh static void kvmppc_get_cpu_characteristics(KVMState *s)
23878acc2ae5SSuraj Jitindar Singh {
23888acc2ae5SSuraj Jitindar Singh     struct kvm_ppc_cpu_char c;
23898acc2ae5SSuraj Jitindar Singh     int ret;
23908acc2ae5SSuraj Jitindar Singh 
23918acc2ae5SSuraj Jitindar Singh     /* Assume broken */
23928acc2ae5SSuraj Jitindar Singh     cap_ppc_safe_cache = 0;
23938acc2ae5SSuraj Jitindar Singh     cap_ppc_safe_bounds_check = 0;
23948acc2ae5SSuraj Jitindar Singh     cap_ppc_safe_indirect_branch = 0;
23958acc2ae5SSuraj Jitindar Singh 
23968acc2ae5SSuraj Jitindar Singh     ret = kvm_vm_check_extension(s, KVM_CAP_PPC_GET_CPU_CHAR);
23978acc2ae5SSuraj Jitindar Singh     if (!ret) {
23988acc2ae5SSuraj Jitindar Singh         return;
23998acc2ae5SSuraj Jitindar Singh     }
24008acc2ae5SSuraj Jitindar Singh     ret = kvm_vm_ioctl(s, KVM_PPC_GET_CPU_CHAR, &c);
24018acc2ae5SSuraj Jitindar Singh     if (ret < 0) {
24028acc2ae5SSuraj Jitindar Singh         return;
24038acc2ae5SSuraj Jitindar Singh     }
24048fea7044SSuraj Jitindar Singh 
24058fea7044SSuraj Jitindar Singh     cap_ppc_safe_cache = parse_cap_ppc_safe_cache(c);
24068fea7044SSuraj Jitindar Singh     cap_ppc_safe_bounds_check = parse_cap_ppc_safe_bounds_check(c);
24078fea7044SSuraj Jitindar Singh     cap_ppc_safe_indirect_branch = parse_cap_ppc_safe_indirect_branch(c);
24088acc2ae5SSuraj Jitindar Singh }
24098acc2ae5SSuraj Jitindar Singh 
24108acc2ae5SSuraj Jitindar Singh int kvmppc_get_cap_safe_cache(void)
24118acc2ae5SSuraj Jitindar Singh {
24128acc2ae5SSuraj Jitindar Singh     return cap_ppc_safe_cache;
24138acc2ae5SSuraj Jitindar Singh }
24148acc2ae5SSuraj Jitindar Singh 
24158acc2ae5SSuraj Jitindar Singh int kvmppc_get_cap_safe_bounds_check(void)
24168acc2ae5SSuraj Jitindar Singh {
24178acc2ae5SSuraj Jitindar Singh     return cap_ppc_safe_bounds_check;
24188acc2ae5SSuraj Jitindar Singh }
24198acc2ae5SSuraj Jitindar Singh 
24208acc2ae5SSuraj Jitindar Singh int kvmppc_get_cap_safe_indirect_branch(void)
24218acc2ae5SSuraj Jitindar Singh {
24228acc2ae5SSuraj Jitindar Singh     return cap_ppc_safe_indirect_branch;
24238acc2ae5SSuraj Jitindar Singh }
24248acc2ae5SSuraj Jitindar Singh 
24259ded780cSAlexey Kardashevskiy bool kvmppc_has_cap_spapr_vfio(void)
24269ded780cSAlexey Kardashevskiy {
24279ded780cSAlexey Kardashevskiy     return cap_spapr_vfio;
24289ded780cSAlexey Kardashevskiy }
24299ded780cSAlexey Kardashevskiy 
243052b2519cSThomas Huth PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void)
243152b2519cSThomas Huth {
243252b2519cSThomas Huth     uint32_t host_pvr = mfpvr();
243352b2519cSThomas Huth     PowerPCCPUClass *pvr_pcc;
243452b2519cSThomas Huth 
243552b2519cSThomas Huth     pvr_pcc = ppc_cpu_class_by_pvr(host_pvr);
243652b2519cSThomas Huth     if (pvr_pcc == NULL) {
243752b2519cSThomas Huth         pvr_pcc = ppc_cpu_class_by_pvr_mask(host_pvr);
243852b2519cSThomas Huth     }
243952b2519cSThomas Huth 
244052b2519cSThomas Huth     return pvr_pcc;
244152b2519cSThomas Huth }
244252b2519cSThomas Huth 
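/*
 * Register the "host" CPU type derived from the host PVR and point the
 * generic CPU family alias (e.g. "POWER8") at it.
 */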
24432e9c10ebSIgor Mammedov static int kvm_ppc_register_host_cpu_type(MachineState *ms)
24445ba4576bSAndreas Färber {
24455ba4576bSAndreas Färber     TypeInfo type_info = {
24465ba4576bSAndreas Färber         .name = TYPE_HOST_POWERPC_CPU,
24475ba4576bSAndreas Färber         .class_init = kvmppc_host_cpu_class_init,
24485ba4576bSAndreas Färber     };
24492e9c10ebSIgor Mammedov     MachineClass *mc = MACHINE_GET_CLASS(ms);
24505ba4576bSAndreas Färber     PowerPCCPUClass *pvr_pcc;
245192e926e1SGreg Kurz     ObjectClass *oc;
24525b79b1caSAlexey Kardashevskiy     DeviceClass *dc;
2453715d4b96SThomas Huth     int i;
24545ba4576bSAndreas Färber 
245552b2519cSThomas Huth     pvr_pcc = kvm_ppc_get_host_cpu_class();
24563bc9ccc0SAlexey Kardashevskiy     if (pvr_pcc == NULL) {
24575ba4576bSAndreas Färber         return -1;
24585ba4576bSAndreas Färber     }
24595ba4576bSAndreas Färber     type_info.parent = object_class_get_name(OBJECT_CLASS(pvr_pcc));
24605ba4576bSAndreas Färber     type_register(&type_info);
24612e9c10ebSIgor Mammedov     if (object_dynamic_cast(OBJECT(ms), TYPE_SPAPR_MACHINE)) {
24622e9c10ebSIgor Mammedov         /* override TCG default cpu type with 'host' cpu model */
24632e9c10ebSIgor Mammedov         mc->default_cpu_type = TYPE_HOST_POWERPC_CPU;
24642e9c10ebSIgor Mammedov     }
24655b79b1caSAlexey Kardashevskiy 
246692e926e1SGreg Kurz     oc = object_class_by_name(type_info.name);
246792e926e1SGreg Kurz     g_assert(oc);
246892e926e1SGreg Kurz 
2469715d4b96SThomas Huth     /*
2470715d4b96SThomas Huth      * Update generic CPU family class alias (e.g. on a POWER8NVL host,
2471715d4b96SThomas Huth      * we want "POWER8" to be a "family" alias that points to the current
2472715d4b96SThomas Huth      * host CPU type, too)
2473715d4b96SThomas Huth      */
2474715d4b96SThomas Huth     dc = DEVICE_CLASS(ppc_cpu_get_family_class(pvr_pcc));
2475715d4b96SThomas Huth     for (i = 0; ppc_cpu_aliases[i].alias != NULL; i++) {
2476c5354f54SIgor Mammedov         if (strcasecmp(ppc_cpu_aliases[i].alias, dc->desc) == 0) {
2477715d4b96SThomas Huth             char *suffix;
2478715d4b96SThomas Huth 
2479715d4b96SThomas Huth             ppc_cpu_aliases[i].model = g_strdup(object_class_get_name(oc));
2480c9137065SIgor Mammedov             suffix = strstr(ppc_cpu_aliases[i].model, POWERPC_CPU_TYPE_SUFFIX);
2481715d4b96SThomas Huth             if (suffix) {
2482715d4b96SThomas Huth                 *suffix = 0;
2483715d4b96SThomas Huth             }
2484715d4b96SThomas Huth             break;
2485715d4b96SThomas Huth         }
2486715d4b96SThomas Huth     }
2487715d4b96SThomas Huth 
24885ba4576bSAndreas Färber     return 0;
24895ba4576bSAndreas Färber }
24905ba4576bSAndreas Färber 
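/* Associate an RTAS token with a named RTAS call so KVM handles it in-kernel. */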
2491feaa64c4SDavid Gibson int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function)
2492feaa64c4SDavid Gibson {
2493feaa64c4SDavid Gibson     struct kvm_rtas_token_args args = {
2494feaa64c4SDavid Gibson         .token = token,
2495feaa64c4SDavid Gibson     };
2496feaa64c4SDavid Gibson 
2497feaa64c4SDavid Gibson     if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_RTAS)) {
2498feaa64c4SDavid Gibson         return -ENOENT;
2499feaa64c4SDavid Gibson     }
2500feaa64c4SDavid Gibson 
2501feaa64c4SDavid Gibson     strncpy(args.name, function, sizeof(args.name));
2502feaa64c4SDavid Gibson 
2503feaa64c4SDavid Gibson     return kvm_vm_ioctl(kvm_state, KVM_PPC_RTAS_DEFINE_TOKEN, &args);
2504feaa64c4SDavid Gibson }
250512b1143bSDavid Gibson 
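/*
 * Get a file descriptor for reading or writing the guest HPT, starting at
 * 'index'; sets errp and returns a negative value on failure.
 */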
250614b0d748SGreg Kurz int kvmppc_get_htab_fd(bool write, uint64_t index, Error **errp)
2507e68cb8b4SAlexey Kardashevskiy {
2508e68cb8b4SAlexey Kardashevskiy     struct kvm_get_htab_fd s = {
2509e68cb8b4SAlexey Kardashevskiy         .flags = write ? KVM_GET_HTAB_WRITE : 0,
251014b0d748SGreg Kurz         .start_index = index,
2511e68cb8b4SAlexey Kardashevskiy     };
251282be8e73SGreg Kurz     int ret;
2513e68cb8b4SAlexey Kardashevskiy 
2514e68cb8b4SAlexey Kardashevskiy     if (!cap_htab_fd) {
251514b0d748SGreg Kurz         error_setg(errp, "KVM version doesn't support %s the HPT",
251614b0d748SGreg Kurz                    write ? "writing" : "reading");
251782be8e73SGreg Kurz         return -ENOTSUP;
2518e68cb8b4SAlexey Kardashevskiy     }
2519e68cb8b4SAlexey Kardashevskiy 
252082be8e73SGreg Kurz     ret = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &s);
252182be8e73SGreg Kurz     if (ret < 0) {
252214b0d748SGreg Kurz         error_setg(errp, "Unable to open fd for %s HPT %s KVM: %s",
252314b0d748SGreg Kurz                    write ? "writing" : "reading", write ? "to" : "from",
252414b0d748SGreg Kurz                    strerror(errno));
252582be8e73SGreg Kurz         return -errno;
252682be8e73SGreg Kurz     }
252782be8e73SGreg Kurz 
252882be8e73SGreg Kurz     return ret;
2529e68cb8b4SAlexey Kardashevskiy }
2530e68cb8b4SAlexey Kardashevskiy 
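/*
 * Drain HPT chunks from the htab fd into the migration stream until the fd
 * is empty or the max_ns time budget expires.  Returns 1 when the table has
 * been fully read, 0 if more data remains.
 */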
2531e68cb8b4SAlexey Kardashevskiy int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns)
2532e68cb8b4SAlexey Kardashevskiy {
2533bc72ad67SAlex Bligh     int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
2534e68cb8b4SAlexey Kardashevskiy     uint8_t buf[bufsize];
2535e68cb8b4SAlexey Kardashevskiy     ssize_t rc;
2536e68cb8b4SAlexey Kardashevskiy 
2537e68cb8b4SAlexey Kardashevskiy     do {
2538e68cb8b4SAlexey Kardashevskiy         rc = read(fd, buf, bufsize);
2539e68cb8b4SAlexey Kardashevskiy         if (rc < 0) {
2540e68cb8b4SAlexey Kardashevskiy             fprintf(stderr, "Error reading data from KVM HTAB fd: %s\n",
2541e68cb8b4SAlexey Kardashevskiy                     strerror(errno));
2542e68cb8b4SAlexey Kardashevskiy             return rc;
2543e68cb8b4SAlexey Kardashevskiy         } else if (rc) {
2544e094c4c1SCédric Le Goater             uint8_t *buffer = buf;
2545e094c4c1SCédric Le Goater             ssize_t n = rc;
2546e094c4c1SCédric Le Goater             while (n) {
2547e094c4c1SCédric Le Goater                 struct kvm_get_htab_header *head =
2548e094c4c1SCédric Le Goater                     (struct kvm_get_htab_header *) buffer;
2549e094c4c1SCédric Le Goater                 size_t chunksize = sizeof(*head) +
2550e094c4c1SCédric Le Goater                      HASH_PTE_SIZE_64 * head->n_valid;
2551e094c4c1SCédric Le Goater 
2552e094c4c1SCédric Le Goater                 qemu_put_be32(f, head->index);
2553e094c4c1SCédric Le Goater                 qemu_put_be16(f, head->n_valid);
2554e094c4c1SCédric Le Goater                 qemu_put_be16(f, head->n_invalid);
2555e094c4c1SCédric Le Goater                 qemu_put_buffer(f, (void *)(head + 1),
2556e094c4c1SCédric Le Goater                                 HASH_PTE_SIZE_64 * head->n_valid);
2557e094c4c1SCédric Le Goater 
2558e094c4c1SCédric Le Goater                 buffer += chunksize;
2559e094c4c1SCédric Le Goater                 n -= chunksize;
2560e094c4c1SCédric Le Goater             }
2561e68cb8b4SAlexey Kardashevskiy         }
2562e68cb8b4SAlexey Kardashevskiy     } while ((rc != 0)
2563e68cb8b4SAlexey Kardashevskiy              && ((max_ns < 0)
2564bc72ad67SAlex Bligh                  || ((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) < max_ns)));
2565e68cb8b4SAlexey Kardashevskiy 
2566e68cb8b4SAlexey Kardashevskiy     return (rc == 0) ? 1 : 0;
2567e68cb8b4SAlexey Kardashevskiy }
2568e68cb8b4SAlexey Kardashevskiy 
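/* Read one HPT chunk from the migration stream and write it to the htab fd. */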
2569e68cb8b4SAlexey Kardashevskiy int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
2570e68cb8b4SAlexey Kardashevskiy                            uint16_t n_valid, uint16_t n_invalid)
2571e68cb8b4SAlexey Kardashevskiy {
2572e68cb8b4SAlexey Kardashevskiy     struct kvm_get_htab_header *buf;
2573e68cb8b4SAlexey Kardashevskiy     size_t chunksize = sizeof(*buf) + n_valid*HASH_PTE_SIZE_64;
2574e68cb8b4SAlexey Kardashevskiy     ssize_t rc;
2575e68cb8b4SAlexey Kardashevskiy 
2576e68cb8b4SAlexey Kardashevskiy     buf = alloca(chunksize);
2577e68cb8b4SAlexey Kardashevskiy     buf->index = index;
2578e68cb8b4SAlexey Kardashevskiy     buf->n_valid = n_valid;
2579e68cb8b4SAlexey Kardashevskiy     buf->n_invalid = n_invalid;
2580e68cb8b4SAlexey Kardashevskiy 
2581e68cb8b4SAlexey Kardashevskiy     qemu_get_buffer(f, (void *)(buf + 1), HASH_PTE_SIZE_64*n_valid);
2582e68cb8b4SAlexey Kardashevskiy 
2583e68cb8b4SAlexey Kardashevskiy     rc = write(fd, buf, chunksize);
2584e68cb8b4SAlexey Kardashevskiy     if (rc < 0) {
2585e68cb8b4SAlexey Kardashevskiy         fprintf(stderr, "Error writing KVM hash table: %s\n",
2586e68cb8b4SAlexey Kardashevskiy                 strerror(errno));
2587e68cb8b4SAlexey Kardashevskiy         return rc;
2588e68cb8b4SAlexey Kardashevskiy     }
2589e68cb8b4SAlexey Kardashevskiy     if (rc != chunksize) {
2590e68cb8b4SAlexey Kardashevskiy         /* We should never get a short write on a single chunk */
2591e68cb8b4SAlexey Kardashevskiy         fprintf(stderr, "Short write, restoring KVM hash table\n");
2592e68cb8b4SAlexey Kardashevskiy         return -1;
2593e68cb8b4SAlexey Kardashevskiy     }
2594e68cb8b4SAlexey Kardashevskiy     return 0;
2595e68cb8b4SAlexey Kardashevskiy }
2596e68cb8b4SAlexey Kardashevskiy 
259720d695a9SAndreas Färber bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
25984513d923SGleb Natapov {
25994513d923SGleb Natapov     return true;
26004513d923SGleb Natapov }
2601a1b87fe0SJan Kiszka 
260282169660SScott Wood void kvm_arch_init_irq_routing(KVMState *s)
260382169660SScott Wood {
260482169660SScott Wood }
2605c65f9a07SGreg Kurz 
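/*
 * Read 'n' HPTEs starting at 'ptex' from KVM into 'hptes'; ranges the
 * kernel reports as invalid are zero-filled.
 */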
26061ad9f0a4SDavid Gibson void kvmppc_read_hptes(ppc_hash_pte64_t *hptes, hwaddr ptex, int n)
26071ad9f0a4SDavid Gibson {
26081ad9f0a4SDavid Gibson     int fd, rc;
26091ad9f0a4SDavid Gibson     int i;
26107c43bca0SAneesh Kumar K.V 
261114b0d748SGreg Kurz     fd = kvmppc_get_htab_fd(false, ptex, &error_abort);
26121ad9f0a4SDavid Gibson 
26131ad9f0a4SDavid Gibson     i = 0;
26141ad9f0a4SDavid Gibson     while (i < n) {
26151ad9f0a4SDavid Gibson         struct kvm_get_htab_header *hdr;
26161ad9f0a4SDavid Gibson         int m = n < HPTES_PER_GROUP ? n : HPTES_PER_GROUP;
26171ad9f0a4SDavid Gibson         char buf[sizeof(*hdr) + m * HASH_PTE_SIZE_64];
26181ad9f0a4SDavid Gibson 
26191ad9f0a4SDavid Gibson         rc = read(fd, buf, sizeof(buf));
26201ad9f0a4SDavid Gibson         if (rc < 0) {
26211ad9f0a4SDavid Gibson             hw_error("kvmppc_read_hptes: Unable to read HPTEs");
26221ad9f0a4SDavid Gibson         }
26231ad9f0a4SDavid Gibson 
26241ad9f0a4SDavid Gibson         hdr = (struct kvm_get_htab_header *)buf;
26251ad9f0a4SDavid Gibson         while ((i < n) && ((char *)hdr < (buf + rc))) {
2626a36593e1SAlexey Kardashevskiy             int invalid = hdr->n_invalid, valid = hdr->n_valid;
26271ad9f0a4SDavid Gibson 
26281ad9f0a4SDavid Gibson             if (hdr->index != (ptex + i)) {
26291ad9f0a4SDavid Gibson                 hw_error("kvmppc_read_hptes: Unexpected HPTE index %"PRIu32
26301ad9f0a4SDavid Gibson                          " != (%"HWADDR_PRIu" + %d)", hdr->index, ptex, i);
26311ad9f0a4SDavid Gibson             }
26321ad9f0a4SDavid Gibson 
2633a36593e1SAlexey Kardashevskiy             if (n - i < valid) {
2634a36593e1SAlexey Kardashevskiy                 valid = n - i;
2635a36593e1SAlexey Kardashevskiy             }
2636a36593e1SAlexey Kardashevskiy             memcpy(hptes + i, hdr + 1, HASH_PTE_SIZE_64 * valid);
2637a36593e1SAlexey Kardashevskiy             i += valid;
26381ad9f0a4SDavid Gibson 
26391ad9f0a4SDavid Gibson             if ((n - i) < invalid) {
26401ad9f0a4SDavid Gibson                 invalid = n - i;
26411ad9f0a4SDavid Gibson             }
26421ad9f0a4SDavid Gibson             memset(hptes + i, 0, invalid * HASH_PTE_SIZE_64);
2643a36593e1SAlexey Kardashevskiy             i += invalid;
26441ad9f0a4SDavid Gibson 
26451ad9f0a4SDavid Gibson             hdr = (struct kvm_get_htab_header *)
26461ad9f0a4SDavid Gibson                 ((char *)(hdr + 1) + HASH_PTE_SIZE_64 * hdr->n_valid);
26471ad9f0a4SDavid Gibson         }
26481ad9f0a4SDavid Gibson     }
26491ad9f0a4SDavid Gibson 
26501ad9f0a4SDavid Gibson     close(fd);
26511ad9f0a4SDavid Gibson }
26521ad9f0a4SDavid Gibson 
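/* Write a single HPTE at 'ptex' through the KVM htab fd. */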
26531ad9f0a4SDavid Gibson void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1)
26547c43bca0SAneesh Kumar K.V {
26551ad9f0a4SDavid Gibson     int fd, rc;
26561ad9f0a4SDavid Gibson     struct {
26571ad9f0a4SDavid Gibson         struct kvm_get_htab_header hdr;
26581ad9f0a4SDavid Gibson         uint64_t pte0;
26591ad9f0a4SDavid Gibson         uint64_t pte1;
26601ad9f0a4SDavid Gibson     } buf;
2661c1385933SAneesh Kumar K.V 
266214b0d748SGreg Kurz     fd = kvmppc_get_htab_fd(true, 0 /* Ignored */, &error_abort);
2663c1385933SAneesh Kumar K.V 
26641ad9f0a4SDavid Gibson     buf.hdr.n_valid = 1;
26651ad9f0a4SDavid Gibson     buf.hdr.n_invalid = 0;
26661ad9f0a4SDavid Gibson     buf.hdr.index = ptex;
26671ad9f0a4SDavid Gibson     buf.pte0 = cpu_to_be64(pte0);
26681ad9f0a4SDavid Gibson     buf.pte1 = cpu_to_be64(pte1);
26691ad9f0a4SDavid Gibson 
26701ad9f0a4SDavid Gibson     rc = write(fd, &buf, sizeof(buf));
26711ad9f0a4SDavid Gibson     if (rc != sizeof(buf)) {
26721ad9f0a4SDavid Gibson         hw_error("kvmppc_write_hpte: Unable to update KVM HPT");
2673c1385933SAneesh Kumar K.V     }
26741ad9f0a4SDavid Gibson     close(fd);
2675c1385933SAneesh Kumar K.V }
26769e03a040SFrank Blaschka 
26779e03a040SFrank Blaschka int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
2678dc9f06caSPavel Fedin                              uint64_t address, uint32_t data, PCIDevice *dev)
26799e03a040SFrank Blaschka {
26809e03a040SFrank Blaschka     return 0;
26819e03a040SFrank Blaschka }
26821850b6b7SEric Auger 
268338d87493SPeter Xu int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
268438d87493SPeter Xu                                 int vector, PCIDevice *dev)
268538d87493SPeter Xu {
268638d87493SPeter Xu     return 0;
268738d87493SPeter Xu }
268838d87493SPeter Xu 
268938d87493SPeter Xu int kvm_arch_release_virq_post(int virq)
269038d87493SPeter Xu {
269138d87493SPeter Xu     return 0;
269238d87493SPeter Xu }
269338d87493SPeter Xu 
26941850b6b7SEric Auger int kvm_arch_msi_data_to_gsi(uint32_t data)
26951850b6b7SEric Auger {
26961850b6b7SEric Auger     return data & 0xffff;
26971850b6b7SEric Auger }
26984d9392beSThomas Huth 
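/*
 * Enable in-kernel handling of H_RANDOM if KVM_CAP_PPC_HWRNG is available;
 * returns -1 otherwise.
 */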
26994d9392beSThomas Huth int kvmppc_enable_hwrng(void)
27004d9392beSThomas Huth {
27014d9392beSThomas Huth     if (!kvm_enabled() || !kvm_check_extension(kvm_state, KVM_CAP_PPC_HWRNG)) {
27024d9392beSThomas Huth         return -1;
27034d9392beSThomas Huth     }
27044d9392beSThomas Huth 
27054d9392beSThomas Huth     return kvmppc_enable_hcall(kvm_state, H_RANDOM);
27064d9392beSThomas Huth }
270730f4b05bSDavid Gibson 
270830f4b05bSDavid Gibson void kvmppc_check_papr_resize_hpt(Error **errp)
270930f4b05bSDavid Gibson {
271030f4b05bSDavid Gibson     if (!kvm_enabled()) {
2711b55d295eSDavid Gibson         return; /* No KVM, we're good */
2712b55d295eSDavid Gibson     }
2713b55d295eSDavid Gibson 
2714b55d295eSDavid Gibson     if (cap_resize_hpt) {
2715b55d295eSDavid Gibson         return; /* Kernel has explicit support, we're good */
2716b55d295eSDavid Gibson     }
2717b55d295eSDavid Gibson 
2718b55d295eSDavid Gibson     /* Otherwise fallback on looking for PR KVM */
2719b55d295eSDavid Gibson     if (kvmppc_is_pr(kvm_state)) {
272030f4b05bSDavid Gibson         return;
272130f4b05bSDavid Gibson     }
272230f4b05bSDavid Gibson 
272330f4b05bSDavid Gibson     error_setg(errp,
272430f4b05bSDavid Gibson                "Hash page table resizing not available with this KVM version");
272530f4b05bSDavid Gibson }
2726b55d295eSDavid Gibson 
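/* Wrappers for the KVM HPT resize ioctls; -ENOSYS if the capability is missing. */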
2727b55d295eSDavid Gibson int kvmppc_resize_hpt_prepare(PowerPCCPU *cpu, target_ulong flags, int shift)
2728b55d295eSDavid Gibson {
2729b55d295eSDavid Gibson     CPUState *cs = CPU(cpu);
2730b55d295eSDavid Gibson     struct kvm_ppc_resize_hpt rhpt = {
2731b55d295eSDavid Gibson         .flags = flags,
2732b55d295eSDavid Gibson         .shift = shift,
2733b55d295eSDavid Gibson     };
2734b55d295eSDavid Gibson 
2735b55d295eSDavid Gibson     if (!cap_resize_hpt) {
2736b55d295eSDavid Gibson         return -ENOSYS;
2737b55d295eSDavid Gibson     }
2738b55d295eSDavid Gibson 
2739b55d295eSDavid Gibson     return kvm_vm_ioctl(cs->kvm_state, KVM_PPC_RESIZE_HPT_PREPARE, &rhpt);
2740b55d295eSDavid Gibson }
2741b55d295eSDavid Gibson 
2742b55d295eSDavid Gibson int kvmppc_resize_hpt_commit(PowerPCCPU *cpu, target_ulong flags, int shift)
2743b55d295eSDavid Gibson {
2744b55d295eSDavid Gibson     CPUState *cs = CPU(cpu);
2745b55d295eSDavid Gibson     struct kvm_ppc_resize_hpt rhpt = {
2746b55d295eSDavid Gibson         .flags = flags,
2747b55d295eSDavid Gibson         .shift = shift,
2748b55d295eSDavid Gibson     };
2749b55d295eSDavid Gibson 
2750b55d295eSDavid Gibson     if (!cap_resize_hpt) {
2751b55d295eSDavid Gibson         return -ENOSYS;
2752b55d295eSDavid Gibson     }
2753b55d295eSDavid Gibson 
2754b55d295eSDavid Gibson     return kvm_vm_ioctl(cs->kvm_state, KVM_PPC_RESIZE_HPT_COMMIT, &rhpt);
2755b55d295eSDavid Gibson }
2756b55d295eSDavid Gibson 
2757c363a37aSDaniel Henrique Barboza /*
2758c363a37aSDaniel Henrique Barboza  * This is a helper function to detect a post migration scenario
2759c363a37aSDaniel Henrique Barboza  * in which a guest, running as KVM-HV, freezes in cpu_post_load because
2760c363a37aSDaniel Henrique Barboza  * the guest kernel can't handle a PVR value other than the actual host
2761c363a37aSDaniel Henrique Barboza  * PVR in KVM_SET_SREGS, even if pvr_match() returns true.
2762c363a37aSDaniel Henrique Barboza  *
2763c363a37aSDaniel Henrique Barboza  * If we don't have cap_ppc_pvr_compat and we're not running in PR
2764c363a37aSDaniel Henrique Barboza  * (so, we're HV), return true. The workaround itself is done in
2765c363a37aSDaniel Henrique Barboza  * cpu_post_load.
2766c363a37aSDaniel Henrique Barboza  *
2767c363a37aSDaniel Henrique Barboza  * The order here is important: we'll only check for KVM PR as a
2768c363a37aSDaniel Henrique Barboza  * fallback if the guest kernel can't handle the situation itself.
2769c363a37aSDaniel Henrique Barboza  * We need to avoid as much as possible querying the running KVM type
2770c363a37aSDaniel Henrique Barboza  * in QEMU level.
2771c363a37aSDaniel Henrique Barboza  */
2772c363a37aSDaniel Henrique Barboza bool kvmppc_pvr_workaround_required(PowerPCCPU *cpu)
2773c363a37aSDaniel Henrique Barboza {
2774c363a37aSDaniel Henrique Barboza     CPUState *cs = CPU(cpu);
2775c363a37aSDaniel Henrique Barboza 
2776c363a37aSDaniel Henrique Barboza     if (!kvm_enabled()) {
2777c363a37aSDaniel Henrique Barboza         return false;
2778c363a37aSDaniel Henrique Barboza     }
2779c363a37aSDaniel Henrique Barboza 
2780c363a37aSDaniel Henrique Barboza     if (cap_ppc_pvr_compat) {
2781c363a37aSDaniel Henrique Barboza         return false;
2782c363a37aSDaniel Henrique Barboza     }
2783c363a37aSDaniel Henrique Barboza 
2784c363a37aSDaniel Henrique Barboza     return !kvmppc_is_pr(cs->kvm_state);
2785c363a37aSDaniel Henrique Barboza }
2786*a84f7179SNikunj A Dadhania 
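/* Update the KVM_REG_PPC_ONLINE one_reg for this vCPU when KVM is in use. */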
2787*a84f7179SNikunj A Dadhania void kvmppc_set_reg_ppc_online(PowerPCCPU *cpu, unsigned int online)
2788*a84f7179SNikunj A Dadhania {
2789*a84f7179SNikunj A Dadhania     CPUState *cs = CPU(cpu);
2790*a84f7179SNikunj A Dadhania 
2791*a84f7179SNikunj A Dadhania     if (kvm_enabled()) {
2792*a84f7179SNikunj A Dadhania         kvm_set_one_reg(cs, KVM_REG_PPC_ONLINE, &online);
2793*a84f7179SNikunj A Dadhania     }
2794*a84f7179SNikunj A Dadhania }
2795