xref: /qemu/target/ppc/kvm.c (revision 8fea70440eb0d095442de7e80d586a285cf96be5)
1d76d1650Saurel32 /*
2d76d1650Saurel32  * PowerPC implementation of KVM hooks
3d76d1650Saurel32  *
4d76d1650Saurel32  * Copyright IBM Corp. 2007
590dc8812SScott Wood  * Copyright (C) 2011 Freescale Semiconductor, Inc.
6d76d1650Saurel32  *
7d76d1650Saurel32  * Authors:
8d76d1650Saurel32  *  Jerone Young <jyoung5@us.ibm.com>
9d76d1650Saurel32  *  Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
10d76d1650Saurel32  *  Hollis Blanchard <hollisb@us.ibm.com>
11d76d1650Saurel32  *
12d76d1650Saurel32  * This work is licensed under the terms of the GNU GPL, version 2 or later.
13d76d1650Saurel32  * See the COPYING file in the top-level directory.
14d76d1650Saurel32  *
15d76d1650Saurel32  */
16d76d1650Saurel32 
170d75590dSPeter Maydell #include "qemu/osdep.h"
18eadaada1SAlexander Graf #include <dirent.h>
19d76d1650Saurel32 #include <sys/ioctl.h>
204656e1f0SBenjamin Herrenschmidt #include <sys/vfs.h>
21d76d1650Saurel32 
22d76d1650Saurel32 #include <linux/kvm.h>
23d76d1650Saurel32 
24d76d1650Saurel32 #include "qemu-common.h"
2530f4b05bSDavid Gibson #include "qapi/error.h"
26072ed5f2SThomas Huth #include "qemu/error-report.h"
2733c11879SPaolo Bonzini #include "cpu.h"
28715d4b96SThomas Huth #include "cpu-models.h"
291de7afc9SPaolo Bonzini #include "qemu/timer.h"
309c17d615SPaolo Bonzini #include "sysemu/sysemu.h"
31b3946626SVincent Palatin #include "sysemu/hw_accel.h"
32d76d1650Saurel32 #include "kvm_ppc.h"
339c17d615SPaolo Bonzini #include "sysemu/cpus.h"
349c17d615SPaolo Bonzini #include "sysemu/device_tree.h"
35d5aea6f3SDavid Gibson #include "mmu-hash64.h"
36d76d1650Saurel32 
37f61b4bedSAlexander Graf #include "hw/sysbus.h"
380d09e41aSPaolo Bonzini #include "hw/ppc/spapr.h"
390d09e41aSPaolo Bonzini #include "hw/ppc/spapr_vio.h"
407ebaf795SBharata B Rao #include "hw/ppc/spapr_cpu_core.h"
4198a8b524SAlexey Kardashevskiy #include "hw/ppc/ppc.h"
4231f2cb8fSBharat Bhushan #include "sysemu/watchdog.h"
43b36f100eSAlexey Kardashevskiy #include "trace.h"
4488365d17SBharat Bhushan #include "exec/gdbstub.h"
454c663752SPaolo Bonzini #include "exec/memattrs.h"
469c607668SAlexey Kardashevskiy #include "exec/ram_addr.h"
472d103aaeSMichael Roth #include "sysemu/hostmem.h"
48f348b6d1SVeronia Bahaa #include "qemu/cutils.h"
499c607668SAlexey Kardashevskiy #include "qemu/mmap-alloc.h"
50f3d9f303SSam Bobroff #include "elf.h"
51c64abd1fSSam Bobroff #include "sysemu/kvm_int.h"
52f61b4bedSAlexander Graf 
53d76d1650Saurel32 //#define DEBUG_KVM
54d76d1650Saurel32 
55d76d1650Saurel32 #ifdef DEBUG_KVM
56da56ff91SPeter Maydell #define DPRINTF(fmt, ...) \
57d76d1650Saurel32     do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
58d76d1650Saurel32 #else
59da56ff91SPeter Maydell #define DPRINTF(fmt, ...) \
60d76d1650Saurel32     do { } while (0)
61d76d1650Saurel32 #endif
62d76d1650Saurel32 
63eadaada1SAlexander Graf #define PROC_DEVTREE_CPU      "/proc/device-tree/cpus/"
64eadaada1SAlexander Graf 
6594a8d39aSJan Kiszka const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
6694a8d39aSJan Kiszka     KVM_CAP_LAST_INFO
6794a8d39aSJan Kiszka };
6894a8d39aSJan Kiszka 
69fc87e185SAlexander Graf static int cap_interrupt_unset = false;
70fc87e185SAlexander Graf static int cap_interrupt_level = false;
7190dc8812SScott Wood static int cap_segstate;
7290dc8812SScott Wood static int cap_booke_sregs;
73e97c3636SDavid Gibson static int cap_ppc_smt;
74fa98fbfcSSam Bobroff static int cap_ppc_smt_possible;
750f5cb298SDavid Gibson static int cap_spapr_tce;
76d6ee2a7cSAlexey Kardashevskiy static int cap_spapr_tce_64;
77da95324eSAlexey Kardashevskiy static int cap_spapr_multitce;
789bb62a07SAlexey Kardashevskiy static int cap_spapr_vfio;
79f1af19d7SDavid Gibson static int cap_hior;
80d67d40eaSDavid Gibson static int cap_one_reg;
813b961124SStuart Yoder static int cap_epr;
8231f2cb8fSBharat Bhushan static int cap_ppc_watchdog;
839b00ea49SDavid Gibson static int cap_papr;
84e68cb8b4SAlexey Kardashevskiy static int cap_htab_fd;
8587a91de6SAlexander Graf static int cap_fixup_hcalls;
86bac3bf28SThomas Huth static int cap_htm;             /* Hardware transactional memory support */
87cf1c4cceSSam Bobroff static int cap_mmu_radix;
88cf1c4cceSSam Bobroff static int cap_mmu_hash_v3;
89b55d295eSDavid Gibson static int cap_resize_hpt;
90c363a37aSDaniel Henrique Barboza static int cap_ppc_pvr_compat;
918acc2ae5SSuraj Jitindar Singh static int cap_ppc_safe_cache;
928acc2ae5SSuraj Jitindar Singh static int cap_ppc_safe_bounds_check;
938acc2ae5SSuraj Jitindar Singh static int cap_ppc_safe_indirect_branch;
94fc87e185SAlexander Graf 
953c902d44SBharat Bhushan static uint32_t debug_inst_opcode;
963c902d44SBharat Bhushan 
97c821c2bdSAlexander Graf /* XXX We have a race condition where we actually have a level triggered
98c821c2bdSAlexander Graf  *     interrupt, but the infrastructure can't expose that yet, so the guest
99c821c2bdSAlexander Graf  *     takes but ignores it, goes to sleep and never gets notified that there's
100c821c2bdSAlexander Graf  *     still an interrupt pending.
101c6a94ba5SAlexander Graf  *
102c821c2bdSAlexander Graf  *     As a quick workaround, let's just wake up again 20 ms after we injected
103c821c2bdSAlexander Graf  *     an interrupt. That way we can ensure that we're always reinjecting
104c821c2bdSAlexander Graf  *     interrupts in case the guest swallowed them.
105c6a94ba5SAlexander Graf  */
106c6a94ba5SAlexander Graf static QEMUTimer *idle_timer;
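/*
 * Minimal sketch (for illustration only; the actual call site appears later
 * in this file, in the pre-run interrupt injection path) of how the
 * workaround timer is re-armed.  The 20 ms figure corresponds to
 * NANOSECONDS_PER_SECOND / 50:
 *
 *     timer_mod(idle_timer,
 *               qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)
 *               + NANOSECONDS_PER_SECOND / 50);
 */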
107c6a94ba5SAlexander Graf 
108d5a68146SAndreas Färber static void kvm_kick_cpu(void *opaque)
109c6a94ba5SAlexander Graf {
110d5a68146SAndreas Färber     PowerPCCPU *cpu = opaque;
111d5a68146SAndreas Färber 
112c08d7424SAndreas Färber     qemu_cpu_kick(CPU(cpu));
113c6a94ba5SAlexander Graf }
114c6a94ba5SAlexander Graf 
11596c9cff0SThomas Huth /* Check whether we are running with KVM-PR (instead of KVM-HV).  This
11696c9cff0SThomas Huth  * should only be used for fallback tests - generally we should use
11796c9cff0SThomas Huth  * explicit capabilities for the features we want, rather than
11896c9cff0SThomas Huth  * assuming what is/isn't available depending on the KVM variant. */
11996c9cff0SThomas Huth static bool kvmppc_is_pr(KVMState *ks)
12096c9cff0SThomas Huth {
12196c9cff0SThomas Huth     /* Assume KVM-PR if the GET_PVINFO capability is available */
12270a0c19eSGreg Kurz     return kvm_vm_check_extension(ks, KVM_CAP_PPC_GET_PVINFO) != 0;
12396c9cff0SThomas Huth }
12496c9cff0SThomas Huth 
1252e9c10ebSIgor Mammedov static int kvm_ppc_register_host_cpu_type(MachineState *ms);
1268acc2ae5SSuraj Jitindar Singh static void kvmppc_get_cpu_characteristics(KVMState *s);
1275ba4576bSAndreas Färber 
128b16565b3SMarcel Apfelbaum int kvm_arch_init(MachineState *ms, KVMState *s)
129d76d1650Saurel32 {
130fc87e185SAlexander Graf     cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);
131fc87e185SAlexander Graf     cap_interrupt_level = kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL);
13290dc8812SScott Wood     cap_segstate = kvm_check_extension(s, KVM_CAP_PPC_SEGSTATE);
13390dc8812SScott Wood     cap_booke_sregs = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_SREGS);
1346977afdaSGreg Kurz     cap_ppc_smt_possible = kvm_vm_check_extension(s, KVM_CAP_PPC_SMT_POSSIBLE);
1350f5cb298SDavid Gibson     cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);
136d6ee2a7cSAlexey Kardashevskiy     cap_spapr_tce_64 = kvm_check_extension(s, KVM_CAP_SPAPR_TCE_64);
137da95324eSAlexey Kardashevskiy     cap_spapr_multitce = kvm_check_extension(s, KVM_CAP_SPAPR_MULTITCE);
1389ded780cSAlexey Kardashevskiy     cap_spapr_vfio = kvm_vm_check_extension(s, KVM_CAP_SPAPR_TCE_VFIO);
139d67d40eaSDavid Gibson     cap_one_reg = kvm_check_extension(s, KVM_CAP_ONE_REG);
140f1af19d7SDavid Gibson     cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);
1413b961124SStuart Yoder     cap_epr = kvm_check_extension(s, KVM_CAP_PPC_EPR);
14231f2cb8fSBharat Bhushan     cap_ppc_watchdog = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_WATCHDOG);
1439b00ea49SDavid Gibson     /* Note: we don't set cap_papr here, because this capability is
1449b00ea49SDavid Gibson      * only activated later, by kvmppc_set_papr() */
1456977afdaSGreg Kurz     cap_htab_fd = kvm_vm_check_extension(s, KVM_CAP_PPC_HTAB_FD);
14687a91de6SAlexander Graf     cap_fixup_hcalls = kvm_check_extension(s, KVM_CAP_PPC_FIXUP_HCALL);
147fa98fbfcSSam Bobroff     cap_ppc_smt = kvm_vm_check_extension(s, KVM_CAP_PPC_SMT);
148bac3bf28SThomas Huth     cap_htm = kvm_vm_check_extension(s, KVM_CAP_PPC_HTM);
149cf1c4cceSSam Bobroff     cap_mmu_radix = kvm_vm_check_extension(s, KVM_CAP_PPC_MMU_RADIX);
150cf1c4cceSSam Bobroff     cap_mmu_hash_v3 = kvm_vm_check_extension(s, KVM_CAP_PPC_MMU_HASH_V3);
151b55d295eSDavid Gibson     cap_resize_hpt = kvm_vm_check_extension(s, KVM_CAP_SPAPR_RESIZE_HPT);
1528acc2ae5SSuraj Jitindar Singh     kvmppc_get_cpu_characteristics(s);
153c363a37aSDaniel Henrique Barboza     /*
154c363a37aSDaniel Henrique Barboza      * Note: setting it to false because there is no such capability
155c363a37aSDaniel Henrique Barboza      * in KVM at this moment.
156c363a37aSDaniel Henrique Barboza      *
157c363a37aSDaniel Henrique Barboza      * TODO: call kvm_vm_check_extension() with the right capability
158c363a37aSDaniel Henrique Barboza      * after the kernel starts implementing it. */
159c363a37aSDaniel Henrique Barboza     cap_ppc_pvr_compat = false;
160fc87e185SAlexander Graf 
161fc87e185SAlexander Graf     if (!cap_interrupt_level) {
162fc87e185SAlexander Graf         fprintf(stderr, "KVM: Couldn't find level irq capability. Expect the "
163fc87e185SAlexander Graf                         "VM to stall at times!\n");
164fc87e185SAlexander Graf     }
165fc87e185SAlexander Graf 
1662e9c10ebSIgor Mammedov     kvm_ppc_register_host_cpu_type(ms);
1675ba4576bSAndreas Färber 
168d76d1650Saurel32     return 0;
169d76d1650Saurel32 }
170d76d1650Saurel32 
171d525ffabSPaolo Bonzini int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
172d525ffabSPaolo Bonzini {
173d525ffabSPaolo Bonzini     return 0;
174d525ffabSPaolo Bonzini }
175d525ffabSPaolo Bonzini 
1761bc22652SAndreas Färber static int kvm_arch_sync_sregs(PowerPCCPU *cpu)
177d76d1650Saurel32 {
1781bc22652SAndreas Färber     CPUPPCState *cenv = &cpu->env;
1791bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
180861bbc80SAlexander Graf     struct kvm_sregs sregs;
1815666ca4aSScott Wood     int ret;
1825666ca4aSScott Wood 
1835666ca4aSScott Wood     if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
18464e07be5SAlexander Graf         /* What we're really trying to say is "if we're on BookE, we use
18564e07be5SAlexander Graf            the native PVR for now". This is the only sane way to check
18664e07be5SAlexander Graf            it though, so we potentially confuse users into thinking they can
18764e07be5SAlexander Graf            run BookE guests on BookS. Let's hope nobody dares to try :) */
1885666ca4aSScott Wood         return 0;
1895666ca4aSScott Wood     } else {
19090dc8812SScott Wood         if (!cap_segstate) {
19164e07be5SAlexander Graf             fprintf(stderr, "kvm error: missing PVR setting capability\n");
19264e07be5SAlexander Graf             return -ENOSYS;
1935666ca4aSScott Wood         }
1945666ca4aSScott Wood     }
1955666ca4aSScott Wood 
1961bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
1975666ca4aSScott Wood     if (ret) {
1985666ca4aSScott Wood         return ret;
1995666ca4aSScott Wood     }
200861bbc80SAlexander Graf 
201861bbc80SAlexander Graf     sregs.pvr = cenv->spr[SPR_PVR];
2021bc22652SAndreas Färber     return kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
2035666ca4aSScott Wood }
2045666ca4aSScott Wood 
20593dd5e85SScott Wood /* Set up a shared TLB array with KVM */
2061bc22652SAndreas Färber static int kvm_booke206_tlb_init(PowerPCCPU *cpu)
20793dd5e85SScott Wood {
2081bc22652SAndreas Färber     CPUPPCState *env = &cpu->env;
2091bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
21093dd5e85SScott Wood     struct kvm_book3e_206_tlb_params params = {};
21193dd5e85SScott Wood     struct kvm_config_tlb cfg = {};
21293dd5e85SScott Wood     unsigned int entries = 0;
21393dd5e85SScott Wood     int ret, i;
21493dd5e85SScott Wood 
21593dd5e85SScott Wood     if (!kvm_enabled() ||
216a60f24b5SAndreas Färber         !kvm_check_extension(cs->kvm_state, KVM_CAP_SW_TLB)) {
21793dd5e85SScott Wood         return 0;
21893dd5e85SScott Wood     }
21993dd5e85SScott Wood 
22093dd5e85SScott Wood     assert(ARRAY_SIZE(params.tlb_sizes) == BOOKE206_MAX_TLBN);
22193dd5e85SScott Wood 
22293dd5e85SScott Wood     for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
22393dd5e85SScott Wood         params.tlb_sizes[i] = booke206_tlb_size(env, i);
22493dd5e85SScott Wood         params.tlb_ways[i] = booke206_tlb_ways(env, i);
22593dd5e85SScott Wood         entries += params.tlb_sizes[i];
22693dd5e85SScott Wood     }
22793dd5e85SScott Wood 
22893dd5e85SScott Wood     assert(entries == env->nb_tlb);
22993dd5e85SScott Wood     assert(sizeof(struct kvm_book3e_206_tlb_entry) == sizeof(ppcmas_tlb_t));
23093dd5e85SScott Wood 
23193dd5e85SScott Wood     env->tlb_dirty = true;
23293dd5e85SScott Wood 
23393dd5e85SScott Wood     cfg.array = (uintptr_t)env->tlb.tlbm;
23493dd5e85SScott Wood     cfg.array_len = sizeof(ppcmas_tlb_t) * entries;
23593dd5e85SScott Wood     cfg.params = (uintptr_t)&params;
23693dd5e85SScott Wood     cfg.mmu_type = KVM_MMU_FSL_BOOKE_NOHV;
23793dd5e85SScott Wood 
23848add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_SW_TLB, 0, (uintptr_t)&cfg);
23993dd5e85SScott Wood     if (ret < 0) {
24093dd5e85SScott Wood         fprintf(stderr, "%s: couldn't enable KVM_CAP_SW_TLB: %s\n",
24193dd5e85SScott Wood                 __func__, strerror(-ret));
24293dd5e85SScott Wood         return ret;
24393dd5e85SScott Wood     }
24493dd5e85SScott Wood 
24593dd5e85SScott Wood     env->kvm_sw_tlb = true;
24693dd5e85SScott Wood     return 0;
24793dd5e85SScott Wood }
24893dd5e85SScott Wood 
2494656e1f0SBenjamin Herrenschmidt 
2504656e1f0SBenjamin Herrenschmidt #if defined(TARGET_PPC64)
251a60f24b5SAndreas Färber static void kvm_get_fallback_smmu_info(PowerPCCPU *cpu,
2524656e1f0SBenjamin Herrenschmidt                                        struct kvm_ppc_smmu_info *info)
2534656e1f0SBenjamin Herrenschmidt {
254a60f24b5SAndreas Färber     CPUPPCState *env = &cpu->env;
255a60f24b5SAndreas Färber     CPUState *cs = CPU(cpu);
256a60f24b5SAndreas Färber 
2574656e1f0SBenjamin Herrenschmidt     memset(info, 0, sizeof(*info));
2584656e1f0SBenjamin Herrenschmidt 
2594656e1f0SBenjamin Herrenschmidt     /* We don't have the new KVM_PPC_GET_SMMU_INFO ioctl, so
2604656e1f0SBenjamin Herrenschmidt      * we need to "guess" what the supported page sizes are.
2614656e1f0SBenjamin Herrenschmidt      *
2624656e1f0SBenjamin Herrenschmidt      * For that to work we make a few assumptions:
2634656e1f0SBenjamin Herrenschmidt      *
26496c9cff0SThomas Huth      * - Check whether we are running "PR" KVM which only supports 4K
26596c9cff0SThomas Huth      *   and 16M pages, but supports them regardless of the backing
26696c9cff0SThomas Huth      *   store characteristics. We also don't support 1T segments.
2674656e1f0SBenjamin Herrenschmidt      *
2684656e1f0SBenjamin Herrenschmidt      *   This is safe as if HV KVM ever supports that capability or PR
2694656e1f0SBenjamin Herrenschmidt      *   KVM grows support for more page/segment sizes, those versions
2704656e1f0SBenjamin Herrenschmidt      *   will have implemented KVM_CAP_PPC_GET_SMMU_INFO and thus we
2714656e1f0SBenjamin Herrenschmidt      *   will not hit this fallback
2724656e1f0SBenjamin Herrenschmidt      *
2734656e1f0SBenjamin Herrenschmidt      * - Else we are running HV KVM. This means we only support page
2744656e1f0SBenjamin Herrenschmidt      *   sizes that fit in the backing store. Additionally we only
2754656e1f0SBenjamin Herrenschmidt      *   advertise 64K pages if the processor is ARCH 2.06 and we assume
2764656e1f0SBenjamin Herrenschmidt      *   P7 encodings for the SLB and hash table. Here too, we assume
2774656e1f0SBenjamin Herrenschmidt      *   support for any newer processor will mean a kernel that
2784656e1f0SBenjamin Herrenschmidt      *   implements KVM_CAP_PPC_GET_SMMU_INFO and thus doesn't hit
2794656e1f0SBenjamin Herrenschmidt      *   this fallback.
2804656e1f0SBenjamin Herrenschmidt      */
28196c9cff0SThomas Huth     if (kvmppc_is_pr(cs->kvm_state)) {
2824656e1f0SBenjamin Herrenschmidt         /* No flags */
2834656e1f0SBenjamin Herrenschmidt         info->flags = 0;
2844656e1f0SBenjamin Herrenschmidt         info->slb_size = 64;
2854656e1f0SBenjamin Herrenschmidt 
2864656e1f0SBenjamin Herrenschmidt         /* Standard 4k base page size segment */
2874656e1f0SBenjamin Herrenschmidt         info->sps[0].page_shift = 12;
2884656e1f0SBenjamin Herrenschmidt         info->sps[0].slb_enc = 0;
2894656e1f0SBenjamin Herrenschmidt         info->sps[0].enc[0].page_shift = 12;
2904656e1f0SBenjamin Herrenschmidt         info->sps[0].enc[0].pte_enc = 0;
2914656e1f0SBenjamin Herrenschmidt 
2924656e1f0SBenjamin Herrenschmidt         /* Standard 16M large page size segment */
2934656e1f0SBenjamin Herrenschmidt         info->sps[1].page_shift = 24;
2944656e1f0SBenjamin Herrenschmidt         info->sps[1].slb_enc = SLB_VSID_L;
2954656e1f0SBenjamin Herrenschmidt         info->sps[1].enc[0].page_shift = 24;
2964656e1f0SBenjamin Herrenschmidt         info->sps[1].enc[0].pte_enc = 0;
2974656e1f0SBenjamin Herrenschmidt     } else {
2984656e1f0SBenjamin Herrenschmidt         int i = 0;
2994656e1f0SBenjamin Herrenschmidt 
3004656e1f0SBenjamin Herrenschmidt         /* HV KVM has backing store size restrictions */
3014656e1f0SBenjamin Herrenschmidt         info->flags = KVM_PPC_PAGE_SIZES_REAL;
3024656e1f0SBenjamin Herrenschmidt 
30358969eeeSDavid Gibson         if (ppc_hash64_has(cpu, PPC_HASH64_1TSEG)) {
3044656e1f0SBenjamin Herrenschmidt             info->flags |= KVM_PPC_1T_SEGMENTS;
3054656e1f0SBenjamin Herrenschmidt         }
3064656e1f0SBenjamin Herrenschmidt 
3070941d728SDavid Gibson         if (env->mmu_model == POWERPC_MMU_2_06 ||
3080941d728SDavid Gibson             env->mmu_model == POWERPC_MMU_2_07) {
3094656e1f0SBenjamin Herrenschmidt             info->slb_size = 32;
3104656e1f0SBenjamin Herrenschmidt         } else {
3114656e1f0SBenjamin Herrenschmidt             info->slb_size = 64;
3124656e1f0SBenjamin Herrenschmidt         }
3134656e1f0SBenjamin Herrenschmidt 
3144656e1f0SBenjamin Herrenschmidt         /* Standard 4k base page size segment */
3154656e1f0SBenjamin Herrenschmidt         info->sps[i].page_shift = 12;
3164656e1f0SBenjamin Herrenschmidt         info->sps[i].slb_enc = 0;
3174656e1f0SBenjamin Herrenschmidt         info->sps[i].enc[0].page_shift = 12;
3184656e1f0SBenjamin Herrenschmidt         info->sps[i].enc[0].pte_enc = 0;
3194656e1f0SBenjamin Herrenschmidt         i++;
3204656e1f0SBenjamin Herrenschmidt 
321aa4bb587SBenjamin Herrenschmidt         /* 64K on MMU 2.06 and later */
3220941d728SDavid Gibson         if (env->mmu_model == POWERPC_MMU_2_06 ||
3230941d728SDavid Gibson             env->mmu_model == POWERPC_MMU_2_07) {
3244656e1f0SBenjamin Herrenschmidt             info->sps[i].page_shift = 16;
3254656e1f0SBenjamin Herrenschmidt             info->sps[i].slb_enc = 0x110;
3264656e1f0SBenjamin Herrenschmidt             info->sps[i].enc[0].page_shift = 16;
3274656e1f0SBenjamin Herrenschmidt             info->sps[i].enc[0].pte_enc = 1;
3284656e1f0SBenjamin Herrenschmidt             i++;
3294656e1f0SBenjamin Herrenschmidt         }
3304656e1f0SBenjamin Herrenschmidt 
3314656e1f0SBenjamin Herrenschmidt         /* Standard 16M large page size segment */
3324656e1f0SBenjamin Herrenschmidt         info->sps[i].page_shift = 24;
3334656e1f0SBenjamin Herrenschmidt         info->sps[i].slb_enc = SLB_VSID_L;
3344656e1f0SBenjamin Herrenschmidt         info->sps[i].enc[0].page_shift = 24;
3354656e1f0SBenjamin Herrenschmidt         info->sps[i].enc[0].pte_enc = 0;
3364656e1f0SBenjamin Herrenschmidt     }
3374656e1f0SBenjamin Herrenschmidt }
3384656e1f0SBenjamin Herrenschmidt 
339a60f24b5SAndreas Färber static void kvm_get_smmu_info(PowerPCCPU *cpu, struct kvm_ppc_smmu_info *info)
3404656e1f0SBenjamin Herrenschmidt {
341a60f24b5SAndreas Färber     CPUState *cs = CPU(cpu);
3424656e1f0SBenjamin Herrenschmidt     int ret;
3434656e1f0SBenjamin Herrenschmidt 
344a60f24b5SAndreas Färber     if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) {
345a60f24b5SAndreas Färber         ret = kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_SMMU_INFO, info);
3464656e1f0SBenjamin Herrenschmidt         if (ret == 0) {
3474656e1f0SBenjamin Herrenschmidt             return;
3484656e1f0SBenjamin Herrenschmidt         }
3494656e1f0SBenjamin Herrenschmidt     }
3504656e1f0SBenjamin Herrenschmidt 
351a60f24b5SAndreas Färber     kvm_get_fallback_smmu_info(cpu, info);
3524656e1f0SBenjamin Herrenschmidt }
3534656e1f0SBenjamin Herrenschmidt 
354c64abd1fSSam Bobroff struct ppc_radix_page_info *kvm_get_radix_page_info(void)
355c64abd1fSSam Bobroff {
356c64abd1fSSam Bobroff     KVMState *s = KVM_STATE(current_machine->accelerator);
357c64abd1fSSam Bobroff     struct ppc_radix_page_info *radix_page_info;
358c64abd1fSSam Bobroff     struct kvm_ppc_rmmu_info rmmu_info;
359c64abd1fSSam Bobroff     int i;
360c64abd1fSSam Bobroff 
361c64abd1fSSam Bobroff     if (!kvm_check_extension(s, KVM_CAP_PPC_MMU_RADIX)) {
362c64abd1fSSam Bobroff         return NULL;
363c64abd1fSSam Bobroff     }
364c64abd1fSSam Bobroff     if (kvm_vm_ioctl(s, KVM_PPC_GET_RMMU_INFO, &rmmu_info)) {
365c64abd1fSSam Bobroff         return NULL;
366c64abd1fSSam Bobroff     }
367c64abd1fSSam Bobroff     radix_page_info = g_malloc0(sizeof(*radix_page_info));
368c64abd1fSSam Bobroff     radix_page_info->count = 0;
369c64abd1fSSam Bobroff     for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
370c64abd1fSSam Bobroff         if (rmmu_info.ap_encodings[i]) {
371c64abd1fSSam Bobroff             radix_page_info->entries[i] = rmmu_info.ap_encodings[i];
372c64abd1fSSam Bobroff             radix_page_info->count++;
373c64abd1fSSam Bobroff         }
374c64abd1fSSam Bobroff     }
375c64abd1fSSam Bobroff     return radix_page_info;
376c64abd1fSSam Bobroff }
377c64abd1fSSam Bobroff 
378b4db5413SSuraj Jitindar Singh target_ulong kvmppc_configure_v3_mmu(PowerPCCPU *cpu,
379b4db5413SSuraj Jitindar Singh                                      bool radix, bool gtse,
380b4db5413SSuraj Jitindar Singh                                      uint64_t proc_tbl)
381b4db5413SSuraj Jitindar Singh {
382b4db5413SSuraj Jitindar Singh     CPUState *cs = CPU(cpu);
383b4db5413SSuraj Jitindar Singh     int ret;
384b4db5413SSuraj Jitindar Singh     uint64_t flags = 0;
385b4db5413SSuraj Jitindar Singh     struct kvm_ppc_mmuv3_cfg cfg = {
386b4db5413SSuraj Jitindar Singh         .process_table = proc_tbl,
387b4db5413SSuraj Jitindar Singh     };
388b4db5413SSuraj Jitindar Singh 
389b4db5413SSuraj Jitindar Singh     if (radix) {
390b4db5413SSuraj Jitindar Singh         flags |= KVM_PPC_MMUV3_RADIX;
391b4db5413SSuraj Jitindar Singh     }
392b4db5413SSuraj Jitindar Singh     if (gtse) {
393b4db5413SSuraj Jitindar Singh         flags |= KVM_PPC_MMUV3_GTSE;
394b4db5413SSuraj Jitindar Singh     }
395b4db5413SSuraj Jitindar Singh     cfg.flags = flags;
396b4db5413SSuraj Jitindar Singh     ret = kvm_vm_ioctl(cs->kvm_state, KVM_PPC_CONFIGURE_V3_MMU, &cfg);
397b4db5413SSuraj Jitindar Singh     switch (ret) {
398b4db5413SSuraj Jitindar Singh     case 0:
399b4db5413SSuraj Jitindar Singh         return H_SUCCESS;
400b4db5413SSuraj Jitindar Singh     case -EINVAL:
401b4db5413SSuraj Jitindar Singh         return H_PARAMETER;
402b4db5413SSuraj Jitindar Singh     case -ENODEV:
403b4db5413SSuraj Jitindar Singh         return H_NOT_AVAILABLE;
404b4db5413SSuraj Jitindar Singh     default:
405b4db5413SSuraj Jitindar Singh         return H_HARDWARE;
406b4db5413SSuraj Jitindar Singh     }
407b4db5413SSuraj Jitindar Singh }
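/*
 * Sketch of the expected usage (assuming the sPAPR H_REGISTER_PROC_TBL hcall
 * handler is the caller): the hcall status returned above can be forwarded to
 * the guest unchanged, e.g.
 *
 *     return kvmppc_configure_v3_mmu(cpu, radix, gtse, proc_tbl);
 */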
408b4db5413SSuraj Jitindar Singh 
4094656e1f0SBenjamin Herrenschmidt static bool kvm_valid_page_size(uint32_t flags, long rampgsize, uint32_t shift)
4104656e1f0SBenjamin Herrenschmidt {
4114656e1f0SBenjamin Herrenschmidt     if (!(flags & KVM_PPC_PAGE_SIZES_REAL)) {
4124656e1f0SBenjamin Herrenschmidt         return true;
4134656e1f0SBenjamin Herrenschmidt     }
4144656e1f0SBenjamin Herrenschmidt 
4154656e1f0SBenjamin Herrenschmidt     return (1ul << shift) <= rampgsize;
4164656e1f0SBenjamin Herrenschmidt }
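/*
 * Worked example (illustrative only): with guest RAM backed by 16 MiB
 * hugepages, rampgsize == 0x1000000, so a 16M page size (shift == 24,
 * 1ul << 24 == 0x1000000) passes the check, while a 16G page size
 * (shift == 34) is rejected whenever KVM_PPC_PAGE_SIZES_REAL is set.
 */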
4174656e1f0SBenjamin Herrenschmidt 
418df587133SThomas Huth static long max_cpu_page_size;
419df587133SThomas Huth 
420a60f24b5SAndreas Färber static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
4214656e1f0SBenjamin Herrenschmidt {
4224656e1f0SBenjamin Herrenschmidt     static struct kvm_ppc_smmu_info smmu_info;
4234656e1f0SBenjamin Herrenschmidt     static bool has_smmu_info;
424a60f24b5SAndreas Färber     CPUPPCState *env = &cpu->env;
4254656e1f0SBenjamin Herrenschmidt     int iq, ik, jq, jk;
4264656e1f0SBenjamin Herrenschmidt 
4274656e1f0SBenjamin Herrenschmidt     /* We only handle page sizes for 64-bit server guests for now */
4284656e1f0SBenjamin Herrenschmidt     if (!(env->mmu_model & POWERPC_MMU_64)) {
4294656e1f0SBenjamin Herrenschmidt         return;
4304656e1f0SBenjamin Herrenschmidt     }
4314656e1f0SBenjamin Herrenschmidt 
4324656e1f0SBenjamin Herrenschmidt     /* Collect MMU info from kernel if not already */
4334656e1f0SBenjamin Herrenschmidt     if (!has_smmu_info) {
434a60f24b5SAndreas Färber         kvm_get_smmu_info(cpu, &smmu_info);
4354656e1f0SBenjamin Herrenschmidt         has_smmu_info = true;
4364656e1f0SBenjamin Herrenschmidt     }
4374656e1f0SBenjamin Herrenschmidt 
438df587133SThomas Huth     if (!max_cpu_page_size) {
4399c607668SAlexey Kardashevskiy         max_cpu_page_size = qemu_getrampagesize();
440df587133SThomas Huth     }
4414656e1f0SBenjamin Herrenschmidt 
4424656e1f0SBenjamin Herrenschmidt     /* Convert to QEMU form */
443b07c59f7SDavid Gibson     memset(cpu->hash64_opts->sps, 0, sizeof(*cpu->hash64_opts->sps));
4444656e1f0SBenjamin Herrenschmidt 
44590da0d5aSBenjamin Herrenschmidt     /* If we have HV KVM, we need to forbid CI large pages if our
44690da0d5aSBenjamin Herrenschmidt      * host page size is smaller than 64K.
44790da0d5aSBenjamin Herrenschmidt      */
44890da0d5aSBenjamin Herrenschmidt     if (smmu_info.flags & KVM_PPC_PAGE_SIZES_REAL) {
44926cd35b8SDavid Gibson         if (getpagesize() >= 0x10000) {
45026cd35b8SDavid Gibson             cpu->hash64_opts->flags |= PPC_HASH64_CI_LARGEPAGE;
45126cd35b8SDavid Gibson         } else {
45226cd35b8SDavid Gibson             cpu->hash64_opts->flags &= ~PPC_HASH64_CI_LARGEPAGE;
45326cd35b8SDavid Gibson         }
45490da0d5aSBenjamin Herrenschmidt     }
45590da0d5aSBenjamin Herrenschmidt 
45608215d8fSAlexander Graf     /*
45708215d8fSAlexander Graf      * XXX This loop should be an entry wide AND of the capabilities that
45808215d8fSAlexander Graf      *     the selected CPU has with the capabilities that KVM supports.
45908215d8fSAlexander Graf      */
4604656e1f0SBenjamin Herrenschmidt     for (ik = iq = 0; ik < KVM_PPC_PAGE_SIZES_MAX_SZ; ik++) {
461b07c59f7SDavid Gibson         PPCHash64SegmentPageSizes *qsps = &cpu->hash64_opts->sps[iq];
4624656e1f0SBenjamin Herrenschmidt         struct kvm_ppc_one_seg_page_size *ksps = &smmu_info.sps[ik];
4634656e1f0SBenjamin Herrenschmidt 
464df587133SThomas Huth         if (!kvm_valid_page_size(smmu_info.flags, max_cpu_page_size,
4654656e1f0SBenjamin Herrenschmidt                                  ksps->page_shift)) {
4664656e1f0SBenjamin Herrenschmidt             continue;
4674656e1f0SBenjamin Herrenschmidt         }
4684656e1f0SBenjamin Herrenschmidt         qsps->page_shift = ksps->page_shift;
4694656e1f0SBenjamin Herrenschmidt         qsps->slb_enc = ksps->slb_enc;
4704656e1f0SBenjamin Herrenschmidt         for (jk = jq = 0; jk < KVM_PPC_PAGE_SIZES_MAX_SZ; jk++) {
471df587133SThomas Huth             if (!kvm_valid_page_size(smmu_info.flags, max_cpu_page_size,
4724656e1f0SBenjamin Herrenschmidt                                      ksps->enc[jk].page_shift)) {
4734656e1f0SBenjamin Herrenschmidt                 continue;
4744656e1f0SBenjamin Herrenschmidt             }
4754656e1f0SBenjamin Herrenschmidt             qsps->enc[jq].page_shift = ksps->enc[jk].page_shift;
4764656e1f0SBenjamin Herrenschmidt             qsps->enc[jq].pte_enc = ksps->enc[jk].pte_enc;
4774656e1f0SBenjamin Herrenschmidt             if (++jq >= PPC_PAGE_SIZES_MAX_SZ) {
4784656e1f0SBenjamin Herrenschmidt                 break;
4794656e1f0SBenjamin Herrenschmidt             }
4804656e1f0SBenjamin Herrenschmidt         }
4814656e1f0SBenjamin Herrenschmidt         if (++iq >= PPC_PAGE_SIZES_MAX_SZ) {
4824656e1f0SBenjamin Herrenschmidt             break;
4834656e1f0SBenjamin Herrenschmidt         }
4844656e1f0SBenjamin Herrenschmidt     }
48567d7d66fSDavid Gibson     cpu->hash64_opts->slb_size = smmu_info.slb_size;
48608215d8fSAlexander Graf     if (!(smmu_info.flags & KVM_PPC_1T_SEGMENTS)) {
48758969eeeSDavid Gibson         cpu->hash64_opts->flags &= ~PPC_HASH64_1TSEG;
4884656e1f0SBenjamin Herrenschmidt     }
4894656e1f0SBenjamin Herrenschmidt }
490df587133SThomas Huth 
491ec69355bSGreg Kurz bool kvmppc_is_mem_backend_page_size_ok(const char *obj_path)
492df587133SThomas Huth {
493df587133SThomas Huth     Object *mem_obj = object_resolve_path(obj_path, NULL);
4942b108085SDavid Gibson     long pagesize = host_memory_backend_pagesize(MEMORY_BACKEND(mem_obj));
495df587133SThomas Huth 
496df587133SThomas Huth     return pagesize >= max_cpu_page_size;
497df587133SThomas Huth }
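/*
 * Illustrative consequence of the check above: if guest RAM is backed by
 * 16 MiB hugepages (max_cpu_page_size == 16M), a memory backend using the
 * usual 64K host pages is rejected, while another 16M hugepage-backed
 * backend passes.
 */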
498df587133SThomas Huth 
4994656e1f0SBenjamin Herrenschmidt #else /* defined (TARGET_PPC64) */
5004656e1f0SBenjamin Herrenschmidt 
501a60f24b5SAndreas Färber static inline void kvm_fixup_page_sizes(PowerPCCPU *cpu)
5024656e1f0SBenjamin Herrenschmidt {
5034656e1f0SBenjamin Herrenschmidt }
5044656e1f0SBenjamin Herrenschmidt 
505ec69355bSGreg Kurz bool kvmppc_is_mem_backend_page_size_ok(const char *obj_path)
506df587133SThomas Huth {
507df587133SThomas Huth     return true;
508df587133SThomas Huth }
509df587133SThomas Huth 
5104656e1f0SBenjamin Herrenschmidt #endif /* !defined (TARGET_PPC64) */
5114656e1f0SBenjamin Herrenschmidt 
512b164e48eSEduardo Habkost unsigned long kvm_arch_vcpu_id(CPUState *cpu)
513b164e48eSEduardo Habkost {
5142e886fb3SSam Bobroff     return POWERPC_CPU(cpu)->vcpu_id;
515b164e48eSEduardo Habkost }
516b164e48eSEduardo Habkost 
51788365d17SBharat Bhushan /* e500 supports 2 h/w breakpoints and 2 watchpoints.
51888365d17SBharat Bhushan  * book3s supports only 1 watchpoint, so array size
51988365d17SBharat Bhushan  * of 4 is sufficient for now.
52088365d17SBharat Bhushan  */
52188365d17SBharat Bhushan #define MAX_HW_BKPTS 4
52288365d17SBharat Bhushan 
52388365d17SBharat Bhushan static struct HWBreakpoint {
52488365d17SBharat Bhushan     target_ulong addr;
52588365d17SBharat Bhushan     int type;
52688365d17SBharat Bhushan } hw_debug_points[MAX_HW_BKPTS];
52788365d17SBharat Bhushan 
52888365d17SBharat Bhushan static CPUWatchpoint hw_watchpoint;
52988365d17SBharat Bhushan 
53088365d17SBharat Bhushan /* By default there is no breakpoint or watchpoint supported */
53188365d17SBharat Bhushan static int max_hw_breakpoint;
53288365d17SBharat Bhushan static int max_hw_watchpoint;
53388365d17SBharat Bhushan static int nb_hw_breakpoint;
53488365d17SBharat Bhushan static int nb_hw_watchpoint;
53588365d17SBharat Bhushan 
53688365d17SBharat Bhushan static void kvmppc_hw_debug_points_init(CPUPPCState *cenv)
53788365d17SBharat Bhushan {
53888365d17SBharat Bhushan     if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
53988365d17SBharat Bhushan         max_hw_breakpoint = 2;
54088365d17SBharat Bhushan         max_hw_watchpoint = 2;
54188365d17SBharat Bhushan     }
54288365d17SBharat Bhushan 
54388365d17SBharat Bhushan     if ((max_hw_breakpoint + max_hw_watchpoint) > MAX_HW_BKPTS) {
54488365d17SBharat Bhushan         fprintf(stderr, "Error initializing h/w breakpoints\n");
54588365d17SBharat Bhushan         return;
54688365d17SBharat Bhushan     }
54788365d17SBharat Bhushan }
54888365d17SBharat Bhushan 
54920d695a9SAndreas Färber int kvm_arch_init_vcpu(CPUState *cs)
5505666ca4aSScott Wood {
55120d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
55220d695a9SAndreas Färber     CPUPPCState *cenv = &cpu->env;
5535666ca4aSScott Wood     int ret;
5545666ca4aSScott Wood 
5554656e1f0SBenjamin Herrenschmidt     /* Gather server mmu info from KVM and update the CPU state */
556a60f24b5SAndreas Färber     kvm_fixup_page_sizes(cpu);
5574656e1f0SBenjamin Herrenschmidt 
5584656e1f0SBenjamin Herrenschmidt     /* Synchronize sregs with kvm */
5591bc22652SAndreas Färber     ret = kvm_arch_sync_sregs(cpu);
5605666ca4aSScott Wood     if (ret) {
561388e47c7SThomas Huth         if (ret == -EINVAL) {
562388e47c7SThomas Huth             error_report("Register sync failed... If you're using kvm-hv.ko,"
563388e47c7SThomas Huth                          " only \"-cpu host\" is possible");
564388e47c7SThomas Huth         }
5655666ca4aSScott Wood         return ret;
5665666ca4aSScott Wood     }
567861bbc80SAlexander Graf 
568bc72ad67SAlex Bligh     idle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, kvm_kick_cpu, cpu);
569c821c2bdSAlexander Graf 
57093dd5e85SScott Wood     switch (cenv->mmu_model) {
57193dd5e85SScott Wood     case POWERPC_MMU_BOOKE206:
5727f516c96SThomas Huth         /* This target supports access to KVM's guest TLB */
5731bc22652SAndreas Färber         ret = kvm_booke206_tlb_init(cpu);
57493dd5e85SScott Wood         break;
5757f516c96SThomas Huth     case POWERPC_MMU_2_07:
5767f516c96SThomas Huth         if (!cap_htm && !kvmppc_is_pr(cs->kvm_state)) {
5777f516c96SThomas Huth             /* KVM-HV has transactional memory on POWER8 even without the
578f3d9f303SSam Bobroff              * KVM_CAP_PPC_HTM extension, so enable it here instead as
579f3d9f303SSam Bobroff              * long as it's available to userspace on the host. */
580f3d9f303SSam Bobroff             if (qemu_getauxval(AT_HWCAP2) & PPC_FEATURE2_HAS_HTM) {
5817f516c96SThomas Huth                 cap_htm = true;
5827f516c96SThomas Huth             }
583f3d9f303SSam Bobroff         }
5847f516c96SThomas Huth         break;
58593dd5e85SScott Wood     default:
58693dd5e85SScott Wood         break;
58793dd5e85SScott Wood     }
58893dd5e85SScott Wood 
5893c902d44SBharat Bhushan     kvm_get_one_reg(cs, KVM_REG_PPC_DEBUG_INST, &debug_inst_opcode);
59088365d17SBharat Bhushan     kvmppc_hw_debug_points_init(cenv);
5913c902d44SBharat Bhushan 
592861bbc80SAlexander Graf     return ret;
593d76d1650Saurel32 }
594d76d1650Saurel32 
5951bc22652SAndreas Färber static void kvm_sw_tlb_put(PowerPCCPU *cpu)
59693dd5e85SScott Wood {
5971bc22652SAndreas Färber     CPUPPCState *env = &cpu->env;
5981bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
59993dd5e85SScott Wood     struct kvm_dirty_tlb dirty_tlb;
60093dd5e85SScott Wood     unsigned char *bitmap;
60193dd5e85SScott Wood     int ret;
60293dd5e85SScott Wood 
60393dd5e85SScott Wood     if (!env->kvm_sw_tlb) {
60493dd5e85SScott Wood         return;
60593dd5e85SScott Wood     }
60693dd5e85SScott Wood 
60793dd5e85SScott Wood     bitmap = g_malloc((env->nb_tlb + 7) / 8);
60893dd5e85SScott Wood     memset(bitmap, 0xFF, (env->nb_tlb + 7) / 8);
60993dd5e85SScott Wood 
61093dd5e85SScott Wood     dirty_tlb.bitmap = (uintptr_t)bitmap;
61193dd5e85SScott Wood     dirty_tlb.num_dirty = env->nb_tlb;
61293dd5e85SScott Wood 
6131bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_DIRTY_TLB, &dirty_tlb);
61493dd5e85SScott Wood     if (ret) {
61593dd5e85SScott Wood         fprintf(stderr, "%s: KVM_DIRTY_TLB: %s\n",
61693dd5e85SScott Wood                 __func__, strerror(-ret));
61793dd5e85SScott Wood     }
61893dd5e85SScott Wood 
61993dd5e85SScott Wood     g_free(bitmap);
62093dd5e85SScott Wood }
62193dd5e85SScott Wood 
622d67d40eaSDavid Gibson static void kvm_get_one_spr(CPUState *cs, uint64_t id, int spr)
623d67d40eaSDavid Gibson {
624d67d40eaSDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
625d67d40eaSDavid Gibson     CPUPPCState *env = &cpu->env;
626d67d40eaSDavid Gibson     union {
627d67d40eaSDavid Gibson         uint32_t u32;
628d67d40eaSDavid Gibson         uint64_t u64;
629d67d40eaSDavid Gibson     } val;
630d67d40eaSDavid Gibson     struct kvm_one_reg reg = {
631d67d40eaSDavid Gibson         .id = id,
632d67d40eaSDavid Gibson         .addr = (uintptr_t) &val,
633d67d40eaSDavid Gibson     };
634d67d40eaSDavid Gibson     int ret;
635d67d40eaSDavid Gibson 
636d67d40eaSDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
637d67d40eaSDavid Gibson     if (ret != 0) {
638b36f100eSAlexey Kardashevskiy         trace_kvm_failed_spr_get(spr, strerror(errno));
639d67d40eaSDavid Gibson     } else {
640d67d40eaSDavid Gibson         switch (id & KVM_REG_SIZE_MASK) {
641d67d40eaSDavid Gibson         case KVM_REG_SIZE_U32:
642d67d40eaSDavid Gibson             env->spr[spr] = val.u32;
643d67d40eaSDavid Gibson             break;
644d67d40eaSDavid Gibson 
645d67d40eaSDavid Gibson         case KVM_REG_SIZE_U64:
646d67d40eaSDavid Gibson             env->spr[spr] = val.u64;
647d67d40eaSDavid Gibson             break;
648d67d40eaSDavid Gibson 
649d67d40eaSDavid Gibson         default:
650d67d40eaSDavid Gibson             /* Don't handle this size yet */
651d67d40eaSDavid Gibson             abort();
652d67d40eaSDavid Gibson         }
653d67d40eaSDavid Gibson     }
654d67d40eaSDavid Gibson }
655d67d40eaSDavid Gibson 
656d67d40eaSDavid Gibson static void kvm_put_one_spr(CPUState *cs, uint64_t id, int spr)
657d67d40eaSDavid Gibson {
658d67d40eaSDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
659d67d40eaSDavid Gibson     CPUPPCState *env = &cpu->env;
660d67d40eaSDavid Gibson     union {
661d67d40eaSDavid Gibson         uint32_t u32;
662d67d40eaSDavid Gibson         uint64_t u64;
663d67d40eaSDavid Gibson     } val;
664d67d40eaSDavid Gibson     struct kvm_one_reg reg = {
665d67d40eaSDavid Gibson         .id = id,
666d67d40eaSDavid Gibson         .addr = (uintptr_t) &val,
667d67d40eaSDavid Gibson     };
668d67d40eaSDavid Gibson     int ret;
669d67d40eaSDavid Gibson 
670d67d40eaSDavid Gibson     switch (id & KVM_REG_SIZE_MASK) {
671d67d40eaSDavid Gibson     case KVM_REG_SIZE_U32:
672d67d40eaSDavid Gibson         val.u32 = env->spr[spr];
673d67d40eaSDavid Gibson         break;
674d67d40eaSDavid Gibson 
675d67d40eaSDavid Gibson     case KVM_REG_SIZE_U64:
676d67d40eaSDavid Gibson         val.u64 = env->spr[spr];
677d67d40eaSDavid Gibson         break;
678d67d40eaSDavid Gibson 
679d67d40eaSDavid Gibson     default:
680d67d40eaSDavid Gibson         /* Don't handle this size yet */
681d67d40eaSDavid Gibson         abort();
682d67d40eaSDavid Gibson     }
683d67d40eaSDavid Gibson 
684d67d40eaSDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
685d67d40eaSDavid Gibson     if (ret != 0) {
686b36f100eSAlexey Kardashevskiy         trace_kvm_failed_spr_set(spr, strerror(errno));
687d67d40eaSDavid Gibson     }
688d67d40eaSDavid Gibson }
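/*
 * Illustrative use of the helpers above, mirroring the explicit HIOR sync
 * performed later in this file; the transfer width is taken from the
 * KVM_REG_SIZE_* bits encoded in the one-reg id:
 *
 *     kvm_put_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
 */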
689d67d40eaSDavid Gibson 
69070b79849SDavid Gibson static int kvm_put_fp(CPUState *cs)
69170b79849SDavid Gibson {
69270b79849SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
69370b79849SDavid Gibson     CPUPPCState *env = &cpu->env;
69470b79849SDavid Gibson     struct kvm_one_reg reg;
69570b79849SDavid Gibson     int i;
69670b79849SDavid Gibson     int ret;
69770b79849SDavid Gibson 
69870b79849SDavid Gibson     if (env->insns_flags & PPC_FLOAT) {
69970b79849SDavid Gibson         uint64_t fpscr = env->fpscr;
70070b79849SDavid Gibson         bool vsx = !!(env->insns_flags2 & PPC2_VSX);
70170b79849SDavid Gibson 
70270b79849SDavid Gibson         reg.id = KVM_REG_PPC_FPSCR;
70370b79849SDavid Gibson         reg.addr = (uintptr_t)&fpscr;
70470b79849SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
70570b79849SDavid Gibson         if (ret < 0) {
706da56ff91SPeter Maydell             DPRINTF("Unable to set FPSCR to KVM: %s\n", strerror(errno));
70770b79849SDavid Gibson             return ret;
70870b79849SDavid Gibson         }
70970b79849SDavid Gibson 
71070b79849SDavid Gibson         for (i = 0; i < 32; i++) {
71170b79849SDavid Gibson             uint64_t vsr[2];
71270b79849SDavid Gibson 
7133a4b791bSGreg Kurz #ifdef HOST_WORDS_BIGENDIAN
71470b79849SDavid Gibson             vsr[0] = float64_val(env->fpr[i]);
71570b79849SDavid Gibson             vsr[1] = env->vsr[i];
7163a4b791bSGreg Kurz #else
7173a4b791bSGreg Kurz             vsr[0] = env->vsr[i];
7183a4b791bSGreg Kurz             vsr[1] = float64_val(env->fpr[i]);
7193a4b791bSGreg Kurz #endif
72070b79849SDavid Gibson             reg.addr = (uintptr_t) &vsr;
72170b79849SDavid Gibson             reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);
72270b79849SDavid Gibson 
72370b79849SDavid Gibson             ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
72470b79849SDavid Gibson             if (ret < 0) {
725da56ff91SPeter Maydell                 DPRINTF("Unable to set %s%d to KVM: %s\n", vsx ? "VSR" : "FPR",
72670b79849SDavid Gibson                         i, strerror(errno));
72770b79849SDavid Gibson                 return ret;
72870b79849SDavid Gibson             }
72970b79849SDavid Gibson         }
73070b79849SDavid Gibson     }
73170b79849SDavid Gibson 
73270b79849SDavid Gibson     if (env->insns_flags & PPC_ALTIVEC) {
73370b79849SDavid Gibson         reg.id = KVM_REG_PPC_VSCR;
73470b79849SDavid Gibson         reg.addr = (uintptr_t)&env->vscr;
73570b79849SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
73670b79849SDavid Gibson         if (ret < 0) {
737da56ff91SPeter Maydell             DPRINTF("Unable to set VSCR to KVM: %s\n", strerror(errno));
73870b79849SDavid Gibson             return ret;
73970b79849SDavid Gibson         }
74070b79849SDavid Gibson 
74170b79849SDavid Gibson         for (i = 0; i < 32; i++) {
74270b79849SDavid Gibson             reg.id = KVM_REG_PPC_VR(i);
74370b79849SDavid Gibson             reg.addr = (uintptr_t)&env->avr[i];
74470b79849SDavid Gibson             ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
74570b79849SDavid Gibson             if (ret < 0) {
746da56ff91SPeter Maydell                 DPRINTF("Unable to set VR%d to KVM: %s\n", i, strerror(errno));
74770b79849SDavid Gibson                 return ret;
74870b79849SDavid Gibson             }
74970b79849SDavid Gibson         }
75070b79849SDavid Gibson     }
75170b79849SDavid Gibson 
75270b79849SDavid Gibson     return 0;
75370b79849SDavid Gibson }
75470b79849SDavid Gibson 
75570b79849SDavid Gibson static int kvm_get_fp(CPUState *cs)
75670b79849SDavid Gibson {
75770b79849SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
75870b79849SDavid Gibson     CPUPPCState *env = &cpu->env;
75970b79849SDavid Gibson     struct kvm_one_reg reg;
76070b79849SDavid Gibson     int i;
76170b79849SDavid Gibson     int ret;
76270b79849SDavid Gibson 
76370b79849SDavid Gibson     if (env->insns_flags & PPC_FLOAT) {
76470b79849SDavid Gibson         uint64_t fpscr;
76570b79849SDavid Gibson         bool vsx = !!(env->insns_flags2 & PPC2_VSX);
76670b79849SDavid Gibson 
76770b79849SDavid Gibson         reg.id = KVM_REG_PPC_FPSCR;
76870b79849SDavid Gibson         reg.addr = (uintptr_t)&fpscr;
76970b79849SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
77070b79849SDavid Gibson         if (ret < 0) {
771da56ff91SPeter Maydell             DPRINTF("Unable to get FPSCR from KVM: %s\n", strerror(errno));
77270b79849SDavid Gibson             return ret;
77370b79849SDavid Gibson         } else {
77470b79849SDavid Gibson             env->fpscr = fpscr;
77570b79849SDavid Gibson         }
77670b79849SDavid Gibson 
77770b79849SDavid Gibson         for (i = 0; i < 32; i++) {
77870b79849SDavid Gibson             uint64_t vsr[2];
77970b79849SDavid Gibson 
78070b79849SDavid Gibson             reg.addr = (uintptr_t) &vsr;
78170b79849SDavid Gibson             reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);
78270b79849SDavid Gibson 
78370b79849SDavid Gibson             ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
78470b79849SDavid Gibson             if (ret < 0) {
785da56ff91SPeter Maydell                 DPRINTF("Unable to get %s%d from KVM: %s\n",
78670b79849SDavid Gibson                         vsx ? "VSR" : "FPR", i, strerror(errno));
78770b79849SDavid Gibson                 return ret;
78870b79849SDavid Gibson             } else {
7893a4b791bSGreg Kurz #ifdef HOST_WORDS_BIGENDIAN
79070b79849SDavid Gibson                 env->fpr[i] = vsr[0];
79170b79849SDavid Gibson                 if (vsx) {
79270b79849SDavid Gibson                     env->vsr[i] = vsr[1];
79370b79849SDavid Gibson                 }
7943a4b791bSGreg Kurz #else
7953a4b791bSGreg Kurz                 env->fpr[i] = vsr[1];
7963a4b791bSGreg Kurz                 if (vsx) {
7973a4b791bSGreg Kurz                     env->vsr[i] = vsr[0];
7983a4b791bSGreg Kurz                 }
7993a4b791bSGreg Kurz #endif
80070b79849SDavid Gibson             }
80170b79849SDavid Gibson         }
80270b79849SDavid Gibson     }
80370b79849SDavid Gibson 
80470b79849SDavid Gibson     if (env->insns_flags & PPC_ALTIVEC) {
80570b79849SDavid Gibson         reg.id = KVM_REG_PPC_VSCR;
80670b79849SDavid Gibson         reg.addr = (uintptr_t)&env->vscr;
80770b79849SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
80870b79849SDavid Gibson         if (ret < 0) {
809da56ff91SPeter Maydell             DPRINTF("Unable to get VSCR from KVM: %s\n", strerror(errno));
81070b79849SDavid Gibson             return ret;
81170b79849SDavid Gibson         }
81270b79849SDavid Gibson 
81370b79849SDavid Gibson         for (i = 0; i < 32; i++) {
81470b79849SDavid Gibson             reg.id = KVM_REG_PPC_VR(i);
81570b79849SDavid Gibson             reg.addr = (uintptr_t)&env->avr[i];
81670b79849SDavid Gibson             ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
81770b79849SDavid Gibson             if (ret < 0) {
818da56ff91SPeter Maydell                 DPRINTF("Unable to get VR%d from KVM: %s\n",
81970b79849SDavid Gibson                         i, strerror(errno));
82070b79849SDavid Gibson                 return ret;
82170b79849SDavid Gibson             }
82270b79849SDavid Gibson         }
82370b79849SDavid Gibson     }
82470b79849SDavid Gibson 
82570b79849SDavid Gibson     return 0;
82670b79849SDavid Gibson }
82770b79849SDavid Gibson 
8289b00ea49SDavid Gibson #if defined(TARGET_PPC64)
8299b00ea49SDavid Gibson static int kvm_get_vpa(CPUState *cs)
8309b00ea49SDavid Gibson {
8319b00ea49SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
8329b00ea49SDavid Gibson     CPUPPCState *env = &cpu->env;
8339b00ea49SDavid Gibson     struct kvm_one_reg reg;
8349b00ea49SDavid Gibson     int ret;
8359b00ea49SDavid Gibson 
8369b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_ADDR;
8379b00ea49SDavid Gibson     reg.addr = (uintptr_t)&env->vpa_addr;
8389b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
8399b00ea49SDavid Gibson     if (ret < 0) {
840da56ff91SPeter Maydell         DPRINTF("Unable to get VPA address from KVM: %s\n", strerror(errno));
8419b00ea49SDavid Gibson         return ret;
8429b00ea49SDavid Gibson     }
8439b00ea49SDavid Gibson 
8449b00ea49SDavid Gibson     assert((uintptr_t)&env->slb_shadow_size
8459b00ea49SDavid Gibson            == ((uintptr_t)&env->slb_shadow_addr + 8));
8469b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_SLB;
8479b00ea49SDavid Gibson     reg.addr = (uintptr_t)&env->slb_shadow_addr;
8489b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
8499b00ea49SDavid Gibson     if (ret < 0) {
850da56ff91SPeter Maydell         DPRINTF("Unable to get SLB shadow state from KVM: %s\n",
8519b00ea49SDavid Gibson                 strerror(errno));
8529b00ea49SDavid Gibson         return ret;
8539b00ea49SDavid Gibson     }
8549b00ea49SDavid Gibson 
8559b00ea49SDavid Gibson     assert((uintptr_t)&env->dtl_size == ((uintptr_t)&env->dtl_addr + 8));
8569b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_DTL;
8579b00ea49SDavid Gibson     reg.addr = (uintptr_t)&env->dtl_addr;
8589b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
8599b00ea49SDavid Gibson     if (ret < 0) {
860da56ff91SPeter Maydell         DPRINTF("Unable to get dispatch trace log state from KVM: %s\n",
8619b00ea49SDavid Gibson                 strerror(errno));
8629b00ea49SDavid Gibson         return ret;
8639b00ea49SDavid Gibson     }
8649b00ea49SDavid Gibson 
8659b00ea49SDavid Gibson     return 0;
8669b00ea49SDavid Gibson }
8679b00ea49SDavid Gibson 
8689b00ea49SDavid Gibson static int kvm_put_vpa(CPUState *cs)
8699b00ea49SDavid Gibson {
8709b00ea49SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
8719b00ea49SDavid Gibson     CPUPPCState *env = &cpu->env;
8729b00ea49SDavid Gibson     struct kvm_one_reg reg;
8739b00ea49SDavid Gibson     int ret;
8749b00ea49SDavid Gibson 
8759b00ea49SDavid Gibson     /* SLB shadow or DTL can't be registered unless a master VPA is
8769b00ea49SDavid Gibson      * registered.  That means when restoring state, if a VPA *is*
8779b00ea49SDavid Gibson      * registered, we need to set that up first.  If not, we need to
8789b00ea49SDavid Gibson      * deregister the others before deregistering the master VPA */
8799b00ea49SDavid Gibson     assert(env->vpa_addr || !(env->slb_shadow_addr || env->dtl_addr));
8809b00ea49SDavid Gibson 
8819b00ea49SDavid Gibson     if (env->vpa_addr) {
8829b00ea49SDavid Gibson         reg.id = KVM_REG_PPC_VPA_ADDR;
8839b00ea49SDavid Gibson         reg.addr = (uintptr_t)&env->vpa_addr;
8849b00ea49SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
8859b00ea49SDavid Gibson         if (ret < 0) {
886da56ff91SPeter Maydell             DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno));
8879b00ea49SDavid Gibson             return ret;
8889b00ea49SDavid Gibson         }
8899b00ea49SDavid Gibson     }
8909b00ea49SDavid Gibson 
8919b00ea49SDavid Gibson     assert((uintptr_t)&env->slb_shadow_size
8929b00ea49SDavid Gibson            == ((uintptr_t)&env->slb_shadow_addr + 8));
8939b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_SLB;
8949b00ea49SDavid Gibson     reg.addr = (uintptr_t)&env->slb_shadow_addr;
8959b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
8969b00ea49SDavid Gibson     if (ret < 0) {
897da56ff91SPeter Maydell         DPRINTF("Unable to set SLB shadow state to KVM: %s\n", strerror(errno));
8989b00ea49SDavid Gibson         return ret;
8999b00ea49SDavid Gibson     }
9009b00ea49SDavid Gibson 
9019b00ea49SDavid Gibson     assert((uintptr_t)&env->dtl_size == ((uintptr_t)&env->dtl_addr + 8));
9029b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_DTL;
9039b00ea49SDavid Gibson     reg.addr = (uintptr_t)&env->dtl_addr;
9049b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
9059b00ea49SDavid Gibson     if (ret < 0) {
906da56ff91SPeter Maydell         DPRINTF("Unable to set dispatch trace log state to KVM: %s\n",
9079b00ea49SDavid Gibson                 strerror(errno));
9089b00ea49SDavid Gibson         return ret;
9099b00ea49SDavid Gibson     }
9109b00ea49SDavid Gibson 
9119b00ea49SDavid Gibson     if (!env->vpa_addr) {
9129b00ea49SDavid Gibson         reg.id = KVM_REG_PPC_VPA_ADDR;
9139b00ea49SDavid Gibson         reg.addr = (uintptr_t)&env->vpa_addr;
9149b00ea49SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
9159b00ea49SDavid Gibson         if (ret < 0) {
916da56ff91SPeter Maydell             DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno));
9179b00ea49SDavid Gibson             return ret;
9189b00ea49SDavid Gibson         }
9199b00ea49SDavid Gibson     }
9209b00ea49SDavid Gibson 
9219b00ea49SDavid Gibson     return 0;
9229b00ea49SDavid Gibson }
9239b00ea49SDavid Gibson #endif /* TARGET_PPC64 */
9249b00ea49SDavid Gibson 
925e5c0d3ceSDavid Gibson int kvmppc_put_books_sregs(PowerPCCPU *cpu)
926a7a00a72SDavid Gibson {
927a7a00a72SDavid Gibson     CPUPPCState *env = &cpu->env;
928a7a00a72SDavid Gibson     struct kvm_sregs sregs;
929a7a00a72SDavid Gibson     int i;
930a7a00a72SDavid Gibson 
931a7a00a72SDavid Gibson     sregs.pvr = env->spr[SPR_PVR];
932a7a00a72SDavid Gibson 
9331ec26c75SGreg Kurz     if (cpu->vhyp) {
9341ec26c75SGreg Kurz         PPCVirtualHypervisorClass *vhc =
9351ec26c75SGreg Kurz             PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
9361ec26c75SGreg Kurz         sregs.u.s.sdr1 = vhc->encode_hpt_for_kvm_pr(cpu->vhyp);
9371ec26c75SGreg Kurz     } else {
938a7a00a72SDavid Gibson         sregs.u.s.sdr1 = env->spr[SPR_SDR1];
9391ec26c75SGreg Kurz     }
940a7a00a72SDavid Gibson 
941a7a00a72SDavid Gibson     /* Sync SLB */
942a7a00a72SDavid Gibson #ifdef TARGET_PPC64
943a7a00a72SDavid Gibson     for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
944a7a00a72SDavid Gibson         sregs.u.s.ppc64.slb[i].slbe = env->slb[i].esid;
945a7a00a72SDavid Gibson         if (env->slb[i].esid & SLB_ESID_V) {
946a7a00a72SDavid Gibson             sregs.u.s.ppc64.slb[i].slbe |= i;
947a7a00a72SDavid Gibson         }
948a7a00a72SDavid Gibson         sregs.u.s.ppc64.slb[i].slbv = env->slb[i].vsid;
949a7a00a72SDavid Gibson     }
950a7a00a72SDavid Gibson #endif
951a7a00a72SDavid Gibson 
952a7a00a72SDavid Gibson     /* Sync SRs */
953a7a00a72SDavid Gibson     for (i = 0; i < 16; i++) {
954a7a00a72SDavid Gibson         sregs.u.s.ppc32.sr[i] = env->sr[i];
955a7a00a72SDavid Gibson     }
956a7a00a72SDavid Gibson 
957a7a00a72SDavid Gibson     /* Sync BATs */
958a7a00a72SDavid Gibson     for (i = 0; i < 8; i++) {
959a7a00a72SDavid Gibson         /* Beware. We have to swap upper and lower bits here */
960a7a00a72SDavid Gibson         sregs.u.s.ppc32.dbat[i] = ((uint64_t)env->DBAT[0][i] << 32)
961a7a00a72SDavid Gibson             | env->DBAT[1][i];
962a7a00a72SDavid Gibson         sregs.u.s.ppc32.ibat[i] = ((uint64_t)env->IBAT[0][i] << 32)
963a7a00a72SDavid Gibson             | env->IBAT[1][i];
964a7a00a72SDavid Gibson     }
965a7a00a72SDavid Gibson 
966a7a00a72SDavid Gibson     return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
967a7a00a72SDavid Gibson }
968a7a00a72SDavid Gibson 
96920d695a9SAndreas Färber int kvm_arch_put_registers(CPUState *cs, int level)
970d76d1650Saurel32 {
97120d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
97220d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
973d76d1650Saurel32     struct kvm_regs regs;
974d76d1650Saurel32     int ret;
975d76d1650Saurel32     int i;
976d76d1650Saurel32 
9771bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
9781bc22652SAndreas Färber     if (ret < 0) {
979d76d1650Saurel32         return ret;
9801bc22652SAndreas Färber     }
981d76d1650Saurel32 
982d76d1650Saurel32     regs.ctr = env->ctr;
983d76d1650Saurel32     regs.lr  = env->lr;
984da91a00fSRichard Henderson     regs.xer = cpu_read_xer(env);
985d76d1650Saurel32     regs.msr = env->msr;
986d76d1650Saurel32     regs.pc = env->nip;
987d76d1650Saurel32 
988d76d1650Saurel32     regs.srr0 = env->spr[SPR_SRR0];
989d76d1650Saurel32     regs.srr1 = env->spr[SPR_SRR1];
990d76d1650Saurel32 
991d76d1650Saurel32     regs.sprg0 = env->spr[SPR_SPRG0];
992d76d1650Saurel32     regs.sprg1 = env->spr[SPR_SPRG1];
993d76d1650Saurel32     regs.sprg2 = env->spr[SPR_SPRG2];
994d76d1650Saurel32     regs.sprg3 = env->spr[SPR_SPRG3];
995d76d1650Saurel32     regs.sprg4 = env->spr[SPR_SPRG4];
996d76d1650Saurel32     regs.sprg5 = env->spr[SPR_SPRG5];
997d76d1650Saurel32     regs.sprg6 = env->spr[SPR_SPRG6];
998d76d1650Saurel32     regs.sprg7 = env->spr[SPR_SPRG7];
999d76d1650Saurel32 
100090dc8812SScott Wood     regs.pid = env->spr[SPR_BOOKE_PID];
100190dc8812SScott Wood 
1002d76d1650Saurel32     for (i = 0; i < 32; i++)
1003d76d1650Saurel32         regs.gpr[i] = env->gpr[i];
1004d76d1650Saurel32 
10054bddaf55SAlexey Kardashevskiy     regs.cr = 0;
10064bddaf55SAlexey Kardashevskiy     for (i = 0; i < 8; i++) {
10074bddaf55SAlexey Kardashevskiy         regs.cr |= (env->crf[i] & 15) << (4 * (7 - i));
10084bddaf55SAlexey Kardashevskiy     }
10094bddaf55SAlexey Kardashevskiy 
10101bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
1011d76d1650Saurel32     if (ret < 0)
1012d76d1650Saurel32         return ret;
1013d76d1650Saurel32 
101470b79849SDavid Gibson     kvm_put_fp(cs);
101570b79849SDavid Gibson 
101693dd5e85SScott Wood     if (env->tlb_dirty) {
10171bc22652SAndreas Färber         kvm_sw_tlb_put(cpu);
101893dd5e85SScott Wood         env->tlb_dirty = false;
101993dd5e85SScott Wood     }
102093dd5e85SScott Wood 
1021f1af19d7SDavid Gibson     if (cap_segstate && (level >= KVM_PUT_RESET_STATE)) {
1022a7a00a72SDavid Gibson         ret = kvmppc_put_books_sregs(cpu);
1023a7a00a72SDavid Gibson         if (ret < 0) {
1024f1af19d7SDavid Gibson             return ret;
1025f1af19d7SDavid Gibson         }
1026f1af19d7SDavid Gibson     }
1027f1af19d7SDavid Gibson 
1028f1af19d7SDavid Gibson     if (cap_hior && (level >= KVM_PUT_RESET_STATE)) {
1029d67d40eaSDavid Gibson         kvm_put_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
1030d67d40eaSDavid Gibson     }
1031f1af19d7SDavid Gibson 
1032d67d40eaSDavid Gibson     if (cap_one_reg) {
1033d67d40eaSDavid Gibson         int i;
1034d67d40eaSDavid Gibson 
1035d67d40eaSDavid Gibson         /* We deliberately ignore errors here: for kernels which have
1036d67d40eaSDavid Gibson          * the ONE_REG calls but don't support the specific
1037d67d40eaSDavid Gibson          * registers, there's a reasonable chance things will still
1038d67d40eaSDavid Gibson          * work, at least until we try to migrate. */
1039d67d40eaSDavid Gibson         for (i = 0; i < 1024; i++) {
1040d67d40eaSDavid Gibson             uint64_t id = env->spr_cb[i].one_reg_id;
1041d67d40eaSDavid Gibson 
1042d67d40eaSDavid Gibson             if (id != 0) {
1043d67d40eaSDavid Gibson                 kvm_put_one_spr(cs, id, i);
1044d67d40eaSDavid Gibson             }
1045f1af19d7SDavid Gibson         }
10469b00ea49SDavid Gibson 
10479b00ea49SDavid Gibson #ifdef TARGET_PPC64
104880b3f79bSAlexey Kardashevskiy         if (msr_ts) {
104980b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
105080b3f79bSAlexey Kardashevskiy                 kvm_set_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
105180b3f79bSAlexey Kardashevskiy             }
105280b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
105380b3f79bSAlexey Kardashevskiy                 kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
105480b3f79bSAlexey Kardashevskiy             }
105580b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
105680b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
105780b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
105880b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
105980b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
106080b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
106180b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
106280b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
106380b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
106480b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
106580b3f79bSAlexey Kardashevskiy         }
106680b3f79bSAlexey Kardashevskiy 
10679b00ea49SDavid Gibson         if (cap_papr) {
10689b00ea49SDavid Gibson             if (kvm_put_vpa(cs) < 0) {
1069da56ff91SPeter Maydell                 DPRINTF("Warning: Unable to set VPA information to KVM\n");
10709b00ea49SDavid Gibson             }
10719b00ea49SDavid Gibson         }
107298a8b524SAlexey Kardashevskiy 
107398a8b524SAlexey Kardashevskiy         kvm_set_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
10749b00ea49SDavid Gibson #endif /* TARGET_PPC64 */
1075f1af19d7SDavid Gibson     }
1076f1af19d7SDavid Gibson 
1077d76d1650Saurel32     return ret;
1078d76d1650Saurel32 }
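
/*
 * Illustrative sketch only (hypothetical helpers, not used by this file):
 * kvm_arch_put_registers() above packs QEMU's eight 4-bit CR fields into the
 * single 32-bit CR image that KVM_SET_REGS expects, and
 * kvm_arch_get_registers() below performs the inverse.  Condensed:
 */
static inline uint32_t kvmppc_crf_pack_example(const uint32_t crf[8])
{
    uint32_t cr = 0;
    int i;

    for (i = 0; i < 8; i++) {
        cr |= (crf[i] & 15) << (4 * (7 - i)); /* crf[0] ends up in bits 31..28 */
    }
    return cr;
}

static inline void kvmppc_crf_unpack_example(uint32_t cr, uint32_t crf[8])
{
    int i;

    for (i = 7; i >= 0; i--) {                /* mirrors the get path below */
        crf[i] = cr & 15;
        cr >>= 4;
    }
}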
1079d76d1650Saurel32 
1080c371c2e3SBharat Bhushan static void kvm_sync_excp(CPUPPCState *env, int vector, int ivor)
1081c371c2e3SBharat Bhushan {
1082c371c2e3SBharat Bhushan      env->excp_vectors[vector] = env->spr[ivor] + env->spr[SPR_BOOKE_IVPR];
1083c371c2e3SBharat Bhushan }
1084c371c2e3SBharat Bhushan 
1085a7a00a72SDavid Gibson static int kvmppc_get_booke_sregs(PowerPCCPU *cpu)
1086d76d1650Saurel32 {
108720d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
1088ba5e5090SAlexander Graf     struct kvm_sregs sregs;
1089a7a00a72SDavid Gibson     int ret;
1090d76d1650Saurel32 
1091a7a00a72SDavid Gibson     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
109290dc8812SScott Wood     if (ret < 0) {
109390dc8812SScott Wood         return ret;
109490dc8812SScott Wood     }
109590dc8812SScott Wood 
109690dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_BASE) {
109790dc8812SScott Wood         env->spr[SPR_BOOKE_CSRR0] = sregs.u.e.csrr0;
109890dc8812SScott Wood         env->spr[SPR_BOOKE_CSRR1] = sregs.u.e.csrr1;
109990dc8812SScott Wood         env->spr[SPR_BOOKE_ESR] = sregs.u.e.esr;
110090dc8812SScott Wood         env->spr[SPR_BOOKE_DEAR] = sregs.u.e.dear;
110190dc8812SScott Wood         env->spr[SPR_BOOKE_MCSR] = sregs.u.e.mcsr;
110290dc8812SScott Wood         env->spr[SPR_BOOKE_TSR] = sregs.u.e.tsr;
110390dc8812SScott Wood         env->spr[SPR_BOOKE_TCR] = sregs.u.e.tcr;
110490dc8812SScott Wood         env->spr[SPR_DECR] = sregs.u.e.dec;
110590dc8812SScott Wood         env->spr[SPR_TBL] = sregs.u.e.tb & 0xffffffff;
110690dc8812SScott Wood         env->spr[SPR_TBU] = sregs.u.e.tb >> 32;
110790dc8812SScott Wood         env->spr[SPR_VRSAVE] = sregs.u.e.vrsave;
110890dc8812SScott Wood     }
110990dc8812SScott Wood 
111090dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_ARCH206) {
111190dc8812SScott Wood         env->spr[SPR_BOOKE_PIR] = sregs.u.e.pir;
111290dc8812SScott Wood         env->spr[SPR_BOOKE_MCSRR0] = sregs.u.e.mcsrr0;
111390dc8812SScott Wood         env->spr[SPR_BOOKE_MCSRR1] = sregs.u.e.mcsrr1;
111490dc8812SScott Wood         env->spr[SPR_BOOKE_DECAR] = sregs.u.e.decar;
111590dc8812SScott Wood         env->spr[SPR_BOOKE_IVPR] = sregs.u.e.ivpr;
111690dc8812SScott Wood     }
111790dc8812SScott Wood 
111890dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_64) {
111990dc8812SScott Wood         env->spr[SPR_BOOKE_EPCR] = sregs.u.e.epcr;
112090dc8812SScott Wood     }
112190dc8812SScott Wood 
112290dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_SPRG8) {
112390dc8812SScott Wood         env->spr[SPR_BOOKE_SPRG8] = sregs.u.e.sprg8;
112490dc8812SScott Wood     }
112590dc8812SScott Wood 
112690dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_IVOR) {
112790dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0];
1128c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_CRITICAL,  SPR_BOOKE_IVOR0);
112990dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1];
1130c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_MCHECK,  SPR_BOOKE_IVOR1);
113190dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2];
1132c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DSI,  SPR_BOOKE_IVOR2);
113390dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3];
1134c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_ISI,  SPR_BOOKE_IVOR3);
113590dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4];
1136c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_EXTERNAL,  SPR_BOOKE_IVOR4);
113790dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5];
1138c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_ALIGN,  SPR_BOOKE_IVOR5);
113990dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6];
1140c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_PROGRAM,  SPR_BOOKE_IVOR6);
114190dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7];
1142c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_FPU,  SPR_BOOKE_IVOR7);
114390dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8];
1144c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_SYSCALL,  SPR_BOOKE_IVOR8);
114590dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9];
1146c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_APU,  SPR_BOOKE_IVOR9);
114790dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10];
1148c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DECR,  SPR_BOOKE_IVOR10);
114990dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11];
1150c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_FIT,  SPR_BOOKE_IVOR11);
115190dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12];
1152c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_WDT,  SPR_BOOKE_IVOR12);
115390dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13];
1154c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DTLB,  SPR_BOOKE_IVOR13);
115590dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14];
1156c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_ITLB,  SPR_BOOKE_IVOR14);
115790dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15];
1158c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DEBUG,  SPR_BOOKE_IVOR15);
115990dc8812SScott Wood 
116090dc8812SScott Wood         if (sregs.u.e.features & KVM_SREGS_E_SPE) {
116190dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0];
1162c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_SPEU,  SPR_BOOKE_IVOR32);
116390dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1];
1164c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_EFPDI,  SPR_BOOKE_IVOR33);
116590dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2];
1166c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_EFPRI,  SPR_BOOKE_IVOR34);
116790dc8812SScott Wood         }
116890dc8812SScott Wood 
116990dc8812SScott Wood         if (sregs.u.e.features & KVM_SREGS_E_PM) {
117090dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3];
1171c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_EPERFM,  SPR_BOOKE_IVOR35);
117290dc8812SScott Wood         }
117390dc8812SScott Wood 
117490dc8812SScott Wood         if (sregs.u.e.features & KVM_SREGS_E_PC) {
117590dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4];
1176c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_DOORI,  SPR_BOOKE_IVOR36);
117790dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5];
1178c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_DOORCI, SPR_BOOKE_IVOR37);
117990dc8812SScott Wood         }
118090dc8812SScott Wood     }
118190dc8812SScott Wood 
118290dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_ARCH206_MMU) {
118390dc8812SScott Wood         env->spr[SPR_BOOKE_MAS0] = sregs.u.e.mas0;
118490dc8812SScott Wood         env->spr[SPR_BOOKE_MAS1] = sregs.u.e.mas1;
118590dc8812SScott Wood         env->spr[SPR_BOOKE_MAS2] = sregs.u.e.mas2;
118690dc8812SScott Wood         env->spr[SPR_BOOKE_MAS3] = sregs.u.e.mas7_3 & 0xffffffff;
118790dc8812SScott Wood         env->spr[SPR_BOOKE_MAS4] = sregs.u.e.mas4;
118890dc8812SScott Wood         env->spr[SPR_BOOKE_MAS6] = sregs.u.e.mas6;
118990dc8812SScott Wood         env->spr[SPR_BOOKE_MAS7] = sregs.u.e.mas7_3 >> 32;
119090dc8812SScott Wood         env->spr[SPR_MMUCFG] = sregs.u.e.mmucfg;
119190dc8812SScott Wood         env->spr[SPR_BOOKE_TLB0CFG] = sregs.u.e.tlbcfg[0];
119290dc8812SScott Wood         env->spr[SPR_BOOKE_TLB1CFG] = sregs.u.e.tlbcfg[1];
119390dc8812SScott Wood     }
119490dc8812SScott Wood 
119590dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_EXP) {
119690dc8812SScott Wood         env->spr[SPR_BOOKE_EPR] = sregs.u.e.epr;
119790dc8812SScott Wood     }
119890dc8812SScott Wood 
119990dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_PD) {
120090dc8812SScott Wood         env->spr[SPR_BOOKE_EPLC] = sregs.u.e.eplc;
120190dc8812SScott Wood         env->spr[SPR_BOOKE_EPSC] = sregs.u.e.epsc;
120290dc8812SScott Wood     }
120390dc8812SScott Wood 
120490dc8812SScott Wood     if (sregs.u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
120590dc8812SScott Wood         env->spr[SPR_E500_SVR] = sregs.u.e.impl.fsl.svr;
120690dc8812SScott Wood         env->spr[SPR_Exxx_MCAR] = sregs.u.e.impl.fsl.mcar;
120790dc8812SScott Wood         env->spr[SPR_HID0] = sregs.u.e.impl.fsl.hid0;
120890dc8812SScott Wood 
120990dc8812SScott Wood         if (sregs.u.e.impl.fsl.features & KVM_SREGS_E_FSL_PIDn) {
121090dc8812SScott Wood             env->spr[SPR_BOOKE_PID1] = sregs.u.e.impl.fsl.pid1;
121190dc8812SScott Wood             env->spr[SPR_BOOKE_PID2] = sregs.u.e.impl.fsl.pid2;
121290dc8812SScott Wood         }
121390dc8812SScott Wood     }
1214a7a00a72SDavid Gibson 
1215a7a00a72SDavid Gibson     return 0;
1216fafc0b6aSAlexander Graf }
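
/*
 * Illustrative sketch only (hypothetical helper): several of the assignments
 * above (sregs.u.e.tb -> SPR_TBL/SPR_TBU, sregs.u.e.mas7_3 ->
 * SPR_BOOKE_MAS3/SPR_BOOKE_MAS7) split one 64-bit value from KVM into two
 * 32-bit SPR images.  The split they all use is simply:
 */
static inline void kvmppc_split_u64_example(uint64_t packed,
                                            target_ulong *lo, target_ulong *hi)
{
    *lo = packed & 0xffffffff;   /* e.g. SPR_TBL, SPR_BOOKE_MAS3 */
    *hi = packed >> 32;          /* e.g. SPR_TBU, SPR_BOOKE_MAS7 */
}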
121790dc8812SScott Wood 
1218a7a00a72SDavid Gibson static int kvmppc_get_books_sregs(PowerPCCPU *cpu)
1219a7a00a72SDavid Gibson {
1220a7a00a72SDavid Gibson     CPUPPCState *env = &cpu->env;
1221a7a00a72SDavid Gibson     struct kvm_sregs sregs;
1222a7a00a72SDavid Gibson     int ret;
1223a7a00a72SDavid Gibson     int i;
1224a7a00a72SDavid Gibson 
1225a7a00a72SDavid Gibson     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
122690dc8812SScott Wood     if (ret < 0) {
122790dc8812SScott Wood         return ret;
122890dc8812SScott Wood     }
122990dc8812SScott Wood 
1230e57ca75cSDavid Gibson     if (!cpu->vhyp) {
1231bb593904SDavid Gibson         ppc_store_sdr1(env, sregs.u.s.sdr1);
1232f3c75d42SAneesh Kumar K.V     }
1233ba5e5090SAlexander Graf 
1234ba5e5090SAlexander Graf     /* Sync SLB */
123582c09f2fSAlexander Graf #ifdef TARGET_PPC64
12364b4d4a21SAneesh Kumar K.V     /*
12374b4d4a21SAneesh Kumar K.V      * The packed SLB array we get from KVM_GET_SREGS only contains
1238a7a00a72SDavid Gibson      * information about valid entries. So we flush our internal copy
1239a7a00a72SDavid Gibson      * to get rid of stale ones, then put all valid SLB entries back
1240a7a00a72SDavid Gibson      * in.
12414b4d4a21SAneesh Kumar K.V      */
12424b4d4a21SAneesh Kumar K.V     memset(env->slb, 0, sizeof(env->slb));
1243d83af167SAneesh Kumar K.V     for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
12444b4d4a21SAneesh Kumar K.V         target_ulong rb = sregs.u.s.ppc64.slb[i].slbe;
12454b4d4a21SAneesh Kumar K.V         target_ulong rs = sregs.u.s.ppc64.slb[i].slbv;
12464b4d4a21SAneesh Kumar K.V         /*
12474b4d4a21SAneesh Kumar K.V          * Only restore valid entries
12484b4d4a21SAneesh Kumar K.V          */
12494b4d4a21SAneesh Kumar K.V         if (rb & SLB_ESID_V) {
1250bcd81230SDavid Gibson             ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs);
12514b4d4a21SAneesh Kumar K.V         }
1252ba5e5090SAlexander Graf     }
125382c09f2fSAlexander Graf #endif
1254ba5e5090SAlexander Graf 
1255ba5e5090SAlexander Graf     /* Sync SRs */
1256ba5e5090SAlexander Graf     for (i = 0; i < 16; i++) {
1257ba5e5090SAlexander Graf         env->sr[i] = sregs.u.s.ppc32.sr[i];
1258ba5e5090SAlexander Graf     }
1259ba5e5090SAlexander Graf 
1260ba5e5090SAlexander Graf     /* Sync BATs */
1261ba5e5090SAlexander Graf     for (i = 0; i < 8; i++) {
1262ba5e5090SAlexander Graf         env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff;
1263ba5e5090SAlexander Graf         env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32;
1264ba5e5090SAlexander Graf         env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff;
1265ba5e5090SAlexander Graf         env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32;
1266ba5e5090SAlexander Graf     }
1267a7a00a72SDavid Gibson 
1268a7a00a72SDavid Gibson     return 0;
1269a7a00a72SDavid Gibson }
1270a7a00a72SDavid Gibson 
1271a7a00a72SDavid Gibson int kvm_arch_get_registers(CPUState *cs)
1272a7a00a72SDavid Gibson {
1273a7a00a72SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
1274a7a00a72SDavid Gibson     CPUPPCState *env = &cpu->env;
1275a7a00a72SDavid Gibson     struct kvm_regs regs;
1276a7a00a72SDavid Gibson     uint32_t cr;
1277a7a00a72SDavid Gibson     int i, ret;
1278a7a00a72SDavid Gibson 
1279a7a00a72SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
1280a7a00a72SDavid Gibson     if (ret < 0)
1281a7a00a72SDavid Gibson         return ret;
1282a7a00a72SDavid Gibson 
1283a7a00a72SDavid Gibson     cr = regs.cr;
1284a7a00a72SDavid Gibson     for (i = 7; i >= 0; i--) {
1285a7a00a72SDavid Gibson         env->crf[i] = cr & 15;
1286a7a00a72SDavid Gibson         cr >>= 4;
1287a7a00a72SDavid Gibson     }
1288a7a00a72SDavid Gibson 
1289a7a00a72SDavid Gibson     env->ctr = regs.ctr;
1290a7a00a72SDavid Gibson     env->lr = regs.lr;
1291a7a00a72SDavid Gibson     cpu_write_xer(env, regs.xer);
1292a7a00a72SDavid Gibson     env->msr = regs.msr;
1293a7a00a72SDavid Gibson     env->nip = regs.pc;
1294a7a00a72SDavid Gibson 
1295a7a00a72SDavid Gibson     env->spr[SPR_SRR0] = regs.srr0;
1296a7a00a72SDavid Gibson     env->spr[SPR_SRR1] = regs.srr1;
1297a7a00a72SDavid Gibson 
1298a7a00a72SDavid Gibson     env->spr[SPR_SPRG0] = regs.sprg0;
1299a7a00a72SDavid Gibson     env->spr[SPR_SPRG1] = regs.sprg1;
1300a7a00a72SDavid Gibson     env->spr[SPR_SPRG2] = regs.sprg2;
1301a7a00a72SDavid Gibson     env->spr[SPR_SPRG3] = regs.sprg3;
1302a7a00a72SDavid Gibson     env->spr[SPR_SPRG4] = regs.sprg4;
1303a7a00a72SDavid Gibson     env->spr[SPR_SPRG5] = regs.sprg5;
1304a7a00a72SDavid Gibson     env->spr[SPR_SPRG6] = regs.sprg6;
1305a7a00a72SDavid Gibson     env->spr[SPR_SPRG7] = regs.sprg7;
1306a7a00a72SDavid Gibson 
1307a7a00a72SDavid Gibson     env->spr[SPR_BOOKE_PID] = regs.pid;
1308a7a00a72SDavid Gibson 
1309a7a00a72SDavid Gibson     for (i = 0; i < 32; i++)
1310a7a00a72SDavid Gibson         env->gpr[i] = regs.gpr[i];
1311a7a00a72SDavid Gibson 
1312a7a00a72SDavid Gibson     kvm_get_fp(cs);
1313a7a00a72SDavid Gibson 
1314a7a00a72SDavid Gibson     if (cap_booke_sregs) {
1315a7a00a72SDavid Gibson         ret = kvmppc_get_booke_sregs(cpu);
1316a7a00a72SDavid Gibson         if (ret < 0) {
1317a7a00a72SDavid Gibson             return ret;
1318a7a00a72SDavid Gibson         }
1319a7a00a72SDavid Gibson     }
1320a7a00a72SDavid Gibson 
1321a7a00a72SDavid Gibson     if (cap_segstate) {
1322a7a00a72SDavid Gibson         ret = kvmppc_get_books_sregs(cpu);
1323a7a00a72SDavid Gibson         if (ret < 0) {
1324a7a00a72SDavid Gibson             return ret;
1325a7a00a72SDavid Gibson         }
1326fafc0b6aSAlexander Graf     }
1327ba5e5090SAlexander Graf 
1328d67d40eaSDavid Gibson     if (cap_hior) {
1329d67d40eaSDavid Gibson         kvm_get_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
1330d67d40eaSDavid Gibson     }
1331d67d40eaSDavid Gibson 
1332d67d40eaSDavid Gibson     if (cap_one_reg) {
1333d67d40eaSDavid Gibson         int i;
1334d67d40eaSDavid Gibson 
1335d67d40eaSDavid Gibson         /* We deliberately ignore errors here: for kernels which have
1336d67d40eaSDavid Gibson          * the ONE_REG calls but don't support the specific
1337d67d40eaSDavid Gibson          * registers, there's a reasonable chance things will still
1338d67d40eaSDavid Gibson          * work, at least until we try to migrate. */
1339d67d40eaSDavid Gibson         for (i = 0; i < 1024; i++) {
1340d67d40eaSDavid Gibson             uint64_t id = env->spr_cb[i].one_reg_id;
1341d67d40eaSDavid Gibson 
1342d67d40eaSDavid Gibson             if (id != 0) {
1343d67d40eaSDavid Gibson                 kvm_get_one_spr(cs, id, i);
1344d67d40eaSDavid Gibson             }
1345d67d40eaSDavid Gibson         }
13469b00ea49SDavid Gibson 
13479b00ea49SDavid Gibson #ifdef TARGET_PPC64
134880b3f79bSAlexey Kardashevskiy         if (msr_ts) {
134980b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
135080b3f79bSAlexey Kardashevskiy                 kvm_get_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
135180b3f79bSAlexey Kardashevskiy             }
135280b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
135380b3f79bSAlexey Kardashevskiy                 kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
135480b3f79bSAlexey Kardashevskiy             }
135580b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
135680b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
135780b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
135880b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
135980b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
136080b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
136180b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
136280b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
136380b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
136480b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
136580b3f79bSAlexey Kardashevskiy         }
136680b3f79bSAlexey Kardashevskiy 
13679b00ea49SDavid Gibson         if (cap_papr) {
13689b00ea49SDavid Gibson             if (kvm_get_vpa(cs) < 0) {
1369da56ff91SPeter Maydell                 DPRINTF("Warning: Unable to get VPA information from KVM\n");
13709b00ea49SDavid Gibson             }
13719b00ea49SDavid Gibson         }
137298a8b524SAlexey Kardashevskiy 
137398a8b524SAlexey Kardashevskiy         kvm_get_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
13749b00ea49SDavid Gibson #endif
1375d67d40eaSDavid Gibson     }
1376d67d40eaSDavid Gibson 
1377d76d1650Saurel32     return 0;
1378d76d1650Saurel32 }
1379d76d1650Saurel32 
13801bc22652SAndreas Färber int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
1381fc87e185SAlexander Graf {
1382fc87e185SAlexander Graf     unsigned virq = level ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;
1383fc87e185SAlexander Graf 
1384fc87e185SAlexander Graf     if (irq != PPC_INTERRUPT_EXT) {
1385fc87e185SAlexander Graf         return 0;
1386fc87e185SAlexander Graf     }
1387fc87e185SAlexander Graf 
1388fc87e185SAlexander Graf     if (!kvm_enabled() || !cap_interrupt_unset || !cap_interrupt_level) {
1389fc87e185SAlexander Graf         return 0;
1390fc87e185SAlexander Graf     }
1391fc87e185SAlexander Graf 
13921bc22652SAndreas Färber     kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq);
1393fc87e185SAlexander Graf 
1394fc87e185SAlexander Graf     return 0;
1395fc87e185SAlexander Graf }
1396fc87e185SAlexander Graf 
139716415335SAlexander Graf #if defined(TARGET_PPCEMB)
139816415335SAlexander Graf #define PPC_INPUT_INT PPC40x_INPUT_INT
139916415335SAlexander Graf #elif defined(TARGET_PPC64)
140016415335SAlexander Graf #define PPC_INPUT_INT PPC970_INPUT_INT
140116415335SAlexander Graf #else
140216415335SAlexander Graf #define PPC_INPUT_INT PPC6xx_INPUT_INT
140316415335SAlexander Graf #endif
140416415335SAlexander Graf 
140520d695a9SAndreas Färber void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
1406d76d1650Saurel32 {
140720d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
140820d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
1409d76d1650Saurel32     int r;
1410d76d1650Saurel32     unsigned irq;
1411d76d1650Saurel32 
14124b8523eeSJan Kiszka     qemu_mutex_lock_iothread();
14134b8523eeSJan Kiszka 
14145cbdb3a3SStefan Weil     /* PowerPC QEMU tracks the various core input pins (interrupt, critical
1415d76d1650Saurel32      * interrupt, reset, etc) in PPC-specific env->irq_input_state. */
1416fc87e185SAlexander Graf     if (!cap_interrupt_level &&
1417fc87e185SAlexander Graf         run->ready_for_interrupt_injection &&
1418259186a7SAndreas Färber         (cs->interrupt_request & CPU_INTERRUPT_HARD) &&
141916415335SAlexander Graf         (env->irq_input_state & (1 << PPC_INPUT_INT)))
1420d76d1650Saurel32     {
1421d76d1650Saurel32         /* For now KVM disregards the 'irq' argument. However, in the
1422d76d1650Saurel32          * future KVM could cache it in-kernel to avoid a heavyweight exit
1423d76d1650Saurel32          * when reading the UIC.
1424d76d1650Saurel32          */
1425fc87e185SAlexander Graf         irq = KVM_INTERRUPT_SET;
1426d76d1650Saurel32 
1427da56ff91SPeter Maydell         DPRINTF("injected interrupt %d\n", irq);
14281bc22652SAndreas Färber         r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &irq);
142955e5c285SAndreas Färber         if (r < 0) {
143055e5c285SAndreas Färber             printf("cpu %d fail inject %x\n", cs->cpu_index, irq);
143155e5c285SAndreas Färber         }
1432c821c2bdSAlexander Graf 
1433c821c2bdSAlexander Graf         /* Always wake up soon in case the interrupt was level based */
1434bc72ad67SAlex Bligh         timer_mod(idle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
143573bcb24dSRutuja Shah                        (NANOSECONDS_PER_SECOND / 50));
1436d76d1650Saurel32     }
1437d76d1650Saurel32 
1438d76d1650Saurel32     /* We don't know if there are more interrupts pending after this. However,
1439d76d1650Saurel32      * the guest will return to userspace in the course of handling this one
1440d76d1650Saurel32      * anyway, so we will get a chance to deliver the rest. */
14414b8523eeSJan Kiszka 
14424b8523eeSJan Kiszka     qemu_mutex_unlock_iothread();
1443d76d1650Saurel32 }
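
/*
 * Illustrative sketch only (hypothetical helper): without level-triggered
 * interrupt support in the kernel, the injection above is edge-style, so
 * kvm_arch_pre_run() re-arms idle_timer roughly 20 ms out
 * (NANOSECONDS_PER_SECOND / 50) to revisit a possibly still-asserted line.
 */
static inline int64_t kvmppc_irq_poll_deadline_example(void)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
           NANOSECONDS_PER_SECOND / 50;   /* same deadline as timer_mod() above */
}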
1444d76d1650Saurel32 
14454c663752SPaolo Bonzini MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
1446d76d1650Saurel32 {
14474c663752SPaolo Bonzini     return MEMTXATTRS_UNSPECIFIED;
1448d76d1650Saurel32 }
1449d76d1650Saurel32 
145020d695a9SAndreas Färber int kvm_arch_process_async_events(CPUState *cs)
14510af691d7SMarcelo Tosatti {
1452259186a7SAndreas Färber     return cs->halted;
14530af691d7SMarcelo Tosatti }
14540af691d7SMarcelo Tosatti 
1455259186a7SAndreas Färber static int kvmppc_handle_halt(PowerPCCPU *cpu)
1456d76d1650Saurel32 {
1457259186a7SAndreas Färber     CPUState *cs = CPU(cpu);
1458259186a7SAndreas Färber     CPUPPCState *env = &cpu->env;
1459259186a7SAndreas Färber 
1460259186a7SAndreas Färber     if (!(cs->interrupt_request & CPU_INTERRUPT_HARD) && (msr_ee)) {
1461259186a7SAndreas Färber         cs->halted = 1;
146227103424SAndreas Färber         cs->exception_index = EXCP_HLT;
1463d76d1650Saurel32     }
1464d76d1650Saurel32 
1465bb4ea393SJan Kiszka     return 0;
1466d76d1650Saurel32 }
1467d76d1650Saurel32 
1468d76d1650Saurel32 /* map dcr access to existing qemu dcr emulation */
14691328c2bfSAndreas Färber static int kvmppc_handle_dcr_read(CPUPPCState *env, uint32_t dcrn, uint32_t *data)
1470d76d1650Saurel32 {
1471d76d1650Saurel32     if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0)
1472d76d1650Saurel32         fprintf(stderr, "Read to unhandled DCR (0x%x)\n", dcrn);
1473d76d1650Saurel32 
1474bb4ea393SJan Kiszka     return 0;
1475d76d1650Saurel32 }
1476d76d1650Saurel32 
14771328c2bfSAndreas Färber static int kvmppc_handle_dcr_write(CPUPPCState *env, uint32_t dcrn, uint32_t data)
1478d76d1650Saurel32 {
1479d76d1650Saurel32     if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0)
1480d76d1650Saurel32         fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);
1481d76d1650Saurel32 
1482bb4ea393SJan Kiszka     return 0;
1483d76d1650Saurel32 }
1484d76d1650Saurel32 
14858a0548f9SBharat Bhushan int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
14868a0548f9SBharat Bhushan {
14878a0548f9SBharat Bhushan     /* Mixed endian case is not handled */
14888a0548f9SBharat Bhushan     uint32_t sc = debug_inst_opcode;
14898a0548f9SBharat Bhushan 
14908a0548f9SBharat Bhushan     if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
14918a0548f9SBharat Bhushan                             sizeof(sc), 0) ||
14928a0548f9SBharat Bhushan         cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 1)) {
14938a0548f9SBharat Bhushan         return -EINVAL;
14948a0548f9SBharat Bhushan     }
14958a0548f9SBharat Bhushan 
14968a0548f9SBharat Bhushan     return 0;
14978a0548f9SBharat Bhushan }
14988a0548f9SBharat Bhushan 
14998a0548f9SBharat Bhushan int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
15008a0548f9SBharat Bhushan {
15018a0548f9SBharat Bhushan     uint32_t sc;
15028a0548f9SBharat Bhushan 
15038a0548f9SBharat Bhushan     if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 0) ||
15048a0548f9SBharat Bhushan         sc != debug_inst_opcode ||
15058a0548f9SBharat Bhushan         cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
15068a0548f9SBharat Bhushan                             sizeof(sc), 1)) {
15078a0548f9SBharat Bhushan         return -EINVAL;
15088a0548f9SBharat Bhushan     }
15098a0548f9SBharat Bhushan 
15108a0548f9SBharat Bhushan     return 0;
15118a0548f9SBharat Bhushan }
15128a0548f9SBharat Bhushan 
151388365d17SBharat Bhushan static int find_hw_breakpoint(target_ulong addr, int type)
151488365d17SBharat Bhushan {
151588365d17SBharat Bhushan     int n;
151688365d17SBharat Bhushan 
151788365d17SBharat Bhushan     assert((nb_hw_breakpoint + nb_hw_watchpoint)
151888365d17SBharat Bhushan            <= ARRAY_SIZE(hw_debug_points));
151988365d17SBharat Bhushan 
152088365d17SBharat Bhushan     for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
152188365d17SBharat Bhushan         if (hw_debug_points[n].addr == addr &&
152288365d17SBharat Bhushan              hw_debug_points[n].type == type) {
152388365d17SBharat Bhushan             return n;
152488365d17SBharat Bhushan         }
152588365d17SBharat Bhushan     }
152688365d17SBharat Bhushan 
152788365d17SBharat Bhushan     return -1;
152888365d17SBharat Bhushan }
152988365d17SBharat Bhushan 
153088365d17SBharat Bhushan static int find_hw_watchpoint(target_ulong addr, int *flag)
153188365d17SBharat Bhushan {
153288365d17SBharat Bhushan     int n;
153388365d17SBharat Bhushan 
153488365d17SBharat Bhushan     n = find_hw_breakpoint(addr, GDB_WATCHPOINT_ACCESS);
153588365d17SBharat Bhushan     if (n >= 0) {
153688365d17SBharat Bhushan         *flag = BP_MEM_ACCESS;
153788365d17SBharat Bhushan         return n;
153888365d17SBharat Bhushan     }
153988365d17SBharat Bhushan 
154088365d17SBharat Bhushan     n = find_hw_breakpoint(addr, GDB_WATCHPOINT_WRITE);
154188365d17SBharat Bhushan     if (n >= 0) {
154288365d17SBharat Bhushan         *flag = BP_MEM_WRITE;
154388365d17SBharat Bhushan         return n;
154488365d17SBharat Bhushan     }
154588365d17SBharat Bhushan 
154688365d17SBharat Bhushan     n = find_hw_breakpoint(addr, GDB_WATCHPOINT_READ);
154788365d17SBharat Bhushan     if (n >= 0) {
154888365d17SBharat Bhushan         *flag = BP_MEM_READ;
154988365d17SBharat Bhushan         return n;
155088365d17SBharat Bhushan     }
155188365d17SBharat Bhushan 
155288365d17SBharat Bhushan     return -1;
155388365d17SBharat Bhushan }
155488365d17SBharat Bhushan 
155588365d17SBharat Bhushan int kvm_arch_insert_hw_breakpoint(target_ulong addr,
155688365d17SBharat Bhushan                                   target_ulong len, int type)
155788365d17SBharat Bhushan {
155888365d17SBharat Bhushan     if ((nb_hw_breakpoint + nb_hw_watchpoint) >= ARRAY_SIZE(hw_debug_points)) {
155988365d17SBharat Bhushan         return -ENOBUFS;
156088365d17SBharat Bhushan     }
156188365d17SBharat Bhushan 
156288365d17SBharat Bhushan     hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].addr = addr;
156388365d17SBharat Bhushan     hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].type = type;
156488365d17SBharat Bhushan 
156588365d17SBharat Bhushan     switch (type) {
156688365d17SBharat Bhushan     case GDB_BREAKPOINT_HW:
156788365d17SBharat Bhushan         if (nb_hw_breakpoint >= max_hw_breakpoint) {
156888365d17SBharat Bhushan             return -ENOBUFS;
156988365d17SBharat Bhushan         }
157088365d17SBharat Bhushan 
157188365d17SBharat Bhushan         if (find_hw_breakpoint(addr, type) >= 0) {
157288365d17SBharat Bhushan             return -EEXIST;
157388365d17SBharat Bhushan         }
157488365d17SBharat Bhushan 
157588365d17SBharat Bhushan         nb_hw_breakpoint++;
157688365d17SBharat Bhushan         break;
157788365d17SBharat Bhushan 
157888365d17SBharat Bhushan     case GDB_WATCHPOINT_WRITE:
157988365d17SBharat Bhushan     case GDB_WATCHPOINT_READ:
158088365d17SBharat Bhushan     case GDB_WATCHPOINT_ACCESS:
158188365d17SBharat Bhushan         if (nb_hw_watchpoint >= max_hw_watchpoint) {
158288365d17SBharat Bhushan             return -ENOBUFS;
158388365d17SBharat Bhushan         }
158488365d17SBharat Bhushan 
158588365d17SBharat Bhushan         if (find_hw_breakpoint(addr, type) >= 0) {
158688365d17SBharat Bhushan             return -EEXIST;
158788365d17SBharat Bhushan         }
158888365d17SBharat Bhushan 
158988365d17SBharat Bhushan         nb_hw_watchpoint++;
159088365d17SBharat Bhushan         break;
159188365d17SBharat Bhushan 
159288365d17SBharat Bhushan     default:
159388365d17SBharat Bhushan         return -ENOSYS;
159488365d17SBharat Bhushan     }
159588365d17SBharat Bhushan 
159688365d17SBharat Bhushan     return 0;
159788365d17SBharat Bhushan }
159888365d17SBharat Bhushan 
159988365d17SBharat Bhushan int kvm_arch_remove_hw_breakpoint(target_ulong addr,
160088365d17SBharat Bhushan                                   target_ulong len, int type)
160188365d17SBharat Bhushan {
160288365d17SBharat Bhushan     int n;
160388365d17SBharat Bhushan 
160488365d17SBharat Bhushan     n = find_hw_breakpoint(addr, type);
160588365d17SBharat Bhushan     if (n < 0) {
160688365d17SBharat Bhushan         return -ENOENT;
160788365d17SBharat Bhushan     }
160888365d17SBharat Bhushan 
160988365d17SBharat Bhushan     switch (type) {
161088365d17SBharat Bhushan     case GDB_BREAKPOINT_HW:
161188365d17SBharat Bhushan         nb_hw_breakpoint--;
161288365d17SBharat Bhushan         break;
161388365d17SBharat Bhushan 
161488365d17SBharat Bhushan     case GDB_WATCHPOINT_WRITE:
161588365d17SBharat Bhushan     case GDB_WATCHPOINT_READ:
161688365d17SBharat Bhushan     case GDB_WATCHPOINT_ACCESS:
161788365d17SBharat Bhushan         nb_hw_watchpoint--;
161888365d17SBharat Bhushan         break;
161988365d17SBharat Bhushan 
162088365d17SBharat Bhushan     default:
162188365d17SBharat Bhushan         return -ENOSYS;
162288365d17SBharat Bhushan     }
162388365d17SBharat Bhushan     hw_debug_points[n] = hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint];
162488365d17SBharat Bhushan 
162588365d17SBharat Bhushan     return 0;
162688365d17SBharat Bhushan }
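
/*
 * Illustrative sketch only (hypothetical helper on a plain int array): the
 * last line of kvm_arch_remove_hw_breakpoint() uses the classic O(1) removal
 * trick for an unordered array - overwrite the freed slot with the last live
 * entry instead of shifting the tail down.  That is safe here because
 * find_hw_breakpoint() only ever does a linear scan, so ordering is irrelevant.
 */
static inline void unordered_remove_example(int *arr, int *len, int idx)
{
    arr[idx] = arr[--(*len)];   /* move the last element into the hole */
}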
162788365d17SBharat Bhushan 
162888365d17SBharat Bhushan void kvm_arch_remove_all_hw_breakpoints(void)
162988365d17SBharat Bhushan {
163088365d17SBharat Bhushan     nb_hw_breakpoint = nb_hw_watchpoint = 0;
163188365d17SBharat Bhushan }
163288365d17SBharat Bhushan 
16338a0548f9SBharat Bhushan void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
16348a0548f9SBharat Bhushan {
163588365d17SBharat Bhushan     int n;
163688365d17SBharat Bhushan 
16378a0548f9SBharat Bhushan     /* Software Breakpoint updates */
16388a0548f9SBharat Bhushan     if (kvm_sw_breakpoints_active(cs)) {
16398a0548f9SBharat Bhushan         dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
16408a0548f9SBharat Bhushan     }
164188365d17SBharat Bhushan 
164288365d17SBharat Bhushan     assert((nb_hw_breakpoint + nb_hw_watchpoint)
164388365d17SBharat Bhushan            <= ARRAY_SIZE(hw_debug_points));
164488365d17SBharat Bhushan     assert((nb_hw_breakpoint + nb_hw_watchpoint) <= ARRAY_SIZE(dbg->arch.bp));
164588365d17SBharat Bhushan 
164688365d17SBharat Bhushan     if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
164788365d17SBharat Bhushan         dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
164888365d17SBharat Bhushan         memset(dbg->arch.bp, 0, sizeof(dbg->arch.bp));
164988365d17SBharat Bhushan         for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
165088365d17SBharat Bhushan             switch (hw_debug_points[n].type) {
165188365d17SBharat Bhushan             case GDB_BREAKPOINT_HW:
165288365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_BREAKPOINT;
165388365d17SBharat Bhushan                 break;
165488365d17SBharat Bhushan             case GDB_WATCHPOINT_WRITE:
165588365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE;
165688365d17SBharat Bhushan                 break;
165788365d17SBharat Bhushan             case GDB_WATCHPOINT_READ:
165888365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_READ;
165988365d17SBharat Bhushan                 break;
166088365d17SBharat Bhushan             case GDB_WATCHPOINT_ACCESS:
166188365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE |
166288365d17SBharat Bhushan                                         KVMPPC_DEBUG_WATCH_READ;
166388365d17SBharat Bhushan                 break;
166488365d17SBharat Bhushan             default:
166588365d17SBharat Bhushan                 cpu_abort(cs, "Unsupported breakpoint type\n");
166688365d17SBharat Bhushan             }
166788365d17SBharat Bhushan             dbg->arch.bp[n].addr = hw_debug_points[n].addr;
166888365d17SBharat Bhushan         }
166988365d17SBharat Bhushan     }
16708a0548f9SBharat Bhushan }
16718a0548f9SBharat Bhushan 
16728a0548f9SBharat Bhushan static int kvm_handle_debug(PowerPCCPU *cpu, struct kvm_run *run)
16738a0548f9SBharat Bhushan {
16748a0548f9SBharat Bhushan     CPUState *cs = CPU(cpu);
16758a0548f9SBharat Bhushan     CPUPPCState *env = &cpu->env;
16768a0548f9SBharat Bhushan     struct kvm_debug_exit_arch *arch_info = &run->debug.arch;
16778a0548f9SBharat Bhushan     int handle = 0;
167888365d17SBharat Bhushan     int n;
167988365d17SBharat Bhushan     int flag = 0;
16808a0548f9SBharat Bhushan 
168188365d17SBharat Bhushan     if (cs->singlestep_enabled) {
168288365d17SBharat Bhushan         handle = 1;
168388365d17SBharat Bhushan     } else if (arch_info->status) {
168488365d17SBharat Bhushan         if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
168588365d17SBharat Bhushan             if (arch_info->status & KVMPPC_DEBUG_BREAKPOINT) {
168688365d17SBharat Bhushan                 n = find_hw_breakpoint(arch_info->address, GDB_BREAKPOINT_HW);
168788365d17SBharat Bhushan                 if (n >= 0) {
168888365d17SBharat Bhushan                     handle = 1;
168988365d17SBharat Bhushan                 }
169088365d17SBharat Bhushan             } else if (arch_info->status & (KVMPPC_DEBUG_WATCH_READ |
169188365d17SBharat Bhushan                                             KVMPPC_DEBUG_WATCH_WRITE)) {
169288365d17SBharat Bhushan                 n = find_hw_watchpoint(arch_info->address,  &flag);
169388365d17SBharat Bhushan                 if (n >= 0) {
169488365d17SBharat Bhushan                     handle = 1;
169588365d17SBharat Bhushan                     cs->watchpoint_hit = &hw_watchpoint;
169688365d17SBharat Bhushan                     hw_watchpoint.vaddr = hw_debug_points[n].addr;
169788365d17SBharat Bhushan                     hw_watchpoint.flags = flag;
169888365d17SBharat Bhushan                 }
169988365d17SBharat Bhushan             }
170088365d17SBharat Bhushan         }
170188365d17SBharat Bhushan     } else if (kvm_find_sw_breakpoint(cs, arch_info->address)) {
17028a0548f9SBharat Bhushan         handle = 1;
17038a0548f9SBharat Bhushan     } else {
17048a0548f9SBharat Bhushan         /* QEMU is not able to handle this debug exception, so inject a
17058a0548f9SBharat Bhushan          * program exception into the guest;
17068a0548f9SBharat Bhushan          * yes, a program exception, NOT a debug exception!
170788365d17SBharat Bhushan          * When QEMU is using the debug resources, the debug exception must
170888365d17SBharat Bhushan          * always stay enabled.  To achieve this we set MSR_DE and also set
170988365d17SBharat Bhushan          * MSRP_DEP so the guest cannot change MSR_DE.
171088365d17SBharat Bhushan          * When emulating debug resources for the guest we want the guest
171188365d17SBharat Bhushan          * to control MSR_DE (enable/disable the debug interrupt on demand).
171288365d17SBharat Bhushan          * Supporting both configurations is NOT possible.
171388365d17SBharat Bhushan          * So the result is that we cannot share debug resources
171488365d17SBharat Bhushan          * between QEMU and the guest on BookE architectures.
171588365d17SBharat Bhushan          * In the current design QEMU gets priority over the guest;
171688365d17SBharat Bhushan          * this means that if QEMU is using the debug resources then the
171788365d17SBharat Bhushan          * guest cannot use them.
17188a0548f9SBharat Bhushan          * For software breakpoints QEMU uses a privileged instruction,
17198a0548f9SBharat Bhushan          * so we cannot be here because the guest set up a debug
17208a0548f9SBharat Bhushan          * exception; the only possibility is that the guest executed a
17218a0548f9SBharat Bhushan          * privileged / illegal instruction, and that is why we are
17228a0548f9SBharat Bhushan          * injecting a program interrupt.
17238a0548f9SBharat Bhushan          */
17248a0548f9SBharat Bhushan 
17258a0548f9SBharat Bhushan         cpu_synchronize_state(cs);
17268a0548f9SBharat Bhushan         /* env->nip is PC, so increment this by 4 to use
17278a0548f9SBharat Bhushan          * ppc_cpu_do_interrupt(), which sets srr0 = env->nip - 4.
17288a0548f9SBharat Bhushan          */
17298a0548f9SBharat Bhushan         env->nip += 4;
17308a0548f9SBharat Bhushan         cs->exception_index = POWERPC_EXCP_PROGRAM;
17318a0548f9SBharat Bhushan         env->error_code = POWERPC_EXCP_INVAL;
17328a0548f9SBharat Bhushan         ppc_cpu_do_interrupt(cs);
17338a0548f9SBharat Bhushan     }
17348a0548f9SBharat Bhushan 
17358a0548f9SBharat Bhushan     return handle;
17368a0548f9SBharat Bhushan }
17378a0548f9SBharat Bhushan 
173820d695a9SAndreas Färber int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
1739d76d1650Saurel32 {
174020d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
174120d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
1742bb4ea393SJan Kiszka     int ret;
1743d76d1650Saurel32 
17444b8523eeSJan Kiszka     qemu_mutex_lock_iothread();
17454b8523eeSJan Kiszka 
1746d76d1650Saurel32     switch (run->exit_reason) {
1747d76d1650Saurel32     case KVM_EXIT_DCR:
1748d76d1650Saurel32         if (run->dcr.is_write) {
1749da56ff91SPeter Maydell             DPRINTF("handle dcr write\n");
1750d76d1650Saurel32             ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data);
1751d76d1650Saurel32         } else {
1752da56ff91SPeter Maydell             DPRINTF("handle dcr read\n");
1753d76d1650Saurel32             ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data);
1754d76d1650Saurel32         }
1755d76d1650Saurel32         break;
1756d76d1650Saurel32     case KVM_EXIT_HLT:
1757da56ff91SPeter Maydell         DPRINTF("handle halt\n");
1758259186a7SAndreas Färber         ret = kvmppc_handle_halt(cpu);
1759d76d1650Saurel32         break;
1760c6304a4aSDavid Gibson #if defined(TARGET_PPC64)
1761f61b4bedSAlexander Graf     case KVM_EXIT_PAPR_HCALL:
1762da56ff91SPeter Maydell         DPRINTF("handle PAPR hypercall\n");
176320d695a9SAndreas Färber         run->papr_hcall.ret = spapr_hypercall(cpu,
1764aa100fa4SAndreas Färber                                               run->papr_hcall.nr,
1765f61b4bedSAlexander Graf                                               run->papr_hcall.args);
176678e8fde2SDavid Gibson         ret = 0;
1767f61b4bedSAlexander Graf         break;
1768f61b4bedSAlexander Graf #endif
17695b95b8b9SAlexander Graf     case KVM_EXIT_EPR:
1770da56ff91SPeter Maydell         DPRINTF("handle epr\n");
1771933b19eaSAlexander Graf         run->epr.epr = ldl_phys(cs->as, env->mpic_iack);
17725b95b8b9SAlexander Graf         ret = 0;
17735b95b8b9SAlexander Graf         break;
177431f2cb8fSBharat Bhushan     case KVM_EXIT_WATCHDOG:
1775da56ff91SPeter Maydell         DPRINTF("handle watchdog expiry\n");
177631f2cb8fSBharat Bhushan         watchdog_perform_action();
177731f2cb8fSBharat Bhushan         ret = 0;
177831f2cb8fSBharat Bhushan         break;
177931f2cb8fSBharat Bhushan 
17808a0548f9SBharat Bhushan     case KVM_EXIT_DEBUG:
17818a0548f9SBharat Bhushan         DPRINTF("handle debug exception\n");
17828a0548f9SBharat Bhushan         if (kvm_handle_debug(cpu, run)) {
17838a0548f9SBharat Bhushan             ret = EXCP_DEBUG;
17848a0548f9SBharat Bhushan             break;
17858a0548f9SBharat Bhushan         }
17868a0548f9SBharat Bhushan         /* re-enter, this exception was guest-internal */
17878a0548f9SBharat Bhushan         ret = 0;
17888a0548f9SBharat Bhushan         break;
17898a0548f9SBharat Bhushan 
179073aaec4aSJan Kiszka     default:
179173aaec4aSJan Kiszka         fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
179273aaec4aSJan Kiszka         ret = -1;
179373aaec4aSJan Kiszka         break;
1794d76d1650Saurel32     }
1795d76d1650Saurel32 
17964b8523eeSJan Kiszka     qemu_mutex_unlock_iothread();
1797d76d1650Saurel32     return ret;
1798d76d1650Saurel32 }
1799d76d1650Saurel32 
180031f2cb8fSBharat Bhushan int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
180131f2cb8fSBharat Bhushan {
180231f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
180331f2cb8fSBharat Bhushan     uint32_t bits = tsr_bits;
180431f2cb8fSBharat Bhushan     struct kvm_one_reg reg = {
180531f2cb8fSBharat Bhushan         .id = KVM_REG_PPC_OR_TSR,
180631f2cb8fSBharat Bhushan         .addr = (uintptr_t) &bits,
180731f2cb8fSBharat Bhushan     };
180831f2cb8fSBharat Bhushan 
180931f2cb8fSBharat Bhushan     return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
181031f2cb8fSBharat Bhushan }
181131f2cb8fSBharat Bhushan 
181231f2cb8fSBharat Bhushan int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
181331f2cb8fSBharat Bhushan {
181431f2cb8fSBharat Bhushan 
181531f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
181631f2cb8fSBharat Bhushan     uint32_t bits = tsr_bits;
181731f2cb8fSBharat Bhushan     struct kvm_one_reg reg = {
181831f2cb8fSBharat Bhushan         .id = KVM_REG_PPC_CLEAR_TSR,
181931f2cb8fSBharat Bhushan         .addr = (uintptr_t) &bits,
182031f2cb8fSBharat Bhushan     };
182131f2cb8fSBharat Bhushan 
182231f2cb8fSBharat Bhushan     return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
182331f2cb8fSBharat Bhushan }
182431f2cb8fSBharat Bhushan 
182531f2cb8fSBharat Bhushan int kvmppc_set_tcr(PowerPCCPU *cpu)
182631f2cb8fSBharat Bhushan {
182731f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
182831f2cb8fSBharat Bhushan     CPUPPCState *env = &cpu->env;
182931f2cb8fSBharat Bhushan     uint32_t tcr = env->spr[SPR_BOOKE_TCR];
183031f2cb8fSBharat Bhushan 
183131f2cb8fSBharat Bhushan     struct kvm_one_reg reg = {
183231f2cb8fSBharat Bhushan         .id = KVM_REG_PPC_TCR,
183331f2cb8fSBharat Bhushan         .addr = (uintptr_t) &tcr,
183431f2cb8fSBharat Bhushan     };
183531f2cb8fSBharat Bhushan 
183631f2cb8fSBharat Bhushan     return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
183731f2cb8fSBharat Bhushan }
183831f2cb8fSBharat Bhushan 
183931f2cb8fSBharat Bhushan int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu)
184031f2cb8fSBharat Bhushan {
184131f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
184231f2cb8fSBharat Bhushan     int ret;
184331f2cb8fSBharat Bhushan 
184431f2cb8fSBharat Bhushan     if (!kvm_enabled()) {
184531f2cb8fSBharat Bhushan         return -1;
184631f2cb8fSBharat Bhushan     }
184731f2cb8fSBharat Bhushan 
184831f2cb8fSBharat Bhushan     if (!cap_ppc_watchdog) {
184931f2cb8fSBharat Bhushan         printf("warning: KVM does not support watchdog\n");
185031f2cb8fSBharat Bhushan         return -1;
185131f2cb8fSBharat Bhushan     }
185231f2cb8fSBharat Bhushan 
185348add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_BOOKE_WATCHDOG, 0);
185431f2cb8fSBharat Bhushan     if (ret < 0) {
185531f2cb8fSBharat Bhushan         fprintf(stderr, "%s: couldn't enable KVM_CAP_PPC_BOOKE_WATCHDOG: %s\n",
185631f2cb8fSBharat Bhushan                 __func__, strerror(-ret));
185731f2cb8fSBharat Bhushan         return ret;
185831f2cb8fSBharat Bhushan     }
185931f2cb8fSBharat Bhushan 
186031f2cb8fSBharat Bhushan     return ret;
186131f2cb8fSBharat Bhushan }
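
/*
 * Illustrative sketch only: how a caller might combine the watchdog helpers
 * above.  The call sequence here is an assumption for illustration, not the
 * actual QEMU wiring; guest TSR updates would additionally be forwarded with
 * kvmppc_or_tsr_bits() / kvmppc_clear_tsr_bits().
 */
static inline void kvmppc_watchdog_setup_example(PowerPCCPU *cpu)
{
    if (kvmppc_booke_watchdog_enable(cpu) < 0) {
        return;              /* no in-kernel watchdog, fall back to emulation */
    }
    kvmppc_set_tcr(cpu);     /* push the guest's SPR_BOOKE_TCR into KVM */
}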
186231f2cb8fSBharat Bhushan 
1863dc333cd6SAlexander Graf static int read_cpuinfo(const char *field, char *value, int len)
1864dc333cd6SAlexander Graf {
1865dc333cd6SAlexander Graf     FILE *f;
1866dc333cd6SAlexander Graf     int ret = -1;
1867dc333cd6SAlexander Graf     int field_len = strlen(field);
1868dc333cd6SAlexander Graf     char line[512];
1869dc333cd6SAlexander Graf 
1870dc333cd6SAlexander Graf     f = fopen("/proc/cpuinfo", "r");
1871dc333cd6SAlexander Graf     if (!f) {
1872dc333cd6SAlexander Graf         return -1;
1873dc333cd6SAlexander Graf     }
1874dc333cd6SAlexander Graf 
1875dc333cd6SAlexander Graf     do {
1876dc333cd6SAlexander Graf         if (!fgets(line, sizeof(line), f)) {
1877dc333cd6SAlexander Graf             break;
1878dc333cd6SAlexander Graf         }
1879dc333cd6SAlexander Graf         if (!strncmp(line, field, field_len)) {
1880ae215068SJim Meyering             pstrcpy(value, len, line);
1881dc333cd6SAlexander Graf             ret = 0;
1882dc333cd6SAlexander Graf             break;
1883dc333cd6SAlexander Graf         }
1884dc333cd6SAlexander Graf     } while (*line);
1885dc333cd6SAlexander Graf 
1886dc333cd6SAlexander Graf     fclose(f);
1887dc333cd6SAlexander Graf 
1888dc333cd6SAlexander Graf     return ret;
1889dc333cd6SAlexander Graf }
1890dc333cd6SAlexander Graf 
1891dc333cd6SAlexander Graf uint32_t kvmppc_get_tbfreq(void)
1892dc333cd6SAlexander Graf {
1893dc333cd6SAlexander Graf     char line[512];
1894dc333cd6SAlexander Graf     char *ns;
189573bcb24dSRutuja Shah     uint32_t retval = NANOSECONDS_PER_SECOND;
1896dc333cd6SAlexander Graf 
1897dc333cd6SAlexander Graf     if (read_cpuinfo("timebase", line, sizeof(line))) {
1898dc333cd6SAlexander Graf         return retval;
1899dc333cd6SAlexander Graf     }
1900dc333cd6SAlexander Graf 
1901dc333cd6SAlexander Graf     if (!(ns = strchr(line, ':'))) {
1902dc333cd6SAlexander Graf         return retval;
1903dc333cd6SAlexander Graf     }
1904dc333cd6SAlexander Graf 
1905dc333cd6SAlexander Graf     ns++;
1906dc333cd6SAlexander Graf 
1907f9b8e7f6SShraddha Barke     return atoi(ns);
1908ef951443SNikunj A Dadhania }
1909ef951443SNikunj A Dadhania 
1910ef951443SNikunj A Dadhania bool kvmppc_get_host_serial(char **value)
1911ef951443SNikunj A Dadhania {
1912ef951443SNikunj A Dadhania     return g_file_get_contents("/proc/device-tree/system-id", value, NULL,
1913ef951443SNikunj A Dadhania                                NULL);
1914ef951443SNikunj A Dadhania }
1915ef951443SNikunj A Dadhania 
1916ef951443SNikunj A Dadhania bool kvmppc_get_host_model(char **value)
1917ef951443SNikunj A Dadhania {
1918ef951443SNikunj A Dadhania     return g_file_get_contents("/proc/device-tree/model", value, NULL, NULL);
1919dc333cd6SAlexander Graf }
19204513d923SGleb Natapov 
1921eadaada1SAlexander Graf /* Try to find a device tree node for a CPU with clock-frequency property */
1922eadaada1SAlexander Graf static int kvmppc_find_cpu_dt(char *buf, int buf_len)
1923eadaada1SAlexander Graf {
1924eadaada1SAlexander Graf     struct dirent *dirp;
1925eadaada1SAlexander Graf     DIR *dp;
1926eadaada1SAlexander Graf 
1927eadaada1SAlexander Graf     if ((dp = opendir(PROC_DEVTREE_CPU)) == NULL) {
1928eadaada1SAlexander Graf         printf("Can't open directory " PROC_DEVTREE_CPU "\n");
1929eadaada1SAlexander Graf         return -1;
1930eadaada1SAlexander Graf     }
1931eadaada1SAlexander Graf 
1932eadaada1SAlexander Graf     buf[0] = '\0';
1933eadaada1SAlexander Graf     while ((dirp = readdir(dp)) != NULL) {
1934eadaada1SAlexander Graf         FILE *f;
1935eadaada1SAlexander Graf         snprintf(buf, buf_len, "%s%s/clock-frequency", PROC_DEVTREE_CPU,
1936eadaada1SAlexander Graf                  dirp->d_name);
1937eadaada1SAlexander Graf         f = fopen(buf, "r");
1938eadaada1SAlexander Graf         if (f) {
1939eadaada1SAlexander Graf             snprintf(buf, buf_len, "%s%s", PROC_DEVTREE_CPU, dirp->d_name);
1940eadaada1SAlexander Graf             fclose(f);
1941eadaada1SAlexander Graf             break;
1942eadaada1SAlexander Graf         }
1943eadaada1SAlexander Graf         buf[0] = '\0';
1944eadaada1SAlexander Graf     }
1945eadaada1SAlexander Graf     closedir(dp);
1946eadaada1SAlexander Graf     if (buf[0] == '\0') {
1947eadaada1SAlexander Graf         printf("Unknown host!\n");
1948eadaada1SAlexander Graf         return -1;
1949eadaada1SAlexander Graf     }
1950eadaada1SAlexander Graf 
1951eadaada1SAlexander Graf     return 0;
1952eadaada1SAlexander Graf }
1953eadaada1SAlexander Graf 
19547d94a30bSSukadev Bhattiprolu static uint64_t kvmppc_read_int_dt(const char *filename)
1955eadaada1SAlexander Graf {
19569bc884b7SDavid Gibson     union {
19579bc884b7SDavid Gibson         uint32_t v32;
19589bc884b7SDavid Gibson         uint64_t v64;
19599bc884b7SDavid Gibson     } u;
1960eadaada1SAlexander Graf     FILE *f;
1961eadaada1SAlexander Graf     int len;
1962eadaada1SAlexander Graf 
19637d94a30bSSukadev Bhattiprolu     f = fopen(filename, "rb");
1964eadaada1SAlexander Graf     if (!f) {
1965eadaada1SAlexander Graf         return -1;
1966eadaada1SAlexander Graf     }
1967eadaada1SAlexander Graf 
19689bc884b7SDavid Gibson     len = fread(&u, 1, sizeof(u), f);
1969eadaada1SAlexander Graf     fclose(f);
1970eadaada1SAlexander Graf     switch (len) {
19719bc884b7SDavid Gibson     case 4:
19729bc884b7SDavid Gibson         /* property is a 32-bit quantity */
19739bc884b7SDavid Gibson         return be32_to_cpu(u.v32);
19749bc884b7SDavid Gibson     case 8:
19759bc884b7SDavid Gibson         return be64_to_cpu(u.v64);
1976eadaada1SAlexander Graf     }
1977eadaada1SAlexander Graf 
1978eadaada1SAlexander Graf     return 0;
1979eadaada1SAlexander Graf }
1980eadaada1SAlexander Graf 
19817d94a30bSSukadev Bhattiprolu /* Read a CPU node property from the host device tree that's a single
19827d94a30bSSukadev Bhattiprolu  * integer (32-bit or 64-bit).  Returns 0 if anything goes wrong
19837d94a30bSSukadev Bhattiprolu  * (can't find or open the property, or doesn't understand the
19847d94a30bSSukadev Bhattiprolu  * format) */
19857d94a30bSSukadev Bhattiprolu static uint64_t kvmppc_read_int_cpu_dt(const char *propname)
19867d94a30bSSukadev Bhattiprolu {
19877d94a30bSSukadev Bhattiprolu     char buf[PATH_MAX], *tmp;
19887d94a30bSSukadev Bhattiprolu     uint64_t val;
19897d94a30bSSukadev Bhattiprolu 
19907d94a30bSSukadev Bhattiprolu     if (kvmppc_find_cpu_dt(buf, sizeof(buf))) {
19917d94a30bSSukadev Bhattiprolu         return -1;
19927d94a30bSSukadev Bhattiprolu     }
19937d94a30bSSukadev Bhattiprolu 
19947d94a30bSSukadev Bhattiprolu     tmp = g_strdup_printf("%s/%s", buf, propname);
19957d94a30bSSukadev Bhattiprolu     val = kvmppc_read_int_dt(tmp);
19967d94a30bSSukadev Bhattiprolu     g_free(tmp);
19977d94a30bSSukadev Bhattiprolu 
19987d94a30bSSukadev Bhattiprolu     return val;
19997d94a30bSSukadev Bhattiprolu }
20007d94a30bSSukadev Bhattiprolu 
20019bc884b7SDavid Gibson uint64_t kvmppc_get_clockfreq(void)
20029bc884b7SDavid Gibson {
20039bc884b7SDavid Gibson     return kvmppc_read_int_cpu_dt("clock-frequency");
20049bc884b7SDavid Gibson }
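
/*
 * Illustrative sketch only (hypothetical helper): kvmppc_read_int_cpu_dt()
 * can read any single-integer CPU property the same way.
 * "timebase-frequency" is used purely as an example here - this file
 * actually derives the timebase from /proc/cpuinfo in kvmppc_get_tbfreq().
 */
static inline uint64_t kvmppc_get_dt_tbfreq_example(void)
{
    return kvmppc_read_int_cpu_dt("timebase-frequency");
}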
20059bc884b7SDavid Gibson 
20061a61a9aeSStuart Yoder static int kvmppc_get_pvinfo(CPUPPCState *env, struct kvm_ppc_pvinfo *pvinfo)
200745024f09SAlexander Graf {
2008a60f24b5SAndreas Färber     PowerPCCPU *cpu = ppc_env_get_cpu(env);
2009a60f24b5SAndreas Färber     CPUState *cs = CPU(cpu);
201045024f09SAlexander Graf 
20116fd33a75SAlexander Graf     if (kvm_vm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
20121a61a9aeSStuart Yoder         !kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_PVINFO, pvinfo)) {
20131a61a9aeSStuart Yoder         return 0;
20141a61a9aeSStuart Yoder     }
201545024f09SAlexander Graf 
20161a61a9aeSStuart Yoder     return 1;
20171a61a9aeSStuart Yoder }
20181a61a9aeSStuart Yoder 
20191a61a9aeSStuart Yoder int kvmppc_get_hasidle(CPUPPCState *env)
20201a61a9aeSStuart Yoder {
20211a61a9aeSStuart Yoder     struct kvm_ppc_pvinfo pvinfo;
20221a61a9aeSStuart Yoder 
20231a61a9aeSStuart Yoder     if (!kvmppc_get_pvinfo(env, &pvinfo) &&
20241a61a9aeSStuart Yoder         (pvinfo.flags & KVM_PPC_PVINFO_FLAGS_EV_IDLE)) {
20251a61a9aeSStuart Yoder         return 1;
20261a61a9aeSStuart Yoder     }
20271a61a9aeSStuart Yoder 
20281a61a9aeSStuart Yoder     return 0;
20291a61a9aeSStuart Yoder }
20301a61a9aeSStuart Yoder 
20311a61a9aeSStuart Yoder int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
20321a61a9aeSStuart Yoder {
20331a61a9aeSStuart Yoder     uint32_t *hc = (uint32_t *)buf;
20341a61a9aeSStuart Yoder     struct kvm_ppc_pvinfo pvinfo;
20351a61a9aeSStuart Yoder 
20361a61a9aeSStuart Yoder     if (!kvmppc_get_pvinfo(env, &pvinfo)) {
20371a61a9aeSStuart Yoder         memcpy(buf, pvinfo.hcall, buf_len);
203845024f09SAlexander Graf         return 0;
203945024f09SAlexander Graf     }
204045024f09SAlexander Graf 
204145024f09SAlexander Graf     /*
2042d13fc32eSAlexander Graf      * Fall back to hypercalls that always fail, regardless of guest endianness:
204345024f09SAlexander Graf      *
2044d13fc32eSAlexander Graf      *     tdi 0,r0,72 (becomes b .+8 in wrong endian, nop in good endian)
204545024f09SAlexander Graf      *     li r3, -1
2046d13fc32eSAlexander Graf      *     b .+8       (becomes nop in wrong endian)
2047d13fc32eSAlexander Graf      *     bswap32(li r3, -1)
204845024f09SAlexander Graf      */
204945024f09SAlexander Graf 
2050d13fc32eSAlexander Graf     hc[0] = cpu_to_be32(0x08000048);
2051d13fc32eSAlexander Graf     hc[1] = cpu_to_be32(0x3860ffff);
2052d13fc32eSAlexander Graf     hc[2] = cpu_to_be32(0x48000008);
2053d13fc32eSAlexander Graf     hc[3] = cpu_to_be32(bswap32(0x3860ffff));
205445024f09SAlexander Graf 
20550ddbd053SAlexey Kardashevskiy     return 1;
205645024f09SAlexander Graf }
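
/*
 * Caller sketch (hedged; the actual callers live outside this file): the
 * buffer must be at least 16 bytes, since the fallback path above always
 * writes four instructions.  Board code typically exposes the sequence to
 * the guest, e.g. via a device tree property:
 *
 *     uint8_t hypercall[16];
 *
 *     kvmppc_get_hypercall(env, hypercall, sizeof(hypercall));
 *     // copy 'hypercall' into the guest device tree, e.g. "hcall-instructions"
 */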
205745024f09SAlexander Graf 
2058026bfd89SDavid Gibson static inline int kvmppc_enable_hcall(KVMState *s, target_ulong hcall)
2059026bfd89SDavid Gibson {
2060026bfd89SDavid Gibson     return kvm_vm_enable_cap(s, KVM_CAP_PPC_ENABLE_HCALL, 0, hcall, 1);
2061026bfd89SDavid Gibson }
2062026bfd89SDavid Gibson 
2063026bfd89SDavid Gibson void kvmppc_enable_logical_ci_hcalls(void)
2064026bfd89SDavid Gibson {
2065026bfd89SDavid Gibson     /*
2066026bfd89SDavid Gibson      * FIXME: it would be nice to detect the case where a device
2067026bfd89SDavid Gibson      * requires the in-kernel implementation of these hcalls but the
2068026bfd89SDavid Gibson      * kernel lacks it, and to produce a warning in that case.
2070026bfd89SDavid Gibson      */
2071026bfd89SDavid Gibson     kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_LOAD);
2072026bfd89SDavid Gibson     kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_STORE);
2073026bfd89SDavid Gibson }
2074026bfd89SDavid Gibson 
2075ef9971ddSAlexey Kardashevskiy void kvmppc_enable_set_mode_hcall(void)
2076ef9971ddSAlexey Kardashevskiy {
2077ef9971ddSAlexey Kardashevskiy     kvmppc_enable_hcall(kvm_state, H_SET_MODE);
2078ef9971ddSAlexey Kardashevskiy }
2079ef9971ddSAlexey Kardashevskiy 
20805145ad4fSNathan Whitehorn void kvmppc_enable_clear_ref_mod_hcalls(void)
20815145ad4fSNathan Whitehorn {
20825145ad4fSNathan Whitehorn     kvmppc_enable_hcall(kvm_state, H_CLEAR_REF);
20835145ad4fSNathan Whitehorn     kvmppc_enable_hcall(kvm_state, H_CLEAR_MOD);
20845145ad4fSNathan Whitehorn }
20855145ad4fSNathan Whitehorn 
20861bc22652SAndreas Färber void kvmppc_set_papr(PowerPCCPU *cpu)
2087f61b4bedSAlexander Graf {
20881bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
2089f61b4bedSAlexander Graf     int ret;
2090f61b4bedSAlexander Graf 
2091da20aed1SDavid Gibson     if (!kvm_enabled()) {
2092da20aed1SDavid Gibson         return;
2093da20aed1SDavid Gibson     }
2094da20aed1SDavid Gibson 
209548add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_PAPR, 0);
2096f61b4bedSAlexander Graf     if (ret) {
2097072ed5f2SThomas Huth         error_report("This vCPU type or KVM version does not support PAPR");
2098072ed5f2SThomas Huth         exit(1);
2099f61b4bedSAlexander Graf     }
21009b00ea49SDavid Gibson 
21019b00ea49SDavid Gibson     /* Update the capability flag so we sync the right information
21029b00ea49SDavid Gibson      * with KVM */
21039b00ea49SDavid Gibson     cap_papr = 1;
2104f1af19d7SDavid Gibson }
2105f61b4bedSAlexander Graf 
2106d6e166c0SDavid Gibson int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr)
21076db5bb0fSAlexey Kardashevskiy {
2108d6e166c0SDavid Gibson     return kvm_set_one_reg(CPU(cpu), KVM_REG_PPC_ARCH_COMPAT, &compat_pvr);
21096db5bb0fSAlexey Kardashevskiy }
21106db5bb0fSAlexey Kardashevskiy 
21115b95b8b9SAlexander Graf void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy)
21125b95b8b9SAlexander Graf {
21135b95b8b9SAlexander Graf     CPUState *cs = CPU(cpu);
21145b95b8b9SAlexander Graf     int ret;
21155b95b8b9SAlexander Graf 
211648add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_EPR, 0, mpic_proxy);
21175b95b8b9SAlexander Graf     if (ret && mpic_proxy) {
2118072ed5f2SThomas Huth         error_report("This KVM version does not support EPR");
2119072ed5f2SThomas Huth         exit(1);
21205b95b8b9SAlexander Graf     }
21215b95b8b9SAlexander Graf }
21225b95b8b9SAlexander Graf 
2123e97c3636SDavid Gibson int kvmppc_smt_threads(void)
2124e97c3636SDavid Gibson {
2125e97c3636SDavid Gibson     return cap_ppc_smt ? cap_ppc_smt : 1;
2126e97c3636SDavid Gibson }
2127e97c3636SDavid Gibson 
2128fa98fbfcSSam Bobroff int kvmppc_set_smt_threads(int smt)
2129fa98fbfcSSam Bobroff {
2130fa98fbfcSSam Bobroff     int ret;
2131fa98fbfcSSam Bobroff 
2132fa98fbfcSSam Bobroff     ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_SMT, 0, smt, 0);
2133fa98fbfcSSam Bobroff     if (!ret) {
2134fa98fbfcSSam Bobroff         cap_ppc_smt = smt;
2135fa98fbfcSSam Bobroff     }
2136fa98fbfcSSam Bobroff     return ret;
2137fa98fbfcSSam Bobroff }
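
/*
 * Hedged usage sketch (the real callers live outside this file): a machine
 * type would typically try to apply its chosen threads-per-core value and,
 * on failure, attach the hint below to the error it reports, e.g.
 *
 *     Error *local_err = NULL;
 *
 *     if (kvmppc_set_smt_threads(smt)) {
 *         error_setg(&local_err, "Failed to set VSMT mode to %d", smt);
 *         kvmppc_hint_smt_possible(&local_err);
 *     }
 */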
2138fa98fbfcSSam Bobroff 
2139fa98fbfcSSam Bobroff void kvmppc_hint_smt_possible(Error **errp)
2140fa98fbfcSSam Bobroff {
2141fa98fbfcSSam Bobroff     int i;
2142fa98fbfcSSam Bobroff     GString *g;
2143fa98fbfcSSam Bobroff     char *s;
2144fa98fbfcSSam Bobroff 
2145fa98fbfcSSam Bobroff     assert(kvm_enabled());
2146fa98fbfcSSam Bobroff     if (cap_ppc_smt_possible) {
2147fa98fbfcSSam Bobroff         g = g_string_new("Available VSMT modes:");
2148fa98fbfcSSam Bobroff         for (i = 63; i >= 0; i--) {
2149fa98fbfcSSam Bobroff             if ((1UL << i) & cap_ppc_smt_possible) {
2150fa98fbfcSSam Bobroff                 g_string_append_printf(g, " %lu", (1UL << i));
2151fa98fbfcSSam Bobroff             }
2152fa98fbfcSSam Bobroff         }
2153fa98fbfcSSam Bobroff         s = g_string_free(g, false);
2154fa98fbfcSSam Bobroff         error_append_hint(errp, "%s.\n", s);
2155fa98fbfcSSam Bobroff         g_free(s);
2156fa98fbfcSSam Bobroff     } else {
2157fa98fbfcSSam Bobroff         error_append_hint(errp,
2158fa98fbfcSSam Bobroff                           "This KVM seems to be too old to support VSMT.\n");
2159fa98fbfcSSam Bobroff     }
2160fa98fbfcSSam Bobroff }
2161fa98fbfcSSam Bobroff 
2162fa98fbfcSSam Bobroff 
21637f763a5dSDavid Gibson #ifdef TARGET_PPC64
21647f763a5dSDavid Gibson uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift)
21657f763a5dSDavid Gibson {
2166f36951c1SDavid Gibson     struct kvm_ppc_smmu_info info;
2167f36951c1SDavid Gibson     long rampagesize, best_page_shift;
2168f36951c1SDavid Gibson     int i;
2169f36951c1SDavid Gibson 
2170f36951c1SDavid Gibson     /* Find the largest hardware supported page size that's less than
2171f36951c1SDavid Gibson      * or equal to the (logical) backing page size of guest RAM */
2172182735efSAndreas Färber     kvm_get_smmu_info(POWERPC_CPU(first_cpu), &info);
21739c607668SAlexey Kardashevskiy     rampagesize = qemu_getrampagesize();
2174f36951c1SDavid Gibson     best_page_shift = 0;
2175f36951c1SDavid Gibson 
2176f36951c1SDavid Gibson     for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
2177f36951c1SDavid Gibson         struct kvm_ppc_one_seg_page_size *sps = &info.sps[i];
2178f36951c1SDavid Gibson 
2179f36951c1SDavid Gibson         if (!sps->page_shift) {
2180f36951c1SDavid Gibson             continue;
2181f36951c1SDavid Gibson         }
2182f36951c1SDavid Gibson 
2183f36951c1SDavid Gibson         if ((sps->page_shift > best_page_shift)
2184f36951c1SDavid Gibson             && ((1UL << sps->page_shift) <= rampagesize)) {
2185f36951c1SDavid Gibson             best_page_shift = sps->page_shift;
2186f36951c1SDavid Gibson         }
2187f36951c1SDavid Gibson     }
2188f36951c1SDavid Gibson 
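    /*
     * Assumed rationale for the "- 7" below (not stated in the original
     * comment): the hash table is 2^hash_shift bytes of 128-byte (2^7)
     * HPTE groups, and the real mode area is sized so that it needs no
     * more than about one group per page, i.e. the RMA is capped at
     * 2^(hash_shift - 7) pages of 2^best_page_shift bytes each.
     */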
21897f763a5dSDavid Gibson     return MIN(current_size,
2190f36951c1SDavid Gibson                1ULL << (best_page_shift + hash_shift - 7));
21917f763a5dSDavid Gibson }
21927f763a5dSDavid Gibson #endif
21937f763a5dSDavid Gibson 
2194da95324eSAlexey Kardashevskiy bool kvmppc_spapr_use_multitce(void)
2195da95324eSAlexey Kardashevskiy {
2196da95324eSAlexey Kardashevskiy     return cap_spapr_multitce;
2197da95324eSAlexey Kardashevskiy }
2198da95324eSAlexey Kardashevskiy 
21993dc410aeSAlexey Kardashevskiy int kvmppc_spapr_enable_inkernel_multitce(void)
22003dc410aeSAlexey Kardashevskiy {
22013dc410aeSAlexey Kardashevskiy     int ret;
22023dc410aeSAlexey Kardashevskiy 
22033dc410aeSAlexey Kardashevskiy     ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_ENABLE_HCALL, 0,
22043dc410aeSAlexey Kardashevskiy                             H_PUT_TCE_INDIRECT, 1);
22053dc410aeSAlexey Kardashevskiy     if (!ret) {
22063dc410aeSAlexey Kardashevskiy         ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_ENABLE_HCALL, 0,
22073dc410aeSAlexey Kardashevskiy                                 H_STUFF_TCE, 1);
22083dc410aeSAlexey Kardashevskiy     }
22093dc410aeSAlexey Kardashevskiy 
22103dc410aeSAlexey Kardashevskiy     return ret;
22113dc410aeSAlexey Kardashevskiy }
22123dc410aeSAlexey Kardashevskiy 
2213d6ee2a7cSAlexey Kardashevskiy void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift,
2214d6ee2a7cSAlexey Kardashevskiy                               uint64_t bus_offset, uint32_t nb_table,
2215d6ee2a7cSAlexey Kardashevskiy                               int *pfd, bool need_vfio)
22160f5cb298SDavid Gibson {
22170f5cb298SDavid Gibson     long len;
22180f5cb298SDavid Gibson     int fd;
22190f5cb298SDavid Gibson     void *table;
22200f5cb298SDavid Gibson 
2221b5aec396SDavid Gibson     /* Must set fd to -1 so we don't try to munmap when called for
2222b5aec396SDavid Gibson      * destroying the table, which the upper layers -will- do
2223b5aec396SDavid Gibson      */
2224b5aec396SDavid Gibson     *pfd = -1;
22256a81dd17SDavid Gibson     if (!cap_spapr_tce || (need_vfio && !cap_spapr_vfio)) {
22260f5cb298SDavid Gibson         return NULL;
22270f5cb298SDavid Gibson     }
22280f5cb298SDavid Gibson 
2229d6ee2a7cSAlexey Kardashevskiy     if (cap_spapr_tce_64) {
2230d6ee2a7cSAlexey Kardashevskiy         struct kvm_create_spapr_tce_64 args = {
2231d6ee2a7cSAlexey Kardashevskiy             .liobn = liobn,
2232d6ee2a7cSAlexey Kardashevskiy             .page_shift = page_shift,
2233d6ee2a7cSAlexey Kardashevskiy             .offset = bus_offset >> page_shift,
2234d6ee2a7cSAlexey Kardashevskiy             .size = nb_table,
2235d6ee2a7cSAlexey Kardashevskiy             .flags = 0
2236d6ee2a7cSAlexey Kardashevskiy         };
2237d6ee2a7cSAlexey Kardashevskiy         fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE_64, &args);
2238d6ee2a7cSAlexey Kardashevskiy         if (fd < 0) {
2239d6ee2a7cSAlexey Kardashevskiy             fprintf(stderr,
2240d6ee2a7cSAlexey Kardashevskiy                     "KVM: Failed to create TCE64 table for liobn 0x%x\n",
2241d6ee2a7cSAlexey Kardashevskiy                     liobn);
2242d6ee2a7cSAlexey Kardashevskiy             return NULL;
2243d6ee2a7cSAlexey Kardashevskiy         }
2244d6ee2a7cSAlexey Kardashevskiy     } else if (cap_spapr_tce) {
2245d6ee2a7cSAlexey Kardashevskiy         uint64_t window_size = (uint64_t) nb_table << page_shift;
2246d6ee2a7cSAlexey Kardashevskiy         struct kvm_create_spapr_tce args = {
2247d6ee2a7cSAlexey Kardashevskiy             .liobn = liobn,
2248d6ee2a7cSAlexey Kardashevskiy             .window_size = window_size,
2249d6ee2a7cSAlexey Kardashevskiy         };
2250d6ee2a7cSAlexey Kardashevskiy         if ((window_size != args.window_size) || bus_offset) {
2251d6ee2a7cSAlexey Kardashevskiy             return NULL;
2252d6ee2a7cSAlexey Kardashevskiy         }
22530f5cb298SDavid Gibson         fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE, &args);
22540f5cb298SDavid Gibson         if (fd < 0) {
2255b5aec396SDavid Gibson             fprintf(stderr, "KVM: Failed to create TCE table for liobn 0x%x\n",
2256b5aec396SDavid Gibson                     liobn);
22570f5cb298SDavid Gibson             return NULL;
22580f5cb298SDavid Gibson         }
2259d6ee2a7cSAlexey Kardashevskiy     } else {
2260d6ee2a7cSAlexey Kardashevskiy         return NULL;
2261d6ee2a7cSAlexey Kardashevskiy     }
22620f5cb298SDavid Gibson 
2263d6ee2a7cSAlexey Kardashevskiy     len = nb_table * sizeof(uint64_t);
22640f5cb298SDavid Gibson     /* FIXME: round this up to page size */
22650f5cb298SDavid Gibson 
226674b41e56SDavid Gibson     table = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
22670f5cb298SDavid Gibson     if (table == MAP_FAILED) {
2268b5aec396SDavid Gibson         fprintf(stderr, "KVM: Failed to map TCE table for liobn 0x%x\n",
2269b5aec396SDavid Gibson                 liobn);
22700f5cb298SDavid Gibson         close(fd);
22710f5cb298SDavid Gibson         return NULL;
22720f5cb298SDavid Gibson     }
22730f5cb298SDavid Gibson 
22740f5cb298SDavid Gibson     *pfd = fd;
22750f5cb298SDavid Gibson     return table;
22760f5cb298SDavid Gibson }
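
/*
 * Note (added for clarity): the (table, fd, nb_table) triple returned by
 * kvmppc_create_spapr_tce() is exactly what kvmppc_remove_spapr_tce() below
 * expects back; with fd == -1 the remove path simply returns -1 without
 * unmapping anything, which matches the "must set fd to -1" comment above.
 */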
22770f5cb298SDavid Gibson 
2278523e7b8aSAlexey Kardashevskiy int kvmppc_remove_spapr_tce(void *table, int fd, uint32_t nb_table)
22790f5cb298SDavid Gibson {
22800f5cb298SDavid Gibson     long len;
22810f5cb298SDavid Gibson 
22820f5cb298SDavid Gibson     if (fd < 0) {
22830f5cb298SDavid Gibson         return -1;
22840f5cb298SDavid Gibson     }
22850f5cb298SDavid Gibson 
2286523e7b8aSAlexey Kardashevskiy     len = nb_table * sizeof(uint64_t);
22870f5cb298SDavid Gibson     if ((munmap(table, len) < 0) ||
22880f5cb298SDavid Gibson         (close(fd) < 0)) {
2289b5aec396SDavid Gibson         fprintf(stderr, "KVM: Unexpected error removing TCE table: %s",
2290b5aec396SDavid Gibson                 strerror(errno));
22910f5cb298SDavid Gibson         /* Leak the table */
22920f5cb298SDavid Gibson     }
22930f5cb298SDavid Gibson 
22940f5cb298SDavid Gibson     return 0;
22950f5cb298SDavid Gibson }
22960f5cb298SDavid Gibson 
22977f763a5dSDavid Gibson int kvmppc_reset_htab(int shift_hint)
22987f763a5dSDavid Gibson {
22997f763a5dSDavid Gibson     uint32_t shift = shift_hint;
23007f763a5dSDavid Gibson 
2301ace9a2cbSDavid Gibson     if (!kvm_enabled()) {
2302ace9a2cbSDavid Gibson         /* Full emulation, tell caller to allocate htab itself */
2303ace9a2cbSDavid Gibson         return 0;
2304ace9a2cbSDavid Gibson     }
23056977afdaSGreg Kurz     if (kvm_vm_check_extension(kvm_state, KVM_CAP_PPC_ALLOC_HTAB)) {
23067f763a5dSDavid Gibson         int ret;
23077f763a5dSDavid Gibson         ret = kvm_vm_ioctl(kvm_state, KVM_PPC_ALLOCATE_HTAB, &shift);
2308ace9a2cbSDavid Gibson         if (ret == -ENOTTY) {
2309ace9a2cbSDavid Gibson             /* At least some versions of PR KVM advertise the
2310ace9a2cbSDavid Gibson              * capability, but don't implement the ioctl().  Oops.
2311ace9a2cbSDavid Gibson              * Return 0 so that we allocate the htab in qemu, as is
2312ace9a2cbSDavid Gibson              * correct for PR. */
2313ace9a2cbSDavid Gibson             return 0;
2314ace9a2cbSDavid Gibson         } else if (ret < 0) {
23157f763a5dSDavid Gibson             return ret;
23167f763a5dSDavid Gibson         }
23177f763a5dSDavid Gibson         return shift;
23187f763a5dSDavid Gibson     }
23197f763a5dSDavid Gibson 
2320ace9a2cbSDavid Gibson     /* We have a kernel that predates the htab reset calls.  For PR
2321ace9a2cbSDavid Gibson      * KVM, we need to allocate the htab ourselves; an HV KVM of this
232296c9cff0SThomas Huth      * era has already allocated a fixed 16MB hash table. */
232396c9cff0SThomas Huth     if (kvmppc_is_pr(kvm_state)) {
2324ace9a2cbSDavid Gibson         /* PR - tell caller to allocate htab */
23257f763a5dSDavid Gibson         return 0;
2326ace9a2cbSDavid Gibson     } else {
2327ace9a2cbSDavid Gibson         /* HV - assume 16MB kernel allocated htab */
2328ace9a2cbSDavid Gibson         return 24;
2329ace9a2cbSDavid Gibson     }
23307f763a5dSDavid Gibson }
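
/*
 * Summary of kvmppc_reset_htab() return values (derived from the code
 * above): < 0 is an error, 0 means the caller (QEMU) must allocate the
 * hash table itself, and > 0 is log2 of the hash table size the kernel
 * has allocated (or kept) on our behalf.
 */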
23317f763a5dSDavid Gibson 
2332a1e98583SDavid Gibson static inline uint32_t mfpvr(void)
2333a1e98583SDavid Gibson {
2334a1e98583SDavid Gibson     uint32_t pvr;
2335a1e98583SDavid Gibson 
2336a1e98583SDavid Gibson     asm ("mfpvr %0"
2337a1e98583SDavid Gibson          : "=r"(pvr));
2338a1e98583SDavid Gibson     return pvr;
2339a1e98583SDavid Gibson }
2340a1e98583SDavid Gibson 
2341a7342588SDavid Gibson static void alter_insns(uint64_t *word, uint64_t flags, bool on)
2342a7342588SDavid Gibson {
2343a7342588SDavid Gibson     if (on) {
2344a7342588SDavid Gibson         *word |= flags;
2345a7342588SDavid Gibson     } else {
2346a7342588SDavid Gibson         *word &= ~flags;
2347a7342588SDavid Gibson     }
2348a7342588SDavid Gibson }
2349a7342588SDavid Gibson 
23502985b86bSAndreas Färber static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data)
23512985b86bSAndreas Färber {
23522985b86bSAndreas Färber     PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
23530cbad81fSDavid Gibson     uint32_t dcache_size = kvmppc_read_int_cpu_dt("d-cache-size");
23540cbad81fSDavid Gibson     uint32_t icache_size = kvmppc_read_int_cpu_dt("i-cache-size");
2355a1e98583SDavid Gibson 
2356cfe34f44SAndreas Färber     /* Now fix up the class with information we can query from the host */
23573bc9ccc0SAlexey Kardashevskiy     pcc->pvr = mfpvr();
2358a7342588SDavid Gibson 
23593f2ca480SDavid Gibson     alter_insns(&pcc->insns_flags, PPC_ALTIVEC,
23603f2ca480SDavid Gibson                 qemu_getauxval(AT_HWCAP) & PPC_FEATURE_HAS_ALTIVEC);
23613f2ca480SDavid Gibson     alter_insns(&pcc->insns_flags2, PPC2_VSX,
23623f2ca480SDavid Gibson                 qemu_getauxval(AT_HWCAP) & PPC_FEATURE_HAS_VSX);
23633f2ca480SDavid Gibson     alter_insns(&pcc->insns_flags2, PPC2_DFP,
23643f2ca480SDavid Gibson                 qemu_getauxval(AT_HWCAP) & PPC_FEATURE_HAS_DFP);
23650cbad81fSDavid Gibson 
23660cbad81fSDavid Gibson     if (dcache_size != -1) {
23670cbad81fSDavid Gibson         pcc->l1_dcache_size = dcache_size;
23680cbad81fSDavid Gibson     }
23690cbad81fSDavid Gibson 
23700cbad81fSDavid Gibson     if (icache_size != -1) {
23710cbad81fSDavid Gibson         pcc->l1_icache_size = icache_size;
23720cbad81fSDavid Gibson     }
2373c64abd1fSSam Bobroff 
2374c64abd1fSSam Bobroff #if defined(TARGET_PPC64)
2375c64abd1fSSam Bobroff     pcc->radix_page_info = kvm_get_radix_page_info();
23765f3066d8SDavid Gibson 
23775f3066d8SDavid Gibson     if ((pcc->pvr & 0xffffff00) == CPU_POWERPC_POWER9_DD1) {
23785f3066d8SDavid Gibson         /*
23795f3066d8SDavid Gibson          * POWER9 DD1 has some bugs which make it not really ISA 3.00
23805f3066d8SDavid Gibson          * compliant.  More importantly, advertising ISA 3.00
23815f3066d8SDavid Gibson          * architected mode may prevent guests from activating
23825f3066d8SDavid Gibson          * necessary DD1 workarounds.
23835f3066d8SDavid Gibson          */
23845f3066d8SDavid Gibson         pcc->pcr_supported &= ~(PCR_COMPAT_3_00 | PCR_COMPAT_2_07
23855f3066d8SDavid Gibson                                 | PCR_COMPAT_2_06 | PCR_COMPAT_2_05);
23865f3066d8SDavid Gibson     }
2387c64abd1fSSam Bobroff #endif /* defined(TARGET_PPC64) */
2388a1e98583SDavid Gibson }
2389a1e98583SDavid Gibson 
23903b961124SStuart Yoder bool kvmppc_has_cap_epr(void)
23913b961124SStuart Yoder {
23923b961124SStuart Yoder     return cap_epr;
23933b961124SStuart Yoder }
23943b961124SStuart Yoder 
239587a91de6SAlexander Graf bool kvmppc_has_cap_fixup_hcalls(void)
239687a91de6SAlexander Graf {
239787a91de6SAlexander Graf     return cap_fixup_hcalls;
239887a91de6SAlexander Graf }
239987a91de6SAlexander Graf 
2400bac3bf28SThomas Huth bool kvmppc_has_cap_htm(void)
2401bac3bf28SThomas Huth {
2402bac3bf28SThomas Huth     return cap_htm;
2403bac3bf28SThomas Huth }
2404bac3bf28SThomas Huth 
2405cf1c4cceSSam Bobroff bool kvmppc_has_cap_mmu_radix(void)
2406cf1c4cceSSam Bobroff {
2407cf1c4cceSSam Bobroff     return cap_mmu_radix;
2408cf1c4cceSSam Bobroff }
2409cf1c4cceSSam Bobroff 
2410cf1c4cceSSam Bobroff bool kvmppc_has_cap_mmu_hash_v3(void)
2411cf1c4cceSSam Bobroff {
2412cf1c4cceSSam Bobroff     return cap_mmu_hash_v3;
2413cf1c4cceSSam Bobroff }
2414cf1c4cceSSam Bobroff 
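
/*
 * The parse_cap_ppc_safe_*() helpers below reduce the bits returned by
 * KVM_PPC_GET_CPU_CHAR to a small tri-state value: 0 when no mitigation
 * is available, 1 when a software workaround applies, and 2 (or the
 * SPAPR_CAP_FIXED_* constants, which are assumed to encode the same idea)
 * when the hardware behaviour is already safe.
 */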
2415*8fea7044SSuraj Jitindar Singh static int parse_cap_ppc_safe_cache(struct kvm_ppc_cpu_char c)
2416*8fea7044SSuraj Jitindar Singh {
2417*8fea7044SSuraj Jitindar Singh     if (~c.behaviour & c.behaviour_mask & H_CPU_BEHAV_L1D_FLUSH_PR) {
2418*8fea7044SSuraj Jitindar Singh         return 2;
2419*8fea7044SSuraj Jitindar Singh     } else if ((c.character & c.character_mask & H_CPU_CHAR_L1D_THREAD_PRIV) &&
2420*8fea7044SSuraj Jitindar Singh                (c.character & c.character_mask
2421*8fea7044SSuraj Jitindar Singh                 & (H_CPU_CHAR_L1D_FLUSH_ORI30 | H_CPU_CHAR_L1D_FLUSH_TRIG2))) {
2422*8fea7044SSuraj Jitindar Singh         return 1;
2423*8fea7044SSuraj Jitindar Singh     }
2424*8fea7044SSuraj Jitindar Singh 
2425*8fea7044SSuraj Jitindar Singh     return 0;
2426*8fea7044SSuraj Jitindar Singh }
2427*8fea7044SSuraj Jitindar Singh 
2428*8fea7044SSuraj Jitindar Singh static int parse_cap_ppc_safe_bounds_check(struct kvm_ppc_cpu_char c)
2429*8fea7044SSuraj Jitindar Singh {
2430*8fea7044SSuraj Jitindar Singh     if (~c.behaviour & c.behaviour_mask & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR) {
2431*8fea7044SSuraj Jitindar Singh         return 2;
2432*8fea7044SSuraj Jitindar Singh     } else if (c.character & c.character_mask & H_CPU_CHAR_SPEC_BAR_ORI31) {
2433*8fea7044SSuraj Jitindar Singh         return 1;
2434*8fea7044SSuraj Jitindar Singh     }
2435*8fea7044SSuraj Jitindar Singh 
2436*8fea7044SSuraj Jitindar Singh     return 0;
2437*8fea7044SSuraj Jitindar Singh }
2438*8fea7044SSuraj Jitindar Singh 
2439*8fea7044SSuraj Jitindar Singh static int parse_cap_ppc_safe_indirect_branch(struct kvm_ppc_cpu_char c)
2440*8fea7044SSuraj Jitindar Singh {
2441*8fea7044SSuraj Jitindar Singh     if (c.character & c.character_mask & H_CPU_CHAR_CACHE_COUNT_DIS) {
2442*8fea7044SSuraj Jitindar Singh         return SPAPR_CAP_FIXED_CCD;
2443*8fea7044SSuraj Jitindar Singh     } else if (c.character & c.character_mask & H_CPU_CHAR_BCCTRL_SERIALISED) {
2444*8fea7044SSuraj Jitindar Singh         return SPAPR_CAP_FIXED_IBS;
2445*8fea7044SSuraj Jitindar Singh     }
2446*8fea7044SSuraj Jitindar Singh 
2447*8fea7044SSuraj Jitindar Singh     return 0;
2448*8fea7044SSuraj Jitindar Singh }
2449*8fea7044SSuraj Jitindar Singh 
24508acc2ae5SSuraj Jitindar Singh static void kvmppc_get_cpu_characteristics(KVMState *s)
24518acc2ae5SSuraj Jitindar Singh {
24528acc2ae5SSuraj Jitindar Singh     struct kvm_ppc_cpu_char c;
24538acc2ae5SSuraj Jitindar Singh     int ret;
24548acc2ae5SSuraj Jitindar Singh 
24558acc2ae5SSuraj Jitindar Singh     /* Assume broken */
24568acc2ae5SSuraj Jitindar Singh     cap_ppc_safe_cache = 0;
24578acc2ae5SSuraj Jitindar Singh     cap_ppc_safe_bounds_check = 0;
24588acc2ae5SSuraj Jitindar Singh     cap_ppc_safe_indirect_branch = 0;
24598acc2ae5SSuraj Jitindar Singh 
24608acc2ae5SSuraj Jitindar Singh     ret = kvm_vm_check_extension(s, KVM_CAP_PPC_GET_CPU_CHAR);
24618acc2ae5SSuraj Jitindar Singh     if (!ret) {
24628acc2ae5SSuraj Jitindar Singh         return;
24638acc2ae5SSuraj Jitindar Singh     }
24648acc2ae5SSuraj Jitindar Singh     ret = kvm_vm_ioctl(s, KVM_PPC_GET_CPU_CHAR, &c);
24658acc2ae5SSuraj Jitindar Singh     if (ret < 0) {
24668acc2ae5SSuraj Jitindar Singh         return;
24678acc2ae5SSuraj Jitindar Singh     }
2468*8fea7044SSuraj Jitindar Singh 
2469*8fea7044SSuraj Jitindar Singh     cap_ppc_safe_cache = parse_cap_ppc_safe_cache(c);
2470*8fea7044SSuraj Jitindar Singh     cap_ppc_safe_bounds_check = parse_cap_ppc_safe_bounds_check(c);
2471*8fea7044SSuraj Jitindar Singh     cap_ppc_safe_indirect_branch = parse_cap_ppc_safe_indirect_branch(c);
24728acc2ae5SSuraj Jitindar Singh }
24738acc2ae5SSuraj Jitindar Singh 
24748acc2ae5SSuraj Jitindar Singh int kvmppc_get_cap_safe_cache(void)
24758acc2ae5SSuraj Jitindar Singh {
24768acc2ae5SSuraj Jitindar Singh     return cap_ppc_safe_cache;
24778acc2ae5SSuraj Jitindar Singh }
24788acc2ae5SSuraj Jitindar Singh 
24798acc2ae5SSuraj Jitindar Singh int kvmppc_get_cap_safe_bounds_check(void)
24808acc2ae5SSuraj Jitindar Singh {
24818acc2ae5SSuraj Jitindar Singh     return cap_ppc_safe_bounds_check;
24828acc2ae5SSuraj Jitindar Singh }
24838acc2ae5SSuraj Jitindar Singh 
24848acc2ae5SSuraj Jitindar Singh int kvmppc_get_cap_safe_indirect_branch(void)
24858acc2ae5SSuraj Jitindar Singh {
24868acc2ae5SSuraj Jitindar Singh     return cap_ppc_safe_indirect_branch;
24878acc2ae5SSuraj Jitindar Singh }
24888acc2ae5SSuraj Jitindar Singh 
24899ded780cSAlexey Kardashevskiy bool kvmppc_has_cap_spapr_vfio(void)
24909ded780cSAlexey Kardashevskiy {
24919ded780cSAlexey Kardashevskiy     return cap_spapr_vfio;
24929ded780cSAlexey Kardashevskiy }
24939ded780cSAlexey Kardashevskiy 
249452b2519cSThomas Huth PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void)
249552b2519cSThomas Huth {
249652b2519cSThomas Huth     uint32_t host_pvr = mfpvr();
249752b2519cSThomas Huth     PowerPCCPUClass *pvr_pcc;
249852b2519cSThomas Huth 
249952b2519cSThomas Huth     pvr_pcc = ppc_cpu_class_by_pvr(host_pvr);
250052b2519cSThomas Huth     if (pvr_pcc == NULL) {
250152b2519cSThomas Huth         pvr_pcc = ppc_cpu_class_by_pvr_mask(host_pvr);
250252b2519cSThomas Huth     }
250352b2519cSThomas Huth 
250452b2519cSThomas Huth     return pvr_pcc;
250552b2519cSThomas Huth }
250652b2519cSThomas Huth 
25072e9c10ebSIgor Mammedov static int kvm_ppc_register_host_cpu_type(MachineState *ms)
25085ba4576bSAndreas Färber {
25095ba4576bSAndreas Färber     TypeInfo type_info = {
25105ba4576bSAndreas Färber         .name = TYPE_HOST_POWERPC_CPU,
25115ba4576bSAndreas Färber         .class_init = kvmppc_host_cpu_class_init,
25125ba4576bSAndreas Färber     };
25132e9c10ebSIgor Mammedov     MachineClass *mc = MACHINE_GET_CLASS(ms);
25145ba4576bSAndreas Färber     PowerPCCPUClass *pvr_pcc;
251592e926e1SGreg Kurz     ObjectClass *oc;
25165b79b1caSAlexey Kardashevskiy     DeviceClass *dc;
2517715d4b96SThomas Huth     int i;
25185ba4576bSAndreas Färber 
251952b2519cSThomas Huth     pvr_pcc = kvm_ppc_get_host_cpu_class();
25203bc9ccc0SAlexey Kardashevskiy     if (pvr_pcc == NULL) {
25215ba4576bSAndreas Färber         return -1;
25225ba4576bSAndreas Färber     }
25235ba4576bSAndreas Färber     type_info.parent = object_class_get_name(OBJECT_CLASS(pvr_pcc));
25245ba4576bSAndreas Färber     type_register(&type_info);
25252e9c10ebSIgor Mammedov     if (object_dynamic_cast(OBJECT(ms), TYPE_SPAPR_MACHINE)) {
25262e9c10ebSIgor Mammedov         /* override TCG default cpu type with 'host' cpu model */
25272e9c10ebSIgor Mammedov         mc->default_cpu_type = TYPE_HOST_POWERPC_CPU;
25282e9c10ebSIgor Mammedov     }
25295b79b1caSAlexey Kardashevskiy 
253092e926e1SGreg Kurz     oc = object_class_by_name(type_info.name);
253192e926e1SGreg Kurz     g_assert(oc);
253292e926e1SGreg Kurz 
2533715d4b96SThomas Huth     /*
2534715d4b96SThomas Huth      * Update generic CPU family class alias (e.g. on a POWER8NVL host,
2535715d4b96SThomas Huth      * we want "POWER8" to be a "family" alias that points to the current
2536715d4b96SThomas Huth      * host CPU type, too)
2537715d4b96SThomas Huth      */
2538715d4b96SThomas Huth     dc = DEVICE_CLASS(ppc_cpu_get_family_class(pvr_pcc));
2539715d4b96SThomas Huth     for (i = 0; ppc_cpu_aliases[i].alias != NULL; i++) {
2540c5354f54SIgor Mammedov         if (strcasecmp(ppc_cpu_aliases[i].alias, dc->desc) == 0) {
2541715d4b96SThomas Huth             char *suffix;
2542715d4b96SThomas Huth 
2543715d4b96SThomas Huth             ppc_cpu_aliases[i].model = g_strdup(object_class_get_name(oc));
2544c9137065SIgor Mammedov             suffix = strstr(ppc_cpu_aliases[i].model, POWERPC_CPU_TYPE_SUFFIX);
2545715d4b96SThomas Huth             if (suffix) {
2546715d4b96SThomas Huth                 *suffix = 0;
2547715d4b96SThomas Huth             }
2548715d4b96SThomas Huth             break;
2549715d4b96SThomas Huth         }
2550715d4b96SThomas Huth     }
2551715d4b96SThomas Huth 
25525ba4576bSAndreas Färber     return 0;
25535ba4576bSAndreas Färber }
25545ba4576bSAndreas Färber 
2555feaa64c4SDavid Gibson int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function)
2556feaa64c4SDavid Gibson {
2557feaa64c4SDavid Gibson     struct kvm_rtas_token_args args = {
2558feaa64c4SDavid Gibson         .token = token,
2559feaa64c4SDavid Gibson     };
2560feaa64c4SDavid Gibson 
2561feaa64c4SDavid Gibson     if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_RTAS)) {
2562feaa64c4SDavid Gibson         return -ENOENT;
2563feaa64c4SDavid Gibson     }
2564feaa64c4SDavid Gibson 
2565feaa64c4SDavid Gibson     strncpy(args.name, function, sizeof(args.name));
2566feaa64c4SDavid Gibson 
2567feaa64c4SDavid Gibson     return kvm_vm_ioctl(kvm_state, KVM_PPC_RTAS_DEFINE_TOKEN, &args);
2568feaa64c4SDavid Gibson }
256912b1143bSDavid Gibson 
257014b0d748SGreg Kurz int kvmppc_get_htab_fd(bool write, uint64_t index, Error **errp)
2571e68cb8b4SAlexey Kardashevskiy {
2572e68cb8b4SAlexey Kardashevskiy     struct kvm_get_htab_fd s = {
2573e68cb8b4SAlexey Kardashevskiy         .flags = write ? KVM_GET_HTAB_WRITE : 0,
257414b0d748SGreg Kurz         .start_index = index,
2575e68cb8b4SAlexey Kardashevskiy     };
257682be8e73SGreg Kurz     int ret;
2577e68cb8b4SAlexey Kardashevskiy 
2578e68cb8b4SAlexey Kardashevskiy     if (!cap_htab_fd) {
257914b0d748SGreg Kurz         error_setg(errp, "KVM version doesn't support %s the HPT",
258014b0d748SGreg Kurz                    write ? "writing" : "reading");
258182be8e73SGreg Kurz         return -ENOTSUP;
2582e68cb8b4SAlexey Kardashevskiy     }
2583e68cb8b4SAlexey Kardashevskiy 
258482be8e73SGreg Kurz     ret = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &s);
258582be8e73SGreg Kurz     if (ret < 0) {
258614b0d748SGreg Kurz         error_setg(errp, "Unable to open fd for %s HPT %s KVM: %s",
258714b0d748SGreg Kurz                    write ? "writing" : "reading", write ? "to" : "from",
258814b0d748SGreg Kurz                    strerror(errno));
258982be8e73SGreg Kurz         return -errno;
259082be8e73SGreg Kurz     }
259182be8e73SGreg Kurz 
259282be8e73SGreg Kurz     return ret;
2593e68cb8b4SAlexey Kardashevskiy }
2594e68cb8b4SAlexey Kardashevskiy 
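
/*
 * Record format written by kvmppc_save_htab() and read back by
 * kvmppc_load_htab_chunk() (taken directly from the code below):
 *
 *     be32 index       first HPTE index covered by the record
 *     be16 n_valid     number of valid HPTEs whose contents follow
 *     be16 n_invalid   number of invalidated HPTEs (no data follows)
 *     n_valid * HASH_PTE_SIZE_64 bytes of HPTE contents
 */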
2595e68cb8b4SAlexey Kardashevskiy int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns)
2596e68cb8b4SAlexey Kardashevskiy {
2597bc72ad67SAlex Bligh     int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
2598e68cb8b4SAlexey Kardashevskiy     uint8_t buf[bufsize];
2599e68cb8b4SAlexey Kardashevskiy     ssize_t rc;
2600e68cb8b4SAlexey Kardashevskiy 
2601e68cb8b4SAlexey Kardashevskiy     do {
2602e68cb8b4SAlexey Kardashevskiy         rc = read(fd, buf, bufsize);
2603e68cb8b4SAlexey Kardashevskiy         if (rc < 0) {
2604e68cb8b4SAlexey Kardashevskiy             fprintf(stderr, "Error reading data from KVM HTAB fd: %s\n",
2605e68cb8b4SAlexey Kardashevskiy                     strerror(errno));
2606e68cb8b4SAlexey Kardashevskiy             return rc;
2607e68cb8b4SAlexey Kardashevskiy         } else if (rc) {
2608e094c4c1SCédric Le Goater             uint8_t *buffer = buf;
2609e094c4c1SCédric Le Goater             ssize_t n = rc;
2610e094c4c1SCédric Le Goater             while (n) {
2611e094c4c1SCédric Le Goater                 struct kvm_get_htab_header *head =
2612e094c4c1SCédric Le Goater                     (struct kvm_get_htab_header *) buffer;
2613e094c4c1SCédric Le Goater                 size_t chunksize = sizeof(*head) +
2614e094c4c1SCédric Le Goater                      HASH_PTE_SIZE_64 * head->n_valid;
2615e094c4c1SCédric Le Goater 
2616e094c4c1SCédric Le Goater                 qemu_put_be32(f, head->index);
2617e094c4c1SCédric Le Goater                 qemu_put_be16(f, head->n_valid);
2618e094c4c1SCédric Le Goater                 qemu_put_be16(f, head->n_invalid);
2619e094c4c1SCédric Le Goater                 qemu_put_buffer(f, (void *)(head + 1),
2620e094c4c1SCédric Le Goater                                 HASH_PTE_SIZE_64 * head->n_valid);
2621e094c4c1SCédric Le Goater 
2622e094c4c1SCédric Le Goater                 buffer += chunksize;
2623e094c4c1SCédric Le Goater                 n -= chunksize;
2624e094c4c1SCédric Le Goater             }
2625e68cb8b4SAlexey Kardashevskiy         }
2626e68cb8b4SAlexey Kardashevskiy     } while ((rc != 0)
2627e68cb8b4SAlexey Kardashevskiy              && ((max_ns < 0)
2628bc72ad67SAlex Bligh                  || ((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) < max_ns)));
2629e68cb8b4SAlexey Kardashevskiy 
2630e68cb8b4SAlexey Kardashevskiy     return (rc == 0) ? 1 : 0;
2631e68cb8b4SAlexey Kardashevskiy }
2632e68cb8b4SAlexey Kardashevskiy 
2633e68cb8b4SAlexey Kardashevskiy int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
2634e68cb8b4SAlexey Kardashevskiy                            uint16_t n_valid, uint16_t n_invalid)
2635e68cb8b4SAlexey Kardashevskiy {
2636e68cb8b4SAlexey Kardashevskiy     struct kvm_get_htab_header *buf;
2637e68cb8b4SAlexey Kardashevskiy     size_t chunksize = sizeof(*buf) + n_valid*HASH_PTE_SIZE_64;
2638e68cb8b4SAlexey Kardashevskiy     ssize_t rc;
2639e68cb8b4SAlexey Kardashevskiy 
2640e68cb8b4SAlexey Kardashevskiy     buf = alloca(chunksize);
2641e68cb8b4SAlexey Kardashevskiy     buf->index = index;
2642e68cb8b4SAlexey Kardashevskiy     buf->n_valid = n_valid;
2643e68cb8b4SAlexey Kardashevskiy     buf->n_invalid = n_invalid;
2644e68cb8b4SAlexey Kardashevskiy 
2645e68cb8b4SAlexey Kardashevskiy     qemu_get_buffer(f, (void *)(buf + 1), HASH_PTE_SIZE_64*n_valid);
2646e68cb8b4SAlexey Kardashevskiy 
2647e68cb8b4SAlexey Kardashevskiy     rc = write(fd, buf, chunksize);
2648e68cb8b4SAlexey Kardashevskiy     if (rc < 0) {
2649e68cb8b4SAlexey Kardashevskiy         fprintf(stderr, "Error writing KVM hash table: %s\n",
2650e68cb8b4SAlexey Kardashevskiy                 strerror(errno));
2651e68cb8b4SAlexey Kardashevskiy         return rc;
2652e68cb8b4SAlexey Kardashevskiy     }
2653e68cb8b4SAlexey Kardashevskiy     if (rc != chunksize) {
2654e68cb8b4SAlexey Kardashevskiy         /* We should never get a short write on a single chunk */
2655e68cb8b4SAlexey Kardashevskiy         fprintf(stderr, "Short write, restoring KVM hash table\n");
2656e68cb8b4SAlexey Kardashevskiy         return -1;
2657e68cb8b4SAlexey Kardashevskiy     }
2658e68cb8b4SAlexey Kardashevskiy     return 0;
2659e68cb8b4SAlexey Kardashevskiy }
2660e68cb8b4SAlexey Kardashevskiy 
266120d695a9SAndreas Färber bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
26624513d923SGleb Natapov {
26634513d923SGleb Natapov     return true;
26644513d923SGleb Natapov }
2665a1b87fe0SJan Kiszka 
266682169660SScott Wood void kvm_arch_init_irq_routing(KVMState *s)
266782169660SScott Wood {
266882169660SScott Wood }
2669c65f9a07SGreg Kurz 
26701ad9f0a4SDavid Gibson void kvmppc_read_hptes(ppc_hash_pte64_t *hptes, hwaddr ptex, int n)
26711ad9f0a4SDavid Gibson {
26721ad9f0a4SDavid Gibson     int fd, rc;
26731ad9f0a4SDavid Gibson     int i;
26747c43bca0SAneesh Kumar K.V 
267514b0d748SGreg Kurz     fd = kvmppc_get_htab_fd(false, ptex, &error_abort);
26761ad9f0a4SDavid Gibson 
26771ad9f0a4SDavid Gibson     i = 0;
26781ad9f0a4SDavid Gibson     while (i < n) {
26791ad9f0a4SDavid Gibson         struct kvm_get_htab_header *hdr;
26801ad9f0a4SDavid Gibson         int m = n < HPTES_PER_GROUP ? n : HPTES_PER_GROUP;
26811ad9f0a4SDavid Gibson         char buf[sizeof(*hdr) + m * HASH_PTE_SIZE_64];
26821ad9f0a4SDavid Gibson 
26831ad9f0a4SDavid Gibson         rc = read(fd, buf, sizeof(buf));
26841ad9f0a4SDavid Gibson         if (rc < 0) {
26851ad9f0a4SDavid Gibson             hw_error("kvmppc_read_hptes: Unable to read HPTEs");
26861ad9f0a4SDavid Gibson         }
26871ad9f0a4SDavid Gibson 
26881ad9f0a4SDavid Gibson         hdr = (struct kvm_get_htab_header *)buf;
26891ad9f0a4SDavid Gibson         while ((i < n) && ((char *)hdr < (buf + rc))) {
2690a36593e1SAlexey Kardashevskiy             int invalid = hdr->n_invalid, valid = hdr->n_valid;
26911ad9f0a4SDavid Gibson 
26921ad9f0a4SDavid Gibson             if (hdr->index != (ptex + i)) {
26931ad9f0a4SDavid Gibson                 hw_error("kvmppc_read_hptes: Unexpected HPTE index %"PRIu32
26941ad9f0a4SDavid Gibson                          " != (%"HWADDR_PRIu" + %d)", hdr->index, ptex, i);
26951ad9f0a4SDavid Gibson             }
26961ad9f0a4SDavid Gibson 
2697a36593e1SAlexey Kardashevskiy             if (n - i < valid) {
2698a36593e1SAlexey Kardashevskiy                 valid = n - i;
2699a36593e1SAlexey Kardashevskiy             }
2700a36593e1SAlexey Kardashevskiy             memcpy(hptes + i, hdr + 1, HASH_PTE_SIZE_64 * valid);
2701a36593e1SAlexey Kardashevskiy             i += valid;
27021ad9f0a4SDavid Gibson 
27031ad9f0a4SDavid Gibson             if ((n - i) < invalid) {
27041ad9f0a4SDavid Gibson                 invalid = n - i;
27051ad9f0a4SDavid Gibson             }
27061ad9f0a4SDavid Gibson             memset(hptes + i, 0, invalid * HASH_PTE_SIZE_64);
2707a36593e1SAlexey Kardashevskiy             i += invalid;
27081ad9f0a4SDavid Gibson 
27091ad9f0a4SDavid Gibson             hdr = (struct kvm_get_htab_header *)
27101ad9f0a4SDavid Gibson                 ((char *)(hdr + 1) + HASH_PTE_SIZE_64 * hdr->n_valid);
27111ad9f0a4SDavid Gibson         }
27121ad9f0a4SDavid Gibson     }
27131ad9f0a4SDavid Gibson 
27141ad9f0a4SDavid Gibson     close(fd);
27151ad9f0a4SDavid Gibson }
27161ad9f0a4SDavid Gibson 
27171ad9f0a4SDavid Gibson void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1)
27187c43bca0SAneesh Kumar K.V {
27191ad9f0a4SDavid Gibson     int fd, rc;
27201ad9f0a4SDavid Gibson     struct {
27211ad9f0a4SDavid Gibson         struct kvm_get_htab_header hdr;
27221ad9f0a4SDavid Gibson         uint64_t pte0;
27231ad9f0a4SDavid Gibson         uint64_t pte1;
27241ad9f0a4SDavid Gibson     } buf;
2725c1385933SAneesh Kumar K.V 
272614b0d748SGreg Kurz     fd = kvmppc_get_htab_fd(true, 0 /* Ignored */, &error_abort);
2727c1385933SAneesh Kumar K.V 
27281ad9f0a4SDavid Gibson     buf.hdr.n_valid = 1;
27291ad9f0a4SDavid Gibson     buf.hdr.n_invalid = 0;
27301ad9f0a4SDavid Gibson     buf.hdr.index = ptex;
27311ad9f0a4SDavid Gibson     buf.pte0 = cpu_to_be64(pte0);
27321ad9f0a4SDavid Gibson     buf.pte1 = cpu_to_be64(pte1);
27331ad9f0a4SDavid Gibson 
27341ad9f0a4SDavid Gibson     rc = write(fd, &buf, sizeof(buf));
27351ad9f0a4SDavid Gibson     if (rc != sizeof(buf)) {
27361ad9f0a4SDavid Gibson         hw_error("kvmppc_write_hpte: Unable to update KVM HPT");
2737c1385933SAneesh Kumar K.V     }
27381ad9f0a4SDavid Gibson     close(fd);
2739c1385933SAneesh Kumar K.V }
27409e03a040SFrank Blaschka 
27419e03a040SFrank Blaschka int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
2742dc9f06caSPavel Fedin                              uint64_t address, uint32_t data, PCIDevice *dev)
27439e03a040SFrank Blaschka {
27449e03a040SFrank Blaschka     return 0;
27459e03a040SFrank Blaschka }
27461850b6b7SEric Auger 
274738d87493SPeter Xu int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
274838d87493SPeter Xu                                 int vector, PCIDevice *dev)
274938d87493SPeter Xu {
275038d87493SPeter Xu     return 0;
275138d87493SPeter Xu }
275238d87493SPeter Xu 
275338d87493SPeter Xu int kvm_arch_release_virq_post(int virq)
275438d87493SPeter Xu {
275538d87493SPeter Xu     return 0;
275638d87493SPeter Xu }
275738d87493SPeter Xu 
27581850b6b7SEric Auger int kvm_arch_msi_data_to_gsi(uint32_t data)
27591850b6b7SEric Auger {
27601850b6b7SEric Auger     return data & 0xffff;
27611850b6b7SEric Auger }
27624d9392beSThomas Huth 
27634d9392beSThomas Huth int kvmppc_enable_hwrng(void)
27644d9392beSThomas Huth {
27654d9392beSThomas Huth     if (!kvm_enabled() || !kvm_check_extension(kvm_state, KVM_CAP_PPC_HWRNG)) {
27664d9392beSThomas Huth         return -1;
27674d9392beSThomas Huth     }
27684d9392beSThomas Huth 
27694d9392beSThomas Huth     return kvmppc_enable_hcall(kvm_state, H_RANDOM);
27704d9392beSThomas Huth }
277130f4b05bSDavid Gibson 
277230f4b05bSDavid Gibson void kvmppc_check_papr_resize_hpt(Error **errp)
277330f4b05bSDavid Gibson {
277430f4b05bSDavid Gibson     if (!kvm_enabled()) {
2775b55d295eSDavid Gibson         return; /* No KVM, we're good */
2776b55d295eSDavid Gibson     }
2777b55d295eSDavid Gibson 
2778b55d295eSDavid Gibson     if (cap_resize_hpt) {
2779b55d295eSDavid Gibson         return; /* Kernel has explicit support, we're good */
2780b55d295eSDavid Gibson     }
2781b55d295eSDavid Gibson 
2782b55d295eSDavid Gibson     /* Otherwise fallback on looking for PR KVM */
2783b55d295eSDavid Gibson     if (kvmppc_is_pr(kvm_state)) {
278430f4b05bSDavid Gibson         return;
278530f4b05bSDavid Gibson     }
278630f4b05bSDavid Gibson 
278730f4b05bSDavid Gibson     error_setg(errp,
278830f4b05bSDavid Gibson                "Hash page table resizing not available with this KVM version");
278930f4b05bSDavid Gibson }
2790b55d295eSDavid Gibson 
2791b55d295eSDavid Gibson int kvmppc_resize_hpt_prepare(PowerPCCPU *cpu, target_ulong flags, int shift)
2792b55d295eSDavid Gibson {
2793b55d295eSDavid Gibson     CPUState *cs = CPU(cpu);
2794b55d295eSDavid Gibson     struct kvm_ppc_resize_hpt rhpt = {
2795b55d295eSDavid Gibson         .flags = flags,
2796b55d295eSDavid Gibson         .shift = shift,
2797b55d295eSDavid Gibson     };
2798b55d295eSDavid Gibson 
2799b55d295eSDavid Gibson     if (!cap_resize_hpt) {
2800b55d295eSDavid Gibson         return -ENOSYS;
2801b55d295eSDavid Gibson     }
2802b55d295eSDavid Gibson 
2803b55d295eSDavid Gibson     return kvm_vm_ioctl(cs->kvm_state, KVM_PPC_RESIZE_HPT_PREPARE, &rhpt);
2804b55d295eSDavid Gibson }
2805b55d295eSDavid Gibson 
2806b55d295eSDavid Gibson int kvmppc_resize_hpt_commit(PowerPCCPU *cpu, target_ulong flags, int shift)
2807b55d295eSDavid Gibson {
2808b55d295eSDavid Gibson     CPUState *cs = CPU(cpu);
2809b55d295eSDavid Gibson     struct kvm_ppc_resize_hpt rhpt = {
2810b55d295eSDavid Gibson         .flags = flags,
2811b55d295eSDavid Gibson         .shift = shift,
2812b55d295eSDavid Gibson     };
2813b55d295eSDavid Gibson 
2814b55d295eSDavid Gibson     if (!cap_resize_hpt) {
2815b55d295eSDavid Gibson         return -ENOSYS;
2816b55d295eSDavid Gibson     }
2817b55d295eSDavid Gibson 
2818b55d295eSDavid Gibson     return kvm_vm_ioctl(cs->kvm_state, KVM_PPC_RESIZE_HPT_COMMIT, &rhpt);
2819b55d295eSDavid Gibson }
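
/*
 * Hedged usage sketch (the hypercall handlers that drive this live outside
 * this file): resizing is a two-step protocol using the same flags/shift in
 * both steps, roughly
 *
 *     rc = kvmppc_resize_hpt_prepare(cpu, flags, shift);
 *     // ... retry while the kernel reports the preparation is still pending ...
 *     rc = kvmppc_resize_hpt_commit(cpu, flags, shift);
 */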
2820b55d295eSDavid Gibson 
2821c363a37aSDaniel Henrique Barboza /*
2822c363a37aSDaniel Henrique Barboza  * This is a helper function to detect a post-migration scenario
2823c363a37aSDaniel Henrique Barboza  * in which a guest, running as KVM-HV, freezes in cpu_post_load because
2824c363a37aSDaniel Henrique Barboza  * the guest kernel can't handle a PVR value other than the actual host
2825c363a37aSDaniel Henrique Barboza  * PVR in KVM_SET_SREGS, even if pvr_match() returns true.
2826c363a37aSDaniel Henrique Barboza  *
2827c363a37aSDaniel Henrique Barboza  * If we don't have cap_ppc_pvr_compat and we're not running in PR
2828c363a37aSDaniel Henrique Barboza  * (so, we're HV), return true. The workaround itself is done in
2829c363a37aSDaniel Henrique Barboza  * cpu_post_load.
2830c363a37aSDaniel Henrique Barboza  *
2831c363a37aSDaniel Henrique Barboza  * The order here is important: we'll only check for KVM PR as a
2832c363a37aSDaniel Henrique Barboza  * fallback if the guest kernel can't handle the situation itself.
2833c363a37aSDaniel Henrique Barboza  * We want to avoid querying the running KVM type at the QEMU level
2834c363a37aSDaniel Henrique Barboza  * as much as possible.
2835c363a37aSDaniel Henrique Barboza  */
2836c363a37aSDaniel Henrique Barboza bool kvmppc_pvr_workaround_required(PowerPCCPU *cpu)
2837c363a37aSDaniel Henrique Barboza {
2838c363a37aSDaniel Henrique Barboza     CPUState *cs = CPU(cpu);
2839c363a37aSDaniel Henrique Barboza 
2840c363a37aSDaniel Henrique Barboza     if (!kvm_enabled()) {
2841c363a37aSDaniel Henrique Barboza         return false;
2842c363a37aSDaniel Henrique Barboza     }
2843c363a37aSDaniel Henrique Barboza 
2844c363a37aSDaniel Henrique Barboza     if (cap_ppc_pvr_compat) {
2845c363a37aSDaniel Henrique Barboza         return false;
2846c363a37aSDaniel Henrique Barboza     }
2847c363a37aSDaniel Henrique Barboza 
2848c363a37aSDaniel Henrique Barboza     return !kvmppc_is_pr(cs->kvm_state);
2849c363a37aSDaniel Henrique Barboza }
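
/*
 * Hedged caller sketch (cpu_post_load lives outside this file): the
 * workaround mentioned above amounts to something like
 *
 *     if (kvmppc_pvr_workaround_required(cpu)) {
 *         env->spr[SPR_PVR] = env->spr_cb[SPR_PVR].default_value;
 *     }
 *
 * i.e. forcing the incoming PVR back to the host's default value before
 * the registers are pushed to KVM; the field names here are assumptions.
 */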
2850