xref: /qemu/target/ppc/kvm.c (revision 1ad9f0a464fe78d30ee60b3629f7a825cf2fab13)
1d76d1650Saurel32 /*
2d76d1650Saurel32  * PowerPC implementation of KVM hooks
3d76d1650Saurel32  *
4d76d1650Saurel32  * Copyright IBM Corp. 2007
590dc8812SScott Wood  * Copyright (C) 2011 Freescale Semiconductor, Inc.
6d76d1650Saurel32  *
7d76d1650Saurel32  * Authors:
8d76d1650Saurel32  *  Jerone Young <jyoung5@us.ibm.com>
9d76d1650Saurel32  *  Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
10d76d1650Saurel32  *  Hollis Blanchard <hollisb@us.ibm.com>
11d76d1650Saurel32  *
12d76d1650Saurel32  * This work is licensed under the terms of the GNU GPL, version 2 or later.
13d76d1650Saurel32  * See the COPYING file in the top-level directory.
14d76d1650Saurel32  *
15d76d1650Saurel32  */
16d76d1650Saurel32 
170d75590dSPeter Maydell #include "qemu/osdep.h"
18eadaada1SAlexander Graf #include <dirent.h>
19d76d1650Saurel32 #include <sys/ioctl.h>
204656e1f0SBenjamin Herrenschmidt #include <sys/vfs.h>
21d76d1650Saurel32 
22d76d1650Saurel32 #include <linux/kvm.h>
23d76d1650Saurel32 
24d76d1650Saurel32 #include "qemu-common.h"
25072ed5f2SThomas Huth #include "qemu/error-report.h"
2633c11879SPaolo Bonzini #include "cpu.h"
27715d4b96SThomas Huth #include "cpu-models.h"
281de7afc9SPaolo Bonzini #include "qemu/timer.h"
299c17d615SPaolo Bonzini #include "sysemu/sysemu.h"
30b3946626SVincent Palatin #include "sysemu/hw_accel.h"
3186b50f2eSThomas Huth #include "sysemu/numa.h"
32d76d1650Saurel32 #include "kvm_ppc.h"
339c17d615SPaolo Bonzini #include "sysemu/cpus.h"
349c17d615SPaolo Bonzini #include "sysemu/device_tree.h"
35d5aea6f3SDavid Gibson #include "mmu-hash64.h"
36d76d1650Saurel32 
37f61b4bedSAlexander Graf #include "hw/sysbus.h"
380d09e41aSPaolo Bonzini #include "hw/ppc/spapr.h"
390d09e41aSPaolo Bonzini #include "hw/ppc/spapr_vio.h"
407ebaf795SBharata B Rao #include "hw/ppc/spapr_cpu_core.h"
4198a8b524SAlexey Kardashevskiy #include "hw/ppc/ppc.h"
4231f2cb8fSBharat Bhushan #include "sysemu/watchdog.h"
43b36f100eSAlexey Kardashevskiy #include "trace.h"
4488365d17SBharat Bhushan #include "exec/gdbstub.h"
454c663752SPaolo Bonzini #include "exec/memattrs.h"
462d103aaeSMichael Roth #include "sysemu/hostmem.h"
47f348b6d1SVeronia Bahaa #include "qemu/cutils.h"
483b542549SBharata B Rao #if defined(TARGET_PPC64)
493b542549SBharata B Rao #include "hw/ppc/spapr_cpu_core.h"
503b542549SBharata B Rao #endif
51f61b4bedSAlexander Graf 
//#define DEBUG_KVM

/* Debug tracing: DPRINTF() writes to stderr only when DEBUG_KVM is
 * defined at compile time; otherwise it expands to a no-op. */
#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

/* Location of the host CPU nodes in the flattened device tree */
#define PROC_DEVTREE_CPU      "/proc/device-tree/cpus/"
63eadaada1SAlexander Graf 
/* Terminator entry only: the PPC port has no strictly required
 * KVM capabilities. */
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

/* Host KVM capability flags, probed once in kvm_arch_init() via
 * kvm_check_extension() and cached here for the rest of the run. */
static int cap_interrupt_unset = false;
static int cap_interrupt_level = false;
static int cap_segstate;
static int cap_booke_sregs;
static int cap_ppc_smt;
static int cap_ppc_rma;
static int cap_spapr_tce;
static int cap_spapr_multitce;
static int cap_spapr_vfio;
static int cap_hior;
static int cap_one_reg;
static int cap_epr;
static int cap_ppc_watchdog;
static int cap_papr;            /* not probed: activated later by kvmppc_set_papr() */
static int cap_htab_fd;
static int cap_fixup_hcalls;
static int cap_htm;             /* Hardware transactional memory support */

/* Software-breakpoint instruction encoding; value not set anywhere in
 * this chunk -- presumably filled in during vcpu init (confirm in
 * kvm_arch_init_vcpu). */
static uint32_t debug_inst_opcode;

/* XXX We have a race condition where we actually have a level triggered
 *     interrupt, but the infrastructure can't expose that yet, so the guest
 *     takes but ignores it, goes to sleep and never gets notified that there's
 *     still an interrupt pending.
 *
 *     As a quick workaround, let's just wake up again 20 ms after we injected
 *     an interrupt. That way we can assure that we're always reinjecting
 *     interrupts in case the guest swallowed them.
 */
static QEMUTimer *idle_timer;
98c6a94ba5SAlexander Graf 
99d5a68146SAndreas Färber static void kvm_kick_cpu(void *opaque)
100c6a94ba5SAlexander Graf {
101d5a68146SAndreas Färber     PowerPCCPU *cpu = opaque;
102d5a68146SAndreas Färber 
103c08d7424SAndreas Färber     qemu_cpu_kick(CPU(cpu));
104c6a94ba5SAlexander Graf }
105c6a94ba5SAlexander Graf 
10696c9cff0SThomas Huth /* Check whether we are running with KVM-PR (instead of KVM-HV).  This
10796c9cff0SThomas Huth  * should only be used for fallback tests - generally we should use
10896c9cff0SThomas Huth  * explicit capabilities for the features we want, rather than
10996c9cff0SThomas Huth  * assuming what is/isn't available depending on the KVM variant. */
11096c9cff0SThomas Huth static bool kvmppc_is_pr(KVMState *ks)
11196c9cff0SThomas Huth {
11296c9cff0SThomas Huth     /* Assume KVM-PR if the GET_PVINFO capability is available */
11396c9cff0SThomas Huth     return kvm_check_extension(ks, KVM_CAP_PPC_GET_PVINFO) != 0;
11496c9cff0SThomas Huth }
11596c9cff0SThomas Huth 
1165ba4576bSAndreas Färber static int kvm_ppc_register_host_cpu_type(void);
1175ba4576bSAndreas Färber 
/* Arch-specific KVM initialisation: probe the host kernel's optional
 * capabilities once and cache the results in the file-scope cap_* flags,
 * then register the host CPU model.  Always succeeds (returns 0). */
int kvm_arch_init(MachineState *ms, KVMState *s)
{
    cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);
    cap_interrupt_level = kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL);
    cap_segstate = kvm_check_extension(s, KVM_CAP_PPC_SEGSTATE);
    cap_booke_sregs = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_SREGS);
    cap_ppc_smt = kvm_check_extension(s, KVM_CAP_PPC_SMT);
    cap_ppc_rma = kvm_check_extension(s, KVM_CAP_PPC_RMA);
    cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);
    cap_spapr_multitce = kvm_check_extension(s, KVM_CAP_SPAPR_MULTITCE);
    /* sPAPR VFIO support is deliberately left disabled here */
    cap_spapr_vfio = false;
    cap_one_reg = kvm_check_extension(s, KVM_CAP_ONE_REG);
    cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);
    cap_epr = kvm_check_extension(s, KVM_CAP_PPC_EPR);
    cap_ppc_watchdog = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_WATCHDOG);
    /* Note: we don't set cap_papr here, because this capability is
     * only activated after this by kvmppc_set_papr() */
    cap_htab_fd = kvm_check_extension(s, KVM_CAP_PPC_HTAB_FD);
    cap_fixup_hcalls = kvm_check_extension(s, KVM_CAP_PPC_FIXUP_HCALL);
    /* HTM is a per-VM capability, hence the kvm_vm_check_extension() */
    cap_htm = kvm_vm_check_extension(s, KVM_CAP_PPC_HTM);

    if (!cap_interrupt_level) {
        fprintf(stderr, "KVM: Couldn't find level irq capability. Expect the "
                        "VM to stall at times!\n");
    }

    kvm_ppc_register_host_cpu_type();

    return 0;
}
148d76d1650Saurel32 
/* PPC does not create an in-kernel irqchip here; report success with
 * nothing done. */
int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
{
    return 0;
}
153d525ffabSPaolo Bonzini 
1541bc22652SAndreas Färber static int kvm_arch_sync_sregs(PowerPCCPU *cpu)
155d76d1650Saurel32 {
1561bc22652SAndreas Färber     CPUPPCState *cenv = &cpu->env;
1571bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
158861bbc80SAlexander Graf     struct kvm_sregs sregs;
1595666ca4aSScott Wood     int ret;
1605666ca4aSScott Wood 
1615666ca4aSScott Wood     if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
16264e07be5SAlexander Graf         /* What we're really trying to say is "if we're on BookE, we use
16364e07be5SAlexander Graf            the native PVR for now". This is the only sane way to check
16464e07be5SAlexander Graf            it though, so we potentially confuse users that they can run
16564e07be5SAlexander Graf            BookE guests on BookS. Let's hope nobody dares enough :) */
1665666ca4aSScott Wood         return 0;
1675666ca4aSScott Wood     } else {
16890dc8812SScott Wood         if (!cap_segstate) {
16964e07be5SAlexander Graf             fprintf(stderr, "kvm error: missing PVR setting capability\n");
17064e07be5SAlexander Graf             return -ENOSYS;
1715666ca4aSScott Wood         }
1725666ca4aSScott Wood     }
1735666ca4aSScott Wood 
1741bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
1755666ca4aSScott Wood     if (ret) {
1765666ca4aSScott Wood         return ret;
1775666ca4aSScott Wood     }
178861bbc80SAlexander Graf 
179861bbc80SAlexander Graf     sregs.pvr = cenv->spr[SPR_PVR];
1801bc22652SAndreas Färber     return kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
1815666ca4aSScott Wood }
1825666ca4aSScott Wood 
18393dd5e85SScott Wood /* Set up a shared TLB array with KVM */
1841bc22652SAndreas Färber static int kvm_booke206_tlb_init(PowerPCCPU *cpu)
18593dd5e85SScott Wood {
1861bc22652SAndreas Färber     CPUPPCState *env = &cpu->env;
1871bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
18893dd5e85SScott Wood     struct kvm_book3e_206_tlb_params params = {};
18993dd5e85SScott Wood     struct kvm_config_tlb cfg = {};
19093dd5e85SScott Wood     unsigned int entries = 0;
19193dd5e85SScott Wood     int ret, i;
19293dd5e85SScott Wood 
19393dd5e85SScott Wood     if (!kvm_enabled() ||
194a60f24b5SAndreas Färber         !kvm_check_extension(cs->kvm_state, KVM_CAP_SW_TLB)) {
19593dd5e85SScott Wood         return 0;
19693dd5e85SScott Wood     }
19793dd5e85SScott Wood 
19893dd5e85SScott Wood     assert(ARRAY_SIZE(params.tlb_sizes) == BOOKE206_MAX_TLBN);
19993dd5e85SScott Wood 
20093dd5e85SScott Wood     for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
20193dd5e85SScott Wood         params.tlb_sizes[i] = booke206_tlb_size(env, i);
20293dd5e85SScott Wood         params.tlb_ways[i] = booke206_tlb_ways(env, i);
20393dd5e85SScott Wood         entries += params.tlb_sizes[i];
20493dd5e85SScott Wood     }
20593dd5e85SScott Wood 
20693dd5e85SScott Wood     assert(entries == env->nb_tlb);
20793dd5e85SScott Wood     assert(sizeof(struct kvm_book3e_206_tlb_entry) == sizeof(ppcmas_tlb_t));
20893dd5e85SScott Wood 
20993dd5e85SScott Wood     env->tlb_dirty = true;
21093dd5e85SScott Wood 
21193dd5e85SScott Wood     cfg.array = (uintptr_t)env->tlb.tlbm;
21293dd5e85SScott Wood     cfg.array_len = sizeof(ppcmas_tlb_t) * entries;
21393dd5e85SScott Wood     cfg.params = (uintptr_t)&params;
21493dd5e85SScott Wood     cfg.mmu_type = KVM_MMU_FSL_BOOKE_NOHV;
21593dd5e85SScott Wood 
21648add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_SW_TLB, 0, (uintptr_t)&cfg);
21793dd5e85SScott Wood     if (ret < 0) {
21893dd5e85SScott Wood         fprintf(stderr, "%s: couldn't enable KVM_CAP_SW_TLB: %s\n",
21993dd5e85SScott Wood                 __func__, strerror(-ret));
22093dd5e85SScott Wood         return ret;
22193dd5e85SScott Wood     }
22293dd5e85SScott Wood 
22393dd5e85SScott Wood     env->kvm_sw_tlb = true;
22493dd5e85SScott Wood     return 0;
22593dd5e85SScott Wood }
22693dd5e85SScott Wood 
2274656e1f0SBenjamin Herrenschmidt 
2284656e1f0SBenjamin Herrenschmidt #if defined(TARGET_PPC64)
/*
 * Hand-built SMMU description for kernels that predate the
 * KVM_PPC_GET_SMMU_INFO ioctl.  See the block comment below for the
 * assumptions this guessing relies on.
 */
static void kvm_get_fallback_smmu_info(PowerPCCPU *cpu,
                                       struct kvm_ppc_smmu_info *info)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    memset(info, 0, sizeof(*info));

    /* We don't have the new KVM_PPC_GET_SMMU_INFO ioctl, so
     * need to "guess" what the supported page sizes are.
     *
     * For that to work we make a few assumptions:
     *
     * - Check whether we are running "PR" KVM which only supports 4K
     *   and 16M pages, but supports them regardless of the backing
     *   store characteritics. We also don't support 1T segments.
     *
     *   This is safe as if HV KVM ever supports that capability or PR
     *   KVM grows supports for more page/segment sizes, those versions
     *   will have implemented KVM_CAP_PPC_GET_SMMU_INFO and thus we
     *   will not hit this fallback
     *
     * - Else we are running HV KVM. This means we only support page
     *   sizes that fit in the backing store. Additionally we only
     *   advertize 64K pages if the processor is ARCH 2.06 and we assume
     *   P7 encodings for the SLB and hash table. Here too, we assume
     *   support for any newer processor will mean a kernel that
     *   implements KVM_CAP_PPC_GET_SMMU_INFO and thus doesn't hit
     *   this fallback.
     */
    if (kvmppc_is_pr(cs->kvm_state)) {
        /* No flags */
        info->flags = 0;
        info->slb_size = 64;

        /* Standard 4k base page size segment */
        info->sps[0].page_shift = 12;
        info->sps[0].slb_enc = 0;
        info->sps[0].enc[0].page_shift = 12;
        info->sps[0].enc[0].pte_enc = 0;

        /* Standard 16M large page size segment */
        info->sps[1].page_shift = 24;
        info->sps[1].slb_enc = SLB_VSID_L;
        info->sps[1].enc[0].page_shift = 24;
        info->sps[1].enc[0].pte_enc = 0;
    } else {
        int i = 0;

        /* HV KVM has backing store size restrictions */
        info->flags = KVM_PPC_PAGE_SIZES_REAL;

        if (env->mmu_model & POWERPC_MMU_1TSEG) {
            info->flags |= KVM_PPC_1T_SEGMENTS;
        }

        if (env->mmu_model == POWERPC_MMU_2_06 ||
            env->mmu_model == POWERPC_MMU_2_07) {
            info->slb_size = 32;
        } else {
            info->slb_size = 64;
        }

        /* Standard 4k base page size segment */
        info->sps[i].page_shift = 12;
        info->sps[i].slb_enc = 0;
        info->sps[i].enc[0].page_shift = 12;
        info->sps[i].enc[0].pte_enc = 0;
        i++;

        /* 64K on MMU 2.06 and later */
        if (env->mmu_model == POWERPC_MMU_2_06 ||
            env->mmu_model == POWERPC_MMU_2_07) {
            info->sps[i].page_shift = 16;
            info->sps[i].slb_enc = 0x110;
            info->sps[i].enc[0].page_shift = 16;
            info->sps[i].enc[0].pte_enc = 1;
            i++;
        }

        /* Standard 16M large page size segment */
        info->sps[i].page_shift = 24;
        info->sps[i].slb_enc = SLB_VSID_L;
        info->sps[i].enc[0].page_shift = 24;
        info->sps[i].enc[0].pte_enc = 0;
    }
}
3164656e1f0SBenjamin Herrenschmidt 
317a60f24b5SAndreas Färber static void kvm_get_smmu_info(PowerPCCPU *cpu, struct kvm_ppc_smmu_info *info)
3184656e1f0SBenjamin Herrenschmidt {
319a60f24b5SAndreas Färber     CPUState *cs = CPU(cpu);
3204656e1f0SBenjamin Herrenschmidt     int ret;
3214656e1f0SBenjamin Herrenschmidt 
322a60f24b5SAndreas Färber     if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) {
323a60f24b5SAndreas Färber         ret = kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_SMMU_INFO, info);
3244656e1f0SBenjamin Herrenschmidt         if (ret == 0) {
3254656e1f0SBenjamin Herrenschmidt             return;
3264656e1f0SBenjamin Herrenschmidt         }
3274656e1f0SBenjamin Herrenschmidt     }
3284656e1f0SBenjamin Herrenschmidt 
329a60f24b5SAndreas Färber     kvm_get_fallback_smmu_info(cpu, info);
3304656e1f0SBenjamin Herrenschmidt }
3314656e1f0SBenjamin Herrenschmidt 
/*
 * Return the page size backing mem_path: the hugepage size when the path
 * lives on hugetlbfs, the normal host page size otherwise.  Exits on
 * statfs() failure.
 */
static long gethugepagesize(const char *mem_path)
{
    struct statfs fs;
    int err;

    /* Retry statfs() when interrupted by a signal */
    do {
        err = statfs(mem_path, &fs);
    } while (err != 0 && errno == EINTR);

    if (err != 0) {
        fprintf(stderr, "Couldn't statfs() memory path: %s\n",
                strerror(errno));
        exit(1);
    }

#define HUGETLBFS_MAGIC       0x958458f6

    if (fs.f_type == HUGETLBFS_MAGIC) {
        /* It's hugepage, return the huge page size */
        return fs.f_bsize;
    }

    /* Explicit mempath, but it's ordinary pages */
    return getpagesize();
}
3574656e1f0SBenjamin Herrenschmidt 
3583be5cc23SMarkus Armbruster /*
3593be5cc23SMarkus Armbruster  * FIXME TOCTTOU: this iterates over memory backends' mem-path, which
3603be5cc23SMarkus Armbruster  * may or may not name the same files / on the same filesystem now as
3613be5cc23SMarkus Armbruster  * when we actually open and map them.  Iterate over the file
3623be5cc23SMarkus Armbruster  * descriptors instead, and use qemu_fd_getpagesize().
3633be5cc23SMarkus Armbruster  */
3642d103aaeSMichael Roth static int find_max_supported_pagesize(Object *obj, void *opaque)
3652d103aaeSMichael Roth {
3662d103aaeSMichael Roth     char *mem_path;
3672d103aaeSMichael Roth     long *hpsize_min = opaque;
3682d103aaeSMichael Roth 
3692d103aaeSMichael Roth     if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
3702d103aaeSMichael Roth         mem_path = object_property_get_str(obj, "mem-path", NULL);
3712d103aaeSMichael Roth         if (mem_path) {
3722d103aaeSMichael Roth             long hpsize = gethugepagesize(mem_path);
3732d103aaeSMichael Roth             if (hpsize < *hpsize_min) {
3742d103aaeSMichael Roth                 *hpsize_min = hpsize;
3752d103aaeSMichael Roth             }
3762d103aaeSMichael Roth         } else {
3772d103aaeSMichael Roth             *hpsize_min = getpagesize();
3782d103aaeSMichael Roth         }
3792d103aaeSMichael Roth     }
3802d103aaeSMichael Roth 
3812d103aaeSMichael Roth     return 0;
3822d103aaeSMichael Roth }
3832d103aaeSMichael Roth 
/*
 * Return the smallest page size backing any part of guest RAM: main RAM
 * (mem_path hugepages or anonymous pages) and every memory-backend
 * object.  This is the largest page size guaranteed to be usable across
 * all of guest memory.
 */
static long getrampagesize(void)
{
    long hpsize = LONG_MAX;
    long mainrampagesize;
    Object *memdev_root;

    if (mem_path) {
        mainrampagesize = gethugepagesize(mem_path);
    } else {
        mainrampagesize = getpagesize();
    }

    /* it's possible we have memory-backend objects with
     * hugepage-backed RAM. these may get mapped into system
     * address space via -numa parameters or memory hotplug
     * hooks. we want to take these into account, but we
     * also want to make sure these supported hugepage
     * sizes are applicable across the entire range of memory
     * we may boot from, so we take the min across all
     * backends, and assume normal pages in cases where a
     * backend isn't backed by hugepages.
     */
    memdev_root = object_resolve_path("/objects", NULL);
    if (memdev_root) {
        object_child_foreach(memdev_root, find_max_supported_pagesize, &hpsize);
    }
    if (hpsize == LONG_MAX) {
        /* No additional memory regions found ==> Report main RAM page size */
        return mainrampagesize;
    }

    /* If NUMA is disabled or the NUMA nodes are not backed with a
     * memory-backend, then there is at least one node using "normal" RAM,
     * so if its page size is smaller we have got to report that size instead.
     */
    if (hpsize > mainrampagesize &&
        (nb_numa_nodes == 0 || numa_info[0].node_memdev == NULL)) {
        static bool warned;
        if (!warned) {
            error_report("Huge page support disabled (n/a for main memory).");
            warned = true;
        }
        return mainrampagesize;
    }

    return hpsize;
}
4312d103aaeSMichael Roth 
4324656e1f0SBenjamin Herrenschmidt static bool kvm_valid_page_size(uint32_t flags, long rampgsize, uint32_t shift)
4334656e1f0SBenjamin Herrenschmidt {
4344656e1f0SBenjamin Herrenschmidt     if (!(flags & KVM_PPC_PAGE_SIZES_REAL)) {
4354656e1f0SBenjamin Herrenschmidt         return true;
4364656e1f0SBenjamin Herrenschmidt     }
4374656e1f0SBenjamin Herrenschmidt 
4384656e1f0SBenjamin Herrenschmidt     return (1ul << shift) <= rampgsize;
4394656e1f0SBenjamin Herrenschmidt }
4404656e1f0SBenjamin Herrenschmidt 
441df587133SThomas Huth static long max_cpu_page_size;
442df587133SThomas Huth 
/*
 * Filter the kernel-advertised segment/page-size table down to the
 * sizes the host backing store can actually satisfy, writing the result
 * into env->sps.  Also drops MMU model bits (1T segments, 64K pages)
 * that did not survive the filtering.  The kernel SMMU info and the
 * host RAM page size are probed once and cached in statics, so all
 * vcpus see the same view.
 */
static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
{
    static struct kvm_ppc_smmu_info smmu_info;
    static bool has_smmu_info;
    CPUPPCState *env = &cpu->env;
    /* i/j = segment/encoding level; k/q = kernel-table/qemu-table index */
    int iq, ik, jq, jk;
    bool has_64k_pages = false;

    /* We only handle page sizes for 64-bit server guests for now */
    if (!(env->mmu_model & POWERPC_MMU_64)) {
        return;
    }

    /* Collect MMU info from kernel if not already */
    if (!has_smmu_info) {
        kvm_get_smmu_info(cpu, &smmu_info);
        has_smmu_info = true;
    }

    if (!max_cpu_page_size) {
        max_cpu_page_size = getrampagesize();
    }

    /* Convert to QEMU form */
    memset(&env->sps, 0, sizeof(env->sps));

    /* If we have HV KVM, we need to forbid CI large pages if our
     * host page size is smaller than 64K.
     */
    if (smmu_info.flags & KVM_PPC_PAGE_SIZES_REAL) {
        env->ci_large_pages = getpagesize() >= 0x10000;
    }

    /*
     * XXX This loop should be an entry wide AND of the capabilities that
     *     the selected CPU has with the capabilities that KVM supports.
     */
    for (ik = iq = 0; ik < KVM_PPC_PAGE_SIZES_MAX_SZ; ik++) {
        struct ppc_one_seg_page_size *qsps = &env->sps.sps[iq];
        struct kvm_ppc_one_seg_page_size *ksps = &smmu_info.sps[ik];

        /* Skip segment sizes the backing store cannot provide */
        if (!kvm_valid_page_size(smmu_info.flags, max_cpu_page_size,
                                 ksps->page_shift)) {
            continue;
        }
        qsps->page_shift = ksps->page_shift;
        qsps->slb_enc = ksps->slb_enc;
        for (jk = jq = 0; jk < KVM_PPC_PAGE_SIZES_MAX_SZ; jk++) {
            if (!kvm_valid_page_size(smmu_info.flags, max_cpu_page_size,
                                     ksps->enc[jk].page_shift)) {
                continue;
            }
            if (ksps->enc[jk].page_shift == 16) {
                has_64k_pages = true;
            }
            qsps->enc[jq].page_shift = ksps->enc[jk].page_shift;
            qsps->enc[jq].pte_enc = ksps->enc[jk].pte_enc;
            if (++jq >= PPC_PAGE_SIZES_MAX_SZ) {
                break;
            }
        }
        if (++iq >= PPC_PAGE_SIZES_MAX_SZ) {
            break;
        }
    }
    env->slb_nr = smmu_info.slb_size;
    if (!(smmu_info.flags & KVM_PPC_1T_SEGMENTS)) {
        env->mmu_model &= ~POWERPC_MMU_1TSEG;
    }
    if (!has_64k_pages) {
        env->mmu_model &= ~POWERPC_MMU_64K;
    }
}
516df587133SThomas Huth 
517df587133SThomas Huth bool kvmppc_is_mem_backend_page_size_ok(char *obj_path)
518df587133SThomas Huth {
519df587133SThomas Huth     Object *mem_obj = object_resolve_path(obj_path, NULL);
520df587133SThomas Huth     char *mempath = object_property_get_str(mem_obj, "mem-path", NULL);
521df587133SThomas Huth     long pagesize;
522df587133SThomas Huth 
523df587133SThomas Huth     if (mempath) {
524df587133SThomas Huth         pagesize = gethugepagesize(mempath);
525df587133SThomas Huth     } else {
526df587133SThomas Huth         pagesize = getpagesize();
527df587133SThomas Huth     }
528df587133SThomas Huth 
529df587133SThomas Huth     return pagesize >= max_cpu_page_size;
530df587133SThomas Huth }
531df587133SThomas Huth 
5324656e1f0SBenjamin Herrenschmidt #else /* defined (TARGET_PPC64) */
5334656e1f0SBenjamin Herrenschmidt 
/* Non-64-bit targets: no hash-MMU page-size table to fix up */
static inline void kvm_fixup_page_sizes(PowerPCCPU *cpu)
{
}
5374656e1f0SBenjamin Herrenschmidt 
/* Without TARGET_PPC64 there is no page-size restriction to enforce,
 * so any backend is acceptable. */
bool kvmppc_is_mem_backend_page_size_ok(char *obj_path)
{
    return true;
}
542df587133SThomas Huth 
5434656e1f0SBenjamin Herrenschmidt #endif /* !defined (TARGET_PPC64) */
5444656e1f0SBenjamin Herrenschmidt 
545b164e48eSEduardo Habkost unsigned long kvm_arch_vcpu_id(CPUState *cpu)
546b164e48eSEduardo Habkost {
5470f20ba62SAlexey Kardashevskiy     return ppc_get_vcpu_dt_id(POWERPC_CPU(cpu));
548b164e48eSEduardo Habkost }
549b164e48eSEduardo Habkost 
/* e500 supports 2 h/w breakpoints and 2 watchpoints.
 * book3s supports only 1 watchpoint, so an array size
 * of 4 is sufficient for now.
 */
#define MAX_HW_BKPTS 4

/* One slot per installed h/w debug point (breakpoints and watchpoints
 * share this table). */
static struct HWBreakpoint {
    target_ulong addr;  /* guest address the debug point covers */
    int type;           /* GDB_BREAKPOINT_HW / GDB_WATCHPOINT_* kind */
} hw_debug_points[MAX_HW_BKPTS];

static CPUWatchpoint hw_watchpoint;

/* By default no h/w breakpoint or watchpoint is supported; the limits
 * are raised per CPU model in kvmppc_hw_debug_points_init(). */
static int max_hw_breakpoint;
static int max_hw_watchpoint;
static int nb_hw_breakpoint;
static int nb_hw_watchpoint;
56888365d17SBharat Bhushan 
56988365d17SBharat Bhushan static void kvmppc_hw_debug_points_init(CPUPPCState *cenv)
57088365d17SBharat Bhushan {
57188365d17SBharat Bhushan     if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
57288365d17SBharat Bhushan         max_hw_breakpoint = 2;
57388365d17SBharat Bhushan         max_hw_watchpoint = 2;
57488365d17SBharat Bhushan     }
57588365d17SBharat Bhushan 
57688365d17SBharat Bhushan     if ((max_hw_breakpoint + max_hw_watchpoint) > MAX_HW_BKPTS) {
57788365d17SBharat Bhushan         fprintf(stderr, "Error initializing h/w breakpoints\n");
57888365d17SBharat Bhushan         return;
57988365d17SBharat Bhushan     }
58088365d17SBharat Bhushan }
58188365d17SBharat Bhushan 
/*
 * Per-vCPU KVM initialisation hook.
 *
 * Fixes up page size information, synchronises sregs with the kernel,
 * sets up the idle kick timer, performs MMU-model-specific setup and
 * initialises h/w debug support.  Returns 0 on success or a negative
 * errno from the failing step.
 */
int kvm_arch_init_vcpu(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *cenv = &cpu->env;
    int ret;

    /* Gather server mmu info from KVM and update the CPU state */
    kvm_fixup_page_sizes(cpu);

    /* Synchronize sregs with kvm */
    ret = kvm_arch_sync_sregs(cpu);
    if (ret) {
        if (ret == -EINVAL) {
            error_report("Register sync failed... If you're using kvm-hv.ko,"
                         " only \"-cpu host\" is possible");
        }
        return ret;
    }

    idle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, kvm_kick_cpu, cpu);

    switch (cenv->mmu_model) {
    case POWERPC_MMU_BOOKE206:
        /* This target supports access to KVM's guest TLB */
        ret = kvm_booke206_tlb_init(cpu);
        break;
    case POWERPC_MMU_2_07:
        if (!cap_htm && !kvmppc_is_pr(cs->kvm_state)) {
            /* KVM-HV has transactional memory on POWER8 also without the
             * KVM_CAP_PPC_HTM extension, so enable it here instead. */
            cap_htm = true;
        }
        break;
    default:
        break;
    }

    /* Fetch the opcode KVM uses for s/w breakpoints, then probe
     * the h/w debug register limits for this CPU model. */
    kvm_get_one_reg(cs, KVM_REG_PPC_DEBUG_INST, &debug_inst_opcode);
    kvmppc_hw_debug_points_init(cenv);

    return ret;
}
624d76d1650Saurel32 
6251bc22652SAndreas Färber static void kvm_sw_tlb_put(PowerPCCPU *cpu)
62693dd5e85SScott Wood {
6271bc22652SAndreas Färber     CPUPPCState *env = &cpu->env;
6281bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
62993dd5e85SScott Wood     struct kvm_dirty_tlb dirty_tlb;
63093dd5e85SScott Wood     unsigned char *bitmap;
63193dd5e85SScott Wood     int ret;
63293dd5e85SScott Wood 
63393dd5e85SScott Wood     if (!env->kvm_sw_tlb) {
63493dd5e85SScott Wood         return;
63593dd5e85SScott Wood     }
63693dd5e85SScott Wood 
63793dd5e85SScott Wood     bitmap = g_malloc((env->nb_tlb + 7) / 8);
63893dd5e85SScott Wood     memset(bitmap, 0xFF, (env->nb_tlb + 7) / 8);
63993dd5e85SScott Wood 
64093dd5e85SScott Wood     dirty_tlb.bitmap = (uintptr_t)bitmap;
64193dd5e85SScott Wood     dirty_tlb.num_dirty = env->nb_tlb;
64293dd5e85SScott Wood 
6431bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_DIRTY_TLB, &dirty_tlb);
64493dd5e85SScott Wood     if (ret) {
64593dd5e85SScott Wood         fprintf(stderr, "%s: KVM_DIRTY_TLB: %s\n",
64693dd5e85SScott Wood                 __func__, strerror(-ret));
64793dd5e85SScott Wood     }
64893dd5e85SScott Wood 
64993dd5e85SScott Wood     g_free(bitmap);
65093dd5e85SScott Wood }
65193dd5e85SScott Wood 
652d67d40eaSDavid Gibson static void kvm_get_one_spr(CPUState *cs, uint64_t id, int spr)
653d67d40eaSDavid Gibson {
654d67d40eaSDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
655d67d40eaSDavid Gibson     CPUPPCState *env = &cpu->env;
656d67d40eaSDavid Gibson     union {
657d67d40eaSDavid Gibson         uint32_t u32;
658d67d40eaSDavid Gibson         uint64_t u64;
659d67d40eaSDavid Gibson     } val;
660d67d40eaSDavid Gibson     struct kvm_one_reg reg = {
661d67d40eaSDavid Gibson         .id = id,
662d67d40eaSDavid Gibson         .addr = (uintptr_t) &val,
663d67d40eaSDavid Gibson     };
664d67d40eaSDavid Gibson     int ret;
665d67d40eaSDavid Gibson 
666d67d40eaSDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
667d67d40eaSDavid Gibson     if (ret != 0) {
668b36f100eSAlexey Kardashevskiy         trace_kvm_failed_spr_get(spr, strerror(errno));
669d67d40eaSDavid Gibson     } else {
670d67d40eaSDavid Gibson         switch (id & KVM_REG_SIZE_MASK) {
671d67d40eaSDavid Gibson         case KVM_REG_SIZE_U32:
672d67d40eaSDavid Gibson             env->spr[spr] = val.u32;
673d67d40eaSDavid Gibson             break;
674d67d40eaSDavid Gibson 
675d67d40eaSDavid Gibson         case KVM_REG_SIZE_U64:
676d67d40eaSDavid Gibson             env->spr[spr] = val.u64;
677d67d40eaSDavid Gibson             break;
678d67d40eaSDavid Gibson 
679d67d40eaSDavid Gibson         default:
680d67d40eaSDavid Gibson             /* Don't handle this size yet */
681d67d40eaSDavid Gibson             abort();
682d67d40eaSDavid Gibson         }
683d67d40eaSDavid Gibson     }
684d67d40eaSDavid Gibson }
685d67d40eaSDavid Gibson 
686d67d40eaSDavid Gibson static void kvm_put_one_spr(CPUState *cs, uint64_t id, int spr)
687d67d40eaSDavid Gibson {
688d67d40eaSDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
689d67d40eaSDavid Gibson     CPUPPCState *env = &cpu->env;
690d67d40eaSDavid Gibson     union {
691d67d40eaSDavid Gibson         uint32_t u32;
692d67d40eaSDavid Gibson         uint64_t u64;
693d67d40eaSDavid Gibson     } val;
694d67d40eaSDavid Gibson     struct kvm_one_reg reg = {
695d67d40eaSDavid Gibson         .id = id,
696d67d40eaSDavid Gibson         .addr = (uintptr_t) &val,
697d67d40eaSDavid Gibson     };
698d67d40eaSDavid Gibson     int ret;
699d67d40eaSDavid Gibson 
700d67d40eaSDavid Gibson     switch (id & KVM_REG_SIZE_MASK) {
701d67d40eaSDavid Gibson     case KVM_REG_SIZE_U32:
702d67d40eaSDavid Gibson         val.u32 = env->spr[spr];
703d67d40eaSDavid Gibson         break;
704d67d40eaSDavid Gibson 
705d67d40eaSDavid Gibson     case KVM_REG_SIZE_U64:
706d67d40eaSDavid Gibson         val.u64 = env->spr[spr];
707d67d40eaSDavid Gibson         break;
708d67d40eaSDavid Gibson 
709d67d40eaSDavid Gibson     default:
710d67d40eaSDavid Gibson         /* Don't handle this size yet */
711d67d40eaSDavid Gibson         abort();
712d67d40eaSDavid Gibson     }
713d67d40eaSDavid Gibson 
714d67d40eaSDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
715d67d40eaSDavid Gibson     if (ret != 0) {
716b36f100eSAlexey Kardashevskiy         trace_kvm_failed_spr_set(spr, strerror(errno));
717d67d40eaSDavid Gibson     }
718d67d40eaSDavid Gibson }
719d67d40eaSDavid Gibson 
/*
 * Write the floating point / VSX / Altivec register state into KVM.
 *
 * FP state (FPSCR, FPRs/VSRs) is synced only when the CPU implements
 * PPC_FLOAT; Altivec state (VSCR, VRs) only when it implements
 * PPC_ALTIVEC.  Returns 0 on success or the negative error from the
 * first failing KVM_SET_ONE_REG ioctl.
 */
static int kvm_put_fp(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    struct kvm_one_reg reg;
    int i;
    int ret;

    if (env->insns_flags & PPC_FLOAT) {
        uint64_t fpscr = env->fpscr;
        bool vsx = !!(env->insns_flags2 & PPC2_VSX);

        reg.id = KVM_REG_PPC_FPSCR;
        reg.addr = (uintptr_t)&fpscr;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            DPRINTF("Unable to set FPSCR to KVM: %s\n", strerror(errno));
            return ret;
        }

        for (i = 0; i < 32; i++) {
            uint64_t vsr[2];

            /* KVM expects the 128-bit VSR in host memory order: the FPR
             * doubleword occupies the first half on big-endian hosts and
             * the second half on little-endian ones. */
#ifdef HOST_WORDS_BIGENDIAN
            vsr[0] = float64_val(env->fpr[i]);
            vsr[1] = env->vsr[i];
#else
            vsr[0] = env->vsr[i];
            vsr[1] = float64_val(env->fpr[i]);
#endif
            reg.addr = (uintptr_t) &vsr;
            /* With VSX the full 128-bit VSR is written, otherwise just
             * the 64-bit FPR. */
            reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);

            ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
            if (ret < 0) {
                DPRINTF("Unable to set %s%d to KVM: %s\n", vsx ? "VSR" : "FPR",
                        i, strerror(errno));
                return ret;
            }
        }
    }

    if (env->insns_flags & PPC_ALTIVEC) {
        reg.id = KVM_REG_PPC_VSCR;
        reg.addr = (uintptr_t)&env->vscr;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            DPRINTF("Unable to set VSCR to KVM: %s\n", strerror(errno));
            return ret;
        }

        for (i = 0; i < 32; i++) {
            reg.id = KVM_REG_PPC_VR(i);
            reg.addr = (uintptr_t)&env->avr[i];
            ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
            if (ret < 0) {
                DPRINTF("Unable to set VR%d to KVM: %s\n", i, strerror(errno));
                return ret;
            }
        }
    }

    return 0;
}
78470b79849SDavid Gibson 
/*
 * Read the floating point / VSX / Altivec register state from KVM into
 * the CPU env.  Mirror image of kvm_put_fp(): FP state is fetched only
 * when the CPU implements PPC_FLOAT, Altivec state only with
 * PPC_ALTIVEC.  Returns 0 on success or the negative error from the
 * first failing KVM_GET_ONE_REG ioctl.
 */
static int kvm_get_fp(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    struct kvm_one_reg reg;
    int i;
    int ret;

    if (env->insns_flags & PPC_FLOAT) {
        uint64_t fpscr;
        bool vsx = !!(env->insns_flags2 & PPC2_VSX);

        reg.id = KVM_REG_PPC_FPSCR;
        reg.addr = (uintptr_t)&fpscr;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret < 0) {
            DPRINTF("Unable to get FPSCR from KVM: %s\n", strerror(errno));
            return ret;
        } else {
            env->fpscr = fpscr;
        }

        for (i = 0; i < 32; i++) {
            uint64_t vsr[2];

            reg.addr = (uintptr_t) &vsr;
            /* With VSX the full 128-bit VSR is read, otherwise just the
             * 64-bit FPR. */
            reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);

            ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
            if (ret < 0) {
                DPRINTF("Unable to get %s%d from KVM: %s\n",
                        vsx ? "VSR" : "FPR", i, strerror(errno));
                return ret;
            } else {
                /* The FPR doubleword sits in the first half of the VSR on
                 * big-endian hosts and in the second half on little-endian
                 * ones; see the matching #ifdef in kvm_put_fp(). */
#ifdef HOST_WORDS_BIGENDIAN
                env->fpr[i] = vsr[0];
                if (vsx) {
                    env->vsr[i] = vsr[1];
                }
#else
                env->fpr[i] = vsr[1];
                if (vsx) {
                    env->vsr[i] = vsr[0];
                }
#endif
            }
        }
    }

    if (env->insns_flags & PPC_ALTIVEC) {
        reg.id = KVM_REG_PPC_VSCR;
        reg.addr = (uintptr_t)&env->vscr;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret < 0) {
            DPRINTF("Unable to get VSCR from KVM: %s\n", strerror(errno));
            return ret;
        }

        for (i = 0; i < 32; i++) {
            reg.id = KVM_REG_PPC_VR(i);
            reg.addr = (uintptr_t)&env->avr[i];
            ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
            if (ret < 0) {
                DPRINTF("Unable to get VR%d from KVM: %s\n",
                        i, strerror(errno));
                return ret;
            }
        }
    }

    return 0;
}
85770b79849SDavid Gibson 
8589b00ea49SDavid Gibson #if defined(TARGET_PPC64)
/*
 * Fetch the para-virtualised VPA state (VPA address, SLB shadow and
 * dispatch trace log) from KVM into the CPU env.  Returns 0 on success
 * or the negative error from the first failing ioctl.
 */
static int kvm_get_vpa(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    struct kvm_one_reg reg;
    int ret;

    reg.id = KVM_REG_PPC_VPA_ADDR;
    reg.addr = (uintptr_t)&env->vpa_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret < 0) {
        DPRINTF("Unable to get VPA address from KVM: %s\n", strerror(errno));
        return ret;
    }

    /* KVM transfers addr and size as one combined register, so the size
     * field must sit immediately after the addr field in the env layout;
     * the assert pins that assumption. */
    assert((uintptr_t)&env->slb_shadow_size
           == ((uintptr_t)&env->slb_shadow_addr + 8));
    reg.id = KVM_REG_PPC_VPA_SLB;
    reg.addr = (uintptr_t)&env->slb_shadow_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret < 0) {
        DPRINTF("Unable to get SLB shadow state from KVM: %s\n",
                strerror(errno));
        return ret;
    }

    /* Same addr/size adjacency requirement for the dispatch trace log */
    assert((uintptr_t)&env->dtl_size == ((uintptr_t)&env->dtl_addr + 8));
    reg.id = KVM_REG_PPC_VPA_DTL;
    reg.addr = (uintptr_t)&env->dtl_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret < 0) {
        DPRINTF("Unable to get dispatch trace log state from KVM: %s\n",
                strerror(errno));
        return ret;
    }

    return 0;
}
8979b00ea49SDavid Gibson 
/*
 * Write the para-virtualised VPA state (VPA address, SLB shadow and
 * dispatch trace log) back into KVM, honouring the kernel's
 * registration-ordering rules.  Returns 0 on success or the negative
 * error from the first failing ioctl.
 */
static int kvm_put_vpa(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    struct kvm_one_reg reg;
    int ret;

    /* SLB shadow or DTL can't be registered unless a master VPA is
     * registered.  That means when restoring state, if a VPA *is*
     * registered, we need to set that up first.  If not, we need to
     * deregister the others before deregistering the master VPA */
    assert(env->vpa_addr || !(env->slb_shadow_addr || env->dtl_addr));

    if (env->vpa_addr) {
        /* Register the master VPA first so the dependent areas below
         * are accepted by the kernel. */
        reg.id = KVM_REG_PPC_VPA_ADDR;
        reg.addr = (uintptr_t)&env->vpa_addr;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno));
            return ret;
        }
    }

    /* addr and size are transferred as one combined register, so the
     * size field must immediately follow the addr field in env. */
    assert((uintptr_t)&env->slb_shadow_size
           == ((uintptr_t)&env->slb_shadow_addr + 8));
    reg.id = KVM_REG_PPC_VPA_SLB;
    reg.addr = (uintptr_t)&env->slb_shadow_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret < 0) {
        DPRINTF("Unable to set SLB shadow state to KVM: %s\n", strerror(errno));
        return ret;
    }

    assert((uintptr_t)&env->dtl_size == ((uintptr_t)&env->dtl_addr + 8));
    reg.id = KVM_REG_PPC_VPA_DTL;
    reg.addr = (uintptr_t)&env->dtl_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret < 0) {
        DPRINTF("Unable to set dispatch trace log state to KVM: %s\n",
                strerror(errno));
        return ret;
    }

    if (!env->vpa_addr) {
        /* No VPA registered: deregister the master VPA last, after the
         * dependent areas above have been cleared. */
        reg.id = KVM_REG_PPC_VPA_ADDR;
        reg.addr = (uintptr_t)&env->vpa_addr;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno));
            return ret;
        }
    }

    return 0;
}
9539b00ea49SDavid Gibson #endif /* TARGET_PPC64 */
9549b00ea49SDavid Gibson 
/*
 * Push the Book3S MMU state (PVR, SDR1, SLB on 64-bit, segment
 * registers and BATs) to KVM via KVM_SET_SREGS.
 * Returns the ioctl result (0 on success, negative errno on failure).
 */
int kvmppc_put_books_sregs(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    struct kvm_sregs sregs;
    int i;

    sregs.pvr = env->spr[SPR_PVR];

    sregs.u.s.sdr1 = env->spr[SPR_SDR1];

    /* Sync SLB */
#ifdef TARGET_PPC64
    for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
        sregs.u.s.ppc64.slb[i].slbe = env->slb[i].esid;
        /* A valid entry must carry its own index in the low bits of
         * the ESID, as the slbmte instruction would have written it. */
        if (env->slb[i].esid & SLB_ESID_V) {
            sregs.u.s.ppc64.slb[i].slbe |= i;
        }
        sregs.u.s.ppc64.slb[i].slbv = env->slb[i].vsid;
    }
#endif

    /* Sync SRs */
    for (i = 0; i < 16; i++) {
        sregs.u.s.ppc32.sr[i] = env->sr[i];
    }

    /* Sync BATs */
    for (i = 0; i < 8; i++) {
        /* Beware. We have to swap upper and lower bits here */
        sregs.u.s.ppc32.dbat[i] = ((uint64_t)env->DBAT[0][i] << 32)
            | env->DBAT[1][i];
        sregs.u.s.ppc32.ibat[i] = ((uint64_t)env->IBAT[0][i] << 32)
            | env->IBAT[1][i];
    }

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
}
992a7a00a72SDavid Gibson 
/*
 * Write the QEMU CPU state back into KVM.
 *
 * Always syncs the core register block, FP/vector state and (if dirty)
 * the software TLB; state that only changes on reset (sregs, HIOR) is
 * synced only when @level >= KVM_PUT_RESET_STATE.  SPRs, transactional
 * state, VPA and timebase offset are pushed via the ONE_REG interface
 * when the kernel supports it.  Returns 0 on success or a negative
 * errno from a failing mandatory step.
 */
int kvm_arch_put_registers(CPUState *cs, int level)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    struct kvm_regs regs;
    int ret;
    int i;

    /* Read the current register block first so fields we don't set
     * below keep their kernel-side values. */
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
    if (ret < 0) {
        return ret;
    }

    regs.ctr = env->ctr;
    regs.lr  = env->lr;
    regs.xer = cpu_read_xer(env);
    regs.msr = env->msr;
    regs.pc = env->nip;

    regs.srr0 = env->spr[SPR_SRR0];
    regs.srr1 = env->spr[SPR_SRR1];

    regs.sprg0 = env->spr[SPR_SPRG0];
    regs.sprg1 = env->spr[SPR_SPRG1];
    regs.sprg2 = env->spr[SPR_SPRG2];
    regs.sprg3 = env->spr[SPR_SPRG3];
    regs.sprg4 = env->spr[SPR_SPRG4];
    regs.sprg5 = env->spr[SPR_SPRG5];
    regs.sprg6 = env->spr[SPR_SPRG6];
    regs.sprg7 = env->spr[SPR_SPRG7];

    regs.pid = env->spr[SPR_BOOKE_PID];

    for (i = 0;i < 32; i++)
        regs.gpr[i] = env->gpr[i];

    /* Pack QEMU's eight 4-bit CR fields into the single 32-bit
     * kvm_regs.cr, CR0 in the most significant nibble. */
    regs.cr = 0;
    for (i = 0; i < 8; i++) {
        regs.cr |= (env->crf[i] & 15) << (4 * (7 - i));
    }

    ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
    if (ret < 0)
        return ret;

    kvm_put_fp(cs);

    if (env->tlb_dirty) {
        kvm_sw_tlb_put(cpu);
        env->tlb_dirty = false;
    }

    if (cap_segstate && (level >= KVM_PUT_RESET_STATE)) {
        ret = kvmppc_put_books_sregs(cpu);
        if (ret < 0) {
            return ret;
        }
    }

    if (cap_hior && (level >= KVM_PUT_RESET_STATE)) {
        kvm_put_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
    }

    if (cap_one_reg) {
        int i;

        /* We deliberately ignore errors here, for kernels which have
         * the ONE_REG calls, but don't support the specific
         * registers, there's a reasonable chance things will still
         * work, at least until we try to migrate. */
        for (i = 0; i < 1024; i++) {
            uint64_t id = env->spr_cb[i].one_reg_id;

            if (id != 0) {
                kvm_put_one_spr(cs, id, i);
            }
        }

#ifdef TARGET_PPC64
        /* Transaction active: sync the TM checkpointed registers too */
        if (msr_ts) {
            for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
                kvm_set_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
            }
            for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
                kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
            }
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
        }

        if (cap_papr) {
            /* VPA sync failure is non-fatal: warn and continue */
            if (kvm_put_vpa(cs) < 0) {
                DPRINTF("Warning: Unable to set VPA information to KVM\n");
            }
        }

        kvm_set_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
#endif /* TARGET_PPC64 */
    }

    return ret;
}
1103d76d1650Saurel32 
1104c371c2e3SBharat Bhushan static void kvm_sync_excp(CPUPPCState *env, int vector, int ivor)
1105c371c2e3SBharat Bhushan {
1106c371c2e3SBharat Bhushan      env->excp_vectors[vector] = env->spr[ivor] + env->spr[SPR_BOOKE_IVPR];
1107c371c2e3SBharat Bhushan }
1108c371c2e3SBharat Bhushan 
1109a7a00a72SDavid Gibson static int kvmppc_get_booke_sregs(PowerPCCPU *cpu)
1110d76d1650Saurel32 {
111120d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
1112ba5e5090SAlexander Graf     struct kvm_sregs sregs;
1113a7a00a72SDavid Gibson     int ret;
1114d76d1650Saurel32 
1115a7a00a72SDavid Gibson     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
111690dc8812SScott Wood     if (ret < 0) {
111790dc8812SScott Wood         return ret;
111890dc8812SScott Wood     }
111990dc8812SScott Wood 
112090dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_BASE) {
112190dc8812SScott Wood         env->spr[SPR_BOOKE_CSRR0] = sregs.u.e.csrr0;
112290dc8812SScott Wood         env->spr[SPR_BOOKE_CSRR1] = sregs.u.e.csrr1;
112390dc8812SScott Wood         env->spr[SPR_BOOKE_ESR] = sregs.u.e.esr;
112490dc8812SScott Wood         env->spr[SPR_BOOKE_DEAR] = sregs.u.e.dear;
112590dc8812SScott Wood         env->spr[SPR_BOOKE_MCSR] = sregs.u.e.mcsr;
112690dc8812SScott Wood         env->spr[SPR_BOOKE_TSR] = sregs.u.e.tsr;
112790dc8812SScott Wood         env->spr[SPR_BOOKE_TCR] = sregs.u.e.tcr;
112890dc8812SScott Wood         env->spr[SPR_DECR] = sregs.u.e.dec;
112990dc8812SScott Wood         env->spr[SPR_TBL] = sregs.u.e.tb & 0xffffffff;
113090dc8812SScott Wood         env->spr[SPR_TBU] = sregs.u.e.tb >> 32;
113190dc8812SScott Wood         env->spr[SPR_VRSAVE] = sregs.u.e.vrsave;
113290dc8812SScott Wood     }
113390dc8812SScott Wood 
113490dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_ARCH206) {
113590dc8812SScott Wood         env->spr[SPR_BOOKE_PIR] = sregs.u.e.pir;
113690dc8812SScott Wood         env->spr[SPR_BOOKE_MCSRR0] = sregs.u.e.mcsrr0;
113790dc8812SScott Wood         env->spr[SPR_BOOKE_MCSRR1] = sregs.u.e.mcsrr1;
113890dc8812SScott Wood         env->spr[SPR_BOOKE_DECAR] = sregs.u.e.decar;
113990dc8812SScott Wood         env->spr[SPR_BOOKE_IVPR] = sregs.u.e.ivpr;
114090dc8812SScott Wood     }
114190dc8812SScott Wood 
114290dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_64) {
114390dc8812SScott Wood         env->spr[SPR_BOOKE_EPCR] = sregs.u.e.epcr;
114490dc8812SScott Wood     }
114590dc8812SScott Wood 
114690dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_SPRG8) {
114790dc8812SScott Wood         env->spr[SPR_BOOKE_SPRG8] = sregs.u.e.sprg8;
114890dc8812SScott Wood     }
114990dc8812SScott Wood 
115090dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_IVOR) {
115190dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0];
1152c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_CRITICAL,  SPR_BOOKE_IVOR0);
115390dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1];
1154c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_MCHECK,  SPR_BOOKE_IVOR1);
115590dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2];
1156c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DSI,  SPR_BOOKE_IVOR2);
115790dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3];
1158c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_ISI,  SPR_BOOKE_IVOR3);
115990dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4];
1160c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_EXTERNAL,  SPR_BOOKE_IVOR4);
116190dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5];
1162c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_ALIGN,  SPR_BOOKE_IVOR5);
116390dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6];
1164c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_PROGRAM,  SPR_BOOKE_IVOR6);
116590dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7];
1166c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_FPU,  SPR_BOOKE_IVOR7);
116790dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8];
1168c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_SYSCALL,  SPR_BOOKE_IVOR8);
116990dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9];
1170c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_APU,  SPR_BOOKE_IVOR9);
117190dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10];
1172c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DECR,  SPR_BOOKE_IVOR10);
117390dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11];
1174c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_FIT,  SPR_BOOKE_IVOR11);
117590dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12];
1176c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_WDT,  SPR_BOOKE_IVOR12);
117790dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13];
1178c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DTLB,  SPR_BOOKE_IVOR13);
117990dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14];
1180c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_ITLB,  SPR_BOOKE_IVOR14);
118190dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15];
1182c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DEBUG,  SPR_BOOKE_IVOR15);
118390dc8812SScott Wood 
118490dc8812SScott Wood         if (sregs.u.e.features & KVM_SREGS_E_SPE) {
118590dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0];
1186c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_SPEU,  SPR_BOOKE_IVOR32);
118790dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1];
1188c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_EFPDI,  SPR_BOOKE_IVOR33);
118990dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2];
1190c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_EFPRI,  SPR_BOOKE_IVOR34);
119190dc8812SScott Wood         }
119290dc8812SScott Wood 
119390dc8812SScott Wood         if (sregs.u.e.features & KVM_SREGS_E_PM) {
119490dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3];
1195c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_EPERFM,  SPR_BOOKE_IVOR35);
119690dc8812SScott Wood         }
119790dc8812SScott Wood 
119890dc8812SScott Wood         if (sregs.u.e.features & KVM_SREGS_E_PC) {
119990dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4];
1200c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_DOORI,  SPR_BOOKE_IVOR36);
120190dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5];
1202c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_DOORCI, SPR_BOOKE_IVOR37);
120390dc8812SScott Wood         }
120490dc8812SScott Wood     }
120590dc8812SScott Wood 
120690dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_ARCH206_MMU) {
120790dc8812SScott Wood         env->spr[SPR_BOOKE_MAS0] = sregs.u.e.mas0;
120890dc8812SScott Wood         env->spr[SPR_BOOKE_MAS1] = sregs.u.e.mas1;
120990dc8812SScott Wood         env->spr[SPR_BOOKE_MAS2] = sregs.u.e.mas2;
121090dc8812SScott Wood         env->spr[SPR_BOOKE_MAS3] = sregs.u.e.mas7_3 & 0xffffffff;
121190dc8812SScott Wood         env->spr[SPR_BOOKE_MAS4] = sregs.u.e.mas4;
121290dc8812SScott Wood         env->spr[SPR_BOOKE_MAS6] = sregs.u.e.mas6;
121390dc8812SScott Wood         env->spr[SPR_BOOKE_MAS7] = sregs.u.e.mas7_3 >> 32;
121490dc8812SScott Wood         env->spr[SPR_MMUCFG] = sregs.u.e.mmucfg;
121590dc8812SScott Wood         env->spr[SPR_BOOKE_TLB0CFG] = sregs.u.e.tlbcfg[0];
121690dc8812SScott Wood         env->spr[SPR_BOOKE_TLB1CFG] = sregs.u.e.tlbcfg[1];
121790dc8812SScott Wood     }
121890dc8812SScott Wood 
121990dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_EXP) {
122090dc8812SScott Wood         env->spr[SPR_BOOKE_EPR] = sregs.u.e.epr;
122190dc8812SScott Wood     }
122290dc8812SScott Wood 
122390dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_PD) {
122490dc8812SScott Wood         env->spr[SPR_BOOKE_EPLC] = sregs.u.e.eplc;
122590dc8812SScott Wood         env->spr[SPR_BOOKE_EPSC] = sregs.u.e.epsc;
122690dc8812SScott Wood     }
122790dc8812SScott Wood 
122890dc8812SScott Wood     if (sregs.u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
122990dc8812SScott Wood         env->spr[SPR_E500_SVR] = sregs.u.e.impl.fsl.svr;
123090dc8812SScott Wood         env->spr[SPR_Exxx_MCAR] = sregs.u.e.impl.fsl.mcar;
123190dc8812SScott Wood         env->spr[SPR_HID0] = sregs.u.e.impl.fsl.hid0;
123290dc8812SScott Wood 
123390dc8812SScott Wood         if (sregs.u.e.impl.fsl.features & KVM_SREGS_E_FSL_PIDn) {
123490dc8812SScott Wood             env->spr[SPR_BOOKE_PID1] = sregs.u.e.impl.fsl.pid1;
123590dc8812SScott Wood             env->spr[SPR_BOOKE_PID2] = sregs.u.e.impl.fsl.pid2;
123690dc8812SScott Wood         }
123790dc8812SScott Wood     }
1238a7a00a72SDavid Gibson 
1239a7a00a72SDavid Gibson     return 0;
1240fafc0b6aSAlexander Graf }
124190dc8812SScott Wood 
/*
 * Fetch Book3S-style MMU/segment state from KVM via KVM_GET_SREGS and
 * mirror it into the QEMU-side CPU state: SDR1, the SLB (64-bit only),
 * the 16 segment registers and the 8 IBAT/DBAT pairs.
 *
 * Returns 0 on success, or the negative errno from the ioctl.
 */
static int kvmppc_get_books_sregs(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    struct kvm_sregs sregs;
    int ret;
    int i;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
    if (ret < 0) {
        return ret;
    }

    /* NOTE(review): SDR1 is only synced when no externally managed hash
     * table is in use — presumably the external table owns it; confirm. */
    if (!env->external_htab) {
        ppc_store_sdr1(env, sregs.u.s.sdr1);
    }

    /* Sync SLB */
#ifdef TARGET_PPC64
    /*
     * The packed SLB array we get from KVM_GET_SREGS only contains
     * information about valid entries. So we flush our internal copy
     * to get rid of stale ones, then put all valid SLB entries back
     * in.
     */
    memset(env->slb, 0, sizeof(env->slb));
    for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
        target_ulong rb = sregs.u.s.ppc64.slb[i].slbe;
        target_ulong rs = sregs.u.s.ppc64.slb[i].slbv;
        /*
         * Only restore valid entries
         */
        if (rb & SLB_ESID_V) {
            /* Low 12 bits of rb carry the SLB index; mask them out of
             * the ESID value passed down. */
            ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs);
        }
    }
#endif

    /* Sync SRs */
    for (i = 0; i < 16; i++) {
        env->sr[i] = sregs.u.s.ppc32.sr[i];
    }

    /* Sync BATs: KVM packs upper/lower BAT halves into one 64-bit word;
     * split them back into QEMU's [lower][upper] pairs. */
    for (i = 0; i < 8; i++) {
        env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff;
        env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32;
        env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff;
        env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32;
    }

    return 0;
}
1294a7a00a72SDavid Gibson 
1295a7a00a72SDavid Gibson int kvm_arch_get_registers(CPUState *cs)
1296a7a00a72SDavid Gibson {
1297a7a00a72SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
1298a7a00a72SDavid Gibson     CPUPPCState *env = &cpu->env;
1299a7a00a72SDavid Gibson     struct kvm_regs regs;
1300a7a00a72SDavid Gibson     uint32_t cr;
1301a7a00a72SDavid Gibson     int i, ret;
1302a7a00a72SDavid Gibson 
1303a7a00a72SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
1304a7a00a72SDavid Gibson     if (ret < 0)
1305a7a00a72SDavid Gibson         return ret;
1306a7a00a72SDavid Gibson 
1307a7a00a72SDavid Gibson     cr = regs.cr;
1308a7a00a72SDavid Gibson     for (i = 7; i >= 0; i--) {
1309a7a00a72SDavid Gibson         env->crf[i] = cr & 15;
1310a7a00a72SDavid Gibson         cr >>= 4;
1311a7a00a72SDavid Gibson     }
1312a7a00a72SDavid Gibson 
1313a7a00a72SDavid Gibson     env->ctr = regs.ctr;
1314a7a00a72SDavid Gibson     env->lr = regs.lr;
1315a7a00a72SDavid Gibson     cpu_write_xer(env, regs.xer);
1316a7a00a72SDavid Gibson     env->msr = regs.msr;
1317a7a00a72SDavid Gibson     env->nip = regs.pc;
1318a7a00a72SDavid Gibson 
1319a7a00a72SDavid Gibson     env->spr[SPR_SRR0] = regs.srr0;
1320a7a00a72SDavid Gibson     env->spr[SPR_SRR1] = regs.srr1;
1321a7a00a72SDavid Gibson 
1322a7a00a72SDavid Gibson     env->spr[SPR_SPRG0] = regs.sprg0;
1323a7a00a72SDavid Gibson     env->spr[SPR_SPRG1] = regs.sprg1;
1324a7a00a72SDavid Gibson     env->spr[SPR_SPRG2] = regs.sprg2;
1325a7a00a72SDavid Gibson     env->spr[SPR_SPRG3] = regs.sprg3;
1326a7a00a72SDavid Gibson     env->spr[SPR_SPRG4] = regs.sprg4;
1327a7a00a72SDavid Gibson     env->spr[SPR_SPRG5] = regs.sprg5;
1328a7a00a72SDavid Gibson     env->spr[SPR_SPRG6] = regs.sprg6;
1329a7a00a72SDavid Gibson     env->spr[SPR_SPRG7] = regs.sprg7;
1330a7a00a72SDavid Gibson 
1331a7a00a72SDavid Gibson     env->spr[SPR_BOOKE_PID] = regs.pid;
1332a7a00a72SDavid Gibson 
1333a7a00a72SDavid Gibson     for (i = 0;i < 32; i++)
1334a7a00a72SDavid Gibson         env->gpr[i] = regs.gpr[i];
1335a7a00a72SDavid Gibson 
1336a7a00a72SDavid Gibson     kvm_get_fp(cs);
1337a7a00a72SDavid Gibson 
1338a7a00a72SDavid Gibson     if (cap_booke_sregs) {
1339a7a00a72SDavid Gibson         ret = kvmppc_get_booke_sregs(cpu);
1340a7a00a72SDavid Gibson         if (ret < 0) {
1341a7a00a72SDavid Gibson             return ret;
1342a7a00a72SDavid Gibson         }
1343a7a00a72SDavid Gibson     }
1344a7a00a72SDavid Gibson 
1345a7a00a72SDavid Gibson     if (cap_segstate) {
1346a7a00a72SDavid Gibson         ret = kvmppc_get_books_sregs(cpu);
1347a7a00a72SDavid Gibson         if (ret < 0) {
1348a7a00a72SDavid Gibson             return ret;
1349a7a00a72SDavid Gibson         }
1350fafc0b6aSAlexander Graf     }
1351ba5e5090SAlexander Graf 
1352d67d40eaSDavid Gibson     if (cap_hior) {
1353d67d40eaSDavid Gibson         kvm_get_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
1354d67d40eaSDavid Gibson     }
1355d67d40eaSDavid Gibson 
1356d67d40eaSDavid Gibson     if (cap_one_reg) {
1357d67d40eaSDavid Gibson         int i;
1358d67d40eaSDavid Gibson 
1359d67d40eaSDavid Gibson         /* We deliberately ignore errors here, for kernels which have
1360d67d40eaSDavid Gibson          * the ONE_REG calls, but don't support the specific
1361d67d40eaSDavid Gibson          * registers, there's a reasonable chance things will still
1362d67d40eaSDavid Gibson          * work, at least until we try to migrate. */
1363d67d40eaSDavid Gibson         for (i = 0; i < 1024; i++) {
1364d67d40eaSDavid Gibson             uint64_t id = env->spr_cb[i].one_reg_id;
1365d67d40eaSDavid Gibson 
1366d67d40eaSDavid Gibson             if (id != 0) {
1367d67d40eaSDavid Gibson                 kvm_get_one_spr(cs, id, i);
1368d67d40eaSDavid Gibson             }
1369d67d40eaSDavid Gibson         }
13709b00ea49SDavid Gibson 
13719b00ea49SDavid Gibson #ifdef TARGET_PPC64
137280b3f79bSAlexey Kardashevskiy         if (msr_ts) {
137380b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
137480b3f79bSAlexey Kardashevskiy                 kvm_get_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
137580b3f79bSAlexey Kardashevskiy             }
137680b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
137780b3f79bSAlexey Kardashevskiy                 kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
137880b3f79bSAlexey Kardashevskiy             }
137980b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
138080b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
138180b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
138280b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
138380b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
138480b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
138580b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
138680b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
138780b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
138880b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
138980b3f79bSAlexey Kardashevskiy         }
139080b3f79bSAlexey Kardashevskiy 
13919b00ea49SDavid Gibson         if (cap_papr) {
13929b00ea49SDavid Gibson             if (kvm_get_vpa(cs) < 0) {
1393da56ff91SPeter Maydell                 DPRINTF("Warning: Unable to get VPA information from KVM\n");
13949b00ea49SDavid Gibson             }
13959b00ea49SDavid Gibson         }
139698a8b524SAlexey Kardashevskiy 
139798a8b524SAlexey Kardashevskiy         kvm_get_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
13989b00ea49SDavid Gibson #endif
1399d67d40eaSDavid Gibson     }
1400d67d40eaSDavid Gibson 
1401d76d1650Saurel32     return 0;
1402d76d1650Saurel32 }
1403d76d1650Saurel32 
14041bc22652SAndreas Färber int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
1405fc87e185SAlexander Graf {
1406fc87e185SAlexander Graf     unsigned virq = level ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;
1407fc87e185SAlexander Graf 
1408fc87e185SAlexander Graf     if (irq != PPC_INTERRUPT_EXT) {
1409fc87e185SAlexander Graf         return 0;
1410fc87e185SAlexander Graf     }
1411fc87e185SAlexander Graf 
1412fc87e185SAlexander Graf     if (!kvm_enabled() || !cap_interrupt_unset || !cap_interrupt_level) {
1413fc87e185SAlexander Graf         return 0;
1414fc87e185SAlexander Graf     }
1415fc87e185SAlexander Graf 
14161bc22652SAndreas Färber     kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq);
1417fc87e185SAlexander Graf 
1418fc87e185SAlexander Graf     return 0;
1419fc87e185SAlexander Graf }
1420fc87e185SAlexander Graf 
142116415335SAlexander Graf #if defined(TARGET_PPCEMB)
142216415335SAlexander Graf #define PPC_INPUT_INT PPC40x_INPUT_INT
142316415335SAlexander Graf #elif defined(TARGET_PPC64)
142416415335SAlexander Graf #define PPC_INPUT_INT PPC970_INPUT_INT
142516415335SAlexander Graf #else
142616415335SAlexander Graf #define PPC_INPUT_INT PPC6xx_INPUT_INT
142716415335SAlexander Graf #endif
142816415335SAlexander Graf 
/*
 * Hook run just before entering KVM_RUN.  When the kernel cannot track
 * interrupt levels itself (!cap_interrupt_level), inject a pending
 * external interrupt manually if the vcpu is ready for it.
 */
void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    int r;
    unsigned irq;

    /* The injection path below touches global state (timer, ioctl);
     * hold the iothread lock for the whole function. */
    qemu_mutex_lock_iothread();

    /* PowerPC QEMU tracks the various core input pins (interrupt, critical
     * interrupt, reset, etc) in PPC-specific env->irq_input_state. */
    if (!cap_interrupt_level &&
        run->ready_for_interrupt_injection &&
        (cs->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env->irq_input_state & (1<<PPC_INPUT_INT)))
    {
        /* For now KVM disregards the 'irq' argument. However, in the
         * future KVM could cache it in-kernel to avoid a heavyweight exit
         * when reading the UIC.
         */
        irq = KVM_INTERRUPT_SET;

        DPRINTF("injected interrupt %d\n", irq);
        r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &irq);
        if (r < 0) {
            printf("cpu %d fail inject %x\n", cs->cpu_index, irq);
        }

        /* Always wake up soon in case the interrupt was level based */
        timer_mod(idle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                       (NANOSECONDS_PER_SECOND / 50));
    }

    /* We don't know if there are more interrupts pending after this. However,
     * the guest will return to userspace in the course of handling this one
     * anyways, so we will get a chance to deliver the rest. */

    qemu_mutex_unlock_iothread();
}
1468d76d1650Saurel32 
MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    /* No PPC-specific post-run processing; memory transaction
     * attributes are left unspecified. */
    return MEMTXATTRS_UNSPECIFIED;
}
1473d76d1650Saurel32 
int kvm_arch_process_async_events(CPUState *cs)
{
    /* Nothing PPC-specific to process; the only state reported back
     * is whether this vcpu is currently halted. */
    return cs->halted;
}
14780af691d7SMarcelo Tosatti 
1479259186a7SAndreas Färber static int kvmppc_handle_halt(PowerPCCPU *cpu)
1480d76d1650Saurel32 {
1481259186a7SAndreas Färber     CPUState *cs = CPU(cpu);
1482259186a7SAndreas Färber     CPUPPCState *env = &cpu->env;
1483259186a7SAndreas Färber 
1484259186a7SAndreas Färber     if (!(cs->interrupt_request & CPU_INTERRUPT_HARD) && (msr_ee)) {
1485259186a7SAndreas Färber         cs->halted = 1;
148627103424SAndreas Färber         cs->exception_index = EXCP_HLT;
1487d76d1650Saurel32     }
1488d76d1650Saurel32 
1489bb4ea393SJan Kiszka     return 0;
1490d76d1650Saurel32 }
1491d76d1650Saurel32 
1492d76d1650Saurel32 /* map dcr access to existing qemu dcr emulation */
14931328c2bfSAndreas Färber static int kvmppc_handle_dcr_read(CPUPPCState *env, uint32_t dcrn, uint32_t *data)
1494d76d1650Saurel32 {
1495d76d1650Saurel32     if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0)
1496d76d1650Saurel32         fprintf(stderr, "Read to unhandled DCR (0x%x)\n", dcrn);
1497d76d1650Saurel32 
1498bb4ea393SJan Kiszka     return 0;
1499d76d1650Saurel32 }
1500d76d1650Saurel32 
15011328c2bfSAndreas Färber static int kvmppc_handle_dcr_write(CPUPPCState *env, uint32_t dcrn, uint32_t data)
1502d76d1650Saurel32 {
1503d76d1650Saurel32     if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0)
1504d76d1650Saurel32         fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);
1505d76d1650Saurel32 
1506bb4ea393SJan Kiszka     return 0;
1507d76d1650Saurel32 }
1508d76d1650Saurel32 
15098a0548f9SBharat Bhushan int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
15108a0548f9SBharat Bhushan {
15118a0548f9SBharat Bhushan     /* Mixed endian case is not handled */
15128a0548f9SBharat Bhushan     uint32_t sc = debug_inst_opcode;
15138a0548f9SBharat Bhushan 
15148a0548f9SBharat Bhushan     if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
15158a0548f9SBharat Bhushan                             sizeof(sc), 0) ||
15168a0548f9SBharat Bhushan         cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 1)) {
15178a0548f9SBharat Bhushan         return -EINVAL;
15188a0548f9SBharat Bhushan     }
15198a0548f9SBharat Bhushan 
15208a0548f9SBharat Bhushan     return 0;
15218a0548f9SBharat Bhushan }
15228a0548f9SBharat Bhushan 
15238a0548f9SBharat Bhushan int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
15248a0548f9SBharat Bhushan {
15258a0548f9SBharat Bhushan     uint32_t sc;
15268a0548f9SBharat Bhushan 
15278a0548f9SBharat Bhushan     if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 0) ||
15288a0548f9SBharat Bhushan         sc != debug_inst_opcode ||
15298a0548f9SBharat Bhushan         cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
15308a0548f9SBharat Bhushan                             sizeof(sc), 1)) {
15318a0548f9SBharat Bhushan         return -EINVAL;
15328a0548f9SBharat Bhushan     }
15338a0548f9SBharat Bhushan 
15348a0548f9SBharat Bhushan     return 0;
15358a0548f9SBharat Bhushan }
15368a0548f9SBharat Bhushan 
153788365d17SBharat Bhushan static int find_hw_breakpoint(target_ulong addr, int type)
153888365d17SBharat Bhushan {
153988365d17SBharat Bhushan     int n;
154088365d17SBharat Bhushan 
154188365d17SBharat Bhushan     assert((nb_hw_breakpoint + nb_hw_watchpoint)
154288365d17SBharat Bhushan            <= ARRAY_SIZE(hw_debug_points));
154388365d17SBharat Bhushan 
154488365d17SBharat Bhushan     for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
154588365d17SBharat Bhushan         if (hw_debug_points[n].addr == addr &&
154688365d17SBharat Bhushan              hw_debug_points[n].type == type) {
154788365d17SBharat Bhushan             return n;
154888365d17SBharat Bhushan         }
154988365d17SBharat Bhushan     }
155088365d17SBharat Bhushan 
155188365d17SBharat Bhushan     return -1;
155288365d17SBharat Bhushan }
155388365d17SBharat Bhushan 
155488365d17SBharat Bhushan static int find_hw_watchpoint(target_ulong addr, int *flag)
155588365d17SBharat Bhushan {
155688365d17SBharat Bhushan     int n;
155788365d17SBharat Bhushan 
155888365d17SBharat Bhushan     n = find_hw_breakpoint(addr, GDB_WATCHPOINT_ACCESS);
155988365d17SBharat Bhushan     if (n >= 0) {
156088365d17SBharat Bhushan         *flag = BP_MEM_ACCESS;
156188365d17SBharat Bhushan         return n;
156288365d17SBharat Bhushan     }
156388365d17SBharat Bhushan 
156488365d17SBharat Bhushan     n = find_hw_breakpoint(addr, GDB_WATCHPOINT_WRITE);
156588365d17SBharat Bhushan     if (n >= 0) {
156688365d17SBharat Bhushan         *flag = BP_MEM_WRITE;
156788365d17SBharat Bhushan         return n;
156888365d17SBharat Bhushan     }
156988365d17SBharat Bhushan 
157088365d17SBharat Bhushan     n = find_hw_breakpoint(addr, GDB_WATCHPOINT_READ);
157188365d17SBharat Bhushan     if (n >= 0) {
157288365d17SBharat Bhushan         *flag = BP_MEM_READ;
157388365d17SBharat Bhushan         return n;
157488365d17SBharat Bhushan     }
157588365d17SBharat Bhushan 
157688365d17SBharat Bhushan     return -1;
157788365d17SBharat Bhushan }
157888365d17SBharat Bhushan 
157988365d17SBharat Bhushan int kvm_arch_insert_hw_breakpoint(target_ulong addr,
158088365d17SBharat Bhushan                                   target_ulong len, int type)
158188365d17SBharat Bhushan {
158288365d17SBharat Bhushan     if ((nb_hw_breakpoint + nb_hw_watchpoint) >= ARRAY_SIZE(hw_debug_points)) {
158388365d17SBharat Bhushan         return -ENOBUFS;
158488365d17SBharat Bhushan     }
158588365d17SBharat Bhushan 
158688365d17SBharat Bhushan     hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].addr = addr;
158788365d17SBharat Bhushan     hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].type = type;
158888365d17SBharat Bhushan 
158988365d17SBharat Bhushan     switch (type) {
159088365d17SBharat Bhushan     case GDB_BREAKPOINT_HW:
159188365d17SBharat Bhushan         if (nb_hw_breakpoint >= max_hw_breakpoint) {
159288365d17SBharat Bhushan             return -ENOBUFS;
159388365d17SBharat Bhushan         }
159488365d17SBharat Bhushan 
159588365d17SBharat Bhushan         if (find_hw_breakpoint(addr, type) >= 0) {
159688365d17SBharat Bhushan             return -EEXIST;
159788365d17SBharat Bhushan         }
159888365d17SBharat Bhushan 
159988365d17SBharat Bhushan         nb_hw_breakpoint++;
160088365d17SBharat Bhushan         break;
160188365d17SBharat Bhushan 
160288365d17SBharat Bhushan     case GDB_WATCHPOINT_WRITE:
160388365d17SBharat Bhushan     case GDB_WATCHPOINT_READ:
160488365d17SBharat Bhushan     case GDB_WATCHPOINT_ACCESS:
160588365d17SBharat Bhushan         if (nb_hw_watchpoint >= max_hw_watchpoint) {
160688365d17SBharat Bhushan             return -ENOBUFS;
160788365d17SBharat Bhushan         }
160888365d17SBharat Bhushan 
160988365d17SBharat Bhushan         if (find_hw_breakpoint(addr, type) >= 0) {
161088365d17SBharat Bhushan             return -EEXIST;
161188365d17SBharat Bhushan         }
161288365d17SBharat Bhushan 
161388365d17SBharat Bhushan         nb_hw_watchpoint++;
161488365d17SBharat Bhushan         break;
161588365d17SBharat Bhushan 
161688365d17SBharat Bhushan     default:
161788365d17SBharat Bhushan         return -ENOSYS;
161888365d17SBharat Bhushan     }
161988365d17SBharat Bhushan 
162088365d17SBharat Bhushan     return 0;
162188365d17SBharat Bhushan }
162288365d17SBharat Bhushan 
162388365d17SBharat Bhushan int kvm_arch_remove_hw_breakpoint(target_ulong addr,
162488365d17SBharat Bhushan                                   target_ulong len, int type)
162588365d17SBharat Bhushan {
162688365d17SBharat Bhushan     int n;
162788365d17SBharat Bhushan 
162888365d17SBharat Bhushan     n = find_hw_breakpoint(addr, type);
162988365d17SBharat Bhushan     if (n < 0) {
163088365d17SBharat Bhushan         return -ENOENT;
163188365d17SBharat Bhushan     }
163288365d17SBharat Bhushan 
163388365d17SBharat Bhushan     switch (type) {
163488365d17SBharat Bhushan     case GDB_BREAKPOINT_HW:
163588365d17SBharat Bhushan         nb_hw_breakpoint--;
163688365d17SBharat Bhushan         break;
163788365d17SBharat Bhushan 
163888365d17SBharat Bhushan     case GDB_WATCHPOINT_WRITE:
163988365d17SBharat Bhushan     case GDB_WATCHPOINT_READ:
164088365d17SBharat Bhushan     case GDB_WATCHPOINT_ACCESS:
164188365d17SBharat Bhushan         nb_hw_watchpoint--;
164288365d17SBharat Bhushan         break;
164388365d17SBharat Bhushan 
164488365d17SBharat Bhushan     default:
164588365d17SBharat Bhushan         return -ENOSYS;
164688365d17SBharat Bhushan     }
164788365d17SBharat Bhushan     hw_debug_points[n] = hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint];
164888365d17SBharat Bhushan 
164988365d17SBharat Bhushan     return 0;
165088365d17SBharat Bhushan }
165188365d17SBharat Bhushan 
165288365d17SBharat Bhushan void kvm_arch_remove_all_hw_breakpoints(void)
165388365d17SBharat Bhushan {
165488365d17SBharat Bhushan     nb_hw_breakpoint = nb_hw_watchpoint = 0;
165588365d17SBharat Bhushan }
165688365d17SBharat Bhushan 
16578a0548f9SBharat Bhushan void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
16588a0548f9SBharat Bhushan {
165988365d17SBharat Bhushan     int n;
166088365d17SBharat Bhushan 
16618a0548f9SBharat Bhushan     /* Software Breakpoint updates */
16628a0548f9SBharat Bhushan     if (kvm_sw_breakpoints_active(cs)) {
16638a0548f9SBharat Bhushan         dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
16648a0548f9SBharat Bhushan     }
166588365d17SBharat Bhushan 
166688365d17SBharat Bhushan     assert((nb_hw_breakpoint + nb_hw_watchpoint)
166788365d17SBharat Bhushan            <= ARRAY_SIZE(hw_debug_points));
166888365d17SBharat Bhushan     assert((nb_hw_breakpoint + nb_hw_watchpoint) <= ARRAY_SIZE(dbg->arch.bp));
166988365d17SBharat Bhushan 
167088365d17SBharat Bhushan     if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
167188365d17SBharat Bhushan         dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
167288365d17SBharat Bhushan         memset(dbg->arch.bp, 0, sizeof(dbg->arch.bp));
167388365d17SBharat Bhushan         for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
167488365d17SBharat Bhushan             switch (hw_debug_points[n].type) {
167588365d17SBharat Bhushan             case GDB_BREAKPOINT_HW:
167688365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_BREAKPOINT;
167788365d17SBharat Bhushan                 break;
167888365d17SBharat Bhushan             case GDB_WATCHPOINT_WRITE:
167988365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE;
168088365d17SBharat Bhushan                 break;
168188365d17SBharat Bhushan             case GDB_WATCHPOINT_READ:
168288365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_READ;
168388365d17SBharat Bhushan                 break;
168488365d17SBharat Bhushan             case GDB_WATCHPOINT_ACCESS:
168588365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE |
168688365d17SBharat Bhushan                                         KVMPPC_DEBUG_WATCH_READ;
168788365d17SBharat Bhushan                 break;
168888365d17SBharat Bhushan             default:
168988365d17SBharat Bhushan                 cpu_abort(cs, "Unsupported breakpoint type\n");
169088365d17SBharat Bhushan             }
169188365d17SBharat Bhushan             dbg->arch.bp[n].addr = hw_debug_points[n].addr;
169288365d17SBharat Bhushan         }
169388365d17SBharat Bhushan     }
16948a0548f9SBharat Bhushan }
16958a0548f9SBharat Bhushan 
16968a0548f9SBharat Bhushan static int kvm_handle_debug(PowerPCCPU *cpu, struct kvm_run *run)
16978a0548f9SBharat Bhushan {
16988a0548f9SBharat Bhushan     CPUState *cs = CPU(cpu);
16998a0548f9SBharat Bhushan     CPUPPCState *env = &cpu->env;
17008a0548f9SBharat Bhushan     struct kvm_debug_exit_arch *arch_info = &run->debug.arch;
17018a0548f9SBharat Bhushan     int handle = 0;
170288365d17SBharat Bhushan     int n;
170388365d17SBharat Bhushan     int flag = 0;
17048a0548f9SBharat Bhushan 
170588365d17SBharat Bhushan     if (cs->singlestep_enabled) {
170688365d17SBharat Bhushan         handle = 1;
170788365d17SBharat Bhushan     } else if (arch_info->status) {
170888365d17SBharat Bhushan         if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
170988365d17SBharat Bhushan             if (arch_info->status & KVMPPC_DEBUG_BREAKPOINT) {
171088365d17SBharat Bhushan                 n = find_hw_breakpoint(arch_info->address, GDB_BREAKPOINT_HW);
171188365d17SBharat Bhushan                 if (n >= 0) {
171288365d17SBharat Bhushan                     handle = 1;
171388365d17SBharat Bhushan                 }
171488365d17SBharat Bhushan             } else if (arch_info->status & (KVMPPC_DEBUG_WATCH_READ |
171588365d17SBharat Bhushan                                             KVMPPC_DEBUG_WATCH_WRITE)) {
171688365d17SBharat Bhushan                 n = find_hw_watchpoint(arch_info->address,  &flag);
171788365d17SBharat Bhushan                 if (n >= 0) {
171888365d17SBharat Bhushan                     handle = 1;
171988365d17SBharat Bhushan                     cs->watchpoint_hit = &hw_watchpoint;
172088365d17SBharat Bhushan                     hw_watchpoint.vaddr = hw_debug_points[n].addr;
172188365d17SBharat Bhushan                     hw_watchpoint.flags = flag;
172288365d17SBharat Bhushan                 }
172388365d17SBharat Bhushan             }
172488365d17SBharat Bhushan         }
172588365d17SBharat Bhushan     } else if (kvm_find_sw_breakpoint(cs, arch_info->address)) {
17268a0548f9SBharat Bhushan         handle = 1;
17278a0548f9SBharat Bhushan     } else {
17288a0548f9SBharat Bhushan         /* QEMU is not able to handle debug exception, so inject
17298a0548f9SBharat Bhushan          * program exception to guest;
17308a0548f9SBharat Bhushan          * Yes program exception NOT debug exception !!
173188365d17SBharat Bhushan          * When QEMU is using debug resources then debug exception must
173288365d17SBharat Bhushan          * be always set. To achieve this we set MSR_DE and also set
173388365d17SBharat Bhushan          * MSRP_DEP so guest cannot change MSR_DE.
173488365d17SBharat Bhushan          * When emulating debug resource for guest we want guest
173588365d17SBharat Bhushan          * to control MSR_DE (enable/disable debug interrupt on need).
173688365d17SBharat Bhushan          * Supporting both configurations are NOT possible.
173788365d17SBharat Bhushan          * So the result is that we cannot share debug resources
173888365d17SBharat Bhushan          * between QEMU and Guest on BOOKE architecture.
173988365d17SBharat Bhushan          * In the current design QEMU gets the priority over guest,
174088365d17SBharat Bhushan          * this means that if QEMU is using debug resources then guest
174188365d17SBharat Bhushan          * cannot use them;
17428a0548f9SBharat Bhushan          * For software breakpoint QEMU uses a privileged instruction;
17438a0548f9SBharat Bhushan          * So there cannot be any reason that we are here for guest
17448a0548f9SBharat Bhushan          * set debug exception, only possibility is guest executed a
17458a0548f9SBharat Bhushan          * privileged / illegal instruction and that's why we are
17468a0548f9SBharat Bhushan          * injecting a program interrupt.
17478a0548f9SBharat Bhushan          */
17488a0548f9SBharat Bhushan 
17498a0548f9SBharat Bhushan         cpu_synchronize_state(cs);
17508a0548f9SBharat Bhushan         /* env->nip is PC, so increment this by 4 to use
17518a0548f9SBharat Bhushan          * ppc_cpu_do_interrupt(), which set srr0 = env->nip - 4.
17528a0548f9SBharat Bhushan          */
17538a0548f9SBharat Bhushan         env->nip += 4;
17548a0548f9SBharat Bhushan         cs->exception_index = POWERPC_EXCP_PROGRAM;
17558a0548f9SBharat Bhushan         env->error_code = POWERPC_EXCP_INVAL;
17568a0548f9SBharat Bhushan         ppc_cpu_do_interrupt(cs);
17578a0548f9SBharat Bhushan     }
17588a0548f9SBharat Bhushan 
17598a0548f9SBharat Bhushan     return handle;
17608a0548f9SBharat Bhushan }
17618a0548f9SBharat Bhushan 
176220d695a9SAndreas Färber int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
1763d76d1650Saurel32 {
176420d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
176520d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
1766bb4ea393SJan Kiszka     int ret;
1767d76d1650Saurel32 
17684b8523eeSJan Kiszka     qemu_mutex_lock_iothread();
17694b8523eeSJan Kiszka 
1770d76d1650Saurel32     switch (run->exit_reason) {
1771d76d1650Saurel32     case KVM_EXIT_DCR:
1772d76d1650Saurel32         if (run->dcr.is_write) {
1773da56ff91SPeter Maydell             DPRINTF("handle dcr write\n");
1774d76d1650Saurel32             ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data);
1775d76d1650Saurel32         } else {
1776da56ff91SPeter Maydell             DPRINTF("handle dcr read\n");
1777d76d1650Saurel32             ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data);
1778d76d1650Saurel32         }
1779d76d1650Saurel32         break;
1780d76d1650Saurel32     case KVM_EXIT_HLT:
1781da56ff91SPeter Maydell         DPRINTF("handle halt\n");
1782259186a7SAndreas Färber         ret = kvmppc_handle_halt(cpu);
1783d76d1650Saurel32         break;
1784c6304a4aSDavid Gibson #if defined(TARGET_PPC64)
1785f61b4bedSAlexander Graf     case KVM_EXIT_PAPR_HCALL:
1786da56ff91SPeter Maydell         DPRINTF("handle PAPR hypercall\n");
178720d695a9SAndreas Färber         run->papr_hcall.ret = spapr_hypercall(cpu,
1788aa100fa4SAndreas Färber                                               run->papr_hcall.nr,
1789f61b4bedSAlexander Graf                                               run->papr_hcall.args);
179078e8fde2SDavid Gibson         ret = 0;
1791f61b4bedSAlexander Graf         break;
1792f61b4bedSAlexander Graf #endif
17935b95b8b9SAlexander Graf     case KVM_EXIT_EPR:
1794da56ff91SPeter Maydell         DPRINTF("handle epr\n");
1795933b19eaSAlexander Graf         run->epr.epr = ldl_phys(cs->as, env->mpic_iack);
17965b95b8b9SAlexander Graf         ret = 0;
17975b95b8b9SAlexander Graf         break;
179831f2cb8fSBharat Bhushan     case KVM_EXIT_WATCHDOG:
1799da56ff91SPeter Maydell         DPRINTF("handle watchdog expiry\n");
180031f2cb8fSBharat Bhushan         watchdog_perform_action();
180131f2cb8fSBharat Bhushan         ret = 0;
180231f2cb8fSBharat Bhushan         break;
180331f2cb8fSBharat Bhushan 
18048a0548f9SBharat Bhushan     case KVM_EXIT_DEBUG:
18058a0548f9SBharat Bhushan         DPRINTF("handle debug exception\n");
18068a0548f9SBharat Bhushan         if (kvm_handle_debug(cpu, run)) {
18078a0548f9SBharat Bhushan             ret = EXCP_DEBUG;
18088a0548f9SBharat Bhushan             break;
18098a0548f9SBharat Bhushan         }
18108a0548f9SBharat Bhushan         /* re-enter, this exception was guest-internal */
18118a0548f9SBharat Bhushan         ret = 0;
18128a0548f9SBharat Bhushan         break;
18138a0548f9SBharat Bhushan 
181473aaec4aSJan Kiszka     default:
181573aaec4aSJan Kiszka         fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
181673aaec4aSJan Kiszka         ret = -1;
181773aaec4aSJan Kiszka         break;
1818d76d1650Saurel32     }
1819d76d1650Saurel32 
18204b8523eeSJan Kiszka     qemu_mutex_unlock_iothread();
1821d76d1650Saurel32     return ret;
1822d76d1650Saurel32 }
1823d76d1650Saurel32 
182431f2cb8fSBharat Bhushan int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
182531f2cb8fSBharat Bhushan {
182631f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
182731f2cb8fSBharat Bhushan     uint32_t bits = tsr_bits;
182831f2cb8fSBharat Bhushan     struct kvm_one_reg reg = {
182931f2cb8fSBharat Bhushan         .id = KVM_REG_PPC_OR_TSR,
183031f2cb8fSBharat Bhushan         .addr = (uintptr_t) &bits,
183131f2cb8fSBharat Bhushan     };
183231f2cb8fSBharat Bhushan 
183331f2cb8fSBharat Bhushan     return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
183431f2cb8fSBharat Bhushan }
183531f2cb8fSBharat Bhushan 
183631f2cb8fSBharat Bhushan int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
183731f2cb8fSBharat Bhushan {
183831f2cb8fSBharat Bhushan 
183931f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
184031f2cb8fSBharat Bhushan     uint32_t bits = tsr_bits;
184131f2cb8fSBharat Bhushan     struct kvm_one_reg reg = {
184231f2cb8fSBharat Bhushan         .id = KVM_REG_PPC_CLEAR_TSR,
184331f2cb8fSBharat Bhushan         .addr = (uintptr_t) &bits,
184431f2cb8fSBharat Bhushan     };
184531f2cb8fSBharat Bhushan 
184631f2cb8fSBharat Bhushan     return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
184731f2cb8fSBharat Bhushan }
184831f2cb8fSBharat Bhushan 
184931f2cb8fSBharat Bhushan int kvmppc_set_tcr(PowerPCCPU *cpu)
185031f2cb8fSBharat Bhushan {
185131f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
185231f2cb8fSBharat Bhushan     CPUPPCState *env = &cpu->env;
185331f2cb8fSBharat Bhushan     uint32_t tcr = env->spr[SPR_BOOKE_TCR];
185431f2cb8fSBharat Bhushan 
185531f2cb8fSBharat Bhushan     struct kvm_one_reg reg = {
185631f2cb8fSBharat Bhushan         .id = KVM_REG_PPC_TCR,
185731f2cb8fSBharat Bhushan         .addr = (uintptr_t) &tcr,
185831f2cb8fSBharat Bhushan     };
185931f2cb8fSBharat Bhushan 
186031f2cb8fSBharat Bhushan     return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
186131f2cb8fSBharat Bhushan }
186231f2cb8fSBharat Bhushan 
186331f2cb8fSBharat Bhushan int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu)
186431f2cb8fSBharat Bhushan {
186531f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
186631f2cb8fSBharat Bhushan     int ret;
186731f2cb8fSBharat Bhushan 
186831f2cb8fSBharat Bhushan     if (!kvm_enabled()) {
186931f2cb8fSBharat Bhushan         return -1;
187031f2cb8fSBharat Bhushan     }
187131f2cb8fSBharat Bhushan 
187231f2cb8fSBharat Bhushan     if (!cap_ppc_watchdog) {
187331f2cb8fSBharat Bhushan         printf("warning: KVM does not support watchdog");
187431f2cb8fSBharat Bhushan         return -1;
187531f2cb8fSBharat Bhushan     }
187631f2cb8fSBharat Bhushan 
187748add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_BOOKE_WATCHDOG, 0);
187831f2cb8fSBharat Bhushan     if (ret < 0) {
187931f2cb8fSBharat Bhushan         fprintf(stderr, "%s: couldn't enable KVM_CAP_PPC_BOOKE_WATCHDOG: %s\n",
188031f2cb8fSBharat Bhushan                 __func__, strerror(-ret));
188131f2cb8fSBharat Bhushan         return ret;
188231f2cb8fSBharat Bhushan     }
188331f2cb8fSBharat Bhushan 
188431f2cb8fSBharat Bhushan     return ret;
188531f2cb8fSBharat Bhushan }
188631f2cb8fSBharat Bhushan 
1887dc333cd6SAlexander Graf static int read_cpuinfo(const char *field, char *value, int len)
1888dc333cd6SAlexander Graf {
1889dc333cd6SAlexander Graf     FILE *f;
1890dc333cd6SAlexander Graf     int ret = -1;
1891dc333cd6SAlexander Graf     int field_len = strlen(field);
1892dc333cd6SAlexander Graf     char line[512];
1893dc333cd6SAlexander Graf 
1894dc333cd6SAlexander Graf     f = fopen("/proc/cpuinfo", "r");
1895dc333cd6SAlexander Graf     if (!f) {
1896dc333cd6SAlexander Graf         return -1;
1897dc333cd6SAlexander Graf     }
1898dc333cd6SAlexander Graf 
1899dc333cd6SAlexander Graf     do {
1900dc333cd6SAlexander Graf         if (!fgets(line, sizeof(line), f)) {
1901dc333cd6SAlexander Graf             break;
1902dc333cd6SAlexander Graf         }
1903dc333cd6SAlexander Graf         if (!strncmp(line, field, field_len)) {
1904ae215068SJim Meyering             pstrcpy(value, len, line);
1905dc333cd6SAlexander Graf             ret = 0;
1906dc333cd6SAlexander Graf             break;
1907dc333cd6SAlexander Graf         }
1908dc333cd6SAlexander Graf     } while(*line);
1909dc333cd6SAlexander Graf 
1910dc333cd6SAlexander Graf     fclose(f);
1911dc333cd6SAlexander Graf 
1912dc333cd6SAlexander Graf     return ret;
1913dc333cd6SAlexander Graf }
1914dc333cd6SAlexander Graf 
1915dc333cd6SAlexander Graf uint32_t kvmppc_get_tbfreq(void)
1916dc333cd6SAlexander Graf {
1917dc333cd6SAlexander Graf     char line[512];
1918dc333cd6SAlexander Graf     char *ns;
191973bcb24dSRutuja Shah     uint32_t retval = NANOSECONDS_PER_SECOND;
1920dc333cd6SAlexander Graf 
1921dc333cd6SAlexander Graf     if (read_cpuinfo("timebase", line, sizeof(line))) {
1922dc333cd6SAlexander Graf         return retval;
1923dc333cd6SAlexander Graf     }
1924dc333cd6SAlexander Graf 
1925dc333cd6SAlexander Graf     if (!(ns = strchr(line, ':'))) {
1926dc333cd6SAlexander Graf         return retval;
1927dc333cd6SAlexander Graf     }
1928dc333cd6SAlexander Graf 
1929dc333cd6SAlexander Graf     ns++;
1930dc333cd6SAlexander Graf 
1931f9b8e7f6SShraddha Barke     return atoi(ns);
1932ef951443SNikunj A Dadhania }
1933ef951443SNikunj A Dadhania 
1934ef951443SNikunj A Dadhania bool kvmppc_get_host_serial(char **value)
1935ef951443SNikunj A Dadhania {
1936ef951443SNikunj A Dadhania     return g_file_get_contents("/proc/device-tree/system-id", value, NULL,
1937ef951443SNikunj A Dadhania                                NULL);
1938ef951443SNikunj A Dadhania }
1939ef951443SNikunj A Dadhania 
1940ef951443SNikunj A Dadhania bool kvmppc_get_host_model(char **value)
1941ef951443SNikunj A Dadhania {
1942ef951443SNikunj A Dadhania     return g_file_get_contents("/proc/device-tree/model", value, NULL, NULL);
1943dc333cd6SAlexander Graf }
19444513d923SGleb Natapov 
1945eadaada1SAlexander Graf /* Try to find a device tree node for a CPU with clock-frequency property */
1946eadaada1SAlexander Graf static int kvmppc_find_cpu_dt(char *buf, int buf_len)
1947eadaada1SAlexander Graf {
1948eadaada1SAlexander Graf     struct dirent *dirp;
1949eadaada1SAlexander Graf     DIR *dp;
1950eadaada1SAlexander Graf 
1951eadaada1SAlexander Graf     if ((dp = opendir(PROC_DEVTREE_CPU)) == NULL) {
1952eadaada1SAlexander Graf         printf("Can't open directory " PROC_DEVTREE_CPU "\n");
1953eadaada1SAlexander Graf         return -1;
1954eadaada1SAlexander Graf     }
1955eadaada1SAlexander Graf 
1956eadaada1SAlexander Graf     buf[0] = '\0';
1957eadaada1SAlexander Graf     while ((dirp = readdir(dp)) != NULL) {
1958eadaada1SAlexander Graf         FILE *f;
1959eadaada1SAlexander Graf         snprintf(buf, buf_len, "%s%s/clock-frequency", PROC_DEVTREE_CPU,
1960eadaada1SAlexander Graf                  dirp->d_name);
1961eadaada1SAlexander Graf         f = fopen(buf, "r");
1962eadaada1SAlexander Graf         if (f) {
1963eadaada1SAlexander Graf             snprintf(buf, buf_len, "%s%s", PROC_DEVTREE_CPU, dirp->d_name);
1964eadaada1SAlexander Graf             fclose(f);
1965eadaada1SAlexander Graf             break;
1966eadaada1SAlexander Graf         }
1967eadaada1SAlexander Graf         buf[0] = '\0';
1968eadaada1SAlexander Graf     }
1969eadaada1SAlexander Graf     closedir(dp);
1970eadaada1SAlexander Graf     if (buf[0] == '\0') {
1971eadaada1SAlexander Graf         printf("Unknown host!\n");
1972eadaada1SAlexander Graf         return -1;
1973eadaada1SAlexander Graf     }
1974eadaada1SAlexander Graf 
1975eadaada1SAlexander Graf     return 0;
1976eadaada1SAlexander Graf }
1977eadaada1SAlexander Graf 
19787d94a30bSSukadev Bhattiprolu static uint64_t kvmppc_read_int_dt(const char *filename)
1979eadaada1SAlexander Graf {
19809bc884b7SDavid Gibson     union {
19819bc884b7SDavid Gibson         uint32_t v32;
19829bc884b7SDavid Gibson         uint64_t v64;
19839bc884b7SDavid Gibson     } u;
1984eadaada1SAlexander Graf     FILE *f;
1985eadaada1SAlexander Graf     int len;
1986eadaada1SAlexander Graf 
19877d94a30bSSukadev Bhattiprolu     f = fopen(filename, "rb");
1988eadaada1SAlexander Graf     if (!f) {
1989eadaada1SAlexander Graf         return -1;
1990eadaada1SAlexander Graf     }
1991eadaada1SAlexander Graf 
19929bc884b7SDavid Gibson     len = fread(&u, 1, sizeof(u), f);
1993eadaada1SAlexander Graf     fclose(f);
1994eadaada1SAlexander Graf     switch (len) {
19959bc884b7SDavid Gibson     case 4:
19969bc884b7SDavid Gibson         /* property is a 32-bit quantity */
19979bc884b7SDavid Gibson         return be32_to_cpu(u.v32);
19989bc884b7SDavid Gibson     case 8:
19999bc884b7SDavid Gibson         return be64_to_cpu(u.v64);
2000eadaada1SAlexander Graf     }
2001eadaada1SAlexander Graf 
2002eadaada1SAlexander Graf     return 0;
2003eadaada1SAlexander Graf }
2004eadaada1SAlexander Graf 
20057d94a30bSSukadev Bhattiprolu /* Read a CPU node property from the host device tree that's a single
20067d94a30bSSukadev Bhattiprolu  * integer (32-bit or 64-bit).  Returns 0 if anything goes wrong
20077d94a30bSSukadev Bhattiprolu  * (can't find or open the property, or doesn't understand the
20087d94a30bSSukadev Bhattiprolu  * format) */
20097d94a30bSSukadev Bhattiprolu static uint64_t kvmppc_read_int_cpu_dt(const char *propname)
20107d94a30bSSukadev Bhattiprolu {
20117d94a30bSSukadev Bhattiprolu     char buf[PATH_MAX], *tmp;
20127d94a30bSSukadev Bhattiprolu     uint64_t val;
20137d94a30bSSukadev Bhattiprolu 
20147d94a30bSSukadev Bhattiprolu     if (kvmppc_find_cpu_dt(buf, sizeof(buf))) {
20157d94a30bSSukadev Bhattiprolu         return -1;
20167d94a30bSSukadev Bhattiprolu     }
20177d94a30bSSukadev Bhattiprolu 
20187d94a30bSSukadev Bhattiprolu     tmp = g_strdup_printf("%s/%s", buf, propname);
20197d94a30bSSukadev Bhattiprolu     val = kvmppc_read_int_dt(tmp);
20207d94a30bSSukadev Bhattiprolu     g_free(tmp);
20217d94a30bSSukadev Bhattiprolu 
20227d94a30bSSukadev Bhattiprolu     return val;
20237d94a30bSSukadev Bhattiprolu }
20247d94a30bSSukadev Bhattiprolu 
20259bc884b7SDavid Gibson uint64_t kvmppc_get_clockfreq(void)
20269bc884b7SDavid Gibson {
20279bc884b7SDavid Gibson     return kvmppc_read_int_cpu_dt("clock-frequency");
20289bc884b7SDavid Gibson }
20299bc884b7SDavid Gibson 
20306659394fSDavid Gibson uint32_t kvmppc_get_vmx(void)
20316659394fSDavid Gibson {
20326659394fSDavid Gibson     return kvmppc_read_int_cpu_dt("ibm,vmx");
20336659394fSDavid Gibson }
20346659394fSDavid Gibson 
20356659394fSDavid Gibson uint32_t kvmppc_get_dfp(void)
20366659394fSDavid Gibson {
20376659394fSDavid Gibson     return kvmppc_read_int_cpu_dt("ibm,dfp");
20386659394fSDavid Gibson }
20396659394fSDavid Gibson 
20401a61a9aeSStuart Yoder static int kvmppc_get_pvinfo(CPUPPCState *env, struct kvm_ppc_pvinfo *pvinfo)
204145024f09SAlexander Graf  {
2042a60f24b5SAndreas Färber      PowerPCCPU *cpu = ppc_env_get_cpu(env);
2043a60f24b5SAndreas Färber      CPUState *cs = CPU(cpu);
204445024f09SAlexander Graf 
20456fd33a75SAlexander Graf     if (kvm_vm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
20461a61a9aeSStuart Yoder         !kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_PVINFO, pvinfo)) {
20471a61a9aeSStuart Yoder         return 0;
20481a61a9aeSStuart Yoder     }
204945024f09SAlexander Graf 
20501a61a9aeSStuart Yoder     return 1;
20511a61a9aeSStuart Yoder }
20521a61a9aeSStuart Yoder 
20531a61a9aeSStuart Yoder int kvmppc_get_hasidle(CPUPPCState *env)
20541a61a9aeSStuart Yoder {
20551a61a9aeSStuart Yoder     struct kvm_ppc_pvinfo pvinfo;
20561a61a9aeSStuart Yoder 
20571a61a9aeSStuart Yoder     if (!kvmppc_get_pvinfo(env, &pvinfo) &&
20581a61a9aeSStuart Yoder         (pvinfo.flags & KVM_PPC_PVINFO_FLAGS_EV_IDLE)) {
20591a61a9aeSStuart Yoder         return 1;
20601a61a9aeSStuart Yoder     }
20611a61a9aeSStuart Yoder 
20621a61a9aeSStuart Yoder     return 0;
20631a61a9aeSStuart Yoder }
20641a61a9aeSStuart Yoder 
20651a61a9aeSStuart Yoder int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
20661a61a9aeSStuart Yoder {
20671a61a9aeSStuart Yoder     uint32_t *hc = (uint32_t*)buf;
20681a61a9aeSStuart Yoder     struct kvm_ppc_pvinfo pvinfo;
20691a61a9aeSStuart Yoder 
20701a61a9aeSStuart Yoder     if (!kvmppc_get_pvinfo(env, &pvinfo)) {
20711a61a9aeSStuart Yoder         memcpy(buf, pvinfo.hcall, buf_len);
207245024f09SAlexander Graf         return 0;
207345024f09SAlexander Graf     }
207445024f09SAlexander Graf 
207545024f09SAlexander Graf     /*
2076d13fc32eSAlexander Graf      * Fallback to always fail hypercalls regardless of endianness:
207745024f09SAlexander Graf      *
2078d13fc32eSAlexander Graf      *     tdi 0,r0,72 (becomes b .+8 in wrong endian, nop in good endian)
207945024f09SAlexander Graf      *     li r3, -1
2080d13fc32eSAlexander Graf      *     b .+8       (becomes nop in wrong endian)
2081d13fc32eSAlexander Graf      *     bswap32(li r3, -1)
208245024f09SAlexander Graf      */
208345024f09SAlexander Graf 
2084d13fc32eSAlexander Graf     hc[0] = cpu_to_be32(0x08000048);
2085d13fc32eSAlexander Graf     hc[1] = cpu_to_be32(0x3860ffff);
2086d13fc32eSAlexander Graf     hc[2] = cpu_to_be32(0x48000008);
2087d13fc32eSAlexander Graf     hc[3] = cpu_to_be32(bswap32(0x3860ffff));
208845024f09SAlexander Graf 
20890ddbd053SAlexey Kardashevskiy     return 1;
209045024f09SAlexander Graf }
209145024f09SAlexander Graf 
2092026bfd89SDavid Gibson static inline int kvmppc_enable_hcall(KVMState *s, target_ulong hcall)
2093026bfd89SDavid Gibson {
2094026bfd89SDavid Gibson     return kvm_vm_enable_cap(s, KVM_CAP_PPC_ENABLE_HCALL, 0, hcall, 1);
2095026bfd89SDavid Gibson }
2096026bfd89SDavid Gibson 
2097026bfd89SDavid Gibson void kvmppc_enable_logical_ci_hcalls(void)
2098026bfd89SDavid Gibson {
2099026bfd89SDavid Gibson     /*
2100026bfd89SDavid Gibson      * FIXME: it would be nice if we could detect the cases where
2101026bfd89SDavid Gibson      * we're using a device which requires the in kernel
2102026bfd89SDavid Gibson      * implementation of these hcalls, but the kernel lacks them and
2103026bfd89SDavid Gibson      * produce a warning.
2104026bfd89SDavid Gibson      */
2105026bfd89SDavid Gibson     kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_LOAD);
2106026bfd89SDavid Gibson     kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_STORE);
2107026bfd89SDavid Gibson }
2108026bfd89SDavid Gibson 
2109ef9971ddSAlexey Kardashevskiy void kvmppc_enable_set_mode_hcall(void)
2110ef9971ddSAlexey Kardashevskiy {
2111ef9971ddSAlexey Kardashevskiy     kvmppc_enable_hcall(kvm_state, H_SET_MODE);
2112ef9971ddSAlexey Kardashevskiy }
2113ef9971ddSAlexey Kardashevskiy 
21145145ad4fSNathan Whitehorn void kvmppc_enable_clear_ref_mod_hcalls(void)
21155145ad4fSNathan Whitehorn {
21165145ad4fSNathan Whitehorn     kvmppc_enable_hcall(kvm_state, H_CLEAR_REF);
21175145ad4fSNathan Whitehorn     kvmppc_enable_hcall(kvm_state, H_CLEAR_MOD);
21185145ad4fSNathan Whitehorn }
21195145ad4fSNathan Whitehorn 
21201bc22652SAndreas Färber void kvmppc_set_papr(PowerPCCPU *cpu)
2121f61b4bedSAlexander Graf {
21221bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
2123f61b4bedSAlexander Graf     int ret;
2124f61b4bedSAlexander Graf 
212548add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_PAPR, 0);
2126f61b4bedSAlexander Graf     if (ret) {
2127072ed5f2SThomas Huth         error_report("This vCPU type or KVM version does not support PAPR");
2128072ed5f2SThomas Huth         exit(1);
2129f61b4bedSAlexander Graf     }
21309b00ea49SDavid Gibson 
21319b00ea49SDavid Gibson     /* Update the capability flag so we sync the right information
21329b00ea49SDavid Gibson      * with kvm */
21339b00ea49SDavid Gibson     cap_papr = 1;
2134f1af19d7SDavid Gibson }
2135f61b4bedSAlexander Graf 
2136d6e166c0SDavid Gibson int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr)
21376db5bb0fSAlexey Kardashevskiy {
2138d6e166c0SDavid Gibson     return kvm_set_one_reg(CPU(cpu), KVM_REG_PPC_ARCH_COMPAT, &compat_pvr);
21396db5bb0fSAlexey Kardashevskiy }
21406db5bb0fSAlexey Kardashevskiy 
21415b95b8b9SAlexander Graf void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy)
21425b95b8b9SAlexander Graf {
21435b95b8b9SAlexander Graf     CPUState *cs = CPU(cpu);
21445b95b8b9SAlexander Graf     int ret;
21455b95b8b9SAlexander Graf 
214648add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_EPR, 0, mpic_proxy);
21475b95b8b9SAlexander Graf     if (ret && mpic_proxy) {
2148072ed5f2SThomas Huth         error_report("This KVM version does not support EPR");
2149072ed5f2SThomas Huth         exit(1);
21505b95b8b9SAlexander Graf     }
21515b95b8b9SAlexander Graf }
21525b95b8b9SAlexander Graf 
2153e97c3636SDavid Gibson int kvmppc_smt_threads(void)
2154e97c3636SDavid Gibson {
2155e97c3636SDavid Gibson     return cap_ppc_smt ? cap_ppc_smt : 1;
2156e97c3636SDavid Gibson }
2157e97c3636SDavid Gibson 
21587f763a5dSDavid Gibson #ifdef TARGET_PPC64
2159658fa66bSAlexey Kardashevskiy off_t kvmppc_alloc_rma(void **rma)
2160354ac20aSDavid Gibson {
2161354ac20aSDavid Gibson     off_t size;
2162354ac20aSDavid Gibson     int fd;
2163354ac20aSDavid Gibson     struct kvm_allocate_rma ret;
2164354ac20aSDavid Gibson 
2165354ac20aSDavid Gibson     /* If cap_ppc_rma == 0, contiguous RMA allocation is not supported
2166354ac20aSDavid Gibson      * if cap_ppc_rma == 1, contiguous RMA allocation is supported, but
2167354ac20aSDavid Gibson      *                      not necessary on this hardware
2168354ac20aSDavid Gibson      * if cap_ppc_rma == 2, contiguous RMA allocation is needed on this hardware
2169354ac20aSDavid Gibson      *
2170354ac20aSDavid Gibson      * FIXME: We should allow the user to force contiguous RMA
2171354ac20aSDavid Gibson      * allocation in the cap_ppc_rma==1 case.
2172354ac20aSDavid Gibson      */
2173354ac20aSDavid Gibson     if (cap_ppc_rma < 2) {
2174354ac20aSDavid Gibson         return 0;
2175354ac20aSDavid Gibson     }
2176354ac20aSDavid Gibson 
2177354ac20aSDavid Gibson     fd = kvm_vm_ioctl(kvm_state, KVM_ALLOCATE_RMA, &ret);
2178354ac20aSDavid Gibson     if (fd < 0) {
2179354ac20aSDavid Gibson         fprintf(stderr, "KVM: Error on KVM_ALLOCATE_RMA: %s\n",
2180354ac20aSDavid Gibson                 strerror(errno));
2181354ac20aSDavid Gibson         return -1;
2182354ac20aSDavid Gibson     }
2183354ac20aSDavid Gibson 
2184354ac20aSDavid Gibson     size = MIN(ret.rma_size, 256ul << 20);
2185354ac20aSDavid Gibson 
2186658fa66bSAlexey Kardashevskiy     *rma = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
2187658fa66bSAlexey Kardashevskiy     if (*rma == MAP_FAILED) {
2188354ac20aSDavid Gibson         fprintf(stderr, "KVM: Error mapping RMA: %s\n", strerror(errno));
2189354ac20aSDavid Gibson         return -1;
2190354ac20aSDavid Gibson     };
2191354ac20aSDavid Gibson 
2192354ac20aSDavid Gibson     return size;
2193354ac20aSDavid Gibson }
2194354ac20aSDavid Gibson 
21957f763a5dSDavid Gibson uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift)
21967f763a5dSDavid Gibson {
2197f36951c1SDavid Gibson     struct kvm_ppc_smmu_info info;
2198f36951c1SDavid Gibson     long rampagesize, best_page_shift;
2199f36951c1SDavid Gibson     int i;
2200f36951c1SDavid Gibson 
22017f763a5dSDavid Gibson     if (cap_ppc_rma >= 2) {
22027f763a5dSDavid Gibson         return current_size;
22037f763a5dSDavid Gibson     }
2204f36951c1SDavid Gibson 
2205f36951c1SDavid Gibson     /* Find the largest hardware supported page size that's less than
2206f36951c1SDavid Gibson      * or equal to the (logical) backing page size of guest RAM */
2207182735efSAndreas Färber     kvm_get_smmu_info(POWERPC_CPU(first_cpu), &info);
2208f36951c1SDavid Gibson     rampagesize = getrampagesize();
2209f36951c1SDavid Gibson     best_page_shift = 0;
2210f36951c1SDavid Gibson 
2211f36951c1SDavid Gibson     for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
2212f36951c1SDavid Gibson         struct kvm_ppc_one_seg_page_size *sps = &info.sps[i];
2213f36951c1SDavid Gibson 
2214f36951c1SDavid Gibson         if (!sps->page_shift) {
2215f36951c1SDavid Gibson             continue;
2216f36951c1SDavid Gibson         }
2217f36951c1SDavid Gibson 
2218f36951c1SDavid Gibson         if ((sps->page_shift > best_page_shift)
2219f36951c1SDavid Gibson             && ((1UL << sps->page_shift) <= rampagesize)) {
2220f36951c1SDavid Gibson             best_page_shift = sps->page_shift;
2221f36951c1SDavid Gibson         }
2222f36951c1SDavid Gibson     }
2223f36951c1SDavid Gibson 
22247f763a5dSDavid Gibson     return MIN(current_size,
2225f36951c1SDavid Gibson                1ULL << (best_page_shift + hash_shift - 7));
22267f763a5dSDavid Gibson }
22277f763a5dSDavid Gibson #endif
22287f763a5dSDavid Gibson 
2229da95324eSAlexey Kardashevskiy bool kvmppc_spapr_use_multitce(void)
2230da95324eSAlexey Kardashevskiy {
2231da95324eSAlexey Kardashevskiy     return cap_spapr_multitce;
2232da95324eSAlexey Kardashevskiy }
2233da95324eSAlexey Kardashevskiy 
22349bb62a07SAlexey Kardashevskiy void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t window_size, int *pfd,
22356a81dd17SDavid Gibson                               bool need_vfio)
22360f5cb298SDavid Gibson {
22370f5cb298SDavid Gibson     struct kvm_create_spapr_tce args = {
22380f5cb298SDavid Gibson         .liobn = liobn,
22390f5cb298SDavid Gibson         .window_size = window_size,
22400f5cb298SDavid Gibson     };
22410f5cb298SDavid Gibson     long len;
22420f5cb298SDavid Gibson     int fd;
22430f5cb298SDavid Gibson     void *table;
22440f5cb298SDavid Gibson 
2245b5aec396SDavid Gibson     /* Must set fd to -1 so we don't try to munmap when called for
2246b5aec396SDavid Gibson      * destroying the table, which the upper layers -will- do
2247b5aec396SDavid Gibson      */
2248b5aec396SDavid Gibson     *pfd = -1;
22496a81dd17SDavid Gibson     if (!cap_spapr_tce || (need_vfio && !cap_spapr_vfio)) {
22500f5cb298SDavid Gibson         return NULL;
22510f5cb298SDavid Gibson     }
22520f5cb298SDavid Gibson 
22530f5cb298SDavid Gibson     fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE, &args);
22540f5cb298SDavid Gibson     if (fd < 0) {
2255b5aec396SDavid Gibson         fprintf(stderr, "KVM: Failed to create TCE table for liobn 0x%x\n",
2256b5aec396SDavid Gibson                 liobn);
22570f5cb298SDavid Gibson         return NULL;
22580f5cb298SDavid Gibson     }
22590f5cb298SDavid Gibson 
2260a83000f5SAnthony Liguori     len = (window_size / SPAPR_TCE_PAGE_SIZE) * sizeof(uint64_t);
22610f5cb298SDavid Gibson     /* FIXME: round this up to page size */
22620f5cb298SDavid Gibson 
226374b41e56SDavid Gibson     table = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
22640f5cb298SDavid Gibson     if (table == MAP_FAILED) {
2265b5aec396SDavid Gibson         fprintf(stderr, "KVM: Failed to map TCE table for liobn 0x%x\n",
2266b5aec396SDavid Gibson                 liobn);
22670f5cb298SDavid Gibson         close(fd);
22680f5cb298SDavid Gibson         return NULL;
22690f5cb298SDavid Gibson     }
22700f5cb298SDavid Gibson 
22710f5cb298SDavid Gibson     *pfd = fd;
22720f5cb298SDavid Gibson     return table;
22730f5cb298SDavid Gibson }
22740f5cb298SDavid Gibson 
2275523e7b8aSAlexey Kardashevskiy int kvmppc_remove_spapr_tce(void *table, int fd, uint32_t nb_table)
22760f5cb298SDavid Gibson {
22770f5cb298SDavid Gibson     long len;
22780f5cb298SDavid Gibson 
22790f5cb298SDavid Gibson     if (fd < 0) {
22800f5cb298SDavid Gibson         return -1;
22810f5cb298SDavid Gibson     }
22820f5cb298SDavid Gibson 
2283523e7b8aSAlexey Kardashevskiy     len = nb_table * sizeof(uint64_t);
22840f5cb298SDavid Gibson     if ((munmap(table, len) < 0) ||
22850f5cb298SDavid Gibson         (close(fd) < 0)) {
2286b5aec396SDavid Gibson         fprintf(stderr, "KVM: Unexpected error removing TCE table: %s",
2287b5aec396SDavid Gibson                 strerror(errno));
22880f5cb298SDavid Gibson         /* Leak the table */
22890f5cb298SDavid Gibson     }
22900f5cb298SDavid Gibson 
22910f5cb298SDavid Gibson     return 0;
22920f5cb298SDavid Gibson }
22930f5cb298SDavid Gibson 
22947f763a5dSDavid Gibson int kvmppc_reset_htab(int shift_hint)
22957f763a5dSDavid Gibson {
22967f763a5dSDavid Gibson     uint32_t shift = shift_hint;
22977f763a5dSDavid Gibson 
2298ace9a2cbSDavid Gibson     if (!kvm_enabled()) {
2299ace9a2cbSDavid Gibson         /* Full emulation, tell caller to allocate htab itself */
2300ace9a2cbSDavid Gibson         return 0;
2301ace9a2cbSDavid Gibson     }
2302ace9a2cbSDavid Gibson     if (kvm_check_extension(kvm_state, KVM_CAP_PPC_ALLOC_HTAB)) {
23037f763a5dSDavid Gibson         int ret;
23047f763a5dSDavid Gibson         ret = kvm_vm_ioctl(kvm_state, KVM_PPC_ALLOCATE_HTAB, &shift);
2305ace9a2cbSDavid Gibson         if (ret == -ENOTTY) {
2306ace9a2cbSDavid Gibson             /* At least some versions of PR KVM advertise the
2307ace9a2cbSDavid Gibson              * capability, but don't implement the ioctl().  Oops.
2308ace9a2cbSDavid Gibson              * Return 0 so that we allocate the htab in qemu, as is
2309ace9a2cbSDavid Gibson              * correct for PR. */
2310ace9a2cbSDavid Gibson             return 0;
2311ace9a2cbSDavid Gibson         } else if (ret < 0) {
23127f763a5dSDavid Gibson             return ret;
23137f763a5dSDavid Gibson         }
23147f763a5dSDavid Gibson         return shift;
23157f763a5dSDavid Gibson     }
23167f763a5dSDavid Gibson 
2317ace9a2cbSDavid Gibson     /* We have a kernel that predates the htab reset calls.  For PR
2318ace9a2cbSDavid Gibson      * KVM, we need to allocate the htab ourselves, for an HV KVM of
231996c9cff0SThomas Huth      * this era, it has allocated a 16MB fixed size hash table already. */
232096c9cff0SThomas Huth     if (kvmppc_is_pr(kvm_state)) {
2321ace9a2cbSDavid Gibson         /* PR - tell caller to allocate htab */
23227f763a5dSDavid Gibson         return 0;
2323ace9a2cbSDavid Gibson     } else {
2324ace9a2cbSDavid Gibson         /* HV - assume 16MB kernel allocated htab */
2325ace9a2cbSDavid Gibson         return 24;
2326ace9a2cbSDavid Gibson     }
23277f763a5dSDavid Gibson }
23287f763a5dSDavid Gibson 
/*
 * Read the host's Processor Version Register via the mfpvr
 * instruction.  PowerPC-host-only by construction, which is fine:
 * this whole file is the PPC KVM backend.
 */
static inline uint32_t mfpvr(void)
{
    uint32_t pvr;

    asm ("mfpvr %0"
         : "=r"(pvr));
    return pvr;
}
2337a1e98583SDavid Gibson 
/*
 * Set (@on == true) or clear (@on == false) the bits in @flags within
 * the instruction-feature word pointed to by @word.
 */
static void alter_insns(uint64_t *word, uint64_t flags, bool on)
{
    *word = on ? (*word | flags) : (*word & ~flags);
}
2346a7342588SDavid Gibson 
/* Instance init for the "host" CPU type: it only exists under KVM. */
static void kvmppc_host_cpu_initfn(Object *obj)
{
    assert(kvm_enabled());
}
23512985b86bSAndreas Färber 
/*
 * Class init for the "host" CPU type: start from the PVR-matched class
 * and patch in what we can query from the running host (exact PVR,
 * VMX/VSX and DFP availability, L1 cache sizes from the device tree).
 */
static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
    uint32_t vmx = kvmppc_get_vmx();
    uint32_t dfp = kvmppc_get_dfp();
    uint32_t dcache_size = kvmppc_read_int_cpu_dt("d-cache-size");
    uint32_t icache_size = kvmppc_read_int_cpu_dt("i-cache-size");

    /* Now fix up the class with information we can query from the host */
    pcc->pvr = mfpvr();

    if (vmx != -1) {
        /* Only override when we know what the host supports */
        /* vmx > 0 enables Altivec; vmx > 1 additionally enables VSX */
        alter_insns(&pcc->insns_flags, PPC_ALTIVEC, vmx > 0);
        alter_insns(&pcc->insns_flags2, PPC2_VSX, vmx > 1);
    }
    if (dfp != -1) {
        /* Only override when we know what the host supports */
        alter_insns(&pcc->insns_flags2, PPC2_DFP, dfp);
    }

    /* -1 means the property was absent from the host device tree */
    if (dcache_size != -1) {
        pcc->l1_dcache_size = dcache_size;
    }

    if (icache_size != -1) {
        pcc->l1_icache_size = icache_size;
    }

    /* Reason: kvmppc_host_cpu_initfn() dies when !kvm_enabled() */
    dc->cannot_destroy_with_object_finalize_yet = true;
}
2385a1e98583SDavid Gibson 
/* Cached result: does the kernel support KVM_CAP_PPC_EPR? */
bool kvmppc_has_cap_epr(void)
{
    return cap_epr;
}
23903b961124SStuart Yoder 
/* Cached result: can the kernel expose the hash table via an fd? */
bool kvmppc_has_cap_htab_fd(void)
{
    return cap_htab_fd;
}
23957c43bca0SAneesh Kumar K.V 
/* Cached result: does the kernel support fixup of hypercall instructions? */
bool kvmppc_has_cap_fixup_hcalls(void)
{
    return cap_fixup_hcalls;
}
240087a91de6SAlexander Graf 
/* Cached result: does the kernel support hardware transactional memory? */
bool kvmppc_has_cap_htm(void)
{
    return cap_htm;
}
2405bac3bf28SThomas Huth 
/*
 * Walk up the QOM class hierarchy from @pcc until the first abstract
 * ancestor; for PPC CPU models that ancestor is the CPU "family"
 * class (e.g. POWER8 for a POWER8NVL model).
 */
static PowerPCCPUClass *ppc_cpu_get_family_class(PowerPCCPUClass *pcc)
{
    ObjectClass *oc = OBJECT_CLASS(pcc);

    while (oc && !object_class_is_abstract(oc)) {
        oc = object_class_get_parent(oc);
    }
    /* Every concrete CPU class has an abstract ancestor */
    assert(oc);

    return POWERPC_CPU_CLASS(oc);
}
24175b79b1caSAlexey Kardashevskiy 
/*
 * Look up the CPU class matching the host's PVR: first an exact PVR
 * match, then a PVR-mask (family) match as fallback.
 *
 * Returns NULL if the host CPU is unknown to qemu.
 */
PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void)
{
    uint32_t host_pvr = mfpvr();
    PowerPCCPUClass *pvr_pcc;

    pvr_pcc = ppc_cpu_class_by_pvr(host_pvr);
    if (pvr_pcc == NULL) {
        pvr_pcc = ppc_cpu_class_by_pvr_mask(host_pvr);
    }

    return pvr_pcc;
}
243052b2519cSThomas Huth 
24315ba4576bSAndreas Färber static int kvm_ppc_register_host_cpu_type(void)
24325ba4576bSAndreas Färber {
24335ba4576bSAndreas Färber     TypeInfo type_info = {
24345ba4576bSAndreas Färber         .name = TYPE_HOST_POWERPC_CPU,
24355ba4576bSAndreas Färber         .instance_init = kvmppc_host_cpu_initfn,
24365ba4576bSAndreas Färber         .class_init = kvmppc_host_cpu_class_init,
24375ba4576bSAndreas Färber     };
24385ba4576bSAndreas Färber     PowerPCCPUClass *pvr_pcc;
24395b79b1caSAlexey Kardashevskiy     DeviceClass *dc;
2440715d4b96SThomas Huth     int i;
24415ba4576bSAndreas Färber 
244252b2519cSThomas Huth     pvr_pcc = kvm_ppc_get_host_cpu_class();
24433bc9ccc0SAlexey Kardashevskiy     if (pvr_pcc == NULL) {
24445ba4576bSAndreas Färber         return -1;
24455ba4576bSAndreas Färber     }
24465ba4576bSAndreas Färber     type_info.parent = object_class_get_name(OBJECT_CLASS(pvr_pcc));
24475ba4576bSAndreas Färber     type_register(&type_info);
24485b79b1caSAlexey Kardashevskiy 
24493b542549SBharata B Rao #if defined(TARGET_PPC64)
24503b542549SBharata B Rao     type_info.name = g_strdup_printf("%s-"TYPE_SPAPR_CPU_CORE, "host");
24513b542549SBharata B Rao     type_info.parent = TYPE_SPAPR_CPU_CORE,
24527ebaf795SBharata B Rao     type_info.instance_size = sizeof(sPAPRCPUCore);
24537ebaf795SBharata B Rao     type_info.instance_init = NULL;
24547ebaf795SBharata B Rao     type_info.class_init = spapr_cpu_core_class_init;
24557ebaf795SBharata B Rao     type_info.class_data = (void *) "host";
24563b542549SBharata B Rao     type_register(&type_info);
24573b542549SBharata B Rao     g_free((void *)type_info.name);
24583b542549SBharata B Rao #endif
24593b542549SBharata B Rao 
2460715d4b96SThomas Huth     /*
2461715d4b96SThomas Huth      * Update generic CPU family class alias (e.g. on a POWER8NVL host,
2462715d4b96SThomas Huth      * we want "POWER8" to be a "family" alias that points to the current
2463715d4b96SThomas Huth      * host CPU type, too)
2464715d4b96SThomas Huth      */
2465715d4b96SThomas Huth     dc = DEVICE_CLASS(ppc_cpu_get_family_class(pvr_pcc));
2466715d4b96SThomas Huth     for (i = 0; ppc_cpu_aliases[i].alias != NULL; i++) {
2467715d4b96SThomas Huth         if (strcmp(ppc_cpu_aliases[i].alias, dc->desc) == 0) {
2468715d4b96SThomas Huth             ObjectClass *oc = OBJECT_CLASS(pvr_pcc);
2469715d4b96SThomas Huth             char *suffix;
2470715d4b96SThomas Huth 
2471715d4b96SThomas Huth             ppc_cpu_aliases[i].model = g_strdup(object_class_get_name(oc));
2472715d4b96SThomas Huth             suffix = strstr(ppc_cpu_aliases[i].model, "-"TYPE_POWERPC_CPU);
2473715d4b96SThomas Huth             if (suffix) {
2474715d4b96SThomas Huth                 *suffix = 0;
2475715d4b96SThomas Huth             }
2476715d4b96SThomas Huth             ppc_cpu_aliases[i].oc = oc;
2477715d4b96SThomas Huth             break;
2478715d4b96SThomas Huth         }
2479715d4b96SThomas Huth     }
2480715d4b96SThomas Huth 
24815ba4576bSAndreas Färber     return 0;
24825ba4576bSAndreas Färber }
24835ba4576bSAndreas Färber 
2484feaa64c4SDavid Gibson int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function)
2485feaa64c4SDavid Gibson {
2486feaa64c4SDavid Gibson     struct kvm_rtas_token_args args = {
2487feaa64c4SDavid Gibson         .token = token,
2488feaa64c4SDavid Gibson     };
2489feaa64c4SDavid Gibson 
2490feaa64c4SDavid Gibson     if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_RTAS)) {
2491feaa64c4SDavid Gibson         return -ENOENT;
2492feaa64c4SDavid Gibson     }
2493feaa64c4SDavid Gibson 
2494feaa64c4SDavid Gibson     strncpy(args.name, function, sizeof(args.name));
2495feaa64c4SDavid Gibson 
2496feaa64c4SDavid Gibson     return kvm_vm_ioctl(kvm_state, KVM_PPC_RTAS_DEFINE_TOKEN, &args);
2497feaa64c4SDavid Gibson }
249812b1143bSDavid Gibson 
/*
 * Open a file descriptor onto the guest hash page table: readable for
 * migration save, writable (@write == true) for migration load.
 *
 * Returns the fd, or -1 if the kernel lacks KVM_CAP_PPC_HTAB_FD.
 */
int kvmppc_get_htab_fd(bool write)
{
    struct kvm_get_htab_fd s = {
        .flags = write ? KVM_GET_HTAB_WRITE : 0,
        .start_index = 0,
    };

    if (!cap_htab_fd) {
        fprintf(stderr, "KVM version doesn't support saving the hash table\n");
        return -1;
    }

    return kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &s);
}
2513e68cb8b4SAlexey Kardashevskiy 
/*
 * Stream the guest hash table from the kernel's HTAB @fd (from
 * kvmppc_get_htab_fd()) into migration stream @f, re-encoding each
 * kvm_get_htab_header chunk field-by-field in big-endian.
 *
 * Stops early after roughly @max_ns nanoseconds unless @max_ns < 0.
 *
 * Returns 1 when the kernel reports the whole HTAB consumed (read()
 * returned 0), 0 if stopped by the time limit, negative on read error.
 */
int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns)
{
    int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    uint8_t buf[bufsize];   /* VLA: assumes caller passes a modest bufsize */
    ssize_t rc;

    do {
        rc = read(fd, buf, bufsize);
        if (rc < 0) {
            fprintf(stderr, "Error reading data from KVM HTAB fd: %s\n",
                    strerror(errno));
            return rc;
        } else if (rc) {
            /* The kernel returned a run of variable-sized chunks:
             * a header followed by n_valid HPTEs each */
            uint8_t *buffer = buf;
            ssize_t n = rc;
            while (n) {
                struct kvm_get_htab_header *head =
                    (struct kvm_get_htab_header *) buffer;
                size_t chunksize = sizeof(*head) +
                     HASH_PTE_SIZE_64 * head->n_valid;

                qemu_put_be32(f, head->index);
                qemu_put_be16(f, head->n_valid);
                qemu_put_be16(f, head->n_invalid);
                qemu_put_buffer(f, (void *)(head + 1),
                                HASH_PTE_SIZE_64 * head->n_valid);

                buffer += chunksize;
                n -= chunksize;
            }
        }
    } while ((rc != 0)
             && ((max_ns < 0)
                 || ((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) < max_ns)));

    /* rc == 0 means EOF from the kernel: the whole HTAB has been sent */
    return (rc == 0) ? 1 : 0;
}
2551e68cb8b4SAlexey Kardashevskiy 
/*
 * Push one HTAB chunk from migration stream @f into the kernel via the
 * writable HTAB @fd: @n_valid HPTEs starting at @index, followed by
 * @n_invalid entries to clear.
 *
 * Returns 0 on success, the negative write() result on failure, or -1
 * on a short write (which should never happen for a single chunk).
 */
int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
                           uint16_t n_valid, uint16_t n_invalid)
{
    struct kvm_get_htab_header *buf;
    size_t chunksize = sizeof(*buf) + n_valid*HASH_PTE_SIZE_64;
    ssize_t rc;

    /* NOTE(review): alloca() has no failure check; chunksize is bounded
     * by the 16-bit n_valid so the stack use stays modest */
    buf = alloca(chunksize);
    buf->index = index;
    buf->n_valid = n_valid;
    buf->n_invalid = n_invalid;

    /* The HPTE payload in the stream is already in the layout KVM wants */
    qemu_get_buffer(f, (void *)(buf + 1), HASH_PTE_SIZE_64*n_valid);

    rc = write(fd, buf, chunksize);
    if (rc < 0) {
        fprintf(stderr, "Error writing KVM hash table: %s\n",
                strerror(errno));
        return rc;
    }
    if (rc != chunksize) {
        /* We should never get a short write on a single chunk */
        fprintf(stderr, "Short write, restoring KVM hash table\n");
        return -1;
    }
    return 0;
}
2579e68cb8b4SAlexey Kardashevskiy 
/* PPC always stops the guest on an emulation error. */
bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
{
    return true;
}
2584a1b87fe0SJan Kiszka 
/* NOTE(review): returning 1 appears to mean "SIGBUS not handled by the
 * arch backend" — confirm against the generic kvm-all handling. */
int kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
{
    return 1;
}
2589a1b87fe0SJan Kiszka 
/* Non-vcpu SIGBUS: same convention as kvm_arch_on_sigbus_vcpu() —
 * nonzero means not handled here. */
int kvm_arch_on_sigbus(int code, void *addr)
{
    return 1;
}
259482169660SScott Wood 
/* No arch-specific IRQ routing setup is needed on PPC. */
void kvm_arch_init_irq_routing(KVMState *s)
{
}
2598c65f9a07SGreg Kurz 
/*
 * Read @n HPTEs starting at index @ptex from the kernel's hash table
 * into @hptes.  Ranges the kernel reports as invalid are zero-filled
 * in the destination.  Any failure is fatal (hw_error()).
 */
void kvmppc_read_hptes(ppc_hash_pte64_t *hptes, hwaddr ptex, int n)
{
    struct kvm_get_htab_fd ghf = {
        .flags = 0,              /* read-only */
        .start_index = ptex,     /* start streaming at the requested PTE */
    };
    int fd, rc;
    int i;

    fd = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &ghf);
    if (fd < 0) {
        hw_error("kvmppc_read_hptes: Unable to open HPT fd");
    }

    i = 0;   /* number of HPTEs delivered to the caller so far */
    while (i < n) {
        struct kvm_get_htab_header *hdr;
        /* Read at most one hash bucket's worth of entries per pass */
        int m = n < HPTES_PER_GROUP ? n : HPTES_PER_GROUP;
        char buf[sizeof(*hdr) + m * HASH_PTE_SIZE_64];

        rc = read(fd, buf, sizeof(buf));
        if (rc < 0) {
            hw_error("kvmppc_read_hptes: Unable to read HPTEs");
        }

        /* The buffer holds a run of (header, n_valid HPTEs) records */
        hdr = (struct kvm_get_htab_header *)buf;
        while ((i < n) && ((char *)hdr < (buf + rc))) {
            int invalid = hdr->n_invalid;

            /* The stream must stay contiguous from ptex onwards */
            if (hdr->index != (ptex + i)) {
                hw_error("kvmppc_read_hptes: Unexpected HPTE index %"PRIu32
                         " != (%"HWADDR_PRIu" + %d", hdr->index, ptex, i);
            }

            memcpy(hptes + i, hdr + 1, HASH_PTE_SIZE_64 * hdr->n_valid);
            i += hdr->n_valid;

            /* Clamp so the zero-fill never runs past the caller's buffer */
            if ((n - i) < invalid) {
                invalid = n - i;
            }
            memset(hptes + i, 0, invalid * HASH_PTE_SIZE_64);
            i += hdr->n_invalid;

            /* Advance past this header and its valid-HPTE payload */
            hdr = (struct kvm_get_htab_header *)
                ((char *)(hdr + 1) + HASH_PTE_SIZE_64 * hdr->n_valid);
        }
    }

    close(fd);
}
2649*1ad9f0a4SDavid Gibson 
/*
 * Replace the single HPTE at index @ptex with (@pte0, @pte1) through a
 * one-chunk write to the kernel's HTAB fd.  Failures are fatal
 * (hw_error()).
 */
void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1)
{
    int fd, rc;
    struct kvm_get_htab_fd ghf;
    /* One chunk: header plus exactly one 16-byte HPTE */
    struct {
        struct kvm_get_htab_header hdr;
        uint64_t pte0;
        uint64_t pte1;
    } buf;

    ghf.flags = 0;
    ghf.start_index = 0;     /* Ignored */
    fd = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &ghf);
    if (fd < 0) {
        hw_error("kvmppc_write_hpte: Unable to open HPT fd");
    }

    /* A single valid entry at ptex, nothing invalidated */
    buf.hdr.n_valid = 1;
    buf.hdr.n_invalid = 0;
    buf.hdr.index = ptex;
    /* HPTEs are stored big-endian regardless of host endianness */
    buf.pte0 = cpu_to_be64(pte0);
    buf.pte1 = cpu_to_be64(pte1);

    rc = write(fd, &buf, sizeof(buf));
    if (rc != sizeof(buf)) {
        hw_error("kvmppc_write_hpte: Unable to update KVM HPT");
    }
    close(fd);
}
26799e03a040SFrank Blaschka 
/* No arch-specific MSI route fixup is needed on PPC. */
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    return 0;
}
26851850b6b7SEric Auger 
/* No arch-specific work after adding an MSI route on PPC. */
int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}
269138d87493SPeter Xu 
/* No arch-specific cleanup when a virq is released on PPC. */
int kvm_arch_release_virq_post(int virq)
{
    return 0;
}
269638d87493SPeter Xu 
/* On PPC the GSI lives in the low 16 bits of the MSI data word. */
int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    const uint32_t gsi_mask = 0xffff;

    return data & gsi_mask;
}
27014d9392beSThomas Huth 
/*
 * Enable in-kernel handling of the H_RANDOM hypercall (backed by the
 * host hardware RNG).  Returns -1 when KVM or KVM_CAP_PPC_HWRNG is
 * unavailable, otherwise the result of enabling the hcall.
 */
int kvmppc_enable_hwrng(void)
{
    if (!kvm_enabled() || !kvm_check_extension(kvm_state, KVM_CAP_PPC_HWRNG)) {
        return -1;
    }

    return kvmppc_enable_hcall(kvm_state, H_RANDOM);
}
2710