xref: /qemu/target/ppc/kvm.c (revision d6e166c08203f47017555f5f52b70f35399c824c)
1d76d1650Saurel32 /*
2d76d1650Saurel32  * PowerPC implementation of KVM hooks
3d76d1650Saurel32  *
4d76d1650Saurel32  * Copyright IBM Corp. 2007
590dc8812SScott Wood  * Copyright (C) 2011 Freescale Semiconductor, Inc.
6d76d1650Saurel32  *
7d76d1650Saurel32  * Authors:
8d76d1650Saurel32  *  Jerone Young <jyoung5@us.ibm.com>
9d76d1650Saurel32  *  Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
10d76d1650Saurel32  *  Hollis Blanchard <hollisb@us.ibm.com>
11d76d1650Saurel32  *
12d76d1650Saurel32  * This work is licensed under the terms of the GNU GPL, version 2 or later.
13d76d1650Saurel32  * See the COPYING file in the top-level directory.
14d76d1650Saurel32  *
15d76d1650Saurel32  */
16d76d1650Saurel32 
170d75590dSPeter Maydell #include "qemu/osdep.h"
18eadaada1SAlexander Graf #include <dirent.h>
19d76d1650Saurel32 #include <sys/ioctl.h>
204656e1f0SBenjamin Herrenschmidt #include <sys/vfs.h>
21d76d1650Saurel32 
22d76d1650Saurel32 #include <linux/kvm.h>
23d76d1650Saurel32 
24d76d1650Saurel32 #include "qemu-common.h"
25072ed5f2SThomas Huth #include "qemu/error-report.h"
2633c11879SPaolo Bonzini #include "cpu.h"
271de7afc9SPaolo Bonzini #include "qemu/timer.h"
289c17d615SPaolo Bonzini #include "sysemu/sysemu.h"
29b3946626SVincent Palatin #include "sysemu/hw_accel.h"
3086b50f2eSThomas Huth #include "sysemu/numa.h"
31d76d1650Saurel32 #include "kvm_ppc.h"
329c17d615SPaolo Bonzini #include "sysemu/cpus.h"
339c17d615SPaolo Bonzini #include "sysemu/device_tree.h"
34d5aea6f3SDavid Gibson #include "mmu-hash64.h"
35d76d1650Saurel32 
36f61b4bedSAlexander Graf #include "hw/sysbus.h"
370d09e41aSPaolo Bonzini #include "hw/ppc/spapr.h"
380d09e41aSPaolo Bonzini #include "hw/ppc/spapr_vio.h"
397ebaf795SBharata B Rao #include "hw/ppc/spapr_cpu_core.h"
4098a8b524SAlexey Kardashevskiy #include "hw/ppc/ppc.h"
4131f2cb8fSBharat Bhushan #include "sysemu/watchdog.h"
42b36f100eSAlexey Kardashevskiy #include "trace.h"
4388365d17SBharat Bhushan #include "exec/gdbstub.h"
444c663752SPaolo Bonzini #include "exec/memattrs.h"
452d103aaeSMichael Roth #include "sysemu/hostmem.h"
46f348b6d1SVeronia Bahaa #include "qemu/cutils.h"
473b542549SBharata B Rao #if defined(TARGET_PPC64)
483b542549SBharata B Rao #include "hw/ppc/spapr_cpu_core.h"
493b542549SBharata B Rao #endif
50f61b4bedSAlexander Graf 
51d76d1650Saurel32 //#define DEBUG_KVM
52d76d1650Saurel32 
53d76d1650Saurel32 #ifdef DEBUG_KVM
54da56ff91SPeter Maydell #define DPRINTF(fmt, ...) \
55d76d1650Saurel32     do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
56d76d1650Saurel32 #else
57da56ff91SPeter Maydell #define DPRINTF(fmt, ...) \
58d76d1650Saurel32     do { } while (0)
59d76d1650Saurel32 #endif
60d76d1650Saurel32 
61eadaada1SAlexander Graf #define PROC_DEVTREE_CPU      "/proc/device-tree/cpus/"
62eadaada1SAlexander Graf 
6394a8d39aSJan Kiszka const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
6494a8d39aSJan Kiszka     KVM_CAP_LAST_INFO
6594a8d39aSJan Kiszka };
6694a8d39aSJan Kiszka 
67fc87e185SAlexander Graf static int cap_interrupt_unset = false;
68fc87e185SAlexander Graf static int cap_interrupt_level = false;
6990dc8812SScott Wood static int cap_segstate;
7090dc8812SScott Wood static int cap_booke_sregs;
71e97c3636SDavid Gibson static int cap_ppc_smt;
72354ac20aSDavid Gibson static int cap_ppc_rma;
730f5cb298SDavid Gibson static int cap_spapr_tce;
74da95324eSAlexey Kardashevskiy static int cap_spapr_multitce;
759bb62a07SAlexey Kardashevskiy static int cap_spapr_vfio;
76f1af19d7SDavid Gibson static int cap_hior;
77d67d40eaSDavid Gibson static int cap_one_reg;
783b961124SStuart Yoder static int cap_epr;
7931f2cb8fSBharat Bhushan static int cap_ppc_watchdog;
809b00ea49SDavid Gibson static int cap_papr;
81e68cb8b4SAlexey Kardashevskiy static int cap_htab_fd;
8287a91de6SAlexander Graf static int cap_fixup_hcalls;
83bac3bf28SThomas Huth static int cap_htm;             /* Hardware transactional memory support */
84fc87e185SAlexander Graf 
853c902d44SBharat Bhushan static uint32_t debug_inst_opcode;
863c902d44SBharat Bhushan 
87c821c2bdSAlexander Graf /* XXX We have a race condition where we actually have a level-triggered
88c821c2bdSAlexander Graf  *     interrupt, but the infrastructure can't expose that yet, so the guest
89c821c2bdSAlexander Graf  *     takes but ignores it, goes to sleep and never gets notified that there's
90c821c2bdSAlexander Graf  *     still an interrupt pending.
91c6a94ba5SAlexander Graf  *
92c821c2bdSAlexander Graf  *     As a quick workaround, let's just wake up again 20 ms after we injected
93c821c2bdSAlexander Graf  *     an interrupt. That way we can ensure that we're always reinjecting
94c821c2bdSAlexander Graf  *     interrupts in case the guest swallowed them.
95c6a94ba5SAlexander Graf  */
96c6a94ba5SAlexander Graf static QEMUTimer *idle_timer;
97c6a94ba5SAlexander Graf 
98d5a68146SAndreas Färber static void kvm_kick_cpu(void *opaque)
99c6a94ba5SAlexander Graf {
100d5a68146SAndreas Färber     PowerPCCPU *cpu = opaque;
101d5a68146SAndreas Färber 
102c08d7424SAndreas Färber     qemu_cpu_kick(CPU(cpu));
103c6a94ba5SAlexander Graf }
104c6a94ba5SAlexander Graf 
10596c9cff0SThomas Huth /* Check whether we are running with KVM-PR (instead of KVM-HV).  This
10696c9cff0SThomas Huth  * should only be used for fallback tests - generally we should use
10796c9cff0SThomas Huth  * explicit capabilities for the features we want, rather than
10896c9cff0SThomas Huth  * assuming what is/isn't available depending on the KVM variant. */
10996c9cff0SThomas Huth static bool kvmppc_is_pr(KVMState *ks)
11096c9cff0SThomas Huth {
11196c9cff0SThomas Huth     /* Assume KVM-PR if the GET_PVINFO capability is available */
11296c9cff0SThomas Huth     return kvm_check_extension(ks, KVM_CAP_PPC_GET_PVINFO) != 0;
11396c9cff0SThomas Huth }
11496c9cff0SThomas Huth 
1155ba4576bSAndreas Färber static int kvm_ppc_register_host_cpu_type(void);
1165ba4576bSAndreas Färber 
117b16565b3SMarcel Apfelbaum int kvm_arch_init(MachineState *ms, KVMState *s)
118d76d1650Saurel32 {
119fc87e185SAlexander Graf     cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);
120fc87e185SAlexander Graf     cap_interrupt_level = kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL);
12190dc8812SScott Wood     cap_segstate = kvm_check_extension(s, KVM_CAP_PPC_SEGSTATE);
12290dc8812SScott Wood     cap_booke_sregs = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_SREGS);
123e97c3636SDavid Gibson     cap_ppc_smt = kvm_check_extension(s, KVM_CAP_PPC_SMT);
124354ac20aSDavid Gibson     cap_ppc_rma = kvm_check_extension(s, KVM_CAP_PPC_RMA);
1250f5cb298SDavid Gibson     cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);
126da95324eSAlexey Kardashevskiy     cap_spapr_multitce = kvm_check_extension(s, KVM_CAP_SPAPR_MULTITCE);
1279bb62a07SAlexey Kardashevskiy     cap_spapr_vfio = false;
128d67d40eaSDavid Gibson     cap_one_reg = kvm_check_extension(s, KVM_CAP_ONE_REG);
129f1af19d7SDavid Gibson     cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);
1303b961124SStuart Yoder     cap_epr = kvm_check_extension(s, KVM_CAP_PPC_EPR);
13131f2cb8fSBharat Bhushan     cap_ppc_watchdog = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_WATCHDOG);
1329b00ea49SDavid Gibson     /* Note: we don't set cap_papr here, because this capability is
1339b00ea49SDavid Gibson      * only activated later, by kvmppc_set_papr() */
134e68cb8b4SAlexey Kardashevskiy     cap_htab_fd = kvm_check_extension(s, KVM_CAP_PPC_HTAB_FD);
13587a91de6SAlexander Graf     cap_fixup_hcalls = kvm_check_extension(s, KVM_CAP_PPC_FIXUP_HCALL);
136bac3bf28SThomas Huth     cap_htm = kvm_vm_check_extension(s, KVM_CAP_PPC_HTM);
137fc87e185SAlexander Graf 
138fc87e185SAlexander Graf     if (!cap_interrupt_level) {
139fc87e185SAlexander Graf         fprintf(stderr, "KVM: Couldn't find level irq capability. Expect the "
140fc87e185SAlexander Graf                         "VM to stall at times!\n");
141fc87e185SAlexander Graf     }
142fc87e185SAlexander Graf 
1435ba4576bSAndreas Färber     kvm_ppc_register_host_cpu_type();
1445ba4576bSAndreas Färber 
145d76d1650Saurel32     return 0;
146d76d1650Saurel32 }
147d76d1650Saurel32 
148d525ffabSPaolo Bonzini int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
149d525ffabSPaolo Bonzini {
150d525ffabSPaolo Bonzini     return 0;
151d525ffabSPaolo Bonzini }
152d525ffabSPaolo Bonzini 
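/* Push the guest PVR to KVM via KVM_SET_SREGS (book3s only; BookE keeps the native PVR). */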
1531bc22652SAndreas Färber static int kvm_arch_sync_sregs(PowerPCCPU *cpu)
154d76d1650Saurel32 {
1551bc22652SAndreas Färber     CPUPPCState *cenv = &cpu->env;
1561bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
157861bbc80SAlexander Graf     struct kvm_sregs sregs;
1585666ca4aSScott Wood     int ret;
1595666ca4aSScott Wood 
1605666ca4aSScott Wood     if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
16164e07be5SAlexander Graf         /* What we're really trying to say is "if we're on BookE, we use
16264e07be5SAlexander Graf            the native PVR for now". This is the only sane way to check
16364e07be5SAlexander Graf            it though, so we potentially mislead users into thinking they can
16464e07be5SAlexander Graf            run BookE guests on BookS. Let's hope nobody dares enough :) */
1655666ca4aSScott Wood         return 0;
1665666ca4aSScott Wood     } else {
16790dc8812SScott Wood         if (!cap_segstate) {
16864e07be5SAlexander Graf             fprintf(stderr, "kvm error: missing PVR setting capability\n");
16964e07be5SAlexander Graf             return -ENOSYS;
1705666ca4aSScott Wood         }
1715666ca4aSScott Wood     }
1725666ca4aSScott Wood 
1731bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
1745666ca4aSScott Wood     if (ret) {
1755666ca4aSScott Wood         return ret;
1765666ca4aSScott Wood     }
177861bbc80SAlexander Graf 
178861bbc80SAlexander Graf     sregs.pvr = cenv->spr[SPR_PVR];
1791bc22652SAndreas Färber     return kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
1805666ca4aSScott Wood }
1815666ca4aSScott Wood 
18293dd5e85SScott Wood /* Set up a shared TLB array with KVM */
1831bc22652SAndreas Färber static int kvm_booke206_tlb_init(PowerPCCPU *cpu)
18493dd5e85SScott Wood {
1851bc22652SAndreas Färber     CPUPPCState *env = &cpu->env;
1861bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
18793dd5e85SScott Wood     struct kvm_book3e_206_tlb_params params = {};
18893dd5e85SScott Wood     struct kvm_config_tlb cfg = {};
18993dd5e85SScott Wood     unsigned int entries = 0;
19093dd5e85SScott Wood     int ret, i;
19193dd5e85SScott Wood 
19293dd5e85SScott Wood     if (!kvm_enabled() ||
193a60f24b5SAndreas Färber         !kvm_check_extension(cs->kvm_state, KVM_CAP_SW_TLB)) {
19493dd5e85SScott Wood         return 0;
19593dd5e85SScott Wood     }
19693dd5e85SScott Wood 
19793dd5e85SScott Wood     assert(ARRAY_SIZE(params.tlb_sizes) == BOOKE206_MAX_TLBN);
19893dd5e85SScott Wood 
19993dd5e85SScott Wood     for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
20093dd5e85SScott Wood         params.tlb_sizes[i] = booke206_tlb_size(env, i);
20193dd5e85SScott Wood         params.tlb_ways[i] = booke206_tlb_ways(env, i);
20293dd5e85SScott Wood         entries += params.tlb_sizes[i];
20393dd5e85SScott Wood     }
20493dd5e85SScott Wood 
20593dd5e85SScott Wood     assert(entries == env->nb_tlb);
20693dd5e85SScott Wood     assert(sizeof(struct kvm_book3e_206_tlb_entry) == sizeof(ppcmas_tlb_t));
20793dd5e85SScott Wood 
20893dd5e85SScott Wood     env->tlb_dirty = true;
20993dd5e85SScott Wood 
21093dd5e85SScott Wood     cfg.array = (uintptr_t)env->tlb.tlbm;
21193dd5e85SScott Wood     cfg.array_len = sizeof(ppcmas_tlb_t) * entries;
21293dd5e85SScott Wood     cfg.params = (uintptr_t)&params;
21393dd5e85SScott Wood     cfg.mmu_type = KVM_MMU_FSL_BOOKE_NOHV;
21493dd5e85SScott Wood 
21548add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_SW_TLB, 0, (uintptr_t)&cfg);
21693dd5e85SScott Wood     if (ret < 0) {
21793dd5e85SScott Wood         fprintf(stderr, "%s: couldn't enable KVM_CAP_SW_TLB: %s\n",
21893dd5e85SScott Wood                 __func__, strerror(-ret));
21993dd5e85SScott Wood         return ret;
22093dd5e85SScott Wood     }
22193dd5e85SScott Wood 
22293dd5e85SScott Wood     env->kvm_sw_tlb = true;
22393dd5e85SScott Wood     return 0;
22493dd5e85SScott Wood }
22593dd5e85SScott Wood 
2264656e1f0SBenjamin Herrenschmidt 
2274656e1f0SBenjamin Herrenschmidt #if defined(TARGET_PPC64)
228a60f24b5SAndreas Färber static void kvm_get_fallback_smmu_info(PowerPCCPU *cpu,
2294656e1f0SBenjamin Herrenschmidt                                        struct kvm_ppc_smmu_info *info)
2304656e1f0SBenjamin Herrenschmidt {
231a60f24b5SAndreas Färber     CPUPPCState *env = &cpu->env;
232a60f24b5SAndreas Färber     CPUState *cs = CPU(cpu);
233a60f24b5SAndreas Färber 
2344656e1f0SBenjamin Herrenschmidt     memset(info, 0, sizeof(*info));
2354656e1f0SBenjamin Herrenschmidt 
2364656e1f0SBenjamin Herrenschmidt     /* We don't have the new KVM_PPC_GET_SMMU_INFO ioctl, so
2374656e1f0SBenjamin Herrenschmidt      * we need to "guess" what the supported page sizes are.
2384656e1f0SBenjamin Herrenschmidt      *
2394656e1f0SBenjamin Herrenschmidt      * For that to work we make a few assumptions:
2404656e1f0SBenjamin Herrenschmidt      *
24196c9cff0SThomas Huth      * - Check whether we are running "PR" KVM which only supports 4K
24296c9cff0SThomas Huth      *   and 16M pages, but supports them regardless of the backing
24396c9cff0SThomas Huth      *   store characteristics. We also don't support 1T segments.
2444656e1f0SBenjamin Herrenschmidt      *
2454656e1f0SBenjamin Herrenschmidt      *   This is safe as if HV KVM ever supports that capability or PR
2464656e1f0SBenjamin Herrenschmidt      *   KVM grows support for more page/segment sizes, those versions
2474656e1f0SBenjamin Herrenschmidt      *   will have implemented KVM_CAP_PPC_GET_SMMU_INFO and thus we
2484656e1f0SBenjamin Herrenschmidt      *   will not hit this fallback
2494656e1f0SBenjamin Herrenschmidt      *
2504656e1f0SBenjamin Herrenschmidt      * - Else we are running HV KVM. This means we only support page
2514656e1f0SBenjamin Herrenschmidt      *   sizes that fit in the backing store. Additionally we only
2524656e1f0SBenjamin Herrenschmidt      *   advertise 64K pages if the processor is ARCH 2.06 and we assume
2534656e1f0SBenjamin Herrenschmidt      *   P7 encodings for the SLB and hash table. Here too, we assume
2544656e1f0SBenjamin Herrenschmidt      *   support for any newer processor will mean a kernel that
2554656e1f0SBenjamin Herrenschmidt      *   implements KVM_CAP_PPC_GET_SMMU_INFO and thus doesn't hit
2564656e1f0SBenjamin Herrenschmidt      *   this fallback.
2574656e1f0SBenjamin Herrenschmidt      */
25896c9cff0SThomas Huth     if (kvmppc_is_pr(cs->kvm_state)) {
2594656e1f0SBenjamin Herrenschmidt         /* No flags */
2604656e1f0SBenjamin Herrenschmidt         info->flags = 0;
2614656e1f0SBenjamin Herrenschmidt         info->slb_size = 64;
2624656e1f0SBenjamin Herrenschmidt 
2634656e1f0SBenjamin Herrenschmidt         /* Standard 4k base page size segment */
2644656e1f0SBenjamin Herrenschmidt         info->sps[0].page_shift = 12;
2654656e1f0SBenjamin Herrenschmidt         info->sps[0].slb_enc = 0;
2664656e1f0SBenjamin Herrenschmidt         info->sps[0].enc[0].page_shift = 12;
2674656e1f0SBenjamin Herrenschmidt         info->sps[0].enc[0].pte_enc = 0;
2684656e1f0SBenjamin Herrenschmidt 
2694656e1f0SBenjamin Herrenschmidt         /* Standard 16M large page size segment */
2704656e1f0SBenjamin Herrenschmidt         info->sps[1].page_shift = 24;
2714656e1f0SBenjamin Herrenschmidt         info->sps[1].slb_enc = SLB_VSID_L;
2724656e1f0SBenjamin Herrenschmidt         info->sps[1].enc[0].page_shift = 24;
2734656e1f0SBenjamin Herrenschmidt         info->sps[1].enc[0].pte_enc = 0;
2744656e1f0SBenjamin Herrenschmidt     } else {
2754656e1f0SBenjamin Herrenschmidt         int i = 0;
2764656e1f0SBenjamin Herrenschmidt 
2774656e1f0SBenjamin Herrenschmidt         /* HV KVM has backing store size restrictions */
2784656e1f0SBenjamin Herrenschmidt         info->flags = KVM_PPC_PAGE_SIZES_REAL;
2794656e1f0SBenjamin Herrenschmidt 
2804656e1f0SBenjamin Herrenschmidt         if (env->mmu_model & POWERPC_MMU_1TSEG) {
2814656e1f0SBenjamin Herrenschmidt             info->flags |= KVM_PPC_1T_SEGMENTS;
2824656e1f0SBenjamin Herrenschmidt         }
2834656e1f0SBenjamin Herrenschmidt 
284aa4bb587SBenjamin Herrenschmidt         if (env->mmu_model == POWERPC_MMU_2_06 ||
285aa4bb587SBenjamin Herrenschmidt             env->mmu_model == POWERPC_MMU_2_07) {
2864656e1f0SBenjamin Herrenschmidt             info->slb_size = 32;
2874656e1f0SBenjamin Herrenschmidt         } else {
2884656e1f0SBenjamin Herrenschmidt             info->slb_size = 64;
2894656e1f0SBenjamin Herrenschmidt         }
2904656e1f0SBenjamin Herrenschmidt 
2914656e1f0SBenjamin Herrenschmidt         /* Standard 4k base page size segment */
2924656e1f0SBenjamin Herrenschmidt         info->sps[i].page_shift = 12;
2934656e1f0SBenjamin Herrenschmidt         info->sps[i].slb_enc = 0;
2944656e1f0SBenjamin Herrenschmidt         info->sps[i].enc[0].page_shift = 12;
2954656e1f0SBenjamin Herrenschmidt         info->sps[i].enc[0].pte_enc = 0;
2964656e1f0SBenjamin Herrenschmidt         i++;
2974656e1f0SBenjamin Herrenschmidt 
298aa4bb587SBenjamin Herrenschmidt         /* 64K on MMU 2.06 and later */
299aa4bb587SBenjamin Herrenschmidt         if (env->mmu_model == POWERPC_MMU_2_06 ||
300aa4bb587SBenjamin Herrenschmidt             env->mmu_model == POWERPC_MMU_2_07) {
3014656e1f0SBenjamin Herrenschmidt             info->sps[i].page_shift = 16;
3024656e1f0SBenjamin Herrenschmidt             info->sps[i].slb_enc = 0x110;
3034656e1f0SBenjamin Herrenschmidt             info->sps[i].enc[0].page_shift = 16;
3044656e1f0SBenjamin Herrenschmidt             info->sps[i].enc[0].pte_enc = 1;
3054656e1f0SBenjamin Herrenschmidt             i++;
3064656e1f0SBenjamin Herrenschmidt         }
3074656e1f0SBenjamin Herrenschmidt 
3084656e1f0SBenjamin Herrenschmidt         /* Standard 16M large page size segment */
3094656e1f0SBenjamin Herrenschmidt         info->sps[i].page_shift = 24;
3104656e1f0SBenjamin Herrenschmidt         info->sps[i].slb_enc = SLB_VSID_L;
3114656e1f0SBenjamin Herrenschmidt         info->sps[i].enc[0].page_shift = 24;
3124656e1f0SBenjamin Herrenschmidt         info->sps[i].enc[0].pte_enc = 0;
3134656e1f0SBenjamin Herrenschmidt     }
3144656e1f0SBenjamin Herrenschmidt }
3154656e1f0SBenjamin Herrenschmidt 
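/* Fetch the MMU characteristics supported by KVM, falling back to conservative guesses on kernels without KVM_PPC_GET_SMMU_INFO. */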
316a60f24b5SAndreas Färber static void kvm_get_smmu_info(PowerPCCPU *cpu, struct kvm_ppc_smmu_info *info)
3174656e1f0SBenjamin Herrenschmidt {
318a60f24b5SAndreas Färber     CPUState *cs = CPU(cpu);
3194656e1f0SBenjamin Herrenschmidt     int ret;
3204656e1f0SBenjamin Herrenschmidt 
321a60f24b5SAndreas Färber     if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) {
322a60f24b5SAndreas Färber         ret = kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_SMMU_INFO, info);
3234656e1f0SBenjamin Herrenschmidt         if (ret == 0) {
3244656e1f0SBenjamin Herrenschmidt             return;
3254656e1f0SBenjamin Herrenschmidt         }
3264656e1f0SBenjamin Herrenschmidt     }
3274656e1f0SBenjamin Herrenschmidt 
328a60f24b5SAndreas Färber     kvm_get_fallback_smmu_info(cpu, info);
3294656e1f0SBenjamin Herrenschmidt }
3304656e1f0SBenjamin Herrenschmidt 
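/* Return the page size of the filesystem backing mem_path: the huge page size for hugetlbfs, the normal host page size otherwise. */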
3312d103aaeSMichael Roth static long gethugepagesize(const char *mem_path)
3324656e1f0SBenjamin Herrenschmidt {
3334656e1f0SBenjamin Herrenschmidt     struct statfs fs;
3344656e1f0SBenjamin Herrenschmidt     int ret;
3354656e1f0SBenjamin Herrenschmidt 
3364656e1f0SBenjamin Herrenschmidt     do {
3374656e1f0SBenjamin Herrenschmidt         ret = statfs(mem_path, &fs);
3384656e1f0SBenjamin Herrenschmidt     } while (ret != 0 && errno == EINTR);
3394656e1f0SBenjamin Herrenschmidt 
3404656e1f0SBenjamin Herrenschmidt     if (ret != 0) {
3414656e1f0SBenjamin Herrenschmidt         fprintf(stderr, "Couldn't statfs() memory path: %s\n",
3424656e1f0SBenjamin Herrenschmidt                 strerror(errno));
3434656e1f0SBenjamin Herrenschmidt         exit(1);
3444656e1f0SBenjamin Herrenschmidt     }
3454656e1f0SBenjamin Herrenschmidt 
3464656e1f0SBenjamin Herrenschmidt #define HUGETLBFS_MAGIC       0x958458f6
3474656e1f0SBenjamin Herrenschmidt 
3484656e1f0SBenjamin Herrenschmidt     if (fs.f_type != HUGETLBFS_MAGIC) {
3494656e1f0SBenjamin Herrenschmidt         /* Explicit mempath, but it's ordinary pages */
3504656e1f0SBenjamin Herrenschmidt         return getpagesize();
3514656e1f0SBenjamin Herrenschmidt     }
3524656e1f0SBenjamin Herrenschmidt 
3534656e1f0SBenjamin Herrenschmidt     /* It's a hugepage, return the huge page size */
3544656e1f0SBenjamin Herrenschmidt     return fs.f_bsize;
3554656e1f0SBenjamin Herrenschmidt }
3564656e1f0SBenjamin Herrenschmidt 
3573be5cc23SMarkus Armbruster /*
3583be5cc23SMarkus Armbruster  * FIXME TOCTTOU: this iterates over memory backends' mem-path, which
3593be5cc23SMarkus Armbruster  * may or may not name the same files / on the same filesystem now as
3603be5cc23SMarkus Armbruster  * when we actually open and map them.  Iterate over the file
3613be5cc23SMarkus Armbruster  * descriptors instead, and use qemu_fd_getpagesize().
3623be5cc23SMarkus Armbruster  */
3632d103aaeSMichael Roth static int find_max_supported_pagesize(Object *obj, void *opaque)
3642d103aaeSMichael Roth {
3652d103aaeSMichael Roth     char *mem_path;
3662d103aaeSMichael Roth     long *hpsize_min = opaque;
3672d103aaeSMichael Roth 
3682d103aaeSMichael Roth     if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
3692d103aaeSMichael Roth         mem_path = object_property_get_str(obj, "mem-path", NULL);
3702d103aaeSMichael Roth         if (mem_path) {
3712d103aaeSMichael Roth             long hpsize = gethugepagesize(mem_path);
3722d103aaeSMichael Roth             if (hpsize < *hpsize_min) {
3732d103aaeSMichael Roth                 *hpsize_min = hpsize;
3742d103aaeSMichael Roth             }
3752d103aaeSMichael Roth         } else {
3762d103aaeSMichael Roth             *hpsize_min = getpagesize();
3772d103aaeSMichael Roth         }
3782d103aaeSMichael Roth     }
3792d103aaeSMichael Roth 
3802d103aaeSMichael Roth     return 0;
3812d103aaeSMichael Roth }
3822d103aaeSMichael Roth 
3832d103aaeSMichael Roth static long getrampagesize(void)
3842d103aaeSMichael Roth {
3852d103aaeSMichael Roth     long hpsize = LONG_MAX;
3863d4f2534SThomas Huth     long mainrampagesize;
3872d103aaeSMichael Roth     Object *memdev_root;
3882d103aaeSMichael Roth 
3892d103aaeSMichael Roth     if (mem_path) {
3903d4f2534SThomas Huth         mainrampagesize = gethugepagesize(mem_path);
3913d4f2534SThomas Huth     } else {
3923d4f2534SThomas Huth         mainrampagesize = getpagesize();
3932d103aaeSMichael Roth     }
3942d103aaeSMichael Roth 
3952d103aaeSMichael Roth     /* it's possible we have memory-backend objects with
3962d103aaeSMichael Roth      * hugepage-backed RAM. these may get mapped into system
3972d103aaeSMichael Roth      * address space via -numa parameters or memory hotplug
3982d103aaeSMichael Roth      * hooks. we want to take these into account, but we
3992d103aaeSMichael Roth      * also want to make sure these supported hugepage
4002d103aaeSMichael Roth      * sizes are applicable across the entire range of memory
4012d103aaeSMichael Roth      * we may boot from, so we take the min across all
4022d103aaeSMichael Roth      * backends, and assume normal pages in cases where a
4032d103aaeSMichael Roth      * backend isn't backed by hugepages.
4042d103aaeSMichael Roth      */
4052d103aaeSMichael Roth     memdev_root = object_resolve_path("/objects", NULL);
4063d4f2534SThomas Huth     if (memdev_root) {
4072d103aaeSMichael Roth         object_child_foreach(memdev_root, find_max_supported_pagesize, &hpsize);
4083d4f2534SThomas Huth     }
4093d4f2534SThomas Huth     if (hpsize == LONG_MAX) {
4103d4f2534SThomas Huth         /* No additional memory regions found ==> Report main RAM page size */
4113d4f2534SThomas Huth         return mainrampagesize;
41286b50f2eSThomas Huth     }
41386b50f2eSThomas Huth 
414159d2e39SThomas Huth     /* If NUMA is disabled or the NUMA nodes are not backed with a
4153d4f2534SThomas Huth      * memory-backend, then there is at least one node using "normal" RAM,
4163d4f2534SThomas Huth      * so if its page size is smaller we have to report that size instead.
417159d2e39SThomas Huth      */
4183d4f2534SThomas Huth     if (hpsize > mainrampagesize &&
4193d4f2534SThomas Huth         (nb_numa_nodes == 0 || numa_info[0].node_memdev == NULL)) {
42086b50f2eSThomas Huth         static bool warned;
42186b50f2eSThomas Huth         if (!warned) {
42286b50f2eSThomas Huth             error_report("Huge page support disabled (n/a for main memory).");
42386b50f2eSThomas Huth             warned = true;
42486b50f2eSThomas Huth         }
4253d4f2534SThomas Huth         return mainrampagesize;
42686b50f2eSThomas Huth     }
42786b50f2eSThomas Huth 
42886b50f2eSThomas Huth     return hpsize;
4292d103aaeSMichael Roth }
4302d103aaeSMichael Roth 
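/* With HV KVM (KVM_PPC_PAGE_SIZES_REAL) a page size is only usable if it fits in the backing RAM page size. */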
4314656e1f0SBenjamin Herrenschmidt static bool kvm_valid_page_size(uint32_t flags, long rampgsize, uint32_t shift)
4324656e1f0SBenjamin Herrenschmidt {
4334656e1f0SBenjamin Herrenschmidt     if (!(flags & KVM_PPC_PAGE_SIZES_REAL)) {
4344656e1f0SBenjamin Herrenschmidt         return true;
4354656e1f0SBenjamin Herrenschmidt     }
4364656e1f0SBenjamin Herrenschmidt 
4374656e1f0SBenjamin Herrenschmidt     return (1ul << shift) <= rampgsize;
4384656e1f0SBenjamin Herrenschmidt }
4394656e1f0SBenjamin Herrenschmidt 
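/* Restrict env->sps and the MMU model flags to the segment/page sizes that both KVM and the backing memory support. */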
440a60f24b5SAndreas Färber static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
4414656e1f0SBenjamin Herrenschmidt {
4424656e1f0SBenjamin Herrenschmidt     static struct kvm_ppc_smmu_info smmu_info;
4434656e1f0SBenjamin Herrenschmidt     static bool has_smmu_info;
444a60f24b5SAndreas Färber     CPUPPCState *env = &cpu->env;
4454656e1f0SBenjamin Herrenschmidt     long rampagesize;
4464656e1f0SBenjamin Herrenschmidt     int iq, ik, jq, jk;
4470d594f55SThomas Huth     bool has_64k_pages = false;
4484656e1f0SBenjamin Herrenschmidt 
4494656e1f0SBenjamin Herrenschmidt     /* We only handle page sizes for 64-bit server guests for now */
4504656e1f0SBenjamin Herrenschmidt     if (!(env->mmu_model & POWERPC_MMU_64)) {
4514656e1f0SBenjamin Herrenschmidt         return;
4524656e1f0SBenjamin Herrenschmidt     }
4534656e1f0SBenjamin Herrenschmidt 
4544656e1f0SBenjamin Herrenschmidt     /* Collect MMU info from kernel if not already */
4554656e1f0SBenjamin Herrenschmidt     if (!has_smmu_info) {
456a60f24b5SAndreas Färber         kvm_get_smmu_info(cpu, &smmu_info);
4574656e1f0SBenjamin Herrenschmidt         has_smmu_info = true;
4584656e1f0SBenjamin Herrenschmidt     }
4594656e1f0SBenjamin Herrenschmidt 
4604656e1f0SBenjamin Herrenschmidt     rampagesize = getrampagesize();
4614656e1f0SBenjamin Herrenschmidt 
4624656e1f0SBenjamin Herrenschmidt     /* Convert to QEMU form */
4634656e1f0SBenjamin Herrenschmidt     memset(&env->sps, 0, sizeof(env->sps));
4644656e1f0SBenjamin Herrenschmidt 
46590da0d5aSBenjamin Herrenschmidt     /* If we have HV KVM, we need to forbid CI large pages if our
46690da0d5aSBenjamin Herrenschmidt      * host page size is smaller than 64K.
46790da0d5aSBenjamin Herrenschmidt      */
46890da0d5aSBenjamin Herrenschmidt     if (smmu_info.flags & KVM_PPC_PAGE_SIZES_REAL) {
46990da0d5aSBenjamin Herrenschmidt         env->ci_large_pages = getpagesize() >= 0x10000;
47090da0d5aSBenjamin Herrenschmidt     }
47190da0d5aSBenjamin Herrenschmidt 
47208215d8fSAlexander Graf     /*
47308215d8fSAlexander Graf      * XXX This loop should be an entry wide AND of the capabilities that
47408215d8fSAlexander Graf      *     the selected CPU has with the capabilities that KVM supports.
47508215d8fSAlexander Graf      */
4764656e1f0SBenjamin Herrenschmidt     for (ik = iq = 0; ik < KVM_PPC_PAGE_SIZES_MAX_SZ; ik++) {
4774656e1f0SBenjamin Herrenschmidt         struct ppc_one_seg_page_size *qsps = &env->sps.sps[iq];
4784656e1f0SBenjamin Herrenschmidt         struct kvm_ppc_one_seg_page_size *ksps = &smmu_info.sps[ik];
4794656e1f0SBenjamin Herrenschmidt 
4804656e1f0SBenjamin Herrenschmidt         if (!kvm_valid_page_size(smmu_info.flags, rampagesize,
4814656e1f0SBenjamin Herrenschmidt                                  ksps->page_shift)) {
4824656e1f0SBenjamin Herrenschmidt             continue;
4834656e1f0SBenjamin Herrenschmidt         }
4844656e1f0SBenjamin Herrenschmidt         qsps->page_shift = ksps->page_shift;
4854656e1f0SBenjamin Herrenschmidt         qsps->slb_enc = ksps->slb_enc;
4864656e1f0SBenjamin Herrenschmidt         for (jk = jq = 0; jk < KVM_PPC_PAGE_SIZES_MAX_SZ; jk++) {
4874656e1f0SBenjamin Herrenschmidt             if (!kvm_valid_page_size(smmu_info.flags, rampagesize,
4884656e1f0SBenjamin Herrenschmidt                                      ksps->enc[jk].page_shift)) {
4894656e1f0SBenjamin Herrenschmidt                 continue;
4904656e1f0SBenjamin Herrenschmidt             }
4910d594f55SThomas Huth             if (ksps->enc[jk].page_shift == 16) {
4920d594f55SThomas Huth                 has_64k_pages = true;
4930d594f55SThomas Huth             }
4944656e1f0SBenjamin Herrenschmidt             qsps->enc[jq].page_shift = ksps->enc[jk].page_shift;
4954656e1f0SBenjamin Herrenschmidt             qsps->enc[jq].pte_enc = ksps->enc[jk].pte_enc;
4964656e1f0SBenjamin Herrenschmidt             if (++jq >= PPC_PAGE_SIZES_MAX_SZ) {
4974656e1f0SBenjamin Herrenschmidt                 break;
4984656e1f0SBenjamin Herrenschmidt             }
4994656e1f0SBenjamin Herrenschmidt         }
5004656e1f0SBenjamin Herrenschmidt         if (++iq >= PPC_PAGE_SIZES_MAX_SZ) {
5014656e1f0SBenjamin Herrenschmidt             break;
5024656e1f0SBenjamin Herrenschmidt         }
5034656e1f0SBenjamin Herrenschmidt     }
5044656e1f0SBenjamin Herrenschmidt     env->slb_nr = smmu_info.slb_size;
50508215d8fSAlexander Graf     if (!(smmu_info.flags & KVM_PPC_1T_SEGMENTS)) {
5064656e1f0SBenjamin Herrenschmidt         env->mmu_model &= ~POWERPC_MMU_1TSEG;
5074656e1f0SBenjamin Herrenschmidt     }
5080d594f55SThomas Huth     if (!has_64k_pages) {
5090d594f55SThomas Huth         env->mmu_model &= ~POWERPC_MMU_64K;
5100d594f55SThomas Huth     }
5114656e1f0SBenjamin Herrenschmidt }
5124656e1f0SBenjamin Herrenschmidt #else /* defined (TARGET_PPC64) */
5134656e1f0SBenjamin Herrenschmidt 
514a60f24b5SAndreas Färber static inline void kvm_fixup_page_sizes(PowerPCCPU *cpu)
5154656e1f0SBenjamin Herrenschmidt {
5164656e1f0SBenjamin Herrenschmidt }
5174656e1f0SBenjamin Herrenschmidt 
5184656e1f0SBenjamin Herrenschmidt #endif /* !defined (TARGET_PPC64) */
5194656e1f0SBenjamin Herrenschmidt 
520b164e48eSEduardo Habkost unsigned long kvm_arch_vcpu_id(CPUState *cpu)
521b164e48eSEduardo Habkost {
5220f20ba62SAlexey Kardashevskiy     return ppc_get_vcpu_dt_id(POWERPC_CPU(cpu));
523b164e48eSEduardo Habkost }
524b164e48eSEduardo Habkost 
52588365d17SBharat Bhushan /* e500 supports 2 h/w breakpoints and 2 watchpoints.
52688365d17SBharat Bhushan  * book3s supports only 1 watchpoint, so an array size
52788365d17SBharat Bhushan  * of 4 is sufficient for now.
52888365d17SBharat Bhushan  */
52988365d17SBharat Bhushan #define MAX_HW_BKPTS 4
53088365d17SBharat Bhushan 
53188365d17SBharat Bhushan static struct HWBreakpoint {
53288365d17SBharat Bhushan     target_ulong addr;
53388365d17SBharat Bhushan     int type;
53488365d17SBharat Bhushan } hw_debug_points[MAX_HW_BKPTS];
53588365d17SBharat Bhushan 
53688365d17SBharat Bhushan static CPUWatchpoint hw_watchpoint;
53788365d17SBharat Bhushan 
53888365d17SBharat Bhushan /* By default, no breakpoints or watchpoints are supported */
53988365d17SBharat Bhushan static int max_hw_breakpoint;
54088365d17SBharat Bhushan static int max_hw_watchpoint;
54188365d17SBharat Bhushan static int nb_hw_breakpoint;
54288365d17SBharat Bhushan static int nb_hw_watchpoint;
54388365d17SBharat Bhushan 
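/* Set the h/w breakpoint/watchpoint limits; only BookE currently advertises any. */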
54488365d17SBharat Bhushan static void kvmppc_hw_debug_points_init(CPUPPCState *cenv)
54588365d17SBharat Bhushan {
54688365d17SBharat Bhushan     if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
54788365d17SBharat Bhushan         max_hw_breakpoint = 2;
54888365d17SBharat Bhushan         max_hw_watchpoint = 2;
54988365d17SBharat Bhushan     }
55088365d17SBharat Bhushan 
55188365d17SBharat Bhushan     if ((max_hw_breakpoint + max_hw_watchpoint) > MAX_HW_BKPTS) {
55288365d17SBharat Bhushan         fprintf(stderr, "Error initializing h/w breakpoints\n");
55388365d17SBharat Bhushan         return;
55488365d17SBharat Bhushan     }
55588365d17SBharat Bhushan }
55688365d17SBharat Bhushan 
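/* Per-vCPU initialisation: fix up page sizes, sync sregs, arm the wakeup timer and do MMU-model specific setup. */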
55720d695a9SAndreas Färber int kvm_arch_init_vcpu(CPUState *cs)
5585666ca4aSScott Wood {
55920d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
56020d695a9SAndreas Färber     CPUPPCState *cenv = &cpu->env;
5615666ca4aSScott Wood     int ret;
5625666ca4aSScott Wood 
5634656e1f0SBenjamin Herrenschmidt     /* Gather server mmu info from KVM and update the CPU state */
564a60f24b5SAndreas Färber     kvm_fixup_page_sizes(cpu);
5654656e1f0SBenjamin Herrenschmidt 
5664656e1f0SBenjamin Herrenschmidt     /* Synchronize sregs with kvm */
5671bc22652SAndreas Färber     ret = kvm_arch_sync_sregs(cpu);
5685666ca4aSScott Wood     if (ret) {
569388e47c7SThomas Huth         if (ret == -EINVAL) {
570388e47c7SThomas Huth             error_report("Register sync failed... If you're using kvm-hv.ko,"
571388e47c7SThomas Huth                          " only \"-cpu host\" is possible");
572388e47c7SThomas Huth         }
5735666ca4aSScott Wood         return ret;
5745666ca4aSScott Wood     }
575861bbc80SAlexander Graf 
576bc72ad67SAlex Bligh     idle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, kvm_kick_cpu, cpu);
577c821c2bdSAlexander Graf 
57893dd5e85SScott Wood     switch (cenv->mmu_model) {
57993dd5e85SScott Wood     case POWERPC_MMU_BOOKE206:
5807f516c96SThomas Huth         /* This target supports access to KVM's guest TLB */
5811bc22652SAndreas Färber         ret = kvm_booke206_tlb_init(cpu);
58293dd5e85SScott Wood         break;
5837f516c96SThomas Huth     case POWERPC_MMU_2_07:
5847f516c96SThomas Huth         if (!cap_htm && !kvmppc_is_pr(cs->kvm_state)) {
5857f516c96SThomas Huth             /* KVM-HV provides transactional memory on POWER8 even without the
5867f516c96SThomas Huth              * KVM_CAP_PPC_HTM extension, so enable it here instead. */
5877f516c96SThomas Huth             cap_htm = true;
5887f516c96SThomas Huth         }
5897f516c96SThomas Huth         break;
59093dd5e85SScott Wood     default:
59193dd5e85SScott Wood         break;
59293dd5e85SScott Wood     }
59393dd5e85SScott Wood 
5943c902d44SBharat Bhushan     kvm_get_one_reg(cs, KVM_REG_PPC_DEBUG_INST, &debug_inst_opcode);
59588365d17SBharat Bhushan     kvmppc_hw_debug_points_init(cenv);
5963c902d44SBharat Bhushan 
597861bbc80SAlexander Graf     return ret;
598d76d1650Saurel32 }
599d76d1650Saurel32 
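/* Write the entire shadow TLB back to KVM by marking every entry dirty. */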
6001bc22652SAndreas Färber static void kvm_sw_tlb_put(PowerPCCPU *cpu)
60193dd5e85SScott Wood {
6021bc22652SAndreas Färber     CPUPPCState *env = &cpu->env;
6031bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
60493dd5e85SScott Wood     struct kvm_dirty_tlb dirty_tlb;
60593dd5e85SScott Wood     unsigned char *bitmap;
60693dd5e85SScott Wood     int ret;
60793dd5e85SScott Wood 
60893dd5e85SScott Wood     if (!env->kvm_sw_tlb) {
60993dd5e85SScott Wood         return;
61093dd5e85SScott Wood     }
61193dd5e85SScott Wood 
61293dd5e85SScott Wood     bitmap = g_malloc((env->nb_tlb + 7) / 8);
61393dd5e85SScott Wood     memset(bitmap, 0xFF, (env->nb_tlb + 7) / 8);
61493dd5e85SScott Wood 
61593dd5e85SScott Wood     dirty_tlb.bitmap = (uintptr_t)bitmap;
61693dd5e85SScott Wood     dirty_tlb.num_dirty = env->nb_tlb;
61793dd5e85SScott Wood 
6181bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_DIRTY_TLB, &dirty_tlb);
61993dd5e85SScott Wood     if (ret) {
62093dd5e85SScott Wood         fprintf(stderr, "%s: KVM_DIRTY_TLB: %s\n",
62193dd5e85SScott Wood                 __func__, strerror(-ret));
62293dd5e85SScott Wood     }
62393dd5e85SScott Wood 
62493dd5e85SScott Wood     g_free(bitmap);
62593dd5e85SScott Wood }
62693dd5e85SScott Wood 
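/* Read one SPR from KVM via KVM_GET_ONE_REG into env->spr[spr]. */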
627d67d40eaSDavid Gibson static void kvm_get_one_spr(CPUState *cs, uint64_t id, int spr)
628d67d40eaSDavid Gibson {
629d67d40eaSDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
630d67d40eaSDavid Gibson     CPUPPCState *env = &cpu->env;
631d67d40eaSDavid Gibson     union {
632d67d40eaSDavid Gibson         uint32_t u32;
633d67d40eaSDavid Gibson         uint64_t u64;
634d67d40eaSDavid Gibson     } val;
635d67d40eaSDavid Gibson     struct kvm_one_reg reg = {
636d67d40eaSDavid Gibson         .id = id,
637d67d40eaSDavid Gibson         .addr = (uintptr_t) &val,
638d67d40eaSDavid Gibson     };
639d67d40eaSDavid Gibson     int ret;
640d67d40eaSDavid Gibson 
641d67d40eaSDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
642d67d40eaSDavid Gibson     if (ret != 0) {
643b36f100eSAlexey Kardashevskiy         trace_kvm_failed_spr_get(spr, strerror(errno));
644d67d40eaSDavid Gibson     } else {
645d67d40eaSDavid Gibson         switch (id & KVM_REG_SIZE_MASK) {
646d67d40eaSDavid Gibson         case KVM_REG_SIZE_U32:
647d67d40eaSDavid Gibson             env->spr[spr] = val.u32;
648d67d40eaSDavid Gibson             break;
649d67d40eaSDavid Gibson 
650d67d40eaSDavid Gibson         case KVM_REG_SIZE_U64:
651d67d40eaSDavid Gibson             env->spr[spr] = val.u64;
652d67d40eaSDavid Gibson             break;
653d67d40eaSDavid Gibson 
654d67d40eaSDavid Gibson         default:
655d67d40eaSDavid Gibson             /* Don't handle this size yet */
656d67d40eaSDavid Gibson             abort();
657d67d40eaSDavid Gibson         }
658d67d40eaSDavid Gibson     }
659d67d40eaSDavid Gibson }
660d67d40eaSDavid Gibson 
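/* Write env->spr[spr] to KVM via KVM_SET_ONE_REG. */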
661d67d40eaSDavid Gibson static void kvm_put_one_spr(CPUState *cs, uint64_t id, int spr)
662d67d40eaSDavid Gibson {
663d67d40eaSDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
664d67d40eaSDavid Gibson     CPUPPCState *env = &cpu->env;
665d67d40eaSDavid Gibson     union {
666d67d40eaSDavid Gibson         uint32_t u32;
667d67d40eaSDavid Gibson         uint64_t u64;
668d67d40eaSDavid Gibson     } val;
669d67d40eaSDavid Gibson     struct kvm_one_reg reg = {
670d67d40eaSDavid Gibson         .id = id,
671d67d40eaSDavid Gibson         .addr = (uintptr_t) &val,
672d67d40eaSDavid Gibson     };
673d67d40eaSDavid Gibson     int ret;
674d67d40eaSDavid Gibson 
675d67d40eaSDavid Gibson     switch (id & KVM_REG_SIZE_MASK) {
676d67d40eaSDavid Gibson     case KVM_REG_SIZE_U32:
677d67d40eaSDavid Gibson         val.u32 = env->spr[spr];
678d67d40eaSDavid Gibson         break;
679d67d40eaSDavid Gibson 
680d67d40eaSDavid Gibson     case KVM_REG_SIZE_U64:
681d67d40eaSDavid Gibson         val.u64 = env->spr[spr];
682d67d40eaSDavid Gibson         break;
683d67d40eaSDavid Gibson 
684d67d40eaSDavid Gibson     default:
685d67d40eaSDavid Gibson         /* Don't handle this size yet */
686d67d40eaSDavid Gibson         abort();
687d67d40eaSDavid Gibson     }
688d67d40eaSDavid Gibson 
689d67d40eaSDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
690d67d40eaSDavid Gibson     if (ret != 0) {
691b36f100eSAlexey Kardashevskiy         trace_kvm_failed_spr_set(spr, strerror(errno));
692d67d40eaSDavid Gibson     }
693d67d40eaSDavid Gibson }
694d67d40eaSDavid Gibson 
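/* Transfer FPSCR, FPR/VSR and AltiVec register state from QEMU to KVM. */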
69570b79849SDavid Gibson static int kvm_put_fp(CPUState *cs)
69670b79849SDavid Gibson {
69770b79849SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
69870b79849SDavid Gibson     CPUPPCState *env = &cpu->env;
69970b79849SDavid Gibson     struct kvm_one_reg reg;
70070b79849SDavid Gibson     int i;
70170b79849SDavid Gibson     int ret;
70270b79849SDavid Gibson 
70370b79849SDavid Gibson     if (env->insns_flags & PPC_FLOAT) {
70470b79849SDavid Gibson         uint64_t fpscr = env->fpscr;
70570b79849SDavid Gibson         bool vsx = !!(env->insns_flags2 & PPC2_VSX);
70670b79849SDavid Gibson 
70770b79849SDavid Gibson         reg.id = KVM_REG_PPC_FPSCR;
70870b79849SDavid Gibson         reg.addr = (uintptr_t)&fpscr;
70970b79849SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
71070b79849SDavid Gibson         if (ret < 0) {
711da56ff91SPeter Maydell             DPRINTF("Unable to set FPSCR to KVM: %s\n", strerror(errno));
71270b79849SDavid Gibson             return ret;
71370b79849SDavid Gibson         }
71470b79849SDavid Gibson 
71570b79849SDavid Gibson         for (i = 0; i < 32; i++) {
71670b79849SDavid Gibson             uint64_t vsr[2];
71770b79849SDavid Gibson 
7183a4b791bSGreg Kurz #ifdef HOST_WORDS_BIGENDIAN
71970b79849SDavid Gibson             vsr[0] = float64_val(env->fpr[i]);
72070b79849SDavid Gibson             vsr[1] = env->vsr[i];
7213a4b791bSGreg Kurz #else
7223a4b791bSGreg Kurz             vsr[0] = env->vsr[i];
7233a4b791bSGreg Kurz             vsr[1] = float64_val(env->fpr[i]);
7243a4b791bSGreg Kurz #endif
72570b79849SDavid Gibson             reg.addr = (uintptr_t) &vsr;
72670b79849SDavid Gibson             reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);
72770b79849SDavid Gibson 
72870b79849SDavid Gibson             ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
72970b79849SDavid Gibson             if (ret < 0) {
730da56ff91SPeter Maydell                 DPRINTF("Unable to set %s%d to KVM: %s\n", vsx ? "VSR" : "FPR",
73170b79849SDavid Gibson                         i, strerror(errno));
73270b79849SDavid Gibson                 return ret;
73370b79849SDavid Gibson             }
73470b79849SDavid Gibson         }
73570b79849SDavid Gibson     }
73670b79849SDavid Gibson 
73770b79849SDavid Gibson     if (env->insns_flags & PPC_ALTIVEC) {
73870b79849SDavid Gibson         reg.id = KVM_REG_PPC_VSCR;
73970b79849SDavid Gibson         reg.addr = (uintptr_t)&env->vscr;
74070b79849SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
74170b79849SDavid Gibson         if (ret < 0) {
742da56ff91SPeter Maydell             DPRINTF("Unable to set VSCR to KVM: %s\n", strerror(errno));
74370b79849SDavid Gibson             return ret;
74470b79849SDavid Gibson         }
74570b79849SDavid Gibson 
74670b79849SDavid Gibson         for (i = 0; i < 32; i++) {
74770b79849SDavid Gibson             reg.id = KVM_REG_PPC_VR(i);
74870b79849SDavid Gibson             reg.addr = (uintptr_t)&env->avr[i];
74970b79849SDavid Gibson             ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
75070b79849SDavid Gibson             if (ret < 0) {
751da56ff91SPeter Maydell                 DPRINTF("Unable to set VR%d to KVM: %s\n", i, strerror(errno));
75270b79849SDavid Gibson                 return ret;
75370b79849SDavid Gibson             }
75470b79849SDavid Gibson         }
75570b79849SDavid Gibson     }
75670b79849SDavid Gibson 
75770b79849SDavid Gibson     return 0;
75870b79849SDavid Gibson }
75970b79849SDavid Gibson 
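/* Fetch FPSCR, FPR/VSR and AltiVec register state from KVM into QEMU. */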
76070b79849SDavid Gibson static int kvm_get_fp(CPUState *cs)
76170b79849SDavid Gibson {
76270b79849SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
76370b79849SDavid Gibson     CPUPPCState *env = &cpu->env;
76470b79849SDavid Gibson     struct kvm_one_reg reg;
76570b79849SDavid Gibson     int i;
76670b79849SDavid Gibson     int ret;
76770b79849SDavid Gibson 
76870b79849SDavid Gibson     if (env->insns_flags & PPC_FLOAT) {
76970b79849SDavid Gibson         uint64_t fpscr;
77070b79849SDavid Gibson         bool vsx = !!(env->insns_flags2 & PPC2_VSX);
77170b79849SDavid Gibson 
77270b79849SDavid Gibson         reg.id = KVM_REG_PPC_FPSCR;
77370b79849SDavid Gibson         reg.addr = (uintptr_t)&fpscr;
77470b79849SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
77570b79849SDavid Gibson         if (ret < 0) {
776da56ff91SPeter Maydell             DPRINTF("Unable to get FPSCR from KVM: %s\n", strerror(errno));
77770b79849SDavid Gibson             return ret;
77870b79849SDavid Gibson         } else {
77970b79849SDavid Gibson             env->fpscr = fpscr;
78070b79849SDavid Gibson         }
78170b79849SDavid Gibson 
78270b79849SDavid Gibson         for (i = 0; i < 32; i++) {
78370b79849SDavid Gibson             uint64_t vsr[2];
78470b79849SDavid Gibson 
78570b79849SDavid Gibson             reg.addr = (uintptr_t) &vsr;
78670b79849SDavid Gibson             reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);
78770b79849SDavid Gibson 
78870b79849SDavid Gibson             ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
78970b79849SDavid Gibson             if (ret < 0) {
790da56ff91SPeter Maydell                 DPRINTF("Unable to get %s%d from KVM: %s\n",
79170b79849SDavid Gibson                         vsx ? "VSR" : "FPR", i, strerror(errno));
79270b79849SDavid Gibson                 return ret;
79370b79849SDavid Gibson             } else {
7943a4b791bSGreg Kurz #ifdef HOST_WORDS_BIGENDIAN
79570b79849SDavid Gibson                 env->fpr[i] = vsr[0];
79670b79849SDavid Gibson                 if (vsx) {
79770b79849SDavid Gibson                     env->vsr[i] = vsr[1];
79870b79849SDavid Gibson                 }
7993a4b791bSGreg Kurz #else
8003a4b791bSGreg Kurz                 env->fpr[i] = vsr[1];
8013a4b791bSGreg Kurz                 if (vsx) {
8023a4b791bSGreg Kurz                     env->vsr[i] = vsr[0];
8033a4b791bSGreg Kurz                 }
8043a4b791bSGreg Kurz #endif
80570b79849SDavid Gibson             }
80670b79849SDavid Gibson         }
80770b79849SDavid Gibson     }
80870b79849SDavid Gibson 
80970b79849SDavid Gibson     if (env->insns_flags & PPC_ALTIVEC) {
81070b79849SDavid Gibson         reg.id = KVM_REG_PPC_VSCR;
81170b79849SDavid Gibson         reg.addr = (uintptr_t)&env->vscr;
81270b79849SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
81370b79849SDavid Gibson         if (ret < 0) {
814da56ff91SPeter Maydell             DPRINTF("Unable to get VSCR from KVM: %s\n", strerror(errno));
81570b79849SDavid Gibson             return ret;
81670b79849SDavid Gibson         }
81770b79849SDavid Gibson 
81870b79849SDavid Gibson         for (i = 0; i < 32; i++) {
81970b79849SDavid Gibson             reg.id = KVM_REG_PPC_VR(i);
82070b79849SDavid Gibson             reg.addr = (uintptr_t)&env->avr[i];
82170b79849SDavid Gibson             ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
82270b79849SDavid Gibson             if (ret < 0) {
823da56ff91SPeter Maydell                 DPRINTF("Unable to get VR%d from KVM: %s\n",
82470b79849SDavid Gibson                         i, strerror(errno));
82570b79849SDavid Gibson                 return ret;
82670b79849SDavid Gibson             }
82770b79849SDavid Gibson         }
82870b79849SDavid Gibson     }
82970b79849SDavid Gibson 
83070b79849SDavid Gibson     return 0;
83170b79849SDavid Gibson }
83270b79849SDavid Gibson 
8339b00ea49SDavid Gibson #if defined(TARGET_PPC64)
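/* Read the VPA, SLB shadow and dispatch trace log registration from KVM. */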
8349b00ea49SDavid Gibson static int kvm_get_vpa(CPUState *cs)
8359b00ea49SDavid Gibson {
8369b00ea49SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
8379b00ea49SDavid Gibson     CPUPPCState *env = &cpu->env;
8389b00ea49SDavid Gibson     struct kvm_one_reg reg;
8399b00ea49SDavid Gibson     int ret;
8409b00ea49SDavid Gibson 
8419b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_ADDR;
8429b00ea49SDavid Gibson     reg.addr = (uintptr_t)&env->vpa_addr;
8439b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
8449b00ea49SDavid Gibson     if (ret < 0) {
845da56ff91SPeter Maydell         DPRINTF("Unable to get VPA address from KVM: %s\n", strerror(errno));
8469b00ea49SDavid Gibson         return ret;
8479b00ea49SDavid Gibson     }
8489b00ea49SDavid Gibson 
8499b00ea49SDavid Gibson     assert((uintptr_t)&env->slb_shadow_size
8509b00ea49SDavid Gibson            == ((uintptr_t)&env->slb_shadow_addr + 8));
8519b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_SLB;
8529b00ea49SDavid Gibson     reg.addr = (uintptr_t)&env->slb_shadow_addr;
8539b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
8549b00ea49SDavid Gibson     if (ret < 0) {
855da56ff91SPeter Maydell         DPRINTF("Unable to get SLB shadow state from KVM: %s\n",
8569b00ea49SDavid Gibson                 strerror(errno));
8579b00ea49SDavid Gibson         return ret;
8589b00ea49SDavid Gibson     }
8599b00ea49SDavid Gibson 
8609b00ea49SDavid Gibson     assert((uintptr_t)&env->dtl_size == ((uintptr_t)&env->dtl_addr + 8));
8619b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_DTL;
8629b00ea49SDavid Gibson     reg.addr = (uintptr_t)&env->dtl_addr;
8639b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
8649b00ea49SDavid Gibson     if (ret < 0) {
865da56ff91SPeter Maydell         DPRINTF("Unable to get dispatch trace log state from KVM: %s\n",
8669b00ea49SDavid Gibson                 strerror(errno));
8679b00ea49SDavid Gibson         return ret;
8689b00ea49SDavid Gibson     }
8699b00ea49SDavid Gibson 
8709b00ea49SDavid Gibson     return 0;
8719b00ea49SDavid Gibson }
8729b00ea49SDavid Gibson 
8739b00ea49SDavid Gibson static int kvm_put_vpa(CPUState *cs)
8749b00ea49SDavid Gibson {
8759b00ea49SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
8769b00ea49SDavid Gibson     CPUPPCState *env = &cpu->env;
8779b00ea49SDavid Gibson     struct kvm_one_reg reg;
8789b00ea49SDavid Gibson     int ret;
8799b00ea49SDavid Gibson 
8809b00ea49SDavid Gibson     /* SLB shadow or DTL can't be registered unless a master VPA is
8819b00ea49SDavid Gibson      * registered.  That means when restoring state, if a VPA *is*
8829b00ea49SDavid Gibson      * registered, we need to set that up first.  If not, we need to
8839b00ea49SDavid Gibson      * deregister the others before deregistering the master VPA */
8849b00ea49SDavid Gibson     assert(env->vpa_addr || !(env->slb_shadow_addr || env->dtl_addr));
8859b00ea49SDavid Gibson 
8869b00ea49SDavid Gibson     if (env->vpa_addr) {
8879b00ea49SDavid Gibson         reg.id = KVM_REG_PPC_VPA_ADDR;
8889b00ea49SDavid Gibson         reg.addr = (uintptr_t)&env->vpa_addr;
8899b00ea49SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
8909b00ea49SDavid Gibson         if (ret < 0) {
891da56ff91SPeter Maydell             DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno));
8929b00ea49SDavid Gibson             return ret;
8939b00ea49SDavid Gibson         }
8949b00ea49SDavid Gibson     }
8959b00ea49SDavid Gibson 
8969b00ea49SDavid Gibson     assert((uintptr_t)&env->slb_shadow_size
8979b00ea49SDavid Gibson            == ((uintptr_t)&env->slb_shadow_addr + 8));
8989b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_SLB;
8999b00ea49SDavid Gibson     reg.addr = (uintptr_t)&env->slb_shadow_addr;
9009b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
9019b00ea49SDavid Gibson     if (ret < 0) {
902da56ff91SPeter Maydell         DPRINTF("Unable to set SLB shadow state to KVM: %s\n", strerror(errno));
9039b00ea49SDavid Gibson         return ret;
9049b00ea49SDavid Gibson     }
9059b00ea49SDavid Gibson 
9069b00ea49SDavid Gibson     assert((uintptr_t)&env->dtl_size == ((uintptr_t)&env->dtl_addr + 8));
9079b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_DTL;
9089b00ea49SDavid Gibson     reg.addr = (uintptr_t)&env->dtl_addr;
9099b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
9109b00ea49SDavid Gibson     if (ret < 0) {
911da56ff91SPeter Maydell         DPRINTF("Unable to set dispatch trace log state to KVM: %s\n",
9129b00ea49SDavid Gibson                 strerror(errno));
9139b00ea49SDavid Gibson         return ret;
9149b00ea49SDavid Gibson     }
9159b00ea49SDavid Gibson 
9169b00ea49SDavid Gibson     if (!env->vpa_addr) {
9179b00ea49SDavid Gibson         reg.id = KVM_REG_PPC_VPA_ADDR;
9189b00ea49SDavid Gibson         reg.addr = (uintptr_t)&env->vpa_addr;
9199b00ea49SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
9209b00ea49SDavid Gibson         if (ret < 0) {
921da56ff91SPeter Maydell             DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno));
9229b00ea49SDavid Gibson             return ret;
9239b00ea49SDavid Gibson         }
9249b00ea49SDavid Gibson     }
9259b00ea49SDavid Gibson 
9269b00ea49SDavid Gibson     return 0;
9279b00ea49SDavid Gibson }
9289b00ea49SDavid Gibson #endif /* TARGET_PPC64 */
9299b00ea49SDavid Gibson 
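/* Sync the book3s MMU state (PVR, SDR1, SLB, SRs and BATs) to KVM. */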
930e5c0d3ceSDavid Gibson int kvmppc_put_books_sregs(PowerPCCPU *cpu)
931a7a00a72SDavid Gibson {
932a7a00a72SDavid Gibson     CPUPPCState *env = &cpu->env;
933a7a00a72SDavid Gibson     struct kvm_sregs sregs;
934a7a00a72SDavid Gibson     int i;
935a7a00a72SDavid Gibson 
936a7a00a72SDavid Gibson     sregs.pvr = env->spr[SPR_PVR];
937a7a00a72SDavid Gibson 
938a7a00a72SDavid Gibson     sregs.u.s.sdr1 = env->spr[SPR_SDR1];
939a7a00a72SDavid Gibson 
940a7a00a72SDavid Gibson     /* Sync SLB */
941a7a00a72SDavid Gibson #ifdef TARGET_PPC64
942a7a00a72SDavid Gibson     for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
943a7a00a72SDavid Gibson         sregs.u.s.ppc64.slb[i].slbe = env->slb[i].esid;
944a7a00a72SDavid Gibson         if (env->slb[i].esid & SLB_ESID_V) {
945a7a00a72SDavid Gibson             sregs.u.s.ppc64.slb[i].slbe |= i;
946a7a00a72SDavid Gibson         }
947a7a00a72SDavid Gibson         sregs.u.s.ppc64.slb[i].slbv = env->slb[i].vsid;
948a7a00a72SDavid Gibson     }
949a7a00a72SDavid Gibson #endif
950a7a00a72SDavid Gibson 
951a7a00a72SDavid Gibson     /* Sync SRs */
952a7a00a72SDavid Gibson     for (i = 0; i < 16; i++) {
953a7a00a72SDavid Gibson         sregs.u.s.ppc32.sr[i] = env->sr[i];
954a7a00a72SDavid Gibson     }
955a7a00a72SDavid Gibson 
956a7a00a72SDavid Gibson     /* Sync BATs */
957a7a00a72SDavid Gibson     for (i = 0; i < 8; i++) {
958a7a00a72SDavid Gibson         /* Beware. We have to swap upper and lower bits here */
959a7a00a72SDavid Gibson         sregs.u.s.ppc32.dbat[i] = ((uint64_t)env->DBAT[0][i] << 32)
960a7a00a72SDavid Gibson             | env->DBAT[1][i];
961a7a00a72SDavid Gibson         sregs.u.s.ppc32.ibat[i] = ((uint64_t)env->IBAT[0][i] << 32)
962a7a00a72SDavid Gibson             | env->IBAT[1][i];
963a7a00a72SDavid Gibson     }
964a7a00a72SDavid Gibson 
965a7a00a72SDavid Gibson     return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
966a7a00a72SDavid Gibson }
967a7a00a72SDavid Gibson 
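/* Copy the vCPU register state (GPRs, CR, FP/vector, SPRs, sregs, TM state, VPA) from QEMU to KVM. */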
96820d695a9SAndreas Färber int kvm_arch_put_registers(CPUState *cs, int level)
969d76d1650Saurel32 {
97020d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
97120d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
972d76d1650Saurel32     struct kvm_regs regs;
973d76d1650Saurel32     int ret;
974d76d1650Saurel32     int i;
975d76d1650Saurel32 
9761bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
9771bc22652SAndreas Färber     if (ret < 0) {
978d76d1650Saurel32         return ret;
9791bc22652SAndreas Färber     }
980d76d1650Saurel32 
981d76d1650Saurel32     regs.ctr = env->ctr;
982d76d1650Saurel32     regs.lr  = env->lr;
983da91a00fSRichard Henderson     regs.xer = cpu_read_xer(env);
984d76d1650Saurel32     regs.msr = env->msr;
985d76d1650Saurel32     regs.pc = env->nip;
986d76d1650Saurel32 
987d76d1650Saurel32     regs.srr0 = env->spr[SPR_SRR0];
988d76d1650Saurel32     regs.srr1 = env->spr[SPR_SRR1];
989d76d1650Saurel32 
990d76d1650Saurel32     regs.sprg0 = env->spr[SPR_SPRG0];
991d76d1650Saurel32     regs.sprg1 = env->spr[SPR_SPRG1];
992d76d1650Saurel32     regs.sprg2 = env->spr[SPR_SPRG2];
993d76d1650Saurel32     regs.sprg3 = env->spr[SPR_SPRG3];
994d76d1650Saurel32     regs.sprg4 = env->spr[SPR_SPRG4];
995d76d1650Saurel32     regs.sprg5 = env->spr[SPR_SPRG5];
996d76d1650Saurel32     regs.sprg6 = env->spr[SPR_SPRG6];
997d76d1650Saurel32     regs.sprg7 = env->spr[SPR_SPRG7];
998d76d1650Saurel32 
99990dc8812SScott Wood     regs.pid = env->spr[SPR_BOOKE_PID];
100090dc8812SScott Wood 
1001d76d1650Saurel32     for (i = 0;i < 32; i++)
1002d76d1650Saurel32         regs.gpr[i] = env->gpr[i];
1003d76d1650Saurel32 
10044bddaf55SAlexey Kardashevskiy     regs.cr = 0;
10054bddaf55SAlexey Kardashevskiy     for (i = 0; i < 8; i++) {
10064bddaf55SAlexey Kardashevskiy         regs.cr |= (env->crf[i] & 15) << (4 * (7 - i));
10074bddaf55SAlexey Kardashevskiy     }
10084bddaf55SAlexey Kardashevskiy 
10091bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
1010d76d1650Saurel32     if (ret < 0)
1011d76d1650Saurel32         return ret;
1012d76d1650Saurel32 
101370b79849SDavid Gibson     kvm_put_fp(cs);
101470b79849SDavid Gibson 
101593dd5e85SScott Wood     if (env->tlb_dirty) {
10161bc22652SAndreas Färber         kvm_sw_tlb_put(cpu);
101793dd5e85SScott Wood         env->tlb_dirty = false;
101893dd5e85SScott Wood     }
101993dd5e85SScott Wood 
1020f1af19d7SDavid Gibson     if (cap_segstate && (level >= KVM_PUT_RESET_STATE)) {
1021a7a00a72SDavid Gibson         ret = kvmppc_put_books_sregs(cpu);
1022a7a00a72SDavid Gibson         if (ret < 0) {
1023f1af19d7SDavid Gibson             return ret;
1024f1af19d7SDavid Gibson         }
1025f1af19d7SDavid Gibson     }
1026f1af19d7SDavid Gibson 
1027f1af19d7SDavid Gibson     if (cap_hior && (level >= KVM_PUT_RESET_STATE)) {
1028d67d40eaSDavid Gibson         kvm_put_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
1029d67d40eaSDavid Gibson     }
1030f1af19d7SDavid Gibson 
1031d67d40eaSDavid Gibson     if (cap_one_reg) {
1032d67d40eaSDavid Gibson         int i;
1033d67d40eaSDavid Gibson 
1034d67d40eaSDavid Gibson         /* We deliberately ignore errors here: for kernels which have
1035d67d40eaSDavid Gibson          * the ONE_REG calls but don't support the specific registers,
1036d67d40eaSDavid Gibson          * there's a reasonable chance things will still work, at
1037d67d40eaSDavid Gibson          * least until we try to migrate. */
1038d67d40eaSDavid Gibson         for (i = 0; i < 1024; i++) {
1039d67d40eaSDavid Gibson             uint64_t id = env->spr_cb[i].one_reg_id;
1040d67d40eaSDavid Gibson 
1041d67d40eaSDavid Gibson             if (id != 0) {
1042d67d40eaSDavid Gibson                 kvm_put_one_spr(cs, id, i);
1043d67d40eaSDavid Gibson             }
1044f1af19d7SDavid Gibson         }
10459b00ea49SDavid Gibson 
10469b00ea49SDavid Gibson #ifdef TARGET_PPC64
104780b3f79bSAlexey Kardashevskiy         if (msr_ts) {
104880b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
104980b3f79bSAlexey Kardashevskiy                 kvm_set_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
105080b3f79bSAlexey Kardashevskiy             }
105180b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
105280b3f79bSAlexey Kardashevskiy                 kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
105380b3f79bSAlexey Kardashevskiy             }
105480b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
105580b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
105680b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
105780b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
105880b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
105980b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
106080b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
106180b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
106280b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
106380b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
106480b3f79bSAlexey Kardashevskiy         }
106580b3f79bSAlexey Kardashevskiy 
10669b00ea49SDavid Gibson         if (cap_papr) {
10679b00ea49SDavid Gibson             if (kvm_put_vpa(cs) < 0) {
1068da56ff91SPeter Maydell                 DPRINTF("Warning: Unable to set VPA information to KVM\n");
10699b00ea49SDavid Gibson             }
10709b00ea49SDavid Gibson         }
107198a8b524SAlexey Kardashevskiy 
107298a8b524SAlexey Kardashevskiy         kvm_set_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
10739b00ea49SDavid Gibson #endif /* TARGET_PPC64 */
1074f1af19d7SDavid Gibson     }
1075f1af19d7SDavid Gibson 
1076d76d1650Saurel32     return ret;
1077d76d1650Saurel32 }
1078d76d1650Saurel32 
1079c371c2e3SBharat Bhushan static void kvm_sync_excp(CPUPPCState *env, int vector, int ivor)
1080c371c2e3SBharat Bhushan {
1081c371c2e3SBharat Bhushan      env->excp_vectors[vector] = env->spr[ivor] + env->spr[SPR_BOOKE_IVPR];
1082c371c2e3SBharat Bhushan }
1083c371c2e3SBharat Bhushan 
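/*
 * Read the BookE sregs back from KVM and update the matching SPRs in
 * env; each group is only copied if KVM advertises it through
 * sregs.u.e.features.
 */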
1084a7a00a72SDavid Gibson static int kvmppc_get_booke_sregs(PowerPCCPU *cpu)
1085d76d1650Saurel32 {
108620d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
1087ba5e5090SAlexander Graf     struct kvm_sregs sregs;
1088a7a00a72SDavid Gibson     int ret;
1089d76d1650Saurel32 
1090a7a00a72SDavid Gibson     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
109190dc8812SScott Wood     if (ret < 0) {
109290dc8812SScott Wood         return ret;
109390dc8812SScott Wood     }
109490dc8812SScott Wood 
109590dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_BASE) {
109690dc8812SScott Wood         env->spr[SPR_BOOKE_CSRR0] = sregs.u.e.csrr0;
109790dc8812SScott Wood         env->spr[SPR_BOOKE_CSRR1] = sregs.u.e.csrr1;
109890dc8812SScott Wood         env->spr[SPR_BOOKE_ESR] = sregs.u.e.esr;
109990dc8812SScott Wood         env->spr[SPR_BOOKE_DEAR] = sregs.u.e.dear;
110090dc8812SScott Wood         env->spr[SPR_BOOKE_MCSR] = sregs.u.e.mcsr;
110190dc8812SScott Wood         env->spr[SPR_BOOKE_TSR] = sregs.u.e.tsr;
110290dc8812SScott Wood         env->spr[SPR_BOOKE_TCR] = sregs.u.e.tcr;
110390dc8812SScott Wood         env->spr[SPR_DECR] = sregs.u.e.dec;
110490dc8812SScott Wood         env->spr[SPR_TBL] = sregs.u.e.tb & 0xffffffff;
110590dc8812SScott Wood         env->spr[SPR_TBU] = sregs.u.e.tb >> 32;
110690dc8812SScott Wood         env->spr[SPR_VRSAVE] = sregs.u.e.vrsave;
110790dc8812SScott Wood     }
110890dc8812SScott Wood 
110990dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_ARCH206) {
111090dc8812SScott Wood         env->spr[SPR_BOOKE_PIR] = sregs.u.e.pir;
111190dc8812SScott Wood         env->spr[SPR_BOOKE_MCSRR0] = sregs.u.e.mcsrr0;
111290dc8812SScott Wood         env->spr[SPR_BOOKE_MCSRR1] = sregs.u.e.mcsrr1;
111390dc8812SScott Wood         env->spr[SPR_BOOKE_DECAR] = sregs.u.e.decar;
111490dc8812SScott Wood         env->spr[SPR_BOOKE_IVPR] = sregs.u.e.ivpr;
111590dc8812SScott Wood     }
111690dc8812SScott Wood 
111790dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_64) {
111890dc8812SScott Wood         env->spr[SPR_BOOKE_EPCR] = sregs.u.e.epcr;
111990dc8812SScott Wood     }
112090dc8812SScott Wood 
112190dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_SPRG8) {
112290dc8812SScott Wood         env->spr[SPR_BOOKE_SPRG8] = sregs.u.e.sprg8;
112390dc8812SScott Wood     }
112490dc8812SScott Wood 
112590dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_IVOR) {
112690dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0];
1127c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_CRITICAL,  SPR_BOOKE_IVOR0);
112890dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1];
1129c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_MCHECK,  SPR_BOOKE_IVOR1);
113090dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2];
1131c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DSI,  SPR_BOOKE_IVOR2);
113290dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3];
1133c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_ISI,  SPR_BOOKE_IVOR3);
113490dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4];
1135c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_EXTERNAL,  SPR_BOOKE_IVOR4);
113690dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5];
1137c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_ALIGN,  SPR_BOOKE_IVOR5);
113890dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6];
1139c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_PROGRAM,  SPR_BOOKE_IVOR6);
114090dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7];
1141c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_FPU,  SPR_BOOKE_IVOR7);
114290dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8];
1143c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_SYSCALL,  SPR_BOOKE_IVOR8);
114490dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9];
1145c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_APU,  SPR_BOOKE_IVOR9);
114690dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10];
1147c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DECR,  SPR_BOOKE_IVOR10);
114890dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11];
1149c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_FIT,  SPR_BOOKE_IVOR11);
115090dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12];
1151c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_WDT,  SPR_BOOKE_IVOR12);
115290dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13];
1153c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DTLB,  SPR_BOOKE_IVOR13);
115490dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14];
1155c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_ITLB,  SPR_BOOKE_IVOR14);
115690dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15];
1157c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DEBUG,  SPR_BOOKE_IVOR15);
115890dc8812SScott Wood 
115990dc8812SScott Wood         if (sregs.u.e.features & KVM_SREGS_E_SPE) {
116090dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0];
1161c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_SPEU,  SPR_BOOKE_IVOR32);
116290dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1];
1163c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_EFPDI,  SPR_BOOKE_IVOR33);
116490dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2];
1165c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_EFPRI,  SPR_BOOKE_IVOR34);
116690dc8812SScott Wood         }
116790dc8812SScott Wood 
116890dc8812SScott Wood         if (sregs.u.e.features & KVM_SREGS_E_PM) {
116990dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3];
1170c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_EPERFM,  SPR_BOOKE_IVOR35);
117190dc8812SScott Wood         }
117290dc8812SScott Wood 
117390dc8812SScott Wood         if (sregs.u.e.features & KVM_SREGS_E_PC) {
117490dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4];
1175c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_DOORI,  SPR_BOOKE_IVOR36);
117690dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5];
1177c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_DOORCI, SPR_BOOKE_IVOR37);
117890dc8812SScott Wood         }
117990dc8812SScott Wood     }
118090dc8812SScott Wood 
118190dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_ARCH206_MMU) {
118290dc8812SScott Wood         env->spr[SPR_BOOKE_MAS0] = sregs.u.e.mas0;
118390dc8812SScott Wood         env->spr[SPR_BOOKE_MAS1] = sregs.u.e.mas1;
118490dc8812SScott Wood         env->spr[SPR_BOOKE_MAS2] = sregs.u.e.mas2;
118590dc8812SScott Wood         env->spr[SPR_BOOKE_MAS3] = sregs.u.e.mas7_3 & 0xffffffff;
118690dc8812SScott Wood         env->spr[SPR_BOOKE_MAS4] = sregs.u.e.mas4;
118790dc8812SScott Wood         env->spr[SPR_BOOKE_MAS6] = sregs.u.e.mas6;
118890dc8812SScott Wood         env->spr[SPR_BOOKE_MAS7] = sregs.u.e.mas7_3 >> 32;
118990dc8812SScott Wood         env->spr[SPR_MMUCFG] = sregs.u.e.mmucfg;
119090dc8812SScott Wood         env->spr[SPR_BOOKE_TLB0CFG] = sregs.u.e.tlbcfg[0];
119190dc8812SScott Wood         env->spr[SPR_BOOKE_TLB1CFG] = sregs.u.e.tlbcfg[1];
119290dc8812SScott Wood     }
119390dc8812SScott Wood 
119490dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_EXP) {
119590dc8812SScott Wood         env->spr[SPR_BOOKE_EPR] = sregs.u.e.epr;
119690dc8812SScott Wood     }
119790dc8812SScott Wood 
119890dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_PD) {
119990dc8812SScott Wood         env->spr[SPR_BOOKE_EPLC] = sregs.u.e.eplc;
120090dc8812SScott Wood         env->spr[SPR_BOOKE_EPSC] = sregs.u.e.epsc;
120190dc8812SScott Wood     }
120290dc8812SScott Wood 
120390dc8812SScott Wood     if (sregs.u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
120490dc8812SScott Wood         env->spr[SPR_E500_SVR] = sregs.u.e.impl.fsl.svr;
120590dc8812SScott Wood         env->spr[SPR_Exxx_MCAR] = sregs.u.e.impl.fsl.mcar;
120690dc8812SScott Wood         env->spr[SPR_HID0] = sregs.u.e.impl.fsl.hid0;
120790dc8812SScott Wood 
120890dc8812SScott Wood         if (sregs.u.e.impl.fsl.features & KVM_SREGS_E_FSL_PIDn) {
120990dc8812SScott Wood             env->spr[SPR_BOOKE_PID1] = sregs.u.e.impl.fsl.pid1;
121090dc8812SScott Wood             env->spr[SPR_BOOKE_PID2] = sregs.u.e.impl.fsl.pid2;
121190dc8812SScott Wood         }
121290dc8812SScott Wood     }
1213a7a00a72SDavid Gibson 
1214a7a00a72SDavid Gibson     return 0;
1215fafc0b6aSAlexander Graf }
121690dc8812SScott Wood 
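/*
 * Read the Book3S sregs back from KVM: SDR1, the SLB (64-bit targets
 * only), the segment registers and the BATs.
 */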
1217a7a00a72SDavid Gibson static int kvmppc_get_books_sregs(PowerPCCPU *cpu)
1218a7a00a72SDavid Gibson {
1219a7a00a72SDavid Gibson     CPUPPCState *env = &cpu->env;
1220a7a00a72SDavid Gibson     struct kvm_sregs sregs;
1221a7a00a72SDavid Gibson     int ret;
1222a7a00a72SDavid Gibson     int i;
1223a7a00a72SDavid Gibson 
1224a7a00a72SDavid Gibson     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
122590dc8812SScott Wood     if (ret < 0) {
122690dc8812SScott Wood         return ret;
122790dc8812SScott Wood     }
122890dc8812SScott Wood 
1229f3c75d42SAneesh Kumar K.V     if (!env->external_htab) {
1230bb593904SDavid Gibson         ppc_store_sdr1(env, sregs.u.s.sdr1);
1231f3c75d42SAneesh Kumar K.V     }
1232ba5e5090SAlexander Graf 
1233ba5e5090SAlexander Graf     /* Sync SLB */
123482c09f2fSAlexander Graf #ifdef TARGET_PPC64
12354b4d4a21SAneesh Kumar K.V     /*
12364b4d4a21SAneesh Kumar K.V      * The packed SLB array we get from KVM_GET_SREGS only contains
1237a7a00a72SDavid Gibson      * information about valid entries. So we flush our internal copy
1238a7a00a72SDavid Gibson      * to get rid of stale ones, then put all valid SLB entries back
1239a7a00a72SDavid Gibson      * in.
12404b4d4a21SAneesh Kumar K.V      */
12414b4d4a21SAneesh Kumar K.V     memset(env->slb, 0, sizeof(env->slb));
1242d83af167SAneesh Kumar K.V     for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
12434b4d4a21SAneesh Kumar K.V         target_ulong rb = sregs.u.s.ppc64.slb[i].slbe;
12444b4d4a21SAneesh Kumar K.V         target_ulong rs = sregs.u.s.ppc64.slb[i].slbv;
12454b4d4a21SAneesh Kumar K.V         /*
12464b4d4a21SAneesh Kumar K.V          * Only restore valid entries
12474b4d4a21SAneesh Kumar K.V          */
12484b4d4a21SAneesh Kumar K.V         if (rb & SLB_ESID_V) {
1249bcd81230SDavid Gibson             ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs);
12504b4d4a21SAneesh Kumar K.V         }
1251ba5e5090SAlexander Graf     }
125282c09f2fSAlexander Graf #endif
1253ba5e5090SAlexander Graf 
1254ba5e5090SAlexander Graf     /* Sync SRs */
1255ba5e5090SAlexander Graf     for (i = 0; i < 16; i++) {
1256ba5e5090SAlexander Graf         env->sr[i] = sregs.u.s.ppc32.sr[i];
1257ba5e5090SAlexander Graf     }
1258ba5e5090SAlexander Graf 
1259ba5e5090SAlexander Graf     /* Sync BATs */
1260ba5e5090SAlexander Graf     for (i = 0; i < 8; i++) {
1261ba5e5090SAlexander Graf         env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff;
1262ba5e5090SAlexander Graf         env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32;
1263ba5e5090SAlexander Graf         env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff;
1264ba5e5090SAlexander Graf         env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32;
1265ba5e5090SAlexander Graf     }
1266a7a00a72SDavid Gibson 
1267a7a00a72SDavid Gibson     return 0;
1268a7a00a72SDavid Gibson }
1269a7a00a72SDavid Gibson 
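/*
 * Refresh the QEMU-side vcpu state from KVM: core registers first, then
 * the FP state, the BookE or Book3S sregs as appropriate, and any
 * ONE_REG-backed SPRs the host supports.
 */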
1270a7a00a72SDavid Gibson int kvm_arch_get_registers(CPUState *cs)
1271a7a00a72SDavid Gibson {
1272a7a00a72SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
1273a7a00a72SDavid Gibson     CPUPPCState *env = &cpu->env;
1274a7a00a72SDavid Gibson     struct kvm_regs regs;
1275a7a00a72SDavid Gibson     uint32_t cr;
1276a7a00a72SDavid Gibson     int i, ret;
1277a7a00a72SDavid Gibson 
1278a7a00a72SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
1279a7a00a72SDavid Gibson     if (ret < 0)
1280a7a00a72SDavid Gibson         return ret;
1281a7a00a72SDavid Gibson 
1282a7a00a72SDavid Gibson     cr = regs.cr;
1283a7a00a72SDavid Gibson     for (i = 7; i >= 0; i--) {
1284a7a00a72SDavid Gibson         env->crf[i] = cr & 15;
1285a7a00a72SDavid Gibson         cr >>= 4;
1286a7a00a72SDavid Gibson     }
1287a7a00a72SDavid Gibson 
1288a7a00a72SDavid Gibson     env->ctr = regs.ctr;
1289a7a00a72SDavid Gibson     env->lr = regs.lr;
1290a7a00a72SDavid Gibson     cpu_write_xer(env, regs.xer);
1291a7a00a72SDavid Gibson     env->msr = regs.msr;
1292a7a00a72SDavid Gibson     env->nip = regs.pc;
1293a7a00a72SDavid Gibson 
1294a7a00a72SDavid Gibson     env->spr[SPR_SRR0] = regs.srr0;
1295a7a00a72SDavid Gibson     env->spr[SPR_SRR1] = regs.srr1;
1296a7a00a72SDavid Gibson 
1297a7a00a72SDavid Gibson     env->spr[SPR_SPRG0] = regs.sprg0;
1298a7a00a72SDavid Gibson     env->spr[SPR_SPRG1] = regs.sprg1;
1299a7a00a72SDavid Gibson     env->spr[SPR_SPRG2] = regs.sprg2;
1300a7a00a72SDavid Gibson     env->spr[SPR_SPRG3] = regs.sprg3;
1301a7a00a72SDavid Gibson     env->spr[SPR_SPRG4] = regs.sprg4;
1302a7a00a72SDavid Gibson     env->spr[SPR_SPRG5] = regs.sprg5;
1303a7a00a72SDavid Gibson     env->spr[SPR_SPRG6] = regs.sprg6;
1304a7a00a72SDavid Gibson     env->spr[SPR_SPRG7] = regs.sprg7;
1305a7a00a72SDavid Gibson 
1306a7a00a72SDavid Gibson     env->spr[SPR_BOOKE_PID] = regs.pid;
1307a7a00a72SDavid Gibson 
1308a7a00a72SDavid Gibson     for (i = 0; i < 32; i++)
1309a7a00a72SDavid Gibson         env->gpr[i] = regs.gpr[i];
1310a7a00a72SDavid Gibson 
1311a7a00a72SDavid Gibson     kvm_get_fp(cs);
1312a7a00a72SDavid Gibson 
1313a7a00a72SDavid Gibson     if (cap_booke_sregs) {
1314a7a00a72SDavid Gibson         ret = kvmppc_get_booke_sregs(cpu);
1315a7a00a72SDavid Gibson         if (ret < 0) {
1316a7a00a72SDavid Gibson             return ret;
1317a7a00a72SDavid Gibson         }
1318a7a00a72SDavid Gibson     }
1319a7a00a72SDavid Gibson 
1320a7a00a72SDavid Gibson     if (cap_segstate) {
1321a7a00a72SDavid Gibson         ret = kvmppc_get_books_sregs(cpu);
1322a7a00a72SDavid Gibson         if (ret < 0) {
1323a7a00a72SDavid Gibson             return ret;
1324a7a00a72SDavid Gibson         }
1325fafc0b6aSAlexander Graf     }
1326ba5e5090SAlexander Graf 
1327d67d40eaSDavid Gibson     if (cap_hior) {
1328d67d40eaSDavid Gibson         kvm_get_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
1329d67d40eaSDavid Gibson     }
1330d67d40eaSDavid Gibson 
1331d67d40eaSDavid Gibson     if (cap_one_reg) {
1332d67d40eaSDavid Gibson         int i;
1333d67d40eaSDavid Gibson 
1334d67d40eaSDavid Gibson         /* We deliberately ignore errors here: for kernels which have
1335d67d40eaSDavid Gibson          * the ONE_REG calls but don't support the specific registers,
1336d67d40eaSDavid Gibson          * there's a reasonable chance things will still work, at
1337d67d40eaSDavid Gibson          * least until we try to migrate. */
1338d67d40eaSDavid Gibson         for (i = 0; i < 1024; i++) {
1339d67d40eaSDavid Gibson             uint64_t id = env->spr_cb[i].one_reg_id;
1340d67d40eaSDavid Gibson 
1341d67d40eaSDavid Gibson             if (id != 0) {
1342d67d40eaSDavid Gibson                 kvm_get_one_spr(cs, id, i);
1343d67d40eaSDavid Gibson             }
1344d67d40eaSDavid Gibson         }
13459b00ea49SDavid Gibson 
13469b00ea49SDavid Gibson #ifdef TARGET_PPC64
134780b3f79bSAlexey Kardashevskiy         if (msr_ts) {
134880b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
134980b3f79bSAlexey Kardashevskiy                 kvm_get_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
135080b3f79bSAlexey Kardashevskiy             }
135180b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
135280b3f79bSAlexey Kardashevskiy                 kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
135380b3f79bSAlexey Kardashevskiy             }
135480b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
135580b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
135680b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
135780b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
135880b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
135980b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
136080b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
136180b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
136280b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
136380b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
136480b3f79bSAlexey Kardashevskiy         }
136580b3f79bSAlexey Kardashevskiy 
13669b00ea49SDavid Gibson         if (cap_papr) {
13679b00ea49SDavid Gibson             if (kvm_get_vpa(cs) < 0) {
1368da56ff91SPeter Maydell                 DPRINTF("Warning: Unable to get VPA information from KVM\n");
13699b00ea49SDavid Gibson             }
13709b00ea49SDavid Gibson         }
137198a8b524SAlexey Kardashevskiy 
137298a8b524SAlexey Kardashevskiy         kvm_get_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
13739b00ea49SDavid Gibson #endif
1374d67d40eaSDavid Gibson     }
1375d67d40eaSDavid Gibson 
1376d76d1650Saurel32     return 0;
1377d76d1650Saurel32 }
1378d76d1650Saurel32 
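/*
 * Raise or lower the guest's external interrupt line through
 * KVM_INTERRUPT; all other interrupt inputs are ignored here.
 */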
13791bc22652SAndreas Färber int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
1380fc87e185SAlexander Graf {
1381fc87e185SAlexander Graf     unsigned virq = level ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;
1382fc87e185SAlexander Graf 
1383fc87e185SAlexander Graf     if (irq != PPC_INTERRUPT_EXT) {
1384fc87e185SAlexander Graf         return 0;
1385fc87e185SAlexander Graf     }
1386fc87e185SAlexander Graf 
1387fc87e185SAlexander Graf     if (!kvm_enabled() || !cap_interrupt_unset || !cap_interrupt_level) {
1388fc87e185SAlexander Graf         return 0;
1389fc87e185SAlexander Graf     }
1390fc87e185SAlexander Graf 
13911bc22652SAndreas Färber     kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq);
1392fc87e185SAlexander Graf 
1393fc87e185SAlexander Graf     return 0;
1394fc87e185SAlexander Graf }
1395fc87e185SAlexander Graf 
139616415335SAlexander Graf #if defined(TARGET_PPCEMB)
139716415335SAlexander Graf #define PPC_INPUT_INT PPC40x_INPUT_INT
139816415335SAlexander Graf #elif defined(TARGET_PPC64)
139916415335SAlexander Graf #define PPC_INPUT_INT PPC970_INPUT_INT
140016415335SAlexander Graf #else
140116415335SAlexander Graf #define PPC_INPUT_INT PPC6xx_INPUT_INT
140216415335SAlexander Graf #endif
140316415335SAlexander Graf 
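/*
 * Runs on the vcpu thread just before KVM_RUN.  On hosts without
 * level-triggered interrupt support (!cap_interrupt_level) this is
 * where a pending external interrupt gets injected by hand.
 */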
140420d695a9SAndreas Färber void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
1405d76d1650Saurel32 {
140620d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
140720d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
1408d76d1650Saurel32     int r;
1409d76d1650Saurel32     unsigned irq;
1410d76d1650Saurel32 
14114b8523eeSJan Kiszka     qemu_mutex_lock_iothread();
14124b8523eeSJan Kiszka 
14135cbdb3a3SStefan Weil     /* PowerPC QEMU tracks the various core input pins (interrupt, critical
1414d76d1650Saurel32      * interrupt, reset, etc) in PPC-specific env->irq_input_state. */
1415fc87e185SAlexander Graf     if (!cap_interrupt_level &&
1416fc87e185SAlexander Graf         run->ready_for_interrupt_injection &&
1417259186a7SAndreas Färber         (cs->interrupt_request & CPU_INTERRUPT_HARD) &&
141816415335SAlexander Graf         (env->irq_input_state & (1<<PPC_INPUT_INT)))
1419d76d1650Saurel32     {
1420d76d1650Saurel32         /* For now KVM disregards the 'irq' argument. However, in the
1421d76d1650Saurel32          * future KVM could cache it in-kernel to avoid a heavyweight exit
1422d76d1650Saurel32          * when reading the UIC.
1423d76d1650Saurel32          */
1424fc87e185SAlexander Graf         irq = KVM_INTERRUPT_SET;
1425d76d1650Saurel32 
1426da56ff91SPeter Maydell         DPRINTF("injected interrupt %d\n", irq);
14271bc22652SAndreas Färber         r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &irq);
142855e5c285SAndreas Färber         if (r < 0) {
142955e5c285SAndreas Färber             printf("cpu %d fail inject %x\n", cs->cpu_index, irq);
143055e5c285SAndreas Färber         }
1431c821c2bdSAlexander Graf 
1432c821c2bdSAlexander Graf         /* Always wake up soon in case the interrupt was level based */
1433bc72ad67SAlex Bligh         timer_mod(idle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
143473bcb24dSRutuja Shah                        (NANOSECONDS_PER_SECOND / 50));
1435d76d1650Saurel32     }
1436d76d1650Saurel32 
1437d76d1650Saurel32     /* We don't know if there are more interrupts pending after this. However,
1438d76d1650Saurel32      * the guest will return to userspace in the course of handling this one
1439d76d1650Saurel32      * anyway, so we will get a chance to deliver the rest. */
14404b8523eeSJan Kiszka 
14414b8523eeSJan Kiszka     qemu_mutex_unlock_iothread();
1442d76d1650Saurel32 }
1443d76d1650Saurel32 
14444c663752SPaolo Bonzini MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
1445d76d1650Saurel32 {
14464c663752SPaolo Bonzini     return MEMTXATTRS_UNSPECIFIED;
1447d76d1650Saurel32 }
1448d76d1650Saurel32 
144920d695a9SAndreas Färber int kvm_arch_process_async_events(CPUState *cs)
14500af691d7SMarcelo Tosatti {
1451259186a7SAndreas Färber     return cs->halted;
14520af691d7SMarcelo Tosatti }
14530af691d7SMarcelo Tosatti 
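/*
 * KVM_EXIT_HLT: if external interrupts are enabled and none is pending,
 * put the vcpu to sleep by marking it halted.
 */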
1454259186a7SAndreas Färber static int kvmppc_handle_halt(PowerPCCPU *cpu)
1455d76d1650Saurel32 {
1456259186a7SAndreas Färber     CPUState *cs = CPU(cpu);
1457259186a7SAndreas Färber     CPUPPCState *env = &cpu->env;
1458259186a7SAndreas Färber 
1459259186a7SAndreas Färber     if (!(cs->interrupt_request & CPU_INTERRUPT_HARD) && (msr_ee)) {
1460259186a7SAndreas Färber         cs->halted = 1;
146127103424SAndreas Färber         cs->exception_index = EXCP_HLT;
1462d76d1650Saurel32     }
1463d76d1650Saurel32 
1464bb4ea393SJan Kiszka     return 0;
1465d76d1650Saurel32 }
1466d76d1650Saurel32 
1467d76d1650Saurel32 /* map dcr access to existing qemu dcr emulation */
14681328c2bfSAndreas Färber static int kvmppc_handle_dcr_read(CPUPPCState *env, uint32_t dcrn, uint32_t *data)
1469d76d1650Saurel32 {
1470d76d1650Saurel32     if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0)
1471d76d1650Saurel32         fprintf(stderr, "Read from unhandled DCR (0x%x)\n", dcrn);
1472d76d1650Saurel32 
1473bb4ea393SJan Kiszka     return 0;
1474d76d1650Saurel32 }
1475d76d1650Saurel32 
14761328c2bfSAndreas Färber static int kvmppc_handle_dcr_write(CPUPPCState *env, uint32_t dcrn, uint32_t data)
1477d76d1650Saurel32 {
1478d76d1650Saurel32     if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0)
1479d76d1650Saurel32         fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);
1480d76d1650Saurel32 
1481bb4ea393SJan Kiszka     return 0;
1482d76d1650Saurel32 }
1483d76d1650Saurel32 
14848a0548f9SBharat Bhushan int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
14858a0548f9SBharat Bhushan {
14868a0548f9SBharat Bhushan     /* Mixed endian case is not handled */
14878a0548f9SBharat Bhushan     uint32_t sc = debug_inst_opcode;
14888a0548f9SBharat Bhushan 
14898a0548f9SBharat Bhushan     if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
14908a0548f9SBharat Bhushan                             sizeof(sc), 0) ||
14918a0548f9SBharat Bhushan         cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 1)) {
14928a0548f9SBharat Bhushan         return -EINVAL;
14938a0548f9SBharat Bhushan     }
14948a0548f9SBharat Bhushan 
14958a0548f9SBharat Bhushan     return 0;
14968a0548f9SBharat Bhushan }
14978a0548f9SBharat Bhushan 
14988a0548f9SBharat Bhushan int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
14998a0548f9SBharat Bhushan {
15008a0548f9SBharat Bhushan     uint32_t sc;
15018a0548f9SBharat Bhushan 
15028a0548f9SBharat Bhushan     if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 0) ||
15038a0548f9SBharat Bhushan         sc != debug_inst_opcode ||
15048a0548f9SBharat Bhushan         cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
15058a0548f9SBharat Bhushan                             sizeof(sc), 1)) {
15068a0548f9SBharat Bhushan         return -EINVAL;
15078a0548f9SBharat Bhushan     }
15088a0548f9SBharat Bhushan 
15098a0548f9SBharat Bhushan     return 0;
15108a0548f9SBharat Bhushan }
15118a0548f9SBharat Bhushan 
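/*
 * Look up a hardware breakpoint/watchpoint by address and GDB type;
 * returns its index in hw_debug_points, or -1 if there is no match.
 */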
151288365d17SBharat Bhushan static int find_hw_breakpoint(target_ulong addr, int type)
151388365d17SBharat Bhushan {
151488365d17SBharat Bhushan     int n;
151588365d17SBharat Bhushan 
151688365d17SBharat Bhushan     assert((nb_hw_breakpoint + nb_hw_watchpoint)
151788365d17SBharat Bhushan            <= ARRAY_SIZE(hw_debug_points));
151888365d17SBharat Bhushan 
151988365d17SBharat Bhushan     for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
152088365d17SBharat Bhushan         if (hw_debug_points[n].addr == addr &&
152188365d17SBharat Bhushan              hw_debug_points[n].type == type) {
152288365d17SBharat Bhushan             return n;
152388365d17SBharat Bhushan         }
152488365d17SBharat Bhushan     }
152588365d17SBharat Bhushan 
152688365d17SBharat Bhushan     return -1;
152788365d17SBharat Bhushan }
152888365d17SBharat Bhushan 
152988365d17SBharat Bhushan static int find_hw_watchpoint(target_ulong addr, int *flag)
153088365d17SBharat Bhushan {
153188365d17SBharat Bhushan     int n;
153288365d17SBharat Bhushan 
153388365d17SBharat Bhushan     n = find_hw_breakpoint(addr, GDB_WATCHPOINT_ACCESS);
153488365d17SBharat Bhushan     if (n >= 0) {
153588365d17SBharat Bhushan         *flag = BP_MEM_ACCESS;
153688365d17SBharat Bhushan         return n;
153788365d17SBharat Bhushan     }
153888365d17SBharat Bhushan 
153988365d17SBharat Bhushan     n = find_hw_breakpoint(addr, GDB_WATCHPOINT_WRITE);
154088365d17SBharat Bhushan     if (n >= 0) {
154188365d17SBharat Bhushan         *flag = BP_MEM_WRITE;
154288365d17SBharat Bhushan         return n;
154388365d17SBharat Bhushan     }
154488365d17SBharat Bhushan 
154588365d17SBharat Bhushan     n = find_hw_breakpoint(addr, GDB_WATCHPOINT_READ);
154688365d17SBharat Bhushan     if (n >= 0) {
154788365d17SBharat Bhushan         *flag = BP_MEM_READ;
154888365d17SBharat Bhushan         return n;
154988365d17SBharat Bhushan     }
155088365d17SBharat Bhushan 
155188365d17SBharat Bhushan     return -1;
155288365d17SBharat Bhushan }
155388365d17SBharat Bhushan 
155488365d17SBharat Bhushan int kvm_arch_insert_hw_breakpoint(target_ulong addr,
155588365d17SBharat Bhushan                                   target_ulong len, int type)
155688365d17SBharat Bhushan {
155788365d17SBharat Bhushan     if ((nb_hw_breakpoint + nb_hw_watchpoint) >= ARRAY_SIZE(hw_debug_points)) {
155888365d17SBharat Bhushan         return -ENOBUFS;
155988365d17SBharat Bhushan     }
156088365d17SBharat Bhushan 
156188365d17SBharat Bhushan     hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].addr = addr;
156288365d17SBharat Bhushan     hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].type = type;
156388365d17SBharat Bhushan 
156488365d17SBharat Bhushan     switch (type) {
156588365d17SBharat Bhushan     case GDB_BREAKPOINT_HW:
156688365d17SBharat Bhushan         if (nb_hw_breakpoint >= max_hw_breakpoint) {
156788365d17SBharat Bhushan             return -ENOBUFS;
156888365d17SBharat Bhushan         }
156988365d17SBharat Bhushan 
157088365d17SBharat Bhushan         if (find_hw_breakpoint(addr, type) >= 0) {
157188365d17SBharat Bhushan             return -EEXIST;
157288365d17SBharat Bhushan         }
157388365d17SBharat Bhushan 
157488365d17SBharat Bhushan         nb_hw_breakpoint++;
157588365d17SBharat Bhushan         break;
157688365d17SBharat Bhushan 
157788365d17SBharat Bhushan     case GDB_WATCHPOINT_WRITE:
157888365d17SBharat Bhushan     case GDB_WATCHPOINT_READ:
157988365d17SBharat Bhushan     case GDB_WATCHPOINT_ACCESS:
158088365d17SBharat Bhushan         if (nb_hw_watchpoint >= max_hw_watchpoint) {
158188365d17SBharat Bhushan             return -ENOBUFS;
158288365d17SBharat Bhushan         }
158388365d17SBharat Bhushan 
158488365d17SBharat Bhushan         if (find_hw_breakpoint(addr, type) >= 0) {
158588365d17SBharat Bhushan             return -EEXIST;
158688365d17SBharat Bhushan         }
158788365d17SBharat Bhushan 
158888365d17SBharat Bhushan         nb_hw_watchpoint++;
158988365d17SBharat Bhushan         break;
159088365d17SBharat Bhushan 
159188365d17SBharat Bhushan     default:
159288365d17SBharat Bhushan         return -ENOSYS;
159388365d17SBharat Bhushan     }
159488365d17SBharat Bhushan 
159588365d17SBharat Bhushan     return 0;
159688365d17SBharat Bhushan }
159788365d17SBharat Bhushan 
159888365d17SBharat Bhushan int kvm_arch_remove_hw_breakpoint(target_ulong addr,
159988365d17SBharat Bhushan                                   target_ulong len, int type)
160088365d17SBharat Bhushan {
160188365d17SBharat Bhushan     int n;
160288365d17SBharat Bhushan 
160388365d17SBharat Bhushan     n = find_hw_breakpoint(addr, type);
160488365d17SBharat Bhushan     if (n < 0) {
160588365d17SBharat Bhushan         return -ENOENT;
160688365d17SBharat Bhushan     }
160788365d17SBharat Bhushan 
160888365d17SBharat Bhushan     switch (type) {
160988365d17SBharat Bhushan     case GDB_BREAKPOINT_HW:
161088365d17SBharat Bhushan         nb_hw_breakpoint--;
161188365d17SBharat Bhushan         break;
161288365d17SBharat Bhushan 
161388365d17SBharat Bhushan     case GDB_WATCHPOINT_WRITE:
161488365d17SBharat Bhushan     case GDB_WATCHPOINT_READ:
161588365d17SBharat Bhushan     case GDB_WATCHPOINT_ACCESS:
161688365d17SBharat Bhushan         nb_hw_watchpoint--;
161788365d17SBharat Bhushan         break;
161888365d17SBharat Bhushan 
161988365d17SBharat Bhushan     default:
162088365d17SBharat Bhushan         return -ENOSYS;
162188365d17SBharat Bhushan     }
162288365d17SBharat Bhushan     hw_debug_points[n] = hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint];
162388365d17SBharat Bhushan 
162488365d17SBharat Bhushan     return 0;
162588365d17SBharat Bhushan }
162688365d17SBharat Bhushan 
162788365d17SBharat Bhushan void kvm_arch_remove_all_hw_breakpoints(void)
162888365d17SBharat Bhushan {
162988365d17SBharat Bhushan     nb_hw_breakpoint = nb_hw_watchpoint = 0;
163088365d17SBharat Bhushan }
163188365d17SBharat Bhushan 
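/*
 * Translate the registered software and hardware breakpoints and
 * watchpoints into the kvm_guest_debug control block handed to KVM.
 */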
16328a0548f9SBharat Bhushan void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
16338a0548f9SBharat Bhushan {
163488365d17SBharat Bhushan     int n;
163588365d17SBharat Bhushan 
16368a0548f9SBharat Bhushan     /* Software Breakpoint updates */
16378a0548f9SBharat Bhushan     if (kvm_sw_breakpoints_active(cs)) {
16388a0548f9SBharat Bhushan         dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
16398a0548f9SBharat Bhushan     }
164088365d17SBharat Bhushan 
164188365d17SBharat Bhushan     assert((nb_hw_breakpoint + nb_hw_watchpoint)
164288365d17SBharat Bhushan            <= ARRAY_SIZE(hw_debug_points));
164388365d17SBharat Bhushan     assert((nb_hw_breakpoint + nb_hw_watchpoint) <= ARRAY_SIZE(dbg->arch.bp));
164488365d17SBharat Bhushan 
164588365d17SBharat Bhushan     if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
164688365d17SBharat Bhushan         dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
164788365d17SBharat Bhushan         memset(dbg->arch.bp, 0, sizeof(dbg->arch.bp));
164888365d17SBharat Bhushan         for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
164988365d17SBharat Bhushan             switch (hw_debug_points[n].type) {
165088365d17SBharat Bhushan             case GDB_BREAKPOINT_HW:
165188365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_BREAKPOINT;
165288365d17SBharat Bhushan                 break;
165388365d17SBharat Bhushan             case GDB_WATCHPOINT_WRITE:
165488365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE;
165588365d17SBharat Bhushan                 break;
165688365d17SBharat Bhushan             case GDB_WATCHPOINT_READ:
165788365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_READ;
165888365d17SBharat Bhushan                 break;
165988365d17SBharat Bhushan             case GDB_WATCHPOINT_ACCESS:
166088365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE |
166188365d17SBharat Bhushan                                         KVMPPC_DEBUG_WATCH_READ;
166288365d17SBharat Bhushan                 break;
166388365d17SBharat Bhushan             default:
166488365d17SBharat Bhushan                 cpu_abort(cs, "Unsupported breakpoint type\n");
166588365d17SBharat Bhushan             }
166688365d17SBharat Bhushan             dbg->arch.bp[n].addr = hw_debug_points[n].addr;
166788365d17SBharat Bhushan         }
166888365d17SBharat Bhushan     }
16698a0548f9SBharat Bhushan }
16708a0548f9SBharat Bhushan 
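/*
 * Decide whether a KVM_EXIT_DEBUG belongs to the debugger (single-step,
 * one of our breakpoints or watchpoints) or to the guest, in which case
 * a program interrupt is injected instead.
 */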
16718a0548f9SBharat Bhushan static int kvm_handle_debug(PowerPCCPU *cpu, struct kvm_run *run)
16728a0548f9SBharat Bhushan {
16738a0548f9SBharat Bhushan     CPUState *cs = CPU(cpu);
16748a0548f9SBharat Bhushan     CPUPPCState *env = &cpu->env;
16758a0548f9SBharat Bhushan     struct kvm_debug_exit_arch *arch_info = &run->debug.arch;
16768a0548f9SBharat Bhushan     int handle = 0;
167788365d17SBharat Bhushan     int n;
167888365d17SBharat Bhushan     int flag = 0;
16798a0548f9SBharat Bhushan 
168088365d17SBharat Bhushan     if (cs->singlestep_enabled) {
168188365d17SBharat Bhushan         handle = 1;
168288365d17SBharat Bhushan     } else if (arch_info->status) {
168388365d17SBharat Bhushan         if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
168488365d17SBharat Bhushan             if (arch_info->status & KVMPPC_DEBUG_BREAKPOINT) {
168588365d17SBharat Bhushan                 n = find_hw_breakpoint(arch_info->address, GDB_BREAKPOINT_HW);
168688365d17SBharat Bhushan                 if (n >= 0) {
168788365d17SBharat Bhushan                     handle = 1;
168888365d17SBharat Bhushan                 }
168988365d17SBharat Bhushan             } else if (arch_info->status & (KVMPPC_DEBUG_WATCH_READ |
169088365d17SBharat Bhushan                                             KVMPPC_DEBUG_WATCH_WRITE)) {
169188365d17SBharat Bhushan                 n = find_hw_watchpoint(arch_info->address,  &flag);
169288365d17SBharat Bhushan                 if (n >= 0) {
169388365d17SBharat Bhushan                     handle = 1;
169488365d17SBharat Bhushan                     cs->watchpoint_hit = &hw_watchpoint;
169588365d17SBharat Bhushan                     hw_watchpoint.vaddr = hw_debug_points[n].addr;
169688365d17SBharat Bhushan                     hw_watchpoint.flags = flag;
169788365d17SBharat Bhushan                 }
169888365d17SBharat Bhushan             }
169988365d17SBharat Bhushan         }
170088365d17SBharat Bhushan     } else if (kvm_find_sw_breakpoint(cs, arch_info->address)) {
17018a0548f9SBharat Bhushan         handle = 1;
17028a0548f9SBharat Bhushan     } else {
17038a0548f9SBharat Bhushan         /* QEMU is not able to handle this debug exception, so inject
17048a0548f9SBharat Bhushan          * a program exception into the guest.
17058a0548f9SBharat Bhushan          * Yes, a program exception, NOT a debug exception!
170688365d17SBharat Bhushan          * When QEMU is using the debug resources, the debug exception
170788365d17SBharat Bhushan          * must always be enabled. To achieve this we set MSR_DE and
170888365d17SBharat Bhushan          * also set MSRP_DEP so the guest cannot change MSR_DE.
170988365d17SBharat Bhushan          * When emulating the debug resources for the guest, we want
171088365d17SBharat Bhushan          * the guest to control MSR_DE (enabling/disabling the debug
171188365d17SBharat Bhushan          * interrupt as needed).
171288365d17SBharat Bhushan          * Supporting both configurations at once is not possible, so
171388365d17SBharat Bhushan          * debug resources cannot be shared between QEMU and the guest
171488365d17SBharat Bhushan          * on the BookE architecture.
171588365d17SBharat Bhushan          * In the current design QEMU gets priority over the guest: if
171688365d17SBharat Bhushan          * QEMU is using the debug resources then the guest cannot use
17178a0548f9SBharat Bhushan          * them.
17188a0548f9SBharat Bhushan          * For software breakpoints QEMU uses a privileged instruction, so
17198a0548f9SBharat Bhushan          * we cannot be here because of a guest-raised debug exception; the
17208a0548f9SBharat Bhushan          * only possibility is that the guest executed a privileged /
17218a0548f9SBharat Bhushan          * illegal instruction, which is why we inject a program interrupt.
17228a0548f9SBharat Bhushan          */
17238a0548f9SBharat Bhushan 
17248a0548f9SBharat Bhushan         cpu_synchronize_state(cs);
17258a0548f9SBharat Bhushan         /* env->nip is PC, so increment this by 4 to use
17268a0548f9SBharat Bhushan          * ppc_cpu_do_interrupt(), which sets srr0 = env->nip - 4.
17278a0548f9SBharat Bhushan          */
17288a0548f9SBharat Bhushan         env->nip += 4;
17298a0548f9SBharat Bhushan         cs->exception_index = POWERPC_EXCP_PROGRAM;
17308a0548f9SBharat Bhushan         env->error_code = POWERPC_EXCP_INVAL;
17318a0548f9SBharat Bhushan         ppc_cpu_do_interrupt(cs);
17328a0548f9SBharat Bhushan     }
17338a0548f9SBharat Bhushan 
17348a0548f9SBharat Bhushan     return handle;
17358a0548f9SBharat Bhushan }
17368a0548f9SBharat Bhushan 
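/*
 * Userspace side of KVM exit handling: DCR accesses, halt, PAPR
 * hypercalls, EPR reads, watchdog expiry and debug exits.
 */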
173720d695a9SAndreas Färber int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
1738d76d1650Saurel32 {
173920d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
174020d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
1741bb4ea393SJan Kiszka     int ret;
1742d76d1650Saurel32 
17434b8523eeSJan Kiszka     qemu_mutex_lock_iothread();
17444b8523eeSJan Kiszka 
1745d76d1650Saurel32     switch (run->exit_reason) {
1746d76d1650Saurel32     case KVM_EXIT_DCR:
1747d76d1650Saurel32         if (run->dcr.is_write) {
1748da56ff91SPeter Maydell             DPRINTF("handle dcr write\n");
1749d76d1650Saurel32             ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data);
1750d76d1650Saurel32         } else {
1751da56ff91SPeter Maydell             DPRINTF("handle dcr read\n");
1752d76d1650Saurel32             ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data);
1753d76d1650Saurel32         }
1754d76d1650Saurel32         break;
1755d76d1650Saurel32     case KVM_EXIT_HLT:
1756da56ff91SPeter Maydell         DPRINTF("handle halt\n");
1757259186a7SAndreas Färber         ret = kvmppc_handle_halt(cpu);
1758d76d1650Saurel32         break;
1759c6304a4aSDavid Gibson #if defined(TARGET_PPC64)
1760f61b4bedSAlexander Graf     case KVM_EXIT_PAPR_HCALL:
1761da56ff91SPeter Maydell         DPRINTF("handle PAPR hypercall\n");
176220d695a9SAndreas Färber         run->papr_hcall.ret = spapr_hypercall(cpu,
1763aa100fa4SAndreas Färber                                               run->papr_hcall.nr,
1764f61b4bedSAlexander Graf                                               run->papr_hcall.args);
176578e8fde2SDavid Gibson         ret = 0;
1766f61b4bedSAlexander Graf         break;
1767f61b4bedSAlexander Graf #endif
17685b95b8b9SAlexander Graf     case KVM_EXIT_EPR:
1769da56ff91SPeter Maydell         DPRINTF("handle epr\n");
1770933b19eaSAlexander Graf         run->epr.epr = ldl_phys(cs->as, env->mpic_iack);
17715b95b8b9SAlexander Graf         ret = 0;
17725b95b8b9SAlexander Graf         break;
177331f2cb8fSBharat Bhushan     case KVM_EXIT_WATCHDOG:
1774da56ff91SPeter Maydell         DPRINTF("handle watchdog expiry\n");
177531f2cb8fSBharat Bhushan         watchdog_perform_action();
177631f2cb8fSBharat Bhushan         ret = 0;
177731f2cb8fSBharat Bhushan         break;
177831f2cb8fSBharat Bhushan 
17798a0548f9SBharat Bhushan     case KVM_EXIT_DEBUG:
17808a0548f9SBharat Bhushan         DPRINTF("handle debug exception\n");
17818a0548f9SBharat Bhushan         if (kvm_handle_debug(cpu, run)) {
17828a0548f9SBharat Bhushan             ret = EXCP_DEBUG;
17838a0548f9SBharat Bhushan             break;
17848a0548f9SBharat Bhushan         }
17858a0548f9SBharat Bhushan         /* re-enter, this exception was guest-internal */
17868a0548f9SBharat Bhushan         ret = 0;
17878a0548f9SBharat Bhushan         break;
17888a0548f9SBharat Bhushan 
178973aaec4aSJan Kiszka     default:
179073aaec4aSJan Kiszka         fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
179173aaec4aSJan Kiszka         ret = -1;
179273aaec4aSJan Kiszka         break;
1793d76d1650Saurel32     }
1794d76d1650Saurel32 
17954b8523eeSJan Kiszka     qemu_mutex_unlock_iothread();
1796d76d1650Saurel32     return ret;
1797d76d1650Saurel32 }
1798d76d1650Saurel32 
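/*
 * BookE timer/watchdog plumbing: set or clear TSR bits and write TCR
 * through the ONE_REG interface, and enable the in-kernel watchdog.
 */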
179931f2cb8fSBharat Bhushan int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
180031f2cb8fSBharat Bhushan {
180131f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
180231f2cb8fSBharat Bhushan     uint32_t bits = tsr_bits;
180331f2cb8fSBharat Bhushan     struct kvm_one_reg reg = {
180431f2cb8fSBharat Bhushan         .id = KVM_REG_PPC_OR_TSR,
180531f2cb8fSBharat Bhushan         .addr = (uintptr_t) &bits,
180631f2cb8fSBharat Bhushan     };
180731f2cb8fSBharat Bhushan 
180831f2cb8fSBharat Bhushan     return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
180931f2cb8fSBharat Bhushan }
181031f2cb8fSBharat Bhushan 
181131f2cb8fSBharat Bhushan int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
181231f2cb8fSBharat Bhushan {
181331f2cb8fSBharat Bhushan 
181431f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
181531f2cb8fSBharat Bhushan     uint32_t bits = tsr_bits;
181631f2cb8fSBharat Bhushan     struct kvm_one_reg reg = {
181731f2cb8fSBharat Bhushan         .id = KVM_REG_PPC_CLEAR_TSR,
181831f2cb8fSBharat Bhushan         .addr = (uintptr_t) &bits,
181931f2cb8fSBharat Bhushan     };
182031f2cb8fSBharat Bhushan 
182131f2cb8fSBharat Bhushan     return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
182231f2cb8fSBharat Bhushan }
182331f2cb8fSBharat Bhushan 
182431f2cb8fSBharat Bhushan int kvmppc_set_tcr(PowerPCCPU *cpu)
182531f2cb8fSBharat Bhushan {
182631f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
182731f2cb8fSBharat Bhushan     CPUPPCState *env = &cpu->env;
182831f2cb8fSBharat Bhushan     uint32_t tcr = env->spr[SPR_BOOKE_TCR];
182931f2cb8fSBharat Bhushan 
183031f2cb8fSBharat Bhushan     struct kvm_one_reg reg = {
183131f2cb8fSBharat Bhushan         .id = KVM_REG_PPC_TCR,
183231f2cb8fSBharat Bhushan         .addr = (uintptr_t) &tcr,
183331f2cb8fSBharat Bhushan     };
183431f2cb8fSBharat Bhushan 
183531f2cb8fSBharat Bhushan     return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
183631f2cb8fSBharat Bhushan }
183731f2cb8fSBharat Bhushan 
183831f2cb8fSBharat Bhushan int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu)
183931f2cb8fSBharat Bhushan {
184031f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
184131f2cb8fSBharat Bhushan     int ret;
184231f2cb8fSBharat Bhushan 
184331f2cb8fSBharat Bhushan     if (!kvm_enabled()) {
184431f2cb8fSBharat Bhushan         return -1;
184531f2cb8fSBharat Bhushan     }
184631f2cb8fSBharat Bhushan 
184731f2cb8fSBharat Bhushan     if (!cap_ppc_watchdog) {
184831f2cb8fSBharat Bhushan         printf("warning: KVM does not support watchdog\n");
184931f2cb8fSBharat Bhushan         return -1;
185031f2cb8fSBharat Bhushan     }
185131f2cb8fSBharat Bhushan 
185248add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_BOOKE_WATCHDOG, 0);
185331f2cb8fSBharat Bhushan     if (ret < 0) {
185431f2cb8fSBharat Bhushan         fprintf(stderr, "%s: couldn't enable KVM_CAP_PPC_BOOKE_WATCHDOG: %s\n",
185531f2cb8fSBharat Bhushan                 __func__, strerror(-ret));
185631f2cb8fSBharat Bhushan         return ret;
185731f2cb8fSBharat Bhushan     }
185831f2cb8fSBharat Bhushan 
185931f2cb8fSBharat Bhushan     return ret;
186031f2cb8fSBharat Bhushan }
186131f2cb8fSBharat Bhushan 
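/*
 * Copy the /proc/cpuinfo line starting with 'field' into 'value' (at
 * most 'len' bytes).  Returns 0 on success, -1 otherwise.
 */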
1862dc333cd6SAlexander Graf static int read_cpuinfo(const char *field, char *value, int len)
1863dc333cd6SAlexander Graf {
1864dc333cd6SAlexander Graf     FILE *f;
1865dc333cd6SAlexander Graf     int ret = -1;
1866dc333cd6SAlexander Graf     int field_len = strlen(field);
1867dc333cd6SAlexander Graf     char line[512];
1868dc333cd6SAlexander Graf 
1869dc333cd6SAlexander Graf     f = fopen("/proc/cpuinfo", "r");
1870dc333cd6SAlexander Graf     if (!f) {
1871dc333cd6SAlexander Graf         return -1;
1872dc333cd6SAlexander Graf     }
1873dc333cd6SAlexander Graf 
1874dc333cd6SAlexander Graf     do {
1875dc333cd6SAlexander Graf         if (!fgets(line, sizeof(line), f)) {
1876dc333cd6SAlexander Graf             break;
1877dc333cd6SAlexander Graf         }
1878dc333cd6SAlexander Graf         if (!strncmp(line, field, field_len)) {
1879ae215068SJim Meyering             pstrcpy(value, len, line);
1880dc333cd6SAlexander Graf             ret = 0;
1881dc333cd6SAlexander Graf             break;
1882dc333cd6SAlexander Graf         }
1883dc333cd6SAlexander Graf     } while (*line);
1884dc333cd6SAlexander Graf 
1885dc333cd6SAlexander Graf     fclose(f);
1886dc333cd6SAlexander Graf 
1887dc333cd6SAlexander Graf     return ret;
1888dc333cd6SAlexander Graf }
1889dc333cd6SAlexander Graf 
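/*
 * Host timebase frequency, parsed from the "timebase" line of
 * /proc/cpuinfo; falls back to NANOSECONDS_PER_SECOND if it cannot be
 * determined.
 */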
1890dc333cd6SAlexander Graf uint32_t kvmppc_get_tbfreq(void)
1891dc333cd6SAlexander Graf {
1892dc333cd6SAlexander Graf     char line[512];
1893dc333cd6SAlexander Graf     char *ns;
189473bcb24dSRutuja Shah     uint32_t retval = NANOSECONDS_PER_SECOND;
1895dc333cd6SAlexander Graf 
1896dc333cd6SAlexander Graf     if (read_cpuinfo("timebase", line, sizeof(line))) {
1897dc333cd6SAlexander Graf         return retval;
1898dc333cd6SAlexander Graf     }
1899dc333cd6SAlexander Graf 
1900dc333cd6SAlexander Graf     if (!(ns = strchr(line, ':'))) {
1901dc333cd6SAlexander Graf         return retval;
1902dc333cd6SAlexander Graf     }
1903dc333cd6SAlexander Graf 
1904dc333cd6SAlexander Graf     ns++;
1905dc333cd6SAlexander Graf 
1906f9b8e7f6SShraddha Barke     return atoi(ns);
1907ef951443SNikunj A Dadhania }
1908ef951443SNikunj A Dadhania 
1909ef951443SNikunj A Dadhania bool kvmppc_get_host_serial(char **value)
1910ef951443SNikunj A Dadhania {
1911ef951443SNikunj A Dadhania     return g_file_get_contents("/proc/device-tree/system-id", value, NULL,
1912ef951443SNikunj A Dadhania                                NULL);
1913ef951443SNikunj A Dadhania }
1914ef951443SNikunj A Dadhania 
1915ef951443SNikunj A Dadhania bool kvmppc_get_host_model(char **value)
1916ef951443SNikunj A Dadhania {
1917ef951443SNikunj A Dadhania     return g_file_get_contents("/proc/device-tree/model", value, NULL, NULL);
1918dc333cd6SAlexander Graf }
19194513d923SGleb Natapov 
1920eadaada1SAlexander Graf /* Try to find a device tree node for a CPU with clock-frequency property */
1921eadaada1SAlexander Graf static int kvmppc_find_cpu_dt(char *buf, int buf_len)
1922eadaada1SAlexander Graf {
1923eadaada1SAlexander Graf     struct dirent *dirp;
1924eadaada1SAlexander Graf     DIR *dp;
1925eadaada1SAlexander Graf 
1926eadaada1SAlexander Graf     if ((dp = opendir(PROC_DEVTREE_CPU)) == NULL) {
1927eadaada1SAlexander Graf         printf("Can't open directory " PROC_DEVTREE_CPU "\n");
1928eadaada1SAlexander Graf         return -1;
1929eadaada1SAlexander Graf     }
1930eadaada1SAlexander Graf 
1931eadaada1SAlexander Graf     buf[0] = '\0';
1932eadaada1SAlexander Graf     while ((dirp = readdir(dp)) != NULL) {
1933eadaada1SAlexander Graf         FILE *f;
1934eadaada1SAlexander Graf         snprintf(buf, buf_len, "%s%s/clock-frequency", PROC_DEVTREE_CPU,
1935eadaada1SAlexander Graf                  dirp->d_name);
1936eadaada1SAlexander Graf         f = fopen(buf, "r");
1937eadaada1SAlexander Graf         if (f) {
1938eadaada1SAlexander Graf             snprintf(buf, buf_len, "%s%s", PROC_DEVTREE_CPU, dirp->d_name);
1939eadaada1SAlexander Graf             fclose(f);
1940eadaada1SAlexander Graf             break;
1941eadaada1SAlexander Graf         }
1942eadaada1SAlexander Graf         buf[0] = '\0';
1943eadaada1SAlexander Graf     }
1944eadaada1SAlexander Graf     closedir(dp);
1945eadaada1SAlexander Graf     if (buf[0] == '\0') {
1946eadaada1SAlexander Graf         printf("Unknown host!\n");
1947eadaada1SAlexander Graf         return -1;
1948eadaada1SAlexander Graf     }
1949eadaada1SAlexander Graf 
1950eadaada1SAlexander Graf     return 0;
1951eadaada1SAlexander Graf }
1952eadaada1SAlexander Graf 
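/*
 * Read a host device tree property containing a single big-endian
 * 32-bit or 64-bit integer.
 */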
19537d94a30bSSukadev Bhattiprolu static uint64_t kvmppc_read_int_dt(const char *filename)
1954eadaada1SAlexander Graf {
19559bc884b7SDavid Gibson     union {
19569bc884b7SDavid Gibson         uint32_t v32;
19579bc884b7SDavid Gibson         uint64_t v64;
19589bc884b7SDavid Gibson     } u;
1959eadaada1SAlexander Graf     FILE *f;
1960eadaada1SAlexander Graf     int len;
1961eadaada1SAlexander Graf 
19627d94a30bSSukadev Bhattiprolu     f = fopen(filename, "rb");
1963eadaada1SAlexander Graf     if (!f) {
1964eadaada1SAlexander Graf         return -1;
1965eadaada1SAlexander Graf     }
1966eadaada1SAlexander Graf 
19679bc884b7SDavid Gibson     len = fread(&u, 1, sizeof(u), f);
1968eadaada1SAlexander Graf     fclose(f);
1969eadaada1SAlexander Graf     switch (len) {
19709bc884b7SDavid Gibson     case 4:
19719bc884b7SDavid Gibson         /* property is a 32-bit quantity */
19729bc884b7SDavid Gibson         return be32_to_cpu(u.v32);
19739bc884b7SDavid Gibson     case 8:
19749bc884b7SDavid Gibson         return be64_to_cpu(u.v64);
1975eadaada1SAlexander Graf     }
1976eadaada1SAlexander Graf 
1977eadaada1SAlexander Graf     return 0;
1978eadaada1SAlexander Graf }
1979eadaada1SAlexander Graf 
19807d94a30bSSukadev Bhattiprolu /* Read a CPU node property from the host device tree that's a single
19817d94a30bSSukadev Bhattiprolu  * integer (32-bit or 64-bit).  Returns 0 or -1 if anything goes wrong
19827d94a30bSSukadev Bhattiprolu  * (can't find or open the property, or doesn't understand the
19837d94a30bSSukadev Bhattiprolu  * format) */
19847d94a30bSSukadev Bhattiprolu static uint64_t kvmppc_read_int_cpu_dt(const char *propname)
19857d94a30bSSukadev Bhattiprolu {
19867d94a30bSSukadev Bhattiprolu     char buf[PATH_MAX], *tmp;
19877d94a30bSSukadev Bhattiprolu     uint64_t val;
19887d94a30bSSukadev Bhattiprolu 
19897d94a30bSSukadev Bhattiprolu     if (kvmppc_find_cpu_dt(buf, sizeof(buf))) {
19907d94a30bSSukadev Bhattiprolu         return -1;
19917d94a30bSSukadev Bhattiprolu     }
19927d94a30bSSukadev Bhattiprolu 
19937d94a30bSSukadev Bhattiprolu     tmp = g_strdup_printf("%s/%s", buf, propname);
19947d94a30bSSukadev Bhattiprolu     val = kvmppc_read_int_dt(tmp);
19957d94a30bSSukadev Bhattiprolu     g_free(tmp);
19967d94a30bSSukadev Bhattiprolu 
19977d94a30bSSukadev Bhattiprolu     return val;
19987d94a30bSSukadev Bhattiprolu }
19997d94a30bSSukadev Bhattiprolu 
20009bc884b7SDavid Gibson uint64_t kvmppc_get_clockfreq(void)
20019bc884b7SDavid Gibson {
20029bc884b7SDavid Gibson     return kvmppc_read_int_cpu_dt("clock-frequency");
20039bc884b7SDavid Gibson }
20049bc884b7SDavid Gibson 
20056659394fSDavid Gibson uint32_t kvmppc_get_vmx(void)
20066659394fSDavid Gibson {
20076659394fSDavid Gibson     return kvmppc_read_int_cpu_dt("ibm,vmx");
20086659394fSDavid Gibson }
20096659394fSDavid Gibson 
20106659394fSDavid Gibson uint32_t kvmppc_get_dfp(void)
20116659394fSDavid Gibson {
20126659394fSDavid Gibson     return kvmppc_read_int_cpu_dt("ibm,dfp");
20136659394fSDavid Gibson }
20146659394fSDavid Gibson 
20151a61a9aeSStuart Yoder static int kvmppc_get_pvinfo(CPUPPCState *env, struct kvm_ppc_pvinfo *pvinfo)
201645024f09SAlexander Graf {
2017a60f24b5SAndreas Färber     PowerPCCPU *cpu = ppc_env_get_cpu(env);
2018a60f24b5SAndreas Färber     CPUState *cs = CPU(cpu);
201945024f09SAlexander Graf 
20206fd33a75SAlexander Graf     if (kvm_vm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
20211a61a9aeSStuart Yoder         !kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_PVINFO, pvinfo)) {
20221a61a9aeSStuart Yoder         return 0;
20231a61a9aeSStuart Yoder     }
202445024f09SAlexander Graf 
20251a61a9aeSStuart Yoder     return 1;
20261a61a9aeSStuart Yoder }
20271a61a9aeSStuart Yoder 
20281a61a9aeSStuart Yoder int kvmppc_get_hasidle(CPUPPCState *env)
20291a61a9aeSStuart Yoder {
20301a61a9aeSStuart Yoder     struct kvm_ppc_pvinfo pvinfo;
20311a61a9aeSStuart Yoder 
20321a61a9aeSStuart Yoder     if (!kvmppc_get_pvinfo(env, &pvinfo) &&
20331a61a9aeSStuart Yoder         (pvinfo.flags & KVM_PPC_PVINFO_FLAGS_EV_IDLE)) {
20341a61a9aeSStuart Yoder         return 1;
20351a61a9aeSStuart Yoder     }
20361a61a9aeSStuart Yoder 
20371a61a9aeSStuart Yoder     return 0;
20381a61a9aeSStuart Yoder }
20391a61a9aeSStuart Yoder 
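/*
 * Copy the guest-visible hypercall instruction sequence into buf
 * (buf_len bytes; the fallback below emits 4 instructions).  Returns
 * 0 when the kernel supplied the sequence, 1 when the endian-agnostic
 * "always fail" fallback was used instead.
 *
 * Illustrative (hypothetical) caller, not taken from this file:
 *
 *     uint32_t hc[4];
 *     kvmppc_get_hypercall(env, (uint8_t *)hc, sizeof(hc));
 *
 * The resulting instructions are then typically exposed to the guest,
 * e.g. through the device tree.
 */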
20401a61a9aeSStuart Yoder int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
20411a61a9aeSStuart Yoder {
20421a61a9aeSStuart Yoder     uint32_t *hc = (uint32_t *)buf;
20431a61a9aeSStuart Yoder     struct kvm_ppc_pvinfo pvinfo;
20441a61a9aeSStuart Yoder 
20451a61a9aeSStuart Yoder     if (!kvmppc_get_pvinfo(env, &pvinfo)) {
20461a61a9aeSStuart Yoder         memcpy(buf, pvinfo.hcall, buf_len);
204745024f09SAlexander Graf         return 0;
204845024f09SAlexander Graf     }
204945024f09SAlexander Graf 
205045024f09SAlexander Graf     /*
2051d13fc32eSAlexander Graf      * Fallback to always fail hypercalls regardless of endianness:
205245024f09SAlexander Graf      *
2053d13fc32eSAlexander Graf      *     tdi 0,r0,72 (becomes b .+8 in wrong endian, nop in good endian)
205445024f09SAlexander Graf      *     li r3, -1
2055d13fc32eSAlexander Graf      *     b .+8       (becomes nop in wrong endian)
2056d13fc32eSAlexander Graf      *     bswap32(li r3, -1)
205745024f09SAlexander Graf      */
205845024f09SAlexander Graf 
2059d13fc32eSAlexander Graf     hc[0] = cpu_to_be32(0x08000048);
2060d13fc32eSAlexander Graf     hc[1] = cpu_to_be32(0x3860ffff);
2061d13fc32eSAlexander Graf     hc[2] = cpu_to_be32(0x48000008);
2062d13fc32eSAlexander Graf     hc[3] = cpu_to_be32(bswap32(0x3860ffff));
206345024f09SAlexander Graf 
20640ddbd053SAlexey Kardashevskiy     return 1;
206545024f09SAlexander Graf }
206645024f09SAlexander Graf 
2067026bfd89SDavid Gibson static inline int kvmppc_enable_hcall(KVMState *s, target_ulong hcall)
2068026bfd89SDavid Gibson {
2069026bfd89SDavid Gibson     return kvm_vm_enable_cap(s, KVM_CAP_PPC_ENABLE_HCALL, 0, hcall, 1);
2070026bfd89SDavid Gibson }
2071026bfd89SDavid Gibson 
2072026bfd89SDavid Gibson void kvmppc_enable_logical_ci_hcalls(void)
2073026bfd89SDavid Gibson {
2074026bfd89SDavid Gibson     /*
2075026bfd89SDavid Gibson      * FIXME: it would be nice if we could detect the cases where
2076026bfd89SDavid Gibson      * we're using a device which requires the in-kernel
2077026bfd89SDavid Gibson      * implementation of these hcalls but the kernel lacks them, and
2078026bfd89SDavid Gibson      * produce a warning.
2079026bfd89SDavid Gibson      */
2080026bfd89SDavid Gibson     kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_LOAD);
2081026bfd89SDavid Gibson     kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_STORE);
2082026bfd89SDavid Gibson }
2083026bfd89SDavid Gibson 
2084ef9971ddSAlexey Kardashevskiy void kvmppc_enable_set_mode_hcall(void)
2085ef9971ddSAlexey Kardashevskiy {
2086ef9971ddSAlexey Kardashevskiy     kvmppc_enable_hcall(kvm_state, H_SET_MODE);
2087ef9971ddSAlexey Kardashevskiy }
2088ef9971ddSAlexey Kardashevskiy 
20895145ad4fSNathan Whitehorn void kvmppc_enable_clear_ref_mod_hcalls(void)
20905145ad4fSNathan Whitehorn {
20915145ad4fSNathan Whitehorn     kvmppc_enable_hcall(kvm_state, H_CLEAR_REF);
20925145ad4fSNathan Whitehorn     kvmppc_enable_hcall(kvm_state, H_CLEAR_MOD);
20935145ad4fSNathan Whitehorn }
20945145ad4fSNathan Whitehorn 
20951bc22652SAndreas Färber void kvmppc_set_papr(PowerPCCPU *cpu)
2096f61b4bedSAlexander Graf {
20971bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
2098f61b4bedSAlexander Graf     int ret;
2099f61b4bedSAlexander Graf 
210048add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_PAPR, 0);
2101f61b4bedSAlexander Graf     if (ret) {
2102072ed5f2SThomas Huth         error_report("This vCPU type or KVM version does not support PAPR");
2103072ed5f2SThomas Huth         exit(1);
2104f61b4bedSAlexander Graf     }
21059b00ea49SDavid Gibson 
21069b00ea49SDavid Gibson     /* Update the capability flag so we sync the right information
21079b00ea49SDavid Gibson      * with kvm */
21089b00ea49SDavid Gibson     cap_papr = 1;
2109f1af19d7SDavid Gibson }
2110f61b4bedSAlexander Graf 
2111*d6e166c0SDavid Gibson int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr)
21126db5bb0fSAlexey Kardashevskiy {
2113*d6e166c0SDavid Gibson     return kvm_set_one_reg(CPU(cpu), KVM_REG_PPC_ARCH_COMPAT, &compat_pvr);
21146db5bb0fSAlexey Kardashevskiy }
21156db5bb0fSAlexey Kardashevskiy 
21165b95b8b9SAlexander Graf void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy)
21175b95b8b9SAlexander Graf {
21185b95b8b9SAlexander Graf     CPUState *cs = CPU(cpu);
21195b95b8b9SAlexander Graf     int ret;
21205b95b8b9SAlexander Graf 
212148add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_EPR, 0, mpic_proxy);
21225b95b8b9SAlexander Graf     if (ret && mpic_proxy) {
2123072ed5f2SThomas Huth         error_report("This KVM version does not support EPR");
2124072ed5f2SThomas Huth         exit(1);
21255b95b8b9SAlexander Graf     }
21265b95b8b9SAlexander Graf }
21275b95b8b9SAlexander Graf 
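/* Number of SMT threads per core reported by KVM, or 1 when the
 * capability is not advertised. */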
2128e97c3636SDavid Gibson int kvmppc_smt_threads(void)
2129e97c3636SDavid Gibson {
2130e97c3636SDavid Gibson     return cap_ppc_smt ? cap_ppc_smt : 1;
2131e97c3636SDavid Gibson }
2132e97c3636SDavid Gibson 
21337f763a5dSDavid Gibson #ifdef TARGET_PPC64
2134658fa66bSAlexey Kardashevskiy off_t kvmppc_alloc_rma(void **rma)
2135354ac20aSDavid Gibson {
2136354ac20aSDavid Gibson     off_t size;
2137354ac20aSDavid Gibson     int fd;
2138354ac20aSDavid Gibson     struct kvm_allocate_rma ret;
2139354ac20aSDavid Gibson 
2140354ac20aSDavid Gibson     /* If cap_ppc_rma == 0, contiguous RMA allocation is not supported
2141354ac20aSDavid Gibson      * if cap_ppc_rma == 1, contiguous RMA allocation is supported, but
2142354ac20aSDavid Gibson      *                      not necessary on this hardware
2143354ac20aSDavid Gibson      * if cap_ppc_rma == 2, contiguous RMA allocation is needed on this hardware
2144354ac20aSDavid Gibson      *
2145354ac20aSDavid Gibson      * FIXME: We should allow the user to force contiguous RMA
2146354ac20aSDavid Gibson      * allocation in the cap_ppc_rma==1 case.
2147354ac20aSDavid Gibson      */
2148354ac20aSDavid Gibson     if (cap_ppc_rma < 2) {
2149354ac20aSDavid Gibson         return 0;
2150354ac20aSDavid Gibson     }
2151354ac20aSDavid Gibson 
2152354ac20aSDavid Gibson     fd = kvm_vm_ioctl(kvm_state, KVM_ALLOCATE_RMA, &ret);
2153354ac20aSDavid Gibson     if (fd < 0) {
2154354ac20aSDavid Gibson         fprintf(stderr, "KVM: Error on KVM_ALLOCATE_RMA: %s\n",
2155354ac20aSDavid Gibson                 strerror(errno));
2156354ac20aSDavid Gibson         return -1;
2157354ac20aSDavid Gibson     }
2158354ac20aSDavid Gibson 
2159354ac20aSDavid Gibson     size = MIN(ret.rma_size, 256ul << 20);
2160354ac20aSDavid Gibson 
2161658fa66bSAlexey Kardashevskiy     *rma = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
2162658fa66bSAlexey Kardashevskiy     if (*rma == MAP_FAILED) {
2163354ac20aSDavid Gibson         fprintf(stderr, "KVM: Error mapping RMA: %s\n", strerror(errno));
2164354ac20aSDavid Gibson         return -1;
2165354ac20aSDavid Gibson     }
2166354ac20aSDavid Gibson 
2167354ac20aSDavid Gibson     return size;
2168354ac20aSDavid Gibson }
2169354ac20aSDavid Gibson 
21707f763a5dSDavid Gibson uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift)
21717f763a5dSDavid Gibson {
2172f36951c1SDavid Gibson     struct kvm_ppc_smmu_info info;
2173f36951c1SDavid Gibson     long rampagesize, best_page_shift;
2174f36951c1SDavid Gibson     int i;
2175f36951c1SDavid Gibson 
21767f763a5dSDavid Gibson     if (cap_ppc_rma >= 2) {
21777f763a5dSDavid Gibson         return current_size;
21787f763a5dSDavid Gibson     }
2179f36951c1SDavid Gibson 
2180f36951c1SDavid Gibson     /* Find the largest hardware supported page size that's less than
2181f36951c1SDavid Gibson      * or equal to the (logical) backing page size of guest RAM */
2182182735efSAndreas Färber     kvm_get_smmu_info(POWERPC_CPU(first_cpu), &info);
2183f36951c1SDavid Gibson     rampagesize = getrampagesize();
2184f36951c1SDavid Gibson     best_page_shift = 0;
2185f36951c1SDavid Gibson 
2186f36951c1SDavid Gibson     for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
2187f36951c1SDavid Gibson         struct kvm_ppc_one_seg_page_size *sps = &info.sps[i];
2188f36951c1SDavid Gibson 
2189f36951c1SDavid Gibson         if (!sps->page_shift) {
2190f36951c1SDavid Gibson             continue;
2191f36951c1SDavid Gibson         }
2192f36951c1SDavid Gibson 
2193f36951c1SDavid Gibson         if ((sps->page_shift > best_page_shift)
2194f36951c1SDavid Gibson             && ((1UL << sps->page_shift) <= rampagesize)) {
2195f36951c1SDavid Gibson             best_page_shift = sps->page_shift;
2196f36951c1SDavid Gibson         }
2197f36951c1SDavid Gibson     }
2198f36951c1SDavid Gibson 
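    /*
     * The cap below appears to reflect how many mappings the kernel
     * seeds into the hash table for the VRMA: roughly one per
     * 128-byte PTEG, i.e. 2^(hash_shift - 7) mappings of
     * 2^best_page_shift bytes each.  (Rationale inferred from the
     * formula; it is not stated in the original source.)
     */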
21997f763a5dSDavid Gibson     return MIN(current_size,
2200f36951c1SDavid Gibson                1ULL << (best_page_shift + hash_shift - 7));
22017f763a5dSDavid Gibson }
22027f763a5dSDavid Gibson #endif
22037f763a5dSDavid Gibson 
2204da95324eSAlexey Kardashevskiy bool kvmppc_spapr_use_multitce(void)
2205da95324eSAlexey Kardashevskiy {
2206da95324eSAlexey Kardashevskiy     return cap_spapr_multitce;
2207da95324eSAlexey Kardashevskiy }
2208da95324eSAlexey Kardashevskiy 
22099bb62a07SAlexey Kardashevskiy void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t window_size, int *pfd,
22106a81dd17SDavid Gibson                               bool need_vfio)
22110f5cb298SDavid Gibson {
22120f5cb298SDavid Gibson     struct kvm_create_spapr_tce args = {
22130f5cb298SDavid Gibson         .liobn = liobn,
22140f5cb298SDavid Gibson         .window_size = window_size,
22150f5cb298SDavid Gibson     };
22160f5cb298SDavid Gibson     long len;
22170f5cb298SDavid Gibson     int fd;
22180f5cb298SDavid Gibson     void *table;
22190f5cb298SDavid Gibson 
2220b5aec396SDavid Gibson     /* Must set fd to -1 so we don't try to munmap when called for
2221b5aec396SDavid Gibson      * destroying the table, which the upper layers -will- do
2222b5aec396SDavid Gibson      */
2223b5aec396SDavid Gibson     *pfd = -1;
22246a81dd17SDavid Gibson     if (!cap_spapr_tce || (need_vfio && !cap_spapr_vfio)) {
22250f5cb298SDavid Gibson         return NULL;
22260f5cb298SDavid Gibson     }
22270f5cb298SDavid Gibson 
22280f5cb298SDavid Gibson     fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE, &args);
22290f5cb298SDavid Gibson     if (fd < 0) {
2230b5aec396SDavid Gibson         fprintf(stderr, "KVM: Failed to create TCE table for liobn 0x%x\n",
2231b5aec396SDavid Gibson                 liobn);
22320f5cb298SDavid Gibson         return NULL;
22330f5cb298SDavid Gibson     }
22340f5cb298SDavid Gibson 
2235a83000f5SAnthony Liguori     len = (window_size / SPAPR_TCE_PAGE_SIZE) * sizeof(uint64_t);
22360f5cb298SDavid Gibson     /* FIXME: round this up to page size */
22370f5cb298SDavid Gibson 
223874b41e56SDavid Gibson     table = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
22390f5cb298SDavid Gibson     if (table == MAP_FAILED) {
2240b5aec396SDavid Gibson         fprintf(stderr, "KVM: Failed to map TCE table for liobn 0x%x\n",
2241b5aec396SDavid Gibson                 liobn);
22420f5cb298SDavid Gibson         close(fd);
22430f5cb298SDavid Gibson         return NULL;
22440f5cb298SDavid Gibson     }
22450f5cb298SDavid Gibson 
22460f5cb298SDavid Gibson     *pfd = fd;
22470f5cb298SDavid Gibson     return table;
22480f5cb298SDavid Gibson }
22490f5cb298SDavid Gibson 
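/* Unmap and close a TCE table previously returned by
 * kvmppc_create_spapr_tce(); nb_table is the number of TCE entries. */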
2250523e7b8aSAlexey Kardashevskiy int kvmppc_remove_spapr_tce(void *table, int fd, uint32_t nb_table)
22510f5cb298SDavid Gibson {
22520f5cb298SDavid Gibson     long len;
22530f5cb298SDavid Gibson 
22540f5cb298SDavid Gibson     if (fd < 0) {
22550f5cb298SDavid Gibson         return -1;
22560f5cb298SDavid Gibson     }
22570f5cb298SDavid Gibson 
2258523e7b8aSAlexey Kardashevskiy     len = nb_table * sizeof(uint64_t);
22590f5cb298SDavid Gibson     if ((munmap(table, len) < 0) ||
22600f5cb298SDavid Gibson         (close(fd) < 0)) {
2261b5aec396SDavid Gibson         fprintf(stderr, "KVM: Unexpected error removing TCE table: %s",
2262b5aec396SDavid Gibson                 strerror(errno));
22630f5cb298SDavid Gibson         /* Leak the table */
22640f5cb298SDavid Gibson     }
22650f5cb298SDavid Gibson 
22660f5cb298SDavid Gibson     return 0;
22670f5cb298SDavid Gibson }
22680f5cb298SDavid Gibson 
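/*
 * Ask the kernel to (re)allocate the guest hash page table.  Returns
 * 0 when QEMU must allocate the table itself (full emulation or PR
 * KVM), the log2 size of the kernel-allocated table on success, or a
 * negative errno on failure.
 */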
22697f763a5dSDavid Gibson int kvmppc_reset_htab(int shift_hint)
22707f763a5dSDavid Gibson {
22717f763a5dSDavid Gibson     uint32_t shift = shift_hint;
22727f763a5dSDavid Gibson 
2273ace9a2cbSDavid Gibson     if (!kvm_enabled()) {
2274ace9a2cbSDavid Gibson         /* Full emulation, tell caller to allocate htab itself */
2275ace9a2cbSDavid Gibson         return 0;
2276ace9a2cbSDavid Gibson     }
2277ace9a2cbSDavid Gibson     if (kvm_check_extension(kvm_state, KVM_CAP_PPC_ALLOC_HTAB)) {
22787f763a5dSDavid Gibson         int ret;
22797f763a5dSDavid Gibson         ret = kvm_vm_ioctl(kvm_state, KVM_PPC_ALLOCATE_HTAB, &shift);
2280ace9a2cbSDavid Gibson         if (ret == -ENOTTY) {
2281ace9a2cbSDavid Gibson             /* At least some versions of PR KVM advertise the
2282ace9a2cbSDavid Gibson              * capability, but don't implement the ioctl().  Oops.
2283ace9a2cbSDavid Gibson              * Return 0 so that we allocate the htab in qemu, as is
2284ace9a2cbSDavid Gibson              * correct for PR. */
2285ace9a2cbSDavid Gibson             return 0;
2286ace9a2cbSDavid Gibson         } else if (ret < 0) {
22877f763a5dSDavid Gibson             return ret;
22887f763a5dSDavid Gibson         }
22897f763a5dSDavid Gibson         return shift;
22907f763a5dSDavid Gibson     }
22917f763a5dSDavid Gibson 
2292ace9a2cbSDavid Gibson     /* We have a kernel that predates the htab reset calls.  For PR
2293ace9a2cbSDavid Gibson      * KVM, we need to allocate the htab ourselves; an HV KVM of this
229496c9cff0SThomas Huth      * era has already allocated a fixed 16MB hash table. */
229596c9cff0SThomas Huth     if (kvmppc_is_pr(kvm_state)) {
2296ace9a2cbSDavid Gibson         /* PR - tell caller to allocate htab */
22977f763a5dSDavid Gibson         return 0;
2298ace9a2cbSDavid Gibson     } else {
2299ace9a2cbSDavid Gibson         /* HV - assume 16MB kernel allocated htab */
2300ace9a2cbSDavid Gibson         return 24;
2301ace9a2cbSDavid Gibson     }
23027f763a5dSDavid Gibson }
23037f763a5dSDavid Gibson 
2304a1e98583SDavid Gibson static inline uint32_t mfpvr(void)
2305a1e98583SDavid Gibson {
2306a1e98583SDavid Gibson     uint32_t pvr;
2307a1e98583SDavid Gibson 
2308a1e98583SDavid Gibson     asm ("mfpvr %0"
2309a1e98583SDavid Gibson          : "=r"(pvr));
2310a1e98583SDavid Gibson     return pvr;
2311a1e98583SDavid Gibson }
2312a1e98583SDavid Gibson 
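/* Set or clear the given flag bits in an insns_flags word. */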
2313a7342588SDavid Gibson static void alter_insns(uint64_t *word, uint64_t flags, bool on)
2314a7342588SDavid Gibson {
2315a7342588SDavid Gibson     if (on) {
2316a7342588SDavid Gibson         *word |= flags;
2317a7342588SDavid Gibson     } else {
2318a7342588SDavid Gibson         *word &= ~flags;
2319a7342588SDavid Gibson     }
2320a7342588SDavid Gibson }
2321a7342588SDavid Gibson 
23222985b86bSAndreas Färber static void kvmppc_host_cpu_initfn(Object *obj)
2323a1e98583SDavid Gibson {
23242985b86bSAndreas Färber     assert(kvm_enabled());
23252985b86bSAndreas Färber }
23262985b86bSAndreas Färber 
23272985b86bSAndreas Färber static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data)
23282985b86bSAndreas Färber {
23294c315c27SMarkus Armbruster     DeviceClass *dc = DEVICE_CLASS(oc);
23302985b86bSAndreas Färber     PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
2331a7342588SDavid Gibson     uint32_t vmx = kvmppc_get_vmx();
2332a7342588SDavid Gibson     uint32_t dfp = kvmppc_get_dfp();
23330cbad81fSDavid Gibson     uint32_t dcache_size = kvmppc_read_int_cpu_dt("d-cache-size");
23340cbad81fSDavid Gibson     uint32_t icache_size = kvmppc_read_int_cpu_dt("i-cache-size");
2335a1e98583SDavid Gibson 
2336cfe34f44SAndreas Färber     /* Now fix up the class with information we can query from the host */
23373bc9ccc0SAlexey Kardashevskiy     pcc->pvr = mfpvr();
2338a7342588SDavid Gibson 
233970bca53fSAlexander Graf     if (vmx != -1) {
234070bca53fSAlexander Graf         /* Only override when we know what the host supports */
2341cfe34f44SAndreas Färber         alter_insns(&pcc->insns_flags, PPC_ALTIVEC, vmx > 0);
2342cfe34f44SAndreas Färber         alter_insns(&pcc->insns_flags2, PPC2_VSX, vmx > 1);
234370bca53fSAlexander Graf     }
234470bca53fSAlexander Graf     if (dfp != -1) {
234570bca53fSAlexander Graf         /* Only override when we know what the host supports */
2346cfe34f44SAndreas Färber         alter_insns(&pcc->insns_flags2, PPC2_DFP, dfp);
234770bca53fSAlexander Graf     }
23480cbad81fSDavid Gibson 
23490cbad81fSDavid Gibson     if (dcache_size != -1) {
23500cbad81fSDavid Gibson         pcc->l1_dcache_size = dcache_size;
23510cbad81fSDavid Gibson     }
23520cbad81fSDavid Gibson 
23530cbad81fSDavid Gibson     if (icache_size != -1) {
23540cbad81fSDavid Gibson         pcc->l1_icache_size = icache_size;
23550cbad81fSDavid Gibson     }
23564c315c27SMarkus Armbruster 
23574c315c27SMarkus Armbruster     /* Reason: kvmppc_host_cpu_initfn() dies when !kvm_enabled() */
23584c315c27SMarkus Armbruster     dc->cannot_destroy_with_object_finalize_yet = true;
2359a1e98583SDavid Gibson }
2360a1e98583SDavid Gibson 
23613b961124SStuart Yoder bool kvmppc_has_cap_epr(void)
23623b961124SStuart Yoder {
23633b961124SStuart Yoder     return cap_epr;
23643b961124SStuart Yoder }
23653b961124SStuart Yoder 
23667c43bca0SAneesh Kumar K.V bool kvmppc_has_cap_htab_fd(void)
23677c43bca0SAneesh Kumar K.V {
23687c43bca0SAneesh Kumar K.V     return cap_htab_fd;
23697c43bca0SAneesh Kumar K.V }
23707c43bca0SAneesh Kumar K.V 
237187a91de6SAlexander Graf bool kvmppc_has_cap_fixup_hcalls(void)
237287a91de6SAlexander Graf {
237387a91de6SAlexander Graf     return cap_fixup_hcalls;
237487a91de6SAlexander Graf }
237587a91de6SAlexander Graf 
2376bac3bf28SThomas Huth bool kvmppc_has_cap_htm(void)
2377bac3bf28SThomas Huth {
2378bac3bf28SThomas Huth     return cap_htm;
2379bac3bf28SThomas Huth }
2380bac3bf28SThomas Huth 
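/*
 * Walk up the class hierarchy to the first abstract ancestor, which
 * is the generic family class for this CPU model.
 */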
23815b79b1caSAlexey Kardashevskiy static PowerPCCPUClass *ppc_cpu_get_family_class(PowerPCCPUClass *pcc)
23825b79b1caSAlexey Kardashevskiy {
23835b79b1caSAlexey Kardashevskiy     ObjectClass *oc = OBJECT_CLASS(pcc);
23845b79b1caSAlexey Kardashevskiy 
23855b79b1caSAlexey Kardashevskiy     while (oc && !object_class_is_abstract(oc)) {
23865b79b1caSAlexey Kardashevskiy         oc = object_class_get_parent(oc);
23875b79b1caSAlexey Kardashevskiy     }
23885b79b1caSAlexey Kardashevskiy     assert(oc);
23895b79b1caSAlexey Kardashevskiy 
23905b79b1caSAlexey Kardashevskiy     return POWERPC_CPU_CLASS(oc);
23915b79b1caSAlexey Kardashevskiy }
23925b79b1caSAlexey Kardashevskiy 
239352b2519cSThomas Huth PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void)
239452b2519cSThomas Huth {
239552b2519cSThomas Huth     uint32_t host_pvr = mfpvr();
239652b2519cSThomas Huth     PowerPCCPUClass *pvr_pcc;
239752b2519cSThomas Huth 
239852b2519cSThomas Huth     pvr_pcc = ppc_cpu_class_by_pvr(host_pvr);
239952b2519cSThomas Huth     if (pvr_pcc == NULL) {
240052b2519cSThomas Huth         pvr_pcc = ppc_cpu_class_by_pvr_mask(host_pvr);
240152b2519cSThomas Huth     }
240252b2519cSThomas Huth 
240352b2519cSThomas Huth     return pvr_pcc;
240452b2519cSThomas Huth }
240552b2519cSThomas Huth 
24065ba4576bSAndreas Färber static int kvm_ppc_register_host_cpu_type(void)
24075ba4576bSAndreas Färber {
24085ba4576bSAndreas Färber     TypeInfo type_info = {
24095ba4576bSAndreas Färber         .name = TYPE_HOST_POWERPC_CPU,
24105ba4576bSAndreas Färber         .instance_init = kvmppc_host_cpu_initfn,
24115ba4576bSAndreas Färber         .class_init = kvmppc_host_cpu_class_init,
24125ba4576bSAndreas Färber     };
24135ba4576bSAndreas Färber     PowerPCCPUClass *pvr_pcc;
24145b79b1caSAlexey Kardashevskiy     DeviceClass *dc;
24155ba4576bSAndreas Färber 
241652b2519cSThomas Huth     pvr_pcc = kvm_ppc_get_host_cpu_class();
24173bc9ccc0SAlexey Kardashevskiy     if (pvr_pcc == NULL) {
24185ba4576bSAndreas Färber         return -1;
24195ba4576bSAndreas Färber     }
24205ba4576bSAndreas Färber     type_info.parent = object_class_get_name(OBJECT_CLASS(pvr_pcc));
24215ba4576bSAndreas Färber     type_register(&type_info);
24225b79b1caSAlexey Kardashevskiy 
24239c83fc2eSThomas Huth     /* Register generic family CPU class for a family */
24249c83fc2eSThomas Huth     pvr_pcc = ppc_cpu_get_family_class(pvr_pcc);
24259c83fc2eSThomas Huth     dc = DEVICE_CLASS(pvr_pcc);
24269c83fc2eSThomas Huth     type_info.parent = object_class_get_name(OBJECT_CLASS(pvr_pcc));
24279c83fc2eSThomas Huth     type_info.name = g_strdup_printf("%s-"TYPE_POWERPC_CPU, dc->desc);
24289c83fc2eSThomas Huth     type_register(&type_info);
24299c83fc2eSThomas Huth 
24303b542549SBharata B Rao #if defined(TARGET_PPC64)
24313b542549SBharata B Rao     type_info.name = g_strdup_printf("%s-"TYPE_SPAPR_CPU_CORE, "host");
24323b542549SBharata B Rao     type_info.parent = TYPE_SPAPR_CPU_CORE;
24337ebaf795SBharata B Rao     type_info.instance_size = sizeof(sPAPRCPUCore);
24347ebaf795SBharata B Rao     type_info.instance_init = NULL;
24357ebaf795SBharata B Rao     type_info.class_init = spapr_cpu_core_class_init;
24367ebaf795SBharata B Rao     type_info.class_data = (void *) "host";
24373b542549SBharata B Rao     type_register(&type_info);
24383b542549SBharata B Rao     g_free((void *)type_info.name);
2439d11b268eSThomas Huth 
2440d11b268eSThomas Huth     /* Register generic spapr CPU family class for current host CPU type */
2441d11b268eSThomas Huth     type_info.name = g_strdup_printf("%s-"TYPE_SPAPR_CPU_CORE, dc->desc);
24427ebaf795SBharata B Rao     type_info.class_data = (void *) dc->desc;
2443d11b268eSThomas Huth     type_register(&type_info);
2444d11b268eSThomas Huth     g_free((void *)type_info.name);
24453b542549SBharata B Rao #endif
24463b542549SBharata B Rao 
24475ba4576bSAndreas Färber     return 0;
24485ba4576bSAndreas Färber }
24495ba4576bSAndreas Färber 
2450feaa64c4SDavid Gibson int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function)
2451feaa64c4SDavid Gibson {
2452feaa64c4SDavid Gibson     struct kvm_rtas_token_args args = {
2453feaa64c4SDavid Gibson         .token = token,
2454feaa64c4SDavid Gibson     };
2455feaa64c4SDavid Gibson 
2456feaa64c4SDavid Gibson     if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_RTAS)) {
2457feaa64c4SDavid Gibson         return -ENOENT;
2458feaa64c4SDavid Gibson     }
2459feaa64c4SDavid Gibson 
2460feaa64c4SDavid Gibson     strncpy(args.name, function, sizeof(args.name));
2461feaa64c4SDavid Gibson 
2462feaa64c4SDavid Gibson     return kvm_vm_ioctl(kvm_state, KVM_PPC_RTAS_DEFINE_TOKEN, &args);
2463feaa64c4SDavid Gibson }
246412b1143bSDavid Gibson 
2465e68cb8b4SAlexey Kardashevskiy int kvmppc_get_htab_fd(bool write)
2466e68cb8b4SAlexey Kardashevskiy {
2467e68cb8b4SAlexey Kardashevskiy     struct kvm_get_htab_fd s = {
2468e68cb8b4SAlexey Kardashevskiy         .flags = write ? KVM_GET_HTAB_WRITE : 0,
2469e68cb8b4SAlexey Kardashevskiy         .start_index = 0,
2470e68cb8b4SAlexey Kardashevskiy     };
2471e68cb8b4SAlexey Kardashevskiy 
2472e68cb8b4SAlexey Kardashevskiy     if (!cap_htab_fd) {
2473e68cb8b4SAlexey Kardashevskiy         fprintf(stderr, "KVM version doesn't support saving the hash table\n");
2474e68cb8b4SAlexey Kardashevskiy         return -1;
2475e68cb8b4SAlexey Kardashevskiy     }
2476e68cb8b4SAlexey Kardashevskiy 
2477e68cb8b4SAlexey Kardashevskiy     return kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &s);
2478e68cb8b4SAlexey Kardashevskiy }
2479e68cb8b4SAlexey Kardashevskiy 
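/*
 * Drain hash table entries from the kernel's htab fd into the
 * migration stream.  Each chunk read from the fd is a
 * kvm_get_htab_header followed by n_valid HPTEs of HASH_PTE_SIZE_64
 * bytes; the header fields and valid HPTEs are forwarded to the
 * QEMUFile.  Returns 1 once the fd is drained, 0 if max_ns
 * nanoseconds elapsed first, or a negative value on read error.
 */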
2480e68cb8b4SAlexey Kardashevskiy int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns)
2481e68cb8b4SAlexey Kardashevskiy {
2482bc72ad67SAlex Bligh     int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
2483e68cb8b4SAlexey Kardashevskiy     uint8_t buf[bufsize];
2484e68cb8b4SAlexey Kardashevskiy     ssize_t rc;
2485e68cb8b4SAlexey Kardashevskiy 
2486e68cb8b4SAlexey Kardashevskiy     do {
2487e68cb8b4SAlexey Kardashevskiy         rc = read(fd, buf, bufsize);
2488e68cb8b4SAlexey Kardashevskiy         if (rc < 0) {
2489e68cb8b4SAlexey Kardashevskiy             fprintf(stderr, "Error reading data from KVM HTAB fd: %s\n",
2490e68cb8b4SAlexey Kardashevskiy                     strerror(errno));
2491e68cb8b4SAlexey Kardashevskiy             return rc;
2492e68cb8b4SAlexey Kardashevskiy         } else if (rc) {
2493e094c4c1SCédric Le Goater             uint8_t *buffer = buf;
2494e094c4c1SCédric Le Goater             ssize_t n = rc;
2495e094c4c1SCédric Le Goater             while (n) {
2496e094c4c1SCédric Le Goater                 struct kvm_get_htab_header *head =
2497e094c4c1SCédric Le Goater                     (struct kvm_get_htab_header *) buffer;
2498e094c4c1SCédric Le Goater                 size_t chunksize = sizeof(*head) +
2499e094c4c1SCédric Le Goater                      HASH_PTE_SIZE_64 * head->n_valid;
2500e094c4c1SCédric Le Goater 
2501e094c4c1SCédric Le Goater                 qemu_put_be32(f, head->index);
2502e094c4c1SCédric Le Goater                 qemu_put_be16(f, head->n_valid);
2503e094c4c1SCédric Le Goater                 qemu_put_be16(f, head->n_invalid);
2504e094c4c1SCédric Le Goater                 qemu_put_buffer(f, (void *)(head + 1),
2505e094c4c1SCédric Le Goater                                 HASH_PTE_SIZE_64 * head->n_valid);
2506e094c4c1SCédric Le Goater 
2507e094c4c1SCédric Le Goater                 buffer += chunksize;
2508e094c4c1SCédric Le Goater                 n -= chunksize;
2509e094c4c1SCédric Le Goater             }
2510e68cb8b4SAlexey Kardashevskiy         }
2511e68cb8b4SAlexey Kardashevskiy     } while ((rc != 0)
2512e68cb8b4SAlexey Kardashevskiy              && ((max_ns < 0)
2513bc72ad67SAlex Bligh                  || ((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) < max_ns)));
2514e68cb8b4SAlexey Kardashevskiy 
2515e68cb8b4SAlexey Kardashevskiy     return (rc == 0) ? 1 : 0;
2516e68cb8b4SAlexey Kardashevskiy }
2517e68cb8b4SAlexey Kardashevskiy 
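/* Write one chunk (header plus n_valid HPTEs read from the migration
 * stream) back into the kernel through the htab fd. */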
2518e68cb8b4SAlexey Kardashevskiy int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
2519e68cb8b4SAlexey Kardashevskiy                            uint16_t n_valid, uint16_t n_invalid)
2520e68cb8b4SAlexey Kardashevskiy {
2521e68cb8b4SAlexey Kardashevskiy     struct kvm_get_htab_header *buf;
2522e68cb8b4SAlexey Kardashevskiy     size_t chunksize = sizeof(*buf) + n_valid * HASH_PTE_SIZE_64;
2523e68cb8b4SAlexey Kardashevskiy     ssize_t rc;
2524e68cb8b4SAlexey Kardashevskiy 
2525e68cb8b4SAlexey Kardashevskiy     buf = alloca(chunksize);
2526e68cb8b4SAlexey Kardashevskiy     buf->index = index;
2527e68cb8b4SAlexey Kardashevskiy     buf->n_valid = n_valid;
2528e68cb8b4SAlexey Kardashevskiy     buf->n_invalid = n_invalid;
2529e68cb8b4SAlexey Kardashevskiy 
2530e68cb8b4SAlexey Kardashevskiy     qemu_get_buffer(f, (void *)(buf + 1), HASH_PTE_SIZE_64 * n_valid);
2531e68cb8b4SAlexey Kardashevskiy 
2532e68cb8b4SAlexey Kardashevskiy     rc = write(fd, buf, chunksize);
2533e68cb8b4SAlexey Kardashevskiy     if (rc < 0) {
2534e68cb8b4SAlexey Kardashevskiy         fprintf(stderr, "Error writing KVM hash table: %s\n",
2535e68cb8b4SAlexey Kardashevskiy                 strerror(errno));
2536e68cb8b4SAlexey Kardashevskiy         return rc;
2537e68cb8b4SAlexey Kardashevskiy     }
2538e68cb8b4SAlexey Kardashevskiy     if (rc != chunksize) {
2539e68cb8b4SAlexey Kardashevskiy         /* We should never get a short write on a single chunk */
2540e68cb8b4SAlexey Kardashevskiy         fprintf(stderr, "Short write, restoring KVM hash table\n");
2541e68cb8b4SAlexey Kardashevskiy         return -1;
2542e68cb8b4SAlexey Kardashevskiy     }
2543e68cb8b4SAlexey Kardashevskiy     return 0;
2544e68cb8b4SAlexey Kardashevskiy }
2545e68cb8b4SAlexey Kardashevskiy 
254620d695a9SAndreas Färber bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
25474513d923SGleb Natapov {
25484513d923SGleb Natapov     return true;
25494513d923SGleb Natapov }
2550a1b87fe0SJan Kiszka 
255120d695a9SAndreas Färber int kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
2552a1b87fe0SJan Kiszka {
2553a1b87fe0SJan Kiszka     return 1;
2554a1b87fe0SJan Kiszka }
2555a1b87fe0SJan Kiszka 
2556a1b87fe0SJan Kiszka int kvm_arch_on_sigbus(int code, void *addr)
2557a1b87fe0SJan Kiszka {
2558a1b87fe0SJan Kiszka     return 1;
2559a1b87fe0SJan Kiszka }
256082169660SScott Wood 
256182169660SScott Wood void kvm_arch_init_irq_routing(KVMState *s)
256282169660SScott Wood {
256382169660SScott Wood }
2564c65f9a07SGreg Kurz 
25657c43bca0SAneesh Kumar K.V struct kvm_get_htab_buf {
25667c43bca0SAneesh Kumar K.V     struct kvm_get_htab_header header;
25677c43bca0SAneesh Kumar K.V     /*
25687c43bca0SAneesh Kumar K.V      * We require one extra target_ulong of space for the read
25697c43bca0SAneesh Kumar K.V      */
25707c43bca0SAneesh Kumar K.V     target_ulong hpte[(HPTES_PER_GROUP * 2) + 1];
25717c43bca0SAneesh Kumar K.V };
25727c43bca0SAneesh Kumar K.V 
25737c43bca0SAneesh Kumar K.V uint64_t kvmppc_hash64_read_pteg(PowerPCCPU *cpu, target_ulong pte_index)
25747c43bca0SAneesh Kumar K.V {
25757c43bca0SAneesh Kumar K.V     int htab_fd;
25767c43bca0SAneesh Kumar K.V     struct kvm_get_htab_fd ghf;
25777c43bca0SAneesh Kumar K.V     struct kvm_get_htab_buf  *hpte_buf;
25787c43bca0SAneesh Kumar K.V 
25797c43bca0SAneesh Kumar K.V     ghf.flags = 0;
25807c43bca0SAneesh Kumar K.V     ghf.start_index = pte_index;
25817c43bca0SAneesh Kumar K.V     htab_fd = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &ghf);
25827c43bca0SAneesh Kumar K.V     if (htab_fd < 0) {
25837c43bca0SAneesh Kumar K.V         goto error_out;
25847c43bca0SAneesh Kumar K.V     }
25857c43bca0SAneesh Kumar K.V 
25867c43bca0SAneesh Kumar K.V     hpte_buf = g_malloc0(sizeof(*hpte_buf));
25877c43bca0SAneesh Kumar K.V     /*
25887c43bca0SAneesh Kumar K.V      * Read the hpte group
25897c43bca0SAneesh Kumar K.V      */
25907c43bca0SAneesh Kumar K.V     if (read(htab_fd, hpte_buf, sizeof(*hpte_buf)) < 0) {
25917c43bca0SAneesh Kumar K.V         goto out_close;
25927c43bca0SAneesh Kumar K.V     }
25937c43bca0SAneesh Kumar K.V 
25947c43bca0SAneesh Kumar K.V     close(htab_fd);
25957c43bca0SAneesh Kumar K.V     return (uint64_t)(uintptr_t) hpte_buf->hpte;
25967c43bca0SAneesh Kumar K.V 
25977c43bca0SAneesh Kumar K.V out_close:
25987c43bca0SAneesh Kumar K.V     g_free(hpte_buf);
25997c43bca0SAneesh Kumar K.V     close(htab_fd);
26007c43bca0SAneesh Kumar K.V error_out:
26017c43bca0SAneesh Kumar K.V     return 0;
26027c43bca0SAneesh Kumar K.V }
26037c43bca0SAneesh Kumar K.V 
26047c43bca0SAneesh Kumar K.V void kvmppc_hash64_free_pteg(uint64_t token)
26057c43bca0SAneesh Kumar K.V {
26067c43bca0SAneesh Kumar K.V     struct kvm_get_htab_buf *htab_buf;
26077c43bca0SAneesh Kumar K.V 
26087c43bca0SAneesh Kumar K.V     htab_buf = container_of((void *)(uintptr_t) token, struct kvm_get_htab_buf,
26097c43bca0SAneesh Kumar K.V                             hpte);
26107c43bca0SAneesh Kumar K.V     g_free(htab_buf);
26117c43bca0SAneesh Kumar K.V     return;
26127c43bca0SAneesh Kumar K.V }
2613c1385933SAneesh Kumar K.V 
2614c1385933SAneesh Kumar K.V void kvmppc_hash64_write_pte(CPUPPCState *env, target_ulong pte_index,
2615c1385933SAneesh Kumar K.V                              target_ulong pte0, target_ulong pte1)
2616c1385933SAneesh Kumar K.V {
2617c1385933SAneesh Kumar K.V     int htab_fd;
2618c1385933SAneesh Kumar K.V     struct kvm_get_htab_fd ghf;
2619c1385933SAneesh Kumar K.V     struct kvm_get_htab_buf hpte_buf;
2620c1385933SAneesh Kumar K.V 
2621c1385933SAneesh Kumar K.V     ghf.flags = 0;
2622c1385933SAneesh Kumar K.V     ghf.start_index = 0;     /* Ignored */
2623c1385933SAneesh Kumar K.V     htab_fd = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &ghf);
2624c1385933SAneesh Kumar K.V     if (htab_fd < 0) {
2625c1385933SAneesh Kumar K.V         goto error_out;
2626c1385933SAneesh Kumar K.V     }
2627c1385933SAneesh Kumar K.V 
2628c1385933SAneesh Kumar K.V     hpte_buf.header.n_valid = 1;
2629c1385933SAneesh Kumar K.V     hpte_buf.header.n_invalid = 0;
2630c1385933SAneesh Kumar K.V     hpte_buf.header.index = pte_index;
2631c1385933SAneesh Kumar K.V     hpte_buf.hpte[0] = pte0;
2632c1385933SAneesh Kumar K.V     hpte_buf.hpte[1] = pte1;
2633c1385933SAneesh Kumar K.V     /*
2634c1385933SAneesh Kumar K.V      * Write the hpte entry.
2635c1385933SAneesh Kumar K.V      * CAUTION: write() has the warn_unused_result attribute. Hence we
2636c1385933SAneesh Kumar K.V      * need to check the return value, even though we do nothing.
2637c1385933SAneesh Kumar K.V      */
2638c1385933SAneesh Kumar K.V     if (write(htab_fd, &hpte_buf, sizeof(hpte_buf)) < 0) {
2639c1385933SAneesh Kumar K.V         goto out_close;
2640c1385933SAneesh Kumar K.V     }
2641c1385933SAneesh Kumar K.V 
2642c1385933SAneesh Kumar K.V out_close:
2643c1385933SAneesh Kumar K.V     close(htab_fd);
2644c1385933SAneesh Kumar K.V     return;
2645c1385933SAneesh Kumar K.V 
2646c1385933SAneesh Kumar K.V error_out:
2647c1385933SAneesh Kumar K.V     return;
2648c1385933SAneesh Kumar K.V }
26499e03a040SFrank Blaschka 
26509e03a040SFrank Blaschka int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
2651dc9f06caSPavel Fedin                              uint64_t address, uint32_t data, PCIDevice *dev)
26529e03a040SFrank Blaschka {
26539e03a040SFrank Blaschka     return 0;
26549e03a040SFrank Blaschka }
26551850b6b7SEric Auger 
265638d87493SPeter Xu int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
265738d87493SPeter Xu                                 int vector, PCIDevice *dev)
265838d87493SPeter Xu {
265938d87493SPeter Xu     return 0;
266038d87493SPeter Xu }
266138d87493SPeter Xu 
266238d87493SPeter Xu int kvm_arch_release_virq_post(int virq)
266338d87493SPeter Xu {
266438d87493SPeter Xu     return 0;
266538d87493SPeter Xu }
266638d87493SPeter Xu 
26671850b6b7SEric Auger int kvm_arch_msi_data_to_gsi(uint32_t data)
26681850b6b7SEric Auger {
26691850b6b7SEric Auger     return data & 0xffff;
26701850b6b7SEric Auger }
26714d9392beSThomas Huth 
26724d9392beSThomas Huth int kvmppc_enable_hwrng(void)
26734d9392beSThomas Huth {
26744d9392beSThomas Huth     if (!kvm_enabled() || !kvm_check_extension(kvm_state, KVM_CAP_PPC_HWRNG)) {
26754d9392beSThomas Huth         return -1;
26764d9392beSThomas Huth     }
26774d9392beSThomas Huth 
26784d9392beSThomas Huth     return kvmppc_enable_hcall(kvm_state, H_RANDOM);
26794d9392beSThomas Huth }
2680