xref: /qemu/target/ppc/kvm.c (revision b4db54132ffeadafa9516cc553ba9548e42d42ad)
1d76d1650Saurel32 /*
2d76d1650Saurel32  * PowerPC implementation of KVM hooks
3d76d1650Saurel32  *
4d76d1650Saurel32  * Copyright IBM Corp. 2007
590dc8812SScott Wood  * Copyright (C) 2011 Freescale Semiconductor, Inc.
6d76d1650Saurel32  *
7d76d1650Saurel32  * Authors:
8d76d1650Saurel32  *  Jerone Young <jyoung5@us.ibm.com>
9d76d1650Saurel32  *  Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
10d76d1650Saurel32  *  Hollis Blanchard <hollisb@us.ibm.com>
11d76d1650Saurel32  *
12d76d1650Saurel32  * This work is licensed under the terms of the GNU GPL, version 2 or later.
13d76d1650Saurel32  * See the COPYING file in the top-level directory.
14d76d1650Saurel32  *
15d76d1650Saurel32  */
16d76d1650Saurel32 
170d75590dSPeter Maydell #include "qemu/osdep.h"
18eadaada1SAlexander Graf #include <dirent.h>
19d76d1650Saurel32 #include <sys/ioctl.h>
204656e1f0SBenjamin Herrenschmidt #include <sys/vfs.h>
21d76d1650Saurel32 
22d76d1650Saurel32 #include <linux/kvm.h>
23d76d1650Saurel32 
24d76d1650Saurel32 #include "qemu-common.h"
25072ed5f2SThomas Huth #include "qemu/error-report.h"
2633c11879SPaolo Bonzini #include "cpu.h"
27715d4b96SThomas Huth #include "cpu-models.h"
281de7afc9SPaolo Bonzini #include "qemu/timer.h"
299c17d615SPaolo Bonzini #include "sysemu/sysemu.h"
30b3946626SVincent Palatin #include "sysemu/hw_accel.h"
31d76d1650Saurel32 #include "kvm_ppc.h"
329c17d615SPaolo Bonzini #include "sysemu/cpus.h"
339c17d615SPaolo Bonzini #include "sysemu/device_tree.h"
34d5aea6f3SDavid Gibson #include "mmu-hash64.h"
35d76d1650Saurel32 
36f61b4bedSAlexander Graf #include "hw/sysbus.h"
370d09e41aSPaolo Bonzini #include "hw/ppc/spapr.h"
380d09e41aSPaolo Bonzini #include "hw/ppc/spapr_vio.h"
397ebaf795SBharata B Rao #include "hw/ppc/spapr_cpu_core.h"
4098a8b524SAlexey Kardashevskiy #include "hw/ppc/ppc.h"
4131f2cb8fSBharat Bhushan #include "sysemu/watchdog.h"
42b36f100eSAlexey Kardashevskiy #include "trace.h"
4388365d17SBharat Bhushan #include "exec/gdbstub.h"
444c663752SPaolo Bonzini #include "exec/memattrs.h"
459c607668SAlexey Kardashevskiy #include "exec/ram_addr.h"
462d103aaeSMichael Roth #include "sysemu/hostmem.h"
47f348b6d1SVeronia Bahaa #include "qemu/cutils.h"
489c607668SAlexey Kardashevskiy #include "qemu/mmap-alloc.h"
493b542549SBharata B Rao #if defined(TARGET_PPC64)
503b542549SBharata B Rao #include "hw/ppc/spapr_cpu_core.h"
513b542549SBharata B Rao #endif
52f3d9f303SSam Bobroff #include "elf.h"
53c64abd1fSSam Bobroff #include "sysemu/kvm_int.h"
54f61b4bedSAlexander Graf 
55d76d1650Saurel32 //#define DEBUG_KVM
56d76d1650Saurel32 
57d76d1650Saurel32 #ifdef DEBUG_KVM
58da56ff91SPeter Maydell #define DPRINTF(fmt, ...) \
59d76d1650Saurel32     do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
60d76d1650Saurel32 #else
61da56ff91SPeter Maydell #define DPRINTF(fmt, ...) \
62d76d1650Saurel32     do { } while (0)
63d76d1650Saurel32 #endif
64d76d1650Saurel32 
65eadaada1SAlexander Graf #define PROC_DEVTREE_CPU      "/proc/device-tree/cpus/"
66eadaada1SAlexander Graf 
6794a8d39aSJan Kiszka const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
6894a8d39aSJan Kiszka     KVM_CAP_LAST_INFO
6994a8d39aSJan Kiszka };
7094a8d39aSJan Kiszka 
71fc87e185SAlexander Graf static int cap_interrupt_unset = false;
72fc87e185SAlexander Graf static int cap_interrupt_level = false;
7390dc8812SScott Wood static int cap_segstate;
7490dc8812SScott Wood static int cap_booke_sregs;
75e97c3636SDavid Gibson static int cap_ppc_smt;
76354ac20aSDavid Gibson static int cap_ppc_rma;
770f5cb298SDavid Gibson static int cap_spapr_tce;
78d6ee2a7cSAlexey Kardashevskiy static int cap_spapr_tce_64;
79da95324eSAlexey Kardashevskiy static int cap_spapr_multitce;
809bb62a07SAlexey Kardashevskiy static int cap_spapr_vfio;
81f1af19d7SDavid Gibson static int cap_hior;
82d67d40eaSDavid Gibson static int cap_one_reg;
833b961124SStuart Yoder static int cap_epr;
8431f2cb8fSBharat Bhushan static int cap_ppc_watchdog;
859b00ea49SDavid Gibson static int cap_papr;
86e68cb8b4SAlexey Kardashevskiy static int cap_htab_fd;
8787a91de6SAlexander Graf static int cap_fixup_hcalls;
88bac3bf28SThomas Huth static int cap_htm;             /* Hardware transactional memory support */
89cf1c4cceSSam Bobroff static int cap_mmu_radix;
90cf1c4cceSSam Bobroff static int cap_mmu_hash_v3;
91fc87e185SAlexander Graf 
923c902d44SBharat Bhushan static uint32_t debug_inst_opcode;
933c902d44SBharat Bhushan 
94c821c2bdSAlexander Graf /* XXX We have a race condition where we actually have a level triggered
95c821c2bdSAlexander Graf  *     interrupt, but the infrastructure can't expose that yet, so the guest
96c821c2bdSAlexander Graf  *     takes it but ignores it, goes to sleep and never gets notified that there's
97c821c2bdSAlexander Graf  *     still an interrupt pending.
98c6a94ba5SAlexander Graf  *
99c821c2bdSAlexander Graf  *     As a quick workaround, let's just wake up again 20 ms after we injected
100c821c2bdSAlexander Graf  *     an interrupt. That way we can ensure that we're always reinjecting
101c821c2bdSAlexander Graf  *     interrupts in case the guest swallowed them.
102c6a94ba5SAlexander Graf  */
103c6a94ba5SAlexander Graf static QEMUTimer *idle_timer;
104c6a94ba5SAlexander Graf 
105d5a68146SAndreas Färber static void kvm_kick_cpu(void *opaque)
106c6a94ba5SAlexander Graf {
107d5a68146SAndreas Färber     PowerPCCPU *cpu = opaque;
108d5a68146SAndreas Färber 
109c08d7424SAndreas Färber     qemu_cpu_kick(CPU(cpu));
110c6a94ba5SAlexander Graf }
111c6a94ba5SAlexander Graf 
11296c9cff0SThomas Huth /* Check whether we are running with KVM-PR (instead of KVM-HV).  This
11396c9cff0SThomas Huth  * should only be used for fallback tests - generally we should use
11496c9cff0SThomas Huth  * explicit capabilities for the features we want, rather than
11596c9cff0SThomas Huth  * assuming what is/isn't available depending on the KVM variant. */
11696c9cff0SThomas Huth static bool kvmppc_is_pr(KVMState *ks)
11796c9cff0SThomas Huth {
11896c9cff0SThomas Huth     /* Assume KVM-PR if the GET_PVINFO capability is available */
11996c9cff0SThomas Huth     return kvm_check_extension(ks, KVM_CAP_PPC_GET_PVINFO) != 0;
12096c9cff0SThomas Huth }
12196c9cff0SThomas Huth 
1225ba4576bSAndreas Färber static int kvm_ppc_register_host_cpu_type(void);
1235ba4576bSAndreas Färber 
124b16565b3SMarcel Apfelbaum int kvm_arch_init(MachineState *ms, KVMState *s)
125d76d1650Saurel32 {
126fc87e185SAlexander Graf     cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);
127fc87e185SAlexander Graf     cap_interrupt_level = kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL);
12890dc8812SScott Wood     cap_segstate = kvm_check_extension(s, KVM_CAP_PPC_SEGSTATE);
12990dc8812SScott Wood     cap_booke_sregs = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_SREGS);
130e97c3636SDavid Gibson     cap_ppc_smt = kvm_check_extension(s, KVM_CAP_PPC_SMT);
131354ac20aSDavid Gibson     cap_ppc_rma = kvm_check_extension(s, KVM_CAP_PPC_RMA);
1320f5cb298SDavid Gibson     cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);
133d6ee2a7cSAlexey Kardashevskiy     cap_spapr_tce_64 = kvm_check_extension(s, KVM_CAP_SPAPR_TCE_64);
134da95324eSAlexey Kardashevskiy     cap_spapr_multitce = kvm_check_extension(s, KVM_CAP_SPAPR_MULTITCE);
1359bb62a07SAlexey Kardashevskiy     cap_spapr_vfio = false;
136d67d40eaSDavid Gibson     cap_one_reg = kvm_check_extension(s, KVM_CAP_ONE_REG);
137f1af19d7SDavid Gibson     cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);
1383b961124SStuart Yoder     cap_epr = kvm_check_extension(s, KVM_CAP_PPC_EPR);
13931f2cb8fSBharat Bhushan     cap_ppc_watchdog = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_WATCHDOG);
1409b00ea49SDavid Gibson     /* Note: we don't set cap_papr here, because this capability is
1419b00ea49SDavid Gibson      * only activated later, by kvmppc_set_papr() */
142e68cb8b4SAlexey Kardashevskiy     cap_htab_fd = kvm_check_extension(s, KVM_CAP_PPC_HTAB_FD);
14387a91de6SAlexander Graf     cap_fixup_hcalls = kvm_check_extension(s, KVM_CAP_PPC_FIXUP_HCALL);
144bac3bf28SThomas Huth     cap_htm = kvm_vm_check_extension(s, KVM_CAP_PPC_HTM);
145cf1c4cceSSam Bobroff     cap_mmu_radix = kvm_vm_check_extension(s, KVM_CAP_PPC_MMU_RADIX);
146cf1c4cceSSam Bobroff     cap_mmu_hash_v3 = kvm_vm_check_extension(s, KVM_CAP_PPC_MMU_HASH_V3);
147fc87e185SAlexander Graf 
148fc87e185SAlexander Graf     if (!cap_interrupt_level) {
149fc87e185SAlexander Graf         fprintf(stderr, "KVM: Couldn't find level irq capability. Expect the "
150fc87e185SAlexander Graf                         "VM to stall at times!\n");
151fc87e185SAlexander Graf     }
152fc87e185SAlexander Graf 
1535ba4576bSAndreas Färber     kvm_ppc_register_host_cpu_type();
1545ba4576bSAndreas Färber 
155d76d1650Saurel32     return 0;
156d76d1650Saurel32 }
157d76d1650Saurel32 
158d525ffabSPaolo Bonzini int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
159d525ffabSPaolo Bonzini {
160d525ffabSPaolo Bonzini     return 0;
161d525ffabSPaolo Bonzini }
162d525ffabSPaolo Bonzini 
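/* Sync the guest PVR into KVM's sregs.  BookE guests keep the host PVR (see
 * the comment below); on BookS this requires the KVM_CAP_PPC_SEGSTATE
 * capability. */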
1631bc22652SAndreas Färber static int kvm_arch_sync_sregs(PowerPCCPU *cpu)
164d76d1650Saurel32 {
1651bc22652SAndreas Färber     CPUPPCState *cenv = &cpu->env;
1661bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
167861bbc80SAlexander Graf     struct kvm_sregs sregs;
1685666ca4aSScott Wood     int ret;
1695666ca4aSScott Wood 
1705666ca4aSScott Wood     if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
17164e07be5SAlexander Graf         /* What we're really trying to say is "if we're on BookE, we use
17264e07be5SAlexander Graf            the native PVR for now". This is the only sane way to check
17364e07be5SAlexander Graf            it though, so we potentially confuse users into thinking they can
17464e07be5SAlexander Graf            run BookE guests on BookS. Let's hope nobody dares to try :) */
1755666ca4aSScott Wood         return 0;
1765666ca4aSScott Wood     } else {
17790dc8812SScott Wood         if (!cap_segstate) {
17864e07be5SAlexander Graf             fprintf(stderr, "kvm error: missing PVR setting capability\n");
17964e07be5SAlexander Graf             return -ENOSYS;
1805666ca4aSScott Wood         }
1815666ca4aSScott Wood     }
1825666ca4aSScott Wood 
1831bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
1845666ca4aSScott Wood     if (ret) {
1855666ca4aSScott Wood         return ret;
1865666ca4aSScott Wood     }
187861bbc80SAlexander Graf 
188861bbc80SAlexander Graf     sregs.pvr = cenv->spr[SPR_PVR];
1891bc22652SAndreas Färber     return kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
1905666ca4aSScott Wood }
1915666ca4aSScott Wood 
19293dd5e85SScott Wood /* Set up a shared TLB array with KVM */
1931bc22652SAndreas Färber static int kvm_booke206_tlb_init(PowerPCCPU *cpu)
19493dd5e85SScott Wood {
1951bc22652SAndreas Färber     CPUPPCState *env = &cpu->env;
1961bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
19793dd5e85SScott Wood     struct kvm_book3e_206_tlb_params params = {};
19893dd5e85SScott Wood     struct kvm_config_tlb cfg = {};
19993dd5e85SScott Wood     unsigned int entries = 0;
20093dd5e85SScott Wood     int ret, i;
20193dd5e85SScott Wood 
20293dd5e85SScott Wood     if (!kvm_enabled() ||
203a60f24b5SAndreas Färber         !kvm_check_extension(cs->kvm_state, KVM_CAP_SW_TLB)) {
20493dd5e85SScott Wood         return 0;
20593dd5e85SScott Wood     }
20693dd5e85SScott Wood 
20793dd5e85SScott Wood     assert(ARRAY_SIZE(params.tlb_sizes) == BOOKE206_MAX_TLBN);
20893dd5e85SScott Wood 
20993dd5e85SScott Wood     for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
21093dd5e85SScott Wood         params.tlb_sizes[i] = booke206_tlb_size(env, i);
21193dd5e85SScott Wood         params.tlb_ways[i] = booke206_tlb_ways(env, i);
21293dd5e85SScott Wood         entries += params.tlb_sizes[i];
21393dd5e85SScott Wood     }
21493dd5e85SScott Wood 
21593dd5e85SScott Wood     assert(entries == env->nb_tlb);
21693dd5e85SScott Wood     assert(sizeof(struct kvm_book3e_206_tlb_entry) == sizeof(ppcmas_tlb_t));
21793dd5e85SScott Wood 
21893dd5e85SScott Wood     env->tlb_dirty = true;
21993dd5e85SScott Wood 
22093dd5e85SScott Wood     cfg.array = (uintptr_t)env->tlb.tlbm;
22193dd5e85SScott Wood     cfg.array_len = sizeof(ppcmas_tlb_t) * entries;
22293dd5e85SScott Wood     cfg.params = (uintptr_t)&params;
22393dd5e85SScott Wood     cfg.mmu_type = KVM_MMU_FSL_BOOKE_NOHV;
22493dd5e85SScott Wood 
22548add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_SW_TLB, 0, (uintptr_t)&cfg);
22693dd5e85SScott Wood     if (ret < 0) {
22793dd5e85SScott Wood         fprintf(stderr, "%s: couldn't enable KVM_CAP_SW_TLB: %s\n",
22893dd5e85SScott Wood                 __func__, strerror(-ret));
22993dd5e85SScott Wood         return ret;
23093dd5e85SScott Wood     }
23193dd5e85SScott Wood 
23293dd5e85SScott Wood     env->kvm_sw_tlb = true;
23393dd5e85SScott Wood     return 0;
23493dd5e85SScott Wood }
23593dd5e85SScott Wood 
2364656e1f0SBenjamin Herrenschmidt 
2374656e1f0SBenjamin Herrenschmidt #if defined(TARGET_PPC64)
238a60f24b5SAndreas Färber static void kvm_get_fallback_smmu_info(PowerPCCPU *cpu,
2394656e1f0SBenjamin Herrenschmidt                                        struct kvm_ppc_smmu_info *info)
2404656e1f0SBenjamin Herrenschmidt {
241a60f24b5SAndreas Färber     CPUPPCState *env = &cpu->env;
242a60f24b5SAndreas Färber     CPUState *cs = CPU(cpu);
243a60f24b5SAndreas Färber 
2444656e1f0SBenjamin Herrenschmidt     memset(info, 0, sizeof(*info));
2454656e1f0SBenjamin Herrenschmidt 
2464656e1f0SBenjamin Herrenschmidt     /* We don't have the new KVM_PPC_GET_SMMU_INFO ioctl, so
2474656e1f0SBenjamin Herrenschmidt      * need to "guess" what the supported page sizes are.
2484656e1f0SBenjamin Herrenschmidt      *
2494656e1f0SBenjamin Herrenschmidt      * For that to work we make a few assumptions:
2504656e1f0SBenjamin Herrenschmidt      *
25196c9cff0SThomas Huth      * - Check whether we are running "PR" KVM which only supports 4K
25296c9cff0SThomas Huth      *   and 16M pages, but supports them regardless of the backing
25396c9cff0SThomas Huth      *   store characteristics. We also don't support 1T segments.
2544656e1f0SBenjamin Herrenschmidt      *
2554656e1f0SBenjamin Herrenschmidt      *   This is safe because if HV KVM ever supports that capability, or PR
2564656e1f0SBenjamin Herrenschmidt      *   KVM grows support for more page/segment sizes, those versions
2574656e1f0SBenjamin Herrenschmidt      *   will have implemented KVM_CAP_PPC_GET_SMMU_INFO and thus we
2584656e1f0SBenjamin Herrenschmidt      *   will not hit this fallback
2594656e1f0SBenjamin Herrenschmidt      *
2604656e1f0SBenjamin Herrenschmidt      * - Else we are running HV KVM. This means we only support page
2614656e1f0SBenjamin Herrenschmidt      *   sizes that fit in the backing store. Additionally we only
2624656e1f0SBenjamin Herrenschmidt      *   advertise 64K pages if the processor is ARCH 2.06 and we assume
2634656e1f0SBenjamin Herrenschmidt      *   P7 encodings for the SLB and hash table. Here too, we assume
2644656e1f0SBenjamin Herrenschmidt      *   support for any newer processor will mean a kernel that
2654656e1f0SBenjamin Herrenschmidt      *   implements KVM_CAP_PPC_GET_SMMU_INFO and thus doesn't hit
2664656e1f0SBenjamin Herrenschmidt      *   this fallback.
2674656e1f0SBenjamin Herrenschmidt      */
26896c9cff0SThomas Huth     if (kvmppc_is_pr(cs->kvm_state)) {
2694656e1f0SBenjamin Herrenschmidt         /* No flags */
2704656e1f0SBenjamin Herrenschmidt         info->flags = 0;
2714656e1f0SBenjamin Herrenschmidt         info->slb_size = 64;
2724656e1f0SBenjamin Herrenschmidt 
2734656e1f0SBenjamin Herrenschmidt         /* Standard 4k base page size segment */
2744656e1f0SBenjamin Herrenschmidt         info->sps[0].page_shift = 12;
2754656e1f0SBenjamin Herrenschmidt         info->sps[0].slb_enc = 0;
2764656e1f0SBenjamin Herrenschmidt         info->sps[0].enc[0].page_shift = 12;
2774656e1f0SBenjamin Herrenschmidt         info->sps[0].enc[0].pte_enc = 0;
2784656e1f0SBenjamin Herrenschmidt 
2794656e1f0SBenjamin Herrenschmidt         /* Standard 16M large page size segment */
2804656e1f0SBenjamin Herrenschmidt         info->sps[1].page_shift = 24;
2814656e1f0SBenjamin Herrenschmidt         info->sps[1].slb_enc = SLB_VSID_L;
2824656e1f0SBenjamin Herrenschmidt         info->sps[1].enc[0].page_shift = 24;
2834656e1f0SBenjamin Herrenschmidt         info->sps[1].enc[0].pte_enc = 0;
2844656e1f0SBenjamin Herrenschmidt     } else {
2854656e1f0SBenjamin Herrenschmidt         int i = 0;
2864656e1f0SBenjamin Herrenschmidt 
2874656e1f0SBenjamin Herrenschmidt         /* HV KVM has backing store size restrictions */
2884656e1f0SBenjamin Herrenschmidt         info->flags = KVM_PPC_PAGE_SIZES_REAL;
2894656e1f0SBenjamin Herrenschmidt 
2904656e1f0SBenjamin Herrenschmidt         if (env->mmu_model & POWERPC_MMU_1TSEG) {
2914656e1f0SBenjamin Herrenschmidt             info->flags |= KVM_PPC_1T_SEGMENTS;
2924656e1f0SBenjamin Herrenschmidt         }
2934656e1f0SBenjamin Herrenschmidt 
294ec975e83SSam Bobroff         if (POWERPC_MMU_VER(env->mmu_model) == POWERPC_MMU_VER_2_06 ||
295ec975e83SSam Bobroff            POWERPC_MMU_VER(env->mmu_model) == POWERPC_MMU_VER_2_07) {
2964656e1f0SBenjamin Herrenschmidt             info->slb_size = 32;
2974656e1f0SBenjamin Herrenschmidt         } else {
2984656e1f0SBenjamin Herrenschmidt             info->slb_size = 64;
2994656e1f0SBenjamin Herrenschmidt         }
3004656e1f0SBenjamin Herrenschmidt 
3014656e1f0SBenjamin Herrenschmidt         /* Standard 4k base page size segment */
3024656e1f0SBenjamin Herrenschmidt         info->sps[i].page_shift = 12;
3034656e1f0SBenjamin Herrenschmidt         info->sps[i].slb_enc = 0;
3044656e1f0SBenjamin Herrenschmidt         info->sps[i].enc[0].page_shift = 12;
3054656e1f0SBenjamin Herrenschmidt         info->sps[i].enc[0].pte_enc = 0;
3064656e1f0SBenjamin Herrenschmidt         i++;
3074656e1f0SBenjamin Herrenschmidt 
308aa4bb587SBenjamin Herrenschmidt         /* 64K on MMU 2.06 and 2.07 */
309ec975e83SSam Bobroff         if (POWERPC_MMU_VER(env->mmu_model) == POWERPC_MMU_VER_2_06 ||
310ec975e83SSam Bobroff             POWERPC_MMU_VER(env->mmu_model) == POWERPC_MMU_VER_2_07) {
3114656e1f0SBenjamin Herrenschmidt             info->sps[i].page_shift = 16;
3124656e1f0SBenjamin Herrenschmidt             info->sps[i].slb_enc = 0x110;
3134656e1f0SBenjamin Herrenschmidt             info->sps[i].enc[0].page_shift = 16;
3144656e1f0SBenjamin Herrenschmidt             info->sps[i].enc[0].pte_enc = 1;
3154656e1f0SBenjamin Herrenschmidt             i++;
3164656e1f0SBenjamin Herrenschmidt         }
3174656e1f0SBenjamin Herrenschmidt 
3184656e1f0SBenjamin Herrenschmidt         /* Standard 16M large page size segment */
3194656e1f0SBenjamin Herrenschmidt         info->sps[i].page_shift = 24;
3204656e1f0SBenjamin Herrenschmidt         info->sps[i].slb_enc = SLB_VSID_L;
3214656e1f0SBenjamin Herrenschmidt         info->sps[i].enc[0].page_shift = 24;
3224656e1f0SBenjamin Herrenschmidt         info->sps[i].enc[0].pte_enc = 0;
3234656e1f0SBenjamin Herrenschmidt     }
3244656e1f0SBenjamin Herrenschmidt }
3254656e1f0SBenjamin Herrenschmidt 
326a60f24b5SAndreas Färber static void kvm_get_smmu_info(PowerPCCPU *cpu, struct kvm_ppc_smmu_info *info)
3274656e1f0SBenjamin Herrenschmidt {
328a60f24b5SAndreas Färber     CPUState *cs = CPU(cpu);
3294656e1f0SBenjamin Herrenschmidt     int ret;
3304656e1f0SBenjamin Herrenschmidt 
331a60f24b5SAndreas Färber     if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) {
332a60f24b5SAndreas Färber         ret = kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_SMMU_INFO, info);
3334656e1f0SBenjamin Herrenschmidt         if (ret == 0) {
3344656e1f0SBenjamin Herrenschmidt             return;
3354656e1f0SBenjamin Herrenschmidt         }
3364656e1f0SBenjamin Herrenschmidt     }
3374656e1f0SBenjamin Herrenschmidt 
338a60f24b5SAndreas Färber     kvm_get_fallback_smmu_info(cpu, info);
3394656e1f0SBenjamin Herrenschmidt }
3404656e1f0SBenjamin Herrenschmidt 
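/* Ask KVM (KVM_PPC_GET_RMMU_INFO) which radix page size encodings the host
 * supports.  Returns a freshly allocated structure, or NULL if the host has
 * no radix MMU support. */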
341c64abd1fSSam Bobroff struct ppc_radix_page_info *kvm_get_radix_page_info(void)
342c64abd1fSSam Bobroff {
343c64abd1fSSam Bobroff     KVMState *s = KVM_STATE(current_machine->accelerator);
344c64abd1fSSam Bobroff     struct ppc_radix_page_info *radix_page_info;
345c64abd1fSSam Bobroff     struct kvm_ppc_rmmu_info rmmu_info;
346c64abd1fSSam Bobroff     int i;
347c64abd1fSSam Bobroff 
348c64abd1fSSam Bobroff     if (!kvm_check_extension(s, KVM_CAP_PPC_MMU_RADIX)) {
349c64abd1fSSam Bobroff         return NULL;
350c64abd1fSSam Bobroff     }
351c64abd1fSSam Bobroff     if (kvm_vm_ioctl(s, KVM_PPC_GET_RMMU_INFO, &rmmu_info)) {
352c64abd1fSSam Bobroff         return NULL;
353c64abd1fSSam Bobroff     }
354c64abd1fSSam Bobroff     radix_page_info = g_malloc0(sizeof(*radix_page_info));
355c64abd1fSSam Bobroff     radix_page_info->count = 0;
356c64abd1fSSam Bobroff     for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
357c64abd1fSSam Bobroff         if (rmmu_info.ap_encodings[i]) {
358c64abd1fSSam Bobroff             radix_page_info->entries[i] = rmmu_info.ap_encodings[i];
359c64abd1fSSam Bobroff             radix_page_info->count++;
360c64abd1fSSam Bobroff         }
361c64abd1fSSam Bobroff     }
362c64abd1fSSam Bobroff     return radix_page_info;
363c64abd1fSSam Bobroff }
364c64abd1fSSam Bobroff 
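/* Configure the ISA v3.00 MMU through KVM_PPC_CONFIGURE_V3_MMU: radix vs.
 * hash translation, guest translation shootdown enable (GTSE) and the
 * process table address.  The ioctl result is mapped onto PAPR hcall
 * return codes for the caller. */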
365*b4db5413SSuraj Jitindar Singh target_ulong kvmppc_configure_v3_mmu(PowerPCCPU *cpu,
366*b4db5413SSuraj Jitindar Singh                                      bool radix, bool gtse,
367*b4db5413SSuraj Jitindar Singh                                      uint64_t proc_tbl)
368*b4db5413SSuraj Jitindar Singh {
369*b4db5413SSuraj Jitindar Singh     CPUState *cs = CPU(cpu);
370*b4db5413SSuraj Jitindar Singh     int ret;
371*b4db5413SSuraj Jitindar Singh     uint64_t flags = 0;
372*b4db5413SSuraj Jitindar Singh     struct kvm_ppc_mmuv3_cfg cfg = {
373*b4db5413SSuraj Jitindar Singh         .process_table = proc_tbl,
374*b4db5413SSuraj Jitindar Singh     };
375*b4db5413SSuraj Jitindar Singh 
376*b4db5413SSuraj Jitindar Singh     if (radix) {
377*b4db5413SSuraj Jitindar Singh         flags |= KVM_PPC_MMUV3_RADIX;
378*b4db5413SSuraj Jitindar Singh     }
379*b4db5413SSuraj Jitindar Singh     if (gtse) {
380*b4db5413SSuraj Jitindar Singh         flags |= KVM_PPC_MMUV3_GTSE;
381*b4db5413SSuraj Jitindar Singh     }
382*b4db5413SSuraj Jitindar Singh     cfg.flags = flags;
383*b4db5413SSuraj Jitindar Singh     ret = kvm_vm_ioctl(cs->kvm_state, KVM_PPC_CONFIGURE_V3_MMU, &cfg);
384*b4db5413SSuraj Jitindar Singh     switch (ret) {
385*b4db5413SSuraj Jitindar Singh     case 0:
386*b4db5413SSuraj Jitindar Singh         return H_SUCCESS;
387*b4db5413SSuraj Jitindar Singh     case -EINVAL:
388*b4db5413SSuraj Jitindar Singh         return H_PARAMETER;
389*b4db5413SSuraj Jitindar Singh     case -ENODEV:
390*b4db5413SSuraj Jitindar Singh         return H_NOT_AVAILABLE;
391*b4db5413SSuraj Jitindar Singh     default:
392*b4db5413SSuraj Jitindar Singh         return H_HARDWARE;
393*b4db5413SSuraj Jitindar Singh     }
394*b4db5413SSuraj Jitindar Singh }
395*b4db5413SSuraj Jitindar Singh 
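/* With HV KVM (KVM_PPC_PAGE_SIZES_REAL) a page size is only usable if it is
 * no larger than the page size backing guest RAM; otherwise any size goes. */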
3964656e1f0SBenjamin Herrenschmidt static bool kvm_valid_page_size(uint32_t flags, long rampgsize, uint32_t shift)
3974656e1f0SBenjamin Herrenschmidt {
3984656e1f0SBenjamin Herrenschmidt     if (!(flags & KVM_PPC_PAGE_SIZES_REAL)) {
3994656e1f0SBenjamin Herrenschmidt         return true;
4004656e1f0SBenjamin Herrenschmidt     }
4014656e1f0SBenjamin Herrenschmidt 
4024656e1f0SBenjamin Herrenschmidt     return (1ul << shift) <= rampgsize;
4034656e1f0SBenjamin Herrenschmidt }
4044656e1f0SBenjamin Herrenschmidt 
405df587133SThomas Huth static long max_cpu_page_size;
406df587133SThomas Huth 
407a60f24b5SAndreas Färber static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
4084656e1f0SBenjamin Herrenschmidt {
4094656e1f0SBenjamin Herrenschmidt     static struct kvm_ppc_smmu_info smmu_info;
4104656e1f0SBenjamin Herrenschmidt     static bool has_smmu_info;
411a60f24b5SAndreas Färber     CPUPPCState *env = &cpu->env;
4124656e1f0SBenjamin Herrenschmidt     int iq, ik, jq, jk;
4130d594f55SThomas Huth     bool has_64k_pages = false;
4144656e1f0SBenjamin Herrenschmidt 
4154656e1f0SBenjamin Herrenschmidt     /* We only handle page sizes for 64-bit server guests for now */
4164656e1f0SBenjamin Herrenschmidt     if (!(env->mmu_model & POWERPC_MMU_64)) {
4174656e1f0SBenjamin Herrenschmidt         return;
4184656e1f0SBenjamin Herrenschmidt     }
4194656e1f0SBenjamin Herrenschmidt 
4204656e1f0SBenjamin Herrenschmidt     /* Collect MMU info from kernel if not already */
4214656e1f0SBenjamin Herrenschmidt     if (!has_smmu_info) {
422a60f24b5SAndreas Färber         kvm_get_smmu_info(cpu, &smmu_info);
4234656e1f0SBenjamin Herrenschmidt         has_smmu_info = true;
4244656e1f0SBenjamin Herrenschmidt     }
4254656e1f0SBenjamin Herrenschmidt 
426df587133SThomas Huth     if (!max_cpu_page_size) {
4279c607668SAlexey Kardashevskiy         max_cpu_page_size = qemu_getrampagesize();
428df587133SThomas Huth     }
4294656e1f0SBenjamin Herrenschmidt 
4304656e1f0SBenjamin Herrenschmidt     /* Convert to QEMU form */
4314656e1f0SBenjamin Herrenschmidt     memset(&env->sps, 0, sizeof(env->sps));
4324656e1f0SBenjamin Herrenschmidt 
43390da0d5aSBenjamin Herrenschmidt     /* If we have HV KVM, we need to forbid CI large pages if our
43490da0d5aSBenjamin Herrenschmidt      * host page size is smaller than 64K.
43590da0d5aSBenjamin Herrenschmidt      */
43690da0d5aSBenjamin Herrenschmidt     if (smmu_info.flags & KVM_PPC_PAGE_SIZES_REAL) {
43790da0d5aSBenjamin Herrenschmidt         env->ci_large_pages = getpagesize() >= 0x10000;
43890da0d5aSBenjamin Herrenschmidt     }
43990da0d5aSBenjamin Herrenschmidt 
44008215d8fSAlexander Graf     /*
44108215d8fSAlexander Graf      * XXX This loop should be an entry-wide AND of the capabilities that
44208215d8fSAlexander Graf      *     the selected CPU has with the capabilities that KVM supports.
44308215d8fSAlexander Graf      */
4444656e1f0SBenjamin Herrenschmidt     for (ik = iq = 0; ik < KVM_PPC_PAGE_SIZES_MAX_SZ; ik++) {
4454656e1f0SBenjamin Herrenschmidt         struct ppc_one_seg_page_size *qsps = &env->sps.sps[iq];
4464656e1f0SBenjamin Herrenschmidt         struct kvm_ppc_one_seg_page_size *ksps = &smmu_info.sps[ik];
4474656e1f0SBenjamin Herrenschmidt 
448df587133SThomas Huth         if (!kvm_valid_page_size(smmu_info.flags, max_cpu_page_size,
4494656e1f0SBenjamin Herrenschmidt                                  ksps->page_shift)) {
4504656e1f0SBenjamin Herrenschmidt             continue;
4514656e1f0SBenjamin Herrenschmidt         }
4524656e1f0SBenjamin Herrenschmidt         qsps->page_shift = ksps->page_shift;
4534656e1f0SBenjamin Herrenschmidt         qsps->slb_enc = ksps->slb_enc;
4544656e1f0SBenjamin Herrenschmidt         for (jk = jq = 0; jk < KVM_PPC_PAGE_SIZES_MAX_SZ; jk++) {
455df587133SThomas Huth             if (!kvm_valid_page_size(smmu_info.flags, max_cpu_page_size,
4564656e1f0SBenjamin Herrenschmidt                                      ksps->enc[jk].page_shift)) {
4574656e1f0SBenjamin Herrenschmidt                 continue;
4584656e1f0SBenjamin Herrenschmidt             }
4590d594f55SThomas Huth             if (ksps->enc[jk].page_shift == 16) {
4600d594f55SThomas Huth                 has_64k_pages = true;
4610d594f55SThomas Huth             }
4624656e1f0SBenjamin Herrenschmidt             qsps->enc[jq].page_shift = ksps->enc[jk].page_shift;
4634656e1f0SBenjamin Herrenschmidt             qsps->enc[jq].pte_enc = ksps->enc[jk].pte_enc;
4644656e1f0SBenjamin Herrenschmidt             if (++jq >= PPC_PAGE_SIZES_MAX_SZ) {
4654656e1f0SBenjamin Herrenschmidt                 break;
4664656e1f0SBenjamin Herrenschmidt             }
4674656e1f0SBenjamin Herrenschmidt         }
4684656e1f0SBenjamin Herrenschmidt         if (++iq >= PPC_PAGE_SIZES_MAX_SZ) {
4694656e1f0SBenjamin Herrenschmidt             break;
4704656e1f0SBenjamin Herrenschmidt         }
4714656e1f0SBenjamin Herrenschmidt     }
4724656e1f0SBenjamin Herrenschmidt     env->slb_nr = smmu_info.slb_size;
47308215d8fSAlexander Graf     if (!(smmu_info.flags & KVM_PPC_1T_SEGMENTS)) {
4744656e1f0SBenjamin Herrenschmidt         env->mmu_model &= ~POWERPC_MMU_1TSEG;
4754656e1f0SBenjamin Herrenschmidt     }
4760d594f55SThomas Huth     if (!has_64k_pages) {
4770d594f55SThomas Huth         env->mmu_model &= ~POWERPC_MMU_64K;
4780d594f55SThomas Huth     }
4794656e1f0SBenjamin Herrenschmidt }
480df587133SThomas Huth 
481df587133SThomas Huth bool kvmppc_is_mem_backend_page_size_ok(char *obj_path)
482df587133SThomas Huth {
483df587133SThomas Huth     Object *mem_obj = object_resolve_path(obj_path, NULL);
484df587133SThomas Huth     char *mempath = object_property_get_str(mem_obj, "mem-path", NULL);
485df587133SThomas Huth     long pagesize;
486df587133SThomas Huth 
487df587133SThomas Huth     if (mempath) {
4889c607668SAlexey Kardashevskiy         pagesize = qemu_mempath_getpagesize(mempath);
489df587133SThomas Huth     } else {
490df587133SThomas Huth         pagesize = getpagesize();
491df587133SThomas Huth     }
492df587133SThomas Huth 
493df587133SThomas Huth     return pagesize >= max_cpu_page_size;
494df587133SThomas Huth }
495df587133SThomas Huth 
4964656e1f0SBenjamin Herrenschmidt #else /* defined (TARGET_PPC64) */
4974656e1f0SBenjamin Herrenschmidt 
498a60f24b5SAndreas Färber static inline void kvm_fixup_page_sizes(PowerPCCPU *cpu)
4994656e1f0SBenjamin Herrenschmidt {
5004656e1f0SBenjamin Herrenschmidt }
5014656e1f0SBenjamin Herrenschmidt 
502df587133SThomas Huth bool kvmppc_is_mem_backend_page_size_ok(char *obj_path)
503df587133SThomas Huth {
504df587133SThomas Huth     return true;
505df587133SThomas Huth }
506df587133SThomas Huth 
5074656e1f0SBenjamin Herrenschmidt #endif /* !defined (TARGET_PPC64) */
5084656e1f0SBenjamin Herrenschmidt 
509b164e48eSEduardo Habkost unsigned long kvm_arch_vcpu_id(CPUState *cpu)
510b164e48eSEduardo Habkost {
5110f20ba62SAlexey Kardashevskiy     return ppc_get_vcpu_dt_id(POWERPC_CPU(cpu));
512b164e48eSEduardo Habkost }
513b164e48eSEduardo Habkost 
51488365d17SBharat Bhushan /* e500 supports 2 h/w breakpoints and 2 watchpoints.
51588365d17SBharat Bhushan  * book3s supports only 1 watchpoint, so an array size
51688365d17SBharat Bhushan  * of 4 is sufficient for now.
51788365d17SBharat Bhushan  */
51888365d17SBharat Bhushan #define MAX_HW_BKPTS 4
51988365d17SBharat Bhushan 
52088365d17SBharat Bhushan static struct HWBreakpoint {
52188365d17SBharat Bhushan     target_ulong addr;
52288365d17SBharat Bhushan     int type;
52388365d17SBharat Bhushan } hw_debug_points[MAX_HW_BKPTS];
52488365d17SBharat Bhushan 
52588365d17SBharat Bhushan static CPUWatchpoint hw_watchpoint;
52688365d17SBharat Bhushan 
52788365d17SBharat Bhushan /* By default no hardware breakpoints or watchpoints are supported */
52888365d17SBharat Bhushan static int max_hw_breakpoint;
52988365d17SBharat Bhushan static int max_hw_watchpoint;
53088365d17SBharat Bhushan static int nb_hw_breakpoint;
53188365d17SBharat Bhushan static int nb_hw_watchpoint;
53288365d17SBharat Bhushan 
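/* Record how many hardware breakpoints and watchpoints this CPU model
 * provides; only BookE sets non-zero limits here. */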
53388365d17SBharat Bhushan static void kvmppc_hw_debug_points_init(CPUPPCState *cenv)
53488365d17SBharat Bhushan {
53588365d17SBharat Bhushan     if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
53688365d17SBharat Bhushan         max_hw_breakpoint = 2;
53788365d17SBharat Bhushan         max_hw_watchpoint = 2;
53888365d17SBharat Bhushan     }
53988365d17SBharat Bhushan 
54088365d17SBharat Bhushan     if ((max_hw_breakpoint + max_hw_watchpoint) > MAX_HW_BKPTS) {
54188365d17SBharat Bhushan         fprintf(stderr, "Error initializing h/w breakpoints\n");
54288365d17SBharat Bhushan         return;
54388365d17SBharat Bhushan     }
54488365d17SBharat Bhushan }
54588365d17SBharat Bhushan 
54620d695a9SAndreas Färber int kvm_arch_init_vcpu(CPUState *cs)
5475666ca4aSScott Wood {
54820d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
54920d695a9SAndreas Färber     CPUPPCState *cenv = &cpu->env;
5505666ca4aSScott Wood     int ret;
5515666ca4aSScott Wood 
5524656e1f0SBenjamin Herrenschmidt     /* Gather server mmu info from KVM and update the CPU state */
553a60f24b5SAndreas Färber     kvm_fixup_page_sizes(cpu);
5544656e1f0SBenjamin Herrenschmidt 
5554656e1f0SBenjamin Herrenschmidt     /* Synchronize sregs with kvm */
5561bc22652SAndreas Färber     ret = kvm_arch_sync_sregs(cpu);
5575666ca4aSScott Wood     if (ret) {
558388e47c7SThomas Huth         if (ret == -EINVAL) {
559388e47c7SThomas Huth             error_report("Register sync failed... If you're using kvm-hv.ko,"
560388e47c7SThomas Huth                          " only \"-cpu host\" is possible");
561388e47c7SThomas Huth         }
5625666ca4aSScott Wood         return ret;
5635666ca4aSScott Wood     }
564861bbc80SAlexander Graf 
565bc72ad67SAlex Bligh     idle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, kvm_kick_cpu, cpu);
566c821c2bdSAlexander Graf 
56793dd5e85SScott Wood     switch (cenv->mmu_model) {
56893dd5e85SScott Wood     case POWERPC_MMU_BOOKE206:
5697f516c96SThomas Huth         /* This target supports access to KVM's guest TLB */
5701bc22652SAndreas Färber         ret = kvm_booke206_tlb_init(cpu);
57193dd5e85SScott Wood         break;
5727f516c96SThomas Huth     case POWERPC_MMU_2_07:
5737f516c96SThomas Huth         if (!cap_htm && !kvmppc_is_pr(cs->kvm_state)) {
5747f516c96SThomas Huth             /* KVM-HV has transactional memory on POWER8 also without the
575f3d9f303SSam Bobroff              * KVM_CAP_PPC_HTM extension, so enable it here instead as
576f3d9f303SSam Bobroff              * long as it's available to userspace on the host. */
577f3d9f303SSam Bobroff             if (qemu_getauxval(AT_HWCAP2) & PPC_FEATURE2_HAS_HTM) {
5787f516c96SThomas Huth                 cap_htm = true;
5797f516c96SThomas Huth             }
580f3d9f303SSam Bobroff         }
5817f516c96SThomas Huth         break;
58293dd5e85SScott Wood     default:
58393dd5e85SScott Wood         break;
58493dd5e85SScott Wood     }
58593dd5e85SScott Wood 
5863c902d44SBharat Bhushan     kvm_get_one_reg(cs, KVM_REG_PPC_DEBUG_INST, &debug_inst_opcode);
58788365d17SBharat Bhushan     kvmppc_hw_debug_points_init(cenv);
5883c902d44SBharat Bhushan 
589861bbc80SAlexander Graf     return ret;
590d76d1650Saurel32 }
591d76d1650Saurel32 
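/* Flush the complete shadow TLB back to KVM by marking every entry dirty. */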
5921bc22652SAndreas Färber static void kvm_sw_tlb_put(PowerPCCPU *cpu)
59393dd5e85SScott Wood {
5941bc22652SAndreas Färber     CPUPPCState *env = &cpu->env;
5951bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
59693dd5e85SScott Wood     struct kvm_dirty_tlb dirty_tlb;
59793dd5e85SScott Wood     unsigned char *bitmap;
59893dd5e85SScott Wood     int ret;
59993dd5e85SScott Wood 
60093dd5e85SScott Wood     if (!env->kvm_sw_tlb) {
60193dd5e85SScott Wood         return;
60293dd5e85SScott Wood     }
60393dd5e85SScott Wood 
60493dd5e85SScott Wood     bitmap = g_malloc((env->nb_tlb + 7) / 8);
60593dd5e85SScott Wood     memset(bitmap, 0xFF, (env->nb_tlb + 7) / 8);
60693dd5e85SScott Wood 
60793dd5e85SScott Wood     dirty_tlb.bitmap = (uintptr_t)bitmap;
60893dd5e85SScott Wood     dirty_tlb.num_dirty = env->nb_tlb;
60993dd5e85SScott Wood 
6101bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_DIRTY_TLB, &dirty_tlb);
61193dd5e85SScott Wood     if (ret) {
61293dd5e85SScott Wood         fprintf(stderr, "%s: KVM_DIRTY_TLB: %s\n",
61393dd5e85SScott Wood                 __func__, strerror(-ret));
61493dd5e85SScott Wood     }
61593dd5e85SScott Wood 
61693dd5e85SScott Wood     g_free(bitmap);
61793dd5e85SScott Wood }
61893dd5e85SScott Wood 
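/* Read a single SPR from KVM via the ONE_REG interface and store it in
 * env->spr[].  Only 32-bit and 64-bit register sizes are handled. */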
619d67d40eaSDavid Gibson static void kvm_get_one_spr(CPUState *cs, uint64_t id, int spr)
620d67d40eaSDavid Gibson {
621d67d40eaSDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
622d67d40eaSDavid Gibson     CPUPPCState *env = &cpu->env;
623d67d40eaSDavid Gibson     union {
624d67d40eaSDavid Gibson         uint32_t u32;
625d67d40eaSDavid Gibson         uint64_t u64;
626d67d40eaSDavid Gibson     } val;
627d67d40eaSDavid Gibson     struct kvm_one_reg reg = {
628d67d40eaSDavid Gibson         .id = id,
629d67d40eaSDavid Gibson         .addr = (uintptr_t) &val,
630d67d40eaSDavid Gibson     };
631d67d40eaSDavid Gibson     int ret;
632d67d40eaSDavid Gibson 
633d67d40eaSDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
634d67d40eaSDavid Gibson     if (ret != 0) {
635b36f100eSAlexey Kardashevskiy         trace_kvm_failed_spr_get(spr, strerror(errno));
636d67d40eaSDavid Gibson     } else {
637d67d40eaSDavid Gibson         switch (id & KVM_REG_SIZE_MASK) {
638d67d40eaSDavid Gibson         case KVM_REG_SIZE_U32:
639d67d40eaSDavid Gibson             env->spr[spr] = val.u32;
640d67d40eaSDavid Gibson             break;
641d67d40eaSDavid Gibson 
642d67d40eaSDavid Gibson         case KVM_REG_SIZE_U64:
643d67d40eaSDavid Gibson             env->spr[spr] = val.u64;
644d67d40eaSDavid Gibson             break;
645d67d40eaSDavid Gibson 
646d67d40eaSDavid Gibson         default:
647d67d40eaSDavid Gibson             /* Don't handle this size yet */
648d67d40eaSDavid Gibson             abort();
649d67d40eaSDavid Gibson         }
650d67d40eaSDavid Gibson     }
651d67d40eaSDavid Gibson }
652d67d40eaSDavid Gibson 
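/* Write a single SPR from env->spr[] to KVM via the ONE_REG interface. */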
653d67d40eaSDavid Gibson static void kvm_put_one_spr(CPUState *cs, uint64_t id, int spr)
654d67d40eaSDavid Gibson {
655d67d40eaSDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
656d67d40eaSDavid Gibson     CPUPPCState *env = &cpu->env;
657d67d40eaSDavid Gibson     union {
658d67d40eaSDavid Gibson         uint32_t u32;
659d67d40eaSDavid Gibson         uint64_t u64;
660d67d40eaSDavid Gibson     } val;
661d67d40eaSDavid Gibson     struct kvm_one_reg reg = {
662d67d40eaSDavid Gibson         .id = id,
663d67d40eaSDavid Gibson         .addr = (uintptr_t) &val,
664d67d40eaSDavid Gibson     };
665d67d40eaSDavid Gibson     int ret;
666d67d40eaSDavid Gibson 
667d67d40eaSDavid Gibson     switch (id & KVM_REG_SIZE_MASK) {
668d67d40eaSDavid Gibson     case KVM_REG_SIZE_U32:
669d67d40eaSDavid Gibson         val.u32 = env->spr[spr];
670d67d40eaSDavid Gibson         break;
671d67d40eaSDavid Gibson 
672d67d40eaSDavid Gibson     case KVM_REG_SIZE_U64:
673d67d40eaSDavid Gibson         val.u64 = env->spr[spr];
674d67d40eaSDavid Gibson         break;
675d67d40eaSDavid Gibson 
676d67d40eaSDavid Gibson     default:
677d67d40eaSDavid Gibson         /* Don't handle this size yet */
678d67d40eaSDavid Gibson         abort();
679d67d40eaSDavid Gibson     }
680d67d40eaSDavid Gibson 
681d67d40eaSDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
682d67d40eaSDavid Gibson     if (ret != 0) {
683b36f100eSAlexey Kardashevskiy         trace_kvm_failed_spr_set(spr, strerror(errno));
684d67d40eaSDavid Gibson     }
685d67d40eaSDavid Gibson }
686d67d40eaSDavid Gibson 
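/* Push the FPSCR, the FP/VSX registers and the Altivec state to KVM, one
 * register at a time via ONE_REG.  The two VSR doublewords are ordered
 * according to host endianness. */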
68770b79849SDavid Gibson static int kvm_put_fp(CPUState *cs)
68870b79849SDavid Gibson {
68970b79849SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
69070b79849SDavid Gibson     CPUPPCState *env = &cpu->env;
69170b79849SDavid Gibson     struct kvm_one_reg reg;
69270b79849SDavid Gibson     int i;
69370b79849SDavid Gibson     int ret;
69470b79849SDavid Gibson 
69570b79849SDavid Gibson     if (env->insns_flags & PPC_FLOAT) {
69670b79849SDavid Gibson         uint64_t fpscr = env->fpscr;
69770b79849SDavid Gibson         bool vsx = !!(env->insns_flags2 & PPC2_VSX);
69870b79849SDavid Gibson 
69970b79849SDavid Gibson         reg.id = KVM_REG_PPC_FPSCR;
70070b79849SDavid Gibson         reg.addr = (uintptr_t)&fpscr;
70170b79849SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
70270b79849SDavid Gibson         if (ret < 0) {
703da56ff91SPeter Maydell             DPRINTF("Unable to set FPSCR to KVM: %s\n", strerror(errno));
70470b79849SDavid Gibson             return ret;
70570b79849SDavid Gibson         }
70670b79849SDavid Gibson 
70770b79849SDavid Gibson         for (i = 0; i < 32; i++) {
70870b79849SDavid Gibson             uint64_t vsr[2];
70970b79849SDavid Gibson 
7103a4b791bSGreg Kurz #ifdef HOST_WORDS_BIGENDIAN
71170b79849SDavid Gibson             vsr[0] = float64_val(env->fpr[i]);
71270b79849SDavid Gibson             vsr[1] = env->vsr[i];
7133a4b791bSGreg Kurz #else
7143a4b791bSGreg Kurz             vsr[0] = env->vsr[i];
7153a4b791bSGreg Kurz             vsr[1] = float64_val(env->fpr[i]);
7163a4b791bSGreg Kurz #endif
71770b79849SDavid Gibson             reg.addr = (uintptr_t) &vsr;
71870b79849SDavid Gibson             reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);
71970b79849SDavid Gibson 
72070b79849SDavid Gibson             ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
72170b79849SDavid Gibson             if (ret < 0) {
722da56ff91SPeter Maydell                 DPRINTF("Unable to set %s%d to KVM: %s\n", vsx ? "VSR" : "FPR",
72370b79849SDavid Gibson                         i, strerror(errno));
72470b79849SDavid Gibson                 return ret;
72570b79849SDavid Gibson             }
72670b79849SDavid Gibson         }
72770b79849SDavid Gibson     }
72870b79849SDavid Gibson 
72970b79849SDavid Gibson     if (env->insns_flags & PPC_ALTIVEC) {
73070b79849SDavid Gibson         reg.id = KVM_REG_PPC_VSCR;
73170b79849SDavid Gibson         reg.addr = (uintptr_t)&env->vscr;
73270b79849SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
73370b79849SDavid Gibson         if (ret < 0) {
734da56ff91SPeter Maydell             DPRINTF("Unable to set VSCR to KVM: %s\n", strerror(errno));
73570b79849SDavid Gibson             return ret;
73670b79849SDavid Gibson         }
73770b79849SDavid Gibson 
73870b79849SDavid Gibson         for (i = 0; i < 32; i++) {
73970b79849SDavid Gibson             reg.id = KVM_REG_PPC_VR(i);
74070b79849SDavid Gibson             reg.addr = (uintptr_t)&env->avr[i];
74170b79849SDavid Gibson             ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
74270b79849SDavid Gibson             if (ret < 0) {
743da56ff91SPeter Maydell                 DPRINTF("Unable to set VR%d to KVM: %s\n", i, strerror(errno));
74470b79849SDavid Gibson                 return ret;
74570b79849SDavid Gibson             }
74670b79849SDavid Gibson         }
74770b79849SDavid Gibson     }
74870b79849SDavid Gibson 
74970b79849SDavid Gibson     return 0;
75070b79849SDavid Gibson }
75170b79849SDavid Gibson 
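/* Fetch the FPSCR, FP/VSX and Altivec state from KVM; the mirror image of
 * kvm_put_fp() above. */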
75270b79849SDavid Gibson static int kvm_get_fp(CPUState *cs)
75370b79849SDavid Gibson {
75470b79849SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
75570b79849SDavid Gibson     CPUPPCState *env = &cpu->env;
75670b79849SDavid Gibson     struct kvm_one_reg reg;
75770b79849SDavid Gibson     int i;
75870b79849SDavid Gibson     int ret;
75970b79849SDavid Gibson 
76070b79849SDavid Gibson     if (env->insns_flags & PPC_FLOAT) {
76170b79849SDavid Gibson         uint64_t fpscr;
76270b79849SDavid Gibson         bool vsx = !!(env->insns_flags2 & PPC2_VSX);
76370b79849SDavid Gibson 
76470b79849SDavid Gibson         reg.id = KVM_REG_PPC_FPSCR;
76570b79849SDavid Gibson         reg.addr = (uintptr_t)&fpscr;
76670b79849SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
76770b79849SDavid Gibson         if (ret < 0) {
768da56ff91SPeter Maydell             DPRINTF("Unable to get FPSCR from KVM: %s\n", strerror(errno));
76970b79849SDavid Gibson             return ret;
77070b79849SDavid Gibson         } else {
77170b79849SDavid Gibson             env->fpscr = fpscr;
77270b79849SDavid Gibson         }
77370b79849SDavid Gibson 
77470b79849SDavid Gibson         for (i = 0; i < 32; i++) {
77570b79849SDavid Gibson             uint64_t vsr[2];
77670b79849SDavid Gibson 
77770b79849SDavid Gibson             reg.addr = (uintptr_t) &vsr;
77870b79849SDavid Gibson             reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);
77970b79849SDavid Gibson 
78070b79849SDavid Gibson             ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
78170b79849SDavid Gibson             if (ret < 0) {
782da56ff91SPeter Maydell                 DPRINTF("Unable to get %s%d from KVM: %s\n",
78370b79849SDavid Gibson                         vsx ? "VSR" : "FPR", i, strerror(errno));
78470b79849SDavid Gibson                 return ret;
78570b79849SDavid Gibson             } else {
7863a4b791bSGreg Kurz #ifdef HOST_WORDS_BIGENDIAN
78770b79849SDavid Gibson                 env->fpr[i] = vsr[0];
78870b79849SDavid Gibson                 if (vsx) {
78970b79849SDavid Gibson                     env->vsr[i] = vsr[1];
79070b79849SDavid Gibson                 }
7913a4b791bSGreg Kurz #else
7923a4b791bSGreg Kurz                 env->fpr[i] = vsr[1];
7933a4b791bSGreg Kurz                 if (vsx) {
7943a4b791bSGreg Kurz                     env->vsr[i] = vsr[0];
7953a4b791bSGreg Kurz                 }
7963a4b791bSGreg Kurz #endif
79770b79849SDavid Gibson             }
79870b79849SDavid Gibson         }
79970b79849SDavid Gibson     }
80070b79849SDavid Gibson 
80170b79849SDavid Gibson     if (env->insns_flags & PPC_ALTIVEC) {
80270b79849SDavid Gibson         reg.id = KVM_REG_PPC_VSCR;
80370b79849SDavid Gibson         reg.addr = (uintptr_t)&env->vscr;
80470b79849SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
80570b79849SDavid Gibson         if (ret < 0) {
806da56ff91SPeter Maydell             DPRINTF("Unable to get VSCR from KVM: %s\n", strerror(errno));
80770b79849SDavid Gibson             return ret;
80870b79849SDavid Gibson         }
80970b79849SDavid Gibson 
81070b79849SDavid Gibson         for (i = 0; i < 32; i++) {
81170b79849SDavid Gibson             reg.id = KVM_REG_PPC_VR(i);
81270b79849SDavid Gibson             reg.addr = (uintptr_t)&env->avr[i];
81370b79849SDavid Gibson             ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
81470b79849SDavid Gibson             if (ret < 0) {
815da56ff91SPeter Maydell                 DPRINTF("Unable to get VR%d from KVM: %s\n",
81670b79849SDavid Gibson                         i, strerror(errno));
81770b79849SDavid Gibson                 return ret;
81870b79849SDavid Gibson             }
81970b79849SDavid Gibson         }
82070b79849SDavid Gibson     }
82170b79849SDavid Gibson 
82270b79849SDavid Gibson     return 0;
82370b79849SDavid Gibson }
82470b79849SDavid Gibson 
8259b00ea49SDavid Gibson #if defined(TARGET_PPC64)
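/* Read the PAPR Virtual Processor Area registration state (VPA address, SLB
 * shadow buffer and dispatch trace log) back from KVM. */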
8269b00ea49SDavid Gibson static int kvm_get_vpa(CPUState *cs)
8279b00ea49SDavid Gibson {
8289b00ea49SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
8299b00ea49SDavid Gibson     CPUPPCState *env = &cpu->env;
8309b00ea49SDavid Gibson     struct kvm_one_reg reg;
8319b00ea49SDavid Gibson     int ret;
8329b00ea49SDavid Gibson 
8339b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_ADDR;
8349b00ea49SDavid Gibson     reg.addr = (uintptr_t)&env->vpa_addr;
8359b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
8369b00ea49SDavid Gibson     if (ret < 0) {
837da56ff91SPeter Maydell         DPRINTF("Unable to get VPA address from KVM: %s\n", strerror(errno));
8389b00ea49SDavid Gibson         return ret;
8399b00ea49SDavid Gibson     }
8409b00ea49SDavid Gibson 
8419b00ea49SDavid Gibson     assert((uintptr_t)&env->slb_shadow_size
8429b00ea49SDavid Gibson            == ((uintptr_t)&env->slb_shadow_addr + 8));
8439b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_SLB;
8449b00ea49SDavid Gibson     reg.addr = (uintptr_t)&env->slb_shadow_addr;
8459b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
8469b00ea49SDavid Gibson     if (ret < 0) {
847da56ff91SPeter Maydell         DPRINTF("Unable to get SLB shadow state from KVM: %s\n",
8489b00ea49SDavid Gibson                 strerror(errno));
8499b00ea49SDavid Gibson         return ret;
8509b00ea49SDavid Gibson     }
8519b00ea49SDavid Gibson 
8529b00ea49SDavid Gibson     assert((uintptr_t)&env->dtl_size == ((uintptr_t)&env->dtl_addr + 8));
8539b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_DTL;
8549b00ea49SDavid Gibson     reg.addr = (uintptr_t)&env->dtl_addr;
8559b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
8569b00ea49SDavid Gibson     if (ret < 0) {
857da56ff91SPeter Maydell         DPRINTF("Unable to get dispatch trace log state from KVM: %s\n",
8589b00ea49SDavid Gibson                 strerror(errno));
8599b00ea49SDavid Gibson         return ret;
8609b00ea49SDavid Gibson     }
8619b00ea49SDavid Gibson 
8629b00ea49SDavid Gibson     return 0;
8639b00ea49SDavid Gibson }
8649b00ea49SDavid Gibson 
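/* Restore the VPA, SLB shadow and dispatch trace log registrations to KVM,
 * observing the ordering constraint described below. */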
8659b00ea49SDavid Gibson static int kvm_put_vpa(CPUState *cs)
8669b00ea49SDavid Gibson {
8679b00ea49SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
8689b00ea49SDavid Gibson     CPUPPCState *env = &cpu->env;
8699b00ea49SDavid Gibson     struct kvm_one_reg reg;
8709b00ea49SDavid Gibson     int ret;
8719b00ea49SDavid Gibson 
8729b00ea49SDavid Gibson     /* SLB shadow or DTL can't be registered unless a master VPA is
8739b00ea49SDavid Gibson      * registered.  That means when restoring state, if a VPA *is*
8749b00ea49SDavid Gibson      * registered, we need to set that up first.  If not, we need to
8759b00ea49SDavid Gibson      * deregister the others before deregistering the master VPA */
8769b00ea49SDavid Gibson     assert(env->vpa_addr || !(env->slb_shadow_addr || env->dtl_addr));
8779b00ea49SDavid Gibson 
8789b00ea49SDavid Gibson     if (env->vpa_addr) {
8799b00ea49SDavid Gibson         reg.id = KVM_REG_PPC_VPA_ADDR;
8809b00ea49SDavid Gibson         reg.addr = (uintptr_t)&env->vpa_addr;
8819b00ea49SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
8829b00ea49SDavid Gibson         if (ret < 0) {
883da56ff91SPeter Maydell             DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno));
8849b00ea49SDavid Gibson             return ret;
8859b00ea49SDavid Gibson         }
8869b00ea49SDavid Gibson     }
8879b00ea49SDavid Gibson 
8889b00ea49SDavid Gibson     assert((uintptr_t)&env->slb_shadow_size
8899b00ea49SDavid Gibson            == ((uintptr_t)&env->slb_shadow_addr + 8));
8909b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_SLB;
8919b00ea49SDavid Gibson     reg.addr = (uintptr_t)&env->slb_shadow_addr;
8929b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
8939b00ea49SDavid Gibson     if (ret < 0) {
894da56ff91SPeter Maydell         DPRINTF("Unable to set SLB shadow state to KVM: %s\n", strerror(errno));
8959b00ea49SDavid Gibson         return ret;
8969b00ea49SDavid Gibson     }
8979b00ea49SDavid Gibson 
8989b00ea49SDavid Gibson     assert((uintptr_t)&env->dtl_size == ((uintptr_t)&env->dtl_addr + 8));
8999b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_DTL;
9009b00ea49SDavid Gibson     reg.addr = (uintptr_t)&env->dtl_addr;
9019b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
9029b00ea49SDavid Gibson     if (ret < 0) {
903da56ff91SPeter Maydell         DPRINTF("Unable to set dispatch trace log state to KVM: %s\n",
9049b00ea49SDavid Gibson                 strerror(errno));
9059b00ea49SDavid Gibson         return ret;
9069b00ea49SDavid Gibson     }
9079b00ea49SDavid Gibson 
9089b00ea49SDavid Gibson     if (!env->vpa_addr) {
9099b00ea49SDavid Gibson         reg.id = KVM_REG_PPC_VPA_ADDR;
9109b00ea49SDavid Gibson         reg.addr = (uintptr_t)&env->vpa_addr;
9119b00ea49SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
9129b00ea49SDavid Gibson         if (ret < 0) {
913da56ff91SPeter Maydell             DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno));
9149b00ea49SDavid Gibson             return ret;
9159b00ea49SDavid Gibson         }
9169b00ea49SDavid Gibson     }
9179b00ea49SDavid Gibson 
9189b00ea49SDavid Gibson     return 0;
9199b00ea49SDavid Gibson }
9209b00ea49SDavid Gibson #endif /* TARGET_PPC64 */
9219b00ea49SDavid Gibson 
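/* Push the Book3S MMU state (PVR, SDR1, SLB, segment registers and BATs) to
 * KVM with KVM_SET_SREGS. */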
922e5c0d3ceSDavid Gibson int kvmppc_put_books_sregs(PowerPCCPU *cpu)
923a7a00a72SDavid Gibson {
924a7a00a72SDavid Gibson     CPUPPCState *env = &cpu->env;
925a7a00a72SDavid Gibson     struct kvm_sregs sregs;
926a7a00a72SDavid Gibson     int i;
927a7a00a72SDavid Gibson 
928a7a00a72SDavid Gibson     sregs.pvr = env->spr[SPR_PVR];
929a7a00a72SDavid Gibson 
930a7a00a72SDavid Gibson     sregs.u.s.sdr1 = env->spr[SPR_SDR1];
931a7a00a72SDavid Gibson 
932a7a00a72SDavid Gibson     /* Sync SLB */
933a7a00a72SDavid Gibson #ifdef TARGET_PPC64
934a7a00a72SDavid Gibson     for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
935a7a00a72SDavid Gibson         sregs.u.s.ppc64.slb[i].slbe = env->slb[i].esid;
936a7a00a72SDavid Gibson         if (env->slb[i].esid & SLB_ESID_V) {
937a7a00a72SDavid Gibson             sregs.u.s.ppc64.slb[i].slbe |= i;
938a7a00a72SDavid Gibson         }
939a7a00a72SDavid Gibson         sregs.u.s.ppc64.slb[i].slbv = env->slb[i].vsid;
940a7a00a72SDavid Gibson     }
941a7a00a72SDavid Gibson #endif
942a7a00a72SDavid Gibson 
943a7a00a72SDavid Gibson     /* Sync SRs */
944a7a00a72SDavid Gibson     for (i = 0; i < 16; i++) {
945a7a00a72SDavid Gibson         sregs.u.s.ppc32.sr[i] = env->sr[i];
946a7a00a72SDavid Gibson     }
947a7a00a72SDavid Gibson 
948a7a00a72SDavid Gibson     /* Sync BATs */
949a7a00a72SDavid Gibson     for (i = 0; i < 8; i++) {
950a7a00a72SDavid Gibson         /* Beware. The upper and lower BAT words are packed into one 64-bit value here */
951a7a00a72SDavid Gibson         sregs.u.s.ppc32.dbat[i] = ((uint64_t)env->DBAT[0][i] << 32)
952a7a00a72SDavid Gibson             | env->DBAT[1][i];
953a7a00a72SDavid Gibson         sregs.u.s.ppc32.ibat[i] = ((uint64_t)env->IBAT[0][i] << 32)
954a7a00a72SDavid Gibson             | env->IBAT[1][i];
955a7a00a72SDavid Gibson     }
956a7a00a72SDavid Gibson 
957a7a00a72SDavid Gibson     return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
958a7a00a72SDavid Gibson }
959a7a00a72SDavid Gibson 
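/* Copy the QEMU-side vcpu state (GPRs, CR, SPRs, FP/VMX, sregs, TM state and
 * VPA) into KVM.  sregs and HIOR are only written at KVM_PUT_RESET_STATE
 * level and above. */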
96020d695a9SAndreas Färber int kvm_arch_put_registers(CPUState *cs, int level)
961d76d1650Saurel32 {
96220d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
96320d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
964d76d1650Saurel32     struct kvm_regs regs;
965d76d1650Saurel32     int ret;
966d76d1650Saurel32     int i;
967d76d1650Saurel32 
9681bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
9691bc22652SAndreas Färber     if (ret < 0) {
970d76d1650Saurel32         return ret;
9711bc22652SAndreas Färber     }
972d76d1650Saurel32 
973d76d1650Saurel32     regs.ctr = env->ctr;
974d76d1650Saurel32     regs.lr  = env->lr;
975da91a00fSRichard Henderson     regs.xer = cpu_read_xer(env);
976d76d1650Saurel32     regs.msr = env->msr;
977d76d1650Saurel32     regs.pc = env->nip;
978d76d1650Saurel32 
979d76d1650Saurel32     regs.srr0 = env->spr[SPR_SRR0];
980d76d1650Saurel32     regs.srr1 = env->spr[SPR_SRR1];
981d76d1650Saurel32 
982d76d1650Saurel32     regs.sprg0 = env->spr[SPR_SPRG0];
983d76d1650Saurel32     regs.sprg1 = env->spr[SPR_SPRG1];
984d76d1650Saurel32     regs.sprg2 = env->spr[SPR_SPRG2];
985d76d1650Saurel32     regs.sprg3 = env->spr[SPR_SPRG3];
986d76d1650Saurel32     regs.sprg4 = env->spr[SPR_SPRG4];
987d76d1650Saurel32     regs.sprg5 = env->spr[SPR_SPRG5];
988d76d1650Saurel32     regs.sprg6 = env->spr[SPR_SPRG6];
989d76d1650Saurel32     regs.sprg7 = env->spr[SPR_SPRG7];
990d76d1650Saurel32 
99190dc8812SScott Wood     regs.pid = env->spr[SPR_BOOKE_PID];
99290dc8812SScott Wood 
993d76d1650Saurel32     for (i = 0; i < 32; i++)
994d76d1650Saurel32         regs.gpr[i] = env->gpr[i];
995d76d1650Saurel32 
9964bddaf55SAlexey Kardashevskiy     regs.cr = 0;
9974bddaf55SAlexey Kardashevskiy     for (i = 0; i < 8; i++) {
9984bddaf55SAlexey Kardashevskiy         regs.cr |= (env->crf[i] & 15) << (4 * (7 - i));
9994bddaf55SAlexey Kardashevskiy     }
10004bddaf55SAlexey Kardashevskiy 
10011bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
1002d76d1650Saurel32     if (ret < 0)
1003d76d1650Saurel32         return ret;
1004d76d1650Saurel32 
100570b79849SDavid Gibson     kvm_put_fp(cs);
100670b79849SDavid Gibson 
100793dd5e85SScott Wood     if (env->tlb_dirty) {
10081bc22652SAndreas Färber         kvm_sw_tlb_put(cpu);
100993dd5e85SScott Wood         env->tlb_dirty = false;
101093dd5e85SScott Wood     }
101193dd5e85SScott Wood 
1012f1af19d7SDavid Gibson     if (cap_segstate && (level >= KVM_PUT_RESET_STATE)) {
1013a7a00a72SDavid Gibson         ret = kvmppc_put_books_sregs(cpu);
1014a7a00a72SDavid Gibson         if (ret < 0) {
1015f1af19d7SDavid Gibson             return ret;
1016f1af19d7SDavid Gibson         }
1017f1af19d7SDavid Gibson     }
1018f1af19d7SDavid Gibson 
1019f1af19d7SDavid Gibson     if (cap_hior && (level >= KVM_PUT_RESET_STATE)) {
1020d67d40eaSDavid Gibson         kvm_put_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
1021d67d40eaSDavid Gibson     }
1022f1af19d7SDavid Gibson 
1023d67d40eaSDavid Gibson     if (cap_one_reg) {
1024d67d40eaSDavid Gibson         int i;
1025d67d40eaSDavid Gibson 
1026d67d40eaSDavid Gibson         /* We deliberately ignore errors here: for kernels which have
1027d67d40eaSDavid Gibson          * the ONE_REG calls but don't support the specific
1028d67d40eaSDavid Gibson          * registers, there's a reasonable chance things will still
1029d67d40eaSDavid Gibson          * work, at least until we try to migrate. */
1030d67d40eaSDavid Gibson         for (i = 0; i < 1024; i++) {
1031d67d40eaSDavid Gibson             uint64_t id = env->spr_cb[i].one_reg_id;
1032d67d40eaSDavid Gibson 
1033d67d40eaSDavid Gibson             if (id != 0) {
1034d67d40eaSDavid Gibson                 kvm_put_one_spr(cs, id, i);
1035d67d40eaSDavid Gibson             }
1036f1af19d7SDavid Gibson         }
10379b00ea49SDavid Gibson 
10389b00ea49SDavid Gibson #ifdef TARGET_PPC64
103980b3f79bSAlexey Kardashevskiy         if (msr_ts) {
104080b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
104180b3f79bSAlexey Kardashevskiy                 kvm_set_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
104280b3f79bSAlexey Kardashevskiy             }
104380b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
104480b3f79bSAlexey Kardashevskiy                 kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
104580b3f79bSAlexey Kardashevskiy             }
104680b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
104780b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
104880b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
104980b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
105080b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
105180b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
105280b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
105380b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
105480b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
105580b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
105680b3f79bSAlexey Kardashevskiy         }
105780b3f79bSAlexey Kardashevskiy 
10589b00ea49SDavid Gibson         if (cap_papr) {
10599b00ea49SDavid Gibson             if (kvm_put_vpa(cs) < 0) {
1060da56ff91SPeter Maydell                 DPRINTF("Warning: Unable to set VPA information to KVM\n");
10619b00ea49SDavid Gibson             }
10629b00ea49SDavid Gibson         }
106398a8b524SAlexey Kardashevskiy 
106498a8b524SAlexey Kardashevskiy         kvm_set_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
10659b00ea49SDavid Gibson #endif /* TARGET_PPC64 */
1066f1af19d7SDavid Gibson     }
1067f1af19d7SDavid Gibson 
1068d76d1650Saurel32     return ret;
1069d76d1650Saurel32 }
1070d76d1650Saurel32 
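/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * The CR handling above packs env->crf[0] into the most-significant nibble
 * of the 32-bit CR image and env->crf[7] into the least-significant one;
 * kvm_arch_get_registers() below undoes it.  A hypothetical round-trip
 * helper pair would look like this:
 */
static inline uint32_t example_pack_cr(const CPUPPCState *env)
{
    uint32_t cr = 0;
    int i;

    for (i = 0; i < 8; i++) {
        /* crf[i] holds one 4-bit CR field */
        cr |= (env->crf[i] & 15) << (4 * (7 - i));
    }
    return cr;
}

static inline void example_unpack_cr(CPUPPCState *env, uint32_t cr)
{
    int i;

    /* walk from crf[7] upwards, consuming one nibble at a time */
    for (i = 7; i >= 0; i--) {
        env->crf[i] = cr & 15;
        cr >>= 4;
    }
}
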
1071c371c2e3SBharat Bhushan static void kvm_sync_excp(CPUPPCState *env, int vector, int ivor)
1072c371c2e3SBharat Bhushan {
1073c371c2e3SBharat Bhushan      env->excp_vectors[vector] = env->spr[ivor] + env->spr[SPR_BOOKE_IVPR];
1074c371c2e3SBharat Bhushan }
1075c371c2e3SBharat Bhushan 
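/*
 * Editor's note: an illustrative example, not part of the original file.
 * On BookE the effective exception vector computed by kvm_sync_excp() is
 * the IVPR base plus the IVOR offset, e.g. with IVPR = 0xfff00000 and
 * IVOR2 = 0x60 the data storage handler ends up at 0xfff00060.
 */
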
1076a7a00a72SDavid Gibson static int kvmppc_get_booke_sregs(PowerPCCPU *cpu)
1077d76d1650Saurel32 {
107820d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
1079ba5e5090SAlexander Graf     struct kvm_sregs sregs;
1080a7a00a72SDavid Gibson     int ret;
1081d76d1650Saurel32 
1082a7a00a72SDavid Gibson     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
108390dc8812SScott Wood     if (ret < 0) {
108490dc8812SScott Wood         return ret;
108590dc8812SScott Wood     }
108690dc8812SScott Wood 
108790dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_BASE) {
108890dc8812SScott Wood         env->spr[SPR_BOOKE_CSRR0] = sregs.u.e.csrr0;
108990dc8812SScott Wood         env->spr[SPR_BOOKE_CSRR1] = sregs.u.e.csrr1;
109090dc8812SScott Wood         env->spr[SPR_BOOKE_ESR] = sregs.u.e.esr;
109190dc8812SScott Wood         env->spr[SPR_BOOKE_DEAR] = sregs.u.e.dear;
109290dc8812SScott Wood         env->spr[SPR_BOOKE_MCSR] = sregs.u.e.mcsr;
109390dc8812SScott Wood         env->spr[SPR_BOOKE_TSR] = sregs.u.e.tsr;
109490dc8812SScott Wood         env->spr[SPR_BOOKE_TCR] = sregs.u.e.tcr;
109590dc8812SScott Wood         env->spr[SPR_DECR] = sregs.u.e.dec;
109690dc8812SScott Wood         env->spr[SPR_TBL] = sregs.u.e.tb & 0xffffffff;
109790dc8812SScott Wood         env->spr[SPR_TBU] = sregs.u.e.tb >> 32;
109890dc8812SScott Wood         env->spr[SPR_VRSAVE] = sregs.u.e.vrsave;
109990dc8812SScott Wood     }
110090dc8812SScott Wood 
110190dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_ARCH206) {
110290dc8812SScott Wood         env->spr[SPR_BOOKE_PIR] = sregs.u.e.pir;
110390dc8812SScott Wood         env->spr[SPR_BOOKE_MCSRR0] = sregs.u.e.mcsrr0;
110490dc8812SScott Wood         env->spr[SPR_BOOKE_MCSRR1] = sregs.u.e.mcsrr1;
110590dc8812SScott Wood         env->spr[SPR_BOOKE_DECAR] = sregs.u.e.decar;
110690dc8812SScott Wood         env->spr[SPR_BOOKE_IVPR] = sregs.u.e.ivpr;
110790dc8812SScott Wood     }
110890dc8812SScott Wood 
110990dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_64) {
111090dc8812SScott Wood         env->spr[SPR_BOOKE_EPCR] = sregs.u.e.epcr;
111190dc8812SScott Wood     }
111290dc8812SScott Wood 
111390dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_SPRG8) {
111490dc8812SScott Wood         env->spr[SPR_BOOKE_SPRG8] = sregs.u.e.sprg8;
111590dc8812SScott Wood     }
111690dc8812SScott Wood 
111790dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_IVOR) {
111890dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0];
1119c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_CRITICAL,  SPR_BOOKE_IVOR0);
112090dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1];
1121c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_MCHECK,  SPR_BOOKE_IVOR1);
112290dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2];
1123c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DSI,  SPR_BOOKE_IVOR2);
112490dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3];
1125c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_ISI,  SPR_BOOKE_IVOR3);
112690dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4];
1127c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_EXTERNAL,  SPR_BOOKE_IVOR4);
112890dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5];
1129c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_ALIGN,  SPR_BOOKE_IVOR5);
113090dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6];
1131c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_PROGRAM,  SPR_BOOKE_IVOR6);
113290dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7];
1133c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_FPU,  SPR_BOOKE_IVOR7);
113490dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8];
1135c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_SYSCALL,  SPR_BOOKE_IVOR8);
113690dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9];
1137c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_APU,  SPR_BOOKE_IVOR9);
113890dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10];
1139c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DECR,  SPR_BOOKE_IVOR10);
114090dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11];
1141c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_FIT,  SPR_BOOKE_IVOR11);
114290dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12];
1143c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_WDT,  SPR_BOOKE_IVOR12);
114490dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13];
1145c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DTLB,  SPR_BOOKE_IVOR13);
114690dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14];
1147c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_ITLB,  SPR_BOOKE_IVOR14);
114890dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15];
1149c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DEBUG,  SPR_BOOKE_IVOR15);
115090dc8812SScott Wood 
115190dc8812SScott Wood         if (sregs.u.e.features & KVM_SREGS_E_SPE) {
115290dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0];
1153c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_SPEU,  SPR_BOOKE_IVOR32);
115490dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1];
1155c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_EFPDI,  SPR_BOOKE_IVOR33);
115690dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2];
1157c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_EFPRI,  SPR_BOOKE_IVOR34);
115890dc8812SScott Wood         }
115990dc8812SScott Wood 
116090dc8812SScott Wood         if (sregs.u.e.features & KVM_SREGS_E_PM) {
116190dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3];
1162c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_EPERFM,  SPR_BOOKE_IVOR35);
116390dc8812SScott Wood         }
116490dc8812SScott Wood 
116590dc8812SScott Wood         if (sregs.u.e.features & KVM_SREGS_E_PC) {
116690dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4];
1167c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_DOORI,  SPR_BOOKE_IVOR36);
116890dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5];
1169c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_DOORCI, SPR_BOOKE_IVOR37);
117090dc8812SScott Wood         }
117190dc8812SScott Wood     }
117290dc8812SScott Wood 
117390dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_ARCH206_MMU) {
117490dc8812SScott Wood         env->spr[SPR_BOOKE_MAS0] = sregs.u.e.mas0;
117590dc8812SScott Wood         env->spr[SPR_BOOKE_MAS1] = sregs.u.e.mas1;
117690dc8812SScott Wood         env->spr[SPR_BOOKE_MAS2] = sregs.u.e.mas2;
117790dc8812SScott Wood         env->spr[SPR_BOOKE_MAS3] = sregs.u.e.mas7_3 & 0xffffffff;
117890dc8812SScott Wood         env->spr[SPR_BOOKE_MAS4] = sregs.u.e.mas4;
117990dc8812SScott Wood         env->spr[SPR_BOOKE_MAS6] = sregs.u.e.mas6;
118090dc8812SScott Wood         env->spr[SPR_BOOKE_MAS7] = sregs.u.e.mas7_3 >> 32;
118190dc8812SScott Wood         env->spr[SPR_MMUCFG] = sregs.u.e.mmucfg;
118290dc8812SScott Wood         env->spr[SPR_BOOKE_TLB0CFG] = sregs.u.e.tlbcfg[0];
118390dc8812SScott Wood         env->spr[SPR_BOOKE_TLB1CFG] = sregs.u.e.tlbcfg[1];
118490dc8812SScott Wood     }
118590dc8812SScott Wood 
118690dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_EXP) {
118790dc8812SScott Wood         env->spr[SPR_BOOKE_EPR] = sregs.u.e.epr;
118890dc8812SScott Wood     }
118990dc8812SScott Wood 
119090dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_PD) {
119190dc8812SScott Wood         env->spr[SPR_BOOKE_EPLC] = sregs.u.e.eplc;
119290dc8812SScott Wood         env->spr[SPR_BOOKE_EPSC] = sregs.u.e.epsc;
119390dc8812SScott Wood     }
119490dc8812SScott Wood 
119590dc8812SScott Wood     if (sregs.u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
119690dc8812SScott Wood         env->spr[SPR_E500_SVR] = sregs.u.e.impl.fsl.svr;
119790dc8812SScott Wood         env->spr[SPR_Exxx_MCAR] = sregs.u.e.impl.fsl.mcar;
119890dc8812SScott Wood         env->spr[SPR_HID0] = sregs.u.e.impl.fsl.hid0;
119990dc8812SScott Wood 
120090dc8812SScott Wood         if (sregs.u.e.impl.fsl.features & KVM_SREGS_E_FSL_PIDn) {
120190dc8812SScott Wood             env->spr[SPR_BOOKE_PID1] = sregs.u.e.impl.fsl.pid1;
120290dc8812SScott Wood             env->spr[SPR_BOOKE_PID2] = sregs.u.e.impl.fsl.pid2;
120390dc8812SScott Wood         }
120490dc8812SScott Wood     }
1205a7a00a72SDavid Gibson 
1206a7a00a72SDavid Gibson     return 0;
1207fafc0b6aSAlexander Graf }
120890dc8812SScott Wood 
1209a7a00a72SDavid Gibson static int kvmppc_get_books_sregs(PowerPCCPU *cpu)
1210a7a00a72SDavid Gibson {
1211a7a00a72SDavid Gibson     CPUPPCState *env = &cpu->env;
1212a7a00a72SDavid Gibson     struct kvm_sregs sregs;
1213a7a00a72SDavid Gibson     int ret;
1214a7a00a72SDavid Gibson     int i;
1215a7a00a72SDavid Gibson 
1216a7a00a72SDavid Gibson     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
121790dc8812SScott Wood     if (ret < 0) {
121890dc8812SScott Wood         return ret;
121990dc8812SScott Wood     }
122090dc8812SScott Wood 
1221e57ca75cSDavid Gibson     if (!cpu->vhyp) {
1222bb593904SDavid Gibson         ppc_store_sdr1(env, sregs.u.s.sdr1);
1223f3c75d42SAneesh Kumar K.V     }
1224ba5e5090SAlexander Graf 
1225ba5e5090SAlexander Graf     /* Sync SLB */
122682c09f2fSAlexander Graf #ifdef TARGET_PPC64
12274b4d4a21SAneesh Kumar K.V     /*
12284b4d4a21SAneesh Kumar K.V      * The packed SLB array we get from KVM_GET_SREGS only contains
1229a7a00a72SDavid Gibson      * information about valid entries. So we flush our internal copy
1230a7a00a72SDavid Gibson      * to get rid of stale ones, then put all valid SLB entries back
1231a7a00a72SDavid Gibson      * in.
12324b4d4a21SAneesh Kumar K.V      */
12334b4d4a21SAneesh Kumar K.V     memset(env->slb, 0, sizeof(env->slb));
1234d83af167SAneesh Kumar K.V     for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
12354b4d4a21SAneesh Kumar K.V         target_ulong rb = sregs.u.s.ppc64.slb[i].slbe;
12364b4d4a21SAneesh Kumar K.V         target_ulong rs = sregs.u.s.ppc64.slb[i].slbv;
12374b4d4a21SAneesh Kumar K.V         /*
12384b4d4a21SAneesh Kumar K.V          * Only restore valid entries
12394b4d4a21SAneesh Kumar K.V          */
12404b4d4a21SAneesh Kumar K.V         if (rb & SLB_ESID_V) {
1241bcd81230SDavid Gibson             ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs);
12424b4d4a21SAneesh Kumar K.V         }
1243ba5e5090SAlexander Graf     }
124482c09f2fSAlexander Graf #endif
1245ba5e5090SAlexander Graf 
1246ba5e5090SAlexander Graf     /* Sync SRs */
1247ba5e5090SAlexander Graf     for (i = 0; i < 16; i++) {
1248ba5e5090SAlexander Graf         env->sr[i] = sregs.u.s.ppc32.sr[i];
1249ba5e5090SAlexander Graf     }
1250ba5e5090SAlexander Graf 
1251ba5e5090SAlexander Graf     /* Sync BATs */
1252ba5e5090SAlexander Graf     for (i = 0; i < 8; i++) {
1253ba5e5090SAlexander Graf         env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff;
1254ba5e5090SAlexander Graf         env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32;
1255ba5e5090SAlexander Graf         env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff;
1256ba5e5090SAlexander Graf         env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32;
1257ba5e5090SAlexander Graf     }
1258a7a00a72SDavid Gibson 
1259a7a00a72SDavid Gibson     return 0;
1260a7a00a72SDavid Gibson }
1261a7a00a72SDavid Gibson 
1262a7a00a72SDavid Gibson int kvm_arch_get_registers(CPUState *cs)
1263a7a00a72SDavid Gibson {
1264a7a00a72SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
1265a7a00a72SDavid Gibson     CPUPPCState *env = &cpu->env;
1266a7a00a72SDavid Gibson     struct kvm_regs regs;
1267a7a00a72SDavid Gibson     uint32_t cr;
1268a7a00a72SDavid Gibson     int i, ret;
1269a7a00a72SDavid Gibson 
1270a7a00a72SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
1271a7a00a72SDavid Gibson     if (ret < 0)
1272a7a00a72SDavid Gibson         return ret;
1273a7a00a72SDavid Gibson 
1274a7a00a72SDavid Gibson     cr = regs.cr;
1275a7a00a72SDavid Gibson     for (i = 7; i >= 0; i--) {
1276a7a00a72SDavid Gibson         env->crf[i] = cr & 15;
1277a7a00a72SDavid Gibson         cr >>= 4;
1278a7a00a72SDavid Gibson     }
1279a7a00a72SDavid Gibson 
1280a7a00a72SDavid Gibson     env->ctr = regs.ctr;
1281a7a00a72SDavid Gibson     env->lr = regs.lr;
1282a7a00a72SDavid Gibson     cpu_write_xer(env, regs.xer);
1283a7a00a72SDavid Gibson     env->msr = regs.msr;
1284a7a00a72SDavid Gibson     env->nip = regs.pc;
1285a7a00a72SDavid Gibson 
1286a7a00a72SDavid Gibson     env->spr[SPR_SRR0] = regs.srr0;
1287a7a00a72SDavid Gibson     env->spr[SPR_SRR1] = regs.srr1;
1288a7a00a72SDavid Gibson 
1289a7a00a72SDavid Gibson     env->spr[SPR_SPRG0] = regs.sprg0;
1290a7a00a72SDavid Gibson     env->spr[SPR_SPRG1] = regs.sprg1;
1291a7a00a72SDavid Gibson     env->spr[SPR_SPRG2] = regs.sprg2;
1292a7a00a72SDavid Gibson     env->spr[SPR_SPRG3] = regs.sprg3;
1293a7a00a72SDavid Gibson     env->spr[SPR_SPRG4] = regs.sprg4;
1294a7a00a72SDavid Gibson     env->spr[SPR_SPRG5] = regs.sprg5;
1295a7a00a72SDavid Gibson     env->spr[SPR_SPRG6] = regs.sprg6;
1296a7a00a72SDavid Gibson     env->spr[SPR_SPRG7] = regs.sprg7;
1297a7a00a72SDavid Gibson 
1298a7a00a72SDavid Gibson     env->spr[SPR_BOOKE_PID] = regs.pid;
1299a7a00a72SDavid Gibson 
1300a7a00a72SDavid Gibson     for (i = 0; i < 32; i++)
1301a7a00a72SDavid Gibson         env->gpr[i] = regs.gpr[i];
1302a7a00a72SDavid Gibson 
1303a7a00a72SDavid Gibson     kvm_get_fp(cs);
1304a7a00a72SDavid Gibson 
1305a7a00a72SDavid Gibson     if (cap_booke_sregs) {
1306a7a00a72SDavid Gibson         ret = kvmppc_get_booke_sregs(cpu);
1307a7a00a72SDavid Gibson         if (ret < 0) {
1308a7a00a72SDavid Gibson             return ret;
1309a7a00a72SDavid Gibson         }
1310a7a00a72SDavid Gibson     }
1311a7a00a72SDavid Gibson 
1312a7a00a72SDavid Gibson     if (cap_segstate) {
1313a7a00a72SDavid Gibson         ret = kvmppc_get_books_sregs(cpu);
1314a7a00a72SDavid Gibson         if (ret < 0) {
1315a7a00a72SDavid Gibson             return ret;
1316a7a00a72SDavid Gibson         }
1317fafc0b6aSAlexander Graf     }
1318ba5e5090SAlexander Graf 
1319d67d40eaSDavid Gibson     if (cap_hior) {
1320d67d40eaSDavid Gibson         kvm_get_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
1321d67d40eaSDavid Gibson     }
1322d67d40eaSDavid Gibson 
1323d67d40eaSDavid Gibson     if (cap_one_reg) {
1324d67d40eaSDavid Gibson         int i;
1325d67d40eaSDavid Gibson 
1326d67d40eaSDavid Gibson         /* We deliberately ignore errors here: for kernels which have
1327d67d40eaSDavid Gibson          * the ONE_REG calls but don't support the specific
1328d67d40eaSDavid Gibson          * registers, there's a reasonable chance things will still
1329d67d40eaSDavid Gibson          * work, at least until we try to migrate. */
1330d67d40eaSDavid Gibson         for (i = 0; i < 1024; i++) {
1331d67d40eaSDavid Gibson             uint64_t id = env->spr_cb[i].one_reg_id;
1332d67d40eaSDavid Gibson 
1333d67d40eaSDavid Gibson             if (id != 0) {
1334d67d40eaSDavid Gibson                 kvm_get_one_spr(cs, id, i);
1335d67d40eaSDavid Gibson             }
1336d67d40eaSDavid Gibson         }
13379b00ea49SDavid Gibson 
13389b00ea49SDavid Gibson #ifdef TARGET_PPC64
133980b3f79bSAlexey Kardashevskiy         if (msr_ts) {
134080b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
134180b3f79bSAlexey Kardashevskiy                 kvm_get_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
134280b3f79bSAlexey Kardashevskiy             }
134380b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
134480b3f79bSAlexey Kardashevskiy                 kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
134580b3f79bSAlexey Kardashevskiy             }
134680b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
134780b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
134880b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
134980b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
135080b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
135180b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
135280b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
135380b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
135480b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
135580b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
135680b3f79bSAlexey Kardashevskiy         }
135780b3f79bSAlexey Kardashevskiy 
13589b00ea49SDavid Gibson         if (cap_papr) {
13599b00ea49SDavid Gibson             if (kvm_get_vpa(cs) < 0) {
1360da56ff91SPeter Maydell                 DPRINTF("Warning: Unable to get VPA information from KVM\n");
13619b00ea49SDavid Gibson             }
13629b00ea49SDavid Gibson         }
136398a8b524SAlexey Kardashevskiy 
136498a8b524SAlexey Kardashevskiy         kvm_get_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
13659b00ea49SDavid Gibson #endif
1366d67d40eaSDavid Gibson     }
1367d67d40eaSDavid Gibson 
1368d76d1650Saurel32     return 0;
1369d76d1650Saurel32 }
1370d76d1650Saurel32 
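/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * Callers normally do not invoke kvm_arch_get_registers() directly; they
 * ask the generic accelerator layer to refresh the cached register state
 * and then read env, for example:
 */
static inline target_ulong example_read_guest_pc(PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);

    cpu_synchronize_state(cs);  /* ends up in kvm_arch_get_registers() */
    return cpu->env.nip;
}
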
13711bc22652SAndreas Färber int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
1372fc87e185SAlexander Graf {
1373fc87e185SAlexander Graf     unsigned virq = level ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;
1374fc87e185SAlexander Graf 
1375fc87e185SAlexander Graf     if (irq != PPC_INTERRUPT_EXT) {
1376fc87e185SAlexander Graf         return 0;
1377fc87e185SAlexander Graf     }
1378fc87e185SAlexander Graf 
1379fc87e185SAlexander Graf     if (!kvm_enabled() || !cap_interrupt_unset || !cap_interrupt_level) {
1380fc87e185SAlexander Graf         return 0;
1381fc87e185SAlexander Graf     }
1382fc87e185SAlexander Graf 
13831bc22652SAndreas Färber     kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq);
1384fc87e185SAlexander Graf 
1385fc87e185SAlexander Graf     return 0;
1386fc87e185SAlexander Graf }
1387fc87e185SAlexander Graf 
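/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * A hypothetical caller asserting and then releasing the external
 * interrupt pin of a vCPU.  Only PPC_INTERRUPT_EXT is forwarded to KVM
 * here; every other pin is left to the QEMU-side interrupt logic.
 */
static inline void example_pulse_external_irq(PowerPCCPU *cpu)
{
    kvmppc_set_interrupt(cpu, PPC_INTERRUPT_EXT, 1);    /* raise the line */
    kvmppc_set_interrupt(cpu, PPC_INTERRUPT_EXT, 0);    /* lower it again */
}
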
138816415335SAlexander Graf #if defined(TARGET_PPCEMB)
138916415335SAlexander Graf #define PPC_INPUT_INT PPC40x_INPUT_INT
139016415335SAlexander Graf #elif defined(TARGET_PPC64)
139116415335SAlexander Graf #define PPC_INPUT_INT PPC970_INPUT_INT
139216415335SAlexander Graf #else
139316415335SAlexander Graf #define PPC_INPUT_INT PPC6xx_INPUT_INT
139416415335SAlexander Graf #endif
139516415335SAlexander Graf 
139620d695a9SAndreas Färber void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
1397d76d1650Saurel32 {
139820d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
139920d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
1400d76d1650Saurel32     int r;
1401d76d1650Saurel32     unsigned irq;
1402d76d1650Saurel32 
14034b8523eeSJan Kiszka     qemu_mutex_lock_iothread();
14044b8523eeSJan Kiszka 
14055cbdb3a3SStefan Weil     /* PowerPC QEMU tracks the various core input pins (interrupt, critical
1406d76d1650Saurel32      * interrupt, reset, etc.) in PPC-specific env->irq_input_state. */
1407fc87e185SAlexander Graf     if (!cap_interrupt_level &&
1408fc87e185SAlexander Graf         run->ready_for_interrupt_injection &&
1409259186a7SAndreas Färber         (cs->interrupt_request & CPU_INTERRUPT_HARD) &&
141016415335SAlexander Graf         (env->irq_input_state & (1<<PPC_INPUT_INT)))
1411d76d1650Saurel32     {
1412d76d1650Saurel32         /* For now KVM disregards the 'irq' argument. However, in the
1413d76d1650Saurel32          * future KVM could cache it in-kernel to avoid a heavyweight exit
1414d76d1650Saurel32          * when reading the UIC.
1415d76d1650Saurel32          */
1416fc87e185SAlexander Graf         irq = KVM_INTERRUPT_SET;
1417d76d1650Saurel32 
1418da56ff91SPeter Maydell         DPRINTF("injected interrupt %d\n", irq);
14191bc22652SAndreas Färber         r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &irq);
142055e5c285SAndreas Färber         if (r < 0) {
142155e5c285SAndreas Färber             printf("cpu %d fail inject %x\n", cs->cpu_index, irq);
142255e5c285SAndreas Färber         }
1423c821c2bdSAlexander Graf 
1424c821c2bdSAlexander Graf         /* Always wake up soon in case the interrupt was level based */
1425bc72ad67SAlex Bligh         timer_mod(idle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
142673bcb24dSRutuja Shah                        (NANOSECONDS_PER_SECOND / 50));
1427d76d1650Saurel32     }
1428d76d1650Saurel32 
1429d76d1650Saurel32     /* We don't know if there are more interrupts pending after this. However,
1430d76d1650Saurel32      * the guest will return to userspace in the course of handling this one
1431d76d1650Saurel32      * anyway, so we will get a chance to deliver the rest. */
14324b8523eeSJan Kiszka 
14334b8523eeSJan Kiszka     qemu_mutex_unlock_iothread();
1434d76d1650Saurel32 }
1435d76d1650Saurel32 
14364c663752SPaolo Bonzini MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
1437d76d1650Saurel32 {
14384c663752SPaolo Bonzini     return MEMTXATTRS_UNSPECIFIED;
1439d76d1650Saurel32 }
1440d76d1650Saurel32 
144120d695a9SAndreas Färber int kvm_arch_process_async_events(CPUState *cs)
14420af691d7SMarcelo Tosatti {
1443259186a7SAndreas Färber     return cs->halted;
14440af691d7SMarcelo Tosatti }
14450af691d7SMarcelo Tosatti 
1446259186a7SAndreas Färber static int kvmppc_handle_halt(PowerPCCPU *cpu)
1447d76d1650Saurel32 {
1448259186a7SAndreas Färber     CPUState *cs = CPU(cpu);
1449259186a7SAndreas Färber     CPUPPCState *env = &cpu->env;
1450259186a7SAndreas Färber 
1451259186a7SAndreas Färber     if (!(cs->interrupt_request & CPU_INTERRUPT_HARD) && (msr_ee)) {
1452259186a7SAndreas Färber         cs->halted = 1;
145327103424SAndreas Färber         cs->exception_index = EXCP_HLT;
1454d76d1650Saurel32     }
1455d76d1650Saurel32 
1456bb4ea393SJan Kiszka     return 0;
1457d76d1650Saurel32 }
1458d76d1650Saurel32 
1459d76d1650Saurel32 /* map dcr access to existing qemu dcr emulation */
14601328c2bfSAndreas Färber static int kvmppc_handle_dcr_read(CPUPPCState *env, uint32_t dcrn, uint32_t *data)
1461d76d1650Saurel32 {
1462d76d1650Saurel32     if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0)
1463d76d1650Saurel32         fprintf(stderr, "Read to unhandled DCR (0x%x)\n", dcrn);
1464d76d1650Saurel32 
1465bb4ea393SJan Kiszka     return 0;
1466d76d1650Saurel32 }
1467d76d1650Saurel32 
14681328c2bfSAndreas Färber static int kvmppc_handle_dcr_write(CPUPPCState *env, uint32_t dcrn, uint32_t data)
1469d76d1650Saurel32 {
1470d76d1650Saurel32     if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0)
1471d76d1650Saurel32         fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);
1472d76d1650Saurel32 
1473bb4ea393SJan Kiszka     return 0;
1474d76d1650Saurel32 }
1475d76d1650Saurel32 
14768a0548f9SBharat Bhushan int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
14778a0548f9SBharat Bhushan {
14788a0548f9SBharat Bhushan     /* Mixed endian case is not handled */
14798a0548f9SBharat Bhushan     uint32_t sc = debug_inst_opcode;
14808a0548f9SBharat Bhushan 
14818a0548f9SBharat Bhushan     if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
14828a0548f9SBharat Bhushan                             sizeof(sc), 0) ||
14838a0548f9SBharat Bhushan         cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 1)) {
14848a0548f9SBharat Bhushan         return -EINVAL;
14858a0548f9SBharat Bhushan     }
14868a0548f9SBharat Bhushan 
14878a0548f9SBharat Bhushan     return 0;
14888a0548f9SBharat Bhushan }
14898a0548f9SBharat Bhushan 
14908a0548f9SBharat Bhushan int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
14918a0548f9SBharat Bhushan {
14928a0548f9SBharat Bhushan     uint32_t sc;
14938a0548f9SBharat Bhushan 
14948a0548f9SBharat Bhushan     if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 0) ||
14958a0548f9SBharat Bhushan         sc != debug_inst_opcode ||
14968a0548f9SBharat Bhushan         cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
14978a0548f9SBharat Bhushan                             sizeof(sc), 1)) {
14988a0548f9SBharat Bhushan         return -EINVAL;
14998a0548f9SBharat Bhushan     }
15008a0548f9SBharat Bhushan 
15018a0548f9SBharat Bhushan     return 0;
15028a0548f9SBharat Bhushan }
15038a0548f9SBharat Bhushan 
150488365d17SBharat Bhushan static int find_hw_breakpoint(target_ulong addr, int type)
150588365d17SBharat Bhushan {
150688365d17SBharat Bhushan     int n;
150788365d17SBharat Bhushan 
150888365d17SBharat Bhushan     assert((nb_hw_breakpoint + nb_hw_watchpoint)
150988365d17SBharat Bhushan            <= ARRAY_SIZE(hw_debug_points));
151088365d17SBharat Bhushan 
151188365d17SBharat Bhushan     for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
151288365d17SBharat Bhushan         if (hw_debug_points[n].addr == addr &&
151388365d17SBharat Bhushan              hw_debug_points[n].type == type) {
151488365d17SBharat Bhushan             return n;
151588365d17SBharat Bhushan         }
151688365d17SBharat Bhushan     }
151788365d17SBharat Bhushan 
151888365d17SBharat Bhushan     return -1;
151988365d17SBharat Bhushan }
152088365d17SBharat Bhushan 
152188365d17SBharat Bhushan static int find_hw_watchpoint(target_ulong addr, int *flag)
152288365d17SBharat Bhushan {
152388365d17SBharat Bhushan     int n;
152488365d17SBharat Bhushan 
152588365d17SBharat Bhushan     n = find_hw_breakpoint(addr, GDB_WATCHPOINT_ACCESS);
152688365d17SBharat Bhushan     if (n >= 0) {
152788365d17SBharat Bhushan         *flag = BP_MEM_ACCESS;
152888365d17SBharat Bhushan         return n;
152988365d17SBharat Bhushan     }
153088365d17SBharat Bhushan 
153188365d17SBharat Bhushan     n = find_hw_breakpoint(addr, GDB_WATCHPOINT_WRITE);
153288365d17SBharat Bhushan     if (n >= 0) {
153388365d17SBharat Bhushan         *flag = BP_MEM_WRITE;
153488365d17SBharat Bhushan         return n;
153588365d17SBharat Bhushan     }
153688365d17SBharat Bhushan 
153788365d17SBharat Bhushan     n = find_hw_breakpoint(addr, GDB_WATCHPOINT_READ);
153888365d17SBharat Bhushan     if (n >= 0) {
153988365d17SBharat Bhushan         *flag = BP_MEM_READ;
154088365d17SBharat Bhushan         return n;
154188365d17SBharat Bhushan     }
154288365d17SBharat Bhushan 
154388365d17SBharat Bhushan     return -1;
154488365d17SBharat Bhushan }
154588365d17SBharat Bhushan 
154688365d17SBharat Bhushan int kvm_arch_insert_hw_breakpoint(target_ulong addr,
154788365d17SBharat Bhushan                                   target_ulong len, int type)
154888365d17SBharat Bhushan {
154988365d17SBharat Bhushan     if ((nb_hw_breakpoint + nb_hw_watchpoint) >= ARRAY_SIZE(hw_debug_points)) {
155088365d17SBharat Bhushan         return -ENOBUFS;
155188365d17SBharat Bhushan     }
155288365d17SBharat Bhushan 
155388365d17SBharat Bhushan     hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].addr = addr;
155488365d17SBharat Bhushan     hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].type = type;
155588365d17SBharat Bhushan 
155688365d17SBharat Bhushan     switch (type) {
155788365d17SBharat Bhushan     case GDB_BREAKPOINT_HW:
155888365d17SBharat Bhushan         if (nb_hw_breakpoint >= max_hw_breakpoint) {
155988365d17SBharat Bhushan             return -ENOBUFS;
156088365d17SBharat Bhushan         }
156188365d17SBharat Bhushan 
156288365d17SBharat Bhushan         if (find_hw_breakpoint(addr, type) >= 0) {
156388365d17SBharat Bhushan             return -EEXIST;
156488365d17SBharat Bhushan         }
156588365d17SBharat Bhushan 
156688365d17SBharat Bhushan         nb_hw_breakpoint++;
156788365d17SBharat Bhushan         break;
156888365d17SBharat Bhushan 
156988365d17SBharat Bhushan     case GDB_WATCHPOINT_WRITE:
157088365d17SBharat Bhushan     case GDB_WATCHPOINT_READ:
157188365d17SBharat Bhushan     case GDB_WATCHPOINT_ACCESS:
157288365d17SBharat Bhushan         if (nb_hw_watchpoint >= max_hw_watchpoint) {
157388365d17SBharat Bhushan             return -ENOBUFS;
157488365d17SBharat Bhushan         }
157588365d17SBharat Bhushan 
157688365d17SBharat Bhushan         if (find_hw_breakpoint(addr, type) >= 0) {
157788365d17SBharat Bhushan             return -EEXIST;
157888365d17SBharat Bhushan         }
157988365d17SBharat Bhushan 
158088365d17SBharat Bhushan         nb_hw_watchpoint++;
158188365d17SBharat Bhushan         break;
158288365d17SBharat Bhushan 
158388365d17SBharat Bhushan     default:
158488365d17SBharat Bhushan         return -ENOSYS;
158588365d17SBharat Bhushan     }
158688365d17SBharat Bhushan 
158788365d17SBharat Bhushan     return 0;
158888365d17SBharat Bhushan }
158988365d17SBharat Bhushan 
159088365d17SBharat Bhushan int kvm_arch_remove_hw_breakpoint(target_ulong addr,
159188365d17SBharat Bhushan                                   target_ulong len, int type)
159288365d17SBharat Bhushan {
159388365d17SBharat Bhushan     int n;
159488365d17SBharat Bhushan 
159588365d17SBharat Bhushan     n = find_hw_breakpoint(addr, type);
159688365d17SBharat Bhushan     if (n < 0) {
159788365d17SBharat Bhushan         return -ENOENT;
159888365d17SBharat Bhushan     }
159988365d17SBharat Bhushan 
160088365d17SBharat Bhushan     switch (type) {
160188365d17SBharat Bhushan     case GDB_BREAKPOINT_HW:
160288365d17SBharat Bhushan         nb_hw_breakpoint--;
160388365d17SBharat Bhushan         break;
160488365d17SBharat Bhushan 
160588365d17SBharat Bhushan     case GDB_WATCHPOINT_WRITE:
160688365d17SBharat Bhushan     case GDB_WATCHPOINT_READ:
160788365d17SBharat Bhushan     case GDB_WATCHPOINT_ACCESS:
160888365d17SBharat Bhushan         nb_hw_watchpoint--;
160988365d17SBharat Bhushan         break;
161088365d17SBharat Bhushan 
161188365d17SBharat Bhushan     default:
161288365d17SBharat Bhushan         return -ENOSYS;
161388365d17SBharat Bhushan     }
161488365d17SBharat Bhushan     hw_debug_points[n] = hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint];
161588365d17SBharat Bhushan 
161688365d17SBharat Bhushan     return 0;
161788365d17SBharat Bhushan }
161888365d17SBharat Bhushan 
161988365d17SBharat Bhushan void kvm_arch_remove_all_hw_breakpoints(void)
162088365d17SBharat Bhushan {
162188365d17SBharat Bhushan     nb_hw_breakpoint = nb_hw_watchpoint = 0;
162288365d17SBharat Bhushan }
162388365d17SBharat Bhushan 
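/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * This is how the gdbstub-facing hooks above are meant to be driven; the
 * addresses used here are made up for the example.
 */
static inline int example_setup_hw_debug(void)
{
    int ret;

    ret = kvm_arch_insert_hw_breakpoint(0x100, 4, GDB_BREAKPOINT_HW);
    if (ret < 0) {
        return ret;                     /* -ENOBUFS or -EEXIST */
    }

    ret = kvm_arch_insert_hw_breakpoint(0x2000, 4, GDB_WATCHPOINT_WRITE);
    if (ret < 0) {
        kvm_arch_remove_hw_breakpoint(0x100, 4, GDB_BREAKPOINT_HW);
        return ret;
    }

    return 0;
}
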
16248a0548f9SBharat Bhushan void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
16258a0548f9SBharat Bhushan {
162688365d17SBharat Bhushan     int n;
162788365d17SBharat Bhushan 
16288a0548f9SBharat Bhushan     /* Software Breakpoint updates */
16298a0548f9SBharat Bhushan     if (kvm_sw_breakpoints_active(cs)) {
16308a0548f9SBharat Bhushan         dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
16318a0548f9SBharat Bhushan     }
163288365d17SBharat Bhushan 
163388365d17SBharat Bhushan     assert((nb_hw_breakpoint + nb_hw_watchpoint)
163488365d17SBharat Bhushan            <= ARRAY_SIZE(hw_debug_points));
163588365d17SBharat Bhushan     assert((nb_hw_breakpoint + nb_hw_watchpoint) <= ARRAY_SIZE(dbg->arch.bp));
163688365d17SBharat Bhushan 
163788365d17SBharat Bhushan     if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
163888365d17SBharat Bhushan         dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
163988365d17SBharat Bhushan         memset(dbg->arch.bp, 0, sizeof(dbg->arch.bp));
164088365d17SBharat Bhushan         for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
164188365d17SBharat Bhushan             switch (hw_debug_points[n].type) {
164288365d17SBharat Bhushan             case GDB_BREAKPOINT_HW:
164388365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_BREAKPOINT;
164488365d17SBharat Bhushan                 break;
164588365d17SBharat Bhushan             case GDB_WATCHPOINT_WRITE:
164688365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE;
164788365d17SBharat Bhushan                 break;
164888365d17SBharat Bhushan             case GDB_WATCHPOINT_READ:
164988365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_READ;
165088365d17SBharat Bhushan                 break;
165188365d17SBharat Bhushan             case GDB_WATCHPOINT_ACCESS:
165288365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE |
165388365d17SBharat Bhushan                                         KVMPPC_DEBUG_WATCH_READ;
165488365d17SBharat Bhushan                 break;
165588365d17SBharat Bhushan             default:
165688365d17SBharat Bhushan                 cpu_abort(cs, "Unsupported breakpoint type\n");
165788365d17SBharat Bhushan             }
165888365d17SBharat Bhushan             dbg->arch.bp[n].addr = hw_debug_points[n].addr;
165988365d17SBharat Bhushan         }
166088365d17SBharat Bhushan     }
16618a0548f9SBharat Bhushan }
16628a0548f9SBharat Bhushan 
16638a0548f9SBharat Bhushan static int kvm_handle_debug(PowerPCCPU *cpu, struct kvm_run *run)
16648a0548f9SBharat Bhushan {
16658a0548f9SBharat Bhushan     CPUState *cs = CPU(cpu);
16668a0548f9SBharat Bhushan     CPUPPCState *env = &cpu->env;
16678a0548f9SBharat Bhushan     struct kvm_debug_exit_arch *arch_info = &run->debug.arch;
16688a0548f9SBharat Bhushan     int handle = 0;
166988365d17SBharat Bhushan     int n;
167088365d17SBharat Bhushan     int flag = 0;
16718a0548f9SBharat Bhushan 
167288365d17SBharat Bhushan     if (cs->singlestep_enabled) {
167388365d17SBharat Bhushan         handle = 1;
167488365d17SBharat Bhushan     } else if (arch_info->status) {
167588365d17SBharat Bhushan         if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
167688365d17SBharat Bhushan             if (arch_info->status & KVMPPC_DEBUG_BREAKPOINT) {
167788365d17SBharat Bhushan                 n = find_hw_breakpoint(arch_info->address, GDB_BREAKPOINT_HW);
167888365d17SBharat Bhushan                 if (n >= 0) {
167988365d17SBharat Bhushan                     handle = 1;
168088365d17SBharat Bhushan                 }
168188365d17SBharat Bhushan             } else if (arch_info->status & (KVMPPC_DEBUG_WATCH_READ |
168288365d17SBharat Bhushan                                             KVMPPC_DEBUG_WATCH_WRITE)) {
168388365d17SBharat Bhushan                 n = find_hw_watchpoint(arch_info->address,  &flag);
168488365d17SBharat Bhushan                 if (n >= 0) {
168588365d17SBharat Bhushan                     handle = 1;
168688365d17SBharat Bhushan                     cs->watchpoint_hit = &hw_watchpoint;
168788365d17SBharat Bhushan                     hw_watchpoint.vaddr = hw_debug_points[n].addr;
168888365d17SBharat Bhushan                     hw_watchpoint.flags = flag;
168988365d17SBharat Bhushan                 }
169088365d17SBharat Bhushan             }
169188365d17SBharat Bhushan         }
169288365d17SBharat Bhushan     } else if (kvm_find_sw_breakpoint(cs, arch_info->address)) {
16938a0548f9SBharat Bhushan         handle = 1;
16948a0548f9SBharat Bhushan     } else {
16958a0548f9SBharat Bhushan         /* QEMU is not able to handle this debug exception, so inject
16968a0548f9SBharat Bhushan          * a program exception into the guest;
16978a0548f9SBharat Bhushan          * yes, a program exception, NOT a debug exception!
169888365d17SBharat Bhushan          * While QEMU is using the debug resources, the debug interrupt
169988365d17SBharat Bhushan          * must always be enabled. To achieve this we set MSR_DE and also
170088365d17SBharat Bhushan          * set MSRP_DEP so the guest cannot change MSR_DE.
170188365d17SBharat Bhushan          * When emulating the debug resources for the guest we want the
170288365d17SBharat Bhushan          * guest to control MSR_DE (enable/disable the debug interrupt on
170388365d17SBharat Bhushan          * demand). Supporting both configurations at once is not
170488365d17SBharat Bhushan          * possible, so debug resources cannot be shared between QEMU
170588365d17SBharat Bhushan          * and the guest on the BookE architecture.
170688365d17SBharat Bhushan          * In the current design QEMU gets priority over the guest:
170788365d17SBharat Bhushan          * if QEMU is using the debug resources then the guest
170888365d17SBharat Bhushan          * cannot use them.
17098a0548f9SBharat Bhushan          * For software breakpoints QEMU uses a privileged instruction,
17108a0548f9SBharat Bhushan          * so there is no way we can be here because the guest raised a
17118a0548f9SBharat Bhushan          * debug exception of its own; the only possibility is that the
17128a0548f9SBharat Bhushan          * guest executed a privileged / illegal instruction, which is
17138a0548f9SBharat Bhushan          * why we are injecting a program interrupt.
17148a0548f9SBharat Bhushan          */
17158a0548f9SBharat Bhushan 
17168a0548f9SBharat Bhushan         cpu_synchronize_state(cs);
17178a0548f9SBharat Bhushan         /* env->nip is PC, so increment this by 4 to use
17188a0548f9SBharat Bhushan          * ppc_cpu_do_interrupt(), which sets srr0 = env->nip - 4.
17198a0548f9SBharat Bhushan          */
17208a0548f9SBharat Bhushan         env->nip += 4;
17218a0548f9SBharat Bhushan         cs->exception_index = POWERPC_EXCP_PROGRAM;
17228a0548f9SBharat Bhushan         env->error_code = POWERPC_EXCP_INVAL;
17238a0548f9SBharat Bhushan         ppc_cpu_do_interrupt(cs);
17248a0548f9SBharat Bhushan     }
17258a0548f9SBharat Bhushan 
17268a0548f9SBharat Bhushan     return handle;
17278a0548f9SBharat Bhushan }
17288a0548f9SBharat Bhushan 
172920d695a9SAndreas Färber int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
1730d76d1650Saurel32 {
173120d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
173220d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
1733bb4ea393SJan Kiszka     int ret;
1734d76d1650Saurel32 
17354b8523eeSJan Kiszka     qemu_mutex_lock_iothread();
17364b8523eeSJan Kiszka 
1737d76d1650Saurel32     switch (run->exit_reason) {
1738d76d1650Saurel32     case KVM_EXIT_DCR:
1739d76d1650Saurel32         if (run->dcr.is_write) {
1740da56ff91SPeter Maydell             DPRINTF("handle dcr write\n");
1741d76d1650Saurel32             ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data);
1742d76d1650Saurel32         } else {
1743da56ff91SPeter Maydell             DPRINTF("handle dcr read\n");
1744d76d1650Saurel32             ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data);
1745d76d1650Saurel32         }
1746d76d1650Saurel32         break;
1747d76d1650Saurel32     case KVM_EXIT_HLT:
1748da56ff91SPeter Maydell         DPRINTF("handle halt\n");
1749259186a7SAndreas Färber         ret = kvmppc_handle_halt(cpu);
1750d76d1650Saurel32         break;
1751c6304a4aSDavid Gibson #if defined(TARGET_PPC64)
1752f61b4bedSAlexander Graf     case KVM_EXIT_PAPR_HCALL:
1753da56ff91SPeter Maydell         DPRINTF("handle PAPR hypercall\n");
175420d695a9SAndreas Färber         run->papr_hcall.ret = spapr_hypercall(cpu,
1755aa100fa4SAndreas Färber                                               run->papr_hcall.nr,
1756f61b4bedSAlexander Graf                                               run->papr_hcall.args);
175778e8fde2SDavid Gibson         ret = 0;
1758f61b4bedSAlexander Graf         break;
1759f61b4bedSAlexander Graf #endif
17605b95b8b9SAlexander Graf     case KVM_EXIT_EPR:
1761da56ff91SPeter Maydell         DPRINTF("handle epr\n");
1762933b19eaSAlexander Graf         run->epr.epr = ldl_phys(cs->as, env->mpic_iack);
17635b95b8b9SAlexander Graf         ret = 0;
17645b95b8b9SAlexander Graf         break;
176531f2cb8fSBharat Bhushan     case KVM_EXIT_WATCHDOG:
1766da56ff91SPeter Maydell         DPRINTF("handle watchdog expiry\n");
176731f2cb8fSBharat Bhushan         watchdog_perform_action();
176831f2cb8fSBharat Bhushan         ret = 0;
176931f2cb8fSBharat Bhushan         break;
177031f2cb8fSBharat Bhushan 
17718a0548f9SBharat Bhushan     case KVM_EXIT_DEBUG:
17728a0548f9SBharat Bhushan         DPRINTF("handle debug exception\n");
17738a0548f9SBharat Bhushan         if (kvm_handle_debug(cpu, run)) {
17748a0548f9SBharat Bhushan             ret = EXCP_DEBUG;
17758a0548f9SBharat Bhushan             break;
17768a0548f9SBharat Bhushan         }
17778a0548f9SBharat Bhushan         /* re-enter, this exception was guest-internal */
17788a0548f9SBharat Bhushan         ret = 0;
17798a0548f9SBharat Bhushan         break;
17808a0548f9SBharat Bhushan 
178173aaec4aSJan Kiszka     default:
178273aaec4aSJan Kiszka         fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
178373aaec4aSJan Kiszka         ret = -1;
178473aaec4aSJan Kiszka         break;
1785d76d1650Saurel32     }
1786d76d1650Saurel32 
17874b8523eeSJan Kiszka     qemu_mutex_unlock_iothread();
1788d76d1650Saurel32     return ret;
1789d76d1650Saurel32 }
1790d76d1650Saurel32 
179131f2cb8fSBharat Bhushan int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
179231f2cb8fSBharat Bhushan {
179331f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
179431f2cb8fSBharat Bhushan     uint32_t bits = tsr_bits;
179531f2cb8fSBharat Bhushan     struct kvm_one_reg reg = {
179631f2cb8fSBharat Bhushan         .id = KVM_REG_PPC_OR_TSR,
179731f2cb8fSBharat Bhushan         .addr = (uintptr_t) &bits,
179831f2cb8fSBharat Bhushan     };
179931f2cb8fSBharat Bhushan 
180031f2cb8fSBharat Bhushan     return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
180131f2cb8fSBharat Bhushan }
180231f2cb8fSBharat Bhushan 
180331f2cb8fSBharat Bhushan int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
180431f2cb8fSBharat Bhushan {
180531f2cb8fSBharat Bhushan 
180631f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
180731f2cb8fSBharat Bhushan     uint32_t bits = tsr_bits;
180831f2cb8fSBharat Bhushan     struct kvm_one_reg reg = {
180931f2cb8fSBharat Bhushan         .id = KVM_REG_PPC_CLEAR_TSR,
181031f2cb8fSBharat Bhushan         .addr = (uintptr_t) &bits,
181131f2cb8fSBharat Bhushan     };
181231f2cb8fSBharat Bhushan 
181331f2cb8fSBharat Bhushan     return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
181431f2cb8fSBharat Bhushan }
181531f2cb8fSBharat Bhushan 
181631f2cb8fSBharat Bhushan int kvmppc_set_tcr(PowerPCCPU *cpu)
181731f2cb8fSBharat Bhushan {
181831f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
181931f2cb8fSBharat Bhushan     CPUPPCState *env = &cpu->env;
182031f2cb8fSBharat Bhushan     uint32_t tcr = env->spr[SPR_BOOKE_TCR];
182131f2cb8fSBharat Bhushan 
182231f2cb8fSBharat Bhushan     struct kvm_one_reg reg = {
182331f2cb8fSBharat Bhushan         .id = KVM_REG_PPC_TCR,
182431f2cb8fSBharat Bhushan         .addr = (uintptr_t) &tcr,
182531f2cb8fSBharat Bhushan     };
182631f2cb8fSBharat Bhushan 
182731f2cb8fSBharat Bhushan     return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
182831f2cb8fSBharat Bhushan }
182931f2cb8fSBharat Bhushan 
183031f2cb8fSBharat Bhushan int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu)
183131f2cb8fSBharat Bhushan {
183231f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
183331f2cb8fSBharat Bhushan     int ret;
183431f2cb8fSBharat Bhushan 
183531f2cb8fSBharat Bhushan     if (!kvm_enabled()) {
183631f2cb8fSBharat Bhushan         return -1;
183731f2cb8fSBharat Bhushan     }
183831f2cb8fSBharat Bhushan 
183931f2cb8fSBharat Bhushan     if (!cap_ppc_watchdog) {
184031f2cb8fSBharat Bhushan         printf("warning: KVM does not support watchdog\n");
184131f2cb8fSBharat Bhushan         return -1;
184231f2cb8fSBharat Bhushan     }
184331f2cb8fSBharat Bhushan 
184448add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_BOOKE_WATCHDOG, 0);
184531f2cb8fSBharat Bhushan     if (ret < 0) {
184631f2cb8fSBharat Bhushan         fprintf(stderr, "%s: couldn't enable KVM_CAP_PPC_BOOKE_WATCHDOG: %s\n",
184731f2cb8fSBharat Bhushan                 __func__, strerror(-ret));
184831f2cb8fSBharat Bhushan         return ret;
184931f2cb8fSBharat Bhushan     }
185031f2cb8fSBharat Bhushan 
185131f2cb8fSBharat Bhushan     return ret;
185231f2cb8fSBharat Bhushan }
185331f2cb8fSBharat Bhushan 
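/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * A plausible call sequence for a BookE board model using the in-kernel
 * watchdog; the all-ones TSR clear mask is only an example.
 */
static inline int example_arm_booke_watchdog(PowerPCCPU *cpu)
{
    int ret;

    ret = kvmppc_booke_watchdog_enable(cpu);
    if (ret < 0) {
        return ret;             /* no KVM, or no watchdog capability */
    }

    /* Push the current TCR value into the kernel ... */
    ret = kvmppc_set_tcr(cpu);
    if (ret < 0) {
        return ret;
    }

    /* ... and clear any stale timer status bits. */
    return kvmppc_clear_tsr_bits(cpu, 0xffffffffU);
}
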
1854dc333cd6SAlexander Graf static int read_cpuinfo(const char *field, char *value, int len)
1855dc333cd6SAlexander Graf {
1856dc333cd6SAlexander Graf     FILE *f;
1857dc333cd6SAlexander Graf     int ret = -1;
1858dc333cd6SAlexander Graf     int field_len = strlen(field);
1859dc333cd6SAlexander Graf     char line[512];
1860dc333cd6SAlexander Graf 
1861dc333cd6SAlexander Graf     f = fopen("/proc/cpuinfo", "r");
1862dc333cd6SAlexander Graf     if (!f) {
1863dc333cd6SAlexander Graf         return -1;
1864dc333cd6SAlexander Graf     }
1865dc333cd6SAlexander Graf 
1866dc333cd6SAlexander Graf     do {
1867dc333cd6SAlexander Graf         if (!fgets(line, sizeof(line), f)) {
1868dc333cd6SAlexander Graf             break;
1869dc333cd6SAlexander Graf         }
1870dc333cd6SAlexander Graf         if (!strncmp(line, field, field_len)) {
1871ae215068SJim Meyering             pstrcpy(value, len, line);
1872dc333cd6SAlexander Graf             ret = 0;
1873dc333cd6SAlexander Graf             break;
1874dc333cd6SAlexander Graf         }
1875dc333cd6SAlexander Graf     } while (*line);
1876dc333cd6SAlexander Graf 
1877dc333cd6SAlexander Graf     fclose(f);
1878dc333cd6SAlexander Graf 
1879dc333cd6SAlexander Graf     return ret;
1880dc333cd6SAlexander Graf }
1881dc333cd6SAlexander Graf 
1882dc333cd6SAlexander Graf uint32_t kvmppc_get_tbfreq(void)
1883dc333cd6SAlexander Graf {
1884dc333cd6SAlexander Graf     char line[512];
1885dc333cd6SAlexander Graf     char *ns;
188673bcb24dSRutuja Shah     uint32_t retval = NANOSECONDS_PER_SECOND;
1887dc333cd6SAlexander Graf 
1888dc333cd6SAlexander Graf     if (read_cpuinfo("timebase", line, sizeof(line))) {
1889dc333cd6SAlexander Graf         return retval;
1890dc333cd6SAlexander Graf     }
1891dc333cd6SAlexander Graf 
1892dc333cd6SAlexander Graf     if (!(ns = strchr(line, ':'))) {
1893dc333cd6SAlexander Graf         return retval;
1894dc333cd6SAlexander Graf     }
1895dc333cd6SAlexander Graf 
1896dc333cd6SAlexander Graf     ns++;
1897dc333cd6SAlexander Graf 
1898f9b8e7f6SShraddha Barke     return atoi(ns);
1899ef951443SNikunj A Dadhania }
1900ef951443SNikunj A Dadhania 
1901ef951443SNikunj A Dadhania bool kvmppc_get_host_serial(char **value)
1902ef951443SNikunj A Dadhania {
1903ef951443SNikunj A Dadhania     return g_file_get_contents("/proc/device-tree/system-id", value, NULL,
1904ef951443SNikunj A Dadhania                                NULL);
1905ef951443SNikunj A Dadhania }
1906ef951443SNikunj A Dadhania 
1907ef951443SNikunj A Dadhania bool kvmppc_get_host_model(char **value)
1908ef951443SNikunj A Dadhania {
1909ef951443SNikunj A Dadhania     return g_file_get_contents("/proc/device-tree/model", value, NULL, NULL);
1910dc333cd6SAlexander Graf }
19114513d923SGleb Natapov 
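/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * Both helpers above return a newly allocated string on success (they are
 * thin wrappers around g_file_get_contents()), so the caller must free it
 * with g_free().
 */
static inline void example_print_host_identity(void)
{
    char *model = NULL;
    char *serial = NULL;

    if (kvmppc_get_host_model(&model)) {
        fprintf(stderr, "host model: %s\n", model);
        g_free(model);
    }
    if (kvmppc_get_host_serial(&serial)) {
        fprintf(stderr, "host serial: %s\n", serial);
        g_free(serial);
    }
}
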
1912eadaada1SAlexander Graf /* Try to find a device tree node for a CPU with clock-frequency property */
1913eadaada1SAlexander Graf static int kvmppc_find_cpu_dt(char *buf, int buf_len)
1914eadaada1SAlexander Graf {
1915eadaada1SAlexander Graf     struct dirent *dirp;
1916eadaada1SAlexander Graf     DIR *dp;
1917eadaada1SAlexander Graf 
1918eadaada1SAlexander Graf     if ((dp = opendir(PROC_DEVTREE_CPU)) == NULL) {
1919eadaada1SAlexander Graf         printf("Can't open directory " PROC_DEVTREE_CPU "\n");
1920eadaada1SAlexander Graf         return -1;
1921eadaada1SAlexander Graf     }
1922eadaada1SAlexander Graf 
1923eadaada1SAlexander Graf     buf[0] = '\0';
1924eadaada1SAlexander Graf     while ((dirp = readdir(dp)) != NULL) {
1925eadaada1SAlexander Graf         FILE *f;
1926eadaada1SAlexander Graf         snprintf(buf, buf_len, "%s%s/clock-frequency", PROC_DEVTREE_CPU,
1927eadaada1SAlexander Graf                  dirp->d_name);
1928eadaada1SAlexander Graf         f = fopen(buf, "r");
1929eadaada1SAlexander Graf         if (f) {
1930eadaada1SAlexander Graf             snprintf(buf, buf_len, "%s%s", PROC_DEVTREE_CPU, dirp->d_name);
1931eadaada1SAlexander Graf             fclose(f);
1932eadaada1SAlexander Graf             break;
1933eadaada1SAlexander Graf         }
1934eadaada1SAlexander Graf         buf[0] = '\0';
1935eadaada1SAlexander Graf     }
1936eadaada1SAlexander Graf     closedir(dp);
1937eadaada1SAlexander Graf     if (buf[0] == '\0') {
1938eadaada1SAlexander Graf         printf("Unknown host!\n");
1939eadaada1SAlexander Graf         return -1;
1940eadaada1SAlexander Graf     }
1941eadaada1SAlexander Graf 
1942eadaada1SAlexander Graf     return 0;
1943eadaada1SAlexander Graf }
1944eadaada1SAlexander Graf 
19457d94a30bSSukadev Bhattiprolu static uint64_t kvmppc_read_int_dt(const char *filename)
1946eadaada1SAlexander Graf {
19479bc884b7SDavid Gibson     union {
19489bc884b7SDavid Gibson         uint32_t v32;
19499bc884b7SDavid Gibson         uint64_t v64;
19509bc884b7SDavid Gibson     } u;
1951eadaada1SAlexander Graf     FILE *f;
1952eadaada1SAlexander Graf     int len;
1953eadaada1SAlexander Graf 
19547d94a30bSSukadev Bhattiprolu     f = fopen(filename, "rb");
1955eadaada1SAlexander Graf     if (!f) {
1956eadaada1SAlexander Graf         return -1;
1957eadaada1SAlexander Graf     }
1958eadaada1SAlexander Graf 
19599bc884b7SDavid Gibson     len = fread(&u, 1, sizeof(u), f);
1960eadaada1SAlexander Graf     fclose(f);
1961eadaada1SAlexander Graf     switch (len) {
19629bc884b7SDavid Gibson     case 4:
19639bc884b7SDavid Gibson         /* property is a 32-bit quantity */
19649bc884b7SDavid Gibson         return be32_to_cpu(u.v32);
19659bc884b7SDavid Gibson     case 8:
19669bc884b7SDavid Gibson         return be64_to_cpu(u.v64);
1967eadaada1SAlexander Graf     }
1968eadaada1SAlexander Graf 
1969eadaada1SAlexander Graf     return 0;
1970eadaada1SAlexander Graf }
1971eadaada1SAlexander Graf 
19727d94a30bSSukadev Bhattiprolu /* Read a CPU node property from the host device tree that's a single
19737d94a30bSSukadev Bhattiprolu  * integer (32-bit or 64-bit).  Returns -1 if the property can't be
19747d94a30bSSukadev Bhattiprolu  * found or opened, and 0 if its format isn't understood. */
19767d94a30bSSukadev Bhattiprolu static uint64_t kvmppc_read_int_cpu_dt(const char *propname)
19777d94a30bSSukadev Bhattiprolu {
19787d94a30bSSukadev Bhattiprolu     char buf[PATH_MAX], *tmp;
19797d94a30bSSukadev Bhattiprolu     uint64_t val;
19807d94a30bSSukadev Bhattiprolu 
19817d94a30bSSukadev Bhattiprolu     if (kvmppc_find_cpu_dt(buf, sizeof(buf))) {
19827d94a30bSSukadev Bhattiprolu         return -1;
19837d94a30bSSukadev Bhattiprolu     }
19847d94a30bSSukadev Bhattiprolu 
19857d94a30bSSukadev Bhattiprolu     tmp = g_strdup_printf("%s/%s", buf, propname);
19867d94a30bSSukadev Bhattiprolu     val = kvmppc_read_int_dt(tmp);
19877d94a30bSSukadev Bhattiprolu     g_free(tmp);
19887d94a30bSSukadev Bhattiprolu 
19897d94a30bSSukadev Bhattiprolu     return val;
19907d94a30bSSukadev Bhattiprolu }
19917d94a30bSSukadev Bhattiprolu 
19929bc884b7SDavid Gibson uint64_t kvmppc_get_clockfreq(void)
19939bc884b7SDavid Gibson {
19949bc884b7SDavid Gibson     return kvmppc_read_int_cpu_dt("clock-frequency");
19959bc884b7SDavid Gibson }
19969bc884b7SDavid Gibson 
19976659394fSDavid Gibson uint32_t kvmppc_get_vmx(void)
19986659394fSDavid Gibson {
19996659394fSDavid Gibson     return kvmppc_read_int_cpu_dt("ibm,vmx");
20006659394fSDavid Gibson }
20016659394fSDavid Gibson 
20026659394fSDavid Gibson uint32_t kvmppc_get_dfp(void)
20036659394fSDavid Gibson {
20046659394fSDavid Gibson     return kvmppc_read_int_cpu_dt("ibm,dfp");
20056659394fSDavid Gibson }
20066659394fSDavid Gibson 
20071a61a9aeSStuart Yoder static int kvmppc_get_pvinfo(CPUPPCState *env, struct kvm_ppc_pvinfo *pvinfo)
200845024f09SAlexander Graf {
2009a60f24b5SAndreas Färber     PowerPCCPU *cpu = ppc_env_get_cpu(env);
2010a60f24b5SAndreas Färber     CPUState *cs = CPU(cpu);
201145024f09SAlexander Graf 
20126fd33a75SAlexander Graf     if (kvm_vm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
20131a61a9aeSStuart Yoder         !kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_PVINFO, pvinfo)) {
20141a61a9aeSStuart Yoder         return 0;
20151a61a9aeSStuart Yoder     }
201645024f09SAlexander Graf 
20171a61a9aeSStuart Yoder     return 1;
20181a61a9aeSStuart Yoder }
20191a61a9aeSStuart Yoder 
20201a61a9aeSStuart Yoder int kvmppc_get_hasidle(CPUPPCState *env)
20211a61a9aeSStuart Yoder {
20221a61a9aeSStuart Yoder     struct kvm_ppc_pvinfo pvinfo;
20231a61a9aeSStuart Yoder 
20241a61a9aeSStuart Yoder     if (!kvmppc_get_pvinfo(env, &pvinfo) &&
20251a61a9aeSStuart Yoder         (pvinfo.flags & KVM_PPC_PVINFO_FLAGS_EV_IDLE)) {
20261a61a9aeSStuart Yoder         return 1;
20271a61a9aeSStuart Yoder     }
20281a61a9aeSStuart Yoder 
20291a61a9aeSStuart Yoder     return 0;
20301a61a9aeSStuart Yoder }
20311a61a9aeSStuart Yoder 
20321a61a9aeSStuart Yoder int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
20331a61a9aeSStuart Yoder {
20341a61a9aeSStuart Yoder     uint32_t *hc = (uint32_t *)buf;
20351a61a9aeSStuart Yoder     struct kvm_ppc_pvinfo pvinfo;
20361a61a9aeSStuart Yoder 
20371a61a9aeSStuart Yoder     if (!kvmppc_get_pvinfo(env, &pvinfo)) {
20381a61a9aeSStuart Yoder         memcpy(buf, pvinfo.hcall, buf_len);
203945024f09SAlexander Graf         return 0;
204045024f09SAlexander Graf     }
204145024f09SAlexander Graf 
204245024f09SAlexander Graf     /*
2043d13fc32eSAlexander Graf      * Fall back to a hypercall sequence that always fails, regardless of endianness:
204445024f09SAlexander Graf      *
2045d13fc32eSAlexander Graf      *     tdi 0,r0,72 (becomes b .+8 in wrong endian, nop in good endian)
204645024f09SAlexander Graf      *     li r3, -1
2047d13fc32eSAlexander Graf      *     b .+8       (becomes nop in wrong endian)
2048d13fc32eSAlexander Graf      *     bswap32(li r3, -1)
204945024f09SAlexander Graf      */
205045024f09SAlexander Graf 
2051d13fc32eSAlexander Graf     hc[0] = cpu_to_be32(0x08000048);
2052d13fc32eSAlexander Graf     hc[1] = cpu_to_be32(0x3860ffff);
2053d13fc32eSAlexander Graf     hc[2] = cpu_to_be32(0x48000008);
2054d13fc32eSAlexander Graf     hc[3] = cpu_to_be32(bswap32(0x3860ffff));
205545024f09SAlexander Graf 
20560ddbd053SAlexey Kardashevskiy     return 1;
205745024f09SAlexander Graf }
205845024f09SAlexander Graf 
2059026bfd89SDavid Gibson static inline int kvmppc_enable_hcall(KVMState *s, target_ulong hcall)
2060026bfd89SDavid Gibson {
2061026bfd89SDavid Gibson     return kvm_vm_enable_cap(s, KVM_CAP_PPC_ENABLE_HCALL, 0, hcall, 1);
2062026bfd89SDavid Gibson }
2063026bfd89SDavid Gibson 
2064026bfd89SDavid Gibson void kvmppc_enable_logical_ci_hcalls(void)
2065026bfd89SDavid Gibson {
2066026bfd89SDavid Gibson     /*
2067026bfd89SDavid Gibson      * FIXME: it would be nice if we could detect the cases where
2068026bfd89SDavid Gibson      * we're using a device which requires the in-kernel
2069026bfd89SDavid Gibson      * implementation of these hcalls but the kernel lacks it, and
2070026bfd89SDavid Gibson      * produce a warning in that case.
2071026bfd89SDavid Gibson      */
2072026bfd89SDavid Gibson     kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_LOAD);
2073026bfd89SDavid Gibson     kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_STORE);
2074026bfd89SDavid Gibson }
2075026bfd89SDavid Gibson 
2076ef9971ddSAlexey Kardashevskiy void kvmppc_enable_set_mode_hcall(void)
2077ef9971ddSAlexey Kardashevskiy {
2078ef9971ddSAlexey Kardashevskiy     kvmppc_enable_hcall(kvm_state, H_SET_MODE);
2079ef9971ddSAlexey Kardashevskiy }
2080ef9971ddSAlexey Kardashevskiy 
20815145ad4fSNathan Whitehorn void kvmppc_enable_clear_ref_mod_hcalls(void)
20825145ad4fSNathan Whitehorn {
20835145ad4fSNathan Whitehorn     kvmppc_enable_hcall(kvm_state, H_CLEAR_REF);
20845145ad4fSNathan Whitehorn     kvmppc_enable_hcall(kvm_state, H_CLEAR_MOD);
20855145ad4fSNathan Whitehorn }
20865145ad4fSNathan Whitehorn 
20871bc22652SAndreas Färber void kvmppc_set_papr(PowerPCCPU *cpu)
2088f61b4bedSAlexander Graf {
20891bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
2090f61b4bedSAlexander Graf     int ret;
2091f61b4bedSAlexander Graf 
209248add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_PAPR, 0);
2093f61b4bedSAlexander Graf     if (ret) {
2094072ed5f2SThomas Huth         error_report("This vCPU type or KVM version does not support PAPR");
2095072ed5f2SThomas Huth         exit(1);
2096f61b4bedSAlexander Graf     }
20979b00ea49SDavid Gibson 
20989b00ea49SDavid Gibson     /* Update the capability flag so we sync the right information
20999b00ea49SDavid Gibson      * with kvm */
21009b00ea49SDavid Gibson     cap_papr = 1;
2101f1af19d7SDavid Gibson }
2102f61b4bedSAlexander Graf 
2103d6e166c0SDavid Gibson int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr)
21046db5bb0fSAlexey Kardashevskiy {
2105d6e166c0SDavid Gibson     return kvm_set_one_reg(CPU(cpu), KVM_REG_PPC_ARCH_COMPAT, &compat_pvr);
21066db5bb0fSAlexey Kardashevskiy }
21076db5bb0fSAlexey Kardashevskiy 
21085b95b8b9SAlexander Graf void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy)
21095b95b8b9SAlexander Graf {
21105b95b8b9SAlexander Graf     CPUState *cs = CPU(cpu);
21115b95b8b9SAlexander Graf     int ret;
21125b95b8b9SAlexander Graf 
211348add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_EPR, 0, mpic_proxy);
21145b95b8b9SAlexander Graf     if (ret && mpic_proxy) {
2115072ed5f2SThomas Huth         error_report("This KVM version does not support EPR");
2116072ed5f2SThomas Huth         exit(1);
21175b95b8b9SAlexander Graf     }
21185b95b8b9SAlexander Graf }
21195b95b8b9SAlexander Graf 
2120e97c3636SDavid Gibson int kvmppc_smt_threads(void)
2121e97c3636SDavid Gibson {
2122e97c3636SDavid Gibson     return cap_ppc_smt ? cap_ppc_smt : 1;
2123e97c3636SDavid Gibson }
2124e97c3636SDavid Gibson 
21257f763a5dSDavid Gibson #ifdef TARGET_PPC64
2126658fa66bSAlexey Kardashevskiy off_t kvmppc_alloc_rma(void **rma)
2127354ac20aSDavid Gibson {
2128354ac20aSDavid Gibson     off_t size;
2129354ac20aSDavid Gibson     int fd;
2130354ac20aSDavid Gibson     struct kvm_allocate_rma ret;
2131354ac20aSDavid Gibson 
2132354ac20aSDavid Gibson     /* If cap_ppc_rma == 0, contiguous RMA allocation is not supported;
2133354ac20aSDavid Gibson      * if cap_ppc_rma == 1, contiguous RMA allocation is supported, but
2134354ac20aSDavid Gibson      *                      not necessary on this hardware;
2135354ac20aSDavid Gibson      * if cap_ppc_rma == 2, contiguous RMA allocation is needed on this hardware.
2136354ac20aSDavid Gibson      *
2137354ac20aSDavid Gibson      * FIXME: We should allow the user to force contiguous RMA
2138354ac20aSDavid Gibson      * allocation in the cap_ppc_rma==1 case.
2139354ac20aSDavid Gibson      */
2140354ac20aSDavid Gibson     if (cap_ppc_rma < 2) {
2141354ac20aSDavid Gibson         return 0;
2142354ac20aSDavid Gibson     }
2143354ac20aSDavid Gibson 
2144354ac20aSDavid Gibson     fd = kvm_vm_ioctl(kvm_state, KVM_ALLOCATE_RMA, &ret);
2145354ac20aSDavid Gibson     if (fd < 0) {
2146354ac20aSDavid Gibson         fprintf(stderr, "KVM: Error on KVM_ALLOCATE_RMA: %s\n",
2147354ac20aSDavid Gibson                 strerror(errno));
2148354ac20aSDavid Gibson         return -1;
2149354ac20aSDavid Gibson     }
2150354ac20aSDavid Gibson 
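    /* Use at most 256MiB of whatever RMA the kernel allocated for us */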
2151354ac20aSDavid Gibson     size = MIN(ret.rma_size, 256ul << 20);
2152354ac20aSDavid Gibson 
2153658fa66bSAlexey Kardashevskiy     *rma = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
2154658fa66bSAlexey Kardashevskiy     if (*rma == MAP_FAILED) {
2155354ac20aSDavid Gibson         fprintf(stderr, "KVM: Error mapping RMA: %s\n", strerror(errno));
2156354ac20aSDavid Gibson         return -1;
2157354ac20aSDavid Gibson     };
2158354ac20aSDavid Gibson 
2159354ac20aSDavid Gibson     return size;
2160354ac20aSDavid Gibson }
2161354ac20aSDavid Gibson 
21627f763a5dSDavid Gibson uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift)
21637f763a5dSDavid Gibson {
2164f36951c1SDavid Gibson     struct kvm_ppc_smmu_info info;
2165f36951c1SDavid Gibson     long rampagesize, best_page_shift;
2166f36951c1SDavid Gibson     int i;
2167f36951c1SDavid Gibson 
21687f763a5dSDavid Gibson     if (cap_ppc_rma >= 2) {
21697f763a5dSDavid Gibson         return current_size;
21707f763a5dSDavid Gibson     }
2171f36951c1SDavid Gibson 
2172f36951c1SDavid Gibson     /* Find the largest hardware-supported page size that's less than
2173f36951c1SDavid Gibson      * or equal to the (logical) backing page size of guest RAM */
2174182735efSAndreas Färber     kvm_get_smmu_info(POWERPC_CPU(first_cpu), &info);
21759c607668SAlexey Kardashevskiy     rampagesize = qemu_getrampagesize();
2176f36951c1SDavid Gibson     best_page_shift = 0;
2177f36951c1SDavid Gibson 
2178f36951c1SDavid Gibson     for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
2179f36951c1SDavid Gibson         struct kvm_ppc_one_seg_page_size *sps = &info.sps[i];
2180f36951c1SDavid Gibson 
2181f36951c1SDavid Gibson         if (!sps->page_shift) {
2182f36951c1SDavid Gibson             continue;
2183f36951c1SDavid Gibson         }
2184f36951c1SDavid Gibson 
2185f36951c1SDavid Gibson         if ((sps->page_shift > best_page_shift)
2186f36951c1SDavid Gibson             && ((1UL << sps->page_shift) <= rampagesize)) {
2187f36951c1SDavid Gibson             best_page_shift = sps->page_shift;
2188f36951c1SDavid Gibson         }
2189f36951c1SDavid Gibson     }
2190f36951c1SDavid Gibson 
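    /* Cap the RMA at 2^(page_shift + hash_shift - 7) bytes, i.e. one
     * base page of RMA for every 128 bytes (one PTE group) of hash
     * table. */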
21917f763a5dSDavid Gibson     return MIN(current_size,
2192f36951c1SDavid Gibson                1ULL << (best_page_shift + hash_shift - 7));
21937f763a5dSDavid Gibson }
21947f763a5dSDavid Gibson #endif
21957f763a5dSDavid Gibson 
2196da95324eSAlexey Kardashevskiy bool kvmppc_spapr_use_multitce(void)
2197da95324eSAlexey Kardashevskiy {
2198da95324eSAlexey Kardashevskiy     return cap_spapr_multitce;
2199da95324eSAlexey Kardashevskiy }
2200da95324eSAlexey Kardashevskiy 
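/* Create an in-kernel TCE (IOMMU translation) table for the given LIOBN
 * and mmap() it into QEMU.  The 64-bit ioctl is used when available (and
 * is required for a non-zero bus_offset); otherwise this falls back to
 * the legacy 32-bit window ioctl.  Returns the mapped table and stores
 * its file descriptor in *pfd, or returns NULL on failure. */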
2201d6ee2a7cSAlexey Kardashevskiy void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift,
2202d6ee2a7cSAlexey Kardashevskiy                               uint64_t bus_offset, uint32_t nb_table,
2203d6ee2a7cSAlexey Kardashevskiy                               int *pfd, bool need_vfio)
22040f5cb298SDavid Gibson {
22050f5cb298SDavid Gibson     long len;
22060f5cb298SDavid Gibson     int fd;
22070f5cb298SDavid Gibson     void *table;
22080f5cb298SDavid Gibson 
2209b5aec396SDavid Gibson     /* Must set fd to -1 so we don't try to munmap when called for
2210b5aec396SDavid Gibson      * destroying the table, which the upper layers -will- do
2211b5aec396SDavid Gibson      */
2212b5aec396SDavid Gibson     *pfd = -1;
22136a81dd17SDavid Gibson     if (!cap_spapr_tce || (need_vfio && !cap_spapr_vfio)) {
22140f5cb298SDavid Gibson         return NULL;
22150f5cb298SDavid Gibson     }
22160f5cb298SDavid Gibson 
2217d6ee2a7cSAlexey Kardashevskiy     if (cap_spapr_tce_64) {
2218d6ee2a7cSAlexey Kardashevskiy         struct kvm_create_spapr_tce_64 args = {
2219d6ee2a7cSAlexey Kardashevskiy             .liobn = liobn,
2220d6ee2a7cSAlexey Kardashevskiy             .page_shift = page_shift,
2221d6ee2a7cSAlexey Kardashevskiy             .offset = bus_offset >> page_shift,
2222d6ee2a7cSAlexey Kardashevskiy             .size = nb_table,
2223d6ee2a7cSAlexey Kardashevskiy             .flags = 0
2224d6ee2a7cSAlexey Kardashevskiy         };
2225d6ee2a7cSAlexey Kardashevskiy         fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE_64, &args);
2226d6ee2a7cSAlexey Kardashevskiy         if (fd < 0) {
2227d6ee2a7cSAlexey Kardashevskiy             fprintf(stderr,
2228d6ee2a7cSAlexey Kardashevskiy                     "KVM: Failed to create TCE64 table for liobn 0x%x\n",
2229d6ee2a7cSAlexey Kardashevskiy                     liobn);
2230d6ee2a7cSAlexey Kardashevskiy             return NULL;
2231d6ee2a7cSAlexey Kardashevskiy         }
2232d6ee2a7cSAlexey Kardashevskiy     } else if (cap_spapr_tce) {
2233d6ee2a7cSAlexey Kardashevskiy         uint64_t window_size = (uint64_t) nb_table << page_shift;
2234d6ee2a7cSAlexey Kardashevskiy         struct kvm_create_spapr_tce args = {
2235d6ee2a7cSAlexey Kardashevskiy             .liobn = liobn,
2236d6ee2a7cSAlexey Kardashevskiy             .window_size = window_size,
2237d6ee2a7cSAlexey Kardashevskiy         };
2238d6ee2a7cSAlexey Kardashevskiy         if ((window_size != args.window_size) || bus_offset) {
2239d6ee2a7cSAlexey Kardashevskiy             return NULL;
2240d6ee2a7cSAlexey Kardashevskiy         }
22410f5cb298SDavid Gibson         fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE, &args);
22420f5cb298SDavid Gibson         if (fd < 0) {
2243b5aec396SDavid Gibson             fprintf(stderr, "KVM: Failed to create TCE table for liobn 0x%x\n",
2244b5aec396SDavid Gibson                     liobn);
22450f5cb298SDavid Gibson             return NULL;
22460f5cb298SDavid Gibson         }
2247d6ee2a7cSAlexey Kardashevskiy     } else {
2248d6ee2a7cSAlexey Kardashevskiy         return NULL;
2249d6ee2a7cSAlexey Kardashevskiy     }
22500f5cb298SDavid Gibson 
2251d6ee2a7cSAlexey Kardashevskiy     len = nb_table * sizeof(uint64_t);
22520f5cb298SDavid Gibson     /* FIXME: round this up to page size */
22530f5cb298SDavid Gibson 
225474b41e56SDavid Gibson     table = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
22550f5cb298SDavid Gibson     if (table == MAP_FAILED) {
2256b5aec396SDavid Gibson         fprintf(stderr, "KVM: Failed to map TCE table for liobn 0x%x\n",
2257b5aec396SDavid Gibson                 liobn);
22580f5cb298SDavid Gibson         close(fd);
22590f5cb298SDavid Gibson         return NULL;
22600f5cb298SDavid Gibson     }
22610f5cb298SDavid Gibson 
22620f5cb298SDavid Gibson     *pfd = fd;
22630f5cb298SDavid Gibson     return table;
22640f5cb298SDavid Gibson }
22650f5cb298SDavid Gibson 
2266523e7b8aSAlexey Kardashevskiy int kvmppc_remove_spapr_tce(void *table, int fd, uint32_t nb_table)
22670f5cb298SDavid Gibson {
22680f5cb298SDavid Gibson     long len;
22690f5cb298SDavid Gibson 
22700f5cb298SDavid Gibson     if (fd < 0) {
22710f5cb298SDavid Gibson         return -1;
22720f5cb298SDavid Gibson     }
22730f5cb298SDavid Gibson 
2274523e7b8aSAlexey Kardashevskiy     len = nb_table * sizeof(uint64_t);
22750f5cb298SDavid Gibson     if ((munmap(table, len) < 0) ||
22760f5cb298SDavid Gibson         (close(fd) < 0)) {
2277b5aec396SDavid Gibson         fprintf(stderr, "KVM: Unexpected error removing TCE table: %s",
2278b5aec396SDavid Gibson                 strerror(errno));
22790f5cb298SDavid Gibson         /* Leak the table */
22800f5cb298SDavid Gibson     }
22810f5cb298SDavid Gibson 
22820f5cb298SDavid Gibson     return 0;
22830f5cb298SDavid Gibson }
22840f5cb298SDavid Gibson 
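/* Allocate or reset the guest's hash page table via KVM.  Returns a
 * positive value giving the log2 size of the kernel-managed HPT, 0 if
 * the caller should allocate the HPT in QEMU instead, or a negative
 * errno on failure. */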
22857f763a5dSDavid Gibson int kvmppc_reset_htab(int shift_hint)
22867f763a5dSDavid Gibson {
22877f763a5dSDavid Gibson     uint32_t shift = shift_hint;
22887f763a5dSDavid Gibson 
2289ace9a2cbSDavid Gibson     if (!kvm_enabled()) {
2290ace9a2cbSDavid Gibson         /* Full emulation, tell caller to allocate htab itself */
2291ace9a2cbSDavid Gibson         return 0;
2292ace9a2cbSDavid Gibson     }
2293ace9a2cbSDavid Gibson     if (kvm_check_extension(kvm_state, KVM_CAP_PPC_ALLOC_HTAB)) {
22947f763a5dSDavid Gibson         int ret;
22957f763a5dSDavid Gibson         ret = kvm_vm_ioctl(kvm_state, KVM_PPC_ALLOCATE_HTAB, &shift);
2296ace9a2cbSDavid Gibson         if (ret == -ENOTTY) {
2297ace9a2cbSDavid Gibson             /* At least some versions of PR KVM advertise the
2298ace9a2cbSDavid Gibson              * capability, but don't implement the ioctl().  Oops.
2299ace9a2cbSDavid Gibson              * Return 0 so that we allocate the htab in qemu, as is
2300ace9a2cbSDavid Gibson              * correct for PR. */
2301ace9a2cbSDavid Gibson             return 0;
2302ace9a2cbSDavid Gibson         } else if (ret < 0) {
23037f763a5dSDavid Gibson             return ret;
23047f763a5dSDavid Gibson         }
23057f763a5dSDavid Gibson         return shift;
23067f763a5dSDavid Gibson     }
23077f763a5dSDavid Gibson 
2308ace9a2cbSDavid Gibson     /* We have a kernel that predates the htab reset calls.  For PR
2309ace9a2cbSDavid Gibson      * KVM, we need to allocate the htab ourselves; an HV KVM of this
231096c9cff0SThomas Huth      * era will have allocated a 16MB fixed-size hash table already. */
231196c9cff0SThomas Huth     if (kvmppc_is_pr(kvm_state)) {
2312ace9a2cbSDavid Gibson         /* PR - tell caller to allocate htab */
23137f763a5dSDavid Gibson         return 0;
2314ace9a2cbSDavid Gibson     } else {
2315ace9a2cbSDavid Gibson         /* HV - assume 16MB kernel allocated htab */
2316ace9a2cbSDavid Gibson         return 24;
2317ace9a2cbSDavid Gibson     }
23187f763a5dSDavid Gibson }
23197f763a5dSDavid Gibson 
2320a1e98583SDavid Gibson static inline uint32_t mfpvr(void)
2321a1e98583SDavid Gibson {
2322a1e98583SDavid Gibson     uint32_t pvr;
2323a1e98583SDavid Gibson 
2324a1e98583SDavid Gibson     asm ("mfpvr %0"
2325a1e98583SDavid Gibson          : "=r"(pvr));
2326a1e98583SDavid Gibson     return pvr;
2327a1e98583SDavid Gibson }
2328a1e98583SDavid Gibson 
2329a7342588SDavid Gibson static void alter_insns(uint64_t *word, uint64_t flags, bool on)
2330a7342588SDavid Gibson {
2331a7342588SDavid Gibson     if (on) {
2332a7342588SDavid Gibson         *word |= flags;
2333a7342588SDavid Gibson     } else {
2334a7342588SDavid Gibson         *word &= ~flags;
2335a7342588SDavid Gibson     }
2336a7342588SDavid Gibson }
2337a7342588SDavid Gibson 
23382985b86bSAndreas Färber static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data)
23392985b86bSAndreas Färber {
23402985b86bSAndreas Färber     PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
2341a7342588SDavid Gibson     uint32_t vmx = kvmppc_get_vmx();
2342a7342588SDavid Gibson     uint32_t dfp = kvmppc_get_dfp();
23430cbad81fSDavid Gibson     uint32_t dcache_size = kvmppc_read_int_cpu_dt("d-cache-size");
23440cbad81fSDavid Gibson     uint32_t icache_size = kvmppc_read_int_cpu_dt("i-cache-size");
2345a1e98583SDavid Gibson 
2346cfe34f44SAndreas Färber     /* Now fix up the class with information we can query from the host */
23473bc9ccc0SAlexey Kardashevskiy     pcc->pvr = mfpvr();
2348a7342588SDavid Gibson 
234970bca53fSAlexander Graf     if (vmx != -1) {
235070bca53fSAlexander Graf         /* Only override when we know what the host supports */
2351cfe34f44SAndreas Färber         alter_insns(&pcc->insns_flags, PPC_ALTIVEC, vmx > 0);
2352cfe34f44SAndreas Färber         alter_insns(&pcc->insns_flags2, PPC2_VSX, vmx > 1);
235370bca53fSAlexander Graf     }
235470bca53fSAlexander Graf     if (dfp != -1) {
235570bca53fSAlexander Graf         /* Only override when we know what the host supports */
2356cfe34f44SAndreas Färber         alter_insns(&pcc->insns_flags2, PPC2_DFP, dfp);
235770bca53fSAlexander Graf     }
23580cbad81fSDavid Gibson 
23590cbad81fSDavid Gibson     if (dcache_size != -1) {
23600cbad81fSDavid Gibson         pcc->l1_dcache_size = dcache_size;
23610cbad81fSDavid Gibson     }
23620cbad81fSDavid Gibson 
23630cbad81fSDavid Gibson     if (icache_size != -1) {
23640cbad81fSDavid Gibson         pcc->l1_icache_size = icache_size;
23650cbad81fSDavid Gibson     }
2366c64abd1fSSam Bobroff 
2367c64abd1fSSam Bobroff #if defined(TARGET_PPC64)
2368c64abd1fSSam Bobroff     pcc->radix_page_info = kvm_get_radix_page_info();
2369c64abd1fSSam Bobroff #endif /* defined(TARGET_PPC64) */
2370a1e98583SDavid Gibson }
2371a1e98583SDavid Gibson 
23723b961124SStuart Yoder bool kvmppc_has_cap_epr(void)
23733b961124SStuart Yoder {
23743b961124SStuart Yoder     return cap_epr;
23753b961124SStuart Yoder }
23763b961124SStuart Yoder 
23777c43bca0SAneesh Kumar K.V bool kvmppc_has_cap_htab_fd(void)
23787c43bca0SAneesh Kumar K.V {
23797c43bca0SAneesh Kumar K.V     return cap_htab_fd;
23807c43bca0SAneesh Kumar K.V }
23817c43bca0SAneesh Kumar K.V 
238287a91de6SAlexander Graf bool kvmppc_has_cap_fixup_hcalls(void)
238387a91de6SAlexander Graf {
238487a91de6SAlexander Graf     return cap_fixup_hcalls;
238587a91de6SAlexander Graf }
238687a91de6SAlexander Graf 
2387bac3bf28SThomas Huth bool kvmppc_has_cap_htm(void)
2388bac3bf28SThomas Huth {
2389bac3bf28SThomas Huth     return cap_htm;
2390bac3bf28SThomas Huth }
2391bac3bf28SThomas Huth 
2392cf1c4cceSSam Bobroff bool kvmppc_has_cap_mmu_radix(void)
2393cf1c4cceSSam Bobroff {
2394cf1c4cceSSam Bobroff     return cap_mmu_radix;
2395cf1c4cceSSam Bobroff }
2396cf1c4cceSSam Bobroff 
2397cf1c4cceSSam Bobroff bool kvmppc_has_cap_mmu_hash_v3(void)
2398cf1c4cceSSam Bobroff {
2399cf1c4cceSSam Bobroff     return cap_mmu_hash_v3;
2400cf1c4cceSSam Bobroff }
2401cf1c4cceSSam Bobroff 
24025b79b1caSAlexey Kardashevskiy static PowerPCCPUClass *ppc_cpu_get_family_class(PowerPCCPUClass *pcc)
24035b79b1caSAlexey Kardashevskiy {
24045b79b1caSAlexey Kardashevskiy     ObjectClass *oc = OBJECT_CLASS(pcc);
24055b79b1caSAlexey Kardashevskiy 
24065b79b1caSAlexey Kardashevskiy     while (oc && !object_class_is_abstract(oc)) {
24075b79b1caSAlexey Kardashevskiy         oc = object_class_get_parent(oc);
24085b79b1caSAlexey Kardashevskiy     }
24095b79b1caSAlexey Kardashevskiy     assert(oc);
24105b79b1caSAlexey Kardashevskiy 
24115b79b1caSAlexey Kardashevskiy     return POWERPC_CPU_CLASS(oc);
24125b79b1caSAlexey Kardashevskiy }
24135b79b1caSAlexey Kardashevskiy 
241452b2519cSThomas Huth PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void)
241552b2519cSThomas Huth {
241652b2519cSThomas Huth     uint32_t host_pvr = mfpvr();
241752b2519cSThomas Huth     PowerPCCPUClass *pvr_pcc;
241852b2519cSThomas Huth 
241952b2519cSThomas Huth     pvr_pcc = ppc_cpu_class_by_pvr(host_pvr);
242052b2519cSThomas Huth     if (pvr_pcc == NULL) {
242152b2519cSThomas Huth         pvr_pcc = ppc_cpu_class_by_pvr_mask(host_pvr);
242252b2519cSThomas Huth     }
242352b2519cSThomas Huth 
242452b2519cSThomas Huth     return pvr_pcc;
242552b2519cSThomas Huth }
242652b2519cSThomas Huth 
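/* Register the "host" CPU class matching the PVR of the CPU we are
 * running on (and, on ppc64, the corresponding sPAPR CPU core type),
 * then point the generic family alias (e.g. "POWER8") at it. */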
24275ba4576bSAndreas Färber static int kvm_ppc_register_host_cpu_type(void)
24285ba4576bSAndreas Färber {
24295ba4576bSAndreas Färber     TypeInfo type_info = {
24305ba4576bSAndreas Färber         .name = TYPE_HOST_POWERPC_CPU,
24315ba4576bSAndreas Färber         .class_init = kvmppc_host_cpu_class_init,
24325ba4576bSAndreas Färber     };
24335ba4576bSAndreas Färber     PowerPCCPUClass *pvr_pcc;
24345b79b1caSAlexey Kardashevskiy     DeviceClass *dc;
2435715d4b96SThomas Huth     int i;
24365ba4576bSAndreas Färber 
243752b2519cSThomas Huth     pvr_pcc = kvm_ppc_get_host_cpu_class();
24383bc9ccc0SAlexey Kardashevskiy     if (pvr_pcc == NULL) {
24395ba4576bSAndreas Färber         return -1;
24405ba4576bSAndreas Färber     }
24415ba4576bSAndreas Färber     type_info.parent = object_class_get_name(OBJECT_CLASS(pvr_pcc));
24425ba4576bSAndreas Färber     type_register(&type_info);
24435b79b1caSAlexey Kardashevskiy 
24443b542549SBharata B Rao #if defined(TARGET_PPC64)
24453b542549SBharata B Rao     type_info.name = g_strdup_printf("%s-"TYPE_SPAPR_CPU_CORE, "host");
24463b542549SBharata B Rao     type_info.parent = TYPE_SPAPR_CPU_CORE;
24477ebaf795SBharata B Rao     type_info.instance_size = sizeof(sPAPRCPUCore);
24487ebaf795SBharata B Rao     type_info.instance_init = NULL;
24497ebaf795SBharata B Rao     type_info.class_init = spapr_cpu_core_class_init;
24507ebaf795SBharata B Rao     type_info.class_data = (void *) "host";
24513b542549SBharata B Rao     type_register(&type_info);
24523b542549SBharata B Rao     g_free((void *)type_info.name);
24533b542549SBharata B Rao #endif
24543b542549SBharata B Rao 
2455715d4b96SThomas Huth     /*
2456715d4b96SThomas Huth      * Update generic CPU family class alias (e.g. on a POWER8NVL host,
2457715d4b96SThomas Huth      * we want "POWER8" to be a "family" alias that points to the current
2458715d4b96SThomas Huth      * host CPU type, too)
2459715d4b96SThomas Huth      */
2460715d4b96SThomas Huth     dc = DEVICE_CLASS(ppc_cpu_get_family_class(pvr_pcc));
2461715d4b96SThomas Huth     for (i = 0; ppc_cpu_aliases[i].alias != NULL; i++) {
2462715d4b96SThomas Huth         if (strcmp(ppc_cpu_aliases[i].alias, dc->desc) == 0) {
2463715d4b96SThomas Huth             ObjectClass *oc = OBJECT_CLASS(pvr_pcc);
2464715d4b96SThomas Huth             char *suffix;
2465715d4b96SThomas Huth 
2466715d4b96SThomas Huth             ppc_cpu_aliases[i].model = g_strdup(object_class_get_name(oc));
2467715d4b96SThomas Huth             suffix = strstr(ppc_cpu_aliases[i].model, "-"TYPE_POWERPC_CPU);
2468715d4b96SThomas Huth             if (suffix) {
2469715d4b96SThomas Huth                 *suffix = 0;
2470715d4b96SThomas Huth             }
2471715d4b96SThomas Huth             ppc_cpu_aliases[i].oc = oc;
2472715d4b96SThomas Huth             break;
2473715d4b96SThomas Huth         }
2474715d4b96SThomas Huth     }
2475715d4b96SThomas Huth 
24765ba4576bSAndreas Färber     return 0;
24775ba4576bSAndreas Färber }
24785ba4576bSAndreas Färber 
2479feaa64c4SDavid Gibson int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function)
2480feaa64c4SDavid Gibson {
2481feaa64c4SDavid Gibson     struct kvm_rtas_token_args args = {
2482feaa64c4SDavid Gibson         .token = token,
2483feaa64c4SDavid Gibson     };
2484feaa64c4SDavid Gibson 
2485feaa64c4SDavid Gibson     if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_RTAS)) {
2486feaa64c4SDavid Gibson         return -ENOENT;
2487feaa64c4SDavid Gibson     }
2488feaa64c4SDavid Gibson 
2489feaa64c4SDavid Gibson     strncpy(args.name, function, sizeof(args.name));
2490feaa64c4SDavid Gibson 
2491feaa64c4SDavid Gibson     return kvm_vm_ioctl(kvm_state, KVM_PPC_RTAS_DEFINE_TOKEN, &args);
2492feaa64c4SDavid Gibson }
249312b1143bSDavid Gibson 
2494e68cb8b4SAlexey Kardashevskiy int kvmppc_get_htab_fd(bool write)
2495e68cb8b4SAlexey Kardashevskiy {
2496e68cb8b4SAlexey Kardashevskiy     struct kvm_get_htab_fd s = {
2497e68cb8b4SAlexey Kardashevskiy         .flags = write ? KVM_GET_HTAB_WRITE : 0,
2498e68cb8b4SAlexey Kardashevskiy         .start_index = 0,
2499e68cb8b4SAlexey Kardashevskiy     };
2500e68cb8b4SAlexey Kardashevskiy 
2501e68cb8b4SAlexey Kardashevskiy     if (!cap_htab_fd) {
2502e68cb8b4SAlexey Kardashevskiy         fprintf(stderr, "KVM version doesn't support saving the hash table\n");
2503e68cb8b4SAlexey Kardashevskiy         return -1;
2504e68cb8b4SAlexey Kardashevskiy     }
2505e68cb8b4SAlexey Kardashevskiy 
2506e68cb8b4SAlexey Kardashevskiy     return kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &s);
2507e68cb8b4SAlexey Kardashevskiy }
2508e68cb8b4SAlexey Kardashevskiy 
2509e68cb8b4SAlexey Kardashevskiy int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns)
2510e68cb8b4SAlexey Kardashevskiy {
2511bc72ad67SAlex Bligh     int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
2512e68cb8b4SAlexey Kardashevskiy     uint8_t buf[bufsize];
2513e68cb8b4SAlexey Kardashevskiy     ssize_t rc;
2514e68cb8b4SAlexey Kardashevskiy 
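    /* Each read() from the HTAB fd yields a sequence of chunks: a
     * struct kvm_get_htab_header (index, n_valid, n_invalid) followed
     * by n_valid HPTEs of HASH_PTE_SIZE_64 bytes each.  Re-encode
     * those chunks into the migration stream. */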
2515e68cb8b4SAlexey Kardashevskiy     do {
2516e68cb8b4SAlexey Kardashevskiy         rc = read(fd, buf, bufsize);
2517e68cb8b4SAlexey Kardashevskiy         if (rc < 0) {
2518e68cb8b4SAlexey Kardashevskiy             fprintf(stderr, "Error reading data from KVM HTAB fd: %s\n",
2519e68cb8b4SAlexey Kardashevskiy                     strerror(errno));
2520e68cb8b4SAlexey Kardashevskiy             return rc;
2521e68cb8b4SAlexey Kardashevskiy         } else if (rc) {
2522e094c4c1SCédric Le Goater             uint8_t *buffer = buf;
2523e094c4c1SCédric Le Goater             ssize_t n = rc;
2524e094c4c1SCédric Le Goater             while (n) {
2525e094c4c1SCédric Le Goater                 struct kvm_get_htab_header *head =
2526e094c4c1SCédric Le Goater                     (struct kvm_get_htab_header *) buffer;
2527e094c4c1SCédric Le Goater                 size_t chunksize = sizeof(*head) +
2528e094c4c1SCédric Le Goater                      HASH_PTE_SIZE_64 * head->n_valid;
2529e094c4c1SCédric Le Goater 
2530e094c4c1SCédric Le Goater                 qemu_put_be32(f, head->index);
2531e094c4c1SCédric Le Goater                 qemu_put_be16(f, head->n_valid);
2532e094c4c1SCédric Le Goater                 qemu_put_be16(f, head->n_invalid);
2533e094c4c1SCédric Le Goater                 qemu_put_buffer(f, (void *)(head + 1),
2534e094c4c1SCédric Le Goater                                 HASH_PTE_SIZE_64 * head->n_valid);
2535e094c4c1SCédric Le Goater 
2536e094c4c1SCédric Le Goater                 buffer += chunksize;
2537e094c4c1SCédric Le Goater                 n -= chunksize;
2538e094c4c1SCédric Le Goater             }
2539e68cb8b4SAlexey Kardashevskiy         }
2540e68cb8b4SAlexey Kardashevskiy     } while ((rc != 0)
2541e68cb8b4SAlexey Kardashevskiy              && ((max_ns < 0)
2542bc72ad67SAlex Bligh                  || ((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) < max_ns)));
2543e68cb8b4SAlexey Kardashevskiy 
2544e68cb8b4SAlexey Kardashevskiy     return (rc == 0) ? 1 : 0;
2545e68cb8b4SAlexey Kardashevskiy }
2546e68cb8b4SAlexey Kardashevskiy 
2547e68cb8b4SAlexey Kardashevskiy int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
2548e68cb8b4SAlexey Kardashevskiy                            uint16_t n_valid, uint16_t n_invalid)
2549e68cb8b4SAlexey Kardashevskiy {
2550e68cb8b4SAlexey Kardashevskiy     struct kvm_get_htab_header *buf;
2551e68cb8b4SAlexey Kardashevskiy     size_t chunksize = sizeof(*buf) + n_valid * HASH_PTE_SIZE_64;
2552e68cb8b4SAlexey Kardashevskiy     ssize_t rc;
2553e68cb8b4SAlexey Kardashevskiy 
2554e68cb8b4SAlexey Kardashevskiy     buf = alloca(chunksize);
2555e68cb8b4SAlexey Kardashevskiy     buf->index = index;
2556e68cb8b4SAlexey Kardashevskiy     buf->n_valid = n_valid;
2557e68cb8b4SAlexey Kardashevskiy     buf->n_invalid = n_invalid;
2558e68cb8b4SAlexey Kardashevskiy 
2559e68cb8b4SAlexey Kardashevskiy     qemu_get_buffer(f, (void *)(buf + 1), HASH_PTE_SIZE_64 * n_valid);
2560e68cb8b4SAlexey Kardashevskiy 
2561e68cb8b4SAlexey Kardashevskiy     rc = write(fd, buf, chunksize);
2562e68cb8b4SAlexey Kardashevskiy     if (rc < 0) {
2563e68cb8b4SAlexey Kardashevskiy         fprintf(stderr, "Error writing KVM hash table: %s\n",
2564e68cb8b4SAlexey Kardashevskiy                 strerror(errno));
2565e68cb8b4SAlexey Kardashevskiy         return rc;
2566e68cb8b4SAlexey Kardashevskiy     }
2567e68cb8b4SAlexey Kardashevskiy     if (rc != chunksize) {
2568e68cb8b4SAlexey Kardashevskiy         /* We should never get a short write on a single chunk */
2569e68cb8b4SAlexey Kardashevskiy         fprintf(stderr, "Short write, restoring KVM hash table\n");
2570e68cb8b4SAlexey Kardashevskiy         return -1;
2571e68cb8b4SAlexey Kardashevskiy     }
2572e68cb8b4SAlexey Kardashevskiy     return 0;
2573e68cb8b4SAlexey Kardashevskiy }
2574e68cb8b4SAlexey Kardashevskiy 
257520d695a9SAndreas Färber bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
25764513d923SGleb Natapov {
25774513d923SGleb Natapov     return true;
25784513d923SGleb Natapov }
2579a1b87fe0SJan Kiszka 
258082169660SScott Wood void kvm_arch_init_irq_routing(KVMState *s)
258182169660SScott Wood {
258282169660SScott Wood }
2583c65f9a07SGreg Kurz 
25841ad9f0a4SDavid Gibson void kvmppc_read_hptes(ppc_hash_pte64_t *hptes, hwaddr ptex, int n)
25851ad9f0a4SDavid Gibson {
25861ad9f0a4SDavid Gibson     struct kvm_get_htab_fd ghf = {
25871ad9f0a4SDavid Gibson         .flags = 0,
25881ad9f0a4SDavid Gibson         .start_index = ptex,
25897c43bca0SAneesh Kumar K.V     };
25901ad9f0a4SDavid Gibson     int fd, rc;
25911ad9f0a4SDavid Gibson     int i;
25927c43bca0SAneesh Kumar K.V 
25931ad9f0a4SDavid Gibson     fd = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &ghf);
25941ad9f0a4SDavid Gibson     if (fd < 0) {
25951ad9f0a4SDavid Gibson         hw_error("kvmppc_read_hptes: Unable to open HPT fd");
25961ad9f0a4SDavid Gibson     }
25971ad9f0a4SDavid Gibson 
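    /* Read the requested range in batches of at most HPTES_PER_GROUP
     * entries: each header is followed by n_valid HPTEs, while
     * n_invalid describes a run of empty entries which we zero out. */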
25981ad9f0a4SDavid Gibson     i = 0;
25991ad9f0a4SDavid Gibson     while (i < n) {
26001ad9f0a4SDavid Gibson         struct kvm_get_htab_header *hdr;
26011ad9f0a4SDavid Gibson         int m = n < HPTES_PER_GROUP ? n : HPTES_PER_GROUP;
26021ad9f0a4SDavid Gibson         char buf[sizeof(*hdr) + m * HASH_PTE_SIZE_64];
26031ad9f0a4SDavid Gibson 
26041ad9f0a4SDavid Gibson         rc = read(fd, buf, sizeof(buf));
26051ad9f0a4SDavid Gibson         if (rc < 0) {
26061ad9f0a4SDavid Gibson             hw_error("kvmppc_read_hptes: Unable to read HPTEs");
26071ad9f0a4SDavid Gibson         }
26081ad9f0a4SDavid Gibson 
26091ad9f0a4SDavid Gibson         hdr = (struct kvm_get_htab_header *)buf;
26101ad9f0a4SDavid Gibson         while ((i < n) && ((char *)hdr < (buf + rc))) {
26111ad9f0a4SDavid Gibson             int invalid = hdr->n_invalid;
26121ad9f0a4SDavid Gibson 
26131ad9f0a4SDavid Gibson             if (hdr->index != (ptex + i)) {
26141ad9f0a4SDavid Gibson                 hw_error("kvmppc_read_hptes: Unexpected HPTE index %"PRIu32
26151ad9f0a4SDavid Gibson                          " != (%"HWADDR_PRIu" + %d)", hdr->index, ptex, i);
26161ad9f0a4SDavid Gibson             }
26171ad9f0a4SDavid Gibson 
26181ad9f0a4SDavid Gibson             memcpy(hptes + i, hdr + 1, HASH_PTE_SIZE_64 * hdr->n_valid);
26191ad9f0a4SDavid Gibson             i += hdr->n_valid;
26201ad9f0a4SDavid Gibson 
26211ad9f0a4SDavid Gibson             if ((n - i) < invalid) {
26221ad9f0a4SDavid Gibson                 invalid = n - i;
26231ad9f0a4SDavid Gibson             }
26241ad9f0a4SDavid Gibson             memset(hptes + i, 0, invalid * HASH_PTE_SIZE_64);
26251ad9f0a4SDavid Gibson             i += hdr->n_invalid;
26261ad9f0a4SDavid Gibson 
26271ad9f0a4SDavid Gibson             hdr = (struct kvm_get_htab_header *)
26281ad9f0a4SDavid Gibson                 ((char *)(hdr + 1) + HASH_PTE_SIZE_64 * hdr->n_valid);
26291ad9f0a4SDavid Gibson         }
26301ad9f0a4SDavid Gibson     }
26311ad9f0a4SDavid Gibson 
26321ad9f0a4SDavid Gibson     close(fd);
26331ad9f0a4SDavid Gibson }
26341ad9f0a4SDavid Gibson 
26351ad9f0a4SDavid Gibson void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1)
26367c43bca0SAneesh Kumar K.V {
26371ad9f0a4SDavid Gibson     int fd, rc;
26387c43bca0SAneesh Kumar K.V     struct kvm_get_htab_fd ghf;
26391ad9f0a4SDavid Gibson     struct {
26401ad9f0a4SDavid Gibson         struct kvm_get_htab_header hdr;
26411ad9f0a4SDavid Gibson         uint64_t pte0;
26421ad9f0a4SDavid Gibson         uint64_t pte1;
26431ad9f0a4SDavid Gibson     } buf;
2644c1385933SAneesh Kumar K.V 
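    /* Writes to the HTAB fd use the same chunk format as reads: a
     * header selecting a single index followed by the two big-endian
     * HPTE doublewords. */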
2645c1385933SAneesh Kumar K.V     ghf.flags = 0;
2646c1385933SAneesh Kumar K.V     ghf.start_index = 0;     /* Ignored */
26471ad9f0a4SDavid Gibson     fd = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &ghf);
26481ad9f0a4SDavid Gibson     if (fd < 0) {
26491ad9f0a4SDavid Gibson         hw_error("kvmppc_write_hpte: Unable to open HPT fd");
2650c1385933SAneesh Kumar K.V     }
2651c1385933SAneesh Kumar K.V 
26521ad9f0a4SDavid Gibson     buf.hdr.n_valid = 1;
26531ad9f0a4SDavid Gibson     buf.hdr.n_invalid = 0;
26541ad9f0a4SDavid Gibson     buf.hdr.index = ptex;
26551ad9f0a4SDavid Gibson     buf.pte0 = cpu_to_be64(pte0);
26561ad9f0a4SDavid Gibson     buf.pte1 = cpu_to_be64(pte1);
26571ad9f0a4SDavid Gibson 
26581ad9f0a4SDavid Gibson     rc = write(fd, &buf, sizeof(buf));
26591ad9f0a4SDavid Gibson     if (rc != sizeof(buf)) {
26601ad9f0a4SDavid Gibson         hw_error("kvmppc_write_hpte: Unable to update KVM HPT");
2661c1385933SAneesh Kumar K.V     }
26621ad9f0a4SDavid Gibson     close(fd);
2663c1385933SAneesh Kumar K.V }
26649e03a040SFrank Blaschka 
26659e03a040SFrank Blaschka int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
2666dc9f06caSPavel Fedin                              uint64_t address, uint32_t data, PCIDevice *dev)
26679e03a040SFrank Blaschka {
26689e03a040SFrank Blaschka     return 0;
26699e03a040SFrank Blaschka }
26701850b6b7SEric Auger 
267138d87493SPeter Xu int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
267238d87493SPeter Xu                                 int vector, PCIDevice *dev)
267338d87493SPeter Xu {
267438d87493SPeter Xu     return 0;
267538d87493SPeter Xu }
267638d87493SPeter Xu 
267738d87493SPeter Xu int kvm_arch_release_virq_post(int virq)
267838d87493SPeter Xu {
267938d87493SPeter Xu     return 0;
268038d87493SPeter Xu }
268138d87493SPeter Xu 
26821850b6b7SEric Auger int kvm_arch_msi_data_to_gsi(uint32_t data)
26831850b6b7SEric Auger {
26841850b6b7SEric Auger     return data & 0xffff;
26851850b6b7SEric Auger }
26864d9392beSThomas Huth 
26874d9392beSThomas Huth int kvmppc_enable_hwrng(void)
26884d9392beSThomas Huth {
26894d9392beSThomas Huth     if (!kvm_enabled() || !kvm_check_extension(kvm_state, KVM_CAP_PPC_HWRNG)) {
26904d9392beSThomas Huth         return -1;
26914d9392beSThomas Huth     }
26924d9392beSThomas Huth 
26934d9392beSThomas Huth     return kvmppc_enable_hcall(kvm_state, H_RANDOM);
26944d9392beSThomas Huth }
2695