xref: /qemu/target/ppc/kvm.c (revision 9ded780c4cc92d15a977dba589d64862e25a340e)
1d76d1650Saurel32 /*
2d76d1650Saurel32  * PowerPC implementation of KVM hooks
3d76d1650Saurel32  *
4d76d1650Saurel32  * Copyright IBM Corp. 2007
590dc8812SScott Wood  * Copyright (C) 2011 Freescale Semiconductor, Inc.
6d76d1650Saurel32  *
7d76d1650Saurel32  * Authors:
8d76d1650Saurel32  *  Jerone Young <jyoung5@us.ibm.com>
9d76d1650Saurel32  *  Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
10d76d1650Saurel32  *  Hollis Blanchard <hollisb@us.ibm.com>
11d76d1650Saurel32  *
12d76d1650Saurel32  * This work is licensed under the terms of the GNU GPL, version 2 or later.
13d76d1650Saurel32  * See the COPYING file in the top-level directory.
14d76d1650Saurel32  *
15d76d1650Saurel32  */
16d76d1650Saurel32 
170d75590dSPeter Maydell #include "qemu/osdep.h"
18eadaada1SAlexander Graf #include <dirent.h>
19d76d1650Saurel32 #include <sys/ioctl.h>
204656e1f0SBenjamin Herrenschmidt #include <sys/vfs.h>
21d76d1650Saurel32 
22d76d1650Saurel32 #include <linux/kvm.h>
23d76d1650Saurel32 
24d76d1650Saurel32 #include "qemu-common.h"
2530f4b05bSDavid Gibson #include "qapi/error.h"
26072ed5f2SThomas Huth #include "qemu/error-report.h"
2733c11879SPaolo Bonzini #include "cpu.h"
28715d4b96SThomas Huth #include "cpu-models.h"
291de7afc9SPaolo Bonzini #include "qemu/timer.h"
309c17d615SPaolo Bonzini #include "sysemu/sysemu.h"
31b3946626SVincent Palatin #include "sysemu/hw_accel.h"
32d76d1650Saurel32 #include "kvm_ppc.h"
339c17d615SPaolo Bonzini #include "sysemu/cpus.h"
349c17d615SPaolo Bonzini #include "sysemu/device_tree.h"
35d5aea6f3SDavid Gibson #include "mmu-hash64.h"
36d76d1650Saurel32 
37f61b4bedSAlexander Graf #include "hw/sysbus.h"
380d09e41aSPaolo Bonzini #include "hw/ppc/spapr.h"
390d09e41aSPaolo Bonzini #include "hw/ppc/spapr_vio.h"
407ebaf795SBharata B Rao #include "hw/ppc/spapr_cpu_core.h"
4198a8b524SAlexey Kardashevskiy #include "hw/ppc/ppc.h"
4231f2cb8fSBharat Bhushan #include "sysemu/watchdog.h"
43b36f100eSAlexey Kardashevskiy #include "trace.h"
4488365d17SBharat Bhushan #include "exec/gdbstub.h"
454c663752SPaolo Bonzini #include "exec/memattrs.h"
469c607668SAlexey Kardashevskiy #include "exec/ram_addr.h"
472d103aaeSMichael Roth #include "sysemu/hostmem.h"
48f348b6d1SVeronia Bahaa #include "qemu/cutils.h"
499c607668SAlexey Kardashevskiy #include "qemu/mmap-alloc.h"
50f3d9f303SSam Bobroff #include "elf.h"
51c64abd1fSSam Bobroff #include "sysemu/kvm_int.h"
52f61b4bedSAlexander Graf 
//#define DEBUG_KVM

#ifdef DEBUG_KVM
/* Debug tracing to stderr; compiled in only when DEBUG_KVM is defined. */
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
/* Compiles to nothing so call sites cost nothing in normal builds. */
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

/* Host procfs path exposing the device-tree nodes of the host CPUs. */
#define PROC_DEVTREE_CPU      "/proc/device-tree/cpus/"
64eadaada1SAlexander Graf 
/* No KVM capabilities beyond the generic baseline are strictly required. */
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

/*
 * Cached host KVM capability flags.  All are probed once in
 * kvm_arch_init(), except cap_papr (activated later by kvmppc_set_papr())
 * and the cap_ppc_safe_* values (filled by kvmppc_get_cpu_characteristics()).
 */
static int cap_interrupt_unset = false; /* KVM_CAP_PPC_UNSET_IRQ */
static int cap_interrupt_level = false; /* KVM_CAP_PPC_IRQ_LEVEL */
static int cap_segstate;                /* KVM_CAP_PPC_SEGSTATE */
static int cap_booke_sregs;             /* KVM_CAP_PPC_BOOKE_SREGS */
static int cap_ppc_smt;                 /* KVM_CAP_PPC_SMT */
static int cap_ppc_smt_possible;        /* KVM_CAP_PPC_SMT_POSSIBLE */
static int cap_ppc_rma;                 /* KVM_CAP_PPC_RMA */
static int cap_spapr_tce;               /* KVM_CAP_SPAPR_TCE */
static int cap_spapr_tce_64;            /* KVM_CAP_SPAPR_TCE_64 */
static int cap_spapr_multitce;          /* KVM_CAP_SPAPR_MULTITCE */
static int cap_spapr_vfio;              /* KVM_CAP_SPAPR_TCE_VFIO */
static int cap_hior;                    /* KVM_CAP_PPC_HIOR */
static int cap_one_reg;                 /* KVM_CAP_ONE_REG */
static int cap_epr;                     /* KVM_CAP_PPC_EPR */
static int cap_ppc_watchdog;            /* KVM_CAP_PPC_BOOKE_WATCHDOG */
static int cap_papr;                    /* set by kvmppc_set_papr(), not here */
static int cap_htab_fd;                 /* KVM_CAP_PPC_HTAB_FD */
static int cap_fixup_hcalls;            /* KVM_CAP_PPC_FIXUP_HCALL */
static int cap_htm;             /* Hardware transactional memory support */
static int cap_mmu_radix;               /* KVM_CAP_PPC_MMU_RADIX */
static int cap_mmu_hash_v3;             /* KVM_CAP_PPC_MMU_HASH_V3 */
static int cap_resize_hpt;              /* KVM_CAP_SPAPR_RESIZE_HPT */
static int cap_ppc_pvr_compat;          /* always false; no KVM cap yet */
static int cap_ppc_safe_cache;
static int cap_ppc_safe_bounds_check;
static int cap_ppc_safe_indirect_branch;

/* Breakpoint instruction opcode — NOTE(review): set elsewhere in this
 * file (not visible in this chunk); confirm it comes from KVM. */
static uint32_t debug_inst_opcode;
973c902d44SBharat Bhushan 
/* XXX We have a race condition where we actually have a level triggered
 *     interrupt, but the infrastructure can't expose that yet, so the guest
 *     takes but ignores it, goes to sleep and never gets notified that there's
 *     still an interrupt pending.
 *
 *     As a quick workaround, let's just wake up again 20 ms after we injected
 *     an interrupt. That way we can assure that we're always reinjecting
 *     interrupts in case the guest swallowed them.
 */
/* Per-process timer driving the workaround above via kvm_kick_cpu(). */
static QEMUTimer *idle_timer;
108c6a94ba5SAlexander Graf 
109d5a68146SAndreas Färber static void kvm_kick_cpu(void *opaque)
110c6a94ba5SAlexander Graf {
111d5a68146SAndreas Färber     PowerPCCPU *cpu = opaque;
112d5a68146SAndreas Färber 
113c08d7424SAndreas Färber     qemu_cpu_kick(CPU(cpu));
114c6a94ba5SAlexander Graf }
115c6a94ba5SAlexander Graf 
11696c9cff0SThomas Huth /* Check whether we are running with KVM-PR (instead of KVM-HV).  This
11796c9cff0SThomas Huth  * should only be used for fallback tests - generally we should use
11896c9cff0SThomas Huth  * explicit capabilities for the features we want, rather than
11996c9cff0SThomas Huth  * assuming what is/isn't available depending on the KVM variant. */
12096c9cff0SThomas Huth static bool kvmppc_is_pr(KVMState *ks)
12196c9cff0SThomas Huth {
12296c9cff0SThomas Huth     /* Assume KVM-PR if the GET_PVINFO capability is available */
12370a0c19eSGreg Kurz     return kvm_vm_check_extension(ks, KVM_CAP_PPC_GET_PVINFO) != 0;
12496c9cff0SThomas Huth }
12596c9cff0SThomas Huth 
1262e9c10ebSIgor Mammedov static int kvm_ppc_register_host_cpu_type(MachineState *ms);
1278acc2ae5SSuraj Jitindar Singh static void kvmppc_get_cpu_characteristics(KVMState *s);
1285ba4576bSAndreas Färber 
/*
 * One-time architecture init: probe every optional host KVM capability
 * into the file-scope cap_* flags and register the host CPU type.
 * Always returns 0; missing capabilities only degrade functionality.
 */
int kvm_arch_init(MachineState *ms, KVMState *s)
{
    cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);
    cap_interrupt_level = kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL);
    cap_segstate = kvm_check_extension(s, KVM_CAP_PPC_SEGSTATE);
    cap_booke_sregs = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_SREGS);
    cap_ppc_smt_possible = kvm_vm_check_extension(s, KVM_CAP_PPC_SMT_POSSIBLE);
    cap_ppc_rma = kvm_check_extension(s, KVM_CAP_PPC_RMA);
    cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);
    cap_spapr_tce_64 = kvm_check_extension(s, KVM_CAP_SPAPR_TCE_64);
    cap_spapr_multitce = kvm_check_extension(s, KVM_CAP_SPAPR_MULTITCE);
    cap_spapr_vfio = kvm_vm_check_extension(s, KVM_CAP_SPAPR_TCE_VFIO);
    cap_one_reg = kvm_check_extension(s, KVM_CAP_ONE_REG);
    cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);
    cap_epr = kvm_check_extension(s, KVM_CAP_PPC_EPR);
    cap_ppc_watchdog = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_WATCHDOG);
    /* Note: we don't set cap_papr here, because this capability is
     * only activated after this by kvmppc_set_papr() */
    cap_htab_fd = kvm_vm_check_extension(s, KVM_CAP_PPC_HTAB_FD);
    cap_fixup_hcalls = kvm_check_extension(s, KVM_CAP_PPC_FIXUP_HCALL);
    cap_ppc_smt = kvm_vm_check_extension(s, KVM_CAP_PPC_SMT);
    cap_htm = kvm_vm_check_extension(s, KVM_CAP_PPC_HTM);
    cap_mmu_radix = kvm_vm_check_extension(s, KVM_CAP_PPC_MMU_RADIX);
    cap_mmu_hash_v3 = kvm_vm_check_extension(s, KVM_CAP_PPC_MMU_HASH_V3);
    cap_resize_hpt = kvm_vm_check_extension(s, KVM_CAP_SPAPR_RESIZE_HPT);
    /* Fills the cap_ppc_safe_* speculation-mitigation flags. */
    kvmppc_get_cpu_characteristics(s);
    /*
     * Note: setting it to false because there is not such capability
     * in KVM at this moment.
     *
     * TODO: call kvm_vm_check_extension() with the right capability
     * after the kernel starts implementing it.*/
    cap_ppc_pvr_compat = false;

    /* Without level-triggered irq support the 20ms idle_timer workaround
     * (see above) is the only thing keeping the guest responsive. */
    if (!cap_interrupt_level) {
        fprintf(stderr, "KVM: Couldn't find level irq capability. Expect the "
                        "VM to stall at times!\n");
    }

    kvm_ppc_register_host_cpu_type(ms);

    return 0;
}
172d76d1650Saurel32 
/* No architecture-specific in-kernel irqchip setup is needed on PPC. */
int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
{
    return 0;
}
177d525ffabSPaolo Bonzini 
1781bc22652SAndreas Färber static int kvm_arch_sync_sregs(PowerPCCPU *cpu)
179d76d1650Saurel32 {
1801bc22652SAndreas Färber     CPUPPCState *cenv = &cpu->env;
1811bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
182861bbc80SAlexander Graf     struct kvm_sregs sregs;
1835666ca4aSScott Wood     int ret;
1845666ca4aSScott Wood 
1855666ca4aSScott Wood     if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
18664e07be5SAlexander Graf         /* What we're really trying to say is "if we're on BookE, we use
18764e07be5SAlexander Graf            the native PVR for now". This is the only sane way to check
18864e07be5SAlexander Graf            it though, so we potentially confuse users that they can run
18964e07be5SAlexander Graf            BookE guests on BookS. Let's hope nobody dares enough :) */
1905666ca4aSScott Wood         return 0;
1915666ca4aSScott Wood     } else {
19290dc8812SScott Wood         if (!cap_segstate) {
19364e07be5SAlexander Graf             fprintf(stderr, "kvm error: missing PVR setting capability\n");
19464e07be5SAlexander Graf             return -ENOSYS;
1955666ca4aSScott Wood         }
1965666ca4aSScott Wood     }
1975666ca4aSScott Wood 
1981bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
1995666ca4aSScott Wood     if (ret) {
2005666ca4aSScott Wood         return ret;
2015666ca4aSScott Wood     }
202861bbc80SAlexander Graf 
203861bbc80SAlexander Graf     sregs.pvr = cenv->spr[SPR_PVR];
2041bc22652SAndreas Färber     return kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
2055666ca4aSScott Wood }
2065666ca4aSScott Wood 
/* Set up a shared TLB array with KVM */
static int kvm_booke206_tlb_init(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    struct kvm_book3e_206_tlb_params params = {};
    struct kvm_config_tlb cfg = {};
    unsigned int entries = 0;
    int ret, i;

    /* Silently skip when the host kernel can't share the TLB. */
    if (!kvm_enabled() ||
        !kvm_check_extension(cs->kvm_state, KVM_CAP_SW_TLB)) {
        return 0;
    }

    assert(ARRAY_SIZE(params.tlb_sizes) == BOOKE206_MAX_TLBN);

    /* Describe each TLB's geometry to the kernel and total the entries. */
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        params.tlb_sizes[i] = booke206_tlb_size(env, i);
        params.tlb_ways[i] = booke206_tlb_ways(env, i);
        entries += params.tlb_sizes[i];
    }

    assert(entries == env->nb_tlb);
    /* The array is shared with the kernel, so layouts must match exactly. */
    assert(sizeof(struct kvm_book3e_206_tlb_entry) == sizeof(ppcmas_tlb_t));

    env->tlb_dirty = true;

    /* Hand QEMU's own TLB storage to KVM for direct (zero-copy) sharing. */
    cfg.array = (uintptr_t)env->tlb.tlbm;
    cfg.array_len = sizeof(ppcmas_tlb_t) * entries;
    cfg.params = (uintptr_t)&params;
    cfg.mmu_type = KVM_MMU_FSL_BOOKE_NOHV;

    ret = kvm_vcpu_enable_cap(cs, KVM_CAP_SW_TLB, 0, (uintptr_t)&cfg);
    if (ret < 0) {
        fprintf(stderr, "%s: couldn't enable KVM_CAP_SW_TLB: %s\n",
                __func__, strerror(-ret));
        return ret;
    }

    env->kvm_sw_tlb = true;
    return 0;
}
25093dd5e85SScott Wood 
2514656e1f0SBenjamin Herrenschmidt 
2524656e1f0SBenjamin Herrenschmidt #if defined(TARGET_PPC64)
/*
 * Synthesize SMMU info for kernels predating KVM_PPC_GET_SMMU_INFO.
 * Fills *info with a conservative page/segment-size table based on
 * whether we detect PR or HV KVM (see the long comment below).
 */
static void kvm_get_fallback_smmu_info(PowerPCCPU *cpu,
                                       struct kvm_ppc_smmu_info *info)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    memset(info, 0, sizeof(*info));

    /* We don't have the new KVM_PPC_GET_SMMU_INFO ioctl, so
     * need to "guess" what the supported page sizes are.
     *
     * For that to work we make a few assumptions:
     *
     * - Check whether we are running "PR" KVM which only supports 4K
     *   and 16M pages, but supports them regardless of the backing
     *   store characteritics. We also don't support 1T segments.
     *
     *   This is safe as if HV KVM ever supports that capability or PR
     *   KVM grows supports for more page/segment sizes, those versions
     *   will have implemented KVM_CAP_PPC_GET_SMMU_INFO and thus we
     *   will not hit this fallback
     *
     * - Else we are running HV KVM. This means we only support page
     *   sizes that fit in the backing store. Additionally we only
     *   advertize 64K pages if the processor is ARCH 2.06 and we assume
     *   P7 encodings for the SLB and hash table. Here too, we assume
     *   support for any newer processor will mean a kernel that
     *   implements KVM_CAP_PPC_GET_SMMU_INFO and thus doesn't hit
     *   this fallback.
     */
    if (kvmppc_is_pr(cs->kvm_state)) {
        /* No flags */
        info->flags = 0;
        info->slb_size = 64;

        /* Standard 4k base page size segment */
        info->sps[0].page_shift = 12;
        info->sps[0].slb_enc = 0;
        info->sps[0].enc[0].page_shift = 12;
        info->sps[0].enc[0].pte_enc = 0;

        /* Standard 16M large page size segment */
        info->sps[1].page_shift = 24;
        info->sps[1].slb_enc = SLB_VSID_L;
        info->sps[1].enc[0].page_shift = 24;
        info->sps[1].enc[0].pte_enc = 0;
    } else {
        int i = 0;

        /* HV KVM has backing store size restrictions */
        info->flags = KVM_PPC_PAGE_SIZES_REAL;

        if (env->mmu_model & POWERPC_MMU_1TSEG) {
            info->flags |= KVM_PPC_1T_SEGMENTS;
        }

        if (POWERPC_MMU_VER(env->mmu_model) == POWERPC_MMU_VER_2_06 ||
           POWERPC_MMU_VER(env->mmu_model) == POWERPC_MMU_VER_2_07) {
            info->slb_size = 32;
        } else {
            info->slb_size = 64;
        }

        /* Standard 4k base page size segment */
        info->sps[i].page_shift = 12;
        info->sps[i].slb_enc = 0;
        info->sps[i].enc[0].page_shift = 12;
        info->sps[i].enc[0].pte_enc = 0;
        i++;

        /* 64K on MMU 2.06 and later */
        if (POWERPC_MMU_VER(env->mmu_model) == POWERPC_MMU_VER_2_06 ||
            POWERPC_MMU_VER(env->mmu_model) == POWERPC_MMU_VER_2_07) {
            info->sps[i].page_shift = 16;
            info->sps[i].slb_enc = 0x110;
            info->sps[i].enc[0].page_shift = 16;
            info->sps[i].enc[0].pte_enc = 1;
            i++;
        }

        /* Standard 16M large page size segment */
        info->sps[i].page_shift = 24;
        info->sps[i].slb_enc = SLB_VSID_L;
        info->sps[i].enc[0].page_shift = 24;
        info->sps[i].enc[0].pte_enc = 0;
    }
}
3404656e1f0SBenjamin Herrenschmidt 
341a60f24b5SAndreas Färber static void kvm_get_smmu_info(PowerPCCPU *cpu, struct kvm_ppc_smmu_info *info)
3424656e1f0SBenjamin Herrenschmidt {
343a60f24b5SAndreas Färber     CPUState *cs = CPU(cpu);
3444656e1f0SBenjamin Herrenschmidt     int ret;
3454656e1f0SBenjamin Herrenschmidt 
346a60f24b5SAndreas Färber     if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) {
347a60f24b5SAndreas Färber         ret = kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_SMMU_INFO, info);
3484656e1f0SBenjamin Herrenschmidt         if (ret == 0) {
3494656e1f0SBenjamin Herrenschmidt             return;
3504656e1f0SBenjamin Herrenschmidt         }
3514656e1f0SBenjamin Herrenschmidt     }
3524656e1f0SBenjamin Herrenschmidt 
353a60f24b5SAndreas Färber     kvm_get_fallback_smmu_info(cpu, info);
3544656e1f0SBenjamin Herrenschmidt }
3554656e1f0SBenjamin Herrenschmidt 
356c64abd1fSSam Bobroff struct ppc_radix_page_info *kvm_get_radix_page_info(void)
357c64abd1fSSam Bobroff {
358c64abd1fSSam Bobroff     KVMState *s = KVM_STATE(current_machine->accelerator);
359c64abd1fSSam Bobroff     struct ppc_radix_page_info *radix_page_info;
360c64abd1fSSam Bobroff     struct kvm_ppc_rmmu_info rmmu_info;
361c64abd1fSSam Bobroff     int i;
362c64abd1fSSam Bobroff 
363c64abd1fSSam Bobroff     if (!kvm_check_extension(s, KVM_CAP_PPC_MMU_RADIX)) {
364c64abd1fSSam Bobroff         return NULL;
365c64abd1fSSam Bobroff     }
366c64abd1fSSam Bobroff     if (kvm_vm_ioctl(s, KVM_PPC_GET_RMMU_INFO, &rmmu_info)) {
367c64abd1fSSam Bobroff         return NULL;
368c64abd1fSSam Bobroff     }
369c64abd1fSSam Bobroff     radix_page_info = g_malloc0(sizeof(*radix_page_info));
370c64abd1fSSam Bobroff     radix_page_info->count = 0;
371c64abd1fSSam Bobroff     for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
372c64abd1fSSam Bobroff         if (rmmu_info.ap_encodings[i]) {
373c64abd1fSSam Bobroff             radix_page_info->entries[i] = rmmu_info.ap_encodings[i];
374c64abd1fSSam Bobroff             radix_page_info->count++;
375c64abd1fSSam Bobroff         }
376c64abd1fSSam Bobroff     }
377c64abd1fSSam Bobroff     return radix_page_info;
378c64abd1fSSam Bobroff }
379c64abd1fSSam Bobroff 
380b4db5413SSuraj Jitindar Singh target_ulong kvmppc_configure_v3_mmu(PowerPCCPU *cpu,
381b4db5413SSuraj Jitindar Singh                                      bool radix, bool gtse,
382b4db5413SSuraj Jitindar Singh                                      uint64_t proc_tbl)
383b4db5413SSuraj Jitindar Singh {
384b4db5413SSuraj Jitindar Singh     CPUState *cs = CPU(cpu);
385b4db5413SSuraj Jitindar Singh     int ret;
386b4db5413SSuraj Jitindar Singh     uint64_t flags = 0;
387b4db5413SSuraj Jitindar Singh     struct kvm_ppc_mmuv3_cfg cfg = {
388b4db5413SSuraj Jitindar Singh         .process_table = proc_tbl,
389b4db5413SSuraj Jitindar Singh     };
390b4db5413SSuraj Jitindar Singh 
391b4db5413SSuraj Jitindar Singh     if (radix) {
392b4db5413SSuraj Jitindar Singh         flags |= KVM_PPC_MMUV3_RADIX;
393b4db5413SSuraj Jitindar Singh     }
394b4db5413SSuraj Jitindar Singh     if (gtse) {
395b4db5413SSuraj Jitindar Singh         flags |= KVM_PPC_MMUV3_GTSE;
396b4db5413SSuraj Jitindar Singh     }
397b4db5413SSuraj Jitindar Singh     cfg.flags = flags;
398b4db5413SSuraj Jitindar Singh     ret = kvm_vm_ioctl(cs->kvm_state, KVM_PPC_CONFIGURE_V3_MMU, &cfg);
399b4db5413SSuraj Jitindar Singh     switch (ret) {
400b4db5413SSuraj Jitindar Singh     case 0:
401b4db5413SSuraj Jitindar Singh         return H_SUCCESS;
402b4db5413SSuraj Jitindar Singh     case -EINVAL:
403b4db5413SSuraj Jitindar Singh         return H_PARAMETER;
404b4db5413SSuraj Jitindar Singh     case -ENODEV:
405b4db5413SSuraj Jitindar Singh         return H_NOT_AVAILABLE;
406b4db5413SSuraj Jitindar Singh     default:
407b4db5413SSuraj Jitindar Singh         return H_HARDWARE;
408b4db5413SSuraj Jitindar Singh     }
409b4db5413SSuraj Jitindar Singh }
410b4db5413SSuraj Jitindar Singh 
4114656e1f0SBenjamin Herrenschmidt static bool kvm_valid_page_size(uint32_t flags, long rampgsize, uint32_t shift)
4124656e1f0SBenjamin Herrenschmidt {
4134656e1f0SBenjamin Herrenschmidt     if (!(flags & KVM_PPC_PAGE_SIZES_REAL)) {
4144656e1f0SBenjamin Herrenschmidt         return true;
4154656e1f0SBenjamin Herrenschmidt     }
4164656e1f0SBenjamin Herrenschmidt 
4174656e1f0SBenjamin Herrenschmidt     return (1ul << shift) <= rampgsize;
4184656e1f0SBenjamin Herrenschmidt }
4194656e1f0SBenjamin Herrenschmidt 
420df587133SThomas Huth static long max_cpu_page_size;
421df587133SThomas Huth 
/*
 * Derive the guest-visible page-size table (env->sps) by intersecting
 * the host SMMU capabilities with what the backing store can honour,
 * and strip MMU model bits (1T segments, 64K pages) the host lacks.
 * The KVM query is cached in statics, so KVM is asked only once even
 * with multiple vCPUs.
 */
static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
{
    static struct kvm_ppc_smmu_info smmu_info;
    static bool has_smmu_info;
    CPUPPCState *env = &cpu->env;
    int iq, ik, jq, jk;
    bool has_64k_pages = false;

    /* We only handle page sizes for 64-bit server guests for now */
    if (!(env->mmu_model & POWERPC_MMU_64)) {
        return;
    }

    /* Collect MMU info from kernel if not already */
    if (!has_smmu_info) {
        kvm_get_smmu_info(cpu, &smmu_info);
        has_smmu_info = true;
    }

    if (!max_cpu_page_size) {
        max_cpu_page_size = qemu_getrampagesize();
    }

    /* Convert to QEMU form */
    memset(&env->sps, 0, sizeof(env->sps));

    /* If we have HV KVM, we need to forbid CI large pages if our
     * host page size is smaller than 64K.
     */
    if (smmu_info.flags & KVM_PPC_PAGE_SIZES_REAL) {
        env->ci_large_pages = getpagesize() >= 0x10000;
    }

    /*
     * XXX This loop should be an entry wide AND of the capabilities that
     *     the selected CPU has with the capabilities that KVM supports.
     */
    /* ik/jk index the kernel's table, iq/jq the (denser) QEMU table. */
    for (ik = iq = 0; ik < KVM_PPC_PAGE_SIZES_MAX_SZ; ik++) {
        struct ppc_one_seg_page_size *qsps = &env->sps.sps[iq];
        struct kvm_ppc_one_seg_page_size *ksps = &smmu_info.sps[ik];

        /* Skip segment sizes the backing store cannot honour. */
        if (!kvm_valid_page_size(smmu_info.flags, max_cpu_page_size,
                                 ksps->page_shift)) {
            continue;
        }
        qsps->page_shift = ksps->page_shift;
        qsps->slb_enc = ksps->slb_enc;
        for (jk = jq = 0; jk < KVM_PPC_PAGE_SIZES_MAX_SZ; jk++) {
            if (!kvm_valid_page_size(smmu_info.flags, max_cpu_page_size,
                                     ksps->enc[jk].page_shift)) {
                continue;
            }
            if (ksps->enc[jk].page_shift == 16) {
                has_64k_pages = true;
            }
            qsps->enc[jq].page_shift = ksps->enc[jk].page_shift;
            qsps->enc[jq].pte_enc = ksps->enc[jk].pte_enc;
            if (++jq >= PPC_PAGE_SIZES_MAX_SZ) {
                break;
            }
        }
        if (++iq >= PPC_PAGE_SIZES_MAX_SZ) {
            break;
        }
    }
    env->slb_nr = smmu_info.slb_size;
    if (!(smmu_info.flags & KVM_PPC_1T_SEGMENTS)) {
        env->mmu_model &= ~POWERPC_MMU_1TSEG;
    }
    /* No surviving 64K encoding means the guest can't use 64K pages. */
    if (!has_64k_pages) {
        env->mmu_model &= ~POWERPC_MMU_64K;
    }
}
495df587133SThomas Huth 
/*
 * Check that the memory backend at QOM path obj_path uses pages at
 * least as large as the largest CPU page size (max_cpu_page_size,
 * cached by kvm_fixup_page_sizes()).
 * NOTE(review): assumes obj_path resolves to a valid object — confirm
 * object_property_get_str() tolerates a NULL mem_obj for bad paths.
 */
bool kvmppc_is_mem_backend_page_size_ok(const char *obj_path)
{
    Object *mem_obj = object_resolve_path(obj_path, NULL);
    char *mempath = object_property_get_str(mem_obj, "mem-path", NULL);
    long pagesize;

    if (mempath) {
        /* File-backed RAM: page size depends on the backing filesystem. */
        pagesize = qemu_mempath_getpagesize(mempath);
        g_free(mempath);
    } else {
        /* Anonymous RAM uses the host's base page size. */
        pagesize = getpagesize();
    }

    return pagesize >= max_cpu_page_size;
}
511df587133SThomas Huth 
5124656e1f0SBenjamin Herrenschmidt #else /* defined (TARGET_PPC64) */
5134656e1f0SBenjamin Herrenschmidt 
/* No page-size fixups are needed on non-64-bit PPC targets. */
static inline void kvm_fixup_page_sizes(PowerPCCPU *cpu)
{
}
5174656e1f0SBenjamin Herrenschmidt 
/*
 * Non-TARGET_PPC64 stub: there is no backing-store page-size
 * constraint to check, so every backend is acceptable.
 */
bool kvmppc_is_mem_backend_page_size_ok(const char *obj_path)
{
    (void)obj_path;
    return true;
}
522df587133SThomas Huth 
5234656e1f0SBenjamin Herrenschmidt #endif /* !defined (TARGET_PPC64) */
5244656e1f0SBenjamin Herrenschmidt 
525b164e48eSEduardo Habkost unsigned long kvm_arch_vcpu_id(CPUState *cpu)
526b164e48eSEduardo Habkost {
5272e886fb3SSam Bobroff     return POWERPC_CPU(cpu)->vcpu_id;
528b164e48eSEduardo Habkost }
529b164e48eSEduardo Habkost 
/* e500 supports 2 h/w breakpoint and 2 watchpoint.
 * book3s supports only 1 watchpoint, so array size
 * of 4 is sufficient for now.
 */
#define MAX_HW_BKPTS 4

/* One registered hardware debug point (breakpoint or watchpoint). */
static struct HWBreakpoint {
    target_ulong addr;
    int type;
} hw_debug_points[MAX_HW_BKPTS];

static CPUWatchpoint hw_watchpoint;

/* Default there is no breakpoint and watchpoint supported */
static int max_hw_breakpoint;   /* host limit, set by kvmppc_hw_debug_points_init() */
static int max_hw_watchpoint;   /* host limit, set by kvmppc_hw_debug_points_init() */
static int nb_hw_breakpoint;    /* currently registered breakpoints */
static int nb_hw_watchpoint;    /* currently registered watchpoints */
54888365d17SBharat Bhushan 
54988365d17SBharat Bhushan static void kvmppc_hw_debug_points_init(CPUPPCState *cenv)
55088365d17SBharat Bhushan {
55188365d17SBharat Bhushan     if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
55288365d17SBharat Bhushan         max_hw_breakpoint = 2;
55388365d17SBharat Bhushan         max_hw_watchpoint = 2;
55488365d17SBharat Bhushan     }
55588365d17SBharat Bhushan 
55688365d17SBharat Bhushan     if ((max_hw_breakpoint + max_hw_watchpoint) > MAX_HW_BKPTS) {
55788365d17SBharat Bhushan         fprintf(stderr, "Error initializing h/w breakpoints\n");
55888365d17SBharat Bhushan         return;
55988365d17SBharat Bhushan     }
56088365d17SBharat Bhushan }
56188365d17SBharat Bhushan 
/*
 * Per-vcpu KVM initialisation: fix up page sizes, synchronize sregs,
 * create the kick timer, perform MMU-model specific setup and cache the
 * h/w debug limits and software-breakpoint opcode.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int kvm_arch_init_vcpu(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *cenv = &cpu->env;
    int ret;

    /* Gather server mmu info from KVM and update the CPU state */
    kvm_fixup_page_sizes(cpu);

    /* Synchronize sregs with kvm */
    ret = kvm_arch_sync_sregs(cpu);
    if (ret) {
        if (ret == -EINVAL) {
            error_report("Register sync failed... If you're using kvm-hv.ko,"
                         " only \"-cpu host\" is possible");
        }
        return ret;
    }

    /* Timer used to kick the vcpu out of its idle state (kvm_kick_cpu) */
    idle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, kvm_kick_cpu, cpu);

    switch (cenv->mmu_model) {
    case POWERPC_MMU_BOOKE206:
        /* This target supports access to KVM's guest TLB */
        ret = kvm_booke206_tlb_init(cpu);
        break;
    case POWERPC_MMU_2_07:
        if (!cap_htm && !kvmppc_is_pr(cs->kvm_state)) {
            /* KVM-HV has transactional memory on POWER8 also without the
             * KVM_CAP_PPC_HTM extension, so enable it here instead as
             * long as it's available to userspace on the host. */
            if (qemu_getauxval(AT_HWCAP2) & PPC_FEATURE2_HAS_HTM) {
                cap_htm = true;
            }
        }
        break;
    default:
        break;
    }

    /* Cache the opcode KVM uses for software breakpoints */
    kvm_get_one_reg(cs, KVM_REG_PPC_DEBUG_INST, &debug_inst_opcode);
    kvmppc_hw_debug_points_init(cenv);

    return ret;
}
607d76d1650Saurel32 
6081bc22652SAndreas Färber static void kvm_sw_tlb_put(PowerPCCPU *cpu)
60993dd5e85SScott Wood {
6101bc22652SAndreas Färber     CPUPPCState *env = &cpu->env;
6111bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
61293dd5e85SScott Wood     struct kvm_dirty_tlb dirty_tlb;
61393dd5e85SScott Wood     unsigned char *bitmap;
61493dd5e85SScott Wood     int ret;
61593dd5e85SScott Wood 
61693dd5e85SScott Wood     if (!env->kvm_sw_tlb) {
61793dd5e85SScott Wood         return;
61893dd5e85SScott Wood     }
61993dd5e85SScott Wood 
62093dd5e85SScott Wood     bitmap = g_malloc((env->nb_tlb + 7) / 8);
62193dd5e85SScott Wood     memset(bitmap, 0xFF, (env->nb_tlb + 7) / 8);
62293dd5e85SScott Wood 
62393dd5e85SScott Wood     dirty_tlb.bitmap = (uintptr_t)bitmap;
62493dd5e85SScott Wood     dirty_tlb.num_dirty = env->nb_tlb;
62593dd5e85SScott Wood 
6261bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_DIRTY_TLB, &dirty_tlb);
62793dd5e85SScott Wood     if (ret) {
62893dd5e85SScott Wood         fprintf(stderr, "%s: KVM_DIRTY_TLB: %s\n",
62993dd5e85SScott Wood                 __func__, strerror(-ret));
63093dd5e85SScott Wood     }
63193dd5e85SScott Wood 
63293dd5e85SScott Wood     g_free(bitmap);
63393dd5e85SScott Wood }
63493dd5e85SScott Wood 
635d67d40eaSDavid Gibson static void kvm_get_one_spr(CPUState *cs, uint64_t id, int spr)
636d67d40eaSDavid Gibson {
637d67d40eaSDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
638d67d40eaSDavid Gibson     CPUPPCState *env = &cpu->env;
639d67d40eaSDavid Gibson     union {
640d67d40eaSDavid Gibson         uint32_t u32;
641d67d40eaSDavid Gibson         uint64_t u64;
642d67d40eaSDavid Gibson     } val;
643d67d40eaSDavid Gibson     struct kvm_one_reg reg = {
644d67d40eaSDavid Gibson         .id = id,
645d67d40eaSDavid Gibson         .addr = (uintptr_t) &val,
646d67d40eaSDavid Gibson     };
647d67d40eaSDavid Gibson     int ret;
648d67d40eaSDavid Gibson 
649d67d40eaSDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
650d67d40eaSDavid Gibson     if (ret != 0) {
651b36f100eSAlexey Kardashevskiy         trace_kvm_failed_spr_get(spr, strerror(errno));
652d67d40eaSDavid Gibson     } else {
653d67d40eaSDavid Gibson         switch (id & KVM_REG_SIZE_MASK) {
654d67d40eaSDavid Gibson         case KVM_REG_SIZE_U32:
655d67d40eaSDavid Gibson             env->spr[spr] = val.u32;
656d67d40eaSDavid Gibson             break;
657d67d40eaSDavid Gibson 
658d67d40eaSDavid Gibson         case KVM_REG_SIZE_U64:
659d67d40eaSDavid Gibson             env->spr[spr] = val.u64;
660d67d40eaSDavid Gibson             break;
661d67d40eaSDavid Gibson 
662d67d40eaSDavid Gibson         default:
663d67d40eaSDavid Gibson             /* Don't handle this size yet */
664d67d40eaSDavid Gibson             abort();
665d67d40eaSDavid Gibson         }
666d67d40eaSDavid Gibson     }
667d67d40eaSDavid Gibson }
668d67d40eaSDavid Gibson 
669d67d40eaSDavid Gibson static void kvm_put_one_spr(CPUState *cs, uint64_t id, int spr)
670d67d40eaSDavid Gibson {
671d67d40eaSDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
672d67d40eaSDavid Gibson     CPUPPCState *env = &cpu->env;
673d67d40eaSDavid Gibson     union {
674d67d40eaSDavid Gibson         uint32_t u32;
675d67d40eaSDavid Gibson         uint64_t u64;
676d67d40eaSDavid Gibson     } val;
677d67d40eaSDavid Gibson     struct kvm_one_reg reg = {
678d67d40eaSDavid Gibson         .id = id,
679d67d40eaSDavid Gibson         .addr = (uintptr_t) &val,
680d67d40eaSDavid Gibson     };
681d67d40eaSDavid Gibson     int ret;
682d67d40eaSDavid Gibson 
683d67d40eaSDavid Gibson     switch (id & KVM_REG_SIZE_MASK) {
684d67d40eaSDavid Gibson     case KVM_REG_SIZE_U32:
685d67d40eaSDavid Gibson         val.u32 = env->spr[spr];
686d67d40eaSDavid Gibson         break;
687d67d40eaSDavid Gibson 
688d67d40eaSDavid Gibson     case KVM_REG_SIZE_U64:
689d67d40eaSDavid Gibson         val.u64 = env->spr[spr];
690d67d40eaSDavid Gibson         break;
691d67d40eaSDavid Gibson 
692d67d40eaSDavid Gibson     default:
693d67d40eaSDavid Gibson         /* Don't handle this size yet */
694d67d40eaSDavid Gibson         abort();
695d67d40eaSDavid Gibson     }
696d67d40eaSDavid Gibson 
697d67d40eaSDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
698d67d40eaSDavid Gibson     if (ret != 0) {
699b36f100eSAlexey Kardashevskiy         trace_kvm_failed_spr_set(spr, strerror(errno));
700d67d40eaSDavid Gibson     }
701d67d40eaSDavid Gibson }
702d67d40eaSDavid Gibson 
/*
 * Transfer the FP, VSX and Altivec (VMX) register state from QEMU to
 * KVM via the ONE_REG interface.  On VSX CPUs each 128-bit VSR is split
 * in QEMU between env->fpr[i] (the float64 half) and env->vsr[i]; the
 * two halves are recombined here in host memory order.
 *
 * Returns 0 on success or the negative error of the failing ioctl.
 */
static int kvm_put_fp(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    struct kvm_one_reg reg;
    int i;
    int ret;

    if (env->insns_flags & PPC_FLOAT) {
        uint64_t fpscr = env->fpscr;
        bool vsx = !!(env->insns_flags2 & PPC2_VSX);

        reg.id = KVM_REG_PPC_FPSCR;
        reg.addr = (uintptr_t)&fpscr;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            DPRINTF("Unable to set FPSCR to KVM: %s\n", strerror(errno));
            return ret;
        }

        for (i = 0; i < 32; i++) {
            uint64_t vsr[2];

            /* Assemble the 128-bit VSR image in host-endian order */
#ifdef HOST_WORDS_BIGENDIAN
            vsr[0] = float64_val(env->fpr[i]);
            vsr[1] = env->vsr[i];
#else
            vsr[0] = env->vsr[i];
            vsr[1] = float64_val(env->fpr[i]);
#endif
            reg.addr = (uintptr_t) &vsr;
            /* Without VSX only the FPR (float64) half is transferred */
            reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);

            ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
            if (ret < 0) {
                DPRINTF("Unable to set %s%d to KVM: %s\n", vsx ? "VSR" : "FPR",
                        i, strerror(errno));
                return ret;
            }
        }
    }

    if (env->insns_flags & PPC_ALTIVEC) {
        reg.id = KVM_REG_PPC_VSCR;
        reg.addr = (uintptr_t)&env->vscr;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            DPRINTF("Unable to set VSCR to KVM: %s\n", strerror(errno));
            return ret;
        }

        for (i = 0; i < 32; i++) {
            reg.id = KVM_REG_PPC_VR(i);
            reg.addr = (uintptr_t)&env->avr[i];
            ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
            if (ret < 0) {
                DPRINTF("Unable to set VR%d to KVM: %s\n", i, strerror(errno));
                return ret;
            }
        }
    }

    return 0;
}
76770b79849SDavid Gibson 
/*
 * Fetch the FP, VSX and Altivec (VMX) register state from KVM via the
 * ONE_REG interface — the mirror image of kvm_put_fp().  Each 128-bit
 * VSR arrives in host memory order and is split back into env->fpr[i]
 * (the float64 half) and, with VSX, env->vsr[i].
 *
 * Returns 0 on success or the negative error of the failing ioctl.
 */
static int kvm_get_fp(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    struct kvm_one_reg reg;
    int i;
    int ret;

    if (env->insns_flags & PPC_FLOAT) {
        uint64_t fpscr;
        bool vsx = !!(env->insns_flags2 & PPC2_VSX);

        reg.id = KVM_REG_PPC_FPSCR;
        reg.addr = (uintptr_t)&fpscr;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret < 0) {
            DPRINTF("Unable to get FPSCR from KVM: %s\n", strerror(errno));
            return ret;
        } else {
            env->fpscr = fpscr;
        }

        for (i = 0; i < 32; i++) {
            uint64_t vsr[2];

            reg.addr = (uintptr_t) &vsr;
            /* Without VSX only the FPR (float64) half is transferred */
            reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);

            ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
            if (ret < 0) {
                DPRINTF("Unable to get %s%d from KVM: %s\n",
                        vsx ? "VSR" : "FPR", i, strerror(errno));
                return ret;
            } else {
                /* Split the host-endian 128-bit image back into halves */
#ifdef HOST_WORDS_BIGENDIAN
                env->fpr[i] = vsr[0];
                if (vsx) {
                    env->vsr[i] = vsr[1];
                }
#else
                env->fpr[i] = vsr[1];
                if (vsx) {
                    env->vsr[i] = vsr[0];
                }
#endif
            }
        }
    }

    if (env->insns_flags & PPC_ALTIVEC) {
        reg.id = KVM_REG_PPC_VSCR;
        reg.addr = (uintptr_t)&env->vscr;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret < 0) {
            DPRINTF("Unable to get VSCR from KVM: %s\n", strerror(errno));
            return ret;
        }

        for (i = 0; i < 32; i++) {
            reg.id = KVM_REG_PPC_VR(i);
            reg.addr = (uintptr_t)&env->avr[i];
            ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
            if (ret < 0) {
                DPRINTF("Unable to get VR%d from KVM: %s\n",
                        i, strerror(errno));
                return ret;
            }
        }
    }

    return 0;
}
84070b79849SDavid Gibson 
8419b00ea49SDavid Gibson #if defined(TARGET_PPC64)
/*
 * Read the para-virtualised area (VPA) registration state from KVM:
 * the master VPA address, the SLB shadow buffer and the dispatch trace
 * log (DTL).  Returns 0 on success, negative errno on failure.
 */
static int kvm_get_vpa(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    struct kvm_one_reg reg;
    int ret;

    reg.id = KVM_REG_PPC_VPA_ADDR;
    reg.addr = (uintptr_t)&env->vpa_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret < 0) {
        DPRINTF("Unable to get VPA address from KVM: %s\n", strerror(errno));
        return ret;
    }

    /* The addr/size pair is read through a single register access, so
     * the size field must sit directly after the address field in
     * CPUPPCState — the assert enforces that layout */
    assert((uintptr_t)&env->slb_shadow_size
           == ((uintptr_t)&env->slb_shadow_addr + 8));
    reg.id = KVM_REG_PPC_VPA_SLB;
    reg.addr = (uintptr_t)&env->slb_shadow_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret < 0) {
        DPRINTF("Unable to get SLB shadow state from KVM: %s\n",
                strerror(errno));
        return ret;
    }

    assert((uintptr_t)&env->dtl_size == ((uintptr_t)&env->dtl_addr + 8));
    reg.id = KVM_REG_PPC_VPA_DTL;
    reg.addr = (uintptr_t)&env->dtl_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret < 0) {
        DPRINTF("Unable to get dispatch trace log state from KVM: %s\n",
                strerror(errno));
        return ret;
    }

    return 0;
}
8809b00ea49SDavid Gibson 
/*
 * Register (or deregister) the VPA, SLB shadow and DTL with KVM from
 * the addresses held in CPUPPCState.  Ordering matters — see the
 * comment below.  Returns 0 on success, negative errno on failure.
 */
static int kvm_put_vpa(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    struct kvm_one_reg reg;
    int ret;

    /* SLB shadow or DTL can't be registered unless a master VPA is
     * registered.  That means when restoring state, if a VPA *is*
     * registered, we need to set that up first.  If not, we need to
     * deregister the others before deregistering the master VPA */
    assert(env->vpa_addr || !(env->slb_shadow_addr || env->dtl_addr));

    if (env->vpa_addr) {
        reg.id = KVM_REG_PPC_VPA_ADDR;
        reg.addr = (uintptr_t)&env->vpa_addr;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno));
            return ret;
        }
    }

    /* addr/size pairs are written through one register access, so the
     * fields must be adjacent — the asserts enforce that layout */
    assert((uintptr_t)&env->slb_shadow_size
           == ((uintptr_t)&env->slb_shadow_addr + 8));
    reg.id = KVM_REG_PPC_VPA_SLB;
    reg.addr = (uintptr_t)&env->slb_shadow_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret < 0) {
        DPRINTF("Unable to set SLB shadow state to KVM: %s\n", strerror(errno));
        return ret;
    }

    assert((uintptr_t)&env->dtl_size == ((uintptr_t)&env->dtl_addr + 8));
    reg.id = KVM_REG_PPC_VPA_DTL;
    reg.addr = (uintptr_t)&env->dtl_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret < 0) {
        DPRINTF("Unable to set dispatch trace log state to KVM: %s\n",
                strerror(errno));
        return ret;
    }

    /* With no master VPA, deregister it last, after SLB shadow and DTL
     * have been deregistered above */
    if (!env->vpa_addr) {
        reg.id = KVM_REG_PPC_VPA_ADDR;
        reg.addr = (uintptr_t)&env->vpa_addr;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno));
            return ret;
        }
    }

    return 0;
}
9369b00ea49SDavid Gibson #endif /* TARGET_PPC64 */
9379b00ea49SDavid Gibson 
/*
 * Transfer the book3s MMU segment state (PVR, SDR1, SLB entries,
 * segment registers and BATs) to KVM via KVM_SET_SREGS.
 *
 * Returns the KVM_SET_SREGS ioctl result (0 on success).
 */
int kvmppc_put_books_sregs(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    struct kvm_sregs sregs;
    int i;

    sregs.pvr = env->spr[SPR_PVR];

    if (cpu->vhyp) {
        /* With a virtual hypervisor, let it encode the SDR1 value to
         * hand to KVM-PR instead of using the raw SPR */
        PPCVirtualHypervisorClass *vhc =
            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        sregs.u.s.sdr1 = vhc->encode_hpt_for_kvm_pr(cpu->vhyp);
    } else {
        sregs.u.s.sdr1 = env->spr[SPR_SDR1];
    }

    /* Sync SLB */
#ifdef TARGET_PPC64
    for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
        sregs.u.s.ppc64.slb[i].slbe = env->slb[i].esid;
        if (env->slb[i].esid & SLB_ESID_V) {
            /* Valid entries carry their index in the ESID low bits */
            sregs.u.s.ppc64.slb[i].slbe |= i;
        }
        sregs.u.s.ppc64.slb[i].slbv = env->slb[i].vsid;
    }
#endif

    /* Sync SRs */
    for (i = 0; i < 16; i++) {
        sregs.u.s.ppc32.sr[i] = env->sr[i];
    }

    /* Sync BATs */
    for (i = 0; i < 8; i++) {
        /* Beware. We have to swap upper and lower bits here */
        sregs.u.s.ppc32.dbat[i] = ((uint64_t)env->DBAT[0][i] << 32)
            | env->DBAT[1][i];
        sregs.u.s.ppc32.ibat[i] = ((uint64_t)env->IBAT[0][i] << 32)
            | env->IBAT[1][i];
    }

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
}
981a7a00a72SDavid Gibson 
98220d695a9SAndreas Färber int kvm_arch_put_registers(CPUState *cs, int level)
983d76d1650Saurel32 {
98420d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
98520d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
986d76d1650Saurel32     struct kvm_regs regs;
987d76d1650Saurel32     int ret;
988d76d1650Saurel32     int i;
989d76d1650Saurel32 
9901bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
9911bc22652SAndreas Färber     if (ret < 0) {
992d76d1650Saurel32         return ret;
9931bc22652SAndreas Färber     }
994d76d1650Saurel32 
995d76d1650Saurel32     regs.ctr = env->ctr;
996d76d1650Saurel32     regs.lr  = env->lr;
997da91a00fSRichard Henderson     regs.xer = cpu_read_xer(env);
998d76d1650Saurel32     regs.msr = env->msr;
999d76d1650Saurel32     regs.pc = env->nip;
1000d76d1650Saurel32 
1001d76d1650Saurel32     regs.srr0 = env->spr[SPR_SRR0];
1002d76d1650Saurel32     regs.srr1 = env->spr[SPR_SRR1];
1003d76d1650Saurel32 
1004d76d1650Saurel32     regs.sprg0 = env->spr[SPR_SPRG0];
1005d76d1650Saurel32     regs.sprg1 = env->spr[SPR_SPRG1];
1006d76d1650Saurel32     regs.sprg2 = env->spr[SPR_SPRG2];
1007d76d1650Saurel32     regs.sprg3 = env->spr[SPR_SPRG3];
1008d76d1650Saurel32     regs.sprg4 = env->spr[SPR_SPRG4];
1009d76d1650Saurel32     regs.sprg5 = env->spr[SPR_SPRG5];
1010d76d1650Saurel32     regs.sprg6 = env->spr[SPR_SPRG6];
1011d76d1650Saurel32     regs.sprg7 = env->spr[SPR_SPRG7];
1012d76d1650Saurel32 
101390dc8812SScott Wood     regs.pid = env->spr[SPR_BOOKE_PID];
101490dc8812SScott Wood 
1015d76d1650Saurel32     for (i = 0;i < 32; i++)
1016d76d1650Saurel32         regs.gpr[i] = env->gpr[i];
1017d76d1650Saurel32 
10184bddaf55SAlexey Kardashevskiy     regs.cr = 0;
10194bddaf55SAlexey Kardashevskiy     for (i = 0; i < 8; i++) {
10204bddaf55SAlexey Kardashevskiy         regs.cr |= (env->crf[i] & 15) << (4 * (7 - i));
10214bddaf55SAlexey Kardashevskiy     }
10224bddaf55SAlexey Kardashevskiy 
10231bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
1024d76d1650Saurel32     if (ret < 0)
1025d76d1650Saurel32         return ret;
1026d76d1650Saurel32 
102770b79849SDavid Gibson     kvm_put_fp(cs);
102870b79849SDavid Gibson 
102993dd5e85SScott Wood     if (env->tlb_dirty) {
10301bc22652SAndreas Färber         kvm_sw_tlb_put(cpu);
103193dd5e85SScott Wood         env->tlb_dirty = false;
103293dd5e85SScott Wood     }
103393dd5e85SScott Wood 
1034f1af19d7SDavid Gibson     if (cap_segstate && (level >= KVM_PUT_RESET_STATE)) {
1035a7a00a72SDavid Gibson         ret = kvmppc_put_books_sregs(cpu);
1036a7a00a72SDavid Gibson         if (ret < 0) {
1037f1af19d7SDavid Gibson             return ret;
1038f1af19d7SDavid Gibson         }
1039f1af19d7SDavid Gibson     }
1040f1af19d7SDavid Gibson 
1041f1af19d7SDavid Gibson     if (cap_hior && (level >= KVM_PUT_RESET_STATE)) {
1042d67d40eaSDavid Gibson         kvm_put_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
1043d67d40eaSDavid Gibson     }
1044f1af19d7SDavid Gibson 
1045d67d40eaSDavid Gibson     if (cap_one_reg) {
1046d67d40eaSDavid Gibson         int i;
1047d67d40eaSDavid Gibson 
1048d67d40eaSDavid Gibson         /* We deliberately ignore errors here, for kernels which have
1049d67d40eaSDavid Gibson          * the ONE_REG calls, but don't support the specific
1050d67d40eaSDavid Gibson          * registers, there's a reasonable chance things will still
1051d67d40eaSDavid Gibson          * work, at least until we try to migrate. */
1052d67d40eaSDavid Gibson         for (i = 0; i < 1024; i++) {
1053d67d40eaSDavid Gibson             uint64_t id = env->spr_cb[i].one_reg_id;
1054d67d40eaSDavid Gibson 
1055d67d40eaSDavid Gibson             if (id != 0) {
1056d67d40eaSDavid Gibson                 kvm_put_one_spr(cs, id, i);
1057d67d40eaSDavid Gibson             }
1058f1af19d7SDavid Gibson         }
10599b00ea49SDavid Gibson 
10609b00ea49SDavid Gibson #ifdef TARGET_PPC64
106180b3f79bSAlexey Kardashevskiy         if (msr_ts) {
106280b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
106380b3f79bSAlexey Kardashevskiy                 kvm_set_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
106480b3f79bSAlexey Kardashevskiy             }
106580b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
106680b3f79bSAlexey Kardashevskiy                 kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
106780b3f79bSAlexey Kardashevskiy             }
106880b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
106980b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
107080b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
107180b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
107280b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
107380b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
107480b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
107580b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
107680b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
107780b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
107880b3f79bSAlexey Kardashevskiy         }
107980b3f79bSAlexey Kardashevskiy 
10809b00ea49SDavid Gibson         if (cap_papr) {
10819b00ea49SDavid Gibson             if (kvm_put_vpa(cs) < 0) {
1082da56ff91SPeter Maydell                 DPRINTF("Warning: Unable to set VPA information to KVM\n");
10839b00ea49SDavid Gibson             }
10849b00ea49SDavid Gibson         }
108598a8b524SAlexey Kardashevskiy 
108698a8b524SAlexey Kardashevskiy         kvm_set_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
10879b00ea49SDavid Gibson #endif /* TARGET_PPC64 */
1088f1af19d7SDavid Gibson     }
1089f1af19d7SDavid Gibson 
1090d76d1650Saurel32     return ret;
1091d76d1650Saurel32 }
1092d76d1650Saurel32 
1093c371c2e3SBharat Bhushan static void kvm_sync_excp(CPUPPCState *env, int vector, int ivor)
1094c371c2e3SBharat Bhushan {
1095c371c2e3SBharat Bhushan      env->excp_vectors[vector] = env->spr[ivor] + env->spr[SPR_BOOKE_IVPR];
1096c371c2e3SBharat Bhushan }
1097c371c2e3SBharat Bhushan 
1098a7a00a72SDavid Gibson static int kvmppc_get_booke_sregs(PowerPCCPU *cpu)
1099d76d1650Saurel32 {
110020d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
1101ba5e5090SAlexander Graf     struct kvm_sregs sregs;
1102a7a00a72SDavid Gibson     int ret;
1103d76d1650Saurel32 
1104a7a00a72SDavid Gibson     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
110590dc8812SScott Wood     if (ret < 0) {
110690dc8812SScott Wood         return ret;
110790dc8812SScott Wood     }
110890dc8812SScott Wood 
110990dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_BASE) {
111090dc8812SScott Wood         env->spr[SPR_BOOKE_CSRR0] = sregs.u.e.csrr0;
111190dc8812SScott Wood         env->spr[SPR_BOOKE_CSRR1] = sregs.u.e.csrr1;
111290dc8812SScott Wood         env->spr[SPR_BOOKE_ESR] = sregs.u.e.esr;
111390dc8812SScott Wood         env->spr[SPR_BOOKE_DEAR] = sregs.u.e.dear;
111490dc8812SScott Wood         env->spr[SPR_BOOKE_MCSR] = sregs.u.e.mcsr;
111590dc8812SScott Wood         env->spr[SPR_BOOKE_TSR] = sregs.u.e.tsr;
111690dc8812SScott Wood         env->spr[SPR_BOOKE_TCR] = sregs.u.e.tcr;
111790dc8812SScott Wood         env->spr[SPR_DECR] = sregs.u.e.dec;
111890dc8812SScott Wood         env->spr[SPR_TBL] = sregs.u.e.tb & 0xffffffff;
111990dc8812SScott Wood         env->spr[SPR_TBU] = sregs.u.e.tb >> 32;
112090dc8812SScott Wood         env->spr[SPR_VRSAVE] = sregs.u.e.vrsave;
112190dc8812SScott Wood     }
112290dc8812SScott Wood 
112390dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_ARCH206) {
112490dc8812SScott Wood         env->spr[SPR_BOOKE_PIR] = sregs.u.e.pir;
112590dc8812SScott Wood         env->spr[SPR_BOOKE_MCSRR0] = sregs.u.e.mcsrr0;
112690dc8812SScott Wood         env->spr[SPR_BOOKE_MCSRR1] = sregs.u.e.mcsrr1;
112790dc8812SScott Wood         env->spr[SPR_BOOKE_DECAR] = sregs.u.e.decar;
112890dc8812SScott Wood         env->spr[SPR_BOOKE_IVPR] = sregs.u.e.ivpr;
112990dc8812SScott Wood     }
113090dc8812SScott Wood 
113190dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_64) {
113290dc8812SScott Wood         env->spr[SPR_BOOKE_EPCR] = sregs.u.e.epcr;
113390dc8812SScott Wood     }
113490dc8812SScott Wood 
113590dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_SPRG8) {
113690dc8812SScott Wood         env->spr[SPR_BOOKE_SPRG8] = sregs.u.e.sprg8;
113790dc8812SScott Wood     }
113890dc8812SScott Wood 
113990dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_IVOR) {
114090dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0];
1141c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_CRITICAL,  SPR_BOOKE_IVOR0);
114290dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1];
1143c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_MCHECK,  SPR_BOOKE_IVOR1);
114490dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2];
1145c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DSI,  SPR_BOOKE_IVOR2);
114690dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3];
1147c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_ISI,  SPR_BOOKE_IVOR3);
114890dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4];
1149c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_EXTERNAL,  SPR_BOOKE_IVOR4);
115090dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5];
1151c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_ALIGN,  SPR_BOOKE_IVOR5);
115290dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6];
1153c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_PROGRAM,  SPR_BOOKE_IVOR6);
115490dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7];
1155c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_FPU,  SPR_BOOKE_IVOR7);
115690dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8];
1157c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_SYSCALL,  SPR_BOOKE_IVOR8);
115890dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9];
1159c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_APU,  SPR_BOOKE_IVOR9);
116090dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10];
1161c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DECR,  SPR_BOOKE_IVOR10);
116290dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11];
1163c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_FIT,  SPR_BOOKE_IVOR11);
116490dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12];
1165c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_WDT,  SPR_BOOKE_IVOR12);
116690dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13];
1167c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DTLB,  SPR_BOOKE_IVOR13);
116890dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14];
1169c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_ITLB,  SPR_BOOKE_IVOR14);
117090dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15];
1171c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DEBUG,  SPR_BOOKE_IVOR15);
117290dc8812SScott Wood 
117390dc8812SScott Wood         if (sregs.u.e.features & KVM_SREGS_E_SPE) {
117490dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0];
1175c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_SPEU,  SPR_BOOKE_IVOR32);
117690dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1];
1177c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_EFPDI,  SPR_BOOKE_IVOR33);
117890dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2];
1179c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_EFPRI,  SPR_BOOKE_IVOR34);
118090dc8812SScott Wood         }
118190dc8812SScott Wood 
118290dc8812SScott Wood         if (sregs.u.e.features & KVM_SREGS_E_PM) {
118390dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3];
1184c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_EPERFM,  SPR_BOOKE_IVOR35);
118590dc8812SScott Wood         }
118690dc8812SScott Wood 
118790dc8812SScott Wood         if (sregs.u.e.features & KVM_SREGS_E_PC) {
118890dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4];
1189c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_DOORI,  SPR_BOOKE_IVOR36);
119090dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5];
1191c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_DOORCI, SPR_BOOKE_IVOR37);
119290dc8812SScott Wood         }
119390dc8812SScott Wood     }
119490dc8812SScott Wood 
119590dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_ARCH206_MMU) {
119690dc8812SScott Wood         env->spr[SPR_BOOKE_MAS0] = sregs.u.e.mas0;
119790dc8812SScott Wood         env->spr[SPR_BOOKE_MAS1] = sregs.u.e.mas1;
119890dc8812SScott Wood         env->spr[SPR_BOOKE_MAS2] = sregs.u.e.mas2;
119990dc8812SScott Wood         env->spr[SPR_BOOKE_MAS3] = sregs.u.e.mas7_3 & 0xffffffff;
120090dc8812SScott Wood         env->spr[SPR_BOOKE_MAS4] = sregs.u.e.mas4;
120190dc8812SScott Wood         env->spr[SPR_BOOKE_MAS6] = sregs.u.e.mas6;
120290dc8812SScott Wood         env->spr[SPR_BOOKE_MAS7] = sregs.u.e.mas7_3 >> 32;
120390dc8812SScott Wood         env->spr[SPR_MMUCFG] = sregs.u.e.mmucfg;
120490dc8812SScott Wood         env->spr[SPR_BOOKE_TLB0CFG] = sregs.u.e.tlbcfg[0];
120590dc8812SScott Wood         env->spr[SPR_BOOKE_TLB1CFG] = sregs.u.e.tlbcfg[1];
120690dc8812SScott Wood     }
120790dc8812SScott Wood 
120890dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_EXP) {
120990dc8812SScott Wood         env->spr[SPR_BOOKE_EPR] = sregs.u.e.epr;
121090dc8812SScott Wood     }
121190dc8812SScott Wood 
121290dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_PD) {
121390dc8812SScott Wood         env->spr[SPR_BOOKE_EPLC] = sregs.u.e.eplc;
121490dc8812SScott Wood         env->spr[SPR_BOOKE_EPSC] = sregs.u.e.epsc;
121590dc8812SScott Wood     }
121690dc8812SScott Wood 
121790dc8812SScott Wood     if (sregs.u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
121890dc8812SScott Wood         env->spr[SPR_E500_SVR] = sregs.u.e.impl.fsl.svr;
121990dc8812SScott Wood         env->spr[SPR_Exxx_MCAR] = sregs.u.e.impl.fsl.mcar;
122090dc8812SScott Wood         env->spr[SPR_HID0] = sregs.u.e.impl.fsl.hid0;
122190dc8812SScott Wood 
122290dc8812SScott Wood         if (sregs.u.e.impl.fsl.features & KVM_SREGS_E_FSL_PIDn) {
122390dc8812SScott Wood             env->spr[SPR_BOOKE_PID1] = sregs.u.e.impl.fsl.pid1;
122490dc8812SScott Wood             env->spr[SPR_BOOKE_PID2] = sregs.u.e.impl.fsl.pid2;
122590dc8812SScott Wood         }
122690dc8812SScott Wood     }
1227a7a00a72SDavid Gibson 
1228a7a00a72SDavid Gibson     return 0;
1229fafc0b6aSAlexander Graf }
123090dc8812SScott Wood 
1231a7a00a72SDavid Gibson static int kvmppc_get_books_sregs(PowerPCCPU *cpu)
1232a7a00a72SDavid Gibson {
1233a7a00a72SDavid Gibson     CPUPPCState *env = &cpu->env;
1234a7a00a72SDavid Gibson     struct kvm_sregs sregs;
1235a7a00a72SDavid Gibson     int ret;
1236a7a00a72SDavid Gibson     int i;
1237a7a00a72SDavid Gibson 
1238a7a00a72SDavid Gibson     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
123990dc8812SScott Wood     if (ret < 0) {
124090dc8812SScott Wood         return ret;
124190dc8812SScott Wood     }
124290dc8812SScott Wood 
1243e57ca75cSDavid Gibson     if (!cpu->vhyp) {
1244bb593904SDavid Gibson         ppc_store_sdr1(env, sregs.u.s.sdr1);
1245f3c75d42SAneesh Kumar K.V     }
1246ba5e5090SAlexander Graf 
1247ba5e5090SAlexander Graf     /* Sync SLB */
124882c09f2fSAlexander Graf #ifdef TARGET_PPC64
12494b4d4a21SAneesh Kumar K.V     /*
12504b4d4a21SAneesh Kumar K.V      * The packed SLB array we get from KVM_GET_SREGS only contains
1251a7a00a72SDavid Gibson      * information about valid entries. So we flush our internal copy
1252a7a00a72SDavid Gibson      * to get rid of stale ones, then put all valid SLB entries back
1253a7a00a72SDavid Gibson      * in.
12544b4d4a21SAneesh Kumar K.V      */
12554b4d4a21SAneesh Kumar K.V     memset(env->slb, 0, sizeof(env->slb));
1256d83af167SAneesh Kumar K.V     for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
12574b4d4a21SAneesh Kumar K.V         target_ulong rb = sregs.u.s.ppc64.slb[i].slbe;
12584b4d4a21SAneesh Kumar K.V         target_ulong rs = sregs.u.s.ppc64.slb[i].slbv;
12594b4d4a21SAneesh Kumar K.V         /*
12604b4d4a21SAneesh Kumar K.V          * Only restore valid entries
12614b4d4a21SAneesh Kumar K.V          */
12624b4d4a21SAneesh Kumar K.V         if (rb & SLB_ESID_V) {
1263bcd81230SDavid Gibson             ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs);
12644b4d4a21SAneesh Kumar K.V         }
1265ba5e5090SAlexander Graf     }
126682c09f2fSAlexander Graf #endif
1267ba5e5090SAlexander Graf 
1268ba5e5090SAlexander Graf     /* Sync SRs */
1269ba5e5090SAlexander Graf     for (i = 0; i < 16; i++) {
1270ba5e5090SAlexander Graf         env->sr[i] = sregs.u.s.ppc32.sr[i];
1271ba5e5090SAlexander Graf     }
1272ba5e5090SAlexander Graf 
1273ba5e5090SAlexander Graf     /* Sync BATs */
1274ba5e5090SAlexander Graf     for (i = 0; i < 8; i++) {
1275ba5e5090SAlexander Graf         env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff;
1276ba5e5090SAlexander Graf         env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32;
1277ba5e5090SAlexander Graf         env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff;
1278ba5e5090SAlexander Graf         env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32;
1279ba5e5090SAlexander Graf     }
1280a7a00a72SDavid Gibson 
1281a7a00a72SDavid Gibson     return 0;
1282a7a00a72SDavid Gibson }
1283a7a00a72SDavid Gibson 
1284a7a00a72SDavid Gibson int kvm_arch_get_registers(CPUState *cs)
1285a7a00a72SDavid Gibson {
1286a7a00a72SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
1287a7a00a72SDavid Gibson     CPUPPCState *env = &cpu->env;
1288a7a00a72SDavid Gibson     struct kvm_regs regs;
1289a7a00a72SDavid Gibson     uint32_t cr;
1290a7a00a72SDavid Gibson     int i, ret;
1291a7a00a72SDavid Gibson 
1292a7a00a72SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
1293a7a00a72SDavid Gibson     if (ret < 0)
1294a7a00a72SDavid Gibson         return ret;
1295a7a00a72SDavid Gibson 
1296a7a00a72SDavid Gibson     cr = regs.cr;
1297a7a00a72SDavid Gibson     for (i = 7; i >= 0; i--) {
1298a7a00a72SDavid Gibson         env->crf[i] = cr & 15;
1299a7a00a72SDavid Gibson         cr >>= 4;
1300a7a00a72SDavid Gibson     }
1301a7a00a72SDavid Gibson 
1302a7a00a72SDavid Gibson     env->ctr = regs.ctr;
1303a7a00a72SDavid Gibson     env->lr = regs.lr;
1304a7a00a72SDavid Gibson     cpu_write_xer(env, regs.xer);
1305a7a00a72SDavid Gibson     env->msr = regs.msr;
1306a7a00a72SDavid Gibson     env->nip = regs.pc;
1307a7a00a72SDavid Gibson 
1308a7a00a72SDavid Gibson     env->spr[SPR_SRR0] = regs.srr0;
1309a7a00a72SDavid Gibson     env->spr[SPR_SRR1] = regs.srr1;
1310a7a00a72SDavid Gibson 
1311a7a00a72SDavid Gibson     env->spr[SPR_SPRG0] = regs.sprg0;
1312a7a00a72SDavid Gibson     env->spr[SPR_SPRG1] = regs.sprg1;
1313a7a00a72SDavid Gibson     env->spr[SPR_SPRG2] = regs.sprg2;
1314a7a00a72SDavid Gibson     env->spr[SPR_SPRG3] = regs.sprg3;
1315a7a00a72SDavid Gibson     env->spr[SPR_SPRG4] = regs.sprg4;
1316a7a00a72SDavid Gibson     env->spr[SPR_SPRG5] = regs.sprg5;
1317a7a00a72SDavid Gibson     env->spr[SPR_SPRG6] = regs.sprg6;
1318a7a00a72SDavid Gibson     env->spr[SPR_SPRG7] = regs.sprg7;
1319a7a00a72SDavid Gibson 
1320a7a00a72SDavid Gibson     env->spr[SPR_BOOKE_PID] = regs.pid;
1321a7a00a72SDavid Gibson 
1322a7a00a72SDavid Gibson     for (i = 0;i < 32; i++)
1323a7a00a72SDavid Gibson         env->gpr[i] = regs.gpr[i];
1324a7a00a72SDavid Gibson 
1325a7a00a72SDavid Gibson     kvm_get_fp(cs);
1326a7a00a72SDavid Gibson 
1327a7a00a72SDavid Gibson     if (cap_booke_sregs) {
1328a7a00a72SDavid Gibson         ret = kvmppc_get_booke_sregs(cpu);
1329a7a00a72SDavid Gibson         if (ret < 0) {
1330a7a00a72SDavid Gibson             return ret;
1331a7a00a72SDavid Gibson         }
1332a7a00a72SDavid Gibson     }
1333a7a00a72SDavid Gibson 
1334a7a00a72SDavid Gibson     if (cap_segstate) {
1335a7a00a72SDavid Gibson         ret = kvmppc_get_books_sregs(cpu);
1336a7a00a72SDavid Gibson         if (ret < 0) {
1337a7a00a72SDavid Gibson             return ret;
1338a7a00a72SDavid Gibson         }
1339fafc0b6aSAlexander Graf     }
1340ba5e5090SAlexander Graf 
1341d67d40eaSDavid Gibson     if (cap_hior) {
1342d67d40eaSDavid Gibson         kvm_get_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
1343d67d40eaSDavid Gibson     }
1344d67d40eaSDavid Gibson 
1345d67d40eaSDavid Gibson     if (cap_one_reg) {
1346d67d40eaSDavid Gibson         int i;
1347d67d40eaSDavid Gibson 
1348d67d40eaSDavid Gibson         /* We deliberately ignore errors here, for kernels which have
1349d67d40eaSDavid Gibson          * the ONE_REG calls, but don't support the specific
1350d67d40eaSDavid Gibson          * registers, there's a reasonable chance things will still
1351d67d40eaSDavid Gibson          * work, at least until we try to migrate. */
1352d67d40eaSDavid Gibson         for (i = 0; i < 1024; i++) {
1353d67d40eaSDavid Gibson             uint64_t id = env->spr_cb[i].one_reg_id;
1354d67d40eaSDavid Gibson 
1355d67d40eaSDavid Gibson             if (id != 0) {
1356d67d40eaSDavid Gibson                 kvm_get_one_spr(cs, id, i);
1357d67d40eaSDavid Gibson             }
1358d67d40eaSDavid Gibson         }
13599b00ea49SDavid Gibson 
13609b00ea49SDavid Gibson #ifdef TARGET_PPC64
136180b3f79bSAlexey Kardashevskiy         if (msr_ts) {
136280b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
136380b3f79bSAlexey Kardashevskiy                 kvm_get_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
136480b3f79bSAlexey Kardashevskiy             }
136580b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
136680b3f79bSAlexey Kardashevskiy                 kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
136780b3f79bSAlexey Kardashevskiy             }
136880b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
136980b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
137080b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
137180b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
137280b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
137380b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
137480b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
137580b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
137680b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
137780b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
137880b3f79bSAlexey Kardashevskiy         }
137980b3f79bSAlexey Kardashevskiy 
13809b00ea49SDavid Gibson         if (cap_papr) {
13819b00ea49SDavid Gibson             if (kvm_get_vpa(cs) < 0) {
1382da56ff91SPeter Maydell                 DPRINTF("Warning: Unable to get VPA information from KVM\n");
13839b00ea49SDavid Gibson             }
13849b00ea49SDavid Gibson         }
138598a8b524SAlexey Kardashevskiy 
138698a8b524SAlexey Kardashevskiy         kvm_get_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
13879b00ea49SDavid Gibson #endif
1388d67d40eaSDavid Gibson     }
1389d67d40eaSDavid Gibson 
1390d76d1650Saurel32     return 0;
1391d76d1650Saurel32 }
1392d76d1650Saurel32 
13931bc22652SAndreas Färber int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
1394fc87e185SAlexander Graf {
1395fc87e185SAlexander Graf     unsigned virq = level ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;
1396fc87e185SAlexander Graf 
1397fc87e185SAlexander Graf     if (irq != PPC_INTERRUPT_EXT) {
1398fc87e185SAlexander Graf         return 0;
1399fc87e185SAlexander Graf     }
1400fc87e185SAlexander Graf 
1401fc87e185SAlexander Graf     if (!kvm_enabled() || !cap_interrupt_unset || !cap_interrupt_level) {
1402fc87e185SAlexander Graf         return 0;
1403fc87e185SAlexander Graf     }
1404fc87e185SAlexander Graf 
14051bc22652SAndreas Färber     kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq);
1406fc87e185SAlexander Graf 
1407fc87e185SAlexander Graf     return 0;
1408fc87e185SAlexander Graf }
1409fc87e185SAlexander Graf 
/* Select the core input pin used for the external interrupt, per
 * target family (40x for embedded, 970 for 64-bit, 6xx otherwise). */
#if defined(TARGET_PPCEMB)
#define PPC_INPUT_INT PPC40x_INPUT_INT
#elif defined(TARGET_PPC64)
#define PPC_INPUT_INT PPC970_INPUT_INT
#else
#define PPC_INPUT_INT PPC6xx_INPUT_INT
#endif
141716415335SAlexander Graf 
/*
 * Run just before re-entering the guest via KVM_RUN.  When the kernel
 * lacks level-triggered interrupt support (!cap_interrupt_level), a
 * pending external interrupt must be injected by hand here, guarded by
 * the iothread lock for the duration of the state inspection.
 */
void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    int r;
    unsigned irq;

    qemu_mutex_lock_iothread();

    /* PowerPC QEMU tracks the various core input pins (interrupt, critical
     * interrupt, reset, etc) in PPC-specific env->irq_input_state. */
    /* Inject only when: no level-irq capability, KVM says the window is
     * open, a hard interrupt is requested, and the external pin is
     * asserted — all four must hold. */
    if (!cap_interrupt_level &&
        run->ready_for_interrupt_injection &&
        (cs->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env->irq_input_state & (1<<PPC_INPUT_INT)))
    {
        /* For now KVM disregards the 'irq' argument. However, in the
         * future KVM could cache it in-kernel to avoid a heavyweight exit
         * when reading the UIC.
         */
        irq = KVM_INTERRUPT_SET;

        DPRINTF("injected interrupt %d\n", irq);
        r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &irq);
        if (r < 0) {
            printf("cpu %d fail inject %x\n", cs->cpu_index, irq);
        }

        /* Always wake up soon in case the interrupt was level based */
        /* 20ms re-poll interval. */
        timer_mod(idle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                       (NANOSECONDS_PER_SECOND / 50));
    }

    /* We don't know if there are more interrupts pending after this. However,
     * the guest will return to userspace in the course of handling this one
     * anyways, so we will get a chance to deliver the rest. */

    qemu_mutex_unlock_iothread();
}
1457d76d1650Saurel32 
/* No PPC-specific post-run work; memory transaction attributes for
 * accesses made on behalf of this vCPU are left unspecified. */
MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    return MEMTXATTRS_UNSPECIFIED;
}
1462d76d1650Saurel32 
/* No PPC-specific async events; just report the halted state so the
 * generic KVM loop can skip KVM_RUN for a halted vCPU. */
int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}
14670af691d7SMarcelo Tosatti 
1468259186a7SAndreas Färber static int kvmppc_handle_halt(PowerPCCPU *cpu)
1469d76d1650Saurel32 {
1470259186a7SAndreas Färber     CPUState *cs = CPU(cpu);
1471259186a7SAndreas Färber     CPUPPCState *env = &cpu->env;
1472259186a7SAndreas Färber 
1473259186a7SAndreas Färber     if (!(cs->interrupt_request & CPU_INTERRUPT_HARD) && (msr_ee)) {
1474259186a7SAndreas Färber         cs->halted = 1;
147527103424SAndreas Färber         cs->exception_index = EXCP_HLT;
1476d76d1650Saurel32     }
1477d76d1650Saurel32 
1478bb4ea393SJan Kiszka     return 0;
1479d76d1650Saurel32 }
1480d76d1650Saurel32 
1481d76d1650Saurel32 /* map dcr access to existing qemu dcr emulation */
14821328c2bfSAndreas Färber static int kvmppc_handle_dcr_read(CPUPPCState *env, uint32_t dcrn, uint32_t *data)
1483d76d1650Saurel32 {
1484d76d1650Saurel32     if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0)
1485d76d1650Saurel32         fprintf(stderr, "Read to unhandled DCR (0x%x)\n", dcrn);
1486d76d1650Saurel32 
1487bb4ea393SJan Kiszka     return 0;
1488d76d1650Saurel32 }
1489d76d1650Saurel32 
14901328c2bfSAndreas Färber static int kvmppc_handle_dcr_write(CPUPPCState *env, uint32_t dcrn, uint32_t data)
1491d76d1650Saurel32 {
1492d76d1650Saurel32     if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0)
1493d76d1650Saurel32         fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);
1494d76d1650Saurel32 
1495bb4ea393SJan Kiszka     return 0;
1496d76d1650Saurel32 }
1497d76d1650Saurel32 
14988a0548f9SBharat Bhushan int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
14998a0548f9SBharat Bhushan {
15008a0548f9SBharat Bhushan     /* Mixed endian case is not handled */
15018a0548f9SBharat Bhushan     uint32_t sc = debug_inst_opcode;
15028a0548f9SBharat Bhushan 
15038a0548f9SBharat Bhushan     if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
15048a0548f9SBharat Bhushan                             sizeof(sc), 0) ||
15058a0548f9SBharat Bhushan         cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 1)) {
15068a0548f9SBharat Bhushan         return -EINVAL;
15078a0548f9SBharat Bhushan     }
15088a0548f9SBharat Bhushan 
15098a0548f9SBharat Bhushan     return 0;
15108a0548f9SBharat Bhushan }
15118a0548f9SBharat Bhushan 
15128a0548f9SBharat Bhushan int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
15138a0548f9SBharat Bhushan {
15148a0548f9SBharat Bhushan     uint32_t sc;
15158a0548f9SBharat Bhushan 
15168a0548f9SBharat Bhushan     if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 0) ||
15178a0548f9SBharat Bhushan         sc != debug_inst_opcode ||
15188a0548f9SBharat Bhushan         cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
15198a0548f9SBharat Bhushan                             sizeof(sc), 1)) {
15208a0548f9SBharat Bhushan         return -EINVAL;
15218a0548f9SBharat Bhushan     }
15228a0548f9SBharat Bhushan 
15238a0548f9SBharat Bhushan     return 0;
15248a0548f9SBharat Bhushan }
15258a0548f9SBharat Bhushan 
152688365d17SBharat Bhushan static int find_hw_breakpoint(target_ulong addr, int type)
152788365d17SBharat Bhushan {
152888365d17SBharat Bhushan     int n;
152988365d17SBharat Bhushan 
153088365d17SBharat Bhushan     assert((nb_hw_breakpoint + nb_hw_watchpoint)
153188365d17SBharat Bhushan            <= ARRAY_SIZE(hw_debug_points));
153288365d17SBharat Bhushan 
153388365d17SBharat Bhushan     for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
153488365d17SBharat Bhushan         if (hw_debug_points[n].addr == addr &&
153588365d17SBharat Bhushan              hw_debug_points[n].type == type) {
153688365d17SBharat Bhushan             return n;
153788365d17SBharat Bhushan         }
153888365d17SBharat Bhushan     }
153988365d17SBharat Bhushan 
154088365d17SBharat Bhushan     return -1;
154188365d17SBharat Bhushan }
154288365d17SBharat Bhushan 
154388365d17SBharat Bhushan static int find_hw_watchpoint(target_ulong addr, int *flag)
154488365d17SBharat Bhushan {
154588365d17SBharat Bhushan     int n;
154688365d17SBharat Bhushan 
154788365d17SBharat Bhushan     n = find_hw_breakpoint(addr, GDB_WATCHPOINT_ACCESS);
154888365d17SBharat Bhushan     if (n >= 0) {
154988365d17SBharat Bhushan         *flag = BP_MEM_ACCESS;
155088365d17SBharat Bhushan         return n;
155188365d17SBharat Bhushan     }
155288365d17SBharat Bhushan 
155388365d17SBharat Bhushan     n = find_hw_breakpoint(addr, GDB_WATCHPOINT_WRITE);
155488365d17SBharat Bhushan     if (n >= 0) {
155588365d17SBharat Bhushan         *flag = BP_MEM_WRITE;
155688365d17SBharat Bhushan         return n;
155788365d17SBharat Bhushan     }
155888365d17SBharat Bhushan 
155988365d17SBharat Bhushan     n = find_hw_breakpoint(addr, GDB_WATCHPOINT_READ);
156088365d17SBharat Bhushan     if (n >= 0) {
156188365d17SBharat Bhushan         *flag = BP_MEM_READ;
156288365d17SBharat Bhushan         return n;
156388365d17SBharat Bhushan     }
156488365d17SBharat Bhushan 
156588365d17SBharat Bhushan     return -1;
156688365d17SBharat Bhushan }
156788365d17SBharat Bhushan 
156888365d17SBharat Bhushan int kvm_arch_insert_hw_breakpoint(target_ulong addr,
156988365d17SBharat Bhushan                                   target_ulong len, int type)
157088365d17SBharat Bhushan {
157188365d17SBharat Bhushan     if ((nb_hw_breakpoint + nb_hw_watchpoint) >= ARRAY_SIZE(hw_debug_points)) {
157288365d17SBharat Bhushan         return -ENOBUFS;
157388365d17SBharat Bhushan     }
157488365d17SBharat Bhushan 
157588365d17SBharat Bhushan     hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].addr = addr;
157688365d17SBharat Bhushan     hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].type = type;
157788365d17SBharat Bhushan 
157888365d17SBharat Bhushan     switch (type) {
157988365d17SBharat Bhushan     case GDB_BREAKPOINT_HW:
158088365d17SBharat Bhushan         if (nb_hw_breakpoint >= max_hw_breakpoint) {
158188365d17SBharat Bhushan             return -ENOBUFS;
158288365d17SBharat Bhushan         }
158388365d17SBharat Bhushan 
158488365d17SBharat Bhushan         if (find_hw_breakpoint(addr, type) >= 0) {
158588365d17SBharat Bhushan             return -EEXIST;
158688365d17SBharat Bhushan         }
158788365d17SBharat Bhushan 
158888365d17SBharat Bhushan         nb_hw_breakpoint++;
158988365d17SBharat Bhushan         break;
159088365d17SBharat Bhushan 
159188365d17SBharat Bhushan     case GDB_WATCHPOINT_WRITE:
159288365d17SBharat Bhushan     case GDB_WATCHPOINT_READ:
159388365d17SBharat Bhushan     case GDB_WATCHPOINT_ACCESS:
159488365d17SBharat Bhushan         if (nb_hw_watchpoint >= max_hw_watchpoint) {
159588365d17SBharat Bhushan             return -ENOBUFS;
159688365d17SBharat Bhushan         }
159788365d17SBharat Bhushan 
159888365d17SBharat Bhushan         if (find_hw_breakpoint(addr, type) >= 0) {
159988365d17SBharat Bhushan             return -EEXIST;
160088365d17SBharat Bhushan         }
160188365d17SBharat Bhushan 
160288365d17SBharat Bhushan         nb_hw_watchpoint++;
160388365d17SBharat Bhushan         break;
160488365d17SBharat Bhushan 
160588365d17SBharat Bhushan     default:
160688365d17SBharat Bhushan         return -ENOSYS;
160788365d17SBharat Bhushan     }
160888365d17SBharat Bhushan 
160988365d17SBharat Bhushan     return 0;
161088365d17SBharat Bhushan }
161188365d17SBharat Bhushan 
161288365d17SBharat Bhushan int kvm_arch_remove_hw_breakpoint(target_ulong addr,
161388365d17SBharat Bhushan                                   target_ulong len, int type)
161488365d17SBharat Bhushan {
161588365d17SBharat Bhushan     int n;
161688365d17SBharat Bhushan 
161788365d17SBharat Bhushan     n = find_hw_breakpoint(addr, type);
161888365d17SBharat Bhushan     if (n < 0) {
161988365d17SBharat Bhushan         return -ENOENT;
162088365d17SBharat Bhushan     }
162188365d17SBharat Bhushan 
162288365d17SBharat Bhushan     switch (type) {
162388365d17SBharat Bhushan     case GDB_BREAKPOINT_HW:
162488365d17SBharat Bhushan         nb_hw_breakpoint--;
162588365d17SBharat Bhushan         break;
162688365d17SBharat Bhushan 
162788365d17SBharat Bhushan     case GDB_WATCHPOINT_WRITE:
162888365d17SBharat Bhushan     case GDB_WATCHPOINT_READ:
162988365d17SBharat Bhushan     case GDB_WATCHPOINT_ACCESS:
163088365d17SBharat Bhushan         nb_hw_watchpoint--;
163188365d17SBharat Bhushan         break;
163288365d17SBharat Bhushan 
163388365d17SBharat Bhushan     default:
163488365d17SBharat Bhushan         return -ENOSYS;
163588365d17SBharat Bhushan     }
163688365d17SBharat Bhushan     hw_debug_points[n] = hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint];
163788365d17SBharat Bhushan 
163888365d17SBharat Bhushan     return 0;
163988365d17SBharat Bhushan }
164088365d17SBharat Bhushan 
164188365d17SBharat Bhushan void kvm_arch_remove_all_hw_breakpoints(void)
164288365d17SBharat Bhushan {
164388365d17SBharat Bhushan     nb_hw_breakpoint = nb_hw_watchpoint = 0;
164488365d17SBharat Bhushan }
164588365d17SBharat Bhushan 
16468a0548f9SBharat Bhushan void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
16478a0548f9SBharat Bhushan {
164888365d17SBharat Bhushan     int n;
164988365d17SBharat Bhushan 
16508a0548f9SBharat Bhushan     /* Software Breakpoint updates */
16518a0548f9SBharat Bhushan     if (kvm_sw_breakpoints_active(cs)) {
16528a0548f9SBharat Bhushan         dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
16538a0548f9SBharat Bhushan     }
165488365d17SBharat Bhushan 
165588365d17SBharat Bhushan     assert((nb_hw_breakpoint + nb_hw_watchpoint)
165688365d17SBharat Bhushan            <= ARRAY_SIZE(hw_debug_points));
165788365d17SBharat Bhushan     assert((nb_hw_breakpoint + nb_hw_watchpoint) <= ARRAY_SIZE(dbg->arch.bp));
165888365d17SBharat Bhushan 
165988365d17SBharat Bhushan     if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
166088365d17SBharat Bhushan         dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
166188365d17SBharat Bhushan         memset(dbg->arch.bp, 0, sizeof(dbg->arch.bp));
166288365d17SBharat Bhushan         for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
166388365d17SBharat Bhushan             switch (hw_debug_points[n].type) {
166488365d17SBharat Bhushan             case GDB_BREAKPOINT_HW:
166588365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_BREAKPOINT;
166688365d17SBharat Bhushan                 break;
166788365d17SBharat Bhushan             case GDB_WATCHPOINT_WRITE:
166888365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE;
166988365d17SBharat Bhushan                 break;
167088365d17SBharat Bhushan             case GDB_WATCHPOINT_READ:
167188365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_READ;
167288365d17SBharat Bhushan                 break;
167388365d17SBharat Bhushan             case GDB_WATCHPOINT_ACCESS:
167488365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE |
167588365d17SBharat Bhushan                                         KVMPPC_DEBUG_WATCH_READ;
167688365d17SBharat Bhushan                 break;
167788365d17SBharat Bhushan             default:
167888365d17SBharat Bhushan                 cpu_abort(cs, "Unsupported breakpoint type\n");
167988365d17SBharat Bhushan             }
168088365d17SBharat Bhushan             dbg->arch.bp[n].addr = hw_debug_points[n].addr;
168188365d17SBharat Bhushan         }
168288365d17SBharat Bhushan     }
16838a0548f9SBharat Bhushan }
16848a0548f9SBharat Bhushan 
16858a0548f9SBharat Bhushan static int kvm_handle_debug(PowerPCCPU *cpu, struct kvm_run *run)
16868a0548f9SBharat Bhushan {
16878a0548f9SBharat Bhushan     CPUState *cs = CPU(cpu);
16888a0548f9SBharat Bhushan     CPUPPCState *env = &cpu->env;
16898a0548f9SBharat Bhushan     struct kvm_debug_exit_arch *arch_info = &run->debug.arch;
16908a0548f9SBharat Bhushan     int handle = 0;
169188365d17SBharat Bhushan     int n;
169288365d17SBharat Bhushan     int flag = 0;
16938a0548f9SBharat Bhushan 
169488365d17SBharat Bhushan     if (cs->singlestep_enabled) {
169588365d17SBharat Bhushan         handle = 1;
169688365d17SBharat Bhushan     } else if (arch_info->status) {
169788365d17SBharat Bhushan         if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
169888365d17SBharat Bhushan             if (arch_info->status & KVMPPC_DEBUG_BREAKPOINT) {
169988365d17SBharat Bhushan                 n = find_hw_breakpoint(arch_info->address, GDB_BREAKPOINT_HW);
170088365d17SBharat Bhushan                 if (n >= 0) {
170188365d17SBharat Bhushan                     handle = 1;
170288365d17SBharat Bhushan                 }
170388365d17SBharat Bhushan             } else if (arch_info->status & (KVMPPC_DEBUG_WATCH_READ |
170488365d17SBharat Bhushan                                             KVMPPC_DEBUG_WATCH_WRITE)) {
170588365d17SBharat Bhushan                 n = find_hw_watchpoint(arch_info->address,  &flag);
170688365d17SBharat Bhushan                 if (n >= 0) {
170788365d17SBharat Bhushan                     handle = 1;
170888365d17SBharat Bhushan                     cs->watchpoint_hit = &hw_watchpoint;
170988365d17SBharat Bhushan                     hw_watchpoint.vaddr = hw_debug_points[n].addr;
171088365d17SBharat Bhushan                     hw_watchpoint.flags = flag;
171188365d17SBharat Bhushan                 }
171288365d17SBharat Bhushan             }
171388365d17SBharat Bhushan         }
171488365d17SBharat Bhushan     } else if (kvm_find_sw_breakpoint(cs, arch_info->address)) {
17158a0548f9SBharat Bhushan         handle = 1;
17168a0548f9SBharat Bhushan     } else {
17178a0548f9SBharat Bhushan         /* QEMU is not able to handle debug exception, so inject
17188a0548f9SBharat Bhushan          * program exception to guest;
17198a0548f9SBharat Bhushan          * Yes program exception NOT debug exception !!
172088365d17SBharat Bhushan          * When QEMU is using debug resources then debug exception must
172188365d17SBharat Bhushan          * be always set. To achieve this we set MSR_DE and also set
172288365d17SBharat Bhushan          * MSRP_DEP so guest cannot change MSR_DE.
172388365d17SBharat Bhushan          * When emulating debug resource for guest we want guest
172488365d17SBharat Bhushan          * to control MSR_DE (enable/disable debug interrupt on need).
172588365d17SBharat Bhushan          * Supporting both configurations are NOT possible.
172688365d17SBharat Bhushan          * So the result is that we cannot share debug resources
172788365d17SBharat Bhushan          * between QEMU and Guest on BOOKE architecture.
172888365d17SBharat Bhushan          * In the current design QEMU gets the priority over guest,
172988365d17SBharat Bhushan          * this means that if QEMU is using debug resources then guest
173088365d17SBharat Bhushan          * cannot use them;
17318a0548f9SBharat Bhushan          * For software breakpoint QEMU uses a privileged instruction;
17328a0548f9SBharat Bhushan          * So there cannot be any reason that we are here for guest
17338a0548f9SBharat Bhushan          * set debug exception, only possibility is guest executed a
17348a0548f9SBharat Bhushan          * privileged / illegal instruction and that's why we are
17358a0548f9SBharat Bhushan          * injecting a program interrupt.
17368a0548f9SBharat Bhushan          */
17378a0548f9SBharat Bhushan 
17388a0548f9SBharat Bhushan         cpu_synchronize_state(cs);
17398a0548f9SBharat Bhushan         /* env->nip is PC, so increment this by 4 to use
17408a0548f9SBharat Bhushan          * ppc_cpu_do_interrupt(), which set srr0 = env->nip - 4.
17418a0548f9SBharat Bhushan          */
17428a0548f9SBharat Bhushan         env->nip += 4;
17438a0548f9SBharat Bhushan         cs->exception_index = POWERPC_EXCP_PROGRAM;
17448a0548f9SBharat Bhushan         env->error_code = POWERPC_EXCP_INVAL;
17458a0548f9SBharat Bhushan         ppc_cpu_do_interrupt(cs);
17468a0548f9SBharat Bhushan     }
17478a0548f9SBharat Bhushan 
17488a0548f9SBharat Bhushan     return handle;
17498a0548f9SBharat Bhushan }
17508a0548f9SBharat Bhushan 
175120d695a9SAndreas Färber int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
1752d76d1650Saurel32 {
175320d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
175420d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
1755bb4ea393SJan Kiszka     int ret;
1756d76d1650Saurel32 
17574b8523eeSJan Kiszka     qemu_mutex_lock_iothread();
17584b8523eeSJan Kiszka 
1759d76d1650Saurel32     switch (run->exit_reason) {
1760d76d1650Saurel32     case KVM_EXIT_DCR:
1761d76d1650Saurel32         if (run->dcr.is_write) {
1762da56ff91SPeter Maydell             DPRINTF("handle dcr write\n");
1763d76d1650Saurel32             ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data);
1764d76d1650Saurel32         } else {
1765da56ff91SPeter Maydell             DPRINTF("handle dcr read\n");
1766d76d1650Saurel32             ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data);
1767d76d1650Saurel32         }
1768d76d1650Saurel32         break;
1769d76d1650Saurel32     case KVM_EXIT_HLT:
1770da56ff91SPeter Maydell         DPRINTF("handle halt\n");
1771259186a7SAndreas Färber         ret = kvmppc_handle_halt(cpu);
1772d76d1650Saurel32         break;
1773c6304a4aSDavid Gibson #if defined(TARGET_PPC64)
1774f61b4bedSAlexander Graf     case KVM_EXIT_PAPR_HCALL:
1775da56ff91SPeter Maydell         DPRINTF("handle PAPR hypercall\n");
177620d695a9SAndreas Färber         run->papr_hcall.ret = spapr_hypercall(cpu,
1777aa100fa4SAndreas Färber                                               run->papr_hcall.nr,
1778f61b4bedSAlexander Graf                                               run->papr_hcall.args);
177978e8fde2SDavid Gibson         ret = 0;
1780f61b4bedSAlexander Graf         break;
1781f61b4bedSAlexander Graf #endif
17825b95b8b9SAlexander Graf     case KVM_EXIT_EPR:
1783da56ff91SPeter Maydell         DPRINTF("handle epr\n");
1784933b19eaSAlexander Graf         run->epr.epr = ldl_phys(cs->as, env->mpic_iack);
17855b95b8b9SAlexander Graf         ret = 0;
17865b95b8b9SAlexander Graf         break;
178731f2cb8fSBharat Bhushan     case KVM_EXIT_WATCHDOG:
1788da56ff91SPeter Maydell         DPRINTF("handle watchdog expiry\n");
178931f2cb8fSBharat Bhushan         watchdog_perform_action();
179031f2cb8fSBharat Bhushan         ret = 0;
179131f2cb8fSBharat Bhushan         break;
179231f2cb8fSBharat Bhushan 
17938a0548f9SBharat Bhushan     case KVM_EXIT_DEBUG:
17948a0548f9SBharat Bhushan         DPRINTF("handle debug exception\n");
17958a0548f9SBharat Bhushan         if (kvm_handle_debug(cpu, run)) {
17968a0548f9SBharat Bhushan             ret = EXCP_DEBUG;
17978a0548f9SBharat Bhushan             break;
17988a0548f9SBharat Bhushan         }
17998a0548f9SBharat Bhushan         /* re-enter, this exception was guest-internal */
18008a0548f9SBharat Bhushan         ret = 0;
18018a0548f9SBharat Bhushan         break;
18028a0548f9SBharat Bhushan 
180373aaec4aSJan Kiszka     default:
180473aaec4aSJan Kiszka         fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
180573aaec4aSJan Kiszka         ret = -1;
180673aaec4aSJan Kiszka         break;
1807d76d1650Saurel32     }
1808d76d1650Saurel32 
18094b8523eeSJan Kiszka     qemu_mutex_unlock_iothread();
1810d76d1650Saurel32     return ret;
1811d76d1650Saurel32 }
1812d76d1650Saurel32 
181331f2cb8fSBharat Bhushan int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
181431f2cb8fSBharat Bhushan {
181531f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
181631f2cb8fSBharat Bhushan     uint32_t bits = tsr_bits;
181731f2cb8fSBharat Bhushan     struct kvm_one_reg reg = {
181831f2cb8fSBharat Bhushan         .id = KVM_REG_PPC_OR_TSR,
181931f2cb8fSBharat Bhushan         .addr = (uintptr_t) &bits,
182031f2cb8fSBharat Bhushan     };
182131f2cb8fSBharat Bhushan 
182231f2cb8fSBharat Bhushan     return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
182331f2cb8fSBharat Bhushan }
182431f2cb8fSBharat Bhushan 
182531f2cb8fSBharat Bhushan int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
182631f2cb8fSBharat Bhushan {
182731f2cb8fSBharat Bhushan 
182831f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
182931f2cb8fSBharat Bhushan     uint32_t bits = tsr_bits;
183031f2cb8fSBharat Bhushan     struct kvm_one_reg reg = {
183131f2cb8fSBharat Bhushan         .id = KVM_REG_PPC_CLEAR_TSR,
183231f2cb8fSBharat Bhushan         .addr = (uintptr_t) &bits,
183331f2cb8fSBharat Bhushan     };
183431f2cb8fSBharat Bhushan 
183531f2cb8fSBharat Bhushan     return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
183631f2cb8fSBharat Bhushan }
183731f2cb8fSBharat Bhushan 
183831f2cb8fSBharat Bhushan int kvmppc_set_tcr(PowerPCCPU *cpu)
183931f2cb8fSBharat Bhushan {
184031f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
184131f2cb8fSBharat Bhushan     CPUPPCState *env = &cpu->env;
184231f2cb8fSBharat Bhushan     uint32_t tcr = env->spr[SPR_BOOKE_TCR];
184331f2cb8fSBharat Bhushan 
184431f2cb8fSBharat Bhushan     struct kvm_one_reg reg = {
184531f2cb8fSBharat Bhushan         .id = KVM_REG_PPC_TCR,
184631f2cb8fSBharat Bhushan         .addr = (uintptr_t) &tcr,
184731f2cb8fSBharat Bhushan     };
184831f2cb8fSBharat Bhushan 
184931f2cb8fSBharat Bhushan     return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
185031f2cb8fSBharat Bhushan }
185131f2cb8fSBharat Bhushan 
185231f2cb8fSBharat Bhushan int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu)
185331f2cb8fSBharat Bhushan {
185431f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
185531f2cb8fSBharat Bhushan     int ret;
185631f2cb8fSBharat Bhushan 
185731f2cb8fSBharat Bhushan     if (!kvm_enabled()) {
185831f2cb8fSBharat Bhushan         return -1;
185931f2cb8fSBharat Bhushan     }
186031f2cb8fSBharat Bhushan 
186131f2cb8fSBharat Bhushan     if (!cap_ppc_watchdog) {
186231f2cb8fSBharat Bhushan         printf("warning: KVM does not support watchdog");
186331f2cb8fSBharat Bhushan         return -1;
186431f2cb8fSBharat Bhushan     }
186531f2cb8fSBharat Bhushan 
186648add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_BOOKE_WATCHDOG, 0);
186731f2cb8fSBharat Bhushan     if (ret < 0) {
186831f2cb8fSBharat Bhushan         fprintf(stderr, "%s: couldn't enable KVM_CAP_PPC_BOOKE_WATCHDOG: %s\n",
186931f2cb8fSBharat Bhushan                 __func__, strerror(-ret));
187031f2cb8fSBharat Bhushan         return ret;
187131f2cb8fSBharat Bhushan     }
187231f2cb8fSBharat Bhushan 
187331f2cb8fSBharat Bhushan     return ret;
187431f2cb8fSBharat Bhushan }
187531f2cb8fSBharat Bhushan 
/*
 * Scan /proc/cpuinfo for a line beginning with @field and copy the whole
 * line (bounded by @len) into @value.  Returns 0 on success, -1 if the
 * file cannot be opened or the field is not present.
 */
static int read_cpuinfo(const char *field, char *value, int len)
{
    int field_len = strlen(field);
    char line[512];
    FILE *f;

    f = fopen("/proc/cpuinfo", "r");
    if (!f) {
        return -1;
    }

    while (fgets(line, sizeof(line), f)) {
        if (strncmp(line, field, field_len) == 0) {
            pstrcpy(value, len, line);
            fclose(f);
            return 0;
        }
    }

    fclose(f);
    return -1;
}
1903dc333cd6SAlexander Graf 
1904dc333cd6SAlexander Graf uint32_t kvmppc_get_tbfreq(void)
1905dc333cd6SAlexander Graf {
1906dc333cd6SAlexander Graf     char line[512];
1907dc333cd6SAlexander Graf     char *ns;
190873bcb24dSRutuja Shah     uint32_t retval = NANOSECONDS_PER_SECOND;
1909dc333cd6SAlexander Graf 
1910dc333cd6SAlexander Graf     if (read_cpuinfo("timebase", line, sizeof(line))) {
1911dc333cd6SAlexander Graf         return retval;
1912dc333cd6SAlexander Graf     }
1913dc333cd6SAlexander Graf 
1914dc333cd6SAlexander Graf     if (!(ns = strchr(line, ':'))) {
1915dc333cd6SAlexander Graf         return retval;
1916dc333cd6SAlexander Graf     }
1917dc333cd6SAlexander Graf 
1918dc333cd6SAlexander Graf     ns++;
1919dc333cd6SAlexander Graf 
1920f9b8e7f6SShraddha Barke     return atoi(ns);
1921ef951443SNikunj A Dadhania }
1922ef951443SNikunj A Dadhania 
1923ef951443SNikunj A Dadhania bool kvmppc_get_host_serial(char **value)
1924ef951443SNikunj A Dadhania {
1925ef951443SNikunj A Dadhania     return g_file_get_contents("/proc/device-tree/system-id", value, NULL,
1926ef951443SNikunj A Dadhania                                NULL);
1927ef951443SNikunj A Dadhania }
1928ef951443SNikunj A Dadhania 
1929ef951443SNikunj A Dadhania bool kvmppc_get_host_model(char **value)
1930ef951443SNikunj A Dadhania {
1931ef951443SNikunj A Dadhania     return g_file_get_contents("/proc/device-tree/model", value, NULL, NULL);
1932dc333cd6SAlexander Graf }
19334513d923SGleb Natapov 
1934eadaada1SAlexander Graf /* Try to find a device tree node for a CPU with clock-frequency property */
1935eadaada1SAlexander Graf static int kvmppc_find_cpu_dt(char *buf, int buf_len)
1936eadaada1SAlexander Graf {
1937eadaada1SAlexander Graf     struct dirent *dirp;
1938eadaada1SAlexander Graf     DIR *dp;
1939eadaada1SAlexander Graf 
1940eadaada1SAlexander Graf     if ((dp = opendir(PROC_DEVTREE_CPU)) == NULL) {
1941eadaada1SAlexander Graf         printf("Can't open directory " PROC_DEVTREE_CPU "\n");
1942eadaada1SAlexander Graf         return -1;
1943eadaada1SAlexander Graf     }
1944eadaada1SAlexander Graf 
1945eadaada1SAlexander Graf     buf[0] = '\0';
1946eadaada1SAlexander Graf     while ((dirp = readdir(dp)) != NULL) {
1947eadaada1SAlexander Graf         FILE *f;
1948eadaada1SAlexander Graf         snprintf(buf, buf_len, "%s%s/clock-frequency", PROC_DEVTREE_CPU,
1949eadaada1SAlexander Graf                  dirp->d_name);
1950eadaada1SAlexander Graf         f = fopen(buf, "r");
1951eadaada1SAlexander Graf         if (f) {
1952eadaada1SAlexander Graf             snprintf(buf, buf_len, "%s%s", PROC_DEVTREE_CPU, dirp->d_name);
1953eadaada1SAlexander Graf             fclose(f);
1954eadaada1SAlexander Graf             break;
1955eadaada1SAlexander Graf         }
1956eadaada1SAlexander Graf         buf[0] = '\0';
1957eadaada1SAlexander Graf     }
1958eadaada1SAlexander Graf     closedir(dp);
1959eadaada1SAlexander Graf     if (buf[0] == '\0') {
1960eadaada1SAlexander Graf         printf("Unknown host!\n");
1961eadaada1SAlexander Graf         return -1;
1962eadaada1SAlexander Graf     }
1963eadaada1SAlexander Graf 
1964eadaada1SAlexander Graf     return 0;
1965eadaada1SAlexander Graf }
1966eadaada1SAlexander Graf 
/*
 * Read a device-tree property file containing a single big-endian
 * integer and return it in host byte order.  Returns 0 for a size we
 * don't understand, or (uint64_t)-1 when the file can't be opened.
 */
static uint64_t kvmppc_read_int_dt(const char *filename)
{
    union {
        uint32_t v32;
        uint64_t v64;
    } u;
    FILE *f;
    int len;

    f = fopen(filename, "rb");
    if (!f) {
        return -1;
    }

    len = fread(&u, 1, sizeof(u), f);
    fclose(f);

    if (len == sizeof(u.v32)) {
        /* property is a 32-bit quantity */
        return be32_to_cpu(u.v32);
    }
    if (len == sizeof(u.v64)) {
        /* property is a 64-bit quantity */
        return be64_to_cpu(u.v64);
    }

    return 0;
}
1993eadaada1SAlexander Graf 
19947d94a30bSSukadev Bhattiprolu /* Read a CPU node property from the host device tree that's a single
19957d94a30bSSukadev Bhattiprolu  * integer (32-bit or 64-bit).  Returns 0 if anything goes wrong
19967d94a30bSSukadev Bhattiprolu  * (can't find or open the property, or doesn't understand the
19977d94a30bSSukadev Bhattiprolu  * format) */
19987d94a30bSSukadev Bhattiprolu static uint64_t kvmppc_read_int_cpu_dt(const char *propname)
19997d94a30bSSukadev Bhattiprolu {
20007d94a30bSSukadev Bhattiprolu     char buf[PATH_MAX], *tmp;
20017d94a30bSSukadev Bhattiprolu     uint64_t val;
20027d94a30bSSukadev Bhattiprolu 
20037d94a30bSSukadev Bhattiprolu     if (kvmppc_find_cpu_dt(buf, sizeof(buf))) {
20047d94a30bSSukadev Bhattiprolu         return -1;
20057d94a30bSSukadev Bhattiprolu     }
20067d94a30bSSukadev Bhattiprolu 
20077d94a30bSSukadev Bhattiprolu     tmp = g_strdup_printf("%s/%s", buf, propname);
20087d94a30bSSukadev Bhattiprolu     val = kvmppc_read_int_dt(tmp);
20097d94a30bSSukadev Bhattiprolu     g_free(tmp);
20107d94a30bSSukadev Bhattiprolu 
20117d94a30bSSukadev Bhattiprolu     return val;
20127d94a30bSSukadev Bhattiprolu }
20137d94a30bSSukadev Bhattiprolu 
/* Host CPU clock frequency, read from the device tree. */
uint64_t kvmppc_get_clockfreq(void)
{
    static const char prop[] = "clock-frequency";

    return kvmppc_read_int_cpu_dt(prop);
}
20189bc884b7SDavid Gibson 
20191a61a9aeSStuart Yoder static int kvmppc_get_pvinfo(CPUPPCState *env, struct kvm_ppc_pvinfo *pvinfo)
202045024f09SAlexander Graf  {
2021a60f24b5SAndreas Färber      PowerPCCPU *cpu = ppc_env_get_cpu(env);
2022a60f24b5SAndreas Färber      CPUState *cs = CPU(cpu);
202345024f09SAlexander Graf 
20246fd33a75SAlexander Graf     if (kvm_vm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
20251a61a9aeSStuart Yoder         !kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_PVINFO, pvinfo)) {
20261a61a9aeSStuart Yoder         return 0;
20271a61a9aeSStuart Yoder     }
202845024f09SAlexander Graf 
20291a61a9aeSStuart Yoder     return 1;
20301a61a9aeSStuart Yoder }
20311a61a9aeSStuart Yoder 
20321a61a9aeSStuart Yoder int kvmppc_get_hasidle(CPUPPCState *env)
20331a61a9aeSStuart Yoder {
20341a61a9aeSStuart Yoder     struct kvm_ppc_pvinfo pvinfo;
20351a61a9aeSStuart Yoder 
20361a61a9aeSStuart Yoder     if (!kvmppc_get_pvinfo(env, &pvinfo) &&
20371a61a9aeSStuart Yoder         (pvinfo.flags & KVM_PPC_PVINFO_FLAGS_EV_IDLE)) {
20381a61a9aeSStuart Yoder         return 1;
20391a61a9aeSStuart Yoder     }
20401a61a9aeSStuart Yoder 
20411a61a9aeSStuart Yoder     return 0;
20421a61a9aeSStuart Yoder }
20431a61a9aeSStuart Yoder 
20441a61a9aeSStuart Yoder int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
20451a61a9aeSStuart Yoder {
20461a61a9aeSStuart Yoder     uint32_t *hc = (uint32_t*)buf;
20471a61a9aeSStuart Yoder     struct kvm_ppc_pvinfo pvinfo;
20481a61a9aeSStuart Yoder 
20491a61a9aeSStuart Yoder     if (!kvmppc_get_pvinfo(env, &pvinfo)) {
20501a61a9aeSStuart Yoder         memcpy(buf, pvinfo.hcall, buf_len);
205145024f09SAlexander Graf         return 0;
205245024f09SAlexander Graf     }
205345024f09SAlexander Graf 
205445024f09SAlexander Graf     /*
2055d13fc32eSAlexander Graf      * Fallback to always fail hypercalls regardless of endianness:
205645024f09SAlexander Graf      *
2057d13fc32eSAlexander Graf      *     tdi 0,r0,72 (becomes b .+8 in wrong endian, nop in good endian)
205845024f09SAlexander Graf      *     li r3, -1
2059d13fc32eSAlexander Graf      *     b .+8       (becomes nop in wrong endian)
2060d13fc32eSAlexander Graf      *     bswap32(li r3, -1)
206145024f09SAlexander Graf      */
206245024f09SAlexander Graf 
2063d13fc32eSAlexander Graf     hc[0] = cpu_to_be32(0x08000048);
2064d13fc32eSAlexander Graf     hc[1] = cpu_to_be32(0x3860ffff);
2065d13fc32eSAlexander Graf     hc[2] = cpu_to_be32(0x48000008);
2066d13fc32eSAlexander Graf     hc[3] = cpu_to_be32(bswap32(0x3860ffff));
206745024f09SAlexander Graf 
20680ddbd053SAlexey Kardashevskiy     return 1;
206945024f09SAlexander Graf }
207045024f09SAlexander Graf 
2071026bfd89SDavid Gibson static inline int kvmppc_enable_hcall(KVMState *s, target_ulong hcall)
2072026bfd89SDavid Gibson {
2073026bfd89SDavid Gibson     return kvm_vm_enable_cap(s, KVM_CAP_PPC_ENABLE_HCALL, 0, hcall, 1);
2074026bfd89SDavid Gibson }
2075026bfd89SDavid Gibson 
2076026bfd89SDavid Gibson void kvmppc_enable_logical_ci_hcalls(void)
2077026bfd89SDavid Gibson {
2078026bfd89SDavid Gibson     /*
2079026bfd89SDavid Gibson      * FIXME: it would be nice if we could detect the cases where
2080026bfd89SDavid Gibson      * we're using a device which requires the in kernel
2081026bfd89SDavid Gibson      * implementation of these hcalls, but the kernel lacks them and
2082026bfd89SDavid Gibson      * produce a warning.
2083026bfd89SDavid Gibson      */
2084026bfd89SDavid Gibson     kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_LOAD);
2085026bfd89SDavid Gibson     kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_STORE);
2086026bfd89SDavid Gibson }
2087026bfd89SDavid Gibson 
2088ef9971ddSAlexey Kardashevskiy void kvmppc_enable_set_mode_hcall(void)
2089ef9971ddSAlexey Kardashevskiy {
2090ef9971ddSAlexey Kardashevskiy     kvmppc_enable_hcall(kvm_state, H_SET_MODE);
2091ef9971ddSAlexey Kardashevskiy }
2092ef9971ddSAlexey Kardashevskiy 
20935145ad4fSNathan Whitehorn void kvmppc_enable_clear_ref_mod_hcalls(void)
20945145ad4fSNathan Whitehorn {
20955145ad4fSNathan Whitehorn     kvmppc_enable_hcall(kvm_state, H_CLEAR_REF);
20965145ad4fSNathan Whitehorn     kvmppc_enable_hcall(kvm_state, H_CLEAR_MOD);
20975145ad4fSNathan Whitehorn }
20985145ad4fSNathan Whitehorn 
20991bc22652SAndreas Färber void kvmppc_set_papr(PowerPCCPU *cpu)
2100f61b4bedSAlexander Graf {
21011bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
2102f61b4bedSAlexander Graf     int ret;
2103f61b4bedSAlexander Graf 
210448add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_PAPR, 0);
2105f61b4bedSAlexander Graf     if (ret) {
2106072ed5f2SThomas Huth         error_report("This vCPU type or KVM version does not support PAPR");
2107072ed5f2SThomas Huth         exit(1);
2108f61b4bedSAlexander Graf     }
21099b00ea49SDavid Gibson 
21109b00ea49SDavid Gibson     /* Update the capability flag so we sync the right information
21119b00ea49SDavid Gibson      * with kvm */
21129b00ea49SDavid Gibson     cap_papr = 1;
2113f1af19d7SDavid Gibson }
2114f61b4bedSAlexander Graf 
2115d6e166c0SDavid Gibson int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr)
21166db5bb0fSAlexey Kardashevskiy {
2117d6e166c0SDavid Gibson     return kvm_set_one_reg(CPU(cpu), KVM_REG_PPC_ARCH_COMPAT, &compat_pvr);
21186db5bb0fSAlexey Kardashevskiy }
21196db5bb0fSAlexey Kardashevskiy 
21205b95b8b9SAlexander Graf void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy)
21215b95b8b9SAlexander Graf {
21225b95b8b9SAlexander Graf     CPUState *cs = CPU(cpu);
21235b95b8b9SAlexander Graf     int ret;
21245b95b8b9SAlexander Graf 
212548add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_EPR, 0, mpic_proxy);
21265b95b8b9SAlexander Graf     if (ret && mpic_proxy) {
2127072ed5f2SThomas Huth         error_report("This KVM version does not support EPR");
2128072ed5f2SThomas Huth         exit(1);
21295b95b8b9SAlexander Graf     }
21305b95b8b9SAlexander Graf }
21315b95b8b9SAlexander Graf 
2132e97c3636SDavid Gibson int kvmppc_smt_threads(void)
2133e97c3636SDavid Gibson {
2134e97c3636SDavid Gibson     return cap_ppc_smt ? cap_ppc_smt : 1;
2135e97c3636SDavid Gibson }
2136e97c3636SDavid Gibson 
2137fa98fbfcSSam Bobroff int kvmppc_set_smt_threads(int smt)
2138fa98fbfcSSam Bobroff {
2139fa98fbfcSSam Bobroff     int ret;
2140fa98fbfcSSam Bobroff 
2141fa98fbfcSSam Bobroff     ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_SMT, 0, smt, 0);
2142fa98fbfcSSam Bobroff     if (!ret) {
2143fa98fbfcSSam Bobroff         cap_ppc_smt = smt;
2144fa98fbfcSSam Bobroff     }
2145fa98fbfcSSam Bobroff     return ret;
2146fa98fbfcSSam Bobroff }
2147fa98fbfcSSam Bobroff 
2148fa98fbfcSSam Bobroff void kvmppc_hint_smt_possible(Error **errp)
2149fa98fbfcSSam Bobroff {
2150fa98fbfcSSam Bobroff     int i;
2151fa98fbfcSSam Bobroff     GString *g;
2152fa98fbfcSSam Bobroff     char *s;
2153fa98fbfcSSam Bobroff 
2154fa98fbfcSSam Bobroff     assert(kvm_enabled());
2155fa98fbfcSSam Bobroff     if (cap_ppc_smt_possible) {
2156fa98fbfcSSam Bobroff         g = g_string_new("Available VSMT modes:");
2157fa98fbfcSSam Bobroff         for (i = 63; i >= 0; i--) {
2158fa98fbfcSSam Bobroff             if ((1UL << i) & cap_ppc_smt_possible) {
2159fa98fbfcSSam Bobroff                 g_string_append_printf(g, " %lu", (1UL << i));
2160fa98fbfcSSam Bobroff             }
2161fa98fbfcSSam Bobroff         }
2162fa98fbfcSSam Bobroff         s = g_string_free(g, false);
2163fa98fbfcSSam Bobroff         error_append_hint(errp, "%s.\n", s);
2164fa98fbfcSSam Bobroff         g_free(s);
2165fa98fbfcSSam Bobroff     } else {
2166fa98fbfcSSam Bobroff         error_append_hint(errp,
2167fa98fbfcSSam Bobroff                           "This KVM seems to be too old to support VSMT.\n");
2168fa98fbfcSSam Bobroff     }
2169fa98fbfcSSam Bobroff }
2170fa98fbfcSSam Bobroff 
2171fa98fbfcSSam Bobroff 
21727f763a5dSDavid Gibson #ifdef TARGET_PPC64
2173658fa66bSAlexey Kardashevskiy off_t kvmppc_alloc_rma(void **rma)
2174354ac20aSDavid Gibson {
2175354ac20aSDavid Gibson     off_t size;
2176354ac20aSDavid Gibson     int fd;
2177354ac20aSDavid Gibson     struct kvm_allocate_rma ret;
2178354ac20aSDavid Gibson 
2179354ac20aSDavid Gibson     /* If cap_ppc_rma == 0, contiguous RMA allocation is not supported
2180354ac20aSDavid Gibson      * if cap_ppc_rma == 1, contiguous RMA allocation is supported, but
2181354ac20aSDavid Gibson      *                      not necessary on this hardware
2182354ac20aSDavid Gibson      * if cap_ppc_rma == 2, contiguous RMA allocation is needed on this hardware
2183354ac20aSDavid Gibson      *
2184354ac20aSDavid Gibson      * FIXME: We should allow the user to force contiguous RMA
2185354ac20aSDavid Gibson      * allocation in the cap_ppc_rma==1 case.
2186354ac20aSDavid Gibson      */
2187354ac20aSDavid Gibson     if (cap_ppc_rma < 2) {
2188354ac20aSDavid Gibson         return 0;
2189354ac20aSDavid Gibson     }
2190354ac20aSDavid Gibson 
2191354ac20aSDavid Gibson     fd = kvm_vm_ioctl(kvm_state, KVM_ALLOCATE_RMA, &ret);
2192354ac20aSDavid Gibson     if (fd < 0) {
2193354ac20aSDavid Gibson         fprintf(stderr, "KVM: Error on KVM_ALLOCATE_RMA: %s\n",
2194354ac20aSDavid Gibson                 strerror(errno));
2195354ac20aSDavid Gibson         return -1;
2196354ac20aSDavid Gibson     }
2197354ac20aSDavid Gibson 
2198354ac20aSDavid Gibson     size = MIN(ret.rma_size, 256ul << 20);
2199354ac20aSDavid Gibson 
2200658fa66bSAlexey Kardashevskiy     *rma = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
2201658fa66bSAlexey Kardashevskiy     if (*rma == MAP_FAILED) {
2202354ac20aSDavid Gibson         fprintf(stderr, "KVM: Error mapping RMA: %s\n", strerror(errno));
2203354ac20aSDavid Gibson         return -1;
2204354ac20aSDavid Gibson     };
2205354ac20aSDavid Gibson 
2206354ac20aSDavid Gibson     return size;
2207354ac20aSDavid Gibson }
2208354ac20aSDavid Gibson 
22097f763a5dSDavid Gibson uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift)
22107f763a5dSDavid Gibson {
2211f36951c1SDavid Gibson     struct kvm_ppc_smmu_info info;
2212f36951c1SDavid Gibson     long rampagesize, best_page_shift;
2213f36951c1SDavid Gibson     int i;
2214f36951c1SDavid Gibson 
22157f763a5dSDavid Gibson     if (cap_ppc_rma >= 2) {
22167f763a5dSDavid Gibson         return current_size;
22177f763a5dSDavid Gibson     }
2218f36951c1SDavid Gibson 
2219f36951c1SDavid Gibson     /* Find the largest hardware supported page size that's less than
2220f36951c1SDavid Gibson      * or equal to the (logical) backing page size of guest RAM */
2221182735efSAndreas Färber     kvm_get_smmu_info(POWERPC_CPU(first_cpu), &info);
22229c607668SAlexey Kardashevskiy     rampagesize = qemu_getrampagesize();
2223f36951c1SDavid Gibson     best_page_shift = 0;
2224f36951c1SDavid Gibson 
2225f36951c1SDavid Gibson     for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
2226f36951c1SDavid Gibson         struct kvm_ppc_one_seg_page_size *sps = &info.sps[i];
2227f36951c1SDavid Gibson 
2228f36951c1SDavid Gibson         if (!sps->page_shift) {
2229f36951c1SDavid Gibson             continue;
2230f36951c1SDavid Gibson         }
2231f36951c1SDavid Gibson 
2232f36951c1SDavid Gibson         if ((sps->page_shift > best_page_shift)
2233f36951c1SDavid Gibson             && ((1UL << sps->page_shift) <= rampagesize)) {
2234f36951c1SDavid Gibson             best_page_shift = sps->page_shift;
2235f36951c1SDavid Gibson         }
2236f36951c1SDavid Gibson     }
2237f36951c1SDavid Gibson 
22387f763a5dSDavid Gibson     return MIN(current_size,
2239f36951c1SDavid Gibson                1ULL << (best_page_shift + hash_shift - 7));
22407f763a5dSDavid Gibson }
22417f763a5dSDavid Gibson #endif
22427f763a5dSDavid Gibson 
2243da95324eSAlexey Kardashevskiy bool kvmppc_spapr_use_multitce(void)
2244da95324eSAlexey Kardashevskiy {
2245da95324eSAlexey Kardashevskiy     return cap_spapr_multitce;
2246da95324eSAlexey Kardashevskiy }
2247da95324eSAlexey Kardashevskiy 
22483dc410aeSAlexey Kardashevskiy int kvmppc_spapr_enable_inkernel_multitce(void)
22493dc410aeSAlexey Kardashevskiy {
22503dc410aeSAlexey Kardashevskiy     int ret;
22513dc410aeSAlexey Kardashevskiy 
22523dc410aeSAlexey Kardashevskiy     ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_ENABLE_HCALL, 0,
22533dc410aeSAlexey Kardashevskiy                             H_PUT_TCE_INDIRECT, 1);
22543dc410aeSAlexey Kardashevskiy     if (!ret) {
22553dc410aeSAlexey Kardashevskiy         ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_ENABLE_HCALL, 0,
22563dc410aeSAlexey Kardashevskiy                                 H_STUFF_TCE, 1);
22573dc410aeSAlexey Kardashevskiy     }
22583dc410aeSAlexey Kardashevskiy 
22593dc410aeSAlexey Kardashevskiy     return ret;
22603dc410aeSAlexey Kardashevskiy }
22613dc410aeSAlexey Kardashevskiy 
/*
 * Create a TCE (guest IOMMU translation) table in the kernel and mmap
 * it into qemu.
 *
 * @liobn:       logical IO bus number of the DMA window
 * @page_shift:  log2 of the IOMMU page size
 * @bus_offset:  bus address at which the window starts
 * @nb_table:    number of TCE entries
 * @pfd:         out-parameter: fd backing the table, or -1 when none
 * @need_vfio:   table must also be usable for in-kernel VFIO handling
 *
 * Returns the mapped table, or NULL when the caller should fall back
 * to a userspace-managed table.
 */
void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift,
                              uint64_t bus_offset, uint32_t nb_table,
                              int *pfd, bool need_vfio)
{
    long len;
    int fd;
    void *table;

    /* Must set fd to -1 so we don't try to munmap when called for
     * destroying the table, which the upper layers -will- do
     */
    *pfd = -1;
    if (!cap_spapr_tce || (need_vfio && !cap_spapr_vfio)) {
        return NULL;
    }

    if (cap_spapr_tce_64) {
        /* Preferred path: 64-bit ioctl, supports a non-zero bus offset */
        struct kvm_create_spapr_tce_64 args = {
            .liobn = liobn,
            .page_shift = page_shift,
            .offset = bus_offset >> page_shift,
            .size = nb_table,
            .flags = 0
        };
        fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE_64, &args);
        if (fd < 0) {
            fprintf(stderr,
                    "KVM: Failed to create TCE64 table for liobn 0x%x\n",
                    liobn);
            return NULL;
        }
    } else if (cap_spapr_tce) {
        /*
         * Legacy ioctl: window size is a 32-bit field and the window
         * must start at bus address 0.
         */
        uint64_t window_size = (uint64_t) nb_table << page_shift;
        struct kvm_create_spapr_tce args = {
            .liobn = liobn,
            .window_size = window_size,
        };
        /* Reject windows whose size overflowed the 32-bit field */
        if ((window_size != args.window_size) || bus_offset) {
            return NULL;
        }
        fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE, &args);
        if (fd < 0) {
            fprintf(stderr, "KVM: Failed to create TCE table for liobn 0x%x\n",
                    liobn);
            return NULL;
        }
    } else {
        /* Unreachable: !cap_spapr_tce already returned NULL above */
        return NULL;
    }

    len = nb_table * sizeof(uint64_t);
    /* FIXME: round this up to page size */

    table = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
    if (table == MAP_FAILED) {
        fprintf(stderr, "KVM: Failed to map TCE table for liobn 0x%x\n",
                liobn);
        close(fd);
        return NULL;
    }

    *pfd = fd;
    return table;
}
23260f5cb298SDavid Gibson 
2327523e7b8aSAlexey Kardashevskiy int kvmppc_remove_spapr_tce(void *table, int fd, uint32_t nb_table)
23280f5cb298SDavid Gibson {
23290f5cb298SDavid Gibson     long len;
23300f5cb298SDavid Gibson 
23310f5cb298SDavid Gibson     if (fd < 0) {
23320f5cb298SDavid Gibson         return -1;
23330f5cb298SDavid Gibson     }
23340f5cb298SDavid Gibson 
2335523e7b8aSAlexey Kardashevskiy     len = nb_table * sizeof(uint64_t);
23360f5cb298SDavid Gibson     if ((munmap(table, len) < 0) ||
23370f5cb298SDavid Gibson         (close(fd) < 0)) {
2338b5aec396SDavid Gibson         fprintf(stderr, "KVM: Unexpected error removing TCE table: %s",
2339b5aec396SDavid Gibson                 strerror(errno));
23400f5cb298SDavid Gibson         /* Leak the table */
23410f5cb298SDavid Gibson     }
23420f5cb298SDavid Gibson 
23430f5cb298SDavid Gibson     return 0;
23440f5cb298SDavid Gibson }
23450f5cb298SDavid Gibson 
23467f763a5dSDavid Gibson int kvmppc_reset_htab(int shift_hint)
23477f763a5dSDavid Gibson {
23487f763a5dSDavid Gibson     uint32_t shift = shift_hint;
23497f763a5dSDavid Gibson 
2350ace9a2cbSDavid Gibson     if (!kvm_enabled()) {
2351ace9a2cbSDavid Gibson         /* Full emulation, tell caller to allocate htab itself */
2352ace9a2cbSDavid Gibson         return 0;
2353ace9a2cbSDavid Gibson     }
23546977afdaSGreg Kurz     if (kvm_vm_check_extension(kvm_state, KVM_CAP_PPC_ALLOC_HTAB)) {
23557f763a5dSDavid Gibson         int ret;
23567f763a5dSDavid Gibson         ret = kvm_vm_ioctl(kvm_state, KVM_PPC_ALLOCATE_HTAB, &shift);
2357ace9a2cbSDavid Gibson         if (ret == -ENOTTY) {
2358ace9a2cbSDavid Gibson             /* At least some versions of PR KVM advertise the
2359ace9a2cbSDavid Gibson              * capability, but don't implement the ioctl().  Oops.
2360ace9a2cbSDavid Gibson              * Return 0 so that we allocate the htab in qemu, as is
2361ace9a2cbSDavid Gibson              * correct for PR. */
2362ace9a2cbSDavid Gibson             return 0;
2363ace9a2cbSDavid Gibson         } else if (ret < 0) {
23647f763a5dSDavid Gibson             return ret;
23657f763a5dSDavid Gibson         }
23667f763a5dSDavid Gibson         return shift;
23677f763a5dSDavid Gibson     }
23687f763a5dSDavid Gibson 
2369ace9a2cbSDavid Gibson     /* We have a kernel that predates the htab reset calls.  For PR
2370ace9a2cbSDavid Gibson      * KVM, we need to allocate the htab ourselves, for an HV KVM of
237196c9cff0SThomas Huth      * this era, it has allocated a 16MB fixed size hash table already. */
237296c9cff0SThomas Huth     if (kvmppc_is_pr(kvm_state)) {
2373ace9a2cbSDavid Gibson         /* PR - tell caller to allocate htab */
23747f763a5dSDavid Gibson         return 0;
2375ace9a2cbSDavid Gibson     } else {
2376ace9a2cbSDavid Gibson         /* HV - assume 16MB kernel allocated htab */
2377ace9a2cbSDavid Gibson         return 24;
2378ace9a2cbSDavid Gibson     }
23797f763a5dSDavid Gibson }
23807f763a5dSDavid Gibson 
/* Read the host's Processor Version Register (PPC-only inline asm). */
static inline uint32_t mfpvr(void)
{
    uint32_t pvr;

    asm ("mfpvr %0"
         : "=r"(pvr));
    return pvr;
}
2389a1e98583SDavid Gibson 
/* Set (on == true) or clear (on == false) the given flag bits in *word. */
static void alter_insns(uint64_t *word, uint64_t flags, bool on)
{
    *word = on ? (*word | flags) : (*word & ~flags);
}
2398a7342588SDavid Gibson 
/*
 * Class init for the 'host' CPU class: start from the static class
 * matching the host PVR and fix it up with information queried
 * directly from the host (PVR, AT_HWCAP features, cache sizes, and
 * on ppc64 the radix page geometry).
 */
static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
    uint32_t dcache_size = kvmppc_read_int_cpu_dt("d-cache-size");
    uint32_t icache_size = kvmppc_read_int_cpu_dt("i-cache-size");

    /* Now fix up the class with information we can query from the host */
    pcc->pvr = mfpvr();

    /* Mirror the host's Altivec/VSX/DFP support into the insn flags */
    alter_insns(&pcc->insns_flags, PPC_ALTIVEC,
                qemu_getauxval(AT_HWCAP) & PPC_FEATURE_HAS_ALTIVEC);
    alter_insns(&pcc->insns_flags2, PPC2_VSX,
                qemu_getauxval(AT_HWCAP) & PPC_FEATURE_HAS_VSX);
    alter_insns(&pcc->insns_flags2, PPC2_DFP,
                qemu_getauxval(AT_HWCAP) & PPC_FEATURE_HAS_DFP);

    /* -1 is treated as "property not present in the device tree" */
    if (dcache_size != -1) {
        pcc->l1_dcache_size = dcache_size;
    }

    if (icache_size != -1) {
        pcc->l1_icache_size = icache_size;
    }

#if defined(TARGET_PPC64)
    pcc->radix_page_info = kvm_get_radix_page_info();

    if ((pcc->pvr & 0xffffff00) == CPU_POWERPC_POWER9_DD1) {
        /*
         * POWER9 DD1 has some bugs which make it not really ISA 3.00
         * compliant.  More importantly, advertising ISA 3.00
         * architected mode may prevent guests from activating
         * necessary DD1 workarounds.
         */
        pcc->pcr_supported &= ~(PCR_COMPAT_3_00 | PCR_COMPAT_2_07
                                | PCR_COMPAT_2_06 | PCR_COMPAT_2_05);
    }
#endif /* defined(TARGET_PPC64) */
}
2438a1e98583SDavid Gibson 
/* Return the cached KVM "EPR" capability flag (set elsewhere in this file). */
bool kvmppc_has_cap_epr(void)
{
    return cap_epr;
}
24433b961124SStuart Yoder 
/* Return the cached hcall-fixup capability flag (set elsewhere in this file). */
bool kvmppc_has_cap_fixup_hcalls(void)
{
    return cap_fixup_hcalls;
}
244887a91de6SAlexander Graf 
/* Return the cached hardware-transactional-memory capability flag. */
bool kvmppc_has_cap_htm(void)
{
    return cap_htm;
}
2453bac3bf28SThomas Huth 
/* Return the cached radix-MMU capability flag. */
bool kvmppc_has_cap_mmu_radix(void)
{
    return cap_mmu_radix;
}
2458cf1c4cceSSam Bobroff 
/* Return the cached hash-MMU-v3 capability flag. */
bool kvmppc_has_cap_mmu_hash_v3(void)
{
    return cap_mmu_hash_v3;
}
2463cf1c4cceSSam Bobroff 
/*
 * Query KVM_PPC_GET_CPU_CHAR and cache the host CPU's speculation
 * mitigation characteristics in the cap_ppc_safe_* globals.
 *
 * NOTE(review): the 0/1/2 levels appear to mean vulnerable /
 * workaround available / not vulnerable -- confirm against the spapr
 * cap consumers of these globals.
 */
static void kvmppc_get_cpu_characteristics(KVMState *s)
{
    struct kvm_ppc_cpu_char c;
    int ret;

    /* Assume broken */
    cap_ppc_safe_cache = 0;
    cap_ppc_safe_bounds_check = 0;
    cap_ppc_safe_indirect_branch = 0;

    ret = kvm_vm_check_extension(s, KVM_CAP_PPC_GET_CPU_CHAR);
    if (!ret) {
        return;
    }
    ret = kvm_vm_ioctl(s, KVM_PPC_GET_CPU_CHAR, &c);
    if (ret < 0) {
        return;
    }
    /* Parse and set cap_ppc_safe_cache */
    if (~c.behaviour & c.behaviour_mask & H_CPU_BEHAV_L1D_FLUSH_PR) {
        cap_ppc_safe_cache = 2;
    } else if ((c.character & c.character_mask & H_CPU_CHAR_L1D_THREAD_PRIV) &&
               (c.character & c.character_mask
                & (H_CPU_CHAR_L1D_FLUSH_ORI30 | H_CPU_CHAR_L1D_FLUSH_TRIG2))) {
        cap_ppc_safe_cache = 1;
    }
    /* Parse and set cap_ppc_safe_bounds_check */
    if (~c.behaviour & c.behaviour_mask & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR) {
        cap_ppc_safe_bounds_check = 2;
    } else if (c.character & c.character_mask & H_CPU_CHAR_SPEC_BAR_ORI31) {
        cap_ppc_safe_bounds_check = 1;
    }
    /* Parse and set cap_ppc_safe_indirect_branch */
    if (c.character & H_CPU_CHAR_BCCTRL_SERIALISED) {
        cap_ppc_safe_indirect_branch = 2;
    }
}
25018acc2ae5SSuraj Jitindar Singh 
/* Return the cached L1D-cache speculation-safety level (see above). */
int kvmppc_get_cap_safe_cache(void)
{
    return cap_ppc_safe_cache;
}
25068acc2ae5SSuraj Jitindar Singh 
/* Return the cached bounds-check speculation-safety level (see above). */
int kvmppc_get_cap_safe_bounds_check(void)
{
    return cap_ppc_safe_bounds_check;
}
25118acc2ae5SSuraj Jitindar Singh 
/* Return the cached indirect-branch speculation-safety level (see above). */
int kvmppc_get_cap_safe_indirect_branch(void)
{
    return cap_ppc_safe_indirect_branch;
}
25168acc2ae5SSuraj Jitindar Singh 
/* Return the cached sPAPR-TCE-for-VFIO capability flag. */
bool kvmppc_has_cap_spapr_vfio(void)
{
    return cap_spapr_vfio;
}
2521*9ded780cSAlexey Kardashevskiy 
252252b2519cSThomas Huth PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void)
252352b2519cSThomas Huth {
252452b2519cSThomas Huth     uint32_t host_pvr = mfpvr();
252552b2519cSThomas Huth     PowerPCCPUClass *pvr_pcc;
252652b2519cSThomas Huth 
252752b2519cSThomas Huth     pvr_pcc = ppc_cpu_class_by_pvr(host_pvr);
252852b2519cSThomas Huth     if (pvr_pcc == NULL) {
252952b2519cSThomas Huth         pvr_pcc = ppc_cpu_class_by_pvr_mask(host_pvr);
253052b2519cSThomas Huth     }
253152b2519cSThomas Huth 
253252b2519cSThomas Huth     return pvr_pcc;
253352b2519cSThomas Huth }
253452b2519cSThomas Huth 
/*
 * Register the TYPE_HOST_POWERPC_CPU class, derived from the static
 * class matching the host PVR, and repoint the matching CPU family
 * alias (e.g. "POWER8") at it.  On sPAPR machines the 'host' model
 * also becomes the default CPU type.
 *
 * Returns 0 on success, -1 when the host PVR matches no known class.
 */
static int kvm_ppc_register_host_cpu_type(MachineState *ms)
{
    TypeInfo type_info = {
        .name = TYPE_HOST_POWERPC_CPU,
        .class_init = kvmppc_host_cpu_class_init,
    };
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    PowerPCCPUClass *pvr_pcc;
    ObjectClass *oc;
    DeviceClass *dc;
    int i;

    pvr_pcc = kvm_ppc_get_host_cpu_class();
    if (pvr_pcc == NULL) {
        return -1;
    }
    type_info.parent = object_class_get_name(OBJECT_CLASS(pvr_pcc));
    type_register(&type_info);
    if (object_dynamic_cast(OBJECT(ms), TYPE_SPAPR_MACHINE)) {
        /* override TCG default cpu type with 'host' cpu model */
        mc->default_cpu_type = TYPE_HOST_POWERPC_CPU;
    }

    oc = object_class_by_name(type_info.name);
    g_assert(oc);

    /*
     * Update generic CPU family class alias (e.g. on a POWER8NVL host,
     * we want "POWER8" to be a "family" alias that points to the current
     * host CPU type, too)
     */
    dc = DEVICE_CLASS(ppc_cpu_get_family_class(pvr_pcc));
    for (i = 0; ppc_cpu_aliases[i].alias != NULL; i++) {
        if (strcasecmp(ppc_cpu_aliases[i].alias, dc->desc) == 0) {
            char *suffix;

            /* Point the alias at the host class, minus the type suffix */
            ppc_cpu_aliases[i].model = g_strdup(object_class_get_name(oc));
            suffix = strstr(ppc_cpu_aliases[i].model, POWERPC_CPU_TYPE_SUFFIX);
            if (suffix) {
                *suffix = 0;
            }
            break;
        }
    }

    return 0;
}
25825ba4576bSAndreas Färber 
2583feaa64c4SDavid Gibson int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function)
2584feaa64c4SDavid Gibson {
2585feaa64c4SDavid Gibson     struct kvm_rtas_token_args args = {
2586feaa64c4SDavid Gibson         .token = token,
2587feaa64c4SDavid Gibson     };
2588feaa64c4SDavid Gibson 
2589feaa64c4SDavid Gibson     if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_RTAS)) {
2590feaa64c4SDavid Gibson         return -ENOENT;
2591feaa64c4SDavid Gibson     }
2592feaa64c4SDavid Gibson 
2593feaa64c4SDavid Gibson     strncpy(args.name, function, sizeof(args.name));
2594feaa64c4SDavid Gibson 
2595feaa64c4SDavid Gibson     return kvm_vm_ioctl(kvm_state, KVM_PPC_RTAS_DEFINE_TOKEN, &args);
2596feaa64c4SDavid Gibson }
259712b1143bSDavid Gibson 
/*
 * Open a file descriptor for reading (write == false) or writing
 * (write == true) the guest HPT via KVM_PPC_GET_HTAB_FD, starting at
 * HPTE index 'index'.
 *
 * Returns the fd on success; on failure sets *errp and returns
 * -ENOTSUP (capability missing) or -errno (ioctl failed).
 */
int kvmppc_get_htab_fd(bool write, uint64_t index, Error **errp)
{
    struct kvm_get_htab_fd s = {
        .flags = write ? KVM_GET_HTAB_WRITE : 0,
        .start_index = index,
    };
    int ret;

    if (!cap_htab_fd) {
        error_setg(errp, "KVM version doesn't support %s the HPT",
                   write ? "writing" : "reading");
        return -ENOTSUP;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &s);
    if (ret < 0) {
        error_setg(errp, "Unable to open fd for %s HPT %s KVM: %s",
                   write ? "writing" : "reading", write ? "to" : "from",
                   strerror(errno));
        return -errno;
    }

    return ret;
}
2622e68cb8b4SAlexey Kardashevskiy 
/*
 * Stream guest HPT chunks from the KVM htab fd into the migration
 * stream 'f'.  Each chunk read from the kernel is a
 * struct kvm_get_htab_header (index, n_valid, n_invalid) followed by
 * n_valid raw HPTEs; it is re-emitted to 'f' as big-endian header
 * fields followed by the HPTE data.  Stops when the fd reports EOF or
 * after roughly max_ns nanoseconds (max_ns < 0 means no time limit).
 *
 * Returns 1 when the whole table has been sent (EOF), 0 when there is
 * more to do, or the negative read() result on error.
 */
int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns)
{
    int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    /* NOTE(review): caller-controlled VLA on the stack -- callers are
     * expected to pass a modest bufsize; confirm at the call sites. */
    uint8_t buf[bufsize];
    ssize_t rc;

    do {
        rc = read(fd, buf, bufsize);
        if (rc < 0) {
            fprintf(stderr, "Error reading data from KVM HTAB fd: %s\n",
                    strerror(errno));
            return rc;
        } else if (rc) {
            uint8_t *buffer = buf;
            ssize_t n = rc;
            /* Walk every chunk the kernel packed into this read */
            while (n) {
                struct kvm_get_htab_header *head =
                    (struct kvm_get_htab_header *) buffer;
                size_t chunksize = sizeof(*head) +
                     HASH_PTE_SIZE_64 * head->n_valid;

                qemu_put_be32(f, head->index);
                qemu_put_be16(f, head->n_valid);
                qemu_put_be16(f, head->n_invalid);
                qemu_put_buffer(f, (void *)(head + 1),
                                HASH_PTE_SIZE_64 * head->n_valid);

                buffer += chunksize;
                n -= chunksize;
            }
        }
    } while ((rc != 0)
             && ((max_ns < 0)
                 || ((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) < max_ns)));

    return (rc == 0) ? 1 : 0;
}
2660e68cb8b4SAlexey Kardashevskiy 
2661e68cb8b4SAlexey Kardashevskiy int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
2662e68cb8b4SAlexey Kardashevskiy                            uint16_t n_valid, uint16_t n_invalid)
2663e68cb8b4SAlexey Kardashevskiy {
2664e68cb8b4SAlexey Kardashevskiy     struct kvm_get_htab_header *buf;
2665e68cb8b4SAlexey Kardashevskiy     size_t chunksize = sizeof(*buf) + n_valid*HASH_PTE_SIZE_64;
2666e68cb8b4SAlexey Kardashevskiy     ssize_t rc;
2667e68cb8b4SAlexey Kardashevskiy 
2668e68cb8b4SAlexey Kardashevskiy     buf = alloca(chunksize);
2669e68cb8b4SAlexey Kardashevskiy     buf->index = index;
2670e68cb8b4SAlexey Kardashevskiy     buf->n_valid = n_valid;
2671e68cb8b4SAlexey Kardashevskiy     buf->n_invalid = n_invalid;
2672e68cb8b4SAlexey Kardashevskiy 
2673e68cb8b4SAlexey Kardashevskiy     qemu_get_buffer(f, (void *)(buf + 1), HASH_PTE_SIZE_64*n_valid);
2674e68cb8b4SAlexey Kardashevskiy 
2675e68cb8b4SAlexey Kardashevskiy     rc = write(fd, buf, chunksize);
2676e68cb8b4SAlexey Kardashevskiy     if (rc < 0) {
2677e68cb8b4SAlexey Kardashevskiy         fprintf(stderr, "Error writing KVM hash table: %s\n",
2678e68cb8b4SAlexey Kardashevskiy                 strerror(errno));
2679e68cb8b4SAlexey Kardashevskiy         return rc;
2680e68cb8b4SAlexey Kardashevskiy     }
2681e68cb8b4SAlexey Kardashevskiy     if (rc != chunksize) {
2682e68cb8b4SAlexey Kardashevskiy         /* We should never get a short write on a single chunk */
2683e68cb8b4SAlexey Kardashevskiy         fprintf(stderr, "Short write, restoring KVM hash table\n");
2684e68cb8b4SAlexey Kardashevskiy         return -1;
2685e68cb8b4SAlexey Kardashevskiy     }
2686e68cb8b4SAlexey Kardashevskiy     return 0;
2687e68cb8b4SAlexey Kardashevskiy }
2688e68cb8b4SAlexey Kardashevskiy 
/* Tell common KVM code to always stop the vcpu on emulation errors. */
bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
{
    return true;
}
2693a1b87fe0SJan Kiszka 
/* No PPC-specific IRQ routing setup is needed. */
void kvm_arch_init_irq_routing(KVMState *s)
{
}
2697c65f9a07SGreg Kurz 
/*
 * Read n HPTEs starting at index ptex from the KVM htab fd into
 * 'hptes'.  Ranges the kernel reports as invalid carry no data in the
 * stream and are zero-filled here.  Any read failure or an unexpected
 * chunk index is fatal (hw_error).
 */
void kvmppc_read_hptes(ppc_hash_pte64_t *hptes, hwaddr ptex, int n)
{
    int fd, rc;
    int i;

    fd = kvmppc_get_htab_fd(false, ptex, &error_abort);

    i = 0;
    while (i < n) {
        struct kvm_get_htab_header *hdr;
        int m = n < HPTES_PER_GROUP ? n : HPTES_PER_GROUP;
        char buf[sizeof(*hdr) + m * HASH_PTE_SIZE_64];

        rc = read(fd, buf, sizeof(buf));
        if (rc < 0) {
            hw_error("kvmppc_read_hptes: Unable to read HPTEs");
        }

        hdr = (struct kvm_get_htab_header *)buf;
        /* A single read may return several header+data chunks */
        while ((i < n) && ((char *)hdr < (buf + rc))) {
            int invalid = hdr->n_invalid, valid = hdr->n_valid;

            if (hdr->index != (ptex + i)) {
                hw_error("kvmppc_read_hptes: Unexpected HPTE index %"PRIu32
                         " != (%"HWADDR_PRIu" + %d", hdr->index, ptex, i);
            }

            /* Copy the valid entries, clamped to what we still need */
            if (n - i < valid) {
                valid = n - i;
            }
            memcpy(hptes + i, hdr + 1, HASH_PTE_SIZE_64 * valid);
            i += valid;

            /* Invalid entries have no stream data; zero-fill instead */
            if ((n - i) < invalid) {
                invalid = n - i;
            }
            memset(hptes + i, 0, invalid * HASH_PTE_SIZE_64);
            i += invalid;

            /* Step past this chunk using the kernel's full n_valid */
            hdr = (struct kvm_get_htab_header *)
                ((char *)(hdr + 1) + HASH_PTE_SIZE_64 * hdr->n_valid);
        }
    }

    close(fd);
}
27441ad9f0a4SDavid Gibson 
27451ad9f0a4SDavid Gibson void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1)
27467c43bca0SAneesh Kumar K.V {
27471ad9f0a4SDavid Gibson     int fd, rc;
27481ad9f0a4SDavid Gibson     struct {
27491ad9f0a4SDavid Gibson         struct kvm_get_htab_header hdr;
27501ad9f0a4SDavid Gibson         uint64_t pte0;
27511ad9f0a4SDavid Gibson         uint64_t pte1;
27521ad9f0a4SDavid Gibson     } buf;
2753c1385933SAneesh Kumar K.V 
275414b0d748SGreg Kurz     fd = kvmppc_get_htab_fd(true, 0 /* Ignored */, &error_abort);
2755c1385933SAneesh Kumar K.V 
27561ad9f0a4SDavid Gibson     buf.hdr.n_valid = 1;
27571ad9f0a4SDavid Gibson     buf.hdr.n_invalid = 0;
27581ad9f0a4SDavid Gibson     buf.hdr.index = ptex;
27591ad9f0a4SDavid Gibson     buf.pte0 = cpu_to_be64(pte0);
27601ad9f0a4SDavid Gibson     buf.pte1 = cpu_to_be64(pte1);
27611ad9f0a4SDavid Gibson 
27621ad9f0a4SDavid Gibson     rc = write(fd, &buf, sizeof(buf));
27631ad9f0a4SDavid Gibson     if (rc != sizeof(buf)) {
27641ad9f0a4SDavid Gibson         hw_error("kvmppc_write_hpte: Unable to update KVM HPT");
2765c1385933SAneesh Kumar K.V     }
27661ad9f0a4SDavid Gibson     close(fd);
2767c1385933SAneesh Kumar K.V }
27689e03a040SFrank Blaschka 
/* No PPC-specific MSI route fixup is needed; always succeed. */
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    return 0;
}
27741850b6b7SEric Auger 
/* No PPC-specific post-add MSI route handling; always succeed. */
int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}
278038d87493SPeter Xu 
/* No PPC-specific cleanup when a virq is released; always succeed. */
int kvm_arch_release_virq_post(int virq)
{
    return 0;
}
278538d87493SPeter Xu 
/* Extract the GSI from MSI data: it lives in the low 16 bits. */
int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    const uint32_t gsi_mask = 0xffff;

    return data & gsi_mask;
}
27904d9392beSThomas Huth 
27914d9392beSThomas Huth int kvmppc_enable_hwrng(void)
27924d9392beSThomas Huth {
27934d9392beSThomas Huth     if (!kvm_enabled() || !kvm_check_extension(kvm_state, KVM_CAP_PPC_HWRNG)) {
27944d9392beSThomas Huth         return -1;
27954d9392beSThomas Huth     }
27964d9392beSThomas Huth 
27974d9392beSThomas Huth     return kvmppc_enable_hcall(kvm_state, H_RANDOM);
27984d9392beSThomas Huth }
279930f4b05bSDavid Gibson 
280030f4b05bSDavid Gibson void kvmppc_check_papr_resize_hpt(Error **errp)
280130f4b05bSDavid Gibson {
280230f4b05bSDavid Gibson     if (!kvm_enabled()) {
2803b55d295eSDavid Gibson         return; /* No KVM, we're good */
2804b55d295eSDavid Gibson     }
2805b55d295eSDavid Gibson 
2806b55d295eSDavid Gibson     if (cap_resize_hpt) {
2807b55d295eSDavid Gibson         return; /* Kernel has explicit support, we're good */
2808b55d295eSDavid Gibson     }
2809b55d295eSDavid Gibson 
2810b55d295eSDavid Gibson     /* Otherwise fallback on looking for PR KVM */
2811b55d295eSDavid Gibson     if (kvmppc_is_pr(kvm_state)) {
281230f4b05bSDavid Gibson         return;
281330f4b05bSDavid Gibson     }
281430f4b05bSDavid Gibson 
281530f4b05bSDavid Gibson     error_setg(errp,
281630f4b05bSDavid Gibson                "Hash page table resizing not available with this KVM version");
281730f4b05bSDavid Gibson }
2818b55d295eSDavid Gibson 
2819b55d295eSDavid Gibson int kvmppc_resize_hpt_prepare(PowerPCCPU *cpu, target_ulong flags, int shift)
2820b55d295eSDavid Gibson {
2821b55d295eSDavid Gibson     CPUState *cs = CPU(cpu);
2822b55d295eSDavid Gibson     struct kvm_ppc_resize_hpt rhpt = {
2823b55d295eSDavid Gibson         .flags = flags,
2824b55d295eSDavid Gibson         .shift = shift,
2825b55d295eSDavid Gibson     };
2826b55d295eSDavid Gibson 
2827b55d295eSDavid Gibson     if (!cap_resize_hpt) {
2828b55d295eSDavid Gibson         return -ENOSYS;
2829b55d295eSDavid Gibson     }
2830b55d295eSDavid Gibson 
2831b55d295eSDavid Gibson     return kvm_vm_ioctl(cs->kvm_state, KVM_PPC_RESIZE_HPT_PREPARE, &rhpt);
2832b55d295eSDavid Gibson }
2833b55d295eSDavid Gibson 
2834b55d295eSDavid Gibson int kvmppc_resize_hpt_commit(PowerPCCPU *cpu, target_ulong flags, int shift)
2835b55d295eSDavid Gibson {
2836b55d295eSDavid Gibson     CPUState *cs = CPU(cpu);
2837b55d295eSDavid Gibson     struct kvm_ppc_resize_hpt rhpt = {
2838b55d295eSDavid Gibson         .flags = flags,
2839b55d295eSDavid Gibson         .shift = shift,
2840b55d295eSDavid Gibson     };
2841b55d295eSDavid Gibson 
2842b55d295eSDavid Gibson     if (!cap_resize_hpt) {
2843b55d295eSDavid Gibson         return -ENOSYS;
2844b55d295eSDavid Gibson     }
2845b55d295eSDavid Gibson 
2846b55d295eSDavid Gibson     return kvm_vm_ioctl(cs->kvm_state, KVM_PPC_RESIZE_HPT_COMMIT, &rhpt);
2847b55d295eSDavid Gibson }
2848b55d295eSDavid Gibson 
2849c363a37aSDaniel Henrique Barboza /*
2850c363a37aSDaniel Henrique Barboza  * This is a helper function to detect a post migration scenario
2851c363a37aSDaniel Henrique Barboza  * in which a guest, running as KVM-HV, freezes in cpu_post_load because
2852c363a37aSDaniel Henrique Barboza  * the guest kernel can't handle a PVR value other than the actual host
2853c363a37aSDaniel Henrique Barboza  * PVR in KVM_SET_SREGS, even if pvr_match() returns true.
2854c363a37aSDaniel Henrique Barboza  *
2855c363a37aSDaniel Henrique Barboza  * If we don't have cap_ppc_pvr_compat and we're not running in PR
2856c363a37aSDaniel Henrique Barboza  * (so, we're HV), return true. The workaround itself is done in
2857c363a37aSDaniel Henrique Barboza  * cpu_post_load.
2858c363a37aSDaniel Henrique Barboza  *
2859c363a37aSDaniel Henrique Barboza  * The order here is important: we'll only check for KVM PR as a
2860c363a37aSDaniel Henrique Barboza  * fallback if the guest kernel can't handle the situation itself.
2861c363a37aSDaniel Henrique Barboza  * We need to avoid as much as possible querying the running KVM type
2862c363a37aSDaniel Henrique Barboza  * in QEMU level.
2863c363a37aSDaniel Henrique Barboza  */
2864c363a37aSDaniel Henrique Barboza bool kvmppc_pvr_workaround_required(PowerPCCPU *cpu)
2865c363a37aSDaniel Henrique Barboza {
2866c363a37aSDaniel Henrique Barboza     CPUState *cs = CPU(cpu);
2867c363a37aSDaniel Henrique Barboza 
2868c363a37aSDaniel Henrique Barboza     if (!kvm_enabled()) {
2869c363a37aSDaniel Henrique Barboza         return false;
2870c363a37aSDaniel Henrique Barboza     }
2871c363a37aSDaniel Henrique Barboza 
2872c363a37aSDaniel Henrique Barboza     if (cap_ppc_pvr_compat) {
2873c363a37aSDaniel Henrique Barboza         return false;
2874c363a37aSDaniel Henrique Barboza     }
2875c363a37aSDaniel Henrique Barboza 
2876c363a37aSDaniel Henrique Barboza     return !kvmppc_is_pr(cs->kvm_state);
2877c363a37aSDaniel Henrique Barboza }
2878