xref: /qemu/target/ppc/kvm.c (revision c64abd1f9c732f58181d0a46a0da72168759e77b)
1d76d1650Saurel32 /*
2d76d1650Saurel32  * PowerPC implementation of KVM hooks
3d76d1650Saurel32  *
4d76d1650Saurel32  * Copyright IBM Corp. 2007
590dc8812SScott Wood  * Copyright (C) 2011 Freescale Semiconductor, Inc.
6d76d1650Saurel32  *
7d76d1650Saurel32  * Authors:
8d76d1650Saurel32  *  Jerone Young <jyoung5@us.ibm.com>
9d76d1650Saurel32  *  Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
10d76d1650Saurel32  *  Hollis Blanchard <hollisb@us.ibm.com>
11d76d1650Saurel32  *
12d76d1650Saurel32  * This work is licensed under the terms of the GNU GPL, version 2 or later.
13d76d1650Saurel32  * See the COPYING file in the top-level directory.
14d76d1650Saurel32  *
15d76d1650Saurel32  */
16d76d1650Saurel32 
170d75590dSPeter Maydell #include "qemu/osdep.h"
18eadaada1SAlexander Graf #include <dirent.h>
19d76d1650Saurel32 #include <sys/ioctl.h>
204656e1f0SBenjamin Herrenschmidt #include <sys/vfs.h>
21d76d1650Saurel32 
22d76d1650Saurel32 #include <linux/kvm.h>
23d76d1650Saurel32 
24d76d1650Saurel32 #include "qemu-common.h"
25072ed5f2SThomas Huth #include "qemu/error-report.h"
2633c11879SPaolo Bonzini #include "cpu.h"
27715d4b96SThomas Huth #include "cpu-models.h"
281de7afc9SPaolo Bonzini #include "qemu/timer.h"
299c17d615SPaolo Bonzini #include "sysemu/sysemu.h"
30b3946626SVincent Palatin #include "sysemu/hw_accel.h"
31d76d1650Saurel32 #include "kvm_ppc.h"
329c17d615SPaolo Bonzini #include "sysemu/cpus.h"
339c17d615SPaolo Bonzini #include "sysemu/device_tree.h"
34d5aea6f3SDavid Gibson #include "mmu-hash64.h"
35d76d1650Saurel32 
36f61b4bedSAlexander Graf #include "hw/sysbus.h"
370d09e41aSPaolo Bonzini #include "hw/ppc/spapr.h"
380d09e41aSPaolo Bonzini #include "hw/ppc/spapr_vio.h"
397ebaf795SBharata B Rao #include "hw/ppc/spapr_cpu_core.h"
4098a8b524SAlexey Kardashevskiy #include "hw/ppc/ppc.h"
4131f2cb8fSBharat Bhushan #include "sysemu/watchdog.h"
42b36f100eSAlexey Kardashevskiy #include "trace.h"
4388365d17SBharat Bhushan #include "exec/gdbstub.h"
444c663752SPaolo Bonzini #include "exec/memattrs.h"
459c607668SAlexey Kardashevskiy #include "exec/ram_addr.h"
462d103aaeSMichael Roth #include "sysemu/hostmem.h"
47f348b6d1SVeronia Bahaa #include "qemu/cutils.h"
489c607668SAlexey Kardashevskiy #include "qemu/mmap-alloc.h"
493b542549SBharata B Rao #if defined(TARGET_PPC64)
503b542549SBharata B Rao #include "hw/ppc/spapr_cpu_core.h"
513b542549SBharata B Rao #endif
52f3d9f303SSam Bobroff #include "elf.h"
53*c64abd1fSSam Bobroff #include "sysemu/kvm_int.h"
54f61b4bedSAlexander Graf 
55d76d1650Saurel32 //#define DEBUG_KVM
56d76d1650Saurel32 
57d76d1650Saurel32 #ifdef DEBUG_KVM
58da56ff91SPeter Maydell #define DPRINTF(fmt, ...) \
59d76d1650Saurel32     do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
60d76d1650Saurel32 #else
61da56ff91SPeter Maydell #define DPRINTF(fmt, ...) \
62d76d1650Saurel32     do { } while (0)
63d76d1650Saurel32 #endif
64d76d1650Saurel32 
65eadaada1SAlexander Graf #define PROC_DEVTREE_CPU      "/proc/device-tree/cpus/"
66eadaada1SAlexander Graf 
6794a8d39aSJan Kiszka const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
6894a8d39aSJan Kiszka     KVM_CAP_LAST_INFO
6994a8d39aSJan Kiszka };
7094a8d39aSJan Kiszka 
71fc87e185SAlexander Graf static int cap_interrupt_unset = false;
72fc87e185SAlexander Graf static int cap_interrupt_level = false;
7390dc8812SScott Wood static int cap_segstate;
7490dc8812SScott Wood static int cap_booke_sregs;
75e97c3636SDavid Gibson static int cap_ppc_smt;
76354ac20aSDavid Gibson static int cap_ppc_rma;
770f5cb298SDavid Gibson static int cap_spapr_tce;
78d6ee2a7cSAlexey Kardashevskiy static int cap_spapr_tce_64;
79da95324eSAlexey Kardashevskiy static int cap_spapr_multitce;
809bb62a07SAlexey Kardashevskiy static int cap_spapr_vfio;
81f1af19d7SDavid Gibson static int cap_hior;
82d67d40eaSDavid Gibson static int cap_one_reg;
833b961124SStuart Yoder static int cap_epr;
8431f2cb8fSBharat Bhushan static int cap_ppc_watchdog;
859b00ea49SDavid Gibson static int cap_papr;
86e68cb8b4SAlexey Kardashevskiy static int cap_htab_fd;
8787a91de6SAlexander Graf static int cap_fixup_hcalls;
88bac3bf28SThomas Huth static int cap_htm;             /* Hardware transactional memory support */
89fc87e185SAlexander Graf 
903c902d44SBharat Bhushan static uint32_t debug_inst_opcode;
913c902d44SBharat Bhushan 
92c821c2bdSAlexander Graf /* XXX We have a race condition where we actually have a level triggered
93c821c2bdSAlexander Graf  *     interrupt, but the infrastructure can't expose that yet, so the guest
94c821c2bdSAlexander Graf  *     takes but ignores it, goes to sleep and never gets notified that there's
95c821c2bdSAlexander Graf  *     still an interrupt pending.
96c6a94ba5SAlexander Graf  *
97c821c2bdSAlexander Graf  *     As a quick workaround, let's just wake up again 20 ms after we injected
98c821c2bdSAlexander Graf  *     an interrupt. That way we can ensure that we're always reinjecting
99c821c2bdSAlexander Graf  *     interrupts in case the guest swallowed them.
100c6a94ba5SAlexander Graf  */
101c6a94ba5SAlexander Graf static QEMUTimer *idle_timer;
102c6a94ba5SAlexander Graf 
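/* Timer callback for idle_timer: kick the vCPU so that any swallowed
 * interrupt gets re-delivered (see the workaround comment above). */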
103d5a68146SAndreas Färber static void kvm_kick_cpu(void *opaque)
104c6a94ba5SAlexander Graf {
105d5a68146SAndreas Färber     PowerPCCPU *cpu = opaque;
106d5a68146SAndreas Färber 
107c08d7424SAndreas Färber     qemu_cpu_kick(CPU(cpu));
108c6a94ba5SAlexander Graf }
109c6a94ba5SAlexander Graf 
11096c9cff0SThomas Huth /* Check whether we are running with KVM-PR (instead of KVM-HV).  This
11196c9cff0SThomas Huth  * should only be used for fallback tests - generally we should use
11296c9cff0SThomas Huth  * explicit capabilities for the features we want, rather than
11396c9cff0SThomas Huth  * assuming what is/isn't available depending on the KVM variant. */
11496c9cff0SThomas Huth static bool kvmppc_is_pr(KVMState *ks)
11596c9cff0SThomas Huth {
11696c9cff0SThomas Huth     /* Assume KVM-PR if the GET_PVINFO capability is available */
11796c9cff0SThomas Huth     return kvm_check_extension(ks, KVM_CAP_PPC_GET_PVINFO) != 0;
11896c9cff0SThomas Huth }
11996c9cff0SThomas Huth 
1205ba4576bSAndreas Färber static int kvm_ppc_register_host_cpu_type(void);
1215ba4576bSAndreas Färber 
122b16565b3SMarcel Apfelbaum int kvm_arch_init(MachineState *ms, KVMState *s)
123d76d1650Saurel32 {
124fc87e185SAlexander Graf     cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);
125fc87e185SAlexander Graf     cap_interrupt_level = kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL);
12690dc8812SScott Wood     cap_segstate = kvm_check_extension(s, KVM_CAP_PPC_SEGSTATE);
12790dc8812SScott Wood     cap_booke_sregs = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_SREGS);
128e97c3636SDavid Gibson     cap_ppc_smt = kvm_check_extension(s, KVM_CAP_PPC_SMT);
129354ac20aSDavid Gibson     cap_ppc_rma = kvm_check_extension(s, KVM_CAP_PPC_RMA);
1300f5cb298SDavid Gibson     cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);
131d6ee2a7cSAlexey Kardashevskiy     cap_spapr_tce_64 = kvm_check_extension(s, KVM_CAP_SPAPR_TCE_64);
132da95324eSAlexey Kardashevskiy     cap_spapr_multitce = kvm_check_extension(s, KVM_CAP_SPAPR_MULTITCE);
1339bb62a07SAlexey Kardashevskiy     cap_spapr_vfio = false;
134d67d40eaSDavid Gibson     cap_one_reg = kvm_check_extension(s, KVM_CAP_ONE_REG);
135f1af19d7SDavid Gibson     cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);
1363b961124SStuart Yoder     cap_epr = kvm_check_extension(s, KVM_CAP_PPC_EPR);
13731f2cb8fSBharat Bhushan     cap_ppc_watchdog = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_WATCHDOG);
1389b00ea49SDavid Gibson     /* Note: we don't set cap_papr here, because this capability is
1399b00ea49SDavid Gibson      * only activated later by kvmppc_set_papr() */
140e68cb8b4SAlexey Kardashevskiy     cap_htab_fd = kvm_check_extension(s, KVM_CAP_PPC_HTAB_FD);
14187a91de6SAlexander Graf     cap_fixup_hcalls = kvm_check_extension(s, KVM_CAP_PPC_FIXUP_HCALL);
142bac3bf28SThomas Huth     cap_htm = kvm_vm_check_extension(s, KVM_CAP_PPC_HTM);
143fc87e185SAlexander Graf 
144fc87e185SAlexander Graf     if (!cap_interrupt_level) {
145fc87e185SAlexander Graf         fprintf(stderr, "KVM: Couldn't find level irq capability. Expect the "
146fc87e185SAlexander Graf                         "VM to stall at times!\n");
147fc87e185SAlexander Graf     }
148fc87e185SAlexander Graf 
1495ba4576bSAndreas Färber     kvm_ppc_register_host_cpu_type();
1505ba4576bSAndreas Färber 
151d76d1650Saurel32     return 0;
152d76d1650Saurel32 }
153d76d1650Saurel32 
154d525ffabSPaolo Bonzini int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
155d525ffabSPaolo Bonzini {
156d525ffabSPaolo Bonzini     return 0;
157d525ffabSPaolo Bonzini }
158d525ffabSPaolo Bonzini 
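/* Push the guest PVR to KVM via the sregs interface (Book3S only;
 * on BookE the native PVR is kept, see the comment below). */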
1591bc22652SAndreas Färber static int kvm_arch_sync_sregs(PowerPCCPU *cpu)
160d76d1650Saurel32 {
1611bc22652SAndreas Färber     CPUPPCState *cenv = &cpu->env;
1621bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
163861bbc80SAlexander Graf     struct kvm_sregs sregs;
1645666ca4aSScott Wood     int ret;
1655666ca4aSScott Wood 
1665666ca4aSScott Wood     if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
16764e07be5SAlexander Graf         /* What we're really trying to say is "if we're on BookE, we use
16864e07be5SAlexander Graf            the native PVR for now". This is the only sane way to check
16964e07be5SAlexander Graf            it though, so we potentially mislead users into thinking they can
17064e07be5SAlexander Graf            run BookE guests on BookS. Let's hope nobody dares to try :) */
1715666ca4aSScott Wood         return 0;
1725666ca4aSScott Wood     } else {
17390dc8812SScott Wood         if (!cap_segstate) {
17464e07be5SAlexander Graf             fprintf(stderr, "kvm error: missing PVR setting capability\n");
17564e07be5SAlexander Graf             return -ENOSYS;
1765666ca4aSScott Wood         }
1775666ca4aSScott Wood     }
1785666ca4aSScott Wood 
1791bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
1805666ca4aSScott Wood     if (ret) {
1815666ca4aSScott Wood         return ret;
1825666ca4aSScott Wood     }
183861bbc80SAlexander Graf 
184861bbc80SAlexander Graf     sregs.pvr = cenv->spr[SPR_PVR];
1851bc22652SAndreas Färber     return kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
1865666ca4aSScott Wood }
1875666ca4aSScott Wood 
18893dd5e85SScott Wood /* Set up a shared TLB array with KVM */
1891bc22652SAndreas Färber static int kvm_booke206_tlb_init(PowerPCCPU *cpu)
19093dd5e85SScott Wood {
1911bc22652SAndreas Färber     CPUPPCState *env = &cpu->env;
1921bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
19393dd5e85SScott Wood     struct kvm_book3e_206_tlb_params params = {};
19493dd5e85SScott Wood     struct kvm_config_tlb cfg = {};
19593dd5e85SScott Wood     unsigned int entries = 0;
19693dd5e85SScott Wood     int ret, i;
19793dd5e85SScott Wood 
19893dd5e85SScott Wood     if (!kvm_enabled() ||
199a60f24b5SAndreas Färber         !kvm_check_extension(cs->kvm_state, KVM_CAP_SW_TLB)) {
20093dd5e85SScott Wood         return 0;
20193dd5e85SScott Wood     }
20293dd5e85SScott Wood 
20393dd5e85SScott Wood     assert(ARRAY_SIZE(params.tlb_sizes) == BOOKE206_MAX_TLBN);
20493dd5e85SScott Wood 
20593dd5e85SScott Wood     for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
20693dd5e85SScott Wood         params.tlb_sizes[i] = booke206_tlb_size(env, i);
20793dd5e85SScott Wood         params.tlb_ways[i] = booke206_tlb_ways(env, i);
20893dd5e85SScott Wood         entries += params.tlb_sizes[i];
20993dd5e85SScott Wood     }
21093dd5e85SScott Wood 
21193dd5e85SScott Wood     assert(entries == env->nb_tlb);
21293dd5e85SScott Wood     assert(sizeof(struct kvm_book3e_206_tlb_entry) == sizeof(ppcmas_tlb_t));
21393dd5e85SScott Wood 
21493dd5e85SScott Wood     env->tlb_dirty = true;
21593dd5e85SScott Wood 
21693dd5e85SScott Wood     cfg.array = (uintptr_t)env->tlb.tlbm;
21793dd5e85SScott Wood     cfg.array_len = sizeof(ppcmas_tlb_t) * entries;
21893dd5e85SScott Wood     cfg.params = (uintptr_t)&params;
21993dd5e85SScott Wood     cfg.mmu_type = KVM_MMU_FSL_BOOKE_NOHV;
22093dd5e85SScott Wood 
22148add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_SW_TLB, 0, (uintptr_t)&cfg);
22293dd5e85SScott Wood     if (ret < 0) {
22393dd5e85SScott Wood         fprintf(stderr, "%s: couldn't enable KVM_CAP_SW_TLB: %s\n",
22493dd5e85SScott Wood                 __func__, strerror(-ret));
22593dd5e85SScott Wood         return ret;
22693dd5e85SScott Wood     }
22793dd5e85SScott Wood 
22893dd5e85SScott Wood     env->kvm_sw_tlb = true;
22993dd5e85SScott Wood     return 0;
23093dd5e85SScott Wood }
23193dd5e85SScott Wood 
2324656e1f0SBenjamin Herrenschmidt 
2334656e1f0SBenjamin Herrenschmidt #if defined(TARGET_PPC64)
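/* Fallback for kernels without KVM_PPC_GET_SMMU_INFO: hand-construct a
 * conservative table of supported segment/page sizes. */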
234a60f24b5SAndreas Färber static void kvm_get_fallback_smmu_info(PowerPCCPU *cpu,
2354656e1f0SBenjamin Herrenschmidt                                        struct kvm_ppc_smmu_info *info)
2364656e1f0SBenjamin Herrenschmidt {
237a60f24b5SAndreas Färber     CPUPPCState *env = &cpu->env;
238a60f24b5SAndreas Färber     CPUState *cs = CPU(cpu);
239a60f24b5SAndreas Färber 
2404656e1f0SBenjamin Herrenschmidt     memset(info, 0, sizeof(*info));
2414656e1f0SBenjamin Herrenschmidt 
2424656e1f0SBenjamin Herrenschmidt     /* We don't have the new KVM_PPC_GET_SMMU_INFO ioctl, so we
2434656e1f0SBenjamin Herrenschmidt      * need to "guess" what the supported page sizes are.
2444656e1f0SBenjamin Herrenschmidt      *
2454656e1f0SBenjamin Herrenschmidt      * For that to work we make a few assumptions:
2464656e1f0SBenjamin Herrenschmidt      *
24796c9cff0SThomas Huth      * - Check whether we are running "PR" KVM which only supports 4K
24896c9cff0SThomas Huth      *   and 16M pages, but supports them regardless of the backing
24996c9cff0SThomas Huth      *   store characteristics. We also don't support 1T segments.
2504656e1f0SBenjamin Herrenschmidt      *
2514656e1f0SBenjamin Herrenschmidt      *   This is safe because if HV KVM ever supports that capability or PR
2524656e1f0SBenjamin Herrenschmidt      *   KVM grows support for more page/segment sizes, those versions
2534656e1f0SBenjamin Herrenschmidt      *   will have implemented KVM_CAP_PPC_GET_SMMU_INFO and thus we
2544656e1f0SBenjamin Herrenschmidt      *   will not hit this fallback
2554656e1f0SBenjamin Herrenschmidt      *
2564656e1f0SBenjamin Herrenschmidt      * - Else we are running HV KVM. This means we only support page
2574656e1f0SBenjamin Herrenschmidt      *   sizes that fit in the backing store. Additionally we only
2584656e1f0SBenjamin Herrenschmidt      *   advertise 64K pages if the processor is ARCH 2.06 and we assume
2594656e1f0SBenjamin Herrenschmidt      *   P7 encodings for the SLB and hash table. Here too, we assume
2604656e1f0SBenjamin Herrenschmidt      *   support for any newer processor will mean a kernel that
2614656e1f0SBenjamin Herrenschmidt      *   implements KVM_CAP_PPC_GET_SMMU_INFO and thus doesn't hit
2624656e1f0SBenjamin Herrenschmidt      *   this fallback.
2634656e1f0SBenjamin Herrenschmidt      */
26496c9cff0SThomas Huth     if (kvmppc_is_pr(cs->kvm_state)) {
2654656e1f0SBenjamin Herrenschmidt         /* No flags */
2664656e1f0SBenjamin Herrenschmidt         info->flags = 0;
2674656e1f0SBenjamin Herrenschmidt         info->slb_size = 64;
2684656e1f0SBenjamin Herrenschmidt 
2694656e1f0SBenjamin Herrenschmidt         /* Standard 4k base page size segment */
2704656e1f0SBenjamin Herrenschmidt         info->sps[0].page_shift = 12;
2714656e1f0SBenjamin Herrenschmidt         info->sps[0].slb_enc = 0;
2724656e1f0SBenjamin Herrenschmidt         info->sps[0].enc[0].page_shift = 12;
2734656e1f0SBenjamin Herrenschmidt         info->sps[0].enc[0].pte_enc = 0;
2744656e1f0SBenjamin Herrenschmidt 
2754656e1f0SBenjamin Herrenschmidt         /* Standard 16M large page size segment */
2764656e1f0SBenjamin Herrenschmidt         info->sps[1].page_shift = 24;
2774656e1f0SBenjamin Herrenschmidt         info->sps[1].slb_enc = SLB_VSID_L;
2784656e1f0SBenjamin Herrenschmidt         info->sps[1].enc[0].page_shift = 24;
2794656e1f0SBenjamin Herrenschmidt         info->sps[1].enc[0].pte_enc = 0;
2804656e1f0SBenjamin Herrenschmidt     } else {
2814656e1f0SBenjamin Herrenschmidt         int i = 0;
2824656e1f0SBenjamin Herrenschmidt 
2834656e1f0SBenjamin Herrenschmidt         /* HV KVM has backing store size restrictions */
2844656e1f0SBenjamin Herrenschmidt         info->flags = KVM_PPC_PAGE_SIZES_REAL;
2854656e1f0SBenjamin Herrenschmidt 
2864656e1f0SBenjamin Herrenschmidt         if (env->mmu_model & POWERPC_MMU_1TSEG) {
2874656e1f0SBenjamin Herrenschmidt             info->flags |= KVM_PPC_1T_SEGMENTS;
2884656e1f0SBenjamin Herrenschmidt         }
2894656e1f0SBenjamin Herrenschmidt 
290ec975e83SSam Bobroff         if (POWERPC_MMU_VER(env->mmu_model) == POWERPC_MMU_VER_2_06 ||
291ec975e83SSam Bobroff            POWERPC_MMU_VER(env->mmu_model) == POWERPC_MMU_VER_2_07) {
2924656e1f0SBenjamin Herrenschmidt             info->slb_size = 32;
2934656e1f0SBenjamin Herrenschmidt         } else {
2944656e1f0SBenjamin Herrenschmidt             info->slb_size = 64;
2954656e1f0SBenjamin Herrenschmidt         }
2964656e1f0SBenjamin Herrenschmidt 
2974656e1f0SBenjamin Herrenschmidt         /* Standard 4k base page size segment */
2984656e1f0SBenjamin Herrenschmidt         info->sps[i].page_shift = 12;
2994656e1f0SBenjamin Herrenschmidt         info->sps[i].slb_enc = 0;
3004656e1f0SBenjamin Herrenschmidt         info->sps[i].enc[0].page_shift = 12;
3014656e1f0SBenjamin Herrenschmidt         info->sps[i].enc[0].pte_enc = 0;
3024656e1f0SBenjamin Herrenschmidt         i++;
3034656e1f0SBenjamin Herrenschmidt 
304aa4bb587SBenjamin Herrenschmidt         /* 64K on MMU 2.06 and later */
305ec975e83SSam Bobroff         if (POWERPC_MMU_VER(env->mmu_model) == POWERPC_MMU_VER_2_06 ||
306ec975e83SSam Bobroff             POWERPC_MMU_VER(env->mmu_model) == POWERPC_MMU_VER_2_07) {
3074656e1f0SBenjamin Herrenschmidt             info->sps[i].page_shift = 16;
3084656e1f0SBenjamin Herrenschmidt             info->sps[i].slb_enc = 0x110;
3094656e1f0SBenjamin Herrenschmidt             info->sps[i].enc[0].page_shift = 16;
3104656e1f0SBenjamin Herrenschmidt             info->sps[i].enc[0].pte_enc = 1;
3114656e1f0SBenjamin Herrenschmidt             i++;
3124656e1f0SBenjamin Herrenschmidt         }
3134656e1f0SBenjamin Herrenschmidt 
3144656e1f0SBenjamin Herrenschmidt         /* Standard 16M large page size segment */
3154656e1f0SBenjamin Herrenschmidt         info->sps[i].page_shift = 24;
3164656e1f0SBenjamin Herrenschmidt         info->sps[i].slb_enc = SLB_VSID_L;
3174656e1f0SBenjamin Herrenschmidt         info->sps[i].enc[0].page_shift = 24;
3184656e1f0SBenjamin Herrenschmidt         info->sps[i].enc[0].pte_enc = 0;
3194656e1f0SBenjamin Herrenschmidt     }
3204656e1f0SBenjamin Herrenschmidt }
3214656e1f0SBenjamin Herrenschmidt 
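/* Query the host's MMU geometry from KVM, falling back to guesses on
 * kernels that lack KVM_PPC_GET_SMMU_INFO. */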
322a60f24b5SAndreas Färber static void kvm_get_smmu_info(PowerPCCPU *cpu, struct kvm_ppc_smmu_info *info)
3234656e1f0SBenjamin Herrenschmidt {
324a60f24b5SAndreas Färber     CPUState *cs = CPU(cpu);
3254656e1f0SBenjamin Herrenschmidt     int ret;
3264656e1f0SBenjamin Herrenschmidt 
327a60f24b5SAndreas Färber     if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) {
328a60f24b5SAndreas Färber         ret = kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_SMMU_INFO, info);
3294656e1f0SBenjamin Herrenschmidt         if (ret == 0) {
3304656e1f0SBenjamin Herrenschmidt             return;
3314656e1f0SBenjamin Herrenschmidt         }
3324656e1f0SBenjamin Herrenschmidt     }
3334656e1f0SBenjamin Herrenschmidt 
334a60f24b5SAndreas Färber     kvm_get_fallback_smmu_info(cpu, info);
3354656e1f0SBenjamin Herrenschmidt }
3364656e1f0SBenjamin Herrenschmidt 
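/* Collect the radix page-size (AP) encodings supported by the host via
 * KVM_PPC_GET_RMMU_INFO; returns NULL if the radix MMU capability is absent. */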
337*c64abd1fSSam Bobroff struct ppc_radix_page_info *kvm_get_radix_page_info(void)
338*c64abd1fSSam Bobroff {
339*c64abd1fSSam Bobroff     KVMState *s = KVM_STATE(current_machine->accelerator);
340*c64abd1fSSam Bobroff     struct ppc_radix_page_info *radix_page_info;
341*c64abd1fSSam Bobroff     struct kvm_ppc_rmmu_info rmmu_info;
342*c64abd1fSSam Bobroff     int i;
343*c64abd1fSSam Bobroff 
344*c64abd1fSSam Bobroff     if (!kvm_check_extension(s, KVM_CAP_PPC_MMU_RADIX)) {
345*c64abd1fSSam Bobroff         return NULL;
346*c64abd1fSSam Bobroff     }
347*c64abd1fSSam Bobroff     if (kvm_vm_ioctl(s, KVM_PPC_GET_RMMU_INFO, &rmmu_info)) {
348*c64abd1fSSam Bobroff         return NULL;
349*c64abd1fSSam Bobroff     }
350*c64abd1fSSam Bobroff     radix_page_info = g_malloc0(sizeof(*radix_page_info));
351*c64abd1fSSam Bobroff     radix_page_info->count = 0;
352*c64abd1fSSam Bobroff     for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
353*c64abd1fSSam Bobroff         if (rmmu_info.ap_encodings[i]) {
354*c64abd1fSSam Bobroff             radix_page_info->entries[i] = rmmu_info.ap_encodings[i];
355*c64abd1fSSam Bobroff             radix_page_info->count++;
356*c64abd1fSSam Bobroff         }
357*c64abd1fSSam Bobroff     }
358*c64abd1fSSam Bobroff     return radix_page_info;
359*c64abd1fSSam Bobroff }
360*c64abd1fSSam Bobroff 
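/* Under HV KVM (KVM_PPC_PAGE_SIZES_REAL) a page size is only usable if it
 * does not exceed the backing RAM page size. */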
3614656e1f0SBenjamin Herrenschmidt static bool kvm_valid_page_size(uint32_t flags, long rampgsize, uint32_t shift)
3624656e1f0SBenjamin Herrenschmidt {
3634656e1f0SBenjamin Herrenschmidt     if (!(flags & KVM_PPC_PAGE_SIZES_REAL)) {
3644656e1f0SBenjamin Herrenschmidt         return true;
3654656e1f0SBenjamin Herrenschmidt     }
3664656e1f0SBenjamin Herrenschmidt 
3674656e1f0SBenjamin Herrenschmidt     return (1ul << shift) <= rampgsize;
3684656e1f0SBenjamin Herrenschmidt }
3694656e1f0SBenjamin Herrenschmidt 
370df587133SThomas Huth static long max_cpu_page_size;
371df587133SThomas Huth 
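/* Trim the CPU's page-size table (env->sps) down to what KVM and the RAM
 * backend actually support. */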
372a60f24b5SAndreas Färber static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
3734656e1f0SBenjamin Herrenschmidt {
3744656e1f0SBenjamin Herrenschmidt     static struct kvm_ppc_smmu_info smmu_info;
3754656e1f0SBenjamin Herrenschmidt     static bool has_smmu_info;
376a60f24b5SAndreas Färber     CPUPPCState *env = &cpu->env;
3774656e1f0SBenjamin Herrenschmidt     int iq, ik, jq, jk;
3780d594f55SThomas Huth     bool has_64k_pages = false;
3794656e1f0SBenjamin Herrenschmidt 
3804656e1f0SBenjamin Herrenschmidt     /* We only handle page sizes for 64-bit server guests for now */
3814656e1f0SBenjamin Herrenschmidt     if (!(env->mmu_model & POWERPC_MMU_64)) {
3824656e1f0SBenjamin Herrenschmidt         return;
3834656e1f0SBenjamin Herrenschmidt     }
3844656e1f0SBenjamin Herrenschmidt 
3854656e1f0SBenjamin Herrenschmidt     /* Collect MMU info from kernel if not already */
3864656e1f0SBenjamin Herrenschmidt     if (!has_smmu_info) {
387a60f24b5SAndreas Färber         kvm_get_smmu_info(cpu, &smmu_info);
3884656e1f0SBenjamin Herrenschmidt         has_smmu_info = true;
3894656e1f0SBenjamin Herrenschmidt     }
3904656e1f0SBenjamin Herrenschmidt 
391df587133SThomas Huth     if (!max_cpu_page_size) {
3929c607668SAlexey Kardashevskiy         max_cpu_page_size = qemu_getrampagesize();
393df587133SThomas Huth     }
3944656e1f0SBenjamin Herrenschmidt 
3954656e1f0SBenjamin Herrenschmidt     /* Convert to QEMU form */
3964656e1f0SBenjamin Herrenschmidt     memset(&env->sps, 0, sizeof(env->sps));
3974656e1f0SBenjamin Herrenschmidt 
39890da0d5aSBenjamin Herrenschmidt     /* If we have HV KVM, we need to forbid CI large pages if our
39990da0d5aSBenjamin Herrenschmidt      * host page size is smaller than 64K.
40090da0d5aSBenjamin Herrenschmidt      */
40190da0d5aSBenjamin Herrenschmidt     if (smmu_info.flags & KVM_PPC_PAGE_SIZES_REAL) {
40290da0d5aSBenjamin Herrenschmidt         env->ci_large_pages = getpagesize() >= 0x10000;
40390da0d5aSBenjamin Herrenschmidt     }
40490da0d5aSBenjamin Herrenschmidt 
40508215d8fSAlexander Graf     /*
40608215d8fSAlexander Graf      * XXX This loop should be an entry wide AND of the capabilities that
40708215d8fSAlexander Graf      *     the selected CPU has with the capabilities that KVM supports.
40808215d8fSAlexander Graf      */
4094656e1f0SBenjamin Herrenschmidt     for (ik = iq = 0; ik < KVM_PPC_PAGE_SIZES_MAX_SZ; ik++) {
4104656e1f0SBenjamin Herrenschmidt         struct ppc_one_seg_page_size *qsps = &env->sps.sps[iq];
4114656e1f0SBenjamin Herrenschmidt         struct kvm_ppc_one_seg_page_size *ksps = &smmu_info.sps[ik];
4124656e1f0SBenjamin Herrenschmidt 
413df587133SThomas Huth         if (!kvm_valid_page_size(smmu_info.flags, max_cpu_page_size,
4144656e1f0SBenjamin Herrenschmidt                                  ksps->page_shift)) {
4154656e1f0SBenjamin Herrenschmidt             continue;
4164656e1f0SBenjamin Herrenschmidt         }
4174656e1f0SBenjamin Herrenschmidt         qsps->page_shift = ksps->page_shift;
4184656e1f0SBenjamin Herrenschmidt         qsps->slb_enc = ksps->slb_enc;
4194656e1f0SBenjamin Herrenschmidt         for (jk = jq = 0; jk < KVM_PPC_PAGE_SIZES_MAX_SZ; jk++) {
420df587133SThomas Huth             if (!kvm_valid_page_size(smmu_info.flags, max_cpu_page_size,
4214656e1f0SBenjamin Herrenschmidt                                      ksps->enc[jk].page_shift)) {
4224656e1f0SBenjamin Herrenschmidt                 continue;
4234656e1f0SBenjamin Herrenschmidt             }
4240d594f55SThomas Huth             if (ksps->enc[jk].page_shift == 16) {
4250d594f55SThomas Huth                 has_64k_pages = true;
4260d594f55SThomas Huth             }
4274656e1f0SBenjamin Herrenschmidt             qsps->enc[jq].page_shift = ksps->enc[jk].page_shift;
4284656e1f0SBenjamin Herrenschmidt             qsps->enc[jq].pte_enc = ksps->enc[jk].pte_enc;
4294656e1f0SBenjamin Herrenschmidt             if (++jq >= PPC_PAGE_SIZES_MAX_SZ) {
4304656e1f0SBenjamin Herrenschmidt                 break;
4314656e1f0SBenjamin Herrenschmidt             }
4324656e1f0SBenjamin Herrenschmidt         }
4334656e1f0SBenjamin Herrenschmidt         if (++iq >= PPC_PAGE_SIZES_MAX_SZ) {
4344656e1f0SBenjamin Herrenschmidt             break;
4354656e1f0SBenjamin Herrenschmidt         }
4364656e1f0SBenjamin Herrenschmidt     }
4374656e1f0SBenjamin Herrenschmidt     env->slb_nr = smmu_info.slb_size;
43808215d8fSAlexander Graf     if (!(smmu_info.flags & KVM_PPC_1T_SEGMENTS)) {
4394656e1f0SBenjamin Herrenschmidt         env->mmu_model &= ~POWERPC_MMU_1TSEG;
4404656e1f0SBenjamin Herrenschmidt     }
4410d594f55SThomas Huth     if (!has_64k_pages) {
4420d594f55SThomas Huth         env->mmu_model &= ~POWERPC_MMU_64K;
4430d594f55SThomas Huth     }
4444656e1f0SBenjamin Herrenschmidt }
445df587133SThomas Huth 
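/* Check that a memory backend's page size is at least as large as the
 * largest page size the CPU can use. */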
446df587133SThomas Huth bool kvmppc_is_mem_backend_page_size_ok(char *obj_path)
447df587133SThomas Huth {
448df587133SThomas Huth     Object *mem_obj = object_resolve_path(obj_path, NULL);
449df587133SThomas Huth     char *mempath = object_property_get_str(mem_obj, "mem-path", NULL);
450df587133SThomas Huth     long pagesize;
451df587133SThomas Huth 
452df587133SThomas Huth     if (mempath) {
4539c607668SAlexey Kardashevskiy         pagesize = qemu_mempath_getpagesize(mempath);
454df587133SThomas Huth     } else {
455df587133SThomas Huth         pagesize = getpagesize();
456df587133SThomas Huth     }
457df587133SThomas Huth 
458df587133SThomas Huth     return pagesize >= max_cpu_page_size;
459df587133SThomas Huth }
460df587133SThomas Huth 
4614656e1f0SBenjamin Herrenschmidt #else /* defined (TARGET_PPC64) */
4624656e1f0SBenjamin Herrenschmidt 
463a60f24b5SAndreas Färber static inline void kvm_fixup_page_sizes(PowerPCCPU *cpu)
4644656e1f0SBenjamin Herrenschmidt {
4654656e1f0SBenjamin Herrenschmidt }
4664656e1f0SBenjamin Herrenschmidt 
467df587133SThomas Huth bool kvmppc_is_mem_backend_page_size_ok(char *obj_path)
468df587133SThomas Huth {
469df587133SThomas Huth     return true;
470df587133SThomas Huth }
471df587133SThomas Huth 
4724656e1f0SBenjamin Herrenschmidt #endif /* !defined (TARGET_PPC64) */
4734656e1f0SBenjamin Herrenschmidt 
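/* KVM vcpu ids correspond to the device tree CPU ids. */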
474b164e48eSEduardo Habkost unsigned long kvm_arch_vcpu_id(CPUState *cpu)
475b164e48eSEduardo Habkost {
4760f20ba62SAlexey Kardashevskiy     return ppc_get_vcpu_dt_id(POWERPC_CPU(cpu));
477b164e48eSEduardo Habkost }
478b164e48eSEduardo Habkost 
47988365d17SBharat Bhushan /* e500 supports 2 h/w breakpoints and 2 watchpoints.
48088365d17SBharat Bhushan  * book3s supports only 1 watchpoint, so array size
48188365d17SBharat Bhushan  * of 4 is sufficient for now.
48288365d17SBharat Bhushan  */
48388365d17SBharat Bhushan #define MAX_HW_BKPTS 4
48488365d17SBharat Bhushan 
48588365d17SBharat Bhushan static struct HWBreakpoint {
48688365d17SBharat Bhushan     target_ulong addr;
48788365d17SBharat Bhushan     int type;
48888365d17SBharat Bhushan } hw_debug_points[MAX_HW_BKPTS];
48988365d17SBharat Bhushan 
49088365d17SBharat Bhushan static CPUWatchpoint hw_watchpoint;
49188365d17SBharat Bhushan 
49288365d17SBharat Bhushan /* By default, no breakpoints or watchpoints are supported */
49388365d17SBharat Bhushan static int max_hw_breakpoint;
49488365d17SBharat Bhushan static int max_hw_watchpoint;
49588365d17SBharat Bhushan static int nb_hw_breakpoint;
49688365d17SBharat Bhushan static int nb_hw_watchpoint;
49788365d17SBharat Bhushan 
49888365d17SBharat Bhushan static void kvmppc_hw_debug_points_init(CPUPPCState *cenv)
49988365d17SBharat Bhushan {
50088365d17SBharat Bhushan     if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
50188365d17SBharat Bhushan         max_hw_breakpoint = 2;
50288365d17SBharat Bhushan         max_hw_watchpoint = 2;
50388365d17SBharat Bhushan     }
50488365d17SBharat Bhushan 
50588365d17SBharat Bhushan     if ((max_hw_breakpoint + max_hw_watchpoint) > MAX_HW_BKPTS) {
50688365d17SBharat Bhushan         fprintf(stderr, "Error initializing h/w breakpoints\n");
50788365d17SBharat Bhushan         return;
50888365d17SBharat Bhushan     }
50988365d17SBharat Bhushan }
51088365d17SBharat Bhushan 
51120d695a9SAndreas Färber int kvm_arch_init_vcpu(CPUState *cs)
5125666ca4aSScott Wood {
51320d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
51420d695a9SAndreas Färber     CPUPPCState *cenv = &cpu->env;
5155666ca4aSScott Wood     int ret;
5165666ca4aSScott Wood 
5174656e1f0SBenjamin Herrenschmidt     /* Gather server mmu info from KVM and update the CPU state */
518a60f24b5SAndreas Färber     kvm_fixup_page_sizes(cpu);
5194656e1f0SBenjamin Herrenschmidt 
5204656e1f0SBenjamin Herrenschmidt     /* Synchronize sregs with kvm */
5211bc22652SAndreas Färber     ret = kvm_arch_sync_sregs(cpu);
5225666ca4aSScott Wood     if (ret) {
523388e47c7SThomas Huth         if (ret == -EINVAL) {
524388e47c7SThomas Huth             error_report("Register sync failed... If you're using kvm-hv.ko,"
525388e47c7SThomas Huth                          " only \"-cpu host\" is possible");
526388e47c7SThomas Huth         }
5275666ca4aSScott Wood         return ret;
5285666ca4aSScott Wood     }
529861bbc80SAlexander Graf 
530bc72ad67SAlex Bligh     idle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, kvm_kick_cpu, cpu);
531c821c2bdSAlexander Graf 
53293dd5e85SScott Wood     switch (cenv->mmu_model) {
53393dd5e85SScott Wood     case POWERPC_MMU_BOOKE206:
5347f516c96SThomas Huth         /* This target supports access to KVM's guest TLB */
5351bc22652SAndreas Färber         ret = kvm_booke206_tlb_init(cpu);
53693dd5e85SScott Wood         break;
5377f516c96SThomas Huth     case POWERPC_MMU_2_07:
5387f516c96SThomas Huth         if (!cap_htm && !kvmppc_is_pr(cs->kvm_state)) {
5397f516c96SThomas Huth             /* KVM-HV has transactional memory on POWER8 even without the
540f3d9f303SSam Bobroff              * KVM_CAP_PPC_HTM extension, so enable it here instead as
541f3d9f303SSam Bobroff              * long as it's available to userspace on the host. */
542f3d9f303SSam Bobroff             if (qemu_getauxval(AT_HWCAP2) & PPC_FEATURE2_HAS_HTM) {
5437f516c96SThomas Huth                 cap_htm = true;
5447f516c96SThomas Huth             }
545f3d9f303SSam Bobroff         }
5467f516c96SThomas Huth         break;
54793dd5e85SScott Wood     default:
54893dd5e85SScott Wood         break;
54993dd5e85SScott Wood     }
55093dd5e85SScott Wood 
5513c902d44SBharat Bhushan     kvm_get_one_reg(cs, KVM_REG_PPC_DEBUG_INST, &debug_inst_opcode);
55288365d17SBharat Bhushan     kvmppc_hw_debug_points_init(cenv);
5533c902d44SBharat Bhushan 
554861bbc80SAlexander Graf     return ret;
555d76d1650Saurel32 }
556d76d1650Saurel32 
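/* Write the entire QEMU-side TLB image back to KVM by marking every
 * entry dirty. */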
5571bc22652SAndreas Färber static void kvm_sw_tlb_put(PowerPCCPU *cpu)
55893dd5e85SScott Wood {
5591bc22652SAndreas Färber     CPUPPCState *env = &cpu->env;
5601bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
56193dd5e85SScott Wood     struct kvm_dirty_tlb dirty_tlb;
56293dd5e85SScott Wood     unsigned char *bitmap;
56393dd5e85SScott Wood     int ret;
56493dd5e85SScott Wood 
56593dd5e85SScott Wood     if (!env->kvm_sw_tlb) {
56693dd5e85SScott Wood         return;
56793dd5e85SScott Wood     }
56893dd5e85SScott Wood 
56993dd5e85SScott Wood     bitmap = g_malloc((env->nb_tlb + 7) / 8);
57093dd5e85SScott Wood     memset(bitmap, 0xFF, (env->nb_tlb + 7) / 8);
57193dd5e85SScott Wood 
57293dd5e85SScott Wood     dirty_tlb.bitmap = (uintptr_t)bitmap;
57393dd5e85SScott Wood     dirty_tlb.num_dirty = env->nb_tlb;
57493dd5e85SScott Wood 
5751bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_DIRTY_TLB, &dirty_tlb);
57693dd5e85SScott Wood     if (ret) {
57793dd5e85SScott Wood         fprintf(stderr, "%s: KVM_DIRTY_TLB: %s\n",
57893dd5e85SScott Wood                 __func__, strerror(-ret));
57993dd5e85SScott Wood     }
58093dd5e85SScott Wood 
58193dd5e85SScott Wood     g_free(bitmap);
58293dd5e85SScott Wood }
58393dd5e85SScott Wood 
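/* Read one SPR from KVM through the ONE_REG interface into env->spr[]. */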
584d67d40eaSDavid Gibson static void kvm_get_one_spr(CPUState *cs, uint64_t id, int spr)
585d67d40eaSDavid Gibson {
586d67d40eaSDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
587d67d40eaSDavid Gibson     CPUPPCState *env = &cpu->env;
588d67d40eaSDavid Gibson     union {
589d67d40eaSDavid Gibson         uint32_t u32;
590d67d40eaSDavid Gibson         uint64_t u64;
591d67d40eaSDavid Gibson     } val;
592d67d40eaSDavid Gibson     struct kvm_one_reg reg = {
593d67d40eaSDavid Gibson         .id = id,
594d67d40eaSDavid Gibson         .addr = (uintptr_t) &val,
595d67d40eaSDavid Gibson     };
596d67d40eaSDavid Gibson     int ret;
597d67d40eaSDavid Gibson 
598d67d40eaSDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
599d67d40eaSDavid Gibson     if (ret != 0) {
600b36f100eSAlexey Kardashevskiy         trace_kvm_failed_spr_get(spr, strerror(errno));
601d67d40eaSDavid Gibson     } else {
602d67d40eaSDavid Gibson         switch (id & KVM_REG_SIZE_MASK) {
603d67d40eaSDavid Gibson         case KVM_REG_SIZE_U32:
604d67d40eaSDavid Gibson             env->spr[spr] = val.u32;
605d67d40eaSDavid Gibson             break;
606d67d40eaSDavid Gibson 
607d67d40eaSDavid Gibson         case KVM_REG_SIZE_U64:
608d67d40eaSDavid Gibson             env->spr[spr] = val.u64;
609d67d40eaSDavid Gibson             break;
610d67d40eaSDavid Gibson 
611d67d40eaSDavid Gibson         default:
612d67d40eaSDavid Gibson             /* Don't handle this size yet */
613d67d40eaSDavid Gibson             abort();
614d67d40eaSDavid Gibson         }
615d67d40eaSDavid Gibson     }
616d67d40eaSDavid Gibson }
617d67d40eaSDavid Gibson 
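/* Write one SPR from env->spr[] to KVM through the ONE_REG interface. */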
618d67d40eaSDavid Gibson static void kvm_put_one_spr(CPUState *cs, uint64_t id, int spr)
619d67d40eaSDavid Gibson {
620d67d40eaSDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
621d67d40eaSDavid Gibson     CPUPPCState *env = &cpu->env;
622d67d40eaSDavid Gibson     union {
623d67d40eaSDavid Gibson         uint32_t u32;
624d67d40eaSDavid Gibson         uint64_t u64;
625d67d40eaSDavid Gibson     } val;
626d67d40eaSDavid Gibson     struct kvm_one_reg reg = {
627d67d40eaSDavid Gibson         .id = id,
628d67d40eaSDavid Gibson         .addr = (uintptr_t) &val,
629d67d40eaSDavid Gibson     };
630d67d40eaSDavid Gibson     int ret;
631d67d40eaSDavid Gibson 
632d67d40eaSDavid Gibson     switch (id & KVM_REG_SIZE_MASK) {
633d67d40eaSDavid Gibson     case KVM_REG_SIZE_U32:
634d67d40eaSDavid Gibson         val.u32 = env->spr[spr];
635d67d40eaSDavid Gibson         break;
636d67d40eaSDavid Gibson 
637d67d40eaSDavid Gibson     case KVM_REG_SIZE_U64:
638d67d40eaSDavid Gibson         val.u64 = env->spr[spr];
639d67d40eaSDavid Gibson         break;
640d67d40eaSDavid Gibson 
641d67d40eaSDavid Gibson     default:
642d67d40eaSDavid Gibson         /* Don't handle this size yet */
643d67d40eaSDavid Gibson         abort();
644d67d40eaSDavid Gibson     }
645d67d40eaSDavid Gibson 
646d67d40eaSDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
647d67d40eaSDavid Gibson     if (ret != 0) {
648b36f100eSAlexey Kardashevskiy         trace_kvm_failed_spr_set(spr, strerror(errno));
649d67d40eaSDavid Gibson     }
650d67d40eaSDavid Gibson }
651d67d40eaSDavid Gibson 
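/* Push FPSCR, the FPRs/VSRs and the Altivec registers to KVM. */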
65270b79849SDavid Gibson static int kvm_put_fp(CPUState *cs)
65370b79849SDavid Gibson {
65470b79849SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
65570b79849SDavid Gibson     CPUPPCState *env = &cpu->env;
65670b79849SDavid Gibson     struct kvm_one_reg reg;
65770b79849SDavid Gibson     int i;
65870b79849SDavid Gibson     int ret;
65970b79849SDavid Gibson 
66070b79849SDavid Gibson     if (env->insns_flags & PPC_FLOAT) {
66170b79849SDavid Gibson         uint64_t fpscr = env->fpscr;
66270b79849SDavid Gibson         bool vsx = !!(env->insns_flags2 & PPC2_VSX);
66370b79849SDavid Gibson 
66470b79849SDavid Gibson         reg.id = KVM_REG_PPC_FPSCR;
66570b79849SDavid Gibson         reg.addr = (uintptr_t)&fpscr;
66670b79849SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
66770b79849SDavid Gibson         if (ret < 0) {
668da56ff91SPeter Maydell             DPRINTF("Unable to set FPSCR to KVM: %s\n", strerror(errno));
66970b79849SDavid Gibson             return ret;
67070b79849SDavid Gibson         }
67170b79849SDavid Gibson 
67270b79849SDavid Gibson         for (i = 0; i < 32; i++) {
67370b79849SDavid Gibson             uint64_t vsr[2];
67470b79849SDavid Gibson 
6753a4b791bSGreg Kurz #ifdef HOST_WORDS_BIGENDIAN
67670b79849SDavid Gibson             vsr[0] = float64_val(env->fpr[i]);
67770b79849SDavid Gibson             vsr[1] = env->vsr[i];
6783a4b791bSGreg Kurz #else
6793a4b791bSGreg Kurz             vsr[0] = env->vsr[i];
6803a4b791bSGreg Kurz             vsr[1] = float64_val(env->fpr[i]);
6813a4b791bSGreg Kurz #endif
68270b79849SDavid Gibson             reg.addr = (uintptr_t) &vsr;
68370b79849SDavid Gibson             reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);
68470b79849SDavid Gibson 
68570b79849SDavid Gibson             ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
68670b79849SDavid Gibson             if (ret < 0) {
687da56ff91SPeter Maydell                 DPRINTF("Unable to set %s%d to KVM: %s\n", vsx ? "VSR" : "FPR",
68870b79849SDavid Gibson                         i, strerror(errno));
68970b79849SDavid Gibson                 return ret;
69070b79849SDavid Gibson             }
69170b79849SDavid Gibson         }
69270b79849SDavid Gibson     }
69370b79849SDavid Gibson 
69470b79849SDavid Gibson     if (env->insns_flags & PPC_ALTIVEC) {
69570b79849SDavid Gibson         reg.id = KVM_REG_PPC_VSCR;
69670b79849SDavid Gibson         reg.addr = (uintptr_t)&env->vscr;
69770b79849SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
69870b79849SDavid Gibson         if (ret < 0) {
699da56ff91SPeter Maydell             DPRINTF("Unable to set VSCR to KVM: %s\n", strerror(errno));
70070b79849SDavid Gibson             return ret;
70170b79849SDavid Gibson         }
70270b79849SDavid Gibson 
70370b79849SDavid Gibson         for (i = 0; i < 32; i++) {
70470b79849SDavid Gibson             reg.id = KVM_REG_PPC_VR(i);
70570b79849SDavid Gibson             reg.addr = (uintptr_t)&env->avr[i];
70670b79849SDavid Gibson             ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
70770b79849SDavid Gibson             if (ret < 0) {
708da56ff91SPeter Maydell                 DPRINTF("Unable to set VR%d to KVM: %s\n", i, strerror(errno));
70970b79849SDavid Gibson                 return ret;
71070b79849SDavid Gibson             }
71170b79849SDavid Gibson         }
71270b79849SDavid Gibson     }
71370b79849SDavid Gibson 
71470b79849SDavid Gibson     return 0;
71570b79849SDavid Gibson }
71670b79849SDavid Gibson 
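/* Fetch FPSCR, the FPRs/VSRs and the Altivec registers from KVM. */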
71770b79849SDavid Gibson static int kvm_get_fp(CPUState *cs)
71870b79849SDavid Gibson {
71970b79849SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
72070b79849SDavid Gibson     CPUPPCState *env = &cpu->env;
72170b79849SDavid Gibson     struct kvm_one_reg reg;
72270b79849SDavid Gibson     int i;
72370b79849SDavid Gibson     int ret;
72470b79849SDavid Gibson 
72570b79849SDavid Gibson     if (env->insns_flags & PPC_FLOAT) {
72670b79849SDavid Gibson         uint64_t fpscr;
72770b79849SDavid Gibson         bool vsx = !!(env->insns_flags2 & PPC2_VSX);
72870b79849SDavid Gibson 
72970b79849SDavid Gibson         reg.id = KVM_REG_PPC_FPSCR;
73070b79849SDavid Gibson         reg.addr = (uintptr_t)&fpscr;
73170b79849SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
73270b79849SDavid Gibson         if (ret < 0) {
733da56ff91SPeter Maydell             DPRINTF("Unable to get FPSCR from KVM: %s\n", strerror(errno));
73470b79849SDavid Gibson             return ret;
73570b79849SDavid Gibson         } else {
73670b79849SDavid Gibson             env->fpscr = fpscr;
73770b79849SDavid Gibson         }
73870b79849SDavid Gibson 
73970b79849SDavid Gibson         for (i = 0; i < 32; i++) {
74070b79849SDavid Gibson             uint64_t vsr[2];
74170b79849SDavid Gibson 
74270b79849SDavid Gibson             reg.addr = (uintptr_t) &vsr;
74370b79849SDavid Gibson             reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);
74470b79849SDavid Gibson 
74570b79849SDavid Gibson             ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
74670b79849SDavid Gibson             if (ret < 0) {
747da56ff91SPeter Maydell                 DPRINTF("Unable to get %s%d from KVM: %s\n",
74870b79849SDavid Gibson                         vsx ? "VSR" : "FPR", i, strerror(errno));
74970b79849SDavid Gibson                 return ret;
75070b79849SDavid Gibson             } else {
7513a4b791bSGreg Kurz #ifdef HOST_WORDS_BIGENDIAN
75270b79849SDavid Gibson                 env->fpr[i] = vsr[0];
75370b79849SDavid Gibson                 if (vsx) {
75470b79849SDavid Gibson                     env->vsr[i] = vsr[1];
75570b79849SDavid Gibson                 }
7563a4b791bSGreg Kurz #else
7573a4b791bSGreg Kurz                 env->fpr[i] = vsr[1];
7583a4b791bSGreg Kurz                 if (vsx) {
7593a4b791bSGreg Kurz                     env->vsr[i] = vsr[0];
7603a4b791bSGreg Kurz                 }
7613a4b791bSGreg Kurz #endif
76270b79849SDavid Gibson             }
76370b79849SDavid Gibson         }
76470b79849SDavid Gibson     }
76570b79849SDavid Gibson 
76670b79849SDavid Gibson     if (env->insns_flags & PPC_ALTIVEC) {
76770b79849SDavid Gibson         reg.id = KVM_REG_PPC_VSCR;
76870b79849SDavid Gibson         reg.addr = (uintptr_t)&env->vscr;
76970b79849SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
77070b79849SDavid Gibson         if (ret < 0) {
771da56ff91SPeter Maydell             DPRINTF("Unable to get VSCR from KVM: %s\n", strerror(errno));
77270b79849SDavid Gibson             return ret;
77370b79849SDavid Gibson         }
77470b79849SDavid Gibson 
77570b79849SDavid Gibson         for (i = 0; i < 32; i++) {
77670b79849SDavid Gibson             reg.id = KVM_REG_PPC_VR(i);
77770b79849SDavid Gibson             reg.addr = (uintptr_t)&env->avr[i];
77870b79849SDavid Gibson             ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
77970b79849SDavid Gibson             if (ret < 0) {
780da56ff91SPeter Maydell                 DPRINTF("Unable to get VR%d from KVM: %s\n",
78170b79849SDavid Gibson                         i, strerror(errno));
78270b79849SDavid Gibson                 return ret;
78370b79849SDavid Gibson             }
78470b79849SDavid Gibson         }
78570b79849SDavid Gibson     }
78670b79849SDavid Gibson 
78770b79849SDavid Gibson     return 0;
78870b79849SDavid Gibson }
78970b79849SDavid Gibson 
7909b00ea49SDavid Gibson #if defined(TARGET_PPC64)
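/* Read the VPA, SLB shadow and dispatch trace log registrations back
 * from KVM. */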
7919b00ea49SDavid Gibson static int kvm_get_vpa(CPUState *cs)
7929b00ea49SDavid Gibson {
7939b00ea49SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
7949b00ea49SDavid Gibson     CPUPPCState *env = &cpu->env;
7959b00ea49SDavid Gibson     struct kvm_one_reg reg;
7969b00ea49SDavid Gibson     int ret;
7979b00ea49SDavid Gibson 
7989b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_ADDR;
7999b00ea49SDavid Gibson     reg.addr = (uintptr_t)&env->vpa_addr;
8009b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
8019b00ea49SDavid Gibson     if (ret < 0) {
802da56ff91SPeter Maydell         DPRINTF("Unable to get VPA address from KVM: %s\n", strerror(errno));
8039b00ea49SDavid Gibson         return ret;
8049b00ea49SDavid Gibson     }
8059b00ea49SDavid Gibson 
8069b00ea49SDavid Gibson     assert((uintptr_t)&env->slb_shadow_size
8079b00ea49SDavid Gibson            == ((uintptr_t)&env->slb_shadow_addr + 8));
8089b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_SLB;
8099b00ea49SDavid Gibson     reg.addr = (uintptr_t)&env->slb_shadow_addr;
8109b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
8119b00ea49SDavid Gibson     if (ret < 0) {
812da56ff91SPeter Maydell         DPRINTF("Unable to get SLB shadow state from KVM: %s\n",
8139b00ea49SDavid Gibson                 strerror(errno));
8149b00ea49SDavid Gibson         return ret;
8159b00ea49SDavid Gibson     }
8169b00ea49SDavid Gibson 
8179b00ea49SDavid Gibson     assert((uintptr_t)&env->dtl_size == ((uintptr_t)&env->dtl_addr + 8));
8189b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_DTL;
8199b00ea49SDavid Gibson     reg.addr = (uintptr_t)&env->dtl_addr;
8209b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
8219b00ea49SDavid Gibson     if (ret < 0) {
822da56ff91SPeter Maydell         DPRINTF("Unable to get dispatch trace log state from KVM: %s\n",
8239b00ea49SDavid Gibson                 strerror(errno));
8249b00ea49SDavid Gibson         return ret;
8259b00ea49SDavid Gibson     }
8269b00ea49SDavid Gibson 
8279b00ea49SDavid Gibson     return 0;
8289b00ea49SDavid Gibson }
8299b00ea49SDavid Gibson 
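/* Restore the VPA, SLB shadow and dispatch trace log registrations to KVM,
 * in an order that satisfies the constraint described below. */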
8309b00ea49SDavid Gibson static int kvm_put_vpa(CPUState *cs)
8319b00ea49SDavid Gibson {
8329b00ea49SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
8339b00ea49SDavid Gibson     CPUPPCState *env = &cpu->env;
8349b00ea49SDavid Gibson     struct kvm_one_reg reg;
8359b00ea49SDavid Gibson     int ret;
8369b00ea49SDavid Gibson 
8379b00ea49SDavid Gibson     /* SLB shadow or DTL can't be registered unless a master VPA is
8389b00ea49SDavid Gibson      * registered.  That means when restoring state, if a VPA *is*
8399b00ea49SDavid Gibson      * registered, we need to set that up first.  If not, we need to
8409b00ea49SDavid Gibson      * deregister the others before deregistering the master VPA. */
8419b00ea49SDavid Gibson     assert(env->vpa_addr || !(env->slb_shadow_addr || env->dtl_addr));
8429b00ea49SDavid Gibson 
8439b00ea49SDavid Gibson     if (env->vpa_addr) {
8449b00ea49SDavid Gibson         reg.id = KVM_REG_PPC_VPA_ADDR;
8459b00ea49SDavid Gibson         reg.addr = (uintptr_t)&env->vpa_addr;
8469b00ea49SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
8479b00ea49SDavid Gibson         if (ret < 0) {
848da56ff91SPeter Maydell             DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno));
8499b00ea49SDavid Gibson             return ret;
8509b00ea49SDavid Gibson         }
8519b00ea49SDavid Gibson     }
8529b00ea49SDavid Gibson 
8539b00ea49SDavid Gibson     assert((uintptr_t)&env->slb_shadow_size
8549b00ea49SDavid Gibson            == ((uintptr_t)&env->slb_shadow_addr + 8));
8559b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_SLB;
8569b00ea49SDavid Gibson     reg.addr = (uintptr_t)&env->slb_shadow_addr;
8579b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
8589b00ea49SDavid Gibson     if (ret < 0) {
859da56ff91SPeter Maydell         DPRINTF("Unable to set SLB shadow state to KVM: %s\n", strerror(errno));
8609b00ea49SDavid Gibson         return ret;
8619b00ea49SDavid Gibson     }
8629b00ea49SDavid Gibson 
8639b00ea49SDavid Gibson     assert((uintptr_t)&env->dtl_size == ((uintptr_t)&env->dtl_addr + 8));
8649b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_DTL;
8659b00ea49SDavid Gibson     reg.addr = (uintptr_t)&env->dtl_addr;
8669b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
8679b00ea49SDavid Gibson     if (ret < 0) {
868da56ff91SPeter Maydell         DPRINTF("Unable to set dispatch trace log state to KVM: %s\n",
8699b00ea49SDavid Gibson                 strerror(errno));
8709b00ea49SDavid Gibson         return ret;
8719b00ea49SDavid Gibson     }
8729b00ea49SDavid Gibson 
8739b00ea49SDavid Gibson     if (!env->vpa_addr) {
8749b00ea49SDavid Gibson         reg.id = KVM_REG_PPC_VPA_ADDR;
8759b00ea49SDavid Gibson         reg.addr = (uintptr_t)&env->vpa_addr;
8769b00ea49SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
8779b00ea49SDavid Gibson         if (ret < 0) {
878da56ff91SPeter Maydell             DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno));
8799b00ea49SDavid Gibson             return ret;
8809b00ea49SDavid Gibson         }
8819b00ea49SDavid Gibson     }
8829b00ea49SDavid Gibson 
8839b00ea49SDavid Gibson     return 0;
8849b00ea49SDavid Gibson }
8859b00ea49SDavid Gibson #endif /* TARGET_PPC64 */
8869b00ea49SDavid Gibson 
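/* Push the Book3S sregs (PVR, SDR1, SLB, segment registers and BATs)
 * to KVM. */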
887e5c0d3ceSDavid Gibson int kvmppc_put_books_sregs(PowerPCCPU *cpu)
888a7a00a72SDavid Gibson {
889a7a00a72SDavid Gibson     CPUPPCState *env = &cpu->env;
890a7a00a72SDavid Gibson     struct kvm_sregs sregs;
891a7a00a72SDavid Gibson     int i;
892a7a00a72SDavid Gibson 
893a7a00a72SDavid Gibson     sregs.pvr = env->spr[SPR_PVR];
894a7a00a72SDavid Gibson 
895a7a00a72SDavid Gibson     sregs.u.s.sdr1 = env->spr[SPR_SDR1];
896a7a00a72SDavid Gibson 
897a7a00a72SDavid Gibson     /* Sync SLB */
898a7a00a72SDavid Gibson #ifdef TARGET_PPC64
899a7a00a72SDavid Gibson     for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
900a7a00a72SDavid Gibson         sregs.u.s.ppc64.slb[i].slbe = env->slb[i].esid;
901a7a00a72SDavid Gibson         if (env->slb[i].esid & SLB_ESID_V) {
902a7a00a72SDavid Gibson             sregs.u.s.ppc64.slb[i].slbe |= i;
903a7a00a72SDavid Gibson         }
904a7a00a72SDavid Gibson         sregs.u.s.ppc64.slb[i].slbv = env->slb[i].vsid;
905a7a00a72SDavid Gibson     }
906a7a00a72SDavid Gibson #endif
907a7a00a72SDavid Gibson 
908a7a00a72SDavid Gibson     /* Sync SRs */
909a7a00a72SDavid Gibson     for (i = 0; i < 16; i++) {
910a7a00a72SDavid Gibson         sregs.u.s.ppc32.sr[i] = env->sr[i];
911a7a00a72SDavid Gibson     }
912a7a00a72SDavid Gibson 
913a7a00a72SDavid Gibson     /* Sync BATs */
914a7a00a72SDavid Gibson     for (i = 0; i < 8; i++) {
915a7a00a72SDavid Gibson         /* Beware. We have to swap upper and lower bits here */
916a7a00a72SDavid Gibson         sregs.u.s.ppc32.dbat[i] = ((uint64_t)env->DBAT[0][i] << 32)
917a7a00a72SDavid Gibson             | env->DBAT[1][i];
918a7a00a72SDavid Gibson         sregs.u.s.ppc32.ibat[i] = ((uint64_t)env->IBAT[0][i] << 32)
919a7a00a72SDavid Gibson             | env->IBAT[1][i];
920a7a00a72SDavid Gibson     }
921a7a00a72SDavid Gibson 
922a7a00a72SDavid Gibson     return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
923a7a00a72SDavid Gibson }
924a7a00a72SDavid Gibson 
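/* Copy the CPU state (GPRs, CR, SPRs, FP/Altivec, TLB, ONE_REG registers,
 * VPA) from QEMU to KVM. */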
92520d695a9SAndreas Färber int kvm_arch_put_registers(CPUState *cs, int level)
926d76d1650Saurel32 {
92720d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
92820d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
929d76d1650Saurel32     struct kvm_regs regs;
930d76d1650Saurel32     int ret;
931d76d1650Saurel32     int i;
932d76d1650Saurel32 
9331bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
9341bc22652SAndreas Färber     if (ret < 0) {
935d76d1650Saurel32         return ret;
9361bc22652SAndreas Färber     }
937d76d1650Saurel32 
938d76d1650Saurel32     regs.ctr = env->ctr;
939d76d1650Saurel32     regs.lr  = env->lr;
940da91a00fSRichard Henderson     regs.xer = cpu_read_xer(env);
941d76d1650Saurel32     regs.msr = env->msr;
942d76d1650Saurel32     regs.pc = env->nip;
943d76d1650Saurel32 
944d76d1650Saurel32     regs.srr0 = env->spr[SPR_SRR0];
945d76d1650Saurel32     regs.srr1 = env->spr[SPR_SRR1];
946d76d1650Saurel32 
947d76d1650Saurel32     regs.sprg0 = env->spr[SPR_SPRG0];
948d76d1650Saurel32     regs.sprg1 = env->spr[SPR_SPRG1];
949d76d1650Saurel32     regs.sprg2 = env->spr[SPR_SPRG2];
950d76d1650Saurel32     regs.sprg3 = env->spr[SPR_SPRG3];
951d76d1650Saurel32     regs.sprg4 = env->spr[SPR_SPRG4];
952d76d1650Saurel32     regs.sprg5 = env->spr[SPR_SPRG5];
953d76d1650Saurel32     regs.sprg6 = env->spr[SPR_SPRG6];
954d76d1650Saurel32     regs.sprg7 = env->spr[SPR_SPRG7];
955d76d1650Saurel32 
95690dc8812SScott Wood     regs.pid = env->spr[SPR_BOOKE_PID];
95790dc8812SScott Wood 
958d76d1650Saurel32     for (i = 0; i < 32; i++)
959d76d1650Saurel32         regs.gpr[i] = env->gpr[i];
960d76d1650Saurel32 
9614bddaf55SAlexey Kardashevskiy     regs.cr = 0;
9624bddaf55SAlexey Kardashevskiy     for (i = 0; i < 8; i++) {
9634bddaf55SAlexey Kardashevskiy         regs.cr |= (env->crf[i] & 15) << (4 * (7 - i));
9644bddaf55SAlexey Kardashevskiy     }
9654bddaf55SAlexey Kardashevskiy 
9661bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
967d76d1650Saurel32     if (ret < 0)
968d76d1650Saurel32         return ret;
969d76d1650Saurel32 
97070b79849SDavid Gibson     kvm_put_fp(cs);
97170b79849SDavid Gibson 
97293dd5e85SScott Wood     if (env->tlb_dirty) {
9731bc22652SAndreas Färber         kvm_sw_tlb_put(cpu);
97493dd5e85SScott Wood         env->tlb_dirty = false;
97593dd5e85SScott Wood     }
97693dd5e85SScott Wood 
977f1af19d7SDavid Gibson     if (cap_segstate && (level >= KVM_PUT_RESET_STATE)) {
978a7a00a72SDavid Gibson         ret = kvmppc_put_books_sregs(cpu);
979a7a00a72SDavid Gibson         if (ret < 0) {
980f1af19d7SDavid Gibson             return ret;
981f1af19d7SDavid Gibson         }
982f1af19d7SDavid Gibson     }
983f1af19d7SDavid Gibson 
984f1af19d7SDavid Gibson     if (cap_hior && (level >= KVM_PUT_RESET_STATE)) {
985d67d40eaSDavid Gibson         kvm_put_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
986d67d40eaSDavid Gibson     }
987f1af19d7SDavid Gibson 
988d67d40eaSDavid Gibson     if (cap_one_reg) {
989d67d40eaSDavid Gibson         int i;
990d67d40eaSDavid Gibson 
991d67d40eaSDavid Gibson         /* We deliberately ignore errors here: for kernels which have
992d67d40eaSDavid Gibson          * the ONE_REG calls but don't support the specific
993d67d40eaSDavid Gibson          * registers, there's a reasonable chance things will still
994d67d40eaSDavid Gibson          * work, at least until we try to migrate. */
995d67d40eaSDavid Gibson         for (i = 0; i < 1024; i++) {
996d67d40eaSDavid Gibson             uint64_t id = env->spr_cb[i].one_reg_id;
997d67d40eaSDavid Gibson 
998d67d40eaSDavid Gibson             if (id != 0) {
999d67d40eaSDavid Gibson                 kvm_put_one_spr(cs, id, i);
1000d67d40eaSDavid Gibson             }
1001f1af19d7SDavid Gibson         }
10029b00ea49SDavid Gibson 
10039b00ea49SDavid Gibson #ifdef TARGET_PPC64
100480b3f79bSAlexey Kardashevskiy         if (msr_ts) {
100580b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
100680b3f79bSAlexey Kardashevskiy                 kvm_set_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
100780b3f79bSAlexey Kardashevskiy             }
100880b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
100980b3f79bSAlexey Kardashevskiy                 kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
101080b3f79bSAlexey Kardashevskiy             }
101180b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
101280b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
101380b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
101480b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
101580b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
101680b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
101780b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
101880b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
101980b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
102080b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
102180b3f79bSAlexey Kardashevskiy         }
102280b3f79bSAlexey Kardashevskiy 
10239b00ea49SDavid Gibson         if (cap_papr) {
10249b00ea49SDavid Gibson             if (kvm_put_vpa(cs) < 0) {
1025da56ff91SPeter Maydell                 DPRINTF("Warning: Unable to set VPA information to KVM\n");
10269b00ea49SDavid Gibson             }
10279b00ea49SDavid Gibson         }
102898a8b524SAlexey Kardashevskiy 
102998a8b524SAlexey Kardashevskiy         kvm_set_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
10309b00ea49SDavid Gibson #endif /* TARGET_PPC64 */
1031f1af19d7SDavid Gibson     }
1032f1af19d7SDavid Gibson 
1033d76d1650Saurel32     return ret;
1034d76d1650Saurel32 }
1035d76d1650Saurel32 
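/* Recompute QEMU's cached exception vector for 'vector' as the BookE IVPR
 * base plus the given IVOR offset; used below whenever the IVORs are
 * refreshed from KVM_GET_SREGS. */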
1036c371c2e3SBharat Bhushan static void kvm_sync_excp(CPUPPCState *env, int vector, int ivor)
1037c371c2e3SBharat Bhushan {
1038c371c2e3SBharat Bhushan     env->excp_vectors[vector] = env->spr[ivor] + env->spr[SPR_BOOKE_IVPR];
1039c371c2e3SBharat Bhushan }
1040c371c2e3SBharat Bhushan 
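/* Fetch the BookE sregs from KVM and scatter them into env->spr[], taking
 * only the register groups advertised in sregs.u.e.features. */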
1041a7a00a72SDavid Gibson static int kvmppc_get_booke_sregs(PowerPCCPU *cpu)
1042d76d1650Saurel32 {
104320d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
1044ba5e5090SAlexander Graf     struct kvm_sregs sregs;
1045a7a00a72SDavid Gibson     int ret;
1046d76d1650Saurel32 
1047a7a00a72SDavid Gibson     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
104890dc8812SScott Wood     if (ret < 0) {
104990dc8812SScott Wood         return ret;
105090dc8812SScott Wood     }
105190dc8812SScott Wood 
105290dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_BASE) {
105390dc8812SScott Wood         env->spr[SPR_BOOKE_CSRR0] = sregs.u.e.csrr0;
105490dc8812SScott Wood         env->spr[SPR_BOOKE_CSRR1] = sregs.u.e.csrr1;
105590dc8812SScott Wood         env->spr[SPR_BOOKE_ESR] = sregs.u.e.esr;
105690dc8812SScott Wood         env->spr[SPR_BOOKE_DEAR] = sregs.u.e.dear;
105790dc8812SScott Wood         env->spr[SPR_BOOKE_MCSR] = sregs.u.e.mcsr;
105890dc8812SScott Wood         env->spr[SPR_BOOKE_TSR] = sregs.u.e.tsr;
105990dc8812SScott Wood         env->spr[SPR_BOOKE_TCR] = sregs.u.e.tcr;
106090dc8812SScott Wood         env->spr[SPR_DECR] = sregs.u.e.dec;
106190dc8812SScott Wood         env->spr[SPR_TBL] = sregs.u.e.tb & 0xffffffff;
106290dc8812SScott Wood         env->spr[SPR_TBU] = sregs.u.e.tb >> 32;
106390dc8812SScott Wood         env->spr[SPR_VRSAVE] = sregs.u.e.vrsave;
106490dc8812SScott Wood     }
106590dc8812SScott Wood 
106690dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_ARCH206) {
106790dc8812SScott Wood         env->spr[SPR_BOOKE_PIR] = sregs.u.e.pir;
106890dc8812SScott Wood         env->spr[SPR_BOOKE_MCSRR0] = sregs.u.e.mcsrr0;
106990dc8812SScott Wood         env->spr[SPR_BOOKE_MCSRR1] = sregs.u.e.mcsrr1;
107090dc8812SScott Wood         env->spr[SPR_BOOKE_DECAR] = sregs.u.e.decar;
107190dc8812SScott Wood         env->spr[SPR_BOOKE_IVPR] = sregs.u.e.ivpr;
107290dc8812SScott Wood     }
107390dc8812SScott Wood 
107490dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_64) {
107590dc8812SScott Wood         env->spr[SPR_BOOKE_EPCR] = sregs.u.e.epcr;
107690dc8812SScott Wood     }
107790dc8812SScott Wood 
107890dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_SPRG8) {
107990dc8812SScott Wood         env->spr[SPR_BOOKE_SPRG8] = sregs.u.e.sprg8;
108090dc8812SScott Wood     }
108190dc8812SScott Wood 
108290dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_IVOR) {
108390dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0];
1084c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_CRITICAL,  SPR_BOOKE_IVOR0);
108590dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1];
1086c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_MCHECK,  SPR_BOOKE_IVOR1);
108790dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2];
1088c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DSI,  SPR_BOOKE_IVOR2);
108990dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3];
1090c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_ISI,  SPR_BOOKE_IVOR3);
109190dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4];
1092c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_EXTERNAL,  SPR_BOOKE_IVOR4);
109390dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5];
1094c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_ALIGN,  SPR_BOOKE_IVOR5);
109590dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6];
1096c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_PROGRAM,  SPR_BOOKE_IVOR6);
109790dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7];
1098c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_FPU,  SPR_BOOKE_IVOR7);
109990dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8];
1100c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_SYSCALL,  SPR_BOOKE_IVOR8);
110190dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9];
1102c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_APU,  SPR_BOOKE_IVOR9);
110390dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10];
1104c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DECR,  SPR_BOOKE_IVOR10);
110590dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11];
1106c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_FIT,  SPR_BOOKE_IVOR11);
110790dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12];
1108c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_WDT,  SPR_BOOKE_IVOR12);
110990dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13];
1110c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DTLB,  SPR_BOOKE_IVOR13);
111190dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14];
1112c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_ITLB,  SPR_BOOKE_IVOR14);
111390dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15];
1114c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DEBUG,  SPR_BOOKE_IVOR15);
111590dc8812SScott Wood 
111690dc8812SScott Wood         if (sregs.u.e.features & KVM_SREGS_E_SPE) {
111790dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0];
1118c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_SPEU,  SPR_BOOKE_IVOR32);
111990dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1];
1120c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_EFPDI,  SPR_BOOKE_IVOR33);
112190dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2];
1122c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_EFPRI,  SPR_BOOKE_IVOR34);
112390dc8812SScott Wood         }
112490dc8812SScott Wood 
112590dc8812SScott Wood         if (sregs.u.e.features & KVM_SREGS_E_PM) {
112690dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3];
1127c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_EPERFM,  SPR_BOOKE_IVOR35);
112890dc8812SScott Wood         }
112990dc8812SScott Wood 
113090dc8812SScott Wood         if (sregs.u.e.features & KVM_SREGS_E_PC) {
113190dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4];
1132c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_DOORI,  SPR_BOOKE_IVOR36);
113390dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5];
1134c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_DOORCI, SPR_BOOKE_IVOR37);
113590dc8812SScott Wood         }
113690dc8812SScott Wood     }
113790dc8812SScott Wood 
113890dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_ARCH206_MMU) {
113990dc8812SScott Wood         env->spr[SPR_BOOKE_MAS0] = sregs.u.e.mas0;
114090dc8812SScott Wood         env->spr[SPR_BOOKE_MAS1] = sregs.u.e.mas1;
114190dc8812SScott Wood         env->spr[SPR_BOOKE_MAS2] = sregs.u.e.mas2;
114290dc8812SScott Wood         env->spr[SPR_BOOKE_MAS3] = sregs.u.e.mas7_3 & 0xffffffff;
114390dc8812SScott Wood         env->spr[SPR_BOOKE_MAS4] = sregs.u.e.mas4;
114490dc8812SScott Wood         env->spr[SPR_BOOKE_MAS6] = sregs.u.e.mas6;
114590dc8812SScott Wood         env->spr[SPR_BOOKE_MAS7] = sregs.u.e.mas7_3 >> 32;
114690dc8812SScott Wood         env->spr[SPR_MMUCFG] = sregs.u.e.mmucfg;
114790dc8812SScott Wood         env->spr[SPR_BOOKE_TLB0CFG] = sregs.u.e.tlbcfg[0];
114890dc8812SScott Wood         env->spr[SPR_BOOKE_TLB1CFG] = sregs.u.e.tlbcfg[1];
114990dc8812SScott Wood     }
115090dc8812SScott Wood 
115190dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_EXP) {
115290dc8812SScott Wood         env->spr[SPR_BOOKE_EPR] = sregs.u.e.epr;
115390dc8812SScott Wood     }
115490dc8812SScott Wood 
115590dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_PD) {
115690dc8812SScott Wood         env->spr[SPR_BOOKE_EPLC] = sregs.u.e.eplc;
115790dc8812SScott Wood         env->spr[SPR_BOOKE_EPSC] = sregs.u.e.epsc;
115890dc8812SScott Wood     }
115990dc8812SScott Wood 
116090dc8812SScott Wood     if (sregs.u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
116190dc8812SScott Wood         env->spr[SPR_E500_SVR] = sregs.u.e.impl.fsl.svr;
116290dc8812SScott Wood         env->spr[SPR_Exxx_MCAR] = sregs.u.e.impl.fsl.mcar;
116390dc8812SScott Wood         env->spr[SPR_HID0] = sregs.u.e.impl.fsl.hid0;
116490dc8812SScott Wood 
116590dc8812SScott Wood         if (sregs.u.e.impl.fsl.features & KVM_SREGS_E_FSL_PIDn) {
116690dc8812SScott Wood             env->spr[SPR_BOOKE_PID1] = sregs.u.e.impl.fsl.pid1;
116790dc8812SScott Wood             env->spr[SPR_BOOKE_PID2] = sregs.u.e.impl.fsl.pid2;
116890dc8812SScott Wood         }
116990dc8812SScott Wood     }
1170a7a00a72SDavid Gibson 
1171a7a00a72SDavid Gibson     return 0;
1172fafc0b6aSAlexander Graf }
117390dc8812SScott Wood 
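/* Fetch the Book3S sregs from KVM: SDR1 (unless a virtual hypervisor owns
 * it), the SLB, the segment registers and the BATs. */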
1174a7a00a72SDavid Gibson static int kvmppc_get_books_sregs(PowerPCCPU *cpu)
1175a7a00a72SDavid Gibson {
1176a7a00a72SDavid Gibson     CPUPPCState *env = &cpu->env;
1177a7a00a72SDavid Gibson     struct kvm_sregs sregs;
1178a7a00a72SDavid Gibson     int ret;
1179a7a00a72SDavid Gibson     int i;
1180a7a00a72SDavid Gibson 
1181a7a00a72SDavid Gibson     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
118290dc8812SScott Wood     if (ret < 0) {
118390dc8812SScott Wood         return ret;
118490dc8812SScott Wood     }
118590dc8812SScott Wood 
1186e57ca75cSDavid Gibson     if (!cpu->vhyp) {
1187bb593904SDavid Gibson         ppc_store_sdr1(env, sregs.u.s.sdr1);
1188f3c75d42SAneesh Kumar K.V     }
1189ba5e5090SAlexander Graf 
1190ba5e5090SAlexander Graf     /* Sync SLB */
119182c09f2fSAlexander Graf #ifdef TARGET_PPC64
11924b4d4a21SAneesh Kumar K.V     /*
11934b4d4a21SAneesh Kumar K.V      * The packed SLB array we get from KVM_GET_SREGS only contains
1194a7a00a72SDavid Gibson      * information about valid entries. So we flush our internal copy
1195a7a00a72SDavid Gibson      * to get rid of stale ones, then put all valid SLB entries back
1196a7a00a72SDavid Gibson      * in.
11974b4d4a21SAneesh Kumar K.V      */
11984b4d4a21SAneesh Kumar K.V     memset(env->slb, 0, sizeof(env->slb));
1199d83af167SAneesh Kumar K.V     for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
12004b4d4a21SAneesh Kumar K.V         target_ulong rb = sregs.u.s.ppc64.slb[i].slbe;
12014b4d4a21SAneesh Kumar K.V         target_ulong rs = sregs.u.s.ppc64.slb[i].slbv;
12024b4d4a21SAneesh Kumar K.V         /*
12034b4d4a21SAneesh Kumar K.V          * Only restore valid entries
12044b4d4a21SAneesh Kumar K.V          */
12054b4d4a21SAneesh Kumar K.V         if (rb & SLB_ESID_V) {
1206bcd81230SDavid Gibson             ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs);
12074b4d4a21SAneesh Kumar K.V         }
1208ba5e5090SAlexander Graf     }
120982c09f2fSAlexander Graf #endif
1210ba5e5090SAlexander Graf 
1211ba5e5090SAlexander Graf     /* Sync SRs */
1212ba5e5090SAlexander Graf     for (i = 0; i < 16; i++) {
1213ba5e5090SAlexander Graf         env->sr[i] = sregs.u.s.ppc32.sr[i];
1214ba5e5090SAlexander Graf     }
1215ba5e5090SAlexander Graf 
1216ba5e5090SAlexander Graf     /* Sync BATs */
1217ba5e5090SAlexander Graf     for (i = 0; i < 8; i++) {
1218ba5e5090SAlexander Graf         env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff;
1219ba5e5090SAlexander Graf         env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32;
1220ba5e5090SAlexander Graf         env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff;
1221ba5e5090SAlexander Graf         env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32;
1222ba5e5090SAlexander Graf     }
1223a7a00a72SDavid Gibson 
1224a7a00a72SDavid Gibson     return 0;
1225a7a00a72SDavid Gibson }
1226a7a00a72SDavid Gibson 
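/* Pull the vCPU state out of KVM into CPUPPCState: GPRs, CR, LR/CTR/XER,
 * MSR, the common SPRs, FP/vector state via kvm_get_fp(), the MMU sregs
 * and any ONE_REG-backed registers. */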
1227a7a00a72SDavid Gibson int kvm_arch_get_registers(CPUState *cs)
1228a7a00a72SDavid Gibson {
1229a7a00a72SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
1230a7a00a72SDavid Gibson     CPUPPCState *env = &cpu->env;
1231a7a00a72SDavid Gibson     struct kvm_regs regs;
1232a7a00a72SDavid Gibson     uint32_t cr;
1233a7a00a72SDavid Gibson     int i, ret;
1234a7a00a72SDavid Gibson 
1235a7a00a72SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
1236a7a00a72SDavid Gibson     if (ret < 0)
1237a7a00a72SDavid Gibson         return ret;
1238a7a00a72SDavid Gibson 
1239a7a00a72SDavid Gibson     cr = regs.cr;
1240a7a00a72SDavid Gibson     for (i = 7; i >= 0; i--) {
1241a7a00a72SDavid Gibson         env->crf[i] = cr & 15;
1242a7a00a72SDavid Gibson         cr >>= 4;
1243a7a00a72SDavid Gibson     }
1244a7a00a72SDavid Gibson 
1245a7a00a72SDavid Gibson     env->ctr = regs.ctr;
1246a7a00a72SDavid Gibson     env->lr = regs.lr;
1247a7a00a72SDavid Gibson     cpu_write_xer(env, regs.xer);
1248a7a00a72SDavid Gibson     env->msr = regs.msr;
1249a7a00a72SDavid Gibson     env->nip = regs.pc;
1250a7a00a72SDavid Gibson 
1251a7a00a72SDavid Gibson     env->spr[SPR_SRR0] = regs.srr0;
1252a7a00a72SDavid Gibson     env->spr[SPR_SRR1] = regs.srr1;
1253a7a00a72SDavid Gibson 
1254a7a00a72SDavid Gibson     env->spr[SPR_SPRG0] = regs.sprg0;
1255a7a00a72SDavid Gibson     env->spr[SPR_SPRG1] = regs.sprg1;
1256a7a00a72SDavid Gibson     env->spr[SPR_SPRG2] = regs.sprg2;
1257a7a00a72SDavid Gibson     env->spr[SPR_SPRG3] = regs.sprg3;
1258a7a00a72SDavid Gibson     env->spr[SPR_SPRG4] = regs.sprg4;
1259a7a00a72SDavid Gibson     env->spr[SPR_SPRG5] = regs.sprg5;
1260a7a00a72SDavid Gibson     env->spr[SPR_SPRG6] = regs.sprg6;
1261a7a00a72SDavid Gibson     env->spr[SPR_SPRG7] = regs.sprg7;
1262a7a00a72SDavid Gibson 
1263a7a00a72SDavid Gibson     env->spr[SPR_BOOKE_PID] = regs.pid;
1264a7a00a72SDavid Gibson 
1265a7a00a72SDavid Gibson     for (i = 0; i < 32; i++)
1266a7a00a72SDavid Gibson         env->gpr[i] = regs.gpr[i];
1267a7a00a72SDavid Gibson 
1268a7a00a72SDavid Gibson     kvm_get_fp(cs);
1269a7a00a72SDavid Gibson 
1270a7a00a72SDavid Gibson     if (cap_booke_sregs) {
1271a7a00a72SDavid Gibson         ret = kvmppc_get_booke_sregs(cpu);
1272a7a00a72SDavid Gibson         if (ret < 0) {
1273a7a00a72SDavid Gibson             return ret;
1274a7a00a72SDavid Gibson         }
1275a7a00a72SDavid Gibson     }
1276a7a00a72SDavid Gibson 
1277a7a00a72SDavid Gibson     if (cap_segstate) {
1278a7a00a72SDavid Gibson         ret = kvmppc_get_books_sregs(cpu);
1279a7a00a72SDavid Gibson         if (ret < 0) {
1280a7a00a72SDavid Gibson             return ret;
1281a7a00a72SDavid Gibson         }
1282fafc0b6aSAlexander Graf     }
1283ba5e5090SAlexander Graf 
1284d67d40eaSDavid Gibson     if (cap_hior) {
1285d67d40eaSDavid Gibson         kvm_get_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
1286d67d40eaSDavid Gibson     }
1287d67d40eaSDavid Gibson 
1288d67d40eaSDavid Gibson     if (cap_one_reg) {
1289d67d40eaSDavid Gibson         int i;
1290d67d40eaSDavid Gibson 
1291d67d40eaSDavid Gibson         /* We deliberately ignore errors here: for kernels which have
1292d67d40eaSDavid Gibson          * the ONE_REG calls but don't support some specific
1293d67d40eaSDavid Gibson          * registers, there's a reasonable chance things will still
1294d67d40eaSDavid Gibson          * work, at least until we try to migrate. */
1295d67d40eaSDavid Gibson         for (i = 0; i < 1024; i++) {
1296d67d40eaSDavid Gibson             uint64_t id = env->spr_cb[i].one_reg_id;
1297d67d40eaSDavid Gibson 
1298d67d40eaSDavid Gibson             if (id != 0) {
1299d67d40eaSDavid Gibson                 kvm_get_one_spr(cs, id, i);
1300d67d40eaSDavid Gibson             }
1301d67d40eaSDavid Gibson         }
13029b00ea49SDavid Gibson 
13039b00ea49SDavid Gibson #ifdef TARGET_PPC64
130480b3f79bSAlexey Kardashevskiy         if (msr_ts) {
130580b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
130680b3f79bSAlexey Kardashevskiy                 kvm_get_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
130780b3f79bSAlexey Kardashevskiy             }
130880b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
130980b3f79bSAlexey Kardashevskiy                 kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
131080b3f79bSAlexey Kardashevskiy             }
131180b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
131280b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
131380b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
131480b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
131580b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
131680b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
131780b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
131880b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
131980b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
132080b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
132180b3f79bSAlexey Kardashevskiy         }
132280b3f79bSAlexey Kardashevskiy 
13239b00ea49SDavid Gibson         if (cap_papr) {
13249b00ea49SDavid Gibson             if (kvm_get_vpa(cs) < 0) {
1325da56ff91SPeter Maydell                 DPRINTF("Warning: Unable to get VPA information from KVM\n");
13269b00ea49SDavid Gibson             }
13279b00ea49SDavid Gibson         }
132898a8b524SAlexey Kardashevskiy 
132998a8b524SAlexey Kardashevskiy         kvm_get_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
13309b00ea49SDavid Gibson #endif
1331d67d40eaSDavid Gibson     }
1332d67d40eaSDavid Gibson 
1333d76d1650Saurel32     return 0;
1334d76d1650Saurel32 }
1335d76d1650Saurel32 
13361bc22652SAndreas Färber int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
1337fc87e185SAlexander Graf {
1338fc87e185SAlexander Graf     unsigned virq = level ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;
1339fc87e185SAlexander Graf 
1340fc87e185SAlexander Graf     if (irq != PPC_INTERRUPT_EXT) {
1341fc87e185SAlexander Graf         return 0;
1342fc87e185SAlexander Graf     }
1343fc87e185SAlexander Graf 
1344fc87e185SAlexander Graf     if (!kvm_enabled() || !cap_interrupt_unset || !cap_interrupt_level) {
1345fc87e185SAlexander Graf         return 0;
1346fc87e185SAlexander Graf     }
1347fc87e185SAlexander Graf 
13481bc22652SAndreas Färber     kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq);
1349fc87e185SAlexander Graf 
1350fc87e185SAlexander Graf     return 0;
1351fc87e185SAlexander Graf }
1352fc87e185SAlexander Graf 
135316415335SAlexander Graf #if defined(TARGET_PPCEMB)
135416415335SAlexander Graf #define PPC_INPUT_INT PPC40x_INPUT_INT
135516415335SAlexander Graf #elif defined(TARGET_PPC64)
135616415335SAlexander Graf #define PPC_INPUT_INT PPC970_INPUT_INT
135716415335SAlexander Graf #else
135816415335SAlexander Graf #define PPC_INPUT_INT PPC6xx_INPUT_INT
135916415335SAlexander Graf #endif
136016415335SAlexander Graf 
136120d695a9SAndreas Färber void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
1362d76d1650Saurel32 {
136320d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
136420d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
1365d76d1650Saurel32     int r;
1366d76d1650Saurel32     unsigned irq;
1367d76d1650Saurel32 
13684b8523eeSJan Kiszka     qemu_mutex_lock_iothread();
13694b8523eeSJan Kiszka 
13705cbdb3a3SStefan Weil     /* PowerPC QEMU tracks the various core input pins (interrupt, critical
1371d76d1650Saurel32      * interrupt, reset, etc) in PPC-specific env->irq_input_state. */
1372fc87e185SAlexander Graf     if (!cap_interrupt_level &&
1373fc87e185SAlexander Graf         run->ready_for_interrupt_injection &&
1374259186a7SAndreas Färber         (cs->interrupt_request & CPU_INTERRUPT_HARD) &&
137516415335SAlexander Graf         (env->irq_input_state & (1<<PPC_INPUT_INT)))
1376d76d1650Saurel32     {
1377d76d1650Saurel32         /* For now KVM disregards the 'irq' argument. However, in the
1378d76d1650Saurel32          * future KVM could cache it in-kernel to avoid a heavyweight exit
1379d76d1650Saurel32          * when reading the UIC.
1380d76d1650Saurel32          */
1381fc87e185SAlexander Graf         irq = KVM_INTERRUPT_SET;
1382d76d1650Saurel32 
1383da56ff91SPeter Maydell         DPRINTF("injected interrupt %d\n", irq);
13841bc22652SAndreas Färber         r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &irq);
138555e5c285SAndreas Färber         if (r < 0) {
138655e5c285SAndreas Färber             printf("cpu %d fail inject %x\n", cs->cpu_index, irq);
138755e5c285SAndreas Färber         }
1388c821c2bdSAlexander Graf 
1389c821c2bdSAlexander Graf         /* Always wake up soon in case the interrupt was level based */
1390bc72ad67SAlex Bligh         timer_mod(idle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
139173bcb24dSRutuja Shah                        (NANOSECONDS_PER_SECOND / 50));
1392d76d1650Saurel32     }
1393d76d1650Saurel32 
1394d76d1650Saurel32     /* We don't know if there are more interrupts pending after this. However,
1395d76d1650Saurel32      * the guest will return to userspace in the course of handling this one
1396d76d1650Saurel32      * anyway, so we will get a chance to deliver the rest. */
13974b8523eeSJan Kiszka 
13984b8523eeSJan Kiszka     qemu_mutex_unlock_iothread();
1399d76d1650Saurel32 }
1400d76d1650Saurel32 
14014c663752SPaolo Bonzini MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
1402d76d1650Saurel32 {
14034c663752SPaolo Bonzini     return MEMTXATTRS_UNSPECIFIED;
1404d76d1650Saurel32 }
1405d76d1650Saurel32 
140620d695a9SAndreas Färber int kvm_arch_process_async_events(CPUState *cs)
14070af691d7SMarcelo Tosatti {
1408259186a7SAndreas Färber     return cs->halted;
14090af691d7SMarcelo Tosatti }
14100af691d7SMarcelo Tosatti 
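/* KVM_EXIT_HLT: if external interrupts are enabled (MSR_EE) and none are
 * pending, park the vCPU until something arrives. */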
1411259186a7SAndreas Färber static int kvmppc_handle_halt(PowerPCCPU *cpu)
1412d76d1650Saurel32 {
1413259186a7SAndreas Färber     CPUState *cs = CPU(cpu);
1414259186a7SAndreas Färber     CPUPPCState *env = &cpu->env;
1415259186a7SAndreas Färber 
1416259186a7SAndreas Färber     if (!(cs->interrupt_request & CPU_INTERRUPT_HARD) && (msr_ee)) {
1417259186a7SAndreas Färber         cs->halted = 1;
141827103424SAndreas Färber         cs->exception_index = EXCP_HLT;
1419d76d1650Saurel32     }
1420d76d1650Saurel32 
1421bb4ea393SJan Kiszka     return 0;
1422d76d1650Saurel32 }
1423d76d1650Saurel32 
1424d76d1650Saurel32 /* map dcr access to existing qemu dcr emulation */
14251328c2bfSAndreas Färber static int kvmppc_handle_dcr_read(CPUPPCState *env, uint32_t dcrn, uint32_t *data)
1426d76d1650Saurel32 {
1427d76d1650Saurel32     if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0)
1428d76d1650Saurel32         fprintf(stderr, "Read to unhandled DCR (0x%x)\n", dcrn);
1429d76d1650Saurel32 
1430bb4ea393SJan Kiszka     return 0;
1431d76d1650Saurel32 }
1432d76d1650Saurel32 
14331328c2bfSAndreas Färber static int kvmppc_handle_dcr_write(CPUPPCState *env, uint32_t dcrn, uint32_t data)
1434d76d1650Saurel32 {
1435d76d1650Saurel32     if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0)
1436d76d1650Saurel32         fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);
1437d76d1650Saurel32 
1438bb4ea393SJan Kiszka     return 0;
1439d76d1650Saurel32 }
1440d76d1650Saurel32 
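/* Software breakpoints: save the original instruction in bp->saved_insn and
 * patch the software-breakpoint opcode (debug_inst_opcode) into guest memory;
 * removal below restores the saved instruction. */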
14418a0548f9SBharat Bhushan int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
14428a0548f9SBharat Bhushan {
14438a0548f9SBharat Bhushan     /* Mixed endian case is not handled */
14448a0548f9SBharat Bhushan     uint32_t sc = debug_inst_opcode;
14458a0548f9SBharat Bhushan 
14468a0548f9SBharat Bhushan     if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
14478a0548f9SBharat Bhushan                             sizeof(sc), 0) ||
14488a0548f9SBharat Bhushan         cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 1)) {
14498a0548f9SBharat Bhushan         return -EINVAL;
14508a0548f9SBharat Bhushan     }
14518a0548f9SBharat Bhushan 
14528a0548f9SBharat Bhushan     return 0;
14538a0548f9SBharat Bhushan }
14548a0548f9SBharat Bhushan 
14558a0548f9SBharat Bhushan int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
14568a0548f9SBharat Bhushan {
14578a0548f9SBharat Bhushan     uint32_t sc;
14588a0548f9SBharat Bhushan 
14598a0548f9SBharat Bhushan     if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 0) ||
14608a0548f9SBharat Bhushan         sc != debug_inst_opcode ||
14618a0548f9SBharat Bhushan         cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
14628a0548f9SBharat Bhushan                             sizeof(sc), 1)) {
14638a0548f9SBharat Bhushan         return -EINVAL;
14648a0548f9SBharat Bhushan     }
14658a0548f9SBharat Bhushan 
14668a0548f9SBharat Bhushan     return 0;
14678a0548f9SBharat Bhushan }
14688a0548f9SBharat Bhushan 
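/* Look up a previously registered hardware breakpoint/watchpoint by address
 * and GDB type; returns its index in hw_debug_points[] or -1. */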
146988365d17SBharat Bhushan static int find_hw_breakpoint(target_ulong addr, int type)
147088365d17SBharat Bhushan {
147188365d17SBharat Bhushan     int n;
147288365d17SBharat Bhushan 
147388365d17SBharat Bhushan     assert((nb_hw_breakpoint + nb_hw_watchpoint)
147488365d17SBharat Bhushan            <= ARRAY_SIZE(hw_debug_points));
147588365d17SBharat Bhushan 
147688365d17SBharat Bhushan     for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
147788365d17SBharat Bhushan         if (hw_debug_points[n].addr == addr &&
147888365d17SBharat Bhushan              hw_debug_points[n].type == type) {
147988365d17SBharat Bhushan             return n;
148088365d17SBharat Bhushan         }
148188365d17SBharat Bhushan     }
148288365d17SBharat Bhushan 
148388365d17SBharat Bhushan     return -1;
148488365d17SBharat Bhushan }
148588365d17SBharat Bhushan 
148688365d17SBharat Bhushan static int find_hw_watchpoint(target_ulong addr, int *flag)
148788365d17SBharat Bhushan {
148888365d17SBharat Bhushan     int n;
148988365d17SBharat Bhushan 
149088365d17SBharat Bhushan     n = find_hw_breakpoint(addr, GDB_WATCHPOINT_ACCESS);
149188365d17SBharat Bhushan     if (n >= 0) {
149288365d17SBharat Bhushan         *flag = BP_MEM_ACCESS;
149388365d17SBharat Bhushan         return n;
149488365d17SBharat Bhushan     }
149588365d17SBharat Bhushan 
149688365d17SBharat Bhushan     n = find_hw_breakpoint(addr, GDB_WATCHPOINT_WRITE);
149788365d17SBharat Bhushan     if (n >= 0) {
149888365d17SBharat Bhushan         *flag = BP_MEM_WRITE;
149988365d17SBharat Bhushan         return n;
150088365d17SBharat Bhushan     }
150188365d17SBharat Bhushan 
150288365d17SBharat Bhushan     n = find_hw_breakpoint(addr, GDB_WATCHPOINT_READ);
150388365d17SBharat Bhushan     if (n >= 0) {
150488365d17SBharat Bhushan         *flag = BP_MEM_READ;
150588365d17SBharat Bhushan         return n;
150688365d17SBharat Bhushan     }
150788365d17SBharat Bhushan 
150888365d17SBharat Bhushan     return -1;
150988365d17SBharat Bhushan }
151088365d17SBharat Bhushan 
151188365d17SBharat Bhushan int kvm_arch_insert_hw_breakpoint(target_ulong addr,
151288365d17SBharat Bhushan                                   target_ulong len, int type)
151388365d17SBharat Bhushan {
151488365d17SBharat Bhushan     if ((nb_hw_breakpoint + nb_hw_watchpoint) >= ARRAY_SIZE(hw_debug_points)) {
151588365d17SBharat Bhushan         return -ENOBUFS;
151688365d17SBharat Bhushan     }
151788365d17SBharat Bhushan 
151888365d17SBharat Bhushan     hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].addr = addr;
151988365d17SBharat Bhushan     hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].type = type;
152088365d17SBharat Bhushan 
152188365d17SBharat Bhushan     switch (type) {
152288365d17SBharat Bhushan     case GDB_BREAKPOINT_HW:
152388365d17SBharat Bhushan         if (nb_hw_breakpoint >= max_hw_breakpoint) {
152488365d17SBharat Bhushan             return -ENOBUFS;
152588365d17SBharat Bhushan         }
152688365d17SBharat Bhushan 
152788365d17SBharat Bhushan         if (find_hw_breakpoint(addr, type) >= 0) {
152888365d17SBharat Bhushan             return -EEXIST;
152988365d17SBharat Bhushan         }
153088365d17SBharat Bhushan 
153188365d17SBharat Bhushan         nb_hw_breakpoint++;
153288365d17SBharat Bhushan         break;
153388365d17SBharat Bhushan 
153488365d17SBharat Bhushan     case GDB_WATCHPOINT_WRITE:
153588365d17SBharat Bhushan     case GDB_WATCHPOINT_READ:
153688365d17SBharat Bhushan     case GDB_WATCHPOINT_ACCESS:
153788365d17SBharat Bhushan         if (nb_hw_watchpoint >= max_hw_watchpoint) {
153888365d17SBharat Bhushan             return -ENOBUFS;
153988365d17SBharat Bhushan         }
154088365d17SBharat Bhushan 
154188365d17SBharat Bhushan         if (find_hw_breakpoint(addr, type) >= 0) {
154288365d17SBharat Bhushan             return -EEXIST;
154388365d17SBharat Bhushan         }
154488365d17SBharat Bhushan 
154588365d17SBharat Bhushan         nb_hw_watchpoint++;
154688365d17SBharat Bhushan         break;
154788365d17SBharat Bhushan 
154888365d17SBharat Bhushan     default:
154988365d17SBharat Bhushan         return -ENOSYS;
155088365d17SBharat Bhushan     }
155188365d17SBharat Bhushan 
155288365d17SBharat Bhushan     return 0;
155388365d17SBharat Bhushan }
155488365d17SBharat Bhushan 
155588365d17SBharat Bhushan int kvm_arch_remove_hw_breakpoint(target_ulong addr,
155688365d17SBharat Bhushan                                   target_ulong len, int type)
155788365d17SBharat Bhushan {
155888365d17SBharat Bhushan     int n;
155988365d17SBharat Bhushan 
156088365d17SBharat Bhushan     n = find_hw_breakpoint(addr, type);
156188365d17SBharat Bhushan     if (n < 0) {
156288365d17SBharat Bhushan         return -ENOENT;
156388365d17SBharat Bhushan     }
156488365d17SBharat Bhushan 
156588365d17SBharat Bhushan     switch (type) {
156688365d17SBharat Bhushan     case GDB_BREAKPOINT_HW:
156788365d17SBharat Bhushan         nb_hw_breakpoint--;
156888365d17SBharat Bhushan         break;
156988365d17SBharat Bhushan 
157088365d17SBharat Bhushan     case GDB_WATCHPOINT_WRITE:
157188365d17SBharat Bhushan     case GDB_WATCHPOINT_READ:
157288365d17SBharat Bhushan     case GDB_WATCHPOINT_ACCESS:
157388365d17SBharat Bhushan         nb_hw_watchpoint--;
157488365d17SBharat Bhushan         break;
157588365d17SBharat Bhushan 
157688365d17SBharat Bhushan     default:
157788365d17SBharat Bhushan         return -ENOSYS;
157888365d17SBharat Bhushan     }
157988365d17SBharat Bhushan     hw_debug_points[n] = hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint];
158088365d17SBharat Bhushan 
158188365d17SBharat Bhushan     return 0;
158288365d17SBharat Bhushan }
158388365d17SBharat Bhushan 
158488365d17SBharat Bhushan void kvm_arch_remove_all_hw_breakpoints(void)
158588365d17SBharat Bhushan {
158688365d17SBharat Bhushan     nb_hw_breakpoint = nb_hw_watchpoint = 0;
158788365d17SBharat Bhushan }
158888365d17SBharat Bhushan 
15898a0548f9SBharat Bhushan void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
15908a0548f9SBharat Bhushan {
159188365d17SBharat Bhushan     int n;
159288365d17SBharat Bhushan 
15938a0548f9SBharat Bhushan     /* Software Breakpoint updates */
15948a0548f9SBharat Bhushan     if (kvm_sw_breakpoints_active(cs)) {
15958a0548f9SBharat Bhushan         dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
15968a0548f9SBharat Bhushan     }
159788365d17SBharat Bhushan 
159888365d17SBharat Bhushan     assert((nb_hw_breakpoint + nb_hw_watchpoint)
159988365d17SBharat Bhushan            <= ARRAY_SIZE(hw_debug_points));
160088365d17SBharat Bhushan     assert((nb_hw_breakpoint + nb_hw_watchpoint) <= ARRAY_SIZE(dbg->arch.bp));
160188365d17SBharat Bhushan 
160288365d17SBharat Bhushan     if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
160388365d17SBharat Bhushan         dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
160488365d17SBharat Bhushan         memset(dbg->arch.bp, 0, sizeof(dbg->arch.bp));
160588365d17SBharat Bhushan         for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
160688365d17SBharat Bhushan             switch (hw_debug_points[n].type) {
160788365d17SBharat Bhushan             case GDB_BREAKPOINT_HW:
160888365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_BREAKPOINT;
160988365d17SBharat Bhushan                 break;
161088365d17SBharat Bhushan             case GDB_WATCHPOINT_WRITE:
161188365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE;
161288365d17SBharat Bhushan                 break;
161388365d17SBharat Bhushan             case GDB_WATCHPOINT_READ:
161488365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_READ;
161588365d17SBharat Bhushan                 break;
161688365d17SBharat Bhushan             case GDB_WATCHPOINT_ACCESS:
161788365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE |
161888365d17SBharat Bhushan                                         KVMPPC_DEBUG_WATCH_READ;
161988365d17SBharat Bhushan                 break;
162088365d17SBharat Bhushan             default:
162188365d17SBharat Bhushan                 cpu_abort(cs, "Unsupported breakpoint type\n");
162288365d17SBharat Bhushan             }
162388365d17SBharat Bhushan             dbg->arch.bp[n].addr = hw_debug_points[n].addr;
162488365d17SBharat Bhushan         }
162588365d17SBharat Bhushan     }
16268a0548f9SBharat Bhushan }
16278a0548f9SBharat Bhushan 
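/* Decide whether a KVM_EXIT_DEBUG exit belongs to QEMU (single-stepping or
 * one of our hardware/software breakpoints: return 1 so the exit is reported
 * to the gdbstub) or to the guest (return 0 after converting it into a
 * program interrupt). */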
16288a0548f9SBharat Bhushan static int kvm_handle_debug(PowerPCCPU *cpu, struct kvm_run *run)
16298a0548f9SBharat Bhushan {
16308a0548f9SBharat Bhushan     CPUState *cs = CPU(cpu);
16318a0548f9SBharat Bhushan     CPUPPCState *env = &cpu->env;
16328a0548f9SBharat Bhushan     struct kvm_debug_exit_arch *arch_info = &run->debug.arch;
16338a0548f9SBharat Bhushan     int handle = 0;
163488365d17SBharat Bhushan     int n;
163588365d17SBharat Bhushan     int flag = 0;
16368a0548f9SBharat Bhushan 
163788365d17SBharat Bhushan     if (cs->singlestep_enabled) {
163888365d17SBharat Bhushan         handle = 1;
163988365d17SBharat Bhushan     } else if (arch_info->status) {
164088365d17SBharat Bhushan         if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
164188365d17SBharat Bhushan             if (arch_info->status & KVMPPC_DEBUG_BREAKPOINT) {
164288365d17SBharat Bhushan                 n = find_hw_breakpoint(arch_info->address, GDB_BREAKPOINT_HW);
164388365d17SBharat Bhushan                 if (n >= 0) {
164488365d17SBharat Bhushan                     handle = 1;
164588365d17SBharat Bhushan                 }
164688365d17SBharat Bhushan             } else if (arch_info->status & (KVMPPC_DEBUG_WATCH_READ |
164788365d17SBharat Bhushan                                             KVMPPC_DEBUG_WATCH_WRITE)) {
164888365d17SBharat Bhushan                 n = find_hw_watchpoint(arch_info->address,  &flag);
164988365d17SBharat Bhushan                 if (n >= 0) {
165088365d17SBharat Bhushan                     handle = 1;
165188365d17SBharat Bhushan                     cs->watchpoint_hit = &hw_watchpoint;
165288365d17SBharat Bhushan                     hw_watchpoint.vaddr = hw_debug_points[n].addr;
165388365d17SBharat Bhushan                     hw_watchpoint.flags = flag;
165488365d17SBharat Bhushan                 }
165588365d17SBharat Bhushan             }
165688365d17SBharat Bhushan         }
165788365d17SBharat Bhushan     } else if (kvm_find_sw_breakpoint(cs, arch_info->address)) {
16588a0548f9SBharat Bhushan         handle = 1;
16598a0548f9SBharat Bhushan     } else {
16608a0548f9SBharat Bhushan         /* QEMU cannot handle this debug exception, so inject a
16618a0548f9SBharat Bhushan          * program exception into the guest instead;
16628a0548f9SBharat Bhushan          * yes, a program exception, NOT a debug exception.
166388365d17SBharat Bhushan          * When QEMU is using the debug resources, debug exceptions
166488365d17SBharat Bhushan          * must always be delivered to QEMU. To achieve this we set
166588365d17SBharat Bhushan          * MSR_DE and also MSRP_DEP so the guest cannot change MSR_DE.
166688365d17SBharat Bhushan          * When emulating debug resources for the guest, we instead
166788365d17SBharat Bhushan          * want the guest to control MSR_DE (enabling/disabling the
166888365d17SBharat Bhushan          * debug interrupt as needed).
166988365d17SBharat Bhushan          * Supporting both configurations at once is not possible,
167088365d17SBharat Bhushan          * so debug resources cannot be shared between QEMU and the
167188365d17SBharat Bhushan          * guest on the BookE architecture.
167288365d17SBharat Bhushan          * In the current design QEMU takes priority over the guest:
167388365d17SBharat Bhushan          * if QEMU is using the debug resources, the guest cannot
16748a0548f9SBharat Bhushan          * use them.
16758a0548f9SBharat Bhushan          * For software breakpoints QEMU uses a privileged instruction,
16768a0548f9SBharat Bhushan          * so we cannot be here because the guest raised a debug
16778a0548f9SBharat Bhushan          * exception; the only possibility is that the guest executed
16788a0548f9SBharat Bhushan          * a privileged/illegal instruction, hence the program interrupt.
16798a0548f9SBharat Bhushan          */
16808a0548f9SBharat Bhushan 
16818a0548f9SBharat Bhushan         cpu_synchronize_state(cs);
16828a0548f9SBharat Bhushan         /* env->nip is PC, so increment this by 4 to use
16838a0548f9SBharat Bhushan          * ppc_cpu_do_interrupt(), which sets srr0 = env->nip - 4.
16848a0548f9SBharat Bhushan          */
16858a0548f9SBharat Bhushan         env->nip += 4;
16868a0548f9SBharat Bhushan         cs->exception_index = POWERPC_EXCP_PROGRAM;
16878a0548f9SBharat Bhushan         env->error_code = POWERPC_EXCP_INVAL;
16888a0548f9SBharat Bhushan         ppc_cpu_do_interrupt(cs);
16898a0548f9SBharat Bhushan     }
16908a0548f9SBharat Bhushan 
16918a0548f9SBharat Bhushan     return handle;
16928a0548f9SBharat Bhushan }
16938a0548f9SBharat Bhushan 
169420d695a9SAndreas Färber int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
1695d76d1650Saurel32 {
169620d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
169720d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
1698bb4ea393SJan Kiszka     int ret;
1699d76d1650Saurel32 
17004b8523eeSJan Kiszka     qemu_mutex_lock_iothread();
17014b8523eeSJan Kiszka 
1702d76d1650Saurel32     switch (run->exit_reason) {
1703d76d1650Saurel32     case KVM_EXIT_DCR:
1704d76d1650Saurel32         if (run->dcr.is_write) {
1705da56ff91SPeter Maydell             DPRINTF("handle dcr write\n");
1706d76d1650Saurel32             ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data);
1707d76d1650Saurel32         } else {
1708da56ff91SPeter Maydell             DPRINTF("handle dcr read\n");
1709d76d1650Saurel32             ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data);
1710d76d1650Saurel32         }
1711d76d1650Saurel32         break;
1712d76d1650Saurel32     case KVM_EXIT_HLT:
1713da56ff91SPeter Maydell         DPRINTF("handle halt\n");
1714259186a7SAndreas Färber         ret = kvmppc_handle_halt(cpu);
1715d76d1650Saurel32         break;
1716c6304a4aSDavid Gibson #if defined(TARGET_PPC64)
1717f61b4bedSAlexander Graf     case KVM_EXIT_PAPR_HCALL:
1718da56ff91SPeter Maydell         DPRINTF("handle PAPR hypercall\n");
171920d695a9SAndreas Färber         run->papr_hcall.ret = spapr_hypercall(cpu,
1720aa100fa4SAndreas Färber                                               run->papr_hcall.nr,
1721f61b4bedSAlexander Graf                                               run->papr_hcall.args);
172278e8fde2SDavid Gibson         ret = 0;
1723f61b4bedSAlexander Graf         break;
1724f61b4bedSAlexander Graf #endif
17255b95b8b9SAlexander Graf     case KVM_EXIT_EPR:
1726da56ff91SPeter Maydell         DPRINTF("handle epr\n");
1727933b19eaSAlexander Graf         run->epr.epr = ldl_phys(cs->as, env->mpic_iack);
17285b95b8b9SAlexander Graf         ret = 0;
17295b95b8b9SAlexander Graf         break;
173031f2cb8fSBharat Bhushan     case KVM_EXIT_WATCHDOG:
1731da56ff91SPeter Maydell         DPRINTF("handle watchdog expiry\n");
173231f2cb8fSBharat Bhushan         watchdog_perform_action();
173331f2cb8fSBharat Bhushan         ret = 0;
173431f2cb8fSBharat Bhushan         break;
173531f2cb8fSBharat Bhushan 
17368a0548f9SBharat Bhushan     case KVM_EXIT_DEBUG:
17378a0548f9SBharat Bhushan         DPRINTF("handle debug exception\n");
17388a0548f9SBharat Bhushan         if (kvm_handle_debug(cpu, run)) {
17398a0548f9SBharat Bhushan             ret = EXCP_DEBUG;
17408a0548f9SBharat Bhushan             break;
17418a0548f9SBharat Bhushan         }
17428a0548f9SBharat Bhushan         /* re-enter, this exception was guest-internal */
17438a0548f9SBharat Bhushan         ret = 0;
17448a0548f9SBharat Bhushan         break;
17458a0548f9SBharat Bhushan 
174673aaec4aSJan Kiszka     default:
174773aaec4aSJan Kiszka         fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
174873aaec4aSJan Kiszka         ret = -1;
174973aaec4aSJan Kiszka         break;
1750d76d1650Saurel32     }
1751d76d1650Saurel32 
17524b8523eeSJan Kiszka     qemu_mutex_unlock_iothread();
1753d76d1650Saurel32     return ret;
1754d76d1650Saurel32 }
1755d76d1650Saurel32 
175631f2cb8fSBharat Bhushan int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
175731f2cb8fSBharat Bhushan {
175831f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
175931f2cb8fSBharat Bhushan     uint32_t bits = tsr_bits;
176031f2cb8fSBharat Bhushan     struct kvm_one_reg reg = {
176131f2cb8fSBharat Bhushan         .id = KVM_REG_PPC_OR_TSR,
176231f2cb8fSBharat Bhushan         .addr = (uintptr_t) &bits,
176331f2cb8fSBharat Bhushan     };
176431f2cb8fSBharat Bhushan 
176531f2cb8fSBharat Bhushan     return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
176631f2cb8fSBharat Bhushan }
176731f2cb8fSBharat Bhushan 
176831f2cb8fSBharat Bhushan int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
176931f2cb8fSBharat Bhushan {
177031f2cb8fSBharat Bhushan 
177131f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
177231f2cb8fSBharat Bhushan     uint32_t bits = tsr_bits;
177331f2cb8fSBharat Bhushan     struct kvm_one_reg reg = {
177431f2cb8fSBharat Bhushan         .id = KVM_REG_PPC_CLEAR_TSR,
177531f2cb8fSBharat Bhushan         .addr = (uintptr_t) &bits,
177631f2cb8fSBharat Bhushan     };
177731f2cb8fSBharat Bhushan 
177831f2cb8fSBharat Bhushan     return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
177931f2cb8fSBharat Bhushan }
178031f2cb8fSBharat Bhushan 
178131f2cb8fSBharat Bhushan int kvmppc_set_tcr(PowerPCCPU *cpu)
178231f2cb8fSBharat Bhushan {
178331f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
178431f2cb8fSBharat Bhushan     CPUPPCState *env = &cpu->env;
178531f2cb8fSBharat Bhushan     uint32_t tcr = env->spr[SPR_BOOKE_TCR];
178631f2cb8fSBharat Bhushan 
178731f2cb8fSBharat Bhushan     struct kvm_one_reg reg = {
178831f2cb8fSBharat Bhushan         .id = KVM_REG_PPC_TCR,
178931f2cb8fSBharat Bhushan         .addr = (uintptr_t) &tcr,
179031f2cb8fSBharat Bhushan     };
179131f2cb8fSBharat Bhushan 
179231f2cb8fSBharat Bhushan     return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
179331f2cb8fSBharat Bhushan }
179431f2cb8fSBharat Bhushan 
179531f2cb8fSBharat Bhushan int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu)
179631f2cb8fSBharat Bhushan {
179731f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
179831f2cb8fSBharat Bhushan     int ret;
179931f2cb8fSBharat Bhushan 
180031f2cb8fSBharat Bhushan     if (!kvm_enabled()) {
180131f2cb8fSBharat Bhushan         return -1;
180231f2cb8fSBharat Bhushan     }
180331f2cb8fSBharat Bhushan 
180431f2cb8fSBharat Bhushan     if (!cap_ppc_watchdog) {
180531f2cb8fSBharat Bhushan         printf("warning: KVM does not support watchdog\n");
180631f2cb8fSBharat Bhushan         return -1;
180731f2cb8fSBharat Bhushan     }
180831f2cb8fSBharat Bhushan 
180948add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_BOOKE_WATCHDOG, 0);
181031f2cb8fSBharat Bhushan     if (ret < 0) {
181131f2cb8fSBharat Bhushan         fprintf(stderr, "%s: couldn't enable KVM_CAP_PPC_BOOKE_WATCHDOG: %s\n",
181231f2cb8fSBharat Bhushan                 __func__, strerror(-ret));
181331f2cb8fSBharat Bhushan         return ret;
181431f2cb8fSBharat Bhushan     }
181531f2cb8fSBharat Bhushan 
181631f2cb8fSBharat Bhushan     return ret;
181731f2cb8fSBharat Bhushan }
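
/*
 * Illustrative usage of the watchdog helpers above (a hedged sketch, not the
 * exact upstream call flow; TSR_ENW/TSR_WIS are assumed names for the BookE
 * watchdog status bits):
 *
 *     if (kvmppc_booke_watchdog_enable(cpu) == 0) {
 *         kvmppc_set_tcr(cpu);                         // push guest TCR
 *         kvmppc_or_tsr_bits(cpu, TSR_ENW | TSR_WIS);  // set status bits
 *         kvmppc_clear_tsr_bits(cpu, TSR_WIS);         // ...or clear them
 *     }
 */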
181831f2cb8fSBharat Bhushan 
1819dc333cd6SAlexander Graf static int read_cpuinfo(const char *field, char *value, int len)
1820dc333cd6SAlexander Graf {
1821dc333cd6SAlexander Graf     FILE *f;
1822dc333cd6SAlexander Graf     int ret = -1;
1823dc333cd6SAlexander Graf     int field_len = strlen(field);
1824dc333cd6SAlexander Graf     char line[512];
1825dc333cd6SAlexander Graf 
1826dc333cd6SAlexander Graf     f = fopen("/proc/cpuinfo", "r");
1827dc333cd6SAlexander Graf     if (!f) {
1828dc333cd6SAlexander Graf         return -1;
1829dc333cd6SAlexander Graf     }
1830dc333cd6SAlexander Graf 
1831dc333cd6SAlexander Graf     do {
1832dc333cd6SAlexander Graf         if (!fgets(line, sizeof(line), f)) {
1833dc333cd6SAlexander Graf             break;
1834dc333cd6SAlexander Graf         }
1835dc333cd6SAlexander Graf         if (!strncmp(line, field, field_len)) {
1836ae215068SJim Meyering             pstrcpy(value, len, line);
1837dc333cd6SAlexander Graf             ret = 0;
1838dc333cd6SAlexander Graf             break;
1839dc333cd6SAlexander Graf         }
1840dc333cd6SAlexander Graf     } while (*line);
1841dc333cd6SAlexander Graf 
1842dc333cd6SAlexander Graf     fclose(f);
1843dc333cd6SAlexander Graf 
1844dc333cd6SAlexander Graf     return ret;
1845dc333cd6SAlexander Graf }
1846dc333cd6SAlexander Graf 
1847dc333cd6SAlexander Graf uint32_t kvmppc_get_tbfreq(void)
1848dc333cd6SAlexander Graf {
1849dc333cd6SAlexander Graf     char line[512];
1850dc333cd6SAlexander Graf     char *ns;
185173bcb24dSRutuja Shah     uint32_t retval = NANOSECONDS_PER_SECOND;
1852dc333cd6SAlexander Graf 
1853dc333cd6SAlexander Graf     if (read_cpuinfo("timebase", line, sizeof(line))) {
1854dc333cd6SAlexander Graf         return retval;
1855dc333cd6SAlexander Graf     }
1856dc333cd6SAlexander Graf 
1857dc333cd6SAlexander Graf     if (!(ns = strchr(line, ':'))) {
1858dc333cd6SAlexander Graf         return retval;
1859dc333cd6SAlexander Graf     }
1860dc333cd6SAlexander Graf 
1861dc333cd6SAlexander Graf     ns++;
1862dc333cd6SAlexander Graf 
1863f9b8e7f6SShraddha Barke     return atoi(ns);
1864ef951443SNikunj A Dadhania }
1865ef951443SNikunj A Dadhania 
1866ef951443SNikunj A Dadhania bool kvmppc_get_host_serial(char **value)
1867ef951443SNikunj A Dadhania {
1868ef951443SNikunj A Dadhania     return g_file_get_contents("/proc/device-tree/system-id", value, NULL,
1869ef951443SNikunj A Dadhania                                NULL);
1870ef951443SNikunj A Dadhania }
1871ef951443SNikunj A Dadhania 
1872ef951443SNikunj A Dadhania bool kvmppc_get_host_model(char **value)
1873ef951443SNikunj A Dadhania {
1874ef951443SNikunj A Dadhania     return g_file_get_contents("/proc/device-tree/model", value, NULL, NULL);
1875dc333cd6SAlexander Graf }
18764513d923SGleb Natapov 
1877eadaada1SAlexander Graf /* Try to find a device tree node for a CPU with clock-frequency property */
1878eadaada1SAlexander Graf static int kvmppc_find_cpu_dt(char *buf, int buf_len)
1879eadaada1SAlexander Graf {
1880eadaada1SAlexander Graf     struct dirent *dirp;
1881eadaada1SAlexander Graf     DIR *dp;
1882eadaada1SAlexander Graf 
1883eadaada1SAlexander Graf     if ((dp = opendir(PROC_DEVTREE_CPU)) == NULL) {
1884eadaada1SAlexander Graf         printf("Can't open directory " PROC_DEVTREE_CPU "\n");
1885eadaada1SAlexander Graf         return -1;
1886eadaada1SAlexander Graf     }
1887eadaada1SAlexander Graf 
1888eadaada1SAlexander Graf     buf[0] = '\0';
1889eadaada1SAlexander Graf     while ((dirp = readdir(dp)) != NULL) {
1890eadaada1SAlexander Graf         FILE *f;
1891eadaada1SAlexander Graf         snprintf(buf, buf_len, "%s%s/clock-frequency", PROC_DEVTREE_CPU,
1892eadaada1SAlexander Graf                  dirp->d_name);
1893eadaada1SAlexander Graf         f = fopen(buf, "r");
1894eadaada1SAlexander Graf         if (f) {
1895eadaada1SAlexander Graf             snprintf(buf, buf_len, "%s%s", PROC_DEVTREE_CPU, dirp->d_name);
1896eadaada1SAlexander Graf             fclose(f);
1897eadaada1SAlexander Graf             break;
1898eadaada1SAlexander Graf         }
1899eadaada1SAlexander Graf         buf[0] = '\0';
1900eadaada1SAlexander Graf     }
1901eadaada1SAlexander Graf     closedir(dp);
1902eadaada1SAlexander Graf     if (buf[0] == '\0') {
1903eadaada1SAlexander Graf         printf("Unknown host!\n");
1904eadaada1SAlexander Graf         return -1;
1905eadaada1SAlexander Graf     }
1906eadaada1SAlexander Graf 
1907eadaada1SAlexander Graf     return 0;
1908eadaada1SAlexander Graf }
1909eadaada1SAlexander Graf 
19107d94a30bSSukadev Bhattiprolu static uint64_t kvmppc_read_int_dt(const char *filename)
1911eadaada1SAlexander Graf {
19129bc884b7SDavid Gibson     union {
19139bc884b7SDavid Gibson         uint32_t v32;
19149bc884b7SDavid Gibson         uint64_t v64;
19159bc884b7SDavid Gibson     } u;
1916eadaada1SAlexander Graf     FILE *f;
1917eadaada1SAlexander Graf     int len;
1918eadaada1SAlexander Graf 
19197d94a30bSSukadev Bhattiprolu     f = fopen(filename, "rb");
1920eadaada1SAlexander Graf     if (!f) {
1921eadaada1SAlexander Graf         return -1;
1922eadaada1SAlexander Graf     }
1923eadaada1SAlexander Graf 
19249bc884b7SDavid Gibson     len = fread(&u, 1, sizeof(u), f);
1925eadaada1SAlexander Graf     fclose(f);
1926eadaada1SAlexander Graf     switch (len) {
19279bc884b7SDavid Gibson     case 4:
19289bc884b7SDavid Gibson         /* property is a 32-bit quantity */
19299bc884b7SDavid Gibson         return be32_to_cpu(u.v32);
19309bc884b7SDavid Gibson     case 8:
19319bc884b7SDavid Gibson         return be64_to_cpu(u.v64);
1932eadaada1SAlexander Graf     }
1933eadaada1SAlexander Graf 
1934eadaada1SAlexander Graf     return 0;
1935eadaada1SAlexander Graf }
1936eadaada1SAlexander Graf 
19377d94a30bSSukadev Bhattiprolu /* Read a CPU node property from the host device tree that's a single
19387d94a30bSSukadev Bhattiprolu  * integer (32-bit or 64-bit).  Returns 0 or -1 if anything goes wrong
19397d94a30bSSukadev Bhattiprolu  * (can't find or open the property, or doesn't understand the
19407d94a30bSSukadev Bhattiprolu  * format). */
19417d94a30bSSukadev Bhattiprolu static uint64_t kvmppc_read_int_cpu_dt(const char *propname)
19427d94a30bSSukadev Bhattiprolu {
19437d94a30bSSukadev Bhattiprolu     char buf[PATH_MAX], *tmp;
19447d94a30bSSukadev Bhattiprolu     uint64_t val;
19457d94a30bSSukadev Bhattiprolu 
19467d94a30bSSukadev Bhattiprolu     if (kvmppc_find_cpu_dt(buf, sizeof(buf))) {
19477d94a30bSSukadev Bhattiprolu         return -1;
19487d94a30bSSukadev Bhattiprolu     }
19497d94a30bSSukadev Bhattiprolu 
19507d94a30bSSukadev Bhattiprolu     tmp = g_strdup_printf("%s/%s", buf, propname);
19517d94a30bSSukadev Bhattiprolu     val = kvmppc_read_int_dt(tmp);
19527d94a30bSSukadev Bhattiprolu     g_free(tmp);
19537d94a30bSSukadev Bhattiprolu 
19547d94a30bSSukadev Bhattiprolu     return val;
19557d94a30bSSukadev Bhattiprolu }
19567d94a30bSSukadev Bhattiprolu 
19579bc884b7SDavid Gibson uint64_t kvmppc_get_clockfreq(void)
19589bc884b7SDavid Gibson {
19599bc884b7SDavid Gibson     return kvmppc_read_int_cpu_dt("clock-frequency");
19609bc884b7SDavid Gibson }
19619bc884b7SDavid Gibson 
19626659394fSDavid Gibson uint32_t kvmppc_get_vmx(void)
19636659394fSDavid Gibson {
19646659394fSDavid Gibson     return kvmppc_read_int_cpu_dt("ibm,vmx");
19656659394fSDavid Gibson }
19666659394fSDavid Gibson 
19676659394fSDavid Gibson uint32_t kvmppc_get_dfp(void)
19686659394fSDavid Gibson {
19696659394fSDavid Gibson     return kvmppc_read_int_cpu_dt("ibm,dfp");
19706659394fSDavid Gibson }
19716659394fSDavid Gibson 
19721a61a9aeSStuart Yoder static int kvmppc_get_pvinfo(CPUPPCState *env, struct kvm_ppc_pvinfo *pvinfo)
197345024f09SAlexander Graf {
1974a60f24b5SAndreas Färber     PowerPCCPU *cpu = ppc_env_get_cpu(env);
1975a60f24b5SAndreas Färber     CPUState *cs = CPU(cpu);
197645024f09SAlexander Graf 
19776fd33a75SAlexander Graf     if (kvm_vm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
19781a61a9aeSStuart Yoder         !kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_PVINFO, pvinfo)) {
19791a61a9aeSStuart Yoder         return 0;
19801a61a9aeSStuart Yoder     }
198145024f09SAlexander Graf 
19821a61a9aeSStuart Yoder     return 1;
19831a61a9aeSStuart Yoder }
19841a61a9aeSStuart Yoder 
19851a61a9aeSStuart Yoder int kvmppc_get_hasidle(CPUPPCState *env)
19861a61a9aeSStuart Yoder {
19871a61a9aeSStuart Yoder     struct kvm_ppc_pvinfo pvinfo;
19881a61a9aeSStuart Yoder 
19891a61a9aeSStuart Yoder     if (!kvmppc_get_pvinfo(env, &pvinfo) &&
19901a61a9aeSStuart Yoder         (pvinfo.flags & KVM_PPC_PVINFO_FLAGS_EV_IDLE)) {
19911a61a9aeSStuart Yoder         return 1;
19921a61a9aeSStuart Yoder     }
19931a61a9aeSStuart Yoder 
19941a61a9aeSStuart Yoder     return 0;
19951a61a9aeSStuart Yoder }
19961a61a9aeSStuart Yoder 
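/* Copy the hypercall instruction sequence the guest should use into buf.
 * Returns 0 when the kernel supplied one via KVM_PPC_GET_PVINFO, or 1 when
 * we fall back to the always-failing sequence constructed below. */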
19971a61a9aeSStuart Yoder int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
19981a61a9aeSStuart Yoder {
19991a61a9aeSStuart Yoder     uint32_t *hc = (uint32_t*)buf;
20001a61a9aeSStuart Yoder     struct kvm_ppc_pvinfo pvinfo;
20011a61a9aeSStuart Yoder 
20021a61a9aeSStuart Yoder     if (!kvmppc_get_pvinfo(env, &pvinfo)) {
20031a61a9aeSStuart Yoder         memcpy(buf, pvinfo.hcall, buf_len);
200445024f09SAlexander Graf         return 0;
200545024f09SAlexander Graf     }
200645024f09SAlexander Graf 
200745024f09SAlexander Graf     /*
2008d13fc32eSAlexander Graf      * Fallback to always fail hypercalls regardless of endianness:
200945024f09SAlexander Graf      *
2010d13fc32eSAlexander Graf      *     tdi 0,r0,72 (becomes b .+8 in wrong endian, nop in good endian)
201145024f09SAlexander Graf      *     li r3, -1
2012d13fc32eSAlexander Graf      *     b .+8       (becomes nop in wrong endian)
2013d13fc32eSAlexander Graf      *     bswap32(li r3, -1)
201445024f09SAlexander Graf      */
201545024f09SAlexander Graf 
2016d13fc32eSAlexander Graf     hc[0] = cpu_to_be32(0x08000048);
2017d13fc32eSAlexander Graf     hc[1] = cpu_to_be32(0x3860ffff);
2018d13fc32eSAlexander Graf     hc[2] = cpu_to_be32(0x48000008);
2019d13fc32eSAlexander Graf     hc[3] = cpu_to_be32(bswap32(0x3860ffff));
202045024f09SAlexander Graf 
20210ddbd053SAlexey Kardashevskiy     return 1;
202245024f09SAlexander Graf }
202345024f09SAlexander Graf 
2024026bfd89SDavid Gibson static inline int kvmppc_enable_hcall(KVMState *s, target_ulong hcall)
2025026bfd89SDavid Gibson {
2026026bfd89SDavid Gibson     return kvm_vm_enable_cap(s, KVM_CAP_PPC_ENABLE_HCALL, 0, hcall, 1);
2027026bfd89SDavid Gibson }
2028026bfd89SDavid Gibson 
2029026bfd89SDavid Gibson void kvmppc_enable_logical_ci_hcalls(void)
2030026bfd89SDavid Gibson {
2031026bfd89SDavid Gibson     /*
2032026bfd89SDavid Gibson      * FIXME: it would be nice if we could detect the cases where
2033026bfd89SDavid Gibson      * we're using a device which requires the in-kernel
2034026bfd89SDavid Gibson      * implementation of these hcalls but the kernel lacks them,
2035026bfd89SDavid Gibson      * and produce a warning.
2036026bfd89SDavid Gibson      */
2037026bfd89SDavid Gibson     kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_LOAD);
2038026bfd89SDavid Gibson     kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_STORE);
2039026bfd89SDavid Gibson }
2040026bfd89SDavid Gibson 
2041ef9971ddSAlexey Kardashevskiy void kvmppc_enable_set_mode_hcall(void)
2042ef9971ddSAlexey Kardashevskiy {
2043ef9971ddSAlexey Kardashevskiy     kvmppc_enable_hcall(kvm_state, H_SET_MODE);
2044ef9971ddSAlexey Kardashevskiy }
2045ef9971ddSAlexey Kardashevskiy 
20465145ad4fSNathan Whitehorn void kvmppc_enable_clear_ref_mod_hcalls(void)
20475145ad4fSNathan Whitehorn {
20485145ad4fSNathan Whitehorn     kvmppc_enable_hcall(kvm_state, H_CLEAR_REF);
20495145ad4fSNathan Whitehorn     kvmppc_enable_hcall(kvm_state, H_CLEAR_MOD);
20505145ad4fSNathan Whitehorn }
20515145ad4fSNathan Whitehorn 
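/*
 * Hedged usage sketch (not from the original file): the sPAPR machine
 * start-up path is expected to enable the in-kernel hcalls it relies on
 * before the guest runs; the wrapper name below is an assumption.
 */
static void G_GNUC_UNUSED example_enable_spapr_hcalls(void)
{
    kvmppc_enable_logical_ci_hcalls();
    kvmppc_enable_set_mode_hcall();
    kvmppc_enable_clear_ref_mod_hcalls();
}
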
20521bc22652SAndreas Färber void kvmppc_set_papr(PowerPCCPU *cpu)
2053f61b4bedSAlexander Graf {
20541bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
2055f61b4bedSAlexander Graf     int ret;
2056f61b4bedSAlexander Graf 
205748add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_PAPR, 0);
2058f61b4bedSAlexander Graf     if (ret) {
2059072ed5f2SThomas Huth         error_report("This vCPU type or KVM version does not support PAPR");
2060072ed5f2SThomas Huth         exit(1);
2061f61b4bedSAlexander Graf     }
20629b00ea49SDavid Gibson 
20639b00ea49SDavid Gibson     /* Update the capability flag so we sync the right information
20649b00ea49SDavid Gibson      * with KVM */
20659b00ea49SDavid Gibson     cap_papr = 1;
2066f1af19d7SDavid Gibson }
2067f61b4bedSAlexander Graf 
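/*
 * Sketch only (assumed call site): with KVM enabled, each sPAPR vCPU is
 * switched into PAPR mode as part of its initialisation.
 */
static void G_GNUC_UNUSED example_papr_vcpu_init(PowerPCCPU *cpu)
{
    if (kvm_enabled()) {
        kvmppc_set_papr(cpu);
    }
}
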
2068d6e166c0SDavid Gibson int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr)
20696db5bb0fSAlexey Kardashevskiy {
2070d6e166c0SDavid Gibson     return kvm_set_one_reg(CPU(cpu), KVM_REG_PPC_ARCH_COMPAT, &compat_pvr);
20716db5bb0fSAlexey Kardashevskiy }
20726db5bb0fSAlexey Kardashevskiy 
20735b95b8b9SAlexander Graf void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy)
20745b95b8b9SAlexander Graf {
20755b95b8b9SAlexander Graf     CPUState *cs = CPU(cpu);
20765b95b8b9SAlexander Graf     int ret;
20775b95b8b9SAlexander Graf 
207848add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_EPR, 0, mpic_proxy);
20795b95b8b9SAlexander Graf     if (ret && mpic_proxy) {
2080072ed5f2SThomas Huth         error_report("This KVM version does not support EPR");
2081072ed5f2SThomas Huth         exit(1);
20825b95b8b9SAlexander Graf     }
20835b95b8b9SAlexander Graf }
20845b95b8b9SAlexander Graf 
2085e97c3636SDavid Gibson int kvmppc_smt_threads(void)
2086e97c3636SDavid Gibson {
2087e97c3636SDavid Gibson     return cap_ppc_smt ? cap_ppc_smt : 1;
2088e97c3636SDavid Gibson }
2089e97c3636SDavid Gibson 
20907f763a5dSDavid Gibson #ifdef TARGET_PPC64
2091658fa66bSAlexey Kardashevskiy off_t kvmppc_alloc_rma(void **rma)
2092354ac20aSDavid Gibson {
2093354ac20aSDavid Gibson     off_t size;
2094354ac20aSDavid Gibson     int fd;
2095354ac20aSDavid Gibson     struct kvm_allocate_rma ret;
2096354ac20aSDavid Gibson 
2097354ac20aSDavid Gibson     /* If cap_ppc_rma == 0, contiguous RMA allocation is not supported;
2098354ac20aSDavid Gibson      * if cap_ppc_rma == 1, contiguous RMA allocation is supported, but
2099354ac20aSDavid Gibson      *                      not necessary on this hardware;
2100354ac20aSDavid Gibson      * if cap_ppc_rma == 2, contiguous RMA allocation is needed on this hardware.
2101354ac20aSDavid Gibson      *
2102354ac20aSDavid Gibson      * FIXME: We should allow the user to force contiguous RMA
2103354ac20aSDavid Gibson      * allocation in the cap_ppc_rma==1 case.
2104354ac20aSDavid Gibson      */
2105354ac20aSDavid Gibson     if (cap_ppc_rma < 2) {
2106354ac20aSDavid Gibson         return 0;
2107354ac20aSDavid Gibson     }
2108354ac20aSDavid Gibson 
2109354ac20aSDavid Gibson     fd = kvm_vm_ioctl(kvm_state, KVM_ALLOCATE_RMA, &ret);
2110354ac20aSDavid Gibson     if (fd < 0) {
2111354ac20aSDavid Gibson         fprintf(stderr, "KVM: Error on KVM_ALLOCATE_RMA: %s\n",
2112354ac20aSDavid Gibson                 strerror(errno));
2113354ac20aSDavid Gibson         return -1;
2114354ac20aSDavid Gibson     }
2115354ac20aSDavid Gibson 
2116354ac20aSDavid Gibson     size = MIN(ret.rma_size, 256ul << 20);
2117354ac20aSDavid Gibson 
2118658fa66bSAlexey Kardashevskiy     *rma = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
2119658fa66bSAlexey Kardashevskiy     if (*rma == MAP_FAILED) {
2120354ac20aSDavid Gibson         fprintf(stderr, "KVM: Error mapping RMA: %s\n", strerror(errno));
2121354ac20aSDavid Gibson         return -1;
2122354ac20aSDavid Gibson     }
2123354ac20aSDavid Gibson 
2124354ac20aSDavid Gibson     return size;
2125354ac20aSDavid Gibson }
2126354ac20aSDavid Gibson 
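/*
 * Illustrative caller, with hedged assumptions about the machine code:
 * request a contiguous RMA and fall back to ordinary guest RAM when the
 * kernel does not need one (kvmppc_alloc_rma() returns 0 in that case).
 */
static void G_GNUC_UNUSED example_setup_rma(void)
{
    void *rma = NULL;
    off_t rma_size = kvmppc_alloc_rma(&rma);

    if (rma_size < 0) {
        /* allocation failed */
    } else if (rma_size == 0) {
        /* no dedicated RMA required; back it with normal RAM */
    } else {
        /* map the rma_size bytes at 'rma' at guest real address 0 */
    }
}
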
21277f763a5dSDavid Gibson uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift)
21287f763a5dSDavid Gibson {
2129f36951c1SDavid Gibson     struct kvm_ppc_smmu_info info;
2130f36951c1SDavid Gibson     long rampagesize, best_page_shift;
2131f36951c1SDavid Gibson     int i;
2132f36951c1SDavid Gibson 
21337f763a5dSDavid Gibson     if (cap_ppc_rma >= 2) {
21347f763a5dSDavid Gibson         return current_size;
21357f763a5dSDavid Gibson     }
2136f36951c1SDavid Gibson 
2137f36951c1SDavid Gibson     /* Find the largest hardware-supported page size that's less than
2138f36951c1SDavid Gibson      * or equal to the (logical) backing page size of guest RAM */
2139182735efSAndreas Färber     kvm_get_smmu_info(POWERPC_CPU(first_cpu), &info);
21409c607668SAlexey Kardashevskiy     rampagesize = qemu_getrampagesize();
2141f36951c1SDavid Gibson     best_page_shift = 0;
2142f36951c1SDavid Gibson 
2143f36951c1SDavid Gibson     for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
2144f36951c1SDavid Gibson         struct kvm_ppc_one_seg_page_size *sps = &info.sps[i];
2145f36951c1SDavid Gibson 
2146f36951c1SDavid Gibson         if (!sps->page_shift) {
2147f36951c1SDavid Gibson             continue;
2148f36951c1SDavid Gibson         }
2149f36951c1SDavid Gibson 
2150f36951c1SDavid Gibson         if ((sps->page_shift > best_page_shift)
2151f36951c1SDavid Gibson             && ((1UL << sps->page_shift) <= rampagesize)) {
2152f36951c1SDavid Gibson             best_page_shift = sps->page_shift;
2153f36951c1SDavid Gibson         }
2154f36951c1SDavid Gibson     }
2155f36951c1SDavid Gibson 
21567f763a5dSDavid Gibson     return MIN(current_size,
2157f36951c1SDavid Gibson                1ULL << (best_page_shift + hash_shift - 7));
21587f763a5dSDavid Gibson }
21597f763a5dSDavid Gibson #endif
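
/*
 * Worked example of the clamp in kvmppc_rma_size() above (numbers are
 * illustrative, not from the original file): with a 16 MiB hash table
 * (hash_shift = 24) and 64 KiB backing pages (best_page_shift = 16), the
 * limit is 1ULL << (16 + 24 - 7) = 8 GiB, so any larger requested RMA is
 * reduced to that value.
 */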
21607f763a5dSDavid Gibson 
2161da95324eSAlexey Kardashevskiy bool kvmppc_spapr_use_multitce(void)
2162da95324eSAlexey Kardashevskiy {
2163da95324eSAlexey Kardashevskiy     return cap_spapr_multitce;
2164da95324eSAlexey Kardashevskiy }
2165da95324eSAlexey Kardashevskiy 
2166d6ee2a7cSAlexey Kardashevskiy void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift,
2167d6ee2a7cSAlexey Kardashevskiy                               uint64_t bus_offset, uint32_t nb_table,
2168d6ee2a7cSAlexey Kardashevskiy                               int *pfd, bool need_vfio)
21690f5cb298SDavid Gibson {
21700f5cb298SDavid Gibson     long len;
21710f5cb298SDavid Gibson     int fd;
21720f5cb298SDavid Gibson     void *table;
21730f5cb298SDavid Gibson 
2174b5aec396SDavid Gibson     /* Must set fd to -1 so we don't try to munmap when called for
2175b5aec396SDavid Gibson      * destroying the table, which the upper layers -will- do
2176b5aec396SDavid Gibson      */
2177b5aec396SDavid Gibson     *pfd = -1;
21786a81dd17SDavid Gibson     if (!cap_spapr_tce || (need_vfio && !cap_spapr_vfio)) {
21790f5cb298SDavid Gibson         return NULL;
21800f5cb298SDavid Gibson     }
21810f5cb298SDavid Gibson 
2182d6ee2a7cSAlexey Kardashevskiy     if (cap_spapr_tce_64) {
2183d6ee2a7cSAlexey Kardashevskiy         struct kvm_create_spapr_tce_64 args = {
2184d6ee2a7cSAlexey Kardashevskiy             .liobn = liobn,
2185d6ee2a7cSAlexey Kardashevskiy             .page_shift = page_shift,
2186d6ee2a7cSAlexey Kardashevskiy             .offset = bus_offset >> page_shift,
2187d6ee2a7cSAlexey Kardashevskiy             .size = nb_table,
2188d6ee2a7cSAlexey Kardashevskiy             .flags = 0
2189d6ee2a7cSAlexey Kardashevskiy         };
2190d6ee2a7cSAlexey Kardashevskiy         fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE_64, &args);
2191d6ee2a7cSAlexey Kardashevskiy         if (fd < 0) {
2192d6ee2a7cSAlexey Kardashevskiy             fprintf(stderr,
2193d6ee2a7cSAlexey Kardashevskiy                     "KVM: Failed to create TCE64 table for liobn 0x%x\n",
2194d6ee2a7cSAlexey Kardashevskiy                     liobn);
2195d6ee2a7cSAlexey Kardashevskiy             return NULL;
2196d6ee2a7cSAlexey Kardashevskiy         }
2197d6ee2a7cSAlexey Kardashevskiy     } else if (cap_spapr_tce) {
2198d6ee2a7cSAlexey Kardashevskiy         uint64_t window_size = (uint64_t) nb_table << page_shift;
2199d6ee2a7cSAlexey Kardashevskiy         struct kvm_create_spapr_tce args = {
2200d6ee2a7cSAlexey Kardashevskiy             .liobn = liobn,
2201d6ee2a7cSAlexey Kardashevskiy             .window_size = window_size,
2202d6ee2a7cSAlexey Kardashevskiy         };
2203d6ee2a7cSAlexey Kardashevskiy         if ((window_size != args.window_size) || bus_offset) {
2204d6ee2a7cSAlexey Kardashevskiy             return NULL;
2205d6ee2a7cSAlexey Kardashevskiy         }
22060f5cb298SDavid Gibson         fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE, &args);
22070f5cb298SDavid Gibson         if (fd < 0) {
2208b5aec396SDavid Gibson             fprintf(stderr, "KVM: Failed to create TCE table for liobn 0x%x\n",
2209b5aec396SDavid Gibson                     liobn);
22100f5cb298SDavid Gibson             return NULL;
22110f5cb298SDavid Gibson         }
2212d6ee2a7cSAlexey Kardashevskiy     } else {
2213d6ee2a7cSAlexey Kardashevskiy         return NULL;
2214d6ee2a7cSAlexey Kardashevskiy     }
22150f5cb298SDavid Gibson 
2216d6ee2a7cSAlexey Kardashevskiy     len = nb_table * sizeof(uint64_t);
22170f5cb298SDavid Gibson     /* FIXME: round this up to page size */
22180f5cb298SDavid Gibson 
221974b41e56SDavid Gibson     table = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
22200f5cb298SDavid Gibson     if (table == MAP_FAILED) {
2221b5aec396SDavid Gibson         fprintf(stderr, "KVM: Failed to map TCE table for liobn 0x%x\n",
2222b5aec396SDavid Gibson                 liobn);
22230f5cb298SDavid Gibson         close(fd);
22240f5cb298SDavid Gibson         return NULL;
22250f5cb298SDavid Gibson     }
22260f5cb298SDavid Gibson 
22270f5cb298SDavid Gibson     *pfd = fd;
22280f5cb298SDavid Gibson     return table;
22290f5cb298SDavid Gibson }
22300f5cb298SDavid Gibson 
2231523e7b8aSAlexey Kardashevskiy int kvmppc_remove_spapr_tce(void *table, int fd, uint32_t nb_table)
22320f5cb298SDavid Gibson {
22330f5cb298SDavid Gibson     long len;
22340f5cb298SDavid Gibson 
22350f5cb298SDavid Gibson     if (fd < 0) {
22360f5cb298SDavid Gibson         return -1;
22370f5cb298SDavid Gibson     }
22380f5cb298SDavid Gibson 
2239523e7b8aSAlexey Kardashevskiy     len = nb_table * sizeof(uint64_t);
22400f5cb298SDavid Gibson     if ((munmap(table, len) < 0) ||
22410f5cb298SDavid Gibson         (close(fd) < 0)) {
2242b5aec396SDavid Gibson         fprintf(stderr, "KVM: Unexpected error removing TCE table: %s",
2243b5aec396SDavid Gibson                 strerror(errno));
22440f5cb298SDavid Gibson         /* Leak the table */
22450f5cb298SDavid Gibson     }
22460f5cb298SDavid Gibson 
22470f5cb298SDavid Gibson     return 0;
22480f5cb298SDavid Gibson }
22490f5cb298SDavid Gibson 
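/*
 * Illustration with assumed parameters (liobn and window geometry): create
 * a TCE table for a 2 GiB DMA window of 64 KiB pages and tear it down
 * again.
 */
static void G_GNUC_UNUSED example_tce_table(void)
{
    uint32_t nb_table = (2ULL << 30) >> 16;   /* number of 64 KiB entries */
    int fd;
    void *table = kvmppc_create_spapr_tce(0x80000000, 16, 0, nb_table,
                                          &fd, false);

    if (table) {
        /* ... hand the table over to the sPAPR IOMMU model ... */
        kvmppc_remove_spapr_tce(table, fd, nb_table);
    }
}
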
22507f763a5dSDavid Gibson int kvmppc_reset_htab(int shift_hint)
22517f763a5dSDavid Gibson {
22527f763a5dSDavid Gibson     uint32_t shift = shift_hint;
22537f763a5dSDavid Gibson 
2254ace9a2cbSDavid Gibson     if (!kvm_enabled()) {
2255ace9a2cbSDavid Gibson         /* Full emulation, tell caller to allocate htab itself */
2256ace9a2cbSDavid Gibson         return 0;
2257ace9a2cbSDavid Gibson     }
2258ace9a2cbSDavid Gibson     if (kvm_check_extension(kvm_state, KVM_CAP_PPC_ALLOC_HTAB)) {
22597f763a5dSDavid Gibson         int ret;
22607f763a5dSDavid Gibson         ret = kvm_vm_ioctl(kvm_state, KVM_PPC_ALLOCATE_HTAB, &shift);
2261ace9a2cbSDavid Gibson         if (ret == -ENOTTY) {
2262ace9a2cbSDavid Gibson             /* At least some versions of PR KVM advertise the
2263ace9a2cbSDavid Gibson              * capability, but don't implement the ioctl().  Oops.
2264ace9a2cbSDavid Gibson              * Return 0 so that we allocate the htab in qemu, as is
2265ace9a2cbSDavid Gibson              * correct for PR. */
2266ace9a2cbSDavid Gibson             return 0;
2267ace9a2cbSDavid Gibson         } else if (ret < 0) {
22687f763a5dSDavid Gibson             return ret;
22697f763a5dSDavid Gibson         }
22707f763a5dSDavid Gibson         return shift;
22717f763a5dSDavid Gibson     }
22727f763a5dSDavid Gibson 
2273ace9a2cbSDavid Gibson     /* We have a kernel that predates the htab reset calls.  For PR
2274ace9a2cbSDavid Gibson      * KVM, we need to allocate the htab ourselves; an HV KVM of this
227596c9cff0SThomas Huth      * era will have allocated a 16MB fixed-size hash table already. */
227696c9cff0SThomas Huth     if (kvmppc_is_pr(kvm_state)) {
2277ace9a2cbSDavid Gibson         /* PR - tell caller to allocate htab */
22787f763a5dSDavid Gibson         return 0;
2279ace9a2cbSDavid Gibson     } else {
2280ace9a2cbSDavid Gibson         /* HV - assume 16MB kernel allocated htab */
2281ace9a2cbSDavid Gibson         return 24;
2282ace9a2cbSDavid Gibson     }
22837f763a5dSDavid Gibson }
22847f763a5dSDavid Gibson 
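/*
 * Hedged sketch of a caller honouring the return convention above: 0 means
 * "allocate the HPT in QEMU", a positive value is the shift of a
 * kernel-owned HPT, and a negative value is an error.
 */
static void G_GNUC_UNUSED example_setup_htab(int shift_hint)
{
    int shift = kvmppc_reset_htab(shift_hint);

    if (shift > 0) {
        /* the kernel allocated a hash table of 1ULL << shift bytes */
    } else if (shift == 0) {
        /* allocate and manage the hash table in userspace */
    } else {
        /* error from KVM_PPC_ALLOCATE_HTAB */
    }
}
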
2285a1e98583SDavid Gibson static inline uint32_t mfpvr(void)
2286a1e98583SDavid Gibson {
2287a1e98583SDavid Gibson     uint32_t pvr;
2288a1e98583SDavid Gibson 
2289a1e98583SDavid Gibson     asm ("mfpvr %0"
2290a1e98583SDavid Gibson          : "=r"(pvr));
2291a1e98583SDavid Gibson     return pvr;
2292a1e98583SDavid Gibson }
2293a1e98583SDavid Gibson 
2294a7342588SDavid Gibson static void alter_insns(uint64_t *word, uint64_t flags, bool on)
2295a7342588SDavid Gibson {
2296a7342588SDavid Gibson     if (on) {
2297a7342588SDavid Gibson         *word |= flags;
2298a7342588SDavid Gibson     } else {
2299a7342588SDavid Gibson         *word &= ~flags;
2300a7342588SDavid Gibson     }
2301a7342588SDavid Gibson }
2302a7342588SDavid Gibson 
23032985b86bSAndreas Färber static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data)
23042985b86bSAndreas Färber {
23052985b86bSAndreas Färber     PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
2306a7342588SDavid Gibson     uint32_t vmx = kvmppc_get_vmx();
2307a7342588SDavid Gibson     uint32_t dfp = kvmppc_get_dfp();
23080cbad81fSDavid Gibson     uint32_t dcache_size = kvmppc_read_int_cpu_dt("d-cache-size");
23090cbad81fSDavid Gibson     uint32_t icache_size = kvmppc_read_int_cpu_dt("i-cache-size");
2310a1e98583SDavid Gibson 
2311cfe34f44SAndreas Färber     /* Now fix up the class with information we can query from the host */
23123bc9ccc0SAlexey Kardashevskiy     pcc->pvr = mfpvr();
2313a7342588SDavid Gibson 
231470bca53fSAlexander Graf     if (vmx != -1) {
231570bca53fSAlexander Graf         /* Only override when we know what the host supports */
2316cfe34f44SAndreas Färber         alter_insns(&pcc->insns_flags, PPC_ALTIVEC, vmx > 0);
2317cfe34f44SAndreas Färber         alter_insns(&pcc->insns_flags2, PPC2_VSX, vmx > 1);
231870bca53fSAlexander Graf     }
231970bca53fSAlexander Graf     if (dfp != -1) {
232070bca53fSAlexander Graf         /* Only override when we know what the host supports */
2321cfe34f44SAndreas Färber         alter_insns(&pcc->insns_flags2, PPC2_DFP, dfp);
232270bca53fSAlexander Graf     }
23230cbad81fSDavid Gibson 
23240cbad81fSDavid Gibson     if (dcache_size != -1) {
23250cbad81fSDavid Gibson         pcc->l1_dcache_size = dcache_size;
23260cbad81fSDavid Gibson     }
23270cbad81fSDavid Gibson 
23280cbad81fSDavid Gibson     if (icache_size != -1) {
23290cbad81fSDavid Gibson         pcc->l1_icache_size = icache_size;
23300cbad81fSDavid Gibson     }
2331*c64abd1fSSam Bobroff 
2332*c64abd1fSSam Bobroff #if defined(TARGET_PPC64)
2333*c64abd1fSSam Bobroff     pcc->radix_page_info = kvm_get_radix_page_info();
2334*c64abd1fSSam Bobroff #endif /* defined(TARGET_PPC64) */
2335a1e98583SDavid Gibson }
2336a1e98583SDavid Gibson 
23373b961124SStuart Yoder bool kvmppc_has_cap_epr(void)
23383b961124SStuart Yoder {
23393b961124SStuart Yoder     return cap_epr;
23403b961124SStuart Yoder }
23413b961124SStuart Yoder 
23427c43bca0SAneesh Kumar K.V bool kvmppc_has_cap_htab_fd(void)
23437c43bca0SAneesh Kumar K.V {
23447c43bca0SAneesh Kumar K.V     return cap_htab_fd;
23457c43bca0SAneesh Kumar K.V }
23467c43bca0SAneesh Kumar K.V 
234787a91de6SAlexander Graf bool kvmppc_has_cap_fixup_hcalls(void)
234887a91de6SAlexander Graf {
234987a91de6SAlexander Graf     return cap_fixup_hcalls;
235087a91de6SAlexander Graf }
235187a91de6SAlexander Graf 
2352bac3bf28SThomas Huth bool kvmppc_has_cap_htm(void)
2353bac3bf28SThomas Huth {
2354bac3bf28SThomas Huth     return cap_htm;
2355bac3bf28SThomas Huth }
2356bac3bf28SThomas Huth 
23575b79b1caSAlexey Kardashevskiy static PowerPCCPUClass *ppc_cpu_get_family_class(PowerPCCPUClass *pcc)
23585b79b1caSAlexey Kardashevskiy {
23595b79b1caSAlexey Kardashevskiy     ObjectClass *oc = OBJECT_CLASS(pcc);
23605b79b1caSAlexey Kardashevskiy 
23615b79b1caSAlexey Kardashevskiy     while (oc && !object_class_is_abstract(oc)) {
23625b79b1caSAlexey Kardashevskiy         oc = object_class_get_parent(oc);
23635b79b1caSAlexey Kardashevskiy     }
23645b79b1caSAlexey Kardashevskiy     assert(oc);
23655b79b1caSAlexey Kardashevskiy 
23665b79b1caSAlexey Kardashevskiy     return POWERPC_CPU_CLASS(oc);
23675b79b1caSAlexey Kardashevskiy }
23685b79b1caSAlexey Kardashevskiy 
236952b2519cSThomas Huth PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void)
237052b2519cSThomas Huth {
237152b2519cSThomas Huth     uint32_t host_pvr = mfpvr();
237252b2519cSThomas Huth     PowerPCCPUClass *pvr_pcc;
237352b2519cSThomas Huth 
237452b2519cSThomas Huth     pvr_pcc = ppc_cpu_class_by_pvr(host_pvr);
237552b2519cSThomas Huth     if (pvr_pcc == NULL) {
237652b2519cSThomas Huth         pvr_pcc = ppc_cpu_class_by_pvr_mask(host_pvr);
237752b2519cSThomas Huth     }
237852b2519cSThomas Huth 
237952b2519cSThomas Huth     return pvr_pcc;
238052b2519cSThomas Huth }
238152b2519cSThomas Huth 
23825ba4576bSAndreas Färber static int kvm_ppc_register_host_cpu_type(void)
23835ba4576bSAndreas Färber {
23845ba4576bSAndreas Färber     TypeInfo type_info = {
23855ba4576bSAndreas Färber         .name = TYPE_HOST_POWERPC_CPU,
23865ba4576bSAndreas Färber         .class_init = kvmppc_host_cpu_class_init,
23875ba4576bSAndreas Färber     };
23885ba4576bSAndreas Färber     PowerPCCPUClass *pvr_pcc;
23895b79b1caSAlexey Kardashevskiy     DeviceClass *dc;
2390715d4b96SThomas Huth     int i;
23915ba4576bSAndreas Färber 
239252b2519cSThomas Huth     pvr_pcc = kvm_ppc_get_host_cpu_class();
23933bc9ccc0SAlexey Kardashevskiy     if (pvr_pcc == NULL) {
23945ba4576bSAndreas Färber         return -1;
23955ba4576bSAndreas Färber     }
23965ba4576bSAndreas Färber     type_info.parent = object_class_get_name(OBJECT_CLASS(pvr_pcc));
23975ba4576bSAndreas Färber     type_register(&type_info);
23985b79b1caSAlexey Kardashevskiy 
23993b542549SBharata B Rao #if defined(TARGET_PPC64)
24003b542549SBharata B Rao     type_info.name = g_strdup_printf("%s-"TYPE_SPAPR_CPU_CORE, "host");
24013b542549SBharata B Rao     type_info.parent = TYPE_SPAPR_CPU_CORE;
24027ebaf795SBharata B Rao     type_info.instance_size = sizeof(sPAPRCPUCore);
24037ebaf795SBharata B Rao     type_info.instance_init = NULL;
24047ebaf795SBharata B Rao     type_info.class_init = spapr_cpu_core_class_init;
24057ebaf795SBharata B Rao     type_info.class_data = (void *) "host";
24063b542549SBharata B Rao     type_register(&type_info);
24073b542549SBharata B Rao     g_free((void *)type_info.name);
24083b542549SBharata B Rao #endif
24093b542549SBharata B Rao 
2410715d4b96SThomas Huth     /*
2411715d4b96SThomas Huth      * Update generic CPU family class alias (e.g. on a POWER8NVL host,
2412715d4b96SThomas Huth      * we want "POWER8" to be a "family" alias that points to the current
2413715d4b96SThomas Huth      * host CPU type, too)
2414715d4b96SThomas Huth      */
2415715d4b96SThomas Huth     dc = DEVICE_CLASS(ppc_cpu_get_family_class(pvr_pcc));
2416715d4b96SThomas Huth     for (i = 0; ppc_cpu_aliases[i].alias != NULL; i++) {
2417715d4b96SThomas Huth         if (strcmp(ppc_cpu_aliases[i].alias, dc->desc) == 0) {
2418715d4b96SThomas Huth             ObjectClass *oc = OBJECT_CLASS(pvr_pcc);
2419715d4b96SThomas Huth             char *suffix;
2420715d4b96SThomas Huth 
2421715d4b96SThomas Huth             ppc_cpu_aliases[i].model = g_strdup(object_class_get_name(oc));
2422715d4b96SThomas Huth             suffix = strstr(ppc_cpu_aliases[i].model, "-"TYPE_POWERPC_CPU);
2423715d4b96SThomas Huth             if (suffix) {
2424715d4b96SThomas Huth                 *suffix = 0;
2425715d4b96SThomas Huth             }
2426715d4b96SThomas Huth             ppc_cpu_aliases[i].oc = oc;
2427715d4b96SThomas Huth             break;
2428715d4b96SThomas Huth         }
2429715d4b96SThomas Huth     }
2430715d4b96SThomas Huth 
24315ba4576bSAndreas Färber     return 0;
24325ba4576bSAndreas Färber }
24335ba4576bSAndreas Färber 
2434feaa64c4SDavid Gibson int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function)
2435feaa64c4SDavid Gibson {
2436feaa64c4SDavid Gibson     struct kvm_rtas_token_args args = {
2437feaa64c4SDavid Gibson         .token = token,
2438feaa64c4SDavid Gibson     };
2439feaa64c4SDavid Gibson 
2440feaa64c4SDavid Gibson     if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_RTAS)) {
2441feaa64c4SDavid Gibson         return -ENOENT;
2442feaa64c4SDavid Gibson     }
2443feaa64c4SDavid Gibson 
2444feaa64c4SDavid Gibson     strncpy(args.name, function, sizeof(args.name));
2445feaa64c4SDavid Gibson 
2446feaa64c4SDavid Gibson     return kvm_vm_ioctl(kvm_state, KVM_PPC_RTAS_DEFINE_TOKEN, &args);
2447feaa64c4SDavid Gibson }
244812b1143bSDavid Gibson 
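/*
 * Illustration only: the token value is an assumption taken from typical
 * sPAPR machines, and the call name is one KVM can service in-kernel.
 */
static void G_GNUC_UNUSED example_register_rtas(void)
{
    if (kvmppc_define_rtas_kernel_token(0x2000, "ibm,set-xive") == 0) {
        /* KVM now handles this RTAS call without exiting to QEMU */
    }
}
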
2449e68cb8b4SAlexey Kardashevskiy int kvmppc_get_htab_fd(bool write)
2450e68cb8b4SAlexey Kardashevskiy {
2451e68cb8b4SAlexey Kardashevskiy     struct kvm_get_htab_fd s = {
2452e68cb8b4SAlexey Kardashevskiy         .flags = write ? KVM_GET_HTAB_WRITE : 0,
2453e68cb8b4SAlexey Kardashevskiy         .start_index = 0,
2454e68cb8b4SAlexey Kardashevskiy     };
2455e68cb8b4SAlexey Kardashevskiy 
2456e68cb8b4SAlexey Kardashevskiy     if (!cap_htab_fd) {
2457e68cb8b4SAlexey Kardashevskiy         fprintf(stderr, "KVM version doesn't support saving the hash table\n");
2458e68cb8b4SAlexey Kardashevskiy         return -1;
2459e68cb8b4SAlexey Kardashevskiy     }
2460e68cb8b4SAlexey Kardashevskiy 
2461e68cb8b4SAlexey Kardashevskiy     return kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &s);
2462e68cb8b4SAlexey Kardashevskiy }
2463e68cb8b4SAlexey Kardashevskiy 
2464e68cb8b4SAlexey Kardashevskiy int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns)
2465e68cb8b4SAlexey Kardashevskiy {
2466bc72ad67SAlex Bligh     int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
2467e68cb8b4SAlexey Kardashevskiy     uint8_t buf[bufsize];
2468e68cb8b4SAlexey Kardashevskiy     ssize_t rc;
2469e68cb8b4SAlexey Kardashevskiy 
2470e68cb8b4SAlexey Kardashevskiy     do {
2471e68cb8b4SAlexey Kardashevskiy         rc = read(fd, buf, bufsize);
2472e68cb8b4SAlexey Kardashevskiy         if (rc < 0) {
2473e68cb8b4SAlexey Kardashevskiy             fprintf(stderr, "Error reading data from KVM HTAB fd: %s\n",
2474e68cb8b4SAlexey Kardashevskiy                     strerror(errno));
2475e68cb8b4SAlexey Kardashevskiy             return rc;
2476e68cb8b4SAlexey Kardashevskiy         } else if (rc) {
2477e094c4c1SCédric Le Goater             uint8_t *buffer = buf;
2478e094c4c1SCédric Le Goater             ssize_t n = rc;
2479e094c4c1SCédric Le Goater             while (n) {
2480e094c4c1SCédric Le Goater                 struct kvm_get_htab_header *head =
2481e094c4c1SCédric Le Goater                     (struct kvm_get_htab_header *) buffer;
2482e094c4c1SCédric Le Goater                 size_t chunksize = sizeof(*head) +
2483e094c4c1SCédric Le Goater                      HASH_PTE_SIZE_64 * head->n_valid;
2484e094c4c1SCédric Le Goater 
2485e094c4c1SCédric Le Goater                 qemu_put_be32(f, head->index);
2486e094c4c1SCédric Le Goater                 qemu_put_be16(f, head->n_valid);
2487e094c4c1SCédric Le Goater                 qemu_put_be16(f, head->n_invalid);
2488e094c4c1SCédric Le Goater                 qemu_put_buffer(f, (void *)(head + 1),
2489e094c4c1SCédric Le Goater                                 HASH_PTE_SIZE_64 * head->n_valid);
2490e094c4c1SCédric Le Goater 
2491e094c4c1SCédric Le Goater                 buffer += chunksize;
2492e094c4c1SCédric Le Goater                 n -= chunksize;
2493e094c4c1SCédric Le Goater             }
2494e68cb8b4SAlexey Kardashevskiy         }
2495e68cb8b4SAlexey Kardashevskiy     } while ((rc != 0)
2496e68cb8b4SAlexey Kardashevskiy              && ((max_ns < 0)
2497bc72ad67SAlex Bligh                  || ((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) < max_ns)));
2498e68cb8b4SAlexey Kardashevskiy 
2499e68cb8b4SAlexey Kardashevskiy     return (rc == 0) ? 1 : 0;
2500e68cb8b4SAlexey Kardashevskiy }
2501e68cb8b4SAlexey Kardashevskiy 
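/*
 * Hedged sketch of the expected save side (buffer size and time budget are
 * assumptions): open the HTAB fd read-only and drain it into the migration
 * stream, stopping after roughly 50 ms per call.
 */
static void G_GNUC_UNUSED example_htab_save_once(QEMUFile *f)
{
    int fd = kvmppc_get_htab_fd(false);

    if (fd >= 0) {
        /* returns 1 once the whole hash table has been written out */
        kvmppc_save_htab(f, fd, 16384, 50 * 1000 * 1000);
        close(fd);
    }
}
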
2502e68cb8b4SAlexey Kardashevskiy int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
2503e68cb8b4SAlexey Kardashevskiy                            uint16_t n_valid, uint16_t n_invalid)
2504e68cb8b4SAlexey Kardashevskiy {
2505e68cb8b4SAlexey Kardashevskiy     struct kvm_get_htab_header *buf;
2506e68cb8b4SAlexey Kardashevskiy     size_t chunksize = sizeof(*buf) + n_valid*HASH_PTE_SIZE_64;
2507e68cb8b4SAlexey Kardashevskiy     ssize_t rc;
2508e68cb8b4SAlexey Kardashevskiy 
2509e68cb8b4SAlexey Kardashevskiy     buf = alloca(chunksize);
2510e68cb8b4SAlexey Kardashevskiy     buf->index = index;
2511e68cb8b4SAlexey Kardashevskiy     buf->n_valid = n_valid;
2512e68cb8b4SAlexey Kardashevskiy     buf->n_invalid = n_invalid;
2513e68cb8b4SAlexey Kardashevskiy 
2514e68cb8b4SAlexey Kardashevskiy     qemu_get_buffer(f, (void *)(buf + 1), HASH_PTE_SIZE_64*n_valid);
2515e68cb8b4SAlexey Kardashevskiy 
2516e68cb8b4SAlexey Kardashevskiy     rc = write(fd, buf, chunksize);
2517e68cb8b4SAlexey Kardashevskiy     if (rc < 0) {
2518e68cb8b4SAlexey Kardashevskiy         fprintf(stderr, "Error writing KVM hash table: %s\n",
2519e68cb8b4SAlexey Kardashevskiy                 strerror(errno));
2520e68cb8b4SAlexey Kardashevskiy         return rc;
2521e68cb8b4SAlexey Kardashevskiy     }
2522e68cb8b4SAlexey Kardashevskiy     if (rc != chunksize) {
2523e68cb8b4SAlexey Kardashevskiy         /* We should never get a short write on a single chunk */
2524e68cb8b4SAlexey Kardashevskiy         fprintf(stderr, "Short write, restoring KVM hash table\n");
2525e68cb8b4SAlexey Kardashevskiy         return -1;
2526e68cb8b4SAlexey Kardashevskiy     }
2527e68cb8b4SAlexey Kardashevskiy     return 0;
2528e68cb8b4SAlexey Kardashevskiy }
2529e68cb8b4SAlexey Kardashevskiy 
253020d695a9SAndreas Färber bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
25314513d923SGleb Natapov {
25324513d923SGleb Natapov     return true;
25334513d923SGleb Natapov }
2534a1b87fe0SJan Kiszka 
253582169660SScott Wood void kvm_arch_init_irq_routing(KVMState *s)
253682169660SScott Wood {
253782169660SScott Wood }
2538c65f9a07SGreg Kurz 
25391ad9f0a4SDavid Gibson void kvmppc_read_hptes(ppc_hash_pte64_t *hptes, hwaddr ptex, int n)
25401ad9f0a4SDavid Gibson {
25411ad9f0a4SDavid Gibson     struct kvm_get_htab_fd ghf = {
25421ad9f0a4SDavid Gibson         .flags = 0,
25431ad9f0a4SDavid Gibson         .start_index = ptex,
25447c43bca0SAneesh Kumar K.V     };
25451ad9f0a4SDavid Gibson     int fd, rc;
25461ad9f0a4SDavid Gibson     int i;
25477c43bca0SAneesh Kumar K.V 
25481ad9f0a4SDavid Gibson     fd = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &ghf);
25491ad9f0a4SDavid Gibson     if (fd < 0) {
25501ad9f0a4SDavid Gibson         hw_error("kvmppc_read_hptes: Unable to open HPT fd");
25511ad9f0a4SDavid Gibson     }
25521ad9f0a4SDavid Gibson 
25531ad9f0a4SDavid Gibson     i = 0;
25541ad9f0a4SDavid Gibson     while (i < n) {
25551ad9f0a4SDavid Gibson         struct kvm_get_htab_header *hdr;
25561ad9f0a4SDavid Gibson         int m = n < HPTES_PER_GROUP ? n : HPTES_PER_GROUP;
25571ad9f0a4SDavid Gibson         char buf[sizeof(*hdr) + m * HASH_PTE_SIZE_64];
25581ad9f0a4SDavid Gibson 
25591ad9f0a4SDavid Gibson         rc = read(fd, buf, sizeof(buf));
25601ad9f0a4SDavid Gibson         if (rc < 0) {
25611ad9f0a4SDavid Gibson             hw_error("kvmppc_read_hptes: Unable to read HPTEs");
25621ad9f0a4SDavid Gibson         }
25631ad9f0a4SDavid Gibson 
25641ad9f0a4SDavid Gibson         hdr = (struct kvm_get_htab_header *)buf;
25651ad9f0a4SDavid Gibson         while ((i < n) && ((char *)hdr < (buf + rc))) {
25661ad9f0a4SDavid Gibson             int invalid = hdr->n_invalid;
25671ad9f0a4SDavid Gibson 
25681ad9f0a4SDavid Gibson             if (hdr->index != (ptex + i)) {
25691ad9f0a4SDavid Gibson                 hw_error("kvmppc_read_hptes: Unexpected HPTE index %"PRIu32
25701ad9f0a4SDavid Gibson                          " != (%"HWADDR_PRIu" + %d)", hdr->index, ptex, i);
25711ad9f0a4SDavid Gibson             }
25721ad9f0a4SDavid Gibson 
25731ad9f0a4SDavid Gibson             memcpy(hptes + i, hdr + 1, HASH_PTE_SIZE_64 * hdr->n_valid);
25741ad9f0a4SDavid Gibson             i += hdr->n_valid;
25751ad9f0a4SDavid Gibson 
25761ad9f0a4SDavid Gibson             if ((n - i) < invalid) {
25771ad9f0a4SDavid Gibson                 invalid = n - i;
25781ad9f0a4SDavid Gibson             }
25791ad9f0a4SDavid Gibson             memset(hptes + i, 0, invalid * HASH_PTE_SIZE_64);
25801ad9f0a4SDavid Gibson             i += hdr->n_invalid;
25811ad9f0a4SDavid Gibson 
25821ad9f0a4SDavid Gibson             hdr = (struct kvm_get_htab_header *)
25831ad9f0a4SDavid Gibson                 ((char *)(hdr + 1) + HASH_PTE_SIZE_64 * hdr->n_valid);
25841ad9f0a4SDavid Gibson         }
25851ad9f0a4SDavid Gibson     }
25861ad9f0a4SDavid Gibson 
25871ad9f0a4SDavid Gibson     close(fd);
25881ad9f0a4SDavid Gibson }
25891ad9f0a4SDavid Gibson 
25901ad9f0a4SDavid Gibson void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1)
25917c43bca0SAneesh Kumar K.V {
25921ad9f0a4SDavid Gibson     int fd, rc;
25937c43bca0SAneesh Kumar K.V     struct kvm_get_htab_fd ghf;
25941ad9f0a4SDavid Gibson     struct {
25951ad9f0a4SDavid Gibson         struct kvm_get_htab_header hdr;
25961ad9f0a4SDavid Gibson         uint64_t pte0;
25971ad9f0a4SDavid Gibson         uint64_t pte1;
25981ad9f0a4SDavid Gibson     } buf;
2599c1385933SAneesh Kumar K.V 
2600c1385933SAneesh Kumar K.V     ghf.flags = 0;
2601c1385933SAneesh Kumar K.V     ghf.start_index = 0;     /* Ignored */
26021ad9f0a4SDavid Gibson     fd = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &ghf);
26031ad9f0a4SDavid Gibson     if (fd < 0) {
26041ad9f0a4SDavid Gibson         hw_error("kvmppc_write_hpte: Unable to open HPT fd");
2605c1385933SAneesh Kumar K.V     }
2606c1385933SAneesh Kumar K.V 
26071ad9f0a4SDavid Gibson     buf.hdr.n_valid = 1;
26081ad9f0a4SDavid Gibson     buf.hdr.n_invalid = 0;
26091ad9f0a4SDavid Gibson     buf.hdr.index = ptex;
26101ad9f0a4SDavid Gibson     buf.pte0 = cpu_to_be64(pte0);
26111ad9f0a4SDavid Gibson     buf.pte1 = cpu_to_be64(pte1);
26121ad9f0a4SDavid Gibson 
26131ad9f0a4SDavid Gibson     rc = write(fd, &buf, sizeof(buf));
26141ad9f0a4SDavid Gibson     if (rc != sizeof(buf)) {
26151ad9f0a4SDavid Gibson         hw_error("kvmppc_write_hpte: Unable to update KVM HPT");
2616c1385933SAneesh Kumar K.V     }
26171ad9f0a4SDavid Gibson     close(fd);
2618c1385933SAneesh Kumar K.V }
26199e03a040SFrank Blaschka 
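/*
 * Sketch for illustration (helper name is hypothetical, and the pte0 field
 * layout of ppc_hash_pte64_t is assumed from mmu-hash64.h): read one HPTE
 * group through the fd interface above and count the slots that are not
 * all-zero.  Zeroed slots are the ones kvmppc_read_hptes() filled in for
 * invalid entries.
 */
static int G_GNUC_UNUSED example_count_used_slots(hwaddr ptex)
{
    ppc_hash_pte64_t hptes[HPTES_PER_GROUP];
    int i, used = 0;

    kvmppc_read_hptes(hptes, ptex, HPTES_PER_GROUP);
    for (i = 0; i < HPTES_PER_GROUP; i++) {
        if (hptes[i].pte0) {
            used++;
        }
    }
    return used;
}
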
26209e03a040SFrank Blaschka int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
2621dc9f06caSPavel Fedin                              uint64_t address, uint32_t data, PCIDevice *dev)
26229e03a040SFrank Blaschka {
26239e03a040SFrank Blaschka     return 0;
26249e03a040SFrank Blaschka }
26251850b6b7SEric Auger 
262638d87493SPeter Xu int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
262738d87493SPeter Xu                                 int vector, PCIDevice *dev)
262838d87493SPeter Xu {
262938d87493SPeter Xu     return 0;
263038d87493SPeter Xu }
263138d87493SPeter Xu 
263238d87493SPeter Xu int kvm_arch_release_virq_post(int virq)
263338d87493SPeter Xu {
263438d87493SPeter Xu     return 0;
263538d87493SPeter Xu }
263638d87493SPeter Xu 
26371850b6b7SEric Auger int kvm_arch_msi_data_to_gsi(uint32_t data)
26381850b6b7SEric Auger {
26391850b6b7SEric Auger     return data & 0xffff;
26401850b6b7SEric Auger }
26414d9392beSThomas Huth 
26424d9392beSThomas Huth int kvmppc_enable_hwrng(void)
26434d9392beSThomas Huth {
26444d9392beSThomas Huth     if (!kvm_enabled() || !kvm_check_extension(kvm_state, KVM_CAP_PPC_HWRNG)) {
26454d9392beSThomas Huth         return -1;
26464d9392beSThomas Huth     }
26474d9392beSThomas Huth 
26484d9392beSThomas Huth     return kvmppc_enable_hcall(kvm_state, H_RANDOM);
26494d9392beSThomas Huth }
2650