xref: /qemu/target/ppc/kvm.c (revision 1ec26c757d5996468afcc0dced4fad04139574b3)
1d76d1650Saurel32 /*
2d76d1650Saurel32  * PowerPC implementation of KVM hooks
3d76d1650Saurel32  *
4d76d1650Saurel32  * Copyright IBM Corp. 2007
590dc8812SScott Wood  * Copyright (C) 2011 Freescale Semiconductor, Inc.
6d76d1650Saurel32  *
7d76d1650Saurel32  * Authors:
8d76d1650Saurel32  *  Jerone Young <jyoung5@us.ibm.com>
9d76d1650Saurel32  *  Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
10d76d1650Saurel32  *  Hollis Blanchard <hollisb@us.ibm.com>
11d76d1650Saurel32  *
12d76d1650Saurel32  * This work is licensed under the terms of the GNU GPL, version 2 or later.
13d76d1650Saurel32  * See the COPYING file in the top-level directory.
14d76d1650Saurel32  *
15d76d1650Saurel32  */
16d76d1650Saurel32 
170d75590dSPeter Maydell #include "qemu/osdep.h"
18eadaada1SAlexander Graf #include <dirent.h>
19d76d1650Saurel32 #include <sys/ioctl.h>
204656e1f0SBenjamin Herrenschmidt #include <sys/vfs.h>
21d76d1650Saurel32 
22d76d1650Saurel32 #include <linux/kvm.h>
23d76d1650Saurel32 
24d76d1650Saurel32 #include "qemu-common.h"
2530f4b05bSDavid Gibson #include "qapi/error.h"
26072ed5f2SThomas Huth #include "qemu/error-report.h"
2733c11879SPaolo Bonzini #include "cpu.h"
28715d4b96SThomas Huth #include "cpu-models.h"
291de7afc9SPaolo Bonzini #include "qemu/timer.h"
309c17d615SPaolo Bonzini #include "sysemu/sysemu.h"
31b3946626SVincent Palatin #include "sysemu/hw_accel.h"
32d76d1650Saurel32 #include "kvm_ppc.h"
339c17d615SPaolo Bonzini #include "sysemu/cpus.h"
349c17d615SPaolo Bonzini #include "sysemu/device_tree.h"
35d5aea6f3SDavid Gibson #include "mmu-hash64.h"
36d76d1650Saurel32 
37f61b4bedSAlexander Graf #include "hw/sysbus.h"
380d09e41aSPaolo Bonzini #include "hw/ppc/spapr.h"
390d09e41aSPaolo Bonzini #include "hw/ppc/spapr_vio.h"
407ebaf795SBharata B Rao #include "hw/ppc/spapr_cpu_core.h"
4198a8b524SAlexey Kardashevskiy #include "hw/ppc/ppc.h"
4231f2cb8fSBharat Bhushan #include "sysemu/watchdog.h"
43b36f100eSAlexey Kardashevskiy #include "trace.h"
4488365d17SBharat Bhushan #include "exec/gdbstub.h"
454c663752SPaolo Bonzini #include "exec/memattrs.h"
469c607668SAlexey Kardashevskiy #include "exec/ram_addr.h"
472d103aaeSMichael Roth #include "sysemu/hostmem.h"
48f348b6d1SVeronia Bahaa #include "qemu/cutils.h"
499c607668SAlexey Kardashevskiy #include "qemu/mmap-alloc.h"
503b542549SBharata B Rao #if defined(TARGET_PPC64)
513b542549SBharata B Rao #include "hw/ppc/spapr_cpu_core.h"
523b542549SBharata B Rao #endif
53f3d9f303SSam Bobroff #include "elf.h"
54c64abd1fSSam Bobroff #include "sysemu/kvm_int.h"
55f61b4bedSAlexander Graf 
56d76d1650Saurel32 //#define DEBUG_KVM
57d76d1650Saurel32 
58d76d1650Saurel32 #ifdef DEBUG_KVM
59da56ff91SPeter Maydell #define DPRINTF(fmt, ...) \
60d76d1650Saurel32     do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
61d76d1650Saurel32 #else
62da56ff91SPeter Maydell #define DPRINTF(fmt, ...) \
63d76d1650Saurel32     do { } while (0)
64d76d1650Saurel32 #endif
65d76d1650Saurel32 
66eadaada1SAlexander Graf #define PROC_DEVTREE_CPU      "/proc/device-tree/cpus/"
67eadaada1SAlexander Graf 
6894a8d39aSJan Kiszka const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
6994a8d39aSJan Kiszka     KVM_CAP_LAST_INFO
7094a8d39aSJan Kiszka };
7194a8d39aSJan Kiszka 
72fc87e185SAlexander Graf static int cap_interrupt_unset = false;
73fc87e185SAlexander Graf static int cap_interrupt_level = false;
7490dc8812SScott Wood static int cap_segstate;
7590dc8812SScott Wood static int cap_booke_sregs;
76e97c3636SDavid Gibson static int cap_ppc_smt;
77fa98fbfcSSam Bobroff static int cap_ppc_smt_possible;
78354ac20aSDavid Gibson static int cap_ppc_rma;
790f5cb298SDavid Gibson static int cap_spapr_tce;
80d6ee2a7cSAlexey Kardashevskiy static int cap_spapr_tce_64;
81da95324eSAlexey Kardashevskiy static int cap_spapr_multitce;
829bb62a07SAlexey Kardashevskiy static int cap_spapr_vfio;
83f1af19d7SDavid Gibson static int cap_hior;
84d67d40eaSDavid Gibson static int cap_one_reg;
853b961124SStuart Yoder static int cap_epr;
8631f2cb8fSBharat Bhushan static int cap_ppc_watchdog;
879b00ea49SDavid Gibson static int cap_papr;
88e68cb8b4SAlexey Kardashevskiy static int cap_htab_fd;
8987a91de6SAlexander Graf static int cap_fixup_hcalls;
90bac3bf28SThomas Huth static int cap_htm;             /* Hardware transactional memory support */
91cf1c4cceSSam Bobroff static int cap_mmu_radix;
92cf1c4cceSSam Bobroff static int cap_mmu_hash_v3;
93b55d295eSDavid Gibson static int cap_resize_hpt;
94c363a37aSDaniel Henrique Barboza static int cap_ppc_pvr_compat;
95fc87e185SAlexander Graf 
963c902d44SBharat Bhushan static uint32_t debug_inst_opcode;
973c902d44SBharat Bhushan 
98c821c2bdSAlexander Graf /* XXX We have a race condition where we actually have a level triggered
99c821c2bdSAlexander Graf  *     interrupt, but the infrastructure can't expose that yet, so the guest
100c821c2bdSAlexander Graf  *     takes it but ignores it, goes to sleep and never gets notified that there's
101c821c2bdSAlexander Graf  *     still an interrupt pending.
102c6a94ba5SAlexander Graf  *
103c821c2bdSAlexander Graf  *     As a quick workaround, let's just wake up again 20 ms after we injected
104c821c2bdSAlexander Graf  *     an interrupt. That way we can ensure that we're always reinjecting
105c821c2bdSAlexander Graf  *     interrupts in case the guest swallowed them.
106c6a94ba5SAlexander Graf  */
107c6a94ba5SAlexander Graf static QEMUTimer *idle_timer;
108c6a94ba5SAlexander Graf 
109d5a68146SAndreas Färber static void kvm_kick_cpu(void *opaque)
110c6a94ba5SAlexander Graf {
111d5a68146SAndreas Färber     PowerPCCPU *cpu = opaque;
112d5a68146SAndreas Färber 
113c08d7424SAndreas Färber     qemu_cpu_kick(CPU(cpu));
114c6a94ba5SAlexander Graf }
115c6a94ba5SAlexander Graf 
11696c9cff0SThomas Huth /* Check whether we are running with KVM-PR (instead of KVM-HV).  This
11796c9cff0SThomas Huth  * should only be used for fallback tests - generally we should use
11896c9cff0SThomas Huth  * explicit capabilities for the features we want, rather than
11996c9cff0SThomas Huth  * assuming what is/isn't available depending on the KVM variant. */
12096c9cff0SThomas Huth static bool kvmppc_is_pr(KVMState *ks)
12196c9cff0SThomas Huth {
12296c9cff0SThomas Huth     /* Assume KVM-PR if the GET_PVINFO capability is available */
12370a0c19eSGreg Kurz     return kvm_vm_check_extension(ks, KVM_CAP_PPC_GET_PVINFO) != 0;
12496c9cff0SThomas Huth }
12596c9cff0SThomas Huth 
1265ba4576bSAndreas Färber static int kvm_ppc_register_host_cpu_type(void);
1275ba4576bSAndreas Färber 
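/* Probe the KVM capabilities we care about once at accelerator init time,
 * cache them in the cap_* globals above and register the host CPU type. */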
128b16565b3SMarcel Apfelbaum int kvm_arch_init(MachineState *ms, KVMState *s)
129d76d1650Saurel32 {
130fc87e185SAlexander Graf     cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);
131fc87e185SAlexander Graf     cap_interrupt_level = kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL);
13290dc8812SScott Wood     cap_segstate = kvm_check_extension(s, KVM_CAP_PPC_SEGSTATE);
13390dc8812SScott Wood     cap_booke_sregs = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_SREGS);
1346977afdaSGreg Kurz     cap_ppc_smt_possible = kvm_vm_check_extension(s, KVM_CAP_PPC_SMT_POSSIBLE);
135354ac20aSDavid Gibson     cap_ppc_rma = kvm_check_extension(s, KVM_CAP_PPC_RMA);
1360f5cb298SDavid Gibson     cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);
137d6ee2a7cSAlexey Kardashevskiy     cap_spapr_tce_64 = kvm_check_extension(s, KVM_CAP_SPAPR_TCE_64);
138da95324eSAlexey Kardashevskiy     cap_spapr_multitce = kvm_check_extension(s, KVM_CAP_SPAPR_MULTITCE);
1399bb62a07SAlexey Kardashevskiy     cap_spapr_vfio = false;
140d67d40eaSDavid Gibson     cap_one_reg = kvm_check_extension(s, KVM_CAP_ONE_REG);
141f1af19d7SDavid Gibson     cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);
1423b961124SStuart Yoder     cap_epr = kvm_check_extension(s, KVM_CAP_PPC_EPR);
14331f2cb8fSBharat Bhushan     cap_ppc_watchdog = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_WATCHDOG);
1449b00ea49SDavid Gibson     /* Note: we don't set cap_papr here, because this capability is
1459b00ea49SDavid Gibson      * only activated after this by kvmppc_set_papr() */
1466977afdaSGreg Kurz     cap_htab_fd = kvm_vm_check_extension(s, KVM_CAP_PPC_HTAB_FD);
14787a91de6SAlexander Graf     cap_fixup_hcalls = kvm_check_extension(s, KVM_CAP_PPC_FIXUP_HCALL);
148fa98fbfcSSam Bobroff     cap_ppc_smt = kvm_vm_check_extension(s, KVM_CAP_PPC_SMT);
149bac3bf28SThomas Huth     cap_htm = kvm_vm_check_extension(s, KVM_CAP_PPC_HTM);
150cf1c4cceSSam Bobroff     cap_mmu_radix = kvm_vm_check_extension(s, KVM_CAP_PPC_MMU_RADIX);
151cf1c4cceSSam Bobroff     cap_mmu_hash_v3 = kvm_vm_check_extension(s, KVM_CAP_PPC_MMU_HASH_V3);
152b55d295eSDavid Gibson     cap_resize_hpt = kvm_vm_check_extension(s, KVM_CAP_SPAPR_RESIZE_HPT);
153c363a37aSDaniel Henrique Barboza     /*
154c363a37aSDaniel Henrique Barboza      * Note: setting it to false because there is no such capability
155c363a37aSDaniel Henrique Barboza      * in KVM at this moment.
156c363a37aSDaniel Henrique Barboza      *
157c363a37aSDaniel Henrique Barboza      * TODO: call kvm_vm_check_extension() with the right capability
158c363a37aSDaniel Henrique Barboza      * after the kernel starts implementing it. */
159c363a37aSDaniel Henrique Barboza     cap_ppc_pvr_compat = false;
160fc87e185SAlexander Graf 
161fc87e185SAlexander Graf     if (!cap_interrupt_level) {
162fc87e185SAlexander Graf         fprintf(stderr, "KVM: Couldn't find level irq capability. Expect the "
163fc87e185SAlexander Graf                         "VM to stall at times!\n");
164fc87e185SAlexander Graf     }
165fc87e185SAlexander Graf 
1665ba4576bSAndreas Färber     kvm_ppc_register_host_cpu_type();
1675ba4576bSAndreas Färber 
168d76d1650Saurel32     return 0;
169d76d1650Saurel32 }
170d76d1650Saurel32 
171d525ffabSPaolo Bonzini int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
172d525ffabSPaolo Bonzini {
173d525ffabSPaolo Bonzini     return 0;
174d525ffabSPaolo Bonzini }
175d525ffabSPaolo Bonzini 
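/* Tell KVM which PVR the vCPU uses, via the sregs interface.  BookE targets
 * skip this; Book3S targets require the segstate capability. */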
1761bc22652SAndreas Färber static int kvm_arch_sync_sregs(PowerPCCPU *cpu)
177d76d1650Saurel32 {
1781bc22652SAndreas Färber     CPUPPCState *cenv = &cpu->env;
1791bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
180861bbc80SAlexander Graf     struct kvm_sregs sregs;
1815666ca4aSScott Wood     int ret;
1825666ca4aSScott Wood 
1835666ca4aSScott Wood     if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
18464e07be5SAlexander Graf         /* What we're really trying to say is "if we're on BookE, we use
18564e07be5SAlexander Graf            the native PVR for now". This is the only sane way to check
18664e07be5SAlexander Graf            it though, so we may mislead users into thinking they can run
18764e07be5SAlexander Graf            BookE guests on BookS. Let's hope nobody dares enough :) */
1885666ca4aSScott Wood         return 0;
1895666ca4aSScott Wood     } else {
19090dc8812SScott Wood         if (!cap_segstate) {
19164e07be5SAlexander Graf             fprintf(stderr, "kvm error: missing PVR setting capability\n");
19264e07be5SAlexander Graf             return -ENOSYS;
1935666ca4aSScott Wood         }
1945666ca4aSScott Wood     }
1955666ca4aSScott Wood 
1961bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
1975666ca4aSScott Wood     if (ret) {
1985666ca4aSScott Wood         return ret;
1995666ca4aSScott Wood     }
200861bbc80SAlexander Graf 
201861bbc80SAlexander Graf     sregs.pvr = cenv->spr[SPR_PVR];
2021bc22652SAndreas Färber     return kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
2035666ca4aSScott Wood }
2045666ca4aSScott Wood 
20593dd5e85SScott Wood /* Set up a shared TLB array with KVM */
2061bc22652SAndreas Färber static int kvm_booke206_tlb_init(PowerPCCPU *cpu)
20793dd5e85SScott Wood {
2081bc22652SAndreas Färber     CPUPPCState *env = &cpu->env;
2091bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
21093dd5e85SScott Wood     struct kvm_book3e_206_tlb_params params = {};
21193dd5e85SScott Wood     struct kvm_config_tlb cfg = {};
21293dd5e85SScott Wood     unsigned int entries = 0;
21393dd5e85SScott Wood     int ret, i;
21493dd5e85SScott Wood 
21593dd5e85SScott Wood     if (!kvm_enabled() ||
216a60f24b5SAndreas Färber         !kvm_check_extension(cs->kvm_state, KVM_CAP_SW_TLB)) {
21793dd5e85SScott Wood         return 0;
21893dd5e85SScott Wood     }
21993dd5e85SScott Wood 
22093dd5e85SScott Wood     assert(ARRAY_SIZE(params.tlb_sizes) == BOOKE206_MAX_TLBN);
22193dd5e85SScott Wood 
22293dd5e85SScott Wood     for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
22393dd5e85SScott Wood         params.tlb_sizes[i] = booke206_tlb_size(env, i);
22493dd5e85SScott Wood         params.tlb_ways[i] = booke206_tlb_ways(env, i);
22593dd5e85SScott Wood         entries += params.tlb_sizes[i];
22693dd5e85SScott Wood     }
22793dd5e85SScott Wood 
22893dd5e85SScott Wood     assert(entries == env->nb_tlb);
22993dd5e85SScott Wood     assert(sizeof(struct kvm_book3e_206_tlb_entry) == sizeof(ppcmas_tlb_t));
23093dd5e85SScott Wood 
23193dd5e85SScott Wood     env->tlb_dirty = true;
23293dd5e85SScott Wood 
23393dd5e85SScott Wood     cfg.array = (uintptr_t)env->tlb.tlbm;
23493dd5e85SScott Wood     cfg.array_len = sizeof(ppcmas_tlb_t) * entries;
23593dd5e85SScott Wood     cfg.params = (uintptr_t)&params;
23693dd5e85SScott Wood     cfg.mmu_type = KVM_MMU_FSL_BOOKE_NOHV;
23793dd5e85SScott Wood 
23848add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_SW_TLB, 0, (uintptr_t)&cfg);
23993dd5e85SScott Wood     if (ret < 0) {
24093dd5e85SScott Wood         fprintf(stderr, "%s: couldn't enable KVM_CAP_SW_TLB: %s\n",
24193dd5e85SScott Wood                 __func__, strerror(-ret));
24293dd5e85SScott Wood         return ret;
24393dd5e85SScott Wood     }
24493dd5e85SScott Wood 
24593dd5e85SScott Wood     env->kvm_sw_tlb = true;
24693dd5e85SScott Wood     return 0;
24793dd5e85SScott Wood }
24893dd5e85SScott Wood 
2494656e1f0SBenjamin Herrenschmidt 
2504656e1f0SBenjamin Herrenschmidt #if defined(TARGET_PPC64)
251a60f24b5SAndreas Färber static void kvm_get_fallback_smmu_info(PowerPCCPU *cpu,
2524656e1f0SBenjamin Herrenschmidt                                        struct kvm_ppc_smmu_info *info)
2534656e1f0SBenjamin Herrenschmidt {
254a60f24b5SAndreas Färber     CPUPPCState *env = &cpu->env;
255a60f24b5SAndreas Färber     CPUState *cs = CPU(cpu);
256a60f24b5SAndreas Färber 
2574656e1f0SBenjamin Herrenschmidt     memset(info, 0, sizeof(*info));
2584656e1f0SBenjamin Herrenschmidt 
2594656e1f0SBenjamin Herrenschmidt     /* We don't have the new KVM_PPC_GET_SMMU_INFO ioctl, so we
2604656e1f0SBenjamin Herrenschmidt      * need to "guess" what the supported page sizes are.
2614656e1f0SBenjamin Herrenschmidt      *
2624656e1f0SBenjamin Herrenschmidt      * For that to work we make a few assumptions:
2634656e1f0SBenjamin Herrenschmidt      *
26496c9cff0SThomas Huth      * - Check whether we are running "PR" KVM which only supports 4K
26596c9cff0SThomas Huth      *   and 16M pages, but supports them regardless of the backing
26696c9cff0SThomas Huth      *   store characteristics. We also don't support 1T segments.
2674656e1f0SBenjamin Herrenschmidt      *
2684656e1f0SBenjamin Herrenschmidt      *   This is safe as if HV KVM ever supports that capability or PR
2694656e1f0SBenjamin Herrenschmidt      *   KVM grows support for more page/segment sizes, those versions
2704656e1f0SBenjamin Herrenschmidt      *   will have implemented KVM_CAP_PPC_GET_SMMU_INFO and thus we
2714656e1f0SBenjamin Herrenschmidt      *   will not hit this fallback
2724656e1f0SBenjamin Herrenschmidt      *
2734656e1f0SBenjamin Herrenschmidt      * - Else we are running HV KVM. This means we only support page
2744656e1f0SBenjamin Herrenschmidt      *   sizes that fit in the backing store. Additionally we only
2754656e1f0SBenjamin Herrenschmidt      *   advertise 64K pages if the processor is ARCH 2.06 and we assume
2764656e1f0SBenjamin Herrenschmidt      *   P7 encodings for the SLB and hash table. Here too, we assume
2774656e1f0SBenjamin Herrenschmidt      *   support for any newer processor will mean a kernel that
2784656e1f0SBenjamin Herrenschmidt      *   implements KVM_CAP_PPC_GET_SMMU_INFO and thus doesn't hit
2794656e1f0SBenjamin Herrenschmidt      *   this fallback.
2804656e1f0SBenjamin Herrenschmidt      */
28196c9cff0SThomas Huth     if (kvmppc_is_pr(cs->kvm_state)) {
2824656e1f0SBenjamin Herrenschmidt         /* No flags */
2834656e1f0SBenjamin Herrenschmidt         info->flags = 0;
2844656e1f0SBenjamin Herrenschmidt         info->slb_size = 64;
2854656e1f0SBenjamin Herrenschmidt 
2864656e1f0SBenjamin Herrenschmidt         /* Standard 4k base page size segment */
2874656e1f0SBenjamin Herrenschmidt         info->sps[0].page_shift = 12;
2884656e1f0SBenjamin Herrenschmidt         info->sps[0].slb_enc = 0;
2894656e1f0SBenjamin Herrenschmidt         info->sps[0].enc[0].page_shift = 12;
2904656e1f0SBenjamin Herrenschmidt         info->sps[0].enc[0].pte_enc = 0;
2914656e1f0SBenjamin Herrenschmidt 
2924656e1f0SBenjamin Herrenschmidt         /* Standard 16M large page size segment */
2934656e1f0SBenjamin Herrenschmidt         info->sps[1].page_shift = 24;
2944656e1f0SBenjamin Herrenschmidt         info->sps[1].slb_enc = SLB_VSID_L;
2954656e1f0SBenjamin Herrenschmidt         info->sps[1].enc[0].page_shift = 24;
2964656e1f0SBenjamin Herrenschmidt         info->sps[1].enc[0].pte_enc = 0;
2974656e1f0SBenjamin Herrenschmidt     } else {
2984656e1f0SBenjamin Herrenschmidt         int i = 0;
2994656e1f0SBenjamin Herrenschmidt 
3004656e1f0SBenjamin Herrenschmidt         /* HV KVM has backing store size restrictions */
3014656e1f0SBenjamin Herrenschmidt         info->flags = KVM_PPC_PAGE_SIZES_REAL;
3024656e1f0SBenjamin Herrenschmidt 
3034656e1f0SBenjamin Herrenschmidt         if (env->mmu_model & POWERPC_MMU_1TSEG) {
3044656e1f0SBenjamin Herrenschmidt             info->flags |= KVM_PPC_1T_SEGMENTS;
3054656e1f0SBenjamin Herrenschmidt         }
3064656e1f0SBenjamin Herrenschmidt 
307ec975e83SSam Bobroff         if (POWERPC_MMU_VER(env->mmu_model) == POWERPC_MMU_VER_2_06 ||
308ec975e83SSam Bobroff            POWERPC_MMU_VER(env->mmu_model) == POWERPC_MMU_VER_2_07) {
3094656e1f0SBenjamin Herrenschmidt             info->slb_size = 32;
3104656e1f0SBenjamin Herrenschmidt         } else {
3114656e1f0SBenjamin Herrenschmidt             info->slb_size = 64;
3124656e1f0SBenjamin Herrenschmidt         }
3134656e1f0SBenjamin Herrenschmidt 
3144656e1f0SBenjamin Herrenschmidt         /* Standard 4k base page size segment */
3154656e1f0SBenjamin Herrenschmidt         info->sps[i].page_shift = 12;
3164656e1f0SBenjamin Herrenschmidt         info->sps[i].slb_enc = 0;
3174656e1f0SBenjamin Herrenschmidt         info->sps[i].enc[0].page_shift = 12;
3184656e1f0SBenjamin Herrenschmidt         info->sps[i].enc[0].pte_enc = 0;
3194656e1f0SBenjamin Herrenschmidt         i++;
3204656e1f0SBenjamin Herrenschmidt 
321aa4bb587SBenjamin Herrenschmidt         /* 64K pages on MMU 2.06 and 2.07 */
322ec975e83SSam Bobroff         if (POWERPC_MMU_VER(env->mmu_model) == POWERPC_MMU_VER_2_06 ||
323ec975e83SSam Bobroff             POWERPC_MMU_VER(env->mmu_model) == POWERPC_MMU_VER_2_07) {
3244656e1f0SBenjamin Herrenschmidt             info->sps[i].page_shift = 16;
3254656e1f0SBenjamin Herrenschmidt             info->sps[i].slb_enc = 0x110;
3264656e1f0SBenjamin Herrenschmidt             info->sps[i].enc[0].page_shift = 16;
3274656e1f0SBenjamin Herrenschmidt             info->sps[i].enc[0].pte_enc = 1;
3284656e1f0SBenjamin Herrenschmidt             i++;
3294656e1f0SBenjamin Herrenschmidt         }
3304656e1f0SBenjamin Herrenschmidt 
3314656e1f0SBenjamin Herrenschmidt         /* Standard 16M large page size segment */
3324656e1f0SBenjamin Herrenschmidt         info->sps[i].page_shift = 24;
3334656e1f0SBenjamin Herrenschmidt         info->sps[i].slb_enc = SLB_VSID_L;
3344656e1f0SBenjamin Herrenschmidt         info->sps[i].enc[0].page_shift = 24;
3354656e1f0SBenjamin Herrenschmidt         info->sps[i].enc[0].pte_enc = 0;
3364656e1f0SBenjamin Herrenschmidt     }
3374656e1f0SBenjamin Herrenschmidt }
3384656e1f0SBenjamin Herrenschmidt 
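/* Query the host MMU characteristics from KVM, falling back to conservative
 * guesses when the KVM_PPC_GET_SMMU_INFO ioctl is unavailable. */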
339a60f24b5SAndreas Färber static void kvm_get_smmu_info(PowerPCCPU *cpu, struct kvm_ppc_smmu_info *info)
3404656e1f0SBenjamin Herrenschmidt {
341a60f24b5SAndreas Färber     CPUState *cs = CPU(cpu);
3424656e1f0SBenjamin Herrenschmidt     int ret;
3434656e1f0SBenjamin Herrenschmidt 
344a60f24b5SAndreas Färber     if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) {
345a60f24b5SAndreas Färber         ret = kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_SMMU_INFO, info);
3464656e1f0SBenjamin Herrenschmidt         if (ret == 0) {
3474656e1f0SBenjamin Herrenschmidt             return;
3484656e1f0SBenjamin Herrenschmidt         }
3494656e1f0SBenjamin Herrenschmidt     }
3504656e1f0SBenjamin Herrenschmidt 
351a60f24b5SAndreas Färber     kvm_get_fallback_smmu_info(cpu, info);
3524656e1f0SBenjamin Herrenschmidt }
3534656e1f0SBenjamin Herrenschmidt 
354c64abd1fSSam Bobroff struct ppc_radix_page_info *kvm_get_radix_page_info(void)
355c64abd1fSSam Bobroff {
356c64abd1fSSam Bobroff     KVMState *s = KVM_STATE(current_machine->accelerator);
357c64abd1fSSam Bobroff     struct ppc_radix_page_info *radix_page_info;
358c64abd1fSSam Bobroff     struct kvm_ppc_rmmu_info rmmu_info;
359c64abd1fSSam Bobroff     int i;
360c64abd1fSSam Bobroff 
361c64abd1fSSam Bobroff     if (!kvm_check_extension(s, KVM_CAP_PPC_MMU_RADIX)) {
362c64abd1fSSam Bobroff         return NULL;
363c64abd1fSSam Bobroff     }
364c64abd1fSSam Bobroff     if (kvm_vm_ioctl(s, KVM_PPC_GET_RMMU_INFO, &rmmu_info)) {
365c64abd1fSSam Bobroff         return NULL;
366c64abd1fSSam Bobroff     }
367c64abd1fSSam Bobroff     radix_page_info = g_malloc0(sizeof(*radix_page_info));
368c64abd1fSSam Bobroff     radix_page_info->count = 0;
369c64abd1fSSam Bobroff     for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
370c64abd1fSSam Bobroff         if (rmmu_info.ap_encodings[i]) {
371c64abd1fSSam Bobroff             radix_page_info->entries[i] = rmmu_info.ap_encodings[i];
372c64abd1fSSam Bobroff             radix_page_info->count++;
373c64abd1fSSam Bobroff         }
374c64abd1fSSam Bobroff     }
375c64abd1fSSam Bobroff     return radix_page_info;
376c64abd1fSSam Bobroff }
377c64abd1fSSam Bobroff 
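/* Configure the guest's ISA v3.00 MMU mode (radix/hash, GTSE, process table)
 * through KVM_PPC_CONFIGURE_V3_MMU and map the result to PAPR return codes. */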
378b4db5413SSuraj Jitindar Singh target_ulong kvmppc_configure_v3_mmu(PowerPCCPU *cpu,
379b4db5413SSuraj Jitindar Singh                                      bool radix, bool gtse,
380b4db5413SSuraj Jitindar Singh                                      uint64_t proc_tbl)
381b4db5413SSuraj Jitindar Singh {
382b4db5413SSuraj Jitindar Singh     CPUState *cs = CPU(cpu);
383b4db5413SSuraj Jitindar Singh     int ret;
384b4db5413SSuraj Jitindar Singh     uint64_t flags = 0;
385b4db5413SSuraj Jitindar Singh     struct kvm_ppc_mmuv3_cfg cfg = {
386b4db5413SSuraj Jitindar Singh         .process_table = proc_tbl,
387b4db5413SSuraj Jitindar Singh     };
388b4db5413SSuraj Jitindar Singh 
389b4db5413SSuraj Jitindar Singh     if (radix) {
390b4db5413SSuraj Jitindar Singh         flags |= KVM_PPC_MMUV3_RADIX;
391b4db5413SSuraj Jitindar Singh     }
392b4db5413SSuraj Jitindar Singh     if (gtse) {
393b4db5413SSuraj Jitindar Singh         flags |= KVM_PPC_MMUV3_GTSE;
394b4db5413SSuraj Jitindar Singh     }
395b4db5413SSuraj Jitindar Singh     cfg.flags = flags;
396b4db5413SSuraj Jitindar Singh     ret = kvm_vm_ioctl(cs->kvm_state, KVM_PPC_CONFIGURE_V3_MMU, &cfg);
397b4db5413SSuraj Jitindar Singh     switch (ret) {
398b4db5413SSuraj Jitindar Singh     case 0:
399b4db5413SSuraj Jitindar Singh         return H_SUCCESS;
400b4db5413SSuraj Jitindar Singh     case -EINVAL:
401b4db5413SSuraj Jitindar Singh         return H_PARAMETER;
402b4db5413SSuraj Jitindar Singh     case -ENODEV:
403b4db5413SSuraj Jitindar Singh         return H_NOT_AVAILABLE;
404b4db5413SSuraj Jitindar Singh     default:
405b4db5413SSuraj Jitindar Singh         return H_HARDWARE;
406b4db5413SSuraj Jitindar Singh     }
407b4db5413SSuraj Jitindar Singh }
408b4db5413SSuraj Jitindar Singh 
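/* A page size is only usable if it fits within the backing store page size
 * (rampgsize) when the KVM_PPC_PAGE_SIZES_REAL flag is set. */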
4094656e1f0SBenjamin Herrenschmidt static bool kvm_valid_page_size(uint32_t flags, long rampgsize, uint32_t shift)
4104656e1f0SBenjamin Herrenschmidt {
4114656e1f0SBenjamin Herrenschmidt     if (!(flags & KVM_PPC_PAGE_SIZES_REAL)) {
4124656e1f0SBenjamin Herrenschmidt         return true;
4134656e1f0SBenjamin Herrenschmidt     }
4144656e1f0SBenjamin Herrenschmidt 
4154656e1f0SBenjamin Herrenschmidt     return (1ul << shift) <= rampgsize;
4164656e1f0SBenjamin Herrenschmidt }
4174656e1f0SBenjamin Herrenschmidt 
418df587133SThomas Huth static long max_cpu_page_size;
419df587133SThomas Huth 
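/* Rebuild env->sps from KVM's view of the host MMU, dropping page sizes that
 * do not fit in the RAM backing store, and clear 1T-segment and 64K-page
 * support when the host does not provide them. */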
420a60f24b5SAndreas Färber static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
4214656e1f0SBenjamin Herrenschmidt {
4224656e1f0SBenjamin Herrenschmidt     static struct kvm_ppc_smmu_info smmu_info;
4234656e1f0SBenjamin Herrenschmidt     static bool has_smmu_info;
424a60f24b5SAndreas Färber     CPUPPCState *env = &cpu->env;
4254656e1f0SBenjamin Herrenschmidt     int iq, ik, jq, jk;
4260d594f55SThomas Huth     bool has_64k_pages = false;
4274656e1f0SBenjamin Herrenschmidt 
4284656e1f0SBenjamin Herrenschmidt     /* We only handle page sizes for 64-bit server guests for now */
4294656e1f0SBenjamin Herrenschmidt     if (!(env->mmu_model & POWERPC_MMU_64)) {
4304656e1f0SBenjamin Herrenschmidt         return;
4314656e1f0SBenjamin Herrenschmidt     }
4324656e1f0SBenjamin Herrenschmidt 
4334656e1f0SBenjamin Herrenschmidt     /* Collect MMU info from kernel if not already */
4344656e1f0SBenjamin Herrenschmidt     if (!has_smmu_info) {
435a60f24b5SAndreas Färber         kvm_get_smmu_info(cpu, &smmu_info);
4364656e1f0SBenjamin Herrenschmidt         has_smmu_info = true;
4374656e1f0SBenjamin Herrenschmidt     }
4384656e1f0SBenjamin Herrenschmidt 
439df587133SThomas Huth     if (!max_cpu_page_size) {
4409c607668SAlexey Kardashevskiy         max_cpu_page_size = qemu_getrampagesize();
441df587133SThomas Huth     }
4424656e1f0SBenjamin Herrenschmidt 
4434656e1f0SBenjamin Herrenschmidt     /* Convert to QEMU form */
4444656e1f0SBenjamin Herrenschmidt     memset(&env->sps, 0, sizeof(env->sps));
4454656e1f0SBenjamin Herrenschmidt 
44690da0d5aSBenjamin Herrenschmidt     /* If we have HV KVM, we need to forbid CI large pages if our
44790da0d5aSBenjamin Herrenschmidt      * host page size is smaller than 64K.
44890da0d5aSBenjamin Herrenschmidt      */
44990da0d5aSBenjamin Herrenschmidt     if (smmu_info.flags & KVM_PPC_PAGE_SIZES_REAL) {
45090da0d5aSBenjamin Herrenschmidt         env->ci_large_pages = getpagesize() >= 0x10000;
45190da0d5aSBenjamin Herrenschmidt     }
45290da0d5aSBenjamin Herrenschmidt 
45308215d8fSAlexander Graf     /*
45408215d8fSAlexander Graf      * XXX This loop should be an entry wide AND of the capabilities that
45508215d8fSAlexander Graf      *     the selected CPU has with the capabilities that KVM supports.
45608215d8fSAlexander Graf      */
4574656e1f0SBenjamin Herrenschmidt     for (ik = iq = 0; ik < KVM_PPC_PAGE_SIZES_MAX_SZ; ik++) {
4584656e1f0SBenjamin Herrenschmidt         struct ppc_one_seg_page_size *qsps = &env->sps.sps[iq];
4594656e1f0SBenjamin Herrenschmidt         struct kvm_ppc_one_seg_page_size *ksps = &smmu_info.sps[ik];
4604656e1f0SBenjamin Herrenschmidt 
461df587133SThomas Huth         if (!kvm_valid_page_size(smmu_info.flags, max_cpu_page_size,
4624656e1f0SBenjamin Herrenschmidt                                  ksps->page_shift)) {
4634656e1f0SBenjamin Herrenschmidt             continue;
4644656e1f0SBenjamin Herrenschmidt         }
4654656e1f0SBenjamin Herrenschmidt         qsps->page_shift = ksps->page_shift;
4664656e1f0SBenjamin Herrenschmidt         qsps->slb_enc = ksps->slb_enc;
4674656e1f0SBenjamin Herrenschmidt         for (jk = jq = 0; jk < KVM_PPC_PAGE_SIZES_MAX_SZ; jk++) {
468df587133SThomas Huth             if (!kvm_valid_page_size(smmu_info.flags, max_cpu_page_size,
4694656e1f0SBenjamin Herrenschmidt                                      ksps->enc[jk].page_shift)) {
4704656e1f0SBenjamin Herrenschmidt                 continue;
4714656e1f0SBenjamin Herrenschmidt             }
4720d594f55SThomas Huth             if (ksps->enc[jk].page_shift == 16) {
4730d594f55SThomas Huth                 has_64k_pages = true;
4740d594f55SThomas Huth             }
4754656e1f0SBenjamin Herrenschmidt             qsps->enc[jq].page_shift = ksps->enc[jk].page_shift;
4764656e1f0SBenjamin Herrenschmidt             qsps->enc[jq].pte_enc = ksps->enc[jk].pte_enc;
4774656e1f0SBenjamin Herrenschmidt             if (++jq >= PPC_PAGE_SIZES_MAX_SZ) {
4784656e1f0SBenjamin Herrenschmidt                 break;
4794656e1f0SBenjamin Herrenschmidt             }
4804656e1f0SBenjamin Herrenschmidt         }
4814656e1f0SBenjamin Herrenschmidt         if (++iq >= PPC_PAGE_SIZES_MAX_SZ) {
4824656e1f0SBenjamin Herrenschmidt             break;
4834656e1f0SBenjamin Herrenschmidt         }
4844656e1f0SBenjamin Herrenschmidt     }
4854656e1f0SBenjamin Herrenschmidt     env->slb_nr = smmu_info.slb_size;
48608215d8fSAlexander Graf     if (!(smmu_info.flags & KVM_PPC_1T_SEGMENTS)) {
4874656e1f0SBenjamin Herrenschmidt         env->mmu_model &= ~POWERPC_MMU_1TSEG;
4884656e1f0SBenjamin Herrenschmidt     }
4890d594f55SThomas Huth     if (!has_64k_pages) {
4900d594f55SThomas Huth         env->mmu_model &= ~POWERPC_MMU_64K;
4910d594f55SThomas Huth     }
4924656e1f0SBenjamin Herrenschmidt }
493df587133SThomas Huth 
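/* Check that the page size of the given memory backend is at least as large
 * as the biggest page size the guest CPU may use. */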
494ec69355bSGreg Kurz bool kvmppc_is_mem_backend_page_size_ok(const char *obj_path)
495df587133SThomas Huth {
496df587133SThomas Huth     Object *mem_obj = object_resolve_path(obj_path, NULL);
497df587133SThomas Huth     char *mempath = object_property_get_str(mem_obj, "mem-path", NULL);
498df587133SThomas Huth     long pagesize;
499df587133SThomas Huth 
500df587133SThomas Huth     if (mempath) {
5019c607668SAlexey Kardashevskiy         pagesize = qemu_mempath_getpagesize(mempath);
5022d3e302eSGreg Kurz         g_free(mempath);
503df587133SThomas Huth     } else {
504df587133SThomas Huth         pagesize = getpagesize();
505df587133SThomas Huth     }
506df587133SThomas Huth 
507df587133SThomas Huth     return pagesize >= max_cpu_page_size;
508df587133SThomas Huth }
509df587133SThomas Huth 
5104656e1f0SBenjamin Herrenschmidt #else /* defined (TARGET_PPC64) */
5114656e1f0SBenjamin Herrenschmidt 
512a60f24b5SAndreas Färber static inline void kvm_fixup_page_sizes(PowerPCCPU *cpu)
5134656e1f0SBenjamin Herrenschmidt {
5144656e1f0SBenjamin Herrenschmidt }
5154656e1f0SBenjamin Herrenschmidt 
516ec69355bSGreg Kurz bool kvmppc_is_mem_backend_page_size_ok(const char *obj_path)
517df587133SThomas Huth {
518df587133SThomas Huth     return true;
519df587133SThomas Huth }
520df587133SThomas Huth 
5214656e1f0SBenjamin Herrenschmidt #endif /* !defined (TARGET_PPC64) */
5224656e1f0SBenjamin Herrenschmidt 
523b164e48eSEduardo Habkost unsigned long kvm_arch_vcpu_id(CPUState *cpu)
524b164e48eSEduardo Habkost {
5252e886fb3SSam Bobroff     return POWERPC_CPU(cpu)->vcpu_id;
526b164e48eSEduardo Habkost }
527b164e48eSEduardo Habkost 
52888365d17SBharat Bhushan /* e500 supports 2 h/w breakpoints and 2 watchpoints.
52988365d17SBharat Bhushan  * book3s supports only 1 watchpoint, so array size
53088365d17SBharat Bhushan  * of 4 is sufficient for now.
53188365d17SBharat Bhushan  */
53288365d17SBharat Bhushan #define MAX_HW_BKPTS 4
53388365d17SBharat Bhushan 
53488365d17SBharat Bhushan static struct HWBreakpoint {
53588365d17SBharat Bhushan     target_ulong addr;
53688365d17SBharat Bhushan     int type;
53788365d17SBharat Bhushan } hw_debug_points[MAX_HW_BKPTS];
53888365d17SBharat Bhushan 
53988365d17SBharat Bhushan static CPUWatchpoint hw_watchpoint;
54088365d17SBharat Bhushan 
54188365d17SBharat Bhushan /* By default, no breakpoints or watchpoints are supported */
54288365d17SBharat Bhushan static int max_hw_breakpoint;
54388365d17SBharat Bhushan static int max_hw_watchpoint;
54488365d17SBharat Bhushan static int nb_hw_breakpoint;
54588365d17SBharat Bhushan static int nb_hw_watchpoint;
54688365d17SBharat Bhushan 
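/* Record how many hardware breakpoints/watchpoints this CPU family provides;
 * currently only BookE advertises any. */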
54788365d17SBharat Bhushan static void kvmppc_hw_debug_points_init(CPUPPCState *cenv)
54888365d17SBharat Bhushan {
54988365d17SBharat Bhushan     if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
55088365d17SBharat Bhushan         max_hw_breakpoint = 2;
55188365d17SBharat Bhushan         max_hw_watchpoint = 2;
55288365d17SBharat Bhushan     }
55388365d17SBharat Bhushan 
55488365d17SBharat Bhushan     if ((max_hw_breakpoint + max_hw_watchpoint) > MAX_HW_BKPTS) {
55588365d17SBharat Bhushan         fprintf(stderr, "Error initializing h/w breakpoints\n");
55688365d17SBharat Bhushan         return;
55788365d17SBharat Bhushan     }
55888365d17SBharat Bhushan }
55988365d17SBharat Bhushan 
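/* Per-vCPU initialisation: fix up page sizes, sync sregs with KVM, start the
 * idle-kick timer and do MMU-model specific setup. */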
56020d695a9SAndreas Färber int kvm_arch_init_vcpu(CPUState *cs)
5615666ca4aSScott Wood {
56220d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
56320d695a9SAndreas Färber     CPUPPCState *cenv = &cpu->env;
5645666ca4aSScott Wood     int ret;
5655666ca4aSScott Wood 
5664656e1f0SBenjamin Herrenschmidt     /* Gather server mmu info from KVM and update the CPU state */
567a60f24b5SAndreas Färber     kvm_fixup_page_sizes(cpu);
5684656e1f0SBenjamin Herrenschmidt 
5694656e1f0SBenjamin Herrenschmidt     /* Synchronize sregs with kvm */
5701bc22652SAndreas Färber     ret = kvm_arch_sync_sregs(cpu);
5715666ca4aSScott Wood     if (ret) {
572388e47c7SThomas Huth         if (ret == -EINVAL) {
573388e47c7SThomas Huth             error_report("Register sync failed... If you're using kvm-hv.ko,"
574388e47c7SThomas Huth                          " only \"-cpu host\" is possible");
575388e47c7SThomas Huth         }
5765666ca4aSScott Wood         return ret;
5775666ca4aSScott Wood     }
578861bbc80SAlexander Graf 
579bc72ad67SAlex Bligh     idle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, kvm_kick_cpu, cpu);
580c821c2bdSAlexander Graf 
58193dd5e85SScott Wood     switch (cenv->mmu_model) {
58293dd5e85SScott Wood     case POWERPC_MMU_BOOKE206:
5837f516c96SThomas Huth         /* This target supports access to KVM's guest TLB */
5841bc22652SAndreas Färber         ret = kvm_booke206_tlb_init(cpu);
58593dd5e85SScott Wood         break;
5867f516c96SThomas Huth     case POWERPC_MMU_2_07:
5877f516c96SThomas Huth         if (!cap_htm && !kvmppc_is_pr(cs->kvm_state)) {
5887f516c96SThomas Huth             /* KVM-HV has transactional memory on POWER8 even without the
589f3d9f303SSam Bobroff              * KVM_CAP_PPC_HTM extension, so enable it here instead as
590f3d9f303SSam Bobroff              * long as it's available to userspace on the host. */
591f3d9f303SSam Bobroff             if (qemu_getauxval(AT_HWCAP2) & PPC_FEATURE2_HAS_HTM) {
5927f516c96SThomas Huth                 cap_htm = true;
5937f516c96SThomas Huth             }
594f3d9f303SSam Bobroff         }
5957f516c96SThomas Huth         break;
59693dd5e85SScott Wood     default:
59793dd5e85SScott Wood         break;
59893dd5e85SScott Wood     }
59993dd5e85SScott Wood 
6003c902d44SBharat Bhushan     kvm_get_one_reg(cs, KVM_REG_PPC_DEBUG_INST, &debug_inst_opcode);
60188365d17SBharat Bhushan     kvmppc_hw_debug_points_init(cenv);
6023c902d44SBharat Bhushan 
603861bbc80SAlexander Graf     return ret;
604d76d1650Saurel32 }
605d76d1650Saurel32 
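/* Write the entire shadow TLB back to KVM by marking every entry dirty. */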
6061bc22652SAndreas Färber static void kvm_sw_tlb_put(PowerPCCPU *cpu)
60793dd5e85SScott Wood {
6081bc22652SAndreas Färber     CPUPPCState *env = &cpu->env;
6091bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
61093dd5e85SScott Wood     struct kvm_dirty_tlb dirty_tlb;
61193dd5e85SScott Wood     unsigned char *bitmap;
61293dd5e85SScott Wood     int ret;
61393dd5e85SScott Wood 
61493dd5e85SScott Wood     if (!env->kvm_sw_tlb) {
61593dd5e85SScott Wood         return;
61693dd5e85SScott Wood     }
61793dd5e85SScott Wood 
61893dd5e85SScott Wood     bitmap = g_malloc((env->nb_tlb + 7) / 8);
61993dd5e85SScott Wood     memset(bitmap, 0xFF, (env->nb_tlb + 7) / 8);
62093dd5e85SScott Wood 
62193dd5e85SScott Wood     dirty_tlb.bitmap = (uintptr_t)bitmap;
62293dd5e85SScott Wood     dirty_tlb.num_dirty = env->nb_tlb;
62393dd5e85SScott Wood 
6241bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_DIRTY_TLB, &dirty_tlb);
62593dd5e85SScott Wood     if (ret) {
62693dd5e85SScott Wood         fprintf(stderr, "%s: KVM_DIRTY_TLB: %s\n",
62793dd5e85SScott Wood                 __func__, strerror(-ret));
62893dd5e85SScott Wood     }
62993dd5e85SScott Wood 
63093dd5e85SScott Wood     g_free(bitmap);
63193dd5e85SScott Wood }
63293dd5e85SScott Wood 
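/* Read a single SPR from KVM into env->spr[] via the ONE_REG interface. */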
633d67d40eaSDavid Gibson static void kvm_get_one_spr(CPUState *cs, uint64_t id, int spr)
634d67d40eaSDavid Gibson {
635d67d40eaSDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
636d67d40eaSDavid Gibson     CPUPPCState *env = &cpu->env;
637d67d40eaSDavid Gibson     union {
638d67d40eaSDavid Gibson         uint32_t u32;
639d67d40eaSDavid Gibson         uint64_t u64;
640d67d40eaSDavid Gibson     } val;
641d67d40eaSDavid Gibson     struct kvm_one_reg reg = {
642d67d40eaSDavid Gibson         .id = id,
643d67d40eaSDavid Gibson         .addr = (uintptr_t) &val,
644d67d40eaSDavid Gibson     };
645d67d40eaSDavid Gibson     int ret;
646d67d40eaSDavid Gibson 
647d67d40eaSDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
648d67d40eaSDavid Gibson     if (ret != 0) {
649b36f100eSAlexey Kardashevskiy         trace_kvm_failed_spr_get(spr, strerror(errno));
650d67d40eaSDavid Gibson     } else {
651d67d40eaSDavid Gibson         switch (id & KVM_REG_SIZE_MASK) {
652d67d40eaSDavid Gibson         case KVM_REG_SIZE_U32:
653d67d40eaSDavid Gibson             env->spr[spr] = val.u32;
654d67d40eaSDavid Gibson             break;
655d67d40eaSDavid Gibson 
656d67d40eaSDavid Gibson         case KVM_REG_SIZE_U64:
657d67d40eaSDavid Gibson             env->spr[spr] = val.u64;
658d67d40eaSDavid Gibson             break;
659d67d40eaSDavid Gibson 
660d67d40eaSDavid Gibson         default:
661d67d40eaSDavid Gibson             /* Don't handle this size yet */
662d67d40eaSDavid Gibson             abort();
663d67d40eaSDavid Gibson         }
664d67d40eaSDavid Gibson     }
665d67d40eaSDavid Gibson }
666d67d40eaSDavid Gibson 
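/* Write a single SPR from env->spr[] to KVM via the ONE_REG interface. */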
667d67d40eaSDavid Gibson static void kvm_put_one_spr(CPUState *cs, uint64_t id, int spr)
668d67d40eaSDavid Gibson {
669d67d40eaSDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
670d67d40eaSDavid Gibson     CPUPPCState *env = &cpu->env;
671d67d40eaSDavid Gibson     union {
672d67d40eaSDavid Gibson         uint32_t u32;
673d67d40eaSDavid Gibson         uint64_t u64;
674d67d40eaSDavid Gibson     } val;
675d67d40eaSDavid Gibson     struct kvm_one_reg reg = {
676d67d40eaSDavid Gibson         .id = id,
677d67d40eaSDavid Gibson         .addr = (uintptr_t) &val,
678d67d40eaSDavid Gibson     };
679d67d40eaSDavid Gibson     int ret;
680d67d40eaSDavid Gibson 
681d67d40eaSDavid Gibson     switch (id & KVM_REG_SIZE_MASK) {
682d67d40eaSDavid Gibson     case KVM_REG_SIZE_U32:
683d67d40eaSDavid Gibson         val.u32 = env->spr[spr];
684d67d40eaSDavid Gibson         break;
685d67d40eaSDavid Gibson 
686d67d40eaSDavid Gibson     case KVM_REG_SIZE_U64:
687d67d40eaSDavid Gibson         val.u64 = env->spr[spr];
688d67d40eaSDavid Gibson         break;
689d67d40eaSDavid Gibson 
690d67d40eaSDavid Gibson     default:
691d67d40eaSDavid Gibson         /* Don't handle this size yet */
692d67d40eaSDavid Gibson         abort();
693d67d40eaSDavid Gibson     }
694d67d40eaSDavid Gibson 
695d67d40eaSDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
696d67d40eaSDavid Gibson     if (ret != 0) {
697b36f100eSAlexey Kardashevskiy         trace_kvm_failed_spr_set(spr, strerror(errno));
698d67d40eaSDavid Gibson     }
699d67d40eaSDavid Gibson }
700d67d40eaSDavid Gibson 
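/* Push the floating point/VSX and Altivec register state to KVM, when the
 * CPU implements those units. */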
70170b79849SDavid Gibson static int kvm_put_fp(CPUState *cs)
70270b79849SDavid Gibson {
70370b79849SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
70470b79849SDavid Gibson     CPUPPCState *env = &cpu->env;
70570b79849SDavid Gibson     struct kvm_one_reg reg;
70670b79849SDavid Gibson     int i;
70770b79849SDavid Gibson     int ret;
70870b79849SDavid Gibson 
70970b79849SDavid Gibson     if (env->insns_flags & PPC_FLOAT) {
71070b79849SDavid Gibson         uint64_t fpscr = env->fpscr;
71170b79849SDavid Gibson         bool vsx = !!(env->insns_flags2 & PPC2_VSX);
71270b79849SDavid Gibson 
71370b79849SDavid Gibson         reg.id = KVM_REG_PPC_FPSCR;
71470b79849SDavid Gibson         reg.addr = (uintptr_t)&fpscr;
71570b79849SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
71670b79849SDavid Gibson         if (ret < 0) {
717da56ff91SPeter Maydell             DPRINTF("Unable to set FPSCR to KVM: %s\n", strerror(errno));
71870b79849SDavid Gibson             return ret;
71970b79849SDavid Gibson         }
72070b79849SDavid Gibson 
72170b79849SDavid Gibson         for (i = 0; i < 32; i++) {
72270b79849SDavid Gibson             uint64_t vsr[2];
72370b79849SDavid Gibson 
7243a4b791bSGreg Kurz #ifdef HOST_WORDS_BIGENDIAN
72570b79849SDavid Gibson             vsr[0] = float64_val(env->fpr[i]);
72670b79849SDavid Gibson             vsr[1] = env->vsr[i];
7273a4b791bSGreg Kurz #else
7283a4b791bSGreg Kurz             vsr[0] = env->vsr[i];
7293a4b791bSGreg Kurz             vsr[1] = float64_val(env->fpr[i]);
7303a4b791bSGreg Kurz #endif
73170b79849SDavid Gibson             reg.addr = (uintptr_t) &vsr;
73270b79849SDavid Gibson             reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);
73370b79849SDavid Gibson 
73470b79849SDavid Gibson             ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
73570b79849SDavid Gibson             if (ret < 0) {
736da56ff91SPeter Maydell                 DPRINTF("Unable to set %s%d to KVM: %s\n", vsx ? "VSR" : "FPR",
73770b79849SDavid Gibson                         i, strerror(errno));
73870b79849SDavid Gibson                 return ret;
73970b79849SDavid Gibson             }
74070b79849SDavid Gibson         }
74170b79849SDavid Gibson     }
74270b79849SDavid Gibson 
74370b79849SDavid Gibson     if (env->insns_flags & PPC_ALTIVEC) {
74470b79849SDavid Gibson         reg.id = KVM_REG_PPC_VSCR;
74570b79849SDavid Gibson         reg.addr = (uintptr_t)&env->vscr;
74670b79849SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
74770b79849SDavid Gibson         if (ret < 0) {
748da56ff91SPeter Maydell             DPRINTF("Unable to set VSCR to KVM: %s\n", strerror(errno));
74970b79849SDavid Gibson             return ret;
75070b79849SDavid Gibson         }
75170b79849SDavid Gibson 
75270b79849SDavid Gibson         for (i = 0; i < 32; i++) {
75370b79849SDavid Gibson             reg.id = KVM_REG_PPC_VR(i);
75470b79849SDavid Gibson             reg.addr = (uintptr_t)&env->avr[i];
75570b79849SDavid Gibson             ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
75670b79849SDavid Gibson             if (ret < 0) {
757da56ff91SPeter Maydell                 DPRINTF("Unable to set VR%d to KVM: %s\n", i, strerror(errno));
75870b79849SDavid Gibson                 return ret;
75970b79849SDavid Gibson             }
76070b79849SDavid Gibson         }
76170b79849SDavid Gibson     }
76270b79849SDavid Gibson 
76370b79849SDavid Gibson     return 0;
76470b79849SDavid Gibson }
76570b79849SDavid Gibson 
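/* Fetch the floating point/VSX and Altivec register state from KVM, when the
 * CPU implements those units. */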
76670b79849SDavid Gibson static int kvm_get_fp(CPUState *cs)
76770b79849SDavid Gibson {
76870b79849SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
76970b79849SDavid Gibson     CPUPPCState *env = &cpu->env;
77070b79849SDavid Gibson     struct kvm_one_reg reg;
77170b79849SDavid Gibson     int i;
77270b79849SDavid Gibson     int ret;
77370b79849SDavid Gibson 
77470b79849SDavid Gibson     if (env->insns_flags & PPC_FLOAT) {
77570b79849SDavid Gibson         uint64_t fpscr;
77670b79849SDavid Gibson         bool vsx = !!(env->insns_flags2 & PPC2_VSX);
77770b79849SDavid Gibson 
77870b79849SDavid Gibson         reg.id = KVM_REG_PPC_FPSCR;
77970b79849SDavid Gibson         reg.addr = (uintptr_t)&fpscr;
78070b79849SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
78170b79849SDavid Gibson         if (ret < 0) {
782da56ff91SPeter Maydell             DPRINTF("Unable to get FPSCR from KVM: %s\n", strerror(errno));
78370b79849SDavid Gibson             return ret;
78470b79849SDavid Gibson         } else {
78570b79849SDavid Gibson             env->fpscr = fpscr;
78670b79849SDavid Gibson         }
78770b79849SDavid Gibson 
78870b79849SDavid Gibson         for (i = 0; i < 32; i++) {
78970b79849SDavid Gibson             uint64_t vsr[2];
79070b79849SDavid Gibson 
79170b79849SDavid Gibson             reg.addr = (uintptr_t) &vsr;
79270b79849SDavid Gibson             reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);
79370b79849SDavid Gibson 
79470b79849SDavid Gibson             ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
79570b79849SDavid Gibson             if (ret < 0) {
796da56ff91SPeter Maydell                 DPRINTF("Unable to get %s%d from KVM: %s\n",
79770b79849SDavid Gibson                         vsx ? "VSR" : "FPR", i, strerror(errno));
79870b79849SDavid Gibson                 return ret;
79970b79849SDavid Gibson             } else {
8003a4b791bSGreg Kurz #ifdef HOST_WORDS_BIGENDIAN
80170b79849SDavid Gibson                 env->fpr[i] = vsr[0];
80270b79849SDavid Gibson                 if (vsx) {
80370b79849SDavid Gibson                     env->vsr[i] = vsr[1];
80470b79849SDavid Gibson                 }
8053a4b791bSGreg Kurz #else
8063a4b791bSGreg Kurz                 env->fpr[i] = vsr[1];
8073a4b791bSGreg Kurz                 if (vsx) {
8083a4b791bSGreg Kurz                     env->vsr[i] = vsr[0];
8093a4b791bSGreg Kurz                 }
8103a4b791bSGreg Kurz #endif
81170b79849SDavid Gibson             }
81270b79849SDavid Gibson         }
81370b79849SDavid Gibson     }
81470b79849SDavid Gibson 
81570b79849SDavid Gibson     if (env->insns_flags & PPC_ALTIVEC) {
81670b79849SDavid Gibson         reg.id = KVM_REG_PPC_VSCR;
81770b79849SDavid Gibson         reg.addr = (uintptr_t)&env->vscr;
81870b79849SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
81970b79849SDavid Gibson         if (ret < 0) {
820da56ff91SPeter Maydell             DPRINTF("Unable to get VSCR from KVM: %s\n", strerror(errno));
82170b79849SDavid Gibson             return ret;
82270b79849SDavid Gibson         }
82370b79849SDavid Gibson 
82470b79849SDavid Gibson         for (i = 0; i < 32; i++) {
82570b79849SDavid Gibson             reg.id = KVM_REG_PPC_VR(i);
82670b79849SDavid Gibson             reg.addr = (uintptr_t)&env->avr[i];
82770b79849SDavid Gibson             ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
82870b79849SDavid Gibson             if (ret < 0) {
829da56ff91SPeter Maydell                 DPRINTF("Unable to get VR%d from KVM: %s\n",
83070b79849SDavid Gibson                         i, strerror(errno));
83170b79849SDavid Gibson                 return ret;
83270b79849SDavid Gibson             }
83370b79849SDavid Gibson         }
83470b79849SDavid Gibson     }
83570b79849SDavid Gibson 
83670b79849SDavid Gibson     return 0;
83770b79849SDavid Gibson }
83870b79849SDavid Gibson 
8399b00ea49SDavid Gibson #if defined(TARGET_PPC64)
8409b00ea49SDavid Gibson static int kvm_get_vpa(CPUState *cs)
8419b00ea49SDavid Gibson {
8429b00ea49SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
8439b00ea49SDavid Gibson     CPUPPCState *env = &cpu->env;
8449b00ea49SDavid Gibson     struct kvm_one_reg reg;
8459b00ea49SDavid Gibson     int ret;
8469b00ea49SDavid Gibson 
8479b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_ADDR;
8489b00ea49SDavid Gibson     reg.addr = (uintptr_t)&env->vpa_addr;
8499b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
8509b00ea49SDavid Gibson     if (ret < 0) {
851da56ff91SPeter Maydell         DPRINTF("Unable to get VPA address from KVM: %s\n", strerror(errno));
8529b00ea49SDavid Gibson         return ret;
8539b00ea49SDavid Gibson     }
8549b00ea49SDavid Gibson 
8559b00ea49SDavid Gibson     assert((uintptr_t)&env->slb_shadow_size
8569b00ea49SDavid Gibson            == ((uintptr_t)&env->slb_shadow_addr + 8));
8579b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_SLB;
8589b00ea49SDavid Gibson     reg.addr = (uintptr_t)&env->slb_shadow_addr;
8599b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
8609b00ea49SDavid Gibson     if (ret < 0) {
861da56ff91SPeter Maydell         DPRINTF("Unable to get SLB shadow state from KVM: %s\n",
8629b00ea49SDavid Gibson                 strerror(errno));
8639b00ea49SDavid Gibson         return ret;
8649b00ea49SDavid Gibson     }
8659b00ea49SDavid Gibson 
8669b00ea49SDavid Gibson     assert((uintptr_t)&env->dtl_size == ((uintptr_t)&env->dtl_addr + 8));
8679b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_DTL;
8689b00ea49SDavid Gibson     reg.addr = (uintptr_t)&env->dtl_addr;
8699b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
8709b00ea49SDavid Gibson     if (ret < 0) {
871da56ff91SPeter Maydell         DPRINTF("Unable to get dispatch trace log state from KVM: %s\n",
8729b00ea49SDavid Gibson                 strerror(errno));
8739b00ea49SDavid Gibson         return ret;
8749b00ea49SDavid Gibson     }
8759b00ea49SDavid Gibson 
8769b00ea49SDavid Gibson     return 0;
8779b00ea49SDavid Gibson }
8789b00ea49SDavid Gibson 
8799b00ea49SDavid Gibson static int kvm_put_vpa(CPUState *cs)
8809b00ea49SDavid Gibson {
8819b00ea49SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
8829b00ea49SDavid Gibson     CPUPPCState *env = &cpu->env;
8839b00ea49SDavid Gibson     struct kvm_one_reg reg;
8849b00ea49SDavid Gibson     int ret;
8859b00ea49SDavid Gibson 
8869b00ea49SDavid Gibson     /* SLB shadow or DTL can't be registered unless a master VPA is
8879b00ea49SDavid Gibson      * registered.  That means when restoring state, if a VPA *is*
8889b00ea49SDavid Gibson      * registered, we need to set that up first.  If not, we need to
8899b00ea49SDavid Gibson      * deregister the others before deregistering the master VPA */
8909b00ea49SDavid Gibson     assert(env->vpa_addr || !(env->slb_shadow_addr || env->dtl_addr));
8919b00ea49SDavid Gibson 
8929b00ea49SDavid Gibson     if (env->vpa_addr) {
8939b00ea49SDavid Gibson         reg.id = KVM_REG_PPC_VPA_ADDR;
8949b00ea49SDavid Gibson         reg.addr = (uintptr_t)&env->vpa_addr;
8959b00ea49SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
8969b00ea49SDavid Gibson         if (ret < 0) {
897da56ff91SPeter Maydell             DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno));
8989b00ea49SDavid Gibson             return ret;
8999b00ea49SDavid Gibson         }
9009b00ea49SDavid Gibson     }
9019b00ea49SDavid Gibson 
9029b00ea49SDavid Gibson     assert((uintptr_t)&env->slb_shadow_size
9039b00ea49SDavid Gibson            == ((uintptr_t)&env->slb_shadow_addr + 8));
9049b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_SLB;
9059b00ea49SDavid Gibson     reg.addr = (uintptr_t)&env->slb_shadow_addr;
9069b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
9079b00ea49SDavid Gibson     if (ret < 0) {
908da56ff91SPeter Maydell         DPRINTF("Unable to set SLB shadow state to KVM: %s\n", strerror(errno));
9099b00ea49SDavid Gibson         return ret;
9109b00ea49SDavid Gibson     }
9119b00ea49SDavid Gibson 
9129b00ea49SDavid Gibson     assert((uintptr_t)&env->dtl_size == ((uintptr_t)&env->dtl_addr + 8));
9139b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_DTL;
9149b00ea49SDavid Gibson     reg.addr = (uintptr_t)&env->dtl_addr;
9159b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
9169b00ea49SDavid Gibson     if (ret < 0) {
917da56ff91SPeter Maydell         DPRINTF("Unable to set dispatch trace log state to KVM: %s\n",
9189b00ea49SDavid Gibson                 strerror(errno));
9199b00ea49SDavid Gibson         return ret;
9209b00ea49SDavid Gibson     }
9219b00ea49SDavid Gibson 
9229b00ea49SDavid Gibson     if (!env->vpa_addr) {
9239b00ea49SDavid Gibson         reg.id = KVM_REG_PPC_VPA_ADDR;
9249b00ea49SDavid Gibson         reg.addr = (uintptr_t)&env->vpa_addr;
9259b00ea49SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
9269b00ea49SDavid Gibson         if (ret < 0) {
927da56ff91SPeter Maydell             DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno));
9289b00ea49SDavid Gibson             return ret;
9299b00ea49SDavid Gibson         }
9309b00ea49SDavid Gibson     }
9319b00ea49SDavid Gibson 
9329b00ea49SDavid Gibson     return 0;
9339b00ea49SDavid Gibson }
9349b00ea49SDavid Gibson #endif /* TARGET_PPC64 */
9359b00ea49SDavid Gibson 
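/* Push the Book3S MMU state (SDR1 or the vhyp-provided HPT encoding, SLB
 * entries, segment registers and BATs) to KVM via KVM_SET_SREGS. */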
936e5c0d3ceSDavid Gibson int kvmppc_put_books_sregs(PowerPCCPU *cpu)
937a7a00a72SDavid Gibson {
938a7a00a72SDavid Gibson     CPUPPCState *env = &cpu->env;
939a7a00a72SDavid Gibson     struct kvm_sregs sregs;
940a7a00a72SDavid Gibson     int i;
941a7a00a72SDavid Gibson 
942a7a00a72SDavid Gibson     sregs.pvr = env->spr[SPR_PVR];
943a7a00a72SDavid Gibson 
944*1ec26c75SGreg Kurz     if (cpu->vhyp) {
945*1ec26c75SGreg Kurz         PPCVirtualHypervisorClass *vhc =
946*1ec26c75SGreg Kurz             PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
947*1ec26c75SGreg Kurz         sregs.u.s.sdr1 = vhc->encode_hpt_for_kvm_pr(cpu->vhyp);
948*1ec26c75SGreg Kurz     } else {
949a7a00a72SDavid Gibson         sregs.u.s.sdr1 = env->spr[SPR_SDR1];
950*1ec26c75SGreg Kurz     }
951a7a00a72SDavid Gibson 
952a7a00a72SDavid Gibson     /* Sync SLB */
953a7a00a72SDavid Gibson #ifdef TARGET_PPC64
954a7a00a72SDavid Gibson     for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
955a7a00a72SDavid Gibson         sregs.u.s.ppc64.slb[i].slbe = env->slb[i].esid;
956a7a00a72SDavid Gibson         if (env->slb[i].esid & SLB_ESID_V) {
957a7a00a72SDavid Gibson             sregs.u.s.ppc64.slb[i].slbe |= i;
958a7a00a72SDavid Gibson         }
959a7a00a72SDavid Gibson         sregs.u.s.ppc64.slb[i].slbv = env->slb[i].vsid;
960a7a00a72SDavid Gibson     }
961a7a00a72SDavid Gibson #endif
962a7a00a72SDavid Gibson 
963a7a00a72SDavid Gibson     /* Sync SRs */
964a7a00a72SDavid Gibson     for (i = 0; i < 16; i++) {
965a7a00a72SDavid Gibson         sregs.u.s.ppc32.sr[i] = env->sr[i];
966a7a00a72SDavid Gibson     }
967a7a00a72SDavid Gibson 
968a7a00a72SDavid Gibson     /* Sync BATs */
969a7a00a72SDavid Gibson     for (i = 0; i < 8; i++) {
970a7a00a72SDavid Gibson         /* Beware. We have to combine the upper and lower 32-bit halves here */
971a7a00a72SDavid Gibson         sregs.u.s.ppc32.dbat[i] = ((uint64_t)env->DBAT[0][i] << 32)
972a7a00a72SDavid Gibson             | env->DBAT[1][i];
973a7a00a72SDavid Gibson         sregs.u.s.ppc32.ibat[i] = ((uint64_t)env->IBAT[0][i] << 32)
974a7a00a72SDavid Gibson             | env->IBAT[1][i];
975a7a00a72SDavid Gibson     }
976a7a00a72SDavid Gibson 
977a7a00a72SDavid Gibson     return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
978a7a00a72SDavid Gibson }
979a7a00a72SDavid Gibson 
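/* Copy QEMU's CPU state into KVM.  How much is transferred depends on
 * 'level': sregs and HIOR are only written for KVM_PUT_RESET_STATE and up. */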
98020d695a9SAndreas Färber int kvm_arch_put_registers(CPUState *cs, int level)
981d76d1650Saurel32 {
98220d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
98320d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
984d76d1650Saurel32     struct kvm_regs regs;
985d76d1650Saurel32     int ret;
986d76d1650Saurel32     int i;
987d76d1650Saurel32 
9881bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
9891bc22652SAndreas Färber     if (ret < 0) {
990d76d1650Saurel32         return ret;
9911bc22652SAndreas Färber     }
992d76d1650Saurel32 
993d76d1650Saurel32     regs.ctr = env->ctr;
994d76d1650Saurel32     regs.lr  = env->lr;
995da91a00fSRichard Henderson     regs.xer = cpu_read_xer(env);
996d76d1650Saurel32     regs.msr = env->msr;
997d76d1650Saurel32     regs.pc = env->nip;
998d76d1650Saurel32 
999d76d1650Saurel32     regs.srr0 = env->spr[SPR_SRR0];
1000d76d1650Saurel32     regs.srr1 = env->spr[SPR_SRR1];
1001d76d1650Saurel32 
1002d76d1650Saurel32     regs.sprg0 = env->spr[SPR_SPRG0];
1003d76d1650Saurel32     regs.sprg1 = env->spr[SPR_SPRG1];
1004d76d1650Saurel32     regs.sprg2 = env->spr[SPR_SPRG2];
1005d76d1650Saurel32     regs.sprg3 = env->spr[SPR_SPRG3];
1006d76d1650Saurel32     regs.sprg4 = env->spr[SPR_SPRG4];
1007d76d1650Saurel32     regs.sprg5 = env->spr[SPR_SPRG5];
1008d76d1650Saurel32     regs.sprg6 = env->spr[SPR_SPRG6];
1009d76d1650Saurel32     regs.sprg7 = env->spr[SPR_SPRG7];
1010d76d1650Saurel32 
101190dc8812SScott Wood     regs.pid = env->spr[SPR_BOOKE_PID];
101290dc8812SScott Wood 
1013d76d1650Saurel32     for (i = 0; i < 32; i++)
1014d76d1650Saurel32         regs.gpr[i] = env->gpr[i];
1015d76d1650Saurel32 
10164bddaf55SAlexey Kardashevskiy     regs.cr = 0;
10174bddaf55SAlexey Kardashevskiy     for (i = 0; i < 8; i++) {
10184bddaf55SAlexey Kardashevskiy         regs.cr |= (env->crf[i] & 15) << (4 * (7 - i));
10194bddaf55SAlexey Kardashevskiy     }
10204bddaf55SAlexey Kardashevskiy 
10211bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
1022d76d1650Saurel32     if (ret < 0) {
1023d76d1650Saurel32         return ret;
    }
1024d76d1650Saurel32 
102570b79849SDavid Gibson     kvm_put_fp(cs);
102670b79849SDavid Gibson 
102793dd5e85SScott Wood     if (env->tlb_dirty) {
10281bc22652SAndreas Färber         kvm_sw_tlb_put(cpu);
102993dd5e85SScott Wood         env->tlb_dirty = false;
103093dd5e85SScott Wood     }
103193dd5e85SScott Wood 
1032f1af19d7SDavid Gibson     if (cap_segstate && (level >= KVM_PUT_RESET_STATE)) {
1033a7a00a72SDavid Gibson         ret = kvmppc_put_books_sregs(cpu);
1034a7a00a72SDavid Gibson         if (ret < 0) {
1035f1af19d7SDavid Gibson             return ret;
1036f1af19d7SDavid Gibson         }
1037f1af19d7SDavid Gibson     }
1038f1af19d7SDavid Gibson 
1039f1af19d7SDavid Gibson     if (cap_hior && (level >= KVM_PUT_RESET_STATE)) {
1040d67d40eaSDavid Gibson         kvm_put_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
1041d67d40eaSDavid Gibson     }
1042f1af19d7SDavid Gibson 
1043d67d40eaSDavid Gibson     if (cap_one_reg) {
1044d67d40eaSDavid Gibson         int i;
1045d67d40eaSDavid Gibson 
1046d67d40eaSDavid Gibson         /* We deliberately ignore errors here: for kernels which have
1047d67d40eaSDavid Gibson          * the ONE_REG calls but don't support the specific
1048d67d40eaSDavid Gibson          * registers, there's a reasonable chance things will still
1049d67d40eaSDavid Gibson          * work, at least until we try to migrate. */
1050d67d40eaSDavid Gibson         for (i = 0; i < 1024; i++) {
1051d67d40eaSDavid Gibson             uint64_t id = env->spr_cb[i].one_reg_id;
1052d67d40eaSDavid Gibson 
1053d67d40eaSDavid Gibson             if (id != 0) {
1054d67d40eaSDavid Gibson                 kvm_put_one_spr(cs, id, i);
1055d67d40eaSDavid Gibson             }
1056f1af19d7SDavid Gibson         }
10579b00ea49SDavid Gibson 
10589b00ea49SDavid Gibson #ifdef TARGET_PPC64
105980b3f79bSAlexey Kardashevskiy         if (msr_ts) {
106080b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
106180b3f79bSAlexey Kardashevskiy                 kvm_set_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
106280b3f79bSAlexey Kardashevskiy             }
106380b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
106480b3f79bSAlexey Kardashevskiy                 kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
106580b3f79bSAlexey Kardashevskiy             }
106680b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
106780b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
106880b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
106980b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
107080b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
107180b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
107280b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
107380b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
107480b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
107580b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
107680b3f79bSAlexey Kardashevskiy         }
107780b3f79bSAlexey Kardashevskiy 
10789b00ea49SDavid Gibson         if (cap_papr) {
10799b00ea49SDavid Gibson             if (kvm_put_vpa(cs) < 0) {
1080da56ff91SPeter Maydell                 DPRINTF("Warning: Unable to set VPA information to KVM\n");
10819b00ea49SDavid Gibson             }
10829b00ea49SDavid Gibson         }
108398a8b524SAlexey Kardashevskiy 
108498a8b524SAlexey Kardashevskiy         kvm_set_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
10859b00ea49SDavid Gibson #endif /* TARGET_PPC64 */
1086f1af19d7SDavid Gibson     }
1087f1af19d7SDavid Gibson 
1088d76d1650Saurel32     return ret;
1089d76d1650Saurel32 }
1090d76d1650Saurel32 
1091c371c2e3SBharat Bhushan static void kvm_sync_excp(CPUPPCState *env, int vector, int ivor)
1092c371c2e3SBharat Bhushan {
1093c371c2e3SBharat Bhushan      env->excp_vectors[vector] = env->spr[ivor] + env->spr[SPR_BOOKE_IVPR];
1094c371c2e3SBharat Bhushan }
1095c371c2e3SBharat Bhushan 
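/* Fetch the BookE special registers with KVM_GET_SREGS and copy the
 * fields advertised by the feature bits into env->spr[], keeping
 * QEMU's exception vectors in sync via kvm_sync_excp(). */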
1096a7a00a72SDavid Gibson static int kvmppc_get_booke_sregs(PowerPCCPU *cpu)
1097d76d1650Saurel32 {
109820d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
1099ba5e5090SAlexander Graf     struct kvm_sregs sregs;
1100a7a00a72SDavid Gibson     int ret;
1101d76d1650Saurel32 
1102a7a00a72SDavid Gibson     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
110390dc8812SScott Wood     if (ret < 0) {
110490dc8812SScott Wood         return ret;
110590dc8812SScott Wood     }
110690dc8812SScott Wood 
110790dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_BASE) {
110890dc8812SScott Wood         env->spr[SPR_BOOKE_CSRR0] = sregs.u.e.csrr0;
110990dc8812SScott Wood         env->spr[SPR_BOOKE_CSRR1] = sregs.u.e.csrr1;
111090dc8812SScott Wood         env->spr[SPR_BOOKE_ESR] = sregs.u.e.esr;
111190dc8812SScott Wood         env->spr[SPR_BOOKE_DEAR] = sregs.u.e.dear;
111290dc8812SScott Wood         env->spr[SPR_BOOKE_MCSR] = sregs.u.e.mcsr;
111390dc8812SScott Wood         env->spr[SPR_BOOKE_TSR] = sregs.u.e.tsr;
111490dc8812SScott Wood         env->spr[SPR_BOOKE_TCR] = sregs.u.e.tcr;
111590dc8812SScott Wood         env->spr[SPR_DECR] = sregs.u.e.dec;
111690dc8812SScott Wood         env->spr[SPR_TBL] = sregs.u.e.tb & 0xffffffff;
111790dc8812SScott Wood         env->spr[SPR_TBU] = sregs.u.e.tb >> 32;
111890dc8812SScott Wood         env->spr[SPR_VRSAVE] = sregs.u.e.vrsave;
111990dc8812SScott Wood     }
112090dc8812SScott Wood 
112190dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_ARCH206) {
112290dc8812SScott Wood         env->spr[SPR_BOOKE_PIR] = sregs.u.e.pir;
112390dc8812SScott Wood         env->spr[SPR_BOOKE_MCSRR0] = sregs.u.e.mcsrr0;
112490dc8812SScott Wood         env->spr[SPR_BOOKE_MCSRR1] = sregs.u.e.mcsrr1;
112590dc8812SScott Wood         env->spr[SPR_BOOKE_DECAR] = sregs.u.e.decar;
112690dc8812SScott Wood         env->spr[SPR_BOOKE_IVPR] = sregs.u.e.ivpr;
112790dc8812SScott Wood     }
112890dc8812SScott Wood 
112990dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_64) {
113090dc8812SScott Wood         env->spr[SPR_BOOKE_EPCR] = sregs.u.e.epcr;
113190dc8812SScott Wood     }
113290dc8812SScott Wood 
113390dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_SPRG8) {
113490dc8812SScott Wood         env->spr[SPR_BOOKE_SPRG8] = sregs.u.e.sprg8;
113590dc8812SScott Wood     }
113690dc8812SScott Wood 
113790dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_IVOR) {
113890dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0];
1139c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_CRITICAL,  SPR_BOOKE_IVOR0);
114090dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1];
1141c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_MCHECK,  SPR_BOOKE_IVOR1);
114290dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2];
1143c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DSI,  SPR_BOOKE_IVOR2);
114490dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3];
1145c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_ISI,  SPR_BOOKE_IVOR3);
114690dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4];
1147c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_EXTERNAL,  SPR_BOOKE_IVOR4);
114890dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5];
1149c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_ALIGN,  SPR_BOOKE_IVOR5);
115090dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6];
1151c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_PROGRAM,  SPR_BOOKE_IVOR6);
115290dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7];
1153c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_FPU,  SPR_BOOKE_IVOR7);
115490dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8];
1155c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_SYSCALL,  SPR_BOOKE_IVOR8);
115690dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9];
1157c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_APU,  SPR_BOOKE_IVOR9);
115890dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10];
1159c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DECR,  SPR_BOOKE_IVOR10);
116090dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11];
1161c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_FIT,  SPR_BOOKE_IVOR11);
116290dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12];
1163c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_WDT,  SPR_BOOKE_IVOR12);
116490dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13];
1165c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DTLB,  SPR_BOOKE_IVOR13);
116690dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14];
1167c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_ITLB,  SPR_BOOKE_IVOR14);
116890dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15];
1169c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DEBUG,  SPR_BOOKE_IVOR15);
117090dc8812SScott Wood 
117190dc8812SScott Wood         if (sregs.u.e.features & KVM_SREGS_E_SPE) {
117290dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0];
1173c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_SPEU,  SPR_BOOKE_IVOR32);
117490dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1];
1175c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_EFPDI,  SPR_BOOKE_IVOR33);
117690dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2];
1177c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_EFPRI,  SPR_BOOKE_IVOR34);
117890dc8812SScott Wood         }
117990dc8812SScott Wood 
118090dc8812SScott Wood         if (sregs.u.e.features & KVM_SREGS_E_PM) {
118190dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3];
1182c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_EPERFM,  SPR_BOOKE_IVOR35);
118390dc8812SScott Wood         }
118490dc8812SScott Wood 
118590dc8812SScott Wood         if (sregs.u.e.features & KVM_SREGS_E_PC) {
118690dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4];
1187c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_DOORI,  SPR_BOOKE_IVOR36);
118890dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5];
1189c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_DOORCI, SPR_BOOKE_IVOR37);
119090dc8812SScott Wood         }
119190dc8812SScott Wood     }
119290dc8812SScott Wood 
119390dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_ARCH206_MMU) {
119490dc8812SScott Wood         env->spr[SPR_BOOKE_MAS0] = sregs.u.e.mas0;
119590dc8812SScott Wood         env->spr[SPR_BOOKE_MAS1] = sregs.u.e.mas1;
119690dc8812SScott Wood         env->spr[SPR_BOOKE_MAS2] = sregs.u.e.mas2;
119790dc8812SScott Wood         env->spr[SPR_BOOKE_MAS3] = sregs.u.e.mas7_3 & 0xffffffff;
119890dc8812SScott Wood         env->spr[SPR_BOOKE_MAS4] = sregs.u.e.mas4;
119990dc8812SScott Wood         env->spr[SPR_BOOKE_MAS6] = sregs.u.e.mas6;
120090dc8812SScott Wood         env->spr[SPR_BOOKE_MAS7] = sregs.u.e.mas7_3 >> 32;
120190dc8812SScott Wood         env->spr[SPR_MMUCFG] = sregs.u.e.mmucfg;
120290dc8812SScott Wood         env->spr[SPR_BOOKE_TLB0CFG] = sregs.u.e.tlbcfg[0];
120390dc8812SScott Wood         env->spr[SPR_BOOKE_TLB1CFG] = sregs.u.e.tlbcfg[1];
120490dc8812SScott Wood     }
120590dc8812SScott Wood 
120690dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_EXP) {
120790dc8812SScott Wood         env->spr[SPR_BOOKE_EPR] = sregs.u.e.epr;
120890dc8812SScott Wood     }
120990dc8812SScott Wood 
121090dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_PD) {
121190dc8812SScott Wood         env->spr[SPR_BOOKE_EPLC] = sregs.u.e.eplc;
121290dc8812SScott Wood         env->spr[SPR_BOOKE_EPSC] = sregs.u.e.epsc;
121390dc8812SScott Wood     }
121490dc8812SScott Wood 
121590dc8812SScott Wood     if (sregs.u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
121690dc8812SScott Wood         env->spr[SPR_E500_SVR] = sregs.u.e.impl.fsl.svr;
121790dc8812SScott Wood         env->spr[SPR_Exxx_MCAR] = sregs.u.e.impl.fsl.mcar;
121890dc8812SScott Wood         env->spr[SPR_HID0] = sregs.u.e.impl.fsl.hid0;
121990dc8812SScott Wood 
122090dc8812SScott Wood         if (sregs.u.e.impl.fsl.features & KVM_SREGS_E_FSL_PIDn) {
122190dc8812SScott Wood             env->spr[SPR_BOOKE_PID1] = sregs.u.e.impl.fsl.pid1;
122290dc8812SScott Wood             env->spr[SPR_BOOKE_PID2] = sregs.u.e.impl.fsl.pid2;
122390dc8812SScott Wood         }
122490dc8812SScott Wood     }
1225a7a00a72SDavid Gibson 
1226a7a00a72SDavid Gibson     return 0;
1227fafc0b6aSAlexander Graf }
122890dc8812SScott Wood 
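/* Fetch the Book3S special registers with KVM_GET_SREGS: SDR1 (unless
 * a virtual hypervisor owns it), the SLB (rebuilt from the packed
 * array of valid entries), the segment registers and the BATs. */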
1229a7a00a72SDavid Gibson static int kvmppc_get_books_sregs(PowerPCCPU *cpu)
1230a7a00a72SDavid Gibson {
1231a7a00a72SDavid Gibson     CPUPPCState *env = &cpu->env;
1232a7a00a72SDavid Gibson     struct kvm_sregs sregs;
1233a7a00a72SDavid Gibson     int ret;
1234a7a00a72SDavid Gibson     int i;
1235a7a00a72SDavid Gibson 
1236a7a00a72SDavid Gibson     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
123790dc8812SScott Wood     if (ret < 0) {
123890dc8812SScott Wood         return ret;
123990dc8812SScott Wood     }
124090dc8812SScott Wood 
1241e57ca75cSDavid Gibson     if (!cpu->vhyp) {
1242bb593904SDavid Gibson         ppc_store_sdr1(env, sregs.u.s.sdr1);
1243f3c75d42SAneesh Kumar K.V     }
1244ba5e5090SAlexander Graf 
1245ba5e5090SAlexander Graf     /* Sync SLB */
124682c09f2fSAlexander Graf #ifdef TARGET_PPC64
12474b4d4a21SAneesh Kumar K.V     /*
12484b4d4a21SAneesh Kumar K.V      * The packed SLB array we get from KVM_GET_SREGS only contains
1249a7a00a72SDavid Gibson      * information about valid entries. So we flush our internal copy
1250a7a00a72SDavid Gibson      * to get rid of stale ones, then put all valid SLB entries back
1251a7a00a72SDavid Gibson      * in.
12524b4d4a21SAneesh Kumar K.V      */
12534b4d4a21SAneesh Kumar K.V     memset(env->slb, 0, sizeof(env->slb));
1254d83af167SAneesh Kumar K.V     for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
12554b4d4a21SAneesh Kumar K.V         target_ulong rb = sregs.u.s.ppc64.slb[i].slbe;
12564b4d4a21SAneesh Kumar K.V         target_ulong rs = sregs.u.s.ppc64.slb[i].slbv;
12574b4d4a21SAneesh Kumar K.V         /*
12584b4d4a21SAneesh Kumar K.V          * Only restore valid entries
12594b4d4a21SAneesh Kumar K.V          */
12604b4d4a21SAneesh Kumar K.V         if (rb & SLB_ESID_V) {
1261bcd81230SDavid Gibson             ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs);
12624b4d4a21SAneesh Kumar K.V         }
1263ba5e5090SAlexander Graf     }
126482c09f2fSAlexander Graf #endif
1265ba5e5090SAlexander Graf 
1266ba5e5090SAlexander Graf     /* Sync SRs */
1267ba5e5090SAlexander Graf     for (i = 0; i < 16; i++) {
1268ba5e5090SAlexander Graf         env->sr[i] = sregs.u.s.ppc32.sr[i];
1269ba5e5090SAlexander Graf     }
1270ba5e5090SAlexander Graf 
1271ba5e5090SAlexander Graf     /* Sync BATs */
1272ba5e5090SAlexander Graf     for (i = 0; i < 8; i++) {
1273ba5e5090SAlexander Graf         env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff;
1274ba5e5090SAlexander Graf         env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32;
1275ba5e5090SAlexander Graf         env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff;
1276ba5e5090SAlexander Graf         env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32;
1277ba5e5090SAlexander Graf     }
1278a7a00a72SDavid Gibson 
1279a7a00a72SDavid Gibson     return 0;
1280a7a00a72SDavid Gibson }
1281a7a00a72SDavid Gibson 
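/* Mirror of kvm_arch_put_registers(): pull the register state out of
 * KVM into env, including the sregs variant matching the detected
 * capability (BookE or Book3S), HIOR, ONE_REG-backed SPRs,
 * transactional memory state and the VPA. */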
1282a7a00a72SDavid Gibson int kvm_arch_get_registers(CPUState *cs)
1283a7a00a72SDavid Gibson {
1284a7a00a72SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
1285a7a00a72SDavid Gibson     CPUPPCState *env = &cpu->env;
1286a7a00a72SDavid Gibson     struct kvm_regs regs;
1287a7a00a72SDavid Gibson     uint32_t cr;
1288a7a00a72SDavid Gibson     int i, ret;
1289a7a00a72SDavid Gibson 
1290a7a00a72SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
1291a7a00a72SDavid Gibson     if (ret < 0) {
1292a7a00a72SDavid Gibson         return ret;
    }
1293a7a00a72SDavid Gibson 
1294a7a00a72SDavid Gibson     cr = regs.cr;
1295a7a00a72SDavid Gibson     for (i = 7; i >= 0; i--) {
1296a7a00a72SDavid Gibson         env->crf[i] = cr & 15;
1297a7a00a72SDavid Gibson         cr >>= 4;
1298a7a00a72SDavid Gibson     }
1299a7a00a72SDavid Gibson 
1300a7a00a72SDavid Gibson     env->ctr = regs.ctr;
1301a7a00a72SDavid Gibson     env->lr = regs.lr;
1302a7a00a72SDavid Gibson     cpu_write_xer(env, regs.xer);
1303a7a00a72SDavid Gibson     env->msr = regs.msr;
1304a7a00a72SDavid Gibson     env->nip = regs.pc;
1305a7a00a72SDavid Gibson 
1306a7a00a72SDavid Gibson     env->spr[SPR_SRR0] = regs.srr0;
1307a7a00a72SDavid Gibson     env->spr[SPR_SRR1] = regs.srr1;
1308a7a00a72SDavid Gibson 
1309a7a00a72SDavid Gibson     env->spr[SPR_SPRG0] = regs.sprg0;
1310a7a00a72SDavid Gibson     env->spr[SPR_SPRG1] = regs.sprg1;
1311a7a00a72SDavid Gibson     env->spr[SPR_SPRG2] = regs.sprg2;
1312a7a00a72SDavid Gibson     env->spr[SPR_SPRG3] = regs.sprg3;
1313a7a00a72SDavid Gibson     env->spr[SPR_SPRG4] = regs.sprg4;
1314a7a00a72SDavid Gibson     env->spr[SPR_SPRG5] = regs.sprg5;
1315a7a00a72SDavid Gibson     env->spr[SPR_SPRG6] = regs.sprg6;
1316a7a00a72SDavid Gibson     env->spr[SPR_SPRG7] = regs.sprg7;
1317a7a00a72SDavid Gibson 
1318a7a00a72SDavid Gibson     env->spr[SPR_BOOKE_PID] = regs.pid;
1319a7a00a72SDavid Gibson 
1320a7a00a72SDavid Gibson     for (i = 0; i < 32; i++) {
1321a7a00a72SDavid Gibson         env->gpr[i] = regs.gpr[i];
    }
1322a7a00a72SDavid Gibson 
1323a7a00a72SDavid Gibson     kvm_get_fp(cs);
1324a7a00a72SDavid Gibson 
1325a7a00a72SDavid Gibson     if (cap_booke_sregs) {
1326a7a00a72SDavid Gibson         ret = kvmppc_get_booke_sregs(cpu);
1327a7a00a72SDavid Gibson         if (ret < 0) {
1328a7a00a72SDavid Gibson             return ret;
1329a7a00a72SDavid Gibson         }
1330a7a00a72SDavid Gibson     }
1331a7a00a72SDavid Gibson 
1332a7a00a72SDavid Gibson     if (cap_segstate) {
1333a7a00a72SDavid Gibson         ret = kvmppc_get_books_sregs(cpu);
1334a7a00a72SDavid Gibson         if (ret < 0) {
1335a7a00a72SDavid Gibson             return ret;
1336a7a00a72SDavid Gibson         }
1337fafc0b6aSAlexander Graf     }
1338ba5e5090SAlexander Graf 
1339d67d40eaSDavid Gibson     if (cap_hior) {
1340d67d40eaSDavid Gibson         kvm_get_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
1341d67d40eaSDavid Gibson     }
1342d67d40eaSDavid Gibson 
1343d67d40eaSDavid Gibson     if (cap_one_reg) {
1344d67d40eaSDavid Gibson         int i;
1345d67d40eaSDavid Gibson 
1346d67d40eaSDavid Gibson         /* We deliberately ignore errors here: for kernels which have
1347d67d40eaSDavid Gibson          * the ONE_REG calls but don't support the specific
1348d67d40eaSDavid Gibson          * registers, there's a reasonable chance things will still
1349d67d40eaSDavid Gibson          * work, at least until we try to migrate. */
1350d67d40eaSDavid Gibson         for (i = 0; i < 1024; i++) {
1351d67d40eaSDavid Gibson             uint64_t id = env->spr_cb[i].one_reg_id;
1352d67d40eaSDavid Gibson 
1353d67d40eaSDavid Gibson             if (id != 0) {
1354d67d40eaSDavid Gibson                 kvm_get_one_spr(cs, id, i);
1355d67d40eaSDavid Gibson             }
1356d67d40eaSDavid Gibson         }
13579b00ea49SDavid Gibson 
13589b00ea49SDavid Gibson #ifdef TARGET_PPC64
135980b3f79bSAlexey Kardashevskiy         if (msr_ts) {
136080b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
136180b3f79bSAlexey Kardashevskiy                 kvm_get_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
136280b3f79bSAlexey Kardashevskiy             }
136380b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
136480b3f79bSAlexey Kardashevskiy                 kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
136580b3f79bSAlexey Kardashevskiy             }
136680b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
136780b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
136880b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
136980b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
137080b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
137180b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
137280b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
137380b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
137480b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
137580b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
137680b3f79bSAlexey Kardashevskiy         }
137780b3f79bSAlexey Kardashevskiy 
13789b00ea49SDavid Gibson         if (cap_papr) {
13799b00ea49SDavid Gibson             if (kvm_get_vpa(cs) < 0) {
1380da56ff91SPeter Maydell                 DPRINTF("Warning: Unable to get VPA information from KVM\n");
13819b00ea49SDavid Gibson             }
13829b00ea49SDavid Gibson         }
138398a8b524SAlexey Kardashevskiy 
138498a8b524SAlexey Kardashevskiy         kvm_get_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
13859b00ea49SDavid Gibson #endif
1386d67d40eaSDavid Gibson     }
1387d67d40eaSDavid Gibson 
1388d76d1650Saurel32     return 0;
1389d76d1650Saurel32 }
1390d76d1650Saurel32 
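/* Raise or lower a vcpu's external interrupt line. Only
 * PPC_INTERRUPT_EXT is forwarded to KVM, and only when the kernel
 * supports level-triggered KVM_INTERRUPT set/unset. */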
13911bc22652SAndreas Färber int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
1392fc87e185SAlexander Graf {
1393fc87e185SAlexander Graf     unsigned virq = level ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;
1394fc87e185SAlexander Graf 
1395fc87e185SAlexander Graf     if (irq != PPC_INTERRUPT_EXT) {
1396fc87e185SAlexander Graf         return 0;
1397fc87e185SAlexander Graf     }
1398fc87e185SAlexander Graf 
1399fc87e185SAlexander Graf     if (!kvm_enabled() || !cap_interrupt_unset || !cap_interrupt_level) {
1400fc87e185SAlexander Graf         return 0;
1401fc87e185SAlexander Graf     }
1402fc87e185SAlexander Graf 
14031bc22652SAndreas Färber     kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq);
1404fc87e185SAlexander Graf 
1405fc87e185SAlexander Graf     return 0;
1406fc87e185SAlexander Graf }
1407fc87e185SAlexander Graf 
140816415335SAlexander Graf #if defined(TARGET_PPCEMB)
140916415335SAlexander Graf #define PPC_INPUT_INT PPC40x_INPUT_INT
141016415335SAlexander Graf #elif defined(TARGET_PPC64)
141116415335SAlexander Graf #define PPC_INPUT_INT PPC970_INPUT_INT
141216415335SAlexander Graf #else
141316415335SAlexander Graf #define PPC_INPUT_INT PPC6xx_INPUT_INT
141416415335SAlexander Graf #endif
141516415335SAlexander Graf 
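/* Called just before entering the guest. If the kernel cannot track
 * the interrupt line level itself (!cap_interrupt_level), inject a
 * pending external interrupt here and re-arm idle_timer so that a
 * still-asserted line is retried shortly afterwards. */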
141620d695a9SAndreas Färber void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
1417d76d1650Saurel32 {
141820d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
141920d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
1420d76d1650Saurel32     int r;
1421d76d1650Saurel32     unsigned irq;
1422d76d1650Saurel32 
14234b8523eeSJan Kiszka     qemu_mutex_lock_iothread();
14244b8523eeSJan Kiszka 
14255cbdb3a3SStefan Weil     /* PowerPC QEMU tracks the various core input pins (interrupt, critical
1426d76d1650Saurel32      * interrupt, reset, etc) in PPC-specific env->irq_input_state. */
1427fc87e185SAlexander Graf     if (!cap_interrupt_level &&
1428fc87e185SAlexander Graf         run->ready_for_interrupt_injection &&
1429259186a7SAndreas Färber         (cs->interrupt_request & CPU_INTERRUPT_HARD) &&
143016415335SAlexander Graf         (env->irq_input_state & (1 << PPC_INPUT_INT))) {
1432d76d1650Saurel32         /* For now KVM disregards the 'irq' argument. However, in the
1433d76d1650Saurel32          * future KVM could cache it in-kernel to avoid a heavyweight exit
1434d76d1650Saurel32          * when reading the UIC.
1435d76d1650Saurel32          */
1436fc87e185SAlexander Graf         irq = KVM_INTERRUPT_SET;
1437d76d1650Saurel32 
1438da56ff91SPeter Maydell         DPRINTF("injected interrupt %d\n", irq);
14391bc22652SAndreas Färber         r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &irq);
144055e5c285SAndreas Färber         if (r < 0) {
144155e5c285SAndreas Färber             printf("cpu %d fail inject %x\n", cs->cpu_index, irq);
144255e5c285SAndreas Färber         }
1443c821c2bdSAlexander Graf 
1444c821c2bdSAlexander Graf         /* Always wake up soon in case the interrupt was level based */
1445bc72ad67SAlex Bligh         timer_mod(idle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
144673bcb24dSRutuja Shah                        (NANOSECONDS_PER_SECOND / 50));
1447d76d1650Saurel32     }
1448d76d1650Saurel32 
1449d76d1650Saurel32     /* We don't know if there are more interrupts pending after this. However,
1450d76d1650Saurel32      * the guest will return to userspace in the course of handling this one
1451d76d1650Saurel32      * anyway, so we will get a chance to deliver the rest. */
14524b8523eeSJan Kiszka 
14534b8523eeSJan Kiszka     qemu_mutex_unlock_iothread();
1454d76d1650Saurel32 }
1455d76d1650Saurel32 
14564c663752SPaolo Bonzini MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
1457d76d1650Saurel32 {
14584c663752SPaolo Bonzini     return MEMTXATTRS_UNSPECIFIED;
1459d76d1650Saurel32 }
1460d76d1650Saurel32 
146120d695a9SAndreas Färber int kvm_arch_process_async_events(CPUState *cs)
14620af691d7SMarcelo Tosatti {
1463259186a7SAndreas Färber     return cs->halted;
14640af691d7SMarcelo Tosatti }
14650af691d7SMarcelo Tosatti 
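/* KVM_EXIT_HLT: if external interrupts are enabled (MSR_EE) and no
 * hard interrupt is pending, park the vcpu by marking it halted with
 * EXCP_HLT. */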
1466259186a7SAndreas Färber static int kvmppc_handle_halt(PowerPCCPU *cpu)
1467d76d1650Saurel32 {
1468259186a7SAndreas Färber     CPUState *cs = CPU(cpu);
1469259186a7SAndreas Färber     CPUPPCState *env = &cpu->env;
1470259186a7SAndreas Färber 
1471259186a7SAndreas Färber     if (!(cs->interrupt_request & CPU_INTERRUPT_HARD) && (msr_ee)) {
1472259186a7SAndreas Färber         cs->halted = 1;
147327103424SAndreas Färber         cs->exception_index = EXCP_HLT;
1474d76d1650Saurel32     }
1475d76d1650Saurel32 
1476bb4ea393SJan Kiszka     return 0;
1477d76d1650Saurel32 }
1478d76d1650Saurel32 
1479d76d1650Saurel32 /* map dcr access to existing qemu dcr emulation */
14801328c2bfSAndreas Färber static int kvmppc_handle_dcr_read(CPUPPCState *env, uint32_t dcrn, uint32_t *data)
1481d76d1650Saurel32 {
1482d76d1650Saurel32     if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0) {
1483d76d1650Saurel32         fprintf(stderr, "Read to unhandled DCR (0x%x)\n", dcrn);
    }
1484d76d1650Saurel32 
1485bb4ea393SJan Kiszka     return 0;
1486d76d1650Saurel32 }
1487d76d1650Saurel32 
14881328c2bfSAndreas Färber static int kvmppc_handle_dcr_write(CPUPPCState *env, uint32_t dcrn, uint32_t data)
1489d76d1650Saurel32 {
1490d76d1650Saurel32     if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0) {
1491d76d1650Saurel32         fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);
    }
1492d76d1650Saurel32 
1493bb4ea393SJan Kiszka     return 0;
1494d76d1650Saurel32 }
1495d76d1650Saurel32 
14968a0548f9SBharat Bhushan int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
14978a0548f9SBharat Bhushan {
14988a0548f9SBharat Bhushan     /* Mixed endian case is not handled */
14998a0548f9SBharat Bhushan     uint32_t sc = debug_inst_opcode;
15008a0548f9SBharat Bhushan 
15018a0548f9SBharat Bhushan     if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
15028a0548f9SBharat Bhushan                             sizeof(sc), 0) ||
15038a0548f9SBharat Bhushan         cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 1)) {
15048a0548f9SBharat Bhushan         return -EINVAL;
15058a0548f9SBharat Bhushan     }
15068a0548f9SBharat Bhushan 
15078a0548f9SBharat Bhushan     return 0;
15088a0548f9SBharat Bhushan }
15098a0548f9SBharat Bhushan 
15108a0548f9SBharat Bhushan int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
15118a0548f9SBharat Bhushan {
15128a0548f9SBharat Bhushan     uint32_t sc;
15138a0548f9SBharat Bhushan 
15148a0548f9SBharat Bhushan     if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 0) ||
15158a0548f9SBharat Bhushan         sc != debug_inst_opcode ||
15168a0548f9SBharat Bhushan         cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
15178a0548f9SBharat Bhushan                             sizeof(sc), 1)) {
15188a0548f9SBharat Bhushan         return -EINVAL;
15198a0548f9SBharat Bhushan     }
15208a0548f9SBharat Bhushan 
15218a0548f9SBharat Bhushan     return 0;
15228a0548f9SBharat Bhushan }
15238a0548f9SBharat Bhushan 
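/* Look up an entry of the given type in the shared table of hardware
 * breakpoints and watchpoints; returns its index or -1. */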
152488365d17SBharat Bhushan static int find_hw_breakpoint(target_ulong addr, int type)
152588365d17SBharat Bhushan {
152688365d17SBharat Bhushan     int n;
152788365d17SBharat Bhushan 
152888365d17SBharat Bhushan     assert((nb_hw_breakpoint + nb_hw_watchpoint)
152988365d17SBharat Bhushan            <= ARRAY_SIZE(hw_debug_points));
153088365d17SBharat Bhushan 
153188365d17SBharat Bhushan     for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
153288365d17SBharat Bhushan         if (hw_debug_points[n].addr == addr &&
153388365d17SBharat Bhushan              hw_debug_points[n].type == type) {
153488365d17SBharat Bhushan             return n;
153588365d17SBharat Bhushan         }
153688365d17SBharat Bhushan     }
153788365d17SBharat Bhushan 
153888365d17SBharat Bhushan     return -1;
153988365d17SBharat Bhushan }
154088365d17SBharat Bhushan 
154188365d17SBharat Bhushan static int find_hw_watchpoint(target_ulong addr, int *flag)
154288365d17SBharat Bhushan {
154388365d17SBharat Bhushan     int n;
154488365d17SBharat Bhushan 
154588365d17SBharat Bhushan     n = find_hw_breakpoint(addr, GDB_WATCHPOINT_ACCESS);
154688365d17SBharat Bhushan     if (n >= 0) {
154788365d17SBharat Bhushan         *flag = BP_MEM_ACCESS;
154888365d17SBharat Bhushan         return n;
154988365d17SBharat Bhushan     }
155088365d17SBharat Bhushan 
155188365d17SBharat Bhushan     n = find_hw_breakpoint(addr, GDB_WATCHPOINT_WRITE);
155288365d17SBharat Bhushan     if (n >= 0) {
155388365d17SBharat Bhushan         *flag = BP_MEM_WRITE;
155488365d17SBharat Bhushan         return n;
155588365d17SBharat Bhushan     }
155688365d17SBharat Bhushan 
155788365d17SBharat Bhushan     n = find_hw_breakpoint(addr, GDB_WATCHPOINT_READ);
155888365d17SBharat Bhushan     if (n >= 0) {
155988365d17SBharat Bhushan         *flag = BP_MEM_READ;
156088365d17SBharat Bhushan         return n;
156188365d17SBharat Bhushan     }
156288365d17SBharat Bhushan 
156388365d17SBharat Bhushan     return -1;
156488365d17SBharat Bhushan }
156588365d17SBharat Bhushan 
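/* Record a hardware breakpoint or watchpoint in hw_debug_points,
 * enforcing the max_hw_breakpoint/max_hw_watchpoint limits and
 * rejecting duplicates. */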
156688365d17SBharat Bhushan int kvm_arch_insert_hw_breakpoint(target_ulong addr,
156788365d17SBharat Bhushan                                   target_ulong len, int type)
156888365d17SBharat Bhushan {
156988365d17SBharat Bhushan     if ((nb_hw_breakpoint + nb_hw_watchpoint) >= ARRAY_SIZE(hw_debug_points)) {
157088365d17SBharat Bhushan         return -ENOBUFS;
157188365d17SBharat Bhushan     }
157288365d17SBharat Bhushan 
157388365d17SBharat Bhushan     hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].addr = addr;
157488365d17SBharat Bhushan     hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].type = type;
157588365d17SBharat Bhushan 
157688365d17SBharat Bhushan     switch (type) {
157788365d17SBharat Bhushan     case GDB_BREAKPOINT_HW:
157888365d17SBharat Bhushan         if (nb_hw_breakpoint >= max_hw_breakpoint) {
157988365d17SBharat Bhushan             return -ENOBUFS;
158088365d17SBharat Bhushan         }
158188365d17SBharat Bhushan 
158288365d17SBharat Bhushan         if (find_hw_breakpoint(addr, type) >= 0) {
158388365d17SBharat Bhushan             return -EEXIST;
158488365d17SBharat Bhushan         }
158588365d17SBharat Bhushan 
158688365d17SBharat Bhushan         nb_hw_breakpoint++;
158788365d17SBharat Bhushan         break;
158888365d17SBharat Bhushan 
158988365d17SBharat Bhushan     case GDB_WATCHPOINT_WRITE:
159088365d17SBharat Bhushan     case GDB_WATCHPOINT_READ:
159188365d17SBharat Bhushan     case GDB_WATCHPOINT_ACCESS:
159288365d17SBharat Bhushan         if (nb_hw_watchpoint >= max_hw_watchpoint) {
159388365d17SBharat Bhushan             return -ENOBUFS;
159488365d17SBharat Bhushan         }
159588365d17SBharat Bhushan 
159688365d17SBharat Bhushan         if (find_hw_breakpoint(addr, type) >= 0) {
159788365d17SBharat Bhushan             return -EEXIST;
159888365d17SBharat Bhushan         }
159988365d17SBharat Bhushan 
160088365d17SBharat Bhushan         nb_hw_watchpoint++;
160188365d17SBharat Bhushan         break;
160288365d17SBharat Bhushan 
160388365d17SBharat Bhushan     default:
160488365d17SBharat Bhushan         return -ENOSYS;
160588365d17SBharat Bhushan     }
160688365d17SBharat Bhushan 
160788365d17SBharat Bhushan     return 0;
160888365d17SBharat Bhushan }
160988365d17SBharat Bhushan 
161088365d17SBharat Bhushan int kvm_arch_remove_hw_breakpoint(target_ulong addr,
161188365d17SBharat Bhushan                                   target_ulong len, int type)
161288365d17SBharat Bhushan {
161388365d17SBharat Bhushan     int n;
161488365d17SBharat Bhushan 
161588365d17SBharat Bhushan     n = find_hw_breakpoint(addr, type);
161688365d17SBharat Bhushan     if (n < 0) {
161788365d17SBharat Bhushan         return -ENOENT;
161888365d17SBharat Bhushan     }
161988365d17SBharat Bhushan 
162088365d17SBharat Bhushan     switch (type) {
162188365d17SBharat Bhushan     case GDB_BREAKPOINT_HW:
162288365d17SBharat Bhushan         nb_hw_breakpoint--;
162388365d17SBharat Bhushan         break;
162488365d17SBharat Bhushan 
162588365d17SBharat Bhushan     case GDB_WATCHPOINT_WRITE:
162688365d17SBharat Bhushan     case GDB_WATCHPOINT_READ:
162788365d17SBharat Bhushan     case GDB_WATCHPOINT_ACCESS:
162888365d17SBharat Bhushan         nb_hw_watchpoint--;
162988365d17SBharat Bhushan         break;
163088365d17SBharat Bhushan 
163188365d17SBharat Bhushan     default:
163288365d17SBharat Bhushan         return -ENOSYS;
163388365d17SBharat Bhushan     }
163488365d17SBharat Bhushan     hw_debug_points[n] = hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint];
163588365d17SBharat Bhushan 
163688365d17SBharat Bhushan     return 0;
163788365d17SBharat Bhushan }
163888365d17SBharat Bhushan 
163988365d17SBharat Bhushan void kvm_arch_remove_all_hw_breakpoints(void)
164088365d17SBharat Bhushan {
164188365d17SBharat Bhushan     nb_hw_breakpoint = nb_hw_watchpoint = 0;
164288365d17SBharat Bhushan }
164388365d17SBharat Bhushan 
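/* Build the kvm_guest_debug control block: enable software
 * breakpoints if any are registered and copy the hardware debug
 * points into dbg->arch.bp. */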
16448a0548f9SBharat Bhushan void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
16458a0548f9SBharat Bhushan {
164688365d17SBharat Bhushan     int n;
164788365d17SBharat Bhushan 
16488a0548f9SBharat Bhushan     /* Software Breakpoint updates */
16498a0548f9SBharat Bhushan     if (kvm_sw_breakpoints_active(cs)) {
16508a0548f9SBharat Bhushan         dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
16518a0548f9SBharat Bhushan     }
165288365d17SBharat Bhushan 
165388365d17SBharat Bhushan     assert((nb_hw_breakpoint + nb_hw_watchpoint)
165488365d17SBharat Bhushan            <= ARRAY_SIZE(hw_debug_points));
165588365d17SBharat Bhushan     assert((nb_hw_breakpoint + nb_hw_watchpoint) <= ARRAY_SIZE(dbg->arch.bp));
165688365d17SBharat Bhushan 
165788365d17SBharat Bhushan     if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
165888365d17SBharat Bhushan         dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
165988365d17SBharat Bhushan         memset(dbg->arch.bp, 0, sizeof(dbg->arch.bp));
166088365d17SBharat Bhushan         for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
166188365d17SBharat Bhushan             switch (hw_debug_points[n].type) {
166288365d17SBharat Bhushan             case GDB_BREAKPOINT_HW:
166388365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_BREAKPOINT;
166488365d17SBharat Bhushan                 break;
166588365d17SBharat Bhushan             case GDB_WATCHPOINT_WRITE:
166688365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE;
166788365d17SBharat Bhushan                 break;
166888365d17SBharat Bhushan             case GDB_WATCHPOINT_READ:
166988365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_READ;
167088365d17SBharat Bhushan                 break;
167188365d17SBharat Bhushan             case GDB_WATCHPOINT_ACCESS:
167288365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE |
167388365d17SBharat Bhushan                                         KVMPPC_DEBUG_WATCH_READ;
167488365d17SBharat Bhushan                 break;
167588365d17SBharat Bhushan             default:
167688365d17SBharat Bhushan                 cpu_abort(cs, "Unsupported breakpoint type\n");
167788365d17SBharat Bhushan             }
167888365d17SBharat Bhushan             dbg->arch.bp[n].addr = hw_debug_points[n].addr;
167988365d17SBharat Bhushan         }
168088365d17SBharat Bhushan     }
16818a0548f9SBharat Bhushan }
16828a0548f9SBharat Bhushan 
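/* Decide whether a KVM_EXIT_DEBUG belongs to QEMU (single-stepping, a
 * hardware breakpoint/watchpoint, or one of our software breakpoints).
 * If not, the exception came from the guest itself and a program
 * interrupt is injected into the guest instead. */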
16838a0548f9SBharat Bhushan static int kvm_handle_debug(PowerPCCPU *cpu, struct kvm_run *run)
16848a0548f9SBharat Bhushan {
16858a0548f9SBharat Bhushan     CPUState *cs = CPU(cpu);
16868a0548f9SBharat Bhushan     CPUPPCState *env = &cpu->env;
16878a0548f9SBharat Bhushan     struct kvm_debug_exit_arch *arch_info = &run->debug.arch;
16888a0548f9SBharat Bhushan     int handle = 0;
168988365d17SBharat Bhushan     int n;
169088365d17SBharat Bhushan     int flag = 0;
16918a0548f9SBharat Bhushan 
169288365d17SBharat Bhushan     if (cs->singlestep_enabled) {
169388365d17SBharat Bhushan         handle = 1;
169488365d17SBharat Bhushan     } else if (arch_info->status) {
169588365d17SBharat Bhushan         if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
169688365d17SBharat Bhushan             if (arch_info->status & KVMPPC_DEBUG_BREAKPOINT) {
169788365d17SBharat Bhushan                 n = find_hw_breakpoint(arch_info->address, GDB_BREAKPOINT_HW);
169888365d17SBharat Bhushan                 if (n >= 0) {
169988365d17SBharat Bhushan                     handle = 1;
170088365d17SBharat Bhushan                 }
170188365d17SBharat Bhushan             } else if (arch_info->status & (KVMPPC_DEBUG_WATCH_READ |
170288365d17SBharat Bhushan                                             KVMPPC_DEBUG_WATCH_WRITE)) {
170388365d17SBharat Bhushan                 n = find_hw_watchpoint(arch_info->address,  &flag);
170488365d17SBharat Bhushan                 if (n >= 0) {
170588365d17SBharat Bhushan                     handle = 1;
170688365d17SBharat Bhushan                     cs->watchpoint_hit = &hw_watchpoint;
170788365d17SBharat Bhushan                     hw_watchpoint.vaddr = hw_debug_points[n].addr;
170888365d17SBharat Bhushan                     hw_watchpoint.flags = flag;
170988365d17SBharat Bhushan                 }
171088365d17SBharat Bhushan             }
171188365d17SBharat Bhushan         }
171288365d17SBharat Bhushan     } else if (kvm_find_sw_breakpoint(cs, arch_info->address)) {
17138a0548f9SBharat Bhushan         handle = 1;
17148a0548f9SBharat Bhushan     } else {
17158a0548f9SBharat Bhushan         /* QEMU is not able to handle this debug exception, so inject a
17168a0548f9SBharat Bhushan          * program exception into the guest;
17178a0548f9SBharat Bhushan          * yes, a program exception, NOT a debug exception !!
171888365d17SBharat Bhushan          * When QEMU is using the debug resources, the debug exception must
171988365d17SBharat Bhushan          * always be enabled. To achieve this we set MSR_DE and also set
172088365d17SBharat Bhushan          * MSRP_DEP so the guest cannot change MSR_DE.
172188365d17SBharat Bhushan          * When emulating the debug resources for the guest we want the
172288365d17SBharat Bhushan          * guest to control MSR_DE (enable/disable the debug interrupt
172388365d17SBharat Bhushan          * on need). Supporting both configurations at once is NOT possible.
172488365d17SBharat Bhushan          * So the result is that we cannot share the debug resources
172588365d17SBharat Bhushan          * between QEMU and the guest on BookE architectures.
172688365d17SBharat Bhushan          * In the current design QEMU gets priority over the guest:
172788365d17SBharat Bhushan          * if QEMU is using the debug resources then the guest
172888365d17SBharat Bhushan          * cannot use them.
17298a0548f9SBharat Bhushan          * For software breakpoints QEMU uses a privileged instruction,
17308a0548f9SBharat Bhushan          * so there is no way we can be here because the guest raised a
17318a0548f9SBharat Bhushan          * debug exception; the only possibility is that the guest
17328a0548f9SBharat Bhushan          * executed a privileged / illegal instruction, and that is why
17338a0548f9SBharat Bhushan          * we are injecting a program interrupt.
17348a0548f9SBharat Bhushan          */
17358a0548f9SBharat Bhushan 
17368a0548f9SBharat Bhushan         cpu_synchronize_state(cs);
17378a0548f9SBharat Bhushan         /* env->nip is PC, so increment this by 4 to use
17388a0548f9SBharat Bhushan          * ppc_cpu_do_interrupt(), which sets srr0 = env->nip - 4.
17398a0548f9SBharat Bhushan          */
17408a0548f9SBharat Bhushan         env->nip += 4;
17418a0548f9SBharat Bhushan         cs->exception_index = POWERPC_EXCP_PROGRAM;
17428a0548f9SBharat Bhushan         env->error_code = POWERPC_EXCP_INVAL;
17438a0548f9SBharat Bhushan         ppc_cpu_do_interrupt(cs);
17448a0548f9SBharat Bhushan     }
17458a0548f9SBharat Bhushan 
17468a0548f9SBharat Bhushan     return handle;
17478a0548f9SBharat Bhushan }
17488a0548f9SBharat Bhushan 
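/* Top-level dispatcher for KVM exit reasons: DCR accesses, halt, PAPR
 * hypercalls, EPR reads, watchdog expiry and debug exits. The iothread
 * lock is taken around the individual handlers. */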
174920d695a9SAndreas Färber int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
1750d76d1650Saurel32 {
175120d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
175220d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
1753bb4ea393SJan Kiszka     int ret;
1754d76d1650Saurel32 
17554b8523eeSJan Kiszka     qemu_mutex_lock_iothread();
17564b8523eeSJan Kiszka 
1757d76d1650Saurel32     switch (run->exit_reason) {
1758d76d1650Saurel32     case KVM_EXIT_DCR:
1759d76d1650Saurel32         if (run->dcr.is_write) {
1760da56ff91SPeter Maydell             DPRINTF("handle dcr write\n");
1761d76d1650Saurel32             ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data);
1762d76d1650Saurel32         } else {
1763da56ff91SPeter Maydell             DPRINTF("handle dcr read\n");
1764d76d1650Saurel32             ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data);
1765d76d1650Saurel32         }
1766d76d1650Saurel32         break;
1767d76d1650Saurel32     case KVM_EXIT_HLT:
1768da56ff91SPeter Maydell         DPRINTF("handle halt\n");
1769259186a7SAndreas Färber         ret = kvmppc_handle_halt(cpu);
1770d76d1650Saurel32         break;
1771c6304a4aSDavid Gibson #if defined(TARGET_PPC64)
1772f61b4bedSAlexander Graf     case KVM_EXIT_PAPR_HCALL:
1773da56ff91SPeter Maydell         DPRINTF("handle PAPR hypercall\n");
177420d695a9SAndreas Färber         run->papr_hcall.ret = spapr_hypercall(cpu,
1775aa100fa4SAndreas Färber                                               run->papr_hcall.nr,
1776f61b4bedSAlexander Graf                                               run->papr_hcall.args);
177778e8fde2SDavid Gibson         ret = 0;
1778f61b4bedSAlexander Graf         break;
1779f61b4bedSAlexander Graf #endif
17805b95b8b9SAlexander Graf     case KVM_EXIT_EPR:
1781da56ff91SPeter Maydell         DPRINTF("handle epr\n");
1782933b19eaSAlexander Graf         run->epr.epr = ldl_phys(cs->as, env->mpic_iack);
17835b95b8b9SAlexander Graf         ret = 0;
17845b95b8b9SAlexander Graf         break;
178531f2cb8fSBharat Bhushan     case KVM_EXIT_WATCHDOG:
1786da56ff91SPeter Maydell         DPRINTF("handle watchdog expiry\n");
178731f2cb8fSBharat Bhushan         watchdog_perform_action();
178831f2cb8fSBharat Bhushan         ret = 0;
178931f2cb8fSBharat Bhushan         break;
179031f2cb8fSBharat Bhushan 
17918a0548f9SBharat Bhushan     case KVM_EXIT_DEBUG:
17928a0548f9SBharat Bhushan         DPRINTF("handle debug exception\n");
17938a0548f9SBharat Bhushan         if (kvm_handle_debug(cpu, run)) {
17948a0548f9SBharat Bhushan             ret = EXCP_DEBUG;
17958a0548f9SBharat Bhushan             break;
17968a0548f9SBharat Bhushan         }
17978a0548f9SBharat Bhushan         /* re-enter, this exception was guest-internal */
17988a0548f9SBharat Bhushan         ret = 0;
17998a0548f9SBharat Bhushan         break;
18008a0548f9SBharat Bhushan 
180173aaec4aSJan Kiszka     default:
180273aaec4aSJan Kiszka         fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
180373aaec4aSJan Kiszka         ret = -1;
180473aaec4aSJan Kiszka         break;
1805d76d1650Saurel32     }
1806d76d1650Saurel32 
18074b8523eeSJan Kiszka     qemu_mutex_unlock_iothread();
1808d76d1650Saurel32     return ret;
1809d76d1650Saurel32 }
1810d76d1650Saurel32 
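/* BookE timer/watchdog helpers: set or clear TSR bits and write TCR
 * through the ONE_REG interface, and turn on the in-kernel watchdog
 * (KVM_CAP_PPC_BOOKE_WATCHDOG) for a vcpu. */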
181131f2cb8fSBharat Bhushan int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
181231f2cb8fSBharat Bhushan {
181331f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
181431f2cb8fSBharat Bhushan     uint32_t bits = tsr_bits;
181531f2cb8fSBharat Bhushan     struct kvm_one_reg reg = {
181631f2cb8fSBharat Bhushan         .id = KVM_REG_PPC_OR_TSR,
181731f2cb8fSBharat Bhushan         .addr = (uintptr_t) &bits,
181831f2cb8fSBharat Bhushan     };
181931f2cb8fSBharat Bhushan 
182031f2cb8fSBharat Bhushan     return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
182131f2cb8fSBharat Bhushan }
182231f2cb8fSBharat Bhushan 
182331f2cb8fSBharat Bhushan int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
182431f2cb8fSBharat Bhushan {
182531f2cb8fSBharat Bhushan 
182631f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
182731f2cb8fSBharat Bhushan     uint32_t bits = tsr_bits;
182831f2cb8fSBharat Bhushan     struct kvm_one_reg reg = {
182931f2cb8fSBharat Bhushan         .id = KVM_REG_PPC_CLEAR_TSR,
183031f2cb8fSBharat Bhushan         .addr = (uintptr_t) &bits,
183131f2cb8fSBharat Bhushan     };
183231f2cb8fSBharat Bhushan 
183331f2cb8fSBharat Bhushan     return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
183431f2cb8fSBharat Bhushan }
183531f2cb8fSBharat Bhushan 
183631f2cb8fSBharat Bhushan int kvmppc_set_tcr(PowerPCCPU *cpu)
183731f2cb8fSBharat Bhushan {
183831f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
183931f2cb8fSBharat Bhushan     CPUPPCState *env = &cpu->env;
184031f2cb8fSBharat Bhushan     uint32_t tcr = env->spr[SPR_BOOKE_TCR];
184131f2cb8fSBharat Bhushan 
184231f2cb8fSBharat Bhushan     struct kvm_one_reg reg = {
184331f2cb8fSBharat Bhushan         .id = KVM_REG_PPC_TCR,
184431f2cb8fSBharat Bhushan         .addr = (uintptr_t) &tcr,
184531f2cb8fSBharat Bhushan     };
184631f2cb8fSBharat Bhushan 
184731f2cb8fSBharat Bhushan     return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
184831f2cb8fSBharat Bhushan }
184931f2cb8fSBharat Bhushan 
185031f2cb8fSBharat Bhushan int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu)
185131f2cb8fSBharat Bhushan {
185231f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
185331f2cb8fSBharat Bhushan     int ret;
185431f2cb8fSBharat Bhushan 
185531f2cb8fSBharat Bhushan     if (!kvm_enabled()) {
185631f2cb8fSBharat Bhushan         return -1;
185731f2cb8fSBharat Bhushan     }
185831f2cb8fSBharat Bhushan 
185931f2cb8fSBharat Bhushan     if (!cap_ppc_watchdog) {
186031f2cb8fSBharat Bhushan         printf("warning: KVM does not support watchdog\n");
186131f2cb8fSBharat Bhushan         return -1;
186231f2cb8fSBharat Bhushan     }
186331f2cb8fSBharat Bhushan 
186448add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_BOOKE_WATCHDOG, 0);
186531f2cb8fSBharat Bhushan     if (ret < 0) {
186631f2cb8fSBharat Bhushan         fprintf(stderr, "%s: couldn't enable KVM_CAP_PPC_BOOKE_WATCHDOG: %s\n",
186731f2cb8fSBharat Bhushan                 __func__, strerror(-ret));
186831f2cb8fSBharat Bhushan         return ret;
186931f2cb8fSBharat Bhushan     }
187031f2cb8fSBharat Bhushan 
187131f2cb8fSBharat Bhushan     return ret;
187231f2cb8fSBharat Bhushan }
187331f2cb8fSBharat Bhushan 
1874dc333cd6SAlexander Graf static int read_cpuinfo(const char *field, char *value, int len)
1875dc333cd6SAlexander Graf {
1876dc333cd6SAlexander Graf     FILE *f;
1877dc333cd6SAlexander Graf     int ret = -1;
1878dc333cd6SAlexander Graf     int field_len = strlen(field);
1879dc333cd6SAlexander Graf     char line[512];
1880dc333cd6SAlexander Graf 
1881dc333cd6SAlexander Graf     f = fopen("/proc/cpuinfo", "r");
1882dc333cd6SAlexander Graf     if (!f) {
1883dc333cd6SAlexander Graf         return -1;
1884dc333cd6SAlexander Graf     }
1885dc333cd6SAlexander Graf 
1886dc333cd6SAlexander Graf     do {
1887dc333cd6SAlexander Graf         if (!fgets(line, sizeof(line), f)) {
1888dc333cd6SAlexander Graf             break;
1889dc333cd6SAlexander Graf         }
1890dc333cd6SAlexander Graf         if (!strncmp(line, field, field_len)) {
1891ae215068SJim Meyering             pstrcpy(value, len, line);
1892dc333cd6SAlexander Graf             ret = 0;
1893dc333cd6SAlexander Graf             break;
1894dc333cd6SAlexander Graf         }
1895dc333cd6SAlexander Graf     } while (*line);
1896dc333cd6SAlexander Graf 
1897dc333cd6SAlexander Graf     fclose(f);
1898dc333cd6SAlexander Graf 
1899dc333cd6SAlexander Graf     return ret;
1900dc333cd6SAlexander Graf }
1901dc333cd6SAlexander Graf 
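/* Parse the host timebase frequency from the "timebase" line of
 * /proc/cpuinfo; falls back to NANOSECONDS_PER_SECOND if it cannot
 * be determined. */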
1902dc333cd6SAlexander Graf uint32_t kvmppc_get_tbfreq(void)
1903dc333cd6SAlexander Graf {
1904dc333cd6SAlexander Graf     char line[512];
1905dc333cd6SAlexander Graf     char *ns;
190673bcb24dSRutuja Shah     uint32_t retval = NANOSECONDS_PER_SECOND;
1907dc333cd6SAlexander Graf 
1908dc333cd6SAlexander Graf     if (read_cpuinfo("timebase", line, sizeof(line))) {
1909dc333cd6SAlexander Graf         return retval;
1910dc333cd6SAlexander Graf     }
1911dc333cd6SAlexander Graf 
1912dc333cd6SAlexander Graf     if (!(ns = strchr(line, ':'))) {
1913dc333cd6SAlexander Graf         return retval;
1914dc333cd6SAlexander Graf     }
1915dc333cd6SAlexander Graf 
1916dc333cd6SAlexander Graf     ns++;
1917dc333cd6SAlexander Graf 
1918f9b8e7f6SShraddha Barke     return atoi(ns);
1919ef951443SNikunj A Dadhania }
1920ef951443SNikunj A Dadhania 
1921ef951443SNikunj A Dadhania bool kvmppc_get_host_serial(char **value)
1922ef951443SNikunj A Dadhania {
1923ef951443SNikunj A Dadhania     return g_file_get_contents("/proc/device-tree/system-id", value, NULL,
1924ef951443SNikunj A Dadhania                                NULL);
1925ef951443SNikunj A Dadhania }
1926ef951443SNikunj A Dadhania 
1927ef951443SNikunj A Dadhania bool kvmppc_get_host_model(char **value)
1928ef951443SNikunj A Dadhania {
1929ef951443SNikunj A Dadhania     return g_file_get_contents("/proc/device-tree/model", value, NULL, NULL);
1930dc333cd6SAlexander Graf }
19314513d923SGleb Natapov 
1932eadaada1SAlexander Graf /* Try to find a device tree node for a CPU with clock-frequency property */
1933eadaada1SAlexander Graf static int kvmppc_find_cpu_dt(char *buf, int buf_len)
1934eadaada1SAlexander Graf {
1935eadaada1SAlexander Graf     struct dirent *dirp;
1936eadaada1SAlexander Graf     DIR *dp;
1937eadaada1SAlexander Graf 
1938eadaada1SAlexander Graf     if ((dp = opendir(PROC_DEVTREE_CPU)) == NULL) {
1939eadaada1SAlexander Graf         printf("Can't open directory " PROC_DEVTREE_CPU "\n");
1940eadaada1SAlexander Graf         return -1;
1941eadaada1SAlexander Graf     }
1942eadaada1SAlexander Graf 
1943eadaada1SAlexander Graf     buf[0] = '\0';
1944eadaada1SAlexander Graf     while ((dirp = readdir(dp)) != NULL) {
1945eadaada1SAlexander Graf         FILE *f;
1946eadaada1SAlexander Graf         snprintf(buf, buf_len, "%s%s/clock-frequency", PROC_DEVTREE_CPU,
1947eadaada1SAlexander Graf                  dirp->d_name);
1948eadaada1SAlexander Graf         f = fopen(buf, "r");
1949eadaada1SAlexander Graf         if (f) {
1950eadaada1SAlexander Graf             snprintf(buf, buf_len, "%s%s", PROC_DEVTREE_CPU, dirp->d_name);
1951eadaada1SAlexander Graf             fclose(f);
1952eadaada1SAlexander Graf             break;
1953eadaada1SAlexander Graf         }
1954eadaada1SAlexander Graf         buf[0] = '\0';
1955eadaada1SAlexander Graf     }
1956eadaada1SAlexander Graf     closedir(dp);
1957eadaada1SAlexander Graf     if (buf[0] == '\0') {
1958eadaada1SAlexander Graf         printf("Unknown host!\n");
1959eadaada1SAlexander Graf         return -1;
1960eadaada1SAlexander Graf     }
1961eadaada1SAlexander Graf 
1962eadaada1SAlexander Graf     return 0;
1963eadaada1SAlexander Graf }
1964eadaada1SAlexander Graf 
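/* Read a host device tree property that holds a single big-endian
 * integer, accepting either a 32-bit or a 64-bit encoding. */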
19657d94a30bSSukadev Bhattiprolu static uint64_t kvmppc_read_int_dt(const char *filename)
1966eadaada1SAlexander Graf {
19679bc884b7SDavid Gibson     union {
19689bc884b7SDavid Gibson         uint32_t v32;
19699bc884b7SDavid Gibson         uint64_t v64;
19709bc884b7SDavid Gibson     } u;
1971eadaada1SAlexander Graf     FILE *f;
1972eadaada1SAlexander Graf     int len;
1973eadaada1SAlexander Graf 
19747d94a30bSSukadev Bhattiprolu     f = fopen(filename, "rb");
1975eadaada1SAlexander Graf     if (!f) {
1976eadaada1SAlexander Graf         return -1;
1977eadaada1SAlexander Graf     }
1978eadaada1SAlexander Graf 
19799bc884b7SDavid Gibson     len = fread(&u, 1, sizeof(u), f);
1980eadaada1SAlexander Graf     fclose(f);
1981eadaada1SAlexander Graf     switch (len) {
19829bc884b7SDavid Gibson     case 4:
19839bc884b7SDavid Gibson         /* property is a 32-bit quantity */
19849bc884b7SDavid Gibson         return be32_to_cpu(u.v32);
19859bc884b7SDavid Gibson     case 8:
19869bc884b7SDavid Gibson         return be64_to_cpu(u.v64);
1987eadaada1SAlexander Graf     }
1988eadaada1SAlexander Graf 
1989eadaada1SAlexander Graf     return 0;
1990eadaada1SAlexander Graf }
1991eadaada1SAlexander Graf 
19927d94a30bSSukadev Bhattiprolu /* Read a CPU node property from the host device tree that's a single
19937d94a30bSSukadev Bhattiprolu  * integer (32-bit or 64-bit).  Returns -1 if the property can't be
19947d94a30bSSukadev Bhattiprolu  * found or opened, and 0 if the contents aren't in a format we
19957d94a30bSSukadev Bhattiprolu  * understand. */
19967d94a30bSSukadev Bhattiprolu static uint64_t kvmppc_read_int_cpu_dt(const char *propname)
19977d94a30bSSukadev Bhattiprolu {
19987d94a30bSSukadev Bhattiprolu     char buf[PATH_MAX], *tmp;
19997d94a30bSSukadev Bhattiprolu     uint64_t val;
20007d94a30bSSukadev Bhattiprolu 
20017d94a30bSSukadev Bhattiprolu     if (kvmppc_find_cpu_dt(buf, sizeof(buf))) {
20027d94a30bSSukadev Bhattiprolu         return -1;
20037d94a30bSSukadev Bhattiprolu     }
20047d94a30bSSukadev Bhattiprolu 
20057d94a30bSSukadev Bhattiprolu     tmp = g_strdup_printf("%s/%s", buf, propname);
20067d94a30bSSukadev Bhattiprolu     val = kvmppc_read_int_dt(tmp);
20077d94a30bSSukadev Bhattiprolu     g_free(tmp);
20087d94a30bSSukadev Bhattiprolu 
20097d94a30bSSukadev Bhattiprolu     return val;
20107d94a30bSSukadev Bhattiprolu }
20117d94a30bSSukadev Bhattiprolu 
20129bc884b7SDavid Gibson uint64_t kvmppc_get_clockfreq(void)
20139bc884b7SDavid Gibson {
20149bc884b7SDavid Gibson     return kvmppc_read_int_cpu_dt("clock-frequency");
20159bc884b7SDavid Gibson }
20169bc884b7SDavid Gibson 
20176659394fSDavid Gibson uint32_t kvmppc_get_vmx(void)
20186659394fSDavid Gibson {
20196659394fSDavid Gibson     return kvmppc_read_int_cpu_dt("ibm,vmx");
20206659394fSDavid Gibson }
20216659394fSDavid Gibson 
20226659394fSDavid Gibson uint32_t kvmppc_get_dfp(void)
20236659394fSDavid Gibson {
20246659394fSDavid Gibson     return kvmppc_read_int_cpu_dt("ibm,dfp");
20256659394fSDavid Gibson }
20266659394fSDavid Gibson 
20271a61a9aeSStuart Yoder static int kvmppc_get_pvinfo(CPUPPCState *env, struct kvm_ppc_pvinfo *pvinfo)
202845024f09SAlexander Graf {
2029a60f24b5SAndreas Färber     PowerPCCPU *cpu = ppc_env_get_cpu(env);
2030a60f24b5SAndreas Färber     CPUState *cs = CPU(cpu);
203145024f09SAlexander Graf 
20326fd33a75SAlexander Graf     if (kvm_vm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
20331a61a9aeSStuart Yoder         !kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_PVINFO, pvinfo)) {
20341a61a9aeSStuart Yoder         return 0;
20351a61a9aeSStuart Yoder     }
203645024f09SAlexander Graf 
20371a61a9aeSStuart Yoder     return 1;
20381a61a9aeSStuart Yoder }
20391a61a9aeSStuart Yoder 
20401a61a9aeSStuart Yoder int kvmppc_get_hasidle(CPUPPCState *env)
20411a61a9aeSStuart Yoder {
20421a61a9aeSStuart Yoder     struct kvm_ppc_pvinfo pvinfo;
20431a61a9aeSStuart Yoder 
20441a61a9aeSStuart Yoder     if (!kvmppc_get_pvinfo(env, &pvinfo) &&
20451a61a9aeSStuart Yoder         (pvinfo.flags & KVM_PPC_PVINFO_FLAGS_EV_IDLE)) {
20461a61a9aeSStuart Yoder         return 1;
20471a61a9aeSStuart Yoder     }
20481a61a9aeSStuart Yoder 
20491a61a9aeSStuart Yoder     return 0;
20501a61a9aeSStuart Yoder }
20511a61a9aeSStuart Yoder 
20521a61a9aeSStuart Yoder int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
20531a61a9aeSStuart Yoder {
20541a61a9aeSStuart Yoder     uint32_t *hc = (uint32_t *)buf;
20551a61a9aeSStuart Yoder     struct kvm_ppc_pvinfo pvinfo;
20561a61a9aeSStuart Yoder 
20571a61a9aeSStuart Yoder     if (!kvmppc_get_pvinfo(env, &pvinfo)) {
20581a61a9aeSStuart Yoder         memcpy(buf, pvinfo.hcall, buf_len);
205945024f09SAlexander Graf         return 0;
206045024f09SAlexander Graf     }
206145024f09SAlexander Graf 
206245024f09SAlexander Graf     /*
2063d13fc32eSAlexander Graf      * Fall back to hypercalls that always fail, regardless of endianness:
206445024f09SAlexander Graf      *
2065d13fc32eSAlexander Graf      *     tdi 0,r0,72 (becomes b .+8 in wrong endian, nop in good endian)
206645024f09SAlexander Graf      *     li r3, -1
2067d13fc32eSAlexander Graf      *     b .+8       (becomes nop in wrong endian)
2068d13fc32eSAlexander Graf      *     bswap32(li r3, -1)
206945024f09SAlexander Graf      */
207045024f09SAlexander Graf 
2071d13fc32eSAlexander Graf     hc[0] = cpu_to_be32(0x08000048);
2072d13fc32eSAlexander Graf     hc[1] = cpu_to_be32(0x3860ffff);
2073d13fc32eSAlexander Graf     hc[2] = cpu_to_be32(0x48000008);
2074d13fc32eSAlexander Graf     hc[3] = cpu_to_be32(bswap32(0x3860ffff));
207545024f09SAlexander Graf 
20760ddbd053SAlexey Kardashevskiy     return 1;
207745024f09SAlexander Graf }
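
/*
 * Note on kvmppc_get_hypercall(): the fallback path above writes four
 * 32-bit instructions (16 bytes) into buf unconditionally, so callers
 * are expected to supply a buffer of at least that size.  The encodings
 * are 0x3860ffff ("li r3, -1"), 0x48000008 ("b .+8") and 0x08000048
 * (the byte-swapped "b .+8", which decodes as a never-trapping tdi);
 * that is what lets the sequence fail cleanly in either endianness.
 */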
207845024f09SAlexander Graf 
2079026bfd89SDavid Gibson static inline int kvmppc_enable_hcall(KVMState *s, target_ulong hcall)
2080026bfd89SDavid Gibson {
2081026bfd89SDavid Gibson     return kvm_vm_enable_cap(s, KVM_CAP_PPC_ENABLE_HCALL, 0, hcall, 1);
2082026bfd89SDavid Gibson }
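
/*
 * kvmppc_enable_hcall() wraps KVM_CAP_PPC_ENABLE_HCALL: args[0] is the
 * sPAPR hcall number and args[1] (1 here) enables in-kernel handling of
 * that hcall.  The helpers below use it to switch on the in-kernel
 * implementations of specific hypercalls.
 */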
2083026bfd89SDavid Gibson 
2084026bfd89SDavid Gibson void kvmppc_enable_logical_ci_hcalls(void)
2085026bfd89SDavid Gibson {
2086026bfd89SDavid Gibson     /*
2087026bfd89SDavid Gibson      * FIXME: it would be nice if we could detect the cases where
2088026bfd89SDavid Gibson      * we're using a device which requires the in-kernel
2089026bfd89SDavid Gibson      * implementation of these hcalls but the kernel lacks them,
2090026bfd89SDavid Gibson      * and produce a warning in that case.
2091026bfd89SDavid Gibson      */
2092026bfd89SDavid Gibson     kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_LOAD);
2093026bfd89SDavid Gibson     kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_STORE);
2094026bfd89SDavid Gibson }
2095026bfd89SDavid Gibson 
2096ef9971ddSAlexey Kardashevskiy void kvmppc_enable_set_mode_hcall(void)
2097ef9971ddSAlexey Kardashevskiy {
2098ef9971ddSAlexey Kardashevskiy     kvmppc_enable_hcall(kvm_state, H_SET_MODE);
2099ef9971ddSAlexey Kardashevskiy }
2100ef9971ddSAlexey Kardashevskiy 
21015145ad4fSNathan Whitehorn void kvmppc_enable_clear_ref_mod_hcalls(void)
21025145ad4fSNathan Whitehorn {
21035145ad4fSNathan Whitehorn     kvmppc_enable_hcall(kvm_state, H_CLEAR_REF);
21045145ad4fSNathan Whitehorn     kvmppc_enable_hcall(kvm_state, H_CLEAR_MOD);
21055145ad4fSNathan Whitehorn }
21065145ad4fSNathan Whitehorn 
21071bc22652SAndreas Färber void kvmppc_set_papr(PowerPCCPU *cpu)
2108f61b4bedSAlexander Graf {
21091bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
2110f61b4bedSAlexander Graf     int ret;
2111f61b4bedSAlexander Graf 
211248add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_PAPR, 0);
2113f61b4bedSAlexander Graf     if (ret) {
2114072ed5f2SThomas Huth         error_report("This vCPU type or KVM version does not support PAPR");
2115072ed5f2SThomas Huth         exit(1);
2116f61b4bedSAlexander Graf     }
21179b00ea49SDavid Gibson 
21189b00ea49SDavid Gibson     /* Update the capability flag so we sync the right information
21199b00ea49SDavid Gibson      * with kvm */
21209b00ea49SDavid Gibson     cap_papr = 1;
2121f1af19d7SDavid Gibson }
2122f61b4bedSAlexander Graf 
2123d6e166c0SDavid Gibson int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr)
21246db5bb0fSAlexey Kardashevskiy {
2125d6e166c0SDavid Gibson     return kvm_set_one_reg(CPU(cpu), KVM_REG_PPC_ARCH_COMPAT, &compat_pvr);
21266db5bb0fSAlexey Kardashevskiy }
21276db5bb0fSAlexey Kardashevskiy 
21285b95b8b9SAlexander Graf void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy)
21295b95b8b9SAlexander Graf {
21305b95b8b9SAlexander Graf     CPUState *cs = CPU(cpu);
21315b95b8b9SAlexander Graf     int ret;
21325b95b8b9SAlexander Graf 
213348add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_EPR, 0, mpic_proxy);
21345b95b8b9SAlexander Graf     if (ret && mpic_proxy) {
2135072ed5f2SThomas Huth         error_report("This KVM version does not support EPR");
2136072ed5f2SThomas Huth         exit(1);
21375b95b8b9SAlexander Graf     }
21385b95b8b9SAlexander Graf }
21395b95b8b9SAlexander Graf 
2140e97c3636SDavid Gibson int kvmppc_smt_threads(void)
2141e97c3636SDavid Gibson {
2142e97c3636SDavid Gibson     return cap_ppc_smt ? cap_ppc_smt : 1;
2143e97c3636SDavid Gibson }
2144e97c3636SDavid Gibson 
2145fa98fbfcSSam Bobroff int kvmppc_set_smt_threads(int smt)
2146fa98fbfcSSam Bobroff {
2147fa98fbfcSSam Bobroff     int ret;
2148fa98fbfcSSam Bobroff 
2149fa98fbfcSSam Bobroff     ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_SMT, 0, smt, 0);
2150fa98fbfcSSam Bobroff     if (!ret) {
2151fa98fbfcSSam Bobroff         cap_ppc_smt = smt;
2152fa98fbfcSSam Bobroff     }
2153fa98fbfcSSam Bobroff     return ret;
2154fa98fbfcSSam Bobroff }
2155fa98fbfcSSam Bobroff 
2156fa98fbfcSSam Bobroff void kvmppc_hint_smt_possible(Error **errp)
2157fa98fbfcSSam Bobroff {
2158fa98fbfcSSam Bobroff     int i;
2159fa98fbfcSSam Bobroff     GString *g;
2160fa98fbfcSSam Bobroff     char *s;
2161fa98fbfcSSam Bobroff 
2162fa98fbfcSSam Bobroff     assert(kvm_enabled());
2163fa98fbfcSSam Bobroff     if (cap_ppc_smt_possible) {
2164fa98fbfcSSam Bobroff         g = g_string_new("Available VSMT modes:");
2165fa98fbfcSSam Bobroff         for (i = 63; i >= 0; i--) {
2166fa98fbfcSSam Bobroff             if ((1UL << i) & cap_ppc_smt_possible) {
2167fa98fbfcSSam Bobroff                 g_string_append_printf(g, " %lu", (1UL << i));
2168fa98fbfcSSam Bobroff             }
2169fa98fbfcSSam Bobroff         }
2170fa98fbfcSSam Bobroff         s = g_string_free(g, false);
2171fa98fbfcSSam Bobroff         error_append_hint(errp, "%s.\n", s);
2172fa98fbfcSSam Bobroff         g_free(s);
2173fa98fbfcSSam Bobroff     } else {
2174fa98fbfcSSam Bobroff         error_append_hint(errp,
2175fa98fbfcSSam Bobroff                           "This KVM seems to be too old to support VSMT.\n");
2176fa98fbfcSSam Bobroff     }
2177fa98fbfcSSam Bobroff }
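
/*
 * Example of the hint produced above (illustrative values): on a host
 * whose cap_ppc_smt_possible mask has bits 0..3 set, the appended text
 * is "Available VSMT modes: 8 4 2 1."
 */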
2178fa98fbfcSSam Bobroff 
2179fa98fbfcSSam Bobroff 
21807f763a5dSDavid Gibson #ifdef TARGET_PPC64
2181658fa66bSAlexey Kardashevskiy off_t kvmppc_alloc_rma(void **rma)
2182354ac20aSDavid Gibson {
2183354ac20aSDavid Gibson     off_t size;
2184354ac20aSDavid Gibson     int fd;
2185354ac20aSDavid Gibson     struct kvm_allocate_rma ret;
2186354ac20aSDavid Gibson 
2187354ac20aSDavid Gibson     /* If cap_ppc_rma == 0, contiguous RMA allocation is not supported
2188354ac20aSDavid Gibson      * if cap_ppc_rma == 1, contiguous RMA allocation is supported, but
2189354ac20aSDavid Gibson      *                      not necessary on this hardware
2190354ac20aSDavid Gibson      * if cap_ppc_rma == 2, contiguous RMA allocation is needed on this hardware
2191354ac20aSDavid Gibson      *
2192354ac20aSDavid Gibson      * FIXME: We should allow the user to force contiguous RMA
2193354ac20aSDavid Gibson      * allocation in the cap_ppc_rma==1 case.
2194354ac20aSDavid Gibson      */
2195354ac20aSDavid Gibson     if (cap_ppc_rma < 2) {
2196354ac20aSDavid Gibson         return 0;
2197354ac20aSDavid Gibson     }
2198354ac20aSDavid Gibson 
2199354ac20aSDavid Gibson     fd = kvm_vm_ioctl(kvm_state, KVM_ALLOCATE_RMA, &ret);
2200354ac20aSDavid Gibson     if (fd < 0) {
2201354ac20aSDavid Gibson         fprintf(stderr, "KVM: Error on KVM_ALLOCATE_RMA: %s\n",
2202354ac20aSDavid Gibson                 strerror(errno));
2203354ac20aSDavid Gibson         return -1;
2204354ac20aSDavid Gibson     }
2205354ac20aSDavid Gibson 
2206354ac20aSDavid Gibson     size = MIN(ret.rma_size, 256ul << 20);
2207354ac20aSDavid Gibson 
2208658fa66bSAlexey Kardashevskiy     *rma = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
2209658fa66bSAlexey Kardashevskiy     if (*rma == MAP_FAILED) {
2210354ac20aSDavid Gibson         fprintf(stderr, "KVM: Error mapping RMA: %s\n", strerror(errno));
2211354ac20aSDavid Gibson         return -1;
2212354ac20aSDavid Gibson     }
2213354ac20aSDavid Gibson 
2214354ac20aSDavid Gibson     return size;
2215354ac20aSDavid Gibson }
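
/*
 * kvmppc_alloc_rma() return convention, as implemented above: 0 means no
 * special RMA allocation is required (cap_ppc_rma < 2), -1 means the
 * allocation or mapping failed, and a positive value is the usable RMA
 * size in bytes (capped at 256 MiB), with *rma pointing at the region
 * mmap()ed from the fd returned by KVM_ALLOCATE_RMA.
 */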
2216354ac20aSDavid Gibson 
22177f763a5dSDavid Gibson uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift)
22187f763a5dSDavid Gibson {
2219f36951c1SDavid Gibson     struct kvm_ppc_smmu_info info;
2220f36951c1SDavid Gibson     long rampagesize, best_page_shift;
2221f36951c1SDavid Gibson     int i;
2222f36951c1SDavid Gibson 
22237f763a5dSDavid Gibson     if (cap_ppc_rma >= 2) {
22247f763a5dSDavid Gibson         return current_size;
22257f763a5dSDavid Gibson     }
2226f36951c1SDavid Gibson 
2227f36951c1SDavid Gibson     /* Find the largest hardware-supported page size that's less than
2228f36951c1SDavid Gibson      * or equal to the (logical) backing page size of guest RAM */
2229182735efSAndreas Färber     kvm_get_smmu_info(POWERPC_CPU(first_cpu), &info);
22309c607668SAlexey Kardashevskiy     rampagesize = qemu_getrampagesize();
2231f36951c1SDavid Gibson     best_page_shift = 0;
2232f36951c1SDavid Gibson 
2233f36951c1SDavid Gibson     for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
2234f36951c1SDavid Gibson         struct kvm_ppc_one_seg_page_size *sps = &info.sps[i];
2235f36951c1SDavid Gibson 
2236f36951c1SDavid Gibson         if (!sps->page_shift) {
2237f36951c1SDavid Gibson             continue;
2238f36951c1SDavid Gibson         }
2239f36951c1SDavid Gibson 
2240f36951c1SDavid Gibson         if ((sps->page_shift > best_page_shift)
2241f36951c1SDavid Gibson             && ((1UL << sps->page_shift) <= rampagesize)) {
2242f36951c1SDavid Gibson             best_page_shift = sps->page_shift;
2243f36951c1SDavid Gibson         }
2244f36951c1SDavid Gibson     }
2245f36951c1SDavid Gibson 
22467f763a5dSDavid Gibson     return MIN(current_size,
2247f36951c1SDavid Gibson                1ULL << (best_page_shift + hash_shift - 7));
22487f763a5dSDavid Gibson }
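
/*
 * Worked example for the clamp above (arithmetic only, values are
 * illustrative): with best_page_shift = 16 (64 KiB backing pages) and
 * hash_shift = 27 (a 128 MiB hash table), the cap evaluates to
 * 1ULL << (16 + 27 - 7) = 1ULL << 36, i.e. 64 GiB, and the smaller of
 * that and current_size is returned.
 */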
22497f763a5dSDavid Gibson #endif
22507f763a5dSDavid Gibson 
2251da95324eSAlexey Kardashevskiy bool kvmppc_spapr_use_multitce(void)
2252da95324eSAlexey Kardashevskiy {
2253da95324eSAlexey Kardashevskiy     return cap_spapr_multitce;
2254da95324eSAlexey Kardashevskiy }
2255da95324eSAlexey Kardashevskiy 
22563dc410aeSAlexey Kardashevskiy int kvmppc_spapr_enable_inkernel_multitce(void)
22573dc410aeSAlexey Kardashevskiy {
22583dc410aeSAlexey Kardashevskiy     int ret;
22593dc410aeSAlexey Kardashevskiy 
22603dc410aeSAlexey Kardashevskiy     ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_ENABLE_HCALL, 0,
22613dc410aeSAlexey Kardashevskiy                             H_PUT_TCE_INDIRECT, 1);
22623dc410aeSAlexey Kardashevskiy     if (!ret) {
22633dc410aeSAlexey Kardashevskiy         ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_ENABLE_HCALL, 0,
22643dc410aeSAlexey Kardashevskiy                                 H_STUFF_TCE, 1);
22653dc410aeSAlexey Kardashevskiy     }
22663dc410aeSAlexey Kardashevskiy 
22673dc410aeSAlexey Kardashevskiy     return ret;
22683dc410aeSAlexey Kardashevskiy }
22693dc410aeSAlexey Kardashevskiy 
2270d6ee2a7cSAlexey Kardashevskiy void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift,
2271d6ee2a7cSAlexey Kardashevskiy                               uint64_t bus_offset, uint32_t nb_table,
2272d6ee2a7cSAlexey Kardashevskiy                               int *pfd, bool need_vfio)
22730f5cb298SDavid Gibson {
22740f5cb298SDavid Gibson     long len;
22750f5cb298SDavid Gibson     int fd;
22760f5cb298SDavid Gibson     void *table;
22770f5cb298SDavid Gibson 
2278b5aec396SDavid Gibson     /* Must set fd to -1 so we don't try to munmap when called for
2279b5aec396SDavid Gibson      * destroying the table, which the upper layers -will- do
2280b5aec396SDavid Gibson      */
2281b5aec396SDavid Gibson     *pfd = -1;
22826a81dd17SDavid Gibson     if (!cap_spapr_tce || (need_vfio && !cap_spapr_vfio)) {
22830f5cb298SDavid Gibson         return NULL;
22840f5cb298SDavid Gibson     }
22850f5cb298SDavid Gibson 
2286d6ee2a7cSAlexey Kardashevskiy     if (cap_spapr_tce_64) {
2287d6ee2a7cSAlexey Kardashevskiy         struct kvm_create_spapr_tce_64 args = {
2288d6ee2a7cSAlexey Kardashevskiy             .liobn = liobn,
2289d6ee2a7cSAlexey Kardashevskiy             .page_shift = page_shift,
2290d6ee2a7cSAlexey Kardashevskiy             .offset = bus_offset >> page_shift,
2291d6ee2a7cSAlexey Kardashevskiy             .size = nb_table,
2292d6ee2a7cSAlexey Kardashevskiy             .flags = 0
2293d6ee2a7cSAlexey Kardashevskiy         };
2294d6ee2a7cSAlexey Kardashevskiy         fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE_64, &args);
2295d6ee2a7cSAlexey Kardashevskiy         if (fd < 0) {
2296d6ee2a7cSAlexey Kardashevskiy             fprintf(stderr,
2297d6ee2a7cSAlexey Kardashevskiy                     "KVM: Failed to create TCE64 table for liobn 0x%x\n",
2298d6ee2a7cSAlexey Kardashevskiy                     liobn);
2299d6ee2a7cSAlexey Kardashevskiy             return NULL;
2300d6ee2a7cSAlexey Kardashevskiy         }
2301d6ee2a7cSAlexey Kardashevskiy     } else if (cap_spapr_tce) {
2302d6ee2a7cSAlexey Kardashevskiy         uint64_t window_size = (uint64_t) nb_table << page_shift;
2303d6ee2a7cSAlexey Kardashevskiy         struct kvm_create_spapr_tce args = {
2304d6ee2a7cSAlexey Kardashevskiy             .liobn = liobn,
2305d6ee2a7cSAlexey Kardashevskiy             .window_size = window_size,
2306d6ee2a7cSAlexey Kardashevskiy         };
2307d6ee2a7cSAlexey Kardashevskiy         if ((window_size != args.window_size) || bus_offset) {
2308d6ee2a7cSAlexey Kardashevskiy             return NULL;
2309d6ee2a7cSAlexey Kardashevskiy         }
23100f5cb298SDavid Gibson         fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE, &args);
23110f5cb298SDavid Gibson         if (fd < 0) {
2312b5aec396SDavid Gibson             fprintf(stderr, "KVM: Failed to create TCE table for liobn 0x%x\n",
2313b5aec396SDavid Gibson                     liobn);
23140f5cb298SDavid Gibson             return NULL;
23150f5cb298SDavid Gibson         }
2316d6ee2a7cSAlexey Kardashevskiy     } else {
2317d6ee2a7cSAlexey Kardashevskiy         return NULL;
2318d6ee2a7cSAlexey Kardashevskiy     }
23190f5cb298SDavid Gibson 
2320d6ee2a7cSAlexey Kardashevskiy     len = nb_table * sizeof(uint64_t);
23210f5cb298SDavid Gibson     /* FIXME: round this up to page size */
23220f5cb298SDavid Gibson 
232374b41e56SDavid Gibson     table = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
23240f5cb298SDavid Gibson     if (table == MAP_FAILED) {
2325b5aec396SDavid Gibson         fprintf(stderr, "KVM: Failed to map TCE table for liobn 0x%x\n",
2326b5aec396SDavid Gibson                 liobn);
23270f5cb298SDavid Gibson         close(fd);
23280f5cb298SDavid Gibson         return NULL;
23290f5cb298SDavid Gibson     }
23300f5cb298SDavid Gibson 
23310f5cb298SDavid Gibson     *pfd = fd;
23320f5cb298SDavid Gibson     return table;
23330f5cb298SDavid Gibson }
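
/*
 * Summary of the two ioctl flavours used above: KVM_CREATE_SPAPR_TCE_64
 * takes a page shift, a bus offset (in units of pages) and an entry
 * count, while the legacy KVM_CREATE_SPAPR_TCE only takes a 32-bit
 * window size (nb_table << page_shift) and therefore requires
 * bus_offset == 0 and a window that fits in 32 bits.  In both cases the
 * returned table is nb_table 64-bit TCE entries mmap()ed from the
 * ioctl's fd.
 */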
23340f5cb298SDavid Gibson 
2335523e7b8aSAlexey Kardashevskiy int kvmppc_remove_spapr_tce(void *table, int fd, uint32_t nb_table)
23360f5cb298SDavid Gibson {
23370f5cb298SDavid Gibson     long len;
23380f5cb298SDavid Gibson 
23390f5cb298SDavid Gibson     if (fd < 0) {
23400f5cb298SDavid Gibson         return -1;
23410f5cb298SDavid Gibson     }
23420f5cb298SDavid Gibson 
2343523e7b8aSAlexey Kardashevskiy     len = nb_table * sizeof(uint64_t);
23440f5cb298SDavid Gibson     if ((munmap(table, len) < 0) ||
23450f5cb298SDavid Gibson         (close(fd) < 0)) {
2346b5aec396SDavid Gibson         fprintf(stderr, "KVM: Unexpected error removing TCE table: %s",
2347b5aec396SDavid Gibson                 strerror(errno));
23480f5cb298SDavid Gibson         /* Leak the table */
23490f5cb298SDavid Gibson     }
23500f5cb298SDavid Gibson 
23510f5cb298SDavid Gibson     return 0;
23520f5cb298SDavid Gibson }
23530f5cb298SDavid Gibson 
23547f763a5dSDavid Gibson int kvmppc_reset_htab(int shift_hint)
23557f763a5dSDavid Gibson {
23567f763a5dSDavid Gibson     uint32_t shift = shift_hint;
23577f763a5dSDavid Gibson 
2358ace9a2cbSDavid Gibson     if (!kvm_enabled()) {
2359ace9a2cbSDavid Gibson         /* Full emulation, tell caller to allocate htab itself */
2360ace9a2cbSDavid Gibson         return 0;
2361ace9a2cbSDavid Gibson     }
23626977afdaSGreg Kurz     if (kvm_vm_check_extension(kvm_state, KVM_CAP_PPC_ALLOC_HTAB)) {
23637f763a5dSDavid Gibson         int ret;
23647f763a5dSDavid Gibson         ret = kvm_vm_ioctl(kvm_state, KVM_PPC_ALLOCATE_HTAB, &shift);
2365ace9a2cbSDavid Gibson         if (ret == -ENOTTY) {
2366ace9a2cbSDavid Gibson             /* At least some versions of PR KVM advertise the
2367ace9a2cbSDavid Gibson              * capability, but don't implement the ioctl().  Oops.
2368ace9a2cbSDavid Gibson              * Return 0 so that we allocate the htab in qemu, as is
2369ace9a2cbSDavid Gibson              * correct for PR. */
2370ace9a2cbSDavid Gibson             return 0;
2371ace9a2cbSDavid Gibson         } else if (ret < 0) {
23727f763a5dSDavid Gibson             return ret;
23737f763a5dSDavid Gibson         }
23747f763a5dSDavid Gibson         return shift;
23757f763a5dSDavid Gibson     }
23767f763a5dSDavid Gibson 
2377ace9a2cbSDavid Gibson     /* We have a kernel that predates the htab reset calls.  For PR
2378ace9a2cbSDavid Gibson      * KVM, we need to allocate the htab ourselves; an HV KVM of
237996c9cff0SThomas Huth      * this era will have already allocated a fixed 16MB hash table. */
238096c9cff0SThomas Huth     if (kvmppc_is_pr(kvm_state)) {
2381ace9a2cbSDavid Gibson         /* PR - tell caller to allocate htab */
23827f763a5dSDavid Gibson         return 0;
2383ace9a2cbSDavid Gibson     } else {
2384ace9a2cbSDavid Gibson         /* HV - assume 16MB kernel allocated htab */
2385ace9a2cbSDavid Gibson         return 24;
2386ace9a2cbSDavid Gibson     }
23877f763a5dSDavid Gibson }
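
/*
 * kvmppc_reset_htab() return convention, as implemented above: a
 * positive value is the shift (log2 size) of the kernel-managed hash
 * table (either reported by KVM_PPC_ALLOCATE_HTAB or the assumed 16MB
 * default, i.e. 24, on old HV kernels), 0 tells the caller to allocate
 * the HPT in QEMU itself, and a negative value is an ioctl error.
 */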
23887f763a5dSDavid Gibson 
2389a1e98583SDavid Gibson static inline uint32_t mfpvr(void)
2390a1e98583SDavid Gibson {
2391a1e98583SDavid Gibson     uint32_t pvr;
2392a1e98583SDavid Gibson 
2393a1e98583SDavid Gibson     asm ("mfpvr %0"
2394a1e98583SDavid Gibson          : "=r"(pvr));
2395a1e98583SDavid Gibson     return pvr;
2396a1e98583SDavid Gibson }
2397a1e98583SDavid Gibson 
2398a7342588SDavid Gibson static void alter_insns(uint64_t *word, uint64_t flags, bool on)
2399a7342588SDavid Gibson {
2400a7342588SDavid Gibson     if (on) {
2401a7342588SDavid Gibson         *word |= flags;
2402a7342588SDavid Gibson     } else {
2403a7342588SDavid Gibson         *word &= ~flags;
2404a7342588SDavid Gibson     }
2405a7342588SDavid Gibson }
2406a7342588SDavid Gibson 
24072985b86bSAndreas Färber static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data)
24082985b86bSAndreas Färber {
24092985b86bSAndreas Färber     PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
2410a7342588SDavid Gibson     uint32_t vmx = kvmppc_get_vmx();
2411a7342588SDavid Gibson     uint32_t dfp = kvmppc_get_dfp();
24120cbad81fSDavid Gibson     uint32_t dcache_size = kvmppc_read_int_cpu_dt("d-cache-size");
24130cbad81fSDavid Gibson     uint32_t icache_size = kvmppc_read_int_cpu_dt("i-cache-size");
2414a1e98583SDavid Gibson 
2415cfe34f44SAndreas Färber     /* Now fix up the class with information we can query from the host */
24163bc9ccc0SAlexey Kardashevskiy     pcc->pvr = mfpvr();
2417a7342588SDavid Gibson 
241870bca53fSAlexander Graf     if (vmx != -1) {
241970bca53fSAlexander Graf         /* Only override when we know what the host supports */
2420cfe34f44SAndreas Färber         alter_insns(&pcc->insns_flags, PPC_ALTIVEC, vmx > 0);
2421cfe34f44SAndreas Färber         alter_insns(&pcc->insns_flags2, PPC2_VSX, vmx > 1);
242270bca53fSAlexander Graf     }
242370bca53fSAlexander Graf     if (dfp != -1) {
242470bca53fSAlexander Graf         /* Only override when we know what the host supports */
2425cfe34f44SAndreas Färber         alter_insns(&pcc->insns_flags2, PPC2_DFP, dfp);
242670bca53fSAlexander Graf     }
24270cbad81fSDavid Gibson 
24280cbad81fSDavid Gibson     if (dcache_size != -1) {
24290cbad81fSDavid Gibson         pcc->l1_dcache_size = dcache_size;
24300cbad81fSDavid Gibson     }
24310cbad81fSDavid Gibson 
24320cbad81fSDavid Gibson     if (icache_size != -1) {
24330cbad81fSDavid Gibson         pcc->l1_icache_size = icache_size;
24340cbad81fSDavid Gibson     }
2435c64abd1fSSam Bobroff 
2436c64abd1fSSam Bobroff #if defined(TARGET_PPC64)
2437c64abd1fSSam Bobroff     pcc->radix_page_info = kvm_get_radix_page_info();
24385f3066d8SDavid Gibson 
24395f3066d8SDavid Gibson     if ((pcc->pvr & 0xffffff00) == CPU_POWERPC_POWER9_DD1) {
24405f3066d8SDavid Gibson         /*
24415f3066d8SDavid Gibson          * POWER9 DD1 has some bugs which make it not really ISA 3.00
24425f3066d8SDavid Gibson          * compliant.  More importantly, advertising ISA 3.00
24435f3066d8SDavid Gibson          * architected mode may prevent guests from activating
24445f3066d8SDavid Gibson          * necessary DD1 workarounds.
24455f3066d8SDavid Gibson          */
24465f3066d8SDavid Gibson         pcc->pcr_supported &= ~(PCR_COMPAT_3_00 | PCR_COMPAT_2_07
24475f3066d8SDavid Gibson                                 | PCR_COMPAT_2_06 | PCR_COMPAT_2_05);
24485f3066d8SDavid Gibson     }
2449c64abd1fSSam Bobroff #endif /* defined(TARGET_PPC64) */
2450a1e98583SDavid Gibson }
2451a1e98583SDavid Gibson 
24523b961124SStuart Yoder bool kvmppc_has_cap_epr(void)
24533b961124SStuart Yoder {
24543b961124SStuart Yoder     return cap_epr;
24553b961124SStuart Yoder }
24563b961124SStuart Yoder 
245787a91de6SAlexander Graf bool kvmppc_has_cap_fixup_hcalls(void)
245887a91de6SAlexander Graf {
245987a91de6SAlexander Graf     return cap_fixup_hcalls;
246087a91de6SAlexander Graf }
246187a91de6SAlexander Graf 
2462bac3bf28SThomas Huth bool kvmppc_has_cap_htm(void)
2463bac3bf28SThomas Huth {
2464bac3bf28SThomas Huth     return cap_htm;
2465bac3bf28SThomas Huth }
2466bac3bf28SThomas Huth 
2467cf1c4cceSSam Bobroff bool kvmppc_has_cap_mmu_radix(void)
2468cf1c4cceSSam Bobroff {
2469cf1c4cceSSam Bobroff     return cap_mmu_radix;
2470cf1c4cceSSam Bobroff }
2471cf1c4cceSSam Bobroff 
2472cf1c4cceSSam Bobroff bool kvmppc_has_cap_mmu_hash_v3(void)
2473cf1c4cceSSam Bobroff {
2474cf1c4cceSSam Bobroff     return cap_mmu_hash_v3;
2475cf1c4cceSSam Bobroff }
2476cf1c4cceSSam Bobroff 
247752b2519cSThomas Huth PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void)
247852b2519cSThomas Huth {
247952b2519cSThomas Huth     uint32_t host_pvr = mfpvr();
248052b2519cSThomas Huth     PowerPCCPUClass *pvr_pcc;
248152b2519cSThomas Huth 
248252b2519cSThomas Huth     pvr_pcc = ppc_cpu_class_by_pvr(host_pvr);
248352b2519cSThomas Huth     if (pvr_pcc == NULL) {
248452b2519cSThomas Huth         pvr_pcc = ppc_cpu_class_by_pvr_mask(host_pvr);
248552b2519cSThomas Huth     }
248652b2519cSThomas Huth 
248752b2519cSThomas Huth     return pvr_pcc;
248852b2519cSThomas Huth }
248952b2519cSThomas Huth 
24905ba4576bSAndreas Färber static int kvm_ppc_register_host_cpu_type(void)
24915ba4576bSAndreas Färber {
24925ba4576bSAndreas Färber     TypeInfo type_info = {
24935ba4576bSAndreas Färber         .name = TYPE_HOST_POWERPC_CPU,
24945ba4576bSAndreas Färber         .class_init = kvmppc_host_cpu_class_init,
24955ba4576bSAndreas Färber     };
24965ba4576bSAndreas Färber     PowerPCCPUClass *pvr_pcc;
249792e926e1SGreg Kurz     ObjectClass *oc;
24985b79b1caSAlexey Kardashevskiy     DeviceClass *dc;
2499715d4b96SThomas Huth     int i;
25005ba4576bSAndreas Färber 
250152b2519cSThomas Huth     pvr_pcc = kvm_ppc_get_host_cpu_class();
25023bc9ccc0SAlexey Kardashevskiy     if (pvr_pcc == NULL) {
25035ba4576bSAndreas Färber         return -1;
25045ba4576bSAndreas Färber     }
25055ba4576bSAndreas Färber     type_info.parent = object_class_get_name(OBJECT_CLASS(pvr_pcc));
25065ba4576bSAndreas Färber     type_register(&type_info);
25075b79b1caSAlexey Kardashevskiy 
250892e926e1SGreg Kurz     oc = object_class_by_name(type_info.name);
250992e926e1SGreg Kurz     g_assert(oc);
251092e926e1SGreg Kurz 
25113b542549SBharata B Rao #if defined(TARGET_PPC64)
25123b542549SBharata B Rao     type_info.name = g_strdup_printf("%s-"TYPE_SPAPR_CPU_CORE, "host");
25133b542549SBharata B Rao     type_info.parent = TYPE_SPAPR_CPU_CORE,
25147ebaf795SBharata B Rao     type_info.instance_size = sizeof(sPAPRCPUCore);
25157ebaf795SBharata B Rao     type_info.instance_init = NULL;
25167ebaf795SBharata B Rao     type_info.class_init = spapr_cpu_core_class_init;
25177ebaf795SBharata B Rao     type_info.class_data = (void *) "host";
25183b542549SBharata B Rao     type_register(&type_info);
25193b542549SBharata B Rao     g_free((void *)type_info.name);
25203b542549SBharata B Rao #endif
25213b542549SBharata B Rao 
2522715d4b96SThomas Huth     /*
2523715d4b96SThomas Huth      * Update generic CPU family class alias (e.g. on a POWER8NVL host,
2524715d4b96SThomas Huth      * we want "POWER8" to be a "family" alias that points to the current
2525715d4b96SThomas Huth      * host CPU type, too)
2526715d4b96SThomas Huth      */
2527715d4b96SThomas Huth     dc = DEVICE_CLASS(ppc_cpu_get_family_class(pvr_pcc));
2528715d4b96SThomas Huth     for (i = 0; ppc_cpu_aliases[i].alias != NULL; i++) {
2529c5354f54SIgor Mammedov         if (strcasecmp(ppc_cpu_aliases[i].alias, dc->desc) == 0) {
2530715d4b96SThomas Huth             char *suffix;
2531715d4b96SThomas Huth 
2532715d4b96SThomas Huth             ppc_cpu_aliases[i].model = g_strdup(object_class_get_name(oc));
2533c9137065SIgor Mammedov             suffix = strstr(ppc_cpu_aliases[i].model, POWERPC_CPU_TYPE_SUFFIX);
2534715d4b96SThomas Huth             if (suffix) {
2535715d4b96SThomas Huth                 *suffix = 0;
2536715d4b96SThomas Huth             }
2537715d4b96SThomas Huth             break;
2538715d4b96SThomas Huth         }
2539715d4b96SThomas Huth     }
2540715d4b96SThomas Huth 
25415ba4576bSAndreas Färber     return 0;
25425ba4576bSAndreas Färber }
25435ba4576bSAndreas Färber 
2544feaa64c4SDavid Gibson int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function)
2545feaa64c4SDavid Gibson {
2546feaa64c4SDavid Gibson     struct kvm_rtas_token_args args = {
2547feaa64c4SDavid Gibson         .token = token,
2548feaa64c4SDavid Gibson     };
2549feaa64c4SDavid Gibson 
2550feaa64c4SDavid Gibson     if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_RTAS)) {
2551feaa64c4SDavid Gibson         return -ENOENT;
2552feaa64c4SDavid Gibson     }
2553feaa64c4SDavid Gibson 
2554feaa64c4SDavid Gibson     strncpy(args.name, function, sizeof(args.name));
2555feaa64c4SDavid Gibson 
2556feaa64c4SDavid Gibson     return kvm_vm_ioctl(kvm_state, KVM_PPC_RTAS_DEFINE_TOKEN, &args);
2557feaa64c4SDavid Gibson }
255812b1143bSDavid Gibson 
255914b0d748SGreg Kurz int kvmppc_get_htab_fd(bool write, uint64_t index, Error **errp)
2560e68cb8b4SAlexey Kardashevskiy {
2561e68cb8b4SAlexey Kardashevskiy     struct kvm_get_htab_fd s = {
2562e68cb8b4SAlexey Kardashevskiy         .flags = write ? KVM_GET_HTAB_WRITE : 0,
256314b0d748SGreg Kurz         .start_index = index,
2564e68cb8b4SAlexey Kardashevskiy     };
256582be8e73SGreg Kurz     int ret;
2566e68cb8b4SAlexey Kardashevskiy 
2567e68cb8b4SAlexey Kardashevskiy     if (!cap_htab_fd) {
256814b0d748SGreg Kurz         error_setg(errp, "KVM version doesn't support %s the HPT",
256914b0d748SGreg Kurz                    write ? "writing" : "reading");
257082be8e73SGreg Kurz         return -ENOTSUP;
2571e68cb8b4SAlexey Kardashevskiy     }
2572e68cb8b4SAlexey Kardashevskiy 
257382be8e73SGreg Kurz     ret = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &s);
257482be8e73SGreg Kurz     if (ret < 0) {
257514b0d748SGreg Kurz         error_setg(errp, "Unable to open fd for %s HPT %s KVM: %s",
257614b0d748SGreg Kurz                    write ? "writing" : "reading", write ? "to" : "from",
257714b0d748SGreg Kurz                    strerror(errno));
257882be8e73SGreg Kurz         return -errno;
257982be8e73SGreg Kurz     }
258082be8e73SGreg Kurz 
258182be8e73SGreg Kurz     return ret;
2582e68cb8b4SAlexey Kardashevskiy }
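
/*
 * The fd obtained above is the KVM HTAB fd (KVM_PPC_GET_HTAB_FD): with
 * write == false it is read to stream out hash table entries starting
 * at start_index, and with write == true it accepts the same
 * header + PTE chunks to update the table, as done by the save/load and
 * read/write helpers below.
 */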
2583e68cb8b4SAlexey Kardashevskiy 
2584e68cb8b4SAlexey Kardashevskiy int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns)
2585e68cb8b4SAlexey Kardashevskiy {
2586bc72ad67SAlex Bligh     int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
2587e68cb8b4SAlexey Kardashevskiy     uint8_t buf[bufsize];
2588e68cb8b4SAlexey Kardashevskiy     ssize_t rc;
2589e68cb8b4SAlexey Kardashevskiy 
2590e68cb8b4SAlexey Kardashevskiy     do {
2591e68cb8b4SAlexey Kardashevskiy         rc = read(fd, buf, bufsize);
2592e68cb8b4SAlexey Kardashevskiy         if (rc < 0) {
2593e68cb8b4SAlexey Kardashevskiy             fprintf(stderr, "Error reading data from KVM HTAB fd: %s\n",
2594e68cb8b4SAlexey Kardashevskiy                     strerror(errno));
2595e68cb8b4SAlexey Kardashevskiy             return rc;
2596e68cb8b4SAlexey Kardashevskiy         } else if (rc) {
2597e094c4c1SCédric Le Goater             uint8_t *buffer = buf;
2598e094c4c1SCédric Le Goater             ssize_t n = rc;
2599e094c4c1SCédric Le Goater             while (n) {
2600e094c4c1SCédric Le Goater                 struct kvm_get_htab_header *head =
2601e094c4c1SCédric Le Goater                     (struct kvm_get_htab_header *) buffer;
2602e094c4c1SCédric Le Goater                 size_t chunksize = sizeof(*head) +
2603e094c4c1SCédric Le Goater                      HASH_PTE_SIZE_64 * head->n_valid;
2604e094c4c1SCédric Le Goater 
2605e094c4c1SCédric Le Goater                 qemu_put_be32(f, head->index);
2606e094c4c1SCédric Le Goater                 qemu_put_be16(f, head->n_valid);
2607e094c4c1SCédric Le Goater                 qemu_put_be16(f, head->n_invalid);
2608e094c4c1SCédric Le Goater                 qemu_put_buffer(f, (void *)(head + 1),
2609e094c4c1SCédric Le Goater                                 HASH_PTE_SIZE_64 * head->n_valid);
2610e094c4c1SCédric Le Goater 
2611e094c4c1SCédric Le Goater                 buffer += chunksize;
2612e094c4c1SCédric Le Goater                 n -= chunksize;
2613e094c4c1SCédric Le Goater             }
2614e68cb8b4SAlexey Kardashevskiy         }
2615e68cb8b4SAlexey Kardashevskiy     } while ((rc != 0)
2616e68cb8b4SAlexey Kardashevskiy              && ((max_ns < 0)
2617bc72ad67SAlex Bligh                  || ((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) < max_ns)));
2618e68cb8b4SAlexey Kardashevskiy 
2619e68cb8b4SAlexey Kardashevskiy     return (rc == 0) ? 1 : 0;
2620e68cb8b4SAlexey Kardashevskiy }
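
/*
 * Record layout produced above for each chunk in the migration stream:
 * a big-endian 32-bit HPTE index, a 16-bit n_valid count, a 16-bit
 * n_invalid count, then n_valid * HASH_PTE_SIZE_64 bytes of raw PTE
 * data.  kvmppc_load_htab_chunk() below rebuilds the kernel's
 * kvm_get_htab_header from exactly these fields on the destination.
 */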
2621e68cb8b4SAlexey Kardashevskiy 
2622e68cb8b4SAlexey Kardashevskiy int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
2623e68cb8b4SAlexey Kardashevskiy                            uint16_t n_valid, uint16_t n_invalid)
2624e68cb8b4SAlexey Kardashevskiy {
2625e68cb8b4SAlexey Kardashevskiy     struct kvm_get_htab_header *buf;
2626e68cb8b4SAlexey Kardashevskiy     size_t chunksize = sizeof(*buf) + n_valid*HASH_PTE_SIZE_64;
2627e68cb8b4SAlexey Kardashevskiy     ssize_t rc;
2628e68cb8b4SAlexey Kardashevskiy 
2629e68cb8b4SAlexey Kardashevskiy     buf = alloca(chunksize);
2630e68cb8b4SAlexey Kardashevskiy     buf->index = index;
2631e68cb8b4SAlexey Kardashevskiy     buf->n_valid = n_valid;
2632e68cb8b4SAlexey Kardashevskiy     buf->n_invalid = n_invalid;
2633e68cb8b4SAlexey Kardashevskiy 
2634e68cb8b4SAlexey Kardashevskiy     qemu_get_buffer(f, (void *)(buf + 1), HASH_PTE_SIZE_64*n_valid);
2635e68cb8b4SAlexey Kardashevskiy 
2636e68cb8b4SAlexey Kardashevskiy     rc = write(fd, buf, chunksize);
2637e68cb8b4SAlexey Kardashevskiy     if (rc < 0) {
2638e68cb8b4SAlexey Kardashevskiy         fprintf(stderr, "Error writing KVM hash table: %s\n",
2639e68cb8b4SAlexey Kardashevskiy                 strerror(errno));
2640e68cb8b4SAlexey Kardashevskiy         return rc;
2641e68cb8b4SAlexey Kardashevskiy     }
2642e68cb8b4SAlexey Kardashevskiy     if (rc != chunksize) {
2643e68cb8b4SAlexey Kardashevskiy         /* We should never get a short write on a single chunk */
2644e68cb8b4SAlexey Kardashevskiy         fprintf(stderr, "Short write, restoring KVM hash table\n");
2645e68cb8b4SAlexey Kardashevskiy         return -1;
2646e68cb8b4SAlexey Kardashevskiy     }
2647e68cb8b4SAlexey Kardashevskiy     return 0;
2648e68cb8b4SAlexey Kardashevskiy }
2649e68cb8b4SAlexey Kardashevskiy 
265020d695a9SAndreas Färber bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
26514513d923SGleb Natapov {
26524513d923SGleb Natapov     return true;
26534513d923SGleb Natapov }
2654a1b87fe0SJan Kiszka 
265582169660SScott Wood void kvm_arch_init_irq_routing(KVMState *s)
265682169660SScott Wood {
265782169660SScott Wood }
2658c65f9a07SGreg Kurz 
26591ad9f0a4SDavid Gibson void kvmppc_read_hptes(ppc_hash_pte64_t *hptes, hwaddr ptex, int n)
26601ad9f0a4SDavid Gibson {
26611ad9f0a4SDavid Gibson     int fd, rc;
26621ad9f0a4SDavid Gibson     int i;
26637c43bca0SAneesh Kumar K.V 
266414b0d748SGreg Kurz     fd = kvmppc_get_htab_fd(false, ptex, &error_abort);
26651ad9f0a4SDavid Gibson 
26661ad9f0a4SDavid Gibson     i = 0;
26671ad9f0a4SDavid Gibson     while (i < n) {
26681ad9f0a4SDavid Gibson         struct kvm_get_htab_header *hdr;
26691ad9f0a4SDavid Gibson         int m = n < HPTES_PER_GROUP ? n : HPTES_PER_GROUP;
26701ad9f0a4SDavid Gibson         char buf[sizeof(*hdr) + m * HASH_PTE_SIZE_64];
26711ad9f0a4SDavid Gibson 
26721ad9f0a4SDavid Gibson         rc = read(fd, buf, sizeof(buf));
26731ad9f0a4SDavid Gibson         if (rc < 0) {
26741ad9f0a4SDavid Gibson             hw_error("kvmppc_read_hptes: Unable to read HPTEs");
26751ad9f0a4SDavid Gibson         }
26761ad9f0a4SDavid Gibson 
26771ad9f0a4SDavid Gibson         hdr = (struct kvm_get_htab_header *)buf;
26781ad9f0a4SDavid Gibson         while ((i < n) && ((char *)hdr < (buf + rc))) {
26791ad9f0a4SDavid Gibson             int invalid = hdr->n_invalid;
26801ad9f0a4SDavid Gibson 
26811ad9f0a4SDavid Gibson             if (hdr->index != (ptex + i)) {
26821ad9f0a4SDavid Gibson                 hw_error("kvmppc_read_hptes: Unexpected HPTE index %"PRIu32
26831ad9f0a4SDavid Gibson                          " != (%"HWADDR_PRIu" + %d)", hdr->index, ptex, i);
26841ad9f0a4SDavid Gibson             }
26851ad9f0a4SDavid Gibson 
26861ad9f0a4SDavid Gibson             memcpy(hptes + i, hdr + 1, HASH_PTE_SIZE_64 * hdr->n_valid);
26871ad9f0a4SDavid Gibson             i += hdr->n_valid;
26881ad9f0a4SDavid Gibson 
26891ad9f0a4SDavid Gibson             if ((n - i) < invalid) {
26901ad9f0a4SDavid Gibson                 invalid = n - i;
26911ad9f0a4SDavid Gibson             }
26921ad9f0a4SDavid Gibson             memset(hptes + i, 0, invalid * HASH_PTE_SIZE_64);
26931ad9f0a4SDavid Gibson             i += hdr->n_invalid;
26941ad9f0a4SDavid Gibson 
26951ad9f0a4SDavid Gibson             hdr = (struct kvm_get_htab_header *)
26961ad9f0a4SDavid Gibson                 ((char *)(hdr + 1) + HASH_PTE_SIZE_64 * hdr->n_valid);
26971ad9f0a4SDavid Gibson         }
26981ad9f0a4SDavid Gibson     }
26991ad9f0a4SDavid Gibson 
27001ad9f0a4SDavid Gibson     close(fd);
27011ad9f0a4SDavid Gibson }
27021ad9f0a4SDavid Gibson 
27031ad9f0a4SDavid Gibson void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1)
27047c43bca0SAneesh Kumar K.V {
27051ad9f0a4SDavid Gibson     int fd, rc;
27061ad9f0a4SDavid Gibson     struct {
27071ad9f0a4SDavid Gibson         struct kvm_get_htab_header hdr;
27081ad9f0a4SDavid Gibson         uint64_t pte0;
27091ad9f0a4SDavid Gibson         uint64_t pte1;
27101ad9f0a4SDavid Gibson     } buf;
2711c1385933SAneesh Kumar K.V 
271214b0d748SGreg Kurz     fd = kvmppc_get_htab_fd(true, 0 /* Ignored */, &error_abort);
2713c1385933SAneesh Kumar K.V 
27141ad9f0a4SDavid Gibson     buf.hdr.n_valid = 1;
27151ad9f0a4SDavid Gibson     buf.hdr.n_invalid = 0;
27161ad9f0a4SDavid Gibson     buf.hdr.index = ptex;
27171ad9f0a4SDavid Gibson     buf.pte0 = cpu_to_be64(pte0);
27181ad9f0a4SDavid Gibson     buf.pte1 = cpu_to_be64(pte1);
27191ad9f0a4SDavid Gibson 
27201ad9f0a4SDavid Gibson     rc = write(fd, &buf, sizeof(buf));
27211ad9f0a4SDavid Gibson     if (rc != sizeof(buf)) {
27221ad9f0a4SDavid Gibson         hw_error("kvmppc_write_hpte: Unable to update KVM HPT");
2723c1385933SAneesh Kumar K.V     }
27241ad9f0a4SDavid Gibson     close(fd);
2725c1385933SAneesh Kumar K.V }
27269e03a040SFrank Blaschka 
27279e03a040SFrank Blaschka int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
2728dc9f06caSPavel Fedin                              uint64_t address, uint32_t data, PCIDevice *dev)
27299e03a040SFrank Blaschka {
27309e03a040SFrank Blaschka     return 0;
27319e03a040SFrank Blaschka }
27321850b6b7SEric Auger 
273338d87493SPeter Xu int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
273438d87493SPeter Xu                                 int vector, PCIDevice *dev)
273538d87493SPeter Xu {
273638d87493SPeter Xu     return 0;
273738d87493SPeter Xu }
273838d87493SPeter Xu 
273938d87493SPeter Xu int kvm_arch_release_virq_post(int virq)
274038d87493SPeter Xu {
274138d87493SPeter Xu     return 0;
274238d87493SPeter Xu }
274338d87493SPeter Xu 
27441850b6b7SEric Auger int kvm_arch_msi_data_to_gsi(uint32_t data)
27451850b6b7SEric Auger {
27461850b6b7SEric Auger     return data & 0xffff;
27471850b6b7SEric Auger }
27484d9392beSThomas Huth 
27494d9392beSThomas Huth int kvmppc_enable_hwrng(void)
27504d9392beSThomas Huth {
27514d9392beSThomas Huth     if (!kvm_enabled() || !kvm_check_extension(kvm_state, KVM_CAP_PPC_HWRNG)) {
27524d9392beSThomas Huth         return -1;
27534d9392beSThomas Huth     }
27544d9392beSThomas Huth 
27554d9392beSThomas Huth     return kvmppc_enable_hcall(kvm_state, H_RANDOM);
27564d9392beSThomas Huth }
275730f4b05bSDavid Gibson 
275830f4b05bSDavid Gibson void kvmppc_check_papr_resize_hpt(Error **errp)
275930f4b05bSDavid Gibson {
276030f4b05bSDavid Gibson     if (!kvm_enabled()) {
2761b55d295eSDavid Gibson         return; /* No KVM, we're good */
2762b55d295eSDavid Gibson     }
2763b55d295eSDavid Gibson 
2764b55d295eSDavid Gibson     if (cap_resize_hpt) {
2765b55d295eSDavid Gibson         return; /* Kernel has explicit support, we're good */
2766b55d295eSDavid Gibson     }
2767b55d295eSDavid Gibson 
2768b55d295eSDavid Gibson     /* Otherwise fallback on looking for PR KVM */
2769b55d295eSDavid Gibson     if (kvmppc_is_pr(kvm_state)) {
277030f4b05bSDavid Gibson         return;
277130f4b05bSDavid Gibson     }
277230f4b05bSDavid Gibson 
277330f4b05bSDavid Gibson     error_setg(errp,
277430f4b05bSDavid Gibson                "Hash page table resizing not available with this KVM version");
277530f4b05bSDavid Gibson }
2776b55d295eSDavid Gibson 
2777b55d295eSDavid Gibson int kvmppc_resize_hpt_prepare(PowerPCCPU *cpu, target_ulong flags, int shift)
2778b55d295eSDavid Gibson {
2779b55d295eSDavid Gibson     CPUState *cs = CPU(cpu);
2780b55d295eSDavid Gibson     struct kvm_ppc_resize_hpt rhpt = {
2781b55d295eSDavid Gibson         .flags = flags,
2782b55d295eSDavid Gibson         .shift = shift,
2783b55d295eSDavid Gibson     };
2784b55d295eSDavid Gibson 
2785b55d295eSDavid Gibson     if (!cap_resize_hpt) {
2786b55d295eSDavid Gibson         return -ENOSYS;
2787b55d295eSDavid Gibson     }
2788b55d295eSDavid Gibson 
2789b55d295eSDavid Gibson     return kvm_vm_ioctl(cs->kvm_state, KVM_PPC_RESIZE_HPT_PREPARE, &rhpt);
2790b55d295eSDavid Gibson }
2791b55d295eSDavid Gibson 
2792b55d295eSDavid Gibson int kvmppc_resize_hpt_commit(PowerPCCPU *cpu, target_ulong flags, int shift)
2793b55d295eSDavid Gibson {
2794b55d295eSDavid Gibson     CPUState *cs = CPU(cpu);
2795b55d295eSDavid Gibson     struct kvm_ppc_resize_hpt rhpt = {
2796b55d295eSDavid Gibson         .flags = flags,
2797b55d295eSDavid Gibson         .shift = shift,
2798b55d295eSDavid Gibson     };
2799b55d295eSDavid Gibson 
2800b55d295eSDavid Gibson     if (!cap_resize_hpt) {
2801b55d295eSDavid Gibson         return -ENOSYS;
2802b55d295eSDavid Gibson     }
2803b55d295eSDavid Gibson 
2804b55d295eSDavid Gibson     return kvm_vm_ioctl(cs->kvm_state, KVM_PPC_RESIZE_HPT_COMMIT, &rhpt);
2805b55d295eSDavid Gibson }
2806b55d295eSDavid Gibson 
2807c363a37aSDaniel Henrique Barboza /*
2808c363a37aSDaniel Henrique Barboza  * This is a helper function to detect a post migration scenario
2809c363a37aSDaniel Henrique Barboza  * in which a guest running under KVM-HV freezes in cpu_post_load because
2810c363a37aSDaniel Henrique Barboza  * the guest kernel can't handle a PVR value other than the actual host
2811c363a37aSDaniel Henrique Barboza  * PVR in KVM_SET_SREGS, even if pvr_match() returns true.
2812c363a37aSDaniel Henrique Barboza  *
2813c363a37aSDaniel Henrique Barboza  * If we don't have cap_ppc_pvr_compat and we're not running in PR
2814c363a37aSDaniel Henrique Barboza  * (so, we're HV), return true. The workaround itself is done in
2815c363a37aSDaniel Henrique Barboza  * cpu_post_load.
2816c363a37aSDaniel Henrique Barboza  *
2817c363a37aSDaniel Henrique Barboza  * The order here is important: we'll only check for KVM PR as a
2818c363a37aSDaniel Henrique Barboza  * fallback if the guest kernel can't handle the situation itself.
2819c363a37aSDaniel Henrique Barboza  * We need to avoid querying the running KVM type at the QEMU level
2820c363a37aSDaniel Henrique Barboza  * as much as possible.
2821c363a37aSDaniel Henrique Barboza  */
2822c363a37aSDaniel Henrique Barboza bool kvmppc_pvr_workaround_required(PowerPCCPU *cpu)
2823c363a37aSDaniel Henrique Barboza {
2824c363a37aSDaniel Henrique Barboza     CPUState *cs = CPU(cpu);
2825c363a37aSDaniel Henrique Barboza 
2826c363a37aSDaniel Henrique Barboza     if (!kvm_enabled()) {
2827c363a37aSDaniel Henrique Barboza         return false;
2828c363a37aSDaniel Henrique Barboza     }
2829c363a37aSDaniel Henrique Barboza 
2830c363a37aSDaniel Henrique Barboza     if (cap_ppc_pvr_compat) {
2831c363a37aSDaniel Henrique Barboza         return false;
2832c363a37aSDaniel Henrique Barboza     }
2833c363a37aSDaniel Henrique Barboza 
2834c363a37aSDaniel Henrique Barboza     return !kvmppc_is_pr(cs->kvm_state);
2835c363a37aSDaniel Henrique Barboza }
2836