xref: /qemu/target/ppc/kvm.c (revision 2e886fb39168942ab03b91062e715946e4af8436)
1d76d1650Saurel32 /*
2d76d1650Saurel32  * PowerPC implementation of KVM hooks
3d76d1650Saurel32  *
4d76d1650Saurel32  * Copyright IBM Corp. 2007
590dc8812SScott Wood  * Copyright (C) 2011 Freescale Semiconductor, Inc.
6d76d1650Saurel32  *
7d76d1650Saurel32  * Authors:
8d76d1650Saurel32  *  Jerone Young <jyoung5@us.ibm.com>
9d76d1650Saurel32  *  Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
10d76d1650Saurel32  *  Hollis Blanchard <hollisb@us.ibm.com>
11d76d1650Saurel32  *
12d76d1650Saurel32  * This work is licensed under the terms of the GNU GPL, version 2 or later.
13d76d1650Saurel32  * See the COPYING file in the top-level directory.
14d76d1650Saurel32  *
15d76d1650Saurel32  */
16d76d1650Saurel32 
170d75590dSPeter Maydell #include "qemu/osdep.h"
18eadaada1SAlexander Graf #include <dirent.h>
19d76d1650Saurel32 #include <sys/ioctl.h>
204656e1f0SBenjamin Herrenschmidt #include <sys/vfs.h>
21d76d1650Saurel32 
22d76d1650Saurel32 #include <linux/kvm.h>
23d76d1650Saurel32 
24d76d1650Saurel32 #include "qemu-common.h"
2530f4b05bSDavid Gibson #include "qapi/error.h"
26072ed5f2SThomas Huth #include "qemu/error-report.h"
2733c11879SPaolo Bonzini #include "cpu.h"
28715d4b96SThomas Huth #include "cpu-models.h"
291de7afc9SPaolo Bonzini #include "qemu/timer.h"
309c17d615SPaolo Bonzini #include "sysemu/sysemu.h"
31b3946626SVincent Palatin #include "sysemu/hw_accel.h"
32d76d1650Saurel32 #include "kvm_ppc.h"
339c17d615SPaolo Bonzini #include "sysemu/cpus.h"
349c17d615SPaolo Bonzini #include "sysemu/device_tree.h"
35d5aea6f3SDavid Gibson #include "mmu-hash64.h"
36d76d1650Saurel32 
37f61b4bedSAlexander Graf #include "hw/sysbus.h"
380d09e41aSPaolo Bonzini #include "hw/ppc/spapr.h"
390d09e41aSPaolo Bonzini #include "hw/ppc/spapr_vio.h"
407ebaf795SBharata B Rao #include "hw/ppc/spapr_cpu_core.h"
4198a8b524SAlexey Kardashevskiy #include "hw/ppc/ppc.h"
4231f2cb8fSBharat Bhushan #include "sysemu/watchdog.h"
43b36f100eSAlexey Kardashevskiy #include "trace.h"
4488365d17SBharat Bhushan #include "exec/gdbstub.h"
454c663752SPaolo Bonzini #include "exec/memattrs.h"
469c607668SAlexey Kardashevskiy #include "exec/ram_addr.h"
472d103aaeSMichael Roth #include "sysemu/hostmem.h"
48f348b6d1SVeronia Bahaa #include "qemu/cutils.h"
499c607668SAlexey Kardashevskiy #include "qemu/mmap-alloc.h"
503b542549SBharata B Rao #if defined(TARGET_PPC64)
513b542549SBharata B Rao #include "hw/ppc/spapr_cpu_core.h"
523b542549SBharata B Rao #endif
53f3d9f303SSam Bobroff #include "elf.h"
54c64abd1fSSam Bobroff #include "sysemu/kvm_int.h"
55f61b4bedSAlexander Graf 
56d76d1650Saurel32 //#define DEBUG_KVM
57d76d1650Saurel32 
58d76d1650Saurel32 #ifdef DEBUG_KVM
59da56ff91SPeter Maydell #define DPRINTF(fmt, ...) \
60d76d1650Saurel32     do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
61d76d1650Saurel32 #else
62da56ff91SPeter Maydell #define DPRINTF(fmt, ...) \
63d76d1650Saurel32     do { } while (0)
64d76d1650Saurel32 #endif
65d76d1650Saurel32 
66eadaada1SAlexander Graf #define PROC_DEVTREE_CPU      "/proc/device-tree/cpus/"
67eadaada1SAlexander Graf 
6894a8d39aSJan Kiszka const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
6994a8d39aSJan Kiszka     KVM_CAP_LAST_INFO
7094a8d39aSJan Kiszka };
7194a8d39aSJan Kiszka 
72fc87e185SAlexander Graf static int cap_interrupt_unset = false;
73fc87e185SAlexander Graf static int cap_interrupt_level = false;
7490dc8812SScott Wood static int cap_segstate;
7590dc8812SScott Wood static int cap_booke_sregs;
76e97c3636SDavid Gibson static int cap_ppc_smt;
77354ac20aSDavid Gibson static int cap_ppc_rma;
780f5cb298SDavid Gibson static int cap_spapr_tce;
79d6ee2a7cSAlexey Kardashevskiy static int cap_spapr_tce_64;
80da95324eSAlexey Kardashevskiy static int cap_spapr_multitce;
819bb62a07SAlexey Kardashevskiy static int cap_spapr_vfio;
82f1af19d7SDavid Gibson static int cap_hior;
83d67d40eaSDavid Gibson static int cap_one_reg;
843b961124SStuart Yoder static int cap_epr;
8531f2cb8fSBharat Bhushan static int cap_ppc_watchdog;
869b00ea49SDavid Gibson static int cap_papr;
87e68cb8b4SAlexey Kardashevskiy static int cap_htab_fd;
8887a91de6SAlexander Graf static int cap_fixup_hcalls;
89bac3bf28SThomas Huth static int cap_htm;             /* Hardware transactional memory support */
90cf1c4cceSSam Bobroff static int cap_mmu_radix;
91cf1c4cceSSam Bobroff static int cap_mmu_hash_v3;
92b55d295eSDavid Gibson static int cap_resize_hpt;
93c363a37aSDaniel Henrique Barboza static int cap_ppc_pvr_compat;
94fc87e185SAlexander Graf 
953c902d44SBharat Bhushan static uint32_t debug_inst_opcode;
963c902d44SBharat Bhushan 
97c821c2bdSAlexander Graf /* XXX We have a race condition where we actually have a level triggered
98c821c2bdSAlexander Graf  *     interrupt, but the infrastructure can't expose that yet, so the guest
99c821c2bdSAlexander Graf  *     takes the interrupt but ignores it, goes to sleep and never gets
100c821c2bdSAlexander Graf  *     notified that there's still an interrupt pending.
101c6a94ba5SAlexander Graf  *
102c821c2bdSAlexander Graf  *     As a quick workaround, let's just wake up again 20 ms after we injected
103c821c2bdSAlexander Graf  *     an interrupt. That way we can ensure that we're always reinjecting
104c821c2bdSAlexander Graf  *     interrupts in case the guest swallowed them.
105c6a94ba5SAlexander Graf  */
106c6a94ba5SAlexander Graf static QEMUTimer *idle_timer;
107c6a94ba5SAlexander Graf 
108d5a68146SAndreas Färber static void kvm_kick_cpu(void *opaque)
109c6a94ba5SAlexander Graf {
110d5a68146SAndreas Färber     PowerPCCPU *cpu = opaque;
111d5a68146SAndreas Färber 
112c08d7424SAndreas Färber     qemu_cpu_kick(CPU(cpu));
113c6a94ba5SAlexander Graf }
114c6a94ba5SAlexander Graf 
11596c9cff0SThomas Huth /* Check whether we are running with KVM-PR (instead of KVM-HV).  This
11696c9cff0SThomas Huth  * should only be used for fallback tests - generally we should use
11796c9cff0SThomas Huth  * explicit capabilities for the features we want, rather than
11896c9cff0SThomas Huth  * assuming what is/isn't available depending on the KVM variant. */
11996c9cff0SThomas Huth static bool kvmppc_is_pr(KVMState *ks)
12096c9cff0SThomas Huth {
12196c9cff0SThomas Huth     /* Assume KVM-PR if the GET_PVINFO capability is available */
12296c9cff0SThomas Huth     return kvm_check_extension(ks, KVM_CAP_PPC_GET_PVINFO) != 0;
12396c9cff0SThomas Huth }
12496c9cff0SThomas Huth 
1255ba4576bSAndreas Färber static int kvm_ppc_register_host_cpu_type(void);
1265ba4576bSAndreas Färber 
127b16565b3SMarcel Apfelbaum int kvm_arch_init(MachineState *ms, KVMState *s)
128d76d1650Saurel32 {
129fc87e185SAlexander Graf     cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);
130fc87e185SAlexander Graf     cap_interrupt_level = kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL);
13190dc8812SScott Wood     cap_segstate = kvm_check_extension(s, KVM_CAP_PPC_SEGSTATE);
13290dc8812SScott Wood     cap_booke_sregs = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_SREGS);
133e97c3636SDavid Gibson     cap_ppc_smt = kvm_check_extension(s, KVM_CAP_PPC_SMT);
134354ac20aSDavid Gibson     cap_ppc_rma = kvm_check_extension(s, KVM_CAP_PPC_RMA);
1350f5cb298SDavid Gibson     cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);
136d6ee2a7cSAlexey Kardashevskiy     cap_spapr_tce_64 = kvm_check_extension(s, KVM_CAP_SPAPR_TCE_64);
137da95324eSAlexey Kardashevskiy     cap_spapr_multitce = kvm_check_extension(s, KVM_CAP_SPAPR_MULTITCE);
1389bb62a07SAlexey Kardashevskiy     cap_spapr_vfio = false;
139d67d40eaSDavid Gibson     cap_one_reg = kvm_check_extension(s, KVM_CAP_ONE_REG);
140f1af19d7SDavid Gibson     cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);
1413b961124SStuart Yoder     cap_epr = kvm_check_extension(s, KVM_CAP_PPC_EPR);
14231f2cb8fSBharat Bhushan     cap_ppc_watchdog = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_WATCHDOG);
1439b00ea49SDavid Gibson     /* Note: we don't set cap_papr here, because this capability is
1449b00ea49SDavid Gibson      * only activated later, by kvmppc_set_papr() */
145e68cb8b4SAlexey Kardashevskiy     cap_htab_fd = kvm_check_extension(s, KVM_CAP_PPC_HTAB_FD);
14687a91de6SAlexander Graf     cap_fixup_hcalls = kvm_check_extension(s, KVM_CAP_PPC_FIXUP_HCALL);
147bac3bf28SThomas Huth     cap_htm = kvm_vm_check_extension(s, KVM_CAP_PPC_HTM);
148cf1c4cceSSam Bobroff     cap_mmu_radix = kvm_vm_check_extension(s, KVM_CAP_PPC_MMU_RADIX);
149cf1c4cceSSam Bobroff     cap_mmu_hash_v3 = kvm_vm_check_extension(s, KVM_CAP_PPC_MMU_HASH_V3);
150b55d295eSDavid Gibson     cap_resize_hpt = kvm_vm_check_extension(s, KVM_CAP_SPAPR_RESIZE_HPT);
151c363a37aSDaniel Henrique Barboza     /*
152c363a37aSDaniel Henrique Barboza      * Note: setting it to false because there is no such capability
153c363a37aSDaniel Henrique Barboza      * in KVM at this moment.
154c363a37aSDaniel Henrique Barboza      *
155c363a37aSDaniel Henrique Barboza      * TODO: call kvm_vm_check_extension() with the right capability
156c363a37aSDaniel Henrique Barboza      * after the kernel starts implementing it. */
157c363a37aSDaniel Henrique Barboza     cap_ppc_pvr_compat = false;
158fc87e185SAlexander Graf 
159fc87e185SAlexander Graf     if (!cap_interrupt_level) {
160fc87e185SAlexander Graf         fprintf(stderr, "KVM: Couldn't find level irq capability. Expect the "
161fc87e185SAlexander Graf                         "VM to stall at times!\n");
162fc87e185SAlexander Graf     }
163fc87e185SAlexander Graf 
1645ba4576bSAndreas Färber     kvm_ppc_register_host_cpu_type();
1655ba4576bSAndreas Färber 
166d76d1650Saurel32     return 0;
167d76d1650Saurel32 }
168d76d1650Saurel32 
169d525ffabSPaolo Bonzini int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
170d525ffabSPaolo Bonzini {
171d525ffabSPaolo Bonzini     return 0;
172d525ffabSPaolo Bonzini }
173d525ffabSPaolo Bonzini 
1741bc22652SAndreas Färber static int kvm_arch_sync_sregs(PowerPCCPU *cpu)
175d76d1650Saurel32 {
1761bc22652SAndreas Färber     CPUPPCState *cenv = &cpu->env;
1771bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
178861bbc80SAlexander Graf     struct kvm_sregs sregs;
1795666ca4aSScott Wood     int ret;
1805666ca4aSScott Wood 
1815666ca4aSScott Wood     if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
18264e07be5SAlexander Graf         /* What we're really trying to say is "if we're on BookE, we use
18364e07be5SAlexander Graf            the native PVR for now". This is the only sane way to check
18464e07be5SAlexander Graf            it though, so we may mislead users into thinking they can run
18564e07be5SAlexander Graf            BookE guests on BookS. Let's hope nobody tries :) */
1865666ca4aSScott Wood         return 0;
1875666ca4aSScott Wood     } else {
18890dc8812SScott Wood         if (!cap_segstate) {
18964e07be5SAlexander Graf             fprintf(stderr, "kvm error: missing PVR setting capability\n");
19064e07be5SAlexander Graf             return -ENOSYS;
1915666ca4aSScott Wood         }
1925666ca4aSScott Wood     }
1935666ca4aSScott Wood 
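    /*
     * Read-modify-write: fetch the current sregs from KVM, update only the
     * PVR and push the result back.
     */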
1941bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
1955666ca4aSScott Wood     if (ret) {
1965666ca4aSScott Wood         return ret;
1975666ca4aSScott Wood     }
198861bbc80SAlexander Graf 
199861bbc80SAlexander Graf     sregs.pvr = cenv->spr[SPR_PVR];
2001bc22652SAndreas Färber     return kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
2015666ca4aSScott Wood }
2025666ca4aSScott Wood 
20393dd5e85SScott Wood /* Set up a shared TLB array with KVM */
2041bc22652SAndreas Färber static int kvm_booke206_tlb_init(PowerPCCPU *cpu)
20593dd5e85SScott Wood {
2061bc22652SAndreas Färber     CPUPPCState *env = &cpu->env;
2071bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
20893dd5e85SScott Wood     struct kvm_book3e_206_tlb_params params = {};
20993dd5e85SScott Wood     struct kvm_config_tlb cfg = {};
21093dd5e85SScott Wood     unsigned int entries = 0;
21193dd5e85SScott Wood     int ret, i;
21293dd5e85SScott Wood 
21393dd5e85SScott Wood     if (!kvm_enabled() ||
214a60f24b5SAndreas Färber         !kvm_check_extension(cs->kvm_state, KVM_CAP_SW_TLB)) {
21593dd5e85SScott Wood         return 0;
21693dd5e85SScott Wood     }
21793dd5e85SScott Wood 
21893dd5e85SScott Wood     assert(ARRAY_SIZE(params.tlb_sizes) == BOOKE206_MAX_TLBN);
21993dd5e85SScott Wood 
22093dd5e85SScott Wood     for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
22193dd5e85SScott Wood         params.tlb_sizes[i] = booke206_tlb_size(env, i);
22293dd5e85SScott Wood         params.tlb_ways[i] = booke206_tlb_ways(env, i);
22393dd5e85SScott Wood         entries += params.tlb_sizes[i];
22493dd5e85SScott Wood     }
22593dd5e85SScott Wood 
22693dd5e85SScott Wood     assert(entries == env->nb_tlb);
22793dd5e85SScott Wood     assert(sizeof(struct kvm_book3e_206_tlb_entry) == sizeof(ppcmas_tlb_t));
22893dd5e85SScott Wood 
22993dd5e85SScott Wood     env->tlb_dirty = true;
23093dd5e85SScott Wood 
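    /*
     * Describe the location and geometry of QEMU's MAS-format TLB array to
     * KVM; once KVM_CAP_SW_TLB is enabled below, both sides work on this
     * shared memory rather than copying entries back and forth.
     */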
23193dd5e85SScott Wood     cfg.array = (uintptr_t)env->tlb.tlbm;
23293dd5e85SScott Wood     cfg.array_len = sizeof(ppcmas_tlb_t) * entries;
23393dd5e85SScott Wood     cfg.params = (uintptr_t)&params;
23493dd5e85SScott Wood     cfg.mmu_type = KVM_MMU_FSL_BOOKE_NOHV;
23593dd5e85SScott Wood 
23648add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_SW_TLB, 0, (uintptr_t)&cfg);
23793dd5e85SScott Wood     if (ret < 0) {
23893dd5e85SScott Wood         fprintf(stderr, "%s: couldn't enable KVM_CAP_SW_TLB: %s\n",
23993dd5e85SScott Wood                 __func__, strerror(-ret));
24093dd5e85SScott Wood         return ret;
24193dd5e85SScott Wood     }
24293dd5e85SScott Wood 
24393dd5e85SScott Wood     env->kvm_sw_tlb = true;
24493dd5e85SScott Wood     return 0;
24593dd5e85SScott Wood }
24693dd5e85SScott Wood 
2474656e1f0SBenjamin Herrenschmidt 
2484656e1f0SBenjamin Herrenschmidt #if defined(TARGET_PPC64)
249a60f24b5SAndreas Färber static void kvm_get_fallback_smmu_info(PowerPCCPU *cpu,
2504656e1f0SBenjamin Herrenschmidt                                        struct kvm_ppc_smmu_info *info)
2514656e1f0SBenjamin Herrenschmidt {
252a60f24b5SAndreas Färber     CPUPPCState *env = &cpu->env;
253a60f24b5SAndreas Färber     CPUState *cs = CPU(cpu);
254a60f24b5SAndreas Färber 
2554656e1f0SBenjamin Herrenschmidt     memset(info, 0, sizeof(*info));
2564656e1f0SBenjamin Herrenschmidt 
2574656e1f0SBenjamin Herrenschmidt     /* We don't have the new KVM_PPC_GET_SMMU_INFO ioctl, so
2584656e1f0SBenjamin Herrenschmidt      * we need to "guess" what the supported page sizes are.
2594656e1f0SBenjamin Herrenschmidt      *
2604656e1f0SBenjamin Herrenschmidt      * For that to work we make a few assumptions:
2614656e1f0SBenjamin Herrenschmidt      *
26296c9cff0SThomas Huth      * - Check whether we are running "PR" KVM which only supports 4K
26396c9cff0SThomas Huth      *   and 16M pages, but supports them regardless of the backing
26496c9cff0SThomas Huth      *   store characteristics. We also don't support 1T segments.
2654656e1f0SBenjamin Herrenschmidt      *
2664656e1f0SBenjamin Herrenschmidt      *   This is safe as if HV KVM ever supports that capability or PR
2674656e1f0SBenjamin Herrenschmidt      *   KVM grows support for more page/segment sizes, those versions
2684656e1f0SBenjamin Herrenschmidt      *   will have implemented KVM_CAP_PPC_GET_SMMU_INFO and thus we
2694656e1f0SBenjamin Herrenschmidt      *   will not hit this fallback
2704656e1f0SBenjamin Herrenschmidt      *
2714656e1f0SBenjamin Herrenschmidt      * - Else we are running HV KVM. This means we only support page
2724656e1f0SBenjamin Herrenschmidt      *   sizes that fit in the backing store. Additionally we only
2734656e1f0SBenjamin Herrenschmidt      *   advertise 64K pages if the processor is ARCH 2.06 and we assume
2744656e1f0SBenjamin Herrenschmidt      *   P7 encodings for the SLB and hash table. Here too, we assume
2754656e1f0SBenjamin Herrenschmidt      *   support for any newer processor will mean a kernel that
2764656e1f0SBenjamin Herrenschmidt      *   implements KVM_CAP_PPC_GET_SMMU_INFO and thus doesn't hit
2774656e1f0SBenjamin Herrenschmidt      *   this fallback.
2784656e1f0SBenjamin Herrenschmidt      */
27996c9cff0SThomas Huth     if (kvmppc_is_pr(cs->kvm_state)) {
2804656e1f0SBenjamin Herrenschmidt         /* No flags */
2814656e1f0SBenjamin Herrenschmidt         info->flags = 0;
2824656e1f0SBenjamin Herrenschmidt         info->slb_size = 64;
2834656e1f0SBenjamin Herrenschmidt 
2844656e1f0SBenjamin Herrenschmidt         /* Standard 4k base page size segment */
2854656e1f0SBenjamin Herrenschmidt         info->sps[0].page_shift = 12;
2864656e1f0SBenjamin Herrenschmidt         info->sps[0].slb_enc = 0;
2874656e1f0SBenjamin Herrenschmidt         info->sps[0].enc[0].page_shift = 12;
2884656e1f0SBenjamin Herrenschmidt         info->sps[0].enc[0].pte_enc = 0;
2894656e1f0SBenjamin Herrenschmidt 
2904656e1f0SBenjamin Herrenschmidt         /* Standard 16M large page size segment */
2914656e1f0SBenjamin Herrenschmidt         info->sps[1].page_shift = 24;
2924656e1f0SBenjamin Herrenschmidt         info->sps[1].slb_enc = SLB_VSID_L;
2934656e1f0SBenjamin Herrenschmidt         info->sps[1].enc[0].page_shift = 24;
2944656e1f0SBenjamin Herrenschmidt         info->sps[1].enc[0].pte_enc = 0;
2954656e1f0SBenjamin Herrenschmidt     } else {
2964656e1f0SBenjamin Herrenschmidt         int i = 0;
2974656e1f0SBenjamin Herrenschmidt 
2984656e1f0SBenjamin Herrenschmidt         /* HV KVM has backing store size restrictions */
2994656e1f0SBenjamin Herrenschmidt         info->flags = KVM_PPC_PAGE_SIZES_REAL;
3004656e1f0SBenjamin Herrenschmidt 
3014656e1f0SBenjamin Herrenschmidt         if (env->mmu_model & POWERPC_MMU_1TSEG) {
3024656e1f0SBenjamin Herrenschmidt             info->flags |= KVM_PPC_1T_SEGMENTS;
3034656e1f0SBenjamin Herrenschmidt         }
3044656e1f0SBenjamin Herrenschmidt 
305ec975e83SSam Bobroff         if (POWERPC_MMU_VER(env->mmu_model) == POWERPC_MMU_VER_2_06 ||
306ec975e83SSam Bobroff            POWERPC_MMU_VER(env->mmu_model) == POWERPC_MMU_VER_2_07) {
3074656e1f0SBenjamin Herrenschmidt             info->slb_size = 32;
3084656e1f0SBenjamin Herrenschmidt         } else {
3094656e1f0SBenjamin Herrenschmidt             info->slb_size = 64;
3104656e1f0SBenjamin Herrenschmidt         }
3114656e1f0SBenjamin Herrenschmidt 
3124656e1f0SBenjamin Herrenschmidt         /* Standard 4k base page size segment */
3134656e1f0SBenjamin Herrenschmidt         info->sps[i].page_shift = 12;
3144656e1f0SBenjamin Herrenschmidt         info->sps[i].slb_enc = 0;
3154656e1f0SBenjamin Herrenschmidt         info->sps[i].enc[0].page_shift = 12;
3164656e1f0SBenjamin Herrenschmidt         info->sps[i].enc[0].pte_enc = 0;
3174656e1f0SBenjamin Herrenschmidt         i++;
3184656e1f0SBenjamin Herrenschmidt 
319aa4bb587SBenjamin Herrenschmidt         /* 64K on MMU 2.06 and 2.07 */
320ec975e83SSam Bobroff         if (POWERPC_MMU_VER(env->mmu_model) == POWERPC_MMU_VER_2_06 ||
321ec975e83SSam Bobroff             POWERPC_MMU_VER(env->mmu_model) == POWERPC_MMU_VER_2_07) {
3224656e1f0SBenjamin Herrenschmidt             info->sps[i].page_shift = 16;
3234656e1f0SBenjamin Herrenschmidt             info->sps[i].slb_enc = 0x110;
3244656e1f0SBenjamin Herrenschmidt             info->sps[i].enc[0].page_shift = 16;
3254656e1f0SBenjamin Herrenschmidt             info->sps[i].enc[0].pte_enc = 1;
3264656e1f0SBenjamin Herrenschmidt             i++;
3274656e1f0SBenjamin Herrenschmidt         }
3284656e1f0SBenjamin Herrenschmidt 
3294656e1f0SBenjamin Herrenschmidt         /* Standard 16M large page size segment */
3304656e1f0SBenjamin Herrenschmidt         info->sps[i].page_shift = 24;
3314656e1f0SBenjamin Herrenschmidt         info->sps[i].slb_enc = SLB_VSID_L;
3324656e1f0SBenjamin Herrenschmidt         info->sps[i].enc[0].page_shift = 24;
3334656e1f0SBenjamin Herrenschmidt         info->sps[i].enc[0].pte_enc = 0;
3344656e1f0SBenjamin Herrenschmidt     }
3354656e1f0SBenjamin Herrenschmidt }
3364656e1f0SBenjamin Herrenschmidt 
337a60f24b5SAndreas Färber static void kvm_get_smmu_info(PowerPCCPU *cpu, struct kvm_ppc_smmu_info *info)
3384656e1f0SBenjamin Herrenschmidt {
339a60f24b5SAndreas Färber     CPUState *cs = CPU(cpu);
3404656e1f0SBenjamin Herrenschmidt     int ret;
3414656e1f0SBenjamin Herrenschmidt 
342a60f24b5SAndreas Färber     if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) {
343a60f24b5SAndreas Färber         ret = kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_SMMU_INFO, info);
3444656e1f0SBenjamin Herrenschmidt         if (ret == 0) {
3454656e1f0SBenjamin Herrenschmidt             return;
3464656e1f0SBenjamin Herrenschmidt         }
3474656e1f0SBenjamin Herrenschmidt     }
3484656e1f0SBenjamin Herrenschmidt 
349a60f24b5SAndreas Färber     kvm_get_fallback_smmu_info(cpu, info);
3504656e1f0SBenjamin Herrenschmidt }
3514656e1f0SBenjamin Herrenschmidt 
352c64abd1fSSam Bobroff struct ppc_radix_page_info *kvm_get_radix_page_info(void)
353c64abd1fSSam Bobroff {
354c64abd1fSSam Bobroff     KVMState *s = KVM_STATE(current_machine->accelerator);
355c64abd1fSSam Bobroff     struct ppc_radix_page_info *radix_page_info;
356c64abd1fSSam Bobroff     struct kvm_ppc_rmmu_info rmmu_info;
357c64abd1fSSam Bobroff     int i;
358c64abd1fSSam Bobroff 
359c64abd1fSSam Bobroff     if (!kvm_check_extension(s, KVM_CAP_PPC_MMU_RADIX)) {
360c64abd1fSSam Bobroff         return NULL;
361c64abd1fSSam Bobroff     }
362c64abd1fSSam Bobroff     if (kvm_vm_ioctl(s, KVM_PPC_GET_RMMU_INFO, &rmmu_info)) {
363c64abd1fSSam Bobroff         return NULL;
364c64abd1fSSam Bobroff     }
365c64abd1fSSam Bobroff     radix_page_info = g_malloc0(sizeof(*radix_page_info));
366c64abd1fSSam Bobroff     radix_page_info->count = 0;
367c64abd1fSSam Bobroff     for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
368c64abd1fSSam Bobroff         if (rmmu_info.ap_encodings[i]) {
369c64abd1fSSam Bobroff             radix_page_info->entries[i] = rmmu_info.ap_encodings[i];
370c64abd1fSSam Bobroff             radix_page_info->count++;
371c64abd1fSSam Bobroff         }
372c64abd1fSSam Bobroff     }
373c64abd1fSSam Bobroff     return radix_page_info;
374c64abd1fSSam Bobroff }
375c64abd1fSSam Bobroff 
376b4db5413SSuraj Jitindar Singh target_ulong kvmppc_configure_v3_mmu(PowerPCCPU *cpu,
377b4db5413SSuraj Jitindar Singh                                      bool radix, bool gtse,
378b4db5413SSuraj Jitindar Singh                                      uint64_t proc_tbl)
379b4db5413SSuraj Jitindar Singh {
380b4db5413SSuraj Jitindar Singh     CPUState *cs = CPU(cpu);
381b4db5413SSuraj Jitindar Singh     int ret;
382b4db5413SSuraj Jitindar Singh     uint64_t flags = 0;
383b4db5413SSuraj Jitindar Singh     struct kvm_ppc_mmuv3_cfg cfg = {
384b4db5413SSuraj Jitindar Singh         .process_table = proc_tbl,
385b4db5413SSuraj Jitindar Singh     };
386b4db5413SSuraj Jitindar Singh 
387b4db5413SSuraj Jitindar Singh     if (radix) {
388b4db5413SSuraj Jitindar Singh         flags |= KVM_PPC_MMUV3_RADIX;
389b4db5413SSuraj Jitindar Singh     }
390b4db5413SSuraj Jitindar Singh     if (gtse) {
391b4db5413SSuraj Jitindar Singh         flags |= KVM_PPC_MMUV3_GTSE;
392b4db5413SSuraj Jitindar Singh     }
393b4db5413SSuraj Jitindar Singh     cfg.flags = flags;
394b4db5413SSuraj Jitindar Singh     ret = kvm_vm_ioctl(cs->kvm_state, KVM_PPC_CONFIGURE_V3_MMU, &cfg);
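    /* Map the ioctl result onto the H_* return codes expected by the caller
     * (presumably the hypercall path that requested the MMU reconfiguration). */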
395b4db5413SSuraj Jitindar Singh     switch (ret) {
396b4db5413SSuraj Jitindar Singh     case 0:
397b4db5413SSuraj Jitindar Singh         return H_SUCCESS;
398b4db5413SSuraj Jitindar Singh     case -EINVAL:
399b4db5413SSuraj Jitindar Singh         return H_PARAMETER;
400b4db5413SSuraj Jitindar Singh     case -ENODEV:
401b4db5413SSuraj Jitindar Singh         return H_NOT_AVAILABLE;
402b4db5413SSuraj Jitindar Singh     default:
403b4db5413SSuraj Jitindar Singh         return H_HARDWARE;
404b4db5413SSuraj Jitindar Singh     }
405b4db5413SSuraj Jitindar Singh }
406b4db5413SSuraj Jitindar Singh 
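/*
 * Without KVM_PPC_PAGE_SIZES_REAL (i.e. PR KVM) any page size is acceptable;
 * with it (HV KVM) a page size is only usable if it does not exceed the page
 * size backing guest RAM.
 */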
4074656e1f0SBenjamin Herrenschmidt static bool kvm_valid_page_size(uint32_t flags, long rampgsize, uint32_t shift)
4084656e1f0SBenjamin Herrenschmidt {
4094656e1f0SBenjamin Herrenschmidt     if (!(flags & KVM_PPC_PAGE_SIZES_REAL)) {
4104656e1f0SBenjamin Herrenschmidt         return true;
4114656e1f0SBenjamin Herrenschmidt     }
4124656e1f0SBenjamin Herrenschmidt 
4134656e1f0SBenjamin Herrenschmidt     return (1ul << shift) <= rampgsize;
4144656e1f0SBenjamin Herrenschmidt }
4154656e1f0SBenjamin Herrenschmidt 
416df587133SThomas Huth static long max_cpu_page_size;
417df587133SThomas Huth 
418a60f24b5SAndreas Färber static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
4194656e1f0SBenjamin Herrenschmidt {
4204656e1f0SBenjamin Herrenschmidt     static struct kvm_ppc_smmu_info smmu_info;
4214656e1f0SBenjamin Herrenschmidt     static bool has_smmu_info;
422a60f24b5SAndreas Färber     CPUPPCState *env = &cpu->env;
4234656e1f0SBenjamin Herrenschmidt     int iq, ik, jq, jk;
4240d594f55SThomas Huth     bool has_64k_pages = false;
4254656e1f0SBenjamin Herrenschmidt 
4264656e1f0SBenjamin Herrenschmidt     /* We only handle page sizes for 64-bit server guests for now */
4274656e1f0SBenjamin Herrenschmidt     if (!(env->mmu_model & POWERPC_MMU_64)) {
4284656e1f0SBenjamin Herrenschmidt         return;
4294656e1f0SBenjamin Herrenschmidt     }
4304656e1f0SBenjamin Herrenschmidt 
4314656e1f0SBenjamin Herrenschmidt     /* Collect MMU info from kernel if not already */
4324656e1f0SBenjamin Herrenschmidt     if (!has_smmu_info) {
433a60f24b5SAndreas Färber         kvm_get_smmu_info(cpu, &smmu_info);
4344656e1f0SBenjamin Herrenschmidt         has_smmu_info = true;
4354656e1f0SBenjamin Herrenschmidt     }
4364656e1f0SBenjamin Herrenschmidt 
437df587133SThomas Huth     if (!max_cpu_page_size) {
4389c607668SAlexey Kardashevskiy         max_cpu_page_size = qemu_getrampagesize();
439df587133SThomas Huth     }
4404656e1f0SBenjamin Herrenschmidt 
4414656e1f0SBenjamin Herrenschmidt     /* Convert to QEMU form */
4424656e1f0SBenjamin Herrenschmidt     memset(&env->sps, 0, sizeof(env->sps));
4434656e1f0SBenjamin Herrenschmidt 
44490da0d5aSBenjamin Herrenschmidt     /* If we have HV KVM, we need to forbid CI large pages if our
44590da0d5aSBenjamin Herrenschmidt      * host page size is smaller than 64K.
44690da0d5aSBenjamin Herrenschmidt      */
44790da0d5aSBenjamin Herrenschmidt     if (smmu_info.flags & KVM_PPC_PAGE_SIZES_REAL) {
44890da0d5aSBenjamin Herrenschmidt         env->ci_large_pages = getpagesize() >= 0x10000;
44990da0d5aSBenjamin Herrenschmidt     }
45090da0d5aSBenjamin Herrenschmidt 
45108215d8fSAlexander Graf     /*
45208215d8fSAlexander Graf      * XXX This loop should be an entry-wide AND of the capabilities that
45308215d8fSAlexander Graf      *     the selected CPU has with the capabilities that KVM supports.
45408215d8fSAlexander Graf      */
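    /*
     * Copy every KVM-reported segment page size whose pages fit within the
     * backing store into env->sps, compacting the arrays as unusable entries
     * are skipped.
     */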
4554656e1f0SBenjamin Herrenschmidt     for (ik = iq = 0; ik < KVM_PPC_PAGE_SIZES_MAX_SZ; ik++) {
4564656e1f0SBenjamin Herrenschmidt         struct ppc_one_seg_page_size *qsps = &env->sps.sps[iq];
4574656e1f0SBenjamin Herrenschmidt         struct kvm_ppc_one_seg_page_size *ksps = &smmu_info.sps[ik];
4584656e1f0SBenjamin Herrenschmidt 
459df587133SThomas Huth         if (!kvm_valid_page_size(smmu_info.flags, max_cpu_page_size,
4604656e1f0SBenjamin Herrenschmidt                                  ksps->page_shift)) {
4614656e1f0SBenjamin Herrenschmidt             continue;
4624656e1f0SBenjamin Herrenschmidt         }
4634656e1f0SBenjamin Herrenschmidt         qsps->page_shift = ksps->page_shift;
4644656e1f0SBenjamin Herrenschmidt         qsps->slb_enc = ksps->slb_enc;
4654656e1f0SBenjamin Herrenschmidt         for (jk = jq = 0; jk < KVM_PPC_PAGE_SIZES_MAX_SZ; jk++) {
466df587133SThomas Huth             if (!kvm_valid_page_size(smmu_info.flags, max_cpu_page_size,
4674656e1f0SBenjamin Herrenschmidt                                      ksps->enc[jk].page_shift)) {
4684656e1f0SBenjamin Herrenschmidt                 continue;
4694656e1f0SBenjamin Herrenschmidt             }
4700d594f55SThomas Huth             if (ksps->enc[jk].page_shift == 16) {
4710d594f55SThomas Huth                 has_64k_pages = true;
4720d594f55SThomas Huth             }
4734656e1f0SBenjamin Herrenschmidt             qsps->enc[jq].page_shift = ksps->enc[jk].page_shift;
4744656e1f0SBenjamin Herrenschmidt             qsps->enc[jq].pte_enc = ksps->enc[jk].pte_enc;
4754656e1f0SBenjamin Herrenschmidt             if (++jq >= PPC_PAGE_SIZES_MAX_SZ) {
4764656e1f0SBenjamin Herrenschmidt                 break;
4774656e1f0SBenjamin Herrenschmidt             }
4784656e1f0SBenjamin Herrenschmidt         }
4794656e1f0SBenjamin Herrenschmidt         if (++iq >= PPC_PAGE_SIZES_MAX_SZ) {
4804656e1f0SBenjamin Herrenschmidt             break;
4814656e1f0SBenjamin Herrenschmidt         }
4824656e1f0SBenjamin Herrenschmidt     }
4834656e1f0SBenjamin Herrenschmidt     env->slb_nr = smmu_info.slb_size;
48408215d8fSAlexander Graf     if (!(smmu_info.flags & KVM_PPC_1T_SEGMENTS)) {
4854656e1f0SBenjamin Herrenschmidt         env->mmu_model &= ~POWERPC_MMU_1TSEG;
4864656e1f0SBenjamin Herrenschmidt     }
4870d594f55SThomas Huth     if (!has_64k_pages) {
4880d594f55SThomas Huth         env->mmu_model &= ~POWERPC_MMU_64K;
4890d594f55SThomas Huth     }
4904656e1f0SBenjamin Herrenschmidt }
491df587133SThomas Huth 
492ec69355bSGreg Kurz bool kvmppc_is_mem_backend_page_size_ok(const char *obj_path)
493df587133SThomas Huth {
494df587133SThomas Huth     Object *mem_obj = object_resolve_path(obj_path, NULL);
495df587133SThomas Huth     char *mempath = object_property_get_str(mem_obj, "mem-path", NULL);
496df587133SThomas Huth     long pagesize;
497df587133SThomas Huth 
498df587133SThomas Huth     if (mempath) {
4999c607668SAlexey Kardashevskiy         pagesize = qemu_mempath_getpagesize(mempath);
5002d3e302eSGreg Kurz         g_free(mempath);
501df587133SThomas Huth     } else {
502df587133SThomas Huth         pagesize = getpagesize();
503df587133SThomas Huth     }
504df587133SThomas Huth 
505df587133SThomas Huth     return pagesize >= max_cpu_page_size;
506df587133SThomas Huth }
507df587133SThomas Huth 
5084656e1f0SBenjamin Herrenschmidt #else /* defined (TARGET_PPC64) */
5094656e1f0SBenjamin Herrenschmidt 
510a60f24b5SAndreas Färber static inline void kvm_fixup_page_sizes(PowerPCCPU *cpu)
5114656e1f0SBenjamin Herrenschmidt {
5124656e1f0SBenjamin Herrenschmidt }
5134656e1f0SBenjamin Herrenschmidt 
514ec69355bSGreg Kurz bool kvmppc_is_mem_backend_page_size_ok(const char *obj_path)
515df587133SThomas Huth {
516df587133SThomas Huth     return true;
517df587133SThomas Huth }
518df587133SThomas Huth 
5194656e1f0SBenjamin Herrenschmidt #endif /* !defined (TARGET_PPC64) */
5204656e1f0SBenjamin Herrenschmidt 
521b164e48eSEduardo Habkost unsigned long kvm_arch_vcpu_id(CPUState *cpu)
522b164e48eSEduardo Habkost {
523*2e886fb3SSam Bobroff     return POWERPC_CPU(cpu)->vcpu_id;
524b164e48eSEduardo Habkost }
525b164e48eSEduardo Habkost 
52688365d17SBharat Bhushan /* e500 supports 2 h/w breakpoints and 2 watchpoints.
52788365d17SBharat Bhushan  * book3s supports only 1 watchpoint, so array size
52888365d17SBharat Bhushan  * of 4 is sufficient for now.
52988365d17SBharat Bhushan  */
53088365d17SBharat Bhushan #define MAX_HW_BKPTS 4
53188365d17SBharat Bhushan 
53288365d17SBharat Bhushan static struct HWBreakpoint {
53388365d17SBharat Bhushan     target_ulong addr;
53488365d17SBharat Bhushan     int type;
53588365d17SBharat Bhushan } hw_debug_points[MAX_HW_BKPTS];
53688365d17SBharat Bhushan 
53788365d17SBharat Bhushan static CPUWatchpoint hw_watchpoint;
53888365d17SBharat Bhushan 
53988365d17SBharat Bhushan /* By default no breakpoints or watchpoints are supported */
54088365d17SBharat Bhushan static int max_hw_breakpoint;
54188365d17SBharat Bhushan static int max_hw_watchpoint;
54288365d17SBharat Bhushan static int nb_hw_breakpoint;
54388365d17SBharat Bhushan static int nb_hw_watchpoint;
54488365d17SBharat Bhushan 
54588365d17SBharat Bhushan static void kvmppc_hw_debug_points_init(CPUPPCState *cenv)
54688365d17SBharat Bhushan {
54788365d17SBharat Bhushan     if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
54888365d17SBharat Bhushan         max_hw_breakpoint = 2;
54988365d17SBharat Bhushan         max_hw_watchpoint = 2;
55088365d17SBharat Bhushan     }
55188365d17SBharat Bhushan 
55288365d17SBharat Bhushan     if ((max_hw_breakpoint + max_hw_watchpoint) > MAX_HW_BKPTS) {
55388365d17SBharat Bhushan         fprintf(stderr, "Error initializing h/w breakpoints\n");
55488365d17SBharat Bhushan         return;
55588365d17SBharat Bhushan     }
55688365d17SBharat Bhushan }
55788365d17SBharat Bhushan 
55820d695a9SAndreas Färber int kvm_arch_init_vcpu(CPUState *cs)
5595666ca4aSScott Wood {
56020d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
56120d695a9SAndreas Färber     CPUPPCState *cenv = &cpu->env;
5625666ca4aSScott Wood     int ret;
5635666ca4aSScott Wood 
5644656e1f0SBenjamin Herrenschmidt     /* Gather server mmu info from KVM and update the CPU state */
565a60f24b5SAndreas Färber     kvm_fixup_page_sizes(cpu);
5664656e1f0SBenjamin Herrenschmidt 
5674656e1f0SBenjamin Herrenschmidt     /* Synchronize sregs with kvm */
5681bc22652SAndreas Färber     ret = kvm_arch_sync_sregs(cpu);
5695666ca4aSScott Wood     if (ret) {
570388e47c7SThomas Huth         if (ret == -EINVAL) {
571388e47c7SThomas Huth             error_report("Register sync failed... If you're using kvm-hv.ko,"
572388e47c7SThomas Huth                          " only \"-cpu host\" is possible");
573388e47c7SThomas Huth         }
5745666ca4aSScott Wood         return ret;
5755666ca4aSScott Wood     }
576861bbc80SAlexander Graf 
577bc72ad67SAlex Bligh     idle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, kvm_kick_cpu, cpu);
578c821c2bdSAlexander Graf 
57993dd5e85SScott Wood     switch (cenv->mmu_model) {
58093dd5e85SScott Wood     case POWERPC_MMU_BOOKE206:
5817f516c96SThomas Huth         /* This target supports access to KVM's guest TLB */
5821bc22652SAndreas Färber         ret = kvm_booke206_tlb_init(cpu);
58393dd5e85SScott Wood         break;
5847f516c96SThomas Huth     case POWERPC_MMU_2_07:
5857f516c96SThomas Huth         if (!cap_htm && !kvmppc_is_pr(cs->kvm_state)) {
5867f516c96SThomas Huth             /* KVM-HV has transactional memory on POWER8 even without the
587f3d9f303SSam Bobroff              * KVM_CAP_PPC_HTM extension, so enable it here instead as
588f3d9f303SSam Bobroff              * long as it's available to userspace on the host. */
589f3d9f303SSam Bobroff             if (qemu_getauxval(AT_HWCAP2) & PPC_FEATURE2_HAS_HTM) {
5907f516c96SThomas Huth                 cap_htm = true;
5917f516c96SThomas Huth             }
592f3d9f303SSam Bobroff         }
5937f516c96SThomas Huth         break;
59493dd5e85SScott Wood     default:
59593dd5e85SScott Wood         break;
59693dd5e85SScott Wood     }
59793dd5e85SScott Wood 
5983c902d44SBharat Bhushan     kvm_get_one_reg(cs, KVM_REG_PPC_DEBUG_INST, &debug_inst_opcode);
59988365d17SBharat Bhushan     kvmppc_hw_debug_points_init(cenv);
6003c902d44SBharat Bhushan 
601861bbc80SAlexander Graf     return ret;
602d76d1650Saurel32 }
603d76d1650Saurel32 
6041bc22652SAndreas Färber static void kvm_sw_tlb_put(PowerPCCPU *cpu)
60593dd5e85SScott Wood {
6061bc22652SAndreas Färber     CPUPPCState *env = &cpu->env;
6071bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
60893dd5e85SScott Wood     struct kvm_dirty_tlb dirty_tlb;
60993dd5e85SScott Wood     unsigned char *bitmap;
61093dd5e85SScott Wood     int ret;
61193dd5e85SScott Wood 
61293dd5e85SScott Wood     if (!env->kvm_sw_tlb) {
61393dd5e85SScott Wood         return;
61493dd5e85SScott Wood     }
61593dd5e85SScott Wood 
61693dd5e85SScott Wood     bitmap = g_malloc((env->nb_tlb + 7) / 8);
61793dd5e85SScott Wood     memset(bitmap, 0xFF, (env->nb_tlb + 7) / 8);
61893dd5e85SScott Wood 
61993dd5e85SScott Wood     dirty_tlb.bitmap = (uintptr_t)bitmap;
62093dd5e85SScott Wood     dirty_tlb.num_dirty = env->nb_tlb;
62193dd5e85SScott Wood 
6221bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_DIRTY_TLB, &dirty_tlb);
62393dd5e85SScott Wood     if (ret) {
62493dd5e85SScott Wood         fprintf(stderr, "%s: KVM_DIRTY_TLB: %s\n",
62593dd5e85SScott Wood                 __func__, strerror(-ret));
62693dd5e85SScott Wood     }
62793dd5e85SScott Wood 
62893dd5e85SScott Wood     g_free(bitmap);
62993dd5e85SScott Wood }
63093dd5e85SScott Wood 
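/* Fetch a single SPR from KVM via the ONE_REG interface and store it in
 * env->spr[spr], honouring the register width encoded in the id. */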
631d67d40eaSDavid Gibson static void kvm_get_one_spr(CPUState *cs, uint64_t id, int spr)
632d67d40eaSDavid Gibson {
633d67d40eaSDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
634d67d40eaSDavid Gibson     CPUPPCState *env = &cpu->env;
635d67d40eaSDavid Gibson     union {
636d67d40eaSDavid Gibson         uint32_t u32;
637d67d40eaSDavid Gibson         uint64_t u64;
638d67d40eaSDavid Gibson     } val;
639d67d40eaSDavid Gibson     struct kvm_one_reg reg = {
640d67d40eaSDavid Gibson         .id = id,
641d67d40eaSDavid Gibson         .addr = (uintptr_t) &val,
642d67d40eaSDavid Gibson     };
643d67d40eaSDavid Gibson     int ret;
644d67d40eaSDavid Gibson 
645d67d40eaSDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
646d67d40eaSDavid Gibson     if (ret != 0) {
647b36f100eSAlexey Kardashevskiy         trace_kvm_failed_spr_get(spr, strerror(errno));
648d67d40eaSDavid Gibson     } else {
649d67d40eaSDavid Gibson         switch (id & KVM_REG_SIZE_MASK) {
650d67d40eaSDavid Gibson         case KVM_REG_SIZE_U32:
651d67d40eaSDavid Gibson             env->spr[spr] = val.u32;
652d67d40eaSDavid Gibson             break;
653d67d40eaSDavid Gibson 
654d67d40eaSDavid Gibson         case KVM_REG_SIZE_U64:
655d67d40eaSDavid Gibson             env->spr[spr] = val.u64;
656d67d40eaSDavid Gibson             break;
657d67d40eaSDavid Gibson 
658d67d40eaSDavid Gibson         default:
659d67d40eaSDavid Gibson             /* Don't handle this size yet */
660d67d40eaSDavid Gibson             abort();
661d67d40eaSDavid Gibson         }
662d67d40eaSDavid Gibson     }
663d67d40eaSDavid Gibson }
664d67d40eaSDavid Gibson 
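/* Push a single SPR value from env->spr[spr] to KVM via the ONE_REG
 * interface, using the register width encoded in the id. */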
665d67d40eaSDavid Gibson static void kvm_put_one_spr(CPUState *cs, uint64_t id, int spr)
666d67d40eaSDavid Gibson {
667d67d40eaSDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
668d67d40eaSDavid Gibson     CPUPPCState *env = &cpu->env;
669d67d40eaSDavid Gibson     union {
670d67d40eaSDavid Gibson         uint32_t u32;
671d67d40eaSDavid Gibson         uint64_t u64;
672d67d40eaSDavid Gibson     } val;
673d67d40eaSDavid Gibson     struct kvm_one_reg reg = {
674d67d40eaSDavid Gibson         .id = id,
675d67d40eaSDavid Gibson         .addr = (uintptr_t) &val,
676d67d40eaSDavid Gibson     };
677d67d40eaSDavid Gibson     int ret;
678d67d40eaSDavid Gibson 
679d67d40eaSDavid Gibson     switch (id & KVM_REG_SIZE_MASK) {
680d67d40eaSDavid Gibson     case KVM_REG_SIZE_U32:
681d67d40eaSDavid Gibson         val.u32 = env->spr[spr];
682d67d40eaSDavid Gibson         break;
683d67d40eaSDavid Gibson 
684d67d40eaSDavid Gibson     case KVM_REG_SIZE_U64:
685d67d40eaSDavid Gibson         val.u64 = env->spr[spr];
686d67d40eaSDavid Gibson         break;
687d67d40eaSDavid Gibson 
688d67d40eaSDavid Gibson     default:
689d67d40eaSDavid Gibson         /* Don't handle this size yet */
690d67d40eaSDavid Gibson         abort();
691d67d40eaSDavid Gibson     }
692d67d40eaSDavid Gibson 
693d67d40eaSDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
694d67d40eaSDavid Gibson     if (ret != 0) {
695b36f100eSAlexey Kardashevskiy         trace_kvm_failed_spr_set(spr, strerror(errno));
696d67d40eaSDavid Gibson     }
697d67d40eaSDavid Gibson }
698d67d40eaSDavid Gibson 
69970b79849SDavid Gibson static int kvm_put_fp(CPUState *cs)
70070b79849SDavid Gibson {
70170b79849SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
70270b79849SDavid Gibson     CPUPPCState *env = &cpu->env;
70370b79849SDavid Gibson     struct kvm_one_reg reg;
70470b79849SDavid Gibson     int i;
70570b79849SDavid Gibson     int ret;
70670b79849SDavid Gibson 
70770b79849SDavid Gibson     if (env->insns_flags & PPC_FLOAT) {
70870b79849SDavid Gibson         uint64_t fpscr = env->fpscr;
70970b79849SDavid Gibson         bool vsx = !!(env->insns_flags2 & PPC2_VSX);
71070b79849SDavid Gibson 
71170b79849SDavid Gibson         reg.id = KVM_REG_PPC_FPSCR;
71270b79849SDavid Gibson         reg.addr = (uintptr_t)&fpscr;
71370b79849SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
71470b79849SDavid Gibson         if (ret < 0) {
715da56ff91SPeter Maydell             DPRINTF("Unable to set FPSCR to KVM: %s\n", strerror(errno));
71670b79849SDavid Gibson             return ret;
71770b79849SDavid Gibson         }
71870b79849SDavid Gibson 
71970b79849SDavid Gibson         for (i = 0; i < 32; i++) {
72070b79849SDavid Gibson             uint64_t vsr[2];
72170b79849SDavid Gibson 
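            /*
             * The VSR is transferred as a raw 128-bit image, so the order of
             * the two doublewords (FPR half first or last) appears to follow
             * host endianness; hence the swap on little-endian hosts.
             */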
7223a4b791bSGreg Kurz #ifdef HOST_WORDS_BIGENDIAN
72370b79849SDavid Gibson             vsr[0] = float64_val(env->fpr[i]);
72470b79849SDavid Gibson             vsr[1] = env->vsr[i];
7253a4b791bSGreg Kurz #else
7263a4b791bSGreg Kurz             vsr[0] = env->vsr[i];
7273a4b791bSGreg Kurz             vsr[1] = float64_val(env->fpr[i]);
7283a4b791bSGreg Kurz #endif
72970b79849SDavid Gibson             reg.addr = (uintptr_t) &vsr;
73070b79849SDavid Gibson             reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);
73170b79849SDavid Gibson 
73270b79849SDavid Gibson             ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
73370b79849SDavid Gibson             if (ret < 0) {
734da56ff91SPeter Maydell                 DPRINTF("Unable to set %s%d to KVM: %s\n", vsx ? "VSR" : "FPR",
73570b79849SDavid Gibson                         i, strerror(errno));
73670b79849SDavid Gibson                 return ret;
73770b79849SDavid Gibson             }
73870b79849SDavid Gibson         }
73970b79849SDavid Gibson     }
74070b79849SDavid Gibson 
74170b79849SDavid Gibson     if (env->insns_flags & PPC_ALTIVEC) {
74270b79849SDavid Gibson         reg.id = KVM_REG_PPC_VSCR;
74370b79849SDavid Gibson         reg.addr = (uintptr_t)&env->vscr;
74470b79849SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
74570b79849SDavid Gibson         if (ret < 0) {
746da56ff91SPeter Maydell             DPRINTF("Unable to set VSCR to KVM: %s\n", strerror(errno));
74770b79849SDavid Gibson             return ret;
74870b79849SDavid Gibson         }
74970b79849SDavid Gibson 
75070b79849SDavid Gibson         for (i = 0; i < 32; i++) {
75170b79849SDavid Gibson             reg.id = KVM_REG_PPC_VR(i);
75270b79849SDavid Gibson             reg.addr = (uintptr_t)&env->avr[i];
75370b79849SDavid Gibson             ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
75470b79849SDavid Gibson             if (ret < 0) {
755da56ff91SPeter Maydell                 DPRINTF("Unable to set VR%d to KVM: %s\n", i, strerror(errno));
75670b79849SDavid Gibson                 return ret;
75770b79849SDavid Gibson             }
75870b79849SDavid Gibson         }
75970b79849SDavid Gibson     }
76070b79849SDavid Gibson 
76170b79849SDavid Gibson     return 0;
76270b79849SDavid Gibson }
76370b79849SDavid Gibson 
76470b79849SDavid Gibson static int kvm_get_fp(CPUState *cs)
76570b79849SDavid Gibson {
76670b79849SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
76770b79849SDavid Gibson     CPUPPCState *env = &cpu->env;
76870b79849SDavid Gibson     struct kvm_one_reg reg;
76970b79849SDavid Gibson     int i;
77070b79849SDavid Gibson     int ret;
77170b79849SDavid Gibson 
77270b79849SDavid Gibson     if (env->insns_flags & PPC_FLOAT) {
77370b79849SDavid Gibson         uint64_t fpscr;
77470b79849SDavid Gibson         bool vsx = !!(env->insns_flags2 & PPC2_VSX);
77570b79849SDavid Gibson 
77670b79849SDavid Gibson         reg.id = KVM_REG_PPC_FPSCR;
77770b79849SDavid Gibson         reg.addr = (uintptr_t)&fpscr;
77870b79849SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
77970b79849SDavid Gibson         if (ret < 0) {
780da56ff91SPeter Maydell             DPRINTF("Unable to get FPSCR from KVM: %s\n", strerror(errno));
78170b79849SDavid Gibson             return ret;
78270b79849SDavid Gibson         } else {
78370b79849SDavid Gibson             env->fpscr = fpscr;
78470b79849SDavid Gibson         }
78570b79849SDavid Gibson 
78670b79849SDavid Gibson         for (i = 0; i < 32; i++) {
78770b79849SDavid Gibson             uint64_t vsr[2];
78870b79849SDavid Gibson 
78970b79849SDavid Gibson             reg.addr = (uintptr_t) &vsr;
79070b79849SDavid Gibson             reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);
79170b79849SDavid Gibson 
79270b79849SDavid Gibson             ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
79370b79849SDavid Gibson             if (ret < 0) {
794da56ff91SPeter Maydell                 DPRINTF("Unable to get %s%d from KVM: %s\n",
79570b79849SDavid Gibson                         vsx ? "VSR" : "FPR", i, strerror(errno));
79670b79849SDavid Gibson                 return ret;
79770b79849SDavid Gibson             } else {
7983a4b791bSGreg Kurz #ifdef HOST_WORDS_BIGENDIAN
79970b79849SDavid Gibson                 env->fpr[i] = vsr[0];
80070b79849SDavid Gibson                 if (vsx) {
80170b79849SDavid Gibson                     env->vsr[i] = vsr[1];
80270b79849SDavid Gibson                 }
8033a4b791bSGreg Kurz #else
8043a4b791bSGreg Kurz                 env->fpr[i] = vsr[1];
8053a4b791bSGreg Kurz                 if (vsx) {
8063a4b791bSGreg Kurz                     env->vsr[i] = vsr[0];
8073a4b791bSGreg Kurz                 }
8083a4b791bSGreg Kurz #endif
80970b79849SDavid Gibson             }
81070b79849SDavid Gibson         }
81170b79849SDavid Gibson     }
81270b79849SDavid Gibson 
81370b79849SDavid Gibson     if (env->insns_flags & PPC_ALTIVEC) {
81470b79849SDavid Gibson         reg.id = KVM_REG_PPC_VSCR;
81570b79849SDavid Gibson         reg.addr = (uintptr_t)&env->vscr;
81670b79849SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
81770b79849SDavid Gibson         if (ret < 0) {
818da56ff91SPeter Maydell             DPRINTF("Unable to get VSCR from KVM: %s\n", strerror(errno));
81970b79849SDavid Gibson             return ret;
82070b79849SDavid Gibson         }
82170b79849SDavid Gibson 
82270b79849SDavid Gibson         for (i = 0; i < 32; i++) {
82370b79849SDavid Gibson             reg.id = KVM_REG_PPC_VR(i);
82470b79849SDavid Gibson             reg.addr = (uintptr_t)&env->avr[i];
82570b79849SDavid Gibson             ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
82670b79849SDavid Gibson             if (ret < 0) {
827da56ff91SPeter Maydell                 DPRINTF("Unable to get VR%d from KVM: %s\n",
82870b79849SDavid Gibson                         i, strerror(errno));
82970b79849SDavid Gibson                 return ret;
83070b79849SDavid Gibson             }
83170b79849SDavid Gibson         }
83270b79849SDavid Gibson     }
83370b79849SDavid Gibson 
83470b79849SDavid Gibson     return 0;
83570b79849SDavid Gibson }
83670b79849SDavid Gibson 
8379b00ea49SDavid Gibson #if defined(TARGET_PPC64)
8389b00ea49SDavid Gibson static int kvm_get_vpa(CPUState *cs)
8399b00ea49SDavid Gibson {
8409b00ea49SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
8419b00ea49SDavid Gibson     CPUPPCState *env = &cpu->env;
8429b00ea49SDavid Gibson     struct kvm_one_reg reg;
8439b00ea49SDavid Gibson     int ret;
8449b00ea49SDavid Gibson 
8459b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_ADDR;
8469b00ea49SDavid Gibson     reg.addr = (uintptr_t)&env->vpa_addr;
8479b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
8489b00ea49SDavid Gibson     if (ret < 0) {
849da56ff91SPeter Maydell         DPRINTF("Unable to get VPA address from KVM: %s\n", strerror(errno));
8509b00ea49SDavid Gibson         return ret;
8519b00ea49SDavid Gibson     }
8529b00ea49SDavid Gibson 
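    /*
     * KVM_REG_PPC_VPA_SLB and KVM_REG_PPC_VPA_DTL carry the address and size
     * as a single 128-bit value, so the corresponding fields must sit next to
     * each other in CPUPPCState; the asserts below (and in kvm_put_vpa())
     * check that layout.
     */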
8539b00ea49SDavid Gibson     assert((uintptr_t)&env->slb_shadow_size
8549b00ea49SDavid Gibson            == ((uintptr_t)&env->slb_shadow_addr + 8));
8559b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_SLB;
8569b00ea49SDavid Gibson     reg.addr = (uintptr_t)&env->slb_shadow_addr;
8579b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
8589b00ea49SDavid Gibson     if (ret < 0) {
859da56ff91SPeter Maydell         DPRINTF("Unable to get SLB shadow state from KVM: %s\n",
8609b00ea49SDavid Gibson                 strerror(errno));
8619b00ea49SDavid Gibson         return ret;
8629b00ea49SDavid Gibson     }
8639b00ea49SDavid Gibson 
8649b00ea49SDavid Gibson     assert((uintptr_t)&env->dtl_size == ((uintptr_t)&env->dtl_addr + 8));
8659b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_DTL;
8669b00ea49SDavid Gibson     reg.addr = (uintptr_t)&env->dtl_addr;
8679b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
8689b00ea49SDavid Gibson     if (ret < 0) {
869da56ff91SPeter Maydell         DPRINTF("Unable to get dispatch trace log state from KVM: %s\n",
8709b00ea49SDavid Gibson                 strerror(errno));
8719b00ea49SDavid Gibson         return ret;
8729b00ea49SDavid Gibson     }
8739b00ea49SDavid Gibson 
8749b00ea49SDavid Gibson     return 0;
8759b00ea49SDavid Gibson }
8769b00ea49SDavid Gibson 
8779b00ea49SDavid Gibson static int kvm_put_vpa(CPUState *cs)
8789b00ea49SDavid Gibson {
8799b00ea49SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
8809b00ea49SDavid Gibson     CPUPPCState *env = &cpu->env;
8819b00ea49SDavid Gibson     struct kvm_one_reg reg;
8829b00ea49SDavid Gibson     int ret;
8839b00ea49SDavid Gibson 
8849b00ea49SDavid Gibson     /* SLB shadow or DTL can't be registered unless a master VPA is
8859b00ea49SDavid Gibson      * registered.  That means when restoring state, if a VPA *is*
8869b00ea49SDavid Gibson      * registered, we need to set that up first.  If not, we need to
8879b00ea49SDavid Gibson      * deregister the others before deregistering the master VPA */
8889b00ea49SDavid Gibson     assert(env->vpa_addr || !(env->slb_shadow_addr || env->dtl_addr));
8899b00ea49SDavid Gibson 
8909b00ea49SDavid Gibson     if (env->vpa_addr) {
8919b00ea49SDavid Gibson         reg.id = KVM_REG_PPC_VPA_ADDR;
8929b00ea49SDavid Gibson         reg.addr = (uintptr_t)&env->vpa_addr;
8939b00ea49SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
8949b00ea49SDavid Gibson         if (ret < 0) {
895da56ff91SPeter Maydell             DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno));
8969b00ea49SDavid Gibson             return ret;
8979b00ea49SDavid Gibson         }
8989b00ea49SDavid Gibson     }
8999b00ea49SDavid Gibson 
9009b00ea49SDavid Gibson     assert((uintptr_t)&env->slb_shadow_size
9019b00ea49SDavid Gibson            == ((uintptr_t)&env->slb_shadow_addr + 8));
9029b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_SLB;
9039b00ea49SDavid Gibson     reg.addr = (uintptr_t)&env->slb_shadow_addr;
9049b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
9059b00ea49SDavid Gibson     if (ret < 0) {
906da56ff91SPeter Maydell         DPRINTF("Unable to set SLB shadow state to KVM: %s\n", strerror(errno));
9079b00ea49SDavid Gibson         return ret;
9089b00ea49SDavid Gibson     }
9099b00ea49SDavid Gibson 
9109b00ea49SDavid Gibson     assert((uintptr_t)&env->dtl_size == ((uintptr_t)&env->dtl_addr + 8));
9119b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_DTL;
9129b00ea49SDavid Gibson     reg.addr = (uintptr_t)&env->dtl_addr;
9139b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
9149b00ea49SDavid Gibson     if (ret < 0) {
915da56ff91SPeter Maydell         DPRINTF("Unable to set dispatch trace log state to KVM: %s\n",
9169b00ea49SDavid Gibson                 strerror(errno));
9179b00ea49SDavid Gibson         return ret;
9189b00ea49SDavid Gibson     }
9199b00ea49SDavid Gibson 
9209b00ea49SDavid Gibson     if (!env->vpa_addr) {
9219b00ea49SDavid Gibson         reg.id = KVM_REG_PPC_VPA_ADDR;
9229b00ea49SDavid Gibson         reg.addr = (uintptr_t)&env->vpa_addr;
9239b00ea49SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
9249b00ea49SDavid Gibson         if (ret < 0) {
925da56ff91SPeter Maydell             DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno));
9269b00ea49SDavid Gibson             return ret;
9279b00ea49SDavid Gibson         }
9289b00ea49SDavid Gibson     }
9299b00ea49SDavid Gibson 
9309b00ea49SDavid Gibson     return 0;
9319b00ea49SDavid Gibson }
9329b00ea49SDavid Gibson #endif /* TARGET_PPC64 */
9339b00ea49SDavid Gibson 
934e5c0d3ceSDavid Gibson int kvmppc_put_books_sregs(PowerPCCPU *cpu)
935a7a00a72SDavid Gibson {
936a7a00a72SDavid Gibson     CPUPPCState *env = &cpu->env;
937a7a00a72SDavid Gibson     struct kvm_sregs sregs;
938a7a00a72SDavid Gibson     int i;
939a7a00a72SDavid Gibson 
940a7a00a72SDavid Gibson     sregs.pvr = env->spr[SPR_PVR];
941a7a00a72SDavid Gibson 
942a7a00a72SDavid Gibson     sregs.u.s.sdr1 = env->spr[SPR_SDR1];
943a7a00a72SDavid Gibson 
944a7a00a72SDavid Gibson     /* Sync SLB */
945a7a00a72SDavid Gibson #ifdef TARGET_PPC64
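    /*
     * Mirror the software SLB into the sregs image; valid entries also carry
     * their slot index in the low bits of the ESID doubleword, as the sregs
     * SLB format expects.
     */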
946a7a00a72SDavid Gibson     for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
947a7a00a72SDavid Gibson         sregs.u.s.ppc64.slb[i].slbe = env->slb[i].esid;
948a7a00a72SDavid Gibson         if (env->slb[i].esid & SLB_ESID_V) {
949a7a00a72SDavid Gibson             sregs.u.s.ppc64.slb[i].slbe |= i;
950a7a00a72SDavid Gibson         }
951a7a00a72SDavid Gibson         sregs.u.s.ppc64.slb[i].slbv = env->slb[i].vsid;
952a7a00a72SDavid Gibson     }
953a7a00a72SDavid Gibson #endif
954a7a00a72SDavid Gibson 
955a7a00a72SDavid Gibson     /* Sync SRs */
956a7a00a72SDavid Gibson     for (i = 0; i < 16; i++) {
957a7a00a72SDavid Gibson         sregs.u.s.ppc32.sr[i] = env->sr[i];
958a7a00a72SDavid Gibson     }
959a7a00a72SDavid Gibson 
960a7a00a72SDavid Gibson     /* Sync BATs */
961a7a00a72SDavid Gibson     for (i = 0; i < 8; i++) {
962a7a00a72SDavid Gibson         /* Beware. We have to swap upper and lower bits here */
963a7a00a72SDavid Gibson         sregs.u.s.ppc32.dbat[i] = ((uint64_t)env->DBAT[0][i] << 32)
964a7a00a72SDavid Gibson             | env->DBAT[1][i];
965a7a00a72SDavid Gibson         sregs.u.s.ppc32.ibat[i] = ((uint64_t)env->IBAT[0][i] << 32)
966a7a00a72SDavid Gibson             | env->IBAT[1][i];
967a7a00a72SDavid Gibson     }
968a7a00a72SDavid Gibson 
969a7a00a72SDavid Gibson     return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
970a7a00a72SDavid Gibson }
971a7a00a72SDavid Gibson 
97220d695a9SAndreas Färber int kvm_arch_put_registers(CPUState *cs, int level)
973d76d1650Saurel32 {
97420d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
97520d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
976d76d1650Saurel32     struct kvm_regs regs;
977d76d1650Saurel32     int ret;
978d76d1650Saurel32     int i;
979d76d1650Saurel32 
9801bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
9811bc22652SAndreas Färber     if (ret < 0) {
982d76d1650Saurel32         return ret;
9831bc22652SAndreas Färber     }
984d76d1650Saurel32 
985d76d1650Saurel32     regs.ctr = env->ctr;
986d76d1650Saurel32     regs.lr  = env->lr;
987da91a00fSRichard Henderson     regs.xer = cpu_read_xer(env);
988d76d1650Saurel32     regs.msr = env->msr;
989d76d1650Saurel32     regs.pc = env->nip;
990d76d1650Saurel32 
991d76d1650Saurel32     regs.srr0 = env->spr[SPR_SRR0];
992d76d1650Saurel32     regs.srr1 = env->spr[SPR_SRR1];
993d76d1650Saurel32 
994d76d1650Saurel32     regs.sprg0 = env->spr[SPR_SPRG0];
995d76d1650Saurel32     regs.sprg1 = env->spr[SPR_SPRG1];
996d76d1650Saurel32     regs.sprg2 = env->spr[SPR_SPRG2];
997d76d1650Saurel32     regs.sprg3 = env->spr[SPR_SPRG3];
998d76d1650Saurel32     regs.sprg4 = env->spr[SPR_SPRG4];
999d76d1650Saurel32     regs.sprg5 = env->spr[SPR_SPRG5];
1000d76d1650Saurel32     regs.sprg6 = env->spr[SPR_SPRG6];
1001d76d1650Saurel32     regs.sprg7 = env->spr[SPR_SPRG7];
1002d76d1650Saurel32 
100390dc8812SScott Wood     regs.pid = env->spr[SPR_BOOKE_PID];
100490dc8812SScott Wood 
1005d76d1650Saurel32     for (i = 0; i < 32; i++)
1006d76d1650Saurel32         regs.gpr[i] = env->gpr[i];
1007d76d1650Saurel32 
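    /* Fold the eight 4-bit CR fields back into the single 32-bit CR image,
     * with CR0 in the most-significant nibble. */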
10084bddaf55SAlexey Kardashevskiy     regs.cr = 0;
10094bddaf55SAlexey Kardashevskiy     for (i = 0; i < 8; i++) {
10104bddaf55SAlexey Kardashevskiy         regs.cr |= (env->crf[i] & 15) << (4 * (7 - i));
10114bddaf55SAlexey Kardashevskiy     }
10124bddaf55SAlexey Kardashevskiy 
10131bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
1014d76d1650Saurel32     if (ret < 0)
1015d76d1650Saurel32         return ret;
1016d76d1650Saurel32 
101770b79849SDavid Gibson     kvm_put_fp(cs);
101870b79849SDavid Gibson 
101993dd5e85SScott Wood     if (env->tlb_dirty) {
10201bc22652SAndreas Färber         kvm_sw_tlb_put(cpu);
102193dd5e85SScott Wood         env->tlb_dirty = false;
102293dd5e85SScott Wood     }
102393dd5e85SScott Wood 
1024f1af19d7SDavid Gibson     if (cap_segstate && (level >= KVM_PUT_RESET_STATE)) {
1025a7a00a72SDavid Gibson         ret = kvmppc_put_books_sregs(cpu);
1026a7a00a72SDavid Gibson         if (ret < 0) {
1027f1af19d7SDavid Gibson             return ret;
1028f1af19d7SDavid Gibson         }
1029f1af19d7SDavid Gibson     }
1030f1af19d7SDavid Gibson 
1031f1af19d7SDavid Gibson     if (cap_hior && (level >= KVM_PUT_RESET_STATE)) {
1032d67d40eaSDavid Gibson         kvm_put_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
1033d67d40eaSDavid Gibson     }
1034f1af19d7SDavid Gibson 
1035d67d40eaSDavid Gibson     if (cap_one_reg) {
1036d67d40eaSDavid Gibson         int i;
1037d67d40eaSDavid Gibson 
1038d67d40eaSDavid Gibson         /* We deliberately ignore errors here: for kernels which have
1039d67d40eaSDavid Gibson          * the ONE_REG calls but don't support the specific
1040d67d40eaSDavid Gibson          * registers, there's a reasonable chance things will still
1041d67d40eaSDavid Gibson          * work, at least until we try to migrate. */
1042d67d40eaSDavid Gibson         for (i = 0; i < 1024; i++) {
1043d67d40eaSDavid Gibson             uint64_t id = env->spr_cb[i].one_reg_id;
1044d67d40eaSDavid Gibson 
1045d67d40eaSDavid Gibson             if (id != 0) {
1046d67d40eaSDavid Gibson                 kvm_put_one_spr(cs, id, i);
1047d67d40eaSDavid Gibson             }
1048f1af19d7SDavid Gibson         }
10499b00ea49SDavid Gibson 
10509b00ea49SDavid Gibson #ifdef TARGET_PPC64
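        /* Only transfer the checkpointed TM registers while MSR[TS]
         * indicates a transaction is active or suspended. */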
105180b3f79bSAlexey Kardashevskiy         if (msr_ts) {
105280b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
105380b3f79bSAlexey Kardashevskiy                 kvm_set_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
105480b3f79bSAlexey Kardashevskiy             }
105580b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
105680b3f79bSAlexey Kardashevskiy                 kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
105780b3f79bSAlexey Kardashevskiy             }
105880b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
105980b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
106080b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
106180b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
106280b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
106380b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
106480b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
106580b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
106680b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
106780b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
106880b3f79bSAlexey Kardashevskiy         }
106980b3f79bSAlexey Kardashevskiy 
10709b00ea49SDavid Gibson         if (cap_papr) {
10719b00ea49SDavid Gibson             if (kvm_put_vpa(cs) < 0) {
1072da56ff91SPeter Maydell                 DPRINTF("Warning: Unable to set VPA information to KVM\n");
10739b00ea49SDavid Gibson             }
10749b00ea49SDavid Gibson         }
107598a8b524SAlexey Kardashevskiy 
107698a8b524SAlexey Kardashevskiy         kvm_set_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
10779b00ea49SDavid Gibson #endif /* TARGET_PPC64 */
1078f1af19d7SDavid Gibson     }
1079f1af19d7SDavid Gibson 
1080d76d1650Saurel32     return ret;
1081d76d1650Saurel32 }
1082d76d1650Saurel32 
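/* Refresh QEMU's cached exception vector for a BookE interrupt: the
 * effective vector is the IVPR prefix plus the per-interrupt IVOR offset. */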
1083c371c2e3SBharat Bhushan static void kvm_sync_excp(CPUPPCState *env, int vector, int ivor)
1084c371c2e3SBharat Bhushan {
1085c371c2e3SBharat Bhushan      env->excp_vectors[vector] = env->spr[ivor] + env->spr[SPR_BOOKE_IVPR];
1086c371c2e3SBharat Bhushan }
1087c371c2e3SBharat Bhushan 
1088a7a00a72SDavid Gibson static int kvmppc_get_booke_sregs(PowerPCCPU *cpu)
1089d76d1650Saurel32 {
109020d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
1091ba5e5090SAlexander Graf     struct kvm_sregs sregs;
1092a7a00a72SDavid Gibson     int ret;
1093d76d1650Saurel32 
1094a7a00a72SDavid Gibson     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
109590dc8812SScott Wood     if (ret < 0) {
109690dc8812SScott Wood         return ret;
109790dc8812SScott Wood     }
109890dc8812SScott Wood 
109990dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_BASE) {
110090dc8812SScott Wood         env->spr[SPR_BOOKE_CSRR0] = sregs.u.e.csrr0;
110190dc8812SScott Wood         env->spr[SPR_BOOKE_CSRR1] = sregs.u.e.csrr1;
110290dc8812SScott Wood         env->spr[SPR_BOOKE_ESR] = sregs.u.e.esr;
110390dc8812SScott Wood         env->spr[SPR_BOOKE_DEAR] = sregs.u.e.dear;
110490dc8812SScott Wood         env->spr[SPR_BOOKE_MCSR] = sregs.u.e.mcsr;
110590dc8812SScott Wood         env->spr[SPR_BOOKE_TSR] = sregs.u.e.tsr;
110690dc8812SScott Wood         env->spr[SPR_BOOKE_TCR] = sregs.u.e.tcr;
110790dc8812SScott Wood         env->spr[SPR_DECR] = sregs.u.e.dec;
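        /* KVM reports the timebase as one 64-bit value; split it into the
         * TBL/TBU halves that QEMU tracks. */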
110890dc8812SScott Wood         env->spr[SPR_TBL] = sregs.u.e.tb & 0xffffffff;
110990dc8812SScott Wood         env->spr[SPR_TBU] = sregs.u.e.tb >> 32;
111090dc8812SScott Wood         env->spr[SPR_VRSAVE] = sregs.u.e.vrsave;
111190dc8812SScott Wood     }
111290dc8812SScott Wood 
111390dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_ARCH206) {
111490dc8812SScott Wood         env->spr[SPR_BOOKE_PIR] = sregs.u.e.pir;
111590dc8812SScott Wood         env->spr[SPR_BOOKE_MCSRR0] = sregs.u.e.mcsrr0;
111690dc8812SScott Wood         env->spr[SPR_BOOKE_MCSRR1] = sregs.u.e.mcsrr1;
111790dc8812SScott Wood         env->spr[SPR_BOOKE_DECAR] = sregs.u.e.decar;
111890dc8812SScott Wood         env->spr[SPR_BOOKE_IVPR] = sregs.u.e.ivpr;
111990dc8812SScott Wood     }
112090dc8812SScott Wood 
112190dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_64) {
112290dc8812SScott Wood         env->spr[SPR_BOOKE_EPCR] = sregs.u.e.epcr;
112390dc8812SScott Wood     }
112490dc8812SScott Wood 
112590dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_SPRG8) {
112690dc8812SScott Wood         env->spr[SPR_BOOKE_SPRG8] = sregs.u.e.sprg8;
112790dc8812SScott Wood     }
112890dc8812SScott Wood 
112990dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_IVOR) {
113090dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0];
1131c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_CRITICAL,  SPR_BOOKE_IVOR0);
113290dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1];
1133c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_MCHECK,  SPR_BOOKE_IVOR1);
113490dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2];
1135c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DSI,  SPR_BOOKE_IVOR2);
113690dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3];
1137c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_ISI,  SPR_BOOKE_IVOR3);
113890dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4];
1139c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_EXTERNAL,  SPR_BOOKE_IVOR4);
114090dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5];
1141c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_ALIGN,  SPR_BOOKE_IVOR5);
114290dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6];
1143c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_PROGRAM,  SPR_BOOKE_IVOR6);
114490dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7];
1145c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_FPU,  SPR_BOOKE_IVOR7);
114690dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8];
1147c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_SYSCALL,  SPR_BOOKE_IVOR8);
114890dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9];
1149c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_APU,  SPR_BOOKE_IVOR9);
115090dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10];
1151c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DECR,  SPR_BOOKE_IVOR10);
115290dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11];
1153c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_FIT,  SPR_BOOKE_IVOR11);
115490dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12];
1155c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_WDT,  SPR_BOOKE_IVOR12);
115690dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13];
1157c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DTLB,  SPR_BOOKE_IVOR13);
115890dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14];
1159c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_ITLB,  SPR_BOOKE_IVOR14);
116090dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15];
1161c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DEBUG,  SPR_BOOKE_IVOR15);
116290dc8812SScott Wood 
116390dc8812SScott Wood         if (sregs.u.e.features & KVM_SREGS_E_SPE) {
116490dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0];
1165c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_SPEU,  SPR_BOOKE_IVOR32);
116690dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1];
1167c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_EFPDI,  SPR_BOOKE_IVOR33);
116890dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2];
1169c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_EFPRI,  SPR_BOOKE_IVOR34);
117090dc8812SScott Wood         }
117190dc8812SScott Wood 
117290dc8812SScott Wood         if (sregs.u.e.features & KVM_SREGS_E_PM) {
117390dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3];
1174c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_EPERFM,  SPR_BOOKE_IVOR35);
117590dc8812SScott Wood         }
117690dc8812SScott Wood 
117790dc8812SScott Wood         if (sregs.u.e.features & KVM_SREGS_E_PC) {
117890dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4];
1179c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_DOORI,  SPR_BOOKE_IVOR36);
118090dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5];
1181c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_DOORCI, SPR_BOOKE_IVOR37);
118290dc8812SScott Wood         }
118390dc8812SScott Wood     }
118490dc8812SScott Wood 
118590dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_ARCH206_MMU) {
118690dc8812SScott Wood         env->spr[SPR_BOOKE_MAS0] = sregs.u.e.mas0;
118790dc8812SScott Wood         env->spr[SPR_BOOKE_MAS1] = sregs.u.e.mas1;
118890dc8812SScott Wood         env->spr[SPR_BOOKE_MAS2] = sregs.u.e.mas2;
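        /* sregs packs MAS7 (high word) and MAS3 (low word) into the single
         * mas7_3 field. */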
118990dc8812SScott Wood         env->spr[SPR_BOOKE_MAS3] = sregs.u.e.mas7_3 & 0xffffffff;
119090dc8812SScott Wood         env->spr[SPR_BOOKE_MAS4] = sregs.u.e.mas4;
119190dc8812SScott Wood         env->spr[SPR_BOOKE_MAS6] = sregs.u.e.mas6;
119290dc8812SScott Wood         env->spr[SPR_BOOKE_MAS7] = sregs.u.e.mas7_3 >> 32;
119390dc8812SScott Wood         env->spr[SPR_MMUCFG] = sregs.u.e.mmucfg;
119490dc8812SScott Wood         env->spr[SPR_BOOKE_TLB0CFG] = sregs.u.e.tlbcfg[0];
119590dc8812SScott Wood         env->spr[SPR_BOOKE_TLB1CFG] = sregs.u.e.tlbcfg[1];
119690dc8812SScott Wood     }
119790dc8812SScott Wood 
119890dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_EXP) {
119990dc8812SScott Wood         env->spr[SPR_BOOKE_EPR] = sregs.u.e.epr;
120090dc8812SScott Wood     }
120190dc8812SScott Wood 
120290dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_PD) {
120390dc8812SScott Wood         env->spr[SPR_BOOKE_EPLC] = sregs.u.e.eplc;
120490dc8812SScott Wood         env->spr[SPR_BOOKE_EPSC] = sregs.u.e.epsc;
120590dc8812SScott Wood     }
120690dc8812SScott Wood 
120790dc8812SScott Wood     if (sregs.u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
120890dc8812SScott Wood         env->spr[SPR_E500_SVR] = sregs.u.e.impl.fsl.svr;
120990dc8812SScott Wood         env->spr[SPR_Exxx_MCAR] = sregs.u.e.impl.fsl.mcar;
121090dc8812SScott Wood         env->spr[SPR_HID0] = sregs.u.e.impl.fsl.hid0;
121190dc8812SScott Wood 
121290dc8812SScott Wood         if (sregs.u.e.impl.fsl.features & KVM_SREGS_E_FSL_PIDn) {
121390dc8812SScott Wood             env->spr[SPR_BOOKE_PID1] = sregs.u.e.impl.fsl.pid1;
121490dc8812SScott Wood             env->spr[SPR_BOOKE_PID2] = sregs.u.e.impl.fsl.pid2;
121590dc8812SScott Wood         }
121690dc8812SScott Wood     }
1217a7a00a72SDavid Gibson 
1218a7a00a72SDavid Gibson     return 0;
1219fafc0b6aSAlexander Graf }
122090dc8812SScott Wood 
1221a7a00a72SDavid Gibson static int kvmppc_get_books_sregs(PowerPCCPU *cpu)
1222a7a00a72SDavid Gibson {
1223a7a00a72SDavid Gibson     CPUPPCState *env = &cpu->env;
1224a7a00a72SDavid Gibson     struct kvm_sregs sregs;
1225a7a00a72SDavid Gibson     int ret;
1226a7a00a72SDavid Gibson     int i;
1227a7a00a72SDavid Gibson 
1228a7a00a72SDavid Gibson     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
122990dc8812SScott Wood     if (ret < 0) {
123090dc8812SScott Wood         return ret;
123190dc8812SScott Wood     }
123290dc8812SScott Wood 
1233e57ca75cSDavid Gibson     if (!cpu->vhyp) {
1234bb593904SDavid Gibson         ppc_store_sdr1(env, sregs.u.s.sdr1);
1235f3c75d42SAneesh Kumar K.V     }
1236ba5e5090SAlexander Graf 
1237ba5e5090SAlexander Graf     /* Sync SLB */
123882c09f2fSAlexander Graf #ifdef TARGET_PPC64
12394b4d4a21SAneesh Kumar K.V     /*
12404b4d4a21SAneesh Kumar K.V      * The packed SLB array we get from KVM_GET_SREGS only contains
1241a7a00a72SDavid Gibson      * information about valid entries. So we flush our internal copy
1242a7a00a72SDavid Gibson      * to get rid of stale ones, then put all valid SLB entries back
1243a7a00a72SDavid Gibson      * in.
12444b4d4a21SAneesh Kumar K.V      */
12454b4d4a21SAneesh Kumar K.V     memset(env->slb, 0, sizeof(env->slb));
1246d83af167SAneesh Kumar K.V     for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
12474b4d4a21SAneesh Kumar K.V         target_ulong rb = sregs.u.s.ppc64.slb[i].slbe;
12484b4d4a21SAneesh Kumar K.V         target_ulong rs = sregs.u.s.ppc64.slb[i].slbv;
12494b4d4a21SAneesh Kumar K.V         /*
12504b4d4a21SAneesh Kumar K.V          * Only restore valid entries
12514b4d4a21SAneesh Kumar K.V          */
12524b4d4a21SAneesh Kumar K.V         if (rb & SLB_ESID_V) {
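            /* The low bits of the packed slbe hold the SLB slot number;
             * mask them out of the ESID when restoring the entry. */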
1253bcd81230SDavid Gibson             ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs);
12544b4d4a21SAneesh Kumar K.V         }
1255ba5e5090SAlexander Graf     }
125682c09f2fSAlexander Graf #endif
1257ba5e5090SAlexander Graf 
1258ba5e5090SAlexander Graf     /* Sync SRs */
1259ba5e5090SAlexander Graf     for (i = 0; i < 16; i++) {
1260ba5e5090SAlexander Graf         env->sr[i] = sregs.u.s.ppc32.sr[i];
1261ba5e5090SAlexander Graf     }
1262ba5e5090SAlexander Graf 
1263ba5e5090SAlexander Graf     /* Sync BATs */
1264ba5e5090SAlexander Graf     for (i = 0; i < 8; i++) {
1265ba5e5090SAlexander Graf         env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff;
1266ba5e5090SAlexander Graf         env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32;
1267ba5e5090SAlexander Graf         env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff;
1268ba5e5090SAlexander Graf         env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32;
1269ba5e5090SAlexander Graf     }
1270a7a00a72SDavid Gibson 
1271a7a00a72SDavid Gibson     return 0;
1272a7a00a72SDavid Gibson }
1273a7a00a72SDavid Gibson 
1274a7a00a72SDavid Gibson int kvm_arch_get_registers(CPUState *cs)
1275a7a00a72SDavid Gibson {
1276a7a00a72SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
1277a7a00a72SDavid Gibson     CPUPPCState *env = &cpu->env;
1278a7a00a72SDavid Gibson     struct kvm_regs regs;
1279a7a00a72SDavid Gibson     uint32_t cr;
1280a7a00a72SDavid Gibson     int i, ret;
1281a7a00a72SDavid Gibson 
1282a7a00a72SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
1283a7a00a72SDavid Gibson     if (ret < 0) {
1284a7a00a72SDavid Gibson         return ret;
    }
1285a7a00a72SDavid Gibson 
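    /* Unpack KVM's single 32-bit CR image back into the eight 4-bit
     * env->crf[] fields, CR0 first. */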
1286a7a00a72SDavid Gibson     cr = regs.cr;
1287a7a00a72SDavid Gibson     for (i = 7; i >= 0; i--) {
1288a7a00a72SDavid Gibson         env->crf[i] = cr & 15;
1289a7a00a72SDavid Gibson         cr >>= 4;
1290a7a00a72SDavid Gibson     }
1291a7a00a72SDavid Gibson 
1292a7a00a72SDavid Gibson     env->ctr = regs.ctr;
1293a7a00a72SDavid Gibson     env->lr = regs.lr;
1294a7a00a72SDavid Gibson     cpu_write_xer(env, regs.xer);
1295a7a00a72SDavid Gibson     env->msr = regs.msr;
1296a7a00a72SDavid Gibson     env->nip = regs.pc;
1297a7a00a72SDavid Gibson 
1298a7a00a72SDavid Gibson     env->spr[SPR_SRR0] = regs.srr0;
1299a7a00a72SDavid Gibson     env->spr[SPR_SRR1] = regs.srr1;
1300a7a00a72SDavid Gibson 
1301a7a00a72SDavid Gibson     env->spr[SPR_SPRG0] = regs.sprg0;
1302a7a00a72SDavid Gibson     env->spr[SPR_SPRG1] = regs.sprg1;
1303a7a00a72SDavid Gibson     env->spr[SPR_SPRG2] = regs.sprg2;
1304a7a00a72SDavid Gibson     env->spr[SPR_SPRG3] = regs.sprg3;
1305a7a00a72SDavid Gibson     env->spr[SPR_SPRG4] = regs.sprg4;
1306a7a00a72SDavid Gibson     env->spr[SPR_SPRG5] = regs.sprg5;
1307a7a00a72SDavid Gibson     env->spr[SPR_SPRG6] = regs.sprg6;
1308a7a00a72SDavid Gibson     env->spr[SPR_SPRG7] = regs.sprg7;
1309a7a00a72SDavid Gibson 
1310a7a00a72SDavid Gibson     env->spr[SPR_BOOKE_PID] = regs.pid;
1311a7a00a72SDavid Gibson 
1312a7a00a72SDavid Gibson     for (i = 0; i < 32; i++) {
1313a7a00a72SDavid Gibson         env->gpr[i] = regs.gpr[i];
    }
1314a7a00a72SDavid Gibson 
1315a7a00a72SDavid Gibson     kvm_get_fp(cs);
1316a7a00a72SDavid Gibson 
1317a7a00a72SDavid Gibson     if (cap_booke_sregs) {
1318a7a00a72SDavid Gibson         ret = kvmppc_get_booke_sregs(cpu);
1319a7a00a72SDavid Gibson         if (ret < 0) {
1320a7a00a72SDavid Gibson             return ret;
1321a7a00a72SDavid Gibson         }
1322a7a00a72SDavid Gibson     }
1323a7a00a72SDavid Gibson 
1324a7a00a72SDavid Gibson     if (cap_segstate) {
1325a7a00a72SDavid Gibson         ret = kvmppc_get_books_sregs(cpu);
1326a7a00a72SDavid Gibson         if (ret < 0) {
1327a7a00a72SDavid Gibson             return ret;
1328a7a00a72SDavid Gibson         }
1329fafc0b6aSAlexander Graf     }
1330ba5e5090SAlexander Graf 
1331d67d40eaSDavid Gibson     if (cap_hior) {
1332d67d40eaSDavid Gibson         kvm_get_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
1333d67d40eaSDavid Gibson     }
1334d67d40eaSDavid Gibson 
1335d67d40eaSDavid Gibson     if (cap_one_reg) {
1336d67d40eaSDavid Gibson         int i;
1337d67d40eaSDavid Gibson 
1338d67d40eaSDavid Gibson         /* We deliberately ignore errors here: for kernels which have
1339d67d40eaSDavid Gibson          * the ONE_REG calls but don't support the specific
1340d67d40eaSDavid Gibson          * registers, there's a reasonable chance things will still
1341d67d40eaSDavid Gibson          * work, at least until we try to migrate. */
1342d67d40eaSDavid Gibson         for (i = 0; i < 1024; i++) {
1343d67d40eaSDavid Gibson             uint64_t id = env->spr_cb[i].one_reg_id;
1344d67d40eaSDavid Gibson 
1345d67d40eaSDavid Gibson             if (id != 0) {
1346d67d40eaSDavid Gibson                 kvm_get_one_spr(cs, id, i);
1347d67d40eaSDavid Gibson             }
1348d67d40eaSDavid Gibson         }
13499b00ea49SDavid Gibson 
13509b00ea49SDavid Gibson #ifdef TARGET_PPC64
135180b3f79bSAlexey Kardashevskiy         if (msr_ts) {
135280b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
135380b3f79bSAlexey Kardashevskiy                 kvm_get_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
135480b3f79bSAlexey Kardashevskiy             }
135580b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
135680b3f79bSAlexey Kardashevskiy                 kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
135780b3f79bSAlexey Kardashevskiy             }
135880b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
135980b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
136080b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
136180b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
136280b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
136380b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
136480b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
136580b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
136680b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
136780b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
136880b3f79bSAlexey Kardashevskiy         }
136980b3f79bSAlexey Kardashevskiy 
13709b00ea49SDavid Gibson         if (cap_papr) {
13719b00ea49SDavid Gibson             if (kvm_get_vpa(cs) < 0) {
1372da56ff91SPeter Maydell                 DPRINTF("Warning: Unable to get VPA information from KVM\n");
13739b00ea49SDavid Gibson             }
13749b00ea49SDavid Gibson         }
137598a8b524SAlexey Kardashevskiy 
137698a8b524SAlexey Kardashevskiy         kvm_get_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
13779b00ea49SDavid Gibson #endif
1378d67d40eaSDavid Gibson     }
1379d67d40eaSDavid Gibson 
1380d76d1650Saurel32     return 0;
1381d76d1650Saurel32 }
1382d76d1650Saurel32 
13831bc22652SAndreas Färber int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
1384fc87e185SAlexander Graf {
1385fc87e185SAlexander Graf     unsigned virq = level ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;
1386fc87e185SAlexander Graf 
1387fc87e185SAlexander Graf     if (irq != PPC_INTERRUPT_EXT) {
1388fc87e185SAlexander Graf         return 0;
1389fc87e185SAlexander Graf     }
1390fc87e185SAlexander Graf 
1391fc87e185SAlexander Graf     if (!kvm_enabled() || !cap_interrupt_unset || !cap_interrupt_level) {
1392fc87e185SAlexander Graf         return 0;
1393fc87e185SAlexander Graf     }
1394fc87e185SAlexander Graf 
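    /* Assert or deassert KVM's level-triggered external interrupt line */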
13951bc22652SAndreas Färber     kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq);
1396fc87e185SAlexander Graf 
1397fc87e185SAlexander Graf     return 0;
1398fc87e185SAlexander Graf }
1399fc87e185SAlexander Graf 
140016415335SAlexander Graf #if defined(TARGET_PPCEMB)
140116415335SAlexander Graf #define PPC_INPUT_INT PPC40x_INPUT_INT
140216415335SAlexander Graf #elif defined(TARGET_PPC64)
140316415335SAlexander Graf #define PPC_INPUT_INT PPC970_INPUT_INT
140416415335SAlexander Graf #else
140516415335SAlexander Graf #define PPC_INPUT_INT PPC6xx_INPUT_INT
140616415335SAlexander Graf #endif
140716415335SAlexander Graf 
140820d695a9SAndreas Färber void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
1409d76d1650Saurel32 {
141020d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
141120d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
1412d76d1650Saurel32     int r;
1413d76d1650Saurel32     unsigned irq;
1414d76d1650Saurel32 
14154b8523eeSJan Kiszka     qemu_mutex_lock_iothread();
14164b8523eeSJan Kiszka 
14175cbdb3a3SStefan Weil     /* PowerPC QEMU tracks the various core input pins (interrupt, critical
1418d76d1650Saurel32      * interrupt, reset, etc) in PPC-specific env->irq_input_state. */
1419fc87e185SAlexander Graf     if (!cap_interrupt_level &&
1420fc87e185SAlexander Graf         run->ready_for_interrupt_injection &&
1421259186a7SAndreas Färber         (cs->interrupt_request & CPU_INTERRUPT_HARD) &&
142216415335SAlexander Graf         (env->irq_input_state & (1 << PPC_INPUT_INT)))
1423d76d1650Saurel32     {
1424d76d1650Saurel32         /* For now KVM disregards the 'irq' argument. However, in the
1425d76d1650Saurel32          * future KVM could cache it in-kernel to avoid a heavyweight exit
1426d76d1650Saurel32          * when reading the UIC.
1427d76d1650Saurel32          */
1428fc87e185SAlexander Graf         irq = KVM_INTERRUPT_SET;
1429d76d1650Saurel32 
1430da56ff91SPeter Maydell         DPRINTF("injected interrupt %d\n", irq);
14311bc22652SAndreas Färber         r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &irq);
143255e5c285SAndreas Färber         if (r < 0) {
143355e5c285SAndreas Färber             printf("cpu %d fail inject %x\n", cs->cpu_index, irq);
143455e5c285SAndreas Färber         }
1435c821c2bdSAlexander Graf 
1436c821c2bdSAlexander Graf         /* Always wake up soon in case the interrupt was level based */
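        /* (NANOSECONDS_PER_SECOND / 50 gives a roughly 20 ms retry interval) */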
1437bc72ad67SAlex Bligh         timer_mod(idle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
143873bcb24dSRutuja Shah                        (NANOSECONDS_PER_SECOND / 50));
1439d76d1650Saurel32     }
1440d76d1650Saurel32 
1441d76d1650Saurel32     /* We don't know if there are more interrupts pending after this. However,
1442d76d1650Saurel32      * the guest will return to userspace in the course of handling this one
1443d76d1650Saurel32      * anyway, so we will get a chance to deliver the rest. */
14444b8523eeSJan Kiszka 
14454b8523eeSJan Kiszka     qemu_mutex_unlock_iothread();
1446d76d1650Saurel32 }
1447d76d1650Saurel32 
14484c663752SPaolo Bonzini MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
1449d76d1650Saurel32 {
14504c663752SPaolo Bonzini     return MEMTXATTRS_UNSPECIFIED;
1451d76d1650Saurel32 }
1452d76d1650Saurel32 
145320d695a9SAndreas Färber int kvm_arch_process_async_events(CPUState *cs)
14540af691d7SMarcelo Tosatti {
1455259186a7SAndreas Färber     return cs->halted;
14560af691d7SMarcelo Tosatti }
14570af691d7SMarcelo Tosatti 
1458259186a7SAndreas Färber static int kvmppc_handle_halt(PowerPCCPU *cpu)
1459d76d1650Saurel32 {
1460259186a7SAndreas Färber     CPUState *cs = CPU(cpu);
1461259186a7SAndreas Färber     CPUPPCState *env = &cpu->env;
1462259186a7SAndreas Färber 
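    /* Only go idle when external interrupts are enabled (MSR_EE) and no
     * hard interrupt is pending; otherwise keep the vCPU running. */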
1463259186a7SAndreas Färber     if (!(cs->interrupt_request & CPU_INTERRUPT_HARD) && (msr_ee)) {
1464259186a7SAndreas Färber         cs->halted = 1;
146527103424SAndreas Färber         cs->exception_index = EXCP_HLT;
1466d76d1650Saurel32     }
1467d76d1650Saurel32 
1468bb4ea393SJan Kiszka     return 0;
1469d76d1650Saurel32 }
1470d76d1650Saurel32 
1471d76d1650Saurel32 /* map dcr access to existing qemu dcr emulation */
14721328c2bfSAndreas Färber static int kvmppc_handle_dcr_read(CPUPPCState *env, uint32_t dcrn, uint32_t *data)
1473d76d1650Saurel32 {
1474d76d1650Saurel32     if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0) {
1475d76d1650Saurel32         fprintf(stderr, "Read from unhandled DCR (0x%x)\n", dcrn);
    }
1476d76d1650Saurel32 
1477bb4ea393SJan Kiszka     return 0;
1478d76d1650Saurel32 }
1479d76d1650Saurel32 
14801328c2bfSAndreas Färber static int kvmppc_handle_dcr_write(CPUPPCState *env, uint32_t dcrn, uint32_t data)
1481d76d1650Saurel32 {
1482d76d1650Saurel32     if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0) {
1483d76d1650Saurel32         fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);
    }
1484d76d1650Saurel32 
1485bb4ea393SJan Kiszka     return 0;
1486d76d1650Saurel32 }
1487d76d1650Saurel32 
14888a0548f9SBharat Bhushan int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
14898a0548f9SBharat Bhushan {
14908a0548f9SBharat Bhushan     /* Mixed endian case is not handled */
14918a0548f9SBharat Bhushan     uint32_t sc = debug_inst_opcode;
14928a0548f9SBharat Bhushan 
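    /* Save the original instruction, then write the debug instruction
     * opcode over it. */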
14938a0548f9SBharat Bhushan     if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
14948a0548f9SBharat Bhushan                             sizeof(sc), 0) ||
14958a0548f9SBharat Bhushan         cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 1)) {
14968a0548f9SBharat Bhushan         return -EINVAL;
14978a0548f9SBharat Bhushan     }
14988a0548f9SBharat Bhushan 
14998a0548f9SBharat Bhushan     return 0;
15008a0548f9SBharat Bhushan }
15018a0548f9SBharat Bhushan 
15028a0548f9SBharat Bhushan int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
15038a0548f9SBharat Bhushan {
15048a0548f9SBharat Bhushan     uint32_t sc;
15058a0548f9SBharat Bhushan 
15068a0548f9SBharat Bhushan     if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 0) ||
15078a0548f9SBharat Bhushan         sc != debug_inst_opcode ||
15088a0548f9SBharat Bhushan         cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
15098a0548f9SBharat Bhushan                             sizeof(sc), 1)) {
15108a0548f9SBharat Bhushan         return -EINVAL;
15118a0548f9SBharat Bhushan     }
15128a0548f9SBharat Bhushan 
15138a0548f9SBharat Bhushan     return 0;
15148a0548f9SBharat Bhushan }
15158a0548f9SBharat Bhushan 
151688365d17SBharat Bhushan static int find_hw_breakpoint(target_ulong addr, int type)
151788365d17SBharat Bhushan {
151888365d17SBharat Bhushan     int n;
151988365d17SBharat Bhushan 
152088365d17SBharat Bhushan     assert((nb_hw_breakpoint + nb_hw_watchpoint)
152188365d17SBharat Bhushan            <= ARRAY_SIZE(hw_debug_points));
152288365d17SBharat Bhushan 
152388365d17SBharat Bhushan     for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
152488365d17SBharat Bhushan         if (hw_debug_points[n].addr == addr &&
152588365d17SBharat Bhushan              hw_debug_points[n].type == type) {
152688365d17SBharat Bhushan             return n;
152788365d17SBharat Bhushan         }
152888365d17SBharat Bhushan     }
152988365d17SBharat Bhushan 
153088365d17SBharat Bhushan     return -1;
153188365d17SBharat Bhushan }
153288365d17SBharat Bhushan 
153388365d17SBharat Bhushan static int find_hw_watchpoint(target_ulong addr, int *flag)
153488365d17SBharat Bhushan {
153588365d17SBharat Bhushan     int n;
153688365d17SBharat Bhushan 
153788365d17SBharat Bhushan     n = find_hw_breakpoint(addr, GDB_WATCHPOINT_ACCESS);
153888365d17SBharat Bhushan     if (n >= 0) {
153988365d17SBharat Bhushan         *flag = BP_MEM_ACCESS;
154088365d17SBharat Bhushan         return n;
154188365d17SBharat Bhushan     }
154288365d17SBharat Bhushan 
154388365d17SBharat Bhushan     n = find_hw_breakpoint(addr, GDB_WATCHPOINT_WRITE);
154488365d17SBharat Bhushan     if (n >= 0) {
154588365d17SBharat Bhushan         *flag = BP_MEM_WRITE;
154688365d17SBharat Bhushan         return n;
154788365d17SBharat Bhushan     }
154888365d17SBharat Bhushan 
154988365d17SBharat Bhushan     n = find_hw_breakpoint(addr, GDB_WATCHPOINT_READ);
155088365d17SBharat Bhushan     if (n >= 0) {
155188365d17SBharat Bhushan         *flag = BP_MEM_READ;
155288365d17SBharat Bhushan         return n;
155388365d17SBharat Bhushan     }
155488365d17SBharat Bhushan 
155588365d17SBharat Bhushan     return -1;
155688365d17SBharat Bhushan }
155788365d17SBharat Bhushan 
155888365d17SBharat Bhushan int kvm_arch_insert_hw_breakpoint(target_ulong addr,
155988365d17SBharat Bhushan                                   target_ulong len, int type)
156088365d17SBharat Bhushan {
156188365d17SBharat Bhushan     if ((nb_hw_breakpoint + nb_hw_watchpoint) >= ARRAY_SIZE(hw_debug_points)) {
156288365d17SBharat Bhushan         return -ENOBUFS;
156388365d17SBharat Bhushan     }
156488365d17SBharat Bhushan 
156588365d17SBharat Bhushan     hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].addr = addr;
156688365d17SBharat Bhushan     hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].type = type;
156788365d17SBharat Bhushan 
156888365d17SBharat Bhushan     switch (type) {
156988365d17SBharat Bhushan     case GDB_BREAKPOINT_HW:
157088365d17SBharat Bhushan         if (nb_hw_breakpoint >= max_hw_breakpoint) {
157188365d17SBharat Bhushan             return -ENOBUFS;
157288365d17SBharat Bhushan         }
157388365d17SBharat Bhushan 
157488365d17SBharat Bhushan         if (find_hw_breakpoint(addr, type) >= 0) {
157588365d17SBharat Bhushan             return -EEXIST;
157688365d17SBharat Bhushan         }
157788365d17SBharat Bhushan 
157888365d17SBharat Bhushan         nb_hw_breakpoint++;
157988365d17SBharat Bhushan         break;
158088365d17SBharat Bhushan 
158188365d17SBharat Bhushan     case GDB_WATCHPOINT_WRITE:
158288365d17SBharat Bhushan     case GDB_WATCHPOINT_READ:
158388365d17SBharat Bhushan     case GDB_WATCHPOINT_ACCESS:
158488365d17SBharat Bhushan         if (nb_hw_watchpoint >= max_hw_watchpoint) {
158588365d17SBharat Bhushan             return -ENOBUFS;
158688365d17SBharat Bhushan         }
158788365d17SBharat Bhushan 
158888365d17SBharat Bhushan         if (find_hw_breakpoint(addr, type) >= 0) {
158988365d17SBharat Bhushan             return -EEXIST;
159088365d17SBharat Bhushan         }
159188365d17SBharat Bhushan 
159288365d17SBharat Bhushan         nb_hw_watchpoint++;
159388365d17SBharat Bhushan         break;
159488365d17SBharat Bhushan 
159588365d17SBharat Bhushan     default:
159688365d17SBharat Bhushan         return -ENOSYS;
159788365d17SBharat Bhushan     }
159888365d17SBharat Bhushan 
159988365d17SBharat Bhushan     return 0;
160088365d17SBharat Bhushan }
160188365d17SBharat Bhushan 
160288365d17SBharat Bhushan int kvm_arch_remove_hw_breakpoint(target_ulong addr,
160388365d17SBharat Bhushan                                   target_ulong len, int type)
160488365d17SBharat Bhushan {
160588365d17SBharat Bhushan     int n;
160688365d17SBharat Bhushan 
160788365d17SBharat Bhushan     n = find_hw_breakpoint(addr, type);
160888365d17SBharat Bhushan     if (n < 0) {
160988365d17SBharat Bhushan         return -ENOENT;
161088365d17SBharat Bhushan     }
161188365d17SBharat Bhushan 
161288365d17SBharat Bhushan     switch (type) {
161388365d17SBharat Bhushan     case GDB_BREAKPOINT_HW:
161488365d17SBharat Bhushan         nb_hw_breakpoint--;
161588365d17SBharat Bhushan         break;
161688365d17SBharat Bhushan 
161788365d17SBharat Bhushan     case GDB_WATCHPOINT_WRITE:
161888365d17SBharat Bhushan     case GDB_WATCHPOINT_READ:
161988365d17SBharat Bhushan     case GDB_WATCHPOINT_ACCESS:
162088365d17SBharat Bhushan         nb_hw_watchpoint--;
162188365d17SBharat Bhushan         break;
162288365d17SBharat Bhushan 
162388365d17SBharat Bhushan     default:
162488365d17SBharat Bhushan         return -ENOSYS;
162588365d17SBharat Bhushan     }
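    /* Keep the table dense: move the previously-last entry into the slot
     * that was just freed. */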
162688365d17SBharat Bhushan     hw_debug_points[n] = hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint];
162788365d17SBharat Bhushan 
162888365d17SBharat Bhushan     return 0;
162988365d17SBharat Bhushan }
163088365d17SBharat Bhushan 
163188365d17SBharat Bhushan void kvm_arch_remove_all_hw_breakpoints(void)
163288365d17SBharat Bhushan {
163388365d17SBharat Bhushan     nb_hw_breakpoint = nb_hw_watchpoint = 0;
163488365d17SBharat Bhushan }
163588365d17SBharat Bhushan 
16368a0548f9SBharat Bhushan void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
16378a0548f9SBharat Bhushan {
163888365d17SBharat Bhushan     int n;
163988365d17SBharat Bhushan 
16408a0548f9SBharat Bhushan     /* Software Breakpoint updates */
16418a0548f9SBharat Bhushan     if (kvm_sw_breakpoints_active(cs)) {
16428a0548f9SBharat Bhushan         dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
16438a0548f9SBharat Bhushan     }
164488365d17SBharat Bhushan 
164588365d17SBharat Bhushan     assert((nb_hw_breakpoint + nb_hw_watchpoint)
164688365d17SBharat Bhushan            <= ARRAY_SIZE(hw_debug_points));
164788365d17SBharat Bhushan     assert((nb_hw_breakpoint + nb_hw_watchpoint) <= ARRAY_SIZE(dbg->arch.bp));
164888365d17SBharat Bhushan 
164988365d17SBharat Bhushan     if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
165088365d17SBharat Bhushan         dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
165188365d17SBharat Bhushan         memset(dbg->arch.bp, 0, sizeof(dbg->arch.bp));
165288365d17SBharat Bhushan         for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
165388365d17SBharat Bhushan             switch (hw_debug_points[n].type) {
165488365d17SBharat Bhushan             case GDB_BREAKPOINT_HW:
165588365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_BREAKPOINT;
165688365d17SBharat Bhushan                 break;
165788365d17SBharat Bhushan             case GDB_WATCHPOINT_WRITE:
165888365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE;
165988365d17SBharat Bhushan                 break;
166088365d17SBharat Bhushan             case GDB_WATCHPOINT_READ:
166188365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_READ;
166288365d17SBharat Bhushan                 break;
166388365d17SBharat Bhushan             case GDB_WATCHPOINT_ACCESS:
166488365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE |
166588365d17SBharat Bhushan                                         KVMPPC_DEBUG_WATCH_READ;
166688365d17SBharat Bhushan                 break;
166788365d17SBharat Bhushan             default:
166888365d17SBharat Bhushan                 cpu_abort(cs, "Unsupported breakpoint type\n");
166988365d17SBharat Bhushan             }
167088365d17SBharat Bhushan             dbg->arch.bp[n].addr = hw_debug_points[n].addr;
167188365d17SBharat Bhushan         }
167288365d17SBharat Bhushan     }
16738a0548f9SBharat Bhushan }
16748a0548f9SBharat Bhushan 
16758a0548f9SBharat Bhushan static int kvm_handle_debug(PowerPCCPU *cpu, struct kvm_run *run)
16768a0548f9SBharat Bhushan {
16778a0548f9SBharat Bhushan     CPUState *cs = CPU(cpu);
16788a0548f9SBharat Bhushan     CPUPPCState *env = &cpu->env;
16798a0548f9SBharat Bhushan     struct kvm_debug_exit_arch *arch_info = &run->debug.arch;
16808a0548f9SBharat Bhushan     int handle = 0;
168188365d17SBharat Bhushan     int n;
168288365d17SBharat Bhushan     int flag = 0;
16838a0548f9SBharat Bhushan 
168488365d17SBharat Bhushan     if (cs->singlestep_enabled) {
168588365d17SBharat Bhushan         handle = 1;
168688365d17SBharat Bhushan     } else if (arch_info->status) {
168788365d17SBharat Bhushan         if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
168888365d17SBharat Bhushan             if (arch_info->status & KVMPPC_DEBUG_BREAKPOINT) {
168988365d17SBharat Bhushan                 n = find_hw_breakpoint(arch_info->address, GDB_BREAKPOINT_HW);
169088365d17SBharat Bhushan                 if (n >= 0) {
169188365d17SBharat Bhushan                     handle = 1;
169288365d17SBharat Bhushan                 }
169388365d17SBharat Bhushan             } else if (arch_info->status & (KVMPPC_DEBUG_WATCH_READ |
169488365d17SBharat Bhushan                                             KVMPPC_DEBUG_WATCH_WRITE)) {
169588365d17SBharat Bhushan                 n = find_hw_watchpoint(arch_info->address,  &flag);
169688365d17SBharat Bhushan                 if (n >= 0) {
169788365d17SBharat Bhushan                     handle = 1;
169888365d17SBharat Bhushan                     cs->watchpoint_hit = &hw_watchpoint;
169988365d17SBharat Bhushan                     hw_watchpoint.vaddr = hw_debug_points[n].addr;
170088365d17SBharat Bhushan                     hw_watchpoint.flags = flag;
170188365d17SBharat Bhushan                 }
170288365d17SBharat Bhushan             }
170388365d17SBharat Bhushan         }
170488365d17SBharat Bhushan     } else if (kvm_find_sw_breakpoint(cs, arch_info->address)) {
17058a0548f9SBharat Bhushan         handle = 1;
17068a0548f9SBharat Bhushan     } else {
17078a0548f9SBharat Bhushan         /* QEMU is not able to handle this debug exception, so inject
17088a0548f9SBharat Bhushan          * a program exception into the guest.
17098a0548f9SBharat Bhushan          * Yes, a program exception, NOT a debug exception!
171088365d17SBharat Bhushan          * While QEMU is using the debug resources, debug exceptions
171188365d17SBharat Bhushan          * must always be enabled; to achieve this we set MSR_DE and
171288365d17SBharat Bhushan          * also MSRP_DEP so the guest cannot change MSR_DE.
171388365d17SBharat Bhushan          * When instead emulating debug resources for the guest, the
171488365d17SBharat Bhushan          * guest must control MSR_DE itself (enabling or disabling the
171588365d17SBharat Bhushan          * debug interrupt as needed).
171688365d17SBharat Bhushan          * Supporting both configurations at once is NOT possible, so
171788365d17SBharat Bhushan          * debug resources cannot be shared between QEMU and the
171888365d17SBharat Bhushan          * guest on the BookE architecture.  In the current design
171988365d17SBharat Bhushan          * QEMU gets priority over the guest: if QEMU is using the
172088365d17SBharat Bhushan          * debug resources then the guest cannot use them.
17218a0548f9SBharat Bhushan          * For software breakpoints QEMU uses a privileged instruction,
17228a0548f9SBharat Bhushan          * so we cannot be here because the guest raised a debug
17238a0548f9SBharat Bhushan          * exception; the only possibility is that the guest executed
17248a0548f9SBharat Bhushan          * a privileged / illegal instruction, and that is why we are
17258a0548f9SBharat Bhushan          * injecting a program interrupt.
17268a0548f9SBharat Bhushan          */
17278a0548f9SBharat Bhushan 
17288a0548f9SBharat Bhushan         cpu_synchronize_state(cs);
17298a0548f9SBharat Bhushan         /* env->nip is PC, so increment this by 4 to use
17308a0548f9SBharat Bhushan          * ppc_cpu_do_interrupt(), which sets srr0 = env->nip - 4.
17318a0548f9SBharat Bhushan          */
17328a0548f9SBharat Bhushan         env->nip += 4;
17338a0548f9SBharat Bhushan         cs->exception_index = POWERPC_EXCP_PROGRAM;
17348a0548f9SBharat Bhushan         env->error_code = POWERPC_EXCP_INVAL;
17358a0548f9SBharat Bhushan         ppc_cpu_do_interrupt(cs);
17368a0548f9SBharat Bhushan     }
17378a0548f9SBharat Bhushan 
17388a0548f9SBharat Bhushan     return handle;
17398a0548f9SBharat Bhushan }
17408a0548f9SBharat Bhushan 
174120d695a9SAndreas Färber int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
1742d76d1650Saurel32 {
174320d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
174420d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
1745bb4ea393SJan Kiszka     int ret;
1746d76d1650Saurel32 
17474b8523eeSJan Kiszka     qemu_mutex_lock_iothread();
17484b8523eeSJan Kiszka 
1749d76d1650Saurel32     switch (run->exit_reason) {
1750d76d1650Saurel32     case KVM_EXIT_DCR:
1751d76d1650Saurel32         if (run->dcr.is_write) {
1752da56ff91SPeter Maydell             DPRINTF("handle dcr write\n");
1753d76d1650Saurel32             ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data);
1754d76d1650Saurel32         } else {
1755da56ff91SPeter Maydell             DPRINTF("handle dcr read\n");
1756d76d1650Saurel32             ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data);
1757d76d1650Saurel32         }
1758d76d1650Saurel32         break;
1759d76d1650Saurel32     case KVM_EXIT_HLT:
1760da56ff91SPeter Maydell         DPRINTF("handle halt\n");
1761259186a7SAndreas Färber         ret = kvmppc_handle_halt(cpu);
1762d76d1650Saurel32         break;
1763c6304a4aSDavid Gibson #if defined(TARGET_PPC64)
1764f61b4bedSAlexander Graf     case KVM_EXIT_PAPR_HCALL:
1765da56ff91SPeter Maydell         DPRINTF("handle PAPR hypercall\n");
176620d695a9SAndreas Färber         run->papr_hcall.ret = spapr_hypercall(cpu,
1767aa100fa4SAndreas Färber                                               run->papr_hcall.nr,
1768f61b4bedSAlexander Graf                                               run->papr_hcall.args);
176978e8fde2SDavid Gibson         ret = 0;
1770f61b4bedSAlexander Graf         break;
1771f61b4bedSAlexander Graf #endif
17725b95b8b9SAlexander Graf     case KVM_EXIT_EPR:
1773da56ff91SPeter Maydell         DPRINTF("handle epr\n");
1774933b19eaSAlexander Graf         run->epr.epr = ldl_phys(cs->as, env->mpic_iack);
17755b95b8b9SAlexander Graf         ret = 0;
17765b95b8b9SAlexander Graf         break;
177731f2cb8fSBharat Bhushan     case KVM_EXIT_WATCHDOG:
1778da56ff91SPeter Maydell         DPRINTF("handle watchdog expiry\n");
177931f2cb8fSBharat Bhushan         watchdog_perform_action();
178031f2cb8fSBharat Bhushan         ret = 0;
178131f2cb8fSBharat Bhushan         break;
178231f2cb8fSBharat Bhushan 
17838a0548f9SBharat Bhushan     case KVM_EXIT_DEBUG:
17848a0548f9SBharat Bhushan         DPRINTF("handle debug exception\n");
17858a0548f9SBharat Bhushan         if (kvm_handle_debug(cpu, run)) {
17868a0548f9SBharat Bhushan             ret = EXCP_DEBUG;
17878a0548f9SBharat Bhushan             break;
17888a0548f9SBharat Bhushan         }
17898a0548f9SBharat Bhushan         /* re-enter, this exception was guest-internal */
17908a0548f9SBharat Bhushan         ret = 0;
17918a0548f9SBharat Bhushan         break;
17928a0548f9SBharat Bhushan 
179373aaec4aSJan Kiszka     default:
179473aaec4aSJan Kiszka         fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
179573aaec4aSJan Kiszka         ret = -1;
179673aaec4aSJan Kiszka         break;
1797d76d1650Saurel32     }
1798d76d1650Saurel32 
17994b8523eeSJan Kiszka     qemu_mutex_unlock_iothread();
1800d76d1650Saurel32     return ret;
1801d76d1650Saurel32 }
1802d76d1650Saurel32 
180331f2cb8fSBharat Bhushan int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
180431f2cb8fSBharat Bhushan {
180531f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
180631f2cb8fSBharat Bhushan     uint32_t bits = tsr_bits;
180731f2cb8fSBharat Bhushan     struct kvm_one_reg reg = {
180831f2cb8fSBharat Bhushan         .id = KVM_REG_PPC_OR_TSR,
180931f2cb8fSBharat Bhushan         .addr = (uintptr_t) &bits,
181031f2cb8fSBharat Bhushan     };
181131f2cb8fSBharat Bhushan 
181231f2cb8fSBharat Bhushan     return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
181331f2cb8fSBharat Bhushan }
181431f2cb8fSBharat Bhushan 
181531f2cb8fSBharat Bhushan int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
181631f2cb8fSBharat Bhushan {
181831f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
181931f2cb8fSBharat Bhushan     uint32_t bits = tsr_bits;
182031f2cb8fSBharat Bhushan     struct kvm_one_reg reg = {
182131f2cb8fSBharat Bhushan         .id = KVM_REG_PPC_CLEAR_TSR,
182231f2cb8fSBharat Bhushan         .addr = (uintptr_t) &bits,
182331f2cb8fSBharat Bhushan     };
182431f2cb8fSBharat Bhushan 
182531f2cb8fSBharat Bhushan     return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
182631f2cb8fSBharat Bhushan }
182731f2cb8fSBharat Bhushan 
182831f2cb8fSBharat Bhushan int kvmppc_set_tcr(PowerPCCPU *cpu)
182931f2cb8fSBharat Bhushan {
183031f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
183131f2cb8fSBharat Bhushan     CPUPPCState *env = &cpu->env;
183231f2cb8fSBharat Bhushan     uint32_t tcr = env->spr[SPR_BOOKE_TCR];
183331f2cb8fSBharat Bhushan 
183431f2cb8fSBharat Bhushan     struct kvm_one_reg reg = {
183531f2cb8fSBharat Bhushan         .id = KVM_REG_PPC_TCR,
183631f2cb8fSBharat Bhushan         .addr = (uintptr_t) &tcr,
183731f2cb8fSBharat Bhushan     };
183831f2cb8fSBharat Bhushan 
183931f2cb8fSBharat Bhushan     return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
184031f2cb8fSBharat Bhushan }
184131f2cb8fSBharat Bhushan 
184231f2cb8fSBharat Bhushan int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu)
184331f2cb8fSBharat Bhushan {
184431f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
184531f2cb8fSBharat Bhushan     int ret;
184631f2cb8fSBharat Bhushan 
184731f2cb8fSBharat Bhushan     if (!kvm_enabled()) {
184831f2cb8fSBharat Bhushan         return -1;
184931f2cb8fSBharat Bhushan     }
185031f2cb8fSBharat Bhushan 
185131f2cb8fSBharat Bhushan     if (!cap_ppc_watchdog) {
185231f2cb8fSBharat Bhushan         printf("warning: KVM does not support watchdog\n");
185331f2cb8fSBharat Bhushan         return -1;
185431f2cb8fSBharat Bhushan     }
185531f2cb8fSBharat Bhushan 
185648add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_BOOKE_WATCHDOG, 0);
185731f2cb8fSBharat Bhushan     if (ret < 0) {
185831f2cb8fSBharat Bhushan         fprintf(stderr, "%s: couldn't enable KVM_CAP_PPC_BOOKE_WATCHDOG: %s\n",
185931f2cb8fSBharat Bhushan                 __func__, strerror(-ret));
186031f2cb8fSBharat Bhushan         return ret;
186131f2cb8fSBharat Bhushan     }
186231f2cb8fSBharat Bhushan 
186331f2cb8fSBharat Bhushan     return ret;
186431f2cb8fSBharat Bhushan }
186531f2cb8fSBharat Bhushan 
1866dc333cd6SAlexander Graf static int read_cpuinfo(const char *field, char *value, int len)
1867dc333cd6SAlexander Graf {
1868dc333cd6SAlexander Graf     FILE *f;
1869dc333cd6SAlexander Graf     int ret = -1;
1870dc333cd6SAlexander Graf     int field_len = strlen(field);
1871dc333cd6SAlexander Graf     char line[512];
1872dc333cd6SAlexander Graf 
1873dc333cd6SAlexander Graf     f = fopen("/proc/cpuinfo", "r");
1874dc333cd6SAlexander Graf     if (!f) {
1875dc333cd6SAlexander Graf         return -1;
1876dc333cd6SAlexander Graf     }
1877dc333cd6SAlexander Graf 
1878dc333cd6SAlexander Graf     do {
1879dc333cd6SAlexander Graf         if (!fgets(line, sizeof(line), f)) {
1880dc333cd6SAlexander Graf             break;
1881dc333cd6SAlexander Graf         }
1882dc333cd6SAlexander Graf         if (!strncmp(line, field, field_len)) {
1883ae215068SJim Meyering             pstrcpy(value, len, line);
1884dc333cd6SAlexander Graf             ret = 0;
1885dc333cd6SAlexander Graf             break;
1886dc333cd6SAlexander Graf         }
1887dc333cd6SAlexander Graf     } while (*line);
1888dc333cd6SAlexander Graf 
1889dc333cd6SAlexander Graf     fclose(f);
1890dc333cd6SAlexander Graf 
1891dc333cd6SAlexander Graf     return ret;
1892dc333cd6SAlexander Graf }
1893dc333cd6SAlexander Graf 
1894dc333cd6SAlexander Graf uint32_t kvmppc_get_tbfreq(void)
1895dc333cd6SAlexander Graf {
1896dc333cd6SAlexander Graf     char line[512];
1897dc333cd6SAlexander Graf     char *ns;
189873bcb24dSRutuja Shah     uint32_t retval = NANOSECONDS_PER_SECOND;
1899dc333cd6SAlexander Graf 
1900dc333cd6SAlexander Graf     if (read_cpuinfo("timebase", line, sizeof(line))) {
1901dc333cd6SAlexander Graf         return retval;
1902dc333cd6SAlexander Graf     }
1903dc333cd6SAlexander Graf 
1904dc333cd6SAlexander Graf     if (!(ns = strchr(line, ':'))) {
1905dc333cd6SAlexander Graf         return retval;
1906dc333cd6SAlexander Graf     }
1907dc333cd6SAlexander Graf 
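    /* Step past the ':' separator and parse the timebase value after it */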
1908dc333cd6SAlexander Graf     ns++;
1909dc333cd6SAlexander Graf 
1910f9b8e7f6SShraddha Barke     return atoi(ns);
1911ef951443SNikunj A Dadhania }
1912ef951443SNikunj A Dadhania 
1913ef951443SNikunj A Dadhania bool kvmppc_get_host_serial(char **value)
1914ef951443SNikunj A Dadhania {
1915ef951443SNikunj A Dadhania     return g_file_get_contents("/proc/device-tree/system-id", value, NULL,
1916ef951443SNikunj A Dadhania                                NULL);
1917ef951443SNikunj A Dadhania }
1918ef951443SNikunj A Dadhania 
1919ef951443SNikunj A Dadhania bool kvmppc_get_host_model(char **value)
1920ef951443SNikunj A Dadhania {
1921ef951443SNikunj A Dadhania     return g_file_get_contents("/proc/device-tree/model", value, NULL, NULL);
1922dc333cd6SAlexander Graf }
19234513d923SGleb Natapov 
1924eadaada1SAlexander Graf /* Try to find a device tree node for a CPU with clock-frequency property */
1925eadaada1SAlexander Graf static int kvmppc_find_cpu_dt(char *buf, int buf_len)
1926eadaada1SAlexander Graf {
1927eadaada1SAlexander Graf     struct dirent *dirp;
1928eadaada1SAlexander Graf     DIR *dp;
1929eadaada1SAlexander Graf 
1930eadaada1SAlexander Graf     if ((dp = opendir(PROC_DEVTREE_CPU)) == NULL) {
1931eadaada1SAlexander Graf         printf("Can't open directory " PROC_DEVTREE_CPU "\n");
1932eadaada1SAlexander Graf         return -1;
1933eadaada1SAlexander Graf     }
1934eadaada1SAlexander Graf 
1935eadaada1SAlexander Graf     buf[0] = '\0';
1936eadaada1SAlexander Graf     while ((dirp = readdir(dp)) != NULL) {
1937eadaada1SAlexander Graf         FILE *f;
1938eadaada1SAlexander Graf         snprintf(buf, buf_len, "%s%s/clock-frequency", PROC_DEVTREE_CPU,
1939eadaada1SAlexander Graf                  dirp->d_name);
1940eadaada1SAlexander Graf         f = fopen(buf, "r");
1941eadaada1SAlexander Graf         if (f) {
1942eadaada1SAlexander Graf             snprintf(buf, buf_len, "%s%s", PROC_DEVTREE_CPU, dirp->d_name);
1943eadaada1SAlexander Graf             fclose(f);
1944eadaada1SAlexander Graf             break;
1945eadaada1SAlexander Graf         }
1946eadaada1SAlexander Graf         buf[0] = '\0';
1947eadaada1SAlexander Graf     }
1948eadaada1SAlexander Graf     closedir(dp);
1949eadaada1SAlexander Graf     if (buf[0] == '\0') {
1950eadaada1SAlexander Graf         printf("Unknown host!\n");
1951eadaada1SAlexander Graf         return -1;
1952eadaada1SAlexander Graf     }
1953eadaada1SAlexander Graf 
1954eadaada1SAlexander Graf     return 0;
1955eadaada1SAlexander Graf }
1956eadaada1SAlexander Graf 
19577d94a30bSSukadev Bhattiprolu static uint64_t kvmppc_read_int_dt(const char *filename)
1958eadaada1SAlexander Graf {
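    /* The property is a big-endian integer, either 32 or 64 bits wide;
     * read it into a union large enough for both. */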
19599bc884b7SDavid Gibson     union {
19609bc884b7SDavid Gibson         uint32_t v32;
19619bc884b7SDavid Gibson         uint64_t v64;
19629bc884b7SDavid Gibson     } u;
1963eadaada1SAlexander Graf     FILE *f;
1964eadaada1SAlexander Graf     int len;
1965eadaada1SAlexander Graf 
19667d94a30bSSukadev Bhattiprolu     f = fopen(filename, "rb");
1967eadaada1SAlexander Graf     if (!f) {
1968eadaada1SAlexander Graf         return -1;
1969eadaada1SAlexander Graf     }
1970eadaada1SAlexander Graf 
19719bc884b7SDavid Gibson     len = fread(&u, 1, sizeof(u), f);
1972eadaada1SAlexander Graf     fclose(f);
1973eadaada1SAlexander Graf     switch (len) {
19749bc884b7SDavid Gibson     case 4:
19759bc884b7SDavid Gibson         /* property is a 32-bit quantity */
19769bc884b7SDavid Gibson         return be32_to_cpu(u.v32);
19779bc884b7SDavid Gibson     case 8:
19789bc884b7SDavid Gibson         return be64_to_cpu(u.v64);
1979eadaada1SAlexander Graf     }
1980eadaada1SAlexander Graf 
1981eadaada1SAlexander Graf     return 0;
1982eadaada1SAlexander Graf }
1983eadaada1SAlexander Graf 
19847d94a30bSSukadev Bhattiprolu /* Read a CPU node property from the host device tree that's a single
19857d94a30bSSukadev Bhattiprolu  * integer (32-bit or 64-bit).  Returns (uint64_t)-1 if the property
19867d94a30bSSukadev Bhattiprolu  * can't be found or opened, and 0 if its format isn't understood. */
19887d94a30bSSukadev Bhattiprolu static uint64_t kvmppc_read_int_cpu_dt(const char *propname)
19897d94a30bSSukadev Bhattiprolu {
19907d94a30bSSukadev Bhattiprolu     char buf[PATH_MAX], *tmp;
19917d94a30bSSukadev Bhattiprolu     uint64_t val;
19927d94a30bSSukadev Bhattiprolu 
19937d94a30bSSukadev Bhattiprolu     if (kvmppc_find_cpu_dt(buf, sizeof(buf))) {
19947d94a30bSSukadev Bhattiprolu         return -1;
19957d94a30bSSukadev Bhattiprolu     }
19967d94a30bSSukadev Bhattiprolu 
19977d94a30bSSukadev Bhattiprolu     tmp = g_strdup_printf("%s/%s", buf, propname);
19987d94a30bSSukadev Bhattiprolu     val = kvmppc_read_int_dt(tmp);
19997d94a30bSSukadev Bhattiprolu     g_free(tmp);
20007d94a30bSSukadev Bhattiprolu 
20017d94a30bSSukadev Bhattiprolu     return val;
20027d94a30bSSukadev Bhattiprolu }
20037d94a30bSSukadev Bhattiprolu 
20049bc884b7SDavid Gibson uint64_t kvmppc_get_clockfreq(void)
20059bc884b7SDavid Gibson {
20069bc884b7SDavid Gibson     return kvmppc_read_int_cpu_dt("clock-frequency");
20079bc884b7SDavid Gibson }
20089bc884b7SDavid Gibson 
20096659394fSDavid Gibson uint32_t kvmppc_get_vmx(void)
20106659394fSDavid Gibson {
20116659394fSDavid Gibson     return kvmppc_read_int_cpu_dt("ibm,vmx");
20126659394fSDavid Gibson }
20136659394fSDavid Gibson 
20146659394fSDavid Gibson uint32_t kvmppc_get_dfp(void)
20156659394fSDavid Gibson {
20166659394fSDavid Gibson     return kvmppc_read_int_cpu_dt("ibm,dfp");
20176659394fSDavid Gibson }
20186659394fSDavid Gibson 
20191a61a9aeSStuart Yoder static int kvmppc_get_pvinfo(CPUPPCState *env, struct kvm_ppc_pvinfo *pvinfo)
202045024f09SAlexander Graf {
2021a60f24b5SAndreas Färber     PowerPCCPU *cpu = ppc_env_get_cpu(env);
2022a60f24b5SAndreas Färber     CPUState *cs = CPU(cpu);
202345024f09SAlexander Graf 
20246fd33a75SAlexander Graf     if (kvm_vm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
20251a61a9aeSStuart Yoder         !kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_PVINFO, pvinfo)) {
20261a61a9aeSStuart Yoder         return 0;
20271a61a9aeSStuart Yoder     }
202845024f09SAlexander Graf 
20291a61a9aeSStuart Yoder     return 1;
20301a61a9aeSStuart Yoder }
20311a61a9aeSStuart Yoder 
20321a61a9aeSStuart Yoder int kvmppc_get_hasidle(CPUPPCState *env)
20331a61a9aeSStuart Yoder {
20341a61a9aeSStuart Yoder     struct kvm_ppc_pvinfo pvinfo;
20351a61a9aeSStuart Yoder 
20361a61a9aeSStuart Yoder     if (!kvmppc_get_pvinfo(env, &pvinfo) &&
20371a61a9aeSStuart Yoder         (pvinfo.flags & KVM_PPC_PVINFO_FLAGS_EV_IDLE)) {
20381a61a9aeSStuart Yoder         return 1;
20391a61a9aeSStuart Yoder     }
20401a61a9aeSStuart Yoder 
20411a61a9aeSStuart Yoder     return 0;
20421a61a9aeSStuart Yoder }
20431a61a9aeSStuart Yoder 
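/* Copy the KVM-provided hypercall instruction sequence into buf.
 * Returns 0 when the host supplied the real sequence, 1 when the
 * always-failing fallback below is used instead. */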
20441a61a9aeSStuart Yoder int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
20451a61a9aeSStuart Yoder {
20461a61a9aeSStuart Yoder     uint32_t *hc = (uint32_t *)buf;
20471a61a9aeSStuart Yoder     struct kvm_ppc_pvinfo pvinfo;
20481a61a9aeSStuart Yoder 
20491a61a9aeSStuart Yoder     if (!kvmppc_get_pvinfo(env, &pvinfo)) {
20501a61a9aeSStuart Yoder         memcpy(buf, pvinfo.hcall, buf_len);
205145024f09SAlexander Graf         return 0;
205245024f09SAlexander Graf     }
205345024f09SAlexander Graf 
205445024f09SAlexander Graf     /*
2055d13fc32eSAlexander Graf      * Fall back to hypercalls that always fail, regardless of endianness:
205645024f09SAlexander Graf      *
2057d13fc32eSAlexander Graf      *     tdi 0,r0,72 (becomes b .+8 in wrong endian, nop in good endian)
205845024f09SAlexander Graf      *     li r3, -1
2059d13fc32eSAlexander Graf      *     b .+8       (becomes nop in wrong endian)
2060d13fc32eSAlexander Graf      *     bswap32(li r3, -1)
206145024f09SAlexander Graf      */
206245024f09SAlexander Graf 
2063d13fc32eSAlexander Graf     hc[0] = cpu_to_be32(0x08000048);
2064d13fc32eSAlexander Graf     hc[1] = cpu_to_be32(0x3860ffff);
2065d13fc32eSAlexander Graf     hc[2] = cpu_to_be32(0x48000008);
2066d13fc32eSAlexander Graf     hc[3] = cpu_to_be32(bswap32(0x3860ffff));
206745024f09SAlexander Graf 
20680ddbd053SAlexey Kardashevskiy     return 1;
206945024f09SAlexander Graf }
207045024f09SAlexander Graf 
2071026bfd89SDavid Gibson static inline int kvmppc_enable_hcall(KVMState *s, target_ulong hcall)
2072026bfd89SDavid Gibson {
2073026bfd89SDavid Gibson     return kvm_vm_enable_cap(s, KVM_CAP_PPC_ENABLE_HCALL, 0, hcall, 1);
2074026bfd89SDavid Gibson }
2075026bfd89SDavid Gibson 
2076026bfd89SDavid Gibson void kvmppc_enable_logical_ci_hcalls(void)
2077026bfd89SDavid Gibson {
2078026bfd89SDavid Gibson     /*
2079026bfd89SDavid Gibson      * FIXME: it would be nice if we could detect the case where
2080026bfd89SDavid Gibson      * we're using a device which requires the in-kernel
2081026bfd89SDavid Gibson      * implementation of these hcalls but the kernel lacks it, and
2082026bfd89SDavid Gibson      * produce a warning.
2083026bfd89SDavid Gibson      */
2084026bfd89SDavid Gibson     kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_LOAD);
2085026bfd89SDavid Gibson     kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_STORE);
2086026bfd89SDavid Gibson }
2087026bfd89SDavid Gibson 
2088ef9971ddSAlexey Kardashevskiy void kvmppc_enable_set_mode_hcall(void)
2089ef9971ddSAlexey Kardashevskiy {
2090ef9971ddSAlexey Kardashevskiy     kvmppc_enable_hcall(kvm_state, H_SET_MODE);
2091ef9971ddSAlexey Kardashevskiy }
2092ef9971ddSAlexey Kardashevskiy 
20935145ad4fSNathan Whitehorn void kvmppc_enable_clear_ref_mod_hcalls(void)
20945145ad4fSNathan Whitehorn {
20955145ad4fSNathan Whitehorn     kvmppc_enable_hcall(kvm_state, H_CLEAR_REF);
20965145ad4fSNathan Whitehorn     kvmppc_enable_hcall(kvm_state, H_CLEAR_MOD);
20975145ad4fSNathan Whitehorn }
20985145ad4fSNathan Whitehorn 
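/* Switch the vCPU into PAPR (pseries) mode; fatal if the vCPU type or
 * KVM version doesn't support KVM_CAP_PPC_PAPR. */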
20991bc22652SAndreas Färber void kvmppc_set_papr(PowerPCCPU *cpu)
2100f61b4bedSAlexander Graf {
21011bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
2102f61b4bedSAlexander Graf     int ret;
2103f61b4bedSAlexander Graf 
210448add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_PAPR, 0);
2105f61b4bedSAlexander Graf     if (ret) {
2106072ed5f2SThomas Huth         error_report("This vCPU type or KVM version does not support PAPR");
2107072ed5f2SThomas Huth         exit(1);
2108f61b4bedSAlexander Graf     }
21099b00ea49SDavid Gibson 
21109b00ea49SDavid Gibson     /* Update the capability flag so we sync the right information
21119b00ea49SDavid Gibson      * with kvm */
21129b00ea49SDavid Gibson     cap_papr = 1;
2113f1af19d7SDavid Gibson }
2114f61b4bedSAlexander Graf 
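/* Set the logical PVR used for architecture compatibility mode via the
 * KVM_REG_PPC_ARCH_COMPAT one_reg interface. */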
2115d6e166c0SDavid Gibson int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr)
21166db5bb0fSAlexey Kardashevskiy {
2117d6e166c0SDavid Gibson     return kvm_set_one_reg(CPU(cpu), KVM_REG_PPC_ARCH_COMPAT, &compat_pvr);
21186db5bb0fSAlexey Kardashevskiy }
21196db5bb0fSAlexey Kardashevskiy 
21205b95b8b9SAlexander Graf void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy)
21215b95b8b9SAlexander Graf {
21225b95b8b9SAlexander Graf     CPUState *cs = CPU(cpu);
21235b95b8b9SAlexander Graf     int ret;
21245b95b8b9SAlexander Graf 
212548add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_EPR, 0, mpic_proxy);
21265b95b8b9SAlexander Graf     if (ret && mpic_proxy) {
2127072ed5f2SThomas Huth         error_report("This KVM version does not support EPR");
2128072ed5f2SThomas Huth         exit(1);
21295b95b8b9SAlexander Graf     }
21305b95b8b9SAlexander Graf }
21315b95b8b9SAlexander Graf 
2132e97c3636SDavid Gibson int kvmppc_smt_threads(void)
2133e97c3636SDavid Gibson {
2134e97c3636SDavid Gibson     return cap_ppc_smt ? cap_ppc_smt : 1;
2135e97c3636SDavid Gibson }
2136e97c3636SDavid Gibson 
21377f763a5dSDavid Gibson #ifdef TARGET_PPC64
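/* Allocate a contiguous Real Mode Area via KVM_ALLOCATE_RMA when the
 * host requires one (cap_ppc_rma == 2) and map it into QEMU.  Returns
 * the mapped size (capped at 256MiB), 0 if no RMA allocation is
 * needed, or -1 on error. */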
2138658fa66bSAlexey Kardashevskiy off_t kvmppc_alloc_rma(void **rma)
2139354ac20aSDavid Gibson {
2140354ac20aSDavid Gibson     off_t size;
2141354ac20aSDavid Gibson     int fd;
2142354ac20aSDavid Gibson     struct kvm_allocate_rma ret;
2143354ac20aSDavid Gibson 
2144354ac20aSDavid Gibson     /* If cap_ppc_rma == 0, contiguous RMA allocation is not supported
2145354ac20aSDavid Gibson      * if cap_ppc_rma == 1, contiguous RMA allocation is supported, but
2146354ac20aSDavid Gibson      *                      not necessary on this hardware
2147354ac20aSDavid Gibson      * if cap_ppc_rma == 2, contiguous RMA allocation is needed on this hardware
2148354ac20aSDavid Gibson      *
2149354ac20aSDavid Gibson      * FIXME: We should allow the user to force contiguous RMA
2150354ac20aSDavid Gibson      * allocation in the cap_ppc_rma==1 case.
2151354ac20aSDavid Gibson      */
2152354ac20aSDavid Gibson     if (cap_ppc_rma < 2) {
2153354ac20aSDavid Gibson         return 0;
2154354ac20aSDavid Gibson     }
2155354ac20aSDavid Gibson 
2156354ac20aSDavid Gibson     fd = kvm_vm_ioctl(kvm_state, KVM_ALLOCATE_RMA, &ret);
2157354ac20aSDavid Gibson     if (fd < 0) {
2158354ac20aSDavid Gibson         fprintf(stderr, "KVM: Error on KVM_ALLOCATE_RMA: %s\n",
2159354ac20aSDavid Gibson                 strerror(errno));
2160354ac20aSDavid Gibson         return -1;
2161354ac20aSDavid Gibson     }
2162354ac20aSDavid Gibson 
2163354ac20aSDavid Gibson     size = MIN(ret.rma_size, 256ul << 20);
2164354ac20aSDavid Gibson 
2165658fa66bSAlexey Kardashevskiy     *rma = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
2166658fa66bSAlexey Kardashevskiy     if (*rma == MAP_FAILED) {
2167354ac20aSDavid Gibson         fprintf(stderr, "KVM: Error mapping RMA: %s\n", strerror(errno));
2168354ac20aSDavid Gibson         return -1;
2169354ac20aSDavid Gibson     }
2170354ac20aSDavid Gibson 
2171354ac20aSDavid Gibson     return size;
2172354ac20aSDavid Gibson }
2173354ac20aSDavid Gibson 
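/* Clamp the requested RMA size based on the largest host page size
 * usable for backing guest RAM and on the hash table size; when
 * cap_ppc_rma >= 2 the current size is returned unchanged. */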
21747f763a5dSDavid Gibson uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift)
21757f763a5dSDavid Gibson {
2176f36951c1SDavid Gibson     struct kvm_ppc_smmu_info info;
2177f36951c1SDavid Gibson     long rampagesize, best_page_shift;
2178f36951c1SDavid Gibson     int i;
2179f36951c1SDavid Gibson 
21807f763a5dSDavid Gibson     if (cap_ppc_rma >= 2) {
21817f763a5dSDavid Gibson         return current_size;
21827f763a5dSDavid Gibson     }
2183f36951c1SDavid Gibson 
2184f36951c1SDavid Gibson     /* Find the largest hardware-supported page size that's less than
2185f36951c1SDavid Gibson      * or equal to the (logical) backing page size of guest RAM */
2186182735efSAndreas Färber     kvm_get_smmu_info(POWERPC_CPU(first_cpu), &info);
21879c607668SAlexey Kardashevskiy     rampagesize = qemu_getrampagesize();
2188f36951c1SDavid Gibson     best_page_shift = 0;
2189f36951c1SDavid Gibson 
2190f36951c1SDavid Gibson     for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
2191f36951c1SDavid Gibson         struct kvm_ppc_one_seg_page_size *sps = &info.sps[i];
2192f36951c1SDavid Gibson 
2193f36951c1SDavid Gibson         if (!sps->page_shift) {
2194f36951c1SDavid Gibson             continue;
2195f36951c1SDavid Gibson         }
2196f36951c1SDavid Gibson 
2197f36951c1SDavid Gibson         if ((sps->page_shift > best_page_shift)
2198f36951c1SDavid Gibson             && ((1UL << sps->page_shift) <= rampagesize)) {
2199f36951c1SDavid Gibson             best_page_shift = sps->page_shift;
2200f36951c1SDavid Gibson         }
2201f36951c1SDavid Gibson     }
2202f36951c1SDavid Gibson 
22037f763a5dSDavid Gibson     return MIN(current_size,
2204f36951c1SDavid Gibson                1ULL << (best_page_shift + hash_shift - 7));
22057f763a5dSDavid Gibson }
22067f763a5dSDavid Gibson #endif
22077f763a5dSDavid Gibson 
2208da95324eSAlexey Kardashevskiy bool kvmppc_spapr_use_multitce(void)
2209da95324eSAlexey Kardashevskiy {
2210da95324eSAlexey Kardashevskiy     return cap_spapr_multitce;
2211da95324eSAlexey Kardashevskiy }
2212da95324eSAlexey Kardashevskiy 
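/* Ask KVM to handle H_PUT_TCE_INDIRECT and H_STUFF_TCE in the kernel.
 * Returns 0 on success, an error value otherwise. */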
22133dc410aeSAlexey Kardashevskiy int kvmppc_spapr_enable_inkernel_multitce(void)
22143dc410aeSAlexey Kardashevskiy {
22153dc410aeSAlexey Kardashevskiy     int ret;
22163dc410aeSAlexey Kardashevskiy 
22173dc410aeSAlexey Kardashevskiy     ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_ENABLE_HCALL, 0,
22183dc410aeSAlexey Kardashevskiy                             H_PUT_TCE_INDIRECT, 1);
22193dc410aeSAlexey Kardashevskiy     if (!ret) {
22203dc410aeSAlexey Kardashevskiy         ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_ENABLE_HCALL, 0,
22213dc410aeSAlexey Kardashevskiy                                 H_STUFF_TCE, 1);
22223dc410aeSAlexey Kardashevskiy     }
22233dc410aeSAlexey Kardashevskiy 
22243dc410aeSAlexey Kardashevskiy     return ret;
22253dc410aeSAlexey Kardashevskiy }
22263dc410aeSAlexey Kardashevskiy 
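/* Create an in-kernel TCE (IOMMU) table for the given LIOBN, using the
 * 64-bit ioctl when available, and mmap() it into QEMU.  Returns the
 * mapping (with the fd stored in *pfd) or NULL if the kernel can't
 * provide a suitable table. */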
2227d6ee2a7cSAlexey Kardashevskiy void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift,
2228d6ee2a7cSAlexey Kardashevskiy                               uint64_t bus_offset, uint32_t nb_table,
2229d6ee2a7cSAlexey Kardashevskiy                               int *pfd, bool need_vfio)
22300f5cb298SDavid Gibson {
22310f5cb298SDavid Gibson     long len;
22320f5cb298SDavid Gibson     int fd;
22330f5cb298SDavid Gibson     void *table;
22340f5cb298SDavid Gibson 
2235b5aec396SDavid Gibson     /* Must set fd to -1 so we don't try to munmap when called for
2236b5aec396SDavid Gibson      * destroying the table, which the upper layers -will- do
2237b5aec396SDavid Gibson      */
2238b5aec396SDavid Gibson     *pfd = -1;
22396a81dd17SDavid Gibson     if (!cap_spapr_tce || (need_vfio && !cap_spapr_vfio)) {
22400f5cb298SDavid Gibson         return NULL;
22410f5cb298SDavid Gibson     }
22420f5cb298SDavid Gibson 
2243d6ee2a7cSAlexey Kardashevskiy     if (cap_spapr_tce_64) {
2244d6ee2a7cSAlexey Kardashevskiy         struct kvm_create_spapr_tce_64 args = {
2245d6ee2a7cSAlexey Kardashevskiy             .liobn = liobn,
2246d6ee2a7cSAlexey Kardashevskiy             .page_shift = page_shift,
2247d6ee2a7cSAlexey Kardashevskiy             .offset = bus_offset >> page_shift,
2248d6ee2a7cSAlexey Kardashevskiy             .size = nb_table,
2249d6ee2a7cSAlexey Kardashevskiy             .flags = 0
2250d6ee2a7cSAlexey Kardashevskiy         };
2251d6ee2a7cSAlexey Kardashevskiy         fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE_64, &args);
2252d6ee2a7cSAlexey Kardashevskiy         if (fd < 0) {
2253d6ee2a7cSAlexey Kardashevskiy             fprintf(stderr,
2254d6ee2a7cSAlexey Kardashevskiy                     "KVM: Failed to create TCE64 table for liobn 0x%x\n",
2255d6ee2a7cSAlexey Kardashevskiy                     liobn);
2256d6ee2a7cSAlexey Kardashevskiy             return NULL;
2257d6ee2a7cSAlexey Kardashevskiy         }
2258d6ee2a7cSAlexey Kardashevskiy     } else if (cap_spapr_tce) {
2259d6ee2a7cSAlexey Kardashevskiy         uint64_t window_size = (uint64_t) nb_table << page_shift;
2260d6ee2a7cSAlexey Kardashevskiy         struct kvm_create_spapr_tce args = {
2261d6ee2a7cSAlexey Kardashevskiy             .liobn = liobn,
2262d6ee2a7cSAlexey Kardashevskiy             .window_size = window_size,
2263d6ee2a7cSAlexey Kardashevskiy         };
2264d6ee2a7cSAlexey Kardashevskiy         if ((window_size != args.window_size) || bus_offset) {
2265d6ee2a7cSAlexey Kardashevskiy             return NULL;
2266d6ee2a7cSAlexey Kardashevskiy         }
22670f5cb298SDavid Gibson         fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE, &args);
22680f5cb298SDavid Gibson         if (fd < 0) {
2269b5aec396SDavid Gibson             fprintf(stderr, "KVM: Failed to create TCE table for liobn 0x%x\n",
2270b5aec396SDavid Gibson                     liobn);
22710f5cb298SDavid Gibson             return NULL;
22720f5cb298SDavid Gibson         }
2273d6ee2a7cSAlexey Kardashevskiy     } else {
2274d6ee2a7cSAlexey Kardashevskiy         return NULL;
2275d6ee2a7cSAlexey Kardashevskiy     }
22760f5cb298SDavid Gibson 
2277d6ee2a7cSAlexey Kardashevskiy     len = nb_table * sizeof(uint64_t);
22780f5cb298SDavid Gibson     /* FIXME: round this up to page size */
22790f5cb298SDavid Gibson 
228074b41e56SDavid Gibson     table = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
22810f5cb298SDavid Gibson     if (table == MAP_FAILED) {
2282b5aec396SDavid Gibson         fprintf(stderr, "KVM: Failed to map TCE table for liobn 0x%x\n",
2283b5aec396SDavid Gibson                 liobn);
22840f5cb298SDavid Gibson         close(fd);
22850f5cb298SDavid Gibson         return NULL;
22860f5cb298SDavid Gibson     }
22870f5cb298SDavid Gibson 
22880f5cb298SDavid Gibson     *pfd = fd;
22890f5cb298SDavid Gibson     return table;
22900f5cb298SDavid Gibson }
22910f5cb298SDavid Gibson 
2292523e7b8aSAlexey Kardashevskiy int kvmppc_remove_spapr_tce(void *table, int fd, uint32_t nb_table)
22930f5cb298SDavid Gibson {
22940f5cb298SDavid Gibson     long len;
22950f5cb298SDavid Gibson 
22960f5cb298SDavid Gibson     if (fd < 0) {
22970f5cb298SDavid Gibson         return -1;
22980f5cb298SDavid Gibson     }
22990f5cb298SDavid Gibson 
2300523e7b8aSAlexey Kardashevskiy     len = nb_table * sizeof(uint64_t);
23010f5cb298SDavid Gibson     if ((munmap(table, len) < 0) ||
23020f5cb298SDavid Gibson         (close(fd) < 0)) {
2303b5aec396SDavid Gibson         fprintf(stderr, "KVM: Unexpected error removing TCE table: %s",
2304b5aec396SDavid Gibson                 strerror(errno));
23050f5cb298SDavid Gibson         /* Leak the table */
23060f5cb298SDavid Gibson     }
23070f5cb298SDavid Gibson 
23080f5cb298SDavid Gibson     return 0;
23090f5cb298SDavid Gibson }
23100f5cb298SDavid Gibson 
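/* Ask the kernel to allocate or reset the guest hash page table.
 * Returns the shift of the kernel-allocated HPT, 0 if QEMU should
 * allocate the htab itself, or a negative error. */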
23117f763a5dSDavid Gibson int kvmppc_reset_htab(int shift_hint)
23127f763a5dSDavid Gibson {
23137f763a5dSDavid Gibson     uint32_t shift = shift_hint;
23147f763a5dSDavid Gibson 
2315ace9a2cbSDavid Gibson     if (!kvm_enabled()) {
2316ace9a2cbSDavid Gibson         /* Full emulation, tell caller to allocate htab itself */
2317ace9a2cbSDavid Gibson         return 0;
2318ace9a2cbSDavid Gibson     }
2319ace9a2cbSDavid Gibson     if (kvm_check_extension(kvm_state, KVM_CAP_PPC_ALLOC_HTAB)) {
23207f763a5dSDavid Gibson         int ret;
23217f763a5dSDavid Gibson         ret = kvm_vm_ioctl(kvm_state, KVM_PPC_ALLOCATE_HTAB, &shift);
2322ace9a2cbSDavid Gibson         if (ret == -ENOTTY) {
2323ace9a2cbSDavid Gibson             /* At least some versions of PR KVM advertise the
2324ace9a2cbSDavid Gibson              * capability, but don't implement the ioctl().  Oops.
2325ace9a2cbSDavid Gibson              * Return 0 so that we allocate the htab in qemu, as is
2326ace9a2cbSDavid Gibson              * correct for PR. */
2327ace9a2cbSDavid Gibson             return 0;
2328ace9a2cbSDavid Gibson         } else if (ret < 0) {
23297f763a5dSDavid Gibson             return ret;
23307f763a5dSDavid Gibson         }
23317f763a5dSDavid Gibson         return shift;
23327f763a5dSDavid Gibson     }
23337f763a5dSDavid Gibson 
2334ace9a2cbSDavid Gibson     /* We have a kernel that predates the htab reset calls.  For PR
2335ace9a2cbSDavid Gibson      * KVM, we need to allocate the htab ourselves; an HV KVM of this
233696c9cff0SThomas Huth      * era has already allocated a fixed 16MB hash table. */
233796c9cff0SThomas Huth     if (kvmppc_is_pr(kvm_state)) {
2338ace9a2cbSDavid Gibson         /* PR - tell caller to allocate htab */
23397f763a5dSDavid Gibson         return 0;
2340ace9a2cbSDavid Gibson     } else {
2341ace9a2cbSDavid Gibson         /* HV - assume 16MB kernel allocated htab */
2342ace9a2cbSDavid Gibson         return 24;
2343ace9a2cbSDavid Gibson     }
23447f763a5dSDavid Gibson }
23457f763a5dSDavid Gibson 
2346a1e98583SDavid Gibson static inline uint32_t mfpvr(void)
2347a1e98583SDavid Gibson {
2348a1e98583SDavid Gibson     uint32_t pvr;
2349a1e98583SDavid Gibson 
2350a1e98583SDavid Gibson     asm ("mfpvr %0"
2351a1e98583SDavid Gibson          : "=r"(pvr));
2352a1e98583SDavid Gibson     return pvr;
2353a1e98583SDavid Gibson }
2354a1e98583SDavid Gibson 
2355a7342588SDavid Gibson static void alter_insns(uint64_t *word, uint64_t flags, bool on)
2356a7342588SDavid Gibson {
2357a7342588SDavid Gibson     if (on) {
2358a7342588SDavid Gibson         *word |= flags;
2359a7342588SDavid Gibson     } else {
2360a7342588SDavid Gibson         *word &= ~flags;
2361a7342588SDavid Gibson     }
2362a7342588SDavid Gibson }
2363a7342588SDavid Gibson 
23642985b86bSAndreas Färber static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data)
23652985b86bSAndreas Färber {
23662985b86bSAndreas Färber     PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
2367a7342588SDavid Gibson     uint32_t vmx = kvmppc_get_vmx();
2368a7342588SDavid Gibson     uint32_t dfp = kvmppc_get_dfp();
23690cbad81fSDavid Gibson     uint32_t dcache_size = kvmppc_read_int_cpu_dt("d-cache-size");
23700cbad81fSDavid Gibson     uint32_t icache_size = kvmppc_read_int_cpu_dt("i-cache-size");
2371a1e98583SDavid Gibson 
2372cfe34f44SAndreas Färber     /* Now fix up the class with information we can query from the host */
23733bc9ccc0SAlexey Kardashevskiy     pcc->pvr = mfpvr();
2374a7342588SDavid Gibson 
237570bca53fSAlexander Graf     if (vmx != -1) {
237670bca53fSAlexander Graf         /* Only override when we know what the host supports */
2377cfe34f44SAndreas Färber         alter_insns(&pcc->insns_flags, PPC_ALTIVEC, vmx > 0);
2378cfe34f44SAndreas Färber         alter_insns(&pcc->insns_flags2, PPC2_VSX, vmx > 1);
237970bca53fSAlexander Graf     }
238070bca53fSAlexander Graf     if (dfp != -1) {
238170bca53fSAlexander Graf         /* Only override when we know what the host supports */
2382cfe34f44SAndreas Färber         alter_insns(&pcc->insns_flags2, PPC2_DFP, dfp);
238370bca53fSAlexander Graf     }
23840cbad81fSDavid Gibson 
23850cbad81fSDavid Gibson     if (dcache_size != -1) {
23860cbad81fSDavid Gibson         pcc->l1_dcache_size = dcache_size;
23870cbad81fSDavid Gibson     }
23880cbad81fSDavid Gibson 
23890cbad81fSDavid Gibson     if (icache_size != -1) {
23900cbad81fSDavid Gibson         pcc->l1_icache_size = icache_size;
23910cbad81fSDavid Gibson     }
2392c64abd1fSSam Bobroff 
2393c64abd1fSSam Bobroff #if defined(TARGET_PPC64)
2394c64abd1fSSam Bobroff     pcc->radix_page_info = kvm_get_radix_page_info();
23955f3066d8SDavid Gibson 
23965f3066d8SDavid Gibson     if ((pcc->pvr & 0xffffff00) == CPU_POWERPC_POWER9_DD1) {
23975f3066d8SDavid Gibson         /*
23985f3066d8SDavid Gibson          * POWER9 DD1 has some bugs which make it not really ISA 3.00
23995f3066d8SDavid Gibson          * compliant.  More importantly, advertising ISA 3.00
24005f3066d8SDavid Gibson          * architected mode may prevent guests from activating
24015f3066d8SDavid Gibson          * necessary DD1 workarounds.
24025f3066d8SDavid Gibson          */
24035f3066d8SDavid Gibson         pcc->pcr_supported &= ~(PCR_COMPAT_3_00 | PCR_COMPAT_2_07
24045f3066d8SDavid Gibson                                 | PCR_COMPAT_2_06 | PCR_COMPAT_2_05);
24055f3066d8SDavid Gibson     }
2406c64abd1fSSam Bobroff #endif /* defined(TARGET_PPC64) */
2407a1e98583SDavid Gibson }
2408a1e98583SDavid Gibson 
24093b961124SStuart Yoder bool kvmppc_has_cap_epr(void)
24103b961124SStuart Yoder {
24113b961124SStuart Yoder     return cap_epr;
24123b961124SStuart Yoder }
24133b961124SStuart Yoder 
24147c43bca0SAneesh Kumar K.V bool kvmppc_has_cap_htab_fd(void)
24157c43bca0SAneesh Kumar K.V {
24167c43bca0SAneesh Kumar K.V     return cap_htab_fd;
24177c43bca0SAneesh Kumar K.V }
24187c43bca0SAneesh Kumar K.V 
241987a91de6SAlexander Graf bool kvmppc_has_cap_fixup_hcalls(void)
242087a91de6SAlexander Graf {
242187a91de6SAlexander Graf     return cap_fixup_hcalls;
242287a91de6SAlexander Graf }
242387a91de6SAlexander Graf 
2424bac3bf28SThomas Huth bool kvmppc_has_cap_htm(void)
2425bac3bf28SThomas Huth {
2426bac3bf28SThomas Huth     return cap_htm;
2427bac3bf28SThomas Huth }
2428bac3bf28SThomas Huth 
2429cf1c4cceSSam Bobroff bool kvmppc_has_cap_mmu_radix(void)
2430cf1c4cceSSam Bobroff {
2431cf1c4cceSSam Bobroff     return cap_mmu_radix;
2432cf1c4cceSSam Bobroff }
2433cf1c4cceSSam Bobroff 
2434cf1c4cceSSam Bobroff bool kvmppc_has_cap_mmu_hash_v3(void)
2435cf1c4cceSSam Bobroff {
2436cf1c4cceSSam Bobroff     return cap_mmu_hash_v3;
2437cf1c4cceSSam Bobroff }
2438cf1c4cceSSam Bobroff 
243952b2519cSThomas Huth PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void)
244052b2519cSThomas Huth {
244152b2519cSThomas Huth     uint32_t host_pvr = mfpvr();
244252b2519cSThomas Huth     PowerPCCPUClass *pvr_pcc;
244352b2519cSThomas Huth 
244452b2519cSThomas Huth     pvr_pcc = ppc_cpu_class_by_pvr(host_pvr);
244552b2519cSThomas Huth     if (pvr_pcc == NULL) {
244652b2519cSThomas Huth         pvr_pcc = ppc_cpu_class_by_pvr_mask(host_pvr);
244752b2519cSThomas Huth     }
244852b2519cSThomas Huth 
244952b2519cSThomas Huth     return pvr_pcc;
245052b2519cSThomas Huth }
245152b2519cSThomas Huth 
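/* Register the "host" CPU class (and, for pseries, the matching
 * "host" spapr-cpu-core type) modelled on the PVR of the machine we
 * are running on, and point the generic family alias at it. */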
24525ba4576bSAndreas Färber static int kvm_ppc_register_host_cpu_type(void)
24535ba4576bSAndreas Färber {
24545ba4576bSAndreas Färber     TypeInfo type_info = {
24555ba4576bSAndreas Färber         .name = TYPE_HOST_POWERPC_CPU,
24565ba4576bSAndreas Färber         .class_init = kvmppc_host_cpu_class_init,
24575ba4576bSAndreas Färber     };
24585ba4576bSAndreas Färber     PowerPCCPUClass *pvr_pcc;
245992e926e1SGreg Kurz     ObjectClass *oc;
24605b79b1caSAlexey Kardashevskiy     DeviceClass *dc;
2461715d4b96SThomas Huth     int i;
24625ba4576bSAndreas Färber 
246352b2519cSThomas Huth     pvr_pcc = kvm_ppc_get_host_cpu_class();
24643bc9ccc0SAlexey Kardashevskiy     if (pvr_pcc == NULL) {
24655ba4576bSAndreas Färber         return -1;
24665ba4576bSAndreas Färber     }
24675ba4576bSAndreas Färber     type_info.parent = object_class_get_name(OBJECT_CLASS(pvr_pcc));
24685ba4576bSAndreas Färber     type_register(&type_info);
24695b79b1caSAlexey Kardashevskiy 
247092e926e1SGreg Kurz     oc = object_class_by_name(type_info.name);
247192e926e1SGreg Kurz     g_assert(oc);
247292e926e1SGreg Kurz 
24733b542549SBharata B Rao #if defined(TARGET_PPC64)
24743b542549SBharata B Rao     type_info.name = g_strdup_printf("%s-"TYPE_SPAPR_CPU_CORE, "host");
24753b542549SBharata B Rao     type_info.parent = TYPE_SPAPR_CPU_CORE;
24767ebaf795SBharata B Rao     type_info.instance_size = sizeof(sPAPRCPUCore);
24777ebaf795SBharata B Rao     type_info.instance_init = NULL;
24787ebaf795SBharata B Rao     type_info.class_init = spapr_cpu_core_class_init;
24797ebaf795SBharata B Rao     type_info.class_data = (void *) "host";
24803b542549SBharata B Rao     type_register(&type_info);
24813b542549SBharata B Rao     g_free((void *)type_info.name);
24823b542549SBharata B Rao #endif
24833b542549SBharata B Rao 
2484715d4b96SThomas Huth     /*
2485715d4b96SThomas Huth      * Update generic CPU family class alias (e.g. on a POWER8NVL host,
2486715d4b96SThomas Huth      * we want "POWER8" to be a "family" alias that points to the current
2487715d4b96SThomas Huth      * host CPU type, too)
2488715d4b96SThomas Huth      */
2489715d4b96SThomas Huth     dc = DEVICE_CLASS(ppc_cpu_get_family_class(pvr_pcc));
2490715d4b96SThomas Huth     for (i = 0; ppc_cpu_aliases[i].alias != NULL; i++) {
2491715d4b96SThomas Huth         if (strcmp(ppc_cpu_aliases[i].alias, dc->desc) == 0) {
2492715d4b96SThomas Huth             char *suffix;
2493715d4b96SThomas Huth 
2494715d4b96SThomas Huth             ppc_cpu_aliases[i].model = g_strdup(object_class_get_name(oc));
2495715d4b96SThomas Huth             suffix = strstr(ppc_cpu_aliases[i].model, "-"TYPE_POWERPC_CPU);
2496715d4b96SThomas Huth             if (suffix) {
2497715d4b96SThomas Huth                 *suffix = 0;
2498715d4b96SThomas Huth             }
2499715d4b96SThomas Huth             ppc_cpu_aliases[i].oc = oc;
2500715d4b96SThomas Huth             break;
2501715d4b96SThomas Huth         }
2502715d4b96SThomas Huth     }
2503715d4b96SThomas Huth 
25045ba4576bSAndreas Färber     return 0;
25055ba4576bSAndreas Färber }
25065ba4576bSAndreas Färber 
2507feaa64c4SDavid Gibson int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function)
2508feaa64c4SDavid Gibson {
2509feaa64c4SDavid Gibson     struct kvm_rtas_token_args args = {
2510feaa64c4SDavid Gibson         .token = token,
2511feaa64c4SDavid Gibson     };
2512feaa64c4SDavid Gibson 
2513feaa64c4SDavid Gibson     if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_RTAS)) {
2514feaa64c4SDavid Gibson         return -ENOENT;
2515feaa64c4SDavid Gibson     }
2516feaa64c4SDavid Gibson 
2517feaa64c4SDavid Gibson     strncpy(args.name, function, sizeof(args.name));
2518feaa64c4SDavid Gibson 
2519feaa64c4SDavid Gibson     return kvm_vm_ioctl(kvm_state, KVM_PPC_RTAS_DEFINE_TOKEN, &args);
2520feaa64c4SDavid Gibson }
252112b1143bSDavid Gibson 
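/* Open a file descriptor onto the guest HPT, for reading (migration
 * save) or writing (migration load) depending on 'write'. */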
2522e68cb8b4SAlexey Kardashevskiy int kvmppc_get_htab_fd(bool write)
2523e68cb8b4SAlexey Kardashevskiy {
2524e68cb8b4SAlexey Kardashevskiy     struct kvm_get_htab_fd s = {
2525e68cb8b4SAlexey Kardashevskiy         .flags = write ? KVM_GET_HTAB_WRITE : 0,
2526e68cb8b4SAlexey Kardashevskiy         .start_index = 0,
2527e68cb8b4SAlexey Kardashevskiy     };
2528e68cb8b4SAlexey Kardashevskiy 
2529e68cb8b4SAlexey Kardashevskiy     if (!cap_htab_fd) {
2530e68cb8b4SAlexey Kardashevskiy         fprintf(stderr, "KVM version doesn't support saving the hash table\n");
2531e68cb8b4SAlexey Kardashevskiy         return -1;
2532e68cb8b4SAlexey Kardashevskiy     }
2533e68cb8b4SAlexey Kardashevskiy 
2534e68cb8b4SAlexey Kardashevskiy     return kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &s);
2535e68cb8b4SAlexey Kardashevskiy }
2536e68cb8b4SAlexey Kardashevskiy 
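/* Stream HPT entries from the KVM htab fd into the migration stream,
 * stopping once max_ns nanoseconds have elapsed (if non-negative).
 * Returns 1 when the whole table has been read, 0 if time ran out,
 * or a negative error. */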
2537e68cb8b4SAlexey Kardashevskiy int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns)
2538e68cb8b4SAlexey Kardashevskiy {
2539bc72ad67SAlex Bligh     int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
2540e68cb8b4SAlexey Kardashevskiy     uint8_t buf[bufsize];
2541e68cb8b4SAlexey Kardashevskiy     ssize_t rc;
2542e68cb8b4SAlexey Kardashevskiy 
2543e68cb8b4SAlexey Kardashevskiy     do {
2544e68cb8b4SAlexey Kardashevskiy         rc = read(fd, buf, bufsize);
2545e68cb8b4SAlexey Kardashevskiy         if (rc < 0) {
2546e68cb8b4SAlexey Kardashevskiy             fprintf(stderr, "Error reading data from KVM HTAB fd: %s\n",
2547e68cb8b4SAlexey Kardashevskiy                     strerror(errno));
2548e68cb8b4SAlexey Kardashevskiy             return rc;
2549e68cb8b4SAlexey Kardashevskiy         } else if (rc) {
2550e094c4c1SCédric Le Goater             uint8_t *buffer = buf;
2551e094c4c1SCédric Le Goater             ssize_t n = rc;
2552e094c4c1SCédric Le Goater             while (n) {
2553e094c4c1SCédric Le Goater                 struct kvm_get_htab_header *head =
2554e094c4c1SCédric Le Goater                     (struct kvm_get_htab_header *) buffer;
2555e094c4c1SCédric Le Goater                 size_t chunksize = sizeof(*head) +
2556e094c4c1SCédric Le Goater                      HASH_PTE_SIZE_64 * head->n_valid;
2557e094c4c1SCédric Le Goater 
2558e094c4c1SCédric Le Goater                 qemu_put_be32(f, head->index);
2559e094c4c1SCédric Le Goater                 qemu_put_be16(f, head->n_valid);
2560e094c4c1SCédric Le Goater                 qemu_put_be16(f, head->n_invalid);
2561e094c4c1SCédric Le Goater                 qemu_put_buffer(f, (void *)(head + 1),
2562e094c4c1SCédric Le Goater                                 HASH_PTE_SIZE_64 * head->n_valid);
2563e094c4c1SCédric Le Goater 
2564e094c4c1SCédric Le Goater                 buffer += chunksize;
2565e094c4c1SCédric Le Goater                 n -= chunksize;
2566e094c4c1SCédric Le Goater             }
2567e68cb8b4SAlexey Kardashevskiy         }
2568e68cb8b4SAlexey Kardashevskiy     } while ((rc != 0)
2569e68cb8b4SAlexey Kardashevskiy              && ((max_ns < 0)
2570bc72ad67SAlex Bligh                  || ((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) < max_ns)));
2571e68cb8b4SAlexey Kardashevskiy 
2572e68cb8b4SAlexey Kardashevskiy     return (rc == 0) ? 1 : 0;
2573e68cb8b4SAlexey Kardashevskiy }
2574e68cb8b4SAlexey Kardashevskiy 
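/* Write one chunk of HPT entries from the migration stream back into
 * the KVM htab fd. */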
2575e68cb8b4SAlexey Kardashevskiy int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
2576e68cb8b4SAlexey Kardashevskiy                            uint16_t n_valid, uint16_t n_invalid)
2577e68cb8b4SAlexey Kardashevskiy {
2578e68cb8b4SAlexey Kardashevskiy     struct kvm_get_htab_header *buf;
2579e68cb8b4SAlexey Kardashevskiy     size_t chunksize = sizeof(*buf) + n_valid * HASH_PTE_SIZE_64;
2580e68cb8b4SAlexey Kardashevskiy     ssize_t rc;
2581e68cb8b4SAlexey Kardashevskiy 
2582e68cb8b4SAlexey Kardashevskiy     buf = alloca(chunksize);
2583e68cb8b4SAlexey Kardashevskiy     buf->index = index;
2584e68cb8b4SAlexey Kardashevskiy     buf->n_valid = n_valid;
2585e68cb8b4SAlexey Kardashevskiy     buf->n_invalid = n_invalid;
2586e68cb8b4SAlexey Kardashevskiy 
2587e68cb8b4SAlexey Kardashevskiy     qemu_get_buffer(f, (void *)(buf + 1), HASH_PTE_SIZE_64 * n_valid);
2588e68cb8b4SAlexey Kardashevskiy 
2589e68cb8b4SAlexey Kardashevskiy     rc = write(fd, buf, chunksize);
2590e68cb8b4SAlexey Kardashevskiy     if (rc < 0) {
2591e68cb8b4SAlexey Kardashevskiy         fprintf(stderr, "Error writing KVM hash table: %s\n",
2592e68cb8b4SAlexey Kardashevskiy                 strerror(errno));
2593e68cb8b4SAlexey Kardashevskiy         return rc;
2594e68cb8b4SAlexey Kardashevskiy     }
2595e68cb8b4SAlexey Kardashevskiy     if (rc != chunksize) {
2596e68cb8b4SAlexey Kardashevskiy         /* We should never get a short write on a single chunk */
2597e68cb8b4SAlexey Kardashevskiy         fprintf(stderr, "Short write, restoring KVM hash table\n");
2598e68cb8b4SAlexey Kardashevskiy         return -1;
2599e68cb8b4SAlexey Kardashevskiy     }
2600e68cb8b4SAlexey Kardashevskiy     return 0;
2601e68cb8b4SAlexey Kardashevskiy }
2602e68cb8b4SAlexey Kardashevskiy 
260320d695a9SAndreas Färber bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
26044513d923SGleb Natapov {
26054513d923SGleb Natapov     return true;
26064513d923SGleb Natapov }
2607a1b87fe0SJan Kiszka 
260882169660SScott Wood void kvm_arch_init_irq_routing(KVMState *s)
260982169660SScott Wood {
261082169660SScott Wood }
2611c65f9a07SGreg Kurz 
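/* Read n HPTEs starting at index ptex from KVM into hptes[],
 * zero-filling any invalid entries reported by the kernel. */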
26121ad9f0a4SDavid Gibson void kvmppc_read_hptes(ppc_hash_pte64_t *hptes, hwaddr ptex, int n)
26131ad9f0a4SDavid Gibson {
26141ad9f0a4SDavid Gibson     struct kvm_get_htab_fd ghf = {
26151ad9f0a4SDavid Gibson         .flags = 0,
26161ad9f0a4SDavid Gibson         .start_index = ptex,
26177c43bca0SAneesh Kumar K.V     };
26181ad9f0a4SDavid Gibson     int fd, rc;
26191ad9f0a4SDavid Gibson     int i;
26207c43bca0SAneesh Kumar K.V 
26211ad9f0a4SDavid Gibson     fd = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &ghf);
26221ad9f0a4SDavid Gibson     if (fd < 0) {
26231ad9f0a4SDavid Gibson         hw_error("kvmppc_read_hptes: Unable to open HPT fd");
26241ad9f0a4SDavid Gibson     }
26251ad9f0a4SDavid Gibson 
26261ad9f0a4SDavid Gibson     i = 0;
26271ad9f0a4SDavid Gibson     while (i < n) {
26281ad9f0a4SDavid Gibson         struct kvm_get_htab_header *hdr;
26291ad9f0a4SDavid Gibson         int m = n < HPTES_PER_GROUP ? n : HPTES_PER_GROUP;
26301ad9f0a4SDavid Gibson         char buf[sizeof(*hdr) + m * HASH_PTE_SIZE_64];
26311ad9f0a4SDavid Gibson 
26321ad9f0a4SDavid Gibson         rc = read(fd, buf, sizeof(buf));
26331ad9f0a4SDavid Gibson         if (rc < 0) {
26341ad9f0a4SDavid Gibson             hw_error("kvmppc_read_hptes: Unable to read HPTEs");
26351ad9f0a4SDavid Gibson         }
26361ad9f0a4SDavid Gibson 
26371ad9f0a4SDavid Gibson         hdr = (struct kvm_get_htab_header *)buf;
26381ad9f0a4SDavid Gibson         while ((i < n) && ((char *)hdr < (buf + rc))) {
26391ad9f0a4SDavid Gibson             int invalid = hdr->n_invalid;
26401ad9f0a4SDavid Gibson 
26411ad9f0a4SDavid Gibson             if (hdr->index != (ptex + i)) {
26421ad9f0a4SDavid Gibson                 hw_error("kvmppc_read_hptes: Unexpected HPTE index %"PRIu32
26431ad9f0a4SDavid Gibson                          " != (%"HWADDR_PRIu" + %d)", hdr->index, ptex, i);
26441ad9f0a4SDavid Gibson             }
26451ad9f0a4SDavid Gibson 
26461ad9f0a4SDavid Gibson             memcpy(hptes + i, hdr + 1, HASH_PTE_SIZE_64 * hdr->n_valid);
26471ad9f0a4SDavid Gibson             i += hdr->n_valid;
26481ad9f0a4SDavid Gibson 
26491ad9f0a4SDavid Gibson             if ((n - i) < invalid) {
26501ad9f0a4SDavid Gibson                 invalid = n - i;
26511ad9f0a4SDavid Gibson             }
26521ad9f0a4SDavid Gibson             memset(hptes + i, 0, invalid * HASH_PTE_SIZE_64);
26531ad9f0a4SDavid Gibson             i += hdr->n_invalid;
26541ad9f0a4SDavid Gibson 
26551ad9f0a4SDavid Gibson             hdr = (struct kvm_get_htab_header *)
26561ad9f0a4SDavid Gibson                 ((char *)(hdr + 1) + HASH_PTE_SIZE_64 * hdr->n_valid);
26571ad9f0a4SDavid Gibson         }
26581ad9f0a4SDavid Gibson     }
26591ad9f0a4SDavid Gibson 
26601ad9f0a4SDavid Gibson     close(fd);
26611ad9f0a4SDavid Gibson }
26621ad9f0a4SDavid Gibson 
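/* Update a single HPTE at index ptex through the KVM htab fd. */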
26631ad9f0a4SDavid Gibson void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1)
26647c43bca0SAneesh Kumar K.V {
26651ad9f0a4SDavid Gibson     int fd, rc;
26667c43bca0SAneesh Kumar K.V     struct kvm_get_htab_fd ghf;
26671ad9f0a4SDavid Gibson     struct {
26681ad9f0a4SDavid Gibson         struct kvm_get_htab_header hdr;
26691ad9f0a4SDavid Gibson         uint64_t pte0;
26701ad9f0a4SDavid Gibson         uint64_t pte1;
26711ad9f0a4SDavid Gibson     } buf;
2672c1385933SAneesh Kumar K.V 
2673c1385933SAneesh Kumar K.V     ghf.flags = 0;
2674c1385933SAneesh Kumar K.V     ghf.start_index = 0;     /* Ignored */
26751ad9f0a4SDavid Gibson     fd = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &ghf);
26761ad9f0a4SDavid Gibson     if (fd < 0) {
26771ad9f0a4SDavid Gibson         hw_error("kvmppc_write_hpte: Unable to open HPT fd");
2678c1385933SAneesh Kumar K.V     }
2679c1385933SAneesh Kumar K.V 
26801ad9f0a4SDavid Gibson     buf.hdr.n_valid = 1;
26811ad9f0a4SDavid Gibson     buf.hdr.n_invalid = 0;
26821ad9f0a4SDavid Gibson     buf.hdr.index = ptex;
26831ad9f0a4SDavid Gibson     buf.pte0 = cpu_to_be64(pte0);
26841ad9f0a4SDavid Gibson     buf.pte1 = cpu_to_be64(pte1);
26851ad9f0a4SDavid Gibson 
26861ad9f0a4SDavid Gibson     rc = write(fd, &buf, sizeof(buf));
26871ad9f0a4SDavid Gibson     if (rc != sizeof(buf)) {
26881ad9f0a4SDavid Gibson         hw_error("kvmppc_write_hpte: Unable to update KVM HPT");
2689c1385933SAneesh Kumar K.V     }
26901ad9f0a4SDavid Gibson     close(fd);
2691c1385933SAneesh Kumar K.V }
26929e03a040SFrank Blaschka 
26939e03a040SFrank Blaschka int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
2694dc9f06caSPavel Fedin                              uint64_t address, uint32_t data, PCIDevice *dev)
26959e03a040SFrank Blaschka {
26969e03a040SFrank Blaschka     return 0;
26979e03a040SFrank Blaschka }
26981850b6b7SEric Auger 
269938d87493SPeter Xu int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
270038d87493SPeter Xu                                 int vector, PCIDevice *dev)
270138d87493SPeter Xu {
270238d87493SPeter Xu     return 0;
270338d87493SPeter Xu }
270438d87493SPeter Xu 
270538d87493SPeter Xu int kvm_arch_release_virq_post(int virq)
270638d87493SPeter Xu {
270738d87493SPeter Xu     return 0;
270838d87493SPeter Xu }
270938d87493SPeter Xu 
27101850b6b7SEric Auger int kvm_arch_msi_data_to_gsi(uint32_t data)
27111850b6b7SEric Auger {
27121850b6b7SEric Auger     return data & 0xffff;
27131850b6b7SEric Auger }
27144d9392beSThomas Huth 
27154d9392beSThomas Huth int kvmppc_enable_hwrng(void)
27164d9392beSThomas Huth {
27174d9392beSThomas Huth     if (!kvm_enabled() || !kvm_check_extension(kvm_state, KVM_CAP_PPC_HWRNG)) {
27184d9392beSThomas Huth         return -1;
27194d9392beSThomas Huth     }
27204d9392beSThomas Huth 
27214d9392beSThomas Huth     return kvmppc_enable_hcall(kvm_state, H_RANDOM);
27224d9392beSThomas Huth }
272330f4b05bSDavid Gibson 
272430f4b05bSDavid Gibson void kvmppc_check_papr_resize_hpt(Error **errp)
272530f4b05bSDavid Gibson {
272630f4b05bSDavid Gibson     if (!kvm_enabled()) {
2727b55d295eSDavid Gibson         return; /* No KVM, we're good */
2728b55d295eSDavid Gibson     }
2729b55d295eSDavid Gibson 
2730b55d295eSDavid Gibson     if (cap_resize_hpt) {
2731b55d295eSDavid Gibson         return; /* Kernel has explicit support, we're good */
2732b55d295eSDavid Gibson     }
2733b55d295eSDavid Gibson 
2734b55d295eSDavid Gibson     /* Otherwise fallback on looking for PR KVM */
2735b55d295eSDavid Gibson     if (kvmppc_is_pr(kvm_state)) {
273630f4b05bSDavid Gibson         return;
273730f4b05bSDavid Gibson     }
273830f4b05bSDavid Gibson 
273930f4b05bSDavid Gibson     error_setg(errp,
274030f4b05bSDavid Gibson                "Hash page table resizing not available with this KVM version");
274130f4b05bSDavid Gibson }
2742b55d295eSDavid Gibson 
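/* Thin wrappers around the KVM_PPC_RESIZE_HPT_PREPARE/COMMIT ioctls;
 * both return -ENOSYS when the resize-hpt capability isn't available. */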
2743b55d295eSDavid Gibson int kvmppc_resize_hpt_prepare(PowerPCCPU *cpu, target_ulong flags, int shift)
2744b55d295eSDavid Gibson {
2745b55d295eSDavid Gibson     CPUState *cs = CPU(cpu);
2746b55d295eSDavid Gibson     struct kvm_ppc_resize_hpt rhpt = {
2747b55d295eSDavid Gibson         .flags = flags,
2748b55d295eSDavid Gibson         .shift = shift,
2749b55d295eSDavid Gibson     };
2750b55d295eSDavid Gibson 
2751b55d295eSDavid Gibson     if (!cap_resize_hpt) {
2752b55d295eSDavid Gibson         return -ENOSYS;
2753b55d295eSDavid Gibson     }
2754b55d295eSDavid Gibson 
2755b55d295eSDavid Gibson     return kvm_vm_ioctl(cs->kvm_state, KVM_PPC_RESIZE_HPT_PREPARE, &rhpt);
2756b55d295eSDavid Gibson }
2757b55d295eSDavid Gibson 
2758b55d295eSDavid Gibson int kvmppc_resize_hpt_commit(PowerPCCPU *cpu, target_ulong flags, int shift)
2759b55d295eSDavid Gibson {
2760b55d295eSDavid Gibson     CPUState *cs = CPU(cpu);
2761b55d295eSDavid Gibson     struct kvm_ppc_resize_hpt rhpt = {
2762b55d295eSDavid Gibson         .flags = flags,
2763b55d295eSDavid Gibson         .shift = shift,
2764b55d295eSDavid Gibson     };
2765b55d295eSDavid Gibson 
2766b55d295eSDavid Gibson     if (!cap_resize_hpt) {
2767b55d295eSDavid Gibson         return -ENOSYS;
2768b55d295eSDavid Gibson     }
2769b55d295eSDavid Gibson 
2770b55d295eSDavid Gibson     return kvm_vm_ioctl(cs->kvm_state, KVM_PPC_RESIZE_HPT_COMMIT, &rhpt);
2771b55d295eSDavid Gibson }
2772b55d295eSDavid Gibson 
2773b55d295eSDavid Gibson static void kvmppc_pivot_hpt_cpu(CPUState *cs, run_on_cpu_data arg)
2774b55d295eSDavid Gibson {
2775b55d295eSDavid Gibson     target_ulong sdr1 = arg.target_ptr;
2776b55d295eSDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
2777b55d295eSDavid Gibson     CPUPPCState *env = &cpu->env;
2778b55d295eSDavid Gibson 
2779b55d295eSDavid Gibson     /* This is just for the benefit of PR KVM */
2780b55d295eSDavid Gibson     cpu_synchronize_state(cs);
2781b55d295eSDavid Gibson     env->spr[SPR_SDR1] = sdr1;
2782b55d295eSDavid Gibson     if (kvmppc_put_books_sregs(cpu) < 0) {
2783b55d295eSDavid Gibson         error_report("Unable to update SDR1 in KVM");
2784b55d295eSDavid Gibson         exit(1);
2785b55d295eSDavid Gibson     }
2786b55d295eSDavid Gibson }
2787b55d295eSDavid Gibson 
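/* Push a new SDR1 (HPT base and size) value to every vCPU; as noted
 * above, this is mainly for the benefit of PR KVM. */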
2788b55d295eSDavid Gibson void kvmppc_update_sdr1(target_ulong sdr1)
2789b55d295eSDavid Gibson {
2790b55d295eSDavid Gibson     CPUState *cs;
2791b55d295eSDavid Gibson 
2792b55d295eSDavid Gibson     CPU_FOREACH(cs) {
2793b55d295eSDavid Gibson         run_on_cpu(cs, kvmppc_pivot_hpt_cpu, RUN_ON_CPU_TARGET_PTR(sdr1));
2794b55d295eSDavid Gibson     }
2795b55d295eSDavid Gibson }
2796c363a37aSDaniel Henrique Barboza 
2797c363a37aSDaniel Henrique Barboza /*
2798c363a37aSDaniel Henrique Barboza  * This is a helper function to detect a post migration scenario
2799c363a37aSDaniel Henrique Barboza  * in which a guest, running as KVM-HV, freezes in cpu_post_load because
2800c363a37aSDaniel Henrique Barboza  * the guest kernel can't handle a PVR value other than the actual host
2801c363a37aSDaniel Henrique Barboza  * PVR in KVM_SET_SREGS, even if pvr_match() returns true.
2802c363a37aSDaniel Henrique Barboza  *
2803c363a37aSDaniel Henrique Barboza  * If we don't have cap_ppc_pvr_compat and we're not running in PR
2804c363a37aSDaniel Henrique Barboza  * (so, we're HV), return true. The workaround itself is done in
2805c363a37aSDaniel Henrique Barboza  * cpu_post_load.
2806c363a37aSDaniel Henrique Barboza  *
2807c363a37aSDaniel Henrique Barboza  * The order here is important: we'll only check for KVM PR as a
2808c363a37aSDaniel Henrique Barboza  * fallback if the guest kernel can't handle the situation itself.
2809c363a37aSDaniel Henrique Barboza  * We need to avoid as much as possible querying the running KVM type
2810c363a37aSDaniel Henrique Barboza  * in QEMU level.
2811c363a37aSDaniel Henrique Barboza  */
2812c363a37aSDaniel Henrique Barboza bool kvmppc_pvr_workaround_required(PowerPCCPU *cpu)
2813c363a37aSDaniel Henrique Barboza {
2814c363a37aSDaniel Henrique Barboza     CPUState *cs = CPU(cpu);
2815c363a37aSDaniel Henrique Barboza 
2816c363a37aSDaniel Henrique Barboza     if (!kvm_enabled()) {
2817c363a37aSDaniel Henrique Barboza         return false;
2818c363a37aSDaniel Henrique Barboza     }
2819c363a37aSDaniel Henrique Barboza 
2820c363a37aSDaniel Henrique Barboza     if (cap_ppc_pvr_compat) {
2821c363a37aSDaniel Henrique Barboza         return false;
2822c363a37aSDaniel Henrique Barboza     }
2823c363a37aSDaniel Henrique Barboza 
2824c363a37aSDaniel Henrique Barboza     return !kvmppc_is_pr(cs->kvm_state);
2825c363a37aSDaniel Henrique Barboza }
2826