1d76d1650Saurel32 /* 2d76d1650Saurel32 * PowerPC implementation of KVM hooks 3d76d1650Saurel32 * 4d76d1650Saurel32 * Copyright IBM Corp. 2007 590dc8812SScott Wood * Copyright (C) 2011 Freescale Semiconductor, Inc. 6d76d1650Saurel32 * 7d76d1650Saurel32 * Authors: 8d76d1650Saurel32 * Jerone Young <jyoung5@us.ibm.com> 9d76d1650Saurel32 * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com> 10d76d1650Saurel32 * Hollis Blanchard <hollisb@us.ibm.com> 11d76d1650Saurel32 * 12d76d1650Saurel32 * This work is licensed under the terms of the GNU GPL, version 2 or later. 13d76d1650Saurel32 * See the COPYING file in the top-level directory. 14d76d1650Saurel32 * 15d76d1650Saurel32 */ 16d76d1650Saurel32 170d75590dSPeter Maydell #include "qemu/osdep.h" 18eadaada1SAlexander Graf #include <dirent.h> 19d76d1650Saurel32 #include <sys/ioctl.h> 204656e1f0SBenjamin Herrenschmidt #include <sys/vfs.h> 21d76d1650Saurel32 22d76d1650Saurel32 #include <linux/kvm.h> 23d76d1650Saurel32 24d76d1650Saurel32 #include "qemu-common.h" 25072ed5f2SThomas Huth #include "qemu/error-report.h" 2633c11879SPaolo Bonzini #include "cpu.h" 27715d4b96SThomas Huth #include "cpu-models.h" 281de7afc9SPaolo Bonzini #include "qemu/timer.h" 299c17d615SPaolo Bonzini #include "sysemu/sysemu.h" 30b3946626SVincent Palatin #include "sysemu/hw_accel.h" 31d76d1650Saurel32 #include "kvm_ppc.h" 329c17d615SPaolo Bonzini #include "sysemu/cpus.h" 339c17d615SPaolo Bonzini #include "sysemu/device_tree.h" 34d5aea6f3SDavid Gibson #include "mmu-hash64.h" 35d76d1650Saurel32 36f61b4bedSAlexander Graf #include "hw/sysbus.h" 370d09e41aSPaolo Bonzini #include "hw/ppc/spapr.h" 380d09e41aSPaolo Bonzini #include "hw/ppc/spapr_vio.h" 397ebaf795SBharata B Rao #include "hw/ppc/spapr_cpu_core.h" 4098a8b524SAlexey Kardashevskiy #include "hw/ppc/ppc.h" 4131f2cb8fSBharat Bhushan #include "sysemu/watchdog.h" 42b36f100eSAlexey Kardashevskiy #include "trace.h" 4388365d17SBharat Bhushan #include "exec/gdbstub.h" 444c663752SPaolo Bonzini #include 
"exec/memattrs.h" 45*9c607668SAlexey Kardashevskiy #include "exec/ram_addr.h" 462d103aaeSMichael Roth #include "sysemu/hostmem.h" 47f348b6d1SVeronia Bahaa #include "qemu/cutils.h" 48*9c607668SAlexey Kardashevskiy #include "qemu/mmap-alloc.h" 493b542549SBharata B Rao #if defined(TARGET_PPC64) 503b542549SBharata B Rao #include "hw/ppc/spapr_cpu_core.h" 513b542549SBharata B Rao #endif 52f61b4bedSAlexander Graf 53d76d1650Saurel32 //#define DEBUG_KVM 54d76d1650Saurel32 55d76d1650Saurel32 #ifdef DEBUG_KVM 56da56ff91SPeter Maydell #define DPRINTF(fmt, ...) \ 57d76d1650Saurel32 do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0) 58d76d1650Saurel32 #else 59da56ff91SPeter Maydell #define DPRINTF(fmt, ...) \ 60d76d1650Saurel32 do { } while (0) 61d76d1650Saurel32 #endif 62d76d1650Saurel32 63eadaada1SAlexander Graf #define PROC_DEVTREE_CPU "/proc/device-tree/cpus/" 64eadaada1SAlexander Graf 6594a8d39aSJan Kiszka const KVMCapabilityInfo kvm_arch_required_capabilities[] = { 6694a8d39aSJan Kiszka KVM_CAP_LAST_INFO 6794a8d39aSJan Kiszka }; 6894a8d39aSJan Kiszka 69fc87e185SAlexander Graf static int cap_interrupt_unset = false; 70fc87e185SAlexander Graf static int cap_interrupt_level = false; 7190dc8812SScott Wood static int cap_segstate; 7290dc8812SScott Wood static int cap_booke_sregs; 73e97c3636SDavid Gibson static int cap_ppc_smt; 74354ac20aSDavid Gibson static int cap_ppc_rma; 750f5cb298SDavid Gibson static int cap_spapr_tce; 76da95324eSAlexey Kardashevskiy static int cap_spapr_multitce; 779bb62a07SAlexey Kardashevskiy static int cap_spapr_vfio; 78f1af19d7SDavid Gibson static int cap_hior; 79d67d40eaSDavid Gibson static int cap_one_reg; 803b961124SStuart Yoder static int cap_epr; 8131f2cb8fSBharat Bhushan static int cap_ppc_watchdog; 829b00ea49SDavid Gibson static int cap_papr; 83e68cb8b4SAlexey Kardashevskiy static int cap_htab_fd; 8487a91de6SAlexander Graf static int cap_fixup_hcalls; 85bac3bf28SThomas Huth static int cap_htm; /* Hardware transactional memory support */ 
86fc87e185SAlexander Graf 873c902d44SBharat Bhushan static uint32_t debug_inst_opcode; 883c902d44SBharat Bhushan 89c821c2bdSAlexander Graf /* XXX We have a race condition where we actually have a level triggered 90c821c2bdSAlexander Graf * interrupt, but the infrastructure can't expose that yet, so the guest 91c821c2bdSAlexander Graf * takes but ignores it, goes to sleep and never gets notified that there's 92c821c2bdSAlexander Graf * still an interrupt pending. 93c6a94ba5SAlexander Graf * 94c821c2bdSAlexander Graf * As a quick workaround, let's just wake up again 20 ms after we injected 95c821c2bdSAlexander Graf * an interrupt. That way we can assure that we're always reinjecting 96c821c2bdSAlexander Graf * interrupts in case the guest swallowed them. 97c6a94ba5SAlexander Graf */ 98c6a94ba5SAlexander Graf static QEMUTimer *idle_timer; 99c6a94ba5SAlexander Graf 100d5a68146SAndreas Färber static void kvm_kick_cpu(void *opaque) 101c6a94ba5SAlexander Graf { 102d5a68146SAndreas Färber PowerPCCPU *cpu = opaque; 103d5a68146SAndreas Färber 104c08d7424SAndreas Färber qemu_cpu_kick(CPU(cpu)); 105c6a94ba5SAlexander Graf } 106c6a94ba5SAlexander Graf 10796c9cff0SThomas Huth /* Check whether we are running with KVM-PR (instead of KVM-HV). This 10896c9cff0SThomas Huth * should only be used for fallback tests - generally we should use 10996c9cff0SThomas Huth * explicit capabilities for the features we want, rather than 11096c9cff0SThomas Huth * assuming what is/isn't available depending on the KVM variant. 
*/ 11196c9cff0SThomas Huth static bool kvmppc_is_pr(KVMState *ks) 11296c9cff0SThomas Huth { 11396c9cff0SThomas Huth /* Assume KVM-PR if the GET_PVINFO capability is available */ 11496c9cff0SThomas Huth return kvm_check_extension(ks, KVM_CAP_PPC_GET_PVINFO) != 0; 11596c9cff0SThomas Huth } 11696c9cff0SThomas Huth 1175ba4576bSAndreas Färber static int kvm_ppc_register_host_cpu_type(void); 1185ba4576bSAndreas Färber 119b16565b3SMarcel Apfelbaum int kvm_arch_init(MachineState *ms, KVMState *s) 120d76d1650Saurel32 { 121fc87e185SAlexander Graf cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ); 122fc87e185SAlexander Graf cap_interrupt_level = kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL); 12390dc8812SScott Wood cap_segstate = kvm_check_extension(s, KVM_CAP_PPC_SEGSTATE); 12490dc8812SScott Wood cap_booke_sregs = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_SREGS); 125e97c3636SDavid Gibson cap_ppc_smt = kvm_check_extension(s, KVM_CAP_PPC_SMT); 126354ac20aSDavid Gibson cap_ppc_rma = kvm_check_extension(s, KVM_CAP_PPC_RMA); 1270f5cb298SDavid Gibson cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE); 128da95324eSAlexey Kardashevskiy cap_spapr_multitce = kvm_check_extension(s, KVM_CAP_SPAPR_MULTITCE); 1299bb62a07SAlexey Kardashevskiy cap_spapr_vfio = false; 130d67d40eaSDavid Gibson cap_one_reg = kvm_check_extension(s, KVM_CAP_ONE_REG); 131f1af19d7SDavid Gibson cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR); 1323b961124SStuart Yoder cap_epr = kvm_check_extension(s, KVM_CAP_PPC_EPR); 13331f2cb8fSBharat Bhushan cap_ppc_watchdog = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_WATCHDOG); 1349b00ea49SDavid Gibson /* Note: we don't set cap_papr here, because this capability is 1359b00ea49SDavid Gibson * only activated after this by kvmppc_set_papr() */ 136e68cb8b4SAlexey Kardashevskiy cap_htab_fd = kvm_check_extension(s, KVM_CAP_PPC_HTAB_FD); 13787a91de6SAlexander Graf cap_fixup_hcalls = kvm_check_extension(s, KVM_CAP_PPC_FIXUP_HCALL); 138bac3bf28SThomas Huth 
cap_htm = kvm_vm_check_extension(s, KVM_CAP_PPC_HTM); 139fc87e185SAlexander Graf 140fc87e185SAlexander Graf if (!cap_interrupt_level) { 141fc87e185SAlexander Graf fprintf(stderr, "KVM: Couldn't find level irq capability. Expect the " 142fc87e185SAlexander Graf "VM to stall at times!\n"); 143fc87e185SAlexander Graf } 144fc87e185SAlexander Graf 1455ba4576bSAndreas Färber kvm_ppc_register_host_cpu_type(); 1465ba4576bSAndreas Färber 147d76d1650Saurel32 return 0; 148d76d1650Saurel32 } 149d76d1650Saurel32 150d525ffabSPaolo Bonzini int kvm_arch_irqchip_create(MachineState *ms, KVMState *s) 151d525ffabSPaolo Bonzini { 152d525ffabSPaolo Bonzini return 0; 153d525ffabSPaolo Bonzini } 154d525ffabSPaolo Bonzini 1551bc22652SAndreas Färber static int kvm_arch_sync_sregs(PowerPCCPU *cpu) 156d76d1650Saurel32 { 1571bc22652SAndreas Färber CPUPPCState *cenv = &cpu->env; 1581bc22652SAndreas Färber CPUState *cs = CPU(cpu); 159861bbc80SAlexander Graf struct kvm_sregs sregs; 1605666ca4aSScott Wood int ret; 1615666ca4aSScott Wood 1625666ca4aSScott Wood if (cenv->excp_model == POWERPC_EXCP_BOOKE) { 16364e07be5SAlexander Graf /* What we're really trying to say is "if we're on BookE, we use 16464e07be5SAlexander Graf the native PVR for now". This is the only sane way to check 16564e07be5SAlexander Graf it though, so we potentially confuse users that they can run 16664e07be5SAlexander Graf BookE guests on BookS. 
Let's hope nobody dares enough :) */ 1675666ca4aSScott Wood return 0; 1685666ca4aSScott Wood } else { 16990dc8812SScott Wood if (!cap_segstate) { 17064e07be5SAlexander Graf fprintf(stderr, "kvm error: missing PVR setting capability\n"); 17164e07be5SAlexander Graf return -ENOSYS; 1725666ca4aSScott Wood } 1735666ca4aSScott Wood } 1745666ca4aSScott Wood 1751bc22652SAndreas Färber ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs); 1765666ca4aSScott Wood if (ret) { 1775666ca4aSScott Wood return ret; 1785666ca4aSScott Wood } 179861bbc80SAlexander Graf 180861bbc80SAlexander Graf sregs.pvr = cenv->spr[SPR_PVR]; 1811bc22652SAndreas Färber return kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs); 1825666ca4aSScott Wood } 1835666ca4aSScott Wood 18493dd5e85SScott Wood /* Set up a shared TLB array with KVM */ 1851bc22652SAndreas Färber static int kvm_booke206_tlb_init(PowerPCCPU *cpu) 18693dd5e85SScott Wood { 1871bc22652SAndreas Färber CPUPPCState *env = &cpu->env; 1881bc22652SAndreas Färber CPUState *cs = CPU(cpu); 18993dd5e85SScott Wood struct kvm_book3e_206_tlb_params params = {}; 19093dd5e85SScott Wood struct kvm_config_tlb cfg = {}; 19193dd5e85SScott Wood unsigned int entries = 0; 19293dd5e85SScott Wood int ret, i; 19393dd5e85SScott Wood 19493dd5e85SScott Wood if (!kvm_enabled() || 195a60f24b5SAndreas Färber !kvm_check_extension(cs->kvm_state, KVM_CAP_SW_TLB)) { 19693dd5e85SScott Wood return 0; 19793dd5e85SScott Wood } 19893dd5e85SScott Wood 19993dd5e85SScott Wood assert(ARRAY_SIZE(params.tlb_sizes) == BOOKE206_MAX_TLBN); 20093dd5e85SScott Wood 20193dd5e85SScott Wood for (i = 0; i < BOOKE206_MAX_TLBN; i++) { 20293dd5e85SScott Wood params.tlb_sizes[i] = booke206_tlb_size(env, i); 20393dd5e85SScott Wood params.tlb_ways[i] = booke206_tlb_ways(env, i); 20493dd5e85SScott Wood entries += params.tlb_sizes[i]; 20593dd5e85SScott Wood } 20693dd5e85SScott Wood 20793dd5e85SScott Wood assert(entries == env->nb_tlb); 20893dd5e85SScott Wood assert(sizeof(struct kvm_book3e_206_tlb_entry) == 
sizeof(ppcmas_tlb_t)); 20993dd5e85SScott Wood 21093dd5e85SScott Wood env->tlb_dirty = true; 21193dd5e85SScott Wood 21293dd5e85SScott Wood cfg.array = (uintptr_t)env->tlb.tlbm; 21393dd5e85SScott Wood cfg.array_len = sizeof(ppcmas_tlb_t) * entries; 21493dd5e85SScott Wood cfg.params = (uintptr_t)¶ms; 21593dd5e85SScott Wood cfg.mmu_type = KVM_MMU_FSL_BOOKE_NOHV; 21693dd5e85SScott Wood 21748add816SCornelia Huck ret = kvm_vcpu_enable_cap(cs, KVM_CAP_SW_TLB, 0, (uintptr_t)&cfg); 21893dd5e85SScott Wood if (ret < 0) { 21993dd5e85SScott Wood fprintf(stderr, "%s: couldn't enable KVM_CAP_SW_TLB: %s\n", 22093dd5e85SScott Wood __func__, strerror(-ret)); 22193dd5e85SScott Wood return ret; 22293dd5e85SScott Wood } 22393dd5e85SScott Wood 22493dd5e85SScott Wood env->kvm_sw_tlb = true; 22593dd5e85SScott Wood return 0; 22693dd5e85SScott Wood } 22793dd5e85SScott Wood 2284656e1f0SBenjamin Herrenschmidt 2294656e1f0SBenjamin Herrenschmidt #if defined(TARGET_PPC64) 230a60f24b5SAndreas Färber static void kvm_get_fallback_smmu_info(PowerPCCPU *cpu, 2314656e1f0SBenjamin Herrenschmidt struct kvm_ppc_smmu_info *info) 2324656e1f0SBenjamin Herrenschmidt { 233a60f24b5SAndreas Färber CPUPPCState *env = &cpu->env; 234a60f24b5SAndreas Färber CPUState *cs = CPU(cpu); 235a60f24b5SAndreas Färber 2364656e1f0SBenjamin Herrenschmidt memset(info, 0, sizeof(*info)); 2374656e1f0SBenjamin Herrenschmidt 2384656e1f0SBenjamin Herrenschmidt /* We don't have the new KVM_PPC_GET_SMMU_INFO ioctl, so 2394656e1f0SBenjamin Herrenschmidt * need to "guess" what the supported page sizes are. 2404656e1f0SBenjamin Herrenschmidt * 2414656e1f0SBenjamin Herrenschmidt * For that to work we make a few assumptions: 2424656e1f0SBenjamin Herrenschmidt * 24396c9cff0SThomas Huth * - Check whether we are running "PR" KVM which only supports 4K 24496c9cff0SThomas Huth * and 16M pages, but supports them regardless of the backing 24596c9cff0SThomas Huth * store characteritics. We also don't support 1T segments. 
2464656e1f0SBenjamin Herrenschmidt * 2474656e1f0SBenjamin Herrenschmidt * This is safe as if HV KVM ever supports that capability or PR 2484656e1f0SBenjamin Herrenschmidt * KVM grows supports for more page/segment sizes, those versions 2494656e1f0SBenjamin Herrenschmidt * will have implemented KVM_CAP_PPC_GET_SMMU_INFO and thus we 2504656e1f0SBenjamin Herrenschmidt * will not hit this fallback 2514656e1f0SBenjamin Herrenschmidt * 2524656e1f0SBenjamin Herrenschmidt * - Else we are running HV KVM. This means we only support page 2534656e1f0SBenjamin Herrenschmidt * sizes that fit in the backing store. Additionally we only 2544656e1f0SBenjamin Herrenschmidt * advertize 64K pages if the processor is ARCH 2.06 and we assume 2554656e1f0SBenjamin Herrenschmidt * P7 encodings for the SLB and hash table. Here too, we assume 2564656e1f0SBenjamin Herrenschmidt * support for any newer processor will mean a kernel that 2574656e1f0SBenjamin Herrenschmidt * implements KVM_CAP_PPC_GET_SMMU_INFO and thus doesn't hit 2584656e1f0SBenjamin Herrenschmidt * this fallback. 
2594656e1f0SBenjamin Herrenschmidt */ 26096c9cff0SThomas Huth if (kvmppc_is_pr(cs->kvm_state)) { 2614656e1f0SBenjamin Herrenschmidt /* No flags */ 2624656e1f0SBenjamin Herrenschmidt info->flags = 0; 2634656e1f0SBenjamin Herrenschmidt info->slb_size = 64; 2644656e1f0SBenjamin Herrenschmidt 2654656e1f0SBenjamin Herrenschmidt /* Standard 4k base page size segment */ 2664656e1f0SBenjamin Herrenschmidt info->sps[0].page_shift = 12; 2674656e1f0SBenjamin Herrenschmidt info->sps[0].slb_enc = 0; 2684656e1f0SBenjamin Herrenschmidt info->sps[0].enc[0].page_shift = 12; 2694656e1f0SBenjamin Herrenschmidt info->sps[0].enc[0].pte_enc = 0; 2704656e1f0SBenjamin Herrenschmidt 2714656e1f0SBenjamin Herrenschmidt /* Standard 16M large page size segment */ 2724656e1f0SBenjamin Herrenschmidt info->sps[1].page_shift = 24; 2734656e1f0SBenjamin Herrenschmidt info->sps[1].slb_enc = SLB_VSID_L; 2744656e1f0SBenjamin Herrenschmidt info->sps[1].enc[0].page_shift = 24; 2754656e1f0SBenjamin Herrenschmidt info->sps[1].enc[0].pte_enc = 0; 2764656e1f0SBenjamin Herrenschmidt } else { 2774656e1f0SBenjamin Herrenschmidt int i = 0; 2784656e1f0SBenjamin Herrenschmidt 2794656e1f0SBenjamin Herrenschmidt /* HV KVM has backing store size restrictions */ 2804656e1f0SBenjamin Herrenschmidt info->flags = KVM_PPC_PAGE_SIZES_REAL; 2814656e1f0SBenjamin Herrenschmidt 2824656e1f0SBenjamin Herrenschmidt if (env->mmu_model & POWERPC_MMU_1TSEG) { 2834656e1f0SBenjamin Herrenschmidt info->flags |= KVM_PPC_1T_SEGMENTS; 2844656e1f0SBenjamin Herrenschmidt } 2854656e1f0SBenjamin Herrenschmidt 286aa4bb587SBenjamin Herrenschmidt if (env->mmu_model == POWERPC_MMU_2_06 || 287aa4bb587SBenjamin Herrenschmidt env->mmu_model == POWERPC_MMU_2_07) { 2884656e1f0SBenjamin Herrenschmidt info->slb_size = 32; 2894656e1f0SBenjamin Herrenschmidt } else { 2904656e1f0SBenjamin Herrenschmidt info->slb_size = 64; 2914656e1f0SBenjamin Herrenschmidt } 2924656e1f0SBenjamin Herrenschmidt 2934656e1f0SBenjamin Herrenschmidt /* Standard 4k base page 
size segment */ 2944656e1f0SBenjamin Herrenschmidt info->sps[i].page_shift = 12; 2954656e1f0SBenjamin Herrenschmidt info->sps[i].slb_enc = 0; 2964656e1f0SBenjamin Herrenschmidt info->sps[i].enc[0].page_shift = 12; 2974656e1f0SBenjamin Herrenschmidt info->sps[i].enc[0].pte_enc = 0; 2984656e1f0SBenjamin Herrenschmidt i++; 2994656e1f0SBenjamin Herrenschmidt 300aa4bb587SBenjamin Herrenschmidt /* 64K on MMU 2.06 and later */ 301aa4bb587SBenjamin Herrenschmidt if (env->mmu_model == POWERPC_MMU_2_06 || 302aa4bb587SBenjamin Herrenschmidt env->mmu_model == POWERPC_MMU_2_07) { 3034656e1f0SBenjamin Herrenschmidt info->sps[i].page_shift = 16; 3044656e1f0SBenjamin Herrenschmidt info->sps[i].slb_enc = 0x110; 3054656e1f0SBenjamin Herrenschmidt info->sps[i].enc[0].page_shift = 16; 3064656e1f0SBenjamin Herrenschmidt info->sps[i].enc[0].pte_enc = 1; 3074656e1f0SBenjamin Herrenschmidt i++; 3084656e1f0SBenjamin Herrenschmidt } 3094656e1f0SBenjamin Herrenschmidt 3104656e1f0SBenjamin Herrenschmidt /* Standard 16M large page size segment */ 3114656e1f0SBenjamin Herrenschmidt info->sps[i].page_shift = 24; 3124656e1f0SBenjamin Herrenschmidt info->sps[i].slb_enc = SLB_VSID_L; 3134656e1f0SBenjamin Herrenschmidt info->sps[i].enc[0].page_shift = 24; 3144656e1f0SBenjamin Herrenschmidt info->sps[i].enc[0].pte_enc = 0; 3154656e1f0SBenjamin Herrenschmidt } 3164656e1f0SBenjamin Herrenschmidt } 3174656e1f0SBenjamin Herrenschmidt 318a60f24b5SAndreas Färber static void kvm_get_smmu_info(PowerPCCPU *cpu, struct kvm_ppc_smmu_info *info) 3194656e1f0SBenjamin Herrenschmidt { 320a60f24b5SAndreas Färber CPUState *cs = CPU(cpu); 3214656e1f0SBenjamin Herrenschmidt int ret; 3224656e1f0SBenjamin Herrenschmidt 323a60f24b5SAndreas Färber if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) { 324a60f24b5SAndreas Färber ret = kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_SMMU_INFO, info); 3254656e1f0SBenjamin Herrenschmidt if (ret == 0) { 3264656e1f0SBenjamin Herrenschmidt return; 3274656e1f0SBenjamin 
Herrenschmidt } 3284656e1f0SBenjamin Herrenschmidt } 3294656e1f0SBenjamin Herrenschmidt 330a60f24b5SAndreas Färber kvm_get_fallback_smmu_info(cpu, info); 3314656e1f0SBenjamin Herrenschmidt } 3324656e1f0SBenjamin Herrenschmidt 3334656e1f0SBenjamin Herrenschmidt static bool kvm_valid_page_size(uint32_t flags, long rampgsize, uint32_t shift) 3344656e1f0SBenjamin Herrenschmidt { 3354656e1f0SBenjamin Herrenschmidt if (!(flags & KVM_PPC_PAGE_SIZES_REAL)) { 3364656e1f0SBenjamin Herrenschmidt return true; 3374656e1f0SBenjamin Herrenschmidt } 3384656e1f0SBenjamin Herrenschmidt 3394656e1f0SBenjamin Herrenschmidt return (1ul << shift) <= rampgsize; 3404656e1f0SBenjamin Herrenschmidt } 3414656e1f0SBenjamin Herrenschmidt 342df587133SThomas Huth static long max_cpu_page_size; 343df587133SThomas Huth 344a60f24b5SAndreas Färber static void kvm_fixup_page_sizes(PowerPCCPU *cpu) 3454656e1f0SBenjamin Herrenschmidt { 3464656e1f0SBenjamin Herrenschmidt static struct kvm_ppc_smmu_info smmu_info; 3474656e1f0SBenjamin Herrenschmidt static bool has_smmu_info; 348a60f24b5SAndreas Färber CPUPPCState *env = &cpu->env; 3494656e1f0SBenjamin Herrenschmidt int iq, ik, jq, jk; 3500d594f55SThomas Huth bool has_64k_pages = false; 3514656e1f0SBenjamin Herrenschmidt 3524656e1f0SBenjamin Herrenschmidt /* We only handle page sizes for 64-bit server guests for now */ 3534656e1f0SBenjamin Herrenschmidt if (!(env->mmu_model & POWERPC_MMU_64)) { 3544656e1f0SBenjamin Herrenschmidt return; 3554656e1f0SBenjamin Herrenschmidt } 3564656e1f0SBenjamin Herrenschmidt 3574656e1f0SBenjamin Herrenschmidt /* Collect MMU info from kernel if not already */ 3584656e1f0SBenjamin Herrenschmidt if (!has_smmu_info) { 359a60f24b5SAndreas Färber kvm_get_smmu_info(cpu, &smmu_info); 3604656e1f0SBenjamin Herrenschmidt has_smmu_info = true; 3614656e1f0SBenjamin Herrenschmidt } 3624656e1f0SBenjamin Herrenschmidt 363df587133SThomas Huth if (!max_cpu_page_size) { 364*9c607668SAlexey Kardashevskiy max_cpu_page_size = 
qemu_getrampagesize(); 365df587133SThomas Huth } 3664656e1f0SBenjamin Herrenschmidt 3674656e1f0SBenjamin Herrenschmidt /* Convert to QEMU form */ 3684656e1f0SBenjamin Herrenschmidt memset(&env->sps, 0, sizeof(env->sps)); 3694656e1f0SBenjamin Herrenschmidt 37090da0d5aSBenjamin Herrenschmidt /* If we have HV KVM, we need to forbid CI large pages if our 37190da0d5aSBenjamin Herrenschmidt * host page size is smaller than 64K. 37290da0d5aSBenjamin Herrenschmidt */ 37390da0d5aSBenjamin Herrenschmidt if (smmu_info.flags & KVM_PPC_PAGE_SIZES_REAL) { 37490da0d5aSBenjamin Herrenschmidt env->ci_large_pages = getpagesize() >= 0x10000; 37590da0d5aSBenjamin Herrenschmidt } 37690da0d5aSBenjamin Herrenschmidt 37708215d8fSAlexander Graf /* 37808215d8fSAlexander Graf * XXX This loop should be an entry wide AND of the capabilities that 37908215d8fSAlexander Graf * the selected CPU has with the capabilities that KVM supports. 38008215d8fSAlexander Graf */ 3814656e1f0SBenjamin Herrenschmidt for (ik = iq = 0; ik < KVM_PPC_PAGE_SIZES_MAX_SZ; ik++) { 3824656e1f0SBenjamin Herrenschmidt struct ppc_one_seg_page_size *qsps = &env->sps.sps[iq]; 3834656e1f0SBenjamin Herrenschmidt struct kvm_ppc_one_seg_page_size *ksps = &smmu_info.sps[ik]; 3844656e1f0SBenjamin Herrenschmidt 385df587133SThomas Huth if (!kvm_valid_page_size(smmu_info.flags, max_cpu_page_size, 3864656e1f0SBenjamin Herrenschmidt ksps->page_shift)) { 3874656e1f0SBenjamin Herrenschmidt continue; 3884656e1f0SBenjamin Herrenschmidt } 3894656e1f0SBenjamin Herrenschmidt qsps->page_shift = ksps->page_shift; 3904656e1f0SBenjamin Herrenschmidt qsps->slb_enc = ksps->slb_enc; 3914656e1f0SBenjamin Herrenschmidt for (jk = jq = 0; jk < KVM_PPC_PAGE_SIZES_MAX_SZ; jk++) { 392df587133SThomas Huth if (!kvm_valid_page_size(smmu_info.flags, max_cpu_page_size, 3934656e1f0SBenjamin Herrenschmidt ksps->enc[jk].page_shift)) { 3944656e1f0SBenjamin Herrenschmidt continue; 3954656e1f0SBenjamin Herrenschmidt } 3960d594f55SThomas Huth if 
(ksps->enc[jk].page_shift == 16) { 3970d594f55SThomas Huth has_64k_pages = true; 3980d594f55SThomas Huth } 3994656e1f0SBenjamin Herrenschmidt qsps->enc[jq].page_shift = ksps->enc[jk].page_shift; 4004656e1f0SBenjamin Herrenschmidt qsps->enc[jq].pte_enc = ksps->enc[jk].pte_enc; 4014656e1f0SBenjamin Herrenschmidt if (++jq >= PPC_PAGE_SIZES_MAX_SZ) { 4024656e1f0SBenjamin Herrenschmidt break; 4034656e1f0SBenjamin Herrenschmidt } 4044656e1f0SBenjamin Herrenschmidt } 4054656e1f0SBenjamin Herrenschmidt if (++iq >= PPC_PAGE_SIZES_MAX_SZ) { 4064656e1f0SBenjamin Herrenschmidt break; 4074656e1f0SBenjamin Herrenschmidt } 4084656e1f0SBenjamin Herrenschmidt } 4094656e1f0SBenjamin Herrenschmidt env->slb_nr = smmu_info.slb_size; 41008215d8fSAlexander Graf if (!(smmu_info.flags & KVM_PPC_1T_SEGMENTS)) { 4114656e1f0SBenjamin Herrenschmidt env->mmu_model &= ~POWERPC_MMU_1TSEG; 4124656e1f0SBenjamin Herrenschmidt } 4130d594f55SThomas Huth if (!has_64k_pages) { 4140d594f55SThomas Huth env->mmu_model &= ~POWERPC_MMU_64K; 4150d594f55SThomas Huth } 4164656e1f0SBenjamin Herrenschmidt } 417df587133SThomas Huth 418df587133SThomas Huth bool kvmppc_is_mem_backend_page_size_ok(char *obj_path) 419df587133SThomas Huth { 420df587133SThomas Huth Object *mem_obj = object_resolve_path(obj_path, NULL); 421df587133SThomas Huth char *mempath = object_property_get_str(mem_obj, "mem-path", NULL); 422df587133SThomas Huth long pagesize; 423df587133SThomas Huth 424df587133SThomas Huth if (mempath) { 425*9c607668SAlexey Kardashevskiy pagesize = qemu_mempath_getpagesize(mempath); 426df587133SThomas Huth } else { 427df587133SThomas Huth pagesize = getpagesize(); 428df587133SThomas Huth } 429df587133SThomas Huth 430df587133SThomas Huth return pagesize >= max_cpu_page_size; 431df587133SThomas Huth } 432df587133SThomas Huth 4334656e1f0SBenjamin Herrenschmidt #else /* defined (TARGET_PPC64) */ 4344656e1f0SBenjamin Herrenschmidt 435a60f24b5SAndreas Färber static inline void kvm_fixup_page_sizes(PowerPCCPU *cpu) 
4364656e1f0SBenjamin Herrenschmidt { 4374656e1f0SBenjamin Herrenschmidt } 4384656e1f0SBenjamin Herrenschmidt 439df587133SThomas Huth bool kvmppc_is_mem_backend_page_size_ok(char *obj_path) 440df587133SThomas Huth { 441df587133SThomas Huth return true; 442df587133SThomas Huth } 443df587133SThomas Huth 4444656e1f0SBenjamin Herrenschmidt #endif /* !defined (TARGET_PPC64) */ 4454656e1f0SBenjamin Herrenschmidt 446b164e48eSEduardo Habkost unsigned long kvm_arch_vcpu_id(CPUState *cpu) 447b164e48eSEduardo Habkost { 4480f20ba62SAlexey Kardashevskiy return ppc_get_vcpu_dt_id(POWERPC_CPU(cpu)); 449b164e48eSEduardo Habkost } 450b164e48eSEduardo Habkost 45188365d17SBharat Bhushan /* e500 supports 2 h/w breakpoint and 2 watchpoint. 45288365d17SBharat Bhushan * book3s supports only 1 watchpoint, so array size 45388365d17SBharat Bhushan * of 4 is sufficient for now. 45488365d17SBharat Bhushan */ 45588365d17SBharat Bhushan #define MAX_HW_BKPTS 4 45688365d17SBharat Bhushan 45788365d17SBharat Bhushan static struct HWBreakpoint { 45888365d17SBharat Bhushan target_ulong addr; 45988365d17SBharat Bhushan int type; 46088365d17SBharat Bhushan } hw_debug_points[MAX_HW_BKPTS]; 46188365d17SBharat Bhushan 46288365d17SBharat Bhushan static CPUWatchpoint hw_watchpoint; 46388365d17SBharat Bhushan 46488365d17SBharat Bhushan /* Default there is no breakpoint and watchpoint supported */ 46588365d17SBharat Bhushan static int max_hw_breakpoint; 46688365d17SBharat Bhushan static int max_hw_watchpoint; 46788365d17SBharat Bhushan static int nb_hw_breakpoint; 46888365d17SBharat Bhushan static int nb_hw_watchpoint; 46988365d17SBharat Bhushan 47088365d17SBharat Bhushan static void kvmppc_hw_debug_points_init(CPUPPCState *cenv) 47188365d17SBharat Bhushan { 47288365d17SBharat Bhushan if (cenv->excp_model == POWERPC_EXCP_BOOKE) { 47388365d17SBharat Bhushan max_hw_breakpoint = 2; 47488365d17SBharat Bhushan max_hw_watchpoint = 2; 47588365d17SBharat Bhushan } 47688365d17SBharat Bhushan 47788365d17SBharat Bhushan 
if ((max_hw_breakpoint + max_hw_watchpoint) > MAX_HW_BKPTS) { 47888365d17SBharat Bhushan fprintf(stderr, "Error initializing h/w breakpoints\n"); 47988365d17SBharat Bhushan return; 48088365d17SBharat Bhushan } 48188365d17SBharat Bhushan } 48288365d17SBharat Bhushan 48320d695a9SAndreas Färber int kvm_arch_init_vcpu(CPUState *cs) 4845666ca4aSScott Wood { 48520d695a9SAndreas Färber PowerPCCPU *cpu = POWERPC_CPU(cs); 48620d695a9SAndreas Färber CPUPPCState *cenv = &cpu->env; 4875666ca4aSScott Wood int ret; 4885666ca4aSScott Wood 4894656e1f0SBenjamin Herrenschmidt /* Gather server mmu info from KVM and update the CPU state */ 490a60f24b5SAndreas Färber kvm_fixup_page_sizes(cpu); 4914656e1f0SBenjamin Herrenschmidt 4924656e1f0SBenjamin Herrenschmidt /* Synchronize sregs with kvm */ 4931bc22652SAndreas Färber ret = kvm_arch_sync_sregs(cpu); 4945666ca4aSScott Wood if (ret) { 495388e47c7SThomas Huth if (ret == -EINVAL) { 496388e47c7SThomas Huth error_report("Register sync failed... If you're using kvm-hv.ko," 497388e47c7SThomas Huth " only \"-cpu host\" is possible"); 498388e47c7SThomas Huth } 4995666ca4aSScott Wood return ret; 5005666ca4aSScott Wood } 501861bbc80SAlexander Graf 502bc72ad67SAlex Bligh idle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, kvm_kick_cpu, cpu); 503c821c2bdSAlexander Graf 50493dd5e85SScott Wood switch (cenv->mmu_model) { 50593dd5e85SScott Wood case POWERPC_MMU_BOOKE206: 5067f516c96SThomas Huth /* This target supports access to KVM's guest TLB */ 5071bc22652SAndreas Färber ret = kvm_booke206_tlb_init(cpu); 50893dd5e85SScott Wood break; 5097f516c96SThomas Huth case POWERPC_MMU_2_07: 5107f516c96SThomas Huth if (!cap_htm && !kvmppc_is_pr(cs->kvm_state)) { 5117f516c96SThomas Huth /* KVM-HV has transactional memory on POWER8 also without the 5127f516c96SThomas Huth * KVM_CAP_PPC_HTM extension, so enable it here instead. 
*/ 5137f516c96SThomas Huth cap_htm = true; 5147f516c96SThomas Huth } 5157f516c96SThomas Huth break; 51693dd5e85SScott Wood default: 51793dd5e85SScott Wood break; 51893dd5e85SScott Wood } 51993dd5e85SScott Wood 5203c902d44SBharat Bhushan kvm_get_one_reg(cs, KVM_REG_PPC_DEBUG_INST, &debug_inst_opcode); 52188365d17SBharat Bhushan kvmppc_hw_debug_points_init(cenv); 5223c902d44SBharat Bhushan 523861bbc80SAlexander Graf return ret; 524d76d1650Saurel32 } 525d76d1650Saurel32 5261bc22652SAndreas Färber static void kvm_sw_tlb_put(PowerPCCPU *cpu) 52793dd5e85SScott Wood { 5281bc22652SAndreas Färber CPUPPCState *env = &cpu->env; 5291bc22652SAndreas Färber CPUState *cs = CPU(cpu); 53093dd5e85SScott Wood struct kvm_dirty_tlb dirty_tlb; 53193dd5e85SScott Wood unsigned char *bitmap; 53293dd5e85SScott Wood int ret; 53393dd5e85SScott Wood 53493dd5e85SScott Wood if (!env->kvm_sw_tlb) { 53593dd5e85SScott Wood return; 53693dd5e85SScott Wood } 53793dd5e85SScott Wood 53893dd5e85SScott Wood bitmap = g_malloc((env->nb_tlb + 7) / 8); 53993dd5e85SScott Wood memset(bitmap, 0xFF, (env->nb_tlb + 7) / 8); 54093dd5e85SScott Wood 54193dd5e85SScott Wood dirty_tlb.bitmap = (uintptr_t)bitmap; 54293dd5e85SScott Wood dirty_tlb.num_dirty = env->nb_tlb; 54393dd5e85SScott Wood 5441bc22652SAndreas Färber ret = kvm_vcpu_ioctl(cs, KVM_DIRTY_TLB, &dirty_tlb); 54593dd5e85SScott Wood if (ret) { 54693dd5e85SScott Wood fprintf(stderr, "%s: KVM_DIRTY_TLB: %s\n", 54793dd5e85SScott Wood __func__, strerror(-ret)); 54893dd5e85SScott Wood } 54993dd5e85SScott Wood 55093dd5e85SScott Wood g_free(bitmap); 55193dd5e85SScott Wood } 55293dd5e85SScott Wood 553d67d40eaSDavid Gibson static void kvm_get_one_spr(CPUState *cs, uint64_t id, int spr) 554d67d40eaSDavid Gibson { 555d67d40eaSDavid Gibson PowerPCCPU *cpu = POWERPC_CPU(cs); 556d67d40eaSDavid Gibson CPUPPCState *env = &cpu->env; 557d67d40eaSDavid Gibson union { 558d67d40eaSDavid Gibson uint32_t u32; 559d67d40eaSDavid Gibson uint64_t u64; 560d67d40eaSDavid Gibson } val; 
561d67d40eaSDavid Gibson struct kvm_one_reg reg = { 562d67d40eaSDavid Gibson .id = id, 563d67d40eaSDavid Gibson .addr = (uintptr_t) &val, 564d67d40eaSDavid Gibson }; 565d67d40eaSDavid Gibson int ret; 566d67d40eaSDavid Gibson 567d67d40eaSDavid Gibson ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®); 568d67d40eaSDavid Gibson if (ret != 0) { 569b36f100eSAlexey Kardashevskiy trace_kvm_failed_spr_get(spr, strerror(errno)); 570d67d40eaSDavid Gibson } else { 571d67d40eaSDavid Gibson switch (id & KVM_REG_SIZE_MASK) { 572d67d40eaSDavid Gibson case KVM_REG_SIZE_U32: 573d67d40eaSDavid Gibson env->spr[spr] = val.u32; 574d67d40eaSDavid Gibson break; 575d67d40eaSDavid Gibson 576d67d40eaSDavid Gibson case KVM_REG_SIZE_U64: 577d67d40eaSDavid Gibson env->spr[spr] = val.u64; 578d67d40eaSDavid Gibson break; 579d67d40eaSDavid Gibson 580d67d40eaSDavid Gibson default: 581d67d40eaSDavid Gibson /* Don't handle this size yet */ 582d67d40eaSDavid Gibson abort(); 583d67d40eaSDavid Gibson } 584d67d40eaSDavid Gibson } 585d67d40eaSDavid Gibson } 586d67d40eaSDavid Gibson 587d67d40eaSDavid Gibson static void kvm_put_one_spr(CPUState *cs, uint64_t id, int spr) 588d67d40eaSDavid Gibson { 589d67d40eaSDavid Gibson PowerPCCPU *cpu = POWERPC_CPU(cs); 590d67d40eaSDavid Gibson CPUPPCState *env = &cpu->env; 591d67d40eaSDavid Gibson union { 592d67d40eaSDavid Gibson uint32_t u32; 593d67d40eaSDavid Gibson uint64_t u64; 594d67d40eaSDavid Gibson } val; 595d67d40eaSDavid Gibson struct kvm_one_reg reg = { 596d67d40eaSDavid Gibson .id = id, 597d67d40eaSDavid Gibson .addr = (uintptr_t) &val, 598d67d40eaSDavid Gibson }; 599d67d40eaSDavid Gibson int ret; 600d67d40eaSDavid Gibson 601d67d40eaSDavid Gibson switch (id & KVM_REG_SIZE_MASK) { 602d67d40eaSDavid Gibson case KVM_REG_SIZE_U32: 603d67d40eaSDavid Gibson val.u32 = env->spr[spr]; 604d67d40eaSDavid Gibson break; 605d67d40eaSDavid Gibson 606d67d40eaSDavid Gibson case KVM_REG_SIZE_U64: 607d67d40eaSDavid Gibson val.u64 = env->spr[spr]; 608d67d40eaSDavid Gibson break; 
609d67d40eaSDavid Gibson 610d67d40eaSDavid Gibson default: 611d67d40eaSDavid Gibson /* Don't handle this size yet */ 612d67d40eaSDavid Gibson abort(); 613d67d40eaSDavid Gibson } 614d67d40eaSDavid Gibson 615d67d40eaSDavid Gibson ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®); 616d67d40eaSDavid Gibson if (ret != 0) { 617b36f100eSAlexey Kardashevskiy trace_kvm_failed_spr_set(spr, strerror(errno)); 618d67d40eaSDavid Gibson } 619d67d40eaSDavid Gibson } 620d67d40eaSDavid Gibson 62170b79849SDavid Gibson static int kvm_put_fp(CPUState *cs) 62270b79849SDavid Gibson { 62370b79849SDavid Gibson PowerPCCPU *cpu = POWERPC_CPU(cs); 62470b79849SDavid Gibson CPUPPCState *env = &cpu->env; 62570b79849SDavid Gibson struct kvm_one_reg reg; 62670b79849SDavid Gibson int i; 62770b79849SDavid Gibson int ret; 62870b79849SDavid Gibson 62970b79849SDavid Gibson if (env->insns_flags & PPC_FLOAT) { 63070b79849SDavid Gibson uint64_t fpscr = env->fpscr; 63170b79849SDavid Gibson bool vsx = !!(env->insns_flags2 & PPC2_VSX); 63270b79849SDavid Gibson 63370b79849SDavid Gibson reg.id = KVM_REG_PPC_FPSCR; 63470b79849SDavid Gibson reg.addr = (uintptr_t)&fpscr; 63570b79849SDavid Gibson ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®); 63670b79849SDavid Gibson if (ret < 0) { 637da56ff91SPeter Maydell DPRINTF("Unable to set FPSCR to KVM: %s\n", strerror(errno)); 63870b79849SDavid Gibson return ret; 63970b79849SDavid Gibson } 64070b79849SDavid Gibson 64170b79849SDavid Gibson for (i = 0; i < 32; i++) { 64270b79849SDavid Gibson uint64_t vsr[2]; 64370b79849SDavid Gibson 6443a4b791bSGreg Kurz #ifdef HOST_WORDS_BIGENDIAN 64570b79849SDavid Gibson vsr[0] = float64_val(env->fpr[i]); 64670b79849SDavid Gibson vsr[1] = env->vsr[i]; 6473a4b791bSGreg Kurz #else 6483a4b791bSGreg Kurz vsr[0] = env->vsr[i]; 6493a4b791bSGreg Kurz vsr[1] = float64_val(env->fpr[i]); 6503a4b791bSGreg Kurz #endif 65170b79849SDavid Gibson reg.addr = (uintptr_t) &vsr; 65270b79849SDavid Gibson reg.id = vsx ? 
KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i); 65370b79849SDavid Gibson 65470b79849SDavid Gibson ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®); 65570b79849SDavid Gibson if (ret < 0) { 656da56ff91SPeter Maydell DPRINTF("Unable to set %s%d to KVM: %s\n", vsx ? "VSR" : "FPR", 65770b79849SDavid Gibson i, strerror(errno)); 65870b79849SDavid Gibson return ret; 65970b79849SDavid Gibson } 66070b79849SDavid Gibson } 66170b79849SDavid Gibson } 66270b79849SDavid Gibson 66370b79849SDavid Gibson if (env->insns_flags & PPC_ALTIVEC) { 66470b79849SDavid Gibson reg.id = KVM_REG_PPC_VSCR; 66570b79849SDavid Gibson reg.addr = (uintptr_t)&env->vscr; 66670b79849SDavid Gibson ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®); 66770b79849SDavid Gibson if (ret < 0) { 668da56ff91SPeter Maydell DPRINTF("Unable to set VSCR to KVM: %s\n", strerror(errno)); 66970b79849SDavid Gibson return ret; 67070b79849SDavid Gibson } 67170b79849SDavid Gibson 67270b79849SDavid Gibson for (i = 0; i < 32; i++) { 67370b79849SDavid Gibson reg.id = KVM_REG_PPC_VR(i); 67470b79849SDavid Gibson reg.addr = (uintptr_t)&env->avr[i]; 67570b79849SDavid Gibson ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®); 67670b79849SDavid Gibson if (ret < 0) { 677da56ff91SPeter Maydell DPRINTF("Unable to set VR%d to KVM: %s\n", i, strerror(errno)); 67870b79849SDavid Gibson return ret; 67970b79849SDavid Gibson } 68070b79849SDavid Gibson } 68170b79849SDavid Gibson } 68270b79849SDavid Gibson 68370b79849SDavid Gibson return 0; 68470b79849SDavid Gibson } 68570b79849SDavid Gibson 68670b79849SDavid Gibson static int kvm_get_fp(CPUState *cs) 68770b79849SDavid Gibson { 68870b79849SDavid Gibson PowerPCCPU *cpu = POWERPC_CPU(cs); 68970b79849SDavid Gibson CPUPPCState *env = &cpu->env; 69070b79849SDavid Gibson struct kvm_one_reg reg; 69170b79849SDavid Gibson int i; 69270b79849SDavid Gibson int ret; 69370b79849SDavid Gibson 69470b79849SDavid Gibson if (env->insns_flags & PPC_FLOAT) { 69570b79849SDavid Gibson uint64_t fpscr; 69670b79849SDavid Gibson bool vsx = 
!!(env->insns_flags2 & PPC2_VSX); 69770b79849SDavid Gibson 69870b79849SDavid Gibson reg.id = KVM_REG_PPC_FPSCR; 69970b79849SDavid Gibson reg.addr = (uintptr_t)&fpscr; 70070b79849SDavid Gibson ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®); 70170b79849SDavid Gibson if (ret < 0) { 702da56ff91SPeter Maydell DPRINTF("Unable to get FPSCR from KVM: %s\n", strerror(errno)); 70370b79849SDavid Gibson return ret; 70470b79849SDavid Gibson } else { 70570b79849SDavid Gibson env->fpscr = fpscr; 70670b79849SDavid Gibson } 70770b79849SDavid Gibson 70870b79849SDavid Gibson for (i = 0; i < 32; i++) { 70970b79849SDavid Gibson uint64_t vsr[2]; 71070b79849SDavid Gibson 71170b79849SDavid Gibson reg.addr = (uintptr_t) &vsr; 71270b79849SDavid Gibson reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i); 71370b79849SDavid Gibson 71470b79849SDavid Gibson ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®); 71570b79849SDavid Gibson if (ret < 0) { 716da56ff91SPeter Maydell DPRINTF("Unable to get %s%d from KVM: %s\n", 71770b79849SDavid Gibson vsx ? 
"VSR" : "FPR", i, strerror(errno)); 71870b79849SDavid Gibson return ret; 71970b79849SDavid Gibson } else { 7203a4b791bSGreg Kurz #ifdef HOST_WORDS_BIGENDIAN 72170b79849SDavid Gibson env->fpr[i] = vsr[0]; 72270b79849SDavid Gibson if (vsx) { 72370b79849SDavid Gibson env->vsr[i] = vsr[1]; 72470b79849SDavid Gibson } 7253a4b791bSGreg Kurz #else 7263a4b791bSGreg Kurz env->fpr[i] = vsr[1]; 7273a4b791bSGreg Kurz if (vsx) { 7283a4b791bSGreg Kurz env->vsr[i] = vsr[0]; 7293a4b791bSGreg Kurz } 7303a4b791bSGreg Kurz #endif 73170b79849SDavid Gibson } 73270b79849SDavid Gibson } 73370b79849SDavid Gibson } 73470b79849SDavid Gibson 73570b79849SDavid Gibson if (env->insns_flags & PPC_ALTIVEC) { 73670b79849SDavid Gibson reg.id = KVM_REG_PPC_VSCR; 73770b79849SDavid Gibson reg.addr = (uintptr_t)&env->vscr; 73870b79849SDavid Gibson ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®); 73970b79849SDavid Gibson if (ret < 0) { 740da56ff91SPeter Maydell DPRINTF("Unable to get VSCR from KVM: %s\n", strerror(errno)); 74170b79849SDavid Gibson return ret; 74270b79849SDavid Gibson } 74370b79849SDavid Gibson 74470b79849SDavid Gibson for (i = 0; i < 32; i++) { 74570b79849SDavid Gibson reg.id = KVM_REG_PPC_VR(i); 74670b79849SDavid Gibson reg.addr = (uintptr_t)&env->avr[i]; 74770b79849SDavid Gibson ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®); 74870b79849SDavid Gibson if (ret < 0) { 749da56ff91SPeter Maydell DPRINTF("Unable to get VR%d from KVM: %s\n", 75070b79849SDavid Gibson i, strerror(errno)); 75170b79849SDavid Gibson return ret; 75270b79849SDavid Gibson } 75370b79849SDavid Gibson } 75470b79849SDavid Gibson } 75570b79849SDavid Gibson 75670b79849SDavid Gibson return 0; 75770b79849SDavid Gibson } 75870b79849SDavid Gibson 7599b00ea49SDavid Gibson #if defined(TARGET_PPC64) 7609b00ea49SDavid Gibson static int kvm_get_vpa(CPUState *cs) 7619b00ea49SDavid Gibson { 7629b00ea49SDavid Gibson PowerPCCPU *cpu = POWERPC_CPU(cs); 7639b00ea49SDavid Gibson CPUPPCState *env = &cpu->env; 7649b00ea49SDavid Gibson struct 
kvm_one_reg reg; 7659b00ea49SDavid Gibson int ret; 7669b00ea49SDavid Gibson 7679b00ea49SDavid Gibson reg.id = KVM_REG_PPC_VPA_ADDR; 7689b00ea49SDavid Gibson reg.addr = (uintptr_t)&env->vpa_addr; 7699b00ea49SDavid Gibson ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®); 7709b00ea49SDavid Gibson if (ret < 0) { 771da56ff91SPeter Maydell DPRINTF("Unable to get VPA address from KVM: %s\n", strerror(errno)); 7729b00ea49SDavid Gibson return ret; 7739b00ea49SDavid Gibson } 7749b00ea49SDavid Gibson 7759b00ea49SDavid Gibson assert((uintptr_t)&env->slb_shadow_size 7769b00ea49SDavid Gibson == ((uintptr_t)&env->slb_shadow_addr + 8)); 7779b00ea49SDavid Gibson reg.id = KVM_REG_PPC_VPA_SLB; 7789b00ea49SDavid Gibson reg.addr = (uintptr_t)&env->slb_shadow_addr; 7799b00ea49SDavid Gibson ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®); 7809b00ea49SDavid Gibson if (ret < 0) { 781da56ff91SPeter Maydell DPRINTF("Unable to get SLB shadow state from KVM: %s\n", 7829b00ea49SDavid Gibson strerror(errno)); 7839b00ea49SDavid Gibson return ret; 7849b00ea49SDavid Gibson } 7859b00ea49SDavid Gibson 7869b00ea49SDavid Gibson assert((uintptr_t)&env->dtl_size == ((uintptr_t)&env->dtl_addr + 8)); 7879b00ea49SDavid Gibson reg.id = KVM_REG_PPC_VPA_DTL; 7889b00ea49SDavid Gibson reg.addr = (uintptr_t)&env->dtl_addr; 7899b00ea49SDavid Gibson ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®); 7909b00ea49SDavid Gibson if (ret < 0) { 791da56ff91SPeter Maydell DPRINTF("Unable to get dispatch trace log state from KVM: %s\n", 7929b00ea49SDavid Gibson strerror(errno)); 7939b00ea49SDavid Gibson return ret; 7949b00ea49SDavid Gibson } 7959b00ea49SDavid Gibson 7969b00ea49SDavid Gibson return 0; 7979b00ea49SDavid Gibson } 7989b00ea49SDavid Gibson 7999b00ea49SDavid Gibson static int kvm_put_vpa(CPUState *cs) 8009b00ea49SDavid Gibson { 8019b00ea49SDavid Gibson PowerPCCPU *cpu = POWERPC_CPU(cs); 8029b00ea49SDavid Gibson CPUPPCState *env = &cpu->env; 8039b00ea49SDavid Gibson struct kvm_one_reg reg; 8049b00ea49SDavid Gibson int ret; 
8059b00ea49SDavid Gibson 8069b00ea49SDavid Gibson /* SLB shadow or DTL can't be registered unless a master VPA is 8079b00ea49SDavid Gibson * registered. That means when restoring state, if a VPA *is* 8089b00ea49SDavid Gibson * registered, we need to set that up first. If not, we need to 8099b00ea49SDavid Gibson * deregister the others before deregistering the master VPA */ 8109b00ea49SDavid Gibson assert(env->vpa_addr || !(env->slb_shadow_addr || env->dtl_addr)); 8119b00ea49SDavid Gibson 8129b00ea49SDavid Gibson if (env->vpa_addr) { 8139b00ea49SDavid Gibson reg.id = KVM_REG_PPC_VPA_ADDR; 8149b00ea49SDavid Gibson reg.addr = (uintptr_t)&env->vpa_addr; 8159b00ea49SDavid Gibson ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®); 8169b00ea49SDavid Gibson if (ret < 0) { 817da56ff91SPeter Maydell DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno)); 8189b00ea49SDavid Gibson return ret; 8199b00ea49SDavid Gibson } 8209b00ea49SDavid Gibson } 8219b00ea49SDavid Gibson 8229b00ea49SDavid Gibson assert((uintptr_t)&env->slb_shadow_size 8239b00ea49SDavid Gibson == ((uintptr_t)&env->slb_shadow_addr + 8)); 8249b00ea49SDavid Gibson reg.id = KVM_REG_PPC_VPA_SLB; 8259b00ea49SDavid Gibson reg.addr = (uintptr_t)&env->slb_shadow_addr; 8269b00ea49SDavid Gibson ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®); 8279b00ea49SDavid Gibson if (ret < 0) { 828da56ff91SPeter Maydell DPRINTF("Unable to set SLB shadow state to KVM: %s\n", strerror(errno)); 8299b00ea49SDavid Gibson return ret; 8309b00ea49SDavid Gibson } 8319b00ea49SDavid Gibson 8329b00ea49SDavid Gibson assert((uintptr_t)&env->dtl_size == ((uintptr_t)&env->dtl_addr + 8)); 8339b00ea49SDavid Gibson reg.id = KVM_REG_PPC_VPA_DTL; 8349b00ea49SDavid Gibson reg.addr = (uintptr_t)&env->dtl_addr; 8359b00ea49SDavid Gibson ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®); 8369b00ea49SDavid Gibson if (ret < 0) { 837da56ff91SPeter Maydell DPRINTF("Unable to set dispatch trace log state to KVM: %s\n", 8389b00ea49SDavid Gibson strerror(errno)); 
8399b00ea49SDavid Gibson return ret; 8409b00ea49SDavid Gibson } 8419b00ea49SDavid Gibson 8429b00ea49SDavid Gibson if (!env->vpa_addr) { 8439b00ea49SDavid Gibson reg.id = KVM_REG_PPC_VPA_ADDR; 8449b00ea49SDavid Gibson reg.addr = (uintptr_t)&env->vpa_addr; 8459b00ea49SDavid Gibson ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®); 8469b00ea49SDavid Gibson if (ret < 0) { 847da56ff91SPeter Maydell DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno)); 8489b00ea49SDavid Gibson return ret; 8499b00ea49SDavid Gibson } 8509b00ea49SDavid Gibson } 8519b00ea49SDavid Gibson 8529b00ea49SDavid Gibson return 0; 8539b00ea49SDavid Gibson } 8549b00ea49SDavid Gibson #endif /* TARGET_PPC64 */ 8559b00ea49SDavid Gibson 856e5c0d3ceSDavid Gibson int kvmppc_put_books_sregs(PowerPCCPU *cpu) 857a7a00a72SDavid Gibson { 858a7a00a72SDavid Gibson CPUPPCState *env = &cpu->env; 859a7a00a72SDavid Gibson struct kvm_sregs sregs; 860a7a00a72SDavid Gibson int i; 861a7a00a72SDavid Gibson 862a7a00a72SDavid Gibson sregs.pvr = env->spr[SPR_PVR]; 863a7a00a72SDavid Gibson 864a7a00a72SDavid Gibson sregs.u.s.sdr1 = env->spr[SPR_SDR1]; 865a7a00a72SDavid Gibson 866a7a00a72SDavid Gibson /* Sync SLB */ 867a7a00a72SDavid Gibson #ifdef TARGET_PPC64 868a7a00a72SDavid Gibson for (i = 0; i < ARRAY_SIZE(env->slb); i++) { 869a7a00a72SDavid Gibson sregs.u.s.ppc64.slb[i].slbe = env->slb[i].esid; 870a7a00a72SDavid Gibson if (env->slb[i].esid & SLB_ESID_V) { 871a7a00a72SDavid Gibson sregs.u.s.ppc64.slb[i].slbe |= i; 872a7a00a72SDavid Gibson } 873a7a00a72SDavid Gibson sregs.u.s.ppc64.slb[i].slbv = env->slb[i].vsid; 874a7a00a72SDavid Gibson } 875a7a00a72SDavid Gibson #endif 876a7a00a72SDavid Gibson 877a7a00a72SDavid Gibson /* Sync SRs */ 878a7a00a72SDavid Gibson for (i = 0; i < 16; i++) { 879a7a00a72SDavid Gibson sregs.u.s.ppc32.sr[i] = env->sr[i]; 880a7a00a72SDavid Gibson } 881a7a00a72SDavid Gibson 882a7a00a72SDavid Gibson /* Sync BATs */ 883a7a00a72SDavid Gibson for (i = 0; i < 8; i++) { 884a7a00a72SDavid Gibson /* 
Beware. We have to swap upper and lower bits here */ 885a7a00a72SDavid Gibson sregs.u.s.ppc32.dbat[i] = ((uint64_t)env->DBAT[0][i] << 32) 886a7a00a72SDavid Gibson | env->DBAT[1][i]; 887a7a00a72SDavid Gibson sregs.u.s.ppc32.ibat[i] = ((uint64_t)env->IBAT[0][i] << 32) 888a7a00a72SDavid Gibson | env->IBAT[1][i]; 889a7a00a72SDavid Gibson } 890a7a00a72SDavid Gibson 891a7a00a72SDavid Gibson return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs); 892a7a00a72SDavid Gibson } 893a7a00a72SDavid Gibson 89420d695a9SAndreas Färber int kvm_arch_put_registers(CPUState *cs, int level) 895d76d1650Saurel32 { 89620d695a9SAndreas Färber PowerPCCPU *cpu = POWERPC_CPU(cs); 89720d695a9SAndreas Färber CPUPPCState *env = &cpu->env; 898d76d1650Saurel32 struct kvm_regs regs; 899d76d1650Saurel32 int ret; 900d76d1650Saurel32 int i; 901d76d1650Saurel32 9021bc22652SAndreas Färber ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, ®s); 9031bc22652SAndreas Färber if (ret < 0) { 904d76d1650Saurel32 return ret; 9051bc22652SAndreas Färber } 906d76d1650Saurel32 907d76d1650Saurel32 regs.ctr = env->ctr; 908d76d1650Saurel32 regs.lr = env->lr; 909da91a00fSRichard Henderson regs.xer = cpu_read_xer(env); 910d76d1650Saurel32 regs.msr = env->msr; 911d76d1650Saurel32 regs.pc = env->nip; 912d76d1650Saurel32 913d76d1650Saurel32 regs.srr0 = env->spr[SPR_SRR0]; 914d76d1650Saurel32 regs.srr1 = env->spr[SPR_SRR1]; 915d76d1650Saurel32 916d76d1650Saurel32 regs.sprg0 = env->spr[SPR_SPRG0]; 917d76d1650Saurel32 regs.sprg1 = env->spr[SPR_SPRG1]; 918d76d1650Saurel32 regs.sprg2 = env->spr[SPR_SPRG2]; 919d76d1650Saurel32 regs.sprg3 = env->spr[SPR_SPRG3]; 920d76d1650Saurel32 regs.sprg4 = env->spr[SPR_SPRG4]; 921d76d1650Saurel32 regs.sprg5 = env->spr[SPR_SPRG5]; 922d76d1650Saurel32 regs.sprg6 = env->spr[SPR_SPRG6]; 923d76d1650Saurel32 regs.sprg7 = env->spr[SPR_SPRG7]; 924d76d1650Saurel32 92590dc8812SScott Wood regs.pid = env->spr[SPR_BOOKE_PID]; 92690dc8812SScott Wood 927d76d1650Saurel32 for (i = 0;i < 32; i++) 928d76d1650Saurel32 
regs.gpr[i] = env->gpr[i]; 929d76d1650Saurel32 9304bddaf55SAlexey Kardashevskiy regs.cr = 0; 9314bddaf55SAlexey Kardashevskiy for (i = 0; i < 8; i++) { 9324bddaf55SAlexey Kardashevskiy regs.cr |= (env->crf[i] & 15) << (4 * (7 - i)); 9334bddaf55SAlexey Kardashevskiy } 9344bddaf55SAlexey Kardashevskiy 9351bc22652SAndreas Färber ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, ®s); 936d76d1650Saurel32 if (ret < 0) 937d76d1650Saurel32 return ret; 938d76d1650Saurel32 93970b79849SDavid Gibson kvm_put_fp(cs); 94070b79849SDavid Gibson 94193dd5e85SScott Wood if (env->tlb_dirty) { 9421bc22652SAndreas Färber kvm_sw_tlb_put(cpu); 94393dd5e85SScott Wood env->tlb_dirty = false; 94493dd5e85SScott Wood } 94593dd5e85SScott Wood 946f1af19d7SDavid Gibson if (cap_segstate && (level >= KVM_PUT_RESET_STATE)) { 947a7a00a72SDavid Gibson ret = kvmppc_put_books_sregs(cpu); 948a7a00a72SDavid Gibson if (ret < 0) { 949f1af19d7SDavid Gibson return ret; 950f1af19d7SDavid Gibson } 951f1af19d7SDavid Gibson } 952f1af19d7SDavid Gibson 953f1af19d7SDavid Gibson if (cap_hior && (level >= KVM_PUT_RESET_STATE)) { 954d67d40eaSDavid Gibson kvm_put_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR); 955d67d40eaSDavid Gibson } 956f1af19d7SDavid Gibson 957d67d40eaSDavid Gibson if (cap_one_reg) { 958d67d40eaSDavid Gibson int i; 959d67d40eaSDavid Gibson 960d67d40eaSDavid Gibson /* We deliberately ignore errors here, for kernels which have 961d67d40eaSDavid Gibson * the ONE_REG calls, but don't support the specific 962d67d40eaSDavid Gibson * registers, there's a reasonable chance things will still 963d67d40eaSDavid Gibson * work, at least until we try to migrate. 
*/ 964d67d40eaSDavid Gibson for (i = 0; i < 1024; i++) { 965d67d40eaSDavid Gibson uint64_t id = env->spr_cb[i].one_reg_id; 966d67d40eaSDavid Gibson 967d67d40eaSDavid Gibson if (id != 0) { 968d67d40eaSDavid Gibson kvm_put_one_spr(cs, id, i); 969d67d40eaSDavid Gibson } 970f1af19d7SDavid Gibson } 9719b00ea49SDavid Gibson 9729b00ea49SDavid Gibson #ifdef TARGET_PPC64 97380b3f79bSAlexey Kardashevskiy if (msr_ts) { 97480b3f79bSAlexey Kardashevskiy for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) { 97580b3f79bSAlexey Kardashevskiy kvm_set_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]); 97680b3f79bSAlexey Kardashevskiy } 97780b3f79bSAlexey Kardashevskiy for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) { 97880b3f79bSAlexey Kardashevskiy kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]); 97980b3f79bSAlexey Kardashevskiy } 98080b3f79bSAlexey Kardashevskiy kvm_set_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr); 98180b3f79bSAlexey Kardashevskiy kvm_set_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr); 98280b3f79bSAlexey Kardashevskiy kvm_set_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr); 98380b3f79bSAlexey Kardashevskiy kvm_set_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr); 98480b3f79bSAlexey Kardashevskiy kvm_set_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr); 98580b3f79bSAlexey Kardashevskiy kvm_set_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr); 98680b3f79bSAlexey Kardashevskiy kvm_set_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave); 98780b3f79bSAlexey Kardashevskiy kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr); 98880b3f79bSAlexey Kardashevskiy kvm_set_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr); 98980b3f79bSAlexey Kardashevskiy kvm_set_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar); 99080b3f79bSAlexey Kardashevskiy } 99180b3f79bSAlexey Kardashevskiy 9929b00ea49SDavid Gibson if (cap_papr) { 9939b00ea49SDavid Gibson if (kvm_put_vpa(cs) < 0) { 994da56ff91SPeter Maydell DPRINTF("Warning: Unable to set VPA information to KVM\n"); 9959b00ea49SDavid Gibson } 
9969b00ea49SDavid Gibson } 99798a8b524SAlexey Kardashevskiy 99898a8b524SAlexey Kardashevskiy kvm_set_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset); 9999b00ea49SDavid Gibson #endif /* TARGET_PPC64 */ 1000f1af19d7SDavid Gibson } 1001f1af19d7SDavid Gibson 1002d76d1650Saurel32 return ret; 1003d76d1650Saurel32 } 1004d76d1650Saurel32 1005c371c2e3SBharat Bhushan static void kvm_sync_excp(CPUPPCState *env, int vector, int ivor) 1006c371c2e3SBharat Bhushan { 1007c371c2e3SBharat Bhushan env->excp_vectors[vector] = env->spr[ivor] + env->spr[SPR_BOOKE_IVPR]; 1008c371c2e3SBharat Bhushan } 1009c371c2e3SBharat Bhushan 1010a7a00a72SDavid Gibson static int kvmppc_get_booke_sregs(PowerPCCPU *cpu) 1011d76d1650Saurel32 { 101220d695a9SAndreas Färber CPUPPCState *env = &cpu->env; 1013ba5e5090SAlexander Graf struct kvm_sregs sregs; 1014a7a00a72SDavid Gibson int ret; 1015d76d1650Saurel32 1016a7a00a72SDavid Gibson ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs); 101790dc8812SScott Wood if (ret < 0) { 101890dc8812SScott Wood return ret; 101990dc8812SScott Wood } 102090dc8812SScott Wood 102190dc8812SScott Wood if (sregs.u.e.features & KVM_SREGS_E_BASE) { 102290dc8812SScott Wood env->spr[SPR_BOOKE_CSRR0] = sregs.u.e.csrr0; 102390dc8812SScott Wood env->spr[SPR_BOOKE_CSRR1] = sregs.u.e.csrr1; 102490dc8812SScott Wood env->spr[SPR_BOOKE_ESR] = sregs.u.e.esr; 102590dc8812SScott Wood env->spr[SPR_BOOKE_DEAR] = sregs.u.e.dear; 102690dc8812SScott Wood env->spr[SPR_BOOKE_MCSR] = sregs.u.e.mcsr; 102790dc8812SScott Wood env->spr[SPR_BOOKE_TSR] = sregs.u.e.tsr; 102890dc8812SScott Wood env->spr[SPR_BOOKE_TCR] = sregs.u.e.tcr; 102990dc8812SScott Wood env->spr[SPR_DECR] = sregs.u.e.dec; 103090dc8812SScott Wood env->spr[SPR_TBL] = sregs.u.e.tb & 0xffffffff; 103190dc8812SScott Wood env->spr[SPR_TBU] = sregs.u.e.tb >> 32; 103290dc8812SScott Wood env->spr[SPR_VRSAVE] = sregs.u.e.vrsave; 103390dc8812SScott Wood } 103490dc8812SScott Wood 103590dc8812SScott Wood if (sregs.u.e.features & 
KVM_SREGS_E_ARCH206) { 103690dc8812SScott Wood env->spr[SPR_BOOKE_PIR] = sregs.u.e.pir; 103790dc8812SScott Wood env->spr[SPR_BOOKE_MCSRR0] = sregs.u.e.mcsrr0; 103890dc8812SScott Wood env->spr[SPR_BOOKE_MCSRR1] = sregs.u.e.mcsrr1; 103990dc8812SScott Wood env->spr[SPR_BOOKE_DECAR] = sregs.u.e.decar; 104090dc8812SScott Wood env->spr[SPR_BOOKE_IVPR] = sregs.u.e.ivpr; 104190dc8812SScott Wood } 104290dc8812SScott Wood 104390dc8812SScott Wood if (sregs.u.e.features & KVM_SREGS_E_64) { 104490dc8812SScott Wood env->spr[SPR_BOOKE_EPCR] = sregs.u.e.epcr; 104590dc8812SScott Wood } 104690dc8812SScott Wood 104790dc8812SScott Wood if (sregs.u.e.features & KVM_SREGS_E_SPRG8) { 104890dc8812SScott Wood env->spr[SPR_BOOKE_SPRG8] = sregs.u.e.sprg8; 104990dc8812SScott Wood } 105090dc8812SScott Wood 105190dc8812SScott Wood if (sregs.u.e.features & KVM_SREGS_E_IVOR) { 105290dc8812SScott Wood env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0]; 1053c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_CRITICAL, SPR_BOOKE_IVOR0); 105490dc8812SScott Wood env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1]; 1055c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_MCHECK, SPR_BOOKE_IVOR1); 105690dc8812SScott Wood env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2]; 1057c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_DSI, SPR_BOOKE_IVOR2); 105890dc8812SScott Wood env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3]; 1059c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_ISI, SPR_BOOKE_IVOR3); 106090dc8812SScott Wood env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4]; 1061c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_EXTERNAL, SPR_BOOKE_IVOR4); 106290dc8812SScott Wood env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5]; 1063c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_ALIGN, SPR_BOOKE_IVOR5); 106490dc8812SScott Wood env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6]; 1065c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_PROGRAM, SPR_BOOKE_IVOR6); 
106690dc8812SScott Wood env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7]; 1067c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_FPU, SPR_BOOKE_IVOR7); 106890dc8812SScott Wood env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8]; 1069c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_SYSCALL, SPR_BOOKE_IVOR8); 107090dc8812SScott Wood env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9]; 1071c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_APU, SPR_BOOKE_IVOR9); 107290dc8812SScott Wood env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10]; 1073c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_DECR, SPR_BOOKE_IVOR10); 107490dc8812SScott Wood env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11]; 1075c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_FIT, SPR_BOOKE_IVOR11); 107690dc8812SScott Wood env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12]; 1077c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_WDT, SPR_BOOKE_IVOR12); 107890dc8812SScott Wood env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13]; 1079c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_DTLB, SPR_BOOKE_IVOR13); 108090dc8812SScott Wood env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14]; 1081c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_ITLB, SPR_BOOKE_IVOR14); 108290dc8812SScott Wood env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15]; 1083c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_DEBUG, SPR_BOOKE_IVOR15); 108490dc8812SScott Wood 108590dc8812SScott Wood if (sregs.u.e.features & KVM_SREGS_E_SPE) { 108690dc8812SScott Wood env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0]; 1087c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_SPEU, SPR_BOOKE_IVOR32); 108890dc8812SScott Wood env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1]; 1089c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_EFPDI, SPR_BOOKE_IVOR33); 109090dc8812SScott Wood env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2]; 1091c371c2e3SBharat Bhushan kvm_sync_excp(env, 
POWERPC_EXCP_EFPRI, SPR_BOOKE_IVOR34); 109290dc8812SScott Wood } 109390dc8812SScott Wood 109490dc8812SScott Wood if (sregs.u.e.features & KVM_SREGS_E_PM) { 109590dc8812SScott Wood env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3]; 1096c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_EPERFM, SPR_BOOKE_IVOR35); 109790dc8812SScott Wood } 109890dc8812SScott Wood 109990dc8812SScott Wood if (sregs.u.e.features & KVM_SREGS_E_PC) { 110090dc8812SScott Wood env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4]; 1101c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_DOORI, SPR_BOOKE_IVOR36); 110290dc8812SScott Wood env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5]; 1103c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_DOORCI, SPR_BOOKE_IVOR37); 110490dc8812SScott Wood } 110590dc8812SScott Wood } 110690dc8812SScott Wood 110790dc8812SScott Wood if (sregs.u.e.features & KVM_SREGS_E_ARCH206_MMU) { 110890dc8812SScott Wood env->spr[SPR_BOOKE_MAS0] = sregs.u.e.mas0; 110990dc8812SScott Wood env->spr[SPR_BOOKE_MAS1] = sregs.u.e.mas1; 111090dc8812SScott Wood env->spr[SPR_BOOKE_MAS2] = sregs.u.e.mas2; 111190dc8812SScott Wood env->spr[SPR_BOOKE_MAS3] = sregs.u.e.mas7_3 & 0xffffffff; 111290dc8812SScott Wood env->spr[SPR_BOOKE_MAS4] = sregs.u.e.mas4; 111390dc8812SScott Wood env->spr[SPR_BOOKE_MAS6] = sregs.u.e.mas6; 111490dc8812SScott Wood env->spr[SPR_BOOKE_MAS7] = sregs.u.e.mas7_3 >> 32; 111590dc8812SScott Wood env->spr[SPR_MMUCFG] = sregs.u.e.mmucfg; 111690dc8812SScott Wood env->spr[SPR_BOOKE_TLB0CFG] = sregs.u.e.tlbcfg[0]; 111790dc8812SScott Wood env->spr[SPR_BOOKE_TLB1CFG] = sregs.u.e.tlbcfg[1]; 111890dc8812SScott Wood } 111990dc8812SScott Wood 112090dc8812SScott Wood if (sregs.u.e.features & KVM_SREGS_EXP) { 112190dc8812SScott Wood env->spr[SPR_BOOKE_EPR] = sregs.u.e.epr; 112290dc8812SScott Wood } 112390dc8812SScott Wood 112490dc8812SScott Wood if (sregs.u.e.features & KVM_SREGS_E_PD) { 112590dc8812SScott Wood env->spr[SPR_BOOKE_EPLC] = sregs.u.e.eplc; 
112690dc8812SScott Wood env->spr[SPR_BOOKE_EPSC] = sregs.u.e.epsc; 112790dc8812SScott Wood } 112890dc8812SScott Wood 112990dc8812SScott Wood if (sregs.u.e.impl_id == KVM_SREGS_E_IMPL_FSL) { 113090dc8812SScott Wood env->spr[SPR_E500_SVR] = sregs.u.e.impl.fsl.svr; 113190dc8812SScott Wood env->spr[SPR_Exxx_MCAR] = sregs.u.e.impl.fsl.mcar; 113290dc8812SScott Wood env->spr[SPR_HID0] = sregs.u.e.impl.fsl.hid0; 113390dc8812SScott Wood 113490dc8812SScott Wood if (sregs.u.e.impl.fsl.features & KVM_SREGS_E_FSL_PIDn) { 113590dc8812SScott Wood env->spr[SPR_BOOKE_PID1] = sregs.u.e.impl.fsl.pid1; 113690dc8812SScott Wood env->spr[SPR_BOOKE_PID2] = sregs.u.e.impl.fsl.pid2; 113790dc8812SScott Wood } 113890dc8812SScott Wood } 1139a7a00a72SDavid Gibson 1140a7a00a72SDavid Gibson return 0; 1141fafc0b6aSAlexander Graf } 114290dc8812SScott Wood 1143a7a00a72SDavid Gibson static int kvmppc_get_books_sregs(PowerPCCPU *cpu) 1144a7a00a72SDavid Gibson { 1145a7a00a72SDavid Gibson CPUPPCState *env = &cpu->env; 1146a7a00a72SDavid Gibson struct kvm_sregs sregs; 1147a7a00a72SDavid Gibson int ret; 1148a7a00a72SDavid Gibson int i; 1149a7a00a72SDavid Gibson 1150a7a00a72SDavid Gibson ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs); 115190dc8812SScott Wood if (ret < 0) { 115290dc8812SScott Wood return ret; 115390dc8812SScott Wood } 115490dc8812SScott Wood 1155e57ca75cSDavid Gibson if (!cpu->vhyp) { 1156bb593904SDavid Gibson ppc_store_sdr1(env, sregs.u.s.sdr1); 1157f3c75d42SAneesh Kumar K.V } 1158ba5e5090SAlexander Graf 1159ba5e5090SAlexander Graf /* Sync SLB */ 116082c09f2fSAlexander Graf #ifdef TARGET_PPC64 11614b4d4a21SAneesh Kumar K.V /* 11624b4d4a21SAneesh Kumar K.V * The packed SLB array we get from KVM_GET_SREGS only contains 1163a7a00a72SDavid Gibson * information about valid entries. So we flush our internal copy 1164a7a00a72SDavid Gibson * to get rid of stale ones, then put all valid SLB entries back 1165a7a00a72SDavid Gibson * in. 
11664b4d4a21SAneesh Kumar K.V */ 11674b4d4a21SAneesh Kumar K.V memset(env->slb, 0, sizeof(env->slb)); 1168d83af167SAneesh Kumar K.V for (i = 0; i < ARRAY_SIZE(env->slb); i++) { 11694b4d4a21SAneesh Kumar K.V target_ulong rb = sregs.u.s.ppc64.slb[i].slbe; 11704b4d4a21SAneesh Kumar K.V target_ulong rs = sregs.u.s.ppc64.slb[i].slbv; 11714b4d4a21SAneesh Kumar K.V /* 11724b4d4a21SAneesh Kumar K.V * Only restore valid entries 11734b4d4a21SAneesh Kumar K.V */ 11744b4d4a21SAneesh Kumar K.V if (rb & SLB_ESID_V) { 1175bcd81230SDavid Gibson ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs); 11764b4d4a21SAneesh Kumar K.V } 1177ba5e5090SAlexander Graf } 117882c09f2fSAlexander Graf #endif 1179ba5e5090SAlexander Graf 1180ba5e5090SAlexander Graf /* Sync SRs */ 1181ba5e5090SAlexander Graf for (i = 0; i < 16; i++) { 1182ba5e5090SAlexander Graf env->sr[i] = sregs.u.s.ppc32.sr[i]; 1183ba5e5090SAlexander Graf } 1184ba5e5090SAlexander Graf 1185ba5e5090SAlexander Graf /* Sync BATs */ 1186ba5e5090SAlexander Graf for (i = 0; i < 8; i++) { 1187ba5e5090SAlexander Graf env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff; 1188ba5e5090SAlexander Graf env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32; 1189ba5e5090SAlexander Graf env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff; 1190ba5e5090SAlexander Graf env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32; 1191ba5e5090SAlexander Graf } 1192a7a00a72SDavid Gibson 1193a7a00a72SDavid Gibson return 0; 1194a7a00a72SDavid Gibson } 1195a7a00a72SDavid Gibson 1196a7a00a72SDavid Gibson int kvm_arch_get_registers(CPUState *cs) 1197a7a00a72SDavid Gibson { 1198a7a00a72SDavid Gibson PowerPCCPU *cpu = POWERPC_CPU(cs); 1199a7a00a72SDavid Gibson CPUPPCState *env = &cpu->env; 1200a7a00a72SDavid Gibson struct kvm_regs regs; 1201a7a00a72SDavid Gibson uint32_t cr; 1202a7a00a72SDavid Gibson int i, ret; 1203a7a00a72SDavid Gibson 1204a7a00a72SDavid Gibson ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, ®s); 1205a7a00a72SDavid Gibson if (ret < 0) 1206a7a00a72SDavid Gibson 
return ret; 1207a7a00a72SDavid Gibson 1208a7a00a72SDavid Gibson cr = regs.cr; 1209a7a00a72SDavid Gibson for (i = 7; i >= 0; i--) { 1210a7a00a72SDavid Gibson env->crf[i] = cr & 15; 1211a7a00a72SDavid Gibson cr >>= 4; 1212a7a00a72SDavid Gibson } 1213a7a00a72SDavid Gibson 1214a7a00a72SDavid Gibson env->ctr = regs.ctr; 1215a7a00a72SDavid Gibson env->lr = regs.lr; 1216a7a00a72SDavid Gibson cpu_write_xer(env, regs.xer); 1217a7a00a72SDavid Gibson env->msr = regs.msr; 1218a7a00a72SDavid Gibson env->nip = regs.pc; 1219a7a00a72SDavid Gibson 1220a7a00a72SDavid Gibson env->spr[SPR_SRR0] = regs.srr0; 1221a7a00a72SDavid Gibson env->spr[SPR_SRR1] = regs.srr1; 1222a7a00a72SDavid Gibson 1223a7a00a72SDavid Gibson env->spr[SPR_SPRG0] = regs.sprg0; 1224a7a00a72SDavid Gibson env->spr[SPR_SPRG1] = regs.sprg1; 1225a7a00a72SDavid Gibson env->spr[SPR_SPRG2] = regs.sprg2; 1226a7a00a72SDavid Gibson env->spr[SPR_SPRG3] = regs.sprg3; 1227a7a00a72SDavid Gibson env->spr[SPR_SPRG4] = regs.sprg4; 1228a7a00a72SDavid Gibson env->spr[SPR_SPRG5] = regs.sprg5; 1229a7a00a72SDavid Gibson env->spr[SPR_SPRG6] = regs.sprg6; 1230a7a00a72SDavid Gibson env->spr[SPR_SPRG7] = regs.sprg7; 1231a7a00a72SDavid Gibson 1232a7a00a72SDavid Gibson env->spr[SPR_BOOKE_PID] = regs.pid; 1233a7a00a72SDavid Gibson 1234a7a00a72SDavid Gibson for (i = 0;i < 32; i++) 1235a7a00a72SDavid Gibson env->gpr[i] = regs.gpr[i]; 1236a7a00a72SDavid Gibson 1237a7a00a72SDavid Gibson kvm_get_fp(cs); 1238a7a00a72SDavid Gibson 1239a7a00a72SDavid Gibson if (cap_booke_sregs) { 1240a7a00a72SDavid Gibson ret = kvmppc_get_booke_sregs(cpu); 1241a7a00a72SDavid Gibson if (ret < 0) { 1242a7a00a72SDavid Gibson return ret; 1243a7a00a72SDavid Gibson } 1244a7a00a72SDavid Gibson } 1245a7a00a72SDavid Gibson 1246a7a00a72SDavid Gibson if (cap_segstate) { 1247a7a00a72SDavid Gibson ret = kvmppc_get_books_sregs(cpu); 1248a7a00a72SDavid Gibson if (ret < 0) { 1249a7a00a72SDavid Gibson return ret; 1250a7a00a72SDavid Gibson } 1251fafc0b6aSAlexander Graf } 
1252ba5e5090SAlexander Graf 1253d67d40eaSDavid Gibson if (cap_hior) { 1254d67d40eaSDavid Gibson kvm_get_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR); 1255d67d40eaSDavid Gibson } 1256d67d40eaSDavid Gibson 1257d67d40eaSDavid Gibson if (cap_one_reg) { 1258d67d40eaSDavid Gibson int i; 1259d67d40eaSDavid Gibson 1260d67d40eaSDavid Gibson /* We deliberately ignore errors here, for kernels which have 1261d67d40eaSDavid Gibson * the ONE_REG calls, but don't support the specific 1262d67d40eaSDavid Gibson * registers, there's a reasonable chance things will still 1263d67d40eaSDavid Gibson * work, at least until we try to migrate. */ 1264d67d40eaSDavid Gibson for (i = 0; i < 1024; i++) { 1265d67d40eaSDavid Gibson uint64_t id = env->spr_cb[i].one_reg_id; 1266d67d40eaSDavid Gibson 1267d67d40eaSDavid Gibson if (id != 0) { 1268d67d40eaSDavid Gibson kvm_get_one_spr(cs, id, i); 1269d67d40eaSDavid Gibson } 1270d67d40eaSDavid Gibson } 12719b00ea49SDavid Gibson 12729b00ea49SDavid Gibson #ifdef TARGET_PPC64 127380b3f79bSAlexey Kardashevskiy if (msr_ts) { 127480b3f79bSAlexey Kardashevskiy for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) { 127580b3f79bSAlexey Kardashevskiy kvm_get_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]); 127680b3f79bSAlexey Kardashevskiy } 127780b3f79bSAlexey Kardashevskiy for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) { 127880b3f79bSAlexey Kardashevskiy kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]); 127980b3f79bSAlexey Kardashevskiy } 128080b3f79bSAlexey Kardashevskiy kvm_get_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr); 128180b3f79bSAlexey Kardashevskiy kvm_get_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr); 128280b3f79bSAlexey Kardashevskiy kvm_get_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr); 128380b3f79bSAlexey Kardashevskiy kvm_get_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr); 128480b3f79bSAlexey Kardashevskiy kvm_get_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr); 128580b3f79bSAlexey Kardashevskiy kvm_get_one_reg(cs, KVM_REG_PPC_TM_PPR, 
&env->tm_ppr); 128680b3f79bSAlexey Kardashevskiy kvm_get_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave); 128780b3f79bSAlexey Kardashevskiy kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr); 128880b3f79bSAlexey Kardashevskiy kvm_get_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr); 128980b3f79bSAlexey Kardashevskiy kvm_get_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar); 129080b3f79bSAlexey Kardashevskiy } 129180b3f79bSAlexey Kardashevskiy 12929b00ea49SDavid Gibson if (cap_papr) { 12939b00ea49SDavid Gibson if (kvm_get_vpa(cs) < 0) { 1294da56ff91SPeter Maydell DPRINTF("Warning: Unable to get VPA information from KVM\n"); 12959b00ea49SDavid Gibson } 12969b00ea49SDavid Gibson } 129798a8b524SAlexey Kardashevskiy 129898a8b524SAlexey Kardashevskiy kvm_get_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset); 12999b00ea49SDavid Gibson #endif 1300d67d40eaSDavid Gibson } 1301d67d40eaSDavid Gibson 1302d76d1650Saurel32 return 0; 1303d76d1650Saurel32 } 1304d76d1650Saurel32 13051bc22652SAndreas Färber int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level) 1306fc87e185SAlexander Graf { 1307fc87e185SAlexander Graf unsigned virq = level ? 
KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET; 1308fc87e185SAlexander Graf 1309fc87e185SAlexander Graf if (irq != PPC_INTERRUPT_EXT) { 1310fc87e185SAlexander Graf return 0; 1311fc87e185SAlexander Graf } 1312fc87e185SAlexander Graf 1313fc87e185SAlexander Graf if (!kvm_enabled() || !cap_interrupt_unset || !cap_interrupt_level) { 1314fc87e185SAlexander Graf return 0; 1315fc87e185SAlexander Graf } 1316fc87e185SAlexander Graf 13171bc22652SAndreas Färber kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq); 1318fc87e185SAlexander Graf 1319fc87e185SAlexander Graf return 0; 1320fc87e185SAlexander Graf } 1321fc87e185SAlexander Graf 132216415335SAlexander Graf #if defined(TARGET_PPCEMB) 132316415335SAlexander Graf #define PPC_INPUT_INT PPC40x_INPUT_INT 132416415335SAlexander Graf #elif defined(TARGET_PPC64) 132516415335SAlexander Graf #define PPC_INPUT_INT PPC970_INPUT_INT 132616415335SAlexander Graf #else 132716415335SAlexander Graf #define PPC_INPUT_INT PPC6xx_INPUT_INT 132816415335SAlexander Graf #endif 132916415335SAlexander Graf 133020d695a9SAndreas Färber void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run) 1331d76d1650Saurel32 { 133220d695a9SAndreas Färber PowerPCCPU *cpu = POWERPC_CPU(cs); 133320d695a9SAndreas Färber CPUPPCState *env = &cpu->env; 1334d76d1650Saurel32 int r; 1335d76d1650Saurel32 unsigned irq; 1336d76d1650Saurel32 13374b8523eeSJan Kiszka qemu_mutex_lock_iothread(); 13384b8523eeSJan Kiszka 13395cbdb3a3SStefan Weil /* PowerPC QEMU tracks the various core input pins (interrupt, critical 1340d76d1650Saurel32 * interrupt, reset, etc) in PPC-specific env->irq_input_state. */ 1341fc87e185SAlexander Graf if (!cap_interrupt_level && 1342fc87e185SAlexander Graf run->ready_for_interrupt_injection && 1343259186a7SAndreas Färber (cs->interrupt_request & CPU_INTERRUPT_HARD) && 134416415335SAlexander Graf (env->irq_input_state & (1<<PPC_INPUT_INT))) 1345d76d1650Saurel32 { 1346d76d1650Saurel32 /* For now KVM disregards the 'irq' argument. 
However, in the 1347d76d1650Saurel32 * future KVM could cache it in-kernel to avoid a heavyweight exit 1348d76d1650Saurel32 * when reading the UIC. 1349d76d1650Saurel32 */ 1350fc87e185SAlexander Graf irq = KVM_INTERRUPT_SET; 1351d76d1650Saurel32 1352da56ff91SPeter Maydell DPRINTF("injected interrupt %d\n", irq); 13531bc22652SAndreas Färber r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &irq); 135455e5c285SAndreas Färber if (r < 0) { 135555e5c285SAndreas Färber printf("cpu %d fail inject %x\n", cs->cpu_index, irq); 135655e5c285SAndreas Färber } 1357c821c2bdSAlexander Graf 1358c821c2bdSAlexander Graf /* Always wake up soon in case the interrupt was level based */ 1359bc72ad67SAlex Bligh timer_mod(idle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 136073bcb24dSRutuja Shah (NANOSECONDS_PER_SECOND / 50)); 1361d76d1650Saurel32 } 1362d76d1650Saurel32 1363d76d1650Saurel32 /* We don't know if there are more interrupts pending after this. However, 1364d76d1650Saurel32 * the guest will return to userspace in the course of handling this one 1365d76d1650Saurel32 * anyways, so we will get a chance to deliver the rest. 
*/ 13664b8523eeSJan Kiszka 13674b8523eeSJan Kiszka qemu_mutex_unlock_iothread(); 1368d76d1650Saurel32 } 1369d76d1650Saurel32 13704c663752SPaolo Bonzini MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run) 1371d76d1650Saurel32 { 13724c663752SPaolo Bonzini return MEMTXATTRS_UNSPECIFIED; 1373d76d1650Saurel32 } 1374d76d1650Saurel32 137520d695a9SAndreas Färber int kvm_arch_process_async_events(CPUState *cs) 13760af691d7SMarcelo Tosatti { 1377259186a7SAndreas Färber return cs->halted; 13780af691d7SMarcelo Tosatti } 13790af691d7SMarcelo Tosatti 1380259186a7SAndreas Färber static int kvmppc_handle_halt(PowerPCCPU *cpu) 1381d76d1650Saurel32 { 1382259186a7SAndreas Färber CPUState *cs = CPU(cpu); 1383259186a7SAndreas Färber CPUPPCState *env = &cpu->env; 1384259186a7SAndreas Färber 1385259186a7SAndreas Färber if (!(cs->interrupt_request & CPU_INTERRUPT_HARD) && (msr_ee)) { 1386259186a7SAndreas Färber cs->halted = 1; 138727103424SAndreas Färber cs->exception_index = EXCP_HLT; 1388d76d1650Saurel32 } 1389d76d1650Saurel32 1390bb4ea393SJan Kiszka return 0; 1391d76d1650Saurel32 } 1392d76d1650Saurel32 1393d76d1650Saurel32 /* map dcr access to existing qemu dcr emulation */ 13941328c2bfSAndreas Färber static int kvmppc_handle_dcr_read(CPUPPCState *env, uint32_t dcrn, uint32_t *data) 1395d76d1650Saurel32 { 1396d76d1650Saurel32 if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0) 1397d76d1650Saurel32 fprintf(stderr, "Read to unhandled DCR (0x%x)\n", dcrn); 1398d76d1650Saurel32 1399bb4ea393SJan Kiszka return 0; 1400d76d1650Saurel32 } 1401d76d1650Saurel32 14021328c2bfSAndreas Färber static int kvmppc_handle_dcr_write(CPUPPCState *env, uint32_t dcrn, uint32_t data) 1403d76d1650Saurel32 { 1404d76d1650Saurel32 if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0) 1405d76d1650Saurel32 fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn); 1406d76d1650Saurel32 1407bb4ea393SJan Kiszka return 0; 1408d76d1650Saurel32 } 1409d76d1650Saurel32 14108a0548f9SBharat Bhushan int 
kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) 14118a0548f9SBharat Bhushan { 14128a0548f9SBharat Bhushan /* Mixed endian case is not handled */ 14138a0548f9SBharat Bhushan uint32_t sc = debug_inst_opcode; 14148a0548f9SBharat Bhushan 14158a0548f9SBharat Bhushan if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 14168a0548f9SBharat Bhushan sizeof(sc), 0) || 14178a0548f9SBharat Bhushan cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 1)) { 14188a0548f9SBharat Bhushan return -EINVAL; 14198a0548f9SBharat Bhushan } 14208a0548f9SBharat Bhushan 14218a0548f9SBharat Bhushan return 0; 14228a0548f9SBharat Bhushan } 14238a0548f9SBharat Bhushan 14248a0548f9SBharat Bhushan int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) 14258a0548f9SBharat Bhushan { 14268a0548f9SBharat Bhushan uint32_t sc; 14278a0548f9SBharat Bhushan 14288a0548f9SBharat Bhushan if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 0) || 14298a0548f9SBharat Bhushan sc != debug_inst_opcode || 14308a0548f9SBharat Bhushan cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 14318a0548f9SBharat Bhushan sizeof(sc), 1)) { 14328a0548f9SBharat Bhushan return -EINVAL; 14338a0548f9SBharat Bhushan } 14348a0548f9SBharat Bhushan 14358a0548f9SBharat Bhushan return 0; 14368a0548f9SBharat Bhushan } 14378a0548f9SBharat Bhushan 143888365d17SBharat Bhushan static int find_hw_breakpoint(target_ulong addr, int type) 143988365d17SBharat Bhushan { 144088365d17SBharat Bhushan int n; 144188365d17SBharat Bhushan 144288365d17SBharat Bhushan assert((nb_hw_breakpoint + nb_hw_watchpoint) 144388365d17SBharat Bhushan <= ARRAY_SIZE(hw_debug_points)); 144488365d17SBharat Bhushan 144588365d17SBharat Bhushan for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) { 144688365d17SBharat Bhushan if (hw_debug_points[n].addr == addr && 144788365d17SBharat Bhushan hw_debug_points[n].type == type) { 144888365d17SBharat Bhushan return n; 144988365d17SBharat 
Bhushan } 145088365d17SBharat Bhushan } 145188365d17SBharat Bhushan 145288365d17SBharat Bhushan return -1; 145388365d17SBharat Bhushan } 145488365d17SBharat Bhushan 145588365d17SBharat Bhushan static int find_hw_watchpoint(target_ulong addr, int *flag) 145688365d17SBharat Bhushan { 145788365d17SBharat Bhushan int n; 145888365d17SBharat Bhushan 145988365d17SBharat Bhushan n = find_hw_breakpoint(addr, GDB_WATCHPOINT_ACCESS); 146088365d17SBharat Bhushan if (n >= 0) { 146188365d17SBharat Bhushan *flag = BP_MEM_ACCESS; 146288365d17SBharat Bhushan return n; 146388365d17SBharat Bhushan } 146488365d17SBharat Bhushan 146588365d17SBharat Bhushan n = find_hw_breakpoint(addr, GDB_WATCHPOINT_WRITE); 146688365d17SBharat Bhushan if (n >= 0) { 146788365d17SBharat Bhushan *flag = BP_MEM_WRITE; 146888365d17SBharat Bhushan return n; 146988365d17SBharat Bhushan } 147088365d17SBharat Bhushan 147188365d17SBharat Bhushan n = find_hw_breakpoint(addr, GDB_WATCHPOINT_READ); 147288365d17SBharat Bhushan if (n >= 0) { 147388365d17SBharat Bhushan *flag = BP_MEM_READ; 147488365d17SBharat Bhushan return n; 147588365d17SBharat Bhushan } 147688365d17SBharat Bhushan 147788365d17SBharat Bhushan return -1; 147888365d17SBharat Bhushan } 147988365d17SBharat Bhushan 148088365d17SBharat Bhushan int kvm_arch_insert_hw_breakpoint(target_ulong addr, 148188365d17SBharat Bhushan target_ulong len, int type) 148288365d17SBharat Bhushan { 148388365d17SBharat Bhushan if ((nb_hw_breakpoint + nb_hw_watchpoint) >= ARRAY_SIZE(hw_debug_points)) { 148488365d17SBharat Bhushan return -ENOBUFS; 148588365d17SBharat Bhushan } 148688365d17SBharat Bhushan 148788365d17SBharat Bhushan hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].addr = addr; 148888365d17SBharat Bhushan hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].type = type; 148988365d17SBharat Bhushan 149088365d17SBharat Bhushan switch (type) { 149188365d17SBharat Bhushan case GDB_BREAKPOINT_HW: 149288365d17SBharat Bhushan if (nb_hw_breakpoint >= 
max_hw_breakpoint) { 149388365d17SBharat Bhushan return -ENOBUFS; 149488365d17SBharat Bhushan } 149588365d17SBharat Bhushan 149688365d17SBharat Bhushan if (find_hw_breakpoint(addr, type) >= 0) { 149788365d17SBharat Bhushan return -EEXIST; 149888365d17SBharat Bhushan } 149988365d17SBharat Bhushan 150088365d17SBharat Bhushan nb_hw_breakpoint++; 150188365d17SBharat Bhushan break; 150288365d17SBharat Bhushan 150388365d17SBharat Bhushan case GDB_WATCHPOINT_WRITE: 150488365d17SBharat Bhushan case GDB_WATCHPOINT_READ: 150588365d17SBharat Bhushan case GDB_WATCHPOINT_ACCESS: 150688365d17SBharat Bhushan if (nb_hw_watchpoint >= max_hw_watchpoint) { 150788365d17SBharat Bhushan return -ENOBUFS; 150888365d17SBharat Bhushan } 150988365d17SBharat Bhushan 151088365d17SBharat Bhushan if (find_hw_breakpoint(addr, type) >= 0) { 151188365d17SBharat Bhushan return -EEXIST; 151288365d17SBharat Bhushan } 151388365d17SBharat Bhushan 151488365d17SBharat Bhushan nb_hw_watchpoint++; 151588365d17SBharat Bhushan break; 151688365d17SBharat Bhushan 151788365d17SBharat Bhushan default: 151888365d17SBharat Bhushan return -ENOSYS; 151988365d17SBharat Bhushan } 152088365d17SBharat Bhushan 152188365d17SBharat Bhushan return 0; 152288365d17SBharat Bhushan } 152388365d17SBharat Bhushan 152488365d17SBharat Bhushan int kvm_arch_remove_hw_breakpoint(target_ulong addr, 152588365d17SBharat Bhushan target_ulong len, int type) 152688365d17SBharat Bhushan { 152788365d17SBharat Bhushan int n; 152888365d17SBharat Bhushan 152988365d17SBharat Bhushan n = find_hw_breakpoint(addr, type); 153088365d17SBharat Bhushan if (n < 0) { 153188365d17SBharat Bhushan return -ENOENT; 153288365d17SBharat Bhushan } 153388365d17SBharat Bhushan 153488365d17SBharat Bhushan switch (type) { 153588365d17SBharat Bhushan case GDB_BREAKPOINT_HW: 153688365d17SBharat Bhushan nb_hw_breakpoint--; 153788365d17SBharat Bhushan break; 153888365d17SBharat Bhushan 153988365d17SBharat Bhushan case GDB_WATCHPOINT_WRITE: 154088365d17SBharat Bhushan case 
GDB_WATCHPOINT_READ: 154188365d17SBharat Bhushan case GDB_WATCHPOINT_ACCESS: 154288365d17SBharat Bhushan nb_hw_watchpoint--; 154388365d17SBharat Bhushan break; 154488365d17SBharat Bhushan 154588365d17SBharat Bhushan default: 154688365d17SBharat Bhushan return -ENOSYS; 154788365d17SBharat Bhushan } 154888365d17SBharat Bhushan hw_debug_points[n] = hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint]; 154988365d17SBharat Bhushan 155088365d17SBharat Bhushan return 0; 155188365d17SBharat Bhushan } 155288365d17SBharat Bhushan 155388365d17SBharat Bhushan void kvm_arch_remove_all_hw_breakpoints(void) 155488365d17SBharat Bhushan { 155588365d17SBharat Bhushan nb_hw_breakpoint = nb_hw_watchpoint = 0; 155688365d17SBharat Bhushan } 155788365d17SBharat Bhushan 15588a0548f9SBharat Bhushan void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg) 15598a0548f9SBharat Bhushan { 156088365d17SBharat Bhushan int n; 156188365d17SBharat Bhushan 15628a0548f9SBharat Bhushan /* Software Breakpoint updates */ 15638a0548f9SBharat Bhushan if (kvm_sw_breakpoints_active(cs)) { 15648a0548f9SBharat Bhushan dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP; 15658a0548f9SBharat Bhushan } 156688365d17SBharat Bhushan 156788365d17SBharat Bhushan assert((nb_hw_breakpoint + nb_hw_watchpoint) 156888365d17SBharat Bhushan <= ARRAY_SIZE(hw_debug_points)); 156988365d17SBharat Bhushan assert((nb_hw_breakpoint + nb_hw_watchpoint) <= ARRAY_SIZE(dbg->arch.bp)); 157088365d17SBharat Bhushan 157188365d17SBharat Bhushan if (nb_hw_breakpoint + nb_hw_watchpoint > 0) { 157288365d17SBharat Bhushan dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP; 157388365d17SBharat Bhushan memset(dbg->arch.bp, 0, sizeof(dbg->arch.bp)); 157488365d17SBharat Bhushan for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) { 157588365d17SBharat Bhushan switch (hw_debug_points[n].type) { 157688365d17SBharat Bhushan case GDB_BREAKPOINT_HW: 157788365d17SBharat Bhushan dbg->arch.bp[n].type = 
KVMPPC_DEBUG_BREAKPOINT; 157888365d17SBharat Bhushan break; 157988365d17SBharat Bhushan case GDB_WATCHPOINT_WRITE: 158088365d17SBharat Bhushan dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE; 158188365d17SBharat Bhushan break; 158288365d17SBharat Bhushan case GDB_WATCHPOINT_READ: 158388365d17SBharat Bhushan dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_READ; 158488365d17SBharat Bhushan break; 158588365d17SBharat Bhushan case GDB_WATCHPOINT_ACCESS: 158688365d17SBharat Bhushan dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE | 158788365d17SBharat Bhushan KVMPPC_DEBUG_WATCH_READ; 158888365d17SBharat Bhushan break; 158988365d17SBharat Bhushan default: 159088365d17SBharat Bhushan cpu_abort(cs, "Unsupported breakpoint type\n"); 159188365d17SBharat Bhushan } 159288365d17SBharat Bhushan dbg->arch.bp[n].addr = hw_debug_points[n].addr; 159388365d17SBharat Bhushan } 159488365d17SBharat Bhushan } 15958a0548f9SBharat Bhushan } 15968a0548f9SBharat Bhushan 15978a0548f9SBharat Bhushan static int kvm_handle_debug(PowerPCCPU *cpu, struct kvm_run *run) 15988a0548f9SBharat Bhushan { 15998a0548f9SBharat Bhushan CPUState *cs = CPU(cpu); 16008a0548f9SBharat Bhushan CPUPPCState *env = &cpu->env; 16018a0548f9SBharat Bhushan struct kvm_debug_exit_arch *arch_info = &run->debug.arch; 16028a0548f9SBharat Bhushan int handle = 0; 160388365d17SBharat Bhushan int n; 160488365d17SBharat Bhushan int flag = 0; 16058a0548f9SBharat Bhushan 160688365d17SBharat Bhushan if (cs->singlestep_enabled) { 160788365d17SBharat Bhushan handle = 1; 160888365d17SBharat Bhushan } else if (arch_info->status) { 160988365d17SBharat Bhushan if (nb_hw_breakpoint + nb_hw_watchpoint > 0) { 161088365d17SBharat Bhushan if (arch_info->status & KVMPPC_DEBUG_BREAKPOINT) { 161188365d17SBharat Bhushan n = find_hw_breakpoint(arch_info->address, GDB_BREAKPOINT_HW); 161288365d17SBharat Bhushan if (n >= 0) { 161388365d17SBharat Bhushan handle = 1; 161488365d17SBharat Bhushan } 161588365d17SBharat Bhushan } else if (arch_info->status & 
(KVMPPC_DEBUG_WATCH_READ | 161688365d17SBharat Bhushan KVMPPC_DEBUG_WATCH_WRITE)) { 161788365d17SBharat Bhushan n = find_hw_watchpoint(arch_info->address, &flag); 161888365d17SBharat Bhushan if (n >= 0) { 161988365d17SBharat Bhushan handle = 1; 162088365d17SBharat Bhushan cs->watchpoint_hit = &hw_watchpoint; 162188365d17SBharat Bhushan hw_watchpoint.vaddr = hw_debug_points[n].addr; 162288365d17SBharat Bhushan hw_watchpoint.flags = flag; 162388365d17SBharat Bhushan } 162488365d17SBharat Bhushan } 162588365d17SBharat Bhushan } 162688365d17SBharat Bhushan } else if (kvm_find_sw_breakpoint(cs, arch_info->address)) { 16278a0548f9SBharat Bhushan handle = 1; 16288a0548f9SBharat Bhushan } else { 16298a0548f9SBharat Bhushan /* QEMU is not able to handle debug exception, so inject 16308a0548f9SBharat Bhushan * program exception to guest; 16318a0548f9SBharat Bhushan * Yes program exception NOT debug exception !! 163288365d17SBharat Bhushan * When QEMU is using debug resources then debug exception must 163388365d17SBharat Bhushan * be always set. To achieve this we set MSR_DE and also set 163488365d17SBharat Bhushan * MSRP_DEP so guest cannot change MSR_DE. 163588365d17SBharat Bhushan * When emulating debug resource for guest we want guest 163688365d17SBharat Bhushan * to control MSR_DE (enable/disable debug interrupt on need). 163788365d17SBharat Bhushan * Supporting both configurations are NOT possible. 163888365d17SBharat Bhushan * So the result is that we cannot share debug resources 163988365d17SBharat Bhushan * between QEMU and Guest on BOOKE architecture. 
164088365d17SBharat Bhushan * In the current design QEMU gets the priority over guest, 164188365d17SBharat Bhushan * this means that if QEMU is using debug resources then guest 164288365d17SBharat Bhushan * cannot use them; 16438a0548f9SBharat Bhushan * For software breakpoint QEMU uses a privileged instruction; 16448a0548f9SBharat Bhushan * So there cannot be any reason that we are here for guest 16458a0548f9SBharat Bhushan * set debug exception, only possibility is guest executed a 16468a0548f9SBharat Bhushan * privileged / illegal instruction and that's why we are 16478a0548f9SBharat Bhushan * injecting a program interrupt. 16488a0548f9SBharat Bhushan */ 16498a0548f9SBharat Bhushan 16508a0548f9SBharat Bhushan cpu_synchronize_state(cs); 16518a0548f9SBharat Bhushan /* env->nip is PC, so increment this by 4 to use 16528a0548f9SBharat Bhushan * ppc_cpu_do_interrupt(), which set srr0 = env->nip - 4. 16538a0548f9SBharat Bhushan */ 16548a0548f9SBharat Bhushan env->nip += 4; 16558a0548f9SBharat Bhushan cs->exception_index = POWERPC_EXCP_PROGRAM; 16568a0548f9SBharat Bhushan env->error_code = POWERPC_EXCP_INVAL; 16578a0548f9SBharat Bhushan ppc_cpu_do_interrupt(cs); 16588a0548f9SBharat Bhushan } 16598a0548f9SBharat Bhushan 16608a0548f9SBharat Bhushan return handle; 16618a0548f9SBharat Bhushan } 16628a0548f9SBharat Bhushan 166320d695a9SAndreas Färber int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) 1664d76d1650Saurel32 { 166520d695a9SAndreas Färber PowerPCCPU *cpu = POWERPC_CPU(cs); 166620d695a9SAndreas Färber CPUPPCState *env = &cpu->env; 1667bb4ea393SJan Kiszka int ret; 1668d76d1650Saurel32 16694b8523eeSJan Kiszka qemu_mutex_lock_iothread(); 16704b8523eeSJan Kiszka 1671d76d1650Saurel32 switch (run->exit_reason) { 1672d76d1650Saurel32 case KVM_EXIT_DCR: 1673d76d1650Saurel32 if (run->dcr.is_write) { 1674da56ff91SPeter Maydell DPRINTF("handle dcr write\n"); 1675d76d1650Saurel32 ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data); 1676d76d1650Saurel32 
} else { 1677da56ff91SPeter Maydell DPRINTF("handle dcr read\n"); 1678d76d1650Saurel32 ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data); 1679d76d1650Saurel32 } 1680d76d1650Saurel32 break; 1681d76d1650Saurel32 case KVM_EXIT_HLT: 1682da56ff91SPeter Maydell DPRINTF("handle halt\n"); 1683259186a7SAndreas Färber ret = kvmppc_handle_halt(cpu); 1684d76d1650Saurel32 break; 1685c6304a4aSDavid Gibson #if defined(TARGET_PPC64) 1686f61b4bedSAlexander Graf case KVM_EXIT_PAPR_HCALL: 1687da56ff91SPeter Maydell DPRINTF("handle PAPR hypercall\n"); 168820d695a9SAndreas Färber run->papr_hcall.ret = spapr_hypercall(cpu, 1689aa100fa4SAndreas Färber run->papr_hcall.nr, 1690f61b4bedSAlexander Graf run->papr_hcall.args); 169178e8fde2SDavid Gibson ret = 0; 1692f61b4bedSAlexander Graf break; 1693f61b4bedSAlexander Graf #endif 16945b95b8b9SAlexander Graf case KVM_EXIT_EPR: 1695da56ff91SPeter Maydell DPRINTF("handle epr\n"); 1696933b19eaSAlexander Graf run->epr.epr = ldl_phys(cs->as, env->mpic_iack); 16975b95b8b9SAlexander Graf ret = 0; 16985b95b8b9SAlexander Graf break; 169931f2cb8fSBharat Bhushan case KVM_EXIT_WATCHDOG: 1700da56ff91SPeter Maydell DPRINTF("handle watchdog expiry\n"); 170131f2cb8fSBharat Bhushan watchdog_perform_action(); 170231f2cb8fSBharat Bhushan ret = 0; 170331f2cb8fSBharat Bhushan break; 170431f2cb8fSBharat Bhushan 17058a0548f9SBharat Bhushan case KVM_EXIT_DEBUG: 17068a0548f9SBharat Bhushan DPRINTF("handle debug exception\n"); 17078a0548f9SBharat Bhushan if (kvm_handle_debug(cpu, run)) { 17088a0548f9SBharat Bhushan ret = EXCP_DEBUG; 17098a0548f9SBharat Bhushan break; 17108a0548f9SBharat Bhushan } 17118a0548f9SBharat Bhushan /* re-enter, this exception was guest-internal */ 17128a0548f9SBharat Bhushan ret = 0; 17138a0548f9SBharat Bhushan break; 17148a0548f9SBharat Bhushan 171573aaec4aSJan Kiszka default: 171673aaec4aSJan Kiszka fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason); 171773aaec4aSJan Kiszka ret = -1; 171873aaec4aSJan Kiszka 
break; 1719d76d1650Saurel32 } 1720d76d1650Saurel32 17214b8523eeSJan Kiszka qemu_mutex_unlock_iothread(); 1722d76d1650Saurel32 return ret; 1723d76d1650Saurel32 } 1724d76d1650Saurel32 172531f2cb8fSBharat Bhushan int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits) 172631f2cb8fSBharat Bhushan { 172731f2cb8fSBharat Bhushan CPUState *cs = CPU(cpu); 172831f2cb8fSBharat Bhushan uint32_t bits = tsr_bits; 172931f2cb8fSBharat Bhushan struct kvm_one_reg reg = { 173031f2cb8fSBharat Bhushan .id = KVM_REG_PPC_OR_TSR, 173131f2cb8fSBharat Bhushan .addr = (uintptr_t) &bits, 173231f2cb8fSBharat Bhushan }; 173331f2cb8fSBharat Bhushan 173431f2cb8fSBharat Bhushan return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®); 173531f2cb8fSBharat Bhushan } 173631f2cb8fSBharat Bhushan 173731f2cb8fSBharat Bhushan int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits) 173831f2cb8fSBharat Bhushan { 173931f2cb8fSBharat Bhushan 174031f2cb8fSBharat Bhushan CPUState *cs = CPU(cpu); 174131f2cb8fSBharat Bhushan uint32_t bits = tsr_bits; 174231f2cb8fSBharat Bhushan struct kvm_one_reg reg = { 174331f2cb8fSBharat Bhushan .id = KVM_REG_PPC_CLEAR_TSR, 174431f2cb8fSBharat Bhushan .addr = (uintptr_t) &bits, 174531f2cb8fSBharat Bhushan }; 174631f2cb8fSBharat Bhushan 174731f2cb8fSBharat Bhushan return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®); 174831f2cb8fSBharat Bhushan } 174931f2cb8fSBharat Bhushan 175031f2cb8fSBharat Bhushan int kvmppc_set_tcr(PowerPCCPU *cpu) 175131f2cb8fSBharat Bhushan { 175231f2cb8fSBharat Bhushan CPUState *cs = CPU(cpu); 175331f2cb8fSBharat Bhushan CPUPPCState *env = &cpu->env; 175431f2cb8fSBharat Bhushan uint32_t tcr = env->spr[SPR_BOOKE_TCR]; 175531f2cb8fSBharat Bhushan 175631f2cb8fSBharat Bhushan struct kvm_one_reg reg = { 175731f2cb8fSBharat Bhushan .id = KVM_REG_PPC_TCR, 175831f2cb8fSBharat Bhushan .addr = (uintptr_t) &tcr, 175931f2cb8fSBharat Bhushan }; 176031f2cb8fSBharat Bhushan 176131f2cb8fSBharat Bhushan return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®); 176231f2cb8fSBharat Bhushan 
} 176331f2cb8fSBharat Bhushan 176431f2cb8fSBharat Bhushan int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu) 176531f2cb8fSBharat Bhushan { 176631f2cb8fSBharat Bhushan CPUState *cs = CPU(cpu); 176731f2cb8fSBharat Bhushan int ret; 176831f2cb8fSBharat Bhushan 176931f2cb8fSBharat Bhushan if (!kvm_enabled()) { 177031f2cb8fSBharat Bhushan return -1; 177131f2cb8fSBharat Bhushan } 177231f2cb8fSBharat Bhushan 177331f2cb8fSBharat Bhushan if (!cap_ppc_watchdog) { 177431f2cb8fSBharat Bhushan printf("warning: KVM does not support watchdog"); 177531f2cb8fSBharat Bhushan return -1; 177631f2cb8fSBharat Bhushan } 177731f2cb8fSBharat Bhushan 177848add816SCornelia Huck ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_BOOKE_WATCHDOG, 0); 177931f2cb8fSBharat Bhushan if (ret < 0) { 178031f2cb8fSBharat Bhushan fprintf(stderr, "%s: couldn't enable KVM_CAP_PPC_BOOKE_WATCHDOG: %s\n", 178131f2cb8fSBharat Bhushan __func__, strerror(-ret)); 178231f2cb8fSBharat Bhushan return ret; 178331f2cb8fSBharat Bhushan } 178431f2cb8fSBharat Bhushan 178531f2cb8fSBharat Bhushan return ret; 178631f2cb8fSBharat Bhushan } 178731f2cb8fSBharat Bhushan 1788dc333cd6SAlexander Graf static int read_cpuinfo(const char *field, char *value, int len) 1789dc333cd6SAlexander Graf { 1790dc333cd6SAlexander Graf FILE *f; 1791dc333cd6SAlexander Graf int ret = -1; 1792dc333cd6SAlexander Graf int field_len = strlen(field); 1793dc333cd6SAlexander Graf char line[512]; 1794dc333cd6SAlexander Graf 1795dc333cd6SAlexander Graf f = fopen("/proc/cpuinfo", "r"); 1796dc333cd6SAlexander Graf if (!f) { 1797dc333cd6SAlexander Graf return -1; 1798dc333cd6SAlexander Graf } 1799dc333cd6SAlexander Graf 1800dc333cd6SAlexander Graf do { 1801dc333cd6SAlexander Graf if (!fgets(line, sizeof(line), f)) { 1802dc333cd6SAlexander Graf break; 1803dc333cd6SAlexander Graf } 1804dc333cd6SAlexander Graf if (!strncmp(line, field, field_len)) { 1805ae215068SJim Meyering pstrcpy(value, len, line); 1806dc333cd6SAlexander Graf ret = 0; 1807dc333cd6SAlexander Graf break; 
1808dc333cd6SAlexander Graf } 1809dc333cd6SAlexander Graf } while(*line); 1810dc333cd6SAlexander Graf 1811dc333cd6SAlexander Graf fclose(f); 1812dc333cd6SAlexander Graf 1813dc333cd6SAlexander Graf return ret; 1814dc333cd6SAlexander Graf } 1815dc333cd6SAlexander Graf 1816dc333cd6SAlexander Graf uint32_t kvmppc_get_tbfreq(void) 1817dc333cd6SAlexander Graf { 1818dc333cd6SAlexander Graf char line[512]; 1819dc333cd6SAlexander Graf char *ns; 182073bcb24dSRutuja Shah uint32_t retval = NANOSECONDS_PER_SECOND; 1821dc333cd6SAlexander Graf 1822dc333cd6SAlexander Graf if (read_cpuinfo("timebase", line, sizeof(line))) { 1823dc333cd6SAlexander Graf return retval; 1824dc333cd6SAlexander Graf } 1825dc333cd6SAlexander Graf 1826dc333cd6SAlexander Graf if (!(ns = strchr(line, ':'))) { 1827dc333cd6SAlexander Graf return retval; 1828dc333cd6SAlexander Graf } 1829dc333cd6SAlexander Graf 1830dc333cd6SAlexander Graf ns++; 1831dc333cd6SAlexander Graf 1832f9b8e7f6SShraddha Barke return atoi(ns); 1833ef951443SNikunj A Dadhania } 1834ef951443SNikunj A Dadhania 1835ef951443SNikunj A Dadhania bool kvmppc_get_host_serial(char **value) 1836ef951443SNikunj A Dadhania { 1837ef951443SNikunj A Dadhania return g_file_get_contents("/proc/device-tree/system-id", value, NULL, 1838ef951443SNikunj A Dadhania NULL); 1839ef951443SNikunj A Dadhania } 1840ef951443SNikunj A Dadhania 1841ef951443SNikunj A Dadhania bool kvmppc_get_host_model(char **value) 1842ef951443SNikunj A Dadhania { 1843ef951443SNikunj A Dadhania return g_file_get_contents("/proc/device-tree/model", value, NULL, NULL); 1844dc333cd6SAlexander Graf } 18454513d923SGleb Natapov 1846eadaada1SAlexander Graf /* Try to find a device tree node for a CPU with clock-frequency property */ 1847eadaada1SAlexander Graf static int kvmppc_find_cpu_dt(char *buf, int buf_len) 1848eadaada1SAlexander Graf { 1849eadaada1SAlexander Graf struct dirent *dirp; 1850eadaada1SAlexander Graf DIR *dp; 1851eadaada1SAlexander Graf 1852eadaada1SAlexander Graf if ((dp = 
opendir(PROC_DEVTREE_CPU)) == NULL) { 1853eadaada1SAlexander Graf printf("Can't open directory " PROC_DEVTREE_CPU "\n"); 1854eadaada1SAlexander Graf return -1; 1855eadaada1SAlexander Graf } 1856eadaada1SAlexander Graf 1857eadaada1SAlexander Graf buf[0] = '\0'; 1858eadaada1SAlexander Graf while ((dirp = readdir(dp)) != NULL) { 1859eadaada1SAlexander Graf FILE *f; 1860eadaada1SAlexander Graf snprintf(buf, buf_len, "%s%s/clock-frequency", PROC_DEVTREE_CPU, 1861eadaada1SAlexander Graf dirp->d_name); 1862eadaada1SAlexander Graf f = fopen(buf, "r"); 1863eadaada1SAlexander Graf if (f) { 1864eadaada1SAlexander Graf snprintf(buf, buf_len, "%s%s", PROC_DEVTREE_CPU, dirp->d_name); 1865eadaada1SAlexander Graf fclose(f); 1866eadaada1SAlexander Graf break; 1867eadaada1SAlexander Graf } 1868eadaada1SAlexander Graf buf[0] = '\0'; 1869eadaada1SAlexander Graf } 1870eadaada1SAlexander Graf closedir(dp); 1871eadaada1SAlexander Graf if (buf[0] == '\0') { 1872eadaada1SAlexander Graf printf("Unknown host!\n"); 1873eadaada1SAlexander Graf return -1; 1874eadaada1SAlexander Graf } 1875eadaada1SAlexander Graf 1876eadaada1SAlexander Graf return 0; 1877eadaada1SAlexander Graf } 1878eadaada1SAlexander Graf 18797d94a30bSSukadev Bhattiprolu static uint64_t kvmppc_read_int_dt(const char *filename) 1880eadaada1SAlexander Graf { 18819bc884b7SDavid Gibson union { 18829bc884b7SDavid Gibson uint32_t v32; 18839bc884b7SDavid Gibson uint64_t v64; 18849bc884b7SDavid Gibson } u; 1885eadaada1SAlexander Graf FILE *f; 1886eadaada1SAlexander Graf int len; 1887eadaada1SAlexander Graf 18887d94a30bSSukadev Bhattiprolu f = fopen(filename, "rb"); 1889eadaada1SAlexander Graf if (!f) { 1890eadaada1SAlexander Graf return -1; 1891eadaada1SAlexander Graf } 1892eadaada1SAlexander Graf 18939bc884b7SDavid Gibson len = fread(&u, 1, sizeof(u), f); 1894eadaada1SAlexander Graf fclose(f); 1895eadaada1SAlexander Graf switch (len) { 18969bc884b7SDavid Gibson case 4: 18979bc884b7SDavid Gibson /* property is a 32-bit quantity */ 
        /* (tail of kvmppc_read_int_dt — the definition starts before this
         * chunk; it decodes a big-endian 4- or 8-byte DT property value) */
        return be32_to_cpu(u.v32);
    case 8:
        return be64_to_cpu(u.v64);
    }

    /* Unrecognized property length: treat as "no value" */
    return 0;
}

/* Read a CPU node property from the host device tree that's a single
 * integer (32-bit or 64-bit).  Returns -1 (all-ones) if the host CPU
 * device-tree node can't be located, and 0 if the property itself
 * can't be read or isn't in a recognized format.  Callers test the
 * result against -1 (see kvmppc_host_cpu_class_init()). */
static uint64_t kvmppc_read_int_cpu_dt(const char *propname)
{
    char buf[PATH_MAX], *tmp;
    uint64_t val;

    if (kvmppc_find_cpu_dt(buf, sizeof(buf))) {
        return -1;
    }

    tmp = g_strdup_printf("%s/%s", buf, propname);
    val = kvmppc_read_int_dt(tmp);
    g_free(tmp);

    return val;
}

/* Host CPU clock frequency in Hz, as reported by the host device tree */
uint64_t kvmppc_get_clockfreq(void)
{
    return kvmppc_read_int_cpu_dt("clock-frequency");
}

/* Host "ibm,vmx" DT property: 0 = none, 1 = Altivec, 2 = VSX (-1 = unknown) */
uint32_t kvmppc_get_vmx(void)
{
    return kvmppc_read_int_cpu_dt("ibm,vmx");
}

/* Host "ibm,dfp" DT property: non-zero if decimal FP is supported
 * (-1 = unknown) */
uint32_t kvmppc_get_dfp(void)
{
    return kvmppc_read_int_cpu_dt("ibm,dfp");
}

/* Fetch the KVM paravirt info (hypercall sequence etc.) for this VM.
 * Returns 0 on success, 1 if the capability or ioctl is unavailable. */
static int kvmppc_get_pvinfo(CPUPPCState *env, struct kvm_ppc_pvinfo *pvinfo)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    if (kvm_vm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
        !kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_PVINFO, pvinfo)) {
        return 0;
    }

    return 1;
}

/* Returns 1 if the host KVM advertises the EV_IDLE paravirt feature,
 * 0 otherwise */
int kvmppc_get_hasidle(CPUPPCState *env)
{
    struct kvm_ppc_pvinfo pvinfo;

    if (!kvmppc_get_pvinfo(env, &pvinfo) &&
        (pvinfo.flags & KVM_PPC_PVINFO_FLAGS_EV_IDLE)) {
        return 1;
    }

    return 0;
}

/* Copy the guest-visible hypercall instruction sequence into buf
 * (buf_len bytes).  Returns 0 when KVM supplied the sequence, 1 when
 * the hand-rolled always-fail fallback was emitted instead.
 * NOTE(review): buf is cast to uint32_t* — assumes 4-byte alignment
 * of the caller's buffer. */
int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
{
    uint32_t *hc = (uint32_t*)buf;
    struct kvm_ppc_pvinfo pvinfo;

    if (!kvmppc_get_pvinfo(env, &pvinfo)) {
        memcpy(buf, pvinfo.hcall, buf_len);
        return 0;
    }

    /*
     * Fallback to always fail hypercalls regardless of endianness:
     *
     *     tdi 0,r0,72 (becomes b .+8 in wrong endian, nop in good endian)
     *     li r3, -1
     *     b .+8       (becomes nop in wrong endian)
     *     bswap32(li r3, -1)
     */

    hc[0] = cpu_to_be32(0x08000048);
    hc[1] = cpu_to_be32(0x3860ffff);
    hc[2] = cpu_to_be32(0x48000008);
    hc[3] = cpu_to_be32(bswap32(0x3860ffff));

    return 1;
}

/* Ask KVM to handle the given hypercall in-kernel */
static inline int kvmppc_enable_hcall(KVMState *s, target_ulong hcall)
{
    return kvm_vm_enable_cap(s, KVM_CAP_PPC_ENABLE_HCALL, 0, hcall, 1);
}

/* Enable in-kernel handling of H_LOGICAL_CI_{LOAD,STORE} */
void kvmppc_enable_logical_ci_hcalls(void)
{
    /*
     * FIXME: it would be nice if we could detect the cases where
     * we're using a device which requires the in kernel
     * implementation of these hcalls, but the kernel lacks them and
     * produce a warning.
     */
    kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_LOAD);
    kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_STORE);
}

/* Enable in-kernel handling of H_SET_MODE */
void kvmppc_enable_set_mode_hcall(void)
{
    kvmppc_enable_hcall(kvm_state, H_SET_MODE);
}

/* Enable in-kernel handling of H_CLEAR_REF / H_CLEAR_MOD */
void kvmppc_enable_clear_ref_mod_hcalls(void)
{
    kvmppc_enable_hcall(kvm_state, H_CLEAR_REF);
    kvmppc_enable_hcall(kvm_state, H_CLEAR_MOD);
}

/* Put the vCPU into PAPR (pSeries paravirtualized) mode.  Fatal if the
 * host KVM or vCPU type can't do it. */
void kvmppc_set_papr(PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    int ret;

    ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_PAPR, 0);
    if (ret) {
        error_report("This vCPU type or KVM version does not support PAPR");
        exit(1);
    }

    /* Update the capability flag so we sync the right information
     * with kvm */
    cap_papr = 1;
}

/* Set the architecture compatibility PVR (logical PVR) for this vCPU */
int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr)
{
    return kvm_set_one_reg(CPU(cpu), KVM_REG_PPC_ARCH_COMPAT, &compat_pvr);
}

/* Enable/disable MPIC EPR (external proxy) mode for this vCPU.  Fatal
 * only when enabling was requested and the host can't do it. */
void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy)
{
    CPUState *cs = CPU(cpu);
    int ret;

    ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_EPR, 0, mpic_proxy);
    if (ret && mpic_proxy) {
        error_report("This KVM version does not support EPR");
        exit(1);
    }
}

/* Number of SMT threads per core supported by KVM (1 if the capability
 * is absent) */
int kvmppc_smt_threads(void)
{
    return cap_ppc_smt ? cap_ppc_smt : 1;
}

#ifdef TARGET_PPC64
/* Allocate a contiguous Real Mode Area via KVM when the hardware
 * requires one (cap_ppc_rma == 2).  On success *rma points at the
 * mapped region and the mapped size is returned; returns 0 when no
 * contiguous RMA is needed, -1 on error. */
off_t kvmppc_alloc_rma(void **rma)
{
    off_t size;
    int fd;
    struct kvm_allocate_rma ret;

    /* If cap_ppc_rma == 0, contiguous RMA allocation is not supported
     * if cap_ppc_rma == 1, contiguous RMA allocation is supported, but
     *                      not necessary on this hardware
     * if cap_ppc_rma == 2, contiguous RMA allocation is needed on this hardware
     *
     * FIXME: We should allow the user to force contiguous RMA
     * allocation in the cap_ppc_rma==1 case.
     */
    if (cap_ppc_rma < 2) {
        return 0;
    }

    fd = kvm_vm_ioctl(kvm_state, KVM_ALLOCATE_RMA, &ret);
    if (fd < 0) {
        fprintf(stderr, "KVM: Error on KVM_ALLOCATE_RMA: %s\n",
                strerror(errno));
        return -1;
    }

    /* Cap the RMA at 256MiB regardless of what the kernel offers */
    size = MIN(ret.rma_size, 256ul << 20);

    *rma = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
    if (*rma == MAP_FAILED) {
        fprintf(stderr, "KVM: Error mapping RMA: %s\n", strerror(errno));
        return -1;
    };

    return size;
}

/* Clamp the requested RMA size to what the host page sizes and hash
 * table shift allow (VRMA constraint).  Returns current_size untouched
 * when the hardware uses a real contiguous RMA. */
uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift)
{
    struct kvm_ppc_smmu_info info;
    long rampagesize, best_page_shift;
    int i;

    if (cap_ppc_rma >= 2) {
        return current_size;
    }

    /* Find the largest hardware supported page size that's less than
     * or equal to the (logical) backing page size of guest RAM */
    kvm_get_smmu_info(POWERPC_CPU(first_cpu), &info);
    rampagesize = qemu_getrampagesize();
    best_page_shift = 0;

    for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
        struct kvm_ppc_one_seg_page_size *sps = &info.sps[i];

        if (!sps->page_shift) {
            continue;
        }

        if ((sps->page_shift > best_page_shift)
            && ((1UL << sps->page_shift) <= rampagesize)) {
            best_page_shift = sps->page_shift;
        }
    }

    /* NOTE(review): the "- 7" presumably derives from the VRMA HPTE
     * encoding (1/128 of the hash table) — confirm against ISA docs */
    return MIN(current_size,
               1ULL << (best_page_shift + hash_shift - 7));
}
#endif

/* True if KVM can handle multi-TCE (H_PUT_TCE_INDIRECT/H_STUFF_TCE) */
bool kvmppc_spapr_use_multitce(void)
{
    return cap_spapr_multitce;
}

/* Create an in-kernel TCE (DMA translation) table for the given LIOBN
 * and mmap it into QEMU.  On success returns the mapped table and
 * stores its fd in *pfd; returns NULL (and *pfd == -1) when the
 * kernel can't provide one and the caller must use the userspace
 * fallback. */
void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t window_size, int *pfd,
                              bool need_vfio)
{
    struct kvm_create_spapr_tce args = {
        .liobn = liobn,
        .window_size = window_size,
    };
    long len;
    int fd;
    void *table;

    /* Must set fd to -1 so we don't try to munmap when called for
     * destroying the table, which the upper layers -will- do
     */
    *pfd = -1;
    if (!cap_spapr_tce || (need_vfio && !cap_spapr_vfio)) {
        return NULL;
    }

    fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE, &args);
    if (fd < 0) {
        fprintf(stderr, "KVM: Failed to create TCE table for liobn 0x%x\n",
                liobn);
        return NULL;
    }

    len = (window_size / SPAPR_TCE_PAGE_SIZE) * sizeof(uint64_t);
    /* FIXME: round this up to page size */

    table = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
    if (table == MAP_FAILED) {
        fprintf(stderr, "KVM: Failed to map TCE table for liobn 0x%x\n",
                liobn);
        close(fd);
        return NULL;
    }

    *pfd = fd;
    return table;
}

/* Unmap and close a TCE table created by kvmppc_create_spapr_tce().
 * A negative fd (userspace table) is a no-op returning -1. */
int kvmppc_remove_spapr_tce(void *table, int fd, uint32_t nb_table)
{
    long len;

    if (fd < 0) {
        return -1;
    }

    len = nb_table * sizeof(uint64_t);
    if ((munmap(table, len) < 0) ||
        (close(fd) < 0)) {
        fprintf(stderr, "KVM: Unexpected error removing TCE table: %s",
                strerror(errno));
        /* Leak the table */
    }

    return 0;
}

/* (Re-)allocate the guest hash page table in the kernel.
 * Returns: 0  -> caller must allocate the HTAB itself in QEMU,
 *          >0 -> kernel allocated it; value is the HTAB order (shift),
 *          <0 -> ioctl error. */
int kvmppc_reset_htab(int shift_hint)
{
    uint32_t shift = shift_hint;

    if (!kvm_enabled()) {
        /* Full emulation, tell caller to allocate htab itself */
        return 0;
    }
    if (kvm_check_extension(kvm_state, KVM_CAP_PPC_ALLOC_HTAB)) {
        int ret;
        ret = kvm_vm_ioctl(kvm_state, KVM_PPC_ALLOCATE_HTAB, &shift);
        if (ret == -ENOTTY) {
            /* At least some versions of PR KVM advertise the
             * capability, but don't implement the ioctl().  Oops.
             * Return 0 so that we allocate the htab in qemu, as is
             * correct for PR. */
            return 0;
        } else if (ret < 0) {
            return ret;
        }
        return shift;
    }

    /* We have a kernel that predates the htab reset calls.  For PR
     * KVM, we need to allocate the htab ourselves, for an HV KVM of
     * this era, it has allocated a 16MB fixed size hash table already. */
    if (kvmppc_is_pr(kvm_state)) {
        /* PR - tell caller to allocate htab */
        return 0;
    } else {
        /* HV - assume 16MB kernel allocated htab */
        return 24;
    }
}

/* Read the host Processor Version Register (host-only inline asm) */
static inline uint32_t mfpvr(void)
{
    uint32_t pvr;

    asm ("mfpvr %0"
         : "=r"(pvr));
    return pvr;
}

/* Set or clear the given flag bits in *word depending on 'on' */
static void alter_insns(uint64_t *word, uint64_t flags, bool on)
{
    if (on) {
        *word |= flags;
    } else {
        *word &= ~flags;
    }
}

/* Instance init for the "host" CPU type; only valid under KVM */
static void kvmppc_host_cpu_initfn(Object *obj)
{
    assert(kvm_enabled());
}

/* Class init for the "host" CPU type: override the class defaults with
 * what the host device tree and PVR actually report */
static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
    uint32_t vmx = kvmppc_get_vmx();
    uint32_t dfp = kvmppc_get_dfp();
    uint32_t dcache_size = kvmppc_read_int_cpu_dt("d-cache-size");
    uint32_t icache_size = kvmppc_read_int_cpu_dt("i-cache-size");

    /* Now fix up the class with information we can query from the host */
    pcc->pvr = mfpvr();

    /* -1 means "property not available"; uint32_t vs -1 compares via
     * the usual unsigned conversion */
    if (vmx != -1) {
        /* Only override when we know what the host supports */
        alter_insns(&pcc->insns_flags, PPC_ALTIVEC, vmx > 0);
        alter_insns(&pcc->insns_flags2, PPC2_VSX, vmx > 1);
    }
    if (dfp != -1) {
        /* Only override when we know what the host supports */
        alter_insns(&pcc->insns_flags2, PPC2_DFP, dfp);
    }

    if (dcache_size != -1) {
        pcc->l1_dcache_size = dcache_size;
    }

    if (icache_size != -1) {
        pcc->l1_icache_size = icache_size;
    }

    /* Reason: kvmppc_host_cpu_initfn() dies when !kvm_enabled() */
    dc->cannot_destroy_with_object_finalize_yet = true;
}

/* True if KVM supports MPIC EPR (external proxy) delivery */
bool kvmppc_has_cap_epr(void)
{
    return cap_epr;
}

/* True if KVM exposes the HTAB fd interface (hash table migration) */
bool kvmppc_has_cap_htab_fd(void)
{
    return cap_htab_fd;
}

/* True if KVM can fix up hypercall instructions in the guest */
bool kvmppc_has_cap_fixup_hcalls(void)
{
    return cap_fixup_hcalls;
}

/* True if KVM supports hardware transactional memory for the guest */
bool kvmppc_has_cap_htm(void)
{
    return cap_htm;
}
23075b79b1caSAlexey Kardashevskiy static PowerPCCPUClass *ppc_cpu_get_family_class(PowerPCCPUClass *pcc) 23085b79b1caSAlexey Kardashevskiy { 23095b79b1caSAlexey Kardashevskiy ObjectClass *oc = OBJECT_CLASS(pcc); 23105b79b1caSAlexey Kardashevskiy 23115b79b1caSAlexey Kardashevskiy while (oc && !object_class_is_abstract(oc)) { 23125b79b1caSAlexey Kardashevskiy oc = object_class_get_parent(oc); 23135b79b1caSAlexey Kardashevskiy } 23145b79b1caSAlexey Kardashevskiy assert(oc); 23155b79b1caSAlexey Kardashevskiy 23165b79b1caSAlexey Kardashevskiy return POWERPC_CPU_CLASS(oc); 23175b79b1caSAlexey Kardashevskiy } 23185b79b1caSAlexey Kardashevskiy 231952b2519cSThomas Huth PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void) 232052b2519cSThomas Huth { 232152b2519cSThomas Huth uint32_t host_pvr = mfpvr(); 232252b2519cSThomas Huth PowerPCCPUClass *pvr_pcc; 232352b2519cSThomas Huth 232452b2519cSThomas Huth pvr_pcc = ppc_cpu_class_by_pvr(host_pvr); 232552b2519cSThomas Huth if (pvr_pcc == NULL) { 232652b2519cSThomas Huth pvr_pcc = ppc_cpu_class_by_pvr_mask(host_pvr); 232752b2519cSThomas Huth } 232852b2519cSThomas Huth 232952b2519cSThomas Huth return pvr_pcc; 233052b2519cSThomas Huth } 233152b2519cSThomas Huth 23325ba4576bSAndreas Färber static int kvm_ppc_register_host_cpu_type(void) 23335ba4576bSAndreas Färber { 23345ba4576bSAndreas Färber TypeInfo type_info = { 23355ba4576bSAndreas Färber .name = TYPE_HOST_POWERPC_CPU, 23365ba4576bSAndreas Färber .instance_init = kvmppc_host_cpu_initfn, 23375ba4576bSAndreas Färber .class_init = kvmppc_host_cpu_class_init, 23385ba4576bSAndreas Färber }; 23395ba4576bSAndreas Färber PowerPCCPUClass *pvr_pcc; 23405b79b1caSAlexey Kardashevskiy DeviceClass *dc; 2341715d4b96SThomas Huth int i; 23425ba4576bSAndreas Färber 234352b2519cSThomas Huth pvr_pcc = kvm_ppc_get_host_cpu_class(); 23443bc9ccc0SAlexey Kardashevskiy if (pvr_pcc == NULL) { 23455ba4576bSAndreas Färber return -1; 23465ba4576bSAndreas Färber } 23475ba4576bSAndreas Färber type_info.parent = 
object_class_get_name(OBJECT_CLASS(pvr_pcc)); 23485ba4576bSAndreas Färber type_register(&type_info); 23495b79b1caSAlexey Kardashevskiy 23503b542549SBharata B Rao #if defined(TARGET_PPC64) 23513b542549SBharata B Rao type_info.name = g_strdup_printf("%s-"TYPE_SPAPR_CPU_CORE, "host"); 23523b542549SBharata B Rao type_info.parent = TYPE_SPAPR_CPU_CORE, 23537ebaf795SBharata B Rao type_info.instance_size = sizeof(sPAPRCPUCore); 23547ebaf795SBharata B Rao type_info.instance_init = NULL; 23557ebaf795SBharata B Rao type_info.class_init = spapr_cpu_core_class_init; 23567ebaf795SBharata B Rao type_info.class_data = (void *) "host"; 23573b542549SBharata B Rao type_register(&type_info); 23583b542549SBharata B Rao g_free((void *)type_info.name); 23593b542549SBharata B Rao #endif 23603b542549SBharata B Rao 2361715d4b96SThomas Huth /* 2362715d4b96SThomas Huth * Update generic CPU family class alias (e.g. on a POWER8NVL host, 2363715d4b96SThomas Huth * we want "POWER8" to be a "family" alias that points to the current 2364715d4b96SThomas Huth * host CPU type, too) 2365715d4b96SThomas Huth */ 2366715d4b96SThomas Huth dc = DEVICE_CLASS(ppc_cpu_get_family_class(pvr_pcc)); 2367715d4b96SThomas Huth for (i = 0; ppc_cpu_aliases[i].alias != NULL; i++) { 2368715d4b96SThomas Huth if (strcmp(ppc_cpu_aliases[i].alias, dc->desc) == 0) { 2369715d4b96SThomas Huth ObjectClass *oc = OBJECT_CLASS(pvr_pcc); 2370715d4b96SThomas Huth char *suffix; 2371715d4b96SThomas Huth 2372715d4b96SThomas Huth ppc_cpu_aliases[i].model = g_strdup(object_class_get_name(oc)); 2373715d4b96SThomas Huth suffix = strstr(ppc_cpu_aliases[i].model, "-"TYPE_POWERPC_CPU); 2374715d4b96SThomas Huth if (suffix) { 2375715d4b96SThomas Huth *suffix = 0; 2376715d4b96SThomas Huth } 2377715d4b96SThomas Huth ppc_cpu_aliases[i].oc = oc; 2378715d4b96SThomas Huth break; 2379715d4b96SThomas Huth } 2380715d4b96SThomas Huth } 2381715d4b96SThomas Huth 23825ba4576bSAndreas Färber return 0; 23835ba4576bSAndreas Färber } 23845ba4576bSAndreas Färber 
2385feaa64c4SDavid Gibson int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function) 2386feaa64c4SDavid Gibson { 2387feaa64c4SDavid Gibson struct kvm_rtas_token_args args = { 2388feaa64c4SDavid Gibson .token = token, 2389feaa64c4SDavid Gibson }; 2390feaa64c4SDavid Gibson 2391feaa64c4SDavid Gibson if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_RTAS)) { 2392feaa64c4SDavid Gibson return -ENOENT; 2393feaa64c4SDavid Gibson } 2394feaa64c4SDavid Gibson 2395feaa64c4SDavid Gibson strncpy(args.name, function, sizeof(args.name)); 2396feaa64c4SDavid Gibson 2397feaa64c4SDavid Gibson return kvm_vm_ioctl(kvm_state, KVM_PPC_RTAS_DEFINE_TOKEN, &args); 2398feaa64c4SDavid Gibson } 239912b1143bSDavid Gibson 2400e68cb8b4SAlexey Kardashevskiy int kvmppc_get_htab_fd(bool write) 2401e68cb8b4SAlexey Kardashevskiy { 2402e68cb8b4SAlexey Kardashevskiy struct kvm_get_htab_fd s = { 2403e68cb8b4SAlexey Kardashevskiy .flags = write ? KVM_GET_HTAB_WRITE : 0, 2404e68cb8b4SAlexey Kardashevskiy .start_index = 0, 2405e68cb8b4SAlexey Kardashevskiy }; 2406e68cb8b4SAlexey Kardashevskiy 2407e68cb8b4SAlexey Kardashevskiy if (!cap_htab_fd) { 2408e68cb8b4SAlexey Kardashevskiy fprintf(stderr, "KVM version doesn't support saving the hash table\n"); 2409e68cb8b4SAlexey Kardashevskiy return -1; 2410e68cb8b4SAlexey Kardashevskiy } 2411e68cb8b4SAlexey Kardashevskiy 2412e68cb8b4SAlexey Kardashevskiy return kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &s); 2413e68cb8b4SAlexey Kardashevskiy } 2414e68cb8b4SAlexey Kardashevskiy 2415e68cb8b4SAlexey Kardashevskiy int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns) 2416e68cb8b4SAlexey Kardashevskiy { 2417bc72ad67SAlex Bligh int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); 2418e68cb8b4SAlexey Kardashevskiy uint8_t buf[bufsize]; 2419e68cb8b4SAlexey Kardashevskiy ssize_t rc; 2420e68cb8b4SAlexey Kardashevskiy 2421e68cb8b4SAlexey Kardashevskiy do { 2422e68cb8b4SAlexey Kardashevskiy rc = read(fd, buf, bufsize); 
2423e68cb8b4SAlexey Kardashevskiy if (rc < 0) { 2424e68cb8b4SAlexey Kardashevskiy fprintf(stderr, "Error reading data from KVM HTAB fd: %s\n", 2425e68cb8b4SAlexey Kardashevskiy strerror(errno)); 2426e68cb8b4SAlexey Kardashevskiy return rc; 2427e68cb8b4SAlexey Kardashevskiy } else if (rc) { 2428e094c4c1SCédric Le Goater uint8_t *buffer = buf; 2429e094c4c1SCédric Le Goater ssize_t n = rc; 2430e094c4c1SCédric Le Goater while (n) { 2431e094c4c1SCédric Le Goater struct kvm_get_htab_header *head = 2432e094c4c1SCédric Le Goater (struct kvm_get_htab_header *) buffer; 2433e094c4c1SCédric Le Goater size_t chunksize = sizeof(*head) + 2434e094c4c1SCédric Le Goater HASH_PTE_SIZE_64 * head->n_valid; 2435e094c4c1SCédric Le Goater 2436e094c4c1SCédric Le Goater qemu_put_be32(f, head->index); 2437e094c4c1SCédric Le Goater qemu_put_be16(f, head->n_valid); 2438e094c4c1SCédric Le Goater qemu_put_be16(f, head->n_invalid); 2439e094c4c1SCédric Le Goater qemu_put_buffer(f, (void *)(head + 1), 2440e094c4c1SCédric Le Goater HASH_PTE_SIZE_64 * head->n_valid); 2441e094c4c1SCédric Le Goater 2442e094c4c1SCédric Le Goater buffer += chunksize; 2443e094c4c1SCédric Le Goater n -= chunksize; 2444e094c4c1SCédric Le Goater } 2445e68cb8b4SAlexey Kardashevskiy } 2446e68cb8b4SAlexey Kardashevskiy } while ((rc != 0) 2447e68cb8b4SAlexey Kardashevskiy && ((max_ns < 0) 2448bc72ad67SAlex Bligh || ((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) < max_ns))); 2449e68cb8b4SAlexey Kardashevskiy 2450e68cb8b4SAlexey Kardashevskiy return (rc == 0) ? 
1 : 0; 2451e68cb8b4SAlexey Kardashevskiy } 2452e68cb8b4SAlexey Kardashevskiy 2453e68cb8b4SAlexey Kardashevskiy int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index, 2454e68cb8b4SAlexey Kardashevskiy uint16_t n_valid, uint16_t n_invalid) 2455e68cb8b4SAlexey Kardashevskiy { 2456e68cb8b4SAlexey Kardashevskiy struct kvm_get_htab_header *buf; 2457e68cb8b4SAlexey Kardashevskiy size_t chunksize = sizeof(*buf) + n_valid*HASH_PTE_SIZE_64; 2458e68cb8b4SAlexey Kardashevskiy ssize_t rc; 2459e68cb8b4SAlexey Kardashevskiy 2460e68cb8b4SAlexey Kardashevskiy buf = alloca(chunksize); 2461e68cb8b4SAlexey Kardashevskiy buf->index = index; 2462e68cb8b4SAlexey Kardashevskiy buf->n_valid = n_valid; 2463e68cb8b4SAlexey Kardashevskiy buf->n_invalid = n_invalid; 2464e68cb8b4SAlexey Kardashevskiy 2465e68cb8b4SAlexey Kardashevskiy qemu_get_buffer(f, (void *)(buf + 1), HASH_PTE_SIZE_64*n_valid); 2466e68cb8b4SAlexey Kardashevskiy 2467e68cb8b4SAlexey Kardashevskiy rc = write(fd, buf, chunksize); 2468e68cb8b4SAlexey Kardashevskiy if (rc < 0) { 2469e68cb8b4SAlexey Kardashevskiy fprintf(stderr, "Error writing KVM hash table: %s\n", 2470e68cb8b4SAlexey Kardashevskiy strerror(errno)); 2471e68cb8b4SAlexey Kardashevskiy return rc; 2472e68cb8b4SAlexey Kardashevskiy } 2473e68cb8b4SAlexey Kardashevskiy if (rc != chunksize) { 2474e68cb8b4SAlexey Kardashevskiy /* We should never get a short write on a single chunk */ 2475e68cb8b4SAlexey Kardashevskiy fprintf(stderr, "Short write, restoring KVM hash table\n"); 2476e68cb8b4SAlexey Kardashevskiy return -1; 2477e68cb8b4SAlexey Kardashevskiy } 2478e68cb8b4SAlexey Kardashevskiy return 0; 2479e68cb8b4SAlexey Kardashevskiy } 2480e68cb8b4SAlexey Kardashevskiy 248120d695a9SAndreas Färber bool kvm_arch_stop_on_emulation_error(CPUState *cpu) 24824513d923SGleb Natapov { 24834513d923SGleb Natapov return true; 24844513d923SGleb Natapov } 2485a1b87fe0SJan Kiszka 248620d695a9SAndreas Färber int kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr) 
2487a1b87fe0SJan Kiszka { 2488a1b87fe0SJan Kiszka return 1; 2489a1b87fe0SJan Kiszka } 2490a1b87fe0SJan Kiszka 2491a1b87fe0SJan Kiszka int kvm_arch_on_sigbus(int code, void *addr) 2492a1b87fe0SJan Kiszka { 2493a1b87fe0SJan Kiszka return 1; 2494a1b87fe0SJan Kiszka } 249582169660SScott Wood 249682169660SScott Wood void kvm_arch_init_irq_routing(KVMState *s) 249782169660SScott Wood { 249882169660SScott Wood } 2499c65f9a07SGreg Kurz 25001ad9f0a4SDavid Gibson void kvmppc_read_hptes(ppc_hash_pte64_t *hptes, hwaddr ptex, int n) 25011ad9f0a4SDavid Gibson { 25021ad9f0a4SDavid Gibson struct kvm_get_htab_fd ghf = { 25031ad9f0a4SDavid Gibson .flags = 0, 25041ad9f0a4SDavid Gibson .start_index = ptex, 25057c43bca0SAneesh Kumar K.V }; 25061ad9f0a4SDavid Gibson int fd, rc; 25071ad9f0a4SDavid Gibson int i; 25087c43bca0SAneesh Kumar K.V 25091ad9f0a4SDavid Gibson fd = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &ghf); 25101ad9f0a4SDavid Gibson if (fd < 0) { 25111ad9f0a4SDavid Gibson hw_error("kvmppc_read_hptes: Unable to open HPT fd"); 25121ad9f0a4SDavid Gibson } 25131ad9f0a4SDavid Gibson 25141ad9f0a4SDavid Gibson i = 0; 25151ad9f0a4SDavid Gibson while (i < n) { 25161ad9f0a4SDavid Gibson struct kvm_get_htab_header *hdr; 25171ad9f0a4SDavid Gibson int m = n < HPTES_PER_GROUP ? 
n : HPTES_PER_GROUP; 25181ad9f0a4SDavid Gibson char buf[sizeof(*hdr) + m * HASH_PTE_SIZE_64]; 25191ad9f0a4SDavid Gibson 25201ad9f0a4SDavid Gibson rc = read(fd, buf, sizeof(buf)); 25211ad9f0a4SDavid Gibson if (rc < 0) { 25221ad9f0a4SDavid Gibson hw_error("kvmppc_read_hptes: Unable to read HPTEs"); 25231ad9f0a4SDavid Gibson } 25241ad9f0a4SDavid Gibson 25251ad9f0a4SDavid Gibson hdr = (struct kvm_get_htab_header *)buf; 25261ad9f0a4SDavid Gibson while ((i < n) && ((char *)hdr < (buf + rc))) { 25271ad9f0a4SDavid Gibson int invalid = hdr->n_invalid; 25281ad9f0a4SDavid Gibson 25291ad9f0a4SDavid Gibson if (hdr->index != (ptex + i)) { 25301ad9f0a4SDavid Gibson hw_error("kvmppc_read_hptes: Unexpected HPTE index %"PRIu32 25311ad9f0a4SDavid Gibson " != (%"HWADDR_PRIu" + %d", hdr->index, ptex, i); 25321ad9f0a4SDavid Gibson } 25331ad9f0a4SDavid Gibson 25341ad9f0a4SDavid Gibson memcpy(hptes + i, hdr + 1, HASH_PTE_SIZE_64 * hdr->n_valid); 25351ad9f0a4SDavid Gibson i += hdr->n_valid; 25361ad9f0a4SDavid Gibson 25371ad9f0a4SDavid Gibson if ((n - i) < invalid) { 25381ad9f0a4SDavid Gibson invalid = n - i; 25391ad9f0a4SDavid Gibson } 25401ad9f0a4SDavid Gibson memset(hptes + i, 0, invalid * HASH_PTE_SIZE_64); 25411ad9f0a4SDavid Gibson i += hdr->n_invalid; 25421ad9f0a4SDavid Gibson 25431ad9f0a4SDavid Gibson hdr = (struct kvm_get_htab_header *) 25441ad9f0a4SDavid Gibson ((char *)(hdr + 1) + HASH_PTE_SIZE_64 * hdr->n_valid); 25451ad9f0a4SDavid Gibson } 25461ad9f0a4SDavid Gibson } 25471ad9f0a4SDavid Gibson 25481ad9f0a4SDavid Gibson close(fd); 25491ad9f0a4SDavid Gibson } 25501ad9f0a4SDavid Gibson 25511ad9f0a4SDavid Gibson void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1) 25527c43bca0SAneesh Kumar K.V { 25531ad9f0a4SDavid Gibson int fd, rc; 25547c43bca0SAneesh Kumar K.V struct kvm_get_htab_fd ghf; 25551ad9f0a4SDavid Gibson struct { 25561ad9f0a4SDavid Gibson struct kvm_get_htab_header hdr; 25571ad9f0a4SDavid Gibson uint64_t pte0; 25581ad9f0a4SDavid Gibson uint64_t pte1; 
25591ad9f0a4SDavid Gibson } buf; 2560c1385933SAneesh Kumar K.V 2561c1385933SAneesh Kumar K.V ghf.flags = 0; 2562c1385933SAneesh Kumar K.V ghf.start_index = 0; /* Ignored */ 25631ad9f0a4SDavid Gibson fd = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &ghf); 25641ad9f0a4SDavid Gibson if (fd < 0) { 25651ad9f0a4SDavid Gibson hw_error("kvmppc_write_hpte: Unable to open HPT fd"); 2566c1385933SAneesh Kumar K.V } 2567c1385933SAneesh Kumar K.V 25681ad9f0a4SDavid Gibson buf.hdr.n_valid = 1; 25691ad9f0a4SDavid Gibson buf.hdr.n_invalid = 0; 25701ad9f0a4SDavid Gibson buf.hdr.index = ptex; 25711ad9f0a4SDavid Gibson buf.pte0 = cpu_to_be64(pte0); 25721ad9f0a4SDavid Gibson buf.pte1 = cpu_to_be64(pte1); 25731ad9f0a4SDavid Gibson 25741ad9f0a4SDavid Gibson rc = write(fd, &buf, sizeof(buf)); 25751ad9f0a4SDavid Gibson if (rc != sizeof(buf)) { 25761ad9f0a4SDavid Gibson hw_error("kvmppc_write_hpte: Unable to update KVM HPT"); 2577c1385933SAneesh Kumar K.V } 25781ad9f0a4SDavid Gibson close(fd); 2579c1385933SAneesh Kumar K.V } 25809e03a040SFrank Blaschka 25819e03a040SFrank Blaschka int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route, 2582dc9f06caSPavel Fedin uint64_t address, uint32_t data, PCIDevice *dev) 25839e03a040SFrank Blaschka { 25849e03a040SFrank Blaschka return 0; 25859e03a040SFrank Blaschka } 25861850b6b7SEric Auger 258738d87493SPeter Xu int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route, 258838d87493SPeter Xu int vector, PCIDevice *dev) 258938d87493SPeter Xu { 259038d87493SPeter Xu return 0; 259138d87493SPeter Xu } 259238d87493SPeter Xu 259338d87493SPeter Xu int kvm_arch_release_virq_post(int virq) 259438d87493SPeter Xu { 259538d87493SPeter Xu return 0; 259638d87493SPeter Xu } 259738d87493SPeter Xu 25981850b6b7SEric Auger int kvm_arch_msi_data_to_gsi(uint32_t data) 25991850b6b7SEric Auger { 26001850b6b7SEric Auger return data & 0xffff; 26011850b6b7SEric Auger } 26024d9392beSThomas Huth 26034d9392beSThomas Huth int kvmppc_enable_hwrng(void) 
26044d9392beSThomas Huth { 26054d9392beSThomas Huth if (!kvm_enabled() || !kvm_check_extension(kvm_state, KVM_CAP_PPC_HWRNG)) { 26064d9392beSThomas Huth return -1; 26074d9392beSThomas Huth } 26084d9392beSThomas Huth 26094d9392beSThomas Huth return kvmppc_enable_hcall(kvm_state, H_RANDOM); 26104d9392beSThomas Huth } 2611