1d76d1650Saurel32 /* 2d76d1650Saurel32 * PowerPC implementation of KVM hooks 3d76d1650Saurel32 * 4d76d1650Saurel32 * Copyright IBM Corp. 2007 590dc8812SScott Wood * Copyright (C) 2011 Freescale Semiconductor, Inc. 6d76d1650Saurel32 * 7d76d1650Saurel32 * Authors: 8d76d1650Saurel32 * Jerone Young <jyoung5@us.ibm.com> 9d76d1650Saurel32 * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com> 10d76d1650Saurel32 * Hollis Blanchard <hollisb@us.ibm.com> 11d76d1650Saurel32 * 12d76d1650Saurel32 * This work is licensed under the terms of the GNU GPL, version 2 or later. 13d76d1650Saurel32 * See the COPYING file in the top-level directory. 14d76d1650Saurel32 * 15d76d1650Saurel32 */ 16d76d1650Saurel32 170d75590dSPeter Maydell #include "qemu/osdep.h" 18eadaada1SAlexander Graf #include <dirent.h> 19d76d1650Saurel32 #include <sys/ioctl.h> 204656e1f0SBenjamin Herrenschmidt #include <sys/vfs.h> 21d76d1650Saurel32 22d76d1650Saurel32 #include <linux/kvm.h> 23d76d1650Saurel32 24d76d1650Saurel32 #include "qemu-common.h" 25072ed5f2SThomas Huth #include "qemu/error-report.h" 2633c11879SPaolo Bonzini #include "cpu.h" 27715d4b96SThomas Huth #include "cpu-models.h" 281de7afc9SPaolo Bonzini #include "qemu/timer.h" 299c17d615SPaolo Bonzini #include "sysemu/sysemu.h" 30b3946626SVincent Palatin #include "sysemu/hw_accel.h" 31d76d1650Saurel32 #include "kvm_ppc.h" 329c17d615SPaolo Bonzini #include "sysemu/cpus.h" 339c17d615SPaolo Bonzini #include "sysemu/device_tree.h" 34d5aea6f3SDavid Gibson #include "mmu-hash64.h" 35d76d1650Saurel32 36f61b4bedSAlexander Graf #include "hw/sysbus.h" 370d09e41aSPaolo Bonzini #include "hw/ppc/spapr.h" 380d09e41aSPaolo Bonzini #include "hw/ppc/spapr_vio.h" 397ebaf795SBharata B Rao #include "hw/ppc/spapr_cpu_core.h" 4098a8b524SAlexey Kardashevskiy #include "hw/ppc/ppc.h" 4131f2cb8fSBharat Bhushan #include "sysemu/watchdog.h" 42b36f100eSAlexey Kardashevskiy #include "trace.h" 4388365d17SBharat Bhushan #include "exec/gdbstub.h" 444c663752SPaolo Bonzini #include 
"exec/memattrs.h" 459c607668SAlexey Kardashevskiy #include "exec/ram_addr.h" 462d103aaeSMichael Roth #include "sysemu/hostmem.h" 47f348b6d1SVeronia Bahaa #include "qemu/cutils.h" 489c607668SAlexey Kardashevskiy #include "qemu/mmap-alloc.h" 493b542549SBharata B Rao #if defined(TARGET_PPC64) 503b542549SBharata B Rao #include "hw/ppc/spapr_cpu_core.h" 513b542549SBharata B Rao #endif 52f3d9f303SSam Bobroff #include "elf.h" 53c64abd1fSSam Bobroff #include "sysemu/kvm_int.h" 54f61b4bedSAlexander Graf 55d76d1650Saurel32 //#define DEBUG_KVM 56d76d1650Saurel32 57d76d1650Saurel32 #ifdef DEBUG_KVM 58da56ff91SPeter Maydell #define DPRINTF(fmt, ...) \ 59d76d1650Saurel32 do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0) 60d76d1650Saurel32 #else 61da56ff91SPeter Maydell #define DPRINTF(fmt, ...) \ 62d76d1650Saurel32 do { } while (0) 63d76d1650Saurel32 #endif 64d76d1650Saurel32 65eadaada1SAlexander Graf #define PROC_DEVTREE_CPU "/proc/device-tree/cpus/" 66eadaada1SAlexander Graf 6794a8d39aSJan Kiszka const KVMCapabilityInfo kvm_arch_required_capabilities[] = { 6894a8d39aSJan Kiszka KVM_CAP_LAST_INFO 6994a8d39aSJan Kiszka }; 7094a8d39aSJan Kiszka 71fc87e185SAlexander Graf static int cap_interrupt_unset = false; 72fc87e185SAlexander Graf static int cap_interrupt_level = false; 7390dc8812SScott Wood static int cap_segstate; 7490dc8812SScott Wood static int cap_booke_sregs; 75e97c3636SDavid Gibson static int cap_ppc_smt; 76354ac20aSDavid Gibson static int cap_ppc_rma; 770f5cb298SDavid Gibson static int cap_spapr_tce; 78d6ee2a7cSAlexey Kardashevskiy static int cap_spapr_tce_64; 79da95324eSAlexey Kardashevskiy static int cap_spapr_multitce; 809bb62a07SAlexey Kardashevskiy static int cap_spapr_vfio; 81f1af19d7SDavid Gibson static int cap_hior; 82d67d40eaSDavid Gibson static int cap_one_reg; 833b961124SStuart Yoder static int cap_epr; 8431f2cb8fSBharat Bhushan static int cap_ppc_watchdog; 859b00ea49SDavid Gibson static int cap_papr; 86e68cb8b4SAlexey Kardashevskiy static int 
cap_htab_fd;
static int cap_fixup_hcalls;
static int cap_htm;             /* Hardware transactional memory support */
static int cap_mmu_radix;
static int cap_mmu_hash_v3;

/* Opcode KVM uses for software breakpoints (fetched via KVM_REG_PPC_DEBUG_INST) */
static uint32_t debug_inst_opcode;

/* XXX We have a race condition where we actually have a level triggered
 *     interrupt, but the infrastructure can't expose that yet, so the guest
 *     takes but ignores it, goes to sleep and never gets notified that there's
 *     still an interrupt pending.
 *
 *     As a quick workaround, let's just wake up again 20 ms after we injected
 *     an interrupt. That way we can assure that we're always reinjecting
 *     interrupts in case the guest swallowed them.
 */
static QEMUTimer *idle_timer;

/* Timer callback: kick the vCPU awake (see the idle_timer workaround above). */
static void kvm_kick_cpu(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    qemu_cpu_kick(CPU(cpu));
}

/* Check whether we are running with KVM-PR (instead of KVM-HV). This
 * should only be used for fallback tests - generally we should use
 * explicit capabilities for the features we want, rather than
 * assuming what is/isn't available depending on the KVM variant. */
static bool kvmppc_is_pr(KVMState *ks)
{
    /* Assume KVM-PR if the GET_PVINFO capability is available */
    return kvm_check_extension(ks, KVM_CAP_PPC_GET_PVINFO) != 0;
}

static int kvm_ppc_register_host_cpu_type(void);

/*
 * Accelerator init hook: probe the host kernel once and cache the
 * KVM capability flags used throughout this file.
 */
int kvm_arch_init(MachineState *ms, KVMState *s)
{
    cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);
    cap_interrupt_level = kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL);
    cap_segstate = kvm_check_extension(s, KVM_CAP_PPC_SEGSTATE);
    cap_booke_sregs = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_SREGS);
    cap_ppc_smt = kvm_check_extension(s, KVM_CAP_PPC_SMT);
    cap_ppc_rma = kvm_check_extension(s, KVM_CAP_PPC_RMA);
    cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);
    cap_spapr_tce_64 = kvm_check_extension(s, KVM_CAP_SPAPR_TCE_64);
    cap_spapr_multitce = kvm_check_extension(s, KVM_CAP_SPAPR_MULTITCE);
    cap_spapr_vfio = false;
    cap_one_reg = kvm_check_extension(s, KVM_CAP_ONE_REG);
    cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);
    cap_epr = kvm_check_extension(s, KVM_CAP_PPC_EPR);
    cap_ppc_watchdog = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_WATCHDOG);
    /* Note: we don't set cap_papr here, because this capability is
     * only activated after this by kvmppc_set_papr() */
    cap_htab_fd = kvm_check_extension(s, KVM_CAP_PPC_HTAB_FD);
    cap_fixup_hcalls = kvm_check_extension(s, KVM_CAP_PPC_FIXUP_HCALL);
    cap_htm = kvm_vm_check_extension(s, KVM_CAP_PPC_HTM);
    cap_mmu_radix = kvm_vm_check_extension(s, KVM_CAP_PPC_MMU_RADIX);
    cap_mmu_hash_v3 = kvm_vm_check_extension(s, KVM_CAP_PPC_MMU_HASH_V3);

    if (!cap_interrupt_level) {
        fprintf(stderr, "KVM: Couldn't find level irq capability. Expect the "
                        "VM to stall at times!\n");
    }

    kvm_ppc_register_host_cpu_type();

    return 0;
}

/* No in-kernel irqchip to create on PPC; nothing to do here. */
int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
{
    return 0;
}

/*
 * Push the guest's PVR to KVM via the SREGS interface.  BookE guests
 * always run with the native PVR; BookS requires the SEGSTATE capability.
 */
static int kvm_arch_sync_sregs(PowerPCCPU *cpu)
{
    CPUPPCState *cenv = &cpu->env;
    CPUState *cs = CPU(cpu);
    struct kvm_sregs sregs;
    int ret;

    if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
        /* What we're really trying to say is "if we're on BookE, we use
           the native PVR for now". This is the only sane way to check
           it though, so we potentially confuse users that they can run
           BookE guests on BookS.
Let's hope nobody dares enough :) */
        return 0;
    } else {
        if (!cap_segstate) {
            fprintf(stderr, "kvm error: missing PVR setting capability\n");
            return -ENOSYS;
        }
    }

    ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
    if (ret) {
        return ret;
    }

    sregs.pvr = cenv->spr[SPR_PVR];
    return kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
}

/* Set up a shared TLB array with KVM (KVM_CAP_SW_TLB, e500/BookE 2.06). */
static int kvm_booke206_tlb_init(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    struct kvm_book3e_206_tlb_params params = {};
    struct kvm_config_tlb cfg = {};
    unsigned int entries = 0;
    int ret, i;

    if (!kvm_enabled() ||
        !kvm_check_extension(cs->kvm_state, KVM_CAP_SW_TLB)) {
        return 0;
    }

    assert(ARRAY_SIZE(params.tlb_sizes) == BOOKE206_MAX_TLBN);

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        params.tlb_sizes[i] = booke206_tlb_size(env, i);
        params.tlb_ways[i] = booke206_tlb_ways(env, i);
        entries += params.tlb_sizes[i];
    }

    assert(entries == env->nb_tlb);
    /* KVM shares env->tlb.tlbm directly, so the layouts must match exactly */
    assert(sizeof(struct kvm_book3e_206_tlb_entry) == sizeof(ppcmas_tlb_t));

    env->tlb_dirty = true;

    cfg.array = (uintptr_t)env->tlb.tlbm;
    cfg.array_len = sizeof(ppcmas_tlb_t) * entries;
    cfg.params = (uintptr_t)&params;
    cfg.mmu_type = KVM_MMU_FSL_BOOKE_NOHV;

    ret = kvm_vcpu_enable_cap(cs, KVM_CAP_SW_TLB, 0, (uintptr_t)&cfg);
    if (ret < 0) {
        fprintf(stderr, "%s: couldn't enable KVM_CAP_SW_TLB: %s\n",
                __func__, strerror(-ret));
        return ret;
    }

    env->kvm_sw_tlb = true;
    return 0;
}


#if defined(TARGET_PPC64)
/*
 * Fallback for kernels without KVM_PPC_GET_SMMU_INFO: synthesize a
 * plausible page/segment size description for the guest MMU.
 */
static void kvm_get_fallback_smmu_info(PowerPCCPU *cpu,
                                       struct kvm_ppc_smmu_info *info)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    memset(info, 0, sizeof(*info));

    /* We don't have the new KVM_PPC_GET_SMMU_INFO ioctl, so
     * need to "guess" what the supported page sizes are.
     *
     * For that to work we make a few assumptions:
     *
     * - Check whether we are running "PR" KVM which only supports 4K
     *   and 16M pages, but supports them regardless of the backing
     *   store characteristics. We also don't support 1T segments.
     *
     *   This is safe as if HV KVM ever supports that capability or PR
     *   KVM grows support for more page/segment sizes, those versions
     *   will have implemented KVM_CAP_PPC_GET_SMMU_INFO and thus we
     *   will not hit this fallback
     *
     * - Else we are running HV KVM. This means we only support page
     *   sizes that fit in the backing store. Additionally we only
     *   advertise 64K pages if the processor is ARCH 2.06 and we assume
     *   P7 encodings for the SLB and hash table. Here too, we assume
     *   support for any newer processor will mean a kernel that
     *   implements KVM_CAP_PPC_GET_SMMU_INFO and thus doesn't hit
     *   this fallback.
     */
    if (kvmppc_is_pr(cs->kvm_state)) {
        /* No flags */
        info->flags = 0;
        info->slb_size = 64;

        /* Standard 4k base page size segment */
        info->sps[0].page_shift = 12;
        info->sps[0].slb_enc = 0;
        info->sps[0].enc[0].page_shift = 12;
        info->sps[0].enc[0].pte_enc = 0;

        /* Standard 16M large page size segment */
        info->sps[1].page_shift = 24;
        info->sps[1].slb_enc = SLB_VSID_L;
        info->sps[1].enc[0].page_shift = 24;
        info->sps[1].enc[0].pte_enc = 0;
    } else {
        int i = 0;

        /* HV KVM has backing store size restrictions */
        info->flags = KVM_PPC_PAGE_SIZES_REAL;

        if (env->mmu_model & POWERPC_MMU_1TSEG) {
            info->flags |= KVM_PPC_1T_SEGMENTS;
        }

        if (POWERPC_MMU_VER(env->mmu_model) == POWERPC_MMU_VER_2_06 ||
            POWERPC_MMU_VER(env->mmu_model) == POWERPC_MMU_VER_2_07) {
            info->slb_size = 32;
        } else {
            info->slb_size = 64;
        }

        /*
Standard 4k base page size segment */
        info->sps[i].page_shift = 12;
        info->sps[i].slb_enc = 0;
        info->sps[i].enc[0].page_shift = 12;
        info->sps[i].enc[0].pte_enc = 0;
        i++;

        /* 64K on MMU 2.06 and later */
        if (POWERPC_MMU_VER(env->mmu_model) == POWERPC_MMU_VER_2_06 ||
            POWERPC_MMU_VER(env->mmu_model) == POWERPC_MMU_VER_2_07) {
            info->sps[i].page_shift = 16;
            info->sps[i].slb_enc = 0x110;
            info->sps[i].enc[0].page_shift = 16;
            info->sps[i].enc[0].pte_enc = 1;
            i++;
        }

        /* Standard 16M large page size segment */
        info->sps[i].page_shift = 24;
        info->sps[i].slb_enc = SLB_VSID_L;
        info->sps[i].enc[0].page_shift = 24;
        info->sps[i].enc[0].pte_enc = 0;
    }
}

/*
 * Fill *info from the kernel via KVM_PPC_GET_SMMU_INFO when available,
 * otherwise fall back to the guessed description above.
 */
static void kvm_get_smmu_info(PowerPCCPU *cpu, struct kvm_ppc_smmu_info *info)
{
    CPUState *cs = CPU(cpu);
    int ret;

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) {
        ret = kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_SMMU_INFO, info);
        if (ret == 0) {
            return;
        }
    }

    kvm_get_fallback_smmu_info(cpu, info);
}

/*
 * Query the radix MMU page encodings from KVM.  Returns NULL when radix
 * is unsupported or the ioctl fails; otherwise a g_malloc0'd structure
 * the caller is responsible for freeing.
 */
struct ppc_radix_page_info *kvm_get_radix_page_info(void)
{
    KVMState *s = KVM_STATE(current_machine->accelerator);
    struct ppc_radix_page_info *radix_page_info;
    struct kvm_ppc_rmmu_info rmmu_info;
    int i;

    if (!kvm_check_extension(s, KVM_CAP_PPC_MMU_RADIX)) {
        return NULL;
    }
    if (kvm_vm_ioctl(s, KVM_PPC_GET_RMMU_INFO, &rmmu_info)) {
        return NULL;
    }
    radix_page_info = g_malloc0(sizeof(*radix_page_info));
    radix_page_info->count = 0;
    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        if (rmmu_info.ap_encodings[i]) {
            radix_page_info->entries[i] = rmmu_info.ap_encodings[i];
            radix_page_info->count++;
        }
    }
    return radix_page_info;
}

/*
 * Configure the ISA v3.00 MMU (radix vs hash, guest translation shootdown
 * enable, process table) via KVM_PPC_CONFIGURE_V3_MMU.  The errno result
 * is mapped to a PAPR hypercall return code for the caller.
 */
target_ulong kvmppc_configure_v3_mmu(PowerPCCPU *cpu,
                                     bool radix, bool gtse,
                                     uint64_t proc_tbl)
{
    CPUState *cs = CPU(cpu);
    int ret;
    uint64_t flags = 0;
    struct kvm_ppc_mmuv3_cfg cfg = {
        .process_table = proc_tbl,
};

    if (radix) {
        flags |= KVM_PPC_MMUV3_RADIX;
    }
    if (gtse) {
        flags |= KVM_PPC_MMUV3_GTSE;
    }
    cfg.flags = flags;
    ret = kvm_vm_ioctl(cs->kvm_state, KVM_PPC_CONFIGURE_V3_MMU, &cfg);
    switch (ret) {
    case 0:
        return H_SUCCESS;
    case -EINVAL:
        return H_PARAMETER;
    case -ENODEV:
        return H_NOT_AVAILABLE;
    default:
        return H_HARDWARE;
    }
}

/*
 * A page size is usable when real-mode restrictions don't apply, or when
 * it does not exceed the backing RAM page size (rampgsize).
 */
static bool kvm_valid_page_size(uint32_t flags, long rampgsize, uint32_t shift)
{
    if (!(flags & KVM_PPC_PAGE_SIZES_REAL)) {
        return true;
    }

    return (1ul << shift) <= rampgsize;
}

/* Cached backing-store page size of guest RAM (set on first use below) */
static long max_cpu_page_size;

/*
 * Intersect the CPU's supported page/segment sizes with what KVM reports
 * (or guesses) and rewrite env->sps accordingly.  Also drops 1T-segment
 * and 64K-page support from the MMU model when unavailable.
 */
static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
{
    static struct kvm_ppc_smmu_info smmu_info;
    static bool has_smmu_info;
    CPUPPCState *env = &cpu->env;
    int iq, ik, jq, jk;
    bool has_64k_pages = false;

    /* We only handle page sizes for 64-bit server guests for now */
    if (!(env->mmu_model & POWERPC_MMU_64)) {
        return;
    }

    /* Collect MMU info from kernel if not already */
    if (!has_smmu_info) {
        kvm_get_smmu_info(cpu, &smmu_info);
        has_smmu_info = true;
    }

    if (!max_cpu_page_size) {
        max_cpu_page_size = qemu_getrampagesize();
    }

    /* Convert to QEMU form */
    memset(&env->sps, 0, sizeof(env->sps));

    /* If we have HV KVM, we need to forbid CI large pages if our
     * host page size is smaller than 64K.
     */
    if (smmu_info.flags & KVM_PPC_PAGE_SIZES_REAL) {
        env->ci_large_pages = getpagesize() >= 0x10000;
    }

    /*
     * XXX This loop should be an entry wide AND of the capabilities that
     *     the selected CPU has with the capabilities that KVM supports.
     */
    for (ik = iq = 0; ik < KVM_PPC_PAGE_SIZES_MAX_SZ; ik++) {
        struct ppc_one_seg_page_size *qsps = &env->sps.sps[iq];
        struct kvm_ppc_one_seg_page_size *ksps = &smmu_info.sps[ik];

        if (!kvm_valid_page_size(smmu_info.flags, max_cpu_page_size,
                                 ksps->page_shift)) {
            continue;
        }
        qsps->page_shift = ksps->page_shift;
        qsps->slb_enc = ksps->slb_enc;
        for (jk = jq = 0; jk < KVM_PPC_PAGE_SIZES_MAX_SZ; jk++) {
            if (!kvm_valid_page_size(smmu_info.flags, max_cpu_page_size,
                                     ksps->enc[jk].page_shift)) {
                continue;
            }
            if (ksps->enc[jk].page_shift == 16) {
                has_64k_pages = true;
            }
            qsps->enc[jq].page_shift = ksps->enc[jk].page_shift;
            qsps->enc[jq].pte_enc = ksps->enc[jk].pte_enc;
            if (++jq >= PPC_PAGE_SIZES_MAX_SZ) {
                break;
            }
        }
        if (++iq >= PPC_PAGE_SIZES_MAX_SZ) {
            break;
        }
    }
    env->slb_nr = smmu_info.slb_size;
    if (!(smmu_info.flags & KVM_PPC_1T_SEGMENTS)) {
        env->mmu_model &= ~POWERPC_MMU_1TSEG;
    }
    if (!has_64k_pages) {
        env->mmu_model &= ~POWERPC_MMU_64K;
    }
}

/*
 * Check that the page size of the memory backend at obj_path is at least
 * as large as the guest RAM page size cached above.
 * NOTE(review): object_resolve_path() may return NULL for a bad path —
 * presumably callers only pass valid backend paths; verify.
 */
bool kvmppc_is_mem_backend_page_size_ok(const char *obj_path)
{
    Object *mem_obj = object_resolve_path(obj_path, NULL);
    char *mempath = object_property_get_str(mem_obj, "mem-path", NULL);
    long pagesize;

    if (mempath) {
        pagesize = qemu_mempath_getpagesize(mempath);
        g_free(mempath);
    } else {
        pagesize = getpagesize();
    }

    return pagesize >= max_cpu_page_size;
}

#else /* defined (TARGET_PPC64) */

static inline void kvm_fixup_page_sizes(PowerPCCPU *cpu)
{
}

bool kvmppc_is_mem_backend_page_size_ok(const char *obj_path)
{
    return true;
}

#endif /* !defined (TARGET_PPC64) */

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return ppc_get_vcpu_dt_id(POWERPC_CPU(cpu));
}

/* e500 supports 2 h/w breakpoint and 2 watchpoint.
 * book3s supports only 1 watchpoint, so array size
 * of 4 is sufficient for now.
*/
#define MAX_HW_BKPTS 4

static struct HWBreakpoint {
    target_ulong addr;
    int type;
} hw_debug_points[MAX_HW_BKPTS];

static CPUWatchpoint hw_watchpoint;

/* Default there is no breakpoint and watchpoint supported */
static int max_hw_breakpoint;
static int max_hw_watchpoint;
static int nb_hw_breakpoint;
static int nb_hw_watchpoint;

/* Record how many h/w break/watchpoints this CPU model provides. */
static void kvmppc_hw_debug_points_init(CPUPPCState *cenv)
{
    if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
        max_hw_breakpoint = 2;
        max_hw_watchpoint = 2;
    }

    if ((max_hw_breakpoint + max_hw_watchpoint) > MAX_HW_BKPTS) {
        fprintf(stderr, "Error initializing h/w breakpoints\n");
        return;
    }
}

/*
 * Per-vCPU KVM init: fix up page sizes, sync SREGS, arm the idle-kick
 * timer, do MMU-model specific setup and fetch the debug instruction.
 */
int kvm_arch_init_vcpu(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *cenv = &cpu->env;
    int ret;

    /* Gather server mmu info from KVM and update the CPU state */
    kvm_fixup_page_sizes(cpu);

    /* Synchronize sregs with kvm */
    ret = kvm_arch_sync_sregs(cpu);
    if (ret) {
        if (ret == -EINVAL) {
            error_report("Register sync failed... If you're using kvm-hv.ko,"
                         " only \"-cpu host\" is possible");
        }
        return ret;
    }

    idle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, kvm_kick_cpu, cpu);

    switch (cenv->mmu_model) {
    case POWERPC_MMU_BOOKE206:
        /* This target supports access to KVM's guest TLB */
        ret = kvm_booke206_tlb_init(cpu);
        break;
    case POWERPC_MMU_2_07:
        if (!cap_htm && !kvmppc_is_pr(cs->kvm_state)) {
            /* KVM-HV has transactional memory on POWER8 also without the
             * KVM_CAP_PPC_HTM extension, so enable it here instead as
             * long as it's available to userspace on the host. */
            if (qemu_getauxval(AT_HWCAP2) & PPC_FEATURE2_HAS_HTM) {
                cap_htm = true;
            }
        }
        break;
    default:
        break;
    }

    kvm_get_one_reg(cs, KVM_REG_PPC_DEBUG_INST, &debug_inst_opcode);
    kvmppc_hw_debug_points_init(cenv);

    return ret;
}

/* Flush the whole shared software TLB back to KVM (marks every entry dirty). */
static void kvm_sw_tlb_put(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    struct kvm_dirty_tlb dirty_tlb;
    unsigned char *bitmap;
    int ret;

    if (!env->kvm_sw_tlb) {
        return;
    }

    /* One dirty bit per TLB entry, rounded up to whole bytes */
    bitmap = g_malloc((env->nb_tlb + 7) / 8);
    memset(bitmap, 0xFF, (env->nb_tlb + 7) / 8);

    dirty_tlb.bitmap = (uintptr_t)bitmap;
    dirty_tlb.num_dirty = env->nb_tlb;

    ret = kvm_vcpu_ioctl(cs, KVM_DIRTY_TLB, &dirty_tlb);
    if (ret) {
        fprintf(stderr, "%s: KVM_DIRTY_TLB: %s\n",
                __func__, strerror(-ret));
    }

    g_free(bitmap);
}

/*
 * Read one SPR from KVM into env->spr[spr] via the ONE_REG interface;
 * the transfer width is taken from the register id.
 */
static void kvm_get_one_spr(CPUState *cs, uint64_t id, int spr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    union {
        uint32_t u32;
        uint64_t u64;
    } val;
    struct kvm_one_reg reg = {
        .id = id,
        .addr = (uintptr_t) &val,
    };
    int ret;

    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret != 0) {
        trace_kvm_failed_spr_get(spr, strerror(errno));
    } else {
        switch (id & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
            env->spr[spr] = val.u32;
            break;

        case KVM_REG_SIZE_U64:
            env->spr[spr] = val.u64;
            break;

        default:
            /* Don't handle this size yet */
            abort();
        }
    }
}

/*
 * Write env->spr[spr] to KVM via the ONE_REG interface; the transfer
 * width is taken from the register id.
 */
static void kvm_put_one_spr(CPUState *cs, uint64_t id, int spr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    union {
        uint32_t u32;
        uint64_t u64;
    } val;
    struct kvm_one_reg reg = {
        .id = id,
        .addr = (uintptr_t) &val,
    };
    int ret;

    switch (id & KVM_REG_SIZE_MASK) {
    case KVM_REG_SIZE_U32:
        val.u32 = env->spr[spr];
        break;
Gibson case KVM_REG_SIZE_U64: 674d67d40eaSDavid Gibson val.u64 = env->spr[spr]; 675d67d40eaSDavid Gibson break; 676d67d40eaSDavid Gibson 677d67d40eaSDavid Gibson default: 678d67d40eaSDavid Gibson /* Don't handle this size yet */ 679d67d40eaSDavid Gibson abort(); 680d67d40eaSDavid Gibson } 681d67d40eaSDavid Gibson 682d67d40eaSDavid Gibson ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®); 683d67d40eaSDavid Gibson if (ret != 0) { 684b36f100eSAlexey Kardashevskiy trace_kvm_failed_spr_set(spr, strerror(errno)); 685d67d40eaSDavid Gibson } 686d67d40eaSDavid Gibson } 687d67d40eaSDavid Gibson 68870b79849SDavid Gibson static int kvm_put_fp(CPUState *cs) 68970b79849SDavid Gibson { 69070b79849SDavid Gibson PowerPCCPU *cpu = POWERPC_CPU(cs); 69170b79849SDavid Gibson CPUPPCState *env = &cpu->env; 69270b79849SDavid Gibson struct kvm_one_reg reg; 69370b79849SDavid Gibson int i; 69470b79849SDavid Gibson int ret; 69570b79849SDavid Gibson 69670b79849SDavid Gibson if (env->insns_flags & PPC_FLOAT) { 69770b79849SDavid Gibson uint64_t fpscr = env->fpscr; 69870b79849SDavid Gibson bool vsx = !!(env->insns_flags2 & PPC2_VSX); 69970b79849SDavid Gibson 70070b79849SDavid Gibson reg.id = KVM_REG_PPC_FPSCR; 70170b79849SDavid Gibson reg.addr = (uintptr_t)&fpscr; 70270b79849SDavid Gibson ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®); 70370b79849SDavid Gibson if (ret < 0) { 704da56ff91SPeter Maydell DPRINTF("Unable to set FPSCR to KVM: %s\n", strerror(errno)); 70570b79849SDavid Gibson return ret; 70670b79849SDavid Gibson } 70770b79849SDavid Gibson 70870b79849SDavid Gibson for (i = 0; i < 32; i++) { 70970b79849SDavid Gibson uint64_t vsr[2]; 71070b79849SDavid Gibson 7113a4b791bSGreg Kurz #ifdef HOST_WORDS_BIGENDIAN 71270b79849SDavid Gibson vsr[0] = float64_val(env->fpr[i]); 71370b79849SDavid Gibson vsr[1] = env->vsr[i]; 7143a4b791bSGreg Kurz #else 7153a4b791bSGreg Kurz vsr[0] = env->vsr[i]; 7163a4b791bSGreg Kurz vsr[1] = float64_val(env->fpr[i]); 7173a4b791bSGreg Kurz #endif 71870b79849SDavid Gibson 
reg.addr = (uintptr_t) &vsr; 71970b79849SDavid Gibson reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i); 72070b79849SDavid Gibson 72170b79849SDavid Gibson ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®); 72270b79849SDavid Gibson if (ret < 0) { 723da56ff91SPeter Maydell DPRINTF("Unable to set %s%d to KVM: %s\n", vsx ? "VSR" : "FPR", 72470b79849SDavid Gibson i, strerror(errno)); 72570b79849SDavid Gibson return ret; 72670b79849SDavid Gibson } 72770b79849SDavid Gibson } 72870b79849SDavid Gibson } 72970b79849SDavid Gibson 73070b79849SDavid Gibson if (env->insns_flags & PPC_ALTIVEC) { 73170b79849SDavid Gibson reg.id = KVM_REG_PPC_VSCR; 73270b79849SDavid Gibson reg.addr = (uintptr_t)&env->vscr; 73370b79849SDavid Gibson ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®); 73470b79849SDavid Gibson if (ret < 0) { 735da56ff91SPeter Maydell DPRINTF("Unable to set VSCR to KVM: %s\n", strerror(errno)); 73670b79849SDavid Gibson return ret; 73770b79849SDavid Gibson } 73870b79849SDavid Gibson 73970b79849SDavid Gibson for (i = 0; i < 32; i++) { 74070b79849SDavid Gibson reg.id = KVM_REG_PPC_VR(i); 74170b79849SDavid Gibson reg.addr = (uintptr_t)&env->avr[i]; 74270b79849SDavid Gibson ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®); 74370b79849SDavid Gibson if (ret < 0) { 744da56ff91SPeter Maydell DPRINTF("Unable to set VR%d to KVM: %s\n", i, strerror(errno)); 74570b79849SDavid Gibson return ret; 74670b79849SDavid Gibson } 74770b79849SDavid Gibson } 74870b79849SDavid Gibson } 74970b79849SDavid Gibson 75070b79849SDavid Gibson return 0; 75170b79849SDavid Gibson } 75270b79849SDavid Gibson 75370b79849SDavid Gibson static int kvm_get_fp(CPUState *cs) 75470b79849SDavid Gibson { 75570b79849SDavid Gibson PowerPCCPU *cpu = POWERPC_CPU(cs); 75670b79849SDavid Gibson CPUPPCState *env = &cpu->env; 75770b79849SDavid Gibson struct kvm_one_reg reg; 75870b79849SDavid Gibson int i; 75970b79849SDavid Gibson int ret; 76070b79849SDavid Gibson 76170b79849SDavid Gibson if (env->insns_flags & PPC_FLOAT) { 
76270b79849SDavid Gibson uint64_t fpscr; 76370b79849SDavid Gibson bool vsx = !!(env->insns_flags2 & PPC2_VSX); 76470b79849SDavid Gibson 76570b79849SDavid Gibson reg.id = KVM_REG_PPC_FPSCR; 76670b79849SDavid Gibson reg.addr = (uintptr_t)&fpscr; 76770b79849SDavid Gibson ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®); 76870b79849SDavid Gibson if (ret < 0) { 769da56ff91SPeter Maydell DPRINTF("Unable to get FPSCR from KVM: %s\n", strerror(errno)); 77070b79849SDavid Gibson return ret; 77170b79849SDavid Gibson } else { 77270b79849SDavid Gibson env->fpscr = fpscr; 77370b79849SDavid Gibson } 77470b79849SDavid Gibson 77570b79849SDavid Gibson for (i = 0; i < 32; i++) { 77670b79849SDavid Gibson uint64_t vsr[2]; 77770b79849SDavid Gibson 77870b79849SDavid Gibson reg.addr = (uintptr_t) &vsr; 77970b79849SDavid Gibson reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i); 78070b79849SDavid Gibson 78170b79849SDavid Gibson ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®); 78270b79849SDavid Gibson if (ret < 0) { 783da56ff91SPeter Maydell DPRINTF("Unable to get %s%d from KVM: %s\n", 78470b79849SDavid Gibson vsx ? 
"VSR" : "FPR", i, strerror(errno)); 78570b79849SDavid Gibson return ret; 78670b79849SDavid Gibson } else { 7873a4b791bSGreg Kurz #ifdef HOST_WORDS_BIGENDIAN 78870b79849SDavid Gibson env->fpr[i] = vsr[0]; 78970b79849SDavid Gibson if (vsx) { 79070b79849SDavid Gibson env->vsr[i] = vsr[1]; 79170b79849SDavid Gibson } 7923a4b791bSGreg Kurz #else 7933a4b791bSGreg Kurz env->fpr[i] = vsr[1]; 7943a4b791bSGreg Kurz if (vsx) { 7953a4b791bSGreg Kurz env->vsr[i] = vsr[0]; 7963a4b791bSGreg Kurz } 7973a4b791bSGreg Kurz #endif 79870b79849SDavid Gibson } 79970b79849SDavid Gibson } 80070b79849SDavid Gibson } 80170b79849SDavid Gibson 80270b79849SDavid Gibson if (env->insns_flags & PPC_ALTIVEC) { 80370b79849SDavid Gibson reg.id = KVM_REG_PPC_VSCR; 80470b79849SDavid Gibson reg.addr = (uintptr_t)&env->vscr; 80570b79849SDavid Gibson ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®); 80670b79849SDavid Gibson if (ret < 0) { 807da56ff91SPeter Maydell DPRINTF("Unable to get VSCR from KVM: %s\n", strerror(errno)); 80870b79849SDavid Gibson return ret; 80970b79849SDavid Gibson } 81070b79849SDavid Gibson 81170b79849SDavid Gibson for (i = 0; i < 32; i++) { 81270b79849SDavid Gibson reg.id = KVM_REG_PPC_VR(i); 81370b79849SDavid Gibson reg.addr = (uintptr_t)&env->avr[i]; 81470b79849SDavid Gibson ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®); 81570b79849SDavid Gibson if (ret < 0) { 816da56ff91SPeter Maydell DPRINTF("Unable to get VR%d from KVM: %s\n", 81770b79849SDavid Gibson i, strerror(errno)); 81870b79849SDavid Gibson return ret; 81970b79849SDavid Gibson } 82070b79849SDavid Gibson } 82170b79849SDavid Gibson } 82270b79849SDavid Gibson 82370b79849SDavid Gibson return 0; 82470b79849SDavid Gibson } 82570b79849SDavid Gibson 8269b00ea49SDavid Gibson #if defined(TARGET_PPC64) 8279b00ea49SDavid Gibson static int kvm_get_vpa(CPUState *cs) 8289b00ea49SDavid Gibson { 8299b00ea49SDavid Gibson PowerPCCPU *cpu = POWERPC_CPU(cs); 8309b00ea49SDavid Gibson CPUPPCState *env = &cpu->env; 8319b00ea49SDavid Gibson struct 
kvm_one_reg reg; 8329b00ea49SDavid Gibson int ret; 8339b00ea49SDavid Gibson 8349b00ea49SDavid Gibson reg.id = KVM_REG_PPC_VPA_ADDR; 8359b00ea49SDavid Gibson reg.addr = (uintptr_t)&env->vpa_addr; 8369b00ea49SDavid Gibson ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®); 8379b00ea49SDavid Gibson if (ret < 0) { 838da56ff91SPeter Maydell DPRINTF("Unable to get VPA address from KVM: %s\n", strerror(errno)); 8399b00ea49SDavid Gibson return ret; 8409b00ea49SDavid Gibson } 8419b00ea49SDavid Gibson 8429b00ea49SDavid Gibson assert((uintptr_t)&env->slb_shadow_size 8439b00ea49SDavid Gibson == ((uintptr_t)&env->slb_shadow_addr + 8)); 8449b00ea49SDavid Gibson reg.id = KVM_REG_PPC_VPA_SLB; 8459b00ea49SDavid Gibson reg.addr = (uintptr_t)&env->slb_shadow_addr; 8469b00ea49SDavid Gibson ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®); 8479b00ea49SDavid Gibson if (ret < 0) { 848da56ff91SPeter Maydell DPRINTF("Unable to get SLB shadow state from KVM: %s\n", 8499b00ea49SDavid Gibson strerror(errno)); 8509b00ea49SDavid Gibson return ret; 8519b00ea49SDavid Gibson } 8529b00ea49SDavid Gibson 8539b00ea49SDavid Gibson assert((uintptr_t)&env->dtl_size == ((uintptr_t)&env->dtl_addr + 8)); 8549b00ea49SDavid Gibson reg.id = KVM_REG_PPC_VPA_DTL; 8559b00ea49SDavid Gibson reg.addr = (uintptr_t)&env->dtl_addr; 8569b00ea49SDavid Gibson ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®); 8579b00ea49SDavid Gibson if (ret < 0) { 858da56ff91SPeter Maydell DPRINTF("Unable to get dispatch trace log state from KVM: %s\n", 8599b00ea49SDavid Gibson strerror(errno)); 8609b00ea49SDavid Gibson return ret; 8619b00ea49SDavid Gibson } 8629b00ea49SDavid Gibson 8639b00ea49SDavid Gibson return 0; 8649b00ea49SDavid Gibson } 8659b00ea49SDavid Gibson 8669b00ea49SDavid Gibson static int kvm_put_vpa(CPUState *cs) 8679b00ea49SDavid Gibson { 8689b00ea49SDavid Gibson PowerPCCPU *cpu = POWERPC_CPU(cs); 8699b00ea49SDavid Gibson CPUPPCState *env = &cpu->env; 8709b00ea49SDavid Gibson struct kvm_one_reg reg; 8719b00ea49SDavid Gibson int ret; 
8729b00ea49SDavid Gibson 8739b00ea49SDavid Gibson /* SLB shadow or DTL can't be registered unless a master VPA is 8749b00ea49SDavid Gibson * registered. That means when restoring state, if a VPA *is* 8759b00ea49SDavid Gibson * registered, we need to set that up first. If not, we need to 8769b00ea49SDavid Gibson * deregister the others before deregistering the master VPA */ 8779b00ea49SDavid Gibson assert(env->vpa_addr || !(env->slb_shadow_addr || env->dtl_addr)); 8789b00ea49SDavid Gibson 8799b00ea49SDavid Gibson if (env->vpa_addr) { 8809b00ea49SDavid Gibson reg.id = KVM_REG_PPC_VPA_ADDR; 8819b00ea49SDavid Gibson reg.addr = (uintptr_t)&env->vpa_addr; 8829b00ea49SDavid Gibson ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®); 8839b00ea49SDavid Gibson if (ret < 0) { 884da56ff91SPeter Maydell DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno)); 8859b00ea49SDavid Gibson return ret; 8869b00ea49SDavid Gibson } 8879b00ea49SDavid Gibson } 8889b00ea49SDavid Gibson 8899b00ea49SDavid Gibson assert((uintptr_t)&env->slb_shadow_size 8909b00ea49SDavid Gibson == ((uintptr_t)&env->slb_shadow_addr + 8)); 8919b00ea49SDavid Gibson reg.id = KVM_REG_PPC_VPA_SLB; 8929b00ea49SDavid Gibson reg.addr = (uintptr_t)&env->slb_shadow_addr; 8939b00ea49SDavid Gibson ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®); 8949b00ea49SDavid Gibson if (ret < 0) { 895da56ff91SPeter Maydell DPRINTF("Unable to set SLB shadow state to KVM: %s\n", strerror(errno)); 8969b00ea49SDavid Gibson return ret; 8979b00ea49SDavid Gibson } 8989b00ea49SDavid Gibson 8999b00ea49SDavid Gibson assert((uintptr_t)&env->dtl_size == ((uintptr_t)&env->dtl_addr + 8)); 9009b00ea49SDavid Gibson reg.id = KVM_REG_PPC_VPA_DTL; 9019b00ea49SDavid Gibson reg.addr = (uintptr_t)&env->dtl_addr; 9029b00ea49SDavid Gibson ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®); 9039b00ea49SDavid Gibson if (ret < 0) { 904da56ff91SPeter Maydell DPRINTF("Unable to set dispatch trace log state to KVM: %s\n", 9059b00ea49SDavid Gibson strerror(errno)); 
9069b00ea49SDavid Gibson return ret; 9079b00ea49SDavid Gibson } 9089b00ea49SDavid Gibson 9099b00ea49SDavid Gibson if (!env->vpa_addr) { 9109b00ea49SDavid Gibson reg.id = KVM_REG_PPC_VPA_ADDR; 9119b00ea49SDavid Gibson reg.addr = (uintptr_t)&env->vpa_addr; 9129b00ea49SDavid Gibson ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®); 9139b00ea49SDavid Gibson if (ret < 0) { 914da56ff91SPeter Maydell DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno)); 9159b00ea49SDavid Gibson return ret; 9169b00ea49SDavid Gibson } 9179b00ea49SDavid Gibson } 9189b00ea49SDavid Gibson 9199b00ea49SDavid Gibson return 0; 9209b00ea49SDavid Gibson } 9219b00ea49SDavid Gibson #endif /* TARGET_PPC64 */ 9229b00ea49SDavid Gibson 923e5c0d3ceSDavid Gibson int kvmppc_put_books_sregs(PowerPCCPU *cpu) 924a7a00a72SDavid Gibson { 925a7a00a72SDavid Gibson CPUPPCState *env = &cpu->env; 926a7a00a72SDavid Gibson struct kvm_sregs sregs; 927a7a00a72SDavid Gibson int i; 928a7a00a72SDavid Gibson 929a7a00a72SDavid Gibson sregs.pvr = env->spr[SPR_PVR]; 930a7a00a72SDavid Gibson 931a7a00a72SDavid Gibson sregs.u.s.sdr1 = env->spr[SPR_SDR1]; 932a7a00a72SDavid Gibson 933a7a00a72SDavid Gibson /* Sync SLB */ 934a7a00a72SDavid Gibson #ifdef TARGET_PPC64 935a7a00a72SDavid Gibson for (i = 0; i < ARRAY_SIZE(env->slb); i++) { 936a7a00a72SDavid Gibson sregs.u.s.ppc64.slb[i].slbe = env->slb[i].esid; 937a7a00a72SDavid Gibson if (env->slb[i].esid & SLB_ESID_V) { 938a7a00a72SDavid Gibson sregs.u.s.ppc64.slb[i].slbe |= i; 939a7a00a72SDavid Gibson } 940a7a00a72SDavid Gibson sregs.u.s.ppc64.slb[i].slbv = env->slb[i].vsid; 941a7a00a72SDavid Gibson } 942a7a00a72SDavid Gibson #endif 943a7a00a72SDavid Gibson 944a7a00a72SDavid Gibson /* Sync SRs */ 945a7a00a72SDavid Gibson for (i = 0; i < 16; i++) { 946a7a00a72SDavid Gibson sregs.u.s.ppc32.sr[i] = env->sr[i]; 947a7a00a72SDavid Gibson } 948a7a00a72SDavid Gibson 949a7a00a72SDavid Gibson /* Sync BATs */ 950a7a00a72SDavid Gibson for (i = 0; i < 8; i++) { 951a7a00a72SDavid Gibson /* 
Beware. We have to swap upper and lower bits here */ 952a7a00a72SDavid Gibson sregs.u.s.ppc32.dbat[i] = ((uint64_t)env->DBAT[0][i] << 32) 953a7a00a72SDavid Gibson | env->DBAT[1][i]; 954a7a00a72SDavid Gibson sregs.u.s.ppc32.ibat[i] = ((uint64_t)env->IBAT[0][i] << 32) 955a7a00a72SDavid Gibson | env->IBAT[1][i]; 956a7a00a72SDavid Gibson } 957a7a00a72SDavid Gibson 958a7a00a72SDavid Gibson return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs); 959a7a00a72SDavid Gibson } 960a7a00a72SDavid Gibson 96120d695a9SAndreas Färber int kvm_arch_put_registers(CPUState *cs, int level) 962d76d1650Saurel32 { 96320d695a9SAndreas Färber PowerPCCPU *cpu = POWERPC_CPU(cs); 96420d695a9SAndreas Färber CPUPPCState *env = &cpu->env; 965d76d1650Saurel32 struct kvm_regs regs; 966d76d1650Saurel32 int ret; 967d76d1650Saurel32 int i; 968d76d1650Saurel32 9691bc22652SAndreas Färber ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, ®s); 9701bc22652SAndreas Färber if (ret < 0) { 971d76d1650Saurel32 return ret; 9721bc22652SAndreas Färber } 973d76d1650Saurel32 974d76d1650Saurel32 regs.ctr = env->ctr; 975d76d1650Saurel32 regs.lr = env->lr; 976da91a00fSRichard Henderson regs.xer = cpu_read_xer(env); 977d76d1650Saurel32 regs.msr = env->msr; 978d76d1650Saurel32 regs.pc = env->nip; 979d76d1650Saurel32 980d76d1650Saurel32 regs.srr0 = env->spr[SPR_SRR0]; 981d76d1650Saurel32 regs.srr1 = env->spr[SPR_SRR1]; 982d76d1650Saurel32 983d76d1650Saurel32 regs.sprg0 = env->spr[SPR_SPRG0]; 984d76d1650Saurel32 regs.sprg1 = env->spr[SPR_SPRG1]; 985d76d1650Saurel32 regs.sprg2 = env->spr[SPR_SPRG2]; 986d76d1650Saurel32 regs.sprg3 = env->spr[SPR_SPRG3]; 987d76d1650Saurel32 regs.sprg4 = env->spr[SPR_SPRG4]; 988d76d1650Saurel32 regs.sprg5 = env->spr[SPR_SPRG5]; 989d76d1650Saurel32 regs.sprg6 = env->spr[SPR_SPRG6]; 990d76d1650Saurel32 regs.sprg7 = env->spr[SPR_SPRG7]; 991d76d1650Saurel32 99290dc8812SScott Wood regs.pid = env->spr[SPR_BOOKE_PID]; 99390dc8812SScott Wood 994d76d1650Saurel32 for (i = 0;i < 32; i++) 995d76d1650Saurel32 
regs.gpr[i] = env->gpr[i]; 996d76d1650Saurel32 9974bddaf55SAlexey Kardashevskiy regs.cr = 0; 9984bddaf55SAlexey Kardashevskiy for (i = 0; i < 8; i++) { 9994bddaf55SAlexey Kardashevskiy regs.cr |= (env->crf[i] & 15) << (4 * (7 - i)); 10004bddaf55SAlexey Kardashevskiy } 10014bddaf55SAlexey Kardashevskiy 10021bc22652SAndreas Färber ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, ®s); 1003d76d1650Saurel32 if (ret < 0) 1004d76d1650Saurel32 return ret; 1005d76d1650Saurel32 100670b79849SDavid Gibson kvm_put_fp(cs); 100770b79849SDavid Gibson 100893dd5e85SScott Wood if (env->tlb_dirty) { 10091bc22652SAndreas Färber kvm_sw_tlb_put(cpu); 101093dd5e85SScott Wood env->tlb_dirty = false; 101193dd5e85SScott Wood } 101293dd5e85SScott Wood 1013f1af19d7SDavid Gibson if (cap_segstate && (level >= KVM_PUT_RESET_STATE)) { 1014a7a00a72SDavid Gibson ret = kvmppc_put_books_sregs(cpu); 1015a7a00a72SDavid Gibson if (ret < 0) { 1016f1af19d7SDavid Gibson return ret; 1017f1af19d7SDavid Gibson } 1018f1af19d7SDavid Gibson } 1019f1af19d7SDavid Gibson 1020f1af19d7SDavid Gibson if (cap_hior && (level >= KVM_PUT_RESET_STATE)) { 1021d67d40eaSDavid Gibson kvm_put_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR); 1022d67d40eaSDavid Gibson } 1023f1af19d7SDavid Gibson 1024d67d40eaSDavid Gibson if (cap_one_reg) { 1025d67d40eaSDavid Gibson int i; 1026d67d40eaSDavid Gibson 1027d67d40eaSDavid Gibson /* We deliberately ignore errors here, for kernels which have 1028d67d40eaSDavid Gibson * the ONE_REG calls, but don't support the specific 1029d67d40eaSDavid Gibson * registers, there's a reasonable chance things will still 1030d67d40eaSDavid Gibson * work, at least until we try to migrate. 
*/ 1031d67d40eaSDavid Gibson for (i = 0; i < 1024; i++) { 1032d67d40eaSDavid Gibson uint64_t id = env->spr_cb[i].one_reg_id; 1033d67d40eaSDavid Gibson 1034d67d40eaSDavid Gibson if (id != 0) { 1035d67d40eaSDavid Gibson kvm_put_one_spr(cs, id, i); 1036d67d40eaSDavid Gibson } 1037f1af19d7SDavid Gibson } 10389b00ea49SDavid Gibson 10399b00ea49SDavid Gibson #ifdef TARGET_PPC64 104080b3f79bSAlexey Kardashevskiy if (msr_ts) { 104180b3f79bSAlexey Kardashevskiy for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) { 104280b3f79bSAlexey Kardashevskiy kvm_set_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]); 104380b3f79bSAlexey Kardashevskiy } 104480b3f79bSAlexey Kardashevskiy for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) { 104580b3f79bSAlexey Kardashevskiy kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]); 104680b3f79bSAlexey Kardashevskiy } 104780b3f79bSAlexey Kardashevskiy kvm_set_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr); 104880b3f79bSAlexey Kardashevskiy kvm_set_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr); 104980b3f79bSAlexey Kardashevskiy kvm_set_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr); 105080b3f79bSAlexey Kardashevskiy kvm_set_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr); 105180b3f79bSAlexey Kardashevskiy kvm_set_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr); 105280b3f79bSAlexey Kardashevskiy kvm_set_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr); 105380b3f79bSAlexey Kardashevskiy kvm_set_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave); 105480b3f79bSAlexey Kardashevskiy kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr); 105580b3f79bSAlexey Kardashevskiy kvm_set_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr); 105680b3f79bSAlexey Kardashevskiy kvm_set_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar); 105780b3f79bSAlexey Kardashevskiy } 105880b3f79bSAlexey Kardashevskiy 10599b00ea49SDavid Gibson if (cap_papr) { 10609b00ea49SDavid Gibson if (kvm_put_vpa(cs) < 0) { 1061da56ff91SPeter Maydell DPRINTF("Warning: Unable to set VPA information to KVM\n"); 
10629b00ea49SDavid Gibson } 10639b00ea49SDavid Gibson } 106498a8b524SAlexey Kardashevskiy 106598a8b524SAlexey Kardashevskiy kvm_set_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset); 10669b00ea49SDavid Gibson #endif /* TARGET_PPC64 */ 1067f1af19d7SDavid Gibson } 1068f1af19d7SDavid Gibson 1069d76d1650Saurel32 return ret; 1070d76d1650Saurel32 } 1071d76d1650Saurel32 1072c371c2e3SBharat Bhushan static void kvm_sync_excp(CPUPPCState *env, int vector, int ivor) 1073c371c2e3SBharat Bhushan { 1074c371c2e3SBharat Bhushan env->excp_vectors[vector] = env->spr[ivor] + env->spr[SPR_BOOKE_IVPR]; 1075c371c2e3SBharat Bhushan } 1076c371c2e3SBharat Bhushan 1077a7a00a72SDavid Gibson static int kvmppc_get_booke_sregs(PowerPCCPU *cpu) 1078d76d1650Saurel32 { 107920d695a9SAndreas Färber CPUPPCState *env = &cpu->env; 1080ba5e5090SAlexander Graf struct kvm_sregs sregs; 1081a7a00a72SDavid Gibson int ret; 1082d76d1650Saurel32 1083a7a00a72SDavid Gibson ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs); 108490dc8812SScott Wood if (ret < 0) { 108590dc8812SScott Wood return ret; 108690dc8812SScott Wood } 108790dc8812SScott Wood 108890dc8812SScott Wood if (sregs.u.e.features & KVM_SREGS_E_BASE) { 108990dc8812SScott Wood env->spr[SPR_BOOKE_CSRR0] = sregs.u.e.csrr0; 109090dc8812SScott Wood env->spr[SPR_BOOKE_CSRR1] = sregs.u.e.csrr1; 109190dc8812SScott Wood env->spr[SPR_BOOKE_ESR] = sregs.u.e.esr; 109290dc8812SScott Wood env->spr[SPR_BOOKE_DEAR] = sregs.u.e.dear; 109390dc8812SScott Wood env->spr[SPR_BOOKE_MCSR] = sregs.u.e.mcsr; 109490dc8812SScott Wood env->spr[SPR_BOOKE_TSR] = sregs.u.e.tsr; 109590dc8812SScott Wood env->spr[SPR_BOOKE_TCR] = sregs.u.e.tcr; 109690dc8812SScott Wood env->spr[SPR_DECR] = sregs.u.e.dec; 109790dc8812SScott Wood env->spr[SPR_TBL] = sregs.u.e.tb & 0xffffffff; 109890dc8812SScott Wood env->spr[SPR_TBU] = sregs.u.e.tb >> 32; 109990dc8812SScott Wood env->spr[SPR_VRSAVE] = sregs.u.e.vrsave; 110090dc8812SScott Wood } 110190dc8812SScott Wood 110290dc8812SScott Wood 
if (sregs.u.e.features & KVM_SREGS_E_ARCH206) { 110390dc8812SScott Wood env->spr[SPR_BOOKE_PIR] = sregs.u.e.pir; 110490dc8812SScott Wood env->spr[SPR_BOOKE_MCSRR0] = sregs.u.e.mcsrr0; 110590dc8812SScott Wood env->spr[SPR_BOOKE_MCSRR1] = sregs.u.e.mcsrr1; 110690dc8812SScott Wood env->spr[SPR_BOOKE_DECAR] = sregs.u.e.decar; 110790dc8812SScott Wood env->spr[SPR_BOOKE_IVPR] = sregs.u.e.ivpr; 110890dc8812SScott Wood } 110990dc8812SScott Wood 111090dc8812SScott Wood if (sregs.u.e.features & KVM_SREGS_E_64) { 111190dc8812SScott Wood env->spr[SPR_BOOKE_EPCR] = sregs.u.e.epcr; 111290dc8812SScott Wood } 111390dc8812SScott Wood 111490dc8812SScott Wood if (sregs.u.e.features & KVM_SREGS_E_SPRG8) { 111590dc8812SScott Wood env->spr[SPR_BOOKE_SPRG8] = sregs.u.e.sprg8; 111690dc8812SScott Wood } 111790dc8812SScott Wood 111890dc8812SScott Wood if (sregs.u.e.features & KVM_SREGS_E_IVOR) { 111990dc8812SScott Wood env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0]; 1120c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_CRITICAL, SPR_BOOKE_IVOR0); 112190dc8812SScott Wood env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1]; 1122c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_MCHECK, SPR_BOOKE_IVOR1); 112390dc8812SScott Wood env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2]; 1124c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_DSI, SPR_BOOKE_IVOR2); 112590dc8812SScott Wood env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3]; 1126c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_ISI, SPR_BOOKE_IVOR3); 112790dc8812SScott Wood env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4]; 1128c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_EXTERNAL, SPR_BOOKE_IVOR4); 112990dc8812SScott Wood env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5]; 1130c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_ALIGN, SPR_BOOKE_IVOR5); 113190dc8812SScott Wood env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6]; 1132c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_PROGRAM, 
SPR_BOOKE_IVOR6); 113390dc8812SScott Wood env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7]; 1134c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_FPU, SPR_BOOKE_IVOR7); 113590dc8812SScott Wood env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8]; 1136c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_SYSCALL, SPR_BOOKE_IVOR8); 113790dc8812SScott Wood env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9]; 1138c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_APU, SPR_BOOKE_IVOR9); 113990dc8812SScott Wood env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10]; 1140c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_DECR, SPR_BOOKE_IVOR10); 114190dc8812SScott Wood env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11]; 1142c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_FIT, SPR_BOOKE_IVOR11); 114390dc8812SScott Wood env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12]; 1144c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_WDT, SPR_BOOKE_IVOR12); 114590dc8812SScott Wood env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13]; 1146c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_DTLB, SPR_BOOKE_IVOR13); 114790dc8812SScott Wood env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14]; 1148c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_ITLB, SPR_BOOKE_IVOR14); 114990dc8812SScott Wood env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15]; 1150c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_DEBUG, SPR_BOOKE_IVOR15); 115190dc8812SScott Wood 115290dc8812SScott Wood if (sregs.u.e.features & KVM_SREGS_E_SPE) { 115390dc8812SScott Wood env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0]; 1154c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_SPEU, SPR_BOOKE_IVOR32); 115590dc8812SScott Wood env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1]; 1156c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_EFPDI, SPR_BOOKE_IVOR33); 115790dc8812SScott Wood env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2]; 1158c371c2e3SBharat Bhushan 
kvm_sync_excp(env, POWERPC_EXCP_EFPRI, SPR_BOOKE_IVOR34); 115990dc8812SScott Wood } 116090dc8812SScott Wood 116190dc8812SScott Wood if (sregs.u.e.features & KVM_SREGS_E_PM) { 116290dc8812SScott Wood env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3]; 1163c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_EPERFM, SPR_BOOKE_IVOR35); 116490dc8812SScott Wood } 116590dc8812SScott Wood 116690dc8812SScott Wood if (sregs.u.e.features & KVM_SREGS_E_PC) { 116790dc8812SScott Wood env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4]; 1168c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_DOORI, SPR_BOOKE_IVOR36); 116990dc8812SScott Wood env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5]; 1170c371c2e3SBharat Bhushan kvm_sync_excp(env, POWERPC_EXCP_DOORCI, SPR_BOOKE_IVOR37); 117190dc8812SScott Wood } 117290dc8812SScott Wood } 117390dc8812SScott Wood 117490dc8812SScott Wood if (sregs.u.e.features & KVM_SREGS_E_ARCH206_MMU) { 117590dc8812SScott Wood env->spr[SPR_BOOKE_MAS0] = sregs.u.e.mas0; 117690dc8812SScott Wood env->spr[SPR_BOOKE_MAS1] = sregs.u.e.mas1; 117790dc8812SScott Wood env->spr[SPR_BOOKE_MAS2] = sregs.u.e.mas2; 117890dc8812SScott Wood env->spr[SPR_BOOKE_MAS3] = sregs.u.e.mas7_3 & 0xffffffff; 117990dc8812SScott Wood env->spr[SPR_BOOKE_MAS4] = sregs.u.e.mas4; 118090dc8812SScott Wood env->spr[SPR_BOOKE_MAS6] = sregs.u.e.mas6; 118190dc8812SScott Wood env->spr[SPR_BOOKE_MAS7] = sregs.u.e.mas7_3 >> 32; 118290dc8812SScott Wood env->spr[SPR_MMUCFG] = sregs.u.e.mmucfg; 118390dc8812SScott Wood env->spr[SPR_BOOKE_TLB0CFG] = sregs.u.e.tlbcfg[0]; 118490dc8812SScott Wood env->spr[SPR_BOOKE_TLB1CFG] = sregs.u.e.tlbcfg[1]; 118590dc8812SScott Wood } 118690dc8812SScott Wood 118790dc8812SScott Wood if (sregs.u.e.features & KVM_SREGS_EXP) { 118890dc8812SScott Wood env->spr[SPR_BOOKE_EPR] = sregs.u.e.epr; 118990dc8812SScott Wood } 119090dc8812SScott Wood 119190dc8812SScott Wood if (sregs.u.e.features & KVM_SREGS_E_PD) { 119290dc8812SScott Wood env->spr[SPR_BOOKE_EPLC] = 
sregs.u.e.eplc; 119390dc8812SScott Wood env->spr[SPR_BOOKE_EPSC] = sregs.u.e.epsc; 119490dc8812SScott Wood } 119590dc8812SScott Wood 119690dc8812SScott Wood if (sregs.u.e.impl_id == KVM_SREGS_E_IMPL_FSL) { 119790dc8812SScott Wood env->spr[SPR_E500_SVR] = sregs.u.e.impl.fsl.svr; 119890dc8812SScott Wood env->spr[SPR_Exxx_MCAR] = sregs.u.e.impl.fsl.mcar; 119990dc8812SScott Wood env->spr[SPR_HID0] = sregs.u.e.impl.fsl.hid0; 120090dc8812SScott Wood 120190dc8812SScott Wood if (sregs.u.e.impl.fsl.features & KVM_SREGS_E_FSL_PIDn) { 120290dc8812SScott Wood env->spr[SPR_BOOKE_PID1] = sregs.u.e.impl.fsl.pid1; 120390dc8812SScott Wood env->spr[SPR_BOOKE_PID2] = sregs.u.e.impl.fsl.pid2; 120490dc8812SScott Wood } 120590dc8812SScott Wood } 1206a7a00a72SDavid Gibson 1207a7a00a72SDavid Gibson return 0; 1208fafc0b6aSAlexander Graf } 120990dc8812SScott Wood 1210a7a00a72SDavid Gibson static int kvmppc_get_books_sregs(PowerPCCPU *cpu) 1211a7a00a72SDavid Gibson { 1212a7a00a72SDavid Gibson CPUPPCState *env = &cpu->env; 1213a7a00a72SDavid Gibson struct kvm_sregs sregs; 1214a7a00a72SDavid Gibson int ret; 1215a7a00a72SDavid Gibson int i; 1216a7a00a72SDavid Gibson 1217a7a00a72SDavid Gibson ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs); 121890dc8812SScott Wood if (ret < 0) { 121990dc8812SScott Wood return ret; 122090dc8812SScott Wood } 122190dc8812SScott Wood 1222e57ca75cSDavid Gibson if (!cpu->vhyp) { 1223bb593904SDavid Gibson ppc_store_sdr1(env, sregs.u.s.sdr1); 1224f3c75d42SAneesh Kumar K.V } 1225ba5e5090SAlexander Graf 1226ba5e5090SAlexander Graf /* Sync SLB */ 122782c09f2fSAlexander Graf #ifdef TARGET_PPC64 12284b4d4a21SAneesh Kumar K.V /* 12294b4d4a21SAneesh Kumar K.V * The packed SLB array we get from KVM_GET_SREGS only contains 1230a7a00a72SDavid Gibson * information about valid entries. So we flush our internal copy 1231a7a00a72SDavid Gibson * to get rid of stale ones, then put all valid SLB entries back 1232a7a00a72SDavid Gibson * in. 
12334b4d4a21SAneesh Kumar K.V */ 12344b4d4a21SAneesh Kumar K.V memset(env->slb, 0, sizeof(env->slb)); 1235d83af167SAneesh Kumar K.V for (i = 0; i < ARRAY_SIZE(env->slb); i++) { 12364b4d4a21SAneesh Kumar K.V target_ulong rb = sregs.u.s.ppc64.slb[i].slbe; 12374b4d4a21SAneesh Kumar K.V target_ulong rs = sregs.u.s.ppc64.slb[i].slbv; 12384b4d4a21SAneesh Kumar K.V /* 12394b4d4a21SAneesh Kumar K.V * Only restore valid entries 12404b4d4a21SAneesh Kumar K.V */ 12414b4d4a21SAneesh Kumar K.V if (rb & SLB_ESID_V) { 1242bcd81230SDavid Gibson ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs); 12434b4d4a21SAneesh Kumar K.V } 1244ba5e5090SAlexander Graf } 124582c09f2fSAlexander Graf #endif 1246ba5e5090SAlexander Graf 1247ba5e5090SAlexander Graf /* Sync SRs */ 1248ba5e5090SAlexander Graf for (i = 0; i < 16; i++) { 1249ba5e5090SAlexander Graf env->sr[i] = sregs.u.s.ppc32.sr[i]; 1250ba5e5090SAlexander Graf } 1251ba5e5090SAlexander Graf 1252ba5e5090SAlexander Graf /* Sync BATs */ 1253ba5e5090SAlexander Graf for (i = 0; i < 8; i++) { 1254ba5e5090SAlexander Graf env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff; 1255ba5e5090SAlexander Graf env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32; 1256ba5e5090SAlexander Graf env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff; 1257ba5e5090SAlexander Graf env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32; 1258ba5e5090SAlexander Graf } 1259a7a00a72SDavid Gibson 1260a7a00a72SDavid Gibson return 0; 1261a7a00a72SDavid Gibson } 1262a7a00a72SDavid Gibson 1263a7a00a72SDavid Gibson int kvm_arch_get_registers(CPUState *cs) 1264a7a00a72SDavid Gibson { 1265a7a00a72SDavid Gibson PowerPCCPU *cpu = POWERPC_CPU(cs); 1266a7a00a72SDavid Gibson CPUPPCState *env = &cpu->env; 1267a7a00a72SDavid Gibson struct kvm_regs regs; 1268a7a00a72SDavid Gibson uint32_t cr; 1269a7a00a72SDavid Gibson int i, ret; 1270a7a00a72SDavid Gibson 1271a7a00a72SDavid Gibson ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, ®s); 1272a7a00a72SDavid Gibson if (ret < 0) 1273a7a00a72SDavid Gibson 
return ret; 1274a7a00a72SDavid Gibson 1275a7a00a72SDavid Gibson cr = regs.cr; 1276a7a00a72SDavid Gibson for (i = 7; i >= 0; i--) { 1277a7a00a72SDavid Gibson env->crf[i] = cr & 15; 1278a7a00a72SDavid Gibson cr >>= 4; 1279a7a00a72SDavid Gibson } 1280a7a00a72SDavid Gibson 1281a7a00a72SDavid Gibson env->ctr = regs.ctr; 1282a7a00a72SDavid Gibson env->lr = regs.lr; 1283a7a00a72SDavid Gibson cpu_write_xer(env, regs.xer); 1284a7a00a72SDavid Gibson env->msr = regs.msr; 1285a7a00a72SDavid Gibson env->nip = regs.pc; 1286a7a00a72SDavid Gibson 1287a7a00a72SDavid Gibson env->spr[SPR_SRR0] = regs.srr0; 1288a7a00a72SDavid Gibson env->spr[SPR_SRR1] = regs.srr1; 1289a7a00a72SDavid Gibson 1290a7a00a72SDavid Gibson env->spr[SPR_SPRG0] = regs.sprg0; 1291a7a00a72SDavid Gibson env->spr[SPR_SPRG1] = regs.sprg1; 1292a7a00a72SDavid Gibson env->spr[SPR_SPRG2] = regs.sprg2; 1293a7a00a72SDavid Gibson env->spr[SPR_SPRG3] = regs.sprg3; 1294a7a00a72SDavid Gibson env->spr[SPR_SPRG4] = regs.sprg4; 1295a7a00a72SDavid Gibson env->spr[SPR_SPRG5] = regs.sprg5; 1296a7a00a72SDavid Gibson env->spr[SPR_SPRG6] = regs.sprg6; 1297a7a00a72SDavid Gibson env->spr[SPR_SPRG7] = regs.sprg7; 1298a7a00a72SDavid Gibson 1299a7a00a72SDavid Gibson env->spr[SPR_BOOKE_PID] = regs.pid; 1300a7a00a72SDavid Gibson 1301a7a00a72SDavid Gibson for (i = 0;i < 32; i++) 1302a7a00a72SDavid Gibson env->gpr[i] = regs.gpr[i]; 1303a7a00a72SDavid Gibson 1304a7a00a72SDavid Gibson kvm_get_fp(cs); 1305a7a00a72SDavid Gibson 1306a7a00a72SDavid Gibson if (cap_booke_sregs) { 1307a7a00a72SDavid Gibson ret = kvmppc_get_booke_sregs(cpu); 1308a7a00a72SDavid Gibson if (ret < 0) { 1309a7a00a72SDavid Gibson return ret; 1310a7a00a72SDavid Gibson } 1311a7a00a72SDavid Gibson } 1312a7a00a72SDavid Gibson 1313a7a00a72SDavid Gibson if (cap_segstate) { 1314a7a00a72SDavid Gibson ret = kvmppc_get_books_sregs(cpu); 1315a7a00a72SDavid Gibson if (ret < 0) { 1316a7a00a72SDavid Gibson return ret; 1317a7a00a72SDavid Gibson } 1318fafc0b6aSAlexander Graf } 
1319ba5e5090SAlexander Graf 1320d67d40eaSDavid Gibson if (cap_hior) { 1321d67d40eaSDavid Gibson kvm_get_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR); 1322d67d40eaSDavid Gibson } 1323d67d40eaSDavid Gibson 1324d67d40eaSDavid Gibson if (cap_one_reg) { 1325d67d40eaSDavid Gibson int i; 1326d67d40eaSDavid Gibson 1327d67d40eaSDavid Gibson /* We deliberately ignore errors here, for kernels which have 1328d67d40eaSDavid Gibson * the ONE_REG calls, but don't support the specific 1329d67d40eaSDavid Gibson * registers, there's a reasonable chance things will still 1330d67d40eaSDavid Gibson * work, at least until we try to migrate. */ 1331d67d40eaSDavid Gibson for (i = 0; i < 1024; i++) { 1332d67d40eaSDavid Gibson uint64_t id = env->spr_cb[i].one_reg_id; 1333d67d40eaSDavid Gibson 1334d67d40eaSDavid Gibson if (id != 0) { 1335d67d40eaSDavid Gibson kvm_get_one_spr(cs, id, i); 1336d67d40eaSDavid Gibson } 1337d67d40eaSDavid Gibson } 13389b00ea49SDavid Gibson 13399b00ea49SDavid Gibson #ifdef TARGET_PPC64 134080b3f79bSAlexey Kardashevskiy if (msr_ts) { 134180b3f79bSAlexey Kardashevskiy for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) { 134280b3f79bSAlexey Kardashevskiy kvm_get_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]); 134380b3f79bSAlexey Kardashevskiy } 134480b3f79bSAlexey Kardashevskiy for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) { 134580b3f79bSAlexey Kardashevskiy kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]); 134680b3f79bSAlexey Kardashevskiy } 134780b3f79bSAlexey Kardashevskiy kvm_get_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr); 134880b3f79bSAlexey Kardashevskiy kvm_get_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr); 134980b3f79bSAlexey Kardashevskiy kvm_get_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr); 135080b3f79bSAlexey Kardashevskiy kvm_get_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr); 135180b3f79bSAlexey Kardashevskiy kvm_get_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr); 135280b3f79bSAlexey Kardashevskiy kvm_get_one_reg(cs, KVM_REG_PPC_TM_PPR, 
&env->tm_ppr); 135380b3f79bSAlexey Kardashevskiy kvm_get_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave); 135480b3f79bSAlexey Kardashevskiy kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr); 135580b3f79bSAlexey Kardashevskiy kvm_get_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr); 135680b3f79bSAlexey Kardashevskiy kvm_get_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar); 135780b3f79bSAlexey Kardashevskiy } 135880b3f79bSAlexey Kardashevskiy 13599b00ea49SDavid Gibson if (cap_papr) { 13609b00ea49SDavid Gibson if (kvm_get_vpa(cs) < 0) { 1361da56ff91SPeter Maydell DPRINTF("Warning: Unable to get VPA information from KVM\n"); 13629b00ea49SDavid Gibson } 13639b00ea49SDavid Gibson } 136498a8b524SAlexey Kardashevskiy 136598a8b524SAlexey Kardashevskiy kvm_get_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset); 13669b00ea49SDavid Gibson #endif 1367d67d40eaSDavid Gibson } 1368d67d40eaSDavid Gibson 1369d76d1650Saurel32 return 0; 1370d76d1650Saurel32 } 1371d76d1650Saurel32 13721bc22652SAndreas Färber int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level) 1373fc87e185SAlexander Graf { 1374fc87e185SAlexander Graf unsigned virq = level ? 
KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET; 1375fc87e185SAlexander Graf 1376fc87e185SAlexander Graf if (irq != PPC_INTERRUPT_EXT) { 1377fc87e185SAlexander Graf return 0; 1378fc87e185SAlexander Graf } 1379fc87e185SAlexander Graf 1380fc87e185SAlexander Graf if (!kvm_enabled() || !cap_interrupt_unset || !cap_interrupt_level) { 1381fc87e185SAlexander Graf return 0; 1382fc87e185SAlexander Graf } 1383fc87e185SAlexander Graf 13841bc22652SAndreas Färber kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq); 1385fc87e185SAlexander Graf 1386fc87e185SAlexander Graf return 0; 1387fc87e185SAlexander Graf } 1388fc87e185SAlexander Graf 138916415335SAlexander Graf #if defined(TARGET_PPCEMB) 139016415335SAlexander Graf #define PPC_INPUT_INT PPC40x_INPUT_INT 139116415335SAlexander Graf #elif defined(TARGET_PPC64) 139216415335SAlexander Graf #define PPC_INPUT_INT PPC970_INPUT_INT 139316415335SAlexander Graf #else 139416415335SAlexander Graf #define PPC_INPUT_INT PPC6xx_INPUT_INT 139516415335SAlexander Graf #endif 139616415335SAlexander Graf 139720d695a9SAndreas Färber void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run) 1398d76d1650Saurel32 { 139920d695a9SAndreas Färber PowerPCCPU *cpu = POWERPC_CPU(cs); 140020d695a9SAndreas Färber CPUPPCState *env = &cpu->env; 1401d76d1650Saurel32 int r; 1402d76d1650Saurel32 unsigned irq; 1403d76d1650Saurel32 14044b8523eeSJan Kiszka qemu_mutex_lock_iothread(); 14054b8523eeSJan Kiszka 14065cbdb3a3SStefan Weil /* PowerPC QEMU tracks the various core input pins (interrupt, critical 1407d76d1650Saurel32 * interrupt, reset, etc) in PPC-specific env->irq_input_state. */ 1408fc87e185SAlexander Graf if (!cap_interrupt_level && 1409fc87e185SAlexander Graf run->ready_for_interrupt_injection && 1410259186a7SAndreas Färber (cs->interrupt_request & CPU_INTERRUPT_HARD) && 141116415335SAlexander Graf (env->irq_input_state & (1<<PPC_INPUT_INT))) 1412d76d1650Saurel32 { 1413d76d1650Saurel32 /* For now KVM disregards the 'irq' argument. 
However, in the 1414d76d1650Saurel32 * future KVM could cache it in-kernel to avoid a heavyweight exit 1415d76d1650Saurel32 * when reading the UIC. 1416d76d1650Saurel32 */ 1417fc87e185SAlexander Graf irq = KVM_INTERRUPT_SET; 1418d76d1650Saurel32 1419da56ff91SPeter Maydell DPRINTF("injected interrupt %d\n", irq); 14201bc22652SAndreas Färber r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &irq); 142155e5c285SAndreas Färber if (r < 0) { 142255e5c285SAndreas Färber printf("cpu %d fail inject %x\n", cs->cpu_index, irq); 142355e5c285SAndreas Färber } 1424c821c2bdSAlexander Graf 1425c821c2bdSAlexander Graf /* Always wake up soon in case the interrupt was level based */ 1426bc72ad67SAlex Bligh timer_mod(idle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 142773bcb24dSRutuja Shah (NANOSECONDS_PER_SECOND / 50)); 1428d76d1650Saurel32 } 1429d76d1650Saurel32 1430d76d1650Saurel32 /* We don't know if there are more interrupts pending after this. However, 1431d76d1650Saurel32 * the guest will return to userspace in the course of handling this one 1432d76d1650Saurel32 * anyways, so we will get a chance to deliver the rest. 
*/ 14334b8523eeSJan Kiszka 14344b8523eeSJan Kiszka qemu_mutex_unlock_iothread(); 1435d76d1650Saurel32 } 1436d76d1650Saurel32 14374c663752SPaolo Bonzini MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run) 1438d76d1650Saurel32 { 14394c663752SPaolo Bonzini return MEMTXATTRS_UNSPECIFIED; 1440d76d1650Saurel32 } 1441d76d1650Saurel32 144220d695a9SAndreas Färber int kvm_arch_process_async_events(CPUState *cs) 14430af691d7SMarcelo Tosatti { 1444259186a7SAndreas Färber return cs->halted; 14450af691d7SMarcelo Tosatti } 14460af691d7SMarcelo Tosatti 1447259186a7SAndreas Färber static int kvmppc_handle_halt(PowerPCCPU *cpu) 1448d76d1650Saurel32 { 1449259186a7SAndreas Färber CPUState *cs = CPU(cpu); 1450259186a7SAndreas Färber CPUPPCState *env = &cpu->env; 1451259186a7SAndreas Färber 1452259186a7SAndreas Färber if (!(cs->interrupt_request & CPU_INTERRUPT_HARD) && (msr_ee)) { 1453259186a7SAndreas Färber cs->halted = 1; 145427103424SAndreas Färber cs->exception_index = EXCP_HLT; 1455d76d1650Saurel32 } 1456d76d1650Saurel32 1457bb4ea393SJan Kiszka return 0; 1458d76d1650Saurel32 } 1459d76d1650Saurel32 1460d76d1650Saurel32 /* map dcr access to existing qemu dcr emulation */ 14611328c2bfSAndreas Färber static int kvmppc_handle_dcr_read(CPUPPCState *env, uint32_t dcrn, uint32_t *data) 1462d76d1650Saurel32 { 1463d76d1650Saurel32 if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0) 1464d76d1650Saurel32 fprintf(stderr, "Read to unhandled DCR (0x%x)\n", dcrn); 1465d76d1650Saurel32 1466bb4ea393SJan Kiszka return 0; 1467d76d1650Saurel32 } 1468d76d1650Saurel32 14691328c2bfSAndreas Färber static int kvmppc_handle_dcr_write(CPUPPCState *env, uint32_t dcrn, uint32_t data) 1470d76d1650Saurel32 { 1471d76d1650Saurel32 if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0) 1472d76d1650Saurel32 fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn); 1473d76d1650Saurel32 1474bb4ea393SJan Kiszka return 0; 1475d76d1650Saurel32 } 1476d76d1650Saurel32 14778a0548f9SBharat Bhushan int 
kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) 14788a0548f9SBharat Bhushan { 14798a0548f9SBharat Bhushan /* Mixed endian case is not handled */ 14808a0548f9SBharat Bhushan uint32_t sc = debug_inst_opcode; 14818a0548f9SBharat Bhushan 14828a0548f9SBharat Bhushan if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 14838a0548f9SBharat Bhushan sizeof(sc), 0) || 14848a0548f9SBharat Bhushan cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 1)) { 14858a0548f9SBharat Bhushan return -EINVAL; 14868a0548f9SBharat Bhushan } 14878a0548f9SBharat Bhushan 14888a0548f9SBharat Bhushan return 0; 14898a0548f9SBharat Bhushan } 14908a0548f9SBharat Bhushan 14918a0548f9SBharat Bhushan int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) 14928a0548f9SBharat Bhushan { 14938a0548f9SBharat Bhushan uint32_t sc; 14948a0548f9SBharat Bhushan 14958a0548f9SBharat Bhushan if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 0) || 14968a0548f9SBharat Bhushan sc != debug_inst_opcode || 14978a0548f9SBharat Bhushan cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 14988a0548f9SBharat Bhushan sizeof(sc), 1)) { 14998a0548f9SBharat Bhushan return -EINVAL; 15008a0548f9SBharat Bhushan } 15018a0548f9SBharat Bhushan 15028a0548f9SBharat Bhushan return 0; 15038a0548f9SBharat Bhushan } 15048a0548f9SBharat Bhushan 150588365d17SBharat Bhushan static int find_hw_breakpoint(target_ulong addr, int type) 150688365d17SBharat Bhushan { 150788365d17SBharat Bhushan int n; 150888365d17SBharat Bhushan 150988365d17SBharat Bhushan assert((nb_hw_breakpoint + nb_hw_watchpoint) 151088365d17SBharat Bhushan <= ARRAY_SIZE(hw_debug_points)); 151188365d17SBharat Bhushan 151288365d17SBharat Bhushan for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) { 151388365d17SBharat Bhushan if (hw_debug_points[n].addr == addr && 151488365d17SBharat Bhushan hw_debug_points[n].type == type) { 151588365d17SBharat Bhushan return n; 151688365d17SBharat 
Bhushan } 151788365d17SBharat Bhushan } 151888365d17SBharat Bhushan 151988365d17SBharat Bhushan return -1; 152088365d17SBharat Bhushan } 152188365d17SBharat Bhushan 152288365d17SBharat Bhushan static int find_hw_watchpoint(target_ulong addr, int *flag) 152388365d17SBharat Bhushan { 152488365d17SBharat Bhushan int n; 152588365d17SBharat Bhushan 152688365d17SBharat Bhushan n = find_hw_breakpoint(addr, GDB_WATCHPOINT_ACCESS); 152788365d17SBharat Bhushan if (n >= 0) { 152888365d17SBharat Bhushan *flag = BP_MEM_ACCESS; 152988365d17SBharat Bhushan return n; 153088365d17SBharat Bhushan } 153188365d17SBharat Bhushan 153288365d17SBharat Bhushan n = find_hw_breakpoint(addr, GDB_WATCHPOINT_WRITE); 153388365d17SBharat Bhushan if (n >= 0) { 153488365d17SBharat Bhushan *flag = BP_MEM_WRITE; 153588365d17SBharat Bhushan return n; 153688365d17SBharat Bhushan } 153788365d17SBharat Bhushan 153888365d17SBharat Bhushan n = find_hw_breakpoint(addr, GDB_WATCHPOINT_READ); 153988365d17SBharat Bhushan if (n >= 0) { 154088365d17SBharat Bhushan *flag = BP_MEM_READ; 154188365d17SBharat Bhushan return n; 154288365d17SBharat Bhushan } 154388365d17SBharat Bhushan 154488365d17SBharat Bhushan return -1; 154588365d17SBharat Bhushan } 154688365d17SBharat Bhushan 154788365d17SBharat Bhushan int kvm_arch_insert_hw_breakpoint(target_ulong addr, 154888365d17SBharat Bhushan target_ulong len, int type) 154988365d17SBharat Bhushan { 155088365d17SBharat Bhushan if ((nb_hw_breakpoint + nb_hw_watchpoint) >= ARRAY_SIZE(hw_debug_points)) { 155188365d17SBharat Bhushan return -ENOBUFS; 155288365d17SBharat Bhushan } 155388365d17SBharat Bhushan 155488365d17SBharat Bhushan hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].addr = addr; 155588365d17SBharat Bhushan hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].type = type; 155688365d17SBharat Bhushan 155788365d17SBharat Bhushan switch (type) { 155888365d17SBharat Bhushan case GDB_BREAKPOINT_HW: 155988365d17SBharat Bhushan if (nb_hw_breakpoint >= 
max_hw_breakpoint) { 156088365d17SBharat Bhushan return -ENOBUFS; 156188365d17SBharat Bhushan } 156288365d17SBharat Bhushan 156388365d17SBharat Bhushan if (find_hw_breakpoint(addr, type) >= 0) { 156488365d17SBharat Bhushan return -EEXIST; 156588365d17SBharat Bhushan } 156688365d17SBharat Bhushan 156788365d17SBharat Bhushan nb_hw_breakpoint++; 156888365d17SBharat Bhushan break; 156988365d17SBharat Bhushan 157088365d17SBharat Bhushan case GDB_WATCHPOINT_WRITE: 157188365d17SBharat Bhushan case GDB_WATCHPOINT_READ: 157288365d17SBharat Bhushan case GDB_WATCHPOINT_ACCESS: 157388365d17SBharat Bhushan if (nb_hw_watchpoint >= max_hw_watchpoint) { 157488365d17SBharat Bhushan return -ENOBUFS; 157588365d17SBharat Bhushan } 157688365d17SBharat Bhushan 157788365d17SBharat Bhushan if (find_hw_breakpoint(addr, type) >= 0) { 157888365d17SBharat Bhushan return -EEXIST; 157988365d17SBharat Bhushan } 158088365d17SBharat Bhushan 158188365d17SBharat Bhushan nb_hw_watchpoint++; 158288365d17SBharat Bhushan break; 158388365d17SBharat Bhushan 158488365d17SBharat Bhushan default: 158588365d17SBharat Bhushan return -ENOSYS; 158688365d17SBharat Bhushan } 158788365d17SBharat Bhushan 158888365d17SBharat Bhushan return 0; 158988365d17SBharat Bhushan } 159088365d17SBharat Bhushan 159188365d17SBharat Bhushan int kvm_arch_remove_hw_breakpoint(target_ulong addr, 159288365d17SBharat Bhushan target_ulong len, int type) 159388365d17SBharat Bhushan { 159488365d17SBharat Bhushan int n; 159588365d17SBharat Bhushan 159688365d17SBharat Bhushan n = find_hw_breakpoint(addr, type); 159788365d17SBharat Bhushan if (n < 0) { 159888365d17SBharat Bhushan return -ENOENT; 159988365d17SBharat Bhushan } 160088365d17SBharat Bhushan 160188365d17SBharat Bhushan switch (type) { 160288365d17SBharat Bhushan case GDB_BREAKPOINT_HW: 160388365d17SBharat Bhushan nb_hw_breakpoint--; 160488365d17SBharat Bhushan break; 160588365d17SBharat Bhushan 160688365d17SBharat Bhushan case GDB_WATCHPOINT_WRITE: 160788365d17SBharat Bhushan case 
GDB_WATCHPOINT_READ: 160888365d17SBharat Bhushan case GDB_WATCHPOINT_ACCESS: 160988365d17SBharat Bhushan nb_hw_watchpoint--; 161088365d17SBharat Bhushan break; 161188365d17SBharat Bhushan 161288365d17SBharat Bhushan default: 161388365d17SBharat Bhushan return -ENOSYS; 161488365d17SBharat Bhushan } 161588365d17SBharat Bhushan hw_debug_points[n] = hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint]; 161688365d17SBharat Bhushan 161788365d17SBharat Bhushan return 0; 161888365d17SBharat Bhushan } 161988365d17SBharat Bhushan 162088365d17SBharat Bhushan void kvm_arch_remove_all_hw_breakpoints(void) 162188365d17SBharat Bhushan { 162288365d17SBharat Bhushan nb_hw_breakpoint = nb_hw_watchpoint = 0; 162388365d17SBharat Bhushan } 162488365d17SBharat Bhushan 16258a0548f9SBharat Bhushan void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg) 16268a0548f9SBharat Bhushan { 162788365d17SBharat Bhushan int n; 162888365d17SBharat Bhushan 16298a0548f9SBharat Bhushan /* Software Breakpoint updates */ 16308a0548f9SBharat Bhushan if (kvm_sw_breakpoints_active(cs)) { 16318a0548f9SBharat Bhushan dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP; 16328a0548f9SBharat Bhushan } 163388365d17SBharat Bhushan 163488365d17SBharat Bhushan assert((nb_hw_breakpoint + nb_hw_watchpoint) 163588365d17SBharat Bhushan <= ARRAY_SIZE(hw_debug_points)); 163688365d17SBharat Bhushan assert((nb_hw_breakpoint + nb_hw_watchpoint) <= ARRAY_SIZE(dbg->arch.bp)); 163788365d17SBharat Bhushan 163888365d17SBharat Bhushan if (nb_hw_breakpoint + nb_hw_watchpoint > 0) { 163988365d17SBharat Bhushan dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP; 164088365d17SBharat Bhushan memset(dbg->arch.bp, 0, sizeof(dbg->arch.bp)); 164188365d17SBharat Bhushan for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) { 164288365d17SBharat Bhushan switch (hw_debug_points[n].type) { 164388365d17SBharat Bhushan case GDB_BREAKPOINT_HW: 164488365d17SBharat Bhushan dbg->arch.bp[n].type = 
KVMPPC_DEBUG_BREAKPOINT; 164588365d17SBharat Bhushan break; 164688365d17SBharat Bhushan case GDB_WATCHPOINT_WRITE: 164788365d17SBharat Bhushan dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE; 164888365d17SBharat Bhushan break; 164988365d17SBharat Bhushan case GDB_WATCHPOINT_READ: 165088365d17SBharat Bhushan dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_READ; 165188365d17SBharat Bhushan break; 165288365d17SBharat Bhushan case GDB_WATCHPOINT_ACCESS: 165388365d17SBharat Bhushan dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE | 165488365d17SBharat Bhushan KVMPPC_DEBUG_WATCH_READ; 165588365d17SBharat Bhushan break; 165688365d17SBharat Bhushan default: 165788365d17SBharat Bhushan cpu_abort(cs, "Unsupported breakpoint type\n"); 165888365d17SBharat Bhushan } 165988365d17SBharat Bhushan dbg->arch.bp[n].addr = hw_debug_points[n].addr; 166088365d17SBharat Bhushan } 166188365d17SBharat Bhushan } 16628a0548f9SBharat Bhushan } 16638a0548f9SBharat Bhushan 16648a0548f9SBharat Bhushan static int kvm_handle_debug(PowerPCCPU *cpu, struct kvm_run *run) 16658a0548f9SBharat Bhushan { 16668a0548f9SBharat Bhushan CPUState *cs = CPU(cpu); 16678a0548f9SBharat Bhushan CPUPPCState *env = &cpu->env; 16688a0548f9SBharat Bhushan struct kvm_debug_exit_arch *arch_info = &run->debug.arch; 16698a0548f9SBharat Bhushan int handle = 0; 167088365d17SBharat Bhushan int n; 167188365d17SBharat Bhushan int flag = 0; 16728a0548f9SBharat Bhushan 167388365d17SBharat Bhushan if (cs->singlestep_enabled) { 167488365d17SBharat Bhushan handle = 1; 167588365d17SBharat Bhushan } else if (arch_info->status) { 167688365d17SBharat Bhushan if (nb_hw_breakpoint + nb_hw_watchpoint > 0) { 167788365d17SBharat Bhushan if (arch_info->status & KVMPPC_DEBUG_BREAKPOINT) { 167888365d17SBharat Bhushan n = find_hw_breakpoint(arch_info->address, GDB_BREAKPOINT_HW); 167988365d17SBharat Bhushan if (n >= 0) { 168088365d17SBharat Bhushan handle = 1; 168188365d17SBharat Bhushan } 168288365d17SBharat Bhushan } else if (arch_info->status & 
(KVMPPC_DEBUG_WATCH_READ | 168388365d17SBharat Bhushan KVMPPC_DEBUG_WATCH_WRITE)) { 168488365d17SBharat Bhushan n = find_hw_watchpoint(arch_info->address, &flag); 168588365d17SBharat Bhushan if (n >= 0) { 168688365d17SBharat Bhushan handle = 1; 168788365d17SBharat Bhushan cs->watchpoint_hit = &hw_watchpoint; 168888365d17SBharat Bhushan hw_watchpoint.vaddr = hw_debug_points[n].addr; 168988365d17SBharat Bhushan hw_watchpoint.flags = flag; 169088365d17SBharat Bhushan } 169188365d17SBharat Bhushan } 169288365d17SBharat Bhushan } 169388365d17SBharat Bhushan } else if (kvm_find_sw_breakpoint(cs, arch_info->address)) { 16948a0548f9SBharat Bhushan handle = 1; 16958a0548f9SBharat Bhushan } else { 16968a0548f9SBharat Bhushan /* QEMU is not able to handle debug exception, so inject 16978a0548f9SBharat Bhushan * program exception to guest; 16988a0548f9SBharat Bhushan * Yes program exception NOT debug exception !! 169988365d17SBharat Bhushan * When QEMU is using debug resources then debug exception must 170088365d17SBharat Bhushan * be always set. To achieve this we set MSR_DE and also set 170188365d17SBharat Bhushan * MSRP_DEP so guest cannot change MSR_DE. 170288365d17SBharat Bhushan * When emulating debug resource for guest we want guest 170388365d17SBharat Bhushan * to control MSR_DE (enable/disable debug interrupt on need). 170488365d17SBharat Bhushan * Supporting both configurations are NOT possible. 170588365d17SBharat Bhushan * So the result is that we cannot share debug resources 170688365d17SBharat Bhushan * between QEMU and Guest on BOOKE architecture. 
170788365d17SBharat Bhushan * In the current design QEMU gets the priority over guest, 170888365d17SBharat Bhushan * this means that if QEMU is using debug resources then guest 170988365d17SBharat Bhushan * cannot use them; 17108a0548f9SBharat Bhushan * For software breakpoint QEMU uses a privileged instruction; 17118a0548f9SBharat Bhushan * So there cannot be any reason that we are here for guest 17128a0548f9SBharat Bhushan * set debug exception, only possibility is guest executed a 17138a0548f9SBharat Bhushan * privileged / illegal instruction and that's why we are 17148a0548f9SBharat Bhushan * injecting a program interrupt. 17158a0548f9SBharat Bhushan */ 17168a0548f9SBharat Bhushan 17178a0548f9SBharat Bhushan cpu_synchronize_state(cs); 17188a0548f9SBharat Bhushan /* env->nip is PC, so increment this by 4 to use 17198a0548f9SBharat Bhushan * ppc_cpu_do_interrupt(), which set srr0 = env->nip - 4. 17208a0548f9SBharat Bhushan */ 17218a0548f9SBharat Bhushan env->nip += 4; 17228a0548f9SBharat Bhushan cs->exception_index = POWERPC_EXCP_PROGRAM; 17238a0548f9SBharat Bhushan env->error_code = POWERPC_EXCP_INVAL; 17248a0548f9SBharat Bhushan ppc_cpu_do_interrupt(cs); 17258a0548f9SBharat Bhushan } 17268a0548f9SBharat Bhushan 17278a0548f9SBharat Bhushan return handle; 17288a0548f9SBharat Bhushan } 17298a0548f9SBharat Bhushan 173020d695a9SAndreas Färber int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) 1731d76d1650Saurel32 { 173220d695a9SAndreas Färber PowerPCCPU *cpu = POWERPC_CPU(cs); 173320d695a9SAndreas Färber CPUPPCState *env = &cpu->env; 1734bb4ea393SJan Kiszka int ret; 1735d76d1650Saurel32 17364b8523eeSJan Kiszka qemu_mutex_lock_iothread(); 17374b8523eeSJan Kiszka 1738d76d1650Saurel32 switch (run->exit_reason) { 1739d76d1650Saurel32 case KVM_EXIT_DCR: 1740d76d1650Saurel32 if (run->dcr.is_write) { 1741da56ff91SPeter Maydell DPRINTF("handle dcr write\n"); 1742d76d1650Saurel32 ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data); 1743d76d1650Saurel32 
} else { 1744da56ff91SPeter Maydell DPRINTF("handle dcr read\n"); 1745d76d1650Saurel32 ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data); 1746d76d1650Saurel32 } 1747d76d1650Saurel32 break; 1748d76d1650Saurel32 case KVM_EXIT_HLT: 1749da56ff91SPeter Maydell DPRINTF("handle halt\n"); 1750259186a7SAndreas Färber ret = kvmppc_handle_halt(cpu); 1751d76d1650Saurel32 break; 1752c6304a4aSDavid Gibson #if defined(TARGET_PPC64) 1753f61b4bedSAlexander Graf case KVM_EXIT_PAPR_HCALL: 1754da56ff91SPeter Maydell DPRINTF("handle PAPR hypercall\n"); 175520d695a9SAndreas Färber run->papr_hcall.ret = spapr_hypercall(cpu, 1756aa100fa4SAndreas Färber run->papr_hcall.nr, 1757f61b4bedSAlexander Graf run->papr_hcall.args); 175878e8fde2SDavid Gibson ret = 0; 1759f61b4bedSAlexander Graf break; 1760f61b4bedSAlexander Graf #endif 17615b95b8b9SAlexander Graf case KVM_EXIT_EPR: 1762da56ff91SPeter Maydell DPRINTF("handle epr\n"); 1763933b19eaSAlexander Graf run->epr.epr = ldl_phys(cs->as, env->mpic_iack); 17645b95b8b9SAlexander Graf ret = 0; 17655b95b8b9SAlexander Graf break; 176631f2cb8fSBharat Bhushan case KVM_EXIT_WATCHDOG: 1767da56ff91SPeter Maydell DPRINTF("handle watchdog expiry\n"); 176831f2cb8fSBharat Bhushan watchdog_perform_action(); 176931f2cb8fSBharat Bhushan ret = 0; 177031f2cb8fSBharat Bhushan break; 177131f2cb8fSBharat Bhushan 17728a0548f9SBharat Bhushan case KVM_EXIT_DEBUG: 17738a0548f9SBharat Bhushan DPRINTF("handle debug exception\n"); 17748a0548f9SBharat Bhushan if (kvm_handle_debug(cpu, run)) { 17758a0548f9SBharat Bhushan ret = EXCP_DEBUG; 17768a0548f9SBharat Bhushan break; 17778a0548f9SBharat Bhushan } 17788a0548f9SBharat Bhushan /* re-enter, this exception was guest-internal */ 17798a0548f9SBharat Bhushan ret = 0; 17808a0548f9SBharat Bhushan break; 17818a0548f9SBharat Bhushan 178273aaec4aSJan Kiszka default: 178373aaec4aSJan Kiszka fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason); 178473aaec4aSJan Kiszka ret = -1; 178573aaec4aSJan Kiszka 
break; 1786d76d1650Saurel32 } 1787d76d1650Saurel32 17884b8523eeSJan Kiszka qemu_mutex_unlock_iothread(); 1789d76d1650Saurel32 return ret; 1790d76d1650Saurel32 } 1791d76d1650Saurel32 179231f2cb8fSBharat Bhushan int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits) 179331f2cb8fSBharat Bhushan { 179431f2cb8fSBharat Bhushan CPUState *cs = CPU(cpu); 179531f2cb8fSBharat Bhushan uint32_t bits = tsr_bits; 179631f2cb8fSBharat Bhushan struct kvm_one_reg reg = { 179731f2cb8fSBharat Bhushan .id = KVM_REG_PPC_OR_TSR, 179831f2cb8fSBharat Bhushan .addr = (uintptr_t) &bits, 179931f2cb8fSBharat Bhushan }; 180031f2cb8fSBharat Bhushan 180131f2cb8fSBharat Bhushan return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®); 180231f2cb8fSBharat Bhushan } 180331f2cb8fSBharat Bhushan 180431f2cb8fSBharat Bhushan int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits) 180531f2cb8fSBharat Bhushan { 180631f2cb8fSBharat Bhushan 180731f2cb8fSBharat Bhushan CPUState *cs = CPU(cpu); 180831f2cb8fSBharat Bhushan uint32_t bits = tsr_bits; 180931f2cb8fSBharat Bhushan struct kvm_one_reg reg = { 181031f2cb8fSBharat Bhushan .id = KVM_REG_PPC_CLEAR_TSR, 181131f2cb8fSBharat Bhushan .addr = (uintptr_t) &bits, 181231f2cb8fSBharat Bhushan }; 181331f2cb8fSBharat Bhushan 181431f2cb8fSBharat Bhushan return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®); 181531f2cb8fSBharat Bhushan } 181631f2cb8fSBharat Bhushan 181731f2cb8fSBharat Bhushan int kvmppc_set_tcr(PowerPCCPU *cpu) 181831f2cb8fSBharat Bhushan { 181931f2cb8fSBharat Bhushan CPUState *cs = CPU(cpu); 182031f2cb8fSBharat Bhushan CPUPPCState *env = &cpu->env; 182131f2cb8fSBharat Bhushan uint32_t tcr = env->spr[SPR_BOOKE_TCR]; 182231f2cb8fSBharat Bhushan 182331f2cb8fSBharat Bhushan struct kvm_one_reg reg = { 182431f2cb8fSBharat Bhushan .id = KVM_REG_PPC_TCR, 182531f2cb8fSBharat Bhushan .addr = (uintptr_t) &tcr, 182631f2cb8fSBharat Bhushan }; 182731f2cb8fSBharat Bhushan 182831f2cb8fSBharat Bhushan return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®); 182931f2cb8fSBharat Bhushan 
} 183031f2cb8fSBharat Bhushan 183131f2cb8fSBharat Bhushan int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu) 183231f2cb8fSBharat Bhushan { 183331f2cb8fSBharat Bhushan CPUState *cs = CPU(cpu); 183431f2cb8fSBharat Bhushan int ret; 183531f2cb8fSBharat Bhushan 183631f2cb8fSBharat Bhushan if (!kvm_enabled()) { 183731f2cb8fSBharat Bhushan return -1; 183831f2cb8fSBharat Bhushan } 183931f2cb8fSBharat Bhushan 184031f2cb8fSBharat Bhushan if (!cap_ppc_watchdog) { 184131f2cb8fSBharat Bhushan printf("warning: KVM does not support watchdog"); 184231f2cb8fSBharat Bhushan return -1; 184331f2cb8fSBharat Bhushan } 184431f2cb8fSBharat Bhushan 184548add816SCornelia Huck ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_BOOKE_WATCHDOG, 0); 184631f2cb8fSBharat Bhushan if (ret < 0) { 184731f2cb8fSBharat Bhushan fprintf(stderr, "%s: couldn't enable KVM_CAP_PPC_BOOKE_WATCHDOG: %s\n", 184831f2cb8fSBharat Bhushan __func__, strerror(-ret)); 184931f2cb8fSBharat Bhushan return ret; 185031f2cb8fSBharat Bhushan } 185131f2cb8fSBharat Bhushan 185231f2cb8fSBharat Bhushan return ret; 185331f2cb8fSBharat Bhushan } 185431f2cb8fSBharat Bhushan 1855dc333cd6SAlexander Graf static int read_cpuinfo(const char *field, char *value, int len) 1856dc333cd6SAlexander Graf { 1857dc333cd6SAlexander Graf FILE *f; 1858dc333cd6SAlexander Graf int ret = -1; 1859dc333cd6SAlexander Graf int field_len = strlen(field); 1860dc333cd6SAlexander Graf char line[512]; 1861dc333cd6SAlexander Graf 1862dc333cd6SAlexander Graf f = fopen("/proc/cpuinfo", "r"); 1863dc333cd6SAlexander Graf if (!f) { 1864dc333cd6SAlexander Graf return -1; 1865dc333cd6SAlexander Graf } 1866dc333cd6SAlexander Graf 1867dc333cd6SAlexander Graf do { 1868dc333cd6SAlexander Graf if (!fgets(line, sizeof(line), f)) { 1869dc333cd6SAlexander Graf break; 1870dc333cd6SAlexander Graf } 1871dc333cd6SAlexander Graf if (!strncmp(line, field, field_len)) { 1872ae215068SJim Meyering pstrcpy(value, len, line); 1873dc333cd6SAlexander Graf ret = 0; 1874dc333cd6SAlexander Graf break; 
1875dc333cd6SAlexander Graf } 1876dc333cd6SAlexander Graf } while(*line); 1877dc333cd6SAlexander Graf 1878dc333cd6SAlexander Graf fclose(f); 1879dc333cd6SAlexander Graf 1880dc333cd6SAlexander Graf return ret; 1881dc333cd6SAlexander Graf } 1882dc333cd6SAlexander Graf 1883dc333cd6SAlexander Graf uint32_t kvmppc_get_tbfreq(void) 1884dc333cd6SAlexander Graf { 1885dc333cd6SAlexander Graf char line[512]; 1886dc333cd6SAlexander Graf char *ns; 188773bcb24dSRutuja Shah uint32_t retval = NANOSECONDS_PER_SECOND; 1888dc333cd6SAlexander Graf 1889dc333cd6SAlexander Graf if (read_cpuinfo("timebase", line, sizeof(line))) { 1890dc333cd6SAlexander Graf return retval; 1891dc333cd6SAlexander Graf } 1892dc333cd6SAlexander Graf 1893dc333cd6SAlexander Graf if (!(ns = strchr(line, ':'))) { 1894dc333cd6SAlexander Graf return retval; 1895dc333cd6SAlexander Graf } 1896dc333cd6SAlexander Graf 1897dc333cd6SAlexander Graf ns++; 1898dc333cd6SAlexander Graf 1899f9b8e7f6SShraddha Barke return atoi(ns); 1900ef951443SNikunj A Dadhania } 1901ef951443SNikunj A Dadhania 1902ef951443SNikunj A Dadhania bool kvmppc_get_host_serial(char **value) 1903ef951443SNikunj A Dadhania { 1904ef951443SNikunj A Dadhania return g_file_get_contents("/proc/device-tree/system-id", value, NULL, 1905ef951443SNikunj A Dadhania NULL); 1906ef951443SNikunj A Dadhania } 1907ef951443SNikunj A Dadhania 1908ef951443SNikunj A Dadhania bool kvmppc_get_host_model(char **value) 1909ef951443SNikunj A Dadhania { 1910ef951443SNikunj A Dadhania return g_file_get_contents("/proc/device-tree/model", value, NULL, NULL); 1911dc333cd6SAlexander Graf } 19124513d923SGleb Natapov 1913eadaada1SAlexander Graf /* Try to find a device tree node for a CPU with clock-frequency property */ 1914eadaada1SAlexander Graf static int kvmppc_find_cpu_dt(char *buf, int buf_len) 1915eadaada1SAlexander Graf { 1916eadaada1SAlexander Graf struct dirent *dirp; 1917eadaada1SAlexander Graf DIR *dp; 1918eadaada1SAlexander Graf 1919eadaada1SAlexander Graf if ((dp = 
opendir(PROC_DEVTREE_CPU)) == NULL) { 1920eadaada1SAlexander Graf printf("Can't open directory " PROC_DEVTREE_CPU "\n"); 1921eadaada1SAlexander Graf return -1; 1922eadaada1SAlexander Graf } 1923eadaada1SAlexander Graf 1924eadaada1SAlexander Graf buf[0] = '\0'; 1925eadaada1SAlexander Graf while ((dirp = readdir(dp)) != NULL) { 1926eadaada1SAlexander Graf FILE *f; 1927eadaada1SAlexander Graf snprintf(buf, buf_len, "%s%s/clock-frequency", PROC_DEVTREE_CPU, 1928eadaada1SAlexander Graf dirp->d_name); 1929eadaada1SAlexander Graf f = fopen(buf, "r"); 1930eadaada1SAlexander Graf if (f) { 1931eadaada1SAlexander Graf snprintf(buf, buf_len, "%s%s", PROC_DEVTREE_CPU, dirp->d_name); 1932eadaada1SAlexander Graf fclose(f); 1933eadaada1SAlexander Graf break; 1934eadaada1SAlexander Graf } 1935eadaada1SAlexander Graf buf[0] = '\0'; 1936eadaada1SAlexander Graf } 1937eadaada1SAlexander Graf closedir(dp); 1938eadaada1SAlexander Graf if (buf[0] == '\0') { 1939eadaada1SAlexander Graf printf("Unknown host!\n"); 1940eadaada1SAlexander Graf return -1; 1941eadaada1SAlexander Graf } 1942eadaada1SAlexander Graf 1943eadaada1SAlexander Graf return 0; 1944eadaada1SAlexander Graf } 1945eadaada1SAlexander Graf 19467d94a30bSSukadev Bhattiprolu static uint64_t kvmppc_read_int_dt(const char *filename) 1947eadaada1SAlexander Graf { 19489bc884b7SDavid Gibson union { 19499bc884b7SDavid Gibson uint32_t v32; 19509bc884b7SDavid Gibson uint64_t v64; 19519bc884b7SDavid Gibson } u; 1952eadaada1SAlexander Graf FILE *f; 1953eadaada1SAlexander Graf int len; 1954eadaada1SAlexander Graf 19557d94a30bSSukadev Bhattiprolu f = fopen(filename, "rb"); 1956eadaada1SAlexander Graf if (!f) { 1957eadaada1SAlexander Graf return -1; 1958eadaada1SAlexander Graf } 1959eadaada1SAlexander Graf 19609bc884b7SDavid Gibson len = fread(&u, 1, sizeof(u), f); 1961eadaada1SAlexander Graf fclose(f); 1962eadaada1SAlexander Graf switch (len) { 19639bc884b7SDavid Gibson case 4: 19649bc884b7SDavid Gibson /* property is a 32-bit quantity */ 
19659bc884b7SDavid Gibson return be32_to_cpu(u.v32); 19669bc884b7SDavid Gibson case 8: 19679bc884b7SDavid Gibson return be64_to_cpu(u.v64); 1968eadaada1SAlexander Graf } 1969eadaada1SAlexander Graf 1970eadaada1SAlexander Graf return 0; 1971eadaada1SAlexander Graf } 1972eadaada1SAlexander Graf 19737d94a30bSSukadev Bhattiprolu /* Read a CPU node property from the host device tree that's a single 19747d94a30bSSukadev Bhattiprolu * integer (32-bit or 64-bit). Returns 0 if anything goes wrong 19757d94a30bSSukadev Bhattiprolu * (can't find or open the property, or doesn't understand the 19767d94a30bSSukadev Bhattiprolu * format) */ 19777d94a30bSSukadev Bhattiprolu static uint64_t kvmppc_read_int_cpu_dt(const char *propname) 19787d94a30bSSukadev Bhattiprolu { 19797d94a30bSSukadev Bhattiprolu char buf[PATH_MAX], *tmp; 19807d94a30bSSukadev Bhattiprolu uint64_t val; 19817d94a30bSSukadev Bhattiprolu 19827d94a30bSSukadev Bhattiprolu if (kvmppc_find_cpu_dt(buf, sizeof(buf))) { 19837d94a30bSSukadev Bhattiprolu return -1; 19847d94a30bSSukadev Bhattiprolu } 19857d94a30bSSukadev Bhattiprolu 19867d94a30bSSukadev Bhattiprolu tmp = g_strdup_printf("%s/%s", buf, propname); 19877d94a30bSSukadev Bhattiprolu val = kvmppc_read_int_dt(tmp); 19887d94a30bSSukadev Bhattiprolu g_free(tmp); 19897d94a30bSSukadev Bhattiprolu 19907d94a30bSSukadev Bhattiprolu return val; 19917d94a30bSSukadev Bhattiprolu } 19927d94a30bSSukadev Bhattiprolu 19939bc884b7SDavid Gibson uint64_t kvmppc_get_clockfreq(void) 19949bc884b7SDavid Gibson { 19959bc884b7SDavid Gibson return kvmppc_read_int_cpu_dt("clock-frequency"); 19969bc884b7SDavid Gibson } 19979bc884b7SDavid Gibson 19986659394fSDavid Gibson uint32_t kvmppc_get_vmx(void) 19996659394fSDavid Gibson { 20006659394fSDavid Gibson return kvmppc_read_int_cpu_dt("ibm,vmx"); 20016659394fSDavid Gibson } 20026659394fSDavid Gibson 20036659394fSDavid Gibson uint32_t kvmppc_get_dfp(void) 20046659394fSDavid Gibson { 20056659394fSDavid Gibson return 
kvmppc_read_int_cpu_dt("ibm,dfp"); 20066659394fSDavid Gibson } 20076659394fSDavid Gibson 20081a61a9aeSStuart Yoder static int kvmppc_get_pvinfo(CPUPPCState *env, struct kvm_ppc_pvinfo *pvinfo) 200945024f09SAlexander Graf { 2010a60f24b5SAndreas Färber PowerPCCPU *cpu = ppc_env_get_cpu(env); 2011a60f24b5SAndreas Färber CPUState *cs = CPU(cpu); 201245024f09SAlexander Graf 20136fd33a75SAlexander Graf if (kvm_vm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO) && 20141a61a9aeSStuart Yoder !kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_PVINFO, pvinfo)) { 20151a61a9aeSStuart Yoder return 0; 20161a61a9aeSStuart Yoder } 201745024f09SAlexander Graf 20181a61a9aeSStuart Yoder return 1; 20191a61a9aeSStuart Yoder } 20201a61a9aeSStuart Yoder 20211a61a9aeSStuart Yoder int kvmppc_get_hasidle(CPUPPCState *env) 20221a61a9aeSStuart Yoder { 20231a61a9aeSStuart Yoder struct kvm_ppc_pvinfo pvinfo; 20241a61a9aeSStuart Yoder 20251a61a9aeSStuart Yoder if (!kvmppc_get_pvinfo(env, &pvinfo) && 20261a61a9aeSStuart Yoder (pvinfo.flags & KVM_PPC_PVINFO_FLAGS_EV_IDLE)) { 20271a61a9aeSStuart Yoder return 1; 20281a61a9aeSStuart Yoder } 20291a61a9aeSStuart Yoder 20301a61a9aeSStuart Yoder return 0; 20311a61a9aeSStuart Yoder } 20321a61a9aeSStuart Yoder 20331a61a9aeSStuart Yoder int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len) 20341a61a9aeSStuart Yoder { 20351a61a9aeSStuart Yoder uint32_t *hc = (uint32_t*)buf; 20361a61a9aeSStuart Yoder struct kvm_ppc_pvinfo pvinfo; 20371a61a9aeSStuart Yoder 20381a61a9aeSStuart Yoder if (!kvmppc_get_pvinfo(env, &pvinfo)) { 20391a61a9aeSStuart Yoder memcpy(buf, pvinfo.hcall, buf_len); 204045024f09SAlexander Graf return 0; 204145024f09SAlexander Graf } 204245024f09SAlexander Graf 204345024f09SAlexander Graf /* 2044d13fc32eSAlexander Graf * Fallback to always fail hypercalls regardless of endianness: 204545024f09SAlexander Graf * 2046d13fc32eSAlexander Graf * tdi 0,r0,72 (becomes b .+8 in wrong endian, nop in good endian) 204745024f09SAlexander Graf * 
li r3, -1 2048d13fc32eSAlexander Graf * b .+8 (becomes nop in wrong endian) 2049d13fc32eSAlexander Graf * bswap32(li r3, -1) 205045024f09SAlexander Graf */ 205145024f09SAlexander Graf 2052d13fc32eSAlexander Graf hc[0] = cpu_to_be32(0x08000048); 2053d13fc32eSAlexander Graf hc[1] = cpu_to_be32(0x3860ffff); 2054d13fc32eSAlexander Graf hc[2] = cpu_to_be32(0x48000008); 2055d13fc32eSAlexander Graf hc[3] = cpu_to_be32(bswap32(0x3860ffff)); 205645024f09SAlexander Graf 20570ddbd053SAlexey Kardashevskiy return 1; 205845024f09SAlexander Graf } 205945024f09SAlexander Graf 2060026bfd89SDavid Gibson static inline int kvmppc_enable_hcall(KVMState *s, target_ulong hcall) 2061026bfd89SDavid Gibson { 2062026bfd89SDavid Gibson return kvm_vm_enable_cap(s, KVM_CAP_PPC_ENABLE_HCALL, 0, hcall, 1); 2063026bfd89SDavid Gibson } 2064026bfd89SDavid Gibson 2065026bfd89SDavid Gibson void kvmppc_enable_logical_ci_hcalls(void) 2066026bfd89SDavid Gibson { 2067026bfd89SDavid Gibson /* 2068026bfd89SDavid Gibson * FIXME: it would be nice if we could detect the cases where 2069026bfd89SDavid Gibson * we're using a device which requires the in kernel 2070026bfd89SDavid Gibson * implementation of these hcalls, but the kernel lacks them and 2071026bfd89SDavid Gibson * produce a warning. 
2072026bfd89SDavid Gibson */ 2073026bfd89SDavid Gibson kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_LOAD); 2074026bfd89SDavid Gibson kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_STORE); 2075026bfd89SDavid Gibson } 2076026bfd89SDavid Gibson 2077ef9971ddSAlexey Kardashevskiy void kvmppc_enable_set_mode_hcall(void) 2078ef9971ddSAlexey Kardashevskiy { 2079ef9971ddSAlexey Kardashevskiy kvmppc_enable_hcall(kvm_state, H_SET_MODE); 2080ef9971ddSAlexey Kardashevskiy } 2081ef9971ddSAlexey Kardashevskiy 20825145ad4fSNathan Whitehorn void kvmppc_enable_clear_ref_mod_hcalls(void) 20835145ad4fSNathan Whitehorn { 20845145ad4fSNathan Whitehorn kvmppc_enable_hcall(kvm_state, H_CLEAR_REF); 20855145ad4fSNathan Whitehorn kvmppc_enable_hcall(kvm_state, H_CLEAR_MOD); 20865145ad4fSNathan Whitehorn } 20875145ad4fSNathan Whitehorn 20881bc22652SAndreas Färber void kvmppc_set_papr(PowerPCCPU *cpu) 2089f61b4bedSAlexander Graf { 20901bc22652SAndreas Färber CPUState *cs = CPU(cpu); 2091f61b4bedSAlexander Graf int ret; 2092f61b4bedSAlexander Graf 209348add816SCornelia Huck ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_PAPR, 0); 2094f61b4bedSAlexander Graf if (ret) { 2095072ed5f2SThomas Huth error_report("This vCPU type or KVM version does not support PAPR"); 2096072ed5f2SThomas Huth exit(1); 2097f61b4bedSAlexander Graf } 20989b00ea49SDavid Gibson 20999b00ea49SDavid Gibson /* Update the capability flag so we sync the right information 21009b00ea49SDavid Gibson * with kvm */ 21019b00ea49SDavid Gibson cap_papr = 1; 2102f1af19d7SDavid Gibson } 2103f61b4bedSAlexander Graf 2104d6e166c0SDavid Gibson int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr) 21056db5bb0fSAlexey Kardashevskiy { 2106d6e166c0SDavid Gibson return kvm_set_one_reg(CPU(cpu), KVM_REG_PPC_ARCH_COMPAT, &compat_pvr); 21076db5bb0fSAlexey Kardashevskiy } 21086db5bb0fSAlexey Kardashevskiy 21095b95b8b9SAlexander Graf void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy) 21105b95b8b9SAlexander Graf { 21115b95b8b9SAlexander Graf 
CPUState *cs = CPU(cpu); 21125b95b8b9SAlexander Graf int ret; 21135b95b8b9SAlexander Graf 211448add816SCornelia Huck ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_EPR, 0, mpic_proxy); 21155b95b8b9SAlexander Graf if (ret && mpic_proxy) { 2116072ed5f2SThomas Huth error_report("This KVM version does not support EPR"); 2117072ed5f2SThomas Huth exit(1); 21185b95b8b9SAlexander Graf } 21195b95b8b9SAlexander Graf } 21205b95b8b9SAlexander Graf 2121e97c3636SDavid Gibson int kvmppc_smt_threads(void) 2122e97c3636SDavid Gibson { 2123e97c3636SDavid Gibson return cap_ppc_smt ? cap_ppc_smt : 1; 2124e97c3636SDavid Gibson } 2125e97c3636SDavid Gibson 21267f763a5dSDavid Gibson #ifdef TARGET_PPC64 2127658fa66bSAlexey Kardashevskiy off_t kvmppc_alloc_rma(void **rma) 2128354ac20aSDavid Gibson { 2129354ac20aSDavid Gibson off_t size; 2130354ac20aSDavid Gibson int fd; 2131354ac20aSDavid Gibson struct kvm_allocate_rma ret; 2132354ac20aSDavid Gibson 2133354ac20aSDavid Gibson /* If cap_ppc_rma == 0, contiguous RMA allocation is not supported 2134354ac20aSDavid Gibson * if cap_ppc_rma == 1, contiguous RMA allocation is supported, but 2135354ac20aSDavid Gibson * not necessary on this hardware 2136354ac20aSDavid Gibson * if cap_ppc_rma == 2, contiguous RMA allocation is needed on this hardware 2137354ac20aSDavid Gibson * 2138354ac20aSDavid Gibson * FIXME: We should allow the user to force contiguous RMA 2139354ac20aSDavid Gibson * allocation in the cap_ppc_rma==1 case. 
2140354ac20aSDavid Gibson */ 2141354ac20aSDavid Gibson if (cap_ppc_rma < 2) { 2142354ac20aSDavid Gibson return 0; 2143354ac20aSDavid Gibson } 2144354ac20aSDavid Gibson 2145354ac20aSDavid Gibson fd = kvm_vm_ioctl(kvm_state, KVM_ALLOCATE_RMA, &ret); 2146354ac20aSDavid Gibson if (fd < 0) { 2147354ac20aSDavid Gibson fprintf(stderr, "KVM: Error on KVM_ALLOCATE_RMA: %s\n", 2148354ac20aSDavid Gibson strerror(errno)); 2149354ac20aSDavid Gibson return -1; 2150354ac20aSDavid Gibson } 2151354ac20aSDavid Gibson 2152354ac20aSDavid Gibson size = MIN(ret.rma_size, 256ul << 20); 2153354ac20aSDavid Gibson 2154658fa66bSAlexey Kardashevskiy *rma = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); 2155658fa66bSAlexey Kardashevskiy if (*rma == MAP_FAILED) { 2156354ac20aSDavid Gibson fprintf(stderr, "KVM: Error mapping RMA: %s\n", strerror(errno)); 2157354ac20aSDavid Gibson return -1; 2158354ac20aSDavid Gibson }; 2159354ac20aSDavid Gibson 2160354ac20aSDavid Gibson return size; 2161354ac20aSDavid Gibson } 2162354ac20aSDavid Gibson 21637f763a5dSDavid Gibson uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift) 21647f763a5dSDavid Gibson { 2165f36951c1SDavid Gibson struct kvm_ppc_smmu_info info; 2166f36951c1SDavid Gibson long rampagesize, best_page_shift; 2167f36951c1SDavid Gibson int i; 2168f36951c1SDavid Gibson 21697f763a5dSDavid Gibson if (cap_ppc_rma >= 2) { 21707f763a5dSDavid Gibson return current_size; 21717f763a5dSDavid Gibson } 2172f36951c1SDavid Gibson 2173f36951c1SDavid Gibson /* Find the largest hardware supported page size that's less than 2174f36951c1SDavid Gibson * or equal to the (logical) backing page size of guest RAM */ 2175182735efSAndreas Färber kvm_get_smmu_info(POWERPC_CPU(first_cpu), &info); 21769c607668SAlexey Kardashevskiy rampagesize = qemu_getrampagesize(); 2177f36951c1SDavid Gibson best_page_shift = 0; 2178f36951c1SDavid Gibson 2179f36951c1SDavid Gibson for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) { 2180f36951c1SDavid Gibson struct 
kvm_ppc_one_seg_page_size *sps = &info.sps[i]; 2181f36951c1SDavid Gibson 2182f36951c1SDavid Gibson if (!sps->page_shift) { 2183f36951c1SDavid Gibson continue; 2184f36951c1SDavid Gibson } 2185f36951c1SDavid Gibson 2186f36951c1SDavid Gibson if ((sps->page_shift > best_page_shift) 2187f36951c1SDavid Gibson && ((1UL << sps->page_shift) <= rampagesize)) { 2188f36951c1SDavid Gibson best_page_shift = sps->page_shift; 2189f36951c1SDavid Gibson } 2190f36951c1SDavid Gibson } 2191f36951c1SDavid Gibson 21927f763a5dSDavid Gibson return MIN(current_size, 2193f36951c1SDavid Gibson 1ULL << (best_page_shift + hash_shift - 7)); 21947f763a5dSDavid Gibson } 21957f763a5dSDavid Gibson #endif 21967f763a5dSDavid Gibson 2197da95324eSAlexey Kardashevskiy bool kvmppc_spapr_use_multitce(void) 2198da95324eSAlexey Kardashevskiy { 2199da95324eSAlexey Kardashevskiy return cap_spapr_multitce; 2200da95324eSAlexey Kardashevskiy } 2201da95324eSAlexey Kardashevskiy 22023dc410aeSAlexey Kardashevskiy int kvmppc_spapr_enable_inkernel_multitce(void) 22033dc410aeSAlexey Kardashevskiy { 22043dc410aeSAlexey Kardashevskiy int ret; 22053dc410aeSAlexey Kardashevskiy 22063dc410aeSAlexey Kardashevskiy ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_ENABLE_HCALL, 0, 22073dc410aeSAlexey Kardashevskiy H_PUT_TCE_INDIRECT, 1); 22083dc410aeSAlexey Kardashevskiy if (!ret) { 22093dc410aeSAlexey Kardashevskiy ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_ENABLE_HCALL, 0, 22103dc410aeSAlexey Kardashevskiy H_STUFF_TCE, 1); 22113dc410aeSAlexey Kardashevskiy } 22123dc410aeSAlexey Kardashevskiy 22133dc410aeSAlexey Kardashevskiy return ret; 22143dc410aeSAlexey Kardashevskiy } 22153dc410aeSAlexey Kardashevskiy 2216d6ee2a7cSAlexey Kardashevskiy void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift, 2217d6ee2a7cSAlexey Kardashevskiy uint64_t bus_offset, uint32_t nb_table, 2218d6ee2a7cSAlexey Kardashevskiy int *pfd, bool need_vfio) 22190f5cb298SDavid Gibson { 22200f5cb298SDavid Gibson long len; 22210f5cb298SDavid Gibson 
int fd; 22220f5cb298SDavid Gibson void *table; 22230f5cb298SDavid Gibson 2224b5aec396SDavid Gibson /* Must set fd to -1 so we don't try to munmap when called for 2225b5aec396SDavid Gibson * destroying the table, which the upper layers -will- do 2226b5aec396SDavid Gibson */ 2227b5aec396SDavid Gibson *pfd = -1; 22286a81dd17SDavid Gibson if (!cap_spapr_tce || (need_vfio && !cap_spapr_vfio)) { 22290f5cb298SDavid Gibson return NULL; 22300f5cb298SDavid Gibson } 22310f5cb298SDavid Gibson 2232d6ee2a7cSAlexey Kardashevskiy if (cap_spapr_tce_64) { 2233d6ee2a7cSAlexey Kardashevskiy struct kvm_create_spapr_tce_64 args = { 2234d6ee2a7cSAlexey Kardashevskiy .liobn = liobn, 2235d6ee2a7cSAlexey Kardashevskiy .page_shift = page_shift, 2236d6ee2a7cSAlexey Kardashevskiy .offset = bus_offset >> page_shift, 2237d6ee2a7cSAlexey Kardashevskiy .size = nb_table, 2238d6ee2a7cSAlexey Kardashevskiy .flags = 0 2239d6ee2a7cSAlexey Kardashevskiy }; 2240d6ee2a7cSAlexey Kardashevskiy fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE_64, &args); 2241d6ee2a7cSAlexey Kardashevskiy if (fd < 0) { 2242d6ee2a7cSAlexey Kardashevskiy fprintf(stderr, 2243d6ee2a7cSAlexey Kardashevskiy "KVM: Failed to create TCE64 table for liobn 0x%x\n", 2244d6ee2a7cSAlexey Kardashevskiy liobn); 2245d6ee2a7cSAlexey Kardashevskiy return NULL; 2246d6ee2a7cSAlexey Kardashevskiy } 2247d6ee2a7cSAlexey Kardashevskiy } else if (cap_spapr_tce) { 2248d6ee2a7cSAlexey Kardashevskiy uint64_t window_size = (uint64_t) nb_table << page_shift; 2249d6ee2a7cSAlexey Kardashevskiy struct kvm_create_spapr_tce args = { 2250d6ee2a7cSAlexey Kardashevskiy .liobn = liobn, 2251d6ee2a7cSAlexey Kardashevskiy .window_size = window_size, 2252d6ee2a7cSAlexey Kardashevskiy }; 2253d6ee2a7cSAlexey Kardashevskiy if ((window_size != args.window_size) || bus_offset) { 2254d6ee2a7cSAlexey Kardashevskiy return NULL; 2255d6ee2a7cSAlexey Kardashevskiy } 22560f5cb298SDavid Gibson fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE, &args); 22570f5cb298SDavid Gibson if 
(fd < 0) { 2258b5aec396SDavid Gibson fprintf(stderr, "KVM: Failed to create TCE table for liobn 0x%x\n", 2259b5aec396SDavid Gibson liobn); 22600f5cb298SDavid Gibson return NULL; 22610f5cb298SDavid Gibson } 2262d6ee2a7cSAlexey Kardashevskiy } else { 2263d6ee2a7cSAlexey Kardashevskiy return NULL; 2264d6ee2a7cSAlexey Kardashevskiy } 22650f5cb298SDavid Gibson 2266d6ee2a7cSAlexey Kardashevskiy len = nb_table * sizeof(uint64_t); 22670f5cb298SDavid Gibson /* FIXME: round this up to page size */ 22680f5cb298SDavid Gibson 226974b41e56SDavid Gibson table = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); 22700f5cb298SDavid Gibson if (table == MAP_FAILED) { 2271b5aec396SDavid Gibson fprintf(stderr, "KVM: Failed to map TCE table for liobn 0x%x\n", 2272b5aec396SDavid Gibson liobn); 22730f5cb298SDavid Gibson close(fd); 22740f5cb298SDavid Gibson return NULL; 22750f5cb298SDavid Gibson } 22760f5cb298SDavid Gibson 22770f5cb298SDavid Gibson *pfd = fd; 22780f5cb298SDavid Gibson return table; 22790f5cb298SDavid Gibson } 22800f5cb298SDavid Gibson 2281523e7b8aSAlexey Kardashevskiy int kvmppc_remove_spapr_tce(void *table, int fd, uint32_t nb_table) 22820f5cb298SDavid Gibson { 22830f5cb298SDavid Gibson long len; 22840f5cb298SDavid Gibson 22850f5cb298SDavid Gibson if (fd < 0) { 22860f5cb298SDavid Gibson return -1; 22870f5cb298SDavid Gibson } 22880f5cb298SDavid Gibson 2289523e7b8aSAlexey Kardashevskiy len = nb_table * sizeof(uint64_t); 22900f5cb298SDavid Gibson if ((munmap(table, len) < 0) || 22910f5cb298SDavid Gibson (close(fd) < 0)) { 2292b5aec396SDavid Gibson fprintf(stderr, "KVM: Unexpected error removing TCE table: %s", 2293b5aec396SDavid Gibson strerror(errno)); 22940f5cb298SDavid Gibson /* Leak the table */ 22950f5cb298SDavid Gibson } 22960f5cb298SDavid Gibson 22970f5cb298SDavid Gibson return 0; 22980f5cb298SDavid Gibson } 22990f5cb298SDavid Gibson 23007f763a5dSDavid Gibson int kvmppc_reset_htab(int shift_hint) 23017f763a5dSDavid Gibson { 23027f763a5dSDavid Gibson uint32_t 
shift = shift_hint; 23037f763a5dSDavid Gibson 2304ace9a2cbSDavid Gibson if (!kvm_enabled()) { 2305ace9a2cbSDavid Gibson /* Full emulation, tell caller to allocate htab itself */ 2306ace9a2cbSDavid Gibson return 0; 2307ace9a2cbSDavid Gibson } 2308ace9a2cbSDavid Gibson if (kvm_check_extension(kvm_state, KVM_CAP_PPC_ALLOC_HTAB)) { 23097f763a5dSDavid Gibson int ret; 23107f763a5dSDavid Gibson ret = kvm_vm_ioctl(kvm_state, KVM_PPC_ALLOCATE_HTAB, &shift); 2311ace9a2cbSDavid Gibson if (ret == -ENOTTY) { 2312ace9a2cbSDavid Gibson /* At least some versions of PR KVM advertise the 2313ace9a2cbSDavid Gibson * capability, but don't implement the ioctl(). Oops. 2314ace9a2cbSDavid Gibson * Return 0 so that we allocate the htab in qemu, as is 2315ace9a2cbSDavid Gibson * correct for PR. */ 2316ace9a2cbSDavid Gibson return 0; 2317ace9a2cbSDavid Gibson } else if (ret < 0) { 23187f763a5dSDavid Gibson return ret; 23197f763a5dSDavid Gibson } 23207f763a5dSDavid Gibson return shift; 23217f763a5dSDavid Gibson } 23227f763a5dSDavid Gibson 2323ace9a2cbSDavid Gibson /* We have a kernel that predates the htab reset calls. For PR 2324ace9a2cbSDavid Gibson * KVM, we need to allocate the htab ourselves, for an HV KVM of 232596c9cff0SThomas Huth * this era, it has allocated a 16MB fixed size hash table already. 
*/ 232696c9cff0SThomas Huth if (kvmppc_is_pr(kvm_state)) { 2327ace9a2cbSDavid Gibson /* PR - tell caller to allocate htab */ 23287f763a5dSDavid Gibson return 0; 2329ace9a2cbSDavid Gibson } else { 2330ace9a2cbSDavid Gibson /* HV - assume 16MB kernel allocated htab */ 2331ace9a2cbSDavid Gibson return 24; 2332ace9a2cbSDavid Gibson } 23337f763a5dSDavid Gibson } 23347f763a5dSDavid Gibson 2335a1e98583SDavid Gibson static inline uint32_t mfpvr(void) 2336a1e98583SDavid Gibson { 2337a1e98583SDavid Gibson uint32_t pvr; 2338a1e98583SDavid Gibson 2339a1e98583SDavid Gibson asm ("mfpvr %0" 2340a1e98583SDavid Gibson : "=r"(pvr)); 2341a1e98583SDavid Gibson return pvr; 2342a1e98583SDavid Gibson } 2343a1e98583SDavid Gibson 2344a7342588SDavid Gibson static void alter_insns(uint64_t *word, uint64_t flags, bool on) 2345a7342588SDavid Gibson { 2346a7342588SDavid Gibson if (on) { 2347a7342588SDavid Gibson *word |= flags; 2348a7342588SDavid Gibson } else { 2349a7342588SDavid Gibson *word &= ~flags; 2350a7342588SDavid Gibson } 2351a7342588SDavid Gibson } 2352a7342588SDavid Gibson 23532985b86bSAndreas Färber static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data) 23542985b86bSAndreas Färber { 23552985b86bSAndreas Färber PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); 2356a7342588SDavid Gibson uint32_t vmx = kvmppc_get_vmx(); 2357a7342588SDavid Gibson uint32_t dfp = kvmppc_get_dfp(); 23580cbad81fSDavid Gibson uint32_t dcache_size = kvmppc_read_int_cpu_dt("d-cache-size"); 23590cbad81fSDavid Gibson uint32_t icache_size = kvmppc_read_int_cpu_dt("i-cache-size"); 2360a1e98583SDavid Gibson 2361cfe34f44SAndreas Färber /* Now fix up the class with information we can query from the host */ 23623bc9ccc0SAlexey Kardashevskiy pcc->pvr = mfpvr(); 2363a7342588SDavid Gibson 236470bca53fSAlexander Graf if (vmx != -1) { 236570bca53fSAlexander Graf /* Only override when we know what the host supports */ 2366cfe34f44SAndreas Färber alter_insns(&pcc->insns_flags, PPC_ALTIVEC, vmx > 0); 
2367cfe34f44SAndreas Färber alter_insns(&pcc->insns_flags2, PPC2_VSX, vmx > 1); 236870bca53fSAlexander Graf } 236970bca53fSAlexander Graf if (dfp != -1) { 237070bca53fSAlexander Graf /* Only override when we know what the host supports */ 2371cfe34f44SAndreas Färber alter_insns(&pcc->insns_flags2, PPC2_DFP, dfp); 237270bca53fSAlexander Graf } 23730cbad81fSDavid Gibson 23740cbad81fSDavid Gibson if (dcache_size != -1) { 23750cbad81fSDavid Gibson pcc->l1_dcache_size = dcache_size; 23760cbad81fSDavid Gibson } 23770cbad81fSDavid Gibson 23780cbad81fSDavid Gibson if (icache_size != -1) { 23790cbad81fSDavid Gibson pcc->l1_icache_size = icache_size; 23800cbad81fSDavid Gibson } 2381c64abd1fSSam Bobroff 2382c64abd1fSSam Bobroff #if defined(TARGET_PPC64) 2383c64abd1fSSam Bobroff pcc->radix_page_info = kvm_get_radix_page_info(); 23845f3066d8SDavid Gibson 23855f3066d8SDavid Gibson if ((pcc->pvr & 0xffffff00) == CPU_POWERPC_POWER9_DD1) { 23865f3066d8SDavid Gibson /* 23875f3066d8SDavid Gibson * POWER9 DD1 has some bugs which make it not really ISA 3.00 23885f3066d8SDavid Gibson * compliant. More importantly, advertising ISA 3.00 23895f3066d8SDavid Gibson * architected mode may prevent guests from activating 23905f3066d8SDavid Gibson * necessary DD1 workarounds. 
23915f3066d8SDavid Gibson */ 23925f3066d8SDavid Gibson pcc->pcr_supported &= ~(PCR_COMPAT_3_00 | PCR_COMPAT_2_07 23935f3066d8SDavid Gibson | PCR_COMPAT_2_06 | PCR_COMPAT_2_05); 23945f3066d8SDavid Gibson } 2395c64abd1fSSam Bobroff #endif /* defined(TARGET_PPC64) */ 2396a1e98583SDavid Gibson } 2397a1e98583SDavid Gibson 23983b961124SStuart Yoder bool kvmppc_has_cap_epr(void) 23993b961124SStuart Yoder { 24003b961124SStuart Yoder return cap_epr; 24013b961124SStuart Yoder } 24023b961124SStuart Yoder 24037c43bca0SAneesh Kumar K.V bool kvmppc_has_cap_htab_fd(void) 24047c43bca0SAneesh Kumar K.V { 24057c43bca0SAneesh Kumar K.V return cap_htab_fd; 24067c43bca0SAneesh Kumar K.V } 24077c43bca0SAneesh Kumar K.V 240887a91de6SAlexander Graf bool kvmppc_has_cap_fixup_hcalls(void) 240987a91de6SAlexander Graf { 241087a91de6SAlexander Graf return cap_fixup_hcalls; 241187a91de6SAlexander Graf } 241287a91de6SAlexander Graf 2413bac3bf28SThomas Huth bool kvmppc_has_cap_htm(void) 2414bac3bf28SThomas Huth { 2415bac3bf28SThomas Huth return cap_htm; 2416bac3bf28SThomas Huth } 2417bac3bf28SThomas Huth 2418cf1c4cceSSam Bobroff bool kvmppc_has_cap_mmu_radix(void) 2419cf1c4cceSSam Bobroff { 2420cf1c4cceSSam Bobroff return cap_mmu_radix; 2421cf1c4cceSSam Bobroff } 2422cf1c4cceSSam Bobroff 2423cf1c4cceSSam Bobroff bool kvmppc_has_cap_mmu_hash_v3(void) 2424cf1c4cceSSam Bobroff { 2425cf1c4cceSSam Bobroff return cap_mmu_hash_v3; 2426cf1c4cceSSam Bobroff } 2427cf1c4cceSSam Bobroff 242852b2519cSThomas Huth PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void) 242952b2519cSThomas Huth { 243052b2519cSThomas Huth uint32_t host_pvr = mfpvr(); 243152b2519cSThomas Huth PowerPCCPUClass *pvr_pcc; 243252b2519cSThomas Huth 243352b2519cSThomas Huth pvr_pcc = ppc_cpu_class_by_pvr(host_pvr); 243452b2519cSThomas Huth if (pvr_pcc == NULL) { 243552b2519cSThomas Huth pvr_pcc = ppc_cpu_class_by_pvr_mask(host_pvr); 243652b2519cSThomas Huth } 243752b2519cSThomas Huth 243852b2519cSThomas Huth return pvr_pcc; 243952b2519cSThomas 
Huth } 244052b2519cSThomas Huth 24415ba4576bSAndreas Färber static int kvm_ppc_register_host_cpu_type(void) 24425ba4576bSAndreas Färber { 24435ba4576bSAndreas Färber TypeInfo type_info = { 24445ba4576bSAndreas Färber .name = TYPE_HOST_POWERPC_CPU, 24455ba4576bSAndreas Färber .class_init = kvmppc_host_cpu_class_init, 24465ba4576bSAndreas Färber }; 24475ba4576bSAndreas Färber PowerPCCPUClass *pvr_pcc; 24485b79b1caSAlexey Kardashevskiy DeviceClass *dc; 2449715d4b96SThomas Huth int i; 24505ba4576bSAndreas Färber 245152b2519cSThomas Huth pvr_pcc = kvm_ppc_get_host_cpu_class(); 24523bc9ccc0SAlexey Kardashevskiy if (pvr_pcc == NULL) { 24535ba4576bSAndreas Färber return -1; 24545ba4576bSAndreas Färber } 24555ba4576bSAndreas Färber type_info.parent = object_class_get_name(OBJECT_CLASS(pvr_pcc)); 24565ba4576bSAndreas Färber type_register(&type_info); 24575b79b1caSAlexey Kardashevskiy 24583b542549SBharata B Rao #if defined(TARGET_PPC64) 24593b542549SBharata B Rao type_info.name = g_strdup_printf("%s-"TYPE_SPAPR_CPU_CORE, "host"); 24603b542549SBharata B Rao type_info.parent = TYPE_SPAPR_CPU_CORE, 24617ebaf795SBharata B Rao type_info.instance_size = sizeof(sPAPRCPUCore); 24627ebaf795SBharata B Rao type_info.instance_init = NULL; 24637ebaf795SBharata B Rao type_info.class_init = spapr_cpu_core_class_init; 24647ebaf795SBharata B Rao type_info.class_data = (void *) "host"; 24653b542549SBharata B Rao type_register(&type_info); 24663b542549SBharata B Rao g_free((void *)type_info.name); 24673b542549SBharata B Rao #endif 24683b542549SBharata B Rao 2469715d4b96SThomas Huth /* 2470715d4b96SThomas Huth * Update generic CPU family class alias (e.g. 
on a POWER8NVL host, 2471715d4b96SThomas Huth * we want "POWER8" to be a "family" alias that points to the current 2472715d4b96SThomas Huth * host CPU type, too) 2473715d4b96SThomas Huth */ 2474715d4b96SThomas Huth dc = DEVICE_CLASS(ppc_cpu_get_family_class(pvr_pcc)); 2475715d4b96SThomas Huth for (i = 0; ppc_cpu_aliases[i].alias != NULL; i++) { 2476715d4b96SThomas Huth if (strcmp(ppc_cpu_aliases[i].alias, dc->desc) == 0) { 2477715d4b96SThomas Huth ObjectClass *oc = OBJECT_CLASS(pvr_pcc); 2478715d4b96SThomas Huth char *suffix; 2479715d4b96SThomas Huth 2480715d4b96SThomas Huth ppc_cpu_aliases[i].model = g_strdup(object_class_get_name(oc)); 2481715d4b96SThomas Huth suffix = strstr(ppc_cpu_aliases[i].model, "-"TYPE_POWERPC_CPU); 2482715d4b96SThomas Huth if (suffix) { 2483715d4b96SThomas Huth *suffix = 0; 2484715d4b96SThomas Huth } 2485715d4b96SThomas Huth ppc_cpu_aliases[i].oc = oc; 2486715d4b96SThomas Huth break; 2487715d4b96SThomas Huth } 2488715d4b96SThomas Huth } 2489715d4b96SThomas Huth 24905ba4576bSAndreas Färber return 0; 24915ba4576bSAndreas Färber } 24925ba4576bSAndreas Färber 2493feaa64c4SDavid Gibson int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function) 2494feaa64c4SDavid Gibson { 2495feaa64c4SDavid Gibson struct kvm_rtas_token_args args = { 2496feaa64c4SDavid Gibson .token = token, 2497feaa64c4SDavid Gibson }; 2498feaa64c4SDavid Gibson 2499feaa64c4SDavid Gibson if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_RTAS)) { 2500feaa64c4SDavid Gibson return -ENOENT; 2501feaa64c4SDavid Gibson } 2502feaa64c4SDavid Gibson 2503feaa64c4SDavid Gibson strncpy(args.name, function, sizeof(args.name)); 2504feaa64c4SDavid Gibson 2505feaa64c4SDavid Gibson return kvm_vm_ioctl(kvm_state, KVM_PPC_RTAS_DEFINE_TOKEN, &args); 2506feaa64c4SDavid Gibson } 250712b1143bSDavid Gibson 2508e68cb8b4SAlexey Kardashevskiy int kvmppc_get_htab_fd(bool write) 2509e68cb8b4SAlexey Kardashevskiy { 2510e68cb8b4SAlexey Kardashevskiy struct kvm_get_htab_fd s = { 2511e68cb8b4SAlexey 
Kardashevskiy .flags = write ? KVM_GET_HTAB_WRITE : 0, 2512e68cb8b4SAlexey Kardashevskiy .start_index = 0, 2513e68cb8b4SAlexey Kardashevskiy }; 2514e68cb8b4SAlexey Kardashevskiy 2515e68cb8b4SAlexey Kardashevskiy if (!cap_htab_fd) { 2516e68cb8b4SAlexey Kardashevskiy fprintf(stderr, "KVM version doesn't support saving the hash table\n"); 2517e68cb8b4SAlexey Kardashevskiy return -1; 2518e68cb8b4SAlexey Kardashevskiy } 2519e68cb8b4SAlexey Kardashevskiy 2520e68cb8b4SAlexey Kardashevskiy return kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &s); 2521e68cb8b4SAlexey Kardashevskiy } 2522e68cb8b4SAlexey Kardashevskiy 2523e68cb8b4SAlexey Kardashevskiy int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns) 2524e68cb8b4SAlexey Kardashevskiy { 2525bc72ad67SAlex Bligh int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); 2526e68cb8b4SAlexey Kardashevskiy uint8_t buf[bufsize]; 2527e68cb8b4SAlexey Kardashevskiy ssize_t rc; 2528e68cb8b4SAlexey Kardashevskiy 2529e68cb8b4SAlexey Kardashevskiy do { 2530e68cb8b4SAlexey Kardashevskiy rc = read(fd, buf, bufsize); 2531e68cb8b4SAlexey Kardashevskiy if (rc < 0) { 2532e68cb8b4SAlexey Kardashevskiy fprintf(stderr, "Error reading data from KVM HTAB fd: %s\n", 2533e68cb8b4SAlexey Kardashevskiy strerror(errno)); 2534e68cb8b4SAlexey Kardashevskiy return rc; 2535e68cb8b4SAlexey Kardashevskiy } else if (rc) { 2536e094c4c1SCédric Le Goater uint8_t *buffer = buf; 2537e094c4c1SCédric Le Goater ssize_t n = rc; 2538e094c4c1SCédric Le Goater while (n) { 2539e094c4c1SCédric Le Goater struct kvm_get_htab_header *head = 2540e094c4c1SCédric Le Goater (struct kvm_get_htab_header *) buffer; 2541e094c4c1SCédric Le Goater size_t chunksize = sizeof(*head) + 2542e094c4c1SCédric Le Goater HASH_PTE_SIZE_64 * head->n_valid; 2543e094c4c1SCédric Le Goater 2544e094c4c1SCédric Le Goater qemu_put_be32(f, head->index); 2545e094c4c1SCédric Le Goater qemu_put_be16(f, head->n_valid); 2546e094c4c1SCédric Le Goater qemu_put_be16(f, head->n_invalid); 
2547e094c4c1SCédric Le Goater qemu_put_buffer(f, (void *)(head + 1), 2548e094c4c1SCédric Le Goater HASH_PTE_SIZE_64 * head->n_valid); 2549e094c4c1SCédric Le Goater 2550e094c4c1SCédric Le Goater buffer += chunksize; 2551e094c4c1SCédric Le Goater n -= chunksize; 2552e094c4c1SCédric Le Goater } 2553e68cb8b4SAlexey Kardashevskiy } 2554e68cb8b4SAlexey Kardashevskiy } while ((rc != 0) 2555e68cb8b4SAlexey Kardashevskiy && ((max_ns < 0) 2556bc72ad67SAlex Bligh || ((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) < max_ns))); 2557e68cb8b4SAlexey Kardashevskiy 2558e68cb8b4SAlexey Kardashevskiy return (rc == 0) ? 1 : 0; 2559e68cb8b4SAlexey Kardashevskiy } 2560e68cb8b4SAlexey Kardashevskiy 2561e68cb8b4SAlexey Kardashevskiy int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index, 2562e68cb8b4SAlexey Kardashevskiy uint16_t n_valid, uint16_t n_invalid) 2563e68cb8b4SAlexey Kardashevskiy { 2564e68cb8b4SAlexey Kardashevskiy struct kvm_get_htab_header *buf; 2565e68cb8b4SAlexey Kardashevskiy size_t chunksize = sizeof(*buf) + n_valid*HASH_PTE_SIZE_64; 2566e68cb8b4SAlexey Kardashevskiy ssize_t rc; 2567e68cb8b4SAlexey Kardashevskiy 2568e68cb8b4SAlexey Kardashevskiy buf = alloca(chunksize); 2569e68cb8b4SAlexey Kardashevskiy buf->index = index; 2570e68cb8b4SAlexey Kardashevskiy buf->n_valid = n_valid; 2571e68cb8b4SAlexey Kardashevskiy buf->n_invalid = n_invalid; 2572e68cb8b4SAlexey Kardashevskiy 2573e68cb8b4SAlexey Kardashevskiy qemu_get_buffer(f, (void *)(buf + 1), HASH_PTE_SIZE_64*n_valid); 2574e68cb8b4SAlexey Kardashevskiy 2575e68cb8b4SAlexey Kardashevskiy rc = write(fd, buf, chunksize); 2576e68cb8b4SAlexey Kardashevskiy if (rc < 0) { 2577e68cb8b4SAlexey Kardashevskiy fprintf(stderr, "Error writing KVM hash table: %s\n", 2578e68cb8b4SAlexey Kardashevskiy strerror(errno)); 2579e68cb8b4SAlexey Kardashevskiy return rc; 2580e68cb8b4SAlexey Kardashevskiy } 2581e68cb8b4SAlexey Kardashevskiy if (rc != chunksize) { 2582e68cb8b4SAlexey Kardashevskiy /* We should never get a short 
write on a single chunk */ 2583e68cb8b4SAlexey Kardashevskiy fprintf(stderr, "Short write, restoring KVM hash table\n"); 2584e68cb8b4SAlexey Kardashevskiy return -1; 2585e68cb8b4SAlexey Kardashevskiy } 2586e68cb8b4SAlexey Kardashevskiy return 0; 2587e68cb8b4SAlexey Kardashevskiy } 2588e68cb8b4SAlexey Kardashevskiy 258920d695a9SAndreas Färber bool kvm_arch_stop_on_emulation_error(CPUState *cpu) 25904513d923SGleb Natapov { 25914513d923SGleb Natapov return true; 25924513d923SGleb Natapov } 2593a1b87fe0SJan Kiszka 259482169660SScott Wood void kvm_arch_init_irq_routing(KVMState *s) 259582169660SScott Wood { 259682169660SScott Wood } 2597c65f9a07SGreg Kurz 25981ad9f0a4SDavid Gibson void kvmppc_read_hptes(ppc_hash_pte64_t *hptes, hwaddr ptex, int n) 25991ad9f0a4SDavid Gibson { 26001ad9f0a4SDavid Gibson struct kvm_get_htab_fd ghf = { 26011ad9f0a4SDavid Gibson .flags = 0, 26021ad9f0a4SDavid Gibson .start_index = ptex, 26037c43bca0SAneesh Kumar K.V }; 26041ad9f0a4SDavid Gibson int fd, rc; 26051ad9f0a4SDavid Gibson int i; 26067c43bca0SAneesh Kumar K.V 26071ad9f0a4SDavid Gibson fd = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &ghf); 26081ad9f0a4SDavid Gibson if (fd < 0) { 26091ad9f0a4SDavid Gibson hw_error("kvmppc_read_hptes: Unable to open HPT fd"); 26101ad9f0a4SDavid Gibson } 26111ad9f0a4SDavid Gibson 26121ad9f0a4SDavid Gibson i = 0; 26131ad9f0a4SDavid Gibson while (i < n) { 26141ad9f0a4SDavid Gibson struct kvm_get_htab_header *hdr; 26151ad9f0a4SDavid Gibson int m = n < HPTES_PER_GROUP ? 
n : HPTES_PER_GROUP; 26161ad9f0a4SDavid Gibson char buf[sizeof(*hdr) + m * HASH_PTE_SIZE_64]; 26171ad9f0a4SDavid Gibson 26181ad9f0a4SDavid Gibson rc = read(fd, buf, sizeof(buf)); 26191ad9f0a4SDavid Gibson if (rc < 0) { 26201ad9f0a4SDavid Gibson hw_error("kvmppc_read_hptes: Unable to read HPTEs"); 26211ad9f0a4SDavid Gibson } 26221ad9f0a4SDavid Gibson 26231ad9f0a4SDavid Gibson hdr = (struct kvm_get_htab_header *)buf; 26241ad9f0a4SDavid Gibson while ((i < n) && ((char *)hdr < (buf + rc))) { 26251ad9f0a4SDavid Gibson int invalid = hdr->n_invalid; 26261ad9f0a4SDavid Gibson 26271ad9f0a4SDavid Gibson if (hdr->index != (ptex + i)) { 26281ad9f0a4SDavid Gibson hw_error("kvmppc_read_hptes: Unexpected HPTE index %"PRIu32 26291ad9f0a4SDavid Gibson " != (%"HWADDR_PRIu" + %d", hdr->index, ptex, i); 26301ad9f0a4SDavid Gibson } 26311ad9f0a4SDavid Gibson 26321ad9f0a4SDavid Gibson memcpy(hptes + i, hdr + 1, HASH_PTE_SIZE_64 * hdr->n_valid); 26331ad9f0a4SDavid Gibson i += hdr->n_valid; 26341ad9f0a4SDavid Gibson 26351ad9f0a4SDavid Gibson if ((n - i) < invalid) { 26361ad9f0a4SDavid Gibson invalid = n - i; 26371ad9f0a4SDavid Gibson } 26381ad9f0a4SDavid Gibson memset(hptes + i, 0, invalid * HASH_PTE_SIZE_64); 26391ad9f0a4SDavid Gibson i += hdr->n_invalid; 26401ad9f0a4SDavid Gibson 26411ad9f0a4SDavid Gibson hdr = (struct kvm_get_htab_header *) 26421ad9f0a4SDavid Gibson ((char *)(hdr + 1) + HASH_PTE_SIZE_64 * hdr->n_valid); 26431ad9f0a4SDavid Gibson } 26441ad9f0a4SDavid Gibson } 26451ad9f0a4SDavid Gibson 26461ad9f0a4SDavid Gibson close(fd); 26471ad9f0a4SDavid Gibson } 26481ad9f0a4SDavid Gibson 26491ad9f0a4SDavid Gibson void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1) 26507c43bca0SAneesh Kumar K.V { 26511ad9f0a4SDavid Gibson int fd, rc; 26527c43bca0SAneesh Kumar K.V struct kvm_get_htab_fd ghf; 26531ad9f0a4SDavid Gibson struct { 26541ad9f0a4SDavid Gibson struct kvm_get_htab_header hdr; 26551ad9f0a4SDavid Gibson uint64_t pte0; 26561ad9f0a4SDavid Gibson uint64_t pte1; 
26571ad9f0a4SDavid Gibson } buf; 2658c1385933SAneesh Kumar K.V 2659c1385933SAneesh Kumar K.V ghf.flags = 0; 2660c1385933SAneesh Kumar K.V ghf.start_index = 0; /* Ignored */ 26611ad9f0a4SDavid Gibson fd = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &ghf); 26621ad9f0a4SDavid Gibson if (fd < 0) { 26631ad9f0a4SDavid Gibson hw_error("kvmppc_write_hpte: Unable to open HPT fd"); 2664c1385933SAneesh Kumar K.V } 2665c1385933SAneesh Kumar K.V 26661ad9f0a4SDavid Gibson buf.hdr.n_valid = 1; 26671ad9f0a4SDavid Gibson buf.hdr.n_invalid = 0; 26681ad9f0a4SDavid Gibson buf.hdr.index = ptex; 26691ad9f0a4SDavid Gibson buf.pte0 = cpu_to_be64(pte0); 26701ad9f0a4SDavid Gibson buf.pte1 = cpu_to_be64(pte1); 26711ad9f0a4SDavid Gibson 26721ad9f0a4SDavid Gibson rc = write(fd, &buf, sizeof(buf)); 26731ad9f0a4SDavid Gibson if (rc != sizeof(buf)) { 26741ad9f0a4SDavid Gibson hw_error("kvmppc_write_hpte: Unable to update KVM HPT"); 2675c1385933SAneesh Kumar K.V } 26761ad9f0a4SDavid Gibson close(fd); 2677c1385933SAneesh Kumar K.V } 26789e03a040SFrank Blaschka 26799e03a040SFrank Blaschka int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route, 2680dc9f06caSPavel Fedin uint64_t address, uint32_t data, PCIDevice *dev) 26819e03a040SFrank Blaschka { 26829e03a040SFrank Blaschka return 0; 26839e03a040SFrank Blaschka } 26841850b6b7SEric Auger 268538d87493SPeter Xu int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route, 268638d87493SPeter Xu int vector, PCIDevice *dev) 268738d87493SPeter Xu { 268838d87493SPeter Xu return 0; 268938d87493SPeter Xu } 269038d87493SPeter Xu 269138d87493SPeter Xu int kvm_arch_release_virq_post(int virq) 269238d87493SPeter Xu { 269338d87493SPeter Xu return 0; 269438d87493SPeter Xu } 269538d87493SPeter Xu 26961850b6b7SEric Auger int kvm_arch_msi_data_to_gsi(uint32_t data) 26971850b6b7SEric Auger { 26981850b6b7SEric Auger return data & 0xffff; 26991850b6b7SEric Auger } 27004d9392beSThomas Huth 27014d9392beSThomas Huth int kvmppc_enable_hwrng(void) 
27024d9392beSThomas Huth { 27034d9392beSThomas Huth if (!kvm_enabled() || !kvm_check_extension(kvm_state, KVM_CAP_PPC_HWRNG)) { 27044d9392beSThomas Huth return -1; 27054d9392beSThomas Huth } 27064d9392beSThomas Huth 27074d9392beSThomas Huth return kvmppc_enable_hcall(kvm_state, H_RANDOM); 27084d9392beSThomas Huth } 2709