xref: /qemu/target/ppc/kvm.c (revision ce2918cbc31e190e7d644c684dcc2bbcb6b9a9df)
1d76d1650Saurel32 /*
2d76d1650Saurel32  * PowerPC implementation of KVM hooks
3d76d1650Saurel32  *
4d76d1650Saurel32  * Copyright IBM Corp. 2007
590dc8812SScott Wood  * Copyright (C) 2011 Freescale Semiconductor, Inc.
6d76d1650Saurel32  *
7d76d1650Saurel32  * Authors:
8d76d1650Saurel32  *  Jerone Young <jyoung5@us.ibm.com>
9d76d1650Saurel32  *  Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
10d76d1650Saurel32  *  Hollis Blanchard <hollisb@us.ibm.com>
11d76d1650Saurel32  *
12d76d1650Saurel32  * This work is licensed under the terms of the GNU GPL, version 2 or later.
13d76d1650Saurel32  * See the COPYING file in the top-level directory.
14d76d1650Saurel32  *
15d76d1650Saurel32  */
16d76d1650Saurel32 
170d75590dSPeter Maydell #include "qemu/osdep.h"
18eadaada1SAlexander Graf #include <dirent.h>
19d76d1650Saurel32 #include <sys/ioctl.h>
204656e1f0SBenjamin Herrenschmidt #include <sys/vfs.h>
21d76d1650Saurel32 
22d76d1650Saurel32 #include <linux/kvm.h>
23d76d1650Saurel32 
24d76d1650Saurel32 #include "qemu-common.h"
2530f4b05bSDavid Gibson #include "qapi/error.h"
26072ed5f2SThomas Huth #include "qemu/error-report.h"
2733c11879SPaolo Bonzini #include "cpu.h"
28715d4b96SThomas Huth #include "cpu-models.h"
291de7afc9SPaolo Bonzini #include "qemu/timer.h"
309c17d615SPaolo Bonzini #include "sysemu/sysemu.h"
31b3946626SVincent Palatin #include "sysemu/hw_accel.h"
32d76d1650Saurel32 #include "kvm_ppc.h"
339c17d615SPaolo Bonzini #include "sysemu/cpus.h"
349c17d615SPaolo Bonzini #include "sysemu/device_tree.h"
35d5aea6f3SDavid Gibson #include "mmu-hash64.h"
36d76d1650Saurel32 
37f61b4bedSAlexander Graf #include "hw/sysbus.h"
380d09e41aSPaolo Bonzini #include "hw/ppc/spapr.h"
397ebaf795SBharata B Rao #include "hw/ppc/spapr_cpu_core.h"
4098a8b524SAlexey Kardashevskiy #include "hw/ppc/ppc.h"
4131f2cb8fSBharat Bhushan #include "sysemu/watchdog.h"
42b36f100eSAlexey Kardashevskiy #include "trace.h"
4388365d17SBharat Bhushan #include "exec/gdbstub.h"
444c663752SPaolo Bonzini #include "exec/memattrs.h"
459c607668SAlexey Kardashevskiy #include "exec/ram_addr.h"
462d103aaeSMichael Roth #include "sysemu/hostmem.h"
47f348b6d1SVeronia Bahaa #include "qemu/cutils.h"
489c607668SAlexey Kardashevskiy #include "qemu/mmap-alloc.h"
49f3d9f303SSam Bobroff #include "elf.h"
50c64abd1fSSam Bobroff #include "sysemu/kvm_int.h"
51f61b4bedSAlexander Graf 
52d76d1650Saurel32 //#define DEBUG_KVM
53d76d1650Saurel32 
54d76d1650Saurel32 #ifdef DEBUG_KVM
55da56ff91SPeter Maydell #define DPRINTF(fmt, ...) \
56d76d1650Saurel32     do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
57d76d1650Saurel32 #else
58da56ff91SPeter Maydell #define DPRINTF(fmt, ...) \
59d76d1650Saurel32     do { } while (0)
60d76d1650Saurel32 #endif
61d76d1650Saurel32 
62eadaada1SAlexander Graf #define PROC_DEVTREE_CPU      "/proc/device-tree/cpus/"
63eadaada1SAlexander Graf 
6494a8d39aSJan Kiszka const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
6594a8d39aSJan Kiszka     KVM_CAP_LAST_INFO
6694a8d39aSJan Kiszka };
6794a8d39aSJan Kiszka 
68fc87e185SAlexander Graf static int cap_interrupt_unset = false;
69fc87e185SAlexander Graf static int cap_interrupt_level = false;
7090dc8812SScott Wood static int cap_segstate;
7190dc8812SScott Wood static int cap_booke_sregs;
72e97c3636SDavid Gibson static int cap_ppc_smt;
73fa98fbfcSSam Bobroff static int cap_ppc_smt_possible;
740f5cb298SDavid Gibson static int cap_spapr_tce;
75d6ee2a7cSAlexey Kardashevskiy static int cap_spapr_tce_64;
76da95324eSAlexey Kardashevskiy static int cap_spapr_multitce;
779bb62a07SAlexey Kardashevskiy static int cap_spapr_vfio;
78f1af19d7SDavid Gibson static int cap_hior;
79d67d40eaSDavid Gibson static int cap_one_reg;
803b961124SStuart Yoder static int cap_epr;
8131f2cb8fSBharat Bhushan static int cap_ppc_watchdog;
829b00ea49SDavid Gibson static int cap_papr;
83e68cb8b4SAlexey Kardashevskiy static int cap_htab_fd;
8487a91de6SAlexander Graf static int cap_fixup_hcalls;
85bac3bf28SThomas Huth static int cap_htm;             /* Hardware transactional memory support */
86cf1c4cceSSam Bobroff static int cap_mmu_radix;
87cf1c4cceSSam Bobroff static int cap_mmu_hash_v3;
88b55d295eSDavid Gibson static int cap_resize_hpt;
89c363a37aSDaniel Henrique Barboza static int cap_ppc_pvr_compat;
908acc2ae5SSuraj Jitindar Singh static int cap_ppc_safe_cache;
918acc2ae5SSuraj Jitindar Singh static int cap_ppc_safe_bounds_check;
928acc2ae5SSuraj Jitindar Singh static int cap_ppc_safe_indirect_branch;
938ff43ee4SSuraj Jitindar Singh static int cap_ppc_count_cache_flush_assist;
94b9a477b7SSuraj Jitindar Singh static int cap_ppc_nested_kvm_hv;
957d050527SSuraj Jitindar Singh static int cap_large_decr;
96fc87e185SAlexander Graf 
973c902d44SBharat Bhushan static uint32_t debug_inst_opcode;
983c902d44SBharat Bhushan 
99c821c2bdSAlexander Graf /* XXX We have a race condition where we actually have a level triggered
100c821c2bdSAlexander Graf  *     interrupt, but the infrastructure can't expose that yet, so the guest
101c821c2bdSAlexander Graf  *     takes but ignores it, goes to sleep and never gets notified that there's
102c821c2bdSAlexander Graf  *     still an interrupt pending.
103c6a94ba5SAlexander Graf  *
104c821c2bdSAlexander Graf  *     As a quick workaround, let's just wake up again 20 ms after we injected
105c821c2bdSAlexander Graf  *     an interrupt. That way we can ensure that we're always reinjecting
106c821c2bdSAlexander Graf  *     interrupts in case the guest swallowed them.
107c6a94ba5SAlexander Graf  */
108c6a94ba5SAlexander Graf static QEMUTimer *idle_timer;
109c6a94ba5SAlexander Graf 
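/*
 * kvm_kick_cpu() below is the callback for idle_timer: it just wakes the
 * vCPU so that any interrupt the guest may have swallowed gets another
 * chance to be delivered.  The timer itself is presumably re-armed around
 * interrupt injection elsewhere in this file (not shown in this excerpt).
 */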
110d5a68146SAndreas Färber static void kvm_kick_cpu(void *opaque)
111c6a94ba5SAlexander Graf {
112d5a68146SAndreas Färber     PowerPCCPU *cpu = opaque;
113d5a68146SAndreas Färber 
114c08d7424SAndreas Färber     qemu_cpu_kick(CPU(cpu));
115c6a94ba5SAlexander Graf }
116c6a94ba5SAlexander Graf 
11796c9cff0SThomas Huth /* Check whether we are running with KVM-PR (instead of KVM-HV).  This
11896c9cff0SThomas Huth  * should only be used for fallback tests - generally we should use
11996c9cff0SThomas Huth  * explicit capabilities for the features we want, rather than
12096c9cff0SThomas Huth  * assuming what is/isn't available depending on the KVM variant. */
12196c9cff0SThomas Huth static bool kvmppc_is_pr(KVMState *ks)
12296c9cff0SThomas Huth {
12396c9cff0SThomas Huth     /* Assume KVM-PR if the GET_PVINFO capability is available */
12470a0c19eSGreg Kurz     return kvm_vm_check_extension(ks, KVM_CAP_PPC_GET_PVINFO) != 0;
12596c9cff0SThomas Huth }
12696c9cff0SThomas Huth 
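/*
 * Typical fallback use of kvmppc_is_pr(): features that KVM-HV provides
 * implicitly but KVM-PR does not, e.g. in kvm_arch_init_vcpu() below:
 *
 *     if (!cap_htm && !kvmppc_is_pr(cs->kvm_state)) {
 *         ...
 *     }
 */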
1272e9c10ebSIgor Mammedov static int kvm_ppc_register_host_cpu_type(MachineState *ms);
1288acc2ae5SSuraj Jitindar Singh static void kvmppc_get_cpu_characteristics(KVMState *s);
1297d050527SSuraj Jitindar Singh static int kvmppc_get_dec_bits(void);
1305ba4576bSAndreas Färber 
131b16565b3SMarcel Apfelbaum int kvm_arch_init(MachineState *ms, KVMState *s)
132d76d1650Saurel32 {
133fc87e185SAlexander Graf     cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);
134fc87e185SAlexander Graf     cap_interrupt_level = kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL);
13590dc8812SScott Wood     cap_segstate = kvm_check_extension(s, KVM_CAP_PPC_SEGSTATE);
13690dc8812SScott Wood     cap_booke_sregs = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_SREGS);
1376977afdaSGreg Kurz     cap_ppc_smt_possible = kvm_vm_check_extension(s, KVM_CAP_PPC_SMT_POSSIBLE);
1380f5cb298SDavid Gibson     cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);
139d6ee2a7cSAlexey Kardashevskiy     cap_spapr_tce_64 = kvm_check_extension(s, KVM_CAP_SPAPR_TCE_64);
140da95324eSAlexey Kardashevskiy     cap_spapr_multitce = kvm_check_extension(s, KVM_CAP_SPAPR_MULTITCE);
1419ded780cSAlexey Kardashevskiy     cap_spapr_vfio = kvm_vm_check_extension(s, KVM_CAP_SPAPR_TCE_VFIO);
142d67d40eaSDavid Gibson     cap_one_reg = kvm_check_extension(s, KVM_CAP_ONE_REG);
143f1af19d7SDavid Gibson     cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);
1443b961124SStuart Yoder     cap_epr = kvm_check_extension(s, KVM_CAP_PPC_EPR);
14531f2cb8fSBharat Bhushan     cap_ppc_watchdog = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_WATCHDOG);
1469b00ea49SDavid Gibson     /* Note: we don't set cap_papr here, because this capability is
1479b00ea49SDavid Gibson      * only activated after this by kvmppc_set_papr() */
1486977afdaSGreg Kurz     cap_htab_fd = kvm_vm_check_extension(s, KVM_CAP_PPC_HTAB_FD);
14987a91de6SAlexander Graf     cap_fixup_hcalls = kvm_check_extension(s, KVM_CAP_PPC_FIXUP_HCALL);
150fa98fbfcSSam Bobroff     cap_ppc_smt = kvm_vm_check_extension(s, KVM_CAP_PPC_SMT);
151bac3bf28SThomas Huth     cap_htm = kvm_vm_check_extension(s, KVM_CAP_PPC_HTM);
152cf1c4cceSSam Bobroff     cap_mmu_radix = kvm_vm_check_extension(s, KVM_CAP_PPC_MMU_RADIX);
153cf1c4cceSSam Bobroff     cap_mmu_hash_v3 = kvm_vm_check_extension(s, KVM_CAP_PPC_MMU_HASH_V3);
154b55d295eSDavid Gibson     cap_resize_hpt = kvm_vm_check_extension(s, KVM_CAP_SPAPR_RESIZE_HPT);
1558acc2ae5SSuraj Jitindar Singh     kvmppc_get_cpu_characteristics(s);
156b9a477b7SSuraj Jitindar Singh     cap_ppc_nested_kvm_hv = kvm_vm_check_extension(s, KVM_CAP_PPC_NESTED_HV);
1577d050527SSuraj Jitindar Singh     cap_large_decr = kvmppc_get_dec_bits();
158c363a37aSDaniel Henrique Barboza     /*
159c363a37aSDaniel Henrique Barboza      * Note: setting it to false because there is no such capability
160c363a37aSDaniel Henrique Barboza      * in KVM at this moment.
161c363a37aSDaniel Henrique Barboza      *
162c363a37aSDaniel Henrique Barboza      * TODO: call kvm_vm_check_extension() with the right capability
163c363a37aSDaniel Henrique Barboza      * after the kernel starts implementing it. */
164c363a37aSDaniel Henrique Barboza     cap_ppc_pvr_compat = false;
165fc87e185SAlexander Graf 
166fc87e185SAlexander Graf     if (!cap_interrupt_level) {
167fc87e185SAlexander Graf         fprintf(stderr, "KVM: Couldn't find level irq capability. Expect the "
168fc87e185SAlexander Graf                         "VM to stall at times!\n");
169fc87e185SAlexander Graf     }
170fc87e185SAlexander Graf 
1712e9c10ebSIgor Mammedov     kvm_ppc_register_host_cpu_type(ms);
1725ba4576bSAndreas Färber 
173d76d1650Saurel32     return 0;
174d76d1650Saurel32 }
175d76d1650Saurel32 
176d525ffabSPaolo Bonzini int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
177d525ffabSPaolo Bonzini {
178d525ffabSPaolo Bonzini     return 0;
179d525ffabSPaolo Bonzini }
180d525ffabSPaolo Bonzini 
1811bc22652SAndreas Färber static int kvm_arch_sync_sregs(PowerPCCPU *cpu)
182d76d1650Saurel32 {
1831bc22652SAndreas Färber     CPUPPCState *cenv = &cpu->env;
1841bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
185861bbc80SAlexander Graf     struct kvm_sregs sregs;
1865666ca4aSScott Wood     int ret;
1875666ca4aSScott Wood 
1885666ca4aSScott Wood     if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
18964e07be5SAlexander Graf         /* What we're really trying to say is "if we're on BookE, we use
19064e07be5SAlexander Graf            the native PVR for now". This is the only sane way to check
19164e07be5SAlexander Graf            it though, so we potentially confuse users into thinking they can run
19264e07be5SAlexander Graf            BookE guests on BookS. Let's hope nobody dares enough :) */
1935666ca4aSScott Wood         return 0;
1945666ca4aSScott Wood     } else {
19590dc8812SScott Wood         if (!cap_segstate) {
19664e07be5SAlexander Graf             fprintf(stderr, "kvm error: missing PVR setting capability\n");
19764e07be5SAlexander Graf             return -ENOSYS;
1985666ca4aSScott Wood         }
1995666ca4aSScott Wood     }
2005666ca4aSScott Wood 
2011bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
2025666ca4aSScott Wood     if (ret) {
2035666ca4aSScott Wood         return ret;
2045666ca4aSScott Wood     }
205861bbc80SAlexander Graf 
206861bbc80SAlexander Graf     sregs.pvr = cenv->spr[SPR_PVR];
2071bc22652SAndreas Färber     return kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
2085666ca4aSScott Wood }
2095666ca4aSScott Wood 
21093dd5e85SScott Wood /* Set up a shared TLB array with KVM */
2111bc22652SAndreas Färber static int kvm_booke206_tlb_init(PowerPCCPU *cpu)
21293dd5e85SScott Wood {
2131bc22652SAndreas Färber     CPUPPCState *env = &cpu->env;
2141bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
21593dd5e85SScott Wood     struct kvm_book3e_206_tlb_params params = {};
21693dd5e85SScott Wood     struct kvm_config_tlb cfg = {};
21793dd5e85SScott Wood     unsigned int entries = 0;
21893dd5e85SScott Wood     int ret, i;
21993dd5e85SScott Wood 
22093dd5e85SScott Wood     if (!kvm_enabled() ||
221a60f24b5SAndreas Färber         !kvm_check_extension(cs->kvm_state, KVM_CAP_SW_TLB)) {
22293dd5e85SScott Wood         return 0;
22393dd5e85SScott Wood     }
22493dd5e85SScott Wood 
22593dd5e85SScott Wood     assert(ARRAY_SIZE(params.tlb_sizes) == BOOKE206_MAX_TLBN);
22693dd5e85SScott Wood 
22793dd5e85SScott Wood     for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
22893dd5e85SScott Wood         params.tlb_sizes[i] = booke206_tlb_size(env, i);
22993dd5e85SScott Wood         params.tlb_ways[i] = booke206_tlb_ways(env, i);
23093dd5e85SScott Wood         entries += params.tlb_sizes[i];
23193dd5e85SScott Wood     }
23293dd5e85SScott Wood 
23393dd5e85SScott Wood     assert(entries == env->nb_tlb);
23493dd5e85SScott Wood     assert(sizeof(struct kvm_book3e_206_tlb_entry) == sizeof(ppcmas_tlb_t));
23593dd5e85SScott Wood 
23693dd5e85SScott Wood     env->tlb_dirty = true;
23793dd5e85SScott Wood 
23893dd5e85SScott Wood     cfg.array = (uintptr_t)env->tlb.tlbm;
23993dd5e85SScott Wood     cfg.array_len = sizeof(ppcmas_tlb_t) * entries;
24093dd5e85SScott Wood     cfg.params = (uintptr_t)&params;
24193dd5e85SScott Wood     cfg.mmu_type = KVM_MMU_FSL_BOOKE_NOHV;
24293dd5e85SScott Wood 
24348add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_SW_TLB, 0, (uintptr_t)&cfg);
24493dd5e85SScott Wood     if (ret < 0) {
24593dd5e85SScott Wood         fprintf(stderr, "%s: couldn't enable KVM_CAP_SW_TLB: %s\n",
24693dd5e85SScott Wood                 __func__, strerror(-ret));
24793dd5e85SScott Wood         return ret;
24893dd5e85SScott Wood     }
24993dd5e85SScott Wood 
25093dd5e85SScott Wood     env->kvm_sw_tlb = true;
25193dd5e85SScott Wood     return 0;
25293dd5e85SScott Wood }
25393dd5e85SScott Wood 
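/*
 * With KVM_CAP_SW_TLB enabled above, QEMU and KVM share the MAS-format TLB
 * array directly: cfg.array points at env->tlb.tlbm and cfg.array_len
 * covers every entry of every TLB, so later synchronisation only needs to
 * mark entries dirty (see kvm_sw_tlb_put() further down).
 */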
2544656e1f0SBenjamin Herrenschmidt 
2554656e1f0SBenjamin Herrenschmidt #if defined(TARGET_PPC64)
256ab256960SGreg Kurz static void kvm_get_smmu_info(struct kvm_ppc_smmu_info *info, Error **errp)
2574656e1f0SBenjamin Herrenschmidt {
2584656e1f0SBenjamin Herrenschmidt     int ret;
2594656e1f0SBenjamin Herrenschmidt 
260ab256960SGreg Kurz     assert(kvm_state != NULL);
261ab256960SGreg Kurz 
262ab256960SGreg Kurz     if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) {
26371d0f1eaSGreg Kurz         error_setg(errp, "KVM doesn't expose the MMU features it supports");
26471d0f1eaSGreg Kurz         error_append_hint(errp, "Consider switching to a newer KVM\n");
26571d0f1eaSGreg Kurz         return;
26671d0f1eaSGreg Kurz     }
26771d0f1eaSGreg Kurz 
268ab256960SGreg Kurz     ret = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_SMMU_INFO, info);
2694656e1f0SBenjamin Herrenschmidt     if (ret == 0) {
2704656e1f0SBenjamin Herrenschmidt         return;
2714656e1f0SBenjamin Herrenschmidt     }
2724656e1f0SBenjamin Herrenschmidt 
27371d0f1eaSGreg Kurz     error_setg_errno(errp, -ret,
27471d0f1eaSGreg Kurz                      "KVM failed to provide the MMU features it supports");
2754656e1f0SBenjamin Herrenschmidt }
2764656e1f0SBenjamin Herrenschmidt 
277c64abd1fSSam Bobroff struct ppc_radix_page_info *kvm_get_radix_page_info(void)
278c64abd1fSSam Bobroff {
279c64abd1fSSam Bobroff     KVMState *s = KVM_STATE(current_machine->accelerator);
280c64abd1fSSam Bobroff     struct ppc_radix_page_info *radix_page_info;
281c64abd1fSSam Bobroff     struct kvm_ppc_rmmu_info rmmu_info;
282c64abd1fSSam Bobroff     int i;
283c64abd1fSSam Bobroff 
284c64abd1fSSam Bobroff     if (!kvm_check_extension(s, KVM_CAP_PPC_MMU_RADIX)) {
285c64abd1fSSam Bobroff         return NULL;
286c64abd1fSSam Bobroff     }
287c64abd1fSSam Bobroff     if (kvm_vm_ioctl(s, KVM_PPC_GET_RMMU_INFO, &rmmu_info)) {
288c64abd1fSSam Bobroff         return NULL;
289c64abd1fSSam Bobroff     }
290c64abd1fSSam Bobroff     radix_page_info = g_malloc0(sizeof(*radix_page_info));
291c64abd1fSSam Bobroff     radix_page_info->count = 0;
292c64abd1fSSam Bobroff     for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
293c64abd1fSSam Bobroff         if (rmmu_info.ap_encodings[i]) {
294c64abd1fSSam Bobroff             radix_page_info->entries[i] = rmmu_info.ap_encodings[i];
295c64abd1fSSam Bobroff             radix_page_info->count++;
296c64abd1fSSam Bobroff         }
297c64abd1fSSam Bobroff     }
298c64abd1fSSam Bobroff     return radix_page_info;
299c64abd1fSSam Bobroff }
300c64abd1fSSam Bobroff 
301b4db5413SSuraj Jitindar Singh target_ulong kvmppc_configure_v3_mmu(PowerPCCPU *cpu,
302b4db5413SSuraj Jitindar Singh                                      bool radix, bool gtse,
303b4db5413SSuraj Jitindar Singh                                      uint64_t proc_tbl)
304b4db5413SSuraj Jitindar Singh {
305b4db5413SSuraj Jitindar Singh     CPUState *cs = CPU(cpu);
306b4db5413SSuraj Jitindar Singh     int ret;
307b4db5413SSuraj Jitindar Singh     uint64_t flags = 0;
308b4db5413SSuraj Jitindar Singh     struct kvm_ppc_mmuv3_cfg cfg = {
309b4db5413SSuraj Jitindar Singh         .process_table = proc_tbl,
310b4db5413SSuraj Jitindar Singh     };
311b4db5413SSuraj Jitindar Singh 
312b4db5413SSuraj Jitindar Singh     if (radix) {
313b4db5413SSuraj Jitindar Singh         flags |= KVM_PPC_MMUV3_RADIX;
314b4db5413SSuraj Jitindar Singh     }
315b4db5413SSuraj Jitindar Singh     if (gtse) {
316b4db5413SSuraj Jitindar Singh         flags |= KVM_PPC_MMUV3_GTSE;
317b4db5413SSuraj Jitindar Singh     }
318b4db5413SSuraj Jitindar Singh     cfg.flags = flags;
319b4db5413SSuraj Jitindar Singh     ret = kvm_vm_ioctl(cs->kvm_state, KVM_PPC_CONFIGURE_V3_MMU, &cfg);
320b4db5413SSuraj Jitindar Singh     switch (ret) {
321b4db5413SSuraj Jitindar Singh     case 0:
322b4db5413SSuraj Jitindar Singh         return H_SUCCESS;
323b4db5413SSuraj Jitindar Singh     case -EINVAL:
324b4db5413SSuraj Jitindar Singh         return H_PARAMETER;
325b4db5413SSuraj Jitindar Singh     case -ENODEV:
326b4db5413SSuraj Jitindar Singh         return H_NOT_AVAILABLE;
327b4db5413SSuraj Jitindar Singh     default:
328b4db5413SSuraj Jitindar Singh         return H_HARDWARE;
329b4db5413SSuraj Jitindar Singh     }
330b4db5413SSuraj Jitindar Singh }
331b4db5413SSuraj Jitindar Singh 
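/*
 * kvmppc_configure_v3_mmu() above maps the ioctl's errno onto PAPR hcall
 * return codes (H_SUCCESS, H_PARAMETER, ...), presumably because it is
 * invoked from an hcall implementation (not part of this excerpt) whose
 * result is handed straight back to the guest.
 */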
33224c6863cSDavid Gibson bool kvmppc_hpt_needs_host_contiguous_pages(void)
33324c6863cSDavid Gibson {
33424c6863cSDavid Gibson     static struct kvm_ppc_smmu_info smmu_info;
33524c6863cSDavid Gibson 
33624c6863cSDavid Gibson     if (!kvm_enabled()) {
33724c6863cSDavid Gibson         return false;
33824c6863cSDavid Gibson     }
33924c6863cSDavid Gibson 
340ab256960SGreg Kurz     kvm_get_smmu_info(&smmu_info, &error_fatal);
34124c6863cSDavid Gibson     return !!(smmu_info.flags & KVM_PPC_PAGE_SIZES_REAL);
34224c6863cSDavid Gibson }
34324c6863cSDavid Gibson 
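/*
 * KVM_PPC_PAGE_SIZES_REAL is taken here to mean that the page sizes the
 * guest can use are constrained by the host pages backing its RAM, which
 * is why this helper reports that the HPT needs host-contiguous pages.
 */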
344e5ca28ecSDavid Gibson void kvm_check_mmu(PowerPCCPU *cpu, Error **errp)
3454656e1f0SBenjamin Herrenschmidt {
346e5ca28ecSDavid Gibson     struct kvm_ppc_smmu_info smmu_info;
3474656e1f0SBenjamin Herrenschmidt     int iq, ik, jq, jk;
34871d0f1eaSGreg Kurz     Error *local_err = NULL;
3494656e1f0SBenjamin Herrenschmidt 
350e5ca28ecSDavid Gibson     /* For now, we only have anything to check on hash64 MMUs */
351e5ca28ecSDavid Gibson     if (!cpu->hash64_opts || !kvm_enabled()) {
3524656e1f0SBenjamin Herrenschmidt         return;
3534656e1f0SBenjamin Herrenschmidt     }
3544656e1f0SBenjamin Herrenschmidt 
355ab256960SGreg Kurz     kvm_get_smmu_info(&smmu_info, &local_err);
35671d0f1eaSGreg Kurz     if (local_err) {
35771d0f1eaSGreg Kurz         error_propagate(errp, local_err);
35871d0f1eaSGreg Kurz         return;
35971d0f1eaSGreg Kurz     }
360e5ca28ecSDavid Gibson 
361e5ca28ecSDavid Gibson     if (ppc_hash64_has(cpu, PPC_HASH64_1TSEG)
362e5ca28ecSDavid Gibson         && !(smmu_info.flags & KVM_PPC_1T_SEGMENTS)) {
363e5ca28ecSDavid Gibson         error_setg(errp,
364e5ca28ecSDavid Gibson                    "KVM does not support 1TiB segments which guest expects");
365e5ca28ecSDavid Gibson         return;
3664656e1f0SBenjamin Herrenschmidt     }
3674656e1f0SBenjamin Herrenschmidt 
368e5ca28ecSDavid Gibson     if (smmu_info.slb_size < cpu->hash64_opts->slb_size) {
369e5ca28ecSDavid Gibson         error_setg(errp, "KVM only supports %u SLB entries, but guest needs %u",
370e5ca28ecSDavid Gibson                    smmu_info.slb_size, cpu->hash64_opts->slb_size);
371e5ca28ecSDavid Gibson         return;
37290da0d5aSBenjamin Herrenschmidt     }
37390da0d5aSBenjamin Herrenschmidt 
37408215d8fSAlexander Graf     /*
375e5ca28ecSDavid Gibson      * Verify that every pagesize supported by the cpu model is
376e5ca28ecSDavid Gibson      * supported by KVM with the same encodings
37708215d8fSAlexander Graf      */
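    /*
     * Illustrative example (not taken from this source): if the CPU model
     * advertises a 64k base page size, a matching smmu_info.sps[] entry
     * with the same SLB encoding must exist, and each actual page size it
     * lists (e.g. 64k-on-64k) must have the same PTE encoding, otherwise
     * the guest would program SLB entries or hash PTEs the host rejects.
     */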
378e5ca28ecSDavid Gibson     for (iq = 0; iq < ARRAY_SIZE(cpu->hash64_opts->sps); iq++) {
379b07c59f7SDavid Gibson         PPCHash64SegmentPageSizes *qsps = &cpu->hash64_opts->sps[iq];
380e5ca28ecSDavid Gibson         struct kvm_ppc_one_seg_page_size *ksps;
3814656e1f0SBenjamin Herrenschmidt 
382e5ca28ecSDavid Gibson         for (ik = 0; ik < ARRAY_SIZE(smmu_info.sps); ik++) {
383e5ca28ecSDavid Gibson             if (qsps->page_shift == smmu_info.sps[ik].page_shift) {
3844656e1f0SBenjamin Herrenschmidt                 break;
3854656e1f0SBenjamin Herrenschmidt             }
3864656e1f0SBenjamin Herrenschmidt         }
387e5ca28ecSDavid Gibson         if (ik >= ARRAY_SIZE(smmu_info.sps)) {
388e5ca28ecSDavid Gibson             error_setg(errp, "KVM doesn't support base page shift %u",
389e5ca28ecSDavid Gibson                        qsps->page_shift);
390e5ca28ecSDavid Gibson             return;
391e5ca28ecSDavid Gibson         }
392e5ca28ecSDavid Gibson 
393e5ca28ecSDavid Gibson         ksps = &smmu_info.sps[ik];
394e5ca28ecSDavid Gibson         if (ksps->slb_enc != qsps->slb_enc) {
395e5ca28ecSDavid Gibson             error_setg(errp,
396e5ca28ecSDavid Gibson "KVM uses SLB encoding 0x%x for page shift %u, but guest expects 0x%x",
397e5ca28ecSDavid Gibson                        ksps->slb_enc, ksps->page_shift, qsps->slb_enc);
398e5ca28ecSDavid Gibson             return;
399e5ca28ecSDavid Gibson         }
400e5ca28ecSDavid Gibson 
401e5ca28ecSDavid Gibson         for (jq = 0; jq < ARRAY_SIZE(qsps->enc); jq++) {
402e5ca28ecSDavid Gibson             for (jk = 0; jk < ARRAY_SIZE(ksps->enc); jk++) {
403e5ca28ecSDavid Gibson                 if (qsps->enc[jq].page_shift == ksps->enc[jk].page_shift) {
4044656e1f0SBenjamin Herrenschmidt                     break;
4054656e1f0SBenjamin Herrenschmidt                 }
4064656e1f0SBenjamin Herrenschmidt             }
4074656e1f0SBenjamin Herrenschmidt 
408e5ca28ecSDavid Gibson             if (jk >= ARRAY_SIZE(ksps->enc)) {
409e5ca28ecSDavid Gibson                 error_setg(errp, "KVM doesn't support page shift %u/%u",
410e5ca28ecSDavid Gibson                            qsps->enc[jq].page_shift, qsps->page_shift);
411e5ca28ecSDavid Gibson                 return;
412e5ca28ecSDavid Gibson             }
413e5ca28ecSDavid Gibson             if (qsps->enc[jq].pte_enc != ksps->enc[jk].pte_enc) {
414e5ca28ecSDavid Gibson                 error_setg(errp,
415e5ca28ecSDavid Gibson "KVM uses PTE encoding 0x%x for page shift %u/%u, but guest expects 0x%x",
416e5ca28ecSDavid Gibson                            ksps->enc[jk].pte_enc, qsps->enc[jq].page_shift,
417e5ca28ecSDavid Gibson                            qsps->page_shift, qsps->enc[jq].pte_enc);
418e5ca28ecSDavid Gibson                 return;
419e5ca28ecSDavid Gibson             }
420e5ca28ecSDavid Gibson         }
4214656e1f0SBenjamin Herrenschmidt     }
4224656e1f0SBenjamin Herrenschmidt 
423e5ca28ecSDavid Gibson     if (ppc_hash64_has(cpu, PPC_HASH64_CI_LARGEPAGE)) {
424e5ca28ecSDavid Gibson         /* Mostly, the guest pagesizes we can use are related to the
425e5ca28ecSDavid Gibson          * host pages used to map guest RAM, which is handled in the
426e5ca28ecSDavid Gibson          * platform code. Cache-Inhibited largepages (64k), however, are
427e5ca28ecSDavid Gibson          * used for I/O, so if they're mapped to the host at all it
428e5ca28ecSDavid Gibson          * will be a normal mapping, not a special hugepage one used
429e5ca28ecSDavid Gibson          * for RAM. */
430e5ca28ecSDavid Gibson         if (getpagesize() < 0x10000) {
431e5ca28ecSDavid Gibson             error_setg(errp,
432e5ca28ecSDavid Gibson                        "KVM can't supply 64kiB CI pages, which guest expects");
433e5ca28ecSDavid Gibson         }
434e5ca28ecSDavid Gibson     }
435e5ca28ecSDavid Gibson }
4364656e1f0SBenjamin Herrenschmidt #endif /* defined (TARGET_PPC64) */
4374656e1f0SBenjamin Herrenschmidt 
438b164e48eSEduardo Habkost unsigned long kvm_arch_vcpu_id(CPUState *cpu)
439b164e48eSEduardo Habkost {
4402e886fb3SSam Bobroff     return POWERPC_CPU(cpu)->vcpu_id;
441b164e48eSEduardo Habkost }
442b164e48eSEduardo Habkost 
44388365d17SBharat Bhushan /* e500 supports 2 h/w breakpoints and 2 watchpoints.
44488365d17SBharat Bhushan  * book3s supports only 1 watchpoint, so array size
44588365d17SBharat Bhushan  * of 4 is sufficient for now.
44688365d17SBharat Bhushan  */
44788365d17SBharat Bhushan #define MAX_HW_BKPTS 4
44888365d17SBharat Bhushan 
44988365d17SBharat Bhushan static struct HWBreakpoint {
45088365d17SBharat Bhushan     target_ulong addr;
45188365d17SBharat Bhushan     int type;
45288365d17SBharat Bhushan } hw_debug_points[MAX_HW_BKPTS];
45388365d17SBharat Bhushan 
45488365d17SBharat Bhushan static CPUWatchpoint hw_watchpoint;
45588365d17SBharat Bhushan 
45688365d17SBharat Bhushan /* By default, no h/w breakpoints or watchpoints are supported */
45788365d17SBharat Bhushan static int max_hw_breakpoint;
45888365d17SBharat Bhushan static int max_hw_watchpoint;
45988365d17SBharat Bhushan static int nb_hw_breakpoint;
46088365d17SBharat Bhushan static int nb_hw_watchpoint;
46188365d17SBharat Bhushan 
46288365d17SBharat Bhushan static void kvmppc_hw_debug_points_init(CPUPPCState *cenv)
46388365d17SBharat Bhushan {
46488365d17SBharat Bhushan     if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
46588365d17SBharat Bhushan         max_hw_breakpoint = 2;
46688365d17SBharat Bhushan         max_hw_watchpoint = 2;
46788365d17SBharat Bhushan     }
46888365d17SBharat Bhushan 
46988365d17SBharat Bhushan     if ((max_hw_breakpoint + max_hw_watchpoint) > MAX_HW_BKPTS) {
47088365d17SBharat Bhushan         fprintf(stderr, "Error initializing h/w breakpoints\n");
47188365d17SBharat Bhushan         return;
47288365d17SBharat Bhushan     }
47388365d17SBharat Bhushan }
47488365d17SBharat Bhushan 
47520d695a9SAndreas Färber int kvm_arch_init_vcpu(CPUState *cs)
4765666ca4aSScott Wood {
47720d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
47820d695a9SAndreas Färber     CPUPPCState *cenv = &cpu->env;
4795666ca4aSScott Wood     int ret;
4805666ca4aSScott Wood 
4814656e1f0SBenjamin Herrenschmidt     /* Synchronize sregs with kvm */
4821bc22652SAndreas Färber     ret = kvm_arch_sync_sregs(cpu);
4835666ca4aSScott Wood     if (ret) {
484388e47c7SThomas Huth         if (ret == -EINVAL) {
485388e47c7SThomas Huth             error_report("Register sync failed... If you're using kvm-hv.ko,"
486388e47c7SThomas Huth                          " only \"-cpu host\" is possible");
487388e47c7SThomas Huth         }
4885666ca4aSScott Wood         return ret;
4895666ca4aSScott Wood     }
490861bbc80SAlexander Graf 
491bc72ad67SAlex Bligh     idle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, kvm_kick_cpu, cpu);
492c821c2bdSAlexander Graf 
49393dd5e85SScott Wood     switch (cenv->mmu_model) {
49493dd5e85SScott Wood     case POWERPC_MMU_BOOKE206:
4957f516c96SThomas Huth         /* This target supports access to KVM's guest TLB */
4961bc22652SAndreas Färber         ret = kvm_booke206_tlb_init(cpu);
49793dd5e85SScott Wood         break;
4987f516c96SThomas Huth     case POWERPC_MMU_2_07:
4997f516c96SThomas Huth         if (!cap_htm && !kvmppc_is_pr(cs->kvm_state)) {
5007f516c96SThomas Huth             /* KVM-HV has transactional memory on POWER8 even without the
501f3d9f303SSam Bobroff              * KVM_CAP_PPC_HTM extension, so enable it here instead as
502f3d9f303SSam Bobroff              * long as it's available to userspace on the host. */
503f3d9f303SSam Bobroff             if (qemu_getauxval(AT_HWCAP2) & PPC_FEATURE2_HAS_HTM) {
5047f516c96SThomas Huth                 cap_htm = true;
5057f516c96SThomas Huth             }
506f3d9f303SSam Bobroff         }
5077f516c96SThomas Huth         break;
50893dd5e85SScott Wood     default:
50993dd5e85SScott Wood         break;
51093dd5e85SScott Wood     }
51193dd5e85SScott Wood 
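    /*
     * KVM_REG_PPC_DEBUG_INST reports the trap opcode KVM wants planted for
     * software breakpoints; it is cached in debug_inst_opcode and
     * presumably consumed by the sw-breakpoint insert/remove hooks later in
     * this file (not shown in this excerpt).
     */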
5123c902d44SBharat Bhushan     kvm_get_one_reg(cs, KVM_REG_PPC_DEBUG_INST, &debug_inst_opcode);
51388365d17SBharat Bhushan     kvmppc_hw_debug_points_init(cenv);
5143c902d44SBharat Bhushan 
515861bbc80SAlexander Graf     return ret;
516d76d1650Saurel32 }
517d76d1650Saurel32 
5181bc22652SAndreas Färber static void kvm_sw_tlb_put(PowerPCCPU *cpu)
51993dd5e85SScott Wood {
5201bc22652SAndreas Färber     CPUPPCState *env = &cpu->env;
5211bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
52293dd5e85SScott Wood     struct kvm_dirty_tlb dirty_tlb;
52393dd5e85SScott Wood     unsigned char *bitmap;
52493dd5e85SScott Wood     int ret;
52593dd5e85SScott Wood 
52693dd5e85SScott Wood     if (!env->kvm_sw_tlb) {
52793dd5e85SScott Wood         return;
52893dd5e85SScott Wood     }
52993dd5e85SScott Wood 
53093dd5e85SScott Wood     bitmap = g_malloc((env->nb_tlb + 7) / 8);
53193dd5e85SScott Wood     memset(bitmap, 0xFF, (env->nb_tlb + 7) / 8);
53293dd5e85SScott Wood 
53393dd5e85SScott Wood     dirty_tlb.bitmap = (uintptr_t)bitmap;
53493dd5e85SScott Wood     dirty_tlb.num_dirty = env->nb_tlb;
53593dd5e85SScott Wood 
5361bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_DIRTY_TLB, &dirty_tlb);
53793dd5e85SScott Wood     if (ret) {
53893dd5e85SScott Wood         fprintf(stderr, "%s: KVM_DIRTY_TLB: %s\n",
53993dd5e85SScott Wood                 __func__, strerror(-ret));
54093dd5e85SScott Wood     }
54193dd5e85SScott Wood 
54293dd5e85SScott Wood     g_free(bitmap);
54393dd5e85SScott Wood }
54493dd5e85SScott Wood 
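/*
 * In kvm_get_one_spr()/kvm_put_one_spr() below, the transfer size is
 * encoded in the ONE_REG id itself (KVM_REG_SIZE_MASK), which is why only
 * the id is needed to choose between the 32-bit and 64-bit union members.
 */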
545d67d40eaSDavid Gibson static void kvm_get_one_spr(CPUState *cs, uint64_t id, int spr)
546d67d40eaSDavid Gibson {
547d67d40eaSDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
548d67d40eaSDavid Gibson     CPUPPCState *env = &cpu->env;
549d67d40eaSDavid Gibson     union {
550d67d40eaSDavid Gibson         uint32_t u32;
551d67d40eaSDavid Gibson         uint64_t u64;
552d67d40eaSDavid Gibson     } val;
553d67d40eaSDavid Gibson     struct kvm_one_reg reg = {
554d67d40eaSDavid Gibson         .id = id,
555d67d40eaSDavid Gibson         .addr = (uintptr_t) &val,
556d67d40eaSDavid Gibson     };
557d67d40eaSDavid Gibson     int ret;
558d67d40eaSDavid Gibson 
559d67d40eaSDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
560d67d40eaSDavid Gibson     if (ret != 0) {
561b36f100eSAlexey Kardashevskiy         trace_kvm_failed_spr_get(spr, strerror(errno));
562d67d40eaSDavid Gibson     } else {
563d67d40eaSDavid Gibson         switch (id & KVM_REG_SIZE_MASK) {
564d67d40eaSDavid Gibson         case KVM_REG_SIZE_U32:
565d67d40eaSDavid Gibson             env->spr[spr] = val.u32;
566d67d40eaSDavid Gibson             break;
567d67d40eaSDavid Gibson 
568d67d40eaSDavid Gibson         case KVM_REG_SIZE_U64:
569d67d40eaSDavid Gibson             env->spr[spr] = val.u64;
570d67d40eaSDavid Gibson             break;
571d67d40eaSDavid Gibson 
572d67d40eaSDavid Gibson         default:
573d67d40eaSDavid Gibson             /* Don't handle this size yet */
574d67d40eaSDavid Gibson             abort();
575d67d40eaSDavid Gibson         }
576d67d40eaSDavid Gibson     }
577d67d40eaSDavid Gibson }
578d67d40eaSDavid Gibson 
579d67d40eaSDavid Gibson static void kvm_put_one_spr(CPUState *cs, uint64_t id, int spr)
580d67d40eaSDavid Gibson {
581d67d40eaSDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
582d67d40eaSDavid Gibson     CPUPPCState *env = &cpu->env;
583d67d40eaSDavid Gibson     union {
584d67d40eaSDavid Gibson         uint32_t u32;
585d67d40eaSDavid Gibson         uint64_t u64;
586d67d40eaSDavid Gibson     } val;
587d67d40eaSDavid Gibson     struct kvm_one_reg reg = {
588d67d40eaSDavid Gibson         .id = id,
589d67d40eaSDavid Gibson         .addr = (uintptr_t) &val,
590d67d40eaSDavid Gibson     };
591d67d40eaSDavid Gibson     int ret;
592d67d40eaSDavid Gibson 
593d67d40eaSDavid Gibson     switch (id & KVM_REG_SIZE_MASK) {
594d67d40eaSDavid Gibson     case KVM_REG_SIZE_U32:
595d67d40eaSDavid Gibson         val.u32 = env->spr[spr];
596d67d40eaSDavid Gibson         break;
597d67d40eaSDavid Gibson 
598d67d40eaSDavid Gibson     case KVM_REG_SIZE_U64:
599d67d40eaSDavid Gibson         val.u64 = env->spr[spr];
600d67d40eaSDavid Gibson         break;
601d67d40eaSDavid Gibson 
602d67d40eaSDavid Gibson     default:
603d67d40eaSDavid Gibson         /* Don't handle this size yet */
604d67d40eaSDavid Gibson         abort();
605d67d40eaSDavid Gibson     }
606d67d40eaSDavid Gibson 
607d67d40eaSDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
608d67d40eaSDavid Gibson     if (ret != 0) {
609b36f100eSAlexey Kardashevskiy         trace_kvm_failed_spr_set(spr, strerror(errno));
610d67d40eaSDavid Gibson     }
611d67d40eaSDavid Gibson }
612d67d40eaSDavid Gibson 
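/*
 * For kvm_put_fp()/kvm_get_fp() below: with VSX, each register travels as
 * a 128-bit KVM_REG_PPC_VSR built from the scalar FPR doubleword and the
 * VSX low doubleword.  The order of the two halves in vsr[] depends on
 * host endianness (see the HOST_WORDS_BIGENDIAN blocks), presumably so the
 * in-memory image matches the layout KVM reads on that host.
 */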
61370b79849SDavid Gibson static int kvm_put_fp(CPUState *cs)
61470b79849SDavid Gibson {
61570b79849SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
61670b79849SDavid Gibson     CPUPPCState *env = &cpu->env;
61770b79849SDavid Gibson     struct kvm_one_reg reg;
61870b79849SDavid Gibson     int i;
61970b79849SDavid Gibson     int ret;
62070b79849SDavid Gibson 
62170b79849SDavid Gibson     if (env->insns_flags & PPC_FLOAT) {
62270b79849SDavid Gibson         uint64_t fpscr = env->fpscr;
62370b79849SDavid Gibson         bool vsx = !!(env->insns_flags2 & PPC2_VSX);
62470b79849SDavid Gibson 
62570b79849SDavid Gibson         reg.id = KVM_REG_PPC_FPSCR;
62670b79849SDavid Gibson         reg.addr = (uintptr_t)&fpscr;
62770b79849SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
62870b79849SDavid Gibson         if (ret < 0) {
629da56ff91SPeter Maydell             DPRINTF("Unable to set FPSCR to KVM: %s\n", strerror(errno));
63070b79849SDavid Gibson             return ret;
63170b79849SDavid Gibson         }
63270b79849SDavid Gibson 
63370b79849SDavid Gibson         for (i = 0; i < 32; i++) {
63470b79849SDavid Gibson             uint64_t vsr[2];
635ef96e3aeSMark Cave-Ayland             uint64_t *fpr = cpu_fpr_ptr(&cpu->env, i);
636ef96e3aeSMark Cave-Ayland             uint64_t *vsrl = cpu_vsrl_ptr(&cpu->env, i);
63770b79849SDavid Gibson 
6383a4b791bSGreg Kurz #ifdef HOST_WORDS_BIGENDIAN
639ef96e3aeSMark Cave-Ayland             vsr[0] = float64_val(*fpr);
640ef96e3aeSMark Cave-Ayland             vsr[1] = *vsrl;
6413a4b791bSGreg Kurz #else
642ef96e3aeSMark Cave-Ayland             vsr[0] = *vsrl;
643ef96e3aeSMark Cave-Ayland             vsr[1] = float64_val(*fpr);
6443a4b791bSGreg Kurz #endif
64570b79849SDavid Gibson             reg.addr = (uintptr_t) &vsr;
64670b79849SDavid Gibson             reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);
64770b79849SDavid Gibson 
64870b79849SDavid Gibson             ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
64970b79849SDavid Gibson             if (ret < 0) {
650da56ff91SPeter Maydell                 DPRINTF("Unable to set %s%d to KVM: %s\n", vsx ? "VSR" : "FPR",
65170b79849SDavid Gibson                         i, strerror(errno));
65270b79849SDavid Gibson                 return ret;
65370b79849SDavid Gibson             }
65470b79849SDavid Gibson         }
65570b79849SDavid Gibson     }
65670b79849SDavid Gibson 
65770b79849SDavid Gibson     if (env->insns_flags & PPC_ALTIVEC) {
65870b79849SDavid Gibson         reg.id = KVM_REG_PPC_VSCR;
65970b79849SDavid Gibson         reg.addr = (uintptr_t)&env->vscr;
66070b79849SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
66170b79849SDavid Gibson         if (ret < 0) {
662da56ff91SPeter Maydell             DPRINTF("Unable to set VSCR to KVM: %s\n", strerror(errno));
66370b79849SDavid Gibson             return ret;
66470b79849SDavid Gibson         }
66570b79849SDavid Gibson 
66670b79849SDavid Gibson         for (i = 0; i < 32; i++) {
66770b79849SDavid Gibson             reg.id = KVM_REG_PPC_VR(i);
668ef96e3aeSMark Cave-Ayland             reg.addr = (uintptr_t)cpu_avr_ptr(env, i);
66970b79849SDavid Gibson             ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
67070b79849SDavid Gibson             if (ret < 0) {
671da56ff91SPeter Maydell                 DPRINTF("Unable to set VR%d to KVM: %s\n", i, strerror(errno));
67270b79849SDavid Gibson                 return ret;
67370b79849SDavid Gibson             }
67470b79849SDavid Gibson         }
67570b79849SDavid Gibson     }
67670b79849SDavid Gibson 
67770b79849SDavid Gibson     return 0;
67870b79849SDavid Gibson }
67970b79849SDavid Gibson 
68070b79849SDavid Gibson static int kvm_get_fp(CPUState *cs)
68170b79849SDavid Gibson {
68270b79849SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
68370b79849SDavid Gibson     CPUPPCState *env = &cpu->env;
68470b79849SDavid Gibson     struct kvm_one_reg reg;
68570b79849SDavid Gibson     int i;
68670b79849SDavid Gibson     int ret;
68770b79849SDavid Gibson 
68870b79849SDavid Gibson     if (env->insns_flags & PPC_FLOAT) {
68970b79849SDavid Gibson         uint64_t fpscr;
69070b79849SDavid Gibson         bool vsx = !!(env->insns_flags2 & PPC2_VSX);
69170b79849SDavid Gibson 
69270b79849SDavid Gibson         reg.id = KVM_REG_PPC_FPSCR;
69370b79849SDavid Gibson         reg.addr = (uintptr_t)&fpscr;
69470b79849SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
69570b79849SDavid Gibson         if (ret < 0) {
696da56ff91SPeter Maydell             DPRINTF("Unable to get FPSCR from KVM: %s\n", strerror(errno));
69770b79849SDavid Gibson             return ret;
69870b79849SDavid Gibson         } else {
69970b79849SDavid Gibson             env->fpscr = fpscr;
70070b79849SDavid Gibson         }
70170b79849SDavid Gibson 
70270b79849SDavid Gibson         for (i = 0; i < 32; i++) {
70370b79849SDavid Gibson             uint64_t vsr[2];
704ef96e3aeSMark Cave-Ayland             uint64_t *fpr = cpu_fpr_ptr(&cpu->env, i);
705ef96e3aeSMark Cave-Ayland             uint64_t *vsrl = cpu_vsrl_ptr(&cpu->env, i);
70670b79849SDavid Gibson 
70770b79849SDavid Gibson             reg.addr = (uintptr_t) &vsr;
70870b79849SDavid Gibson             reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);
70970b79849SDavid Gibson 
71070b79849SDavid Gibson             ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
71170b79849SDavid Gibson             if (ret < 0) {
712da56ff91SPeter Maydell                 DPRINTF("Unable to get %s%d from KVM: %s\n",
71370b79849SDavid Gibson                         vsx ? "VSR" : "FPR", i, strerror(errno));
71470b79849SDavid Gibson                 return ret;
71570b79849SDavid Gibson             } else {
7163a4b791bSGreg Kurz #ifdef HOST_WORDS_BIGENDIAN
717ef96e3aeSMark Cave-Ayland                 *fpr = vsr[0];
71870b79849SDavid Gibson                 if (vsx) {
719ef96e3aeSMark Cave-Ayland                     *vsrl = vsr[1];
72070b79849SDavid Gibson                 }
7213a4b791bSGreg Kurz #else
722ef96e3aeSMark Cave-Ayland                 *fpr = vsr[1];
7233a4b791bSGreg Kurz                 if (vsx) {
724ef96e3aeSMark Cave-Ayland                     *vsrl = vsr[0];
7253a4b791bSGreg Kurz                 }
7263a4b791bSGreg Kurz #endif
72770b79849SDavid Gibson             }
72870b79849SDavid Gibson         }
72970b79849SDavid Gibson     }
73070b79849SDavid Gibson 
73170b79849SDavid Gibson     if (env->insns_flags & PPC_ALTIVEC) {
73270b79849SDavid Gibson         reg.id = KVM_REG_PPC_VSCR;
73370b79849SDavid Gibson         reg.addr = (uintptr_t)&env->vscr;
73470b79849SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
73570b79849SDavid Gibson         if (ret < 0) {
736da56ff91SPeter Maydell             DPRINTF("Unable to get VSCR from KVM: %s\n", strerror(errno));
73770b79849SDavid Gibson             return ret;
73870b79849SDavid Gibson         }
73970b79849SDavid Gibson 
74070b79849SDavid Gibson         for (i = 0; i < 32; i++) {
74170b79849SDavid Gibson             reg.id = KVM_REG_PPC_VR(i);
742ef96e3aeSMark Cave-Ayland             reg.addr = (uintptr_t)cpu_avr_ptr(env, i);
74370b79849SDavid Gibson             ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
74470b79849SDavid Gibson             if (ret < 0) {
745da56ff91SPeter Maydell                 DPRINTF("Unable to get VR%d from KVM: %s\n",
74670b79849SDavid Gibson                         i, strerror(errno));
74770b79849SDavid Gibson                 return ret;
74870b79849SDavid Gibson             }
74970b79849SDavid Gibson         }
75070b79849SDavid Gibson     }
75170b79849SDavid Gibson 
75270b79849SDavid Gibson     return 0;
75370b79849SDavid Gibson }
75470b79849SDavid Gibson 
7559b00ea49SDavid Gibson #if defined(TARGET_PPC64)
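/*
 * For the VPA accessors below: KVM_REG_PPC_VPA_SLB and KVM_REG_PPC_VPA_DTL
 * carry an (address, size) pair in a single register, which is why the
 * asserts check that each size field immediately follows its address field
 * in SpaprCpuState, so the pair can be handed to KVM as one contiguous
 * buffer.
 */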
7569b00ea49SDavid Gibson static int kvm_get_vpa(CPUState *cs)
7579b00ea49SDavid Gibson {
7589b00ea49SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
759*ce2918cbSDavid Gibson     SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
7609b00ea49SDavid Gibson     struct kvm_one_reg reg;
7619b00ea49SDavid Gibson     int ret;
7629b00ea49SDavid Gibson 
7639b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_ADDR;
7647388efafSDavid Gibson     reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
7659b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
7669b00ea49SDavid Gibson     if (ret < 0) {
767da56ff91SPeter Maydell         DPRINTF("Unable to get VPA address from KVM: %s\n", strerror(errno));
7689b00ea49SDavid Gibson         return ret;
7699b00ea49SDavid Gibson     }
7709b00ea49SDavid Gibson 
7717388efafSDavid Gibson     assert((uintptr_t)&spapr_cpu->slb_shadow_size
7727388efafSDavid Gibson            == ((uintptr_t)&spapr_cpu->slb_shadow_addr + 8));
7739b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_SLB;
7747388efafSDavid Gibson     reg.addr = (uintptr_t)&spapr_cpu->slb_shadow_addr;
7759b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
7769b00ea49SDavid Gibson     if (ret < 0) {
777da56ff91SPeter Maydell         DPRINTF("Unable to get SLB shadow state from KVM: %s\n",
7789b00ea49SDavid Gibson                 strerror(errno));
7799b00ea49SDavid Gibson         return ret;
7809b00ea49SDavid Gibson     }
7819b00ea49SDavid Gibson 
7827388efafSDavid Gibson     assert((uintptr_t)&spapr_cpu->dtl_size
7837388efafSDavid Gibson            == ((uintptr_t)&spapr_cpu->dtl_addr + 8));
7849b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_DTL;
7857388efafSDavid Gibson     reg.addr = (uintptr_t)&spapr_cpu->dtl_addr;
7869b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
7879b00ea49SDavid Gibson     if (ret < 0) {
788da56ff91SPeter Maydell         DPRINTF("Unable to get dispatch trace log state from KVM: %s\n",
7899b00ea49SDavid Gibson                 strerror(errno));
7909b00ea49SDavid Gibson         return ret;
7919b00ea49SDavid Gibson     }
7929b00ea49SDavid Gibson 
7939b00ea49SDavid Gibson     return 0;
7949b00ea49SDavid Gibson }
7959b00ea49SDavid Gibson 
7969b00ea49SDavid Gibson static int kvm_put_vpa(CPUState *cs)
7979b00ea49SDavid Gibson {
7989b00ea49SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
799*ce2918cbSDavid Gibson     SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
8009b00ea49SDavid Gibson     struct kvm_one_reg reg;
8019b00ea49SDavid Gibson     int ret;
8029b00ea49SDavid Gibson 
8039b00ea49SDavid Gibson     /* SLB shadow or DTL can't be registered unless a master VPA is
8049b00ea49SDavid Gibson      * registered.  That means when restoring state, if a VPA *is*
8059b00ea49SDavid Gibson      * registered, we need to set that up first.  If not, we need to
8069b00ea49SDavid Gibson      * deregister the others before deregistering the master VPA. */
8077388efafSDavid Gibson     assert(spapr_cpu->vpa_addr
8087388efafSDavid Gibson            || !(spapr_cpu->slb_shadow_addr || spapr_cpu->dtl_addr));
8099b00ea49SDavid Gibson 
8107388efafSDavid Gibson     if (spapr_cpu->vpa_addr) {
8119b00ea49SDavid Gibson         reg.id = KVM_REG_PPC_VPA_ADDR;
8127388efafSDavid Gibson         reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
8139b00ea49SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
8149b00ea49SDavid Gibson         if (ret < 0) {
815da56ff91SPeter Maydell             DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno));
8169b00ea49SDavid Gibson             return ret;
8179b00ea49SDavid Gibson         }
8189b00ea49SDavid Gibson     }
8199b00ea49SDavid Gibson 
8207388efafSDavid Gibson     assert((uintptr_t)&spapr_cpu->slb_shadow_size
8217388efafSDavid Gibson            == ((uintptr_t)&spapr_cpu->slb_shadow_addr + 8));
8229b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_SLB;
8237388efafSDavid Gibson     reg.addr = (uintptr_t)&spapr_cpu->slb_shadow_addr;
8249b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
8259b00ea49SDavid Gibson     if (ret < 0) {
826da56ff91SPeter Maydell         DPRINTF("Unable to set SLB shadow state to KVM: %s\n", strerror(errno));
8279b00ea49SDavid Gibson         return ret;
8289b00ea49SDavid Gibson     }
8299b00ea49SDavid Gibson 
8307388efafSDavid Gibson     assert((uintptr_t)&spapr_cpu->dtl_size
8317388efafSDavid Gibson            == ((uintptr_t)&spapr_cpu->dtl_addr + 8));
8329b00ea49SDavid Gibson     reg.id = KVM_REG_PPC_VPA_DTL;
8337388efafSDavid Gibson     reg.addr = (uintptr_t)&spapr_cpu->dtl_addr;
8349b00ea49SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
8359b00ea49SDavid Gibson     if (ret < 0) {
836da56ff91SPeter Maydell         DPRINTF("Unable to set dispatch trace log state to KVM: %s\n",
8379b00ea49SDavid Gibson                 strerror(errno));
8389b00ea49SDavid Gibson         return ret;
8399b00ea49SDavid Gibson     }
8409b00ea49SDavid Gibson 
8417388efafSDavid Gibson     if (!spapr_cpu->vpa_addr) {
8429b00ea49SDavid Gibson         reg.id = KVM_REG_PPC_VPA_ADDR;
8437388efafSDavid Gibson         reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
8449b00ea49SDavid Gibson         ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
8459b00ea49SDavid Gibson         if (ret < 0) {
846da56ff91SPeter Maydell             DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno));
8479b00ea49SDavid Gibson             return ret;
8489b00ea49SDavid Gibson         }
8499b00ea49SDavid Gibson     }
8509b00ea49SDavid Gibson 
8519b00ea49SDavid Gibson     return 0;
8529b00ea49SDavid Gibson }
8539b00ea49SDavid Gibson #endif /* TARGET_PPC64 */
8549b00ea49SDavid Gibson 
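/*
 * kvmppc_put_books_sregs() below pushes the Book S MMU state (SDR1 or the
 * vhyp-provided equivalent, the SLB, segment registers and BATs) to KVM in
 * a single KVM_SET_SREGS call.  For valid SLB entries the index is folded
 * into the low bits of the slbe field, presumably so KVM knows which SLB
 * slot each entry belongs to.
 */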
855e5c0d3ceSDavid Gibson int kvmppc_put_books_sregs(PowerPCCPU *cpu)
856a7a00a72SDavid Gibson {
857a7a00a72SDavid Gibson     CPUPPCState *env = &cpu->env;
858a7a00a72SDavid Gibson     struct kvm_sregs sregs;
859a7a00a72SDavid Gibson     int i;
860a7a00a72SDavid Gibson 
861a7a00a72SDavid Gibson     sregs.pvr = env->spr[SPR_PVR];
862a7a00a72SDavid Gibson 
8631ec26c75SGreg Kurz     if (cpu->vhyp) {
8641ec26c75SGreg Kurz         PPCVirtualHypervisorClass *vhc =
8651ec26c75SGreg Kurz             PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
8661ec26c75SGreg Kurz         sregs.u.s.sdr1 = vhc->encode_hpt_for_kvm_pr(cpu->vhyp);
8671ec26c75SGreg Kurz     } else {
868a7a00a72SDavid Gibson         sregs.u.s.sdr1 = env->spr[SPR_SDR1];
8691ec26c75SGreg Kurz     }
870a7a00a72SDavid Gibson 
871a7a00a72SDavid Gibson     /* Sync SLB */
872a7a00a72SDavid Gibson #ifdef TARGET_PPC64
873a7a00a72SDavid Gibson     for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
874a7a00a72SDavid Gibson         sregs.u.s.ppc64.slb[i].slbe = env->slb[i].esid;
875a7a00a72SDavid Gibson         if (env->slb[i].esid & SLB_ESID_V) {
876a7a00a72SDavid Gibson             sregs.u.s.ppc64.slb[i].slbe |= i;
877a7a00a72SDavid Gibson         }
878a7a00a72SDavid Gibson         sregs.u.s.ppc64.slb[i].slbv = env->slb[i].vsid;
879a7a00a72SDavid Gibson     }
880a7a00a72SDavid Gibson #endif
881a7a00a72SDavid Gibson 
882a7a00a72SDavid Gibson     /* Sync SRs */
883a7a00a72SDavid Gibson     for (i = 0; i < 16; i++) {
884a7a00a72SDavid Gibson         sregs.u.s.ppc32.sr[i] = env->sr[i];
885a7a00a72SDavid Gibson     }
886a7a00a72SDavid Gibson 
887a7a00a72SDavid Gibson     /* Sync BATs */
888a7a00a72SDavid Gibson     for (i = 0; i < 8; i++) {
889a7a00a72SDavid Gibson         /* Beware. We have to swap upper and lower bits here */
890a7a00a72SDavid Gibson         sregs.u.s.ppc32.dbat[i] = ((uint64_t)env->DBAT[0][i] << 32)
891a7a00a72SDavid Gibson             | env->DBAT[1][i];
892a7a00a72SDavid Gibson         sregs.u.s.ppc32.ibat[i] = ((uint64_t)env->IBAT[0][i] << 32)
893a7a00a72SDavid Gibson             | env->IBAT[1][i];
894a7a00a72SDavid Gibson     }
895a7a00a72SDavid Gibson 
896a7a00a72SDavid Gibson     return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
897a7a00a72SDavid Gibson }
898a7a00a72SDavid Gibson 
89920d695a9SAndreas Färber int kvm_arch_put_registers(CPUState *cs, int level)
900d76d1650Saurel32 {
90120d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
90220d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
903d76d1650Saurel32     struct kvm_regs regs;
904d76d1650Saurel32     int ret;
905d76d1650Saurel32     int i;
906d76d1650Saurel32 
9071bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
9081bc22652SAndreas Färber     if (ret < 0) {
909d76d1650Saurel32         return ret;
9101bc22652SAndreas Färber     }
911d76d1650Saurel32 
912d76d1650Saurel32     regs.ctr = env->ctr;
913d76d1650Saurel32     regs.lr  = env->lr;
914da91a00fSRichard Henderson     regs.xer = cpu_read_xer(env);
915d76d1650Saurel32     regs.msr = env->msr;
916d76d1650Saurel32     regs.pc = env->nip;
917d76d1650Saurel32 
918d76d1650Saurel32     regs.srr0 = env->spr[SPR_SRR0];
919d76d1650Saurel32     regs.srr1 = env->spr[SPR_SRR1];
920d76d1650Saurel32 
921d76d1650Saurel32     regs.sprg0 = env->spr[SPR_SPRG0];
922d76d1650Saurel32     regs.sprg1 = env->spr[SPR_SPRG1];
923d76d1650Saurel32     regs.sprg2 = env->spr[SPR_SPRG2];
924d76d1650Saurel32     regs.sprg3 = env->spr[SPR_SPRG3];
925d76d1650Saurel32     regs.sprg4 = env->spr[SPR_SPRG4];
926d76d1650Saurel32     regs.sprg5 = env->spr[SPR_SPRG5];
927d76d1650Saurel32     regs.sprg6 = env->spr[SPR_SPRG6];
928d76d1650Saurel32     regs.sprg7 = env->spr[SPR_SPRG7];
929d76d1650Saurel32 
93090dc8812SScott Wood     regs.pid = env->spr[SPR_BOOKE_PID];
93190dc8812SScott Wood 
932d76d1650Saurel32     for (i = 0; i < 32; i++)
933d76d1650Saurel32         regs.gpr[i] = env->gpr[i];
934d76d1650Saurel32 
9354bddaf55SAlexey Kardashevskiy     regs.cr = 0;
9364bddaf55SAlexey Kardashevskiy     for (i = 0; i < 8; i++) {
9374bddaf55SAlexey Kardashevskiy         regs.cr |= (env->crf[i] & 15) << (4 * (7 - i));
9384bddaf55SAlexey Kardashevskiy     }
9394bddaf55SAlexey Kardashevskiy 
9401bc22652SAndreas Färber     ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
941d76d1650Saurel32     if (ret < 0)
942d76d1650Saurel32         return ret;
943d76d1650Saurel32 
94470b79849SDavid Gibson     kvm_put_fp(cs);
94570b79849SDavid Gibson 
94693dd5e85SScott Wood     if (env->tlb_dirty) {
9471bc22652SAndreas Färber         kvm_sw_tlb_put(cpu);
94893dd5e85SScott Wood         env->tlb_dirty = false;
94993dd5e85SScott Wood     }
95093dd5e85SScott Wood 
951f1af19d7SDavid Gibson     if (cap_segstate && (level >= KVM_PUT_RESET_STATE)) {
952a7a00a72SDavid Gibson         ret = kvmppc_put_books_sregs(cpu);
953a7a00a72SDavid Gibson         if (ret < 0) {
954f1af19d7SDavid Gibson             return ret;
955f1af19d7SDavid Gibson         }
956f1af19d7SDavid Gibson     }
957f1af19d7SDavid Gibson 
958f1af19d7SDavid Gibson     if (cap_hior && (level >= KVM_PUT_RESET_STATE)) {
959d67d40eaSDavid Gibson         kvm_put_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
960d67d40eaSDavid Gibson     }
961f1af19d7SDavid Gibson 
962d67d40eaSDavid Gibson     if (cap_one_reg) {
963d67d40eaSDavid Gibson         int i;
964d67d40eaSDavid Gibson 
965d67d40eaSDavid Gibson         /* We deliberately ignore errors here: for kernels which have
966d67d40eaSDavid Gibson          * the ONE_REG calls but don't support the specific
967d67d40eaSDavid Gibson          * registers, there's a reasonable chance things will still
968d67d40eaSDavid Gibson          * work, at least until we try to migrate. */
969d67d40eaSDavid Gibson         for (i = 0; i < 1024; i++) {
970d67d40eaSDavid Gibson             uint64_t id = env->spr_cb[i].one_reg_id;
971d67d40eaSDavid Gibson 
972d67d40eaSDavid Gibson             if (id != 0) {
973d67d40eaSDavid Gibson                 kvm_put_one_spr(cs, id, i);
974d67d40eaSDavid Gibson             }
975f1af19d7SDavid Gibson         }
9769b00ea49SDavid Gibson 
9779b00ea49SDavid Gibson #ifdef TARGET_PPC64
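        /*
         * The checkpointed TM registers below are only meaningful while a
         * transaction is active or suspended, hence the msr_ts guard;
         * transferring them otherwise would presumably be pointless.
         */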
97880b3f79bSAlexey Kardashevskiy         if (msr_ts) {
97980b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
98080b3f79bSAlexey Kardashevskiy                 kvm_set_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
98180b3f79bSAlexey Kardashevskiy             }
98280b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
98380b3f79bSAlexey Kardashevskiy                 kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
98480b3f79bSAlexey Kardashevskiy             }
98580b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
98680b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
98780b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
98880b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
98980b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
99080b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
99180b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
99280b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
99380b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
99480b3f79bSAlexey Kardashevskiy             kvm_set_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
99580b3f79bSAlexey Kardashevskiy         }
99680b3f79bSAlexey Kardashevskiy 
9979b00ea49SDavid Gibson         if (cap_papr) {
9989b00ea49SDavid Gibson             if (kvm_put_vpa(cs) < 0) {
999da56ff91SPeter Maydell                 DPRINTF("Warning: Unable to set VPA information to KVM\n");
10009b00ea49SDavid Gibson             }
10019b00ea49SDavid Gibson         }
100298a8b524SAlexey Kardashevskiy 
100398a8b524SAlexey Kardashevskiy         kvm_set_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
10049b00ea49SDavid Gibson #endif /* TARGET_PPC64 */
1005f1af19d7SDavid Gibson     }
1006f1af19d7SDavid Gibson 
1007d76d1650Saurel32     return ret;
1008d76d1650Saurel32 }
1009d76d1650Saurel32 
1010c371c2e3SBharat Bhushan static void kvm_sync_excp(CPUPPCState *env, int vector, int ivor)
1011c371c2e3SBharat Bhushan {
1012c371c2e3SBharat Bhushan      env->excp_vectors[vector] = env->spr[ivor] + env->spr[SPR_BOOKE_IVPR];
1013c371c2e3SBharat Bhushan }
1014c371c2e3SBharat Bhushan 
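/*
 * On BookE the effective exception vector is the IVPR base plus the
 * per-exception IVOR offset; kvm_sync_excp() above mirrors that sum into
 * env->excp_vectors[] whenever an IVOR is refreshed from KVM, as done in
 * kvmppc_get_booke_sregs() below.
 */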
1015a7a00a72SDavid Gibson static int kvmppc_get_booke_sregs(PowerPCCPU *cpu)
1016d76d1650Saurel32 {
101720d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
1018ba5e5090SAlexander Graf     struct kvm_sregs sregs;
1019a7a00a72SDavid Gibson     int ret;
1020d76d1650Saurel32 
1021a7a00a72SDavid Gibson     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
102290dc8812SScott Wood     if (ret < 0) {
102390dc8812SScott Wood         return ret;
102490dc8812SScott Wood     }
102590dc8812SScott Wood 
102690dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_BASE) {
102790dc8812SScott Wood         env->spr[SPR_BOOKE_CSRR0] = sregs.u.e.csrr0;
102890dc8812SScott Wood         env->spr[SPR_BOOKE_CSRR1] = sregs.u.e.csrr1;
102990dc8812SScott Wood         env->spr[SPR_BOOKE_ESR] = sregs.u.e.esr;
103090dc8812SScott Wood         env->spr[SPR_BOOKE_DEAR] = sregs.u.e.dear;
103190dc8812SScott Wood         env->spr[SPR_BOOKE_MCSR] = sregs.u.e.mcsr;
103290dc8812SScott Wood         env->spr[SPR_BOOKE_TSR] = sregs.u.e.tsr;
103390dc8812SScott Wood         env->spr[SPR_BOOKE_TCR] = sregs.u.e.tcr;
103490dc8812SScott Wood         env->spr[SPR_DECR] = sregs.u.e.dec;
103590dc8812SScott Wood         env->spr[SPR_TBL] = sregs.u.e.tb & 0xffffffff;
103690dc8812SScott Wood         env->spr[SPR_TBU] = sregs.u.e.tb >> 32;
103790dc8812SScott Wood         env->spr[SPR_VRSAVE] = sregs.u.e.vrsave;
103890dc8812SScott Wood     }
103990dc8812SScott Wood 
104090dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_ARCH206) {
104190dc8812SScott Wood         env->spr[SPR_BOOKE_PIR] = sregs.u.e.pir;
104290dc8812SScott Wood         env->spr[SPR_BOOKE_MCSRR0] = sregs.u.e.mcsrr0;
104390dc8812SScott Wood         env->spr[SPR_BOOKE_MCSRR1] = sregs.u.e.mcsrr1;
104490dc8812SScott Wood         env->spr[SPR_BOOKE_DECAR] = sregs.u.e.decar;
104590dc8812SScott Wood         env->spr[SPR_BOOKE_IVPR] = sregs.u.e.ivpr;
104690dc8812SScott Wood     }
104790dc8812SScott Wood 
104890dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_64) {
104990dc8812SScott Wood         env->spr[SPR_BOOKE_EPCR] = sregs.u.e.epcr;
105090dc8812SScott Wood     }
105190dc8812SScott Wood 
105290dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_SPRG8) {
105390dc8812SScott Wood         env->spr[SPR_BOOKE_SPRG8] = sregs.u.e.sprg8;
105490dc8812SScott Wood     }
105590dc8812SScott Wood 
105690dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_IVOR) {
105790dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0];
1058c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_CRITICAL,  SPR_BOOKE_IVOR0);
105990dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1];
1060c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_MCHECK,  SPR_BOOKE_IVOR1);
106190dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2];
1062c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DSI,  SPR_BOOKE_IVOR2);
106390dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3];
1064c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_ISI,  SPR_BOOKE_IVOR3);
106590dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4];
1066c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_EXTERNAL,  SPR_BOOKE_IVOR4);
106790dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5];
1068c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_ALIGN,  SPR_BOOKE_IVOR5);
106990dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6];
1070c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_PROGRAM,  SPR_BOOKE_IVOR6);
107190dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7];
1072c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_FPU,  SPR_BOOKE_IVOR7);
107390dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8];
1074c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_SYSCALL,  SPR_BOOKE_IVOR8);
107590dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9];
1076c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_APU,  SPR_BOOKE_IVOR9);
107790dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10];
1078c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DECR,  SPR_BOOKE_IVOR10);
107990dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11];
1080c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_FIT,  SPR_BOOKE_IVOR11);
108190dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12];
1082c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_WDT,  SPR_BOOKE_IVOR12);
108390dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13];
1084c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DTLB,  SPR_BOOKE_IVOR13);
108590dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14];
1086c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_ITLB,  SPR_BOOKE_IVOR14);
108790dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15];
1088c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DEBUG,  SPR_BOOKE_IVOR15);
108990dc8812SScott Wood 
109090dc8812SScott Wood         if (sregs.u.e.features & KVM_SREGS_E_SPE) {
109190dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0];
1092c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_SPEU,  SPR_BOOKE_IVOR32);
109390dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1];
1094c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_EFPDI,  SPR_BOOKE_IVOR33);
109590dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2];
1096c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_EFPRI,  SPR_BOOKE_IVOR34);
109790dc8812SScott Wood         }
109890dc8812SScott Wood 
109990dc8812SScott Wood         if (sregs.u.e.features & KVM_SREGS_E_PM) {
110090dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3];
1101c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_EPERFM,  SPR_BOOKE_IVOR35);
110290dc8812SScott Wood         }
110390dc8812SScott Wood 
110490dc8812SScott Wood         if (sregs.u.e.features & KVM_SREGS_E_PC) {
110590dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4];
1106c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_DOORI,  SPR_BOOKE_IVOR36);
110790dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5];
1108c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_DOORCI, SPR_BOOKE_IVOR37);
110990dc8812SScott Wood         }
111090dc8812SScott Wood     }
111190dc8812SScott Wood 
111290dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_ARCH206_MMU) {
111390dc8812SScott Wood         env->spr[SPR_BOOKE_MAS0] = sregs.u.e.mas0;
111490dc8812SScott Wood         env->spr[SPR_BOOKE_MAS1] = sregs.u.e.mas1;
111590dc8812SScott Wood         env->spr[SPR_BOOKE_MAS2] = sregs.u.e.mas2;
111690dc8812SScott Wood         env->spr[SPR_BOOKE_MAS3] = sregs.u.e.mas7_3 & 0xffffffff;
111790dc8812SScott Wood         env->spr[SPR_BOOKE_MAS4] = sregs.u.e.mas4;
111890dc8812SScott Wood         env->spr[SPR_BOOKE_MAS6] = sregs.u.e.mas6;
111990dc8812SScott Wood         env->spr[SPR_BOOKE_MAS7] = sregs.u.e.mas7_3 >> 32;
112090dc8812SScott Wood         env->spr[SPR_MMUCFG] = sregs.u.e.mmucfg;
112190dc8812SScott Wood         env->spr[SPR_BOOKE_TLB0CFG] = sregs.u.e.tlbcfg[0];
112290dc8812SScott Wood         env->spr[SPR_BOOKE_TLB1CFG] = sregs.u.e.tlbcfg[1];
112390dc8812SScott Wood     }
112490dc8812SScott Wood 
112590dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_EXP) {
112690dc8812SScott Wood         env->spr[SPR_BOOKE_EPR] = sregs.u.e.epr;
112790dc8812SScott Wood     }
112890dc8812SScott Wood 
112990dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_PD) {
113090dc8812SScott Wood         env->spr[SPR_BOOKE_EPLC] = sregs.u.e.eplc;
113190dc8812SScott Wood         env->spr[SPR_BOOKE_EPSC] = sregs.u.e.epsc;
113290dc8812SScott Wood     }
113390dc8812SScott Wood 
113490dc8812SScott Wood     if (sregs.u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
113590dc8812SScott Wood         env->spr[SPR_E500_SVR] = sregs.u.e.impl.fsl.svr;
113690dc8812SScott Wood         env->spr[SPR_Exxx_MCAR] = sregs.u.e.impl.fsl.mcar;
113790dc8812SScott Wood         env->spr[SPR_HID0] = sregs.u.e.impl.fsl.hid0;
113890dc8812SScott Wood 
113990dc8812SScott Wood         if (sregs.u.e.impl.fsl.features & KVM_SREGS_E_FSL_PIDn) {
114090dc8812SScott Wood             env->spr[SPR_BOOKE_PID1] = sregs.u.e.impl.fsl.pid1;
114190dc8812SScott Wood             env->spr[SPR_BOOKE_PID2] = sregs.u.e.impl.fsl.pid2;
114290dc8812SScott Wood         }
114390dc8812SScott Wood     }
1144a7a00a72SDavid Gibson 
1145a7a00a72SDavid Gibson     return 0;
1146fafc0b6aSAlexander Graf }
114790dc8812SScott Wood 
1148a7a00a72SDavid Gibson static int kvmppc_get_books_sregs(PowerPCCPU *cpu)
1149a7a00a72SDavid Gibson {
1150a7a00a72SDavid Gibson     CPUPPCState *env = &cpu->env;
1151a7a00a72SDavid Gibson     struct kvm_sregs sregs;
1152a7a00a72SDavid Gibson     int ret;
1153a7a00a72SDavid Gibson     int i;
1154a7a00a72SDavid Gibson 
1155a7a00a72SDavid Gibson     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
115690dc8812SScott Wood     if (ret < 0) {
115790dc8812SScott Wood         return ret;
115890dc8812SScott Wood     }
115990dc8812SScott Wood 
1160e57ca75cSDavid Gibson     if (!cpu->vhyp) {
1161bb593904SDavid Gibson         ppc_store_sdr1(env, sregs.u.s.sdr1);
1162f3c75d42SAneesh Kumar K.V     }
1163ba5e5090SAlexander Graf 
1164ba5e5090SAlexander Graf     /* Sync SLB */
116582c09f2fSAlexander Graf #ifdef TARGET_PPC64
11664b4d4a21SAneesh Kumar K.V     /*
11674b4d4a21SAneesh Kumar K.V      * The packed SLB array we get from KVM_GET_SREGS only contains
1168a7a00a72SDavid Gibson      * information about valid entries. So we flush our internal copy
1169a7a00a72SDavid Gibson      * to get rid of stale ones, then put all valid SLB entries back
1170a7a00a72SDavid Gibson      * in.
11714b4d4a21SAneesh Kumar K.V      */
11724b4d4a21SAneesh Kumar K.V     memset(env->slb, 0, sizeof(env->slb));
1173d83af167SAneesh Kumar K.V     for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
11744b4d4a21SAneesh Kumar K.V         target_ulong rb = sregs.u.s.ppc64.slb[i].slbe;
11754b4d4a21SAneesh Kumar K.V         target_ulong rs = sregs.u.s.ppc64.slb[i].slbv;
11764b4d4a21SAneesh Kumar K.V         /*
11774b4d4a21SAneesh Kumar K.V          * Only restore valid entries
11784b4d4a21SAneesh Kumar K.V          */
11794b4d4a21SAneesh Kumar K.V         if (rb & SLB_ESID_V) {
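1180bcd81230SDavid Gibson             /*
1180bcd81230SDavid Gibson              * KVM packs the SLB slot number into the low 12 bits of the
1180bcd81230SDavid Gibson              * slbe word (as the masks below assume); strip it to recover
1180bcd81230SDavid Gibson              * the ESID proper.
1180bcd81230SDavid Gibson              */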
1180bcd81230SDavid Gibson             ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs);
11814b4d4a21SAneesh Kumar K.V         }
1182ba5e5090SAlexander Graf     }
118382c09f2fSAlexander Graf #endif
1184ba5e5090SAlexander Graf 
1185ba5e5090SAlexander Graf     /* Sync SRs */
1186ba5e5090SAlexander Graf     for (i = 0; i < 16; i++) {
1187ba5e5090SAlexander Graf         env->sr[i] = sregs.u.s.ppc32.sr[i];
1188ba5e5090SAlexander Graf     }
1189ba5e5090SAlexander Graf 
1190ba5e5090SAlexander Graf     /* Sync BATs */
1191ba5e5090SAlexander Graf     for (i = 0; i < 8; i++) {
1192ba5e5090SAlexander Graf         env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff;
1193ba5e5090SAlexander Graf         env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32;
1194ba5e5090SAlexander Graf         env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff;
1195ba5e5090SAlexander Graf         env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32;
1196ba5e5090SAlexander Graf     }
1197a7a00a72SDavid Gibson 
1198a7a00a72SDavid Gibson     return 0;
1199a7a00a72SDavid Gibson }
1200a7a00a72SDavid Gibson 
1201a7a00a72SDavid Gibson int kvm_arch_get_registers(CPUState *cs)
1202a7a00a72SDavid Gibson {
1203a7a00a72SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
1204a7a00a72SDavid Gibson     CPUPPCState *env = &cpu->env;
1205a7a00a72SDavid Gibson     struct kvm_regs regs;
1206a7a00a72SDavid Gibson     uint32_t cr;
1207a7a00a72SDavid Gibson     int i, ret;
1208a7a00a72SDavid Gibson 
1209a7a00a72SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
1210a7a00a72SDavid Gibson     if (ret < 0) {
1211a7a00a72SDavid Gibson         return ret;
1211a7a00a72SDavid Gibson     }
1212a7a00a72SDavid Gibson 
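1213a7a00a72SDavid Gibson     /*
1213a7a00a72SDavid Gibson      * regs.cr packs all eight 4-bit condition register fields into one
1213a7a00a72SDavid Gibson      * 32-bit word, CR0 in the most significant nibble; unpack it into
1213a7a00a72SDavid Gibson      * env->crf[].
1213a7a00a72SDavid Gibson      */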
1213a7a00a72SDavid Gibson     cr = regs.cr;
1214a7a00a72SDavid Gibson     for (i = 7; i >= 0; i--) {
1215a7a00a72SDavid Gibson         env->crf[i] = cr & 15;
1216a7a00a72SDavid Gibson         cr >>= 4;
1217a7a00a72SDavid Gibson     }
1218a7a00a72SDavid Gibson 
1219a7a00a72SDavid Gibson     env->ctr = regs.ctr;
1220a7a00a72SDavid Gibson     env->lr = regs.lr;
1221a7a00a72SDavid Gibson     cpu_write_xer(env, regs.xer);
1222a7a00a72SDavid Gibson     env->msr = regs.msr;
1223a7a00a72SDavid Gibson     env->nip = regs.pc;
1224a7a00a72SDavid Gibson 
1225a7a00a72SDavid Gibson     env->spr[SPR_SRR0] = regs.srr0;
1226a7a00a72SDavid Gibson     env->spr[SPR_SRR1] = regs.srr1;
1227a7a00a72SDavid Gibson 
1228a7a00a72SDavid Gibson     env->spr[SPR_SPRG0] = regs.sprg0;
1229a7a00a72SDavid Gibson     env->spr[SPR_SPRG1] = regs.sprg1;
1230a7a00a72SDavid Gibson     env->spr[SPR_SPRG2] = regs.sprg2;
1231a7a00a72SDavid Gibson     env->spr[SPR_SPRG3] = regs.sprg3;
1232a7a00a72SDavid Gibson     env->spr[SPR_SPRG4] = regs.sprg4;
1233a7a00a72SDavid Gibson     env->spr[SPR_SPRG5] = regs.sprg5;
1234a7a00a72SDavid Gibson     env->spr[SPR_SPRG6] = regs.sprg6;
1235a7a00a72SDavid Gibson     env->spr[SPR_SPRG7] = regs.sprg7;
1236a7a00a72SDavid Gibson 
1237a7a00a72SDavid Gibson     env->spr[SPR_BOOKE_PID] = regs.pid;
1238a7a00a72SDavid Gibson 
1239a7a00a72SDavid Gibson     for (i = 0; i < 32; i++) {
1240a7a00a72SDavid Gibson         env->gpr[i] = regs.gpr[i];
1240a7a00a72SDavid Gibson     }
1241a7a00a72SDavid Gibson 
1242a7a00a72SDavid Gibson     kvm_get_fp(cs);
1243a7a00a72SDavid Gibson 
1244a7a00a72SDavid Gibson     if (cap_booke_sregs) {
1245a7a00a72SDavid Gibson         ret = kvmppc_get_booke_sregs(cpu);
1246a7a00a72SDavid Gibson         if (ret < 0) {
1247a7a00a72SDavid Gibson             return ret;
1248a7a00a72SDavid Gibson         }
1249a7a00a72SDavid Gibson     }
1250a7a00a72SDavid Gibson 
1251a7a00a72SDavid Gibson     if (cap_segstate) {
1252a7a00a72SDavid Gibson         ret = kvmppc_get_books_sregs(cpu);
1253a7a00a72SDavid Gibson         if (ret < 0) {
1254a7a00a72SDavid Gibson             return ret;
1255a7a00a72SDavid Gibson         }
1256fafc0b6aSAlexander Graf     }
1257ba5e5090SAlexander Graf 
1258d67d40eaSDavid Gibson     if (cap_hior) {
1259d67d40eaSDavid Gibson         kvm_get_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
1260d67d40eaSDavid Gibson     }
1261d67d40eaSDavid Gibson 
1262d67d40eaSDavid Gibson     if (cap_one_reg) {
1263d67d40eaSDavid Gibson         int i;
1264d67d40eaSDavid Gibson 
1265d67d40eaSDavid Gibson         /*
1265d67d40eaSDavid Gibson          * We deliberately ignore errors here: for kernels which have
1266d67d40eaSDavid Gibson          * the ONE_REG calls but don't support the specific
1267d67d40eaSDavid Gibson          * registers, there's a reasonable chance things will still
1268d67d40eaSDavid Gibson          * work, at least until we try to migrate.
1268d67d40eaSDavid Gibson          */
1269d67d40eaSDavid Gibson         for (i = 0; i < 1024; i++) {
1270d67d40eaSDavid Gibson             uint64_t id = env->spr_cb[i].one_reg_id;
1271d67d40eaSDavid Gibson 
1272d67d40eaSDavid Gibson             if (id != 0) {
1273d67d40eaSDavid Gibson                 kvm_get_one_spr(cs, id, i);
1274d67d40eaSDavid Gibson             }
1275d67d40eaSDavid Gibson         }
12769b00ea49SDavid Gibson 
12779b00ea49SDavid Gibson #ifdef TARGET_PPC64
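127880b3f79bSAlexey Kardashevskiy         /*
127880b3f79bSAlexey Kardashevskiy          * As on the put side, the TM checkpointed registers are only
127880b3f79bSAlexey Kardashevskiy          * valid while MSR[TS] indicates an active or suspended
127880b3f79bSAlexey Kardashevskiy          * transaction.
127880b3f79bSAlexey Kardashevskiy          */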
127880b3f79bSAlexey Kardashevskiy         if (msr_ts) {
127980b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
128080b3f79bSAlexey Kardashevskiy                 kvm_get_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
128180b3f79bSAlexey Kardashevskiy             }
128280b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
128380b3f79bSAlexey Kardashevskiy                 kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
128480b3f79bSAlexey Kardashevskiy             }
128580b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
128680b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
128780b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
128880b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
128980b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
129080b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
129180b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
129280b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
129380b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
129480b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
129580b3f79bSAlexey Kardashevskiy         }
129680b3f79bSAlexey Kardashevskiy 
12979b00ea49SDavid Gibson         if (cap_papr) {
12989b00ea49SDavid Gibson             if (kvm_get_vpa(cs) < 0) {
1299da56ff91SPeter Maydell                 DPRINTF("Warning: Unable to get VPA information from KVM\n");
13009b00ea49SDavid Gibson             }
13019b00ea49SDavid Gibson         }
130298a8b524SAlexey Kardashevskiy 
130398a8b524SAlexey Kardashevskiy         kvm_get_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
13049b00ea49SDavid Gibson #endif
1305d67d40eaSDavid Gibson     }
1306d67d40eaSDavid Gibson 
1307d76d1650Saurel32     return 0;
1308d76d1650Saurel32 }
1309d76d1650Saurel32 
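13101bc22652SAndreas Färber /*
13101bc22652SAndreas Färber  * Raise or lower the guest's external interrupt pin via KVM_INTERRUPT.
13101bc22652SAndreas Färber  * This only does anything for PPC_INTERRUPT_EXT and only when the
13101bc22652SAndreas Färber  * kernel supports level-triggered injection (cap_interrupt_unset and
13101bc22652SAndreas Färber  * cap_interrupt_level); otherwise it is a no-op.
13101bc22652SAndreas Färber  */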
13101bc22652SAndreas Färber int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
1311fc87e185SAlexander Graf {
1312fc87e185SAlexander Graf     unsigned virq = level ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;
1313fc87e185SAlexander Graf 
1314fc87e185SAlexander Graf     if (irq != PPC_INTERRUPT_EXT) {
1315fc87e185SAlexander Graf         return 0;
1316fc87e185SAlexander Graf     }
1317fc87e185SAlexander Graf 
1318fc87e185SAlexander Graf     if (!kvm_enabled() || !cap_interrupt_unset || !cap_interrupt_level) {
1319fc87e185SAlexander Graf         return 0;
1320fc87e185SAlexander Graf     }
1321fc87e185SAlexander Graf 
13221bc22652SAndreas Färber     kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq);
1323fc87e185SAlexander Graf 
1324fc87e185SAlexander Graf     return 0;
1325fc87e185SAlexander Graf }
1326fc87e185SAlexander Graf 
1327a69dc537SThomas Huth #if defined(TARGET_PPC64)
132816415335SAlexander Graf #define PPC_INPUT_INT PPC970_INPUT_INT
132916415335SAlexander Graf #else
133016415335SAlexander Graf #define PPC_INPUT_INT PPC6xx_INPUT_INT
133116415335SAlexander Graf #endif
133216415335SAlexander Graf 
133320d695a9SAndreas Färber void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
1334d76d1650Saurel32 {
133520d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
133620d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
1337d76d1650Saurel32     int r;
1338d76d1650Saurel32     unsigned irq;
1339d76d1650Saurel32 
13404b8523eeSJan Kiszka     qemu_mutex_lock_iothread();
13414b8523eeSJan Kiszka 
13425cbdb3a3SStefan Weil     /* PowerPC QEMU tracks the various core input pins (interrupt, critical
1343d76d1650Saurel32      * interrupt, reset, etc) in PPC-specific env->irq_input_state. */
1344fc87e185SAlexander Graf     if (!cap_interrupt_level &&
1345fc87e185SAlexander Graf         run->ready_for_interrupt_injection &&
1346259186a7SAndreas Färber         (cs->interrupt_request & CPU_INTERRUPT_HARD) &&
134716415335SAlexander Graf         (env->irq_input_state & (1<<PPC_INPUT_INT)))
1348d76d1650Saurel32     {
1349d76d1650Saurel32         /* For now KVM disregards the 'irq' argument. However, in the
1350d76d1650Saurel32          * future KVM could cache it in-kernel to avoid a heavyweight exit
1351d76d1650Saurel32          * when reading the UIC.
1352d76d1650Saurel32          */
1353fc87e185SAlexander Graf         irq = KVM_INTERRUPT_SET;
1354d76d1650Saurel32 
1355da56ff91SPeter Maydell         DPRINTF("injected interrupt %d\n", irq);
13561bc22652SAndreas Färber         r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &irq);
135755e5c285SAndreas Färber         if (r < 0) {
135855e5c285SAndreas Färber             printf("cpu %d fail inject %x\n", cs->cpu_index, irq);
135955e5c285SAndreas Färber         }
1360c821c2bdSAlexander Graf 
1361c821c2bdSAlexander Graf         /* Always wake up soon in case the interrupt was level based */
1362bc72ad67SAlex Bligh         timer_mod(idle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
136373bcb24dSRutuja Shah                        (NANOSECONDS_PER_SECOND / 50));
1364d76d1650Saurel32     }
1365d76d1650Saurel32 
1366d76d1650Saurel32     /*
1366d76d1650Saurel32      * We don't know if there are more interrupts pending after this.
1367d76d1650Saurel32      * However, the guest will return to userspace in the course of
1368d76d1650Saurel32      * handling this one anyway, so we will get a chance to deliver the
1368d76d1650Saurel32      * rest.
1368d76d1650Saurel32      */
13694b8523eeSJan Kiszka 
13704b8523eeSJan Kiszka     qemu_mutex_unlock_iothread();
1371d76d1650Saurel32 }
1372d76d1650Saurel32 
13734c663752SPaolo Bonzini MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
1374d76d1650Saurel32 {
13754c663752SPaolo Bonzini     return MEMTXATTRS_UNSPECIFIED;
1376d76d1650Saurel32 }
1377d76d1650Saurel32 
137820d695a9SAndreas Färber int kvm_arch_process_async_events(CPUState *cs)
13790af691d7SMarcelo Tosatti {
1380259186a7SAndreas Färber     return cs->halted;
13810af691d7SMarcelo Tosatti }
13820af691d7SMarcelo Tosatti 
1383259186a7SAndreas Färber static int kvmppc_handle_halt(PowerPCCPU *cpu)
1384d76d1650Saurel32 {
1385259186a7SAndreas Färber     CPUState *cs = CPU(cpu);
1386259186a7SAndreas Färber     CPUPPCState *env = &cpu->env;
1387259186a7SAndreas Färber 
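1388259186a7SAndreas Färber     /*
1388259186a7SAndreas Färber      * Only go idle when external interrupts are enabled (MSR_EE) and
1388259186a7SAndreas Färber      * none is currently pending; otherwise let the vCPU keep running.
1388259186a7SAndreas Färber      */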
1388259186a7SAndreas Färber     if (!(cs->interrupt_request & CPU_INTERRUPT_HARD) && (msr_ee)) {
1389259186a7SAndreas Färber         cs->halted = 1;
139027103424SAndreas Färber         cs->exception_index = EXCP_HLT;
1391d76d1650Saurel32     }
1392d76d1650Saurel32 
1393bb4ea393SJan Kiszka     return 0;
1394d76d1650Saurel32 }
1395d76d1650Saurel32 
1396d76d1650Saurel32 /* map dcr access to existing qemu dcr emulation */
13971328c2bfSAndreas Färber static int kvmppc_handle_dcr_read(CPUPPCState *env, uint32_t dcrn, uint32_t *data)
1398d76d1650Saurel32 {
1399d76d1650Saurel32     if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0) {
1400d76d1650Saurel32         fprintf(stderr, "Read to unhandled DCR (0x%x)\n", dcrn);
1400d76d1650Saurel32     }
1401d76d1650Saurel32 
1402bb4ea393SJan Kiszka     return 0;
1403d76d1650Saurel32 }
1404d76d1650Saurel32 
14051328c2bfSAndreas Färber static int kvmppc_handle_dcr_write(CPUPPCState *env, uint32_t dcrn, uint32_t data)
1406d76d1650Saurel32 {
1407d76d1650Saurel32     if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0) {
1408d76d1650Saurel32         fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);
1408d76d1650Saurel32     }
1409d76d1650Saurel32 
1410bb4ea393SJan Kiszka     return 0;
1411d76d1650Saurel32 }
1412d76d1650Saurel32 
14138a0548f9SBharat Bhushan int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
14148a0548f9SBharat Bhushan {
14158a0548f9SBharat Bhushan     /* Mixed endian case is not handled */
14168a0548f9SBharat Bhushan     uint32_t sc = debug_inst_opcode;
14178a0548f9SBharat Bhushan 
14188a0548f9SBharat Bhushan     if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
14198a0548f9SBharat Bhushan                             sizeof(sc), 0) ||
14208a0548f9SBharat Bhushan         cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 1)) {
14218a0548f9SBharat Bhushan         return -EINVAL;
14228a0548f9SBharat Bhushan     }
14238a0548f9SBharat Bhushan 
14248a0548f9SBharat Bhushan     return 0;
14258a0548f9SBharat Bhushan }
14268a0548f9SBharat Bhushan 
14278a0548f9SBharat Bhushan int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
14288a0548f9SBharat Bhushan {
14298a0548f9SBharat Bhushan     uint32_t sc;
14308a0548f9SBharat Bhushan 
14318a0548f9SBharat Bhushan     if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 0) ||
14328a0548f9SBharat Bhushan         sc != debug_inst_opcode ||
14338a0548f9SBharat Bhushan         cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
14348a0548f9SBharat Bhushan                             sizeof(sc), 1)) {
14358a0548f9SBharat Bhushan         return -EINVAL;
14368a0548f9SBharat Bhushan     }
14378a0548f9SBharat Bhushan 
14388a0548f9SBharat Bhushan     return 0;
14398a0548f9SBharat Bhushan }
14408a0548f9SBharat Bhushan 
144188365d17SBharat Bhushan static int find_hw_breakpoint(target_ulong addr, int type)
144288365d17SBharat Bhushan {
144388365d17SBharat Bhushan     int n;
144488365d17SBharat Bhushan 
144588365d17SBharat Bhushan     assert((nb_hw_breakpoint + nb_hw_watchpoint)
144688365d17SBharat Bhushan            <= ARRAY_SIZE(hw_debug_points));
144788365d17SBharat Bhushan 
144888365d17SBharat Bhushan     for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
144988365d17SBharat Bhushan         if (hw_debug_points[n].addr == addr &&
145088365d17SBharat Bhushan              hw_debug_points[n].type == type) {
145188365d17SBharat Bhushan             return n;
145288365d17SBharat Bhushan         }
145388365d17SBharat Bhushan     }
145488365d17SBharat Bhushan 
145588365d17SBharat Bhushan     return -1;
145688365d17SBharat Bhushan }
145788365d17SBharat Bhushan 
145888365d17SBharat Bhushan static int find_hw_watchpoint(target_ulong addr, int *flag)
145988365d17SBharat Bhushan {
146088365d17SBharat Bhushan     int n;
146188365d17SBharat Bhushan 
146288365d17SBharat Bhushan     n = find_hw_breakpoint(addr, GDB_WATCHPOINT_ACCESS);
146388365d17SBharat Bhushan     if (n >= 0) {
146488365d17SBharat Bhushan         *flag = BP_MEM_ACCESS;
146588365d17SBharat Bhushan         return n;
146688365d17SBharat Bhushan     }
146788365d17SBharat Bhushan 
146888365d17SBharat Bhushan     n = find_hw_breakpoint(addr, GDB_WATCHPOINT_WRITE);
146988365d17SBharat Bhushan     if (n >= 0) {
147088365d17SBharat Bhushan         *flag = BP_MEM_WRITE;
147188365d17SBharat Bhushan         return n;
147288365d17SBharat Bhushan     }
147388365d17SBharat Bhushan 
147488365d17SBharat Bhushan     n = find_hw_breakpoint(addr, GDB_WATCHPOINT_READ);
147588365d17SBharat Bhushan     if (n >= 0) {
147688365d17SBharat Bhushan         *flag = BP_MEM_READ;
147788365d17SBharat Bhushan         return n;
147888365d17SBharat Bhushan     }
147988365d17SBharat Bhushan 
148088365d17SBharat Bhushan     return -1;
148188365d17SBharat Bhushan }
148288365d17SBharat Bhushan 
148388365d17SBharat Bhushan int kvm_arch_insert_hw_breakpoint(target_ulong addr,
148488365d17SBharat Bhushan                                   target_ulong len, int type)
148588365d17SBharat Bhushan {
148688365d17SBharat Bhushan     if ((nb_hw_breakpoint + nb_hw_watchpoint) >= ARRAY_SIZE(hw_debug_points)) {
148788365d17SBharat Bhushan         return -ENOBUFS;
148888365d17SBharat Bhushan     }
148988365d17SBharat Bhushan 
149088365d17SBharat Bhushan     hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].addr = addr;
149188365d17SBharat Bhushan     hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].type = type;
149288365d17SBharat Bhushan 
149388365d17SBharat Bhushan     switch (type) {
149488365d17SBharat Bhushan     case GDB_BREAKPOINT_HW:
149588365d17SBharat Bhushan         if (nb_hw_breakpoint >= max_hw_breakpoint) {
149688365d17SBharat Bhushan             return -ENOBUFS;
149788365d17SBharat Bhushan         }
149888365d17SBharat Bhushan 
149988365d17SBharat Bhushan         if (find_hw_breakpoint(addr, type) >= 0) {
150088365d17SBharat Bhushan             return -EEXIST;
150188365d17SBharat Bhushan         }
150288365d17SBharat Bhushan 
150388365d17SBharat Bhushan         nb_hw_breakpoint++;
150488365d17SBharat Bhushan         break;
150588365d17SBharat Bhushan 
150688365d17SBharat Bhushan     case GDB_WATCHPOINT_WRITE:
150788365d17SBharat Bhushan     case GDB_WATCHPOINT_READ:
150888365d17SBharat Bhushan     case GDB_WATCHPOINT_ACCESS:
150988365d17SBharat Bhushan         if (nb_hw_watchpoint >= max_hw_watchpoint) {
151088365d17SBharat Bhushan             return -ENOBUFS;
151188365d17SBharat Bhushan         }
151288365d17SBharat Bhushan 
151388365d17SBharat Bhushan         if (find_hw_breakpoint(addr, type) >= 0) {
151488365d17SBharat Bhushan             return -EEXIST;
151588365d17SBharat Bhushan         }
151688365d17SBharat Bhushan 
151788365d17SBharat Bhushan         nb_hw_watchpoint++;
151888365d17SBharat Bhushan         break;
151988365d17SBharat Bhushan 
152088365d17SBharat Bhushan     default:
152188365d17SBharat Bhushan         return -ENOSYS;
152288365d17SBharat Bhushan     }
152388365d17SBharat Bhushan 
152488365d17SBharat Bhushan     return 0;
152588365d17SBharat Bhushan }
152688365d17SBharat Bhushan 
152788365d17SBharat Bhushan int kvm_arch_remove_hw_breakpoint(target_ulong addr,
152888365d17SBharat Bhushan                                   target_ulong len, int type)
152988365d17SBharat Bhushan {
153088365d17SBharat Bhushan     int n;
153188365d17SBharat Bhushan 
153288365d17SBharat Bhushan     n = find_hw_breakpoint(addr, type);
153388365d17SBharat Bhushan     if (n < 0) {
153488365d17SBharat Bhushan         return -ENOENT;
153588365d17SBharat Bhushan     }
153688365d17SBharat Bhushan 
153788365d17SBharat Bhushan     switch (type) {
153888365d17SBharat Bhushan     case GDB_BREAKPOINT_HW:
153988365d17SBharat Bhushan         nb_hw_breakpoint--;
154088365d17SBharat Bhushan         break;
154188365d17SBharat Bhushan 
154288365d17SBharat Bhushan     case GDB_WATCHPOINT_WRITE:
154388365d17SBharat Bhushan     case GDB_WATCHPOINT_READ:
154488365d17SBharat Bhushan     case GDB_WATCHPOINT_ACCESS:
154588365d17SBharat Bhushan         nb_hw_watchpoint--;
154688365d17SBharat Bhushan         break;
154788365d17SBharat Bhushan 
154888365d17SBharat Bhushan     default:
154988365d17SBharat Bhushan         return -ENOSYS;
155088365d17SBharat Bhushan     }
155188365d17SBharat Bhushan     hw_debug_points[n] = hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint];
155288365d17SBharat Bhushan 
155388365d17SBharat Bhushan     return 0;
155488365d17SBharat Bhushan }
155588365d17SBharat Bhushan 
155688365d17SBharat Bhushan void kvm_arch_remove_all_hw_breakpoints(void)
155788365d17SBharat Bhushan {
155888365d17SBharat Bhushan     nb_hw_breakpoint = nb_hw_watchpoint = 0;
155988365d17SBharat Bhushan }
156088365d17SBharat Bhushan 
15618a0548f9SBharat Bhushan void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
15628a0548f9SBharat Bhushan {
156388365d17SBharat Bhushan     int n;
156488365d17SBharat Bhushan 
15658a0548f9SBharat Bhushan     /* Software Breakpoint updates */
15668a0548f9SBharat Bhushan     if (kvm_sw_breakpoints_active(cs)) {
15678a0548f9SBharat Bhushan         dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
15688a0548f9SBharat Bhushan     }
156988365d17SBharat Bhushan 
157088365d17SBharat Bhushan     assert((nb_hw_breakpoint + nb_hw_watchpoint)
157188365d17SBharat Bhushan            <= ARRAY_SIZE(hw_debug_points));
157288365d17SBharat Bhushan     assert((nb_hw_breakpoint + nb_hw_watchpoint) <= ARRAY_SIZE(dbg->arch.bp));
157388365d17SBharat Bhushan 
157488365d17SBharat Bhushan     if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
157588365d17SBharat Bhushan         dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
157688365d17SBharat Bhushan         memset(dbg->arch.bp, 0, sizeof(dbg->arch.bp));
157788365d17SBharat Bhushan         for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
157888365d17SBharat Bhushan             switch (hw_debug_points[n].type) {
157988365d17SBharat Bhushan             case GDB_BREAKPOINT_HW:
158088365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_BREAKPOINT;
158188365d17SBharat Bhushan                 break;
158288365d17SBharat Bhushan             case GDB_WATCHPOINT_WRITE:
158388365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE;
158488365d17SBharat Bhushan                 break;
158588365d17SBharat Bhushan             case GDB_WATCHPOINT_READ:
158688365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_READ;
158788365d17SBharat Bhushan                 break;
158888365d17SBharat Bhushan             case GDB_WATCHPOINT_ACCESS:
158988365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE |
159088365d17SBharat Bhushan                                         KVMPPC_DEBUG_WATCH_READ;
159188365d17SBharat Bhushan                 break;
159288365d17SBharat Bhushan             default:
159388365d17SBharat Bhushan                 cpu_abort(cs, "Unsupported breakpoint type\n");
159488365d17SBharat Bhushan             }
159588365d17SBharat Bhushan             dbg->arch.bp[n].addr = hw_debug_points[n].addr;
159688365d17SBharat Bhushan         }
159788365d17SBharat Bhushan     }
15988a0548f9SBharat Bhushan }
15998a0548f9SBharat Bhushan 
16002cbd1581SFabiano Rosas static int kvm_handle_hw_breakpoint(CPUState *cs,
16012cbd1581SFabiano Rosas                                     struct kvm_debug_exit_arch *arch_info)
16028a0548f9SBharat Bhushan {
16038a0548f9SBharat Bhushan     int handle = 0;
160488365d17SBharat Bhushan     int n;
160588365d17SBharat Bhushan     int flag = 0;
16068a0548f9SBharat Bhushan 
160788365d17SBharat Bhushan     if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
160888365d17SBharat Bhushan         if (arch_info->status & KVMPPC_DEBUG_BREAKPOINT) {
160988365d17SBharat Bhushan             n = find_hw_breakpoint(arch_info->address, GDB_BREAKPOINT_HW);
161088365d17SBharat Bhushan             if (n >= 0) {
161188365d17SBharat Bhushan                 handle = 1;
161288365d17SBharat Bhushan             }
161388365d17SBharat Bhushan         } else if (arch_info->status & (KVMPPC_DEBUG_WATCH_READ |
161488365d17SBharat Bhushan                                         KVMPPC_DEBUG_WATCH_WRITE)) {
161588365d17SBharat Bhushan             n = find_hw_watchpoint(arch_info->address,  &flag);
161688365d17SBharat Bhushan             if (n >= 0) {
161788365d17SBharat Bhushan                 handle = 1;
161888365d17SBharat Bhushan                 cs->watchpoint_hit = &hw_watchpoint;
161988365d17SBharat Bhushan                 hw_watchpoint.vaddr = hw_debug_points[n].addr;
162088365d17SBharat Bhushan                 hw_watchpoint.flags = flag;
162188365d17SBharat Bhushan             }
162288365d17SBharat Bhushan         }
162388365d17SBharat Bhushan     }
16242cbd1581SFabiano Rosas     return handle;
16252cbd1581SFabiano Rosas }
16262cbd1581SFabiano Rosas 
1627468e3a1aSFabiano Rosas static int kvm_handle_singlestep(void)
1628468e3a1aSFabiano Rosas {
1629468e3a1aSFabiano Rosas     return 1;
1630468e3a1aSFabiano Rosas }
1631468e3a1aSFabiano Rosas 
1632468e3a1aSFabiano Rosas static int kvm_handle_sw_breakpoint(void)
1633468e3a1aSFabiano Rosas {
1634468e3a1aSFabiano Rosas     return 1;
1635468e3a1aSFabiano Rosas }
1636468e3a1aSFabiano Rosas 
16372cbd1581SFabiano Rosas static int kvm_handle_debug(PowerPCCPU *cpu, struct kvm_run *run)
16382cbd1581SFabiano Rosas {
16392cbd1581SFabiano Rosas     CPUState *cs = CPU(cpu);
16402cbd1581SFabiano Rosas     CPUPPCState *env = &cpu->env;
16412cbd1581SFabiano Rosas     struct kvm_debug_exit_arch *arch_info = &run->debug.arch;
16422cbd1581SFabiano Rosas 
16432cbd1581SFabiano Rosas     if (cs->singlestep_enabled) {
1644468e3a1aSFabiano Rosas         return kvm_handle_singlestep();
1645468e3a1aSFabiano Rosas     }
1646468e3a1aSFabiano Rosas 
1647468e3a1aSFabiano Rosas     if (arch_info->status) {
1648468e3a1aSFabiano Rosas         return kvm_handle_hw_breakpoint(cs, arch_info);
1649468e3a1aSFabiano Rosas     }
1650468e3a1aSFabiano Rosas 
1651468e3a1aSFabiano Rosas     if (kvm_find_sw_breakpoint(cs, arch_info->address)) {
1652468e3a1aSFabiano Rosas         return kvm_handle_sw_breakpoint();
1653468e3a1aSFabiano Rosas     }
1654468e3a1aSFabiano Rosas 
1655468e3a1aSFabiano Rosas     /*
1656468e3a1aSFabiano Rosas      * QEMU is not able to handle the debug exception, so inject a
16578a0548f9SBharat Bhushan      * program exception into the guest instead;
16588a0548f9SBharat Bhushan      * yes, a program exception, NOT a debug exception!
165988365d17SBharat Bhushan      * When QEMU is using the debug resources, the debug exception
166088365d17SBharat Bhushan      * must always be set. To achieve this we set MSR_DE and also
166188365d17SBharat Bhushan      * set MSRP_DEP so the guest cannot change MSR_DE.
166288365d17SBharat Bhushan      * When emulating the debug resources for the guest, we want the
166388365d17SBharat Bhushan      * guest to control MSR_DE (enable/disable the debug interrupt
166488365d17SBharat Bhushan      * on demand).
166588365d17SBharat Bhushan      * Supporting both configurations at once is NOT possible.
166688365d17SBharat Bhushan      * So the result is that we cannot share debug resources
166788365d17SBharat Bhushan      * between QEMU and the guest on the BookE architecture.
166888365d17SBharat Bhushan      * In the current design QEMU gets priority over the guest: if
166988365d17SBharat Bhushan      * QEMU is using the debug resources then the guest cannot use
16708a0548f9SBharat Bhushan      * them.
16718a0548f9SBharat Bhushan      * For software breakpoints QEMU uses a privileged instruction,
16728a0548f9SBharat Bhushan      * so we cannot be here because the guest raised a debug
16738a0548f9SBharat Bhushan      * exception; the only possibility is that the guest executed a
16748a0548f9SBharat Bhushan      * privileged / illegal instruction, and that's why we are
16748a0548f9SBharat Bhushan      * injecting a program interrupt.
16758a0548f9SBharat Bhushan      */
16768a0548f9SBharat Bhushan     cpu_synchronize_state(cs);
1677468e3a1aSFabiano Rosas     /*
1678468e3a1aSFabiano Rosas      * env->nip is the PC, so increment it by 4 to use
16798a0548f9SBharat Bhushan      * ppc_cpu_do_interrupt(), which sets srr0 = env->nip - 4.
16808a0548f9SBharat Bhushan      */
16818a0548f9SBharat Bhushan     env->nip += 4;
16828a0548f9SBharat Bhushan     cs->exception_index = POWERPC_EXCP_PROGRAM;
16838a0548f9SBharat Bhushan     env->error_code = POWERPC_EXCP_INVAL;
16848a0548f9SBharat Bhushan     ppc_cpu_do_interrupt(cs);
16858a0548f9SBharat Bhushan 
1686468e3a1aSFabiano Rosas     return 0;
16878a0548f9SBharat Bhushan }
16888a0548f9SBharat Bhushan 
168920d695a9SAndreas Färber int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
1690d76d1650Saurel32 {
169120d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
169220d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
1693bb4ea393SJan Kiszka     int ret;
1694d76d1650Saurel32 
16954b8523eeSJan Kiszka     qemu_mutex_lock_iothread();
16964b8523eeSJan Kiszka 
1697d76d1650Saurel32     switch (run->exit_reason) {
1698d76d1650Saurel32     case KVM_EXIT_DCR:
1699d76d1650Saurel32         if (run->dcr.is_write) {
1700da56ff91SPeter Maydell             DPRINTF("handle dcr write\n");
1701d76d1650Saurel32             ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data);
1702d76d1650Saurel32         } else {
1703da56ff91SPeter Maydell             DPRINTF("handle dcr read\n");
1704d76d1650Saurel32             ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data);
1705d76d1650Saurel32         }
1706d76d1650Saurel32         break;
1707d76d1650Saurel32     case KVM_EXIT_HLT:
1708da56ff91SPeter Maydell         DPRINTF("handle halt\n");
1709259186a7SAndreas Färber         ret = kvmppc_handle_halt(cpu);
1710d76d1650Saurel32         break;
1711c6304a4aSDavid Gibson #if defined(TARGET_PPC64)
1712f61b4bedSAlexander Graf     case KVM_EXIT_PAPR_HCALL:
1713da56ff91SPeter Maydell         DPRINTF("handle PAPR hypercall\n");
171420d695a9SAndreas Färber         run->papr_hcall.ret = spapr_hypercall(cpu,
1715aa100fa4SAndreas Färber                                               run->papr_hcall.nr,
1716f61b4bedSAlexander Graf                                               run->papr_hcall.args);
171778e8fde2SDavid Gibson         ret = 0;
1718f61b4bedSAlexander Graf         break;
1719f61b4bedSAlexander Graf #endif
17205b95b8b9SAlexander Graf     case KVM_EXIT_EPR:
1721da56ff91SPeter Maydell         DPRINTF("handle epr\n");
1722933b19eaSAlexander Graf         run->epr.epr = ldl_phys(cs->as, env->mpic_iack);
17235b95b8b9SAlexander Graf         ret = 0;
17245b95b8b9SAlexander Graf         break;
172531f2cb8fSBharat Bhushan     case KVM_EXIT_WATCHDOG:
1726da56ff91SPeter Maydell         DPRINTF("handle watchdog expiry\n");
172731f2cb8fSBharat Bhushan         watchdog_perform_action();
172831f2cb8fSBharat Bhushan         ret = 0;
172931f2cb8fSBharat Bhushan         break;
173031f2cb8fSBharat Bhushan 
17318a0548f9SBharat Bhushan     case KVM_EXIT_DEBUG:
17328a0548f9SBharat Bhushan         DPRINTF("handle debug exception\n");
17338a0548f9SBharat Bhushan         if (kvm_handle_debug(cpu, run)) {
17348a0548f9SBharat Bhushan             ret = EXCP_DEBUG;
17358a0548f9SBharat Bhushan             break;
17368a0548f9SBharat Bhushan         }
17378a0548f9SBharat Bhushan         /* re-enter, this exception was guest-internal */
17388a0548f9SBharat Bhushan         ret = 0;
17398a0548f9SBharat Bhushan         break;
17408a0548f9SBharat Bhushan 
174173aaec4aSJan Kiszka     default:
174273aaec4aSJan Kiszka         fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
174373aaec4aSJan Kiszka         ret = -1;
174473aaec4aSJan Kiszka         break;
1745d76d1650Saurel32     }
1746d76d1650Saurel32 
17474b8523eeSJan Kiszka     qemu_mutex_unlock_iothread();
1748d76d1650Saurel32     return ret;
1749d76d1650Saurel32 }
1750d76d1650Saurel32 
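175131f2cb8fSBharat Bhushan /*
175131f2cb8fSBharat Bhushan  * KVM_REG_PPC_OR_TSR and KVM_REG_PPC_CLEAR_TSR are pseudo registers:
175131f2cb8fSBharat Bhushan  * writing them asks the kernel to set or clear the given bits in the
175131f2cb8fSBharat Bhushan  * guest's Timer Status Register rather than storing a value directly.
175131f2cb8fSBharat Bhushan  */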
175131f2cb8fSBharat Bhushan int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
175231f2cb8fSBharat Bhushan {
175331f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
175431f2cb8fSBharat Bhushan     uint32_t bits = tsr_bits;
175531f2cb8fSBharat Bhushan     struct kvm_one_reg reg = {
175631f2cb8fSBharat Bhushan         .id = KVM_REG_PPC_OR_TSR,
175731f2cb8fSBharat Bhushan         .addr = (uintptr_t) &bits,
175831f2cb8fSBharat Bhushan     };
175931f2cb8fSBharat Bhushan 
176031f2cb8fSBharat Bhushan     return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
176131f2cb8fSBharat Bhushan }
176231f2cb8fSBharat Bhushan 
176331f2cb8fSBharat Bhushan int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
176431f2cb8fSBharat Bhushan {
176631f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
176731f2cb8fSBharat Bhushan     uint32_t bits = tsr_bits;
176831f2cb8fSBharat Bhushan     struct kvm_one_reg reg = {
176931f2cb8fSBharat Bhushan         .id = KVM_REG_PPC_CLEAR_TSR,
177031f2cb8fSBharat Bhushan         .addr = (uintptr_t) &bits,
177131f2cb8fSBharat Bhushan     };
177231f2cb8fSBharat Bhushan 
177331f2cb8fSBharat Bhushan     return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
177431f2cb8fSBharat Bhushan }
177531f2cb8fSBharat Bhushan 
177631f2cb8fSBharat Bhushan int kvmppc_set_tcr(PowerPCCPU *cpu)
177731f2cb8fSBharat Bhushan {
177831f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
177931f2cb8fSBharat Bhushan     CPUPPCState *env = &cpu->env;
178031f2cb8fSBharat Bhushan     uint32_t tcr = env->spr[SPR_BOOKE_TCR];
178131f2cb8fSBharat Bhushan 
178231f2cb8fSBharat Bhushan     struct kvm_one_reg reg = {
178331f2cb8fSBharat Bhushan         .id = KVM_REG_PPC_TCR,
178431f2cb8fSBharat Bhushan         .addr = (uintptr_t) &tcr,
178531f2cb8fSBharat Bhushan     };
178631f2cb8fSBharat Bhushan 
178731f2cb8fSBharat Bhushan     return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
178831f2cb8fSBharat Bhushan }
178931f2cb8fSBharat Bhushan 
179031f2cb8fSBharat Bhushan int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu)
179131f2cb8fSBharat Bhushan {
179231f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
179331f2cb8fSBharat Bhushan     int ret;
179431f2cb8fSBharat Bhushan 
179531f2cb8fSBharat Bhushan     if (!kvm_enabled()) {
179631f2cb8fSBharat Bhushan         return -1;
179731f2cb8fSBharat Bhushan     }
179831f2cb8fSBharat Bhushan 
179931f2cb8fSBharat Bhushan     if (!cap_ppc_watchdog) {
180031f2cb8fSBharat Bhushan         printf("warning: KVM does not support watchdog\n");
180131f2cb8fSBharat Bhushan         return -1;
180231f2cb8fSBharat Bhushan     }
180331f2cb8fSBharat Bhushan 
180448add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_BOOKE_WATCHDOG, 0);
180531f2cb8fSBharat Bhushan     if (ret < 0) {
180631f2cb8fSBharat Bhushan         fprintf(stderr, "%s: couldn't enable KVM_CAP_PPC_BOOKE_WATCHDOG: %s\n",
180731f2cb8fSBharat Bhushan                 __func__, strerror(-ret));
180831f2cb8fSBharat Bhushan         return ret;
180931f2cb8fSBharat Bhushan     }
181031f2cb8fSBharat Bhushan 
181131f2cb8fSBharat Bhushan     return ret;
181231f2cb8fSBharat Bhushan }
181331f2cb8fSBharat Bhushan 
1814dc333cd6SAlexander Graf static int read_cpuinfo(const char *field, char *value, int len)
1815dc333cd6SAlexander Graf {
1816dc333cd6SAlexander Graf     FILE *f;
1817dc333cd6SAlexander Graf     int ret = -1;
1818dc333cd6SAlexander Graf     int field_len = strlen(field);
1819dc333cd6SAlexander Graf     char line[512];
1820dc333cd6SAlexander Graf 
1821dc333cd6SAlexander Graf     f = fopen("/proc/cpuinfo", "r");
1822dc333cd6SAlexander Graf     if (!f) {
1823dc333cd6SAlexander Graf         return -1;
1824dc333cd6SAlexander Graf     }
1825dc333cd6SAlexander Graf 
1826dc333cd6SAlexander Graf     do {
1827dc333cd6SAlexander Graf         if (!fgets(line, sizeof(line), f)) {
1828dc333cd6SAlexander Graf             break;
1829dc333cd6SAlexander Graf         }
1830dc333cd6SAlexander Graf         if (!strncmp(line, field, field_len)) {
1831ae215068SJim Meyering             pstrcpy(value, len, line);
1832dc333cd6SAlexander Graf             ret = 0;
1833dc333cd6SAlexander Graf             break;
1834dc333cd6SAlexander Graf         }
1835dc333cd6SAlexander Graf     } while (*line);
1836dc333cd6SAlexander Graf 
1837dc333cd6SAlexander Graf     fclose(f);
1838dc333cd6SAlexander Graf 
1839dc333cd6SAlexander Graf     return ret;
1840dc333cd6SAlexander Graf }
1841dc333cd6SAlexander Graf 
1842dc333cd6SAlexander Graf uint32_t kvmppc_get_tbfreq(void)
1843dc333cd6SAlexander Graf {
1844dc333cd6SAlexander Graf     char line[512];
1845dc333cd6SAlexander Graf     char *ns;
184673bcb24dSRutuja Shah     uint32_t retval = NANOSECONDS_PER_SECOND;
1847dc333cd6SAlexander Graf 
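1848dc333cd6SAlexander Graf     /*
1848dc333cd6SAlexander Graf      * A POWER host's /proc/cpuinfo typically contains a line such as
1848dc333cd6SAlexander Graf      * "timebase : 512000000"; take the value after the colon.
1848dc333cd6SAlexander Graf      */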
1848dc333cd6SAlexander Graf     if (read_cpuinfo("timebase", line, sizeof(line))) {
1849dc333cd6SAlexander Graf         return retval;
1850dc333cd6SAlexander Graf     }
1851dc333cd6SAlexander Graf 
1852dc333cd6SAlexander Graf     if (!(ns = strchr(line, ':'))) {
1853dc333cd6SAlexander Graf         return retval;
1854dc333cd6SAlexander Graf     }
1855dc333cd6SAlexander Graf 
1856dc333cd6SAlexander Graf     ns++;
1857dc333cd6SAlexander Graf 
1858f9b8e7f6SShraddha Barke     return atoi(ns);
1859ef951443SNikunj A Dadhania }
1860ef951443SNikunj A Dadhania 
1861ef951443SNikunj A Dadhania bool kvmppc_get_host_serial(char **value)
1862ef951443SNikunj A Dadhania {
1863ef951443SNikunj A Dadhania     return g_file_get_contents("/proc/device-tree/system-id", value, NULL,
1864ef951443SNikunj A Dadhania                                NULL);
1865ef951443SNikunj A Dadhania }
1866ef951443SNikunj A Dadhania 
1867ef951443SNikunj A Dadhania bool kvmppc_get_host_model(char **value)
1868ef951443SNikunj A Dadhania {
1869ef951443SNikunj A Dadhania     return g_file_get_contents("/proc/device-tree/model", value, NULL, NULL);
1870dc333cd6SAlexander Graf }
18714513d923SGleb Natapov 
1872eadaada1SAlexander Graf /* Try to find a device tree node for a CPU with clock-frequency property */
1873eadaada1SAlexander Graf static int kvmppc_find_cpu_dt(char *buf, int buf_len)
1874eadaada1SAlexander Graf {
1875eadaada1SAlexander Graf     struct dirent *dirp;
1876eadaada1SAlexander Graf     DIR *dp;
1877eadaada1SAlexander Graf 
1878eadaada1SAlexander Graf     if ((dp = opendir(PROC_DEVTREE_CPU)) == NULL) {
1879eadaada1SAlexander Graf         printf("Can't open directory " PROC_DEVTREE_CPU "\n");
1880eadaada1SAlexander Graf         return -1;
1881eadaada1SAlexander Graf     }
1882eadaada1SAlexander Graf 
1883eadaada1SAlexander Graf     buf[0] = '\0';
1884eadaada1SAlexander Graf     while ((dirp = readdir(dp)) != NULL) {
1885eadaada1SAlexander Graf         FILE *f;
1886eadaada1SAlexander Graf         snprintf(buf, buf_len, "%s%s/clock-frequency", PROC_DEVTREE_CPU,
1887eadaada1SAlexander Graf                  dirp->d_name);
1888eadaada1SAlexander Graf         f = fopen(buf, "r");
1889eadaada1SAlexander Graf         if (f) {
1890eadaada1SAlexander Graf             snprintf(buf, buf_len, "%s%s", PROC_DEVTREE_CPU, dirp->d_name);
1891eadaada1SAlexander Graf             fclose(f);
1892eadaada1SAlexander Graf             break;
1893eadaada1SAlexander Graf         }
1894eadaada1SAlexander Graf         buf[0] = '\0';
1895eadaada1SAlexander Graf     }
1896eadaada1SAlexander Graf     closedir(dp);
1897eadaada1SAlexander Graf     if (buf[0] == '\0') {
1898eadaada1SAlexander Graf         printf("Unknown host!\n");
1899eadaada1SAlexander Graf         return -1;
1900eadaada1SAlexander Graf     }
1901eadaada1SAlexander Graf 
1902eadaada1SAlexander Graf     return 0;
1903eadaada1SAlexander Graf }
1904eadaada1SAlexander Graf 
19057d94a30bSSukadev Bhattiprolu static uint64_t kvmppc_read_int_dt(const char *filename)
1906eadaada1SAlexander Graf {
19079bc884b7SDavid Gibson     union {
19089bc884b7SDavid Gibson         uint32_t v32;
19099bc884b7SDavid Gibson         uint64_t v64;
19109bc884b7SDavid Gibson     } u;
1911eadaada1SAlexander Graf     FILE *f;
1912eadaada1SAlexander Graf     int len;
1913eadaada1SAlexander Graf 
19147d94a30bSSukadev Bhattiprolu     f = fopen(filename, "rb");
1915eadaada1SAlexander Graf     if (!f) {
1916eadaada1SAlexander Graf         return -1;
1917eadaada1SAlexander Graf     }
1918eadaada1SAlexander Graf 
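19199bc884b7SDavid Gibson     /*
19199bc884b7SDavid Gibson      * Device tree integers are stored big-endian; the property length
19199bc884b7SDavid Gibson      * (4 or 8 bytes) tells us whether this one is 32-bit or 64-bit.
19199bc884b7SDavid Gibson      */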
19199bc884b7SDavid Gibson     len = fread(&u, 1, sizeof(u), f);
1920eadaada1SAlexander Graf     fclose(f);
1921eadaada1SAlexander Graf     switch (len) {
19229bc884b7SDavid Gibson     case 4:
19239bc884b7SDavid Gibson         /* property is a 32-bit quantity */
19249bc884b7SDavid Gibson         return be32_to_cpu(u.v32);
19259bc884b7SDavid Gibson     case 8:
19269bc884b7SDavid Gibson         return be64_to_cpu(u.v64);
1927eadaada1SAlexander Graf     }
1928eadaada1SAlexander Graf 
1929eadaada1SAlexander Graf     return 0;
1930eadaada1SAlexander Graf }
1931eadaada1SAlexander Graf 
19327d94a30bSSukadev Bhattiprolu /* Read a CPU node property from the host device tree that's a single
19337d94a30bSSukadev Bhattiprolu  * integer (32-bit or 64-bit).  Returns 0 if anything goes wrong
19347d94a30bSSukadev Bhattiprolu  * (can't find or open the property, or doesn't understand the
19357d94a30bSSukadev Bhattiprolu  * format) */
19367d94a30bSSukadev Bhattiprolu static uint64_t kvmppc_read_int_cpu_dt(const char *propname)
19377d94a30bSSukadev Bhattiprolu {
19387d94a30bSSukadev Bhattiprolu     char buf[PATH_MAX], *tmp;
19397d94a30bSSukadev Bhattiprolu     uint64_t val;
19407d94a30bSSukadev Bhattiprolu 
19417d94a30bSSukadev Bhattiprolu     if (kvmppc_find_cpu_dt(buf, sizeof(buf))) {
19427d94a30bSSukadev Bhattiprolu         return -1;
19437d94a30bSSukadev Bhattiprolu     }
19447d94a30bSSukadev Bhattiprolu 
19457d94a30bSSukadev Bhattiprolu     tmp = g_strdup_printf("%s/%s", buf, propname);
19467d94a30bSSukadev Bhattiprolu     val = kvmppc_read_int_dt(tmp);
19477d94a30bSSukadev Bhattiprolu     g_free(tmp);
19487d94a30bSSukadev Bhattiprolu 
19497d94a30bSSukadev Bhattiprolu     return val;
19507d94a30bSSukadev Bhattiprolu }
19517d94a30bSSukadev Bhattiprolu 
19529bc884b7SDavid Gibson uint64_t kvmppc_get_clockfreq(void)
19539bc884b7SDavid Gibson {
19549bc884b7SDavid Gibson     return kvmppc_read_int_cpu_dt("clock-frequency");
19559bc884b7SDavid Gibson }
19569bc884b7SDavid Gibson 
19577d050527SSuraj Jitindar Singh static int kvmppc_get_dec_bits(void)
19587d050527SSuraj Jitindar Singh {
19597d050527SSuraj Jitindar Singh     int nr_bits = kvmppc_read_int_cpu_dt("ibm,dec-bits");
19607d050527SSuraj Jitindar Singh 
19617d050527SSuraj Jitindar Singh     if (nr_bits > 0) {
19627d050527SSuraj Jitindar Singh         return nr_bits;
19637d050527SSuraj Jitindar Singh     }
19647d050527SSuraj Jitindar Singh     return 0;
19657d050527SSuraj Jitindar Singh }
19667d050527SSuraj Jitindar Singh 
19671a61a9aeSStuart Yoder static int kvmppc_get_pvinfo(CPUPPCState *env, struct kvm_ppc_pvinfo *pvinfo)
196845024f09SAlexander Graf {
1969a60f24b5SAndreas Färber     PowerPCCPU *cpu = ppc_env_get_cpu(env);
1970a60f24b5SAndreas Färber     CPUState *cs = CPU(cpu);
197145024f09SAlexander Graf 
19726fd33a75SAlexander Graf     if (kvm_vm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
19731a61a9aeSStuart Yoder         !kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_PVINFO, pvinfo)) {
19741a61a9aeSStuart Yoder         return 0;
19751a61a9aeSStuart Yoder     }
197645024f09SAlexander Graf 
19771a61a9aeSStuart Yoder     return 1;
19781a61a9aeSStuart Yoder }
19791a61a9aeSStuart Yoder 
19801a61a9aeSStuart Yoder int kvmppc_get_hasidle(CPUPPCState *env)
19811a61a9aeSStuart Yoder {
19821a61a9aeSStuart Yoder     struct kvm_ppc_pvinfo pvinfo;
19831a61a9aeSStuart Yoder 
19841a61a9aeSStuart Yoder     if (!kvmppc_get_pvinfo(env, &pvinfo) &&
19851a61a9aeSStuart Yoder         (pvinfo.flags & KVM_PPC_PVINFO_FLAGS_EV_IDLE)) {
19861a61a9aeSStuart Yoder         return 1;
19871a61a9aeSStuart Yoder     }
19881a61a9aeSStuart Yoder 
19891a61a9aeSStuart Yoder     return 0;
19901a61a9aeSStuart Yoder }
19911a61a9aeSStuart Yoder 
19921a61a9aeSStuart Yoder int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
19931a61a9aeSStuart Yoder {
19941a61a9aeSStuart Yoder     uint32_t *hc = (uint32_t *)buf;
19951a61a9aeSStuart Yoder     struct kvm_ppc_pvinfo pvinfo;
19961a61a9aeSStuart Yoder 
19971a61a9aeSStuart Yoder     if (!kvmppc_get_pvinfo(env, &pvinfo)) {
19981a61a9aeSStuart Yoder         memcpy(buf, pvinfo.hcall, buf_len);
199945024f09SAlexander Graf         return 0;
200045024f09SAlexander Graf     }
200145024f09SAlexander Graf 
200245024f09SAlexander Graf     /*
2003d13fc32eSAlexander Graf      * Fall back to hypercalls that always fail, regardless of endianness:
200445024f09SAlexander Graf      *
2005d13fc32eSAlexander Graf      *     tdi 0,r0,72 (becomes b .+8 in wrong endian, nop in good endian)
200645024f09SAlexander Graf      *     li r3, -1
2007d13fc32eSAlexander Graf      *     b .+8       (becomes nop in wrong endian)
2008d13fc32eSAlexander Graf      *     bswap32(li r3, -1)
200945024f09SAlexander Graf      */
201045024f09SAlexander Graf 
2011d13fc32eSAlexander Graf     hc[0] = cpu_to_be32(0x08000048);
2012d13fc32eSAlexander Graf     hc[1] = cpu_to_be32(0x3860ffff);
2013d13fc32eSAlexander Graf     hc[2] = cpu_to_be32(0x48000008);
2014d13fc32eSAlexander Graf     hc[3] = cpu_to_be32(bswap32(0x3860ffff));
201545024f09SAlexander Graf 
20160ddbd053SAlexey Kardashevskiy     return 1;
201745024f09SAlexander Graf }
201845024f09SAlexander Graf 
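2019026bfd89SDavid Gibson /*
2019026bfd89SDavid Gibson  * KVM_CAP_PPC_ENABLE_HCALL takes the hypercall number and an enable
2019026bfd89SDavid Gibson  * flag; passing 1 turns on in-kernel handling of that hypercall.
2019026bfd89SDavid Gibson  */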
2019026bfd89SDavid Gibson static inline int kvmppc_enable_hcall(KVMState *s, target_ulong hcall)
2020026bfd89SDavid Gibson {
2021026bfd89SDavid Gibson     return kvm_vm_enable_cap(s, KVM_CAP_PPC_ENABLE_HCALL, 0, hcall, 1);
2022026bfd89SDavid Gibson }
2023026bfd89SDavid Gibson 
2024026bfd89SDavid Gibson void kvmppc_enable_logical_ci_hcalls(void)
2025026bfd89SDavid Gibson {
2026026bfd89SDavid Gibson     /*
2027026bfd89SDavid Gibson      * FIXME: it would be nice if we could detect the cases where
2028026bfd89SDavid Gibson      * we're using a device which requires the in kernel
2029026bfd89SDavid Gibson      * implementation of these hcalls, but the kernel lacks them and
2030026bfd89SDavid Gibson      * produce a warning.
2031026bfd89SDavid Gibson      */
2032026bfd89SDavid Gibson     kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_LOAD);
2033026bfd89SDavid Gibson     kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_STORE);
2034026bfd89SDavid Gibson }
2035026bfd89SDavid Gibson 
2036ef9971ddSAlexey Kardashevskiy void kvmppc_enable_set_mode_hcall(void)
2037ef9971ddSAlexey Kardashevskiy {
2038ef9971ddSAlexey Kardashevskiy     kvmppc_enable_hcall(kvm_state, H_SET_MODE);
2039ef9971ddSAlexey Kardashevskiy }
2040ef9971ddSAlexey Kardashevskiy 
20415145ad4fSNathan Whitehorn void kvmppc_enable_clear_ref_mod_hcalls(void)
20425145ad4fSNathan Whitehorn {
20435145ad4fSNathan Whitehorn     kvmppc_enable_hcall(kvm_state, H_CLEAR_REF);
20445145ad4fSNathan Whitehorn     kvmppc_enable_hcall(kvm_state, H_CLEAR_MOD);
20455145ad4fSNathan Whitehorn }
20465145ad4fSNathan Whitehorn 
204768f9f708SSuraj Jitindar Singh void kvmppc_enable_h_page_init(void)
204868f9f708SSuraj Jitindar Singh {
204968f9f708SSuraj Jitindar Singh     kvmppc_enable_hcall(kvm_state, H_PAGE_INIT);
205068f9f708SSuraj Jitindar Singh }
205168f9f708SSuraj Jitindar Singh 
20521bc22652SAndreas Färber void kvmppc_set_papr(PowerPCCPU *cpu)
2053f61b4bedSAlexander Graf {
20541bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
2055f61b4bedSAlexander Graf     int ret;
2056f61b4bedSAlexander Graf 
2057da20aed1SDavid Gibson     if (!kvm_enabled()) {
2058da20aed1SDavid Gibson         return;
2059da20aed1SDavid Gibson     }
2060da20aed1SDavid Gibson 
206148add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_PAPR, 0);
2062f61b4bedSAlexander Graf     if (ret) {
2063072ed5f2SThomas Huth         error_report("This vCPU type or KVM version does not support PAPR");
2064072ed5f2SThomas Huth         exit(1);
2065f61b4bedSAlexander Graf     }
20669b00ea49SDavid Gibson 
20679b00ea49SDavid Gibson     /* Update the capability flag so we sync the right information
20689b00ea49SDavid Gibson      * with kvm */
20699b00ea49SDavid Gibson     cap_papr = 1;
2070f1af19d7SDavid Gibson }
2071f61b4bedSAlexander Graf 
2072d6e166c0SDavid Gibson int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr)
20736db5bb0fSAlexey Kardashevskiy {
2074d6e166c0SDavid Gibson     return kvm_set_one_reg(CPU(cpu), KVM_REG_PPC_ARCH_COMPAT, &compat_pvr);
20756db5bb0fSAlexey Kardashevskiy }
20766db5bb0fSAlexey Kardashevskiy 
20775b95b8b9SAlexander Graf void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy)
20785b95b8b9SAlexander Graf {
20795b95b8b9SAlexander Graf     CPUState *cs = CPU(cpu);
20805b95b8b9SAlexander Graf     int ret;
20815b95b8b9SAlexander Graf 
208248add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_EPR, 0, mpic_proxy);
20835b95b8b9SAlexander Graf     if (ret && mpic_proxy) {
2084072ed5f2SThomas Huth         error_report("This KVM version does not support EPR");
2085072ed5f2SThomas Huth         exit(1);
20865b95b8b9SAlexander Graf     }
20875b95b8b9SAlexander Graf }
20885b95b8b9SAlexander Graf 
2089e97c3636SDavid Gibson int kvmppc_smt_threads(void)
2090e97c3636SDavid Gibson {
2091e97c3636SDavid Gibson     return cap_ppc_smt ? cap_ppc_smt : 1;
2092e97c3636SDavid Gibson }
2093e97c3636SDavid Gibson 
2094fa98fbfcSSam Bobroff int kvmppc_set_smt_threads(int smt)
2095fa98fbfcSSam Bobroff {
2096fa98fbfcSSam Bobroff     int ret;
2097fa98fbfcSSam Bobroff 
2098fa98fbfcSSam Bobroff     ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_SMT, 0, smt, 0);
2099fa98fbfcSSam Bobroff     if (!ret) {
2100fa98fbfcSSam Bobroff         cap_ppc_smt = smt;
2101fa98fbfcSSam Bobroff     }
2102fa98fbfcSSam Bobroff     return ret;
2103fa98fbfcSSam Bobroff }
2104fa98fbfcSSam Bobroff 
2105fa98fbfcSSam Bobroff void kvmppc_hint_smt_possible(Error **errp)
2106fa98fbfcSSam Bobroff {
2107fa98fbfcSSam Bobroff     int i;
2108fa98fbfcSSam Bobroff     GString *g;
2109fa98fbfcSSam Bobroff     char *s;
2110fa98fbfcSSam Bobroff 
2111fa98fbfcSSam Bobroff     assert(kvm_enabled());
2112fa98fbfcSSam Bobroff     if (cap_ppc_smt_possible) {
2113fa98fbfcSSam Bobroff         g = g_string_new("Available VSMT modes:");
2114fa98fbfcSSam Bobroff         for (i = 63; i >= 0; i--) {
2115fa98fbfcSSam Bobroff             if ((1UL << i) & cap_ppc_smt_possible) {
2116fa98fbfcSSam Bobroff                 g_string_append_printf(g, " %lu", (1UL << i));
2117fa98fbfcSSam Bobroff             }
2118fa98fbfcSSam Bobroff         }
2119fa98fbfcSSam Bobroff         s = g_string_free(g, false);
2120fa98fbfcSSam Bobroff         error_append_hint(errp, "%s.\n", s);
2121fa98fbfcSSam Bobroff         g_free(s);
2122fa98fbfcSSam Bobroff     } else {
2123fa98fbfcSSam Bobroff         error_append_hint(errp,
2124fa98fbfcSSam Bobroff                           "This KVM seems to be too old to support VSMT.\n");
2125fa98fbfcSSam Bobroff     }
2126fa98fbfcSSam Bobroff }
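
/*
 * Editor's note: with cap_ppc_smt_possible reported as, say, 0x8f (bits
 * 0-3 and 7 set), the hint appended above would read
 * "Available VSMT modes: 128 8 4 2 1." -- purely an illustrative value.
 */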
2127fa98fbfcSSam Bobroff 
2128fa98fbfcSSam Bobroff 
21297f763a5dSDavid Gibson #ifdef TARGET_PPC64
21307f763a5dSDavid Gibson uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift)
21317f763a5dSDavid Gibson {
2132f36951c1SDavid Gibson     struct kvm_ppc_smmu_info info;
2133f36951c1SDavid Gibson     long rampagesize, best_page_shift;
2134f36951c1SDavid Gibson     int i;
2135f36951c1SDavid Gibson 
2136f36951c1SDavid Gibson     /* Find the largest hardware supported page size that's less than
2137f36951c1SDavid Gibson      * or equal to the (logical) backing page size of guest RAM */
2138ab256960SGreg Kurz     kvm_get_smmu_info(&info, &error_fatal);
21399c607668SAlexey Kardashevskiy     rampagesize = qemu_getrampagesize();
2140f36951c1SDavid Gibson     best_page_shift = 0;
2141f36951c1SDavid Gibson 
2142f36951c1SDavid Gibson     for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
2143f36951c1SDavid Gibson         struct kvm_ppc_one_seg_page_size *sps = &info.sps[i];
2144f36951c1SDavid Gibson 
2145f36951c1SDavid Gibson         if (!sps->page_shift) {
2146f36951c1SDavid Gibson             continue;
2147f36951c1SDavid Gibson         }
2148f36951c1SDavid Gibson 
2149f36951c1SDavid Gibson         if ((sps->page_shift > best_page_shift)
2150f36951c1SDavid Gibson             && ((1UL << sps->page_shift) <= rampagesize)) {
2151f36951c1SDavid Gibson             best_page_shift = sps->page_shift;
2152f36951c1SDavid Gibson         }
2153f36951c1SDavid Gibson     }
2154f36951c1SDavid Gibson 
21557f763a5dSDavid Gibson     return MIN(current_size,
2156f36951c1SDavid Gibson                1ULL << (best_page_shift + hash_shift - 7));
21577f763a5dSDavid Gibson }
21587f763a5dSDavid Gibson #endif
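
/*
 * Editor's note: a worked example of the clamp above, using made-up
 * values: with 64KiB backing pages (best_page_shift = 16) and a 16MiB
 * hash table (hash_shift = 24), the RMA is limited to
 * 1ULL << (16 + 24 - 7) = 8GiB (or to current_size, if that is smaller).
 */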
21597f763a5dSDavid Gibson 
2160da95324eSAlexey Kardashevskiy bool kvmppc_spapr_use_multitce(void)
2161da95324eSAlexey Kardashevskiy {
2162da95324eSAlexey Kardashevskiy     return cap_spapr_multitce;
2163da95324eSAlexey Kardashevskiy }
2164da95324eSAlexey Kardashevskiy 
21653dc410aeSAlexey Kardashevskiy int kvmppc_spapr_enable_inkernel_multitce(void)
21663dc410aeSAlexey Kardashevskiy {
21673dc410aeSAlexey Kardashevskiy     int ret;
21683dc410aeSAlexey Kardashevskiy 
21693dc410aeSAlexey Kardashevskiy     ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_ENABLE_HCALL, 0,
21703dc410aeSAlexey Kardashevskiy                             H_PUT_TCE_INDIRECT, 1);
21713dc410aeSAlexey Kardashevskiy     if (!ret) {
21723dc410aeSAlexey Kardashevskiy         ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_ENABLE_HCALL, 0,
21733dc410aeSAlexey Kardashevskiy                                 H_STUFF_TCE, 1);
21743dc410aeSAlexey Kardashevskiy     }
21753dc410aeSAlexey Kardashevskiy 
21763dc410aeSAlexey Kardashevskiy     return ret;
21773dc410aeSAlexey Kardashevskiy }
21783dc410aeSAlexey Kardashevskiy 
2179d6ee2a7cSAlexey Kardashevskiy void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift,
2180d6ee2a7cSAlexey Kardashevskiy                               uint64_t bus_offset, uint32_t nb_table,
2181d6ee2a7cSAlexey Kardashevskiy                               int *pfd, bool need_vfio)
21820f5cb298SDavid Gibson {
21830f5cb298SDavid Gibson     long len;
21840f5cb298SDavid Gibson     int fd;
21850f5cb298SDavid Gibson     void *table;
21860f5cb298SDavid Gibson 
2187b5aec396SDavid Gibson     /* Must set fd to -1 so we don't try to munmap when called for
2188b5aec396SDavid Gibson      * destroying the table, which the upper layers -will- do
2189b5aec396SDavid Gibson      */
2190b5aec396SDavid Gibson     *pfd = -1;
21916a81dd17SDavid Gibson     if (!cap_spapr_tce || (need_vfio && !cap_spapr_vfio)) {
21920f5cb298SDavid Gibson         return NULL;
21930f5cb298SDavid Gibson     }
21940f5cb298SDavid Gibson 
2195d6ee2a7cSAlexey Kardashevskiy     if (cap_spapr_tce_64) {
2196d6ee2a7cSAlexey Kardashevskiy         struct kvm_create_spapr_tce_64 args = {
2197d6ee2a7cSAlexey Kardashevskiy             .liobn = liobn,
2198d6ee2a7cSAlexey Kardashevskiy             .page_shift = page_shift,
2199d6ee2a7cSAlexey Kardashevskiy             .offset = bus_offset >> page_shift,
2200d6ee2a7cSAlexey Kardashevskiy             .size = nb_table,
2201d6ee2a7cSAlexey Kardashevskiy             .flags = 0
2202d6ee2a7cSAlexey Kardashevskiy         };
2203d6ee2a7cSAlexey Kardashevskiy         fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE_64, &args);
2204d6ee2a7cSAlexey Kardashevskiy         if (fd < 0) {
2205d6ee2a7cSAlexey Kardashevskiy             fprintf(stderr,
2206d6ee2a7cSAlexey Kardashevskiy                     "KVM: Failed to create TCE64 table for liobn 0x%x\n",
2207d6ee2a7cSAlexey Kardashevskiy                     liobn);
2208d6ee2a7cSAlexey Kardashevskiy             return NULL;
2209d6ee2a7cSAlexey Kardashevskiy         }
2210d6ee2a7cSAlexey Kardashevskiy     } else if (cap_spapr_tce) {
2211d6ee2a7cSAlexey Kardashevskiy         uint64_t window_size = (uint64_t) nb_table << page_shift;
2212d6ee2a7cSAlexey Kardashevskiy         struct kvm_create_spapr_tce args = {
2213d6ee2a7cSAlexey Kardashevskiy             .liobn = liobn,
2214d6ee2a7cSAlexey Kardashevskiy             .window_size = window_size,
2215d6ee2a7cSAlexey Kardashevskiy         };
2216d6ee2a7cSAlexey Kardashevskiy         if ((window_size != args.window_size) || bus_offset) {
2217d6ee2a7cSAlexey Kardashevskiy             return NULL;
2218d6ee2a7cSAlexey Kardashevskiy         }
22190f5cb298SDavid Gibson         fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE, &args);
22200f5cb298SDavid Gibson         if (fd < 0) {
2221b5aec396SDavid Gibson             fprintf(stderr, "KVM: Failed to create TCE table for liobn 0x%x\n",
2222b5aec396SDavid Gibson                     liobn);
22230f5cb298SDavid Gibson             return NULL;
22240f5cb298SDavid Gibson         }
2225d6ee2a7cSAlexey Kardashevskiy     } else {
2226d6ee2a7cSAlexey Kardashevskiy         return NULL;
2227d6ee2a7cSAlexey Kardashevskiy     }
22280f5cb298SDavid Gibson 
2229d6ee2a7cSAlexey Kardashevskiy     len = nb_table * sizeof(uint64_t);
22300f5cb298SDavid Gibson     /* FIXME: round this up to page size */
22310f5cb298SDavid Gibson 
223274b41e56SDavid Gibson     table = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
22330f5cb298SDavid Gibson     if (table == MAP_FAILED) {
2234b5aec396SDavid Gibson         fprintf(stderr, "KVM: Failed to map TCE table for liobn 0x%x\n",
2235b5aec396SDavid Gibson                 liobn);
22360f5cb298SDavid Gibson         close(fd);
22370f5cb298SDavid Gibson         return NULL;
22380f5cb298SDavid Gibson     }
22390f5cb298SDavid Gibson 
22400f5cb298SDavid Gibson     *pfd = fd;
22410f5cb298SDavid Gibson     return table;
22420f5cb298SDavid Gibson }
22430f5cb298SDavid Gibson 
2244523e7b8aSAlexey Kardashevskiy int kvmppc_remove_spapr_tce(void *table, int fd, uint32_t nb_table)
22450f5cb298SDavid Gibson {
22460f5cb298SDavid Gibson     long len;
22470f5cb298SDavid Gibson 
22480f5cb298SDavid Gibson     if (fd < 0) {
22490f5cb298SDavid Gibson         return -1;
22500f5cb298SDavid Gibson     }
22510f5cb298SDavid Gibson 
2252523e7b8aSAlexey Kardashevskiy     len = nb_table * sizeof(uint64_t);
22530f5cb298SDavid Gibson     if ((munmap(table, len) < 0) ||
22540f5cb298SDavid Gibson         (close(fd) < 0)) {
2255b5aec396SDavid Gibson         fprintf(stderr, "KVM: Unexpected error removing TCE table: %s",
2256b5aec396SDavid Gibson                 strerror(errno));
22570f5cb298SDavid Gibson         /* Leak the table */
22580f5cb298SDavid Gibson     }
22590f5cb298SDavid Gibson 
22600f5cb298SDavid Gibson     return 0;
22610f5cb298SDavid Gibson }
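
/*
 * Editor's sketch (not part of the original file): a minimal, hypothetical
 * round trip through the two helpers above.  The liobn, page shift and
 * table size are made-up example values.
 */
#if 0
static void example_tce_table_roundtrip(void)
{
    uint32_t nb_table = 1024;          /* number of TCE entries */
    int fd = -1;
    void *table;

    /* 4KiB TCE pages, bus offset 0, no in-kernel VFIO acceleration needed */
    table = kvmppc_create_spapr_tce(0x80000000, 12, 0, nb_table, &fd, false);
    if (!table) {
        return;                        /* fall back to a userspace table */
    }

    /* ... use the mmap()ed window at 'table' ... */

    kvmppc_remove_spapr_tce(table, fd, nb_table);
}
#endif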
22620f5cb298SDavid Gibson 
22637f763a5dSDavid Gibson int kvmppc_reset_htab(int shift_hint)
22647f763a5dSDavid Gibson {
22657f763a5dSDavid Gibson     uint32_t shift = shift_hint;
22667f763a5dSDavid Gibson 
2267ace9a2cbSDavid Gibson     if (!kvm_enabled()) {
2268ace9a2cbSDavid Gibson         /* Full emulation, tell caller to allocate htab itself */
2269ace9a2cbSDavid Gibson         return 0;
2270ace9a2cbSDavid Gibson     }
22716977afdaSGreg Kurz     if (kvm_vm_check_extension(kvm_state, KVM_CAP_PPC_ALLOC_HTAB)) {
22727f763a5dSDavid Gibson         int ret;
22737f763a5dSDavid Gibson         ret = kvm_vm_ioctl(kvm_state, KVM_PPC_ALLOCATE_HTAB, &shift);
2274ace9a2cbSDavid Gibson         if (ret == -ENOTTY) {
2275ace9a2cbSDavid Gibson             /* At least some versions of PR KVM advertise the
2276ace9a2cbSDavid Gibson              * capability, but don't implement the ioctl().  Oops.
2277ace9a2cbSDavid Gibson              * Return 0 so that we allocate the htab in qemu, as is
2278ace9a2cbSDavid Gibson              * correct for PR. */
2279ace9a2cbSDavid Gibson             return 0;
2280ace9a2cbSDavid Gibson         } else if (ret < 0) {
22817f763a5dSDavid Gibson             return ret;
22827f763a5dSDavid Gibson         }
22837f763a5dSDavid Gibson         return shift;
22847f763a5dSDavid Gibson     }
22857f763a5dSDavid Gibson 
2286ace9a2cbSDavid Gibson     /* We have a kernel that predates the htab reset calls.  For PR
2287ace9a2cbSDavid Gibson      * KVM, we need to allocate the htab ourselves; an HV KVM of
228896c9cff0SThomas Huth      * this era has already allocated a fixed 16MB hash table. */
228996c9cff0SThomas Huth     if (kvmppc_is_pr(kvm_state)) {
2290ace9a2cbSDavid Gibson         /* PR - tell caller to allocate htab */
22917f763a5dSDavid Gibson         return 0;
2292ace9a2cbSDavid Gibson     } else {
2293ace9a2cbSDavid Gibson         /* HV - assume 16MB kernel allocated htab */
2294ace9a2cbSDavid Gibson         return 24;
2295ace9a2cbSDavid Gibson     }
22967f763a5dSDavid Gibson }
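
/*
 * Editor's note: kvmppc_reset_htab() returns 0 when the caller (QEMU)
 * should allocate the hash table itself, a positive value giving the
 * shift (log2 size) of the kernel-managed table, or a negative errno
 * on failure.
 */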
22977f763a5dSDavid Gibson 
2298a1e98583SDavid Gibson static inline uint32_t mfpvr(void)
2299a1e98583SDavid Gibson {
2300a1e98583SDavid Gibson     uint32_t pvr;
2301a1e98583SDavid Gibson 
2302a1e98583SDavid Gibson     asm ("mfpvr %0"
2303a1e98583SDavid Gibson          : "=r"(pvr));
2304a1e98583SDavid Gibson     return pvr;
2305a1e98583SDavid Gibson }
2306a1e98583SDavid Gibson 
2307a7342588SDavid Gibson static void alter_insns(uint64_t *word, uint64_t flags, bool on)
2308a7342588SDavid Gibson {
2309a7342588SDavid Gibson     if (on) {
2310a7342588SDavid Gibson         *word |= flags;
2311a7342588SDavid Gibson     } else {
2312a7342588SDavid Gibson         *word &= ~flags;
2313a7342588SDavid Gibson     }
2314a7342588SDavid Gibson }
2315a7342588SDavid Gibson 
23162985b86bSAndreas Färber static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data)
23172985b86bSAndreas Färber {
23182985b86bSAndreas Färber     PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
23190cbad81fSDavid Gibson     uint32_t dcache_size = kvmppc_read_int_cpu_dt("d-cache-size");
23200cbad81fSDavid Gibson     uint32_t icache_size = kvmppc_read_int_cpu_dt("i-cache-size");
2321a1e98583SDavid Gibson 
2322cfe34f44SAndreas Färber     /* Now fix up the class with information we can query from the host */
23233bc9ccc0SAlexey Kardashevskiy     pcc->pvr = mfpvr();
2324a7342588SDavid Gibson 
23253f2ca480SDavid Gibson     alter_insns(&pcc->insns_flags, PPC_ALTIVEC,
23263f2ca480SDavid Gibson                 qemu_getauxval(AT_HWCAP) & PPC_FEATURE_HAS_ALTIVEC);
23273f2ca480SDavid Gibson     alter_insns(&pcc->insns_flags2, PPC2_VSX,
23283f2ca480SDavid Gibson                 qemu_getauxval(AT_HWCAP) & PPC_FEATURE_HAS_VSX);
23293f2ca480SDavid Gibson     alter_insns(&pcc->insns_flags2, PPC2_DFP,
23303f2ca480SDavid Gibson                 qemu_getauxval(AT_HWCAP) & PPC_FEATURE_HAS_DFP);
23310cbad81fSDavid Gibson 
23320cbad81fSDavid Gibson     if (dcache_size != -1) {
23330cbad81fSDavid Gibson         pcc->l1_dcache_size = dcache_size;
23340cbad81fSDavid Gibson     }
23350cbad81fSDavid Gibson 
23360cbad81fSDavid Gibson     if (icache_size != -1) {
23370cbad81fSDavid Gibson         pcc->l1_icache_size = icache_size;
23380cbad81fSDavid Gibson     }
2339c64abd1fSSam Bobroff 
2340c64abd1fSSam Bobroff #if defined(TARGET_PPC64)
2341c64abd1fSSam Bobroff     pcc->radix_page_info = kvm_get_radix_page_info();
23425f3066d8SDavid Gibson 
23435f3066d8SDavid Gibson     if ((pcc->pvr & 0xffffff00) == CPU_POWERPC_POWER9_DD1) {
23445f3066d8SDavid Gibson         /*
23455f3066d8SDavid Gibson          * POWER9 DD1 has some bugs which make it not really ISA 3.00
23465f3066d8SDavid Gibson          * compliant.  More importantly, advertising ISA 3.00
23475f3066d8SDavid Gibson          * architected mode may prevent guests from activating
23485f3066d8SDavid Gibson          * necessary DD1 workarounds.
23495f3066d8SDavid Gibson          */
23505f3066d8SDavid Gibson         pcc->pcr_supported &= ~(PCR_COMPAT_3_00 | PCR_COMPAT_2_07
23515f3066d8SDavid Gibson                                 | PCR_COMPAT_2_06 | PCR_COMPAT_2_05);
23525f3066d8SDavid Gibson     }
2353c64abd1fSSam Bobroff #endif /* defined(TARGET_PPC64) */
2354a1e98583SDavid Gibson }
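
/*
 * Editor's note: the class_init above patches the "host" CPU class with
 * values taken from the running machine: the PVR from mfpvr(), the
 * Altivec/VSX/DFP instruction flags from AT_HWCAP, the L1 cache sizes
 * from the device tree, and (on 64-bit) the radix page sizes from KVM.
 */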
2355a1e98583SDavid Gibson 
23563b961124SStuart Yoder bool kvmppc_has_cap_epr(void)
23573b961124SStuart Yoder {
23583b961124SStuart Yoder     return cap_epr;
23593b961124SStuart Yoder }
23603b961124SStuart Yoder 
236187a91de6SAlexander Graf bool kvmppc_has_cap_fixup_hcalls(void)
236287a91de6SAlexander Graf {
236387a91de6SAlexander Graf     return cap_fixup_hcalls;
236487a91de6SAlexander Graf }
236587a91de6SAlexander Graf 
2366bac3bf28SThomas Huth bool kvmppc_has_cap_htm(void)
2367bac3bf28SThomas Huth {
2368bac3bf28SThomas Huth     return cap_htm;
2369bac3bf28SThomas Huth }
2370bac3bf28SThomas Huth 
2371cf1c4cceSSam Bobroff bool kvmppc_has_cap_mmu_radix(void)
2372cf1c4cceSSam Bobroff {
2373cf1c4cceSSam Bobroff     return cap_mmu_radix;
2374cf1c4cceSSam Bobroff }
2375cf1c4cceSSam Bobroff 
2376cf1c4cceSSam Bobroff bool kvmppc_has_cap_mmu_hash_v3(void)
2377cf1c4cceSSam Bobroff {
2378cf1c4cceSSam Bobroff     return cap_mmu_hash_v3;
2379cf1c4cceSSam Bobroff }
2380cf1c4cceSSam Bobroff 
2381072f416aSSuraj Jitindar Singh static bool kvmppc_power8_host(void)
2382072f416aSSuraj Jitindar Singh {
2383072f416aSSuraj Jitindar Singh     bool ret = false;
2384072f416aSSuraj Jitindar Singh #ifdef TARGET_PPC64
2385072f416aSSuraj Jitindar Singh     {
2386072f416aSSuraj Jitindar Singh         uint32_t base_pvr = CPU_POWERPC_POWER_SERVER_MASK & mfpvr();
2387072f416aSSuraj Jitindar Singh         ret = (base_pvr == CPU_POWERPC_POWER8E_BASE) ||
2388072f416aSSuraj Jitindar Singh               (base_pvr == CPU_POWERPC_POWER8NVL_BASE) ||
2389072f416aSSuraj Jitindar Singh               (base_pvr == CPU_POWERPC_POWER8_BASE);
2390072f416aSSuraj Jitindar Singh     }
2391072f416aSSuraj Jitindar Singh #endif /* TARGET_PPC64 */
2392072f416aSSuraj Jitindar Singh     return ret;
2393072f416aSSuraj Jitindar Singh }
2394072f416aSSuraj Jitindar Singh 
23958fea7044SSuraj Jitindar Singh static int parse_cap_ppc_safe_cache(struct kvm_ppc_cpu_char c)
23968fea7044SSuraj Jitindar Singh {
2397072f416aSSuraj Jitindar Singh     bool l1d_thread_priv_req = !kvmppc_power8_host();
2398072f416aSSuraj Jitindar Singh 
23998fea7044SSuraj Jitindar Singh     if (~c.behaviour & c.behaviour_mask & H_CPU_BEHAV_L1D_FLUSH_PR) {
24008fea7044SSuraj Jitindar Singh         return 2;
2401072f416aSSuraj Jitindar Singh     } else if ((!l1d_thread_priv_req ||
2402072f416aSSuraj Jitindar Singh                 c.character & c.character_mask & H_CPU_CHAR_L1D_THREAD_PRIV) &&
24038fea7044SSuraj Jitindar Singh                (c.character & c.character_mask
24048fea7044SSuraj Jitindar Singh                 & (H_CPU_CHAR_L1D_FLUSH_ORI30 | H_CPU_CHAR_L1D_FLUSH_TRIG2))) {
24058fea7044SSuraj Jitindar Singh         return 1;
24068fea7044SSuraj Jitindar Singh     }
24078fea7044SSuraj Jitindar Singh 
24088fea7044SSuraj Jitindar Singh     return 0;
24098fea7044SSuraj Jitindar Singh }
24108fea7044SSuraj Jitindar Singh 
24118fea7044SSuraj Jitindar Singh static int parse_cap_ppc_safe_bounds_check(struct kvm_ppc_cpu_char c)
24128fea7044SSuraj Jitindar Singh {
24138fea7044SSuraj Jitindar Singh     if (~c.behaviour & c.behaviour_mask & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR) {
24148fea7044SSuraj Jitindar Singh         return 2;
24158fea7044SSuraj Jitindar Singh     } else if (c.character & c.character_mask & H_CPU_CHAR_SPEC_BAR_ORI31) {
24168fea7044SSuraj Jitindar Singh         return 1;
24178fea7044SSuraj Jitindar Singh     }
24188fea7044SSuraj Jitindar Singh 
24198fea7044SSuraj Jitindar Singh     return 0;
24208fea7044SSuraj Jitindar Singh }
24218fea7044SSuraj Jitindar Singh 
24228fea7044SSuraj Jitindar Singh static int parse_cap_ppc_safe_indirect_branch(struct kvm_ppc_cpu_char c)
24238fea7044SSuraj Jitindar Singh {
2424399b2896SSuraj Jitindar Singh     if ((~c.behaviour & c.behaviour_mask & H_CPU_BEHAV_FLUSH_COUNT_CACHE) &&
2425399b2896SSuraj Jitindar Singh         (~c.character & c.character_mask & H_CPU_CHAR_CACHE_COUNT_DIS) &&
2426399b2896SSuraj Jitindar Singh         (~c.character & c.character_mask & H_CPU_CHAR_BCCTRL_SERIALISED)) {
2427399b2896SSuraj Jitindar Singh         return SPAPR_CAP_FIXED_NA;
2428399b2896SSuraj Jitindar Singh     } else if (c.behaviour & c.behaviour_mask & H_CPU_BEHAV_FLUSH_COUNT_CACHE) {
2429399b2896SSuraj Jitindar Singh         return SPAPR_CAP_WORKAROUND;
2430399b2896SSuraj Jitindar Singh     } else if (c.character & c.character_mask & H_CPU_CHAR_CACHE_COUNT_DIS) {
24318fea7044SSuraj Jitindar Singh         return  SPAPR_CAP_FIXED_CCD;
24328fea7044SSuraj Jitindar Singh     } else if (c.character & c.character_mask & H_CPU_CHAR_BCCTRL_SERIALISED) {
24338fea7044SSuraj Jitindar Singh         return SPAPR_CAP_FIXED_IBS;
24348fea7044SSuraj Jitindar Singh     }
24358fea7044SSuraj Jitindar Singh 
24368fea7044SSuraj Jitindar Singh     return 0;
24378fea7044SSuraj Jitindar Singh }
24388fea7044SSuraj Jitindar Singh 
24398ff43ee4SSuraj Jitindar Singh static int parse_cap_ppc_count_cache_flush_assist(struct kvm_ppc_cpu_char c)
24408ff43ee4SSuraj Jitindar Singh {
24418ff43ee4SSuraj Jitindar Singh     if (c.character & c.character_mask & H_CPU_CHAR_BCCTR_FLUSH_ASSIST) {
24428ff43ee4SSuraj Jitindar Singh         return 1;
24438ff43ee4SSuraj Jitindar Singh     }
24448ff43ee4SSuraj Jitindar Singh     return 0;
24458ff43ee4SSuraj Jitindar Singh }
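
/*
 * Editor's note: judging by the SPAPR_CAP_* constants used in
 * parse_cap_ppc_safe_indirect_branch(), the bare 0/1/2 values returned
 * by the cache and bounds-check parsers above appear to map to the
 * spapr cap levels "broken", "workaround" and "fixed" respectively.
 */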
24468ff43ee4SSuraj Jitindar Singh 
24478acc2ae5SSuraj Jitindar Singh static void kvmppc_get_cpu_characteristics(KVMState *s)
24488acc2ae5SSuraj Jitindar Singh {
24498acc2ae5SSuraj Jitindar Singh     struct kvm_ppc_cpu_char c;
24508acc2ae5SSuraj Jitindar Singh     int ret;
24518acc2ae5SSuraj Jitindar Singh 
24528acc2ae5SSuraj Jitindar Singh     /* Assume broken */
24538acc2ae5SSuraj Jitindar Singh     cap_ppc_safe_cache = 0;
24548acc2ae5SSuraj Jitindar Singh     cap_ppc_safe_bounds_check = 0;
24558acc2ae5SSuraj Jitindar Singh     cap_ppc_safe_indirect_branch = 0;
24568acc2ae5SSuraj Jitindar Singh 
24578acc2ae5SSuraj Jitindar Singh     ret = kvm_vm_check_extension(s, KVM_CAP_PPC_GET_CPU_CHAR);
24588acc2ae5SSuraj Jitindar Singh     if (!ret) {
24598acc2ae5SSuraj Jitindar Singh         return;
24608acc2ae5SSuraj Jitindar Singh     }
24618acc2ae5SSuraj Jitindar Singh     ret = kvm_vm_ioctl(s, KVM_PPC_GET_CPU_CHAR, &c);
24628acc2ae5SSuraj Jitindar Singh     if (ret < 0) {
24638acc2ae5SSuraj Jitindar Singh         return;
24648acc2ae5SSuraj Jitindar Singh     }
24658fea7044SSuraj Jitindar Singh 
24668fea7044SSuraj Jitindar Singh     cap_ppc_safe_cache = parse_cap_ppc_safe_cache(c);
24678fea7044SSuraj Jitindar Singh     cap_ppc_safe_bounds_check = parse_cap_ppc_safe_bounds_check(c);
24688fea7044SSuraj Jitindar Singh     cap_ppc_safe_indirect_branch = parse_cap_ppc_safe_indirect_branch(c);
24698ff43ee4SSuraj Jitindar Singh     cap_ppc_count_cache_flush_assist =
24708ff43ee4SSuraj Jitindar Singh         parse_cap_ppc_count_cache_flush_assist(c);
24718acc2ae5SSuraj Jitindar Singh }
24728acc2ae5SSuraj Jitindar Singh 
24738acc2ae5SSuraj Jitindar Singh int kvmppc_get_cap_safe_cache(void)
24748acc2ae5SSuraj Jitindar Singh {
24758acc2ae5SSuraj Jitindar Singh     return cap_ppc_safe_cache;
24768acc2ae5SSuraj Jitindar Singh }
24778acc2ae5SSuraj Jitindar Singh 
24788acc2ae5SSuraj Jitindar Singh int kvmppc_get_cap_safe_bounds_check(void)
24798acc2ae5SSuraj Jitindar Singh {
24808acc2ae5SSuraj Jitindar Singh     return cap_ppc_safe_bounds_check;
24818acc2ae5SSuraj Jitindar Singh }
24828acc2ae5SSuraj Jitindar Singh 
24838acc2ae5SSuraj Jitindar Singh int kvmppc_get_cap_safe_indirect_branch(void)
24848acc2ae5SSuraj Jitindar Singh {
24858acc2ae5SSuraj Jitindar Singh     return cap_ppc_safe_indirect_branch;
24868acc2ae5SSuraj Jitindar Singh }
24878acc2ae5SSuraj Jitindar Singh 
24888ff43ee4SSuraj Jitindar Singh int kvmppc_get_cap_count_cache_flush_assist(void)
24898ff43ee4SSuraj Jitindar Singh {
24908ff43ee4SSuraj Jitindar Singh     return cap_ppc_count_cache_flush_assist;
24918ff43ee4SSuraj Jitindar Singh }
24928ff43ee4SSuraj Jitindar Singh 
2493b9a477b7SSuraj Jitindar Singh bool kvmppc_has_cap_nested_kvm_hv(void)
2494b9a477b7SSuraj Jitindar Singh {
2495b9a477b7SSuraj Jitindar Singh     return !!cap_ppc_nested_kvm_hv;
2496b9a477b7SSuraj Jitindar Singh }
2497b9a477b7SSuraj Jitindar Singh 
2498b9a477b7SSuraj Jitindar Singh int kvmppc_set_cap_nested_kvm_hv(int enable)
2499b9a477b7SSuraj Jitindar Singh {
2500b9a477b7SSuraj Jitindar Singh     return kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_NESTED_HV, 0, enable);
2501b9a477b7SSuraj Jitindar Singh }
2502b9a477b7SSuraj Jitindar Singh 
25039ded780cSAlexey Kardashevskiy bool kvmppc_has_cap_spapr_vfio(void)
25049ded780cSAlexey Kardashevskiy {
25059ded780cSAlexey Kardashevskiy     return cap_spapr_vfio;
25069ded780cSAlexey Kardashevskiy }
25079ded780cSAlexey Kardashevskiy 
25087d050527SSuraj Jitindar Singh int kvmppc_get_cap_large_decr(void)
25097d050527SSuraj Jitindar Singh {
25107d050527SSuraj Jitindar Singh     return cap_large_decr;
25117d050527SSuraj Jitindar Singh }
25127d050527SSuraj Jitindar Singh 
25137d050527SSuraj Jitindar Singh int kvmppc_enable_cap_large_decr(PowerPCCPU *cpu, int enable)
25147d050527SSuraj Jitindar Singh {
25157d050527SSuraj Jitindar Singh     CPUState *cs = CPU(cpu);
25167d050527SSuraj Jitindar Singh     uint64_t lpcr;
25177d050527SSuraj Jitindar Singh 
25187d050527SSuraj Jitindar Singh     kvm_get_one_reg(cs, KVM_REG_PPC_LPCR_64, &lpcr);
25197d050527SSuraj Jitindar Singh     /* Do we need to modify the LPCR? */
25207d050527SSuraj Jitindar Singh     if (!!(lpcr & LPCR_LD) != !!enable) {
25217d050527SSuraj Jitindar Singh         if (enable) {
25227d050527SSuraj Jitindar Singh             lpcr |= LPCR_LD;
25237d050527SSuraj Jitindar Singh         } else {
25247d050527SSuraj Jitindar Singh             lpcr &= ~LPCR_LD;
25257d050527SSuraj Jitindar Singh         }
25267d050527SSuraj Jitindar Singh         kvm_set_one_reg(cs, KVM_REG_PPC_LPCR_64, &lpcr);
25277d050527SSuraj Jitindar Singh         kvm_get_one_reg(cs, KVM_REG_PPC_LPCR_64, &lpcr);
25287d050527SSuraj Jitindar Singh 
25297d050527SSuraj Jitindar Singh         if (!!(lpcr & LPCR_LD) != !!enable) {
25307d050527SSuraj Jitindar Singh             return -1;
25317d050527SSuraj Jitindar Singh         }
25327d050527SSuraj Jitindar Singh     }
25337d050527SSuraj Jitindar Singh 
25347d050527SSuraj Jitindar Singh     return 0;
25357d050527SSuraj Jitindar Singh }
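
/*
 * Editor's note: the helper above flips LPCR_LD via the one-reg
 * interface and then reads LPCR back; if the bit did not end up in the
 * requested state (e.g. the kernel silently ignored the write), it
 * returns -1, otherwise 0.
 */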
25367d050527SSuraj Jitindar Singh 
253752b2519cSThomas Huth PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void)
253852b2519cSThomas Huth {
253952b2519cSThomas Huth     uint32_t host_pvr = mfpvr();
254052b2519cSThomas Huth     PowerPCCPUClass *pvr_pcc;
254152b2519cSThomas Huth 
254252b2519cSThomas Huth     pvr_pcc = ppc_cpu_class_by_pvr(host_pvr);
254352b2519cSThomas Huth     if (pvr_pcc == NULL) {
254452b2519cSThomas Huth         pvr_pcc = ppc_cpu_class_by_pvr_mask(host_pvr);
254552b2519cSThomas Huth     }
254652b2519cSThomas Huth 
254752b2519cSThomas Huth     return pvr_pcc;
254852b2519cSThomas Huth }
254952b2519cSThomas Huth 
25502e9c10ebSIgor Mammedov static int kvm_ppc_register_host_cpu_type(MachineState *ms)
25515ba4576bSAndreas Färber {
25525ba4576bSAndreas Färber     TypeInfo type_info = {
25535ba4576bSAndreas Färber         .name = TYPE_HOST_POWERPC_CPU,
25545ba4576bSAndreas Färber         .class_init = kvmppc_host_cpu_class_init,
25555ba4576bSAndreas Färber     };
25562e9c10ebSIgor Mammedov     MachineClass *mc = MACHINE_GET_CLASS(ms);
25575ba4576bSAndreas Färber     PowerPCCPUClass *pvr_pcc;
255892e926e1SGreg Kurz     ObjectClass *oc;
25595b79b1caSAlexey Kardashevskiy     DeviceClass *dc;
2560715d4b96SThomas Huth     int i;
25615ba4576bSAndreas Färber 
256252b2519cSThomas Huth     pvr_pcc = kvm_ppc_get_host_cpu_class();
25633bc9ccc0SAlexey Kardashevskiy     if (pvr_pcc == NULL) {
25645ba4576bSAndreas Färber         return -1;
25655ba4576bSAndreas Färber     }
25665ba4576bSAndreas Färber     type_info.parent = object_class_get_name(OBJECT_CLASS(pvr_pcc));
25675ba4576bSAndreas Färber     type_register(&type_info);
25682e9c10ebSIgor Mammedov     if (object_dynamic_cast(OBJECT(ms), TYPE_SPAPR_MACHINE)) {
25692e9c10ebSIgor Mammedov         /* override TCG default cpu type with 'host' cpu model */
25702e9c10ebSIgor Mammedov         mc->default_cpu_type = TYPE_HOST_POWERPC_CPU;
25712e9c10ebSIgor Mammedov     }
25725b79b1caSAlexey Kardashevskiy 
257392e926e1SGreg Kurz     oc = object_class_by_name(type_info.name);
257492e926e1SGreg Kurz     g_assert(oc);
257592e926e1SGreg Kurz 
2576715d4b96SThomas Huth     /*
2577715d4b96SThomas Huth      * Update generic CPU family class alias (e.g. on a POWER8NVL host,
2578715d4b96SThomas Huth      * we want "POWER8" to be a "family" alias that points to the current
2579715d4b96SThomas Huth      * host CPU type, too)
2580715d4b96SThomas Huth      */
2581715d4b96SThomas Huth     dc = DEVICE_CLASS(ppc_cpu_get_family_class(pvr_pcc));
2582715d4b96SThomas Huth     for (i = 0; ppc_cpu_aliases[i].alias != NULL; i++) {
2583c5354f54SIgor Mammedov         if (strcasecmp(ppc_cpu_aliases[i].alias, dc->desc) == 0) {
2584715d4b96SThomas Huth             char *suffix;
2585715d4b96SThomas Huth 
2586715d4b96SThomas Huth             ppc_cpu_aliases[i].model = g_strdup(object_class_get_name(oc));
2587c9137065SIgor Mammedov             suffix = strstr(ppc_cpu_aliases[i].model, POWERPC_CPU_TYPE_SUFFIX);
2588715d4b96SThomas Huth             if (suffix) {
2589715d4b96SThomas Huth                 *suffix = 0;
2590715d4b96SThomas Huth             }
2591715d4b96SThomas Huth             break;
2592715d4b96SThomas Huth         }
2593715d4b96SThomas Huth     }
2594715d4b96SThomas Huth 
25955ba4576bSAndreas Färber     return 0;
25965ba4576bSAndreas Färber }
25975ba4576bSAndreas Färber 
2598feaa64c4SDavid Gibson int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function)
2599feaa64c4SDavid Gibson {
2600feaa64c4SDavid Gibson     struct kvm_rtas_token_args args = {
2601feaa64c4SDavid Gibson         .token = token,
2602feaa64c4SDavid Gibson     };
2603feaa64c4SDavid Gibson 
2604feaa64c4SDavid Gibson     if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_RTAS)) {
2605feaa64c4SDavid Gibson         return -ENOENT;
2606feaa64c4SDavid Gibson     }
2607feaa64c4SDavid Gibson 
2608feaa64c4SDavid Gibson     strncpy(args.name, function, sizeof(args.name));
2609feaa64c4SDavid Gibson 
2610feaa64c4SDavid Gibson     return kvm_vm_ioctl(kvm_state, KVM_PPC_RTAS_DEFINE_TOKEN, &args);
2611feaa64c4SDavid Gibson }
261212b1143bSDavid Gibson 
261314b0d748SGreg Kurz int kvmppc_get_htab_fd(bool write, uint64_t index, Error **errp)
2614e68cb8b4SAlexey Kardashevskiy {
2615e68cb8b4SAlexey Kardashevskiy     struct kvm_get_htab_fd s = {
2616e68cb8b4SAlexey Kardashevskiy         .flags = write ? KVM_GET_HTAB_WRITE : 0,
261714b0d748SGreg Kurz         .start_index = index,
2618e68cb8b4SAlexey Kardashevskiy     };
261982be8e73SGreg Kurz     int ret;
2620e68cb8b4SAlexey Kardashevskiy 
2621e68cb8b4SAlexey Kardashevskiy     if (!cap_htab_fd) {
262214b0d748SGreg Kurz         error_setg(errp, "KVM version doesn't support %s the HPT",
262314b0d748SGreg Kurz                    write ? "writing" : "reading");
262482be8e73SGreg Kurz         return -ENOTSUP;
2625e68cb8b4SAlexey Kardashevskiy     }
2626e68cb8b4SAlexey Kardashevskiy 
262782be8e73SGreg Kurz     ret = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &s);
262882be8e73SGreg Kurz     if (ret < 0) {
262914b0d748SGreg Kurz         error_setg(errp, "Unable to open fd for %s HPT %s KVM: %s",
263014b0d748SGreg Kurz                    write ? "writing" : "reading", write ? "to" : "from",
263114b0d748SGreg Kurz                    strerror(errno));
263282be8e73SGreg Kurz         return -errno;
263382be8e73SGreg Kurz     }
263482be8e73SGreg Kurz 
263582be8e73SGreg Kurz     return ret;
2636e68cb8b4SAlexey Kardashevskiy }
2637e68cb8b4SAlexey Kardashevskiy 
2638e68cb8b4SAlexey Kardashevskiy int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns)
2639e68cb8b4SAlexey Kardashevskiy {
2640bc72ad67SAlex Bligh     int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
2641e68cb8b4SAlexey Kardashevskiy     uint8_t buf[bufsize];
2642e68cb8b4SAlexey Kardashevskiy     ssize_t rc;
2643e68cb8b4SAlexey Kardashevskiy 
2644e68cb8b4SAlexey Kardashevskiy     do {
2645e68cb8b4SAlexey Kardashevskiy         rc = read(fd, buf, bufsize);
2646e68cb8b4SAlexey Kardashevskiy         if (rc < 0) {
2647e68cb8b4SAlexey Kardashevskiy             fprintf(stderr, "Error reading data from KVM HTAB fd: %s\n",
2648e68cb8b4SAlexey Kardashevskiy                     strerror(errno));
2649e68cb8b4SAlexey Kardashevskiy             return rc;
2650e68cb8b4SAlexey Kardashevskiy         } else if (rc) {
2651e094c4c1SCédric Le Goater             uint8_t *buffer = buf;
2652e094c4c1SCédric Le Goater             ssize_t n = rc;
2653e094c4c1SCédric Le Goater             while (n) {
2654e094c4c1SCédric Le Goater                 struct kvm_get_htab_header *head =
2655e094c4c1SCédric Le Goater                     (struct kvm_get_htab_header *) buffer;
2656e094c4c1SCédric Le Goater                 size_t chunksize = sizeof(*head) +
2657e094c4c1SCédric Le Goater                      HASH_PTE_SIZE_64 * head->n_valid;
2658e094c4c1SCédric Le Goater 
2659e094c4c1SCédric Le Goater                 qemu_put_be32(f, head->index);
2660e094c4c1SCédric Le Goater                 qemu_put_be16(f, head->n_valid);
2661e094c4c1SCédric Le Goater                 qemu_put_be16(f, head->n_invalid);
2662e094c4c1SCédric Le Goater                 qemu_put_buffer(f, (void *)(head + 1),
2663e094c4c1SCédric Le Goater                                 HASH_PTE_SIZE_64 * head->n_valid);
2664e094c4c1SCédric Le Goater 
2665e094c4c1SCédric Le Goater                 buffer += chunksize;
2666e094c4c1SCédric Le Goater                 n -= chunksize;
2667e094c4c1SCédric Le Goater             }
2668e68cb8b4SAlexey Kardashevskiy         }
2669e68cb8b4SAlexey Kardashevskiy     } while ((rc != 0)
2670e68cb8b4SAlexey Kardashevskiy              && ((max_ns < 0)
2671bc72ad67SAlex Bligh                  || ((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) < max_ns)));
2672e68cb8b4SAlexey Kardashevskiy 
2673e68cb8b4SAlexey Kardashevskiy     return (rc == 0) ? 1 : 0;
2674e68cb8b4SAlexey Kardashevskiy }
2675e68cb8b4SAlexey Kardashevskiy 
2676e68cb8b4SAlexey Kardashevskiy int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
2677e68cb8b4SAlexey Kardashevskiy                            uint16_t n_valid, uint16_t n_invalid)
2678e68cb8b4SAlexey Kardashevskiy {
2679e68cb8b4SAlexey Kardashevskiy     struct kvm_get_htab_header *buf;
2680e68cb8b4SAlexey Kardashevskiy     size_t chunksize = sizeof(*buf) + n_valid*HASH_PTE_SIZE_64;
2681e68cb8b4SAlexey Kardashevskiy     ssize_t rc;
2682e68cb8b4SAlexey Kardashevskiy 
2683e68cb8b4SAlexey Kardashevskiy     buf = alloca(chunksize);
2684e68cb8b4SAlexey Kardashevskiy     buf->index = index;
2685e68cb8b4SAlexey Kardashevskiy     buf->n_valid = n_valid;
2686e68cb8b4SAlexey Kardashevskiy     buf->n_invalid = n_invalid;
2687e68cb8b4SAlexey Kardashevskiy 
2688e68cb8b4SAlexey Kardashevskiy     qemu_get_buffer(f, (void *)(buf + 1), HASH_PTE_SIZE_64*n_valid);
2689e68cb8b4SAlexey Kardashevskiy 
2690e68cb8b4SAlexey Kardashevskiy     rc = write(fd, buf, chunksize);
2691e68cb8b4SAlexey Kardashevskiy     if (rc < 0) {
2692e68cb8b4SAlexey Kardashevskiy         fprintf(stderr, "Error writing KVM hash table: %s\n",
2693e68cb8b4SAlexey Kardashevskiy                 strerror(errno));
2694e68cb8b4SAlexey Kardashevskiy         return rc;
2695e68cb8b4SAlexey Kardashevskiy     }
2696e68cb8b4SAlexey Kardashevskiy     if (rc != chunksize) {
2697e68cb8b4SAlexey Kardashevskiy         /* We should never get a short write on a single chunk */
2698e68cb8b4SAlexey Kardashevskiy         fprintf(stderr, "Short write, restoring KVM hash table\n");
2699e68cb8b4SAlexey Kardashevskiy         return -1;
2700e68cb8b4SAlexey Kardashevskiy     }
2701e68cb8b4SAlexey Kardashevskiy     return 0;
2702e68cb8b4SAlexey Kardashevskiy }
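
/*
 * Editor's note: the HPT migration stream written by kvmppc_save_htab()
 * and consumed by kvmppc_load_htab_chunk() is a sequence of records of
 * the form: 32-bit index, 16-bit n_valid, 16-bit n_invalid, followed by
 * n_valid * HASH_PTE_SIZE_64 bytes of PTE data.
 */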
2703e68cb8b4SAlexey Kardashevskiy 
270420d695a9SAndreas Färber bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
27054513d923SGleb Natapov {
27064513d923SGleb Natapov     return true;
27074513d923SGleb Natapov }
2708a1b87fe0SJan Kiszka 
270982169660SScott Wood void kvm_arch_init_irq_routing(KVMState *s)
271082169660SScott Wood {
271182169660SScott Wood }
2712c65f9a07SGreg Kurz 
27131ad9f0a4SDavid Gibson void kvmppc_read_hptes(ppc_hash_pte64_t *hptes, hwaddr ptex, int n)
27141ad9f0a4SDavid Gibson {
27151ad9f0a4SDavid Gibson     int fd, rc;
27161ad9f0a4SDavid Gibson     int i;
27177c43bca0SAneesh Kumar K.V 
271814b0d748SGreg Kurz     fd = kvmppc_get_htab_fd(false, ptex, &error_abort);
27191ad9f0a4SDavid Gibson 
27201ad9f0a4SDavid Gibson     i = 0;
27211ad9f0a4SDavid Gibson     while (i < n) {
27221ad9f0a4SDavid Gibson         struct kvm_get_htab_header *hdr;
27231ad9f0a4SDavid Gibson         int m = n < HPTES_PER_GROUP ? n : HPTES_PER_GROUP;
27241ad9f0a4SDavid Gibson         char buf[sizeof(*hdr) + m * HASH_PTE_SIZE_64];
27251ad9f0a4SDavid Gibson 
27261ad9f0a4SDavid Gibson         rc = read(fd, buf, sizeof(buf));
27271ad9f0a4SDavid Gibson         if (rc < 0) {
27281ad9f0a4SDavid Gibson             hw_error("kvmppc_read_hptes: Unable to read HPTEs");
27291ad9f0a4SDavid Gibson         }
27301ad9f0a4SDavid Gibson 
27311ad9f0a4SDavid Gibson         hdr = (struct kvm_get_htab_header *)buf;
27321ad9f0a4SDavid Gibson         while ((i < n) && ((char *)hdr < (buf + rc))) {
2733a36593e1SAlexey Kardashevskiy             int invalid = hdr->n_invalid, valid = hdr->n_valid;
27341ad9f0a4SDavid Gibson 
27351ad9f0a4SDavid Gibson             if (hdr->index != (ptex + i)) {
27361ad9f0a4SDavid Gibson                 hw_error("kvmppc_read_hptes: Unexpected HPTE index %"PRIu32
27371ad9f0a4SDavid Gibson                          " != (%"HWADDR_PRIu" + %d)", hdr->index, ptex, i);
27381ad9f0a4SDavid Gibson             }
27391ad9f0a4SDavid Gibson 
2740a36593e1SAlexey Kardashevskiy             if (n - i < valid) {
2741a36593e1SAlexey Kardashevskiy                 valid = n - i;
2742a36593e1SAlexey Kardashevskiy             }
2743a36593e1SAlexey Kardashevskiy             memcpy(hptes + i, hdr + 1, HASH_PTE_SIZE_64 * valid);
2744a36593e1SAlexey Kardashevskiy             i += valid;
27451ad9f0a4SDavid Gibson 
27461ad9f0a4SDavid Gibson             if ((n - i) < invalid) {
27471ad9f0a4SDavid Gibson                 invalid = n - i;
27481ad9f0a4SDavid Gibson             }
27491ad9f0a4SDavid Gibson             memset(hptes + i, 0, invalid * HASH_PTE_SIZE_64);
2750a36593e1SAlexey Kardashevskiy             i += invalid;
27511ad9f0a4SDavid Gibson 
27521ad9f0a4SDavid Gibson             hdr = (struct kvm_get_htab_header *)
27531ad9f0a4SDavid Gibson                 ((char *)(hdr + 1) + HASH_PTE_SIZE_64 * hdr->n_valid);
27541ad9f0a4SDavid Gibson         }
27551ad9f0a4SDavid Gibson     }
27561ad9f0a4SDavid Gibson 
27571ad9f0a4SDavid Gibson     close(fd);
27581ad9f0a4SDavid Gibson }
27591ad9f0a4SDavid Gibson 
27601ad9f0a4SDavid Gibson void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1)
27617c43bca0SAneesh Kumar K.V {
27621ad9f0a4SDavid Gibson     int fd, rc;
27631ad9f0a4SDavid Gibson     struct {
27641ad9f0a4SDavid Gibson         struct kvm_get_htab_header hdr;
27651ad9f0a4SDavid Gibson         uint64_t pte0;
27661ad9f0a4SDavid Gibson         uint64_t pte1;
27671ad9f0a4SDavid Gibson     } buf;
2768c1385933SAneesh Kumar K.V 
276914b0d748SGreg Kurz     fd = kvmppc_get_htab_fd(true, 0 /* Ignored */, &error_abort);
2770c1385933SAneesh Kumar K.V 
27711ad9f0a4SDavid Gibson     buf.hdr.n_valid = 1;
27721ad9f0a4SDavid Gibson     buf.hdr.n_invalid = 0;
27731ad9f0a4SDavid Gibson     buf.hdr.index = ptex;
27741ad9f0a4SDavid Gibson     buf.pte0 = cpu_to_be64(pte0);
27751ad9f0a4SDavid Gibson     buf.pte1 = cpu_to_be64(pte1);
27761ad9f0a4SDavid Gibson 
27771ad9f0a4SDavid Gibson     rc = write(fd, &buf, sizeof(buf));
27781ad9f0a4SDavid Gibson     if (rc != sizeof(buf)) {
27791ad9f0a4SDavid Gibson         hw_error("kvmppc_write_hpte: Unable to update KVM HPT");
2780c1385933SAneesh Kumar K.V     }
27811ad9f0a4SDavid Gibson     close(fd);
2782c1385933SAneesh Kumar K.V }
27839e03a040SFrank Blaschka 
27849e03a040SFrank Blaschka int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
2785dc9f06caSPavel Fedin                              uint64_t address, uint32_t data, PCIDevice *dev)
27869e03a040SFrank Blaschka {
27879e03a040SFrank Blaschka     return 0;
27889e03a040SFrank Blaschka }
27891850b6b7SEric Auger 
279038d87493SPeter Xu int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
279138d87493SPeter Xu                                 int vector, PCIDevice *dev)
279238d87493SPeter Xu {
279338d87493SPeter Xu     return 0;
279438d87493SPeter Xu }
279538d87493SPeter Xu 
279638d87493SPeter Xu int kvm_arch_release_virq_post(int virq)
279738d87493SPeter Xu {
279838d87493SPeter Xu     return 0;
279938d87493SPeter Xu }
280038d87493SPeter Xu 
28011850b6b7SEric Auger int kvm_arch_msi_data_to_gsi(uint32_t data)
28021850b6b7SEric Auger {
28031850b6b7SEric Auger     return data & 0xffff;
28041850b6b7SEric Auger }
28054d9392beSThomas Huth 
28064d9392beSThomas Huth int kvmppc_enable_hwrng(void)
28074d9392beSThomas Huth {
28084d9392beSThomas Huth     if (!kvm_enabled() || !kvm_check_extension(kvm_state, KVM_CAP_PPC_HWRNG)) {
28094d9392beSThomas Huth         return -1;
28104d9392beSThomas Huth     }
28114d9392beSThomas Huth 
28124d9392beSThomas Huth     return kvmppc_enable_hcall(kvm_state, H_RANDOM);
28134d9392beSThomas Huth }
281430f4b05bSDavid Gibson 
281530f4b05bSDavid Gibson void kvmppc_check_papr_resize_hpt(Error **errp)
281630f4b05bSDavid Gibson {
281730f4b05bSDavid Gibson     if (!kvm_enabled()) {
2818b55d295eSDavid Gibson         return; /* No KVM, we're good */
2819b55d295eSDavid Gibson     }
2820b55d295eSDavid Gibson 
2821b55d295eSDavid Gibson     if (cap_resize_hpt) {
2822b55d295eSDavid Gibson         return; /* Kernel has explicit support, we're good */
2823b55d295eSDavid Gibson     }
2824b55d295eSDavid Gibson 
2825b55d295eSDavid Gibson     /* Otherwise fallback on looking for PR KVM */
2826b55d295eSDavid Gibson     if (kvmppc_is_pr(kvm_state)) {
282730f4b05bSDavid Gibson         return;
282830f4b05bSDavid Gibson     }
282930f4b05bSDavid Gibson 
283030f4b05bSDavid Gibson     error_setg(errp,
283130f4b05bSDavid Gibson                "Hash page table resizing not available with this KVM version");
283230f4b05bSDavid Gibson }
2833b55d295eSDavid Gibson 
2834b55d295eSDavid Gibson int kvmppc_resize_hpt_prepare(PowerPCCPU *cpu, target_ulong flags, int shift)
2835b55d295eSDavid Gibson {
2836b55d295eSDavid Gibson     CPUState *cs = CPU(cpu);
2837b55d295eSDavid Gibson     struct kvm_ppc_resize_hpt rhpt = {
2838b55d295eSDavid Gibson         .flags = flags,
2839b55d295eSDavid Gibson         .shift = shift,
2840b55d295eSDavid Gibson     };
2841b55d295eSDavid Gibson 
2842b55d295eSDavid Gibson     if (!cap_resize_hpt) {
2843b55d295eSDavid Gibson         return -ENOSYS;
2844b55d295eSDavid Gibson     }
2845b55d295eSDavid Gibson 
2846b55d295eSDavid Gibson     return kvm_vm_ioctl(cs->kvm_state, KVM_PPC_RESIZE_HPT_PREPARE, &rhpt);
2847b55d295eSDavid Gibson }
2848b55d295eSDavid Gibson 
2849b55d295eSDavid Gibson int kvmppc_resize_hpt_commit(PowerPCCPU *cpu, target_ulong flags, int shift)
2850b55d295eSDavid Gibson {
2851b55d295eSDavid Gibson     CPUState *cs = CPU(cpu);
2852b55d295eSDavid Gibson     struct kvm_ppc_resize_hpt rhpt = {
2853b55d295eSDavid Gibson         .flags = flags,
2854b55d295eSDavid Gibson         .shift = shift,
2855b55d295eSDavid Gibson     };
2856b55d295eSDavid Gibson 
2857b55d295eSDavid Gibson     if (!cap_resize_hpt) {
2858b55d295eSDavid Gibson         return -ENOSYS;
2859b55d295eSDavid Gibson     }
2860b55d295eSDavid Gibson 
2861b55d295eSDavid Gibson     return kvm_vm_ioctl(cs->kvm_state, KVM_PPC_RESIZE_HPT_COMMIT, &rhpt);
2862b55d295eSDavid Gibson }
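
/*
 * Editor's note: the two helpers above implement the two-phase HPT
 * resize protocol: a caller is expected to invoke
 * kvmppc_resize_hpt_prepare() first (possibly repeatedly, until the
 * kernel reports the new table is ready) and then
 * kvmppc_resize_hpt_commit() with the same flags and shift.  Both
 * return -ENOSYS when the kernel lacks the resize-HPT capability.
 */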
2863b55d295eSDavid Gibson 
2864c363a37aSDaniel Henrique Barboza /*
2865c363a37aSDaniel Henrique Barboza  * This is a helper function to detect a post-migration scenario
2866c363a37aSDaniel Henrique Barboza  * in which a guest, running as KVM-HV, freezes in cpu_post_load because
2867c363a37aSDaniel Henrique Barboza  * the guest kernel can't handle a PVR value other than the actual host
2868c363a37aSDaniel Henrique Barboza  * PVR in KVM_SET_SREGS, even if pvr_match() returns true.
2869c363a37aSDaniel Henrique Barboza  *
2870c363a37aSDaniel Henrique Barboza  * If we don't have cap_ppc_pvr_compat and we're not running in PR
2871c363a37aSDaniel Henrique Barboza  * (so, we're HV), return true. The workaround itself is done in
2872c363a37aSDaniel Henrique Barboza  * cpu_post_load.
2873c363a37aSDaniel Henrique Barboza  *
2874c363a37aSDaniel Henrique Barboza  * The order here is important: we'll only check for KVM PR as a
2875c363a37aSDaniel Henrique Barboza  * fallback if the guest kernel can't handle the situation itself.
2876c363a37aSDaniel Henrique Barboza  * We need to avoid querying the running KVM type at the QEMU level
2877c363a37aSDaniel Henrique Barboza  * as much as possible.
2878c363a37aSDaniel Henrique Barboza  */
2879c363a37aSDaniel Henrique Barboza bool kvmppc_pvr_workaround_required(PowerPCCPU *cpu)
2880c363a37aSDaniel Henrique Barboza {
2881c363a37aSDaniel Henrique Barboza     CPUState *cs = CPU(cpu);
2882c363a37aSDaniel Henrique Barboza 
2883c363a37aSDaniel Henrique Barboza     if (!kvm_enabled()) {
2884c363a37aSDaniel Henrique Barboza         return false;
2885c363a37aSDaniel Henrique Barboza     }
2886c363a37aSDaniel Henrique Barboza 
2887c363a37aSDaniel Henrique Barboza     if (cap_ppc_pvr_compat) {
2888c363a37aSDaniel Henrique Barboza         return false;
2889c363a37aSDaniel Henrique Barboza     }
2890c363a37aSDaniel Henrique Barboza 
2891c363a37aSDaniel Henrique Barboza     return !kvmppc_is_pr(cs->kvm_state);
2892c363a37aSDaniel Henrique Barboza }
2893a84f7179SNikunj A Dadhania 
2894a84f7179SNikunj A Dadhania void kvmppc_set_reg_ppc_online(PowerPCCPU *cpu, unsigned int online)
2895a84f7179SNikunj A Dadhania {
2896a84f7179SNikunj A Dadhania     CPUState *cs = CPU(cpu);
2897a84f7179SNikunj A Dadhania 
2898a84f7179SNikunj A Dadhania     if (kvm_enabled()) {
2899a84f7179SNikunj A Dadhania         kvm_set_one_reg(cs, KVM_REG_PPC_ONLINE, &online);
2900a84f7179SNikunj A Dadhania     }
2901a84f7179SNikunj A Dadhania }
2902