xref: /qemu/target/ppc/kvm.c (revision 7d050527e3f5cadbf9db3bce09409fb4e9259997)
/*
 * PowerPC implementation of KVM hooks
 *
 * Copyright IBM Corp. 2007
 * Copyright (C) 2011 Freescale Semiconductor, Inc.
 *
 * Authors:
 *  Jerone Young <jyoung5@us.ibm.com>
 *  Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *  Hollis Blanchard <hollisb@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <dirent.h>
#include <sys/ioctl.h>
#include <sys/vfs.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "cpu.h"
#include "cpu-models.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/hw_accel.h"
#include "kvm_ppc.h"
#include "sysemu/cpus.h"
#include "sysemu/device_tree.h"
#include "mmu-hash64.h"

#include "hw/sysbus.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "hw/ppc/ppc.h"
#include "sysemu/watchdog.h"
#include "trace.h"
#include "exec/gdbstub.h"
#include "exec/memattrs.h"
#include "exec/ram_addr.h"
#include "sysemu/hostmem.h"
#include "qemu/cutils.h"
#include "qemu/mmap-alloc.h"
#include "elf.h"
#include "sysemu/kvm_int.h"

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define PROC_DEVTREE_CPU      "/proc/device-tree/cpus/"

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

static int cap_interrupt_unset = false;
static int cap_interrupt_level = false;
static int cap_segstate;
static int cap_booke_sregs;
static int cap_ppc_smt;
static int cap_ppc_smt_possible;
static int cap_spapr_tce;
static int cap_spapr_tce_64;
static int cap_spapr_multitce;
static int cap_spapr_vfio;
static int cap_hior;
static int cap_one_reg;
static int cap_epr;
static int cap_ppc_watchdog;
static int cap_papr;
static int cap_htab_fd;
static int cap_fixup_hcalls;
static int cap_htm;             /* Hardware transactional memory support */
static int cap_mmu_radix;
static int cap_mmu_hash_v3;
static int cap_resize_hpt;
static int cap_ppc_pvr_compat;
static int cap_ppc_safe_cache;
static int cap_ppc_safe_bounds_check;
static int cap_ppc_safe_indirect_branch;
static int cap_ppc_nested_kvm_hv;
static int cap_large_decr;

static uint32_t debug_inst_opcode;

/* XXX We have a race condition where we actually have a level triggered
 *     interrupt, but the infrastructure can't expose that yet, so the guest
 *     takes but ignores it, goes to sleep and never gets notified that there's
 *     still an interrupt pending.
 *
 *     As a quick workaround, let's just wake up again 20 ms after we injected
 *     an interrupt. That way we can assure that we're always reinjecting
 *     interrupts in case the guest swallowed them.
 */
static QEMUTimer *idle_timer;

static void kvm_kick_cpu(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    qemu_cpu_kick(CPU(cpu));
}

/* Check whether we are running with KVM-PR (instead of KVM-HV).  This
 * should only be used for fallback tests - generally we should use
 * explicit capabilities for the features we want, rather than
 * assuming what is/isn't available depending on the KVM variant. */
static bool kvmppc_is_pr(KVMState *ks)
{
    /* Assume KVM-PR if the GET_PVINFO capability is available */
    return kvm_vm_check_extension(ks, KVM_CAP_PPC_GET_PVINFO) != 0;
}

static int kvm_ppc_register_host_cpu_type(MachineState *ms);
static void kvmppc_get_cpu_characteristics(KVMState *s);
static int kvmppc_get_dec_bits(void);

int kvm_arch_init(MachineState *ms, KVMState *s)
{
    cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);
    cap_interrupt_level = kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL);
    cap_segstate = kvm_check_extension(s, KVM_CAP_PPC_SEGSTATE);
    cap_booke_sregs = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_SREGS);
    cap_ppc_smt_possible = kvm_vm_check_extension(s, KVM_CAP_PPC_SMT_POSSIBLE);
    cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);
    cap_spapr_tce_64 = kvm_check_extension(s, KVM_CAP_SPAPR_TCE_64);
    cap_spapr_multitce = kvm_check_extension(s, KVM_CAP_SPAPR_MULTITCE);
    cap_spapr_vfio = kvm_vm_check_extension(s, KVM_CAP_SPAPR_TCE_VFIO);
    cap_one_reg = kvm_check_extension(s, KVM_CAP_ONE_REG);
    cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);
    cap_epr = kvm_check_extension(s, KVM_CAP_PPC_EPR);
    cap_ppc_watchdog = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_WATCHDOG);
    /* Note: we don't set cap_papr here, because this capability is
     * only activated after this by kvmppc_set_papr() */
    cap_htab_fd = kvm_vm_check_extension(s, KVM_CAP_PPC_HTAB_FD);
    cap_fixup_hcalls = kvm_check_extension(s, KVM_CAP_PPC_FIXUP_HCALL);
    cap_ppc_smt = kvm_vm_check_extension(s, KVM_CAP_PPC_SMT);
    cap_htm = kvm_vm_check_extension(s, KVM_CAP_PPC_HTM);
    cap_mmu_radix = kvm_vm_check_extension(s, KVM_CAP_PPC_MMU_RADIX);
    cap_mmu_hash_v3 = kvm_vm_check_extension(s, KVM_CAP_PPC_MMU_HASH_V3);
    cap_resize_hpt = kvm_vm_check_extension(s, KVM_CAP_SPAPR_RESIZE_HPT);
    kvmppc_get_cpu_characteristics(s);
    cap_ppc_nested_kvm_hv = kvm_vm_check_extension(s, KVM_CAP_PPC_NESTED_HV);
    cap_large_decr = kvmppc_get_dec_bits();
    /*
     * Note: setting it to false because there is no such capability
     * in KVM at this moment.
     *
     * TODO: call kvm_vm_check_extension() with the right capability
     * after the kernel starts implementing it.
     */
    cap_ppc_pvr_compat = false;

    if (!cap_interrupt_level) {
        fprintf(stderr, "KVM: Couldn't find level irq capability. Expect the "
                        "VM to stall at times!\n");
    }

    kvm_ppc_register_host_cpu_type(ms);

    return 0;
}

int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
{
    return 0;
}

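/*
 * Push the guest's PVR to KVM through the SREGS interface.  On BookE the
 * kernel's native PVR is kept, so this is a no-op there.
 */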
static int kvm_arch_sync_sregs(PowerPCCPU *cpu)
{
    CPUPPCState *cenv = &cpu->env;
    CPUState *cs = CPU(cpu);
    struct kvm_sregs sregs;
    int ret;

    if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
        /* What we're really trying to say is "if we're on BookE, we use
           the native PVR for now". This is the only sane way to check
           it though, so we potentially confuse users into thinking they
           can run BookE guests on BookS. Let's hope nobody dares enough :) */
        return 0;
    } else {
        if (!cap_segstate) {
            fprintf(stderr, "kvm error: missing PVR setting capability\n");
            return -ENOSYS;
        }
    }

    ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
    if (ret) {
        return ret;
    }

    sregs.pvr = cenv->spr[SPR_PVR];
    return kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
}

/* Set up a shared TLB array with KVM */
static int kvm_booke206_tlb_init(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    struct kvm_book3e_206_tlb_params params = {};
    struct kvm_config_tlb cfg = {};
    unsigned int entries = 0;
    int ret, i;

    if (!kvm_enabled() ||
        !kvm_check_extension(cs->kvm_state, KVM_CAP_SW_TLB)) {
        return 0;
    }

    assert(ARRAY_SIZE(params.tlb_sizes) == BOOKE206_MAX_TLBN);

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        params.tlb_sizes[i] = booke206_tlb_size(env, i);
        params.tlb_ways[i] = booke206_tlb_ways(env, i);
        entries += params.tlb_sizes[i];
    }

    assert(entries == env->nb_tlb);
    assert(sizeof(struct kvm_book3e_206_tlb_entry) == sizeof(ppcmas_tlb_t));

    env->tlb_dirty = true;

    cfg.array = (uintptr_t)env->tlb.tlbm;
    cfg.array_len = sizeof(ppcmas_tlb_t) * entries;
    cfg.params = (uintptr_t)&params;
    cfg.mmu_type = KVM_MMU_FSL_BOOKE_NOHV;

    ret = kvm_vcpu_enable_cap(cs, KVM_CAP_SW_TLB, 0, (uintptr_t)&cfg);
    if (ret < 0) {
        fprintf(stderr, "%s: couldn't enable KVM_CAP_SW_TLB: %s\n",
                __func__, strerror(-ret));
        return ret;
    }

    env->kvm_sw_tlb = true;
    return 0;
}


#if defined(TARGET_PPC64)
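/*
 * Ask KVM for the host MMU characteristics (segment and page size
 * encodings) so they can be checked against what the guest expects.
 */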
static void kvm_get_smmu_info(struct kvm_ppc_smmu_info *info, Error **errp)
{
    int ret;

    assert(kvm_state != NULL);

    if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) {
        error_setg(errp, "KVM doesn't expose the MMU features it supports");
        error_append_hint(errp, "Consider switching to a newer KVM\n");
        return;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_SMMU_INFO, info);
    if (ret == 0) {
        return;
    }

    error_setg_errno(errp, -ret,
                     "KVM failed to provide the MMU features it supports");
}

struct ppc_radix_page_info *kvm_get_radix_page_info(void)
{
    KVMState *s = KVM_STATE(current_machine->accelerator);
    struct ppc_radix_page_info *radix_page_info;
    struct kvm_ppc_rmmu_info rmmu_info;
    int i;

    if (!kvm_check_extension(s, KVM_CAP_PPC_MMU_RADIX)) {
        return NULL;
    }
    if (kvm_vm_ioctl(s, KVM_PPC_GET_RMMU_INFO, &rmmu_info)) {
        return NULL;
    }
    radix_page_info = g_malloc0(sizeof(*radix_page_info));
    radix_page_info->count = 0;
    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        if (rmmu_info.ap_encodings[i]) {
            radix_page_info->entries[i] = rmmu_info.ap_encodings[i];
            radix_page_info->count++;
        }
    }
    return radix_page_info;
}

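/*
 * Switch the guest MMU between hash and radix mode (and set GTSE and the
 * process table), translating the ioctl result into an hcall return code.
 */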
target_ulong kvmppc_configure_v3_mmu(PowerPCCPU *cpu,
                                     bool radix, bool gtse,
                                     uint64_t proc_tbl)
{
    CPUState *cs = CPU(cpu);
    int ret;
    uint64_t flags = 0;
    struct kvm_ppc_mmuv3_cfg cfg = {
        .process_table = proc_tbl,
    };

    if (radix) {
        flags |= KVM_PPC_MMUV3_RADIX;
    }
    if (gtse) {
        flags |= KVM_PPC_MMUV3_GTSE;
    }
    cfg.flags = flags;
    ret = kvm_vm_ioctl(cs->kvm_state, KVM_PPC_CONFIGURE_V3_MMU, &cfg);
    switch (ret) {
    case 0:
        return H_SUCCESS;
    case -EINVAL:
        return H_PARAMETER;
    case -ENODEV:
        return H_NOT_AVAILABLE;
    default:
        return H_HARDWARE;
    }
}

bool kvmppc_hpt_needs_host_contiguous_pages(void)
{
    static struct kvm_ppc_smmu_info smmu_info;

    if (!kvm_enabled()) {
        return false;
    }

    kvm_get_smmu_info(&smmu_info, &error_fatal);
    return !!(smmu_info.flags & KVM_PPC_PAGE_SIZES_REAL);
}

void kvm_check_mmu(PowerPCCPU *cpu, Error **errp)
{
    struct kvm_ppc_smmu_info smmu_info;
    int iq, ik, jq, jk;
    Error *local_err = NULL;

    /* For now, we only have anything to check on hash64 MMUs */
    if (!cpu->hash64_opts || !kvm_enabled()) {
        return;
    }

    kvm_get_smmu_info(&smmu_info, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (ppc_hash64_has(cpu, PPC_HASH64_1TSEG)
        && !(smmu_info.flags & KVM_PPC_1T_SEGMENTS)) {
        error_setg(errp,
                   "KVM does not support 1TiB segments which guest expects");
        return;
    }

    if (smmu_info.slb_size < cpu->hash64_opts->slb_size) {
        error_setg(errp, "KVM only supports %u SLB entries, but guest needs %u",
                   smmu_info.slb_size, cpu->hash64_opts->slb_size);
        return;
    }

    /*
     * Verify that every pagesize supported by the cpu model is
     * supported by KVM with the same encodings
     */
    for (iq = 0; iq < ARRAY_SIZE(cpu->hash64_opts->sps); iq++) {
        PPCHash64SegmentPageSizes *qsps = &cpu->hash64_opts->sps[iq];
        struct kvm_ppc_one_seg_page_size *ksps;

        for (ik = 0; ik < ARRAY_SIZE(smmu_info.sps); ik++) {
            if (qsps->page_shift == smmu_info.sps[ik].page_shift) {
                break;
            }
        }
        if (ik >= ARRAY_SIZE(smmu_info.sps)) {
            error_setg(errp, "KVM doesn't support base page shift %u",
                       qsps->page_shift);
            return;
        }

        ksps = &smmu_info.sps[ik];
        if (ksps->slb_enc != qsps->slb_enc) {
            error_setg(errp,
"KVM uses SLB encoding 0x%x for page shift %u, but guest expects 0x%x",
                       ksps->slb_enc, ksps->page_shift, qsps->slb_enc);
            return;
        }

        for (jq = 0; jq < ARRAY_SIZE(qsps->enc); jq++) {
            for (jk = 0; jk < ARRAY_SIZE(ksps->enc); jk++) {
                if (qsps->enc[jq].page_shift == ksps->enc[jk].page_shift) {
                    break;
                }
            }

            if (jk >= ARRAY_SIZE(ksps->enc)) {
                error_setg(errp, "KVM doesn't support page shift %u/%u",
                           qsps->enc[jq].page_shift, qsps->page_shift);
                return;
            }
            if (qsps->enc[jq].pte_enc != ksps->enc[jk].pte_enc) {
                error_setg(errp,
"KVM uses PTE encoding 0x%x for page shift %u/%u, but guest expects 0x%x",
                           ksps->enc[jk].pte_enc, qsps->enc[jq].page_shift,
                           qsps->page_shift, qsps->enc[jq].pte_enc);
                return;
            }
        }
    }

    if (ppc_hash64_has(cpu, PPC_HASH64_CI_LARGEPAGE)) {
        /* Mostly, which guest pagesizes we can use is determined by the
         * host pages used to map guest RAM, which is handled in the
         * platform code. Cache-Inhibited largepages (64k), however, are
         * used for I/O, so if they're mapped to the host at all it
         * will be a normal mapping, not a special hugepage one used
         * for RAM. */
        if (getpagesize() < 0x10000) {
            error_setg(errp,
                       "KVM can't supply 64kiB CI pages, which guest expects");
        }
    }
}
#endif /* defined(TARGET_PPC64) */

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return POWERPC_CPU(cpu)->vcpu_id;
}

/* e500 supports 2 h/w breakpoints and 2 watchpoints.
 * book3s supports only 1 watchpoint, so an array size
 * of 4 is sufficient for now.
 */
#define MAX_HW_BKPTS 4

static struct HWBreakpoint {
    target_ulong addr;
    int type;
} hw_debug_points[MAX_HW_BKPTS];

static CPUWatchpoint hw_watchpoint;

/* By default there is no breakpoint and no watchpoint supported */
static int max_hw_breakpoint;
static int max_hw_watchpoint;
static int nb_hw_breakpoint;
static int nb_hw_watchpoint;

static void kvmppc_hw_debug_points_init(CPUPPCState *cenv)
{
    if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
        max_hw_breakpoint = 2;
        max_hw_watchpoint = 2;
    }

    if ((max_hw_breakpoint + max_hw_watchpoint) > MAX_HW_BKPTS) {
        fprintf(stderr, "Error initializing h/w breakpoints\n");
        return;
    }
}

int kvm_arch_init_vcpu(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *cenv = &cpu->env;
    int ret;

    /* Synchronize sregs with kvm */
    ret = kvm_arch_sync_sregs(cpu);
    if (ret) {
        if (ret == -EINVAL) {
            error_report("Register sync failed... If you're using kvm-hv.ko,"
                         " only \"-cpu host\" is possible");
        }
        return ret;
    }

    idle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, kvm_kick_cpu, cpu);

    switch (cenv->mmu_model) {
    case POWERPC_MMU_BOOKE206:
        /* This target supports access to KVM's guest TLB */
        ret = kvm_booke206_tlb_init(cpu);
        break;
    case POWERPC_MMU_2_07:
        if (!cap_htm && !kvmppc_is_pr(cs->kvm_state)) {
            /* KVM-HV has transactional memory on POWER8 even without the
             * KVM_CAP_PPC_HTM extension, so enable it here instead as
             * long as it's available to userspace on the host. */
            if (qemu_getauxval(AT_HWCAP2) & PPC_FEATURE2_HAS_HTM) {
                cap_htm = true;
            }
        }
        break;
    default:
        break;
    }

    kvm_get_one_reg(cs, KVM_REG_PPC_DEBUG_INST, &debug_inst_opcode);
    kvmppc_hw_debug_points_init(cenv);

    return ret;
}

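/* Push QEMU's shadow of the BookE software TLB back to KVM, marking all
 * entries dirty so the kernel picks up every one of them. */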
static void kvm_sw_tlb_put(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    struct kvm_dirty_tlb dirty_tlb;
    unsigned char *bitmap;
    int ret;

    if (!env->kvm_sw_tlb) {
        return;
    }

    bitmap = g_malloc((env->nb_tlb + 7) / 8);
    memset(bitmap, 0xFF, (env->nb_tlb + 7) / 8);

    dirty_tlb.bitmap = (uintptr_t)bitmap;
    dirty_tlb.num_dirty = env->nb_tlb;

    ret = kvm_vcpu_ioctl(cs, KVM_DIRTY_TLB, &dirty_tlb);
    if (ret) {
        fprintf(stderr, "%s: KVM_DIRTY_TLB: %s\n",
                __func__, strerror(-ret));
    }

    g_free(bitmap);
}

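/* Read or write a single SPR through KVM's ONE_REG interface; 32-bit and
 * 64-bit register sizes are handled, anything else aborts. */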
static void kvm_get_one_spr(CPUState *cs, uint64_t id, int spr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    union {
        uint32_t u32;
        uint64_t u64;
    } val;
    struct kvm_one_reg reg = {
        .id = id,
        .addr = (uintptr_t) &val,
    };
    int ret;

    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret != 0) {
        trace_kvm_failed_spr_get(spr, strerror(errno));
    } else {
        switch (id & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
            env->spr[spr] = val.u32;
            break;

        case KVM_REG_SIZE_U64:
            env->spr[spr] = val.u64;
            break;

        default:
            /* Don't handle this size yet */
            abort();
        }
    }
}

static void kvm_put_one_spr(CPUState *cs, uint64_t id, int spr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    union {
        uint32_t u32;
        uint64_t u64;
    } val;
    struct kvm_one_reg reg = {
        .id = id,
        .addr = (uintptr_t) &val,
    };
    int ret;

    switch (id & KVM_REG_SIZE_MASK) {
    case KVM_REG_SIZE_U32:
        val.u32 = env->spr[spr];
        break;

    case KVM_REG_SIZE_U64:
        val.u64 = env->spr[spr];
        break;

    default:
        /* Don't handle this size yet */
        abort();
    }

    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret != 0) {
        trace_kvm_failed_spr_set(spr, strerror(errno));
    }
}

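/* Copy the guest floating point, VSX and Altivec state into KVM.  FPRs
 * are written as 128-bit VSX registers when the CPU has VSX. */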
static int kvm_put_fp(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    struct kvm_one_reg reg;
    int i;
    int ret;

    if (env->insns_flags & PPC_FLOAT) {
        uint64_t fpscr = env->fpscr;
        bool vsx = !!(env->insns_flags2 & PPC2_VSX);

        reg.id = KVM_REG_PPC_FPSCR;
        reg.addr = (uintptr_t)&fpscr;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            DPRINTF("Unable to set FPSCR to KVM: %s\n", strerror(errno));
            return ret;
        }

        for (i = 0; i < 32; i++) {
            uint64_t vsr[2];
            uint64_t *fpr = cpu_fpr_ptr(&cpu->env, i);
            uint64_t *vsrl = cpu_vsrl_ptr(&cpu->env, i);

#ifdef HOST_WORDS_BIGENDIAN
            vsr[0] = float64_val(*fpr);
            vsr[1] = *vsrl;
#else
            vsr[0] = *vsrl;
            vsr[1] = float64_val(*fpr);
#endif
            reg.addr = (uintptr_t) &vsr;
            reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);

            ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
            if (ret < 0) {
                DPRINTF("Unable to set %s%d to KVM: %s\n", vsx ? "VSR" : "FPR",
                        i, strerror(errno));
                return ret;
            }
        }
    }

    if (env->insns_flags & PPC_ALTIVEC) {
        reg.id = KVM_REG_PPC_VSCR;
        reg.addr = (uintptr_t)&env->vscr;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            DPRINTF("Unable to set VSCR to KVM: %s\n", strerror(errno));
            return ret;
        }

        for (i = 0; i < 32; i++) {
            reg.id = KVM_REG_PPC_VR(i);
            reg.addr = (uintptr_t)cpu_avr_ptr(env, i);
            ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
            if (ret < 0) {
                DPRINTF("Unable to set VR%d to KVM: %s\n", i, strerror(errno));
                return ret;
            }
        }
    }

    return 0;
}

static int kvm_get_fp(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    struct kvm_one_reg reg;
    int i;
    int ret;

    if (env->insns_flags & PPC_FLOAT) {
        uint64_t fpscr;
        bool vsx = !!(env->insns_flags2 & PPC2_VSX);

        reg.id = KVM_REG_PPC_FPSCR;
        reg.addr = (uintptr_t)&fpscr;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret < 0) {
            DPRINTF("Unable to get FPSCR from KVM: %s\n", strerror(errno));
            return ret;
        } else {
            env->fpscr = fpscr;
        }

        for (i = 0; i < 32; i++) {
            uint64_t vsr[2];
            uint64_t *fpr = cpu_fpr_ptr(&cpu->env, i);
            uint64_t *vsrl = cpu_vsrl_ptr(&cpu->env, i);

            reg.addr = (uintptr_t) &vsr;
            reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);

            ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
            if (ret < 0) {
                DPRINTF("Unable to get %s%d from KVM: %s\n",
                        vsx ? "VSR" : "FPR", i, strerror(errno));
                return ret;
            } else {
#ifdef HOST_WORDS_BIGENDIAN
                *fpr = vsr[0];
                if (vsx) {
                    *vsrl = vsr[1];
                }
#else
                *fpr = vsr[1];
                if (vsx) {
                    *vsrl = vsr[0];
                }
#endif
            }
        }
    }

    if (env->insns_flags & PPC_ALTIVEC) {
        reg.id = KVM_REG_PPC_VSCR;
        reg.addr = (uintptr_t)&env->vscr;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret < 0) {
            DPRINTF("Unable to get VSCR from KVM: %s\n", strerror(errno));
            return ret;
        }

        for (i = 0; i < 32; i++) {
            reg.id = KVM_REG_PPC_VR(i);
            reg.addr = (uintptr_t)cpu_avr_ptr(env, i);
            ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
            if (ret < 0) {
                DPRINTF("Unable to get VR%d from KVM: %s\n",
                        i, strerror(errno));
                return ret;
            }
        }
    }

    return 0;
}

#if defined(TARGET_PPC64)
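/* Fetch the VPA, SLB shadow and dispatch trace log addresses from KVM */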
static int kvm_get_vpa(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    sPAPRCPUState *spapr_cpu = spapr_cpu_state(cpu);
    struct kvm_one_reg reg;
    int ret;

    reg.id = KVM_REG_PPC_VPA_ADDR;
    reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret < 0) {
        DPRINTF("Unable to get VPA address from KVM: %s\n", strerror(errno));
        return ret;
    }

    assert((uintptr_t)&spapr_cpu->slb_shadow_size
           == ((uintptr_t)&spapr_cpu->slb_shadow_addr + 8));
    reg.id = KVM_REG_PPC_VPA_SLB;
    reg.addr = (uintptr_t)&spapr_cpu->slb_shadow_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret < 0) {
        DPRINTF("Unable to get SLB shadow state from KVM: %s\n",
                strerror(errno));
        return ret;
    }

    assert((uintptr_t)&spapr_cpu->dtl_size
           == ((uintptr_t)&spapr_cpu->dtl_addr + 8));
    reg.id = KVM_REG_PPC_VPA_DTL;
    reg.addr = (uintptr_t)&spapr_cpu->dtl_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret < 0) {
        DPRINTF("Unable to get dispatch trace log state from KVM: %s\n",
                strerror(errno));
        return ret;
    }

    return 0;
}

static int kvm_put_vpa(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    sPAPRCPUState *spapr_cpu = spapr_cpu_state(cpu);
    struct kvm_one_reg reg;
    int ret;

    /* SLB shadow or DTL can't be registered unless a master VPA is
     * registered.  That means when restoring state, if a VPA *is*
     * registered, we need to set that up first.  If not, we need to
     * deregister the others before deregistering the master VPA */
    assert(spapr_cpu->vpa_addr
           || !(spapr_cpu->slb_shadow_addr || spapr_cpu->dtl_addr));

    if (spapr_cpu->vpa_addr) {
        reg.id = KVM_REG_PPC_VPA_ADDR;
        reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno));
            return ret;
        }
    }

    assert((uintptr_t)&spapr_cpu->slb_shadow_size
           == ((uintptr_t)&spapr_cpu->slb_shadow_addr + 8));
    reg.id = KVM_REG_PPC_VPA_SLB;
    reg.addr = (uintptr_t)&spapr_cpu->slb_shadow_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret < 0) {
        DPRINTF("Unable to set SLB shadow state to KVM: %s\n", strerror(errno));
        return ret;
    }

    assert((uintptr_t)&spapr_cpu->dtl_size
           == ((uintptr_t)&spapr_cpu->dtl_addr + 8));
    reg.id = KVM_REG_PPC_VPA_DTL;
    reg.addr = (uintptr_t)&spapr_cpu->dtl_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret < 0) {
        DPRINTF("Unable to set dispatch trace log state to KVM: %s\n",
                strerror(errno));
        return ret;
    }

    if (!spapr_cpu->vpa_addr) {
        reg.id = KVM_REG_PPC_VPA_ADDR;
        reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno));
            return ret;
        }
    }

    return 0;
}
#endif /* TARGET_PPC64 */

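/* Push the Book3S MMU state (SDR1, SLB, segment registers and BATs) to KVM */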
int kvmppc_put_books_sregs(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    struct kvm_sregs sregs;
    int i;

    sregs.pvr = env->spr[SPR_PVR];

    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc =
            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        sregs.u.s.sdr1 = vhc->encode_hpt_for_kvm_pr(cpu->vhyp);
    } else {
        sregs.u.s.sdr1 = env->spr[SPR_SDR1];
    }

    /* Sync SLB */
#ifdef TARGET_PPC64
    for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
        sregs.u.s.ppc64.slb[i].slbe = env->slb[i].esid;
        if (env->slb[i].esid & SLB_ESID_V) {
            sregs.u.s.ppc64.slb[i].slbe |= i;
        }
        sregs.u.s.ppc64.slb[i].slbv = env->slb[i].vsid;
    }
#endif

    /* Sync SRs */
    for (i = 0; i < 16; i++) {
        sregs.u.s.ppc32.sr[i] = env->sr[i];
    }

    /* Sync BATs */
    for (i = 0; i < 8; i++) {
        /* Beware. We have to swap upper and lower bits here */
        sregs.u.s.ppc32.dbat[i] = ((uint64_t)env->DBAT[0][i] << 32)
            | env->DBAT[1][i];
        sregs.u.s.ppc32.ibat[i] = ((uint64_t)env->IBAT[0][i] << 32)
            | env->IBAT[1][i];
    }

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
}

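/* Write the register state (GPRs, CR, SPRs, FP/VMX, TM state, VPA) from
 * QEMU back into KVM */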
int kvm_arch_put_registers(CPUState *cs, int level)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    struct kvm_regs regs;
    int ret;
    int i;

    ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
    if (ret < 0) {
        return ret;
    }

    regs.ctr = env->ctr;
    regs.lr  = env->lr;
    regs.xer = cpu_read_xer(env);
    regs.msr = env->msr;
    regs.pc = env->nip;

    regs.srr0 = env->spr[SPR_SRR0];
    regs.srr1 = env->spr[SPR_SRR1];

    regs.sprg0 = env->spr[SPR_SPRG0];
    regs.sprg1 = env->spr[SPR_SPRG1];
    regs.sprg2 = env->spr[SPR_SPRG2];
    regs.sprg3 = env->spr[SPR_SPRG3];
    regs.sprg4 = env->spr[SPR_SPRG4];
    regs.sprg5 = env->spr[SPR_SPRG5];
    regs.sprg6 = env->spr[SPR_SPRG6];
    regs.sprg7 = env->spr[SPR_SPRG7];

    regs.pid = env->spr[SPR_BOOKE_PID];

    for (i = 0; i < 32; i++) {
        regs.gpr[i] = env->gpr[i];
    }

    regs.cr = 0;
    for (i = 0; i < 8; i++) {
        regs.cr |= (env->crf[i] & 15) << (4 * (7 - i));
    }

    ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
    if (ret < 0) {
        return ret;
    }

    kvm_put_fp(cs);

    if (env->tlb_dirty) {
        kvm_sw_tlb_put(cpu);
        env->tlb_dirty = false;
    }

    if (cap_segstate && (level >= KVM_PUT_RESET_STATE)) {
        ret = kvmppc_put_books_sregs(cpu);
        if (ret < 0) {
            return ret;
        }
    }

    if (cap_hior && (level >= KVM_PUT_RESET_STATE)) {
        kvm_put_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
    }

    if (cap_one_reg) {
        int i;

        /* We deliberately ignore errors here: for kernels which have
         * the ONE_REG calls but don't support the specific registers,
         * there's a reasonable chance things will still work, at least
         * until we try to migrate. */
        for (i = 0; i < 1024; i++) {
            uint64_t id = env->spr_cb[i].one_reg_id;

            if (id != 0) {
                kvm_put_one_spr(cs, id, i);
            }
        }

#ifdef TARGET_PPC64
        if (msr_ts) {
            for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
                kvm_set_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
            }
            for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
                kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
            }
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
        }

        if (cap_papr) {
            if (kvm_put_vpa(cs) < 0) {
                DPRINTF("Warning: Unable to set VPA information to KVM\n");
            }
        }

        kvm_set_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
#endif /* TARGET_PPC64 */
    }

    return ret;
}

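/* Recompute a BookE exception vector from its IVOR and the current IVPR */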
static void kvm_sync_excp(CPUPPCState *env, int vector, int ivor)
{
    env->excp_vectors[vector] = env->spr[ivor] + env->spr[SPR_BOOKE_IVPR];
}

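/* Pull the BookE special registers (exception state, timers, IVORs) out of
 * KVM into the CPU state */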
static int kvmppc_get_booke_sregs(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    struct kvm_sregs sregs;
    int ret;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
    if (ret < 0) {
        return ret;
    }

    if (sregs.u.e.features & KVM_SREGS_E_BASE) {
        env->spr[SPR_BOOKE_CSRR0] = sregs.u.e.csrr0;
        env->spr[SPR_BOOKE_CSRR1] = sregs.u.e.csrr1;
        env->spr[SPR_BOOKE_ESR] = sregs.u.e.esr;
        env->spr[SPR_BOOKE_DEAR] = sregs.u.e.dear;
        env->spr[SPR_BOOKE_MCSR] = sregs.u.e.mcsr;
        env->spr[SPR_BOOKE_TSR] = sregs.u.e.tsr;
        env->spr[SPR_BOOKE_TCR] = sregs.u.e.tcr;
        env->spr[SPR_DECR] = sregs.u.e.dec;
        env->spr[SPR_TBL] = sregs.u.e.tb & 0xffffffff;
        env->spr[SPR_TBU] = sregs.u.e.tb >> 32;
        env->spr[SPR_VRSAVE] = sregs.u.e.vrsave;
    }

    if (sregs.u.e.features & KVM_SREGS_E_ARCH206) {
        env->spr[SPR_BOOKE_PIR] = sregs.u.e.pir;
        env->spr[SPR_BOOKE_MCSRR0] = sregs.u.e.mcsrr0;
        env->spr[SPR_BOOKE_MCSRR1] = sregs.u.e.mcsrr1;
        env->spr[SPR_BOOKE_DECAR] = sregs.u.e.decar;
        env->spr[SPR_BOOKE_IVPR] = sregs.u.e.ivpr;
    }

    if (sregs.u.e.features & KVM_SREGS_E_64) {
        env->spr[SPR_BOOKE_EPCR] = sregs.u.e.epcr;
    }

    if (sregs.u.e.features & KVM_SREGS_E_SPRG8) {
        env->spr[SPR_BOOKE_SPRG8] = sregs.u.e.sprg8;
    }

    if (sregs.u.e.features & KVM_SREGS_E_IVOR) {
        env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0];
        kvm_sync_excp(env, POWERPC_EXCP_CRITICAL,  SPR_BOOKE_IVOR0);
        env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1];
        kvm_sync_excp(env, POWERPC_EXCP_MCHECK,  SPR_BOOKE_IVOR1);
        env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2];
        kvm_sync_excp(env, POWERPC_EXCP_DSI,  SPR_BOOKE_IVOR2);
        env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3];
        kvm_sync_excp(env, POWERPC_EXCP_ISI,  SPR_BOOKE_IVOR3);
        env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4];
        kvm_sync_excp(env, POWERPC_EXCP_EXTERNAL,  SPR_BOOKE_IVOR4);
        env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5];
        kvm_sync_excp(env, POWERPC_EXCP_ALIGN,  SPR_BOOKE_IVOR5);
        env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6];
        kvm_sync_excp(env, POWERPC_EXCP_PROGRAM,  SPR_BOOKE_IVOR6);
        env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7];
        kvm_sync_excp(env, POWERPC_EXCP_FPU,  SPR_BOOKE_IVOR7);
        env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8];
        kvm_sync_excp(env, POWERPC_EXCP_SYSCALL,  SPR_BOOKE_IVOR8);
        env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9];
        kvm_sync_excp(env, POWERPC_EXCP_APU,  SPR_BOOKE_IVOR9);
        env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10];
        kvm_sync_excp(env, POWERPC_EXCP_DECR,  SPR_BOOKE_IVOR10);
        env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11];
        kvm_sync_excp(env, POWERPC_EXCP_FIT,  SPR_BOOKE_IVOR11);
108090dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12];
1081c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_WDT,  SPR_BOOKE_IVOR12);
108290dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13];
1083c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DTLB,  SPR_BOOKE_IVOR13);
108490dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14];
1085c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_ITLB,  SPR_BOOKE_IVOR14);
108690dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15];
1087c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DEBUG,  SPR_BOOKE_IVOR15);
108890dc8812SScott Wood 
108990dc8812SScott Wood         if (sregs.u.e.features & KVM_SREGS_E_SPE) {
109090dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0];
1091c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_SPEU,  SPR_BOOKE_IVOR32);
109290dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1];
1093c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_EFPDI,  SPR_BOOKE_IVOR33);
109490dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2];
1095c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_EFPRI,  SPR_BOOKE_IVOR34);
109690dc8812SScott Wood         }
109790dc8812SScott Wood 
109890dc8812SScott Wood         if (sregs.u.e.features & KVM_SREGS_E_PM) {
109990dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3];
1100c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_EPERFM,  SPR_BOOKE_IVOR35);
110190dc8812SScott Wood         }
110290dc8812SScott Wood 
110390dc8812SScott Wood         if (sregs.u.e.features & KVM_SREGS_E_PC) {
110490dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4];
1105c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_DOORI,  SPR_BOOKE_IVOR36);
110690dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5];
1107c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_DOORCI, SPR_BOOKE_IVOR37);
110890dc8812SScott Wood         }
110990dc8812SScott Wood     }
111090dc8812SScott Wood 
111190dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_ARCH206_MMU) {
111290dc8812SScott Wood         env->spr[SPR_BOOKE_MAS0] = sregs.u.e.mas0;
111390dc8812SScott Wood         env->spr[SPR_BOOKE_MAS1] = sregs.u.e.mas1;
111490dc8812SScott Wood         env->spr[SPR_BOOKE_MAS2] = sregs.u.e.mas2;
111590dc8812SScott Wood         env->spr[SPR_BOOKE_MAS3] = sregs.u.e.mas7_3 & 0xffffffff;
111690dc8812SScott Wood         env->spr[SPR_BOOKE_MAS4] = sregs.u.e.mas4;
111790dc8812SScott Wood         env->spr[SPR_BOOKE_MAS6] = sregs.u.e.mas6;
111890dc8812SScott Wood         env->spr[SPR_BOOKE_MAS7] = sregs.u.e.mas7_3 >> 32;
111990dc8812SScott Wood         env->spr[SPR_MMUCFG] = sregs.u.e.mmucfg;
112090dc8812SScott Wood         env->spr[SPR_BOOKE_TLB0CFG] = sregs.u.e.tlbcfg[0];
112190dc8812SScott Wood         env->spr[SPR_BOOKE_TLB1CFG] = sregs.u.e.tlbcfg[1];
112290dc8812SScott Wood     }
112390dc8812SScott Wood 
112490dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_EXP) {
112590dc8812SScott Wood         env->spr[SPR_BOOKE_EPR] = sregs.u.e.epr;
112690dc8812SScott Wood     }
112790dc8812SScott Wood 
112890dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_PD) {
112990dc8812SScott Wood         env->spr[SPR_BOOKE_EPLC] = sregs.u.e.eplc;
113090dc8812SScott Wood         env->spr[SPR_BOOKE_EPSC] = sregs.u.e.epsc;
113190dc8812SScott Wood     }
113290dc8812SScott Wood 
113390dc8812SScott Wood     if (sregs.u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
113490dc8812SScott Wood         env->spr[SPR_E500_SVR] = sregs.u.e.impl.fsl.svr;
113590dc8812SScott Wood         env->spr[SPR_Exxx_MCAR] = sregs.u.e.impl.fsl.mcar;
113690dc8812SScott Wood         env->spr[SPR_HID0] = sregs.u.e.impl.fsl.hid0;
113790dc8812SScott Wood 
113890dc8812SScott Wood         if (sregs.u.e.impl.fsl.features & KVM_SREGS_E_FSL_PIDn) {
113990dc8812SScott Wood             env->spr[SPR_BOOKE_PID1] = sregs.u.e.impl.fsl.pid1;
114090dc8812SScott Wood             env->spr[SPR_BOOKE_PID2] = sregs.u.e.impl.fsl.pid2;
114190dc8812SScott Wood         }
114290dc8812SScott Wood     }
1143a7a00a72SDavid Gibson 
1144a7a00a72SDavid Gibson     return 0;
1145fafc0b6aSAlexander Graf }
114690dc8812SScott Wood 
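/*
 * Read the Book3S MMU state (SDR1, SLB entries, segment registers and BATs)
 * from KVM and rebuild QEMU's copy of it.
 */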
1147a7a00a72SDavid Gibson static int kvmppc_get_books_sregs(PowerPCCPU *cpu)
1148a7a00a72SDavid Gibson {
1149a7a00a72SDavid Gibson     CPUPPCState *env = &cpu->env;
1150a7a00a72SDavid Gibson     struct kvm_sregs sregs;
1151a7a00a72SDavid Gibson     int ret;
1152a7a00a72SDavid Gibson     int i;
1153a7a00a72SDavid Gibson 
1154a7a00a72SDavid Gibson     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
115590dc8812SScott Wood     if (ret < 0) {
115690dc8812SScott Wood         return ret;
115790dc8812SScott Wood     }
115890dc8812SScott Wood 
1159e57ca75cSDavid Gibson     if (!cpu->vhyp) {
1160bb593904SDavid Gibson         ppc_store_sdr1(env, sregs.u.s.sdr1);
1161f3c75d42SAneesh Kumar K.V     }
1162ba5e5090SAlexander Graf 
1163ba5e5090SAlexander Graf     /* Sync SLB */
116482c09f2fSAlexander Graf #ifdef TARGET_PPC64
11654b4d4a21SAneesh Kumar K.V     /*
11664b4d4a21SAneesh Kumar K.V      * The packed SLB array we get from KVM_GET_SREGS only contains
1167a7a00a72SDavid Gibson      * information about valid entries. So we flush our internal copy
1168a7a00a72SDavid Gibson      * to get rid of stale ones, then put all valid SLB entries back
1169a7a00a72SDavid Gibson      * in.
11704b4d4a21SAneesh Kumar K.V      */
11714b4d4a21SAneesh Kumar K.V     memset(env->slb, 0, sizeof(env->slb));
1172d83af167SAneesh Kumar K.V     for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
11734b4d4a21SAneesh Kumar K.V         target_ulong rb = sregs.u.s.ppc64.slb[i].slbe;
11744b4d4a21SAneesh Kumar K.V         target_ulong rs = sregs.u.s.ppc64.slb[i].slbv;
11754b4d4a21SAneesh Kumar K.V         /*
11764b4d4a21SAneesh Kumar K.V          * Only restore valid entries
11774b4d4a21SAneesh Kumar K.V          */
11784b4d4a21SAneesh Kumar K.V         if (rb & SLB_ESID_V) {
1179bcd81230SDavid Gibson             ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs);
11804b4d4a21SAneesh Kumar K.V         }
1181ba5e5090SAlexander Graf     }
118282c09f2fSAlexander Graf #endif
1183ba5e5090SAlexander Graf 
1184ba5e5090SAlexander Graf     /* Sync SRs */
1185ba5e5090SAlexander Graf     for (i = 0; i < 16; i++) {
1186ba5e5090SAlexander Graf         env->sr[i] = sregs.u.s.ppc32.sr[i];
1187ba5e5090SAlexander Graf     }
1188ba5e5090SAlexander Graf 
1189ba5e5090SAlexander Graf     /* Sync BATs */
1190ba5e5090SAlexander Graf     for (i = 0; i < 8; i++) {
1191ba5e5090SAlexander Graf         env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff;
1192ba5e5090SAlexander Graf         env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32;
1193ba5e5090SAlexander Graf         env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff;
1194ba5e5090SAlexander Graf         env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32;
1195ba5e5090SAlexander Graf     }
1196a7a00a72SDavid Gibson 
1197a7a00a72SDavid Gibson     return 0;
1198a7a00a72SDavid Gibson }
1199a7a00a72SDavid Gibson 
1200a7a00a72SDavid Gibson int kvm_arch_get_registers(CPUState *cs)
1201a7a00a72SDavid Gibson {
1202a7a00a72SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
1203a7a00a72SDavid Gibson     CPUPPCState *env = &cpu->env;
1204a7a00a72SDavid Gibson     struct kvm_regs regs;
1205a7a00a72SDavid Gibson     uint32_t cr;
1206a7a00a72SDavid Gibson     int i, ret;
1207a7a00a72SDavid Gibson 
1208a7a00a72SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
1209a7a00a72SDavid Gibson     if (ret < 0)
1210a7a00a72SDavid Gibson         return ret;
1211a7a00a72SDavid Gibson 
1212a7a00a72SDavid Gibson     cr = regs.cr;
1213a7a00a72SDavid Gibson     for (i = 7; i >= 0; i--) {
1214a7a00a72SDavid Gibson         env->crf[i] = cr & 15;
1215a7a00a72SDavid Gibson         cr >>= 4;
1216a7a00a72SDavid Gibson     }
1217a7a00a72SDavid Gibson 
1218a7a00a72SDavid Gibson     env->ctr = regs.ctr;
1219a7a00a72SDavid Gibson     env->lr = regs.lr;
1220a7a00a72SDavid Gibson     cpu_write_xer(env, regs.xer);
1221a7a00a72SDavid Gibson     env->msr = regs.msr;
1222a7a00a72SDavid Gibson     env->nip = regs.pc;
1223a7a00a72SDavid Gibson 
1224a7a00a72SDavid Gibson     env->spr[SPR_SRR0] = regs.srr0;
1225a7a00a72SDavid Gibson     env->spr[SPR_SRR1] = regs.srr1;
1226a7a00a72SDavid Gibson 
1227a7a00a72SDavid Gibson     env->spr[SPR_SPRG0] = regs.sprg0;
1228a7a00a72SDavid Gibson     env->spr[SPR_SPRG1] = regs.sprg1;
1229a7a00a72SDavid Gibson     env->spr[SPR_SPRG2] = regs.sprg2;
1230a7a00a72SDavid Gibson     env->spr[SPR_SPRG3] = regs.sprg3;
1231a7a00a72SDavid Gibson     env->spr[SPR_SPRG4] = regs.sprg4;
1232a7a00a72SDavid Gibson     env->spr[SPR_SPRG5] = regs.sprg5;
1233a7a00a72SDavid Gibson     env->spr[SPR_SPRG6] = regs.sprg6;
1234a7a00a72SDavid Gibson     env->spr[SPR_SPRG7] = regs.sprg7;
1235a7a00a72SDavid Gibson 
1236a7a00a72SDavid Gibson     env->spr[SPR_BOOKE_PID] = regs.pid;
1237a7a00a72SDavid Gibson 
1238a7a00a72SDavid Gibson     for (i = 0; i < 32; i++)
1239a7a00a72SDavid Gibson         env->gpr[i] = regs.gpr[i];
1240a7a00a72SDavid Gibson 
1241a7a00a72SDavid Gibson     kvm_get_fp(cs);
1242a7a00a72SDavid Gibson 
1243a7a00a72SDavid Gibson     if (cap_booke_sregs) {
1244a7a00a72SDavid Gibson         ret = kvmppc_get_booke_sregs(cpu);
1245a7a00a72SDavid Gibson         if (ret < 0) {
1246a7a00a72SDavid Gibson             return ret;
1247a7a00a72SDavid Gibson         }
1248a7a00a72SDavid Gibson     }
1249a7a00a72SDavid Gibson 
1250a7a00a72SDavid Gibson     if (cap_segstate) {
1251a7a00a72SDavid Gibson         ret = kvmppc_get_books_sregs(cpu);
1252a7a00a72SDavid Gibson         if (ret < 0) {
1253a7a00a72SDavid Gibson             return ret;
1254a7a00a72SDavid Gibson         }
1255fafc0b6aSAlexander Graf     }
1256ba5e5090SAlexander Graf 
1257d67d40eaSDavid Gibson     if (cap_hior) {
1258d67d40eaSDavid Gibson         kvm_get_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
1259d67d40eaSDavid Gibson     }
1260d67d40eaSDavid Gibson 
1261d67d40eaSDavid Gibson     if (cap_one_reg) {
1262d67d40eaSDavid Gibson         int i;
1263d67d40eaSDavid Gibson 
1264d67d40eaSDavid Gibson         /* We deliberately ignore errors here: for kernels which have
1265d67d40eaSDavid Gibson          * the ONE_REG calls but don't support the specific
1266d67d40eaSDavid Gibson          * registers, there's a reasonable chance things will still
1267d67d40eaSDavid Gibson          * work, at least until we try to migrate. */
1268d67d40eaSDavid Gibson         for (i = 0; i < 1024; i++) {
1269d67d40eaSDavid Gibson             uint64_t id = env->spr_cb[i].one_reg_id;
1270d67d40eaSDavid Gibson 
1271d67d40eaSDavid Gibson             if (id != 0) {
1272d67d40eaSDavid Gibson                 kvm_get_one_spr(cs, id, i);
1273d67d40eaSDavid Gibson             }
1274d67d40eaSDavid Gibson         }
12759b00ea49SDavid Gibson 
12769b00ea49SDavid Gibson #ifdef TARGET_PPC64
127780b3f79bSAlexey Kardashevskiy         if (msr_ts) {
127880b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
127980b3f79bSAlexey Kardashevskiy                 kvm_get_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
128080b3f79bSAlexey Kardashevskiy             }
128180b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
128280b3f79bSAlexey Kardashevskiy                 kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
128380b3f79bSAlexey Kardashevskiy             }
128480b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
128580b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
128680b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
128780b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
128880b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
128980b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
129080b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
129180b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
129280b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
129380b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
129480b3f79bSAlexey Kardashevskiy         }
129580b3f79bSAlexey Kardashevskiy 
12969b00ea49SDavid Gibson         if (cap_papr) {
12979b00ea49SDavid Gibson             if (kvm_get_vpa(cs) < 0) {
1298da56ff91SPeter Maydell                 DPRINTF("Warning: Unable to get VPA information from KVM\n");
12999b00ea49SDavid Gibson             }
13009b00ea49SDavid Gibson         }
130198a8b524SAlexey Kardashevskiy 
130298a8b524SAlexey Kardashevskiy         kvm_get_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
13039b00ea49SDavid Gibson #endif
1304d67d40eaSDavid Gibson     }
1305d67d40eaSDavid Gibson 
1306d76d1650Saurel32     return 0;
1307d76d1650Saurel32 }
1308d76d1650Saurel32 
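/*
 * Raise or lower the external interrupt line in KVM.  Only PPC_INTERRUPT_EXT
 * is forwarded here, and only when the cap_interrupt_unset/cap_interrupt_level
 * capabilities are available; anything else is silently ignored.
 */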
13091bc22652SAndreas Färber int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
1310fc87e185SAlexander Graf {
1311fc87e185SAlexander Graf     unsigned virq = level ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;
1312fc87e185SAlexander Graf 
1313fc87e185SAlexander Graf     if (irq != PPC_INTERRUPT_EXT) {
1314fc87e185SAlexander Graf         return 0;
1315fc87e185SAlexander Graf     }
1316fc87e185SAlexander Graf 
1317fc87e185SAlexander Graf     if (!kvm_enabled() || !cap_interrupt_unset || !cap_interrupt_level) {
1318fc87e185SAlexander Graf         return 0;
1319fc87e185SAlexander Graf     }
1320fc87e185SAlexander Graf 
13211bc22652SAndreas Färber     kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq);
1322fc87e185SAlexander Graf 
1323fc87e185SAlexander Graf     return 0;
1324fc87e185SAlexander Graf }
1325fc87e185SAlexander Graf 
1326a69dc537SThomas Huth #if defined(TARGET_PPC64)
132716415335SAlexander Graf #define PPC_INPUT_INT PPC970_INPUT_INT
132816415335SAlexander Graf #else
132916415335SAlexander Graf #define PPC_INPUT_INT PPC6xx_INPUT_INT
133016415335SAlexander Graf #endif
133116415335SAlexander Graf 
133220d695a9SAndreas Färber void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
1333d76d1650Saurel32 {
133420d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
133520d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
1336d76d1650Saurel32     int r;
1337d76d1650Saurel32     unsigned irq;
1338d76d1650Saurel32 
13394b8523eeSJan Kiszka     qemu_mutex_lock_iothread();
13404b8523eeSJan Kiszka 
13415cbdb3a3SStefan Weil     /* PowerPC QEMU tracks the various core input pins (interrupt, critical
1342d76d1650Saurel32      * interrupt, reset, etc) in PPC-specific env->irq_input_state. */
1343fc87e185SAlexander Graf     if (!cap_interrupt_level &&
1344fc87e185SAlexander Graf         run->ready_for_interrupt_injection &&
1345259186a7SAndreas Färber         (cs->interrupt_request & CPU_INTERRUPT_HARD) &&
134616415335SAlexander Graf         (env->irq_input_state & (1<<PPC_INPUT_INT)))
1347d76d1650Saurel32     {
1348d76d1650Saurel32         /* For now KVM disregards the 'irq' argument. However, in the
1349d76d1650Saurel32          * future KVM could cache it in-kernel to avoid a heavyweight exit
1350d76d1650Saurel32          * when reading the UIC.
1351d76d1650Saurel32          */
1352fc87e185SAlexander Graf         irq = KVM_INTERRUPT_SET;
1353d76d1650Saurel32 
1354da56ff91SPeter Maydell         DPRINTF("injected interrupt %d\n", irq);
13551bc22652SAndreas Färber         r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &irq);
135655e5c285SAndreas Färber         if (r < 0) {
135755e5c285SAndreas Färber             printf("cpu %d fail inject %x\n", cs->cpu_index, irq);
135855e5c285SAndreas Färber         }
1359c821c2bdSAlexander Graf 
1360c821c2bdSAlexander Graf         /* Always wake up soon in case the interrupt was level based */
1361bc72ad67SAlex Bligh         timer_mod(idle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
136273bcb24dSRutuja Shah                        (NANOSECONDS_PER_SECOND / 50));
1363d76d1650Saurel32     }
1364d76d1650Saurel32 
1365d76d1650Saurel32     /* We don't know if there are more interrupts pending after this. However,
1366d76d1650Saurel32      * the guest will return to userspace in the course of handling this one
1367d76d1650Saurel32      * anyway, so we will get a chance to deliver the rest. */
13684b8523eeSJan Kiszka 
13694b8523eeSJan Kiszka     qemu_mutex_unlock_iothread();
1370d76d1650Saurel32 }
1371d76d1650Saurel32 
13724c663752SPaolo Bonzini MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
1373d76d1650Saurel32 {
13744c663752SPaolo Bonzini     return MEMTXATTRS_UNSPECIFIED;
1375d76d1650Saurel32 }
1376d76d1650Saurel32 
137720d695a9SAndreas Färber int kvm_arch_process_async_events(CPUState *cs)
13780af691d7SMarcelo Tosatti {
1379259186a7SAndreas Färber     return cs->halted;
13800af691d7SMarcelo Tosatti }
13810af691d7SMarcelo Tosatti 
1382259186a7SAndreas Färber static int kvmppc_handle_halt(PowerPCCPU *cpu)
1383d76d1650Saurel32 {
1384259186a7SAndreas Färber     CPUState *cs = CPU(cpu);
1385259186a7SAndreas Färber     CPUPPCState *env = &cpu->env;
1386259186a7SAndreas Färber 
1387259186a7SAndreas Färber     if (!(cs->interrupt_request & CPU_INTERRUPT_HARD) && (msr_ee)) {
1388259186a7SAndreas Färber         cs->halted = 1;
138927103424SAndreas Färber         cs->exception_index = EXCP_HLT;
1390d76d1650Saurel32     }
1391d76d1650Saurel32 
1392bb4ea393SJan Kiszka     return 0;
1393d76d1650Saurel32 }
1394d76d1650Saurel32 
1395d76d1650Saurel32 /* map dcr access to existing qemu dcr emulation */
13961328c2bfSAndreas Färber static int kvmppc_handle_dcr_read(CPUPPCState *env, uint32_t dcrn, uint32_t *data)
1397d76d1650Saurel32 {
1398d76d1650Saurel32     if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0)
1399d76d1650Saurel32         fprintf(stderr, "Read from unhandled DCR (0x%x)\n", dcrn);
1400d76d1650Saurel32 
1401bb4ea393SJan Kiszka     return 0;
1402d76d1650Saurel32 }
1403d76d1650Saurel32 
14041328c2bfSAndreas Färber static int kvmppc_handle_dcr_write(CPUPPCState *env, uint32_t dcrn, uint32_t data)
1405d76d1650Saurel32 {
1406d76d1650Saurel32     if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0)
1407d76d1650Saurel32         fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);
1408d76d1650Saurel32 
1409bb4ea393SJan Kiszka     return 0;
1410d76d1650Saurel32 }
1411d76d1650Saurel32 
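/*
 * Software breakpoints: save the original instruction in bp->saved_insn and
 * patch the trapping debug_inst_opcode into guest memory in its place.
 */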
14128a0548f9SBharat Bhushan int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
14138a0548f9SBharat Bhushan {
14148a0548f9SBharat Bhushan     /* Mixed endian case is not handled */
14158a0548f9SBharat Bhushan     uint32_t sc = debug_inst_opcode;
14168a0548f9SBharat Bhushan 
14178a0548f9SBharat Bhushan     if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
14188a0548f9SBharat Bhushan                             sizeof(sc), 0) ||
14198a0548f9SBharat Bhushan         cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 1)) {
14208a0548f9SBharat Bhushan         return -EINVAL;
14218a0548f9SBharat Bhushan     }
14228a0548f9SBharat Bhushan 
14238a0548f9SBharat Bhushan     return 0;
14248a0548f9SBharat Bhushan }
14258a0548f9SBharat Bhushan 
14268a0548f9SBharat Bhushan int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
14278a0548f9SBharat Bhushan {
14288a0548f9SBharat Bhushan     uint32_t sc;
14298a0548f9SBharat Bhushan 
14308a0548f9SBharat Bhushan     if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 0) ||
14318a0548f9SBharat Bhushan         sc != debug_inst_opcode ||
14328a0548f9SBharat Bhushan         cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
14338a0548f9SBharat Bhushan                             sizeof(sc), 1)) {
14348a0548f9SBharat Bhushan         return -EINVAL;
14358a0548f9SBharat Bhushan     }
14368a0548f9SBharat Bhushan 
14378a0548f9SBharat Bhushan     return 0;
14388a0548f9SBharat Bhushan }
14398a0548f9SBharat Bhushan 
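/*
 * Hardware breakpoints and watchpoints share the hw_debug_points[] array;
 * entries are appended in insertion order and the helpers below find them
 * again with a linear search on address and GDB type.
 */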
144088365d17SBharat Bhushan static int find_hw_breakpoint(target_ulong addr, int type)
144188365d17SBharat Bhushan {
144288365d17SBharat Bhushan     int n;
144388365d17SBharat Bhushan 
144488365d17SBharat Bhushan     assert((nb_hw_breakpoint + nb_hw_watchpoint)
144588365d17SBharat Bhushan            <= ARRAY_SIZE(hw_debug_points));
144688365d17SBharat Bhushan 
144788365d17SBharat Bhushan     for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
144888365d17SBharat Bhushan         if (hw_debug_points[n].addr == addr &&
144988365d17SBharat Bhushan              hw_debug_points[n].type == type) {
145088365d17SBharat Bhushan             return n;
145188365d17SBharat Bhushan         }
145288365d17SBharat Bhushan     }
145388365d17SBharat Bhushan 
145488365d17SBharat Bhushan     return -1;
145588365d17SBharat Bhushan }
145688365d17SBharat Bhushan 
145788365d17SBharat Bhushan static int find_hw_watchpoint(target_ulong addr, int *flag)
145888365d17SBharat Bhushan {
145988365d17SBharat Bhushan     int n;
146088365d17SBharat Bhushan 
146188365d17SBharat Bhushan     n = find_hw_breakpoint(addr, GDB_WATCHPOINT_ACCESS);
146288365d17SBharat Bhushan     if (n >= 0) {
146388365d17SBharat Bhushan         *flag = BP_MEM_ACCESS;
146488365d17SBharat Bhushan         return n;
146588365d17SBharat Bhushan     }
146688365d17SBharat Bhushan 
146788365d17SBharat Bhushan     n = find_hw_breakpoint(addr, GDB_WATCHPOINT_WRITE);
146888365d17SBharat Bhushan     if (n >= 0) {
146988365d17SBharat Bhushan         *flag = BP_MEM_WRITE;
147088365d17SBharat Bhushan         return n;
147188365d17SBharat Bhushan     }
147288365d17SBharat Bhushan 
147388365d17SBharat Bhushan     n = find_hw_breakpoint(addr, GDB_WATCHPOINT_READ);
147488365d17SBharat Bhushan     if (n >= 0) {
147588365d17SBharat Bhushan         *flag = BP_MEM_READ;
147688365d17SBharat Bhushan         return n;
147788365d17SBharat Bhushan     }
147888365d17SBharat Bhushan 
147988365d17SBharat Bhushan     return -1;
148088365d17SBharat Bhushan }
148188365d17SBharat Bhushan 
148288365d17SBharat Bhushan int kvm_arch_insert_hw_breakpoint(target_ulong addr,
148388365d17SBharat Bhushan                                   target_ulong len, int type)
148488365d17SBharat Bhushan {
148588365d17SBharat Bhushan     if ((nb_hw_breakpoint + nb_hw_watchpoint) >= ARRAY_SIZE(hw_debug_points)) {
148688365d17SBharat Bhushan         return -ENOBUFS;
148788365d17SBharat Bhushan     }
148888365d17SBharat Bhushan 
148988365d17SBharat Bhushan     hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].addr = addr;
149088365d17SBharat Bhushan     hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].type = type;
149188365d17SBharat Bhushan 
149288365d17SBharat Bhushan     switch (type) {
149388365d17SBharat Bhushan     case GDB_BREAKPOINT_HW:
149488365d17SBharat Bhushan         if (nb_hw_breakpoint >= max_hw_breakpoint) {
149588365d17SBharat Bhushan             return -ENOBUFS;
149688365d17SBharat Bhushan         }
149788365d17SBharat Bhushan 
149888365d17SBharat Bhushan         if (find_hw_breakpoint(addr, type) >= 0) {
149988365d17SBharat Bhushan             return -EEXIST;
150088365d17SBharat Bhushan         }
150188365d17SBharat Bhushan 
150288365d17SBharat Bhushan         nb_hw_breakpoint++;
150388365d17SBharat Bhushan         break;
150488365d17SBharat Bhushan 
150588365d17SBharat Bhushan     case GDB_WATCHPOINT_WRITE:
150688365d17SBharat Bhushan     case GDB_WATCHPOINT_READ:
150788365d17SBharat Bhushan     case GDB_WATCHPOINT_ACCESS:
150888365d17SBharat Bhushan         if (nb_hw_watchpoint >= max_hw_watchpoint) {
150988365d17SBharat Bhushan             return -ENOBUFS;
151088365d17SBharat Bhushan         }
151188365d17SBharat Bhushan 
151288365d17SBharat Bhushan         if (find_hw_breakpoint(addr, type) >= 0) {
151388365d17SBharat Bhushan             return -EEXIST;
151488365d17SBharat Bhushan         }
151588365d17SBharat Bhushan 
151688365d17SBharat Bhushan         nb_hw_watchpoint++;
151788365d17SBharat Bhushan         break;
151888365d17SBharat Bhushan 
151988365d17SBharat Bhushan     default:
152088365d17SBharat Bhushan         return -ENOSYS;
152188365d17SBharat Bhushan     }
152288365d17SBharat Bhushan 
152388365d17SBharat Bhushan     return 0;
152488365d17SBharat Bhushan }
152588365d17SBharat Bhushan 
152688365d17SBharat Bhushan int kvm_arch_remove_hw_breakpoint(target_ulong addr,
152788365d17SBharat Bhushan                                   target_ulong len, int type)
152888365d17SBharat Bhushan {
152988365d17SBharat Bhushan     int n;
153088365d17SBharat Bhushan 
153188365d17SBharat Bhushan     n = find_hw_breakpoint(addr, type);
153288365d17SBharat Bhushan     if (n < 0) {
153388365d17SBharat Bhushan         return -ENOENT;
153488365d17SBharat Bhushan     }
153588365d17SBharat Bhushan 
153688365d17SBharat Bhushan     switch (type) {
153788365d17SBharat Bhushan     case GDB_BREAKPOINT_HW:
153888365d17SBharat Bhushan         nb_hw_breakpoint--;
153988365d17SBharat Bhushan         break;
154088365d17SBharat Bhushan 
154188365d17SBharat Bhushan     case GDB_WATCHPOINT_WRITE:
154288365d17SBharat Bhushan     case GDB_WATCHPOINT_READ:
154388365d17SBharat Bhushan     case GDB_WATCHPOINT_ACCESS:
154488365d17SBharat Bhushan         nb_hw_watchpoint--;
154588365d17SBharat Bhushan         break;
154688365d17SBharat Bhushan 
154788365d17SBharat Bhushan     default:
154888365d17SBharat Bhushan         return -ENOSYS;
154988365d17SBharat Bhushan     }
155088365d17SBharat Bhushan     hw_debug_points[n] = hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint];
155188365d17SBharat Bhushan 
155288365d17SBharat Bhushan     return 0;
155388365d17SBharat Bhushan }
155488365d17SBharat Bhushan 
155588365d17SBharat Bhushan void kvm_arch_remove_all_hw_breakpoints(void)
155688365d17SBharat Bhushan {
155788365d17SBharat Bhushan     nb_hw_breakpoint = nb_hw_watchpoint = 0;
155888365d17SBharat Bhushan }
155988365d17SBharat Bhushan 
15608a0548f9SBharat Bhushan void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
15618a0548f9SBharat Bhushan {
156288365d17SBharat Bhushan     int n;
156388365d17SBharat Bhushan 
15648a0548f9SBharat Bhushan     /* Software Breakpoint updates */
15658a0548f9SBharat Bhushan     if (kvm_sw_breakpoints_active(cs)) {
15668a0548f9SBharat Bhushan         dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
15678a0548f9SBharat Bhushan     }
156888365d17SBharat Bhushan 
156988365d17SBharat Bhushan     assert((nb_hw_breakpoint + nb_hw_watchpoint)
157088365d17SBharat Bhushan            <= ARRAY_SIZE(hw_debug_points));
157188365d17SBharat Bhushan     assert((nb_hw_breakpoint + nb_hw_watchpoint) <= ARRAY_SIZE(dbg->arch.bp));
157288365d17SBharat Bhushan 
157388365d17SBharat Bhushan     if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
157488365d17SBharat Bhushan         dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
157588365d17SBharat Bhushan         memset(dbg->arch.bp, 0, sizeof(dbg->arch.bp));
157688365d17SBharat Bhushan         for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
157788365d17SBharat Bhushan             switch (hw_debug_points[n].type) {
157888365d17SBharat Bhushan             case GDB_BREAKPOINT_HW:
157988365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_BREAKPOINT;
158088365d17SBharat Bhushan                 break;
158188365d17SBharat Bhushan             case GDB_WATCHPOINT_WRITE:
158288365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE;
158388365d17SBharat Bhushan                 break;
158488365d17SBharat Bhushan             case GDB_WATCHPOINT_READ:
158588365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_READ;
158688365d17SBharat Bhushan                 break;
158788365d17SBharat Bhushan             case GDB_WATCHPOINT_ACCESS:
158888365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE |
158988365d17SBharat Bhushan                                         KVMPPC_DEBUG_WATCH_READ;
159088365d17SBharat Bhushan                 break;
159188365d17SBharat Bhushan             default:
159288365d17SBharat Bhushan                 cpu_abort(cs, "Unsupported breakpoint type\n");
159388365d17SBharat Bhushan             }
159488365d17SBharat Bhushan             dbg->arch.bp[n].addr = hw_debug_points[n].addr;
159588365d17SBharat Bhushan         }
159688365d17SBharat Bhushan     }
15978a0548f9SBharat Bhushan }
15988a0548f9SBharat Bhushan 
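/*
 * Handle a KVM_EXIT_DEBUG exit.  Returns non-zero when the exit is ours
 * (single-stepping or one of our software/hardware break- and watchpoints)
 * so that it gets reported to the gdbstub; otherwise a program interrupt is
 * injected into the guest and 0 is returned so the vCPU simply resumes.
 */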
15998a0548f9SBharat Bhushan static int kvm_handle_debug(PowerPCCPU *cpu, struct kvm_run *run)
16008a0548f9SBharat Bhushan {
16018a0548f9SBharat Bhushan     CPUState *cs = CPU(cpu);
16028a0548f9SBharat Bhushan     CPUPPCState *env = &cpu->env;
16038a0548f9SBharat Bhushan     struct kvm_debug_exit_arch *arch_info = &run->debug.arch;
16048a0548f9SBharat Bhushan     int handle = 0;
160588365d17SBharat Bhushan     int n;
160688365d17SBharat Bhushan     int flag = 0;
16078a0548f9SBharat Bhushan 
160888365d17SBharat Bhushan     if (cs->singlestep_enabled) {
160988365d17SBharat Bhushan         handle = 1;
161088365d17SBharat Bhushan     } else if (arch_info->status) {
161188365d17SBharat Bhushan         if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
161288365d17SBharat Bhushan             if (arch_info->status & KVMPPC_DEBUG_BREAKPOINT) {
161388365d17SBharat Bhushan                 n = find_hw_breakpoint(arch_info->address, GDB_BREAKPOINT_HW);
161488365d17SBharat Bhushan                 if (n >= 0) {
161588365d17SBharat Bhushan                     handle = 1;
161688365d17SBharat Bhushan                 }
161788365d17SBharat Bhushan             } else if (arch_info->status & (KVMPPC_DEBUG_WATCH_READ |
161888365d17SBharat Bhushan                                             KVMPPC_DEBUG_WATCH_WRITE)) {
161988365d17SBharat Bhushan                 n = find_hw_watchpoint(arch_info->address,  &flag);
162088365d17SBharat Bhushan                 if (n >= 0) {
162188365d17SBharat Bhushan                     handle = 1;
162288365d17SBharat Bhushan                     cs->watchpoint_hit = &hw_watchpoint;
162388365d17SBharat Bhushan                     hw_watchpoint.vaddr = hw_debug_points[n].addr;
162488365d17SBharat Bhushan                     hw_watchpoint.flags = flag;
162588365d17SBharat Bhushan                 }
162688365d17SBharat Bhushan             }
162788365d17SBharat Bhushan         }
162888365d17SBharat Bhushan     } else if (kvm_find_sw_breakpoint(cs, arch_info->address)) {
16298a0548f9SBharat Bhushan         handle = 1;
16308a0548f9SBharat Bhushan     } else {
16318a0548f9SBharat Bhushan         /* QEMU is not able to handle this debug exception, so inject a
16328a0548f9SBharat Bhushan          * program exception into the guest;
16338a0548f9SBharat Bhushan          * yes, a program exception, NOT a debug exception !!
163488365d17SBharat Bhushan          * When QEMU is using the debug resources, the debug exception must
163588365d17SBharat Bhushan          * always be set. To achieve this we set MSR_DE and also set
163688365d17SBharat Bhushan          * MSRP_DEP so the guest cannot change MSR_DE.
163788365d17SBharat Bhushan          * When emulating debug resources for the guest, we want the guest
163888365d17SBharat Bhushan          * to control MSR_DE (enable/disable the debug interrupt on demand).
163988365d17SBharat Bhushan          * Supporting both configurations at once is NOT possible.
164088365d17SBharat Bhushan          * So the result is that we cannot share debug resources
164188365d17SBharat Bhushan          * between QEMU and the guest on BOOKE architecture.
164288365d17SBharat Bhushan          * In the current design QEMU gets priority over the guest:
164388365d17SBharat Bhushan          * if QEMU is using the debug resources, then the guest
164488365d17SBharat Bhushan          * cannot use them.
16458a0548f9SBharat Bhushan          * For software breakpoints QEMU uses a privileged instruction,
16468a0548f9SBharat Bhushan          * so there is no way we can be here because the guest raised a
16478a0548f9SBharat Bhushan          * debug exception; the only possibility is that the guest
16488a0548f9SBharat Bhushan          * executed a privileged / illegal instruction, and that is why
16498a0548f9SBharat Bhushan          * we are injecting a program interrupt.
16508a0548f9SBharat Bhushan          */
16518a0548f9SBharat Bhushan 
16528a0548f9SBharat Bhushan         cpu_synchronize_state(cs);
16538a0548f9SBharat Bhushan         /* env->nip is PC, so increment this by 4 to use
16548a0548f9SBharat Bhushan          * ppc_cpu_do_interrupt(), which sets srr0 = env->nip - 4.
16558a0548f9SBharat Bhushan          */
16568a0548f9SBharat Bhushan         env->nip += 4;
16578a0548f9SBharat Bhushan         cs->exception_index = POWERPC_EXCP_PROGRAM;
16588a0548f9SBharat Bhushan         env->error_code = POWERPC_EXCP_INVAL;
16598a0548f9SBharat Bhushan         ppc_cpu_do_interrupt(cs);
16608a0548f9SBharat Bhushan     }
16618a0548f9SBharat Bhushan 
16628a0548f9SBharat Bhushan     return handle;
16638a0548f9SBharat Bhushan }
16648a0548f9SBharat Bhushan 
166520d695a9SAndreas Färber int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
1666d76d1650Saurel32 {
166720d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
166820d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
1669bb4ea393SJan Kiszka     int ret;
1670d76d1650Saurel32 
16714b8523eeSJan Kiszka     qemu_mutex_lock_iothread();
16724b8523eeSJan Kiszka 
1673d76d1650Saurel32     switch (run->exit_reason) {
1674d76d1650Saurel32     case KVM_EXIT_DCR:
1675d76d1650Saurel32         if (run->dcr.is_write) {
1676da56ff91SPeter Maydell             DPRINTF("handle dcr write\n");
1677d76d1650Saurel32             ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data);
1678d76d1650Saurel32         } else {
1679da56ff91SPeter Maydell             DPRINTF("handle dcr read\n");
1680d76d1650Saurel32             ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data);
1681d76d1650Saurel32         }
1682d76d1650Saurel32         break;
1683d76d1650Saurel32     case KVM_EXIT_HLT:
1684da56ff91SPeter Maydell         DPRINTF("handle halt\n");
1685259186a7SAndreas Färber         ret = kvmppc_handle_halt(cpu);
1686d76d1650Saurel32         break;
1687c6304a4aSDavid Gibson #if defined(TARGET_PPC64)
1688f61b4bedSAlexander Graf     case KVM_EXIT_PAPR_HCALL:
1689da56ff91SPeter Maydell         DPRINTF("handle PAPR hypercall\n");
169020d695a9SAndreas Färber         run->papr_hcall.ret = spapr_hypercall(cpu,
1691aa100fa4SAndreas Färber                                               run->papr_hcall.nr,
1692f61b4bedSAlexander Graf                                               run->papr_hcall.args);
169378e8fde2SDavid Gibson         ret = 0;
1694f61b4bedSAlexander Graf         break;
1695f61b4bedSAlexander Graf #endif
16965b95b8b9SAlexander Graf     case KVM_EXIT_EPR:
1697da56ff91SPeter Maydell         DPRINTF("handle epr\n");
1698933b19eaSAlexander Graf         run->epr.epr = ldl_phys(cs->as, env->mpic_iack);
16995b95b8b9SAlexander Graf         ret = 0;
17005b95b8b9SAlexander Graf         break;
170131f2cb8fSBharat Bhushan     case KVM_EXIT_WATCHDOG:
1702da56ff91SPeter Maydell         DPRINTF("handle watchdog expiry\n");
170331f2cb8fSBharat Bhushan         watchdog_perform_action();
170431f2cb8fSBharat Bhushan         ret = 0;
170531f2cb8fSBharat Bhushan         break;
170631f2cb8fSBharat Bhushan 
17078a0548f9SBharat Bhushan     case KVM_EXIT_DEBUG:
17088a0548f9SBharat Bhushan         DPRINTF("handle debug exception\n");
17098a0548f9SBharat Bhushan         if (kvm_handle_debug(cpu, run)) {
17108a0548f9SBharat Bhushan             ret = EXCP_DEBUG;
17118a0548f9SBharat Bhushan             break;
17128a0548f9SBharat Bhushan         }
17138a0548f9SBharat Bhushan         /* re-enter, this exception was guest-internal */
17148a0548f9SBharat Bhushan         ret = 0;
17158a0548f9SBharat Bhushan         break;
17168a0548f9SBharat Bhushan 
171773aaec4aSJan Kiszka     default:
171873aaec4aSJan Kiszka         fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
171973aaec4aSJan Kiszka         ret = -1;
172073aaec4aSJan Kiszka         break;
1721d76d1650Saurel32     }
1722d76d1650Saurel32 
17234b8523eeSJan Kiszka     qemu_mutex_unlock_iothread();
1724d76d1650Saurel32     return ret;
1725d76d1650Saurel32 }
1726d76d1650Saurel32 
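/*
 * BookE watchdog plumbing: the helpers below push TSR/TCR updates into KVM
 * through ONE_REG registers (the OR/CLEAR variants let the kernel set or
 * clear individual TSR bits).  A board emulating the watchdog would
 * typically do something along these lines (hypothetical sketch, not code
 * from this file):
 *
 *     kvmppc_booke_watchdog_enable(cpu);   // opt in to KVM_EXIT_WATCHDOG
 *     kvmppc_set_tcr(cpu);                 // mirror SPR_BOOKE_TCR into KVM
 *     kvmppc_clear_tsr_bits(cpu, bits);    // acknowledge watchdog status
 */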
172731f2cb8fSBharat Bhushan int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
172831f2cb8fSBharat Bhushan {
172931f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
173031f2cb8fSBharat Bhushan     uint32_t bits = tsr_bits;
173131f2cb8fSBharat Bhushan     struct kvm_one_reg reg = {
173231f2cb8fSBharat Bhushan         .id = KVM_REG_PPC_OR_TSR,
173331f2cb8fSBharat Bhushan         .addr = (uintptr_t) &bits,
173431f2cb8fSBharat Bhushan     };
173531f2cb8fSBharat Bhushan 
173631f2cb8fSBharat Bhushan     return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
173731f2cb8fSBharat Bhushan }
173831f2cb8fSBharat Bhushan 
173931f2cb8fSBharat Bhushan int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
174031f2cb8fSBharat Bhushan {
174231f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
174331f2cb8fSBharat Bhushan     uint32_t bits = tsr_bits;
174431f2cb8fSBharat Bhushan     struct kvm_one_reg reg = {
174531f2cb8fSBharat Bhushan         .id = KVM_REG_PPC_CLEAR_TSR,
174631f2cb8fSBharat Bhushan         .addr = (uintptr_t) &bits,
174731f2cb8fSBharat Bhushan     };
174831f2cb8fSBharat Bhushan 
174931f2cb8fSBharat Bhushan     return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
175031f2cb8fSBharat Bhushan }
175131f2cb8fSBharat Bhushan 
175231f2cb8fSBharat Bhushan int kvmppc_set_tcr(PowerPCCPU *cpu)
175331f2cb8fSBharat Bhushan {
175431f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
175531f2cb8fSBharat Bhushan     CPUPPCState *env = &cpu->env;
175631f2cb8fSBharat Bhushan     uint32_t tcr = env->spr[SPR_BOOKE_TCR];
175731f2cb8fSBharat Bhushan 
175831f2cb8fSBharat Bhushan     struct kvm_one_reg reg = {
175931f2cb8fSBharat Bhushan         .id = KVM_REG_PPC_TCR,
176031f2cb8fSBharat Bhushan         .addr = (uintptr_t) &tcr,
176131f2cb8fSBharat Bhushan     };
176231f2cb8fSBharat Bhushan 
176331f2cb8fSBharat Bhushan     return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
176431f2cb8fSBharat Bhushan }
176531f2cb8fSBharat Bhushan 
176631f2cb8fSBharat Bhushan int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu)
176731f2cb8fSBharat Bhushan {
176831f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
176931f2cb8fSBharat Bhushan     int ret;
177031f2cb8fSBharat Bhushan 
177131f2cb8fSBharat Bhushan     if (!kvm_enabled()) {
177231f2cb8fSBharat Bhushan         return -1;
177331f2cb8fSBharat Bhushan     }
177431f2cb8fSBharat Bhushan 
177531f2cb8fSBharat Bhushan     if (!cap_ppc_watchdog) {
177631f2cb8fSBharat Bhushan         printf("warning: KVM does not support watchdog\n");
177731f2cb8fSBharat Bhushan         return -1;
177831f2cb8fSBharat Bhushan     }
177931f2cb8fSBharat Bhushan 
178048add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_BOOKE_WATCHDOG, 0);
178131f2cb8fSBharat Bhushan     if (ret < 0) {
178231f2cb8fSBharat Bhushan         fprintf(stderr, "%s: couldn't enable KVM_CAP_PPC_BOOKE_WATCHDOG: %s\n",
178331f2cb8fSBharat Bhushan                 __func__, strerror(-ret));
178431f2cb8fSBharat Bhushan         return ret;
178531f2cb8fSBharat Bhushan     }
178631f2cb8fSBharat Bhushan 
178731f2cb8fSBharat Bhushan     return ret;
178831f2cb8fSBharat Bhushan }
178931f2cb8fSBharat Bhushan 
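/*
 * Scan /proc/cpuinfo for a line starting with 'field' and copy the whole
 * line (field name included) into 'value'.  Returns 0 on success, -1 if no
 * such line is found.
 */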
1790dc333cd6SAlexander Graf static int read_cpuinfo(const char *field, char *value, int len)
1791dc333cd6SAlexander Graf {
1792dc333cd6SAlexander Graf     FILE *f;
1793dc333cd6SAlexander Graf     int ret = -1;
1794dc333cd6SAlexander Graf     int field_len = strlen(field);
1795dc333cd6SAlexander Graf     char line[512];
1796dc333cd6SAlexander Graf 
1797dc333cd6SAlexander Graf     f = fopen("/proc/cpuinfo", "r");
1798dc333cd6SAlexander Graf     if (!f) {
1799dc333cd6SAlexander Graf         return -1;
1800dc333cd6SAlexander Graf     }
1801dc333cd6SAlexander Graf 
1802dc333cd6SAlexander Graf     do {
1803dc333cd6SAlexander Graf         if (!fgets(line, sizeof(line), f)) {
1804dc333cd6SAlexander Graf             break;
1805dc333cd6SAlexander Graf         }
1806dc333cd6SAlexander Graf         if (!strncmp(line, field, field_len)) {
1807ae215068SJim Meyering             pstrcpy(value, len, line);
1808dc333cd6SAlexander Graf             ret = 0;
1809dc333cd6SAlexander Graf             break;
1810dc333cd6SAlexander Graf         }
1811dc333cd6SAlexander Graf     } while (*line);
1812dc333cd6SAlexander Graf 
1813dc333cd6SAlexander Graf     fclose(f);
1814dc333cd6SAlexander Graf 
1815dc333cd6SAlexander Graf     return ret;
1816dc333cd6SAlexander Graf }
1817dc333cd6SAlexander Graf 
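/*
 * The host kernel typically reports the timebase frequency in /proc/cpuinfo
 * as a line such as "timebase : 512000000" (value in Hz).  If that line
 * cannot be found or parsed we fall back to NANOSECONDS_PER_SECOND, i.e. we
 * assume a 1 GHz timebase.
 */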
1818dc333cd6SAlexander Graf uint32_t kvmppc_get_tbfreq(void)
1819dc333cd6SAlexander Graf {
1820dc333cd6SAlexander Graf     char line[512];
1821dc333cd6SAlexander Graf     char *ns;
182273bcb24dSRutuja Shah     uint32_t retval = NANOSECONDS_PER_SECOND;
1823dc333cd6SAlexander Graf 
1824dc333cd6SAlexander Graf     if (read_cpuinfo("timebase", line, sizeof(line))) {
1825dc333cd6SAlexander Graf         return retval;
1826dc333cd6SAlexander Graf     }
1827dc333cd6SAlexander Graf 
1828dc333cd6SAlexander Graf     if (!(ns = strchr(line, ':'))) {
1829dc333cd6SAlexander Graf         return retval;
1830dc333cd6SAlexander Graf     }
1831dc333cd6SAlexander Graf 
1832dc333cd6SAlexander Graf     ns++;
1833dc333cd6SAlexander Graf 
1834f9b8e7f6SShraddha Barke     return atoi(ns);
1835ef951443SNikunj A Dadhania }
1836ef951443SNikunj A Dadhania 
1837ef951443SNikunj A Dadhania bool kvmppc_get_host_serial(char **value)
1838ef951443SNikunj A Dadhania {
1839ef951443SNikunj A Dadhania     return g_file_get_contents("/proc/device-tree/system-id", value, NULL,
1840ef951443SNikunj A Dadhania                                NULL);
1841ef951443SNikunj A Dadhania }
1842ef951443SNikunj A Dadhania 
1843ef951443SNikunj A Dadhania bool kvmppc_get_host_model(char **value)
1844ef951443SNikunj A Dadhania {
1845ef951443SNikunj A Dadhania     return g_file_get_contents("/proc/device-tree/model", value, NULL, NULL);
1846dc333cd6SAlexander Graf }
18474513d923SGleb Natapov 
1848eadaada1SAlexander Graf /* Try to find a device tree node for a CPU with clock-frequency property */
1849eadaada1SAlexander Graf static int kvmppc_find_cpu_dt(char *buf, int buf_len)
1850eadaada1SAlexander Graf {
1851eadaada1SAlexander Graf     struct dirent *dirp;
1852eadaada1SAlexander Graf     DIR *dp;
1853eadaada1SAlexander Graf 
1854eadaada1SAlexander Graf     if ((dp = opendir(PROC_DEVTREE_CPU)) == NULL) {
1855eadaada1SAlexander Graf         printf("Can't open directory " PROC_DEVTREE_CPU "\n");
1856eadaada1SAlexander Graf         return -1;
1857eadaada1SAlexander Graf     }
1858eadaada1SAlexander Graf 
1859eadaada1SAlexander Graf     buf[0] = '\0';
1860eadaada1SAlexander Graf     while ((dirp = readdir(dp)) != NULL) {
1861eadaada1SAlexander Graf         FILE *f;
1862eadaada1SAlexander Graf         snprintf(buf, buf_len, "%s%s/clock-frequency", PROC_DEVTREE_CPU,
1863eadaada1SAlexander Graf                  dirp->d_name);
1864eadaada1SAlexander Graf         f = fopen(buf, "r");
1865eadaada1SAlexander Graf         if (f) {
1866eadaada1SAlexander Graf             snprintf(buf, buf_len, "%s%s", PROC_DEVTREE_CPU, dirp->d_name);
1867eadaada1SAlexander Graf             fclose(f);
1868eadaada1SAlexander Graf             break;
1869eadaada1SAlexander Graf         }
1870eadaada1SAlexander Graf         buf[0] = '\0';
1871eadaada1SAlexander Graf     }
1872eadaada1SAlexander Graf     closedir(dp);
1873eadaada1SAlexander Graf     if (buf[0] == '\0') {
1874eadaada1SAlexander Graf         printf("Unknown host!\n");
1875eadaada1SAlexander Graf         return -1;
1876eadaada1SAlexander Graf     }
1877eadaada1SAlexander Graf 
1878eadaada1SAlexander Graf     return 0;
1879eadaada1SAlexander Graf }
1880eadaada1SAlexander Graf 
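/*
 * Read a device tree property file that holds a single big-endian integer.
 * Returns the value converted to host endianness, -1 if the file cannot be
 * opened, or 0 if the property size is neither 4 nor 8 bytes.
 */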
18817d94a30bSSukadev Bhattiprolu static uint64_t kvmppc_read_int_dt(const char *filename)
1882eadaada1SAlexander Graf {
18839bc884b7SDavid Gibson     union {
18849bc884b7SDavid Gibson         uint32_t v32;
18859bc884b7SDavid Gibson         uint64_t v64;
18869bc884b7SDavid Gibson     } u;
1887eadaada1SAlexander Graf     FILE *f;
1888eadaada1SAlexander Graf     int len;
1889eadaada1SAlexander Graf 
18907d94a30bSSukadev Bhattiprolu     f = fopen(filename, "rb");
1891eadaada1SAlexander Graf     if (!f) {
1892eadaada1SAlexander Graf         return -1;
1893eadaada1SAlexander Graf     }
1894eadaada1SAlexander Graf 
18959bc884b7SDavid Gibson     len = fread(&u, 1, sizeof(u), f);
1896eadaada1SAlexander Graf     fclose(f);
1897eadaada1SAlexander Graf     switch (len) {
18989bc884b7SDavid Gibson     case 4:
18999bc884b7SDavid Gibson         /* property is a 32-bit quantity */
19009bc884b7SDavid Gibson         return be32_to_cpu(u.v32);
19019bc884b7SDavid Gibson     case 8:
19029bc884b7SDavid Gibson         return be64_to_cpu(u.v64);
1903eadaada1SAlexander Graf     }
1904eadaada1SAlexander Graf 
1905eadaada1SAlexander Graf     return 0;
1906eadaada1SAlexander Graf }
1907eadaada1SAlexander Graf 
19087d94a30bSSukadev Bhattiprolu /* Read a CPU node property from the host device tree that's a single
19097d94a30bSSukadev Bhattiprolu  * integer (32-bit or 64-bit).  Returns 0 if anything goes wrong
19107d94a30bSSukadev Bhattiprolu  * (can't find or open the property, or doesn't understand the
19117d94a30bSSukadev Bhattiprolu  * format) */
19127d94a30bSSukadev Bhattiprolu static uint64_t kvmppc_read_int_cpu_dt(const char *propname)
19137d94a30bSSukadev Bhattiprolu {
19147d94a30bSSukadev Bhattiprolu     char buf[PATH_MAX], *tmp;
19157d94a30bSSukadev Bhattiprolu     uint64_t val;
19167d94a30bSSukadev Bhattiprolu 
19177d94a30bSSukadev Bhattiprolu     if (kvmppc_find_cpu_dt(buf, sizeof(buf))) {
19187d94a30bSSukadev Bhattiprolu         return -1;
19197d94a30bSSukadev Bhattiprolu     }
19207d94a30bSSukadev Bhattiprolu 
19217d94a30bSSukadev Bhattiprolu     tmp = g_strdup_printf("%s/%s", buf, propname);
19227d94a30bSSukadev Bhattiprolu     val = kvmppc_read_int_dt(tmp);
19237d94a30bSSukadev Bhattiprolu     g_free(tmp);
19247d94a30bSSukadev Bhattiprolu 
19257d94a30bSSukadev Bhattiprolu     return val;
19267d94a30bSSukadev Bhattiprolu }
19277d94a30bSSukadev Bhattiprolu 
19289bc884b7SDavid Gibson uint64_t kvmppc_get_clockfreq(void)
19299bc884b7SDavid Gibson {
19309bc884b7SDavid Gibson     return kvmppc_read_int_cpu_dt("clock-frequency");
19319bc884b7SDavid Gibson }
19329bc884b7SDavid Gibson 
1933*7d050527SSuraj Jitindar Singh static int kvmppc_get_dec_bits(void)
1934*7d050527SSuraj Jitindar Singh {
1935*7d050527SSuraj Jitindar Singh     int nr_bits = kvmppc_read_int_cpu_dt("ibm,dec-bits");
1936*7d050527SSuraj Jitindar Singh 
1937*7d050527SSuraj Jitindar Singh     if (nr_bits > 0) {
1938*7d050527SSuraj Jitindar Singh         return nr_bits;
1939*7d050527SSuraj Jitindar Singh     }
1940*7d050527SSuraj Jitindar Singh     return 0;
1941*7d050527SSuraj Jitindar Singh }
1942*7d050527SSuraj Jitindar Singh 
19431a61a9aeSStuart Yoder static int kvmppc_get_pvinfo(CPUPPCState *env, struct kvm_ppc_pvinfo *pvinfo)
194445024f09SAlexander Graf {
1945a60f24b5SAndreas Färber     PowerPCCPU *cpu = ppc_env_get_cpu(env);
1946a60f24b5SAndreas Färber     CPUState *cs = CPU(cpu);
194745024f09SAlexander Graf 
19486fd33a75SAlexander Graf     if (kvm_vm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
19491a61a9aeSStuart Yoder         !kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_PVINFO, pvinfo)) {
19501a61a9aeSStuart Yoder         return 0;
19511a61a9aeSStuart Yoder     }
195245024f09SAlexander Graf 
19531a61a9aeSStuart Yoder     return 1;
19541a61a9aeSStuart Yoder }
19551a61a9aeSStuart Yoder 
19561a61a9aeSStuart Yoder int kvmppc_get_hasidle(CPUPPCState *env)
19571a61a9aeSStuart Yoder {
19581a61a9aeSStuart Yoder     struct kvm_ppc_pvinfo pvinfo;
19591a61a9aeSStuart Yoder 
19601a61a9aeSStuart Yoder     if (!kvmppc_get_pvinfo(env, &pvinfo) &&
19611a61a9aeSStuart Yoder         (pvinfo.flags & KVM_PPC_PVINFO_FLAGS_EV_IDLE)) {
19621a61a9aeSStuart Yoder         return 1;
19631a61a9aeSStuart Yoder     }
19641a61a9aeSStuart Yoder 
19651a61a9aeSStuart Yoder     return 0;
19661a61a9aeSStuart Yoder }
19671a61a9aeSStuart Yoder 
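/*
 * Fill 'buf' with the hypercall instruction sequence the guest should use.
 * Returns 0 when KVM supplied the sequence via KVM_PPC_GET_PVINFO, or 1 when
 * we fell back to the endian-agnostic "always fail" stub built below.  A
 * machine model would typically call this as, e.g. (hypothetical sketch):
 *     uint8_t hc[16];
 *     kvmppc_get_hypercall(env, hc, sizeof(hc));
 */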
19681a61a9aeSStuart Yoder int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
19691a61a9aeSStuart Yoder {
19701a61a9aeSStuart Yoder     uint32_t *hc = (uint32_t*)buf;
19711a61a9aeSStuart Yoder     struct kvm_ppc_pvinfo pvinfo;
19721a61a9aeSStuart Yoder 
19731a61a9aeSStuart Yoder     if (!kvmppc_get_pvinfo(env, &pvinfo)) {
19741a61a9aeSStuart Yoder         memcpy(buf, pvinfo.hcall, buf_len);
197545024f09SAlexander Graf         return 0;
197645024f09SAlexander Graf     }
197745024f09SAlexander Graf 
197845024f09SAlexander Graf     /*
1979d13fc32eSAlexander Graf      * Fallback to always fail hypercalls regardless of endianness:
198045024f09SAlexander Graf      *
1981d13fc32eSAlexander Graf      *     tdi 0,r0,72 (becomes b .+8 in wrong endian, nop in good endian)
198245024f09SAlexander Graf      *     li r3, -1
1983d13fc32eSAlexander Graf      *     b .+8       (becomes nop in wrong endian)
1984d13fc32eSAlexander Graf      *     bswap32(li r3, -1)
198545024f09SAlexander Graf      */
198645024f09SAlexander Graf 
1987d13fc32eSAlexander Graf     hc[0] = cpu_to_be32(0x08000048);
1988d13fc32eSAlexander Graf     hc[1] = cpu_to_be32(0x3860ffff);
1989d13fc32eSAlexander Graf     hc[2] = cpu_to_be32(0x48000008);
1990d13fc32eSAlexander Graf     hc[3] = cpu_to_be32(bswap32(0x3860ffff));
199145024f09SAlexander Graf 
19920ddbd053SAlexey Kardashevskiy     return 1;
199345024f09SAlexander Graf }
199445024f09SAlexander Graf 
1995026bfd89SDavid Gibson static inline int kvmppc_enable_hcall(KVMState *s, target_ulong hcall)
1996026bfd89SDavid Gibson {
1997026bfd89SDavid Gibson     return kvm_vm_enable_cap(s, KVM_CAP_PPC_ENABLE_HCALL, 0, hcall, 1);
1998026bfd89SDavid Gibson }
1999026bfd89SDavid Gibson 
2000026bfd89SDavid Gibson void kvmppc_enable_logical_ci_hcalls(void)
2001026bfd89SDavid Gibson {
2002026bfd89SDavid Gibson     /*
2003026bfd89SDavid Gibson      * FIXME: it would be nice if we could detect the cases where
2004026bfd89SDavid Gibson      * we're using a device which requires the in kernel
2005026bfd89SDavid Gibson      * implementation of these hcalls, but the kernel lacks them and
2006026bfd89SDavid Gibson      * produce a warning.
2007026bfd89SDavid Gibson      */
2008026bfd89SDavid Gibson     kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_LOAD);
2009026bfd89SDavid Gibson     kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_STORE);
2010026bfd89SDavid Gibson }
2011026bfd89SDavid Gibson 
2012ef9971ddSAlexey Kardashevskiy void kvmppc_enable_set_mode_hcall(void)
2013ef9971ddSAlexey Kardashevskiy {
2014ef9971ddSAlexey Kardashevskiy     kvmppc_enable_hcall(kvm_state, H_SET_MODE);
2015ef9971ddSAlexey Kardashevskiy }
2016ef9971ddSAlexey Kardashevskiy 
20175145ad4fSNathan Whitehorn void kvmppc_enable_clear_ref_mod_hcalls(void)
20185145ad4fSNathan Whitehorn {
20195145ad4fSNathan Whitehorn     kvmppc_enable_hcall(kvm_state, H_CLEAR_REF);
20205145ad4fSNathan Whitehorn     kvmppc_enable_hcall(kvm_state, H_CLEAR_MOD);
20215145ad4fSNathan Whitehorn }
20225145ad4fSNathan Whitehorn 
20231bc22652SAndreas Färber void kvmppc_set_papr(PowerPCCPU *cpu)
2024f61b4bedSAlexander Graf {
20251bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
2026f61b4bedSAlexander Graf     int ret;
2027f61b4bedSAlexander Graf 
2028da20aed1SDavid Gibson     if (!kvm_enabled()) {
2029da20aed1SDavid Gibson         return;
2030da20aed1SDavid Gibson     }
2031da20aed1SDavid Gibson 
203248add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_PAPR, 0);
2033f61b4bedSAlexander Graf     if (ret) {
2034072ed5f2SThomas Huth         error_report("This vCPU type or KVM version does not support PAPR");
2035072ed5f2SThomas Huth         exit(1);
2036f61b4bedSAlexander Graf     }
20379b00ea49SDavid Gibson 
20389b00ea49SDavid Gibson     /* Update the capability flag so we sync the right information
20399b00ea49SDavid Gibson      * with kvm */
20409b00ea49SDavid Gibson     cap_papr = 1;
2041f1af19d7SDavid Gibson }
2042f61b4bedSAlexander Graf 
2043d6e166c0SDavid Gibson int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr)
20446db5bb0fSAlexey Kardashevskiy {
2045d6e166c0SDavid Gibson     return kvm_set_one_reg(CPU(cpu), KVM_REG_PPC_ARCH_COMPAT, &compat_pvr);
20466db5bb0fSAlexey Kardashevskiy }
20476db5bb0fSAlexey Kardashevskiy 
20485b95b8b9SAlexander Graf void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy)
20495b95b8b9SAlexander Graf {
20505b95b8b9SAlexander Graf     CPUState *cs = CPU(cpu);
20515b95b8b9SAlexander Graf     int ret;
20525b95b8b9SAlexander Graf 
205348add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_EPR, 0, mpic_proxy);
20545b95b8b9SAlexander Graf     if (ret && mpic_proxy) {
2055072ed5f2SThomas Huth         error_report("This KVM version does not support EPR");
2056072ed5f2SThomas Huth         exit(1);
20575b95b8b9SAlexander Graf     }
20585b95b8b9SAlexander Graf }
20595b95b8b9SAlexander Graf 
2060e97c3636SDavid Gibson int kvmppc_smt_threads(void)
2061e97c3636SDavid Gibson {
2062e97c3636SDavid Gibson     return cap_ppc_smt ? cap_ppc_smt : 1;
2063e97c3636SDavid Gibson }
2064e97c3636SDavid Gibson 
2065fa98fbfcSSam Bobroff int kvmppc_set_smt_threads(int smt)
2066fa98fbfcSSam Bobroff {
2067fa98fbfcSSam Bobroff     int ret;
2068fa98fbfcSSam Bobroff 
2069fa98fbfcSSam Bobroff     ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_SMT, 0, smt, 0);
2070fa98fbfcSSam Bobroff     if (!ret) {
2071fa98fbfcSSam Bobroff         cap_ppc_smt = smt;
2072fa98fbfcSSam Bobroff     }
2073fa98fbfcSSam Bobroff     return ret;
2074fa98fbfcSSam Bobroff }
2075fa98fbfcSSam Bobroff 
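/*
 * cap_ppc_smt_possible is a bitmask in which bit n set means that a
 * VSMT mode of 2^n threads per core is supported, hence the loop
 * below walking the bits and printing the corresponding powers of
 * two.
 */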
2076fa98fbfcSSam Bobroff void kvmppc_hint_smt_possible(Error **errp)
2077fa98fbfcSSam Bobroff {
2078fa98fbfcSSam Bobroff     int i;
2079fa98fbfcSSam Bobroff     GString *g;
2080fa98fbfcSSam Bobroff     char *s;
2081fa98fbfcSSam Bobroff 
2082fa98fbfcSSam Bobroff     assert(kvm_enabled());
2083fa98fbfcSSam Bobroff     if (cap_ppc_smt_possible) {
2084fa98fbfcSSam Bobroff         g = g_string_new("Available VSMT modes:");
2085fa98fbfcSSam Bobroff         for (i = 63; i >= 0; i--) {
2086fa98fbfcSSam Bobroff             if ((1UL << i) & cap_ppc_smt_possible) {
2087fa98fbfcSSam Bobroff                 g_string_append_printf(g, " %lu", (1UL << i));
2088fa98fbfcSSam Bobroff             }
2089fa98fbfcSSam Bobroff         }
2090fa98fbfcSSam Bobroff         s = g_string_free(g, false);
2091fa98fbfcSSam Bobroff         error_append_hint(errp, "%s.\n", s);
2092fa98fbfcSSam Bobroff         g_free(s);
2093fa98fbfcSSam Bobroff     } else {
2094fa98fbfcSSam Bobroff         error_append_hint(errp,
2095fa98fbfcSSam Bobroff                           "This KVM seems to be too old to support VSMT.\n");
2096fa98fbfcSSam Bobroff     }
2097fa98fbfcSSam Bobroff }
2098fa98fbfcSSam Bobroff 
2099fa98fbfcSSam Bobroff 
21007f763a5dSDavid Gibson #ifdef TARGET_PPC64
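/*
 * Clamp the RMA to what the hash page table can cover: the limit
 * below works out to (HPT size / 128) * page size, i.e. one backing
 * page per 128-byte HPTE group.  For example, with 64 kiB backing
 * pages (page_shift 16) and a 16 MiB HPT (hash_shift 24) the cap is
 * 1ULL << (16 + 24 - 7), i.e. 8 GiB.
 */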
21017f763a5dSDavid Gibson uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift)
21027f763a5dSDavid Gibson {
2103f36951c1SDavid Gibson     struct kvm_ppc_smmu_info info;
2104f36951c1SDavid Gibson     long rampagesize, best_page_shift;
2105f36951c1SDavid Gibson     int i;
2106f36951c1SDavid Gibson 
2107f36951c1SDavid Gibson     /* Find the largest hardware supported page size that's less than
2108f36951c1SDavid Gibson      * or equal to the (logical) backing page size of guest RAM */
2109ab256960SGreg Kurz     kvm_get_smmu_info(&info, &error_fatal);
21109c607668SAlexey Kardashevskiy     rampagesize = qemu_getrampagesize();
2111f36951c1SDavid Gibson     best_page_shift = 0;
2112f36951c1SDavid Gibson 
2113f36951c1SDavid Gibson     for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
2114f36951c1SDavid Gibson         struct kvm_ppc_one_seg_page_size *sps = &info.sps[i];
2115f36951c1SDavid Gibson 
2116f36951c1SDavid Gibson         if (!sps->page_shift) {
2117f36951c1SDavid Gibson             continue;
2118f36951c1SDavid Gibson         }
2119f36951c1SDavid Gibson 
2120f36951c1SDavid Gibson         if ((sps->page_shift > best_page_shift)
2121f36951c1SDavid Gibson             && ((1UL << sps->page_shift) <= rampagesize)) {
2122f36951c1SDavid Gibson             best_page_shift = sps->page_shift;
2123f36951c1SDavid Gibson         }
2124f36951c1SDavid Gibson     }
2125f36951c1SDavid Gibson 
21267f763a5dSDavid Gibson     return MIN(current_size,
2127f36951c1SDavid Gibson                1ULL << (best_page_shift + hash_shift - 7));
21287f763a5dSDavid Gibson }
21297f763a5dSDavid Gibson #endif
21307f763a5dSDavid Gibson 
2131da95324eSAlexey Kardashevskiy bool kvmppc_spapr_use_multitce(void)
2132da95324eSAlexey Kardashevskiy {
2133da95324eSAlexey Kardashevskiy     return cap_spapr_multitce;
2134da95324eSAlexey Kardashevskiy }
2135da95324eSAlexey Kardashevskiy 
21363dc410aeSAlexey Kardashevskiy int kvmppc_spapr_enable_inkernel_multitce(void)
21373dc410aeSAlexey Kardashevskiy {
21383dc410aeSAlexey Kardashevskiy     int ret;
21393dc410aeSAlexey Kardashevskiy 
21403dc410aeSAlexey Kardashevskiy     ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_ENABLE_HCALL, 0,
21413dc410aeSAlexey Kardashevskiy                             H_PUT_TCE_INDIRECT, 1);
21423dc410aeSAlexey Kardashevskiy     if (!ret) {
21433dc410aeSAlexey Kardashevskiy         ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_ENABLE_HCALL, 0,
21443dc410aeSAlexey Kardashevskiy                                 H_STUFF_TCE, 1);
21453dc410aeSAlexey Kardashevskiy     }
21463dc410aeSAlexey Kardashevskiy 
21473dc410aeSAlexey Kardashevskiy     return ret;
21483dc410aeSAlexey Kardashevskiy }
21493dc410aeSAlexey Kardashevskiy 
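/*
 * Create an in-kernel TCE table for the given LIOBN and return a
 * userspace mapping of it, with the backing fd stored in *pfd.  The
 * 64-bit KVM_CREATE_SPAPR_TCE_64 ioctl is preferred; the legacy
 * KVM_CREATE_SPAPR_TCE ioctl only handles a 32-bit window size and a
 * zero bus offset.  Returns NULL whenever the caller should fall back
 * to a userspace-managed table.
 */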
2150d6ee2a7cSAlexey Kardashevskiy void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift,
2151d6ee2a7cSAlexey Kardashevskiy                               uint64_t bus_offset, uint32_t nb_table,
2152d6ee2a7cSAlexey Kardashevskiy                               int *pfd, bool need_vfio)
21530f5cb298SDavid Gibson {
21540f5cb298SDavid Gibson     long len;
21550f5cb298SDavid Gibson     int fd;
21560f5cb298SDavid Gibson     void *table;
21570f5cb298SDavid Gibson 
2158b5aec396SDavid Gibson     /* Must set fd to -1 so we don't try to munmap when called for
2159b5aec396SDavid Gibson      * destroying the table, which the upper layers -will- do
2160b5aec396SDavid Gibson      */
2161b5aec396SDavid Gibson     *pfd = -1;
21626a81dd17SDavid Gibson     if (!cap_spapr_tce || (need_vfio && !cap_spapr_vfio)) {
21630f5cb298SDavid Gibson         return NULL;
21640f5cb298SDavid Gibson     }
21650f5cb298SDavid Gibson 
2166d6ee2a7cSAlexey Kardashevskiy     if (cap_spapr_tce_64) {
2167d6ee2a7cSAlexey Kardashevskiy         struct kvm_create_spapr_tce_64 args = {
2168d6ee2a7cSAlexey Kardashevskiy             .liobn = liobn,
2169d6ee2a7cSAlexey Kardashevskiy             .page_shift = page_shift,
2170d6ee2a7cSAlexey Kardashevskiy             .offset = bus_offset >> page_shift,
2171d6ee2a7cSAlexey Kardashevskiy             .size = nb_table,
2172d6ee2a7cSAlexey Kardashevskiy             .flags = 0
2173d6ee2a7cSAlexey Kardashevskiy         };
2174d6ee2a7cSAlexey Kardashevskiy         fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE_64, &args);
2175d6ee2a7cSAlexey Kardashevskiy         if (fd < 0) {
2176d6ee2a7cSAlexey Kardashevskiy             fprintf(stderr,
2177d6ee2a7cSAlexey Kardashevskiy                     "KVM: Failed to create TCE64 table for liobn 0x%x\n",
2178d6ee2a7cSAlexey Kardashevskiy                     liobn);
2179d6ee2a7cSAlexey Kardashevskiy             return NULL;
2180d6ee2a7cSAlexey Kardashevskiy         }
2181d6ee2a7cSAlexey Kardashevskiy     } else if (cap_spapr_tce) {
2182d6ee2a7cSAlexey Kardashevskiy         uint64_t window_size = (uint64_t) nb_table << page_shift;
2183d6ee2a7cSAlexey Kardashevskiy         struct kvm_create_spapr_tce args = {
2184d6ee2a7cSAlexey Kardashevskiy             .liobn = liobn,
2185d6ee2a7cSAlexey Kardashevskiy             .window_size = window_size,
2186d6ee2a7cSAlexey Kardashevskiy         };
2187d6ee2a7cSAlexey Kardashevskiy         if ((window_size != args.window_size) || bus_offset) {
2188d6ee2a7cSAlexey Kardashevskiy             return NULL;
2189d6ee2a7cSAlexey Kardashevskiy         }
21900f5cb298SDavid Gibson         fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE, &args);
21910f5cb298SDavid Gibson         if (fd < 0) {
2192b5aec396SDavid Gibson             fprintf(stderr, "KVM: Failed to create TCE table for liobn 0x%x\n",
2193b5aec396SDavid Gibson                     liobn);
21940f5cb298SDavid Gibson             return NULL;
21950f5cb298SDavid Gibson         }
2196d6ee2a7cSAlexey Kardashevskiy     } else {
2197d6ee2a7cSAlexey Kardashevskiy         return NULL;
2198d6ee2a7cSAlexey Kardashevskiy     }
21990f5cb298SDavid Gibson 
2200d6ee2a7cSAlexey Kardashevskiy     len = nb_table * sizeof(uint64_t);
22010f5cb298SDavid Gibson     /* FIXME: round this up to page size */
22020f5cb298SDavid Gibson 
220374b41e56SDavid Gibson     table = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
22040f5cb298SDavid Gibson     if (table == MAP_FAILED) {
2205b5aec396SDavid Gibson         fprintf(stderr, "KVM: Failed to map TCE table for liobn 0x%x\n",
2206b5aec396SDavid Gibson                 liobn);
22070f5cb298SDavid Gibson         close(fd);
22080f5cb298SDavid Gibson         return NULL;
22090f5cb298SDavid Gibson     }
22100f5cb298SDavid Gibson 
22110f5cb298SDavid Gibson     *pfd = fd;
22120f5cb298SDavid Gibson     return table;
22130f5cb298SDavid Gibson }
22140f5cb298SDavid Gibson 
2215523e7b8aSAlexey Kardashevskiy int kvmppc_remove_spapr_tce(void *table, int fd, uint32_t nb_table)
22160f5cb298SDavid Gibson {
22170f5cb298SDavid Gibson     long len;
22180f5cb298SDavid Gibson 
22190f5cb298SDavid Gibson     if (fd < 0) {
22200f5cb298SDavid Gibson         return -1;
22210f5cb298SDavid Gibson     }
22220f5cb298SDavid Gibson 
2223523e7b8aSAlexey Kardashevskiy     len = nb_table * sizeof(uint64_t);
22240f5cb298SDavid Gibson     if ((munmap(table, len) < 0) ||
22250f5cb298SDavid Gibson         (close(fd) < 0)) {
2226b5aec396SDavid Gibson         fprintf(stderr, "KVM: Unexpected error removing TCE table: %s\n",
2227b5aec396SDavid Gibson                 strerror(errno));
22280f5cb298SDavid Gibson         /* Leak the table */
22290f5cb298SDavid Gibson     }
22300f5cb298SDavid Gibson 
22310f5cb298SDavid Gibson     return 0;
22320f5cb298SDavid Gibson }
22330f5cb298SDavid Gibson 
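/*
 * Negotiate the guest hash page table with KVM.  The return value
 * convention, visible below, is: a positive value is the shift of an
 * HPT already allocated by the kernel, 0 means QEMU must allocate the
 * HPT itself (full emulation or PR KVM), and a negative value is an
 * error from the KVM_PPC_ALLOCATE_HTAB ioctl.
 */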
22347f763a5dSDavid Gibson int kvmppc_reset_htab(int shift_hint)
22357f763a5dSDavid Gibson {
22367f763a5dSDavid Gibson     uint32_t shift = shift_hint;
22377f763a5dSDavid Gibson 
2238ace9a2cbSDavid Gibson     if (!kvm_enabled()) {
2239ace9a2cbSDavid Gibson         /* Full emulation, tell caller to allocate htab itself */
2240ace9a2cbSDavid Gibson         return 0;
2241ace9a2cbSDavid Gibson     }
22426977afdaSGreg Kurz     if (kvm_vm_check_extension(kvm_state, KVM_CAP_PPC_ALLOC_HTAB)) {
22437f763a5dSDavid Gibson         int ret;
22447f763a5dSDavid Gibson         ret = kvm_vm_ioctl(kvm_state, KVM_PPC_ALLOCATE_HTAB, &shift);
2245ace9a2cbSDavid Gibson         if (ret == -ENOTTY) {
2246ace9a2cbSDavid Gibson             /* At least some versions of PR KVM advertise the
2247ace9a2cbSDavid Gibson              * capability, but don't implement the ioctl().  Oops.
2248ace9a2cbSDavid Gibson              * Return 0 so that we allocate the htab in qemu, as is
2249ace9a2cbSDavid Gibson              * correct for PR. */
2250ace9a2cbSDavid Gibson             return 0;
2251ace9a2cbSDavid Gibson         } else if (ret < 0) {
22527f763a5dSDavid Gibson             return ret;
22537f763a5dSDavid Gibson         }
22547f763a5dSDavid Gibson         return shift;
22557f763a5dSDavid Gibson     }
22567f763a5dSDavid Gibson 
2257ace9a2cbSDavid Gibson     /* We have a kernel that predates the htab reset calls.  For PR
2258ace9a2cbSDavid Gibson      * KVM, we need to allocate the htab ourselves; an HV KVM of
225996c9cff0SThomas Huth      * this era has already allocated a 16MB fixed-size hash table. */
226096c9cff0SThomas Huth     if (kvmppc_is_pr(kvm_state)) {
2261ace9a2cbSDavid Gibson         /* PR - tell caller to allocate htab */
22627f763a5dSDavid Gibson         return 0;
2263ace9a2cbSDavid Gibson     } else {
2264ace9a2cbSDavid Gibson         /* HV - assume 16MB kernel allocated htab */
2265ace9a2cbSDavid Gibson         return 24;
2266ace9a2cbSDavid Gibson     }
22677f763a5dSDavid Gibson }
22687f763a5dSDavid Gibson 
2269a1e98583SDavid Gibson static inline uint32_t mfpvr(void)
2270a1e98583SDavid Gibson {
2271a1e98583SDavid Gibson     uint32_t pvr;
2272a1e98583SDavid Gibson 
2273a1e98583SDavid Gibson     asm ("mfpvr %0"
2274a1e98583SDavid Gibson          : "=r"(pvr));
2275a1e98583SDavid Gibson     return pvr;
2276a1e98583SDavid Gibson }
2277a1e98583SDavid Gibson 
2278a7342588SDavid Gibson static void alter_insns(uint64_t *word, uint64_t flags, bool on)
2279a7342588SDavid Gibson {
2280a7342588SDavid Gibson     if (on) {
2281a7342588SDavid Gibson         *word |= flags;
2282a7342588SDavid Gibson     } else {
2283a7342588SDavid Gibson         *word &= ~flags;
2284a7342588SDavid Gibson     }
2285a7342588SDavid Gibson }
2286a7342588SDavid Gibson 
22872985b86bSAndreas Färber static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data)
22882985b86bSAndreas Färber {
22892985b86bSAndreas Färber     PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
22900cbad81fSDavid Gibson     uint32_t dcache_size = kvmppc_read_int_cpu_dt("d-cache-size");
22910cbad81fSDavid Gibson     uint32_t icache_size = kvmppc_read_int_cpu_dt("i-cache-size");
2292a1e98583SDavid Gibson 
2293cfe34f44SAndreas Färber     /* Now fix up the class with information we can query from the host */
22943bc9ccc0SAlexey Kardashevskiy     pcc->pvr = mfpvr();
2295a7342588SDavid Gibson 
22963f2ca480SDavid Gibson     alter_insns(&pcc->insns_flags, PPC_ALTIVEC,
22973f2ca480SDavid Gibson                 qemu_getauxval(AT_HWCAP) & PPC_FEATURE_HAS_ALTIVEC);
22983f2ca480SDavid Gibson     alter_insns(&pcc->insns_flags2, PPC2_VSX,
22993f2ca480SDavid Gibson                 qemu_getauxval(AT_HWCAP) & PPC_FEATURE_HAS_VSX);
23003f2ca480SDavid Gibson     alter_insns(&pcc->insns_flags2, PPC2_DFP,
23013f2ca480SDavid Gibson                 qemu_getauxval(AT_HWCAP) & PPC_FEATURE_HAS_DFP);
23020cbad81fSDavid Gibson 
23030cbad81fSDavid Gibson     if (dcache_size != -1) {
23040cbad81fSDavid Gibson         pcc->l1_dcache_size = dcache_size;
23050cbad81fSDavid Gibson     }
23060cbad81fSDavid Gibson 
23070cbad81fSDavid Gibson     if (icache_size != -1) {
23080cbad81fSDavid Gibson         pcc->l1_icache_size = icache_size;
23090cbad81fSDavid Gibson     }
2310c64abd1fSSam Bobroff 
2311c64abd1fSSam Bobroff #if defined(TARGET_PPC64)
2312c64abd1fSSam Bobroff     pcc->radix_page_info = kvm_get_radix_page_info();
23135f3066d8SDavid Gibson 
23145f3066d8SDavid Gibson     if ((pcc->pvr & 0xffffff00) == CPU_POWERPC_POWER9_DD1) {
23155f3066d8SDavid Gibson         /*
23165f3066d8SDavid Gibson          * POWER9 DD1 has some bugs which make it not really ISA 3.00
23175f3066d8SDavid Gibson          * compliant.  More importantly, advertising ISA 3.00
23185f3066d8SDavid Gibson          * architected mode may prevent guests from activating
23195f3066d8SDavid Gibson          * necessary DD1 workarounds.
23205f3066d8SDavid Gibson          */
23215f3066d8SDavid Gibson         pcc->pcr_supported &= ~(PCR_COMPAT_3_00 | PCR_COMPAT_2_07
23225f3066d8SDavid Gibson                                 | PCR_COMPAT_2_06 | PCR_COMPAT_2_05);
23235f3066d8SDavid Gibson     }
2324c64abd1fSSam Bobroff #endif /* defined(TARGET_PPC64) */
2325a1e98583SDavid Gibson }
2326a1e98583SDavid Gibson 
23273b961124SStuart Yoder bool kvmppc_has_cap_epr(void)
23283b961124SStuart Yoder {
23293b961124SStuart Yoder     return cap_epr;
23303b961124SStuart Yoder }
23313b961124SStuart Yoder 
233287a91de6SAlexander Graf bool kvmppc_has_cap_fixup_hcalls(void)
233387a91de6SAlexander Graf {
233487a91de6SAlexander Graf     return cap_fixup_hcalls;
233587a91de6SAlexander Graf }
233687a91de6SAlexander Graf 
2337bac3bf28SThomas Huth bool kvmppc_has_cap_htm(void)
2338bac3bf28SThomas Huth {
2339bac3bf28SThomas Huth     return cap_htm;
2340bac3bf28SThomas Huth }
2341bac3bf28SThomas Huth 
2342cf1c4cceSSam Bobroff bool kvmppc_has_cap_mmu_radix(void)
2343cf1c4cceSSam Bobroff {
2344cf1c4cceSSam Bobroff     return cap_mmu_radix;
2345cf1c4cceSSam Bobroff }
2346cf1c4cceSSam Bobroff 
2347cf1c4cceSSam Bobroff bool kvmppc_has_cap_mmu_hash_v3(void)
2348cf1c4cceSSam Bobroff {
2349cf1c4cceSSam Bobroff     return cap_mmu_hash_v3;
2350cf1c4cceSSam Bobroff }
2351cf1c4cceSSam Bobroff 
2352072f416aSSuraj Jitindar Singh static bool kvmppc_power8_host(void)
2353072f416aSSuraj Jitindar Singh {
2354072f416aSSuraj Jitindar Singh     bool ret = false;
2355072f416aSSuraj Jitindar Singh #ifdef TARGET_PPC64
2356072f416aSSuraj Jitindar Singh     {
2357072f416aSSuraj Jitindar Singh         uint32_t base_pvr = CPU_POWERPC_POWER_SERVER_MASK & mfpvr();
2358072f416aSSuraj Jitindar Singh         ret = (base_pvr == CPU_POWERPC_POWER8E_BASE) ||
2359072f416aSSuraj Jitindar Singh               (base_pvr == CPU_POWERPC_POWER8NVL_BASE) ||
2360072f416aSSuraj Jitindar Singh               (base_pvr == CPU_POWERPC_POWER8_BASE);
2361072f416aSSuraj Jitindar Singh     }
2362072f416aSSuraj Jitindar Singh #endif /* TARGET_PPC64 */
2363072f416aSSuraj Jitindar Singh     return ret;
2364072f416aSSuraj Jitindar Singh }
2365072f416aSSuraj Jitindar Singh 
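/*
 * The parse_cap_ppc_safe_* helpers below translate the
 * KVM_PPC_GET_CPU_CHAR bits into the graded values used by the spapr
 * capability code: 0 when no mitigation is available, 1 when a
 * workaround is required, and 2 (or a specific SPAPR_CAP_FIXED_*
 * value) when the behaviour is fixed in hardware or firmware.
 */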
23668fea7044SSuraj Jitindar Singh static int parse_cap_ppc_safe_cache(struct kvm_ppc_cpu_char c)
23678fea7044SSuraj Jitindar Singh {
2368072f416aSSuraj Jitindar Singh     bool l1d_thread_priv_req = !kvmppc_power8_host();
2369072f416aSSuraj Jitindar Singh 
23708fea7044SSuraj Jitindar Singh     if (~c.behaviour & c.behaviour_mask & H_CPU_BEHAV_L1D_FLUSH_PR) {
23718fea7044SSuraj Jitindar Singh         return 2;
2372072f416aSSuraj Jitindar Singh     } else if ((!l1d_thread_priv_req ||
2373072f416aSSuraj Jitindar Singh                 c.character & c.character_mask & H_CPU_CHAR_L1D_THREAD_PRIV) &&
23748fea7044SSuraj Jitindar Singh                (c.character & c.character_mask
23758fea7044SSuraj Jitindar Singh                 & (H_CPU_CHAR_L1D_FLUSH_ORI30 | H_CPU_CHAR_L1D_FLUSH_TRIG2))) {
23768fea7044SSuraj Jitindar Singh         return 1;
23778fea7044SSuraj Jitindar Singh     }
23788fea7044SSuraj Jitindar Singh 
23798fea7044SSuraj Jitindar Singh     return 0;
23808fea7044SSuraj Jitindar Singh }
23818fea7044SSuraj Jitindar Singh 
23828fea7044SSuraj Jitindar Singh static int parse_cap_ppc_safe_bounds_check(struct kvm_ppc_cpu_char c)
23838fea7044SSuraj Jitindar Singh {
23848fea7044SSuraj Jitindar Singh     if (~c.behaviour & c.behaviour_mask & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR) {
23858fea7044SSuraj Jitindar Singh         return 2;
23868fea7044SSuraj Jitindar Singh     } else if (c.character & c.character_mask & H_CPU_CHAR_SPEC_BAR_ORI31) {
23878fea7044SSuraj Jitindar Singh         return 1;
23888fea7044SSuraj Jitindar Singh     }
23898fea7044SSuraj Jitindar Singh 
23908fea7044SSuraj Jitindar Singh     return 0;
23918fea7044SSuraj Jitindar Singh }
23928fea7044SSuraj Jitindar Singh 
23938fea7044SSuraj Jitindar Singh static int parse_cap_ppc_safe_indirect_branch(struct kvm_ppc_cpu_char c)
23948fea7044SSuraj Jitindar Singh {
23958fea7044SSuraj Jitindar Singh     if (c.character & c.character_mask & H_CPU_CHAR_CACHE_COUNT_DIS) {
23968fea7044SSuraj Jitindar Singh         return  SPAPR_CAP_FIXED_CCD;
23978fea7044SSuraj Jitindar Singh     } else if (c.character & c.character_mask & H_CPU_CHAR_BCCTRL_SERIALISED) {
23988fea7044SSuraj Jitindar Singh         return SPAPR_CAP_FIXED_IBS;
23998fea7044SSuraj Jitindar Singh     }
24008fea7044SSuraj Jitindar Singh 
24018fea7044SSuraj Jitindar Singh     return 0;
24028fea7044SSuraj Jitindar Singh }
24038fea7044SSuraj Jitindar Singh 
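/*
 * Query KVM_PPC_GET_CPU_CHAR and cache the results in the
 * cap_ppc_safe_* globals.  If the extension or the ioctl is
 * unavailable, the "assume broken" defaults set below are left in
 * place.
 */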
24048acc2ae5SSuraj Jitindar Singh static void kvmppc_get_cpu_characteristics(KVMState *s)
24058acc2ae5SSuraj Jitindar Singh {
24068acc2ae5SSuraj Jitindar Singh     struct kvm_ppc_cpu_char c;
24078acc2ae5SSuraj Jitindar Singh     int ret;
24088acc2ae5SSuraj Jitindar Singh 
24098acc2ae5SSuraj Jitindar Singh     /* Assume broken */
24108acc2ae5SSuraj Jitindar Singh     cap_ppc_safe_cache = 0;
24118acc2ae5SSuraj Jitindar Singh     cap_ppc_safe_bounds_check = 0;
24128acc2ae5SSuraj Jitindar Singh     cap_ppc_safe_indirect_branch = 0;
24138acc2ae5SSuraj Jitindar Singh 
24148acc2ae5SSuraj Jitindar Singh     ret = kvm_vm_check_extension(s, KVM_CAP_PPC_GET_CPU_CHAR);
24158acc2ae5SSuraj Jitindar Singh     if (!ret) {
24168acc2ae5SSuraj Jitindar Singh         return;
24178acc2ae5SSuraj Jitindar Singh     }
24188acc2ae5SSuraj Jitindar Singh     ret = kvm_vm_ioctl(s, KVM_PPC_GET_CPU_CHAR, &c);
24198acc2ae5SSuraj Jitindar Singh     if (ret < 0) {
24208acc2ae5SSuraj Jitindar Singh         return;
24218acc2ae5SSuraj Jitindar Singh     }
24228fea7044SSuraj Jitindar Singh 
24238fea7044SSuraj Jitindar Singh     cap_ppc_safe_cache = parse_cap_ppc_safe_cache(c);
24248fea7044SSuraj Jitindar Singh     cap_ppc_safe_bounds_check = parse_cap_ppc_safe_bounds_check(c);
24258fea7044SSuraj Jitindar Singh     cap_ppc_safe_indirect_branch = parse_cap_ppc_safe_indirect_branch(c);
24268acc2ae5SSuraj Jitindar Singh }
24278acc2ae5SSuraj Jitindar Singh 
24288acc2ae5SSuraj Jitindar Singh int kvmppc_get_cap_safe_cache(void)
24298acc2ae5SSuraj Jitindar Singh {
24308acc2ae5SSuraj Jitindar Singh     return cap_ppc_safe_cache;
24318acc2ae5SSuraj Jitindar Singh }
24328acc2ae5SSuraj Jitindar Singh 
24338acc2ae5SSuraj Jitindar Singh int kvmppc_get_cap_safe_bounds_check(void)
24348acc2ae5SSuraj Jitindar Singh {
24358acc2ae5SSuraj Jitindar Singh     return cap_ppc_safe_bounds_check;
24368acc2ae5SSuraj Jitindar Singh }
24378acc2ae5SSuraj Jitindar Singh 
24388acc2ae5SSuraj Jitindar Singh int kvmppc_get_cap_safe_indirect_branch(void)
24398acc2ae5SSuraj Jitindar Singh {
24408acc2ae5SSuraj Jitindar Singh     return cap_ppc_safe_indirect_branch;
24418acc2ae5SSuraj Jitindar Singh }
24428acc2ae5SSuraj Jitindar Singh 
2443b9a477b7SSuraj Jitindar Singh bool kvmppc_has_cap_nested_kvm_hv(void)
2444b9a477b7SSuraj Jitindar Singh {
2445b9a477b7SSuraj Jitindar Singh     return !!cap_ppc_nested_kvm_hv;
2446b9a477b7SSuraj Jitindar Singh }
2447b9a477b7SSuraj Jitindar Singh 
2448b9a477b7SSuraj Jitindar Singh int kvmppc_set_cap_nested_kvm_hv(int enable)
2449b9a477b7SSuraj Jitindar Singh {
2450b9a477b7SSuraj Jitindar Singh     return kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_NESTED_HV, 0, enable);
2451b9a477b7SSuraj Jitindar Singh }
2452b9a477b7SSuraj Jitindar Singh 
24539ded780cSAlexey Kardashevskiy bool kvmppc_has_cap_spapr_vfio(void)
24549ded780cSAlexey Kardashevskiy {
24559ded780cSAlexey Kardashevskiy     return cap_spapr_vfio;
24569ded780cSAlexey Kardashevskiy }
24579ded780cSAlexey Kardashevskiy 
2458*7d050527SSuraj Jitindar Singh int kvmppc_get_cap_large_decr(void)
2459*7d050527SSuraj Jitindar Singh {
2460*7d050527SSuraj Jitindar Singh     return cap_large_decr;
2461*7d050527SSuraj Jitindar Singh }
2462*7d050527SSuraj Jitindar Singh 
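/*
 * Switch the large decrementer on or off by flipping LPCR[LD] through
 * the one-reg interface.  The register is read back afterwards so
 * that -1 can be returned if the bit did not actually change, e.g.
 * when the underlying KVM ignores the update.
 */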
2463*7d050527SSuraj Jitindar Singh int kvmppc_enable_cap_large_decr(PowerPCCPU *cpu, int enable)
2464*7d050527SSuraj Jitindar Singh {
2465*7d050527SSuraj Jitindar Singh     CPUState *cs = CPU(cpu);
2466*7d050527SSuraj Jitindar Singh     uint64_t lpcr;
2467*7d050527SSuraj Jitindar Singh 
2468*7d050527SSuraj Jitindar Singh     kvm_get_one_reg(cs, KVM_REG_PPC_LPCR_64, &lpcr);
2469*7d050527SSuraj Jitindar Singh     /* Do we need to modify the LPCR? */
2470*7d050527SSuraj Jitindar Singh     if (!!(lpcr & LPCR_LD) != !!enable) {
2471*7d050527SSuraj Jitindar Singh         if (enable) {
2472*7d050527SSuraj Jitindar Singh             lpcr |= LPCR_LD;
2473*7d050527SSuraj Jitindar Singh         } else {
2474*7d050527SSuraj Jitindar Singh             lpcr &= ~LPCR_LD;
2475*7d050527SSuraj Jitindar Singh         }
2476*7d050527SSuraj Jitindar Singh         kvm_set_one_reg(cs, KVM_REG_PPC_LPCR_64, &lpcr);
2477*7d050527SSuraj Jitindar Singh         kvm_get_one_reg(cs, KVM_REG_PPC_LPCR_64, &lpcr);
2478*7d050527SSuraj Jitindar Singh 
2479*7d050527SSuraj Jitindar Singh         if (!!(lpcr & LPCR_LD) != !!enable) {
2480*7d050527SSuraj Jitindar Singh             return -1;
2481*7d050527SSuraj Jitindar Singh         }
2482*7d050527SSuraj Jitindar Singh     }
2483*7d050527SSuraj Jitindar Singh 
2484*7d050527SSuraj Jitindar Singh     return 0;
2485*7d050527SSuraj Jitindar Singh }
2486*7d050527SSuraj Jitindar Singh 
248752b2519cSThomas Huth PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void)
248852b2519cSThomas Huth {
248952b2519cSThomas Huth     uint32_t host_pvr = mfpvr();
249052b2519cSThomas Huth     PowerPCCPUClass *pvr_pcc;
249152b2519cSThomas Huth 
249252b2519cSThomas Huth     pvr_pcc = ppc_cpu_class_by_pvr(host_pvr);
249352b2519cSThomas Huth     if (pvr_pcc == NULL) {
249452b2519cSThomas Huth         pvr_pcc = ppc_cpu_class_by_pvr_mask(host_pvr);
249552b2519cSThomas Huth     }
249652b2519cSThomas Huth 
249752b2519cSThomas Huth     return pvr_pcc;
249852b2519cSThomas Huth }
249952b2519cSThomas Huth 
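/*
 * Register the "host" CPU type: derive its class from the host PVR,
 * make it the default CPU type for sPAPR machines, and repoint the
 * matching family alias (e.g. "POWER8" on a POWER8NVL host) at the
 * newly registered type so that "-cpu <family>" resolves to the host
 * model.
 */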
25002e9c10ebSIgor Mammedov static int kvm_ppc_register_host_cpu_type(MachineState *ms)
25015ba4576bSAndreas Färber {
25025ba4576bSAndreas Färber     TypeInfo type_info = {
25035ba4576bSAndreas Färber         .name = TYPE_HOST_POWERPC_CPU,
25045ba4576bSAndreas Färber         .class_init = kvmppc_host_cpu_class_init,
25055ba4576bSAndreas Färber     };
25062e9c10ebSIgor Mammedov     MachineClass *mc = MACHINE_GET_CLASS(ms);
25075ba4576bSAndreas Färber     PowerPCCPUClass *pvr_pcc;
250892e926e1SGreg Kurz     ObjectClass *oc;
25095b79b1caSAlexey Kardashevskiy     DeviceClass *dc;
2510715d4b96SThomas Huth     int i;
25115ba4576bSAndreas Färber 
251252b2519cSThomas Huth     pvr_pcc = kvm_ppc_get_host_cpu_class();
25133bc9ccc0SAlexey Kardashevskiy     if (pvr_pcc == NULL) {
25145ba4576bSAndreas Färber         return -1;
25155ba4576bSAndreas Färber     }
25165ba4576bSAndreas Färber     type_info.parent = object_class_get_name(OBJECT_CLASS(pvr_pcc));
25175ba4576bSAndreas Färber     type_register(&type_info);
25182e9c10ebSIgor Mammedov     if (object_dynamic_cast(OBJECT(ms), TYPE_SPAPR_MACHINE)) {
25192e9c10ebSIgor Mammedov         /* override TCG default cpu type with 'host' cpu model */
25202e9c10ebSIgor Mammedov         mc->default_cpu_type = TYPE_HOST_POWERPC_CPU;
25212e9c10ebSIgor Mammedov     }
25225b79b1caSAlexey Kardashevskiy 
252392e926e1SGreg Kurz     oc = object_class_by_name(type_info.name);
252492e926e1SGreg Kurz     g_assert(oc);
252592e926e1SGreg Kurz 
2526715d4b96SThomas Huth     /*
2527715d4b96SThomas Huth      * Update generic CPU family class alias (e.g. on a POWER8NVL host,
2528715d4b96SThomas Huth      * we want "POWER8" to be a "family" alias that points to the current
2529715d4b96SThomas Huth      * host CPU type, too)
2530715d4b96SThomas Huth      */
2531715d4b96SThomas Huth     dc = DEVICE_CLASS(ppc_cpu_get_family_class(pvr_pcc));
2532715d4b96SThomas Huth     for (i = 0; ppc_cpu_aliases[i].alias != NULL; i++) {
2533c5354f54SIgor Mammedov         if (strcasecmp(ppc_cpu_aliases[i].alias, dc->desc) == 0) {
2534715d4b96SThomas Huth             char *suffix;
2535715d4b96SThomas Huth 
2536715d4b96SThomas Huth             ppc_cpu_aliases[i].model = g_strdup(object_class_get_name(oc));
2537c9137065SIgor Mammedov             suffix = strstr(ppc_cpu_aliases[i].model, POWERPC_CPU_TYPE_SUFFIX);
2538715d4b96SThomas Huth             if (suffix) {
2539715d4b96SThomas Huth                 *suffix = 0;
2540715d4b96SThomas Huth             }
2541715d4b96SThomas Huth             break;
2542715d4b96SThomas Huth         }
2543715d4b96SThomas Huth     }
2544715d4b96SThomas Huth 
25455ba4576bSAndreas Färber     return 0;
25465ba4576bSAndreas Färber }
25475ba4576bSAndreas Färber 
2548feaa64c4SDavid Gibson int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function)
2549feaa64c4SDavid Gibson {
2550feaa64c4SDavid Gibson     struct kvm_rtas_token_args args = {
2551feaa64c4SDavid Gibson         .token = token,
2552feaa64c4SDavid Gibson     };
2553feaa64c4SDavid Gibson 
2554feaa64c4SDavid Gibson     if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_RTAS)) {
2555feaa64c4SDavid Gibson         return -ENOENT;
2556feaa64c4SDavid Gibson     }
2557feaa64c4SDavid Gibson 
2558feaa64c4SDavid Gibson     strncpy(args.name, function, sizeof(args.name));
2559feaa64c4SDavid Gibson 
2560feaa64c4SDavid Gibson     return kvm_vm_ioctl(kvm_state, KVM_PPC_RTAS_DEFINE_TOKEN, &args);
2561feaa64c4SDavid Gibson }
256212b1143bSDavid Gibson 
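/*
 * Obtain a file descriptor from KVM_PPC_GET_HTAB_FD that streams the
 * guest HPT starting at the given index, either for reading (HPT
 * migration or dumping) or, with write == true, for updating entries.
 */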
256314b0d748SGreg Kurz int kvmppc_get_htab_fd(bool write, uint64_t index, Error **errp)
2564e68cb8b4SAlexey Kardashevskiy {
2565e68cb8b4SAlexey Kardashevskiy     struct kvm_get_htab_fd s = {
2566e68cb8b4SAlexey Kardashevskiy         .flags = write ? KVM_GET_HTAB_WRITE : 0,
256714b0d748SGreg Kurz         .start_index = index,
2568e68cb8b4SAlexey Kardashevskiy     };
256982be8e73SGreg Kurz     int ret;
2570e68cb8b4SAlexey Kardashevskiy 
2571e68cb8b4SAlexey Kardashevskiy     if (!cap_htab_fd) {
257214b0d748SGreg Kurz         error_setg(errp, "KVM version doesn't support %s the HPT",
257314b0d748SGreg Kurz                    write ? "writing" : "reading");
257482be8e73SGreg Kurz         return -ENOTSUP;
2575e68cb8b4SAlexey Kardashevskiy     }
2576e68cb8b4SAlexey Kardashevskiy 
257782be8e73SGreg Kurz     ret = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &s);
257882be8e73SGreg Kurz     if (ret < 0) {
257914b0d748SGreg Kurz         error_setg(errp, "Unable to open fd for %s HPT %s KVM: %s",
258014b0d748SGreg Kurz                    write ? "writing" : "reading", write ? "to" : "from",
258114b0d748SGreg Kurz                    strerror(errno));
258282be8e73SGreg Kurz         return -errno;
258382be8e73SGreg Kurz     }
258482be8e73SGreg Kurz 
258582be8e73SGreg Kurz     return ret;
2586e68cb8b4SAlexey Kardashevskiy }
2587e68cb8b4SAlexey Kardashevskiy 
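/*
 * Drain HPT chunks from the KVM htab fd into the migration stream.
 * Each chunk is re-emitted as: be32 index, be16 n_valid, be16
 * n_invalid, followed by n_valid HPTEs of HASH_PTE_SIZE_64 (16) bytes
 * each, which is the layout the destination side feeds back through
 * kvmppc_load_htab_chunk().
 */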
2588e68cb8b4SAlexey Kardashevskiy int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns)
2589e68cb8b4SAlexey Kardashevskiy {
2590bc72ad67SAlex Bligh     int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
2591e68cb8b4SAlexey Kardashevskiy     uint8_t buf[bufsize];
2592e68cb8b4SAlexey Kardashevskiy     ssize_t rc;
2593e68cb8b4SAlexey Kardashevskiy 
2594e68cb8b4SAlexey Kardashevskiy     do {
2595e68cb8b4SAlexey Kardashevskiy         rc = read(fd, buf, bufsize);
2596e68cb8b4SAlexey Kardashevskiy         if (rc < 0) {
2597e68cb8b4SAlexey Kardashevskiy             fprintf(stderr, "Error reading data from KVM HTAB fd: %s\n",
2598e68cb8b4SAlexey Kardashevskiy                     strerror(errno));
2599e68cb8b4SAlexey Kardashevskiy             return rc;
2600e68cb8b4SAlexey Kardashevskiy         } else if (rc) {
2601e094c4c1SCédric Le Goater             uint8_t *buffer = buf;
2602e094c4c1SCédric Le Goater             ssize_t n = rc;
2603e094c4c1SCédric Le Goater             while (n) {
2604e094c4c1SCédric Le Goater                 struct kvm_get_htab_header *head =
2605e094c4c1SCédric Le Goater                     (struct kvm_get_htab_header *) buffer;
2606e094c4c1SCédric Le Goater                 size_t chunksize = sizeof(*head) +
2607e094c4c1SCédric Le Goater                      HASH_PTE_SIZE_64 * head->n_valid;
2608e094c4c1SCédric Le Goater 
2609e094c4c1SCédric Le Goater                 qemu_put_be32(f, head->index);
2610e094c4c1SCédric Le Goater                 qemu_put_be16(f, head->n_valid);
2611e094c4c1SCédric Le Goater                 qemu_put_be16(f, head->n_invalid);
2612e094c4c1SCédric Le Goater                 qemu_put_buffer(f, (void *)(head + 1),
2613e094c4c1SCédric Le Goater                                 HASH_PTE_SIZE_64 * head->n_valid);
2614e094c4c1SCédric Le Goater 
2615e094c4c1SCédric Le Goater                 buffer += chunksize;
2616e094c4c1SCédric Le Goater                 n -= chunksize;
2617e094c4c1SCédric Le Goater             }
2618e68cb8b4SAlexey Kardashevskiy         }
2619e68cb8b4SAlexey Kardashevskiy     } while ((rc != 0)
2620e68cb8b4SAlexey Kardashevskiy              && ((max_ns < 0)
2621bc72ad67SAlex Bligh                  || ((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) < max_ns)));
2622e68cb8b4SAlexey Kardashevskiy 
2623e68cb8b4SAlexey Kardashevskiy     return (rc == 0) ? 1 : 0;
2624e68cb8b4SAlexey Kardashevskiy }
2625e68cb8b4SAlexey Kardashevskiy 
2626e68cb8b4SAlexey Kardashevskiy int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
2627e68cb8b4SAlexey Kardashevskiy                            uint16_t n_valid, uint16_t n_invalid)
2628e68cb8b4SAlexey Kardashevskiy {
2629e68cb8b4SAlexey Kardashevskiy     struct kvm_get_htab_header *buf;
2630e68cb8b4SAlexey Kardashevskiy     size_t chunksize = sizeof(*buf) + n_valid*HASH_PTE_SIZE_64;
2631e68cb8b4SAlexey Kardashevskiy     ssize_t rc;
2632e68cb8b4SAlexey Kardashevskiy 
2633e68cb8b4SAlexey Kardashevskiy     buf = alloca(chunksize);
2634e68cb8b4SAlexey Kardashevskiy     buf->index = index;
2635e68cb8b4SAlexey Kardashevskiy     buf->n_valid = n_valid;
2636e68cb8b4SAlexey Kardashevskiy     buf->n_invalid = n_invalid;
2637e68cb8b4SAlexey Kardashevskiy 
2638e68cb8b4SAlexey Kardashevskiy     qemu_get_buffer(f, (void *)(buf + 1), HASH_PTE_SIZE_64*n_valid);
2639e68cb8b4SAlexey Kardashevskiy 
2640e68cb8b4SAlexey Kardashevskiy     rc = write(fd, buf, chunksize);
2641e68cb8b4SAlexey Kardashevskiy     if (rc < 0) {
2642e68cb8b4SAlexey Kardashevskiy         fprintf(stderr, "Error writing KVM hash table: %s\n",
2643e68cb8b4SAlexey Kardashevskiy                 strerror(errno));
2644e68cb8b4SAlexey Kardashevskiy         return rc;
2645e68cb8b4SAlexey Kardashevskiy     }
2646e68cb8b4SAlexey Kardashevskiy     if (rc != chunksize) {
2647e68cb8b4SAlexey Kardashevskiy         /* We should never get a short write on a single chunk */
2648e68cb8b4SAlexey Kardashevskiy         fprintf(stderr, "Short write, restoring KVM hash table\n");
2649e68cb8b4SAlexey Kardashevskiy         return -1;
2650e68cb8b4SAlexey Kardashevskiy     }
2651e68cb8b4SAlexey Kardashevskiy     return 0;
2652e68cb8b4SAlexey Kardashevskiy }
2653e68cb8b4SAlexey Kardashevskiy 
265420d695a9SAndreas Färber bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
26554513d923SGleb Natapov {
26564513d923SGleb Natapov     return true;
26574513d923SGleb Natapov }
2658a1b87fe0SJan Kiszka 
265982169660SScott Wood void kvm_arch_init_irq_routing(KVMState *s)
266082169660SScott Wood {
266182169660SScott Wood }
2662c65f9a07SGreg Kurz 
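/*
 * Read n HPTEs starting at ptex through the KVM htab fd.  Valid
 * entries are copied into hptes[] as delivered by the kernel, while
 * ranges reported as invalid are zero-filled, so the caller always
 * gets n consecutive entries back.
 */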
26631ad9f0a4SDavid Gibson void kvmppc_read_hptes(ppc_hash_pte64_t *hptes, hwaddr ptex, int n)
26641ad9f0a4SDavid Gibson {
26651ad9f0a4SDavid Gibson     int fd, rc;
26661ad9f0a4SDavid Gibson     int i;
26677c43bca0SAneesh Kumar K.V 
266814b0d748SGreg Kurz     fd = kvmppc_get_htab_fd(false, ptex, &error_abort);
26691ad9f0a4SDavid Gibson 
26701ad9f0a4SDavid Gibson     i = 0;
26711ad9f0a4SDavid Gibson     while (i < n) {
26721ad9f0a4SDavid Gibson         struct kvm_get_htab_header *hdr;
26731ad9f0a4SDavid Gibson         int m = n < HPTES_PER_GROUP ? n : HPTES_PER_GROUP;
26741ad9f0a4SDavid Gibson         char buf[sizeof(*hdr) + m * HASH_PTE_SIZE_64];
26751ad9f0a4SDavid Gibson 
26761ad9f0a4SDavid Gibson         rc = read(fd, buf, sizeof(buf));
26771ad9f0a4SDavid Gibson         if (rc < 0) {
26781ad9f0a4SDavid Gibson             hw_error("kvmppc_read_hptes: Unable to read HPTEs");
26791ad9f0a4SDavid Gibson         }
26801ad9f0a4SDavid Gibson 
26811ad9f0a4SDavid Gibson         hdr = (struct kvm_get_htab_header *)buf;
26821ad9f0a4SDavid Gibson         while ((i < n) && ((char *)hdr < (buf + rc))) {
2683a36593e1SAlexey Kardashevskiy             int invalid = hdr->n_invalid, valid = hdr->n_valid;
26841ad9f0a4SDavid Gibson 
26851ad9f0a4SDavid Gibson             if (hdr->index != (ptex + i)) {
26861ad9f0a4SDavid Gibson                 hw_error("kvmppc_read_hptes: Unexpected HPTE index %"PRIu32
26871ad9f0a4SDavid Gibson                          " != (%"HWADDR_PRIu" + %d)", hdr->index, ptex, i);
26881ad9f0a4SDavid Gibson             }
26891ad9f0a4SDavid Gibson 
2690a36593e1SAlexey Kardashevskiy             if (n - i < valid) {
2691a36593e1SAlexey Kardashevskiy                 valid = n - i;
2692a36593e1SAlexey Kardashevskiy             }
2693a36593e1SAlexey Kardashevskiy             memcpy(hptes + i, hdr + 1, HASH_PTE_SIZE_64 * valid);
2694a36593e1SAlexey Kardashevskiy             i += valid;
26951ad9f0a4SDavid Gibson 
26961ad9f0a4SDavid Gibson             if ((n - i) < invalid) {
26971ad9f0a4SDavid Gibson                 invalid = n - i;
26981ad9f0a4SDavid Gibson             }
26991ad9f0a4SDavid Gibson             memset(hptes + i, 0, invalid * HASH_PTE_SIZE_64);
2700a36593e1SAlexey Kardashevskiy             i += invalid;
27011ad9f0a4SDavid Gibson 
27021ad9f0a4SDavid Gibson             hdr = (struct kvm_get_htab_header *)
27031ad9f0a4SDavid Gibson                 ((char *)(hdr + 1) + HASH_PTE_SIZE_64 * hdr->n_valid);
27041ad9f0a4SDavid Gibson         }
27051ad9f0a4SDavid Gibson     }
27061ad9f0a4SDavid Gibson 
27071ad9f0a4SDavid Gibson     close(fd);
27081ad9f0a4SDavid Gibson }
27091ad9f0a4SDavid Gibson 
27101ad9f0a4SDavid Gibson void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1)
27117c43bca0SAneesh Kumar K.V {
27121ad9f0a4SDavid Gibson     int fd, rc;
27131ad9f0a4SDavid Gibson     struct {
27141ad9f0a4SDavid Gibson         struct kvm_get_htab_header hdr;
27151ad9f0a4SDavid Gibson         uint64_t pte0;
27161ad9f0a4SDavid Gibson         uint64_t pte1;
27171ad9f0a4SDavid Gibson     } buf;
2718c1385933SAneesh Kumar K.V 
271914b0d748SGreg Kurz     fd = kvmppc_get_htab_fd(true, 0 /* Ignored */, &error_abort);
2720c1385933SAneesh Kumar K.V 
27211ad9f0a4SDavid Gibson     buf.hdr.n_valid = 1;
27221ad9f0a4SDavid Gibson     buf.hdr.n_invalid = 0;
27231ad9f0a4SDavid Gibson     buf.hdr.index = ptex;
27241ad9f0a4SDavid Gibson     buf.pte0 = cpu_to_be64(pte0);
27251ad9f0a4SDavid Gibson     buf.pte1 = cpu_to_be64(pte1);
27261ad9f0a4SDavid Gibson 
27271ad9f0a4SDavid Gibson     rc = write(fd, &buf, sizeof(buf));
27281ad9f0a4SDavid Gibson     if (rc != sizeof(buf)) {
27291ad9f0a4SDavid Gibson         hw_error("kvmppc_write_hpte: Unable to update KVM HPT");
2730c1385933SAneesh Kumar K.V     }
27311ad9f0a4SDavid Gibson     close(fd);
2732c1385933SAneesh Kumar K.V }
27339e03a040SFrank Blaschka 
27349e03a040SFrank Blaschka int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
2735dc9f06caSPavel Fedin                              uint64_t address, uint32_t data, PCIDevice *dev)
27369e03a040SFrank Blaschka {
27379e03a040SFrank Blaschka     return 0;
27389e03a040SFrank Blaschka }
27391850b6b7SEric Auger 
274038d87493SPeter Xu int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
274138d87493SPeter Xu                                 int vector, PCIDevice *dev)
274238d87493SPeter Xu {
274338d87493SPeter Xu     return 0;
274438d87493SPeter Xu }
274538d87493SPeter Xu 
274638d87493SPeter Xu int kvm_arch_release_virq_post(int virq)
274738d87493SPeter Xu {
274838d87493SPeter Xu     return 0;
274938d87493SPeter Xu }
275038d87493SPeter Xu 
27511850b6b7SEric Auger int kvm_arch_msi_data_to_gsi(uint32_t data)
27521850b6b7SEric Auger {
27531850b6b7SEric Auger     return data & 0xffff;
27541850b6b7SEric Auger }
27554d9392beSThomas Huth 
27564d9392beSThomas Huth int kvmppc_enable_hwrng(void)
27574d9392beSThomas Huth {
27584d9392beSThomas Huth     if (!kvm_enabled() || !kvm_check_extension(kvm_state, KVM_CAP_PPC_HWRNG)) {
27594d9392beSThomas Huth         return -1;
27604d9392beSThomas Huth     }
27614d9392beSThomas Huth 
27624d9392beSThomas Huth     return kvmppc_enable_hcall(kvm_state, H_RANDOM);
27634d9392beSThomas Huth }
276430f4b05bSDavid Gibson 
276530f4b05bSDavid Gibson void kvmppc_check_papr_resize_hpt(Error **errp)
276630f4b05bSDavid Gibson {
276730f4b05bSDavid Gibson     if (!kvm_enabled()) {
2768b55d295eSDavid Gibson         return; /* No KVM, we're good */
2769b55d295eSDavid Gibson     }
2770b55d295eSDavid Gibson 
2771b55d295eSDavid Gibson     if (cap_resize_hpt) {
2772b55d295eSDavid Gibson         return; /* Kernel has explicit support, we're good */
2773b55d295eSDavid Gibson     }
2774b55d295eSDavid Gibson 
2775b55d295eSDavid Gibson     /* Otherwise fallback on looking for PR KVM */
2776b55d295eSDavid Gibson     if (kvmppc_is_pr(kvm_state)) {
277730f4b05bSDavid Gibson         return;
277830f4b05bSDavid Gibson     }
277930f4b05bSDavid Gibson 
278030f4b05bSDavid Gibson     error_setg(errp,
278130f4b05bSDavid Gibson                "Hash page table resizing not available with this KVM version");
278230f4b05bSDavid Gibson }
2783b55d295eSDavid Gibson 
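/*
 * The two calls below back the PAPR HPT resizing protocol: the guest
 * first requests a new HPT of the given shift (H_RESIZE_HPT_PREPARE,
 * forwarded via KVM_PPC_RESIZE_HPT_PREPARE) and, once preparation has
 * succeeded, commits the switch-over (H_RESIZE_HPT_COMMIT via
 * KVM_PPC_RESIZE_HPT_COMMIT).  Both return -ENOSYS when the kernel
 * lacks the capability.
 */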
2784b55d295eSDavid Gibson int kvmppc_resize_hpt_prepare(PowerPCCPU *cpu, target_ulong flags, int shift)
2785b55d295eSDavid Gibson {
2786b55d295eSDavid Gibson     CPUState *cs = CPU(cpu);
2787b55d295eSDavid Gibson     struct kvm_ppc_resize_hpt rhpt = {
2788b55d295eSDavid Gibson         .flags = flags,
2789b55d295eSDavid Gibson         .shift = shift,
2790b55d295eSDavid Gibson     };
2791b55d295eSDavid Gibson 
2792b55d295eSDavid Gibson     if (!cap_resize_hpt) {
2793b55d295eSDavid Gibson         return -ENOSYS;
2794b55d295eSDavid Gibson     }
2795b55d295eSDavid Gibson 
2796b55d295eSDavid Gibson     return kvm_vm_ioctl(cs->kvm_state, KVM_PPC_RESIZE_HPT_PREPARE, &rhpt);
2797b55d295eSDavid Gibson }
2798b55d295eSDavid Gibson 
2799b55d295eSDavid Gibson int kvmppc_resize_hpt_commit(PowerPCCPU *cpu, target_ulong flags, int shift)
2800b55d295eSDavid Gibson {
2801b55d295eSDavid Gibson     CPUState *cs = CPU(cpu);
2802b55d295eSDavid Gibson     struct kvm_ppc_resize_hpt rhpt = {
2803b55d295eSDavid Gibson         .flags = flags,
2804b55d295eSDavid Gibson         .shift = shift,
2805b55d295eSDavid Gibson     };
2806b55d295eSDavid Gibson 
2807b55d295eSDavid Gibson     if (!cap_resize_hpt) {
2808b55d295eSDavid Gibson         return -ENOSYS;
2809b55d295eSDavid Gibson     }
2810b55d295eSDavid Gibson 
2811b55d295eSDavid Gibson     return kvm_vm_ioctl(cs->kvm_state, KVM_PPC_RESIZE_HPT_COMMIT, &rhpt);
2812b55d295eSDavid Gibson }
2813b55d295eSDavid Gibson 
2814c363a37aSDaniel Henrique Barboza /*
2815c363a37aSDaniel Henrique Barboza  * This is a helper function to detect a post-migration scenario
2816c363a37aSDaniel Henrique Barboza  * in which a guest, running as KVM-HV, freezes in cpu_post_load because
2817c363a37aSDaniel Henrique Barboza  * the guest kernel can't handle a PVR value other than the actual host
2818c363a37aSDaniel Henrique Barboza  * PVR in KVM_SET_SREGS, even if pvr_match() returns true.
2819c363a37aSDaniel Henrique Barboza  *
2820c363a37aSDaniel Henrique Barboza  * If we don't have cap_ppc_pvr_compat and we're not running in PR
2821c363a37aSDaniel Henrique Barboza  * (so, we're HV), return true. The workaround itself is done in
2822c363a37aSDaniel Henrique Barboza  * cpu_post_load.
2823c363a37aSDaniel Henrique Barboza  *
2824c363a37aSDaniel Henrique Barboza  * The order here is important: we'll only check for KVM PR as a
2825c363a37aSDaniel Henrique Barboza  * fallback if the guest kernel can't handle the situation itself.
2826c363a37aSDaniel Henrique Barboza  * We need to avoid querying the running KVM type at the QEMU level
2827c363a37aSDaniel Henrique Barboza  * as much as possible.
2828c363a37aSDaniel Henrique Barboza  */
2829c363a37aSDaniel Henrique Barboza bool kvmppc_pvr_workaround_required(PowerPCCPU *cpu)
2830c363a37aSDaniel Henrique Barboza {
2831c363a37aSDaniel Henrique Barboza     CPUState *cs = CPU(cpu);
2832c363a37aSDaniel Henrique Barboza 
2833c363a37aSDaniel Henrique Barboza     if (!kvm_enabled()) {
2834c363a37aSDaniel Henrique Barboza         return false;
2835c363a37aSDaniel Henrique Barboza     }
2836c363a37aSDaniel Henrique Barboza 
2837c363a37aSDaniel Henrique Barboza     if (cap_ppc_pvr_compat) {
2838c363a37aSDaniel Henrique Barboza         return false;
2839c363a37aSDaniel Henrique Barboza     }
2840c363a37aSDaniel Henrique Barboza 
2841c363a37aSDaniel Henrique Barboza     return !kvmppc_is_pr(cs->kvm_state);
2842c363a37aSDaniel Henrique Barboza }
2843a84f7179SNikunj A Dadhania 
2844a84f7179SNikunj A Dadhania void kvmppc_set_reg_ppc_online(PowerPCCPU *cpu, unsigned int online)
2845a84f7179SNikunj A Dadhania {
2846a84f7179SNikunj A Dadhania     CPUState *cs = CPU(cpu);
2847a84f7179SNikunj A Dadhania 
2848a84f7179SNikunj A Dadhania     if (kvm_enabled()) {
2849a84f7179SNikunj A Dadhania         kvm_set_one_reg(cs, KVM_REG_PPC_ONLINE, &online);
2850a84f7179SNikunj A Dadhania     }
2851a84f7179SNikunj A Dadhania }
2852