/*
 * PowerPC implementation of KVM hooks
 *
 * Copyright IBM Corp. 2007
 * Copyright (C) 2011 Freescale Semiconductor, Inc.
 *
 * Authors:
 *  Jerone Young <jyoung5@us.ibm.com>
 *  Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *  Hollis Blanchard <hollisb@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <dirent.h>
#include <sys/ioctl.h>
#include <sys/vfs.h>

#include <linux/kvm.h>

#include "qapi/error.h"
#include "qemu/error-report.h"
#include "cpu.h"
#include "cpu-models.h"
#include "qemu/timer.h"
#include "system/hw_accel.h"
#include "kvm_ppc.h"
#include "system/cpus.h"
#include "system/device_tree.h"
#include "mmu-hash64.h"

#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "hw/hw.h"
#include "hw/ppc/ppc.h"
#include "migration/qemu-file-types.h"
#include "system/watchdog.h"
#include "trace.h"
#include "gdbstub/enums.h"
#include "exec/memattrs.h"
#include "system/ram_addr.h"
#include "system/hostmem.h"
#include "qemu/cutils.h"
#include "qemu/main-loop.h"
#include "qemu/mmap-alloc.h"
#include "elf.h"
#include "system/kvm_int.h"
#include "system/kvm.h"
#include "accel/accel-cpu-target.h"

#include CONFIG_DEVICES

#define PROC_DEVTREE_CPU      "/proc/device-tree/cpus/"

#define DEBUG_RETURN_GUEST 0
#define DEBUG_RETURN_GDB   1

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

static int cap_interrupt_unset;
static int cap_segstate;
static int cap_booke_sregs;
static int cap_ppc_smt;
static int cap_ppc_smt_possible;
static int cap_spapr_tce;
static int cap_spapr_tce_64;
static int cap_spapr_multitce;
static int cap_spapr_vfio;
static int cap_hior;
static int cap_one_reg;
static int cap_epr;
static int cap_ppc_watchdog;
static int cap_htab_fd;
static int cap_fixup_hcalls;
static int cap_htm;             /* Hardware transactional memory support */
static int cap_mmu_radix;
static int cap_mmu_hash_v3;
static int cap_xive;
static int cap_resize_hpt;
static int cap_ppc_pvr_compat;
static int cap_ppc_safe_cache;
static int cap_ppc_safe_bounds_check;
static int cap_ppc_safe_indirect_branch;
static int cap_ppc_count_cache_flush_assist;
static int cap_ppc_nested_kvm_hv;
static int cap_large_decr;
static int cap_fwnmi;
static int cap_rpt_invalidate;
static int cap_ail_mode_3;
static int cap_dawr1;

#ifdef CONFIG_PSERIES
static int cap_papr;
#else
#define cap_papr (0)
#endif

static uint32_t debug_inst_opcode;

/*
 * Check whether we are running with KVM-PR (instead of KVM-HV).  This
 * should only be used for fallback tests - generally we should use
 * explicit capabilities for the features we want, rather than
 * assuming what is/isn't available depending on the KVM variant.
 */
static bool kvmppc_is_pr(KVMState *ks)
{
    /* Assume KVM-PR if the GET_PVINFO capability is available */
    return kvm_vm_check_extension(ks, KVM_CAP_PPC_GET_PVINFO) != 0;
}

static int kvm_ppc_register_host_cpu_type(void);
static void kvmppc_get_cpu_characteristics(KVMState *s);
static int kvmppc_get_dec_bits(void);

int kvm_arch_get_default_type(MachineState *ms)
{
    return 0;
}

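/*
 * Called once when the KVM accelerator is initialised: probe the KVM
 * capabilities this file cares about and cache them in the cap_* flags above.
 */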
int kvm_arch_init(MachineState *ms, KVMState *s)
{
    cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);
    cap_segstate = kvm_check_extension(s, KVM_CAP_PPC_SEGSTATE);
    cap_booke_sregs = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_SREGS);
    cap_ppc_smt_possible = kvm_vm_check_extension(s, KVM_CAP_PPC_SMT_POSSIBLE);
    cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);
    cap_spapr_tce_64 = kvm_check_extension(s, KVM_CAP_SPAPR_TCE_64);
    cap_spapr_multitce = kvm_check_extension(s, KVM_CAP_SPAPR_MULTITCE);
    cap_spapr_vfio = kvm_vm_check_extension(s, KVM_CAP_SPAPR_TCE_VFIO);
    cap_one_reg = kvm_check_extension(s, KVM_CAP_ONE_REG);
    cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);
    cap_epr = kvm_check_extension(s, KVM_CAP_PPC_EPR);
    cap_ppc_watchdog = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_WATCHDOG);
    /*
     * Note: we don't set cap_papr here, because this capability is
     * only activated later, by kvmppc_set_papr()
     */
    cap_htab_fd = kvm_vm_check_extension(s, KVM_CAP_PPC_HTAB_FD);
    cap_fixup_hcalls = kvm_check_extension(s, KVM_CAP_PPC_FIXUP_HCALL);
    cap_ppc_smt = kvm_vm_check_extension(s, KVM_CAP_PPC_SMT);
    cap_htm = kvm_vm_check_extension(s, KVM_CAP_PPC_HTM);
    cap_mmu_radix = kvm_vm_check_extension(s, KVM_CAP_PPC_MMU_RADIX);
    cap_mmu_hash_v3 = kvm_vm_check_extension(s, KVM_CAP_PPC_MMU_HASH_V3);
    cap_xive = kvm_vm_check_extension(s, KVM_CAP_PPC_IRQ_XIVE);
    cap_resize_hpt = kvm_vm_check_extension(s, KVM_CAP_SPAPR_RESIZE_HPT);
    kvmppc_get_cpu_characteristics(s);
    cap_ppc_nested_kvm_hv = kvm_vm_check_extension(s, KVM_CAP_PPC_NESTED_HV);
    cap_large_decr = kvmppc_get_dec_bits();
    cap_fwnmi = kvm_vm_check_extension(s, KVM_CAP_PPC_FWNMI);
    cap_dawr1 = kvm_vm_check_extension(s, KVM_CAP_PPC_DAWR1);
    /*
     * Note: setting it to false because there is no such capability
     * in KVM at this moment.
     *
     * TODO: call kvm_vm_check_extension() with the right capability
     * after the kernel starts implementing it.
     */
    cap_ppc_pvr_compat = false;

    if (!kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL)) {
        error_report("KVM: Host kernel doesn't have level irq capability");
        exit(1);
    }

    cap_rpt_invalidate = kvm_vm_check_extension(s, KVM_CAP_PPC_RPT_INVALIDATE);
    cap_ail_mode_3 = kvm_vm_check_extension(s, KVM_CAP_PPC_AIL_MODE_3);
    kvm_ppc_register_host_cpu_type();

    return 0;
}

int kvm_arch_irqchip_create(KVMState *s)
{
    return 0;
}

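/* Push the guest PVR to KVM through the SREGS interface (skipped on BookE) */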
static int kvm_arch_sync_sregs(PowerPCCPU *cpu)
{
    CPUPPCState *cenv = &cpu->env;
    CPUState *cs = CPU(cpu);
    struct kvm_sregs sregs;
    int ret;

    if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
        /*
         * What we're really trying to say is "if we're on BookE, we
         * use the native PVR for now". This is the only sane way to
         * check it though, so we potentially confuse users into
         * thinking they can run BookE guests on BookS. Let's hope
         * nobody dares enough :)
         */
        return 0;
    } else {
        if (!cap_segstate) {
            fprintf(stderr, "kvm error: missing PVR setting capability\n");
            return -ENOSYS;
        }
    }

    ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
    if (ret) {
        return ret;
    }

    sregs.pvr = cenv->spr[SPR_PVR];
    return kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
}

/* Set up a shared TLB array with KVM */
static int kvm_booke206_tlb_init(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    struct kvm_book3e_206_tlb_params params = {};
    struct kvm_config_tlb cfg = {};
    unsigned int entries = 0;
    int ret, i;

    if (!kvm_enabled() ||
        !kvm_check_extension(cs->kvm_state, KVM_CAP_SW_TLB)) {
        return 0;
    }

    assert(ARRAY_SIZE(params.tlb_sizes) == BOOKE206_MAX_TLBN);

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        params.tlb_sizes[i] = booke206_tlb_size(env, i);
        params.tlb_ways[i] = booke206_tlb_ways(env, i);
        entries += params.tlb_sizes[i];
    }

    assert(entries == env->nb_tlb);
    assert(sizeof(struct kvm_book3e_206_tlb_entry) == sizeof(ppcmas_tlb_t));

    env->tlb_dirty = true;

    cfg.array = (uintptr_t)env->tlb.tlbm;
    cfg.array_len = sizeof(ppcmas_tlb_t) * entries;
    cfg.params = (uintptr_t)&params;
    cfg.mmu_type = KVM_MMU_FSL_BOOKE_NOHV;

    ret = kvm_vcpu_enable_cap(cs, KVM_CAP_SW_TLB, 0, (uintptr_t)&cfg);
    if (ret < 0) {
        fprintf(stderr, "%s: couldn't enable KVM_CAP_SW_TLB: %s\n",
                __func__, strerror(-ret));
        return ret;
    }

    env->kvm_sw_tlb = true;
    return 0;
}


#if defined(TARGET_PPC64)
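/* Ask KVM for the segment/page size (SMMU) information of the host MMU */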
static void kvm_get_smmu_info(struct kvm_ppc_smmu_info *info, Error **errp)
{
    int ret;

    assert(kvm_state != NULL);

    if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) {
        error_setg(errp, "KVM doesn't expose the MMU features it supports");
        error_append_hint(errp, "Consider switching to a newer KVM\n");
        return;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_SMMU_INFO, info);
    if (ret == 0) {
        return;
    }

    error_setg_errno(errp, -ret,
                     "KVM failed to provide the MMU features it supports");
}

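/* Return the radix page size encodings KVM supports, or NULL if unavailable */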
static struct ppc_radix_page_info *kvmppc_get_radix_page_info(void)
{
    KVMState *s = KVM_STATE(current_accel());
    struct ppc_radix_page_info *radix_page_info;
    struct kvm_ppc_rmmu_info rmmu_info = { };
    int i;

    if (!kvm_check_extension(s, KVM_CAP_PPC_MMU_RADIX)) {
        return NULL;
    }
    if (kvm_vm_ioctl(s, KVM_PPC_GET_RMMU_INFO, &rmmu_info)) {
        return NULL;
    }
    radix_page_info = g_malloc0(sizeof(*radix_page_info));
    radix_page_info->count = 0;
    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        if (rmmu_info.ap_encodings[i]) {
            radix_page_info->entries[i] = rmmu_info.ap_encodings[i];
            radix_page_info->count++;
        }
    }
    return radix_page_info;
}

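/*
 * Configure the ISA v3.00 guest MMU mode (radix vs. hash, GTSE, process
 * table) via KVM_PPC_CONFIGURE_V3_MMU and map the result to an hcall status.
 */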
target_ulong kvmppc_configure_v3_mmu(PowerPCCPU *cpu,
                                     bool radix, bool gtse,
                                     uint64_t proc_tbl)
{
    CPUState *cs = CPU(cpu);
    int ret;
    uint64_t flags = 0;
    struct kvm_ppc_mmuv3_cfg cfg = {
        .process_table = proc_tbl,
    };

    if (radix) {
        flags |= KVM_PPC_MMUV3_RADIX;
    }
    if (gtse) {
        flags |= KVM_PPC_MMUV3_GTSE;
    }
    cfg.flags = flags;
    ret = kvm_vm_ioctl(cs->kvm_state, KVM_PPC_CONFIGURE_V3_MMU, &cfg);
    switch (ret) {
    case 0:
        return H_SUCCESS;
    case -EINVAL:
        return H_PARAMETER;
    case -ENODEV:
        return H_NOT_AVAILABLE;
    default:
        return H_HARDWARE;
    }
}

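/*
 * True when KVM restricts the guest's hash-MMU page sizes to what the host
 * backing pages can provide (KVM_PPC_PAGE_SIZES_REAL).
 */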
bool kvmppc_hpt_needs_host_contiguous_pages(void)
{
    static struct kvm_ppc_smmu_info smmu_info;

    if (!kvm_enabled()) {
        return false;
    }

    kvm_get_smmu_info(&smmu_info, &error_fatal);
    return !!(smmu_info.flags & KVM_PPC_PAGE_SIZES_REAL);
}

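/*
 * Check that the MMU features required by the guest CPU model (1T segments,
 * SLB size, page size encodings) are all available from KVM.
 */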
void kvm_check_mmu(PowerPCCPU *cpu, Error **errp)
{
    struct kvm_ppc_smmu_info smmu_info;
    int iq, ik, jq, jk;
    Error *local_err = NULL;

    /* For now, we only have anything to check on hash64 MMUs */
    if (!cpu->hash64_opts || !kvm_enabled()) {
        return;
    }

    kvm_get_smmu_info(&smmu_info, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (ppc_hash64_has(cpu, PPC_HASH64_1TSEG)
        && !(smmu_info.flags & KVM_PPC_1T_SEGMENTS)) {
        error_setg(errp,
                   "KVM does not support 1TiB segments which guest expects");
        return;
    }

    if (smmu_info.slb_size < cpu->hash64_opts->slb_size) {
        error_setg(errp, "KVM only supports %u SLB entries, but guest needs %u",
                   smmu_info.slb_size, cpu->hash64_opts->slb_size);
        return;
    }

    /*
     * Verify that every pagesize supported by the cpu model is
     * supported by KVM with the same encodings
     */
    for (iq = 0; iq < ARRAY_SIZE(cpu->hash64_opts->sps); iq++) {
        PPCHash64SegmentPageSizes *qsps = &cpu->hash64_opts->sps[iq];
        struct kvm_ppc_one_seg_page_size *ksps;

        for (ik = 0; ik < ARRAY_SIZE(smmu_info.sps); ik++) {
            if (qsps->page_shift == smmu_info.sps[ik].page_shift) {
                break;
            }
        }
        if (ik >= ARRAY_SIZE(smmu_info.sps)) {
            error_setg(errp, "KVM doesn't support base page shift %u",
                       qsps->page_shift);
            return;
        }

        ksps = &smmu_info.sps[ik];
        if (ksps->slb_enc != qsps->slb_enc) {
            error_setg(errp,
"KVM uses SLB encoding 0x%x for page shift %u, but guest expects 0x%x",
                       ksps->slb_enc, ksps->page_shift, qsps->slb_enc);
            return;
        }

        for (jq = 0; jq < ARRAY_SIZE(qsps->enc); jq++) {
            for (jk = 0; jk < ARRAY_SIZE(ksps->enc); jk++) {
                if (qsps->enc[jq].page_shift == ksps->enc[jk].page_shift) {
                    break;
                }
            }

            if (jk >= ARRAY_SIZE(ksps->enc)) {
                error_setg(errp, "KVM doesn't support page shift %u/%u",
                           qsps->enc[jq].page_shift, qsps->page_shift);
                return;
            }
            if (qsps->enc[jq].pte_enc != ksps->enc[jk].pte_enc) {
                error_setg(errp,
"KVM uses PTE encoding 0x%x for page shift %u/%u, but guest expects 0x%x",
                           ksps->enc[jk].pte_enc, qsps->enc[jq].page_shift,
                           qsps->page_shift, qsps->enc[jq].pte_enc);
                return;
            }
        }
    }

    if (ppc_hash64_has(cpu, PPC_HASH64_CI_LARGEPAGE)) {
        /*
         * Mostly what guest pagesizes we can use are related to the
         * host pages used to map guest RAM, which is handled in the
         * platform code. Cache-Inhibited largepages (64k) however are
         * used for I/O, so if they're mapped to the host at all it
         * will be a normal mapping, not a special hugepage one used
         * for RAM.
         */
        if (qemu_real_host_page_size() < 0x10000) {
            error_setg(errp,
                       "KVM can't supply 64kiB CI pages, which guest expects");
        }
    }
}
#endif /* defined(TARGET_PPC64) */

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return POWERPC_CPU(cpu)->vcpu_id;
}

/*
 * e500 supports 2 h/w breakpoints and 2 watchpoints.  book3s supports
 * only 1 watchpoint, so an array size of 4 is sufficient for now.
 */
#define MAX_HW_BKPTS 4

static struct HWBreakpoint {
    target_ulong addr;
    int type;
} hw_debug_points[MAX_HW_BKPTS];

static CPUWatchpoint hw_watchpoint;

/* By default, no hardware breakpoints or watchpoints are supported */
static int max_hw_breakpoint;
static int max_hw_watchpoint;
static int nb_hw_breakpoint;
static int nb_hw_watchpoint;

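/* Set the hardware breakpoint/watchpoint limits for this CPU's family */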
static void kvmppc_hw_debug_points_init(CPUPPCState *cenv)
{
    if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
        max_hw_breakpoint = 2;
        max_hw_watchpoint = 2;
    }

    if ((max_hw_breakpoint + max_hw_watchpoint) > MAX_HW_BKPTS) {
        fprintf(stderr, "Error initializing h/w breakpoints\n");
        return;
    }
}

int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp)
{
    return 0;
}

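/* Per-vCPU KVM setup: sync sregs, MMU-specific bits and debug support */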
int kvm_arch_init_vcpu(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *cenv = &cpu->env;
    int ret;

    /* Synchronize sregs with kvm */
    ret = kvm_arch_sync_sregs(cpu);
    if (ret) {
        if (ret == -EINVAL) {
            error_report("Register sync failed... If you're using kvm-hv.ko,"
                         " only \"-cpu host\" is possible");
        }
        return ret;
    }

    switch (cenv->mmu_model) {
    case POWERPC_MMU_BOOKE206:
        /* This target supports access to KVM's guest TLB */
        ret = kvm_booke206_tlb_init(cpu);
        break;
    case POWERPC_MMU_2_07:
        if (!cap_htm && !kvmppc_is_pr(cs->kvm_state)) {
            /*
             * KVM-HV has transactional memory on POWER8 also without
             * the KVM_CAP_PPC_HTM extension, so enable it here
             * instead as long as it's available to userspace on the
             * host.
             */
            if (qemu_getauxval(AT_HWCAP2) & PPC_FEATURE2_HAS_HTM) {
                cap_htm = true;
            }
        }
        break;
    default:
        break;
    }

    kvm_get_one_reg(cs, KVM_REG_PPC_DEBUG_INST, &debug_inst_opcode);
    kvmppc_hw_debug_points_init(cenv);

    return ret;
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}

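/* Push the full software TLB contents to KVM by marking every entry dirty */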
static void kvm_sw_tlb_put(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    struct kvm_dirty_tlb dirty_tlb;
    unsigned char *bitmap;
    int ret;

    if (!env->kvm_sw_tlb) {
        return;
    }

    bitmap = g_malloc((env->nb_tlb + 7) / 8);
    memset(bitmap, 0xFF, (env->nb_tlb + 7) / 8);

    dirty_tlb.bitmap = (uintptr_t)bitmap;
    dirty_tlb.num_dirty = env->nb_tlb;

    ret = kvm_vcpu_ioctl(cs, KVM_DIRTY_TLB, &dirty_tlb);
    if (ret) {
        fprintf(stderr, "%s: KVM_DIRTY_TLB: %s\n",
                __func__, strerror(-ret));
    }

    g_free(bitmap);
}

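/* Accessors for a single SPR through KVM's ONE_REG interface */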
static void kvm_get_one_spr(CPUState *cs, uint64_t id, int spr)
{
    CPUPPCState *env = cpu_env(cs);
    /* Init 'val' to avoid "uninitialised value" Valgrind warnings */
    union {
        uint32_t u32;
        uint64_t u64;
    } val = { };
    struct kvm_one_reg reg = {
        .id = id,
        .addr = (uintptr_t) &val,
    };
    int ret;

    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret != 0) {
        trace_kvm_failed_spr_get(spr, strerror(errno));
    } else {
        switch (id & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
            env->spr[spr] = val.u32;
            break;

        case KVM_REG_SIZE_U64:
            env->spr[spr] = val.u64;
            break;

        default:
            /* Don't handle this size yet */
            abort();
        }
    }
}

static void kvm_put_one_spr(CPUState *cs, uint64_t id, int spr)
{
    CPUPPCState *env = cpu_env(cs);
    union {
        uint32_t u32;
        uint64_t u64;
    } val;
    struct kvm_one_reg reg = {
        .id = id,
        .addr = (uintptr_t) &val,
    };
    int ret;

    switch (id & KVM_REG_SIZE_MASK) {
    case KVM_REG_SIZE_U32:
        val.u32 = env->spr[spr];
        break;

    case KVM_REG_SIZE_U64:
        val.u64 = env->spr[spr];
        break;

    default:
        /* Don't handle this size yet */
        abort();
    }

    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret != 0) {
        trace_kvm_failed_spr_set(spr, strerror(errno));
    }
}

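/* Write the guest FP/VSX and Altivec register state to KVM */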
static int kvm_put_fp(CPUState *cs)
{
    CPUPPCState *env = cpu_env(cs);
    struct kvm_one_reg reg;
    int i;
    int ret;

    if (env->insns_flags & PPC_FLOAT) {
        uint64_t fpscr = env->fpscr;
        bool vsx = !!(env->insns_flags2 & PPC2_VSX);

        reg.id = KVM_REG_PPC_FPSCR;
        reg.addr = (uintptr_t)&fpscr;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            trace_kvm_failed_fpscr_set(strerror(errno));
            return ret;
        }

        for (i = 0; i < 32; i++) {
            uint64_t vsr[2];
            uint64_t *fpr = cpu_fpr_ptr(env, i);
            uint64_t *vsrl = cpu_vsrl_ptr(env, i);

#if HOST_BIG_ENDIAN
            vsr[0] = float64_val(*fpr);
            vsr[1] = *vsrl;
#else
            vsr[0] = *vsrl;
            vsr[1] = float64_val(*fpr);
#endif
            reg.addr = (uintptr_t) &vsr;
            reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);

            ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
            if (ret < 0) {
                trace_kvm_failed_fp_set(vsx ? "VSR" : "FPR", i,
                                        strerror(errno));
                return ret;
            }
        }
    }

    if (env->insns_flags & PPC_ALTIVEC) {
        reg.id = KVM_REG_PPC_VSCR;
        reg.addr = (uintptr_t)&env->vscr;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            trace_kvm_failed_vscr_set(strerror(errno));
            return ret;
        }

        for (i = 0; i < 32; i++) {
            reg.id = KVM_REG_PPC_VR(i);
            reg.addr = (uintptr_t)cpu_avr_ptr(env, i);
            ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
            if (ret < 0) {
                trace_kvm_failed_vr_set(i, strerror(errno));
                return ret;
            }
        }
    }

    return 0;
}

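/* Read the guest FP/VSX and Altivec register state back from KVM */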
static int kvm_get_fp(CPUState *cs)
{
    CPUPPCState *env = cpu_env(cs);
    struct kvm_one_reg reg;
    int i;
    int ret;

    if (env->insns_flags & PPC_FLOAT) {
        uint64_t fpscr;
        bool vsx = !!(env->insns_flags2 & PPC2_VSX);

        reg.id = KVM_REG_PPC_FPSCR;
        reg.addr = (uintptr_t)&fpscr;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret < 0) {
            trace_kvm_failed_fpscr_get(strerror(errno));
            return ret;
        } else {
            env->fpscr = fpscr;
        }

        for (i = 0; i < 32; i++) {
            uint64_t vsr[2];
            uint64_t *fpr = cpu_fpr_ptr(env, i);
            uint64_t *vsrl = cpu_vsrl_ptr(env, i);

            reg.addr = (uintptr_t) &vsr;
            reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);

            ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
            if (ret < 0) {
                trace_kvm_failed_fp_get(vsx ? "VSR" : "FPR", i,
                                        strerror(errno));
                return ret;
            } else {
#if HOST_BIG_ENDIAN
                *fpr = vsr[0];
                if (vsx) {
                    *vsrl = vsr[1];
                }
#else
                *fpr = vsr[1];
                if (vsx) {
                    *vsrl = vsr[0];
                }
#endif
            }
        }
    }

    if (env->insns_flags & PPC_ALTIVEC) {
        reg.id = KVM_REG_PPC_VSCR;
        reg.addr = (uintptr_t)&env->vscr;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret < 0) {
            trace_kvm_failed_vscr_get(strerror(errno));
            return ret;
        }

        for (i = 0; i < 32; i++) {
            reg.id = KVM_REG_PPC_VR(i);
            reg.addr = (uintptr_t)cpu_avr_ptr(env, i);
            ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
            if (ret < 0) {
                trace_kvm_failed_vr_get(i, strerror(errno));
                return ret;
            }
        }
    }

    return 0;
}

#if defined(TARGET_PPC64)
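/* Fetch the VPA, SLB shadow and dispatch trace log addresses from KVM */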
static int kvm_get_vpa(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
    struct kvm_one_reg reg;
    int ret;

    reg.id = KVM_REG_PPC_VPA_ADDR;
    reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret < 0) {
        trace_kvm_failed_vpa_addr_get(strerror(errno));
        return ret;
    }

    assert((uintptr_t)&spapr_cpu->slb_shadow_size
           == ((uintptr_t)&spapr_cpu->slb_shadow_addr + 8));
    reg.id = KVM_REG_PPC_VPA_SLB;
    reg.addr = (uintptr_t)&spapr_cpu->slb_shadow_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret < 0) {
        trace_kvm_failed_slb_get(strerror(errno));
        return ret;
    }

    assert((uintptr_t)&spapr_cpu->dtl_size
           == ((uintptr_t)&spapr_cpu->dtl_addr + 8));
    reg.id = KVM_REG_PPC_VPA_DTL;
    reg.addr = (uintptr_t)&spapr_cpu->dtl_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret < 0) {
        trace_kvm_failed_dtl_get(strerror(errno));
        return ret;
    }

    return 0;
}

static int kvm_put_vpa(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
    struct kvm_one_reg reg;
    int ret;

    /*
     * SLB shadow or DTL can't be registered unless a master VPA is
     * registered.  That means when restoring state, if a VPA *is*
     * registered, we need to set that up first.  If not, we need to
     * deregister the others before deregistering the master VPA
     */
    assert(spapr_cpu->vpa_addr
           || !(spapr_cpu->slb_shadow_addr || spapr_cpu->dtl_addr));

    if (spapr_cpu->vpa_addr) {
        reg.id = KVM_REG_PPC_VPA_ADDR;
        reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            trace_kvm_failed_vpa_addr_set(strerror(errno));
            return ret;
        }
    }

    assert((uintptr_t)&spapr_cpu->slb_shadow_size
           == ((uintptr_t)&spapr_cpu->slb_shadow_addr + 8));
    reg.id = KVM_REG_PPC_VPA_SLB;
    reg.addr = (uintptr_t)&spapr_cpu->slb_shadow_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret < 0) {
        trace_kvm_failed_slb_set(strerror(errno));
        return ret;
    }

    assert((uintptr_t)&spapr_cpu->dtl_size
           == ((uintptr_t)&spapr_cpu->dtl_addr + 8));
    reg.id = KVM_REG_PPC_VPA_DTL;
    reg.addr = (uintptr_t)&spapr_cpu->dtl_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret < 0) {
        trace_kvm_failed_dtl_set(strerror(errno));
        return ret;
    }

    if (!spapr_cpu->vpa_addr) {
        reg.id = KVM_REG_PPC_VPA_ADDR;
        reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            trace_kvm_failed_null_vpa_addr_set(strerror(errno));
            return ret;
        }
    }

    return 0;
}
#endif /* TARGET_PPC64 */

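/* Write PVR, SDR1, the SLB, segment registers and BATs to KVM (Book3S) */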
int kvmppc_put_books_sregs(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    struct kvm_sregs sregs = { };
    int i;

    sregs.pvr = env->spr[SPR_PVR];

    if (cpu->vhyp) {
        sregs.u.s.sdr1 = cpu->vhyp_class->encode_hpt_for_kvm_pr(cpu->vhyp);
    } else {
        sregs.u.s.sdr1 = env->spr[SPR_SDR1];
    }

    /* Sync SLB */
#ifdef TARGET_PPC64
    for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
        sregs.u.s.ppc64.slb[i].slbe = env->slb[i].esid;
        if (env->slb[i].esid & SLB_ESID_V) {
            sregs.u.s.ppc64.slb[i].slbe |= i;
        }
        sregs.u.s.ppc64.slb[i].slbv = env->slb[i].vsid;
    }
#endif

    /* Sync SRs */
    for (i = 0; i < 16; i++) {
        sregs.u.s.ppc32.sr[i] = env->sr[i];
    }

    /* Sync BATs */
    for (i = 0; i < 8; i++) {
        /* Beware. We have to swap upper and lower bits here */
        sregs.u.s.ppc32.dbat[i] = ((uint64_t)env->DBAT[0][i] << 32)
            | env->DBAT[1][i];
        sregs.u.s.ppc32.ibat[i] = ((uint64_t)env->IBAT[0][i] << 32)
            | env->IBAT[1][i];
    }

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
}

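/*
 * Copy the vCPU register state (GPRs, CR, SPRs, FP/VSX and, depending on
 * 'level', the Book3S sregs, TM state and VPA) from QEMU to KVM.
 */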
int kvm_arch_put_registers(CPUState *cs, int level, Error **errp)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    struct kvm_regs regs;
    int ret;
    int i;

    ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
    if (ret < 0) {
        return ret;
    }

    regs.ctr = env->ctr;
    regs.lr  = env->lr;
    regs.xer = cpu_read_xer(env);
    regs.msr = env->msr;
    regs.pc = env->nip;

    regs.srr0 = env->spr[SPR_SRR0];
    regs.srr1 = env->spr[SPR_SRR1];

    regs.sprg0 = env->spr[SPR_SPRG0];
    regs.sprg1 = env->spr[SPR_SPRG1];
    regs.sprg2 = env->spr[SPR_SPRG2];
    regs.sprg3 = env->spr[SPR_SPRG3];
    regs.sprg4 = env->spr[SPR_SPRG4];
    regs.sprg5 = env->spr[SPR_SPRG5];
    regs.sprg6 = env->spr[SPR_SPRG6];
    regs.sprg7 = env->spr[SPR_SPRG7];

    regs.pid = env->spr[SPR_BOOKE_PID];

    for (i = 0; i < 32; i++) {
        regs.gpr[i] = env->gpr[i];
    }

    regs.cr = ppc_get_cr(env);

    ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
    if (ret < 0) {
        return ret;
    }

    kvm_put_fp(cs);

    if (env->tlb_dirty) {
        kvm_sw_tlb_put(cpu);
        env->tlb_dirty = false;
    }

    if (cap_segstate && (level >= KVM_PUT_RESET_STATE)) {
        ret = kvmppc_put_books_sregs(cpu);
        if (ret < 0) {
            return ret;
        }
    }

    if (cap_hior && (level >= KVM_PUT_RESET_STATE)) {
        kvm_put_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
    }

    if (cap_one_reg) {
        /*
         * We deliberately ignore errors here: for kernels which have
         * the ONE_REG calls but don't support the specific
         * registers, there's a reasonable chance things will still
         * work, at least until we try to migrate.
         */
        for (i = 0; i < 1024; i++) {
            uint64_t id = env->spr_cb[i].one_reg_id;

            if (id != 0) {
                kvm_put_one_spr(cs, id, i);
            }
        }

#ifdef TARGET_PPC64
        if (FIELD_EX64(env->msr, MSR, TS)) {
            for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
                kvm_set_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
            }
            for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
                kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
            }
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
        }

        if (cap_papr) {
            if (kvm_put_vpa(cs) < 0) {
                trace_kvm_failed_put_vpa();
            }
        }

        kvm_set_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);

        if (level > KVM_PUT_RUNTIME_STATE) {
            kvm_put_one_spr(cs, KVM_REG_PPC_DPDES, SPR_DPDES);
        }
#endif /* TARGET_PPC64 */
    }

    return ret;
}

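/* Recompute a BookE exception vector from its IVOR and the IVPR base */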
kvm_sync_excp(CPUPPCState * env,int vector,int ivor)1024c371c2e3SBharat Bhushan static void kvm_sync_excp(CPUPPCState *env, int vector, int ivor)
1025c371c2e3SBharat Bhushan {
1026c371c2e3SBharat Bhushan      env->excp_vectors[vector] = env->spr[ivor] + env->spr[SPR_BOOKE_IVPR];
1027c371c2e3SBharat Bhushan }
1028c371c2e3SBharat Bhushan 
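/*
 * Editor's note: illustrative sketch, not part of the original file.
 * kvm_sync_excp() mirrors the BookE convention that an effective exception
 * vector is the IVPR base plus the per-exception IVOR offset.  A hypothetical
 * caller resyncing the critical-input vector after the guest rewrites IVOR0
 * might look like this (the function name below is made up):
 */
#if 0 /* hedged example, not compiled */
static void example_resync_critical_vector(CPUPPCState *env)
{
    /* assumes env->spr[SPR_BOOKE_IVPR] and env->spr[SPR_BOOKE_IVOR0] are current */
    kvm_sync_excp(env, POWERPC_EXCP_CRITICAL, SPR_BOOKE_IVOR0);
}
#endif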
1029a7a00a72SDavid Gibson static int kvmppc_get_booke_sregs(PowerPCCPU *cpu)
1030d76d1650Saurel32 {
103120d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
1032ba5e5090SAlexander Graf     struct kvm_sregs sregs;
1033a7a00a72SDavid Gibson     int ret;
1034d76d1650Saurel32 
1035a7a00a72SDavid Gibson     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
103690dc8812SScott Wood     if (ret < 0) {
103790dc8812SScott Wood         return ret;
103890dc8812SScott Wood     }
103990dc8812SScott Wood 
104090dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_BASE) {
104190dc8812SScott Wood         env->spr[SPR_BOOKE_CSRR0] = sregs.u.e.csrr0;
104290dc8812SScott Wood         env->spr[SPR_BOOKE_CSRR1] = sregs.u.e.csrr1;
104390dc8812SScott Wood         env->spr[SPR_BOOKE_ESR] = sregs.u.e.esr;
104490dc8812SScott Wood         env->spr[SPR_BOOKE_DEAR] = sregs.u.e.dear;
104590dc8812SScott Wood         env->spr[SPR_BOOKE_MCSR] = sregs.u.e.mcsr;
104690dc8812SScott Wood         env->spr[SPR_BOOKE_TSR] = sregs.u.e.tsr;
104790dc8812SScott Wood         env->spr[SPR_BOOKE_TCR] = sregs.u.e.tcr;
104890dc8812SScott Wood         env->spr[SPR_DECR] = sregs.u.e.dec;
104990dc8812SScott Wood         env->spr[SPR_TBL] = sregs.u.e.tb & 0xffffffff;
105090dc8812SScott Wood         env->spr[SPR_TBU] = sregs.u.e.tb >> 32;
105190dc8812SScott Wood         env->spr[SPR_VRSAVE] = sregs.u.e.vrsave;
105290dc8812SScott Wood     }
105390dc8812SScott Wood 
105490dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_ARCH206) {
105590dc8812SScott Wood         env->spr[SPR_BOOKE_PIR] = sregs.u.e.pir;
105690dc8812SScott Wood         env->spr[SPR_BOOKE_MCSRR0] = sregs.u.e.mcsrr0;
105790dc8812SScott Wood         env->spr[SPR_BOOKE_MCSRR1] = sregs.u.e.mcsrr1;
105890dc8812SScott Wood         env->spr[SPR_BOOKE_DECAR] = sregs.u.e.decar;
105990dc8812SScott Wood         env->spr[SPR_BOOKE_IVPR] = sregs.u.e.ivpr;
106090dc8812SScott Wood     }
106190dc8812SScott Wood 
106290dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_64) {
106390dc8812SScott Wood         env->spr[SPR_BOOKE_EPCR] = sregs.u.e.epcr;
106490dc8812SScott Wood     }
106590dc8812SScott Wood 
106690dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_SPRG8) {
106790dc8812SScott Wood         env->spr[SPR_BOOKE_SPRG8] = sregs.u.e.sprg8;
106890dc8812SScott Wood     }
106990dc8812SScott Wood 
107090dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_IVOR) {
107190dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0];
1072c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_CRITICAL,  SPR_BOOKE_IVOR0);
107390dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1];
1074c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_MCHECK,  SPR_BOOKE_IVOR1);
107590dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2];
1076c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DSI,  SPR_BOOKE_IVOR2);
107790dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3];
1078c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_ISI,  SPR_BOOKE_IVOR3);
107990dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4];
1080c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_EXTERNAL,  SPR_BOOKE_IVOR4);
108190dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5];
1082c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_ALIGN,  SPR_BOOKE_IVOR5);
108390dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6];
1084c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_PROGRAM,  SPR_BOOKE_IVOR6);
108590dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7];
1086c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_FPU,  SPR_BOOKE_IVOR7);
108790dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8];
1088c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_SYSCALL,  SPR_BOOKE_IVOR8);
108990dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9];
1090c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_APU,  SPR_BOOKE_IVOR9);
109190dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10];
1092c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DECR,  SPR_BOOKE_IVOR10);
109390dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11];
1094c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_FIT,  SPR_BOOKE_IVOR11);
109590dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12];
1096c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_WDT,  SPR_BOOKE_IVOR12);
109790dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13];
1098c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DTLB,  SPR_BOOKE_IVOR13);
109990dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14];
1100c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_ITLB,  SPR_BOOKE_IVOR14);
110190dc8812SScott Wood         env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15];
1102c371c2e3SBharat Bhushan         kvm_sync_excp(env, POWERPC_EXCP_DEBUG,  SPR_BOOKE_IVOR15);
110390dc8812SScott Wood 
110490dc8812SScott Wood         if (sregs.u.e.features & KVM_SREGS_E_SPE) {
110590dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0];
1106c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_SPEU,  SPR_BOOKE_IVOR32);
110790dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1];
1108c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_EFPDI,  SPR_BOOKE_IVOR33);
110990dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2];
1110c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_EFPRI,  SPR_BOOKE_IVOR34);
111190dc8812SScott Wood         }
111290dc8812SScott Wood 
111390dc8812SScott Wood         if (sregs.u.e.features & KVM_SREGS_E_PM) {
111490dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3];
1115c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_EPERFM,  SPR_BOOKE_IVOR35);
111690dc8812SScott Wood         }
111790dc8812SScott Wood 
111890dc8812SScott Wood         if (sregs.u.e.features & KVM_SREGS_E_PC) {
111990dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4];
1120c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_DOORI,  SPR_BOOKE_IVOR36);
112190dc8812SScott Wood             env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5];
1122c371c2e3SBharat Bhushan             kvm_sync_excp(env, POWERPC_EXCP_DOORCI, SPR_BOOKE_IVOR37);
112390dc8812SScott Wood         }
112490dc8812SScott Wood     }
112590dc8812SScott Wood 
112690dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_ARCH206_MMU) {
112790dc8812SScott Wood         env->spr[SPR_BOOKE_MAS0] = sregs.u.e.mas0;
112890dc8812SScott Wood         env->spr[SPR_BOOKE_MAS1] = sregs.u.e.mas1;
112990dc8812SScott Wood         env->spr[SPR_BOOKE_MAS2] = sregs.u.e.mas2;
113090dc8812SScott Wood         env->spr[SPR_BOOKE_MAS3] = sregs.u.e.mas7_3 & 0xffffffff;
113190dc8812SScott Wood         env->spr[SPR_BOOKE_MAS4] = sregs.u.e.mas4;
113290dc8812SScott Wood         env->spr[SPR_BOOKE_MAS6] = sregs.u.e.mas6;
113390dc8812SScott Wood         env->spr[SPR_BOOKE_MAS7] = sregs.u.e.mas7_3 >> 32;
113490dc8812SScott Wood         env->spr[SPR_MMUCFG] = sregs.u.e.mmucfg;
113590dc8812SScott Wood         env->spr[SPR_BOOKE_TLB0CFG] = sregs.u.e.tlbcfg[0];
113690dc8812SScott Wood         env->spr[SPR_BOOKE_TLB1CFG] = sregs.u.e.tlbcfg[1];
113790dc8812SScott Wood     }
113890dc8812SScott Wood 
113990dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_EXP) {
114090dc8812SScott Wood         env->spr[SPR_BOOKE_EPR] = sregs.u.e.epr;
114190dc8812SScott Wood     }
114290dc8812SScott Wood 
114390dc8812SScott Wood     if (sregs.u.e.features & KVM_SREGS_E_PD) {
114490dc8812SScott Wood         env->spr[SPR_BOOKE_EPLC] = sregs.u.e.eplc;
114590dc8812SScott Wood         env->spr[SPR_BOOKE_EPSC] = sregs.u.e.epsc;
114690dc8812SScott Wood     }
114790dc8812SScott Wood 
114890dc8812SScott Wood     if (sregs.u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
114990dc8812SScott Wood         env->spr[SPR_E500_SVR] = sregs.u.e.impl.fsl.svr;
115090dc8812SScott Wood         env->spr[SPR_Exxx_MCAR] = sregs.u.e.impl.fsl.mcar;
115190dc8812SScott Wood         env->spr[SPR_HID0] = sregs.u.e.impl.fsl.hid0;
115290dc8812SScott Wood 
115390dc8812SScott Wood         if (sregs.u.e.impl.fsl.features & KVM_SREGS_E_FSL_PIDn) {
115490dc8812SScott Wood             env->spr[SPR_BOOKE_PID1] = sregs.u.e.impl.fsl.pid1;
115590dc8812SScott Wood             env->spr[SPR_BOOKE_PID2] = sregs.u.e.impl.fsl.pid2;
115690dc8812SScott Wood         }
115790dc8812SScott Wood     }
1158a7a00a72SDavid Gibson 
1159a7a00a72SDavid Gibson     return 0;
1160fafc0b6aSAlexander Graf }
116190dc8812SScott Wood 
1162a7a00a72SDavid Gibson static int kvmppc_get_books_sregs(PowerPCCPU *cpu)
1163a7a00a72SDavid Gibson {
1164a7a00a72SDavid Gibson     CPUPPCState *env = &cpu->env;
1165a7a00a72SDavid Gibson     struct kvm_sregs sregs;
1166a7a00a72SDavid Gibson     int ret;
1167a7a00a72SDavid Gibson     int i;
1168a7a00a72SDavid Gibson 
1169a7a00a72SDavid Gibson     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
117090dc8812SScott Wood     if (ret < 0) {
117190dc8812SScott Wood         return ret;
117290dc8812SScott Wood     }
117390dc8812SScott Wood 
1174e57ca75cSDavid Gibson     if (!cpu->vhyp) {
1175bb593904SDavid Gibson         ppc_store_sdr1(env, sregs.u.s.sdr1);
1176f3c75d42SAneesh Kumar K.V     }
1177ba5e5090SAlexander Graf 
1178ba5e5090SAlexander Graf     /* Sync SLB */
117982c09f2fSAlexander Graf #ifdef TARGET_PPC64
11804b4d4a21SAneesh Kumar K.V     /*
11814b4d4a21SAneesh Kumar K.V      * The packed SLB array we get from KVM_GET_SREGS only contains
1182a7a00a72SDavid Gibson      * information about valid entries. So we flush our internal copy
1183a7a00a72SDavid Gibson      * to get rid of stale ones, then put all valid SLB entries back
1184a7a00a72SDavid Gibson      * in.
11854b4d4a21SAneesh Kumar K.V      */
11864b4d4a21SAneesh Kumar K.V     memset(env->slb, 0, sizeof(env->slb));
1187d83af167SAneesh Kumar K.V     for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
11884b4d4a21SAneesh Kumar K.V         target_ulong rb = sregs.u.s.ppc64.slb[i].slbe;
11894b4d4a21SAneesh Kumar K.V         target_ulong rs = sregs.u.s.ppc64.slb[i].slbv;
11904b4d4a21SAneesh Kumar K.V         /*
11914b4d4a21SAneesh Kumar K.V          * Only restore valid entries
11924b4d4a21SAneesh Kumar K.V          */
11934b4d4a21SAneesh Kumar K.V         if (rb & SLB_ESID_V) {
1194bcd81230SDavid Gibson             ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs);
11954b4d4a21SAneesh Kumar K.V         }
1196ba5e5090SAlexander Graf     }
119782c09f2fSAlexander Graf #endif
1198ba5e5090SAlexander Graf 
1199ba5e5090SAlexander Graf     /* Sync SRs */
1200ba5e5090SAlexander Graf     for (i = 0; i < 16; i++) {
1201ba5e5090SAlexander Graf         env->sr[i] = sregs.u.s.ppc32.sr[i];
1202ba5e5090SAlexander Graf     }
1203ba5e5090SAlexander Graf 
1204ba5e5090SAlexander Graf     /* Sync BATs */
1205ba5e5090SAlexander Graf     for (i = 0; i < 8; i++) {
1206ba5e5090SAlexander Graf         env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff;
1207ba5e5090SAlexander Graf         env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32;
1208ba5e5090SAlexander Graf         env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff;
1209ba5e5090SAlexander Graf         env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32;
1210ba5e5090SAlexander Graf     }
1211a7a00a72SDavid Gibson 
1212a7a00a72SDavid Gibson     return 0;
1213a7a00a72SDavid Gibson }
1214a7a00a72SDavid Gibson 
1215a1676bb3SJulia Suvorova int kvm_arch_get_registers(CPUState *cs, Error **errp)
1216a7a00a72SDavid Gibson {
1217a7a00a72SDavid Gibson     PowerPCCPU *cpu = POWERPC_CPU(cs);
1218a7a00a72SDavid Gibson     CPUPPCState *env = &cpu->env;
1219a7a00a72SDavid Gibson     struct kvm_regs regs;
1220a7a00a72SDavid Gibson     int i, ret;
1221a7a00a72SDavid Gibson 
1222a7a00a72SDavid Gibson     ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
1223c995e942SDavid Gibson     if (ret < 0) {
1224a7a00a72SDavid Gibson         return ret;
1225c995e942SDavid Gibson     }
1226a7a00a72SDavid Gibson 
12272060436aSHarsh Prateek Bora     ppc_set_cr(env, regs.cr);
1228a7a00a72SDavid Gibson     env->ctr = regs.ctr;
1229a7a00a72SDavid Gibson     env->lr = regs.lr;
1230a7a00a72SDavid Gibson     cpu_write_xer(env, regs.xer);
1231a7a00a72SDavid Gibson     env->msr = regs.msr;
1232a7a00a72SDavid Gibson     env->nip = regs.pc;
1233a7a00a72SDavid Gibson 
1234a7a00a72SDavid Gibson     env->spr[SPR_SRR0] = regs.srr0;
1235a7a00a72SDavid Gibson     env->spr[SPR_SRR1] = regs.srr1;
1236a7a00a72SDavid Gibson 
1237a7a00a72SDavid Gibson     env->spr[SPR_SPRG0] = regs.sprg0;
1238a7a00a72SDavid Gibson     env->spr[SPR_SPRG1] = regs.sprg1;
1239a7a00a72SDavid Gibson     env->spr[SPR_SPRG2] = regs.sprg2;
1240a7a00a72SDavid Gibson     env->spr[SPR_SPRG3] = regs.sprg3;
1241a7a00a72SDavid Gibson     env->spr[SPR_SPRG4] = regs.sprg4;
1242a7a00a72SDavid Gibson     env->spr[SPR_SPRG5] = regs.sprg5;
1243a7a00a72SDavid Gibson     env->spr[SPR_SPRG6] = regs.sprg6;
1244a7a00a72SDavid Gibson     env->spr[SPR_SPRG7] = regs.sprg7;
1245a7a00a72SDavid Gibson 
1246a7a00a72SDavid Gibson     env->spr[SPR_BOOKE_PID] = regs.pid;
1247a7a00a72SDavid Gibson 
1248c995e942SDavid Gibson     for (i = 0; i < 32; i++) {
1249a7a00a72SDavid Gibson         env->gpr[i] = regs.gpr[i];
1250c995e942SDavid Gibson     }
1251a7a00a72SDavid Gibson 
1252a7a00a72SDavid Gibson     kvm_get_fp(cs);
1253a7a00a72SDavid Gibson 
1254a7a00a72SDavid Gibson     if (cap_booke_sregs) {
1255a7a00a72SDavid Gibson         ret = kvmppc_get_booke_sregs(cpu);
1256a7a00a72SDavid Gibson         if (ret < 0) {
1257a7a00a72SDavid Gibson             return ret;
1258a7a00a72SDavid Gibson         }
1259a7a00a72SDavid Gibson     }
1260a7a00a72SDavid Gibson 
1261a7a00a72SDavid Gibson     if (cap_segstate) {
1262a7a00a72SDavid Gibson         ret = kvmppc_get_books_sregs(cpu);
1263a7a00a72SDavid Gibson         if (ret < 0) {
1264a7a00a72SDavid Gibson             return ret;
1265a7a00a72SDavid Gibson         }
1266fafc0b6aSAlexander Graf     }
1267ba5e5090SAlexander Graf 
1268d67d40eaSDavid Gibson     if (cap_hior) {
1269d67d40eaSDavid Gibson         kvm_get_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
1270d67d40eaSDavid Gibson     }
1271d67d40eaSDavid Gibson 
1272d67d40eaSDavid Gibson     if (cap_one_reg) {
1273c995e942SDavid Gibson         /*
1274c995e942SDavid Gibson          * We deliberately ignore errors here: for kernels which have
1275d67d40eaSDavid Gibson          * the ONE_REG calls but don't support the specific
1276d67d40eaSDavid Gibson          * registers, there's a reasonable chance things will still
1277c995e942SDavid Gibson          * work, at least until we try to migrate.
1278c995e942SDavid Gibson          */
1279d67d40eaSDavid Gibson         for (i = 0; i < 1024; i++) {
1280d67d40eaSDavid Gibson             uint64_t id = env->spr_cb[i].one_reg_id;
1281d67d40eaSDavid Gibson 
1282d67d40eaSDavid Gibson             if (id != 0) {
1283d67d40eaSDavid Gibson                 kvm_get_one_spr(cs, id, i);
1284d67d40eaSDavid Gibson             }
1285d67d40eaSDavid Gibson         }
12869b00ea49SDavid Gibson 
12879b00ea49SDavid Gibson #ifdef TARGET_PPC64
1288ca241959SVíctor Colombo         if (FIELD_EX64(env->msr, MSR, TS)) {
128980b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
129080b3f79bSAlexey Kardashevskiy                 kvm_get_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
129180b3f79bSAlexey Kardashevskiy             }
129280b3f79bSAlexey Kardashevskiy             for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
129380b3f79bSAlexey Kardashevskiy                 kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
129480b3f79bSAlexey Kardashevskiy             }
129580b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
129680b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
129780b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
129880b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
129980b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
130080b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
130180b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
130280b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
130380b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
130480b3f79bSAlexey Kardashevskiy             kvm_get_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
130580b3f79bSAlexey Kardashevskiy         }
130680b3f79bSAlexey Kardashevskiy 
13079b00ea49SDavid Gibson         if (cap_papr) {
13089b00ea49SDavid Gibson             if (kvm_get_vpa(cs) < 0) {
13098d83cbf1SGreg Kurz                 trace_kvm_failed_get_vpa();
13109b00ea49SDavid Gibson             }
13119b00ea49SDavid Gibson         }
131298a8b524SAlexey Kardashevskiy 
131398a8b524SAlexey Kardashevskiy         kvm_get_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
1314972bd576SAlexey Kardashevskiy         kvm_get_one_spr(cs, KVM_REG_PPC_DPDES, SPR_DPDES);
13159b00ea49SDavid Gibson #endif
1316d67d40eaSDavid Gibson     }
1317d67d40eaSDavid Gibson 
1318d76d1650Saurel32     return 0;
1319d76d1650Saurel32 }
1320d76d1650Saurel32 
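/*
 * Editor's note: illustrative sketch, not part of the original file.
 * kvm_arch_get_registers() is normally reached through the generic
 * cpu_synchronize_state() helper rather than called directly; the function
 * name below is made up for illustration.
 */
#if 0 /* hedged example, not compiled */
static void example_refresh_guest_state(CPUState *cs)
{
    /* pulls GPRs, SPRs and (where present) TM/VPA state from KVM into env */
    cpu_synchronize_state(cs);
}
#endif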
13211bc22652SAndreas Färber int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
1322fc87e185SAlexander Graf {
1323fc87e185SAlexander Graf     unsigned virq = level ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;
1324fc87e185SAlexander Graf 
1325fc87e185SAlexander Graf     if (irq != PPC_INTERRUPT_EXT) {
1326fc87e185SAlexander Graf         return 0;
1327fc87e185SAlexander Graf     }
1328fc87e185SAlexander Graf 
132976d93e14Sjianchunfu     if (!cap_interrupt_unset) {
1330fc87e185SAlexander Graf         return 0;
1331fc87e185SAlexander Graf     }
1332fc87e185SAlexander Graf 
13331bc22652SAndreas Färber     kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq);
1334fc87e185SAlexander Graf 
1335fc87e185SAlexander Graf     return 0;
1336fc87e185SAlexander Graf }
1337fc87e185SAlexander Graf 
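/*
 * Editor's note: illustrative sketch, not part of the original file.
 * kvmppc_set_interrupt() only acts on PPC_INTERRUPT_EXT, and only when the
 * legacy cap_interrupt_unset path is available; otherwise it is a no-op.
 * The helper name below is made up for illustration.
 */
#if 0 /* hedged example, not compiled */
static void example_pulse_external_irq(PowerPCCPU *cpu)
{
    kvmppc_set_interrupt(cpu, PPC_INTERRUPT_EXT, 1);    /* assert the line */
    kvmppc_set_interrupt(cpu, PPC_INTERRUPT_EXT, 0);    /* deassert it */
}
#endif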
133820d695a9SAndreas Färber void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
1339d76d1650Saurel32 {
1340d76d1650Saurel32 }
1341d76d1650Saurel32 
13424c663752SPaolo Bonzini MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
1343d76d1650Saurel32 {
13444c663752SPaolo Bonzini     return MEMTXATTRS_UNSPECIFIED;
1345d76d1650Saurel32 }
1346d76d1650Saurel32 
134820d695a9SAndreas Färber int kvm_arch_process_async_events(CPUState *cs)
13480af691d7SMarcelo Tosatti {
1349259186a7SAndreas Färber     return cs->halted;
13500af691d7SMarcelo Tosatti }
13510af691d7SMarcelo Tosatti 
1352259186a7SAndreas Färber static int kvmppc_handle_halt(PowerPCCPU *cpu)
1353d76d1650Saurel32 {
1354259186a7SAndreas Färber     CPUState *cs = CPU(cpu);
1355259186a7SAndreas Färber     CPUPPCState *env = &cpu->env;
1356259186a7SAndreas Färber 
13570939b8f8SVíctor Colombo     if (!(cs->interrupt_request & CPU_INTERRUPT_HARD) &&
13580939b8f8SVíctor Colombo         FIELD_EX64(env->msr, MSR, EE)) {
1359259186a7SAndreas Färber         cs->halted = 1;
136027103424SAndreas Färber         cs->exception_index = EXCP_HLT;
1361d76d1650Saurel32     }
1362d76d1650Saurel32 
1363bb4ea393SJan Kiszka     return 0;
1364d76d1650Saurel32 }
1365d76d1650Saurel32 
1366d76d1650Saurel32 /* map dcr access to existing qemu dcr emulation */
1367c995e942SDavid Gibson static int kvmppc_handle_dcr_read(CPUPPCState *env,
1368c995e942SDavid Gibson                                   uint32_t dcrn, uint32_t *data)
1369d76d1650Saurel32 {
1370c995e942SDavid Gibson     if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0) {
1371d76d1650Saurel32         fprintf(stderr, "Read from unhandled DCR (0x%x)\n", dcrn);
1372c995e942SDavid Gibson     }
1373d76d1650Saurel32 
1374bb4ea393SJan Kiszka     return 0;
1375d76d1650Saurel32 }
1376d76d1650Saurel32 
1377c995e942SDavid Gibson static int kvmppc_handle_dcr_write(CPUPPCState *env,
1378c995e942SDavid Gibson                                    uint32_t dcrn, uint32_t data)
1379d76d1650Saurel32 {
1380c995e942SDavid Gibson     if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0) {
1381d76d1650Saurel32         fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);
1382c995e942SDavid Gibson     }
1383d76d1650Saurel32 
1384bb4ea393SJan Kiszka     return 0;
1385d76d1650Saurel32 }
1386d76d1650Saurel32 
13878a0548f9SBharat Bhushan int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
13888a0548f9SBharat Bhushan {
13898a0548f9SBharat Bhushan     /* Mixed endian case is not handled */
13908a0548f9SBharat Bhushan     uint32_t sc = debug_inst_opcode;
13918a0548f9SBharat Bhushan 
13928a0548f9SBharat Bhushan     if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
13938a0548f9SBharat Bhushan                             sizeof(sc), 0) ||
13948a0548f9SBharat Bhushan         cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 1)) {
13958a0548f9SBharat Bhushan         return -EINVAL;
13968a0548f9SBharat Bhushan     }
13978a0548f9SBharat Bhushan 
13988a0548f9SBharat Bhushan     return 0;
13998a0548f9SBharat Bhushan }
14008a0548f9SBharat Bhushan 
14018a0548f9SBharat Bhushan int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
14028a0548f9SBharat Bhushan {
14038a0548f9SBharat Bhushan     uint32_t sc;
14048a0548f9SBharat Bhushan 
14058a0548f9SBharat Bhushan     if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 0) ||
14068a0548f9SBharat Bhushan         sc != debug_inst_opcode ||
14078a0548f9SBharat Bhushan         cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
14088a0548f9SBharat Bhushan                             sizeof(sc), 1)) {
14098a0548f9SBharat Bhushan         return -EINVAL;
14108a0548f9SBharat Bhushan     }
14118a0548f9SBharat Bhushan 
14128a0548f9SBharat Bhushan     return 0;
14138a0548f9SBharat Bhushan }
14148a0548f9SBharat Bhushan 
141588365d17SBharat Bhushan static int find_hw_breakpoint(target_ulong addr, int type)
141688365d17SBharat Bhushan {
141788365d17SBharat Bhushan     int n;
141888365d17SBharat Bhushan 
141988365d17SBharat Bhushan     assert((nb_hw_breakpoint + nb_hw_watchpoint)
142088365d17SBharat Bhushan            <= ARRAY_SIZE(hw_debug_points));
142188365d17SBharat Bhushan 
142288365d17SBharat Bhushan     for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
142388365d17SBharat Bhushan         if (hw_debug_points[n].addr == addr &&
142488365d17SBharat Bhushan              hw_debug_points[n].type == type) {
142588365d17SBharat Bhushan             return n;
142688365d17SBharat Bhushan         }
142788365d17SBharat Bhushan     }
142888365d17SBharat Bhushan 
142988365d17SBharat Bhushan     return -1;
143088365d17SBharat Bhushan }
143188365d17SBharat Bhushan 
143288365d17SBharat Bhushan static int find_hw_watchpoint(target_ulong addr, int *flag)
143388365d17SBharat Bhushan {
143488365d17SBharat Bhushan     int n;
143588365d17SBharat Bhushan 
143688365d17SBharat Bhushan     n = find_hw_breakpoint(addr, GDB_WATCHPOINT_ACCESS);
143788365d17SBharat Bhushan     if (n >= 0) {
143888365d17SBharat Bhushan         *flag = BP_MEM_ACCESS;
143988365d17SBharat Bhushan         return n;
144088365d17SBharat Bhushan     }
144188365d17SBharat Bhushan 
144288365d17SBharat Bhushan     n = find_hw_breakpoint(addr, GDB_WATCHPOINT_WRITE);
144388365d17SBharat Bhushan     if (n >= 0) {
144488365d17SBharat Bhushan         *flag = BP_MEM_WRITE;
144588365d17SBharat Bhushan         return n;
144688365d17SBharat Bhushan     }
144788365d17SBharat Bhushan 
144888365d17SBharat Bhushan     n = find_hw_breakpoint(addr, GDB_WATCHPOINT_READ);
144988365d17SBharat Bhushan     if (n >= 0) {
145088365d17SBharat Bhushan         *flag = BP_MEM_READ;
145188365d17SBharat Bhushan         return n;
145288365d17SBharat Bhushan     }
145388365d17SBharat Bhushan 
145488365d17SBharat Bhushan     return -1;
145588365d17SBharat Bhushan }
145688365d17SBharat Bhushan 
1457b8a6eb18SAnton Johansson int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
145888365d17SBharat Bhushan {
1459b8a6eb18SAnton Johansson     const unsigned breakpoint_index = nb_hw_breakpoint + nb_hw_watchpoint;
1460b8a6eb18SAnton Johansson     if (breakpoint_index >= ARRAY_SIZE(hw_debug_points)) {
146188365d17SBharat Bhushan         return -ENOBUFS;
146288365d17SBharat Bhushan     }
146388365d17SBharat Bhushan 
1464b8a6eb18SAnton Johansson     hw_debug_points[breakpoint_index].addr = addr;
1465b8a6eb18SAnton Johansson     hw_debug_points[breakpoint_index].type = type;
146688365d17SBharat Bhushan 
146788365d17SBharat Bhushan     switch (type) {
146888365d17SBharat Bhushan     case GDB_BREAKPOINT_HW:
146988365d17SBharat Bhushan         if (nb_hw_breakpoint >= max_hw_breakpoint) {
147088365d17SBharat Bhushan             return -ENOBUFS;
147188365d17SBharat Bhushan         }
147288365d17SBharat Bhushan 
147388365d17SBharat Bhushan         if (find_hw_breakpoint(addr, type) >= 0) {
147488365d17SBharat Bhushan             return -EEXIST;
147588365d17SBharat Bhushan         }
147688365d17SBharat Bhushan 
147788365d17SBharat Bhushan         nb_hw_breakpoint++;
147888365d17SBharat Bhushan         break;
147988365d17SBharat Bhushan 
148088365d17SBharat Bhushan     case GDB_WATCHPOINT_WRITE:
148188365d17SBharat Bhushan     case GDB_WATCHPOINT_READ:
148288365d17SBharat Bhushan     case GDB_WATCHPOINT_ACCESS:
148388365d17SBharat Bhushan         if (nb_hw_watchpoint >= max_hw_watchpoint) {
148488365d17SBharat Bhushan             return -ENOBUFS;
148588365d17SBharat Bhushan         }
148688365d17SBharat Bhushan 
148788365d17SBharat Bhushan         if (find_hw_breakpoint(addr, type) >= 0) {
148888365d17SBharat Bhushan             return -EEXIST;
148988365d17SBharat Bhushan         }
149088365d17SBharat Bhushan 
149188365d17SBharat Bhushan         nb_hw_watchpoint++;
149288365d17SBharat Bhushan         break;
149388365d17SBharat Bhushan 
149488365d17SBharat Bhushan     default:
149588365d17SBharat Bhushan         return -ENOSYS;
149688365d17SBharat Bhushan     }
149788365d17SBharat Bhushan 
149888365d17SBharat Bhushan     return 0;
149988365d17SBharat Bhushan }
150088365d17SBharat Bhushan 
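/*
 * Editor's note: illustrative sketch, not part of the original file.
 * The gdbstub drives kvm_arch_insert_hw_breakpoint() with GDB_* type codes;
 * the addresses and the helper name below are made up for illustration.
 */
#if 0 /* hedged example, not compiled */
static void example_insert_debug_points(void)
{
    /* one instruction breakpoint and one 8-byte write watchpoint */
    kvm_arch_insert_hw_breakpoint(0x100, 4, GDB_BREAKPOINT_HW);
    kvm_arch_insert_hw_breakpoint(0x2000, 8, GDB_WATCHPOINT_WRITE);
}
#endif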
1501b8a6eb18SAnton Johansson int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
150288365d17SBharat Bhushan {
150388365d17SBharat Bhushan     int n;
150488365d17SBharat Bhushan 
150588365d17SBharat Bhushan     n = find_hw_breakpoint(addr, type);
150688365d17SBharat Bhushan     if (n < 0) {
150788365d17SBharat Bhushan         return -ENOENT;
150888365d17SBharat Bhushan     }
150988365d17SBharat Bhushan 
151088365d17SBharat Bhushan     switch (type) {
151188365d17SBharat Bhushan     case GDB_BREAKPOINT_HW:
151288365d17SBharat Bhushan         nb_hw_breakpoint--;
151388365d17SBharat Bhushan         break;
151488365d17SBharat Bhushan 
151588365d17SBharat Bhushan     case GDB_WATCHPOINT_WRITE:
151688365d17SBharat Bhushan     case GDB_WATCHPOINT_READ:
151788365d17SBharat Bhushan     case GDB_WATCHPOINT_ACCESS:
151888365d17SBharat Bhushan         nb_hw_watchpoint--;
151988365d17SBharat Bhushan         break;
152088365d17SBharat Bhushan 
152188365d17SBharat Bhushan     default:
152288365d17SBharat Bhushan         return -ENOSYS;
152388365d17SBharat Bhushan     }
152488365d17SBharat Bhushan     hw_debug_points[n] = hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint];
152588365d17SBharat Bhushan 
152688365d17SBharat Bhushan     return 0;
152788365d17SBharat Bhushan }
152888365d17SBharat Bhushan 
152988365d17SBharat Bhushan void kvm_arch_remove_all_hw_breakpoints(void)
153088365d17SBharat Bhushan {
153188365d17SBharat Bhushan     nb_hw_breakpoint = nb_hw_watchpoint = 0;
153288365d17SBharat Bhushan }
153388365d17SBharat Bhushan 
15348a0548f9SBharat Bhushan void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
15358a0548f9SBharat Bhushan {
153688365d17SBharat Bhushan     int n;
153788365d17SBharat Bhushan 
15388a0548f9SBharat Bhushan     /* Software Breakpoint updates */
15398a0548f9SBharat Bhushan     if (kvm_sw_breakpoints_active(cs)) {
15408a0548f9SBharat Bhushan         dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
15418a0548f9SBharat Bhushan     }
154288365d17SBharat Bhushan 
154388365d17SBharat Bhushan     assert((nb_hw_breakpoint + nb_hw_watchpoint)
154488365d17SBharat Bhushan            <= ARRAY_SIZE(hw_debug_points));
154588365d17SBharat Bhushan     assert((nb_hw_breakpoint + nb_hw_watchpoint) <= ARRAY_SIZE(dbg->arch.bp));
154688365d17SBharat Bhushan 
154788365d17SBharat Bhushan     if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
154888365d17SBharat Bhushan         dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
154988365d17SBharat Bhushan         memset(dbg->arch.bp, 0, sizeof(dbg->arch.bp));
155088365d17SBharat Bhushan         for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
155188365d17SBharat Bhushan             switch (hw_debug_points[n].type) {
155288365d17SBharat Bhushan             case GDB_BREAKPOINT_HW:
155388365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_BREAKPOINT;
155488365d17SBharat Bhushan                 break;
155588365d17SBharat Bhushan             case GDB_WATCHPOINT_WRITE:
155688365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE;
155788365d17SBharat Bhushan                 break;
155888365d17SBharat Bhushan             case GDB_WATCHPOINT_READ:
155988365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_READ;
156088365d17SBharat Bhushan                 break;
156188365d17SBharat Bhushan             case GDB_WATCHPOINT_ACCESS:
156288365d17SBharat Bhushan                 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE |
156388365d17SBharat Bhushan                                         KVMPPC_DEBUG_WATCH_READ;
156488365d17SBharat Bhushan                 break;
156588365d17SBharat Bhushan             default:
156688365d17SBharat Bhushan                 cpu_abort(cs, "Unsupported breakpoint type\n");
156788365d17SBharat Bhushan             }
156888365d17SBharat Bhushan             dbg->arch.bp[n].addr = hw_debug_points[n].addr;
156988365d17SBharat Bhushan         }
157088365d17SBharat Bhushan     }
15718a0548f9SBharat Bhushan }
15728a0548f9SBharat Bhushan 
15732cbd1581SFabiano Rosas static int kvm_handle_hw_breakpoint(CPUState *cs,
15742cbd1581SFabiano Rosas                                     struct kvm_debug_exit_arch *arch_info)
15758a0548f9SBharat Bhushan {
15766e0552a3SFabiano Rosas     int handle = DEBUG_RETURN_GUEST;
157788365d17SBharat Bhushan     int n;
157888365d17SBharat Bhushan     int flag = 0;
15798a0548f9SBharat Bhushan 
158088365d17SBharat Bhushan     if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
158188365d17SBharat Bhushan         if (arch_info->status & KVMPPC_DEBUG_BREAKPOINT) {
158288365d17SBharat Bhushan             n = find_hw_breakpoint(arch_info->address, GDB_BREAKPOINT_HW);
158388365d17SBharat Bhushan             if (n >= 0) {
15846e0552a3SFabiano Rosas                 handle = DEBUG_RETURN_GDB;
158588365d17SBharat Bhushan             }
158688365d17SBharat Bhushan         } else if (arch_info->status & (KVMPPC_DEBUG_WATCH_READ |
158788365d17SBharat Bhushan                                         KVMPPC_DEBUG_WATCH_WRITE)) {
158888365d17SBharat Bhushan             n = find_hw_watchpoint(arch_info->address,  &flag);
158988365d17SBharat Bhushan             if (n >= 0) {
15906e0552a3SFabiano Rosas                 handle = DEBUG_RETURN_GDB;
159188365d17SBharat Bhushan                 cs->watchpoint_hit = &hw_watchpoint;
159288365d17SBharat Bhushan                 hw_watchpoint.vaddr = hw_debug_points[n].addr;
159388365d17SBharat Bhushan                 hw_watchpoint.flags = flag;
159488365d17SBharat Bhushan             }
159588365d17SBharat Bhushan         }
159688365d17SBharat Bhushan     }
15972cbd1581SFabiano Rosas     return handle;
15982cbd1581SFabiano Rosas }
15992cbd1581SFabiano Rosas 
1600468e3a1aSFabiano Rosas static int kvm_handle_singlestep(void)
1601468e3a1aSFabiano Rosas {
16026e0552a3SFabiano Rosas     return DEBUG_RETURN_GDB;
1603468e3a1aSFabiano Rosas }
1604468e3a1aSFabiano Rosas 
1605468e3a1aSFabiano Rosas static int kvm_handle_sw_breakpoint(void)
1606468e3a1aSFabiano Rosas {
16076e0552a3SFabiano Rosas     return DEBUG_RETURN_GDB;
1608468e3a1aSFabiano Rosas }
1609468e3a1aSFabiano Rosas 
16102cbd1581SFabiano Rosas static int kvm_handle_debug(PowerPCCPU *cpu, struct kvm_run *run)
16112cbd1581SFabiano Rosas {
16122cbd1581SFabiano Rosas     CPUState *cs = CPU(cpu);
16132cbd1581SFabiano Rosas     CPUPPCState *env = &cpu->env;
16142cbd1581SFabiano Rosas     struct kvm_debug_exit_arch *arch_info = &run->debug.arch;
16152cbd1581SFabiano Rosas 
16162cbd1581SFabiano Rosas     if (cs->singlestep_enabled) {
1617468e3a1aSFabiano Rosas         return kvm_handle_singlestep();
1618468e3a1aSFabiano Rosas     }
1619468e3a1aSFabiano Rosas 
1620468e3a1aSFabiano Rosas     if (arch_info->status) {
1621468e3a1aSFabiano Rosas         return kvm_handle_hw_breakpoint(cs, arch_info);
1622468e3a1aSFabiano Rosas     }
1623468e3a1aSFabiano Rosas 
1624468e3a1aSFabiano Rosas     if (kvm_find_sw_breakpoint(cs, arch_info->address)) {
1625468e3a1aSFabiano Rosas         return kvm_handle_sw_breakpoint();
1626468e3a1aSFabiano Rosas     }
1627468e3a1aSFabiano Rosas 
1628468e3a1aSFabiano Rosas     /*
1629468e3a1aSFabiano Rosas      * QEMU is not able to handle the debug exception, so inject
16308a0548f9SBharat Bhushan      * a program exception into the guest;
16318a0548f9SBharat Bhushan      * yes, a program exception, NOT a debug exception!
163288365d17SBharat Bhushan      * When QEMU is using the debug resources then the debug exception
163388365d17SBharat Bhushan      * must always be set. To achieve this we set MSR_DE and also set
163488365d17SBharat Bhushan      * MSRP_DEP so the guest cannot change MSR_DE.
163588365d17SBharat Bhushan      * When emulating debug resources for the guest we want the guest
163688365d17SBharat Bhushan      * to control MSR_DE (enable/disable the debug interrupt as needed).
163788365d17SBharat Bhushan      * Supporting both configurations is NOT possible.
163888365d17SBharat Bhushan      * The result is that we cannot share debug resources
163988365d17SBharat Bhushan      * between QEMU and the guest on the BookE architecture.
164088365d17SBharat Bhushan      * In the current design QEMU gets priority over the guest;
164188365d17SBharat Bhushan      * this means that if QEMU is using the debug resources then the
164288365d17SBharat Bhushan      * guest cannot use them.
16438a0548f9SBharat Bhushan      * For software breakpoints QEMU uses a privileged instruction,
16448a0548f9SBharat Bhushan      * so there is no way we can be here because the guest raised a
16458a0548f9SBharat Bhushan      * debug exception on purpose; the only possibility is that the
16468a0548f9SBharat Bhushan      * guest executed a privileged / illegal instruction, which is
16478a0548f9SBharat Bhushan      * why we inject a program interrupt.
16488a0548f9SBharat Bhushan      */
16498a0548f9SBharat Bhushan     cpu_synchronize_state(cs);
1650468e3a1aSFabiano Rosas     /*
1651468e3a1aSFabiano Rosas      * env->nip is PC, so increment this by 4 to use
16528a0548f9SBharat Bhushan      * ppc_cpu_do_interrupt(), which sets srr0 = env->nip - 4.
16538a0548f9SBharat Bhushan      */
16548a0548f9SBharat Bhushan     env->nip += 4;
16558a0548f9SBharat Bhushan     cs->exception_index = POWERPC_EXCP_PROGRAM;
16568a0548f9SBharat Bhushan     env->error_code = POWERPC_EXCP_INVAL;
16578a0548f9SBharat Bhushan     ppc_cpu_do_interrupt(cs);
16588a0548f9SBharat Bhushan 
16596e0552a3SFabiano Rosas     return DEBUG_RETURN_GUEST;
16608a0548f9SBharat Bhushan }
16618a0548f9SBharat Bhushan 
166220d695a9SAndreas Färber int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
1663d76d1650Saurel32 {
166420d695a9SAndreas Färber     PowerPCCPU *cpu = POWERPC_CPU(cs);
166520d695a9SAndreas Färber     CPUPPCState *env = &cpu->env;
1666bb4ea393SJan Kiszka     int ret;
1667d76d1650Saurel32 
1668195801d7SStefan Hajnoczi     bql_lock();
16694b8523eeSJan Kiszka 
1670d76d1650Saurel32     switch (run->exit_reason) {
1671d76d1650Saurel32     case KVM_EXIT_DCR:
1672d76d1650Saurel32         if (run->dcr.is_write) {
16738d83cbf1SGreg Kurz             trace_kvm_handle_dcr_write();
1674d76d1650Saurel32             ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data);
1675d76d1650Saurel32         } else {
1676228152c2SBoxuan Li             trace_kvm_handle_dcr_read();
1677d76d1650Saurel32             ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data);
1678d76d1650Saurel32         }
1679d76d1650Saurel32         break;
1680d76d1650Saurel32     case KVM_EXIT_HLT:
16818d83cbf1SGreg Kurz         trace_kvm_handle_halt();
1682259186a7SAndreas Färber         ret = kvmppc_handle_halt(cpu);
1683d76d1650Saurel32         break;
1684566abdb4SPaolo Bonzini #if defined(CONFIG_PSERIES)
1685f61b4bedSAlexander Graf     case KVM_EXIT_PAPR_HCALL:
1686f290a238SFabiano Rosas         trace_kvm_handle_papr_hcall(run->papr_hcall.nr);
168720d695a9SAndreas Färber         run->papr_hcall.ret = spapr_hypercall(cpu,
1688aa100fa4SAndreas Färber                                               run->papr_hcall.nr,
1689f61b4bedSAlexander Graf                                               run->papr_hcall.args);
169078e8fde2SDavid Gibson         ret = 0;
1691f61b4bedSAlexander Graf         break;
1692f61b4bedSAlexander Graf #endif
16935b95b8b9SAlexander Graf     case KVM_EXIT_EPR:
16948d83cbf1SGreg Kurz         trace_kvm_handle_epr();
1695933b19eaSAlexander Graf         run->epr.epr = ldl_phys(cs->as, env->mpic_iack);
16965b95b8b9SAlexander Graf         ret = 0;
16975b95b8b9SAlexander Graf         break;
169831f2cb8fSBharat Bhushan     case KVM_EXIT_WATCHDOG:
16998d83cbf1SGreg Kurz         trace_kvm_handle_watchdog_expiry();
170031f2cb8fSBharat Bhushan         watchdog_perform_action();
170131f2cb8fSBharat Bhushan         ret = 0;
170231f2cb8fSBharat Bhushan         break;
170331f2cb8fSBharat Bhushan 
17048a0548f9SBharat Bhushan     case KVM_EXIT_DEBUG:
17058d83cbf1SGreg Kurz         trace_kvm_handle_debug_exception();
17068a0548f9SBharat Bhushan         if (kvm_handle_debug(cpu, run)) {
17078a0548f9SBharat Bhushan             ret = EXCP_DEBUG;
17088a0548f9SBharat Bhushan             break;
17098a0548f9SBharat Bhushan         }
17108a0548f9SBharat Bhushan         /* re-enter, this exception was guest-internal */
17118a0548f9SBharat Bhushan         ret = 0;
17128a0548f9SBharat Bhushan         break;
17138a0548f9SBharat Bhushan 
1714566abdb4SPaolo Bonzini #if defined(CONFIG_PSERIES)
17159ac703acSAravinda Prasad     case KVM_EXIT_NMI:
17169ac703acSAravinda Prasad         trace_kvm_handle_nmi_exception();
17179ac703acSAravinda Prasad         ret = kvm_handle_nmi(cpu, run);
17189ac703acSAravinda Prasad         break;
17199ac703acSAravinda Prasad #endif
17209ac703acSAravinda Prasad 
172173aaec4aSJan Kiszka     default:
172273aaec4aSJan Kiszka         fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
172373aaec4aSJan Kiszka         ret = -1;
172473aaec4aSJan Kiszka         break;
1725d76d1650Saurel32     }
1726d76d1650Saurel32 
1727195801d7SStefan Hajnoczi     bql_unlock();
1728d76d1650Saurel32     return ret;
1729d76d1650Saurel32 }
1730d76d1650Saurel32 
173131f2cb8fSBharat Bhushan int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
173231f2cb8fSBharat Bhushan {
173331f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
173431f2cb8fSBharat Bhushan     uint32_t bits = tsr_bits;
173531f2cb8fSBharat Bhushan     struct kvm_one_reg reg = {
173631f2cb8fSBharat Bhushan         .id = KVM_REG_PPC_OR_TSR,
173731f2cb8fSBharat Bhushan         .addr = (uintptr_t) &bits,
173831f2cb8fSBharat Bhushan     };
173931f2cb8fSBharat Bhushan 
1740c4550e6eSCédric Le Goater     if (!kvm_enabled()) {
1741c4550e6eSCédric Le Goater         return 0;
1742c4550e6eSCédric Le Goater     }
1743c4550e6eSCédric Le Goater 
174431f2cb8fSBharat Bhushan     return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
174531f2cb8fSBharat Bhushan }
174631f2cb8fSBharat Bhushan 
174731f2cb8fSBharat Bhushan int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
174831f2cb8fSBharat Bhushan {
174931f2cb8fSBharat Bhushan 
175031f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
175131f2cb8fSBharat Bhushan     uint32_t bits = tsr_bits;
175231f2cb8fSBharat Bhushan     struct kvm_one_reg reg = {
175331f2cb8fSBharat Bhushan         .id = KVM_REG_PPC_CLEAR_TSR,
175431f2cb8fSBharat Bhushan         .addr = (uintptr_t) &bits,
175531f2cb8fSBharat Bhushan     };
175631f2cb8fSBharat Bhushan 
1757c4550e6eSCédric Le Goater     if (!kvm_enabled()) {
1758c4550e6eSCédric Le Goater         return 0;
1759c4550e6eSCédric Le Goater     }
1760c4550e6eSCédric Le Goater 
176131f2cb8fSBharat Bhushan     return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
176231f2cb8fSBharat Bhushan }
176331f2cb8fSBharat Bhushan 
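/*
 * Editor's note: illustrative sketch, not part of the original file.
 * KVM_REG_PPC_OR_TSR / KVM_REG_PPC_CLEAR_TSR let QEMU set or clear individual
 * TSR bits without a full read-modify-write; real callers pass architected
 * BookE TSR status bits (e.g. the watchdog flags).  The helper name below is
 * made up for illustration.
 */
#if 0 /* hedged example, not compiled */
static int example_ack_tsr_status(PowerPCCPU *cpu, uint32_t tsr_mask)
{
    /* clear the given status bits, e.g. after servicing a watchdog event */
    return kvmppc_clear_tsr_bits(cpu, tsr_mask);
}
#endif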
176431f2cb8fSBharat Bhushan int kvmppc_set_tcr(PowerPCCPU *cpu)
176531f2cb8fSBharat Bhushan {
176631f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
176731f2cb8fSBharat Bhushan     CPUPPCState *env = &cpu->env;
176831f2cb8fSBharat Bhushan     uint32_t tcr = env->spr[SPR_BOOKE_TCR];
176931f2cb8fSBharat Bhushan 
177031f2cb8fSBharat Bhushan     struct kvm_one_reg reg = {
177131f2cb8fSBharat Bhushan         .id = KVM_REG_PPC_TCR,
177231f2cb8fSBharat Bhushan         .addr = (uintptr_t) &tcr,
177331f2cb8fSBharat Bhushan     };
177431f2cb8fSBharat Bhushan 
1775c4550e6eSCédric Le Goater     if (!kvm_enabled()) {
1776c4550e6eSCédric Le Goater         return 0;
1777c4550e6eSCédric Le Goater     }
1778c4550e6eSCédric Le Goater 
177931f2cb8fSBharat Bhushan     return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
178031f2cb8fSBharat Bhushan }
178131f2cb8fSBharat Bhushan 
178231f2cb8fSBharat Bhushan int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu)
178331f2cb8fSBharat Bhushan {
178431f2cb8fSBharat Bhushan     CPUState *cs = CPU(cpu);
178531f2cb8fSBharat Bhushan     int ret;
178631f2cb8fSBharat Bhushan 
178731f2cb8fSBharat Bhushan     if (!kvm_enabled()) {
178831f2cb8fSBharat Bhushan         return -1;
178931f2cb8fSBharat Bhushan     }
179031f2cb8fSBharat Bhushan 
179131f2cb8fSBharat Bhushan     if (!cap_ppc_watchdog) {
179231f2cb8fSBharat Bhushan         printf("warning: KVM does not support watchdog\n");
179331f2cb8fSBharat Bhushan         return -1;
179431f2cb8fSBharat Bhushan     }
179531f2cb8fSBharat Bhushan 
179648add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_BOOKE_WATCHDOG, 0);
179731f2cb8fSBharat Bhushan     if (ret < 0) {
179831f2cb8fSBharat Bhushan         fprintf(stderr, "%s: couldn't enable KVM_CAP_PPC_BOOKE_WATCHDOG: %s\n",
179931f2cb8fSBharat Bhushan                 __func__, strerror(-ret));
180031f2cb8fSBharat Bhushan         return ret;
180131f2cb8fSBharat Bhushan     }
180231f2cb8fSBharat Bhushan 
180331f2cb8fSBharat Bhushan     return ret;
180431f2cb8fSBharat Bhushan }
180531f2cb8fSBharat Bhushan 
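/*
 * Editor's note: illustrative sketch, not part of the original file.
 * A plausible BookE watchdog setup sequence: enable the per-vcpu capability
 * first, then push the guest's TCR so KVM can program its timers.  The helper
 * name below is made up for illustration.
 */
#if 0 /* hedged example, not compiled */
static void example_arm_booke_watchdog(PowerPCCPU *cpu)
{
    if (kvmppc_booke_watchdog_enable(cpu) == 0) {
        kvmppc_set_tcr(cpu);
    }
}
#endif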
1806dc333cd6SAlexander Graf static int read_cpuinfo(const char *field, char *value, int len)
1807dc333cd6SAlexander Graf {
1808dc333cd6SAlexander Graf     FILE *f;
1809dc333cd6SAlexander Graf     int ret = -1;
1810dc333cd6SAlexander Graf     int field_len = strlen(field);
1811dc333cd6SAlexander Graf     char line[512];
1812dc333cd6SAlexander Graf 
1813dc333cd6SAlexander Graf     f = fopen("/proc/cpuinfo", "r");
1814dc333cd6SAlexander Graf     if (!f) {
1815dc333cd6SAlexander Graf         return -1;
1816dc333cd6SAlexander Graf     }
1817dc333cd6SAlexander Graf 
1818dc333cd6SAlexander Graf     do {
1819dc333cd6SAlexander Graf         if (!fgets(line, sizeof(line), f)) {
1820dc333cd6SAlexander Graf             break;
1821dc333cd6SAlexander Graf         }
1822dc333cd6SAlexander Graf         if (!strncmp(line, field, field_len)) {
1823ae215068SJim Meyering             pstrcpy(value, len, line);
1824dc333cd6SAlexander Graf             ret = 0;
1825dc333cd6SAlexander Graf             break;
1826dc333cd6SAlexander Graf         }
1827dc333cd6SAlexander Graf     } while (*line);
1828dc333cd6SAlexander Graf 
1829dc333cd6SAlexander Graf     fclose(f);
1830dc333cd6SAlexander Graf 
1831dc333cd6SAlexander Graf     return ret;
1832dc333cd6SAlexander Graf }
1833dc333cd6SAlexander Graf 
18349cbcfb59SGreg Kurz static uint32_t kvmppc_get_tbfreq_procfs(void)
1835dc333cd6SAlexander Graf {
1836dc333cd6SAlexander Graf     char line[512];
1837dc333cd6SAlexander Graf     char *ns;
18389cbcfb59SGreg Kurz     uint32_t tbfreq_fallback = NANOSECONDS_PER_SECOND;
18399cbcfb59SGreg Kurz     uint32_t tbfreq_procfs;
1840dc333cd6SAlexander Graf 
1841dc333cd6SAlexander Graf     if (read_cpuinfo("timebase", line, sizeof(line))) {
18429cbcfb59SGreg Kurz         return tbfreq_fallback;
1843dc333cd6SAlexander Graf     }
1844dc333cd6SAlexander Graf 
1845c995e942SDavid Gibson     ns = strchr(line, ':');
1846c995e942SDavid Gibson     if (!ns) {
18479cbcfb59SGreg Kurz         return tbfreq_fallback;
1848dc333cd6SAlexander Graf     }
1849dc333cd6SAlexander Graf 
18509cbcfb59SGreg Kurz     tbfreq_procfs = atoi(++ns);
1851dc333cd6SAlexander Graf 
18529cbcfb59SGreg Kurz     /* 0 is certainly not acceptable to the guest, return the fallback value */
18539cbcfb59SGreg Kurz     return tbfreq_procfs ? tbfreq_procfs : tbfreq_fallback;
18549cbcfb59SGreg Kurz }
18559cbcfb59SGreg Kurz 
18569cbcfb59SGreg Kurz uint32_t kvmppc_get_tbfreq(void)
18579cbcfb59SGreg Kurz {
18589cbcfb59SGreg Kurz     static uint32_t cached_tbfreq;
18599cbcfb59SGreg Kurz 
18609cbcfb59SGreg Kurz     if (!cached_tbfreq) {
18619cbcfb59SGreg Kurz         cached_tbfreq = kvmppc_get_tbfreq_procfs();
18629cbcfb59SGreg Kurz     }
18639cbcfb59SGreg Kurz 
18649cbcfb59SGreg Kurz     return cached_tbfreq;
1865ef951443SNikunj A Dadhania }
1866ef951443SNikunj A Dadhania 
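/*
 * Editor's note: illustrative sketch, not part of the original file.
 * kvmppc_get_tbfreq() parses /proc/cpuinfo once and caches the result, so
 * repeated calls are cheap.  The helper name below is made up.
 */
#if 0 /* hedged example, not compiled */
static void example_report_tbfreq(void)
{
    fprintf(stderr, "host timebase frequency: %u Hz\n", kvmppc_get_tbfreq());
}
#endif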
1867ef951443SNikunj A Dadhania bool kvmppc_get_host_serial(char **value)
1868ef951443SNikunj A Dadhania {
1869ef951443SNikunj A Dadhania     return g_file_get_contents("/proc/device-tree/system-id", value, NULL,
1870ef951443SNikunj A Dadhania                                NULL);
1871ef951443SNikunj A Dadhania }
1872ef951443SNikunj A Dadhania 
1873ef951443SNikunj A Dadhania bool kvmppc_get_host_model(char **value)
1874ef951443SNikunj A Dadhania {
1875ef951443SNikunj A Dadhania     return g_file_get_contents("/proc/device-tree/model", value, NULL, NULL);
1876dc333cd6SAlexander Graf }
18774513d923SGleb Natapov 
1878eadaada1SAlexander Graf /* Try to find a device tree node for a CPU with clock-frequency property */
1879eadaada1SAlexander Graf static int kvmppc_find_cpu_dt(char *buf, int buf_len)
1880eadaada1SAlexander Graf {
1881eadaada1SAlexander Graf     struct dirent *dirp;
1882eadaada1SAlexander Graf     DIR *dp;
1883eadaada1SAlexander Graf 
1884c995e942SDavid Gibson     dp = opendir(PROC_DEVTREE_CPU);
1885c995e942SDavid Gibson     if (!dp) {
1886eadaada1SAlexander Graf         printf("Can't open directory " PROC_DEVTREE_CPU "\n");
1887eadaada1SAlexander Graf         return -1;
1888eadaada1SAlexander Graf     }
1889eadaada1SAlexander Graf 
1890eadaada1SAlexander Graf     buf[0] = '\0';
1891eadaada1SAlexander Graf     while ((dirp = readdir(dp)) != NULL) {
1892eadaada1SAlexander Graf         FILE *f;
18931a42c692SMurilo Opsfelder Araujo 
18941a42c692SMurilo Opsfelder Araujo         /* Don't accidentally read from the current and parent directories */
18951a42c692SMurilo Opsfelder Araujo         if (strcmp(dirp->d_name, ".") == 0 || strcmp(dirp->d_name, "..") == 0) {
18961a42c692SMurilo Opsfelder Araujo             continue;
18971a42c692SMurilo Opsfelder Araujo         }
18981a42c692SMurilo Opsfelder Araujo 
1899eadaada1SAlexander Graf         snprintf(buf, buf_len, "%s%s/clock-frequency", PROC_DEVTREE_CPU,
1900eadaada1SAlexander Graf                  dirp->d_name);
1901eadaada1SAlexander Graf         f = fopen(buf, "r");
1902eadaada1SAlexander Graf         if (f) {
1903eadaada1SAlexander Graf             snprintf(buf, buf_len, "%s%s", PROC_DEVTREE_CPU, dirp->d_name);
1904eadaada1SAlexander Graf             fclose(f);
1905eadaada1SAlexander Graf             break;
1906eadaada1SAlexander Graf         }
1907eadaada1SAlexander Graf         buf[0] = '\0';
1908eadaada1SAlexander Graf     }
1909eadaada1SAlexander Graf     closedir(dp);
1910eadaada1SAlexander Graf     if (buf[0] == '\0') {
1911eadaada1SAlexander Graf         printf("Unknown host!\n");
1912eadaada1SAlexander Graf         return -1;
1913eadaada1SAlexander Graf     }
1914eadaada1SAlexander Graf 
1915eadaada1SAlexander Graf     return 0;
1916eadaada1SAlexander Graf }
1917eadaada1SAlexander Graf 
19187d94a30bSSukadev Bhattiprolu static uint64_t kvmppc_read_int_dt(const char *filename)
1919eadaada1SAlexander Graf {
19209bc884b7SDavid Gibson     union {
19219bc884b7SDavid Gibson         uint32_t v32;
19229bc884b7SDavid Gibson         uint64_t v64;
19239bc884b7SDavid Gibson     } u;
1924eadaada1SAlexander Graf     FILE *f;
1925eadaada1SAlexander Graf     int len;
1926eadaada1SAlexander Graf 
19277d94a30bSSukadev Bhattiprolu     f = fopen(filename, "rb");
1928eadaada1SAlexander Graf     if (!f) {
1929eadaada1SAlexander Graf         return -1;
1930eadaada1SAlexander Graf     }
1931eadaada1SAlexander Graf 
19329bc884b7SDavid Gibson     len = fread(&u, 1, sizeof(u), f);
1933eadaada1SAlexander Graf     fclose(f);
1934eadaada1SAlexander Graf     switch (len) {
19359bc884b7SDavid Gibson     case 4:
19369bc884b7SDavid Gibson         /* property is a 32-bit quantity */
19379bc884b7SDavid Gibson         return be32_to_cpu(u.v32);
19389bc884b7SDavid Gibson     case 8:
19399bc884b7SDavid Gibson         return be64_to_cpu(u.v64);
1940eadaada1SAlexander Graf     }
1941eadaada1SAlexander Graf 
1942eadaada1SAlexander Graf     return 0;
1943eadaada1SAlexander Graf }
1944eadaada1SAlexander Graf 
1945c995e942SDavid Gibson /*
1946c995e942SDavid Gibson  * Read a CPU node property from the host device tree that's a single
19477d94a30bSSukadev Bhattiprolu  * integer (32-bit or 64-bit).  Returns 0 if anything goes wrong
1948c995e942SDavid Gibson  * (can't find or open the property, or doesn't understand the format)
1949c995e942SDavid Gibson  */
19507d94a30bSSukadev Bhattiprolu static uint64_t kvmppc_read_int_cpu_dt(const char *propname)
19517d94a30bSSukadev Bhattiprolu {
19527d94a30bSSukadev Bhattiprolu     char buf[PATH_MAX], *tmp;
19537d94a30bSSukadev Bhattiprolu     uint64_t val;
19547d94a30bSSukadev Bhattiprolu 
19557d94a30bSSukadev Bhattiprolu     if (kvmppc_find_cpu_dt(buf, sizeof(buf))) {
19567d94a30bSSukadev Bhattiprolu         return -1;
19577d94a30bSSukadev Bhattiprolu     }
19587d94a30bSSukadev Bhattiprolu 
19597d94a30bSSukadev Bhattiprolu     tmp = g_strdup_printf("%s/%s", buf, propname);
19607d94a30bSSukadev Bhattiprolu     val = kvmppc_read_int_dt(tmp);
19617d94a30bSSukadev Bhattiprolu     g_free(tmp);
19627d94a30bSSukadev Bhattiprolu 
19637d94a30bSSukadev Bhattiprolu     return val;
19647d94a30bSSukadev Bhattiprolu }
19657d94a30bSSukadev Bhattiprolu 
19669bc884b7SDavid Gibson uint64_t kvmppc_get_clockfreq(void)
19679bc884b7SDavid Gibson {
19689bc884b7SDavid Gibson     return kvmppc_read_int_cpu_dt("clock-frequency");
19699bc884b7SDavid Gibson }
19709bc884b7SDavid Gibson 
19717d050527SSuraj Jitindar Singh static int kvmppc_get_dec_bits(void)
19727d050527SSuraj Jitindar Singh {
19737d050527SSuraj Jitindar Singh     int nr_bits = kvmppc_read_int_cpu_dt("ibm,dec-bits");
19747d050527SSuraj Jitindar Singh 
19757d050527SSuraj Jitindar Singh     if (nr_bits > 0) {
19767d050527SSuraj Jitindar Singh         return nr_bits;
19777d050527SSuraj Jitindar Singh     }
19787d050527SSuraj Jitindar Singh     return 0;
19797d050527SSuraj Jitindar Singh }
19807d050527SSuraj Jitindar Singh 
19811a61a9aeSStuart Yoder static int kvmppc_get_pvinfo(CPUPPCState *env, struct kvm_ppc_pvinfo *pvinfo)
198245024f09SAlexander Graf {
1983db70b311SRichard Henderson     CPUState *cs = env_cpu(env);
198445024f09SAlexander Graf 
19856fd33a75SAlexander Graf     if (kvm_vm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
19861a61a9aeSStuart Yoder         !kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_PVINFO, pvinfo)) {
19871a61a9aeSStuart Yoder         return 0;
19881a61a9aeSStuart Yoder     }
198945024f09SAlexander Graf 
19901a61a9aeSStuart Yoder     return 1;
19911a61a9aeSStuart Yoder }
19921a61a9aeSStuart Yoder 
19931a61a9aeSStuart Yoder int kvmppc_get_hasidle(CPUPPCState *env)
19941a61a9aeSStuart Yoder {
19951a61a9aeSStuart Yoder     struct kvm_ppc_pvinfo pvinfo;
19961a61a9aeSStuart Yoder 
19971a61a9aeSStuart Yoder     if (!kvmppc_get_pvinfo(env, &pvinfo) &&
19981a61a9aeSStuart Yoder         (pvinfo.flags & KVM_PPC_PVINFO_FLAGS_EV_IDLE)) {
19991a61a9aeSStuart Yoder         return 1;
20001a61a9aeSStuart Yoder     }
20011a61a9aeSStuart Yoder 
20021a61a9aeSStuart Yoder     return 0;
20031a61a9aeSStuart Yoder }
20041a61a9aeSStuart Yoder 
20051a61a9aeSStuart Yoder int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
20061a61a9aeSStuart Yoder {
20071a61a9aeSStuart Yoder     uint32_t *hc = (uint32_t *)buf;
20081a61a9aeSStuart Yoder     struct kvm_ppc_pvinfo pvinfo;
20091a61a9aeSStuart Yoder 
20101a61a9aeSStuart Yoder     if (!kvmppc_get_pvinfo(env, &pvinfo)) {
20111a61a9aeSStuart Yoder         memcpy(buf, pvinfo.hcall, buf_len);
201245024f09SAlexander Graf         return 0;
201345024f09SAlexander Graf     }
201445024f09SAlexander Graf 
201545024f09SAlexander Graf     /*
2016d13fc32eSAlexander Graf      * Fall back to hypercalls that always fail, regardless of endianness:
201745024f09SAlexander Graf      *
2018d13fc32eSAlexander Graf      *     tdi 0,r0,72 (becomes b .+8 in wrong endian, nop in good endian)
201945024f09SAlexander Graf      *     li r3, -1
2020d13fc32eSAlexander Graf      *     b .+8       (becomes nop in wrong endian)
2021d13fc32eSAlexander Graf      *     bswap32(li r3, -1)
202245024f09SAlexander Graf      */
202345024f09SAlexander Graf 
2024d13fc32eSAlexander Graf     hc[0] = cpu_to_be32(0x08000048);
2025d13fc32eSAlexander Graf     hc[1] = cpu_to_be32(0x3860ffff);
2026d13fc32eSAlexander Graf     hc[2] = cpu_to_be32(0x48000008);
2027d13fc32eSAlexander Graf     hc[3] = cpu_to_be32(bswap32(0x3860ffff));
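    /*
     * For reference, the words above encode the sequence in the
     * comment: 0x08000048 is "tdi 0,r0,72", 0x3860ffff is "li r3,-1"
     * and 0x48000008 is "b .+8"; the final word is the byte-swapped
     * "li r3,-1".  Whichever endianness the guest decodes the buffer
     * with, it ends up executing "li r3,-1" and falling through, so
     * the fake hypercall always returns failure.
     */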
202845024f09SAlexander Graf 
20290ddbd053SAlexey Kardashevskiy     return 1;
203045024f09SAlexander Graf }
203145024f09SAlexander Graf 
2032026bfd89SDavid Gibson static inline int kvmppc_enable_hcall(KVMState *s, target_ulong hcall)
2033026bfd89SDavid Gibson {
2034026bfd89SDavid Gibson     return kvm_vm_enable_cap(s, KVM_CAP_PPC_ENABLE_HCALL, 0, hcall, 1);
2035026bfd89SDavid Gibson }
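/*
 * kvmppc_enable_hcall() wraps KVM_CAP_PPC_ENABLE_HCALL: args[0] is the
 * sPAPR hypercall token and args[1] selects in-kernel handling (1 to
 * enable, 0 to disable), so the helpers below just pass the token of
 * each hcall they want the kernel to handle.  A minimal sketch of
 * enabling one more hcall (H_RANDOM is only an example here; the
 * guarded version actually used is kvmppc_enable_hwrng() further
 * down):
 *
 *     if (kvmppc_enable_hcall(kvm_state, H_RANDOM) < 0) {
 *         warn_report("kernel won't handle H_RANDOM, QEMU keeps it");
 *     }
 */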
2036026bfd89SDavid Gibson 
2037026bfd89SDavid Gibson void kvmppc_enable_logical_ci_hcalls(void)
2038026bfd89SDavid Gibson {
2039026bfd89SDavid Gibson     /*
2040026bfd89SDavid Gibson      * FIXME: it would be nice if we could detect the case where
2041026bfd89SDavid Gibson      * we're using a device that requires the in-kernel
2042026bfd89SDavid Gibson      * implementation of these hcalls but the kernel lacks it, and
2043026bfd89SDavid Gibson      * produce a warning in that case.
2044026bfd89SDavid Gibson      */
2045026bfd89SDavid Gibson     kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_LOAD);
2046026bfd89SDavid Gibson     kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_STORE);
2047026bfd89SDavid Gibson }
2048026bfd89SDavid Gibson 
2049ef9971ddSAlexey Kardashevskiy void kvmppc_enable_set_mode_hcall(void)
2050ef9971ddSAlexey Kardashevskiy {
2051ef9971ddSAlexey Kardashevskiy     kvmppc_enable_hcall(kvm_state, H_SET_MODE);
2052ef9971ddSAlexey Kardashevskiy }
2053ef9971ddSAlexey Kardashevskiy 
20545145ad4fSNathan Whitehorn void kvmppc_enable_clear_ref_mod_hcalls(void)
20555145ad4fSNathan Whitehorn {
20565145ad4fSNathan Whitehorn     kvmppc_enable_hcall(kvm_state, H_CLEAR_REF);
20575145ad4fSNathan Whitehorn     kvmppc_enable_hcall(kvm_state, H_CLEAR_MOD);
20585145ad4fSNathan Whitehorn }
20595145ad4fSNathan Whitehorn 
206068f9f708SSuraj Jitindar Singh void kvmppc_enable_h_page_init(void)
206168f9f708SSuraj Jitindar Singh {
206268f9f708SSuraj Jitindar Singh     kvmppc_enable_hcall(kvm_state, H_PAGE_INIT);
206368f9f708SSuraj Jitindar Singh }
206468f9f708SSuraj Jitindar Singh 
206582123b75SBharata B Rao void kvmppc_enable_h_rpt_invalidate(void)
206682123b75SBharata B Rao {
206782123b75SBharata B Rao     kvmppc_enable_hcall(kvm_state, H_RPT_INVALIDATE);
206882123b75SBharata B Rao }
206982123b75SBharata B Rao 
2070566abdb4SPaolo Bonzini #ifdef CONFIG_PSERIES
20711bc22652SAndreas Färber void kvmppc_set_papr(PowerPCCPU *cpu)
2072f61b4bedSAlexander Graf {
20731bc22652SAndreas Färber     CPUState *cs = CPU(cpu);
2074f61b4bedSAlexander Graf     int ret;
2075f61b4bedSAlexander Graf 
2076da20aed1SDavid Gibson     if (!kvm_enabled()) {
2077da20aed1SDavid Gibson         return;
2078da20aed1SDavid Gibson     }
2079da20aed1SDavid Gibson 
208048add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_PAPR, 0);
2081f61b4bedSAlexander Graf     if (ret) {
2082072ed5f2SThomas Huth         error_report("This vCPU type or KVM version does not support PAPR");
2083072ed5f2SThomas Huth         exit(1);
2084f61b4bedSAlexander Graf     }
20859b00ea49SDavid Gibson 
2086c995e942SDavid Gibson     /*
2087c995e942SDavid Gibson      * Update the capability flag so we sync the right information
2088c995e942SDavid Gibson      * with kvm
2089c995e942SDavid Gibson      */
20909b00ea49SDavid Gibson     cap_papr = 1;
2091f1af19d7SDavid Gibson }
2092566abdb4SPaolo Bonzini #endif
2093f61b4bedSAlexander Graf 
2094d6e166c0SDavid Gibson int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr)
20956db5bb0fSAlexey Kardashevskiy {
2096d6e166c0SDavid Gibson     return kvm_set_one_reg(CPU(cpu), KVM_REG_PPC_ARCH_COMPAT, &compat_pvr);
20976db5bb0fSAlexey Kardashevskiy }
20986db5bb0fSAlexey Kardashevskiy 
20995b95b8b9SAlexander Graf void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy)
21005b95b8b9SAlexander Graf {
21015b95b8b9SAlexander Graf     CPUState *cs = CPU(cpu);
21025b95b8b9SAlexander Graf     int ret;
21035b95b8b9SAlexander Graf 
210448add816SCornelia Huck     ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_EPR, 0, mpic_proxy);
21055b95b8b9SAlexander Graf     if (ret && mpic_proxy) {
2106072ed5f2SThomas Huth         error_report("This KVM version does not support EPR");
2107072ed5f2SThomas Huth         exit(1);
21085b95b8b9SAlexander Graf     }
21095b95b8b9SAlexander Graf }
21105b95b8b9SAlexander Graf 
2111ec010c00SNicholas Piggin bool kvmppc_get_fwnmi(void)
2112ec010c00SNicholas Piggin {
2113ec010c00SNicholas Piggin     return cap_fwnmi;
2114ec010c00SNicholas Piggin }
2115ec010c00SNicholas Piggin 
2116aef92d87SLaurent Vivier int kvmppc_set_fwnmi(PowerPCCPU *cpu)
21179d953ce4SAravinda Prasad {
21189d953ce4SAravinda Prasad     CPUState *cs = CPU(cpu);
21199d953ce4SAravinda Prasad 
21209d953ce4SAravinda Prasad     return kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_FWNMI, 0);
21219d953ce4SAravinda Prasad }
21229d953ce4SAravinda Prasad 
21235f361ea1SShivaprasad G Bhat bool kvmppc_has_cap_dawr1(void)
21245f361ea1SShivaprasad G Bhat {
21255f361ea1SShivaprasad G Bhat     return !!cap_dawr1;
21265f361ea1SShivaprasad G Bhat }
21275f361ea1SShivaprasad G Bhat 
21285f361ea1SShivaprasad G Bhat int kvmppc_set_cap_dawr1(int enable)
21295f361ea1SShivaprasad G Bhat {
21305f361ea1SShivaprasad G Bhat     return kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_DAWR1, 0, enable);
21315f361ea1SShivaprasad G Bhat }
21325f361ea1SShivaprasad G Bhat 
2133e97c3636SDavid Gibson int kvmppc_smt_threads(void)
2134e97c3636SDavid Gibson {
2135e97c3636SDavid Gibson     return cap_ppc_smt ? cap_ppc_smt : 1;
2136e97c3636SDavid Gibson }
2137e97c3636SDavid Gibson 
2138fa98fbfcSSam Bobroff int kvmppc_set_smt_threads(int smt)
2139fa98fbfcSSam Bobroff {
2140fa98fbfcSSam Bobroff     int ret;
2141fa98fbfcSSam Bobroff 
2142fa98fbfcSSam Bobroff     ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_SMT, 0, smt, 0);
2143fa98fbfcSSam Bobroff     if (!ret) {
2144fa98fbfcSSam Bobroff         cap_ppc_smt = smt;
2145fa98fbfcSSam Bobroff     }
2146fa98fbfcSSam Bobroff     return ret;
2147fa98fbfcSSam Bobroff }
2148fa98fbfcSSam Bobroff 
21490c115681SVladimir Sementsov-Ogievskiy void kvmppc_error_append_smt_possible_hint(Error *const *errp)
2150fa98fbfcSSam Bobroff {
2151fa98fbfcSSam Bobroff     int i;
2152fa98fbfcSSam Bobroff     GString *g;
2153fa98fbfcSSam Bobroff     char *s;
2154fa98fbfcSSam Bobroff 
2155fa98fbfcSSam Bobroff     assert(kvm_enabled());
2156fa98fbfcSSam Bobroff     if (cap_ppc_smt_possible) {
2157fa98fbfcSSam Bobroff         g = g_string_new("Available VSMT modes:");
2158fa98fbfcSSam Bobroff         for (i = 63; i >= 0; i--) {
2159fa98fbfcSSam Bobroff             if ((1UL << i) & cap_ppc_smt_possible) {
2160fa98fbfcSSam Bobroff                 g_string_append_printf(g, " %lu", (1UL << i));
2161fa98fbfcSSam Bobroff             }
2162fa98fbfcSSam Bobroff         }
2163fa98fbfcSSam Bobroff         s = g_string_free(g, false);
21641a639fdfSMarkus Armbruster         error_append_hint(errp, "%s.\n", s);
2165fa98fbfcSSam Bobroff         g_free(s);
2166fa98fbfcSSam Bobroff     } else {
21671a639fdfSMarkus Armbruster         error_append_hint(errp,
2168fa98fbfcSSam Bobroff                           "This KVM seems to be too old to support VSMT.\n");
2169fa98fbfcSSam Bobroff     }
2170fa98fbfcSSam Bobroff }
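/*
 * cap_ppc_smt_possible is a bitmask in which bit N set means the host
 * kernel accepts a VSMT mode of 2^N, which is why the loop above
 * prints (1UL << i) for every set bit.  For example a mask of 0x0f
 * (purely illustrative, not necessarily what a given host reports)
 * would yield the hint "Available VSMT modes: 8 4 2 1.".
 */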
2171fa98fbfcSSam Bobroff 
2172fa98fbfcSSam Bobroff 
21737f763a5dSDavid Gibson #ifdef TARGET_PPC64
21746a84737cSDavid Gibson uint64_t kvmppc_vrma_limit(unsigned int hash_shift)
21757f763a5dSDavid Gibson {
2176f36951c1SDavid Gibson     struct kvm_ppc_smmu_info info;
2177f36951c1SDavid Gibson     long rampagesize, best_page_shift;
2178f36951c1SDavid Gibson     int i;
2179f36951c1SDavid Gibson 
2180c995e942SDavid Gibson     /*
2181c995e942SDavid Gibson      * Find the largest hardware supported page size that's less than
2182c995e942SDavid Gibson      * or equal to the (logical) backing page size of guest RAM
2183c995e942SDavid Gibson      */
2184ab256960SGreg Kurz     kvm_get_smmu_info(&info, &error_fatal);
2185905b7ee4SDavid Hildenbrand     rampagesize = qemu_minrampagesize();
2186f36951c1SDavid Gibson     best_page_shift = 0;
2187f36951c1SDavid Gibson 
2188f36951c1SDavid Gibson     for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
2189f36951c1SDavid Gibson         struct kvm_ppc_one_seg_page_size *sps = &info.sps[i];
2190f36951c1SDavid Gibson 
2191f36951c1SDavid Gibson         if (!sps->page_shift) {
2192f36951c1SDavid Gibson             continue;
2193f36951c1SDavid Gibson         }
2194f36951c1SDavid Gibson 
2195f36951c1SDavid Gibson         if ((sps->page_shift > best_page_shift)
2196f36951c1SDavid Gibson             && ((1UL << sps->page_shift) <= rampagesize)) {
2197f36951c1SDavid Gibson             best_page_shift = sps->page_shift;
2198f36951c1SDavid Gibson         }
2199f36951c1SDavid Gibson     }
2200f36951c1SDavid Gibson 
22016a84737cSDavid Gibson     return 1ULL << (best_page_shift + hash_shift - 7);
22027f763a5dSDavid Gibson }
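/*
 * Put differently, the limit is the backing page size multiplied by
 * 2^(hash_shift - 7).  Worked example: a 28-bit (256 MiB) HPT with
 * 64 KiB backing pages gives 1ULL << (16 + 28 - 7) = 128 GiB, while
 * 4 KiB pages would give 1ULL << (12 + 28 - 7) = 8 GiB.  The actual
 * shifts depend on the page sizes the host reports and the HPT size
 * the machine chose.
 */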
22037f763a5dSDavid Gibson #endif
22047f763a5dSDavid Gibson 
2205da95324eSAlexey Kardashevskiy bool kvmppc_spapr_use_multitce(void)
2206da95324eSAlexey Kardashevskiy {
2207da95324eSAlexey Kardashevskiy     return cap_spapr_multitce;
2208da95324eSAlexey Kardashevskiy }
2209da95324eSAlexey Kardashevskiy 
22103dc410aeSAlexey Kardashevskiy int kvmppc_spapr_enable_inkernel_multitce(void)
22113dc410aeSAlexey Kardashevskiy {
22123dc410aeSAlexey Kardashevskiy     int ret;
22133dc410aeSAlexey Kardashevskiy 
22143dc410aeSAlexey Kardashevskiy     ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_ENABLE_HCALL, 0,
22153dc410aeSAlexey Kardashevskiy                             H_PUT_TCE_INDIRECT, 1);
22163dc410aeSAlexey Kardashevskiy     if (!ret) {
22173dc410aeSAlexey Kardashevskiy         ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_ENABLE_HCALL, 0,
22183dc410aeSAlexey Kardashevskiy                                 H_STUFF_TCE, 1);
22193dc410aeSAlexey Kardashevskiy     }
22203dc410aeSAlexey Kardashevskiy 
22213dc410aeSAlexey Kardashevskiy     return ret;
22223dc410aeSAlexey Kardashevskiy }
22233dc410aeSAlexey Kardashevskiy 
2224d6ee2a7cSAlexey Kardashevskiy void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift,
2225d6ee2a7cSAlexey Kardashevskiy                               uint64_t bus_offset, uint32_t nb_table,
2226d6ee2a7cSAlexey Kardashevskiy                               int *pfd, bool need_vfio)
22270f5cb298SDavid Gibson {
22280f5cb298SDavid Gibson     long len;
22290f5cb298SDavid Gibson     int fd;
22300f5cb298SDavid Gibson     void *table;
22310f5cb298SDavid Gibson 
2232c995e942SDavid Gibson     /*
2233c995e942SDavid Gibson      * Must set fd to -1 so we don't try to munmap when called for
2234b5aec396SDavid Gibson      * destroying the table, which the upper layers -will- do
2235b5aec396SDavid Gibson      */
2236b5aec396SDavid Gibson     *pfd = -1;
22376a81dd17SDavid Gibson     if (!cap_spapr_tce || (need_vfio && !cap_spapr_vfio)) {
22380f5cb298SDavid Gibson         return NULL;
22390f5cb298SDavid Gibson     }
22400f5cb298SDavid Gibson 
2241d6ee2a7cSAlexey Kardashevskiy     if (cap_spapr_tce_64) {
2242d6ee2a7cSAlexey Kardashevskiy         struct kvm_create_spapr_tce_64 args = {
2243d6ee2a7cSAlexey Kardashevskiy             .liobn = liobn,
2244d6ee2a7cSAlexey Kardashevskiy             .page_shift = page_shift,
2245d6ee2a7cSAlexey Kardashevskiy             .offset = bus_offset >> page_shift,
2246d6ee2a7cSAlexey Kardashevskiy             .size = nb_table,
2247d6ee2a7cSAlexey Kardashevskiy             .flags = 0
2248d6ee2a7cSAlexey Kardashevskiy         };
2249d6ee2a7cSAlexey Kardashevskiy         fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE_64, &args);
2250d6ee2a7cSAlexey Kardashevskiy         if (fd < 0) {
2251d6ee2a7cSAlexey Kardashevskiy             fprintf(stderr,
2252d6ee2a7cSAlexey Kardashevskiy                     "KVM: Failed to create TCE64 table for liobn 0x%x\n",
2253d6ee2a7cSAlexey Kardashevskiy                     liobn);
2254d6ee2a7cSAlexey Kardashevskiy             return NULL;
2255d6ee2a7cSAlexey Kardashevskiy         }
2256d6ee2a7cSAlexey Kardashevskiy     } else if (cap_spapr_tce) {
2257d6ee2a7cSAlexey Kardashevskiy         uint64_t window_size = (uint64_t) nb_table << page_shift;
2258d6ee2a7cSAlexey Kardashevskiy         struct kvm_create_spapr_tce args = {
2259d6ee2a7cSAlexey Kardashevskiy             .liobn = liobn,
2260d6ee2a7cSAlexey Kardashevskiy             .window_size = window_size,
2261d6ee2a7cSAlexey Kardashevskiy         };
2262d6ee2a7cSAlexey Kardashevskiy         if ((window_size != args.window_size) || bus_offset) {
2263d6ee2a7cSAlexey Kardashevskiy             return NULL;
2264d6ee2a7cSAlexey Kardashevskiy         }
22650f5cb298SDavid Gibson         fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE, &args);
22660f5cb298SDavid Gibson         if (fd < 0) {
2267b5aec396SDavid Gibson             fprintf(stderr, "KVM: Failed to create TCE table for liobn 0x%x\n",
2268b5aec396SDavid Gibson                     liobn);
22690f5cb298SDavid Gibson             return NULL;
22700f5cb298SDavid Gibson         }
2271d6ee2a7cSAlexey Kardashevskiy     } else {
2272d6ee2a7cSAlexey Kardashevskiy         return NULL;
2273d6ee2a7cSAlexey Kardashevskiy     }
22740f5cb298SDavid Gibson 
2275d6ee2a7cSAlexey Kardashevskiy     len = nb_table * sizeof(uint64_t);
22760f5cb298SDavid Gibson     /* FIXME: round this up to page size */
22770f5cb298SDavid Gibson 
227874b41e56SDavid Gibson     table = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
22790f5cb298SDavid Gibson     if (table == MAP_FAILED) {
2280b5aec396SDavid Gibson         fprintf(stderr, "KVM: Failed to map TCE table for liobn 0x%x\n",
2281b5aec396SDavid Gibson                 liobn);
22820f5cb298SDavid Gibson         close(fd);
22830f5cb298SDavid Gibson         return NULL;
22840f5cb298SDavid Gibson     }
22850f5cb298SDavid Gibson 
22860f5cb298SDavid Gibson     *pfd = fd;
22870f5cb298SDavid Gibson     return table;
22880f5cb298SDavid Gibson }
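/*
 * A rough sketch of how callers are expected to pair these helpers
 * (illustrative only, not a copy of the sPAPR IOMMU code):
 *
 *     int fd;
 *     uint64_t *tbl = kvmppc_create_spapr_tce(liobn, page_shift,
 *                                             bus_offset, nb_table,
 *                                             &fd, need_vfio);
 *     if (!tbl) {
 *         ... fall back to a QEMU-allocated table ...
 *     }
 *     ... use the table shared with the kernel ...
 *     kvmppc_remove_spapr_tce(tbl, fd, nb_table);
 */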
22890f5cb298SDavid Gibson 
2290523e7b8aSAlexey Kardashevskiy int kvmppc_remove_spapr_tce(void *table, int fd, uint32_t nb_table)
22910f5cb298SDavid Gibson {
22920f5cb298SDavid Gibson     long len;
22930f5cb298SDavid Gibson 
22940f5cb298SDavid Gibson     if (fd < 0) {
22950f5cb298SDavid Gibson         return -1;
22960f5cb298SDavid Gibson     }
22970f5cb298SDavid Gibson 
2298523e7b8aSAlexey Kardashevskiy     len = nb_table * sizeof(uint64_t);
22990f5cb298SDavid Gibson     if ((munmap(table, len) < 0) ||
23000f5cb298SDavid Gibson         (close(fd) < 0)) {
2301b5aec396SDavid Gibson         fprintf(stderr, "KVM: Unexpected error removing TCE table: %s",
2302b5aec396SDavid Gibson                 strerror(errno));
23030f5cb298SDavid Gibson         /* Leak the table */
23040f5cb298SDavid Gibson     }
23050f5cb298SDavid Gibson 
23060f5cb298SDavid Gibson     return 0;
23070f5cb298SDavid Gibson }
23080f5cb298SDavid Gibson 
23097f763a5dSDavid Gibson int kvmppc_reset_htab(int shift_hint)
23107f763a5dSDavid Gibson {
23117f763a5dSDavid Gibson     uint32_t shift = shift_hint;
23127f763a5dSDavid Gibson 
2313ace9a2cbSDavid Gibson     if (!kvm_enabled()) {
2314ace9a2cbSDavid Gibson         /* Full emulation, tell caller to allocate htab itself */
2315ace9a2cbSDavid Gibson         return 0;
2316ace9a2cbSDavid Gibson     }
23176977afdaSGreg Kurz     if (kvm_vm_check_extension(kvm_state, KVM_CAP_PPC_ALLOC_HTAB)) {
23187f763a5dSDavid Gibson         int ret;
23197f763a5dSDavid Gibson         ret = kvm_vm_ioctl(kvm_state, KVM_PPC_ALLOCATE_HTAB, &shift);
2320ace9a2cbSDavid Gibson         if (ret == -ENOTTY) {
2321c995e942SDavid Gibson             /*
2322c995e942SDavid Gibson              * At least some versions of PR KVM advertise the
2323ace9a2cbSDavid Gibson              * capability, but don't implement the ioctl().  Oops.
2324ace9a2cbSDavid Gibson              * Return 0 so that we allocate the htab in qemu, as is
2325c995e942SDavid Gibson              * correct for PR.
2326c995e942SDavid Gibson              */
2327ace9a2cbSDavid Gibson             return 0;
2328ace9a2cbSDavid Gibson         } else if (ret < 0) {
23297f763a5dSDavid Gibson             return ret;
23307f763a5dSDavid Gibson         }
23317f763a5dSDavid Gibson         return shift;
23327f763a5dSDavid Gibson     }
23337f763a5dSDavid Gibson 
2334c995e942SDavid Gibson     /*
2335c995e942SDavid Gibson      * We have a kernel that predates the htab reset calls.  For PR
2336ace9a2cbSDavid Gibson      * KVM, we need to allocate the htab ourselves, for an HV KVM of
2337c995e942SDavid Gibson      * this era, it has allocated a 16MB fixed size hash table
2338c995e942SDavid Gibson      * already.
2339c995e942SDavid Gibson      */
234096c9cff0SThomas Huth     if (kvmppc_is_pr(kvm_state)) {
2341ace9a2cbSDavid Gibson         /* PR - tell caller to allocate htab */
23427f763a5dSDavid Gibson         return 0;
2343ace9a2cbSDavid Gibson     } else {
2344ace9a2cbSDavid Gibson         /* HV - assume 16MB kernel allocated htab */
2345ace9a2cbSDavid Gibson         return 24;
2346ace9a2cbSDavid Gibson     }
23477f763a5dSDavid Gibson }
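/*
 * Return convention, which callers rely on: a negative value is an
 * error, 0 means "QEMU must allocate the hash table itself" (TCG, PR
 * KVM, or a kernel that advertises the capability but lacks the
 * ioctl), and a positive value is the log2 size of the table the
 * kernel has already allocated (e.g. 24 for the legacy 16 MB HV case
 * above).
 */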
23487f763a5dSDavid Gibson 
2349a1e98583SDavid Gibson static inline uint32_t mfpvr(void)
2350a1e98583SDavid Gibson {
2351a1e98583SDavid Gibson     uint32_t pvr;
2352a1e98583SDavid Gibson 
2353a1e98583SDavid Gibson     asm ("mfpvr %0"
2354a1e98583SDavid Gibson          : "=r"(pvr));
2355a1e98583SDavid Gibson     return pvr;
2356a1e98583SDavid Gibson }
2357a1e98583SDavid Gibson 
2358a7342588SDavid Gibson static void alter_insns(uint64_t *word, uint64_t flags, bool on)
2359a7342588SDavid Gibson {
2360a7342588SDavid Gibson     if (on) {
2361a7342588SDavid Gibson         *word |= flags;
2362a7342588SDavid Gibson     } else {
2363a7342588SDavid Gibson         *word &= ~flags;
2364a7342588SDavid Gibson     }
2365a7342588SDavid Gibson }
2366a7342588SDavid Gibson 
2367cfb52d07SHarsh Prateek Bora static bool kvmppc_cpu_realize(CPUState *cs, Error **errp)
2368cfb52d07SHarsh Prateek Bora {
2369cfb52d07SHarsh Prateek Bora     int ret;
2370cfb52d07SHarsh Prateek Bora     const char *vcpu_str = (cs->parent_obj.hotplugged == true) ?
2371cfb52d07SHarsh Prateek Bora                            "hotplug" : "create";
2372cfb52d07SHarsh Prateek Bora     cs->cpu_index = cpu_get_free_index();
2373cfb52d07SHarsh Prateek Bora 
2374cfb52d07SHarsh Prateek Bora     POWERPC_CPU(cs)->vcpu_id = cs->cpu_index;
2375cfb52d07SHarsh Prateek Bora 
2376cfb52d07SHarsh Prateek Bora     /* create and park to fail gracefully in case vcpu hotplug fails */
2377cfb52d07SHarsh Prateek Bora     ret = kvm_create_and_park_vcpu(cs);
2378cfb52d07SHarsh Prateek Bora     if (ret) {
2379cfb52d07SHarsh Prateek Bora         /*
2380cfb52d07SHarsh Prateek Bora          * Returning an error here makes QEMU terminate if initial
2381cfb52d07SHarsh Prateek Bora          * CPU creation fails, but only fails the hotplug operation
2382cfb52d07SHarsh Prateek Bora          * if the error happens during CPU hotplug.
2383cfb52d07SHarsh Prateek Bora          */
2384cfb52d07SHarsh Prateek Bora         error_setg(errp, "%s: vcpu %s failed with %d",
2385cfb52d07SHarsh Prateek Bora                          __func__, vcpu_str, ret);
2386cfb52d07SHarsh Prateek Bora         return false;
2387cfb52d07SHarsh Prateek Bora     }
2388cfb52d07SHarsh Prateek Bora     return true;
2389cfb52d07SHarsh Prateek Bora }
2390cfb52d07SHarsh Prateek Bora 
239112d1a768SPhilippe Mathieu-Daudé static void kvmppc_host_cpu_class_init(ObjectClass *oc, const void *data)
23922985b86bSAndreas Färber {
23932985b86bSAndreas Färber     PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
23940cbad81fSDavid Gibson     uint32_t dcache_size = kvmppc_read_int_cpu_dt("d-cache-size");
23950cbad81fSDavid Gibson     uint32_t icache_size = kvmppc_read_int_cpu_dt("i-cache-size");
2396a1e98583SDavid Gibson 
2397cfe34f44SAndreas Färber     /* Now fix up the class with information we can query from the host */
23983bc9ccc0SAlexey Kardashevskiy     pcc->pvr = mfpvr();
2399a7342588SDavid Gibson 
24003f2ca480SDavid Gibson     alter_insns(&pcc->insns_flags, PPC_ALTIVEC,
24013f2ca480SDavid Gibson                 qemu_getauxval(AT_HWCAP) & PPC_FEATURE_HAS_ALTIVEC);
24023f2ca480SDavid Gibson     alter_insns(&pcc->insns_flags2, PPC2_VSX,
24033f2ca480SDavid Gibson                 qemu_getauxval(AT_HWCAP) & PPC_FEATURE_HAS_VSX);
24043f2ca480SDavid Gibson     alter_insns(&pcc->insns_flags2, PPC2_DFP,
24053f2ca480SDavid Gibson                 qemu_getauxval(AT_HWCAP) & PPC_FEATURE_HAS_DFP);
24060cbad81fSDavid Gibson 
24070cbad81fSDavid Gibson     if (dcache_size != -1) {
24080cbad81fSDavid Gibson         pcc->l1_dcache_size = dcache_size;
24090cbad81fSDavid Gibson     }
24100cbad81fSDavid Gibson 
24110cbad81fSDavid Gibson     if (icache_size != -1) {
24120cbad81fSDavid Gibson         pcc->l1_icache_size = icache_size;
24130cbad81fSDavid Gibson     }
2414c64abd1fSSam Bobroff 
2415c64abd1fSSam Bobroff #if defined(TARGET_PPC64)
2416aa6edf97SPhilippe Mathieu-Daudé     pcc->radix_page_info = kvmppc_get_radix_page_info();
2417c64abd1fSSam Bobroff #endif /* defined(TARGET_PPC64) */
2418a1e98583SDavid Gibson }
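/*
 * The net effect of the fixups above is that the "host" CPU class
 * mirrors the machine QEMU runs on: the PVR comes from mfpvr(), the
 * AltiVec/VSX/DFP instruction flags from the AT_HWCAP auxv bits, and
 * the L1 cache sizes from the device tree (keeping the model defaults
 * when those properties are absent).
 */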
2419a1e98583SDavid Gibson 
24203b961124SStuart Yoder bool kvmppc_has_cap_epr(void)
24213b961124SStuart Yoder {
24223b961124SStuart Yoder     return cap_epr;
24233b961124SStuart Yoder }
24243b961124SStuart Yoder 
242587a91de6SAlexander Graf bool kvmppc_has_cap_fixup_hcalls(void)
242687a91de6SAlexander Graf {
242787a91de6SAlexander Graf     return cap_fixup_hcalls;
242887a91de6SAlexander Graf }
242987a91de6SAlexander Graf 
2430bac3bf28SThomas Huth bool kvmppc_has_cap_htm(void)
2431bac3bf28SThomas Huth {
2432bac3bf28SThomas Huth     return cap_htm;
2433bac3bf28SThomas Huth }
2434bac3bf28SThomas Huth 
2435cf1c4cceSSam Bobroff bool kvmppc_has_cap_mmu_radix(void)
2436cf1c4cceSSam Bobroff {
2437cf1c4cceSSam Bobroff     return cap_mmu_radix;
2438cf1c4cceSSam Bobroff }
2439cf1c4cceSSam Bobroff 
2440cf1c4cceSSam Bobroff bool kvmppc_has_cap_mmu_hash_v3(void)
2441cf1c4cceSSam Bobroff {
2442cf1c4cceSSam Bobroff     return cap_mmu_hash_v3;
2443cf1c4cceSSam Bobroff }
2444cf1c4cceSSam Bobroff 
2445072f416aSSuraj Jitindar Singh static bool kvmppc_power8_host(void)
2446072f416aSSuraj Jitindar Singh {
2447072f416aSSuraj Jitindar Singh     bool ret = false;
2448072f416aSSuraj Jitindar Singh #ifdef TARGET_PPC64
2449072f416aSSuraj Jitindar Singh     {
2450072f416aSSuraj Jitindar Singh         uint32_t base_pvr = CPU_POWERPC_POWER_SERVER_MASK & mfpvr();
2451072f416aSSuraj Jitindar Singh         ret = (base_pvr == CPU_POWERPC_POWER8E_BASE) ||
2452072f416aSSuraj Jitindar Singh               (base_pvr == CPU_POWERPC_POWER8NVL_BASE) ||
2453072f416aSSuraj Jitindar Singh               (base_pvr == CPU_POWERPC_POWER8_BASE);
2454072f416aSSuraj Jitindar Singh     }
2455072f416aSSuraj Jitindar Singh #endif /* TARGET_PPC64 */
2456072f416aSSuraj Jitindar Singh     return ret;
2457072f416aSSuraj Jitindar Singh }
2458072f416aSSuraj Jitindar Singh 
24598fea7044SSuraj Jitindar Singh static int parse_cap_ppc_safe_cache(struct kvm_ppc_cpu_char c)
24608fea7044SSuraj Jitindar Singh {
2461072f416aSSuraj Jitindar Singh     bool l1d_thread_priv_req = !kvmppc_power8_host();
2462072f416aSSuraj Jitindar Singh 
24638fea7044SSuraj Jitindar Singh     if (~c.behaviour & c.behaviour_mask & H_CPU_BEHAV_L1D_FLUSH_PR) {
24648fea7044SSuraj Jitindar Singh         return 2;
2465072f416aSSuraj Jitindar Singh     } else if ((!l1d_thread_priv_req ||
2466072f416aSSuraj Jitindar Singh                 c.character & c.character_mask & H_CPU_CHAR_L1D_THREAD_PRIV) &&
24678fea7044SSuraj Jitindar Singh                (c.character & c.character_mask
24688fea7044SSuraj Jitindar Singh                 & (H_CPU_CHAR_L1D_FLUSH_ORI30 | H_CPU_CHAR_L1D_FLUSH_TRIG2))) {
24698fea7044SSuraj Jitindar Singh         return 1;
24708fea7044SSuraj Jitindar Singh     }
24718fea7044SSuraj Jitindar Singh 
24728fea7044SSuraj Jitindar Singh     return 0;
24738fea7044SSuraj Jitindar Singh }
24748fea7044SSuraj Jitindar Singh 
24758fea7044SSuraj Jitindar Singh static int parse_cap_ppc_safe_bounds_check(struct kvm_ppc_cpu_char c)
24768fea7044SSuraj Jitindar Singh {
24778fea7044SSuraj Jitindar Singh     if (~c.behaviour & c.behaviour_mask & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR) {
24788fea7044SSuraj Jitindar Singh         return 2;
24798fea7044SSuraj Jitindar Singh     } else if (c.character & c.character_mask & H_CPU_CHAR_SPEC_BAR_ORI31) {
24808fea7044SSuraj Jitindar Singh         return 1;
24818fea7044SSuraj Jitindar Singh     }
24828fea7044SSuraj Jitindar Singh 
24838fea7044SSuraj Jitindar Singh     return 0;
24848fea7044SSuraj Jitindar Singh }
24858fea7044SSuraj Jitindar Singh 
24868fea7044SSuraj Jitindar Singh static int parse_cap_ppc_safe_indirect_branch(struct kvm_ppc_cpu_char c)
24878fea7044SSuraj Jitindar Singh {
2488399b2896SSuraj Jitindar Singh     if ((~c.behaviour & c.behaviour_mask & H_CPU_BEHAV_FLUSH_COUNT_CACHE) &&
2489399b2896SSuraj Jitindar Singh         (~c.character & c.character_mask & H_CPU_CHAR_CACHE_COUNT_DIS) &&
2490399b2896SSuraj Jitindar Singh         (~c.character & c.character_mask & H_CPU_CHAR_BCCTRL_SERIALISED)) {
2491399b2896SSuraj Jitindar Singh         return SPAPR_CAP_FIXED_NA;
2492399b2896SSuraj Jitindar Singh     } else if (c.behaviour & c.behaviour_mask & H_CPU_BEHAV_FLUSH_COUNT_CACHE) {
2493399b2896SSuraj Jitindar Singh         return SPAPR_CAP_WORKAROUND;
2494399b2896SSuraj Jitindar Singh     } else if (c.character & c.character_mask & H_CPU_CHAR_CACHE_COUNT_DIS) {
24958fea7044SSuraj Jitindar Singh         return  SPAPR_CAP_FIXED_CCD;
24968fea7044SSuraj Jitindar Singh     } else if (c.character & c.character_mask & H_CPU_CHAR_BCCTRL_SERIALISED) {
24978fea7044SSuraj Jitindar Singh         return SPAPR_CAP_FIXED_IBS;
24988fea7044SSuraj Jitindar Singh     }
24998fea7044SSuraj Jitindar Singh 
25008fea7044SSuraj Jitindar Singh     return 0;
25018fea7044SSuraj Jitindar Singh }
25028fea7044SSuraj Jitindar Singh 
25038ff43ee4SSuraj Jitindar Singh static int parse_cap_ppc_count_cache_flush_assist(struct kvm_ppc_cpu_char c)
25048ff43ee4SSuraj Jitindar Singh {
25058ff43ee4SSuraj Jitindar Singh     if (c.character & c.character_mask & H_CPU_CHAR_BCCTR_FLUSH_ASSIST) {
25068ff43ee4SSuraj Jitindar Singh         return 1;
25078ff43ee4SSuraj Jitindar Singh     }
25088ff43ee4SSuraj Jitindar Singh     return 0;
25098ff43ee4SSuraj Jitindar Singh }
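/*
 * The integer levels returned by the parse_cap_ppc_* helpers above are
 * consumed by the sPAPR capability code via kvmppc_get_cap_safe_*()
 * below: 0 means the host offers no mitigation, 1 means a software
 * workaround is available, and 2 (or one of the explicit
 * SPAPR_CAP_FIXED_* values in the indirect-branch case) means the
 * hardware is not affected.  This is meant to line up with the
 * SPAPR_CAP_BROKEN/WORKAROUND/FIXED encoding of the machine-level
 * capabilities.
 */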
25108ff43ee4SSuraj Jitindar Singh 
251138afd772SCédric Le Goater bool kvmppc_has_cap_xive(void)
251238afd772SCédric Le Goater {
251338afd772SCédric Le Goater     return cap_xive;
251438afd772SCédric Le Goater }
251538afd772SCédric Le Goater 
25168acc2ae5SSuraj Jitindar Singh static void kvmppc_get_cpu_characteristics(KVMState *s)
25178acc2ae5SSuraj Jitindar Singh {
25188acc2ae5SSuraj Jitindar Singh     struct kvm_ppc_cpu_char c;
25198acc2ae5SSuraj Jitindar Singh     int ret;
25208acc2ae5SSuraj Jitindar Singh 
25218acc2ae5SSuraj Jitindar Singh     /* Assume broken */
25228acc2ae5SSuraj Jitindar Singh     cap_ppc_safe_cache = 0;
25238acc2ae5SSuraj Jitindar Singh     cap_ppc_safe_bounds_check = 0;
25248acc2ae5SSuraj Jitindar Singh     cap_ppc_safe_indirect_branch = 0;
25258acc2ae5SSuraj Jitindar Singh 
25268acc2ae5SSuraj Jitindar Singh     ret = kvm_vm_check_extension(s, KVM_CAP_PPC_GET_CPU_CHAR);
25278acc2ae5SSuraj Jitindar Singh     if (!ret) {
25288acc2ae5SSuraj Jitindar Singh         return;
25298acc2ae5SSuraj Jitindar Singh     }
25308acc2ae5SSuraj Jitindar Singh     ret = kvm_vm_ioctl(s, KVM_PPC_GET_CPU_CHAR, &c);
25318acc2ae5SSuraj Jitindar Singh     if (ret < 0) {
25328acc2ae5SSuraj Jitindar Singh         return;
25338acc2ae5SSuraj Jitindar Singh     }
25348fea7044SSuraj Jitindar Singh 
25358fea7044SSuraj Jitindar Singh     cap_ppc_safe_cache = parse_cap_ppc_safe_cache(c);
25368fea7044SSuraj Jitindar Singh     cap_ppc_safe_bounds_check = parse_cap_ppc_safe_bounds_check(c);
25378fea7044SSuraj Jitindar Singh     cap_ppc_safe_indirect_branch = parse_cap_ppc_safe_indirect_branch(c);
25388ff43ee4SSuraj Jitindar Singh     cap_ppc_count_cache_flush_assist =
25398ff43ee4SSuraj Jitindar Singh         parse_cap_ppc_count_cache_flush_assist(c);
25408acc2ae5SSuraj Jitindar Singh }
25418acc2ae5SSuraj Jitindar Singh 
25428acc2ae5SSuraj Jitindar Singh int kvmppc_get_cap_safe_cache(void)
25438acc2ae5SSuraj Jitindar Singh {
25448acc2ae5SSuraj Jitindar Singh     return cap_ppc_safe_cache;
25458acc2ae5SSuraj Jitindar Singh }
25468acc2ae5SSuraj Jitindar Singh 
25478acc2ae5SSuraj Jitindar Singh int kvmppc_get_cap_safe_bounds_check(void)
25488acc2ae5SSuraj Jitindar Singh {
25498acc2ae5SSuraj Jitindar Singh     return cap_ppc_safe_bounds_check;
25508acc2ae5SSuraj Jitindar Singh }
25518acc2ae5SSuraj Jitindar Singh 
25528acc2ae5SSuraj Jitindar Singh int kvmppc_get_cap_safe_indirect_branch(void)
25538acc2ae5SSuraj Jitindar Singh {
25548acc2ae5SSuraj Jitindar Singh     return cap_ppc_safe_indirect_branch;
25558acc2ae5SSuraj Jitindar Singh }
25568acc2ae5SSuraj Jitindar Singh 
25578ff43ee4SSuraj Jitindar Singh int kvmppc_get_cap_count_cache_flush_assist(void)
25588ff43ee4SSuraj Jitindar Singh {
25598ff43ee4SSuraj Jitindar Singh     return cap_ppc_count_cache_flush_assist;
25608ff43ee4SSuraj Jitindar Singh }
25618ff43ee4SSuraj Jitindar Singh 
2562b9a477b7SSuraj Jitindar Singh bool kvmppc_has_cap_nested_kvm_hv(void)
2563b9a477b7SSuraj Jitindar Singh {
2564b9a477b7SSuraj Jitindar Singh     return !!cap_ppc_nested_kvm_hv;
2565b9a477b7SSuraj Jitindar Singh }
2566b9a477b7SSuraj Jitindar Singh 
2567b9a477b7SSuraj Jitindar Singh int kvmppc_set_cap_nested_kvm_hv(int enable)
2568b9a477b7SSuraj Jitindar Singh {
2569b9a477b7SSuraj Jitindar Singh     return kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_NESTED_HV, 0, enable);
2570b9a477b7SSuraj Jitindar Singh }
2571b9a477b7SSuraj Jitindar Singh 
25729ded780cSAlexey Kardashevskiy bool kvmppc_has_cap_spapr_vfio(void)
25739ded780cSAlexey Kardashevskiy {
25749ded780cSAlexey Kardashevskiy     return cap_spapr_vfio;
25759ded780cSAlexey Kardashevskiy }
25769ded780cSAlexey Kardashevskiy 
25777d050527SSuraj Jitindar Singh int kvmppc_get_cap_large_decr(void)
25787d050527SSuraj Jitindar Singh {
25797d050527SSuraj Jitindar Singh     return cap_large_decr;
25807d050527SSuraj Jitindar Singh }
25817d050527SSuraj Jitindar Singh 
25827d050527SSuraj Jitindar Singh int kvmppc_enable_cap_large_decr(PowerPCCPU *cpu, int enable)
25837d050527SSuraj Jitindar Singh {
25847d050527SSuraj Jitindar Singh     CPUState *cs = CPU(cpu);
258559411579SDaniel Henrique Barboza     uint64_t lpcr = 0;
25867d050527SSuraj Jitindar Singh 
25877d050527SSuraj Jitindar Singh     kvm_get_one_reg(cs, KVM_REG_PPC_LPCR_64, &lpcr);
25887d050527SSuraj Jitindar Singh     /* Do we need to modify the LPCR? */
25897d050527SSuraj Jitindar Singh     if (!!(lpcr & LPCR_LD) != !!enable) {
25907d050527SSuraj Jitindar Singh         if (enable) {
25917d050527SSuraj Jitindar Singh             lpcr |= LPCR_LD;
25927d050527SSuraj Jitindar Singh         } else {
25937d050527SSuraj Jitindar Singh             lpcr &= ~LPCR_LD;
25947d050527SSuraj Jitindar Singh         }
25957d050527SSuraj Jitindar Singh         kvm_set_one_reg(cs, KVM_REG_PPC_LPCR_64, &lpcr);
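        /*
         * Read the LPCR back so the check below can verify that the
         * kernel actually accepted the LPCR_LD change.
         */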
25967d050527SSuraj Jitindar Singh         kvm_get_one_reg(cs, KVM_REG_PPC_LPCR_64, &lpcr);
25977d050527SSuraj Jitindar Singh 
25987d050527SSuraj Jitindar Singh         if (!!(lpcr & LPCR_LD) != !!enable) {
25997d050527SSuraj Jitindar Singh             return -1;
26007d050527SSuraj Jitindar Singh         }
26017d050527SSuraj Jitindar Singh     }
26027d050527SSuraj Jitindar Singh 
26037d050527SSuraj Jitindar Singh     return 0;
26047d050527SSuraj Jitindar Singh }
26057d050527SSuraj Jitindar Singh 
260682123b75SBharata B Rao int kvmppc_has_cap_rpt_invalidate(void)
260782123b75SBharata B Rao {
260882123b75SBharata B Rao     return cap_rpt_invalidate;
260982123b75SBharata B Rao }
261082123b75SBharata B Rao 
2611ccc5a4c5SNicholas Piggin bool kvmppc_supports_ail_3(void)
2612ccc5a4c5SNicholas Piggin {
2613ccc5a4c5SNicholas Piggin     return cap_ail_mode_3;
2614ccc5a4c5SNicholas Piggin }
2615ccc5a4c5SNicholas Piggin 
261652b2519cSThomas Huth PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void)
261752b2519cSThomas Huth {
261852b2519cSThomas Huth     uint32_t host_pvr = mfpvr();
261952b2519cSThomas Huth     PowerPCCPUClass *pvr_pcc;
262052b2519cSThomas Huth 
262152b2519cSThomas Huth     pvr_pcc = ppc_cpu_class_by_pvr(host_pvr);
262252b2519cSThomas Huth     if (pvr_pcc == NULL) {
262352b2519cSThomas Huth         pvr_pcc = ppc_cpu_class_by_pvr_mask(host_pvr);
262452b2519cSThomas Huth     }
262552b2519cSThomas Huth 
262652b2519cSThomas Huth     return pvr_pcc;
262752b2519cSThomas Huth }
262852b2519cSThomas Huth 
2629165dc3edSDavid Gibson static void pseries_machine_class_fixup(ObjectClass *oc, void *opaque)
2630165dc3edSDavid Gibson {
2631165dc3edSDavid Gibson     MachineClass *mc = MACHINE_CLASS(oc);
2632165dc3edSDavid Gibson 
2633165dc3edSDavid Gibson     mc->default_cpu_type = TYPE_HOST_POWERPC_CPU;
2634165dc3edSDavid Gibson }
2635165dc3edSDavid Gibson 
2636165dc3edSDavid Gibson static int kvm_ppc_register_host_cpu_type(void)
26375ba4576bSAndreas Färber {
26385ba4576bSAndreas Färber     TypeInfo type_info = {
26395ba4576bSAndreas Färber         .name = TYPE_HOST_POWERPC_CPU,
26405ba4576bSAndreas Färber         .class_init = kvmppc_host_cpu_class_init,
26415ba4576bSAndreas Färber     };
26425ba4576bSAndreas Färber     PowerPCCPUClass *pvr_pcc;
264392e926e1SGreg Kurz     ObjectClass *oc;
26445b79b1caSAlexey Kardashevskiy     DeviceClass *dc;
2645715d4b96SThomas Huth     int i;
26465ba4576bSAndreas Färber 
264752b2519cSThomas Huth     pvr_pcc = kvm_ppc_get_host_cpu_class();
26483bc9ccc0SAlexey Kardashevskiy     if (pvr_pcc == NULL) {
26495ba4576bSAndreas Färber         return -1;
26505ba4576bSAndreas Färber     }
26515ba4576bSAndreas Färber     type_info.parent = object_class_get_name(OBJECT_CLASS(pvr_pcc));
26526e0a8883SZhao Liu     type_register_static(&type_info);
26532e9c10ebSIgor Mammedov     /* override TCG default cpu type with 'host' cpu model */
2654165dc3edSDavid Gibson     object_class_foreach(pseries_machine_class_fixup, TYPE_SPAPR_MACHINE,
2655165dc3edSDavid Gibson                          false, NULL);
26565b79b1caSAlexey Kardashevskiy 
265792e926e1SGreg Kurz     oc = object_class_by_name(type_info.name);
265892e926e1SGreg Kurz     g_assert(oc);
265992e926e1SGreg Kurz 
2660715d4b96SThomas Huth     /*
2661715d4b96SThomas Huth      * Update generic CPU family class alias (e.g. on a POWER8NVL host,
2662715d4b96SThomas Huth      * we want "POWER8" to be a "family" alias that points to the current
2663715d4b96SThomas Huth      * host CPU type, too)
2664715d4b96SThomas Huth      */
2665715d4b96SThomas Huth     dc = DEVICE_CLASS(ppc_cpu_get_family_class(pvr_pcc));
2666715d4b96SThomas Huth     for (i = 0; ppc_cpu_aliases[i].alias != NULL; i++) {
2667c5354f54SIgor Mammedov         if (strcasecmp(ppc_cpu_aliases[i].alias, dc->desc) == 0) {
2668715d4b96SThomas Huth             char *suffix;
2669715d4b96SThomas Huth 
2670715d4b96SThomas Huth             ppc_cpu_aliases[i].model = g_strdup(object_class_get_name(oc));
2671c9137065SIgor Mammedov             suffix = strstr(ppc_cpu_aliases[i].model, POWERPC_CPU_TYPE_SUFFIX);
2672715d4b96SThomas Huth             if (suffix) {
2673715d4b96SThomas Huth                 *suffix = 0;
2674715d4b96SThomas Huth             }
2675715d4b96SThomas Huth             break;
2676715d4b96SThomas Huth         }
2677715d4b96SThomas Huth     }
2678715d4b96SThomas Huth 
26795ba4576bSAndreas Färber     return 0;
26805ba4576bSAndreas Färber }
26815ba4576bSAndreas Färber 
2682feaa64c4SDavid Gibson int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function)
2683feaa64c4SDavid Gibson {
2684feaa64c4SDavid Gibson     struct kvm_rtas_token_args args = {
2685feaa64c4SDavid Gibson         .token = token,
2686feaa64c4SDavid Gibson     };
2687feaa64c4SDavid Gibson 
2688feaa64c4SDavid Gibson     if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_RTAS)) {
2689feaa64c4SDavid Gibson         return -ENOENT;
2690feaa64c4SDavid Gibson     }
2691feaa64c4SDavid Gibson 
26927701aeedSCédric Le Goater     strncpy(args.name, function, sizeof(args.name) - 1);
2693feaa64c4SDavid Gibson 
2694feaa64c4SDavid Gibson     return kvm_vm_ioctl(kvm_state, KVM_PPC_RTAS_DEFINE_TOKEN, &args);
2695feaa64c4SDavid Gibson }
269612b1143bSDavid Gibson 
269714b0d748SGreg Kurz int kvmppc_get_htab_fd(bool write, uint64_t index, Error **errp)
2698e68cb8b4SAlexey Kardashevskiy {
2699e68cb8b4SAlexey Kardashevskiy     struct kvm_get_htab_fd s = {
2700e68cb8b4SAlexey Kardashevskiy         .flags = write ? KVM_GET_HTAB_WRITE : 0,
270114b0d748SGreg Kurz         .start_index = index,
2702e68cb8b4SAlexey Kardashevskiy     };
270382be8e73SGreg Kurz     int ret;
2704e68cb8b4SAlexey Kardashevskiy 
2705e68cb8b4SAlexey Kardashevskiy     if (!cap_htab_fd) {
270614b0d748SGreg Kurz         error_setg(errp, "KVM version doesn't support %s the HPT",
270714b0d748SGreg Kurz                    write ? "writing" : "reading");
270882be8e73SGreg Kurz         return -ENOTSUP;
2709e68cb8b4SAlexey Kardashevskiy     }
2710e68cb8b4SAlexey Kardashevskiy 
271182be8e73SGreg Kurz     ret = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &s);
271282be8e73SGreg Kurz     if (ret < 0) {
271314b0d748SGreg Kurz         error_setg(errp, "Unable to open fd for %s HPT %s KVM: %s",
271414b0d748SGreg Kurz                    write ? "writing" : "reading", write ? "to" : "from",
271514b0d748SGreg Kurz                    strerror(errno));
271682be8e73SGreg Kurz         return -errno;
271782be8e73SGreg Kurz     }
271882be8e73SGreg Kurz 
271982be8e73SGreg Kurz     return ret;
2720e68cb8b4SAlexey Kardashevskiy }
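/*
 * Data transferred through the fd returned here is a stream of
 * variable-sized records: each record is a struct kvm_get_htab_header
 * (index of the first HPTE, n_valid, n_invalid) immediately followed
 * by n_valid HPTEs of HASH_PTE_SIZE_64 bytes each.
 * kvmppc_save_htab(), kvmppc_load_htab_chunk() and kvmppc_read_hptes()
 * below all walk exactly that layout.
 */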
2721e68cb8b4SAlexey Kardashevskiy 
2722e68cb8b4SAlexey Kardashevskiy int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns)
2723e68cb8b4SAlexey Kardashevskiy {
2724bc72ad67SAlex Bligh     int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
2725aba594daSThomas Huth     g_autofree uint8_t *buf = g_malloc(bufsize);
2726e68cb8b4SAlexey Kardashevskiy     ssize_t rc;
2727e68cb8b4SAlexey Kardashevskiy 
2728e68cb8b4SAlexey Kardashevskiy     do {
2729e68cb8b4SAlexey Kardashevskiy         rc = read(fd, buf, bufsize);
2730e68cb8b4SAlexey Kardashevskiy         if (rc < 0) {
2731e68cb8b4SAlexey Kardashevskiy             fprintf(stderr, "Error reading data from KVM HTAB fd: %s\n",
2732e68cb8b4SAlexey Kardashevskiy                     strerror(errno));
2733e68cb8b4SAlexey Kardashevskiy             return rc;
2734e68cb8b4SAlexey Kardashevskiy         } else if (rc) {
2735e094c4c1SCédric Le Goater             uint8_t *buffer = buf;
2736e094c4c1SCédric Le Goater             ssize_t n = rc;
2737e094c4c1SCédric Le Goater             while (n) {
2738e094c4c1SCédric Le Goater                 struct kvm_get_htab_header *head =
2739e094c4c1SCédric Le Goater                     (struct kvm_get_htab_header *) buffer;
2740e094c4c1SCédric Le Goater                 size_t chunksize = sizeof(*head) +
2741e094c4c1SCédric Le Goater                      HASH_PTE_SIZE_64 * head->n_valid;
2742e094c4c1SCédric Le Goater 
2743e094c4c1SCédric Le Goater                 qemu_put_be32(f, head->index);
2744e094c4c1SCédric Le Goater                 qemu_put_be16(f, head->n_valid);
2745e094c4c1SCédric Le Goater                 qemu_put_be16(f, head->n_invalid);
2746e094c4c1SCédric Le Goater                 qemu_put_buffer(f, (void *)(head + 1),
2747e094c4c1SCédric Le Goater                                 HASH_PTE_SIZE_64 * head->n_valid);
2748e094c4c1SCédric Le Goater 
2749e094c4c1SCédric Le Goater                 buffer += chunksize;
2750e094c4c1SCédric Le Goater                 n -= chunksize;
2751e094c4c1SCédric Le Goater             }
2752e68cb8b4SAlexey Kardashevskiy         }
2753e68cb8b4SAlexey Kardashevskiy     } while ((rc != 0)
2754c995e942SDavid Gibson              && ((max_ns < 0) ||
2755c995e942SDavid Gibson                  ((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) < max_ns)));
2756e68cb8b4SAlexey Kardashevskiy 
2757e68cb8b4SAlexey Kardashevskiy     return (rc == 0) ? 1 : 0;
2758e68cb8b4SAlexey Kardashevskiy }
2759e68cb8b4SAlexey Kardashevskiy 
2760e68cb8b4SAlexey Kardashevskiy int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
27610a06e4d6SGreg Kurz                            uint16_t n_valid, uint16_t n_invalid, Error **errp)
2762e68cb8b4SAlexey Kardashevskiy {
2763e68cb8b4SAlexey Kardashevskiy     struct kvm_get_htab_header *buf;
2764e68cb8b4SAlexey Kardashevskiy     size_t chunksize = sizeof(*buf) + n_valid * HASH_PTE_SIZE_64;
2765e68cb8b4SAlexey Kardashevskiy     ssize_t rc;
2766e68cb8b4SAlexey Kardashevskiy 
2767e68cb8b4SAlexey Kardashevskiy     buf = alloca(chunksize);
2768e68cb8b4SAlexey Kardashevskiy     buf->index = index;
2769e68cb8b4SAlexey Kardashevskiy     buf->n_valid = n_valid;
2770e68cb8b4SAlexey Kardashevskiy     buf->n_invalid = n_invalid;
2771e68cb8b4SAlexey Kardashevskiy 
2772e68cb8b4SAlexey Kardashevskiy     qemu_get_buffer(f, (void *)(buf + 1), HASH_PTE_SIZE_64 * n_valid);
2773e68cb8b4SAlexey Kardashevskiy 
2774e68cb8b4SAlexey Kardashevskiy     rc = write(fd, buf, chunksize);
2775e68cb8b4SAlexey Kardashevskiy     if (rc < 0) {
27760a06e4d6SGreg Kurz         error_setg_errno(errp, errno, "Error writing the KVM hash table");
27770a06e4d6SGreg Kurz         return -errno;
2778e68cb8b4SAlexey Kardashevskiy     }
2779e68cb8b4SAlexey Kardashevskiy     if (rc != chunksize) {
2780e68cb8b4SAlexey Kardashevskiy         /* We should never get a short write on a single chunk */
27810a06e4d6SGreg Kurz         error_setg(errp, "Short write while restoring the KVM hash table");
27820a06e4d6SGreg Kurz         return -ENOSPC;
2783e68cb8b4SAlexey Kardashevskiy     }
2784e68cb8b4SAlexey Kardashevskiy     return 0;
2785e68cb8b4SAlexey Kardashevskiy }
2786e68cb8b4SAlexey Kardashevskiy 
278720d695a9SAndreas Färber bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
27884513d923SGleb Natapov {
27894513d923SGleb Natapov     return true;
27904513d923SGleb Natapov }
2791a1b87fe0SJan Kiszka 
279282169660SScott Wood void kvm_arch_init_irq_routing(KVMState *s)
279382169660SScott Wood {
279482169660SScott Wood }
2795c65f9a07SGreg Kurz 
27961ad9f0a4SDavid Gibson void kvmppc_read_hptes(ppc_hash_pte64_t *hptes, hwaddr ptex, int n)
27971ad9f0a4SDavid Gibson {
27981ad9f0a4SDavid Gibson     int fd, rc;
27991ad9f0a4SDavid Gibson     int i;
28007c43bca0SAneesh Kumar K.V 
280114b0d748SGreg Kurz     fd = kvmppc_get_htab_fd(false, ptex, &error_abort);
28021ad9f0a4SDavid Gibson 
28031ad9f0a4SDavid Gibson     i = 0;
28041ad9f0a4SDavid Gibson     while (i < n) {
28051ad9f0a4SDavid Gibson         struct kvm_get_htab_header *hdr;
28061ad9f0a4SDavid Gibson         int m = n < HPTES_PER_GROUP ? n : HPTES_PER_GROUP;
280797c2fc50SThomas Huth         char buf[sizeof(*hdr) + HPTES_PER_GROUP * HASH_PTE_SIZE_64];
28081ad9f0a4SDavid Gibson 
280997c2fc50SThomas Huth         rc = read(fd, buf, sizeof(*hdr) + m * HASH_PTE_SIZE_64);
28101ad9f0a4SDavid Gibson         if (rc < 0) {
28111ad9f0a4SDavid Gibson             hw_error("kvmppc_read_hptes: Unable to read HPTEs");
28121ad9f0a4SDavid Gibson         }
28131ad9f0a4SDavid Gibson 
28141ad9f0a4SDavid Gibson         hdr = (struct kvm_get_htab_header *)buf;
28151ad9f0a4SDavid Gibson         while ((i < n) && ((char *)hdr < (buf + rc))) {
2816a36593e1SAlexey Kardashevskiy             int invalid = hdr->n_invalid, valid = hdr->n_valid;
28171ad9f0a4SDavid Gibson 
28181ad9f0a4SDavid Gibson             if (hdr->index != (ptex + i)) {
28191ad9f0a4SDavid Gibson                 hw_error("kvmppc_read_hptes: Unexpected HPTE index %"PRIu32
28201ad9f0a4SDavid Gibson                          " != (%"HWADDR_PRIu" + %d)", hdr->index, ptex, i);
28211ad9f0a4SDavid Gibson             }
28221ad9f0a4SDavid Gibson 
2823a36593e1SAlexey Kardashevskiy             if (n - i < valid) {
2824a36593e1SAlexey Kardashevskiy                 valid = n - i;
2825a36593e1SAlexey Kardashevskiy             }
2826a36593e1SAlexey Kardashevskiy             memcpy(hptes + i, hdr + 1, HASH_PTE_SIZE_64 * valid);
2827a36593e1SAlexey Kardashevskiy             i += valid;
28281ad9f0a4SDavid Gibson 
28291ad9f0a4SDavid Gibson             if ((n - i) < invalid) {
28301ad9f0a4SDavid Gibson                 invalid = n - i;
28311ad9f0a4SDavid Gibson             }
28321ad9f0a4SDavid Gibson             memset(hptes + i, 0, invalid * HASH_PTE_SIZE_64);
2833a36593e1SAlexey Kardashevskiy             i += invalid;
28341ad9f0a4SDavid Gibson 
28351ad9f0a4SDavid Gibson             hdr = (struct kvm_get_htab_header *)
28361ad9f0a4SDavid Gibson                 ((char *)(hdr + 1) + HASH_PTE_SIZE_64 * hdr->n_valid);
28371ad9f0a4SDavid Gibson         }
28381ad9f0a4SDavid Gibson     }
28391ad9f0a4SDavid Gibson 
28401ad9f0a4SDavid Gibson     close(fd);
28411ad9f0a4SDavid Gibson }
28421ad9f0a4SDavid Gibson 
28431ad9f0a4SDavid Gibson void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1)
28447c43bca0SAneesh Kumar K.V {
28451ad9f0a4SDavid Gibson     int fd, rc;
28461ad9f0a4SDavid Gibson     struct {
28471ad9f0a4SDavid Gibson         struct kvm_get_htab_header hdr;
28481ad9f0a4SDavid Gibson         uint64_t pte0;
28491ad9f0a4SDavid Gibson         uint64_t pte1;
28501ad9f0a4SDavid Gibson     } buf;
2851c1385933SAneesh Kumar K.V 
285214b0d748SGreg Kurz     fd = kvmppc_get_htab_fd(true, 0 /* Ignored */, &error_abort);
2853c1385933SAneesh Kumar K.V 
28541ad9f0a4SDavid Gibson     buf.hdr.n_valid = 1;
28551ad9f0a4SDavid Gibson     buf.hdr.n_invalid = 0;
28561ad9f0a4SDavid Gibson     buf.hdr.index = ptex;
28571ad9f0a4SDavid Gibson     buf.pte0 = cpu_to_be64(pte0);
28581ad9f0a4SDavid Gibson     buf.pte1 = cpu_to_be64(pte1);
28591ad9f0a4SDavid Gibson 
28601ad9f0a4SDavid Gibson     rc = write(fd, &buf, sizeof(buf));
28611ad9f0a4SDavid Gibson     if (rc != sizeof(buf)) {
28621ad9f0a4SDavid Gibson         hw_error("kvmppc_write_hpte: Unable to update KVM HPT");
2863c1385933SAneesh Kumar K.V     }
28641ad9f0a4SDavid Gibson     close(fd);
2865c1385933SAneesh Kumar K.V }
28669e03a040SFrank Blaschka 
28679e03a040SFrank Blaschka int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
2868dc9f06caSPavel Fedin                              uint64_t address, uint32_t data, PCIDevice *dev)
28699e03a040SFrank Blaschka {
28709e03a040SFrank Blaschka     return 0;
28719e03a040SFrank Blaschka }
28721850b6b7SEric Auger 
287338d87493SPeter Xu int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
287438d87493SPeter Xu                                 int vector, PCIDevice *dev)
287538d87493SPeter Xu {
287638d87493SPeter Xu     return 0;
287738d87493SPeter Xu }
287838d87493SPeter Xu 
287938d87493SPeter Xu int kvm_arch_release_virq_post(int virq)
288038d87493SPeter Xu {
288138d87493SPeter Xu     return 0;
288238d87493SPeter Xu }
288338d87493SPeter Xu 
28841850b6b7SEric Auger int kvm_arch_msi_data_to_gsi(uint32_t data)
28851850b6b7SEric Auger {
28861850b6b7SEric Auger     return data & 0xffff;
28871850b6b7SEric Auger }
28884d9392beSThomas Huth 
2889566abdb4SPaolo Bonzini #if defined(CONFIG_PSERIES)
28909ac703acSAravinda Prasad int kvm_handle_nmi(PowerPCCPU *cpu, struct kvm_run *run)
28919ac703acSAravinda Prasad {
2892211a7784SGanesh Goudar     uint16_t flags = run->flags & KVM_RUN_PPC_NMI_DISP_MASK;
289381fe70e4SAravinda Prasad 
28949ac703acSAravinda Prasad     cpu_synchronize_state(CPU(cpu));
28959ac703acSAravinda Prasad 
2896211a7784SGanesh Goudar     spapr_mce_req_event(cpu, flags == KVM_RUN_PPC_NMI_DISP_FULLY_RECOV);
28979ac703acSAravinda Prasad 
28989ac703acSAravinda Prasad     return 0;
28999ac703acSAravinda Prasad }
29009ac703acSAravinda Prasad #endif
29019ac703acSAravinda Prasad 
29024d9392beSThomas Huth int kvmppc_enable_hwrng(void)
29034d9392beSThomas Huth {
29044d9392beSThomas Huth     if (!kvm_enabled() || !kvm_check_extension(kvm_state, KVM_CAP_PPC_HWRNG)) {
29054d9392beSThomas Huth         return -1;
29064d9392beSThomas Huth     }
29074d9392beSThomas Huth 
29084d9392beSThomas Huth     return kvmppc_enable_hcall(kvm_state, H_RANDOM);
29094d9392beSThomas Huth }
291030f4b05bSDavid Gibson 
291130f4b05bSDavid Gibson void kvmppc_check_papr_resize_hpt(Error **errp)
291230f4b05bSDavid Gibson {
291330f4b05bSDavid Gibson     if (!kvm_enabled()) {
2914b55d295eSDavid Gibson         return; /* No KVM, we're good */
2915b55d295eSDavid Gibson     }
2916b55d295eSDavid Gibson 
2917b55d295eSDavid Gibson     if (cap_resize_hpt) {
2918b55d295eSDavid Gibson         return; /* Kernel has explicit support, we're good */
2919b55d295eSDavid Gibson     }
2920b55d295eSDavid Gibson 
2921b55d295eSDavid Gibson     /* Otherwise fallback on looking for PR KVM */
2922b55d295eSDavid Gibson     if (kvmppc_is_pr(kvm_state)) {
292330f4b05bSDavid Gibson         return;
292430f4b05bSDavid Gibson     }
292530f4b05bSDavid Gibson 
292630f4b05bSDavid Gibson     error_setg(errp,
292730f4b05bSDavid Gibson                "Hash page table resizing not available with this KVM version");
292830f4b05bSDavid Gibson }
2929b55d295eSDavid Gibson 
2930b55d295eSDavid Gibson int kvmppc_resize_hpt_prepare(PowerPCCPU *cpu, target_ulong flags, int shift)
2931b55d295eSDavid Gibson {
2932b55d295eSDavid Gibson     CPUState *cs = CPU(cpu);
2933b55d295eSDavid Gibson     struct kvm_ppc_resize_hpt rhpt = {
2934b55d295eSDavid Gibson         .flags = flags,
2935b55d295eSDavid Gibson         .shift = shift,
2936b55d295eSDavid Gibson     };
2937b55d295eSDavid Gibson 
2938b55d295eSDavid Gibson     if (!cap_resize_hpt) {
2939b55d295eSDavid Gibson         return -ENOSYS;
2940b55d295eSDavid Gibson     }
2941b55d295eSDavid Gibson 
2942b55d295eSDavid Gibson     return kvm_vm_ioctl(cs->kvm_state, KVM_PPC_RESIZE_HPT_PREPARE, &rhpt);
2943b55d295eSDavid Gibson }
2944b55d295eSDavid Gibson 
2945b55d295eSDavid Gibson int kvmppc_resize_hpt_commit(PowerPCCPU *cpu, target_ulong flags, int shift)
2946b55d295eSDavid Gibson {
2947b55d295eSDavid Gibson     CPUState *cs = CPU(cpu);
2948b55d295eSDavid Gibson     struct kvm_ppc_resize_hpt rhpt = {
2949b55d295eSDavid Gibson         .flags = flags,
2950b55d295eSDavid Gibson         .shift = shift,
2951b55d295eSDavid Gibson     };
2952b55d295eSDavid Gibson 
2953b55d295eSDavid Gibson     if (!cap_resize_hpt) {
2954b55d295eSDavid Gibson         return -ENOSYS;
2955b55d295eSDavid Gibson     }
2956b55d295eSDavid Gibson 
2957b55d295eSDavid Gibson     return kvm_vm_ioctl(cs->kvm_state, KVM_PPC_RESIZE_HPT_COMMIT, &rhpt);
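/*
 * The two resize helpers above back the guest-visible
 * H_RESIZE_HPT_PREPARE/H_RESIZE_HPT_COMMIT flow: prepare asks the
 * kernel to build a new HPT of 2^shift bytes, commit switches the
 * guest onto it.  A simplified sketch of the caller side (the real
 * sequencing, including the "still busy" return values, lives in the
 * sPAPR machine code):
 *
 *     ret = kvmppc_resize_hpt_prepare(cpu, flags, shift);
 *     if (ret == 0) {
 *         ret = kvmppc_resize_hpt_commit(cpu, flags, shift);
 *     }
 */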
2958b55d295eSDavid Gibson }
2959b55d295eSDavid Gibson 
2960c363a37aSDaniel Henrique Barboza /*
2961c363a37aSDaniel Henrique Barboza  * This is a helper function to detect a post migration scenario
2962c363a37aSDaniel Henrique Barboza  * in which a guest, running as KVM-HV, freezes in cpu_post_load because
2963c363a37aSDaniel Henrique Barboza  * the guest kernel can't handle a PVR value other than the actual host
2964c363a37aSDaniel Henrique Barboza  * PVR in KVM_SET_SREGS, even if pvr_match() returns true.
2965c363a37aSDaniel Henrique Barboza  *
2966c363a37aSDaniel Henrique Barboza  * If we don't have cap_ppc_pvr_compat and we're not running in PR
2967c363a37aSDaniel Henrique Barboza  * (so, we're HV), return true. The workaround itself is done in
2968c363a37aSDaniel Henrique Barboza  * cpu_post_load.
2969c363a37aSDaniel Henrique Barboza  *
2970c363a37aSDaniel Henrique Barboza  * The order here is important: we'll only check for KVM PR as a
2971c363a37aSDaniel Henrique Barboza  * fallback if the guest kernel can't handle the situation itself.
2972c363a37aSDaniel Henrique Barboza  * We want to avoid querying the running KVM type at the QEMU level
2973c363a37aSDaniel Henrique Barboza  * as much as possible.
2974c363a37aSDaniel Henrique Barboza  */
2975c363a37aSDaniel Henrique Barboza bool kvmppc_pvr_workaround_required(PowerPCCPU *cpu)
2976c363a37aSDaniel Henrique Barboza {
2977c363a37aSDaniel Henrique Barboza     CPUState *cs = CPU(cpu);
2978c363a37aSDaniel Henrique Barboza 
2979c363a37aSDaniel Henrique Barboza     if (!kvm_enabled()) {
2980c363a37aSDaniel Henrique Barboza         return false;
2981c363a37aSDaniel Henrique Barboza     }
2982c363a37aSDaniel Henrique Barboza 
2983c363a37aSDaniel Henrique Barboza     if (cap_ppc_pvr_compat) {
2984c363a37aSDaniel Henrique Barboza         return false;
2985c363a37aSDaniel Henrique Barboza     }
2986c363a37aSDaniel Henrique Barboza 
2987c363a37aSDaniel Henrique Barboza     return !kvmppc_is_pr(cs->kvm_state);
2988c363a37aSDaniel Henrique Barboza }
2989a84f7179SNikunj A Dadhania 
2990a84f7179SNikunj A Dadhania void kvmppc_set_reg_ppc_online(PowerPCCPU *cpu, unsigned int online)
2991a84f7179SNikunj A Dadhania {
2992a84f7179SNikunj A Dadhania     CPUState *cs = CPU(cpu);
2993a84f7179SNikunj A Dadhania 
2994a84f7179SNikunj A Dadhania     if (kvm_enabled()) {
2995a84f7179SNikunj A Dadhania         kvm_set_one_reg(cs, KVM_REG_PPC_ONLINE, &online);
2996a84f7179SNikunj A Dadhania     }
2997a84f7179SNikunj A Dadhania }
29989723295aSGreg Kurz 
29999723295aSGreg Kurz void kvmppc_set_reg_tb_offset(PowerPCCPU *cpu, int64_t tb_offset)
30009723295aSGreg Kurz {
30019723295aSGreg Kurz     CPUState *cs = CPU(cpu);
30029723295aSGreg Kurz 
30039723295aSGreg Kurz     if (kvm_enabled()) {
30049723295aSGreg Kurz         kvm_set_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &tb_offset);
30059723295aSGreg Kurz     }
30069723295aSGreg Kurz }
300792a5199bSTom Lendacky 
30083dba0a33SPaolo Bonzini void kvm_arch_accel_class_init(ObjectClass *oc)
30093dba0a33SPaolo Bonzini {
30103dba0a33SPaolo Bonzini }
3011cfb52d07SHarsh Prateek Bora 
301212d1a768SPhilippe Mathieu-Daudé static void kvm_cpu_accel_class_init(ObjectClass *oc, const void *data)
3013cfb52d07SHarsh Prateek Bora {
3014cfb52d07SHarsh Prateek Bora     AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);
3015cfb52d07SHarsh Prateek Bora 
3016cfb52d07SHarsh Prateek Bora     acc->cpu_target_realize = kvmppc_cpu_realize;
3017cfb52d07SHarsh Prateek Bora }
3018cfb52d07SHarsh Prateek Bora 
3019cfb52d07SHarsh Prateek Bora static const TypeInfo kvm_cpu_accel_type_info = {
3020cfb52d07SHarsh Prateek Bora     .name = ACCEL_CPU_NAME("kvm"),
3021cfb52d07SHarsh Prateek Bora 
3022cfb52d07SHarsh Prateek Bora     .parent = TYPE_ACCEL_CPU,
3023cfb52d07SHarsh Prateek Bora     .class_init = kvm_cpu_accel_class_init,
3024cfb52d07SHarsh Prateek Bora     .abstract = true,
3025cfb52d07SHarsh Prateek Bora };
3026cfb52d07SHarsh Prateek Bora static void kvm_cpu_accel_register_types(void)
3027cfb52d07SHarsh Prateek Bora {
3028cfb52d07SHarsh Prateek Bora     type_register_static(&kvm_cpu_accel_type_info);
3029cfb52d07SHarsh Prateek Bora }
3030cfb52d07SHarsh Prateek Bora type_init(kvm_cpu_accel_register_types);
3031