Lines Matching +full:spe +full:- +full:pmu
1 /* SPDX-License-Identifier: GPL-2.0-only */
3 * Copyright (C) 2012,2013 - ARM Ltd
7 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
14 #include <linux/arm-smccc.h>
43 #define KVM_VCPU_VALID_FEATURES (BIT(KVM_VCPU_MAX_FEATURES) - 1)
61 * Mode of operation configurable with kvm-arm.mode early param.
62 * See Documentation/admin-guide/kernel-parameters.txt for more information.
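In practice this mode is chosen on the kernel command line, e.g. kvm-arm.mode=nvhe to force the nVHE hypervisor on VHE-capable hardware, or kvm-arm.mode=protected to enable protected (pKVM) guests; the kernel-parameters.txt entry referenced above lists the accepted values.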
94 *p = mc->head; in push_hyp_memcache()
95 mc->head = to_pa(p); in push_hyp_memcache()
96 mc->nr_pages++; in push_hyp_memcache()
102 phys_addr_t *p = to_va(mc->head); in pop_hyp_memcache()
104 if (!mc->nr_pages) in pop_hyp_memcache()
107 mc->head = *p; in pop_hyp_memcache()
108 mc->nr_pages--; in pop_hyp_memcache()
119 while (mc->nr_pages < min_pages) { in __topup_hyp_memcache()
123 return -ENOMEM; in __topup_hyp_memcache()
135 while (mc->nr_pages) in __free_hyp_memcache()
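The memcache fragments above implement a small stack of spare pages for the hypervisor: mc->head holds the physical address of the top page, and each page stores the address of the next one in its own first word, so push and pop need no separate metadata; __topup_hyp_memcache() keeps allocating until min_pages entries are cached and returns -ENOMEM if it cannot. A minimal standalone sketch of that intrusive-stack idea, with illustrative names and flat addressing (the kernel additionally converts between host/hyp views via the to_pa()/to_va() callbacks):

#include <stddef.h>

struct demo_memcache {
	unsigned long head;		/* address of the top cached page, 0 when empty */
	unsigned long nr_pages;
};

static void demo_push(struct demo_memcache *mc, unsigned long *page)
{
	*page = mc->head;		/* previous top is remembered inside the page */
	mc->head = (unsigned long)page;
	mc->nr_pages++;
}

static unsigned long *demo_pop(struct demo_memcache *mc)
{
	unsigned long *page = (unsigned long *)mc->head;

	if (!mc->nr_pages)
		return NULL;

	mc->head = *page;		/* the page's first word points at the next entry */
	mc->nr_pages--;
	return page;
}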
154 * translation regime that isn't affected by its own stage-2
155 * translation, such as a non-VHE hypervisor running at vEL2, or
157 * canonical stage-2 page tables.
163 * VTCR value used on the host. For a non-NV guest (or a NV
185 * Protected by kvm->slots_lock.
223 unsigned long mask = data->mpidr_mask; in kvm_mpidr_index()
229 * If this looks like RISC-V's BEXT or x86's PEXT in kvm_mpidr_index()
234 index |= (aff & BIT(bit)) >> (bit - bit_idx); in kvm_mpidr_index()
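The loop in kvm_mpidr_index() gathers the MPIDR affinity bits selected by data->mpidr_mask into a dense index, which is exactly what RISC-V's BEXT and x86's PEXT do in hardware. A standalone sketch of that bit-gather (illustrative helper, not the kernel function):

/* Collect the bits of 'val' selected by 'mask' into contiguous low-order bits. */
static unsigned int demo_gather_bits(unsigned long val, unsigned long mask)
{
	unsigned int index = 0, out = 0;
	unsigned int bit;

	for (bit = 0; bit < 8 * sizeof(mask); bit++) {
		if (!(mask & (1UL << bit)))
			continue;
		if (val & (1UL << bit))
			index |= 1U << out;
		out++;
	}

	return index;		/* e.g. mask 0x0f00, val 0x0a00 -> 0xa */
}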
253 /* Protects VM-scoped configuration data */
279 /* VM-wide vCPU feature set */
286 * VM-wide PMU filter, implemented as a bitmap and big enough for
306 * These emulated idregs are VM-wide, but accessed from the context of a vCPU.
309 #define IDREG_IDX(id) (((sys_reg_CRm(id) - 1) << 3) | sys_reg_Op2(id))
311 #define IDREG(kvm, id) ((kvm)->arch.id_regs[IDREG_IDX(id)])
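IDREG_IDX() turns an ID register encoding into an index for the VM-wide id_regs[] array: the feature ID registers all sit at Op0=3, Op1=0, CRn=0 with CRm in 1..7 and Op2 in 0..7, so (CRm - 1) * 8 + Op2 numbers them densely. As a worked example, ID_AA64PFR0_EL1 (CRm=4, Op2=0) maps to index (4 - 1) * 8 + 0 = 24. A plain-integer restatement of the packing (illustrative helper, not the kernel macro):

static inline unsigned int demo_idreg_idx(unsigned int crm, unsigned int op2)
{
	return ((crm - 1) << 3) | op2;	/* ID_AA64PFR0_EL1: crm 4, op2 0 -> 24 */
}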
331 * __VNCR_START__, and the value (after correction) to be an 8-byte offset
340 #define __MAX__(x,y) ((x) ^ (((x) ^ (y)) & -((x) < (y))))
344 __after_##r = __MAX__(__before_##r - 1, r)
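__MAX__() is a branch-free maximum that remains an integer constant expression, which is why it can be used in the register-layout definitions that follow: when x < y the comparison is 1, so -((x) < (y)) is an all-ones mask and the result is x ^ (x ^ y) == y; otherwise the mask is zero and x is returned unchanged. A small check of that identity (repeating the expression here only for illustration):

#define DEMO_MAX(x, y)	((x) ^ (((x) ^ (y)) & -((x) < (y))))

_Static_assert(DEMO_MAX(3, 7) == 7, "x < y picks y");
_Static_assert(DEMO_MAX(7, 3) == 7, "x >= y picks x");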
363 PMEVCNTR0_EL0, /* Event Counter Register (0-30) */
366 PMEVTYPER0_EL0, /* Event Type Register (0-30) */
394 FPEXC32_EL2, /* Floating-Point Exception Control Register */
418 CNTHCTL_EL2, /* Counter-timer Hypervisor Control register */
425 __VNCR_START__, /* Any VNCR-capable reg goes after this point */
611 /* Self-hosted trace */
618 struct kvm_pmu pmu;
651 /* Per-vcpu CCSIDR override or NULL */
656 * Each 'flag' is composed of a comma-separated triplet:
658 * - the flag-set it belongs to in the vcpu->arch structure
659 * - the value for that flag
660 * - the mask for that flag
662 * __vcpu_single_flag() builds such a triplet for a single-bit flag.
673 typeof(v->arch.flagset) *_fset; \
685 READ_ONCE(v->arch.flagset) & (m); \
689 * Note that the set/clear accessors must be preempt-safe in order to
693 /* the nVHE hypervisor is always non-preemptible */
703 typeof(v->arch.flagset) *fset; \
707 fset = &v->arch.flagset; \
717 typeof(v->arch.flagset) *fset; \
721 fset = &v->arch.flagset; \
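The accessors above consume the (flag-set, value, mask) triplet described earlier: a flag name expands to three macro arguments, so one macro can locate the right field in vcpu->arch, read it with READ_ONCE() masked by the flag's mask, or rewrite only the masked bits; the real set/clear paths additionally guard against preemption (the nVHE hypervisor is never preemptible, so it skips that). A much-simplified standalone sketch of the triplet trick, with hypothetical names and no preemption handling or build-time checks:

#include <stdint.h>

struct demo_arch {
	uint8_t iflags;			/* stand-in for one vcpu->arch flag set */
};

/* A 'flag' expands to a (flag-set, value, mask) triplet. */
#define DEMO_SINGLE_FLAG(fset, bit)	fset, (bit), (bit)
#define DEMO_IN_WFI			DEMO_SINGLE_FLAG(iflags, 1U << 0)

#define __demo_get_flag(a, fset, v, m)	((a)->fset & (m))
#define __demo_set_flag(a, fset, v, m)		\
	do {					\
		(a)->fset &= ~(m);		\
		(a)->fset |= (v);		\
	} while (0)

/* The flag name is a single argument here, but becomes three after expansion. */
#define demo_get_flag(a, ...)	__demo_get_flag((a), __VA_ARGS__)
#define demo_set_flag(a, ...)	__demo_set_flag((a), __VA_ARGS__)

/* Usage: demo_set_flag(&arch, DEMO_IN_WFI); demo_get_flag(&arch, DEMO_IN_WFI); */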
776 /* Save SPE context if active */
793 /* Software step state is Active-pending */
802 #define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) + \
803 sve_ffr_offset((vcpu)->arch.sve_max_vl))
805 #define vcpu_sve_max_vq(vcpu) sve_vq_from_vl((vcpu)->arch.sve_max_vl)
811 if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) { \
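The SVE helpers above are mostly unit bookkeeping: sve_max_vl is a vector length in bytes, and a vector quadword (VQ) is 128 bits, i.e. 16 bytes, so vcpu_sve_max_vq() is a divide-by-16, while vcpu_sve_pffr() adds the FFR offset for that vector length to the hyp-VA-translated SVE save area. A small worked example, assuming a 512-bit implementation (illustrative helper, not the kernel macro):

/* 512-bit vectors give sve_max_vl = 64 bytes, so max_vq = 64 / 16 = 4. */
static inline unsigned int demo_vq_from_vl_bytes(unsigned int vl_bytes)
{
	return vl_bytes / 16;
}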
847 #define vcpu_gp_regs(v) (&(v)->arch.ctxt.regs)
856 * Don't bother with VNCR-based accesses in the nVHE code, it has no
863 r >= __VNCR_START__ && ctxt->vncr_array)) in __ctxt_sys_reg()
864 return &ctxt->vncr_array[r - __VNCR_START__]; in __ctxt_sys_reg()
866 return (u64 *)&ctxt->sys_regs[r]; in __ctxt_sys_reg()
871 #define __vcpu_sys_reg(v,r) (ctxt_sys_reg(&(v)->arch.ctxt, (r)))
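__ctxt_sys_reg() hides where a system register actually lives: on NV-capable hosts, registers numbered at or above __VNCR_START__ are redirected into the vCPU's VNCR page when one is attached, and everything else (including all accesses from the nVHE hypervisor, which never deals with NV) falls back to the plain sys_regs[] array. A simplified sketch of that selection, with illustrative types and sizes:

#include <stdint.h>
#include <stddef.h>

#define DEMO_NR_SYS_REGS	8
#define DEMO_VNCR_START		4	/* stand-in for __VNCR_START__ */

struct demo_cpu_context {
	uint64_t sys_regs[DEMO_NR_SYS_REGS];
	uint64_t *vncr_array;		/* NULL until a VNCR page is mapped */
};

static uint64_t *demo_ctxt_sys_reg(struct demo_cpu_context *ctxt, unsigned int r)
{
	/* VNCR-capable registers live in the VNCR page when one exists. */
	if (r >= DEMO_VNCR_START && ctxt->vncr_array)
		return &ctxt->vncr_array[r - DEMO_VNCR_START];

	return &ctxt->sys_regs[r];
}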
887 * thread when emulating cross-VCPU communication. in __vcpu_read_sys_reg_from_cpu()
999 #define vcpu_has_run_once(vcpu) !!rcu_access_pointer((vcpu)->pid)
1103 vcpu_arch->steal.base = INVALID_GPA; in kvm_arm_pvtime_vcpu_init()
1108 return (vcpu_arch->steal.base != INVALID_GPA); in kvm_arm_is_pvtime_enabled()
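The pvtime fragments rely on a sentinel: kvm_arm_pvtime_vcpu_init() parks steal.base at INVALID_GPA, and the feature only reads back as enabled once userspace has installed a real guest physical address for the steal-time structure. The pattern reduced to its core, with illustrative names and sentinel value:

#include <stdint.h>

#define DEMO_INVALID_GPA	(~(uint64_t)0)

struct demo_pvtime {
	uint64_t base;			/* guest PA of the steal-time page */
};

static inline int demo_pvtime_enabled(const struct demo_pvtime *st)
{
	return st->base != DEMO_INVALID_GPA;
}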
1164 return (!has_vhe() && attr->exclude_host); in kvm_pmu_counter_deferred()
1208 test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &(kvm)->arch.flags))
1215 (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &(kvm)->arch.flags))
1219 return test_bit(feature, ka->vcpu_features); in __vcpu_has_feature()
1222 #define vcpu_has_feature(v, f) __vcpu_has_feature(&(v)->kvm->arch, (f))
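vcpu_has_feature() simply tests the VM-wide vcpu_features bitmap built from the KVM_ARM_VCPU_* init features, so emulation paths can key off what userspace asked for. A hedged usage sketch (the wrapper function is illustrative; KVM_ARM_VCPU_PMU_V3 is the feature bit userspace sets to request a PMU):

/* Illustrative wrapper: gate PMU emulation on the userspace-requested feature. */
static bool demo_vcpu_wants_pmu(struct kvm_vcpu *vcpu)
{
	return vcpu_has_feature(vcpu, KVM_ARM_VCPU_PMU_V3);
}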