Lines matching "imp - res - offset - value"

/* SPDX-License-Identifier: GPL-2.0-only */
 * Copyright (C) 2012,2013 - ARM Ltd
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University

#include <linux/arm-smccc.h>

#define KVM_VCPU_VALID_FEATURES (BIT(KVM_VCPU_MAX_FEATURES) - 1)

 * Mode of operation configurable with kvm-arm.mode early param.
 * See Documentation/admin-guide/kernel-parameters.txt for more information.

/* in push_hyp_memcache() */
	*p = mc->head;
	mc->head = to_pa(p);
	mc->nr_pages++;

/* in pop_hyp_memcache() */
	phys_addr_t *p = to_va(mc->head & PAGE_MASK);
	if (!mc->nr_pages)
	mc->head = *p;
	mc->nr_pages--;

/* in __topup_hyp_memcache() */
	while (mc->nr_pages < min_pages) {
		return -ENOMEM;

/* in __free_hyp_memcache() */
	while (mc->nr_pages)
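/*
 * The push/pop fragments above implement a stack of free pages threaded
 * through the pages themselves: mc->head holds the physical address of the
 * top page, and the first word of each page stores the PA of the next one.
 * PAs are used because host and hyp run with different virtual mappings,
 * hence the to_pa()/to_va() callbacks. Below is a minimal standalone sketch
 * of the same idea; the toy_* names are illustrative, not the kernel API.
 */
#include <stddef.h>

#define TOY_PAGE_MASK	(~4095UL)

struct toy_memcache {
	unsigned long head;	/* PA of the top page, 0 when empty */
	unsigned long nr_pages;
};

static void toy_push(struct toy_memcache *mc, unsigned long *page,
		     unsigned long (*to_pa)(void *))
{
	*page = mc->head;	/* link the new page to the previous top */
	mc->head = to_pa(page);
	mc->nr_pages++;
}

static void *toy_pop(struct toy_memcache *mc, void *(*to_va)(unsigned long))
{
	unsigned long *p;

	if (!mc->nr_pages)
		return NULL;

	p = to_va(mc->head & TOY_PAGE_MASK);
	mc->head = *p;		/* unlink: the next PA was stored in the page */
	mc->nr_pages--;
	return p;
}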
 * translation regime that isn't affected by its own stage-2
 * translation, such as a non-VHE hypervisor running at vEL2, or

 * canonical stage-2 page tables.

 * VTCR value used on the host. For a non-NV guest (or a NV

 * apply), its T0SZ value reflects that of the IPA size.

 * Protected by kvm->slots_lock.

 * For a shadow stage-2 MMU, the virtual vttbr used by the

 * - the virtual VTTBR programmed by the guest hypervisor with

 * - The value 1 (VMID=0, BADDR=0, CnP=1) if invalid

	unsigned long vendor_hyp_bmap;		/* Function numbers 0-63 */
	unsigned long vendor_hyp_bmap_2;	/* Function numbers 64-127 */

/* in kvm_mpidr_index() */
	unsigned long index = 0, mask = data->mpidr_mask;
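/*
 * kvm_mpidr_index() compresses the MPIDR affinity bits selected by the
 * precomputed data->mpidr_mask into a dense index for the vcpu lookup
 * table. The kernel uses a bitmap helper for this; the loop below is only
 * an illustrative equivalent of that bit-gathering step (the semantics of
 * x86's PEXT), with gather_bits() being a hypothetical name:
 */
static unsigned long gather_bits(unsigned long val, unsigned long mask)
{
	unsigned long index = 0;
	unsigned int out = 0;

	for (unsigned int bit = 0; bit < 8 * sizeof(val); bit++) {
		if (mask & (1UL << bit)) {
			if (val & (1UL << bit))
				index |= 1UL << out;
			out++;
		}
	}
	return index;	/* e.g. gather_bits(0x0101, 0x0f0f) == 0x11 */
}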
 * Fine-Grained UNDEF, mimicking the FGT layout defined by the

 * same feature-set to all vcpus.

	/* Protects VM-scoped configuration data */

	/* VM counter offset */

	/* Fine-Grained UNDEF initialised */

	/* VM-wide vCPU feature set */

 * VM-wide PMU filter, implemented as a bitmap and big enough for

	/* PMCR_EL0.N value for the guest */

 * These emulated idregs are VM-wide, but accessed from the context of a vCPU.

#define IDREG_IDX(id)	(((sys_reg_CRm(id) - 1) << 3) | sys_reg_Op2(id))
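/*
 * Worked example for IDREG_IDX(): the AArch64 ID registers all live at
 * op0=3, op1=0, CRn=0, so only CRm (1..7) and op2 (0..7) vary, and the
 * macro packs those two fields into a dense array index. ID_AA64PFR0_EL1
 * is encoded with CRm=4, op2=0, so:
 *
 *	IDREG_IDX(ID_AA64PFR0_EL1) == ((4 - 1) << 3) | 0 == 24
 */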
/* Masks for VNCR-backed and general EL2 sysregs */

 * __VNCR_START__, and the value (after correction) to be an 8-byte offset

#define __MAX__(x,y)	((x) ^ (((x) ^ (y)) & -((x) < (y))))

	__after_##r = __MAX__(__before_##r - 1, r)

	m, __after_##m = m - 1
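/*
 * __MAX__ is a branchless max that stays a constant expression, so it can
 * be used in the enum initialisers above, where a function call could not.
 * When x < y, -((x) < (y)) is -1, i.e. all ones, and the expression
 * reduces to x ^ (x ^ y) == y; otherwise the mask is 0 and it yields
 * x ^ 0 == x. For instance:
 */
_Static_assert(((3) ^ (((3) ^ (7)) & -((3) < (7)))) == 7, "__MAX__(3, 7) == 7");
_Static_assert(((9) ^ (((9) ^ (2)) & -((9) < (2)))) == 9, "__MAX__(9, 2) == 9");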
	__INVALID_SYSREG__,	/* 0 is reserved as an invalid value */

	PMEVCNTR0_EL0,		/* Event Counter Register (0-30) */
	PMEVTYPER0_EL0,		/* Event Type Register (0-30) */

	FPEXC32_EL2,		/* Floating-Point Exception Control Register */

	CNTHCTL_EL2,		/* Counter-timer Hypervisor Control register */

	/* Any VNCR-capable reg goes after this point */

	} mask[NR_SYS_REGS - __SANITISED_REG_START__];

 * This structure is instantiated on a per-CPU basis, and contains
 * data that is:
 * - tied to a single physical CPU, and
 * - either has a lifetime that does not extend past vcpu_put()
 * - or is an invariant for the lifetime of the system

	/* Self-hosted trace */

	/* Guest trace filter value */

	/* Pages to top-up the pKVM/EL2 guest pool */

	/* Per-vcpu CCSIDR override or NULL */
 * Each 'flag' is composed of a comma-separated triplet:

 * - the flag-set it belongs to in the vcpu->arch structure
 * - the value for that flag
 * - the mask for that flag

 * __vcpu_single_flag() builds such a triplet for a single-bit flag.
 * unpack_vcpu_flag() extracts the flag value from the triplet for
		typeof(v->arch.flagset) *_fset;			\

		READ_ONCE(v->arch.flagset) & (m);		\

 * Note that the set/clear accessors must be preempt-safe in order to

	/* the nVHE hypervisor is always non-preemptible */

		typeof(v->arch.flagset) *fset;			\

		fset = &v->arch.flagset;			\

		typeof(v->arch.flagset) *fset;			\

		fset = &v->arch.flagset;			\
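/*
 * How the triplet plumbing above fits together (simplified; the real
 * macros also add build-time sanity checks): a flag is declared as a
 * "flagset, value, mask" triplet, and the accessors splice that triplet
 * straight into their parameter lists, so a single macro argument names
 * both where the flag lives and which bits encode it. Roughly, for a
 * hypothetical DEMO_FLAG:
 *
 *	#define __vcpu_single_flag(_set, _f)	_set, (_f), (_f)
 *	#define DEMO_FLAG	__vcpu_single_flag(sflags, BIT(0))
 *
 *	vcpu_get_flag(vcpu, DEMO_FLAG)
 *	  -> __vcpu_get_flag(vcpu, sflags, BIT(0), BIT(0))
 *	  -> READ_ONCE(vcpu->arch.sflags) & BIT(0)
 */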
	/* Software step state is Active-pending for external debug */

#define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) +	\
			     sve_ffr_offset((vcpu)->arch.sve_max_vl))

#define vcpu_sve_max_vq(vcpu)	sve_vq_from_vl((vcpu)->arch.sve_max_vl)

	if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {		\
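/*
 * Units in the SVE fragments above: a "VQ" is the vector length counted
 * in 128-bit quadwords, so for a vector length of vl bytes,
 * sve_vq_from_vl(vl) == vl / 16; e.g. a 512-bit implementation has
 * vl == 64 and VQ == 4. vcpu_sve_pffr() uses the maximum VL to locate
 * the FFR predicate register inside the flat sve_state save area, whose
 * layout scales with the vector length.
 */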
	 test_bit(KVM_ARCH_FLAG_GUEST_HAS_SVE, &(kvm)->arch.flags))

#define vcpu_has_sve(vcpu)	kvm_has_sve(kern_hyp_va((vcpu)->kvm))

#define vcpu_has_sve(vcpu)	kvm_has_sve((vcpu)->kvm)

#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.regs)

 * Don't bother with VNCR-based accesses in the nVHE code, it has no

/* in ___ctxt_sys_reg() */
		     r >= __VNCR_START__ && ctxt->vncr_array))
		return &ctxt->vncr_array[r - __VNCR_START__];

	return (u64 *)&ctxt->sys_regs[r];
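/*
 * Shape of ___ctxt_sys_reg() above, as a standalone sketch (the toy_*
 * names and sizes are illustrative): one enum indexes two backing stores.
 * Registers from __VNCR_START__ up live in the VNCR page when the vcpu
 * has one (nested virt, where the hardware itself redirects accesses to
 * that page); everything else sits in the ordinary sys_regs[] array.
 */
#include <stdint.h>

enum {
	TOY_VNCR_START	= 128,	/* stand-in for __VNCR_START__ */
	TOY_NR_REGS	= 192,	/* stand-in for NR_SYS_REGS */
};

struct toy_ctxt {
	uint64_t sys_regs[TOY_NR_REGS];	/* fallback storage for all regs */
	uint64_t *vncr_array;		/* NULL unless a VNCR page exists */
};

static uint64_t *toy_sys_reg(struct toy_ctxt *ctxt, unsigned int r)
{
	if (r >= TOY_VNCR_START && ctxt->vncr_array)
		return &ctxt->vncr_array[r - TOY_VNCR_START];

	return &ctxt->sys_regs[r];
}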
	const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt;		\

 * thread when emulating cross-VCPU communication. (in __vcpu_read_sys_reg_from_cpu())

#define vcpu_has_run_once(vcpu)	(!!READ_ONCE((vcpu)->pid))

		struct arm_smccc_res res;				\

			     ##__VA_ARGS__, &res);			\
		WARN_ON(res.a0 != SMCCC_RET_SUCCESS);			\

		res.a1;							\
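/*
 * The res.a0/res.a1 fragments above are the host-to-hyp call convention:
 * the host issues an SMCCC HVC into the hypervisor, a0 carries the status
 * and a1 the actual return value. A simplified sketch of that shape (not
 * the exact kernel macro, which also translates the function ID first):
 */
#define toy_call_hyp_ret(fid, ...)					\
({									\
	struct arm_smccc_res res;					\
									\
	arm_smccc_1_1_hvc(fid, ##__VA_ARGS__, &res);			\
	WARN_ON(res.a0 != SMCCC_RET_SUCCESS);	/* hyp-side failure */	\
	res.a1;			/* value of the statement expression */	\
})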
/* in kvm_arm_pvtime_vcpu_init() */
	vcpu_arch->steal.base = INVALID_GPA;

/* in kvm_arm_is_pvtime_enabled() */
	return (vcpu_arch->steal.base != INVALID_GPA);
 * How we access per-CPU host data depends on where we access it from,
 * - VHE and nVHE hypervisor bits use their locally defined instance

 * - the rest of the kernel use either the VHE or nVHE one, depending on

 *   per-CPU stuff is exclusively accessible to the protected EL2 code.

#define host_data_ptr(f)	(&this_cpu_ptr(&kvm_host_data)->f)

		&this_cpu_ptr(&kvm_host_data)->f :			\
		&this_cpu_ptr_hyp_sym(kvm_host_data)->f)
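/*
 * Usage sketch for host_data_ptr(): it hides which per-CPU copy of
 * kvm_host_data (the EL2-local one, or the VHE/nVHE kernel one) backs
 * the field, so the same expression works in kernel and hyp code alike.
 * With host_ctxt being a field of struct kvm_host_data, for example:
 *
 *	struct kvm_cpu_context *host_ctxt = host_data_ptr(host_ctxt);
 *	u64 sp = host_ctxt->regs.sp;
 */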
	((vcpu)->arch.debug_owner != VCPU_DEBUG_FREE)
	((vcpu)->arch.debug_owner == VCPU_DEBUG_HOST_OWNED)
	((vcpu)->arch.debug_owner == VCPU_DEBUG_GUEST_OWNED)

			     struct kvm_arm_counter_offset *offset);

/* in kvm_pmu_counter_deferred() */
	return (!has_vhe() && attr->exclude_host);

#define kvm_vm_is_protected(kvm)	(is_protected_kvm_enabled() && (kvm)->arch.pkvm.enabled)

#define vcpu_is_protected(vcpu)		kvm_vm_is_protected((vcpu)->kvm)

	 test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &(kvm)->arch.flags))

	(test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &(kvm)->arch.flags))

/* in __vcpu_has_feature() */
	return test_bit(feature, ka->vcpu_features);

#define kvm_vcpu_has_feature(k, f)	__vcpu_has_feature(&(k)->arch, (f))
#define vcpu_has_feature(v, f)		__vcpu_has_feature(&(v)->kvm->arch, (f))

/* in __vm_id_reg() */
		return &ka->id_regs[IDREG_IDX(reg)];
		return &ka->ctr_el0;
		return &ka->midr_el1;
		return &ka->revidr_el1;
		return &ka->aidr_el1;

	({ u64 __val = *__vm_id_reg(&(kvm)->arch, reg); __val; })

		sign_extend64(__val, id##_##fld##_WIDTH - 1);		\

		sign_extend64(__val, id##_##fld##_WIDTH - 1);		\
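/*
 * Why the sign_extend64() above: some ID register fields are signed, with
 * 0xf meaning -1 ("not implemented"), so comparing raw field values would
 * get the ordering wrong. For a signed 4-bit field:
 *
 *	sign_extend64(0x1, 4 - 1) ==  1		feature at level 1
 *	sign_extend64(0xf, 4 - 1) == -1		feature absent
 *
 * kvm_has_feat() then compares the extended value against the requested
 * minimum level, so an absent feature never satisfies the check.
 */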
		pa &= kvm_has_feat((k), ID_AA64ISAR1_EL1, GPA, IMP);	\

		pi &= kvm_has_feat((k), ID_AA64ISAR1_EL1, GPI, IMP);	\

		pa3 &= kvm_has_feat((k), ID_AA64ISAR2_EL1, GPA3, IMP);	\
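/*
 * Pattern in the three lines above: each pointer-auth "address" flavour
 * (APA/API/APA3) only counts if the matching "generic" flavour
 * (GPA/GPI/GPA3) is implemented too, since each pair advertises the same
 * underlying algorithm; the surrounding check then has the shape
 * (pa + pi + pa3) == 1, insisting that exactly one algorithm is present.
 */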
	 kvm_has_feat((k), ID_AA64PFR2_EL1, FPMR, IMP))

	(kvm_has_feat((k), ID_AA64MMFR3_EL1, TCRX, IMP))

	(kvm_has_feat((k), ID_AA64MMFR3_EL1, S1PIE, IMP))

	(kvm_has_feat((k), ID_AA64MMFR3_EL1, S1POE, IMP))