1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2012,2013 - ARM Ltd
7 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
20 #include <linux/irqchip/arm-gic-v3.h>
25 #include <asm/debug-monitors.h>
35 #include <trace/events/kvm.h>
40 #include "trace.h"
74 "sys_reg read to write-only register"); in read_from_write_only()
82 "sys_reg write to read-only register"); in write_to_read_only()
188 * If we have a non-VHE guest and the sysreg in vcpu_read_sys_reg()
190 * in-memory copy instead. in vcpu_read_sys_reg()
228 * to reverse-translate virtual EL2 system registers for a in vcpu_write_sys_reg()
229 * non-VHE guest hypervisor. in vcpu_write_sys_reg()
291 * = Log2(bytes) - 2 + 2 in get_min_cache_line_size()
302 if (vcpu->arch.ccsidr) in get_ccsidr()
303 return vcpu->arch.ccsidr[csselr]; in get_ccsidr()
318 * non-aliasing) are 1 set and 1 way. in get_ccsidr()
330 return SYS_FIELD_PREP(CCSIDR_EL1, LineSize, line_size - 4); in get_ccsidr()
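/*
 * Illustrative-only sketch (not part of the file above): the line-size
 * arithmetic used by get_min_cache_line_size() and get_ccsidr().
 * CTR_EL0.{I,D}minLine encode Log2(words), so adding 2 gives Log2(bytes),
 * while CCSIDR_EL1.LineSize encodes Log2(bytes) - 4. For a 64-byte line:
 * DminLine = 4, Log2(bytes) = 6, LineSize = 2. Helper name is hypothetical.
 */
static inline u8 example_ctr_minline_to_ccsidr_linesize(u8 minline_log2_words)
{
	u8 log2_bytes = minline_log2_words + 2;	/* words -> bytes */

	return log2_bytes - 4;			/* CCSIDR_EL1.LineSize encoding */
}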
336 u32 *ccsidr = vcpu->arch.ccsidr; in set_ccsidr()
341 return -EINVAL; in set_ccsidr()
349 return -ENOMEM; in set_ccsidr()
354 vcpu->arch.ccsidr = ccsidr; in set_ccsidr()
366 if (p->is_write) in access_rw()
367 vcpu_write_sys_reg(vcpu, p->regval, r->reg); in access_rw()
369 p->regval = vcpu_read_sys_reg(vcpu, r->reg); in access_rw()
381 if (!p->is_write) in access_dcsw()
388 * CPU left in the system, and certainly not from non-secure in access_dcsw()
401 if (!kvm_has_mte(vcpu->kvm)) in access_dcgsw()
410 switch (r->aarch32_map) { in get_access_mask()
438 BUG_ON(!p->is_write); in access_vm_reg()
443 val = vcpu_read_sys_reg(vcpu, r->reg); in access_vm_reg()
449 val |= (p->regval & (mask >> shift)) << shift; in access_vm_reg()
450 vcpu_write_sys_reg(vcpu, val, r->reg); in access_vm_reg()
462 if (p->is_write) in access_actlr()
466 p->regval = (vcpu_read_sys_reg(vcpu, r->reg) & mask) >> shift; in access_actlr()
483 if (!kvm_has_gicv3(vcpu->kvm)) in access_gic_sgi()
486 if (!p->is_write) in access_gic_sgi()
496 if (p->Op0 == 0) { /* AArch32 */ in access_gic_sgi()
497 switch (p->Op1) { in access_gic_sgi()
508 switch (p->Op2) { in access_gic_sgi()
520 vgic_v3_dispatch_sgi(vcpu, p->regval, g1); in access_gic_sgi()
529 if (!kvm_has_gicv3(vcpu->kvm)) in access_gic_sre()
532 if (p->is_write) in access_gic_sre()
535 if (p->Op1 == 4) { /* ICC_SRE_EL2 */ in access_gic_sre()
536 p->regval = (ICC_SRE_EL2_ENABLE | ICC_SRE_EL2_SRE | in access_gic_sre()
539 p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre; in access_gic_sre()
549 if (p->is_write) in trap_raz_wi()
567 if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, LO, IMP)) in trap_loregion()
570 if (p->is_write && sr == SYS_LORID_EL1) in trap_loregion()
580 if (!p->is_write) in trap_oslar_el1()
583 kvm_debug_handle_oslar(vcpu, p->regval); in trap_oslar_el1()
591 if (p->is_write) in trap_oslsr_el1()
594 p->regval = __vcpu_sys_reg(vcpu, r->reg); in trap_oslsr_el1()
605 if ((val ^ rd->val) & ~OSLSR_EL1_OSLK) in set_oslsr_el1()
606 return -EINVAL; in set_oslsr_el1()
608 __vcpu_sys_reg(vcpu, rd->reg) = val; in set_oslsr_el1()
616 if (p->is_write) { in trap_dbgauthstatus_el1()
619 p->regval = read_sysreg(dbgauthstatus_el1); in trap_dbgauthstatus_el1()
651 val |= (p->regval & (mask >> shift)) << shift; in reg_to_dbg()
663 p->regval = (*dbg_reg & mask) >> shift; in dbg_to_reg()
668 struct kvm_guest_debug_arch *dbg = &vcpu->arch.vcpu_debug_state; in demux_wb_reg()
670 switch (rd->Op2) { in demux_wb_reg()
672 return &dbg->dbg_bvr[rd->CRm]; in demux_wb_reg()
674 return &dbg->dbg_bcr[rd->CRm]; in demux_wb_reg()
676 return &dbg->dbg_wvr[rd->CRm]; in demux_wb_reg()
678 return &dbg->dbg_wcr[rd->CRm]; in demux_wb_reg()
680 KVM_BUG_ON(1, vcpu->kvm); in demux_wb_reg()
693 if (p->is_write) in trap_dbg_wb_reg()
708 return -EINVAL; in set_dbg_wb_reg()
720 return -EINVAL; in get_dbg_wb_reg()
738 *reg = rd->val; in reset_dbg_wb_reg()
739 return rd->val; in reset_dbg_wb_reg()
767 mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0); in reset_mpidr()
768 mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1); in reset_mpidr()
769 mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2); in reset_mpidr()
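/*
 * Illustrative-only: how reset_mpidr() above spreads the vcpu_id across the
 * MPIDR affinity levels, assuming MPIDR_LEVEL_SHIFT(n) expands to 0, 8 and
 * 16 for levels 0-2 (8 bits per level). vcpu_id 300 (0x12c) thus becomes
 * Aff0 = 0x0c, Aff1 = 0x12, Aff2 = 0x00. Hypothetical helper, not kernel API.
 */
static inline void example_unpack_vmpidr(u64 mpidr, u8 *aff0, u8 *aff1, u8 *aff2)
{
	*aff0 = mpidr & 0x0f;		/* from vcpu_id[3:0]   */
	*aff1 = (mpidr >> 8) & 0xff;	/* from vcpu_id[11:4]  */
	*aff2 = (mpidr >> 16) & 0xff;	/* from vcpu_id[19:12] */
}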
788 u8 n = vcpu->kvm->arch.pmcr_n; in reset_pmu_reg()
791 mask |= GENMASK(n - 1, 0); in reset_pmu_reg()
794 __vcpu_sys_reg(vcpu, r->reg) &= mask; in reset_pmu_reg()
796 return __vcpu_sys_reg(vcpu, r->reg); in reset_pmu_reg()
802 __vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0); in reset_pmevcntr()
804 return __vcpu_sys_reg(vcpu, r->reg); in reset_pmevcntr()
814 __vcpu_sys_reg(vcpu, r->reg) &= kvm_pmu_evtyper_mask(vcpu->kvm); in reset_pmevtyper()
816 return __vcpu_sys_reg(vcpu, r->reg); in reset_pmevtyper()
822 __vcpu_sys_reg(vcpu, r->reg) &= PMSELR_EL0_SEL_MASK; in reset_pmselr()
824 return __vcpu_sys_reg(vcpu, r->reg); in reset_pmselr()
838 __vcpu_sys_reg(vcpu, r->reg) = pmcr; in reset_pmcr()
840 return __vcpu_sys_reg(vcpu, r->reg); in reset_pmcr()
882 if (p->is_write) { in access_pmcr()
889 val |= p->regval & ARMV8_PMU_PMCR_MASK; in access_pmcr()
897 p->regval = val; in access_pmcr()
909 if (p->is_write) in access_pmselr()
910 __vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval; in access_pmselr()
913 p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0) in access_pmselr()
924 BUG_ON(p->is_write); in access_pmceid()
931 pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1)); in access_pmceid()
935 p->regval = pmceid; in access_pmceid()
959 if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0) in get_pmu_evcntr()
964 idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); in get_pmu_evcntr()
975 if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0) in set_pmu_evcntr()
980 idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); in set_pmu_evcntr()
992 if (r->CRn == 9 && r->CRm == 13) { in access_pmu_evcntr()
993 if (r->Op2 == 2) { in access_pmu_evcntr()
1000 } else if (r->Op2 == 0) { in access_pmu_evcntr()
1007 } else if (r->CRn == 0 && r->CRm == 9) { in access_pmu_evcntr()
1013 } else if (r->CRn == 14 && (r->CRm & 12) == 8) { in access_pmu_evcntr()
1018 idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); in access_pmu_evcntr()
1027 if (p->is_write) { in access_pmu_evcntr()
1031 kvm_pmu_set_counter_value(vcpu, idx, p->regval); in access_pmu_evcntr()
1033 p->regval = kvm_pmu_get_counter_value(vcpu, idx); in access_pmu_evcntr()
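/*
 * Illustrative-only: the index arithmetic used by the PMU accessors above.
 * PMEVCNTR<n>_EL0 / PMEVTYPER<n>_EL0 encode n as CRm[1:0]:Op2[2:0], so
 * ((CRm & 3) << 3) | (Op2 & 7) recovers n; e.g. PMEVCNTR10_EL0 has
 * CRm = 0b1001 and Op2 = 0b010, giving (1 << 3) | 2 = 10. Hypothetical helper.
 */
static inline unsigned int example_pmevcntr_index(u8 crm, u8 op2)
{
	return ((crm & 3) << 3) | (op2 & 7);
}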
1047 if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) { in access_pmu_evtyper()
1051 } else if (r->CRn == 14 && (r->CRm & 12) == 12) { in access_pmu_evtyper()
1052 idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); in access_pmu_evtyper()
1065 if (p->is_write) { in access_pmu_evtyper()
1066 kvm_pmu_set_counter_event_type(vcpu, p->regval, idx); in access_pmu_evtyper()
1069 p->regval = __vcpu_sys_reg(vcpu, reg); in access_pmu_evtyper()
1079 __vcpu_sys_reg(vcpu, r->reg) = val & mask; in set_pmreg()
1089 *val = __vcpu_sys_reg(vcpu, r->reg) & mask; in get_pmreg()
1102 if (p->is_write) { in access_pmcnten()
1103 val = p->regval & mask; in access_pmcnten()
1104 if (r->Op2 & 0x1) in access_pmcnten()
1113 p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0); in access_pmcnten()
1127 if (p->is_write) { in access_pminten()
1128 u64 val = p->regval & mask; in access_pminten()
1130 if (r->Op2 & 0x1) in access_pminten()
1137 p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1); in access_pminten()
1151 if (p->is_write) { in access_pmovs()
1152 if (r->CRm & 0x2) in access_pmovs()
1154 __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask); in access_pmovs()
1157 __vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask); in access_pmovs()
1159 p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0); in access_pmovs()
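/*
 * Illustrative-only: the {SET,CLR} register idiom used by access_pmcnten(),
 * access_pminten() and access_pmovs() above - one backing value is shared,
 * with the *SET encoding OR-ing written bits in and the *CLR encoding
 * clearing them. Hypothetical helper for clarity only.
 */
static inline u64 example_set_clr_update(u64 old, u64 written, bool is_set)
{
	return is_set ? (old | written) : (old & ~written);
}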
1170 if (!p->is_write) in access_pmswinc()
1177 kvm_pmu_software_increment(vcpu, p->regval & mask); in access_pmswinc()
1184 if (p->is_write) { in access_pmuserenr()
1189 p->regval & ARMV8_PMU_USERENR_MASK; in access_pmuserenr()
1191 p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0) in access_pmuserenr()
1209 struct kvm *kvm = vcpu->kvm; in set_pmcr()
1211 mutex_lock(&kvm->arch.config_lock); in set_pmcr()
1220 kvm->arch.pmcr_n = new_n; in set_pmcr()
1222 mutex_unlock(&kvm->arch.config_lock); in set_pmcr()
1239 __vcpu_sys_reg(vcpu, r->reg) = val; in set_pmcr()
1458 if (p->is_write) in access_arch_timer()
1459 kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval); in access_arch_timer()
1461 p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg); in access_arch_timer()
1503 * arm64_check_features() - Check if a feature register value constitutes
1521 u64 writable_mask = rd->val; in arm64_check_features()
1522 u64 limit = rd->reset(vcpu, rd); in arm64_check_features()
1531 return val ? -E2BIG : 0; in arm64_check_features()
1535 return -EINVAL; in arm64_check_features()
1537 ftrp = ftr_reg->ftr_bits; in arm64_check_features()
1539 for (; ftrp && ftrp->width; ftrp++) { in arm64_check_features()
1557 return -E2BIG; in arm64_check_features()
1562 return -E2BIG; in arm64_check_features()
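/*
 * Illustrative-only, heavily simplified sketch of the per-field rule that
 * arm64_check_features() enforces above: a writable field may not advertise
 * more than the limit computed by the register's reset() hook, otherwise the
 * write is rejected with -E2BIG. The real code also handles signed fields and
 * per-field "safe value" policies; the helper name below is hypothetical.
 */
static int example_check_one_unsigned_field(u64 val, u64 limit, u64 mask, u8 shift)
{
	u64 f_val = (val & mask) >> shift;
	u64 f_lim = (limit & mask) >> shift;

	return (f_val > f_lim) ? -E2BIG : 0;
}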
1603 if (!kvm_has_mte(vcpu->kvm)) in __kvm_read_sanitised_id_reg()
1653 val = limit_nv_id_reg(vcpu->kvm, id, val); in __kvm_read_sanitised_id_reg()
1666 return kvm_read_vm_id_reg(vcpu->kvm, reg_to_encoding(r)); in read_id_reg()
1680 * registers KVM maintains on a per-VM basis.
1683 * per-VM registers.
1754 if (p->is_write) in access_id_reg()
1757 p->regval = read_id_reg(vcpu, r); in access_id_reg()
1762 /* Visibility overrides for SVE-specific control registers */
1775 if (kvm_has_feat(vcpu->kvm, ID_AA64PFR1_EL1, SME, IMP)) in sme_visibility()
1784 if (kvm_has_fpmr(vcpu->kvm)) in fp8_visibility()
1797 * Although this is a per-CPU feature, we make it global because in sanitise_id_aa64pfr0_el1()
1861 * non-architectural PMUs. Of course, PMUv3 is the only game in town for in set_id_aa64dfr0_el1()
1862 * PMU virtualization, so the IMP_DEF value was rather user-hostile. in set_id_aa64dfr0_el1()
1868 * surprising than an ill-guided PMU driver poking at impdef system in set_id_aa64dfr0_el1()
1879 return -EINVAL; in set_id_aa64dfr0_el1()
1920 return -EINVAL; in set_id_dfr0_el1()
1923 return -EINVAL; in set_id_dfr0_el1()
1941 * But KVM must also accept values from user-space that were provided in set_id_aa64pfr0_el1()
1942 * by KVM. On CPUs that support MPAM, permit user-space to write in set_id_aa64pfr0_el1()
1952 return -EINVAL; in set_id_aa64pfr0_el1()
1980 return -EINVAL; in set_id_aa64mmfr0_el1()
2020 return -EINVAL; in set_ctr_el0()
2025 return -ENOENT; in set_ctr_el0()
2043 if (kvm_vm_has_ran_once(vcpu->kvm)) { in get_id_reg()
2048 mutex_lock(&vcpu->kvm->arch.config_lock); in get_id_reg()
2050 mutex_unlock(&vcpu->kvm->arch.config_lock); in get_id_reg()
2061 mutex_lock(&vcpu->kvm->arch.config_lock); in set_id_reg()
2067 if (kvm_vm_has_ran_once(vcpu->kvm)) { in set_id_reg()
2069 ret = -EBUSY; in set_id_reg()
2073 mutex_unlock(&vcpu->kvm->arch.config_lock); in set_id_reg()
2079 kvm_set_vm_id_reg(vcpu->kvm, id, val); in set_id_reg()
2081 mutex_unlock(&vcpu->kvm->arch.config_lock); in set_id_reg()
2084 * arm64_check_features() returns -E2BIG to indicate the register's in set_id_reg()
2085 * feature set is a superset of the maximally-allowed register value. in set_id_reg()
2088 * writes return -EINVAL. in set_id_reg()
2090 if (ret == -E2BIG) in set_id_reg()
2091 ret = -EINVAL; in set_id_reg()
2097 u64 *p = __vm_id_reg(&kvm->arch, reg); in kvm_set_vm_id_reg()
2099 lockdep_assert_held(&kvm->arch.config_lock); in kvm_set_vm_id_reg()
2123 if (p->is_write) in access_ctr()
2126 p->regval = kvm_read_vm_id_reg(vcpu->kvm, SYS_CTR_EL0); in access_ctr()
2133 if (p->is_write) in access_clidr()
2136 p->regval = __vcpu_sys_reg(vcpu, r->reg); in access_clidr()
2188 if (kvm_has_mte(vcpu->kvm)) in reset_clidr()
2191 __vcpu_sys_reg(vcpu, r->reg) = clidr; in reset_clidr()
2193 return __vcpu_sys_reg(vcpu, r->reg); in reset_clidr()
2203 return -EINVAL; in set_clidr()
2205 __vcpu_sys_reg(vcpu, rd->reg) = val; in set_clidr()
2213 int reg = r->reg; in access_csselr()
2215 if (p->is_write) in access_csselr()
2216 vcpu_write_sys_reg(vcpu, p->regval, reg); in access_csselr()
2218 p->regval = vcpu_read_sys_reg(vcpu, reg); in access_csselr()
2227 if (p->is_write) in access_ccsidr()
2233 p->regval = get_ccsidr(vcpu, csselr); in access_ccsidr()
2241 if (kvm_has_mte(vcpu->kvm)) in mte_visibility()
2274 "trap of VNCR-backed register"); in bad_vncr_trap()
2387 if (p->is_write) in access_sp_el1()
2388 __vcpu_sys_reg(vcpu, SP_EL1) = p->regval; in access_sp_el1()
2390 p->regval = __vcpu_sys_reg(vcpu, SP_EL1); in access_sp_el1()
2399 if (p->is_write) in access_elr()
2400 vcpu_write_sys_reg(vcpu, p->regval, ELR_EL1); in access_elr()
2402 p->regval = vcpu_read_sys_reg(vcpu, ELR_EL1); in access_elr()
2411 if (p->is_write) in access_spsr()
2412 __vcpu_sys_reg(vcpu, SPSR_EL1) = p->regval; in access_spsr()
2414 p->regval = __vcpu_sys_reg(vcpu, SPSR_EL1); in access_spsr()
2423 if (p->is_write) in access_cntkctl_el12()
2424 __vcpu_sys_reg(vcpu, CNTKCTL_EL1) = p->regval; in access_cntkctl_el12()
2426 p->regval = __vcpu_sys_reg(vcpu, CNTKCTL_EL1); in access_cntkctl_el12()
2433 u64 val = r->val; in reset_hcr()
2438 return __vcpu_sys_reg(vcpu, r->reg) = val; in reset_hcr()
2466 if (!p->is_write) { in access_zcr_el2()
2467 p->regval = vcpu_read_sys_reg(vcpu, ZCR_EL2); in access_zcr_el2()
2471 vq = SYS_FIELD_GET(ZCR_ELx, LEN, p->regval) + 1; in access_zcr_el2()
2473 vcpu_write_sys_reg(vcpu, vq - 1, ZCR_EL2); in access_zcr_el2()
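/*
 * Illustrative-only: ZCR_ELx.LEN holds (vector length in 128-bit quadwords)
 * minus one, which is why access_zcr_el2() above adds 1 to derive "vq" and
 * writes back "vq - 1". A hypothetical conversion helper:
 */
static inline unsigned int example_zcr_len_to_vl_bits(unsigned int len)
{
	return (len + 1) * 128;		/* e.g. LEN = 1 -> 256-bit vectors */
}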
2482 if (p->is_write) in access_gic_vtr()
2485 p->regval = kvm_vgic_global_state.ich_vtr_el2; in access_gic_vtr()
2486 p->regval &= ~(ICH_VTR_EL2_DVIM | in access_gic_vtr()
2489 p->regval |= ICH_VTR_EL2_nV4; in access_gic_vtr()
2498 if (p->is_write) in access_gic_misr()
2501 p->regval = vgic_v3_get_misr(vcpu); in access_gic_misr()
2510 if (p->is_write) in access_gic_eisr()
2513 p->regval = vgic_v3_get_eisr(vcpu); in access_gic_eisr()
2522 if (p->is_write) in access_gic_elrsr()
2525 p->regval = vgic_v3_get_elrsr(vcpu); in access_gic_elrsr()
2533 if (kvm_has_s1poe(vcpu->kvm)) in s1poe_visibility()
2548 if (kvm_has_tcr2(vcpu->kvm)) in tcr2_visibility()
2563 if (kvm_has_s1pie(vcpu->kvm)) in s1pie_visibility()
2585 * Request a reload of the PMU to enable/disable the counters affected in access_mdcr()
2601 * trapped, allowing the guest to read the actual hardware value. On big.LITTLE in access_imp_id_reg()
2613 if (p->is_write) in access_imp_id_reg()
2617 * Return the VM-scoped implementation ID register values if userspace in access_imp_id_reg()
2620 if (test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &vcpu->kvm->arch.flags)) in access_imp_id_reg()
2629 p->regval = read_sysreg(revidr_el1); in access_imp_id_reg()
2632 p->regval = read_sysreg(aidr_el1); in access_imp_id_reg()
2662 KVM_BUG_ON(1, vcpu->kvm); in reset_imp_id_reg()
2670 struct kvm *kvm = vcpu->kvm; in set_imp_id_reg()
2673 guard(mutex)(&kvm->arch.config_lock); in set_imp_id_reg()
2679 if (!test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &kvm->arch.flags)) in set_imp_id_reg()
2680 return -EINVAL; in set_imp_id_reg()
2687 return -EBUSY; in set_imp_id_reg()
2693 if ((val & r->val) != val) in set_imp_id_reg()
2694 return -EINVAL; in set_imp_id_reg()
2836 * Prior to FEAT_Debugv8.9, the architecture defines context-aware
3384 u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); in handle_at_s1e01()
3386 __kvm_at_s1e01(vcpu, op, p->regval); in handle_at_s1e01()
3394 u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); in handle_at_s1e2()
3396 /* There is no FGT associated with AT S1E2A :-( */ in handle_at_s1e2()
3398 !kvm_has_feat(vcpu->kvm, ID_AA64ISAR2_EL1, ATS1A, IMP)) { in handle_at_s1e2()
3403 __kvm_at_s1e2(vcpu, op, p->regval); in handle_at_s1e2()
3411 u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); in handle_at_s12()
3413 __kvm_at_s12(vcpu, op, p->regval); in handle_at_s12()
3420 struct kvm *kvm = vpcu->kvm; in kvm_supported_tlbi_s12_op()
3437 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); in handle_alle1is()
3442 write_lock(&vcpu->kvm->mmu_lock); in handle_alle1is()
3448 kvm_nested_s2_unmap(vcpu->kvm, true); in handle_alle1is()
3450 write_unlock(&vcpu->kvm->mmu_lock); in handle_alle1is()
3457 struct kvm *kvm = vpcu->kvm; in kvm_supported_tlbi_ipas2_op()
3516 * at worst may cause more aborts for shadow stage-2 fills. in s2_mmu_unmap_range()
3518 * Dropping the MMU lock also implies that shadow stage-2 fills could in s2_mmu_unmap_range()
3520 * the L1 needs to put its stage-2 in a consistent state before doing in s2_mmu_unmap_range()
3523 kvm_stage2_unmap_range(mmu, info->range.start, info->range.size, true); in s2_mmu_unmap_range()
3529 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); in handle_vmalls12e1is()
3536 limit = BIT_ULL(kvm_get_pa_bits(vcpu->kvm)); in handle_vmalls12e1is()
3538 kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr), in handle_vmalls12e1is()
3553 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); in handle_ripas2e1is()
3566 tg = FIELD_GET(GENMASK(47, 46), p->regval); in handle_ripas2e1is()
3567 scale = FIELD_GET(GENMASK(45, 44), p->regval); in handle_ripas2e1is()
3568 num = FIELD_GET(GENMASK(43, 39), p->regval); in handle_ripas2e1is()
3569 base = p->regval & GENMASK(36, 0); in handle_ripas2e1is()
3587 kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr), in handle_ripas2e1is()
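/*
 * Illustrative-only: decoding the range-based TLBI operand as done in
 * handle_ripas2e1is() above. TG selects the granule (0b01=4K, 0b10=16K,
 * 0b11=64K) and the invalidated size is (NUM + 1) << (5 * SCALE + 1)
 * granules, starting at BaseADDR (in granule units). Hypothetical helper.
 */
static inline u64 example_tlbi_range_bytes(u8 scale, u8 num, u8 granule_shift)
{
	u64 granules = (u64)(num + 1) << (5 * scale + 1);

	return granules << granule_shift;
}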
3608 * - NS bit: we're non-secure only. in s2_mmu_unmap_ipa()
3610 * - IPA[51:48]: We don't support 52bit IPA just yet... in s2_mmu_unmap_ipa()
3614 base_addr = (info->ipa.addr & GENMASK_ULL(35, 0)) << 12; in s2_mmu_unmap_ipa()
3615 max_size = compute_tlb_inval_range(mmu, info->ipa.addr); in s2_mmu_unmap_ipa()
3616 base_addr &= ~(max_size - 1); in s2_mmu_unmap_ipa()
3628 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); in handle_ipas2e1is()
3634 kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr), in handle_ipas2e1is()
3637 .addr = p->regval, in handle_ipas2e1is()
3648 WARN_ON(__kvm_tlbi_s1e2(mmu, info->va.addr, info->va.encoding)); in s2_mmu_tlbi_s1e1()
3654 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); in handle_tlbi_el1()
3663 * - HCR_EL2.E2H == 0 : a non-VHE guest in handle_tlbi_el1()
3664 * - HCR_EL2.{E2H,TGE} == { 1, 0 } : a VHE guest in guest mode in handle_tlbi_el1()
3675 kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr), in handle_tlbi_el1()
3678 .addr = p->regval, in handle_tlbi_el1()
3865 if (p->is_write) { in trap_dbgdidr()
3868 u64 dfr = kvm_read_vm_id_reg(vcpu->kvm, SYS_ID_AA64DFR0_EL1); in trap_dbgdidr()
3869 u32 el3 = kvm_has_feat(vcpu->kvm, ID_AA64PFR0_EL1, EL3, IMP); in trap_dbgdidr()
3871 p->regval = ((SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr) << 28) | in trap_dbgdidr()
4218 if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) { in check_sysreg_table()
4219 kvm_err("sys_reg table %pS entry %d (%s -> %s) out of order\n", in check_sysreg_table()
4220 &table[i], i, table[i - 1].name, table[i].name); in check_sysreg_table()
4251 BUG_ON(!r->access); in perform_access()
4254 if (likely(r->access(vcpu, params, r))) in perform_access()
4259 * emulate_cp -- tries to match a sys_reg access in a handling table, and
4293 int cp = -1; in unhandled_cp_access()
4315 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
4338 * Make a 64-bit value out of Rt and Rt2. As we use the same trap in kvm_handle_cp_64()
4339 * backends between AArch32 and AArch64, we get away with it. in kvm_handle_cp_64()
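/*
 * Illustrative-only: the "64-bit value out of Rt and Rt2" mentioned above.
 * For an MCRR/MRRC trap, Rt carries the low 32 bits of the transfer and Rt2
 * the high 32 bits. Hypothetical helper, not the in-kernel implementation.
 */
static inline u64 example_cp64_regval(u32 rt_lo, u32 rt2_hi)
{
	return ((u64)rt2_hi << 32) | rt_lo;
}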
4377 params->is_write = ((esr & 1) == 0); in kvm_esr_cp10_id_to_sys64()
4378 params->Op0 = 3; in kvm_esr_cp10_id_to_sys64()
4379 params->Op1 = 0; in kvm_esr_cp10_id_to_sys64()
4380 params->CRn = 0; in kvm_esr_cp10_id_to_sys64()
4381 params->CRm = 3; in kvm_esr_cp10_id_to_sys64()
4383 /* CP10 ID registers are read-only */ in kvm_esr_cp10_id_to_sys64()
4384 valid = !params->is_write; in kvm_esr_cp10_id_to_sys64()
4389 params->Op2 = 0; in kvm_esr_cp10_id_to_sys64()
4393 params->Op2 = 1; in kvm_esr_cp10_id_to_sys64()
4397 params->Op2 = 2; in kvm_esr_cp10_id_to_sys64()
4407 params->is_write ? "write" : "read", reg_id); in kvm_esr_cp10_id_to_sys64()
4412 * kvm_handle_cp10_id() - Handles a VMRS trap on guest access to a 'Media and
4416 * MVFR{0-2} are architecturally mapped to the AArch64 MVFR{0-2}_EL1 registers.
4439 * kvm_emulate_cp15_id_reg() - Handles an MRC trap on a guest CP15 access where
4450 * According to DDI0487G.b G7.3.1, paragraph "Behavior of VMSAv8-32 32-bit
4461 if (params->is_write) { in kvm_emulate_cp15_id_reg()
4466 params->Op0 = 3; in kvm_emulate_cp15_id_reg()
4473 if (params->CRm > 3) in kvm_emulate_cp15_id_reg()
4474 params->regval = 0; in kvm_emulate_cp15_id_reg()
4478 vcpu_set_reg(vcpu, Rt, params->regval); in kvm_emulate_cp15_id_reg()
4483 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
4496 params->regval = vcpu_get_reg(vcpu, Rt); in kvm_handle_cp_32()
4499 if (!params->is_write) in kvm_handle_cp_32()
4500 vcpu_set_reg(vcpu, Rt, params->regval); in kvm_handle_cp_32()
4550 * emulate_sys_reg - Emulate a guest access to an AArch64 system register
4596 struct kvm *kvm = s->private; in idregs_debug_start()
4599 mutex_lock(&kvm->arch.config_lock); in idregs_debug_start()
4601 iter = &kvm->arch.idreg_debugfs_iter; in idregs_debug_start()
4602 if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags) && in idregs_debug_start()
4608 iter = ERR_PTR(-EBUSY); in idregs_debug_start()
4611 mutex_unlock(&kvm->arch.config_lock); in idregs_debug_start()
4618 struct kvm *kvm = s->private; in idregs_debug_next()
4622 if (idregs_debug_find(kvm, kvm->arch.idreg_debugfs_iter + 1)) { in idregs_debug_next()
4623 kvm->arch.idreg_debugfs_iter++; in idregs_debug_next()
4625 return &kvm->arch.idreg_debugfs_iter; in idregs_debug_next()
4633 struct kvm *kvm = s->private; in idregs_debug_stop()
4638 mutex_lock(&kvm->arch.config_lock); in idregs_debug_stop()
4640 kvm->arch.idreg_debugfs_iter = ~0; in idregs_debug_stop()
4642 mutex_unlock(&kvm->arch.config_lock); in idregs_debug_stop()
4648 struct kvm *kvm = s->private; in idregs_debug_show()
4650 desc = idregs_debug_find(kvm, kvm->arch.idreg_debugfs_iter); in idregs_debug_show()
4652 if (!desc->name) in idregs_debug_show()
4656 desc->name, kvm_read_vm_id_reg(kvm, reg_to_encoding(desc))); in idregs_debug_show()
4672 kvm->arch.idreg_debugfs_iter = ~0; in kvm_sys_regs_create_debugfs()
4674 debugfs_create_file("idregs", 0444, kvm->debugfs_dentry, kvm, in kvm_sys_regs_create_debugfs()
4681 struct kvm *kvm = vcpu->kvm; in reset_vm_ftr_id_reg()
4683 if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags)) in reset_vm_ftr_id_reg()
4686 kvm_set_vm_id_reg(kvm, id, reg->reset(vcpu, reg)); in reset_vm_ftr_id_reg()
4695 reg->reset(vcpu, reg); in reset_vcpu_ftr_id_reg()
4699 * kvm_reset_sys_regs - sets system registers to reset value
4707 struct kvm *kvm = vcpu->kvm; in kvm_reset_sys_regs()
4713 if (!r->reset) in kvm_reset_sys_regs()
4721 r->reset(vcpu, r); in kvm_reset_sys_regs()
4723 if (r->reg >= __SANITISED_REG_START__ && r->reg < NR_SYS_REGS) in kvm_reset_sys_regs()
4724 (void)__vcpu_sys_reg(vcpu, r->reg); in kvm_reset_sys_regs()
4727 set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags); in kvm_reset_sys_regs()
4734 * kvm_handle_sys_reg -- handles a system instruction or mrs/msr instruction
4787 params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK) in index_to_params()
4789 params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK) in index_to_params()
4791 params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK) in index_to_params()
4793 params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK) in index_to_params()
4795 params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK) in index_to_params()
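/*
 * Illustrative-only: a userspace-style encoder that is the inverse of the
 * index_to_params() decode above, built from the same uapi
 * KVM_REG_ARM64_SYSREG_*_SHIFT definitions (sys_reg_to_index() further down
 * is the in-kernel equivalent). Helper name is hypothetical.
 */
static inline u64 example_sysreg_to_user_id(u8 op0, u8 op1, u8 crn, u8 crm, u8 op2)
{
	return KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM64_SYSREG |
	       ((u64)op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
	       ((u64)op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
	       ((u64)crn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
	       ((u64)crm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
	       ((u64)op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT);
}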
4830 if (r && (!(r->reg || r->get_user) || sysreg_hidden(vcpu, r))) in id_to_sys_reg_desc()
4843 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) in demux_c15_get()
4844 return -ENOENT; in demux_c15_get()
4849 return -ENOENT; in demux_c15_get()
4853 return -ENOENT; in demux_c15_get()
4857 return -ENOENT; in demux_c15_get()
4868 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) in demux_c15_set()
4869 return -ENOENT; in demux_c15_set()
4874 return -ENOENT; in demux_c15_set()
4878 return -ENOENT; in demux_c15_set()
4881 return -EFAULT; in demux_c15_set()
4885 return -ENOENT; in demux_c15_set()
4892 u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr; in kvm_sys_reg_get_user()
4897 r = id_to_sys_reg_desc(vcpu, reg->id, table, num); in kvm_sys_reg_get_user()
4899 return -ENOENT; in kvm_sys_reg_get_user()
4901 if (r->get_user) { in kvm_sys_reg_get_user()
4902 ret = (r->get_user)(vcpu, r, &val); in kvm_sys_reg_get_user()
4904 val = __vcpu_sys_reg(vcpu, r->reg); in kvm_sys_reg_get_user()
4916 void __user *uaddr = (void __user *)(unsigned long)reg->addr; in kvm_arm_sys_reg_get_reg()
4918 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) in kvm_arm_sys_reg_get_reg()
4919 return demux_c15_get(vcpu, reg->id, uaddr); in kvm_arm_sys_reg_get_reg()
4928 u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr; in kvm_sys_reg_set_user()
4934 return -EFAULT; in kvm_sys_reg_set_user()
4936 r = id_to_sys_reg_desc(vcpu, reg->id, table, num); in kvm_sys_reg_set_user()
4938 return -ENOENT; in kvm_sys_reg_set_user()
4943 if (r->set_user) { in kvm_sys_reg_set_user()
4944 ret = (r->set_user)(vcpu, r, val); in kvm_sys_reg_set_user()
4946 __vcpu_sys_reg(vcpu, r->reg) = val; in kvm_sys_reg_set_user()
4955 void __user *uaddr = (void __user *)(unsigned long)reg->addr; in kvm_arm_sys_reg_set_reg()
4957 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) in kvm_arm_sys_reg_set_reg()
4958 return demux_c15_set(vcpu, reg->id, uaddr); in kvm_arm_sys_reg_set_reg()
4977 return -EFAULT; in write_demux_regids()
4987 (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) | in sys_reg_to_index()
4988 (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) | in sys_reg_to_index()
4989 (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) | in sys_reg_to_index()
4990 (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) | in sys_reg_to_index()
4991 (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT)); in sys_reg_to_index()
5015 if (!(rd->reg || rd->get_user)) in walk_one_sys_reg()
5022 return -EFAULT; in walk_one_sys_reg()
5074 u64 __user *masks = (u64 __user *)range->addr; in kvm_vm_ioctl_get_reg_writable_masks()
5077 if (range->range || in kvm_vm_ioctl_get_reg_writable_masks()
5078 memcmp(range->reserved, zero_page, sizeof(range->reserved))) in kvm_vm_ioctl_get_reg_writable_masks()
5079 return -EINVAL; in kvm_vm_ioctl_get_reg_writable_masks()
5083 return -EFAULT; in kvm_vm_ioctl_get_reg_writable_masks()
5090 if (!is_feature_id_reg(encoding) || !reg->set_user) in kvm_vm_ioctl_get_reg_writable_masks()
5093 if (!reg->val || in kvm_vm_ioctl_get_reg_writable_masks()
5097 val = reg->val; in kvm_vm_ioctl_get_reg_writable_masks()
5100 return -EFAULT; in kvm_vm_ioctl_get_reg_writable_masks()
5108 struct kvm *kvm = vcpu->kvm; in vcpu_set_hcr()
5111 vcpu->arch.hcr_el2 |= HCR_E2H; in vcpu_set_hcr()
5114 vcpu->arch.hcr_el2 |= HCR_TEA; in vcpu_set_hcr()
5116 vcpu->arch.hcr_el2 |= HCR_TERR; in vcpu_set_hcr()
5120 vcpu->arch.hcr_el2 |= HCR_FWB; in vcpu_set_hcr()
5125 vcpu->arch.hcr_el2 |= HCR_TID4; in vcpu_set_hcr()
5127 vcpu->arch.hcr_el2 |= HCR_TID2; in vcpu_set_hcr()
5130 vcpu->arch.hcr_el2 &= ~HCR_RW; in vcpu_set_hcr()
5132 if (kvm_has_mte(vcpu->kvm)) in vcpu_set_hcr()
5133 vcpu->arch.hcr_el2 |= HCR_ATA; in vcpu_set_hcr()
5141 vcpu->arch.hcr_el2 |= HCR_TTLBOS; in vcpu_set_hcr()
5146 struct kvm *kvm = vcpu->kvm; in kvm_calculate_traps()
5148 mutex_lock(&kvm->arch.config_lock); in kvm_calculate_traps()
5153 if (test_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags)) in kvm_calculate_traps()
5156 kvm->arch.fgu[HFGxTR_GROUP] = (HFGxTR_EL2_nAMAIR2_EL1 | in kvm_calculate_traps()
5164 kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_TLBIRVAALE1OS| in kvm_calculate_traps()
5176 kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_TLBIRVAALE1 | in kvm_calculate_traps()
5190 kvm->arch.fgu[HFGITR_GROUP] |= HFGITR_EL2_ATS1E1A; in kvm_calculate_traps()
5193 kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_ATS1E1RP | in kvm_calculate_traps()
5197 kvm->arch.fgu[HFGxTR_GROUP] |= (HFGxTR_EL2_nPIRE0_EL1 | in kvm_calculate_traps()
5201 kvm->arch.fgu[HFGxTR_GROUP] |= (HFGxTR_EL2_nPOR_EL1 | in kvm_calculate_traps()
5205 kvm->arch.fgu[HAFGRTR_GROUP] |= ~(HAFGRTR_EL2_RES0 | in kvm_calculate_traps()
5209 kvm->arch.fgu[HDFGRTR_GROUP] |= (HDFGRTR_EL2_nBRBDATA | in kvm_calculate_traps()
5212 kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_nBRBINJ | in kvm_calculate_traps()
5216 set_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags); in kvm_calculate_traps()
5218 mutex_unlock(&kvm->arch.config_lock); in kvm_calculate_traps()
5231 struct kvm *kvm = vcpu->kvm; in kvm_finalize_sys_regs()
5233 guard(mutex)(&kvm->arch.config_lock); in kvm_finalize_sys_regs()
5237 kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)) { in kvm_finalize_sys_regs()
5238 kvm->arch.id_regs[IDREG_IDX(SYS_ID_AA64PFR0_EL1)] &= ~ID_AA64PFR0_EL1_GIC_MASK; in kvm_finalize_sys_regs()
5239 kvm->arch.id_regs[IDREG_IDX(SYS_ID_PFR1_EL1)] &= ~ID_PFR1_EL1_GIC_MASK; in kvm_finalize_sys_regs()
5266 return -EINVAL; in kvm_sys_reg_table_init()