Lines Matching "--enable-trace-backends"

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2012,2013 - ARM Ltd
7 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
22 #include <asm/debug-monitors.h>
32 #include <trace/events/kvm.h>
36 #include "trace.h"
64 "sys_reg read to write-only register"); in read_from_write_only()
72 "sys_reg write to read-only register"); in write_to_read_only()
149 * If we have a non-VHE guest and the sysreg in vcpu_read_sys_reg()
151 * in-memory copy instead. in vcpu_read_sys_reg()
186 * to reverse-translate virtual EL2 system registers for a in vcpu_write_sys_reg()
187 * non-VHE guest hypervisor. in vcpu_write_sys_reg()
236 * = Log2(bytes) - 2 + 2 in get_min_cache_line_size()
247 if (vcpu->arch.ccsidr) in get_ccsidr()
248 return vcpu->arch.ccsidr[csselr]; in get_ccsidr()
263 * non-aliasing) are 1 set and 1 way. in get_ccsidr()
275 return SYS_FIELD_PREP(CCSIDR_EL1, LineSize, line_size - 4); in get_ccsidr()
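A quick worked example of the two encodings being converted around here (architectural field definitions only, no kernel helpers): for a 64-byte D-cache line, CTR_EL0.DminLine = Log2(64 / 4) = 4, since that field counts words, so Log2(bytes) = DminLine + 2 = 6, which is the "Log2(bytes) - 2 + 2" of the comment above; CCSIDR_EL1.LineSize is defined as Log2(bytes) - 4 = 2, which is exactly what the SYS_FIELD_PREP(CCSIDR_EL1, LineSize, line_size - 4) line produces.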
281 u32 *ccsidr = vcpu->arch.ccsidr; in set_ccsidr()
286 return -EINVAL; in set_ccsidr()
294 return -ENOMEM; in set_ccsidr()
299 vcpu->arch.ccsidr = ccsidr; in set_ccsidr()
311 if (p->is_write) in access_rw()
312 vcpu_write_sys_reg(vcpu, p->regval, r->reg); in access_rw()
314 p->regval = vcpu_read_sys_reg(vcpu, r->reg); in access_rw()
326 if (!p->is_write) in access_dcsw()
333 * CPU left in the system, and certainly not from non-secure in access_dcsw()
346 if (!kvm_has_mte(vcpu->kvm)) { in access_dcgsw()
357 switch (r->aarch32_map) { in get_access_mask()
385 BUG_ON(!p->is_write); in access_vm_reg()
390 val = vcpu_read_sys_reg(vcpu, r->reg); in access_vm_reg()
396 val |= (p->regval & (mask >> shift)) << shift; in access_vm_reg()
397 vcpu_write_sys_reg(vcpu, val, r->reg); in access_vm_reg()
409 if (p->is_write) in access_actlr()
413 p->regval = (vcpu_read_sys_reg(vcpu, r->reg) & mask) >> shift; in access_actlr()
430 if (!p->is_write) in access_gic_sgi()
440 if (p->Op0 == 0) { /* AArch32 */ in access_gic_sgi()
441 switch (p->Op1) { in access_gic_sgi()
452 switch (p->Op2) { in access_gic_sgi()
464 vgic_v3_dispatch_sgi(vcpu, p->regval, g1); in access_gic_sgi()
473 if (p->is_write) in access_gic_sre()
476 p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre; in access_gic_sre()
484 if (p->is_write) in trap_raz_wi()
508 u64 val = IDREG(vcpu->kvm, SYS_ID_AA64MMFR1_EL1); in trap_loregion()
516 if (p->is_write && sr == SYS_LORID_EL1) in trap_loregion()
528 if (!p->is_write) in trap_oslar_el1()
533 if (p->regval & OSLAR_EL1_OSLK) in trap_oslar_el1()
544 if (p->is_write) in trap_oslsr_el1()
547 p->regval = __vcpu_sys_reg(vcpu, r->reg); in trap_oslsr_el1()
558 if ((val ^ rd->val) & ~OSLSR_EL1_OSLK) in set_oslsr_el1()
559 return -EINVAL; in set_oslsr_el1()
561 __vcpu_sys_reg(vcpu, rd->reg) = val; in set_oslsr_el1()
569 if (p->is_write) { in trap_dbgauthstatus_el1()
572 p->regval = read_sysreg(dbgauthstatus_el1); in trap_dbgauthstatus_el1()
578 * We want to avoid world-switching all the DBG registers all the
581 * - If we've touched any debug register, it is likely that we're
584 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
592 * - If the dirty bit is set (because we're coming back from trapping),
594 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
597 * - Otherwise, enable the traps
600 * - If the dirty bit is set, save guest registers, restore host
609 if (p->is_write) in trap_debug_regs()
612 trace_trap_reg(__func__, r->reg, p->is_write, p->regval); in trap_debug_regs()
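A minimal sketch of the guest-entry decision described in the strategy comment above. The helper names (debug_dirty(), save_host_debug_regs(), restore_guest_debug_regs(), enable_traps(), disable_traps(), set_debug_dirty()) are illustrative placeholders, not the actual KVM debug setup code; only __vcpu_sys_reg(), MDSCR_EL1, DBG_MDSCR_KDE and DBG_MDSCR_MDE are existing identifiers.

	static void debug_on_guest_entry(struct kvm_vcpu *vcpu)
	{
		bool in_use = __vcpu_sys_reg(vcpu, MDSCR_EL1) &
			      (DBG_MDSCR_KDE | DBG_MDSCR_MDE);

		if (debug_dirty(vcpu) || in_use) {
			save_host_debug_regs(vcpu);	/* illustrative helper */
			restore_guest_debug_regs(vcpu);	/* illustrative helper */
			disable_traps(vcpu);		/* guest now owns the debug regs */
			set_debug_dirty(vcpu);		/* make the exit path save them back */
		} else {
			enable_traps(vcpu);		/* lazily trap the first debug access */
		}
	}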
637 val |= (p->regval & (mask >> shift)) << shift; in reg_to_dbg()
651 p->regval = (*dbg_reg & mask) >> shift; in dbg_to_reg()
658 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm]; in trap_bvr()
660 if (p->is_write) in trap_bvr()
665 trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg); in trap_bvr()
673 vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = val; in set_bvr()
680 *val = vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm]; in get_bvr()
687 vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val; in reset_bvr()
688 return rd->val; in reset_bvr()
695 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm]; in trap_bcr()
697 if (p->is_write) in trap_bcr()
702 trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg); in trap_bcr()
710 vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = val; in set_bcr()
717 *val = vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm]; in get_bcr()
724 vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val; in reset_bcr()
725 return rd->val; in reset_bcr()
732 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]; in trap_wvr()
734 if (p->is_write) in trap_wvr()
739 trace_trap_reg(__func__, rd->CRm, p->is_write, in trap_wvr()
740 vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]); in trap_wvr()
748 vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = val; in set_wvr()
755 *val = vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]; in get_wvr()
762 vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val; in reset_wvr()
763 return rd->val; in reset_wvr()
770 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm]; in trap_wcr()
772 if (p->is_write) in trap_wcr()
777 trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg); in trap_wcr()
785 vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = val; in set_wcr()
792 *val = vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm]; in get_wcr()
799 vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val; in reset_wcr()
800 return rd->val; in reset_wcr()
828 mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0); in reset_mpidr()
829 mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1); in reset_mpidr()
830 mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2); in reset_mpidr()
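To make the affinity packing above concrete: for vcpu_id = 0x1234, Aff0 = 0x1234 & 0x0f = 0x4, Aff1 = (0x1234 >> 4) & 0xff = 0x23, and Aff2 = (0x1234 >> 12) & 0xff = 0x01, i.e. the vcpu lands at MPIDR affinity Aff2.Aff1.Aff0 = 01.23.04, with each level shifted into place by MPIDR_LEVEL_SHIFT(n).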
849 u8 n = vcpu->kvm->arch.pmcr_n; in reset_pmu_reg()
852 mask |= GENMASK(n - 1, 0); in reset_pmu_reg()
855 __vcpu_sys_reg(vcpu, r->reg) &= mask; in reset_pmu_reg()
857 return __vcpu_sys_reg(vcpu, r->reg); in reset_pmu_reg()
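As a concrete case of the mask built above: with pmcr_n = 6, GENMASK(n - 1, 0) = GENMASK(5, 0) = 0x3f, so the reset keeps only the bits for the six implemented event counters (the surrounding function also keeps the cycle-counter bit, ARMV8_PMU_CYCLE_IDX, which is not visible in these matched lines).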
863 __vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0); in reset_pmevcntr()
865 return __vcpu_sys_reg(vcpu, r->reg); in reset_pmevcntr()
875 __vcpu_sys_reg(vcpu, r->reg) &= kvm_pmu_evtyper_mask(vcpu->kvm); in reset_pmevtyper()
877 return __vcpu_sys_reg(vcpu, r->reg); in reset_pmevtyper()
883 __vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK; in reset_pmselr()
885 return __vcpu_sys_reg(vcpu, r->reg); in reset_pmselr()
899 __vcpu_sys_reg(vcpu, r->reg) = pmcr; in reset_pmcr()
901 return __vcpu_sys_reg(vcpu, r->reg); in reset_pmcr()
943 if (p->is_write) { in access_pmcr()
950 val |= p->regval & ARMV8_PMU_PMCR_MASK; in access_pmcr()
958 p->regval = val; in access_pmcr()
970 if (p->is_write) in access_pmselr()
971 __vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval; in access_pmselr()
974 p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0) in access_pmselr()
985 BUG_ON(p->is_write); in access_pmceid()
992 pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1)); in access_pmceid()
996 p->regval = pmceid; in access_pmceid()
1020 if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0) in get_pmu_evcntr()
1025 idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); in get_pmu_evcntr()
1037 if (r->CRn == 9 && r->CRm == 13) { in access_pmu_evcntr()
1038 if (r->Op2 == 2) { in access_pmu_evcntr()
1045 } else if (r->Op2 == 0) { in access_pmu_evcntr()
1052 } else if (r->CRn == 0 && r->CRm == 9) { in access_pmu_evcntr()
1058 } else if (r->CRn == 14 && (r->CRm & 12) == 8) { in access_pmu_evcntr()
1063 idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); in access_pmu_evcntr()
1072 if (p->is_write) { in access_pmu_evcntr()
1076 kvm_pmu_set_counter_value(vcpu, idx, p->regval); in access_pmu_evcntr()
1078 p->regval = kvm_pmu_get_counter_value(vcpu, idx); in access_pmu_evcntr()
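The idx computation here (and the identical one in access_pmu_evtyper() below) recovers the counter number n from the PMEVCNTR<n>_EL0 encoding, in which CRm[1:0] holds n[4:3] and Op2 holds n[2:0]. For example, PMEVCNTR10_EL0 (n = 0b01010) has CRm & 3 = 0b01 and Op2 = 0b010, so idx = (1 << 3) | 2 = 10.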
1092 if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) { in access_pmu_evtyper()
1096 } else if (r->CRn == 14 && (r->CRm & 12) == 12) { in access_pmu_evtyper()
1097 idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); in access_pmu_evtyper()
1110 if (p->is_write) { in access_pmu_evtyper()
1111 kvm_pmu_set_counter_event_type(vcpu, p->regval, idx); in access_pmu_evtyper()
1114 p->regval = __vcpu_sys_reg(vcpu, reg); in access_pmu_evtyper()
1126 switch (r->reg) { in set_pmreg()
1129 set = r->CRm & 2; in set_pmreg()
1133 set = r->Op2 & 1; in set_pmreg()
1138 __vcpu_sys_reg(vcpu, r->reg) |= val; in set_pmreg()
1140 __vcpu_sys_reg(vcpu, r->reg) &= ~val; in set_pmreg()
1149 *val = __vcpu_sys_reg(vcpu, r->reg) & mask; in get_pmreg()
1162 if (p->is_write) { in access_pmcnten()
1163 val = p->regval & mask; in access_pmcnten()
1164 if (r->Op2 & 0x1) { in access_pmcnten()
1175 p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0); in access_pmcnten()
1189 if (p->is_write) { in access_pminten()
1190 u64 val = p->regval & mask; in access_pminten()
1192 if (r->Op2 & 0x1) in access_pminten()
1199 p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1); in access_pminten()
1213 if (p->is_write) { in access_pmovs()
1214 if (r->CRm & 0x2) in access_pmovs()
1216 __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask); in access_pmovs()
1219 __vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask); in access_pmovs()
1221 p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0); in access_pmovs()
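The accessors above share one pattern: a single stored register backs both the ...SET and ...CLR encodings, and reads of either return the accumulated value. A self-contained sketch of that pattern (illustrative only, using plain C types rather than the kernel's):

	#include <stdbool.h>
	#include <stdint.h>

	/* One stored value backs both the xxxSET and xxxCLR views. */
	static void emulate_set_clr(uint64_t *state, uint64_t written, bool set_view)
	{
		if (set_view)
			*state |= written;	/* writing 1s via the SET view enables bits */
		else
			*state &= ~written;	/* writing 1s via the CLR view disables bits */
		/* a read of either view simply returns *state */
	}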
1232 if (!p->is_write) in access_pmswinc()
1239 kvm_pmu_software_increment(vcpu, p->regval & mask); in access_pmswinc()
1246 if (p->is_write) { in access_pmuserenr()
1253 p->regval & ARMV8_PMU_USERENR_MASK; in access_pmuserenr()
1255 p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0) in access_pmuserenr()
1273 struct kvm *kvm = vcpu->kvm; in set_pmcr()
1275 mutex_lock(&kvm->arch.config_lock); in set_pmcr()
1284 kvm->arch.pmcr_n = new_n; in set_pmcr()
1286 mutex_unlock(&kvm->arch.config_lock); in set_pmcr()
1303 __vcpu_sys_reg(vcpu, r->reg) = val; in set_pmcr()
1404 if (p->is_write) in access_arch_timer()
1405 kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval); in access_arch_timer()
1407 p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg); in access_arch_timer()
1439 * arm64_check_features() - Check if a feature register value constitutes
1457 u64 writable_mask = rd->val; in arm64_check_features()
1458 u64 limit = rd->reset(vcpu, rd); in arm64_check_features()
1467 return val ? -E2BIG : 0; in arm64_check_features()
1471 return -EINVAL; in arm64_check_features()
1473 ftrp = ftr_reg->ftr_bits; in arm64_check_features()
1475 for (; ftrp && ftrp->width; ftrp++) { in arm64_check_features()
1493 return -E2BIG; in arm64_check_features()
1498 return -E2BIG; in arm64_check_features()
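A condensed sketch of the per-field check arm64_check_features() is doing with these values (simplified; the special cases for registers without an arm64_ftr_reg description and for bits not covered by any field are left out):

	static int check_ftr_fields(const struct arm64_ftr_bits *ftrp,
				    u64 val, u64 limit)
	{
		for (; ftrp && ftrp->width; ftrp++) {
			s64 f_val = arm64_ftr_value(ftrp, val);		/* field from the user value    */
			s64 f_lim = arm64_ftr_value(ftrp, limit);	/* field from KVM's reset limit */

			/*
			 * If the "safe" combination of the two is not the user
			 * value itself, the user asked for more than KVM supports.
			 */
			if (arm64_ftr_safe_value(ftrp, f_val, f_lim) != f_val)
				return -E2BIG;
		}
		return 0;
	}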
1530 if (!kvm_has_mte(vcpu->kvm)) in __kvm_read_sanitised_id_reg()
1568 return IDREG(vcpu->kvm, reg_to_encoding(r)); in read_id_reg()
1630 if (p->is_write) in access_id_reg()
1633 p->regval = read_id_reg(vcpu, r); in access_id_reg()
1638 /* Visibility overrides for SVE-specific control registers */
1658 * Although this is a per-CPU feature, we make it global because in read_sanitised_id_aa64pfr0_el1()
1724 * non-architectural PMUs. Of course, PMUv3 is the only game in town for in set_id_aa64dfr0_el1()
1725 * PMU virtualization, so the IMP_DEF value was rather user-hostile. in set_id_aa64dfr0_el1()
1731 * surprising than an ill-guided PMU driver poking at impdef system in set_id_aa64dfr0_el1()
1742 return -EINVAL; in set_id_aa64dfr0_el1()
1781 return -EINVAL; in set_id_dfr0_el1()
1784 return -EINVAL; in set_id_dfr0_el1()
1803 if (kvm_vm_has_ran_once(vcpu->kvm)) { in get_id_reg()
1808 mutex_lock(&vcpu->kvm->arch.config_lock); in get_id_reg()
1810 mutex_unlock(&vcpu->kvm->arch.config_lock); in get_id_reg()
1821 mutex_lock(&vcpu->kvm->arch.config_lock); in set_id_reg()
1827 if (kvm_vm_has_ran_once(vcpu->kvm)) { in set_id_reg()
1829 ret = -EBUSY; in set_id_reg()
1833 mutex_unlock(&vcpu->kvm->arch.config_lock); in set_id_reg()
1839 IDREG(vcpu->kvm, id) = val; in set_id_reg()
1841 mutex_unlock(&vcpu->kvm->arch.config_lock); in set_id_reg()
1844 * arm64_check_features() returns -E2BIG to indicate the register's in set_id_reg()
1845 * feature set is a superset of the maximally-allowed register value. in set_id_reg()
1848 * writes return -EINVAL. in set_id_reg()
1850 if (ret == -E2BIG) in set_id_reg()
1851 ret = -EINVAL; in set_id_reg()
1871 if (p->is_write) in access_ctr()
1874 p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0); in access_ctr()
1881 if (p->is_write) in access_clidr()
1884 p->regval = __vcpu_sys_reg(vcpu, r->reg); in access_clidr()
1936 if (kvm_has_mte(vcpu->kvm)) in reset_clidr()
1939 __vcpu_sys_reg(vcpu, r->reg) = clidr; in reset_clidr()
1941 return __vcpu_sys_reg(vcpu, r->reg); in reset_clidr()
1951 return -EINVAL; in set_clidr()
1953 __vcpu_sys_reg(vcpu, rd->reg) = val; in set_clidr()
1961 int reg = r->reg; in access_csselr()
1963 if (p->is_write) in access_csselr()
1964 vcpu_write_sys_reg(vcpu, p->regval, reg); in access_csselr()
1966 p->regval = vcpu_read_sys_reg(vcpu, reg); in access_csselr()
1975 if (p->is_write) in access_ccsidr()
1981 p->regval = get_ccsidr(vcpu, csselr); in access_ccsidr()
1989 if (kvm_has_mte(vcpu->kvm)) in mte_visibility()
2022 "trap of VNCR-backed register"); in bad_vncr_trap()
2145 if (p->is_write) in access_sp_el1()
2146 __vcpu_sys_reg(vcpu, SP_EL1) = p->regval; in access_sp_el1()
2148 p->regval = __vcpu_sys_reg(vcpu, SP_EL1); in access_sp_el1()
2157 if (p->is_write) in access_elr()
2158 vcpu_write_sys_reg(vcpu, p->regval, ELR_EL1); in access_elr()
2160 p->regval = vcpu_read_sys_reg(vcpu, ELR_EL1); in access_elr()
2169 if (p->is_write) in access_spsr()
2170 __vcpu_sys_reg(vcpu, SPSR_EL1) = p->regval; in access_spsr()
2172 p->regval = __vcpu_sys_reg(vcpu, SPSR_EL1); in access_spsr()
2736 if (p->is_write) { in trap_dbgdidr()
2739 u64 dfr = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1); in trap_dbgdidr()
2740 u64 pfr = IDREG(vcpu->kvm, SYS_ID_AA64PFR0_EL1); in trap_dbgdidr()
2743 p->regval = ((SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr) << 28) | in trap_dbgdidr()
3064 if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) { in check_sysreg_table()
3065 kvm_err("sys_reg table %pS entry %d out of order\n", &table[i - 1], i - 1); in check_sysreg_table()
3096 BUG_ON(!r->access); in perform_access()
3099 if (likely(r->access(vcpu, params, r))) in perform_access()
3104 * emulate_cp -- tries to match a sys_reg access in a handling table, and
3138 int cp = -1; in unhandled_cp_access()
3160 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
3182 * Make a 64-bit value out of Rt and Rt2. As we use the same trap in kvm_handle_cp_64()
3183 * backends between AArch32 and AArch64, we get away with it. in kvm_handle_cp_64()
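The packing that comment describes, spelled out (illustration only, not the exact kernel lines; rt_val and rt2_val stand for the two 32-bit GPRs named by the trap):

	u64 regval = (u64)rt_val | ((u64)rt2_val << 32);	/* Rt -> bits [31:0], Rt2 -> bits [63:32] */

	/* and unpacked again on the way back to the guest: */
	u32 rt_out  = (u32)regval;
	u32 rt2_out = (u32)(regval >> 32);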
3221 params->is_write = ((esr & 1) == 0); in kvm_esr_cp10_id_to_sys64()
3222 params->Op0 = 3; in kvm_esr_cp10_id_to_sys64()
3223 params->Op1 = 0; in kvm_esr_cp10_id_to_sys64()
3224 params->CRn = 0; in kvm_esr_cp10_id_to_sys64()
3225 params->CRm = 3; in kvm_esr_cp10_id_to_sys64()
3227 /* CP10 ID registers are read-only */ in kvm_esr_cp10_id_to_sys64()
3228 valid = !params->is_write; in kvm_esr_cp10_id_to_sys64()
3233 params->Op2 = 0; in kvm_esr_cp10_id_to_sys64()
3237 params->Op2 = 1; in kvm_esr_cp10_id_to_sys64()
3241 params->Op2 = 2; in kvm_esr_cp10_id_to_sys64()
3251 params->is_write ? "write" : "read", reg_id); in kvm_esr_cp10_id_to_sys64()
3256 * kvm_handle_cp10_id() - Handles a VMRS trap on guest access to a 'Media and
3260 * MVFR{0-2} are architecturally mapped to the AArch64 MVFR{0-2}_EL1 registers.
3283 * kvm_emulate_cp15_id_reg() - Handles an MRC trap on a guest CP15 access where
3294 * According to DDI0487G.b G7.3.1, paragraph "Behavior of VMSAv8-32 32-bit
3305 if (params->is_write) { in kvm_emulate_cp15_id_reg()
3310 params->Op0 = 3; in kvm_emulate_cp15_id_reg()
3317 if (params->CRm > 3) in kvm_emulate_cp15_id_reg()
3318 params->regval = 0; in kvm_emulate_cp15_id_reg()
3322 vcpu_set_reg(vcpu, Rt, params->regval); in kvm_emulate_cp15_id_reg()
3327 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
3338 params->regval = vcpu_get_reg(vcpu, Rt); in kvm_handle_cp_32()
3341 if (!params->is_write) in kvm_handle_cp_32()
3342 vcpu_set_reg(vcpu, Rt, params->regval); in kvm_handle_cp_32()
3390 return params->Op0 == 3 && (params->CRn & 0b1011) == 0b1011; in is_imp_def_sys_reg()
3394 * emulate_sys_reg - Emulate a guest access to an AArch64 system register
3427 struct kvm *kvm = vcpu->kvm; in kvm_reset_id_regs()
3429 if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags)) in kvm_reset_id_regs()
3432 lockdep_assert_held(&kvm->arch.config_lock); in kvm_reset_id_regs()
3436 IDREG(kvm, id) = idreg->reset(vcpu, idreg); in kvm_reset_id_regs()
3442 set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags); in kvm_reset_id_regs()
3446 * kvm_reset_sys_regs - sets system registers to reset value
3464 if (r->reset) in kvm_reset_sys_regs()
3465 r->reset(vcpu, r); in kvm_reset_sys_regs()
3470 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
3512 params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK) in index_to_params()
3514 params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK) in index_to_params()
3516 params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK) in index_to_params()
3518 params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK) in index_to_params()
3520 params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK) in index_to_params()
3555 if (r && (!(r->reg || r->get_user) || sysreg_hidden(vcpu, r))) in id_to_sys_reg_desc()
3573 ((struct sys_reg_desc *)r)->val = read_sysreg(reg); \
3574 return ((struct sys_reg_desc *)r)->val; \
3583 ((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0); in FUNCTION_INVARIANT()
3584 return ((struct sys_reg_desc *)r)->val; in FUNCTION_INVARIANT()
3587 /* ->val is filled in by kvm_sys_reg_table_init() */
3602 return -ENOENT; in get_invariant_sys_reg()
3604 return put_user(r->val, uaddr); in get_invariant_sys_reg()
3615 return -ENOENT; in set_invariant_sys_reg()
3618 return -EFAULT; in set_invariant_sys_reg()
3621 if (r->val != val) in set_invariant_sys_reg()
3622 return -EINVAL; in set_invariant_sys_reg()
3634 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) in demux_c15_get()
3635 return -ENOENT; in demux_c15_get()
3640 return -ENOENT; in demux_c15_get()
3644 return -ENOENT; in demux_c15_get()
3648 return -ENOENT; in demux_c15_get()
3659 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) in demux_c15_set()
3660 return -ENOENT; in demux_c15_set()
3665 return -ENOENT; in demux_c15_set()
3669 return -ENOENT; in demux_c15_set()
3672 return -EFAULT; in demux_c15_set()
3676 return -ENOENT; in demux_c15_set()
3683 u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr; in kvm_sys_reg_get_user()
3688 r = id_to_sys_reg_desc(vcpu, reg->id, table, num); in kvm_sys_reg_get_user()
3690 return -ENOENT; in kvm_sys_reg_get_user()
3692 if (r->get_user) { in kvm_sys_reg_get_user()
3693 ret = (r->get_user)(vcpu, r, &val); in kvm_sys_reg_get_user()
3695 val = __vcpu_sys_reg(vcpu, r->reg); in kvm_sys_reg_get_user()
3707 void __user *uaddr = (void __user *)(unsigned long)reg->addr; in kvm_arm_sys_reg_get_reg()
3710 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) in kvm_arm_sys_reg_get_reg()
3711 return demux_c15_get(vcpu, reg->id, uaddr); in kvm_arm_sys_reg_get_reg()
3713 err = get_invariant_sys_reg(reg->id, uaddr); in kvm_arm_sys_reg_get_reg()
3714 if (err != -ENOENT) in kvm_arm_sys_reg_get_reg()
3724 u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr; in kvm_sys_reg_set_user()
3730 return -EFAULT; in kvm_sys_reg_set_user()
3732 r = id_to_sys_reg_desc(vcpu, reg->id, table, num); in kvm_sys_reg_set_user()
3734 return -ENOENT; in kvm_sys_reg_set_user()
3739 if (r->set_user) { in kvm_sys_reg_set_user()
3740 ret = (r->set_user)(vcpu, r, val); in kvm_sys_reg_set_user()
3742 __vcpu_sys_reg(vcpu, r->reg) = val; in kvm_sys_reg_set_user()
3751 void __user *uaddr = (void __user *)(unsigned long)reg->addr; in kvm_arm_sys_reg_set_reg()
3754 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) in kvm_arm_sys_reg_set_reg()
3755 return demux_c15_set(vcpu, reg->id, uaddr); in kvm_arm_sys_reg_set_reg()
3757 err = set_invariant_sys_reg(reg->id, uaddr); in kvm_arm_sys_reg_set_reg()
3758 if (err != -ENOENT) in kvm_arm_sys_reg_set_reg()
3778 return -EFAULT; in write_demux_regids()
3788 (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) | in sys_reg_to_index()
3789 (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) | in sys_reg_to_index()
3790 (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) | in sys_reg_to_index()
3791 (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) | in sys_reg_to_index()
3792 (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT)); in sys_reg_to_index()
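For context on how the Op0/Op1/CRn/CRm/Op2 shifts above are consumed, here is a userspace sketch (assumptions: an already-created vcpu fd and no error handling; ARM64_SYS_REG() is the uapi helper that builds the same index layout index_to_params() decodes, and MPIDR_EL1 is encoded as op0=3, op1=0, CRn=0, CRm=0, op2=5):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static uint64_t read_mpidr_el1(int vcpu_fd)
	{
		uint64_t val = 0;
		struct kvm_one_reg reg = {
			.id   = ARM64_SYS_REG(3, 0, 0, 0, 5),	/* MPIDR_EL1 */
			.addr = (uint64_t)(unsigned long)&val,
		};

		ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
		return val;
	}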
3816 if (!(rd->reg || rd->get_user)) in walk_one_sys_reg()
3823 return -EFAULT; in walk_one_sys_reg()
3862 return -EFAULT; in kvm_arm_copy_sys_reg_indices()
3892 u64 __user *masks = (u64 __user *)range->addr; in kvm_vm_ioctl_get_reg_writable_masks()
3895 if (range->range || in kvm_vm_ioctl_get_reg_writable_masks()
3896 memcmp(range->reserved, zero_page, sizeof(range->reserved))) in kvm_vm_ioctl_get_reg_writable_masks()
3897 return -EINVAL; in kvm_vm_ioctl_get_reg_writable_masks()
3901 return -EFAULT; in kvm_vm_ioctl_get_reg_writable_masks()
3908 if (!is_feature_id_reg(encoding) || !reg->set_user) in kvm_vm_ioctl_get_reg_writable_masks()
3918 if (!reg->val || in kvm_vm_ioctl_get_reg_writable_masks()
3921 val = reg->val; in kvm_vm_ioctl_get_reg_writable_masks()
3927 return -EFAULT; in kvm_vm_ioctl_get_reg_writable_masks()
3948 return -EINVAL; in kvm_sys_reg_table_init()
3958 return -EINVAL; in kvm_sys_reg_table_init()