
Searched full:msr (Results 1 – 25 of 840) sorted by relevance


/linux-6.8/tools/testing/selftests/kvm/x86_64/
hyperv_features.c
    37  static bool is_write_only_msr(uint32_t msr)
    39          return msr == HV_X64_MSR_EOI;
    42  static void guest_msr(struct msr_data *msr)
    47          GUEST_ASSERT(msr->idx);
    49          if (msr->write)
    50                  vector = wrmsr_safe(msr->idx, msr->write_val);
    52          if (!vector && (!msr->write || !is_write_only_msr(msr->idx)))
    53                  vector = rdmsr_safe(msr->idx, &msr_val);
    55          if (msr->fault_expected)
    58                  msr->write ? "WR" : "RD", msr->idx, vector);
    [all …]
userspace_msr_exit_test.c
    30  /* Test an MSR the kernel knows about. */
    37  /* Test an MSR the kernel doesn't know about. */
    44  /* Test a fabricated MSR that no one knows about. */
    86  static void deny_msr(uint8_t *bitmap, u32 msr)
    88          u32 idx = msr & (KVM_MSR_FILTER_MAX_BITMAP_SIZE - 1);
   151  static noinline uint64_t test_rdmsr(uint32_t msr)
   158          "=a"(a), "=d"(d) : "c"(msr) : "memory");
   167  static noinline void test_wrmsr(uint32_t msr, uint64_t value)
   175          "a"(a), "d"(d), "c"(msr) : "memory");
   185  static noinline uint64_t test_em_rdmsr(uint32_t msr)
    [all …]
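
The test_rdmsr/test_wrmsr fragments above are the standard RDMSR/WRMSR inline-assembly pattern: ECX selects the MSR and the 64-bit value travels in EDX:EAX. A minimal self-contained sketch of the read side (rdmsr_example is a hypothetical name; this only works at CPL0 or inside a guest, otherwise it faults):

    #include <stdint.h>

    /* Sketch of the RDMSR pattern shown above: ECX selects the MSR,
     * EDX:EAX returns the 64-bit value. Only legal in ring 0 (or a
     * guest, as in the selftest); a user process takes #GP instead. */
    static inline uint64_t rdmsr_example(uint32_t msr)
    {
            uint32_t lo, hi;

            __asm__ __volatile__("rdmsr"
                                 : "=a"(lo), "=d"(hi)
                                 : "c"(msr)
                                 : "memory");
            return lo | ((uint64_t)hi << 32);
    }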
kvm_pv_test.c
    20  #define TEST_MSR(msr) { .idx = msr, .name = #msr }
    22  #define PR_MSR(msr) ucall(UCALL_PR_MSR, 1, msr)
    41  static void test_msr(struct msr_data *msr)
    46          PR_MSR(msr);
    48          vector = rdmsr_safe(msr->idx, &ignored);
    51          vector = wrmsr_safe(msr->idx, 0);
   100          struct msr_data *msr = (struct msr_data *)uc->args[0];
   102          pr_info("testing msr: %s (%#x)\n", msr->name, msr->idx);
/linux-6.8/arch/x86/lib/
msr.c
     5  #include <asm/msr.h>
     7  #include <asm/msr-trace.h>
     9  struct msr *msrs_alloc(void)
    11          struct msr *msrs = NULL;
    13          msrs = alloc_percpu(struct msr);
    23  void msrs_free(struct msr *msrs)
    30   * msr_read - Read an MSR with error handling
    31   * @msr: MSR to read
    39  static int msr_read(u32 msr, struct msr *m)
    44          err = rdmsrl_safe(msr, &val);
    [all …]
msr-smp.c
     6  #include <asm/msr.h>
    11          struct msr *reg;        /* in __rdmsr_on_cpu() */
    25          struct msr *reg;        /* in __wrmsr_on_cpu() */
   100          struct msr *msrs,       /* in __rwmsr_on_cpus() */
   123   * @msr_no: which MSR
   124   * @msrs: array of MSR values
   127  void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
   137   * @msr_no: which MSR
   138   * @msrs: array of MSR values
   141  void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
    [all …]
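
Taken together with msrs_alloc()/msrs_free() from msr.c above, these helpers suggest the usual pattern for sampling one MSR across a set of CPUs. A minimal sketch, assuming kernel context where cross-CPU IPIs are permissible (read_tsc_all_cpus is a hypothetical name; MSR_IA32_TSC is just a convenient read-only example):

    #include <linux/cpumask.h>
    #include <linux/percpu.h>
    #include <linux/printk.h>
    #include <asm/msr.h>

    /* Read MSR_IA32_TSC on every online CPU via the percpu helpers above. */
    static void read_tsc_all_cpus(void)
    {
            struct msr *msrs;
            int cpu;

            msrs = msrs_alloc();    /* percpu array of struct msr */
            if (!msrs)
                    return;

            rdmsr_on_cpus(cpu_online_mask, MSR_IA32_TSC, msrs);

            for_each_online_cpu(cpu)
                    pr_info("cpu%d: tsc=%llu\n", cpu,
                            per_cpu_ptr(msrs, cpu)->q);

            msrs_free(msrs);
    }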
/linux-6.8/arch/x86/kernel/cpu/
perfctr-watchdog.c
    44  /* converts an msr to an appropriate reservation bit */
    45  static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
    51          if (msr >= MSR_F15H_PERF_CTR)
    52                  return (msr - MSR_F15H_PERF_CTR) >> 1;
    53          return msr - MSR_K7_PERFCTR0;
    56          return msr - MSR_ARCH_PERFMON_PERFCTR0;
    60          return msr - MSR_P6_PERFCTR0;
    62          return msr - MSR_KNC_PERFCTR0;
    64          return msr - MSR_P4_BPU_PERFCTR0;
    69          return msr - MSR_ARCH_PERFMON_PERFCTR0;
    [all …]
feat_ctl.c
     6  #include <asm/msr-index.h>
    47          /* All 64 bits of tertiary controls MSR are allowed-1 settings. */
    57           * MSR, low for EPT, high for VPID.
   117          u64 msr;        /* in init_ia32_feat_ctl() */
   119          if (rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr)) {
   139          if (msr & FEAT_CTL_LOCKED)
   143           * Ignore whatever value BIOS left in the MSR to avoid enabling random
   146          msr = FEAT_CTL_LOCKED;
   154          msr |= FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;
   157          msr |= FEAT_CTL_VMX_ENABLED_INSIDE_SMX;
    [all …]
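
The init_ia32_feat_ctl() excerpt sets the relevant IA32_FEAT_CTL (MSR 0x3a) bits and locks the register. The bit layout it relies on (lock = bit 0, VMX inside SMX = bit 1, VMX outside SMX = bit 2) makes the "can we VMXON?" check a two-line predicate; a hedged standalone sketch (vmx_usable is a hypothetical helper, not kernel API, and the macros mirror asm/msr-index.h):

    #include <linux/bits.h>
    #include <linux/types.h>

    #define FEAT_CTL_LOCKED                         BIT(0)
    #define FEAT_CTL_VMX_ENABLED_INSIDE_SMX         BIT(1)
    #define FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX        BIT(2)

    /* VMXON can work if firmware left the MSR unlocked (the kernel may
     * set and lock it itself) or locked it with VMX-outside-SMX enabled. */
    static bool vmx_usable(u64 feat_ctl)
    {
            return !(feat_ctl & FEAT_CTL_LOCKED) ||
                   (feat_ctl & FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX);
    }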
/linux-6.8/arch/x86/include/asm/
msr.h
     5  #include "msr-index.h"
    12  #include <uapi/asm/msr.h>
    13  #include <asm/shared/msr.h>
    17          struct msr reg;
    18          struct msr *msrs;
    64  extern void do_trace_write_msr(unsigned int msr, u64 val, int failed);
    65  extern void do_trace_read_msr(unsigned int msr, u64 val, int failed);
    66  extern void do_trace_rdpmc(unsigned int msr, u64 val, int failed);
    68  static inline void do_trace_write_msr(unsigned int msr, u64 val, int failed) {}
    69  static inline void do_trace_read_msr(unsigned int msr, u64 val, int failed) {}
    [all …]
msr-trace.h
     3  #define TRACE_SYSTEM msr
     6  #define TRACE_INCLUDE_FILE msr-trace
    22          TP_PROTO(unsigned msr, u64 val, int failed),
    23          TP_ARGS(msr, val, failed),
    25                  __field(unsigned, msr)
    30                  __entry->msr = msr;
    35                  __entry->msr,
    41          TP_PROTO(unsigned msr, u64 val, int failed),
    42          TP_ARGS(msr, val, failed)
    46          TP_PROTO(unsigned msr, u64 val, int failed),
    [all …]
/linux-6.8/arch/powerpc/kvm/
book3s_hv_tm.c
    19          u64 msr = vcpu->arch.shregs.msr;        /* in emulate_tx_failure() */
    23          if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr))
    25          if (msr & MSR_PR) {
    45          u64 msr = vcpu->arch.shregs.msr;        /* in kvmhv_p9_tm_emulation() */
    74          WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&
    78          vcpu->arch.shregs.msr = newmsr;
    84          if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
    96          if ((msr & MSR_PR) && !(vcpu->arch.fscr & FSCR_EBB)) {
   105          WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&
   111          msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
    [all …]
book3s_hv_tm_builtin.c
    18   * (MSR[TS] = S and the fake-suspend flag is not set).
    23          u64 newmsr, msr, bescr;         /* in kvmhv_p9_tm_emulation_early() */
    45          vcpu->arch.shregs.msr = newmsr;
    52          msr = vcpu->arch.shregs.msr;
    53          if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206))
    57              ((msr & MSR_PR) && !(mfspr(SPRN_FSCR) & FSCR_EBB)))
    67          msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
    68          vcpu->arch.shregs.msr = msr;
    77          msr = vcpu->arch.shregs.msr;
    82          newmsr = (newmsr & ~MSR_LE) | (msr & MSR_LE);
    [all …]
/linux-6.8/drivers/powercap/
intel_rapl_msr.c
     3   * Intel Running Average Power Limit (RAPL) Driver via MSR interface
    32  /* private data for RAPL MSR Interface */
    37          .reg_unit.msr = MSR_RAPL_POWER_UNIT,
    38          .regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_LIMIT].msr  = MSR_PKG_POWER_LIMIT,
    39          .regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_STATUS].msr = MSR_PKG_ENERGY_STATUS,
    40          .regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_PERF].msr   = MSR_PKG_PERF_STATUS,
    41          .regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_INFO].msr   = MSR_PKG_POWER_INFO,
    42          .regs[RAPL_DOMAIN_PP0][RAPL_DOMAIN_REG_LIMIT].msr      = MSR_PP0_POWER_LIMIT,
    43          .regs[RAPL_DOMAIN_PP0][RAPL_DOMAIN_REG_STATUS].msr     = MSR_PP0_ENERGY_STATUS,
    44          .regs[RAPL_DOMAIN_PP0][RAPL_DOMAIN_REG_POLICY].msr     = MSR_PP0_POLICY,
    [all …]
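
The table above maps RAPL domains to raw MSRs. Outside the kernel the same registers are reachable through the msr character device; a hedged userspace sketch that converts MSR_PKG_ENERGY_STATUS (0x611) counts to joules using the energy unit in MSR_RAPL_POWER_UNIT (0x606, bits 12:8), assuming the msr module is loaded and the process has the needed privilege:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    /* /dev/cpu/N/msr exposes MSRs as a seekable file: the MSR number
     * is the read offset. Requires root (and the msr kernel module). */
    static int rdmsr_dev(int fd, uint32_t msr, uint64_t *val)
    {
            return pread(fd, val, sizeof(*val), msr) == sizeof(*val) ? 0 : -1;
    }

    int main(void)
    {
            uint64_t unit, energy;
            int fd = open("/dev/cpu/0/msr", O_RDONLY);

            if (fd < 0 || rdmsr_dev(fd, 0x606, &unit) ||    /* MSR_RAPL_POWER_UNIT */
                rdmsr_dev(fd, 0x611, &energy))              /* MSR_PKG_ENERGY_STATUS */
                    return 1;

            /* Energy status unit: 1 / 2^(bits 12:8) joules per count. */
            printf("package energy: %.3f J\n",
                   (double)(energy & 0xffffffff) / (1u << ((unit >> 8) & 0x1f)));
            close(fd);
            return 0;
    }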
/linux-6.8/arch/x86/events/
probe.c
    14   * Accepts msr[] array with non populated entries as long as either
    15   * msr[i].msr is 0 or msr[i].grp is NULL. Note that the default sysfs
    19  perf_msr_probe(struct perf_msr *msr, int cnt, bool zero, void *data)
    29                  if (!msr[bit].no_check) {
    30                          struct attribute_group *grp = msr[bit].grp;
    40                  if (!msr[bit].msr)
    43                  if (msr[bit].test && !msr[bit].test(bit, data))
    45                  /* Virt sucks; you cannot tell if a R/O MSR is present :/ */
    46                  if (rdmsrl_safe(msr[bit].msr, &val))
    49                  mask = msr[bit].mask;
/linux-6.8/arch/x86/kvm/
mtrr.c
    28  static bool is_mtrr_base_msr(unsigned int msr)
    31          return !(msr & 0x1);
    35                              unsigned int msr)       /* in var_mtrr_msr_to_range() */
    37          int index = (msr - MTRRphysBase_MSR(0)) / 2;
    42  static bool msr_mtrr_valid(unsigned msr)
    44          switch (msr) {
    68  static bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
    73          if (!msr_mtrr_valid(msr))
    76          if (msr == MSR_MTRRdefType) {
    80          } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
    [all …]
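
The index arithmetic in is_mtrr_base_msr() and var_mtrr_msr_to_range() encodes the architectural layout of the variable-range MTRRs: PhysBase(i) and PhysMask(i) registers are interleaved starting at MSR 0x200. A self-contained restatement of that math (names are illustrative, not the kernel's):

    #include <stdbool.h>
    #include <stdint.h>

    #define MTRR_PHYS_BASE(i)       (0x200u + 2 * (i))      /* IA32_MTRR_PHYSBASEi */
    #define MTRR_PHYS_MASK(i)       (0x200u + 2 * (i) + 1)  /* IA32_MTRR_PHYSMASKi */

    /* Even offset from 0x200 => base register, odd => mask register. */
    static inline bool mtrr_is_base(uint32_t msr)
    {
            return !(msr & 0x1);
    }

    /* Which variable range does this base/mask MSR describe? */
    static inline unsigned int mtrr_range_index(uint32_t msr)
    {
            return (msr - MTRR_PHYS_BASE(0)) / 2;
    }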
/linux-6.8/arch/powerpc/kernel/
signal_64.c
   130          unsigned long msr = regs->msr;  /* in __unsafe_setup_sigcontext() */
   144          /* set MSR_VEC in the MSR value in the frame to indicate that sc->v_reg)
   147          msr |= MSR_VEC;
   160           * Clear the MSR VSX bit to indicate there is no valid state attached
   163          msr &= ~MSR_VSX;
   173          /* set MSR_VSX in the MSR value in the frame to
   176          msr |= MSR_VSX;
   181          unsafe_put_user(msr, &sc->gp_regs[PT_MSR], efault_out);
   210                              unsigned long msr)      /* in setup_tm_sigcontexts() */
   229          BUG_ON(!MSR_TM_ACTIVE(msr));
    [all …]
signal_32.c
   158          /* copy up to but not including MSR */
   161          /* copy from orig_r3 (the word after the MSR) up to the end */
   271          unsigned long msr = regs->msr;  /* in __unsafe_save_user_regs() */
   281          /* set MSR_VEC in the saved MSR value to indicate that
   283          msr |= MSR_VEC;
   285          /* else assert((regs->msr & MSR_VEC) == 0) */
   299           * Clear the MSR VSX bit to indicate there is no valid state attached
   302          msr &= ~MSR_VSX;
   307           * the saved MSR value to indicate that frame->mc_vregs
   312          msr |= MSR_VSX;
    [all …]
cpu_setup_power.c
    16  /* Disable CPU_FTR_HVMODE and return false if MSR:HV is not set */
    19          u64 msr;        /* in init_hvmode_206() */
    21          msr = mfmsr();
    22          if (msr & MSR_HV)
   153          u64 msr;        /* in __restore_cpu_power7() */
   155          msr = mfmsr();
   156          if (!(msr & MSR_HV))
   185          u64 msr;        /* in __restore_cpu_power8() */
   191          msr = mfmsr();
   192          if (!(msr & MSR_HV))
    [all …]
/linux-6.8/Documentation/trace/
events-msr.rst
     2  MSR Trace Events
     5  The x86 kernel supports tracing most MSR (Model Specific Register) accesses.
    11  /sys/kernel/tracing/events/msr/
    13  Trace MSR reads:
    17  - msr: MSR number
    22  Trace MSR writes:
    26  - msr: MSR number
    37  cat /sys/kernel/tracing/trace | decode_msr.py /usr/src/linux/include/asm/msr-index.h
    39  to add symbolic MSR names.
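
The documentation excerpt describes the tracefs knobs; the same interface is scriptable by writing to the per-event enable files. A minimal C sketch, assuming tracefs is mounted at /sys/kernel/tracing, the kernel was built with the msr tracepoints, and the read event is named read_msr as the document above describes:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* Equivalent of:
             * echo 1 > /sys/kernel/tracing/events/msr/read_msr/enable */
            int fd = open("/sys/kernel/tracing/events/msr/read_msr/enable",
                          O_WRONLY);

            if (fd < 0) {
                    perror("enable read_msr tracepoint");
                    return 1;
            }
            if (write(fd, "1", 1) != 1)
                    return 1;
            close(fd);

            /* Stream events; pipe through decode_msr.py for symbolic names. */
            execlp("cat", "cat", "/sys/kernel/tracing/trace_pipe", (char *)NULL);
            return 1;
    }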
/linux-6.8/arch/x86/xen/
pmu.c
    29  /* Macro for computing address of a PMU MSR bank */
   132  static inline bool is_amd_pmu_msr(unsigned int msr)
   138          if ((msr >= MSR_F15H_PERF_CTL &&
   139               msr < MSR_F15H_PERF_CTR + (amd_num_counters * 2)) ||
   140              (msr >= MSR_K7_EVNTSEL0 &&
   141               msr < MSR_K7_PERFCTR0 + amd_num_counters))
   198  static bool xen_intel_pmu_emulate(unsigned int msr, u64 *val, int type,
   214          switch (msr) {
   252          if (msr == MSR_CORE_PERF_GLOBAL_OVF_CTRL)
   261  static bool xen_amd_pmu_emulate(unsigned int msr, u64 *val, bool is_read)
    [all …]
/linux-6.8/arch/x86/kvm/svm/
pmu.c
    38  static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
    47          switch (msr) {
    55                  idx = (unsigned int)((msr - MSR_F15H_PERF_CTL0) / 2);
    56                  if (!(msr & 0x1) != (type == PMU_TYPE_EVNTSEL))
    62                  idx = msr - MSR_K7_EVNTSEL0;
    67                  idx = msr - MSR_K7_PERFCTR0;
    97  static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
   102          pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
   103          pmc = pmc ? pmc : get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
   108  static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
    [all …]
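
get_gp_pmc_amd() above decodes the Family-15h "core extension" PMU layout: MSR_F15H_PERF_CTL0 (0xc0010200) and MSR_F15H_PERF_CTR0 (0xc0010201) alternate, so the low bit of the offset distinguishes event select from counter while dividing by two recovers the counter index. Restated as a standalone sketch (function names are illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    #define MSR_F15H_PERF_CTL0      0xc0010200u     /* event select 0 */
    #define MSR_F15H_PERF_CTR0      0xc0010201u     /* counter 0 */

    /* Counter pair index: CTLi/CTRi occupy consecutive MSRs. */
    static inline unsigned int f15h_pmc_index(uint32_t msr)
    {
            return (msr - MSR_F15H_PERF_CTL0) / 2;
    }

    /* Even offset => event select (CTL), odd offset => counter (CTR). */
    static inline bool f15h_is_evntsel(uint32_t msr)
    {
            return !(msr & 0x1);
    }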
/linux-6.8/arch/microblaze/kernel/
process.c
    44          pr_info(" msr=%08lX, ear=%08lX, esr=%08lX, fsr=%08lX\n",
    45                  regs->msr, regs->ear, regs->esr, regs->fsr);
    72          local_save_flags(childregs->msr);
    73          ti->cpu_context.msr = childregs->msr & ~MSR_IE;
    83          childregs->msr |= MSR_UMS;
    87           * before enabling VM. This MSR will be restored in switch_to and
    91           * compose the right MSR for RETURN(). It will work for switch_to also
    94           * right now MSR is a copy of parent one */
    95          childregs->msr &= ~MSR_EIP;
    96          childregs->msr |= MSR_IE;
    [all …]
/linux-6.8/tools/power/x86/turbostat/
turbostat.c
   267  int get_msr(int cpu, off_t offset, unsigned long long *msr);
   319          unsigned long long msr = 3;     /* in slm_bclk() */
   323          if (get_msr(base_cpu, MSR_FSB_FREQ, &msr))
   326          i = msr & 0xf;
  1033   * The accumulated sum of MSR is defined as a monotonic
  1034   * increasing MSR, it will be accumulated periodically,
  1047  int get_msr_sum(int cpu, off_t offset, unsigned long long *msr);
  1052  /* The accumulated MSR value is updated by the timer */
  1054  /* The MSR footprint recorded in last timer */
  1059  /* The percpu MSR sum array. */
    [all …]
/linux-6.8/arch/arm64/kvm/hyp/nvhe/
hyp-init.S
    91          msr     mair_el2, x1
    94          msr     hcr_el2, x1
   103          msr     tpidr_el2, x0
   110          msr     tpidr_el2, x1
   113          msr     vttbr_el2, x1
   116          msr     vtcr_el2, x1
   123          msr     ttbr0_el2, x2
   126          msr     tcr_el2, x0
   148          msr     sctlr_el2, x0
   153          msr     vbar_el2, x0
    [all …]
/linux-6.8/arch/m68k/bvme6000/
config.c
   166          unsigned char msr;      /* in bvme6000_timer_int() */
   169          msr = rtc->msr & 0xc0;
   170          rtc->msr = msr | 0x20;          /* Ack the interrupt */
   191          unsigned char msr = rtc->msr & 0xc0;    /* in bvme6000_sched_init() */
   193          rtc->msr = 0;           /* Ensure timer registers accessible */
   203          rtc->msr = 0x40;        /* Access int.cntrl, etc */
   208          rtc->msr = 0;           /* Access timer 1 control */
   211          rtc->msr = msr;
   233          unsigned char msr, msb;         /* in bvme6000_read_clk() */
   239          msr = rtc->msr & 0xc0;
    [all …]
/linux-6.8/arch/arm64/mm/
proc.S
   127          msr     tpidr_el0, x2
   128          msr     tpidrro_el0, x3
   129          msr     contextidr_el1, x4
   130          msr     cpacr_el1, x6
   136          msr     tcr_el1, x8
   137          msr     vbar_el1, x9
   146          msr     mdscr_el1, x10
   148          msr     sctlr_el1, x12
   150          msr     sp_el0, x14
   154          msr     osdlr_el1, x5
    [all …]
