1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
21965aae3SH. Peter Anvin #ifndef _ASM_X86_MSR_H
31965aae3SH. Peter Anvin #define _ASM_X86_MSR_H
4be7baf80SThomas Gleixner
5b72e7464SBorislav Petkov #include "msr-index.h"
6be7baf80SThomas Gleixner
724a295e4SThomas Huth #ifndef __ASSEMBLER__
8c210d249SGlauber de Oliveira Costa
9c210d249SGlauber de Oliveira Costa #include <asm/asm.h>
10c210d249SGlauber de Oliveira Costa #include <asm/errno.h>
116bc1096dSBorislav Petkov #include <asm/cpumask.h>
12b72e7464SBorislav Petkov #include <uapi/asm/msr.h>
13176db622SMichael Roth #include <asm/shared/msr.h>
14c210d249SGlauber de Oliveira Costa
15efef7f18SXin Li (Intel) #include <linux/types.h>
165323922fSThomas Gleixner #include <linux/percpu.h>
175323922fSThomas Gleixner
/*
 * Carrier for one MSR access: used as the argument block for the
 * smp_call_function()-style *_on_cpu() helpers.
 */
struct msr_info {
	u32 msr_no;			/* MSR index to read or write */
	struct msr reg;			/* value read, or value to write */
	struct msr __percpu *msrs;	/* optional per-CPU result array */
	int err;			/* result of the access (0 on success) */
};

/* Carrier for the full-register-array (regs[8]) safe MSR accessors. */
struct msr_regs_info {
	u32 *regs;			/* presumably the u32[8] array of {rd,wr}msr_safe_regs() — confirm */
	int err;			/* result of the access (0 on success) */
};

/* One saved MSR value plus a flag saying whether it was captured. */
struct saved_msr {
	bool valid;			/* true if 'info' holds a captured value */
	struct msr_info info;		/* MSR number and saved value */
};

/* A bundle of saved MSRs, e.g. for save/restore across a state change. */
struct saved_msrs {
	unsigned int num;		/* number of entries in 'array' */
	struct saved_msr *array;	/* the saved entries */
};
397a9c2dd0SChen Yu
40c210d249SGlauber de Oliveira Costa /*
417f47d8ccSAndi Kleen * Be very careful with includes. This header is prone to include loops.
427f47d8ccSAndi Kleen */
437f47d8ccSAndi Kleen #include <asm/atomic.h>
447f47d8ccSAndi Kleen #include <linux/tracepoint-defs.h>
457f47d8ccSAndi Kleen
#ifdef CONFIG_TRACEPOINTS
/*
 * Tracepoints for MSR/PMC accesses.  The do_trace_*() helpers are invoked
 * by the accessors below only after a tracepoint_enabled() check, so the
 * fast path pays just a static-key test.
 */
DECLARE_TRACEPOINT(read_msr);
DECLARE_TRACEPOINT(write_msr);
DECLARE_TRACEPOINT(rdpmc);
extern void do_trace_write_msr(u32 msr, u64 val, int failed);
extern void do_trace_read_msr(u32 msr, u64 val, int failed);
extern void do_trace_rdpmc(u32 msr, u64 val, int failed);
#else
/* No-op stubs so callers don't need their own CONFIG_TRACEPOINTS #ifdefs. */
static inline void do_trace_write_msr(u32 msr, u64 val, int failed) {}
static inline void do_trace_read_msr(u32 msr, u64 val, int failed) {}
static inline void do_trace_rdpmc(u32 msr, u64 val, int failed) {}
#endif
587f47d8ccSAndi Kleen
59a585df8eSBorislav Petkov /*
60a585df8eSBorislav Petkov * __rdmsr() and __wrmsr() are the two primitives which are the bare minimum MSR
61a585df8eSBorislav Petkov * accessors and should not have any tracing or other functionality piggybacking
62a585df8eSBorislav Petkov * on them - those are *purely* for accessing MSRs and nothing more. So don't even
63a585df8eSBorislav Petkov * think of extending them - you will be slapped with a stinking trout or a frozen
64a585df8eSBorislav Petkov * shark will reach you, wherever you are! You've been warned.
65a585df8eSBorislav Petkov */
/*
 * Read MSR @msr and return its 64-bit value.  A faulting RDMSR is fixed
 * up through the exception table entry (EX_TYPE_RDMSR) rather than
 * oopsing; no error is reported to the caller.  No tracing.
 */
static __always_inline u64 __rdmsr(u32 msr)
{
	EAX_EDX_DECLARE_ARGS(val, low, high);

	asm volatile("1: rdmsr\n"
		     "2:\n"
		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_RDMSR)
		     : EAX_EDX_RET(val, low, high) : "c" (msr));

	return EAX_EDX_VAL(val, low, high);
}
77be7baf80SThomas Gleixner
/*
 * Write the 64-bit @val to MSR @msr (low half in EAX, high half in EDX).
 * A faulting WRMSR is fixed up via the EX_TYPE_WRMSR extable entry; no
 * error is reported.  The "memory" clobber keeps memory accesses from
 * being reordered across the MSR write.  No tracing.
 */
static __always_inline void __wrmsrq(u32 msr, u64 val)
{
	asm volatile("1: wrmsr\n"
		     "2:\n"
		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR)
		     : : "c" (msr), "a" ((u32)val), "d" ((u32)(val >> 32)) : "memory");
}
85a585df8eSBorislav Petkov
/*
 * Untraced MSR read, splitting the 64-bit result into its low (@val1)
 * and high (@val2) 32-bit halves.
 */
#define native_rdmsr(msr, val1, val2)			\
do {							\
	u64 __val = __rdmsr((msr));			\
	(void)((val1) = (u32)__val);			\
	(void)((val2) = (u32)(__val >> 32));		\
} while (0)
92c996f380SBorislav Petkov
/* Untraced MSR read returning the full 64-bit value. */
static __always_inline u64 native_rdmsrq(u32 msr)
{
	return __rdmsr(msr);
}
97ed56a309SXin Li (Intel)
/* Untraced MSR write from a low/high 32-bit pair. */
#define native_wrmsr(msr, low, high)			\
	__wrmsrq((msr), (u64)(high) << 32 | (low))

/* Untraced MSR write of a full 64-bit value. */
#define native_wrmsrq(msr, val)				\
	__wrmsrq((msr), (val))
103c996f380SBorislav Petkov
/*
 * Read MSR @msr and fire the read_msr tracepoint (if enabled) with the
 * value obtained.  Faults are silently fixed up inside __rdmsr().
 */
static inline u64 native_read_msr(u32 msr)
{
	u64 val;

	val = __rdmsr(msr);

	if (tracepoint_enabled(read_msr))
		do_trace_read_msr(msr, val, 0);

	return val;
}
115a585df8eSBorislav Petkov
/*
 * Read MSR @msr into *@p, reporting faults to the caller.
 *
 * On success the inline "xor %[err],%[err]" leaves err == 0; on a
 * faulting RDMSR the EX_TYPE_RDMSR_SAFE extable fixup supplies a
 * non-zero error code in the %[err] register.  Returns that error code.
 */
static inline int native_read_msr_safe(u32 msr, u64 *p)
{
	int err;
	EAX_EDX_DECLARE_ARGS(val, low, high);

	asm volatile("1: rdmsr ; xor %[err],%[err]\n"
		     "2:\n\t"
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_RDMSR_SAFE, %[err])
		     : [err] "=r" (err), EAX_EDX_RET(val, low, high)
		     : "c" (msr));
	if (tracepoint_enabled(read_msr))
		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), err);

	*p = EAX_EDX_VAL(val, low, high);

	return err;
}
133be7baf80SThomas Gleixner
/* Can be uninlined because referenced by paravirt */
/*
 * Write @val to MSR @msr and fire the write_msr tracepoint (if enabled).
 * notrace: must not recurse into the function tracer, since tracing
 * itself may write MSRs.
 */
static inline void notrace native_write_msr(u32 msr, u64 val)
{
	native_wrmsrq(msr, val);

	if (tracepoint_enabled(write_msr))
		do_trace_write_msr(msr, val, 0);
}
142be7baf80SThomas Gleixner
/* Can be uninlined because referenced by paravirt */
/*
 * Write @val to MSR @msr, reporting faults to the caller.
 *
 * err lives in EAX ("=a"); the "0" constraint feeds the low half of
 * @val in through the same register before the WRMSR executes.  On
 * success the trailing XOR zeroes err; on a fault the
 * EX_TYPE_WRMSR_SAFE extable fixup supplies a non-zero error code.
 */
static inline int notrace native_write_msr_safe(u32 msr, u64 val)
{
	int err;

	asm volatile("1: wrmsr ; xor %[err],%[err]\n"
		     "2:\n\t"
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_WRMSR_SAFE, %[err])
		     : [err] "=a" (err)
		     : "c" (msr), "0" ((u32)val), "d" ((u32)(val >> 32))
		     : "memory");
	if (tracepoint_enabled(write_msr))
		do_trace_write_msr(msr, val, err);
	return err;
}
158be7baf80SThomas Gleixner
/* Safe MSR access through a full eight-entry u32 register array. */
extern int rdmsr_safe_regs(u32 regs[8]);
extern int wrmsr_safe_regs(u32 regs[8]);
161132ec92fSBorislav Petkov
/*
 * Read performance-monitoring counter @counter via RDPMC and fire the
 * rdpmc tracepoint (if enabled).
 */
static inline u64 native_read_pmc(int counter)
{
	EAX_EDX_DECLARE_ARGS(val, low, high);

	asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
	if (tracepoint_enabled(rdpmc))
		do_trace_rdpmc(counter, EAX_EDX_VAL(val, low, high), 0);
	return EAX_EDX_VAL(val, low, high);
}
171be7baf80SThomas Gleixner
1729bad5658SJuergen Gross #ifdef CONFIG_PARAVIRT_XXL
173be7baf80SThomas Gleixner #include <asm/paravirt.h>
17496a388deSThomas Gleixner #else
175be7baf80SThomas Gleixner #include <linux/errno.h>
176be7baf80SThomas Gleixner /*
177be7baf80SThomas Gleixner * Access to machine-specific registers (available on 586 and better only)
178be7baf80SThomas Gleixner * Note: the rd* operations modify the parameters directly (without using
179be7baf80SThomas Gleixner * pointer indirection), this allows gcc to optimize better
180be7baf80SThomas Gleixner */
181be7baf80SThomas Gleixner
/*
 * Read an MSR into a low/high 32-bit lvalue pair (values, not pointers —
 * contrast with rdmsr_safe() below).
 */
#define rdmsr(msr, low, high)					\
do {								\
	u64 __val = native_read_msr((msr));			\
	(void)((low) = (u32)__val);				\
	(void)((high) = (u32)(__val >> 32));			\
} while (0)
188be7baf80SThomas Gleixner
/* Write an MSR from a low/high 32-bit register pair. */
static inline void wrmsr(u32 msr, u32 low, u32 high)
{
	u64 val = ((u64)high << 32) | low;

	native_write_msr(msr, val);
}
193be7baf80SThomas Gleixner
/* Read an MSR into a single 64-bit lvalue. */
#define rdmsrq(msr, val)			\
	((val) = native_read_msr((msr)))
196be7baf80SThomas Gleixner
/* Write a full 64-bit value to an MSR. */
static inline void wrmsrq(u32 msr, u64 val)
{
	native_write_msr(msr, val);
}
201be7baf80SThomas Gleixner
/* wrmsr with exception handling */
/* Returns 0 on success, or the error code from the faulting WRMSR. */
static inline int wrmsrq_safe(u32 msr, u64 val)
{
	return native_write_msr_safe(msr, val);
}
207be7baf80SThomas Gleixner
/* rdmsr with exception handling */
/*
 * Note: unlike rdmsr(), @low and @high are pointers.  Evaluates to 0 on
 * success or the error code from a faulting RDMSR.
 */
#define rdmsr_safe(msr, low, high)				\
({								\
	u64 __val;						\
	int __err = native_read_msr_safe((msr), &__val);	\
	(*low) = (u32)__val;					\
	(*high) = (u32)(__val >> 32);				\
	__err;							\
})
217be7baf80SThomas Gleixner
/* 64-bit rdmsr with exception handling; returns 0 on success. */
static inline int rdmsrq_safe(u32 msr, u64 *p)
{
	return native_read_msr_safe(msr, p);
}
222177fed1eSBorislav Petkov
/* Read performance counter @counter (native RDPMC in the !PARAVIRT_XXL case). */
static __always_inline u64 rdpmc(int counter)
{
	return native_read_pmc(counter);
}
2271ff4d58aSAndi Kleen
2289bad5658SJuergen Gross #endif /* !CONFIG_PARAVIRT_XXL */
2299261e050SAndy Lutomirski
/* Instruction opcode for WRMSRNS supported in binutils >= 2.40 */
#define ASM_WRMSRNS _ASM_BYTES(0x0f,0x01,0xc6)

/* Non-serializing WRMSR, when available. Falls back to a serializing WRMSR. */
static __always_inline void wrmsrns(u32 msr, u64 val)
{
	/*
	 * WRMSR is 2 bytes. WRMSRNS is 3 bytes. Pad WRMSR with a redundant
	 * DS prefix to avoid a trailing NOP.
	 */
	asm volatile("1: " ALTERNATIVE("ds wrmsr", ASM_WRMSRNS, X86_FEATURE_WRMSRNS)
		     "2: " _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR)
		     : : "c" (msr), "a" ((u32)val), "d" ((u32)(val >> 32)));
	/*
	 * NOTE(review): unlike __wrmsrq() there is no "memory" clobber here;
	 * confirm callers don't rely on ordering against memory accesses.
	 */
}
244a4cb5eceSXin Li
245cf991de2SAndy Lutomirski /*
2460c2678efSXin Li (Intel) * Dual u32 version of wrmsrq_safe():
247cf991de2SAndy Lutomirski */
wrmsr_safe(u32 msr,u32 low,u32 high)2480c2678efSXin Li (Intel) static inline int wrmsr_safe(u32 msr, u32 low, u32 high)
249cf991de2SAndy Lutomirski {
2500c2678efSXin Li (Intel) return wrmsrq_safe(msr, (u64)high << 32 | low);
251cf991de2SAndy Lutomirski }
252be7baf80SThomas Gleixner
/* Allocate/free a per-CPU array of struct msr for the *_on_cpus() APIs. */
struct msr __percpu *msrs_alloc(void);
void msrs_free(struct msr __percpu *msrs);
/* Atomically set/clear bit @bit in MSR @msr; see implementation for return semantics. */
int msr_set_bit(u32 msr, u8 bit);
int msr_clear_bit(u32 msr, u8 bit);
25750542251SBorislav Petkov
#ifdef CONFIG_SMP
/*
 * Cross-CPU MSR accessors: perform the access on CPU @cpu.  The "safe"
 * variants report faults via their return value.
 */
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs);
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
271be7baf80SThomas Gleixner #else /* CONFIG_SMP */
/*
 * UP stubs: with one CPU the @cpu argument is ignored and the access is
 * performed directly on the current CPU; they always return 0.
 */
static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	rdmsr(msr_no, *l, *h);
	return 0;
}
static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	wrmsr(msr_no, l, h);
	return 0;
}
static inline int rdmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	rdmsrq(msr_no, *q);
	return 0;
}
static inline int wrmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	wrmsrq(msr_no, q);
	return 0;
}
/* UP stub: the cpumask collapses to CPU 0's slot of the per-CPU array. */
static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr __percpu *msrs)
{
	rdmsr_on_cpu(0, msr_no, raw_cpu_ptr(&msrs->l), raw_cpu_ptr(&msrs->h));
}
/* UP stub: write this CPU's entry of the per-CPU array to the MSR. */
static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr __percpu *msrs)
{
	wrmsr_on_cpu(0, msr_no, raw_cpu_read(msrs->l), raw_cpu_read(msrs->h));
}
/*
 * UP stubs for the fault-reporting variants: @cpu is ignored, the return
 * value is whatever the local safe accessor reports.
 */
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
				    u32 *l, u32 *h)
{
	return rdmsr_safe(msr_no, l, h);
}
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	return wrmsr_safe(msr_no, l, h);
}
static inline int rdmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	return rdmsrq_safe(msr_no, q);
}
static inline int wrmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	return wrmsrq_safe(msr_no, q);
}
/* UP stubs for the register-array variants: @cpu is ignored. */
static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return rdmsr_safe_regs(regs);
}
static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return wrmsr_safe_regs(regs);
}
327be7baf80SThomas Gleixner #endif /* CONFIG_SMP */
328a5447e92SIngo Molnar
/*
 * Compatibility wrappers for the legacy "l"-suffixed names; new code
 * should use the *q variants directly.
 */
#define rdmsrl(msr, val)		rdmsrq(msr, val)
#define wrmsrl(msr, val)		wrmsrq(msr, val)
#define rdmsrl_on_cpu(cpu, msr, q)	rdmsrq_on_cpu(cpu, msr, q)
333a5447e92SIngo Molnar
33424a295e4SThomas Huth #endif /* __ASSEMBLER__ */
3351965aae3SH. Peter Anvin #endif /* _ASM_X86_MSR_H */
336