xref: /kvm-unit-tests/lib/x86/pmu.h (revision 8a2866d111ccde83c5b364ec7dcdc74631631ae3)
1 #ifndef _X86_PMU_H_
2 #define _X86_PMU_H_
3 
4 #include "processor.h"
5 #include "libcflat.h"
6 
/* Index used for fixed counters, above the GP range — TODO confirm vs users. */
#define FIXED_CNT_INDEX 32
#define MAX_NUM_LBR_ENTRY	  32

/* Performance Counter Vector for the LVT PC Register */
#define PMI_VECTOR	32

#define DEBUGCTLMSR_LBR	  (1UL <<  0)

/* IA32_PERF_CAPABILITIES bits consumed below (LBR format, full-width writes). */
#define PMU_CAP_LBR_FMT	  0x3f
#define PMU_CAP_FW_WRITES	(1ULL << 13)

/* Bit positions of the fields within a GP event select MSR. */
#define EVNTSEL_EVENT_SHIFT	0
#define EVNTSEL_UMASK_SHIFT	8
#define EVNTSEL_USR_SHIFT	16
#define EVNTSEL_OS_SHIFT	17
#define EVNTSEL_EDGE_SHIFT	18
#define EVNTSEL_PC_SHIFT	19
#define EVNTSEL_INT_SHIFT	20
#define EVNTSEL_EN_SHIFT	22
#define EVNTSEL_INV_SHIFT	23
#define EVNTSEL_CMASK_SHIFT	24

/*
 * Historical misspellings, kept as aliases so existing users keep
 * compiling; prefer the correctly spelled names above in new code.
 */
#define EVNSEL_EVENT_SHIFT	EVNTSEL_EVENT_SHIFT
#define EVNTSEL_EN_SHIF		EVNTSEL_EN_SHIFT
#define EVNTSEL_INV_SHIF	EVNTSEL_INV_SHIFT

#define EVNTSEL_EN	(1 << EVNTSEL_EN_SHIFT)
#define EVNTSEL_USR	(1 << EVNTSEL_USR_SHIFT)
#define EVNTSEL_OS	(1 << EVNTSEL_OS_SHIFT)
#define EVNTSEL_PC	(1 << EVNTSEL_PC_SHIFT)
#define EVNTSEL_INT	(1 << EVNTSEL_INT_SHIFT)
#define EVNTSEL_INV	(1 << EVNTSEL_INV_SHIFT)
/*
 * Cached PMU capabilities of the current CPU, populated by pmu_init()
 * (presumably from CPUID and IA32_PERF_CAPABILITIES — confirm in pmu.c).
 * The accessors below read this instead of re-querying the hardware.
 */
struct pmu_caps {
	u8 version;			/* PMU version; 0 => no PMU, >= 2 => global ctrl MSRs */
	u8 nr_fixed_counters;		/* number of fixed-function counters */
	u8 fixed_counter_width;		/* bit width of fixed counters */
	u8 nr_gp_counters;		/* number of general-purpose counters */
	u8 gp_counter_width;		/* bit width of GP counters */
	u8 gp_counter_mask_length;
	u32 gp_counter_available;	/* bitmask, bit i set => GP counter i usable */
	u32 msr_gp_counter_base;	/* base MSR for GP counters (e.g. MSR_IA32_PMC0) */
	u32 msr_gp_event_select_base;	/* base MSR for GP event selects */

	/* Global status/control MSR addresses (architecture/vendor dependent). */
	u32 msr_global_status;
	u32 msr_global_ctl;
	u32 msr_global_status_clr;

	u64 perf_cap;			/* raw IA32_PERF_CAPABILITIES value */
};

/* Single global instance; defined in pmu.c and filled by pmu_init(). */
extern struct pmu_caps pmu;

/* Probe the CPU and populate the global "pmu" above. */
void pmu_init(void);
58 static inline u32 MSR_GP_COUNTERx(unsigned int i)
59 {
60 	return pmu.msr_gp_counter_base + i;
61 }
62 
63 static inline u32 MSR_GP_EVENT_SELECTx(unsigned int i)
64 {
65 	return pmu.msr_gp_event_select_base + i;
66 }
67 
68 static inline bool this_cpu_has_pmu(void)
69 {
70 	return !!pmu.version;
71 }
72 
73 static inline bool this_cpu_has_perf_global_ctrl(void)
74 {
75 	return pmu.version > 1;
76 }
77 
78 static inline bool pmu_gp_counter_is_available(int i)
79 {
80 	return pmu.gp_counter_available & BIT(i);
81 }
82 
83 static inline u64 pmu_lbr_version(void)
84 {
85 	return pmu.perf_cap & PMU_CAP_LBR_FMT;
86 }
87 
88 static inline bool pmu_has_full_writes(void)
89 {
90 	return pmu.perf_cap & PMU_CAP_FW_WRITES;
91 }
92 
93 static inline bool pmu_use_full_writes(void)
94 {
95 	return pmu.msr_gp_counter_base == MSR_IA32_PMC0;
96 }
97 
98 static inline u32 MSR_PERF_FIXED_CTRx(unsigned int i)
99 {
100 	return MSR_CORE_PERF_FIXED_CTR0 + i;
101 }
102 
103 static inline void pmu_reset_all_gp_counters(void)
104 {
105 	unsigned int idx;
106 
107 	for (idx = 0; idx < pmu.nr_gp_counters; idx++) {
108 		wrmsr(MSR_GP_EVENT_SELECTx(idx), 0);
109 		wrmsr(MSR_GP_COUNTERx(idx), 0);
110 	}
111 }
112 
113 static inline void pmu_reset_all_fixed_counters(void)
114 {
115 	unsigned int idx;
116 
117 	if (!pmu.nr_fixed_counters)
118 		return;
119 
120 	wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
121 	for (idx = 0; idx < pmu.nr_fixed_counters; idx++)
122 		wrmsr(MSR_PERF_FIXED_CTRx(idx), 0);
123 }
124 
/* Reset both counter banks: general-purpose first, then fixed. */
static inline void pmu_reset_all_counters(void)
{
	pmu_reset_all_gp_counters();
	pmu_reset_all_fixed_counters();
}
130 
131 static inline void pmu_clear_global_status(void)
132 {
133 	wrmsr(pmu.msr_global_status_clr, rdmsr(pmu.msr_global_status));
134 }
135 
136 #endif /* _X86_PMU_H_ */
137