#ifndef _X86_PMU_H_
#define _X86_PMU_H_

#include "processor.h"
#include "libcflat.h"

#define FIXED_CNT_INDEX 32
#define MAX_NUM_LBR_ENTRY	32

/* Performance Counter Vector for the LVT PC Register */
#define PMI_VECTOR	32

#define AMD64_NUM_COUNTERS	4
#define AMD64_NUM_COUNTERS_CORE	6

#define PMC_DEFAULT_WIDTH	48

#define DEBUGCTLMSR_LBR	(1UL << 0)

#define PMU_CAP_LBR_FMT	0x3f
#define PMU_CAP_FW_WRITES	(1ULL << 13)
#define PMU_CAP_PEBS_BASELINE	(1ULL << 14)
#define PERF_CAP_PEBS_FORMAT	0xf00

#define EVNTSEL_EVENT_SHIFT	0
#define EVNTSEL_UMASK_SHIFT	8
#define EVNTSEL_USR_SHIFT	16
#define EVNTSEL_OS_SHIFT	17
#define EVNTSEL_EDGE_SHIFT	18
#define EVNTSEL_PC_SHIFT	19
#define EVNTSEL_INT_SHIFT	20
#define EVNTSEL_EN_SHIFT	22
#define EVNTSEL_INV_SHIFT	23
#define EVNTSEL_CMASK_SHIFT	24

#define EVNTSEL_EN	(1 << EVNTSEL_EN_SHIFT)
#define EVNTSEL_USR	(1 << EVNTSEL_USR_SHIFT)
#define EVNTSEL_OS	(1 << EVNTSEL_OS_SHIFT)
#define EVNTSEL_PC	(1 << EVNTSEL_PC_SHIFT)
#define EVNTSEL_INT	(1 << EVNTSEL_INT_SHIFT)
#define EVNTSEL_INV	(1 << EVNTSEL_INV_SHIFT)

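/*
 * Illustrative sketch, not part of the original header: an event select is
 * built by OR'ing an event code and unit mask (placed with the *_SHIFT
 * values above) into the desired control bits.  The 0xc0/0x00 encoding
 * below, instructions retired on Intel, is assumed purely as an example:
 *
 *	u32 evtsel = EVNTSEL_EN | EVNTSEL_OS | EVNTSEL_USR |
 *		     (0x00 << EVNTSEL_UMASK_SHIFT) |
 *		     (0xc0 << EVNTSEL_EVENT_SHIFT);
 */
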
#define GLOBAL_STATUS_BUFFER_OVF_BIT	62
#define GLOBAL_STATUS_BUFFER_OVF	BIT_ULL(GLOBAL_STATUS_BUFFER_OVF_BIT)

#define PEBS_DATACFG_MEMINFO	BIT_ULL(0)
#define PEBS_DATACFG_GPRS	BIT_ULL(1)
#define PEBS_DATACFG_XMMS	BIT_ULL(2)
#define PEBS_DATACFG_LBRS	BIT_ULL(3)
#define PEBS_DATACFG_MASK	(PEBS_DATACFG_MEMINFO | \
				 PEBS_DATACFG_GPRS | \
				 PEBS_DATACFG_XMMS | \
				 PEBS_DATACFG_LBRS)

#define ICL_EVENTSEL_ADAPTIVE	(1ULL << 34)
#define PEBS_DATACFG_LBR_SHIFT	24
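
/*
 * Illustrative sketch, not part of the original header: with adaptive PEBS
 * (see PMU_CAP_PEBS_BASELINE), the record contents are selected by writing
 * a mask of PEBS_DATACFG_* bits, plus an LBR entry count at
 * PEBS_DATACFG_LBR_SHIFT, to the PEBS data configuration MSR.  The MSR
 * name and the minus-one entry encoding are assumptions for illustration:
 *
 *	wrmsr(MSR_PEBS_DATA_CFG, PEBS_DATACFG_GPRS | PEBS_DATACFG_LBRS |
 *	      ((MAX_NUM_LBR_ENTRY - 1ull) << PEBS_DATACFG_LBR_SHIFT));
 */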

struct pmu_caps {
	bool is_intel;
	u8 version;
	u8 nr_fixed_counters;
	u8 fixed_counter_width;
	u8 nr_gp_counters;
	u8 gp_counter_width;
	u8 gp_counter_mask_length;
	u32 gp_counter_available;
	u32 msr_gp_counter_base;
	u32 msr_gp_event_select_base;

	u32 msr_global_status;
	u32 msr_global_ctl;
	u32 msr_global_status_clr;

	u64 perf_cap;
};

extern struct pmu_caps pmu;

void pmu_init(void);

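/*
 * AMD's extended core PMU MSRs interleave event selects and counters
 * (PERF_CTL0, PERF_CTR0, PERF_CTL1, PERF_CTR1, ...), hence the stride of
 * two below when the F15H base is in use; the legacy and Intel counter
 * ranges are contiguous.
 */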
static inline u32 MSR_GP_COUNTERx(unsigned int i)
{
	if (pmu.msr_gp_counter_base == MSR_F15H_PERF_CTR0)
		return pmu.msr_gp_counter_base + 2 * i;

	return pmu.msr_gp_counter_base + i;
}

static inline u32 MSR_GP_EVENT_SELECTx(unsigned int i)
{
	if (pmu.msr_gp_event_select_base == MSR_F15H_PERF_CTL0)
		return pmu.msr_gp_event_select_base + 2 * i;

	return pmu.msr_gp_event_select_base + i;
}

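/*
 * Illustrative sketch, not part of the original header: programming and
 * reading back GP counter 0 through the base-aware accessors above (the
 * 0xc0 event encoding is assumed purely as an example):
 *
 *	wrmsr(MSR_GP_EVENT_SELECTx(0),
 *	      EVNTSEL_EN | EVNTSEL_OS | EVNTSEL_USR | 0xc0);
 *	// ... run the measured workload ...
 *	u64 count = rdmsr(MSR_GP_COUNTERx(0));
 */
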
static inline bool this_cpu_has_pmu(void)
{
	return !pmu.is_intel || !!pmu.version;
}

static inline bool this_cpu_has_perf_global_ctrl(void)
{
	return pmu.version > 1;
}

static inline bool this_cpu_has_perf_global_status(void)
{
	return pmu.version > 1;
}

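/*
 * Illustrative sketch, not part of the original header: on v2+ PMUs the
 * counters are additionally gated by the global control MSR, so a test
 * typically ungates its counter there as well:
 *
 *	if (this_cpu_has_perf_global_ctrl())
 *		wrmsr(pmu.msr_global_ctl, BIT_ULL(0));
 */
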
static inline bool pmu_gp_counter_is_available(int i)
{
	return pmu.gp_counter_available & BIT(i);
}

static inline u64 pmu_lbr_version(void)
{
	return pmu.perf_cap & PMU_CAP_LBR_FMT;
}

static inline bool pmu_has_full_writes(void)
{
	return pmu.perf_cap & PMU_CAP_FW_WRITES;
}

static inline void pmu_activate_full_writes(void)
{
	pmu.msr_gp_counter_base = MSR_IA32_PMC0;
}

static inline bool pmu_use_full_writes(void)
{
	return pmu.msr_gp_counter_base == MSR_IA32_PMC0;
}

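/*
 * Writes to the legacy GP counter MSRs are sign-extended from bit 31, so
 * only the low 32 bits can be set directly.  When PMU_CAP_FW_WRITES is
 * advertised, switching the counter base to the full-width aliases via
 * pmu_activate_full_writes() allows all gp_counter_width bits to be
 * written.
 */
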
static inline u32 MSR_PERF_FIXED_CTRx(unsigned int i)
{
	return MSR_CORE_PERF_FIXED_CTR0 + i;
}

static inline void pmu_reset_all_gp_counters(void)
{
	unsigned int idx;

	for (idx = 0; idx < pmu.nr_gp_counters; idx++) {
		wrmsr(MSR_GP_EVENT_SELECTx(idx), 0);
		wrmsr(MSR_GP_COUNTERx(idx), 0);
	}
}

static inline void pmu_reset_all_fixed_counters(void)
{
	unsigned int idx;

	if (!pmu.nr_fixed_counters)
		return;

	wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
	for (idx = 0; idx < pmu.nr_fixed_counters; idx++)
		wrmsr(MSR_PERF_FIXED_CTRx(idx), 0);
}

static inline void pmu_reset_all_counters(void)
{
	pmu_reset_all_gp_counters();
	pmu_reset_all_fixed_counters();
}

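/*
 * Acknowledge every pending bit in the global status MSR by writing the
 * snapshot back to its companion clear/overflow-control MSR.
 */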
static inline void pmu_clear_global_status(void)
{
	wrmsr(pmu.msr_global_status_clr, rdmsr(pmu.msr_global_status));
}

static inline bool pmu_has_pebs(void)
{
	return pmu.version > 1;
}

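/* PEBS record format, bits 11:8 of IA32_PERF_CAPABILITIES. */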
static inline u8 pmu_pebs_format(void)
{
	return (pmu.perf_cap & PERF_CAP_PEBS_FORMAT) >> 8;
}

static inline bool pmu_has_pebs_baseline(void)
{
	return pmu.perf_cap & PMU_CAP_PEBS_BASELINE;
}

#endif /* _X86_PMU_H_ */