xref: /kvm-unit-tests/lib/x86/pmu.h (revision b36f35a82ff4cec5f71a68aa782332e2bc3488f7)
#ifndef _X86_PMU_H_
#define _X86_PMU_H_

#include "processor.h"
#include "libcflat.h"

/* Pseudo counter index used to address fixed counters past the GP range. */
#define FIXED_CNT_INDEX 32
#define MAX_NUM_LBR_ENTRY	  32

/* Performance Counter Vector for the LVT PC Register */
#define PMI_VECTOR	32

/* AMD GP counter counts: legacy PMU vs. PerfCtrExtCore. */
#define AMD64_NUM_COUNTERS	4
#define AMD64_NUM_COUNTERS_CORE	6

/* Counter bit width assumed when CPUID does not report one. */
#define PMC_DEFAULT_WIDTH	48

/* IA32_DEBUGCTL.LBR: enable last-branch recording. */
#define DEBUGCTLMSR_LBR	  (1UL <<  0)

/* IA32_PERF_CAPABILITIES fields used by the helpers below. */
#define PMU_CAP_LBR_FMT	  0x3f
#define PMU_CAP_FW_WRITES	(1ULL << 13)
#define PMU_CAP_PEBS_BASELINE	(1ULL << 14)
#define PERF_CAP_PEBS_FORMAT           0xf00

/*
 * Bit positions within the PERFEVTSELx MSRs.
 *
 * NOTE(review): "EVNSEL_EVENT_SHIFT", "EVNTSEL_EN_SHIF" and
 * "EVNTSEL_INV_SHIF" appear to be misspellings of EVNTSEL_*_SHIFT;
 * they are kept as-is because renaming would break existing users.
 */
#define EVNSEL_EVENT_SHIFT	0
#define EVNTSEL_UMASK_SHIFT	8
#define EVNTSEL_USR_SHIFT	16
#define EVNTSEL_OS_SHIFT	17
#define EVNTSEL_EDGE_SHIFT	18
#define EVNTSEL_PC_SHIFT	19
#define EVNTSEL_INT_SHIFT	20
#define EVNTSEL_EN_SHIF		22
#define EVNTSEL_INV_SHIF	23
#define EVNTSEL_CMASK_SHIFT	24

/* Single-bit PERFEVTSELx masks derived from the shifts above. */
#define EVNTSEL_EN	(1 << EVNTSEL_EN_SHIF)
#define EVNTSEL_USR	(1 << EVNTSEL_USR_SHIFT)
#define EVNTSEL_OS	(1 << EVNTSEL_OS_SHIFT)
#define EVNTSEL_PC	(1 << EVNTSEL_PC_SHIFT)
#define EVNTSEL_INT	(1 << EVNTSEL_INT_SHIFT)
#define EVNTSEL_INV	(1 << EVNTSEL_INV_SHIF)
/* GLOBAL_STATUS bit 62: PEBS buffer overflow. */
#define GLOBAL_STATUS_BUFFER_OVF_BIT		62
#define GLOBAL_STATUS_BUFFER_OVF	BIT_ULL(GLOBAL_STATUS_BUFFER_OVF_BIT)

/* MSR_PEBS_DATA_CFG record-group selection bits (adaptive PEBS). */
#define PEBS_DATACFG_MEMINFO	BIT_ULL(0)
#define PEBS_DATACFG_GP	BIT_ULL(1)
#define PEBS_DATACFG_XMMS	BIT_ULL(2)
#define PEBS_DATACFG_LBRS	BIT_ULL(3)

/* PERFEVTSELx bit selecting adaptive PEBS records. */
#define ICL_EVENTSEL_ADAPTIVE				(1ULL << 34)
/* Shift for the LBR-entry-count field in MSR_PEBS_DATA_CFG. */
#define PEBS_DATACFG_LBR_SHIFT	24
/*
 * Note: the duplicate "#define MAX_NUM_LBR_ENTRY 32" that used to live
 * here was dropped; the macro is already defined at the top of this file.
 */
/*
 * Per-CPU PMU capability snapshot, presumably populated by pmu_init()
 * from CPUID/MSR state and consulted by the inline helpers below.
 */
struct pmu_caps {
	bool is_intel;			/* selects Intel vs. AMD programming model */
	u8 version;			/* architectural PMU version; 0 = none */
	u8 nr_fixed_counters;		/* number of fixed-function counters */
	u8 fixed_counter_width;		/* bit width of a fixed counter */
	u8 nr_gp_counters;		/* number of general-purpose counters */
	u8 gp_counter_width;		/* bit width of a GP counter */
	u8 gp_counter_mask_length;
	u32 gp_counter_available;	/* bitmap: bit i set => GP counter i usable */
	u32 msr_gp_counter_base;	/* MSR address of GP counter 0 */
	u32 msr_gp_event_select_base;	/* MSR address of event select 0 */

	/* Global status/ctrl MSR addresses (vendor dependent). */
	u32 msr_global_status;
	u32 msr_global_ctl;
	u32 msr_global_status_clr;

	u64 perf_cap;			/* IA32_PERF_CAPABILITIES bits (see PMU_CAP_*) */
};

extern struct pmu_caps pmu;

/* Initialize the global 'pmu' capability snapshot for this CPU. */
void pmu_init(void);
77 
78 static inline u32 MSR_GP_COUNTERx(unsigned int i)
79 {
80 	if (pmu.msr_gp_counter_base == MSR_F15H_PERF_CTR0)
81 		return pmu.msr_gp_counter_base + 2 * i;
82 
83 	return pmu.msr_gp_counter_base + i;
84 }
85 
86 static inline u32 MSR_GP_EVENT_SELECTx(unsigned int i)
87 {
88 	if (pmu.msr_gp_event_select_base == MSR_F15H_PERF_CTL0)
89 		return pmu.msr_gp_event_select_base + 2 * i;
90 
91 	return pmu.msr_gp_event_select_base + i;
92 }
93 
94 static inline bool this_cpu_has_pmu(void)
95 {
96 	return !pmu.is_intel || !!pmu.version;
97 }
98 
99 static inline bool this_cpu_has_perf_global_ctrl(void)
100 {
101 	return pmu.version > 1;
102 }
103 
/* PERF_GLOBAL_STATUS exists from architectural PMU version 2 onward. */
static inline bool this_cpu_has_perf_global_status(void)
{
	return pmu.version > 1;
}
108 
109 static inline bool pmu_gp_counter_is_available(int i)
110 {
111 	return pmu.gp_counter_available & BIT(i);
112 }
113 
/* LBR format field of IA32_PERF_CAPABILITIES (PMU_CAP_LBR_FMT mask). */
static inline u64 pmu_lbr_version(void)
{
	return pmu.perf_cap & PMU_CAP_LBR_FMT;
}
118 
119 static inline bool pmu_has_full_writes(void)
120 {
121 	return pmu.perf_cap & PMU_CAP_FW_WRITES;
122 }
123 
/* Redirect GP counter accesses to the full-width IA32_PMCx MSRs. */
static inline void pmu_activate_full_writes(void)
{
	pmu.msr_gp_counter_base = MSR_IA32_PMC0;
}
128 
/* True if GP counters are currently accessed via the full-width MSRs. */
static inline bool pmu_use_full_writes(void)
{
	return pmu.msr_gp_counter_base == MSR_IA32_PMC0;
}
133 
/* MSR address of fixed-function counter @i (Intel MSR layout). */
static inline u32 MSR_PERF_FIXED_CTRx(unsigned int i)
{
	return MSR_CORE_PERF_FIXED_CTR0 + i;
}
138 
139 static inline void pmu_reset_all_gp_counters(void)
140 {
141 	unsigned int idx;
142 
143 	for (idx = 0; idx < pmu.nr_gp_counters; idx++) {
144 		wrmsr(MSR_GP_EVENT_SELECTx(idx), 0);
145 		wrmsr(MSR_GP_COUNTERx(idx), 0);
146 	}
147 }
148 
149 static inline void pmu_reset_all_fixed_counters(void)
150 {
151 	unsigned int idx;
152 
153 	if (!pmu.nr_fixed_counters)
154 		return;
155 
156 	wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
157 	for (idx = 0; idx < pmu.nr_fixed_counters; idx++)
158 		wrmsr(MSR_PERF_FIXED_CTRx(idx), 0);
159 }
160 
/* Zero all GP and fixed counters along with their control MSRs. */
static inline void pmu_reset_all_counters(void)
{
	pmu_reset_all_gp_counters();
	pmu_reset_all_fixed_counters();
}
166 
/* Acknowledge (clear) every currently-set global status bit. */
static inline void pmu_clear_global_status(void)
{
	wrmsr(pmu.msr_global_status_clr, rdmsr(pmu.msr_global_status));
}
171 
/*
 * PEBS support is inferred from PMU version >= 2.
 * NOTE(review): no dedicated PEBS feature bit is consulted here.
 */
static inline bool pmu_has_pebs(void)
{
	return pmu.version > 1;
}
176 
177 static inline u8 pmu_pebs_format(void)
178 {
179 	return (pmu.perf_cap & PERF_CAP_PEBS_FORMAT ) >> 8;
180 }
181 
182 static inline bool pmu_has_pebs_baseline(void)
183 {
184 	return pmu.perf_cap & PMU_CAP_PEBS_BASELINE;
185 }
186 
187 #endif /* _X86_PMU_H_ */
188