/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

#include <linux/static_call.h>

/*
 * Performance event hw details:
 */

#define INTEL_PMC_MAX_GENERIC				       32
#define INTEL_PMC_MAX_FIXED				       16
#define INTEL_PMC_IDX_FIXED				       32

#define X86_PMC_IDX_MAX					       64

#define MSR_ARCH_PERFMON_PERFCTR0			      0xc1
#define MSR_ARCH_PERFMON_PERFCTR1			      0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0			     0x186
#define MSR_ARCH_PERFMON_EVENTSEL1			     0x187

#define ARCH_PERFMON_EVENTSEL_EVENT			0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK			0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR			(1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS			(1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE			(1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL		(1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT			(1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY			(1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE			(1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV			(1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK			0xFF000000ULL
#define ARCH_PERFMON_EVENTSEL_BR_CNTR			(1ULL << 35)
#define ARCH_PERFMON_EVENTSEL_EQ			(1ULL << 36)
#define ARCH_PERFMON_EVENTSEL_RDPMC_USER_DISABLE	(1ULL << 37)
#define ARCH_PERFMON_EVENTSEL_UMASK2			(0xFFULL << 40)
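
/*
 * Illustrative sketch (not part of this header's API): composing an
 * event-select value from the bits above. The helper name is
 * hypothetical; the event/umask pair is the architectural
 * UNHALTED_CORE_CYCLES encoding (0x3c/0x00, see
 * ARCH_PERFMON_UNHALTED_CORE_CYCLES_* below).
 *
 *	static inline u64 x86_example_eventsel(u64 event, u64 umask)
 *	{
 *		return (event & ARCH_PERFMON_EVENTSEL_EVENT) |
 *		       ((umask << 8) & ARCH_PERFMON_EVENTSEL_UMASK) |
 *		       ARCH_PERFMON_EVENTSEL_USR |
 *		       ARCH_PERFMON_EVENTSEL_OS |
 *		       ARCH_PERFMON_EVENTSEL_ENABLE;
 *	}
 *
 *	u64 cycles_cfg = x86_example_eventsel(0x3c, 0x00);
 */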

#define INTEL_FIXED_BITS_STRIDE			4
#define INTEL_FIXED_0_KERNEL			(1ULL << 0)
#define INTEL_FIXED_0_USER			(1ULL << 1)
#define INTEL_FIXED_0_ANYTHREAD			(1ULL << 2)
#define INTEL_FIXED_0_ENABLE_PMI		(1ULL << 3)
#define INTEL_FIXED_0_RDPMC_USER_DISABLE	(1ULL << 33)
#define INTEL_FIXED_3_METRICS_CLEAR		(1ULL << 2)

#define HSW_IN_TX				(1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED			(1ULL << 33)
#define ICL_EVENTSEL_ADAPTIVE			(1ULL << 34)
#define ICL_FIXED_0_ADAPTIVE			(1ULL << 32)

#define INTEL_FIXED_BITS_MASK					\
	(INTEL_FIXED_0_KERNEL | INTEL_FIXED_0_USER |		\
	 INTEL_FIXED_0_ANYTHREAD | INTEL_FIXED_0_ENABLE_PMI |	\
	 ICL_FIXED_0_ADAPTIVE | INTEL_FIXED_0_RDPMC_USER_DISABLE)

#define intel_fixed_bits_by_idx(_idx, _bits)			\
	((_bits) << ((_idx) * INTEL_FIXED_BITS_STRIDE))
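
/*
 * Illustrative sketch: enabling fixed counter _idx for ring 0 and
 * ring 3 with PMI on overflow, via the per-counter 4-bit stride in
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL (defined further down). A sketch
 * only, assuming the wrmsrq() MSR helper; the real driver
 * read-modify-writes the MSR:
 *
 *	u64 ctrl = intel_fixed_bits_by_idx(idx, INTEL_FIXED_0_KERNEL |
 *						INTEL_FIXED_0_USER |
 *						INTEL_FIXED_0_ENABLE_PMI);
 *	wrmsrq(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl);
 */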

#define AMD64_EVENTSEL_INT_CORE_ENABLE		(1ULL << 36)
#define AMD64_EVENTSEL_GUESTONLY		(1ULL << 40)
#define AMD64_EVENTSEL_HOSTONLY			(1ULL << 41)

#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT	37
#define AMD64_EVENTSEL_INT_CORE_SEL_MASK	\
	(0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)

#define AMD64_EVENTSEL_EVENT	\
	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK	\
	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define AMD64_L3_SLICE_SHIFT			48
#define AMD64_L3_SLICE_MASK			\
	(0xFULL << AMD64_L3_SLICE_SHIFT)
#define AMD64_L3_SLICEID_MASK			\
	(0x7ULL << AMD64_L3_SLICE_SHIFT)

#define AMD64_L3_THREAD_SHIFT			56
#define AMD64_L3_THREAD_MASK			\
	(0xFFULL << AMD64_L3_THREAD_SHIFT)
#define AMD64_L3_F19H_THREAD_MASK		\
	(0x3ULL << AMD64_L3_THREAD_SHIFT)

#define AMD64_L3_EN_ALL_CORES			BIT_ULL(47)
#define AMD64_L3_EN_ALL_SLICES			BIT_ULL(46)

#define AMD64_L3_COREID_SHIFT			42
#define AMD64_L3_COREID_MASK			\
	(0x7ULL << AMD64_L3_COREID_SHIFT)

#define X86_RAW_EVENT_MASK		\
	(ARCH_PERFMON_EVENTSEL_EVENT |	\
	 ARCH_PERFMON_EVENTSEL_UMASK |	\
	 ARCH_PERFMON_EVENTSEL_EDGE  |	\
	 ARCH_PERFMON_EVENTSEL_INV   |	\
	 ARCH_PERFMON_EVENTSEL_CMASK)
#define X86_ALL_EVENT_FLAGS			\
	(ARCH_PERFMON_EVENTSEL_EDGE |		\
	 ARCH_PERFMON_EVENTSEL_INV |		\
	 ARCH_PERFMON_EVENTSEL_CMASK |		\
	 ARCH_PERFMON_EVENTSEL_ANY |		\
	 ARCH_PERFMON_EVENTSEL_PIN_CONTROL |	\
	 HSW_IN_TX |				\
	 HSW_IN_TX_CHECKPOINTED)
#define AMD64_RAW_EVENT_MASK		\
	(X86_RAW_EVENT_MASK |		\
	 AMD64_EVENTSEL_EVENT)
#define AMD64_RAW_EVENT_MASK_NB		\
	(AMD64_EVENTSEL_EVENT |		\
	 ARCH_PERFMON_EVENTSEL_UMASK)
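
/*
 * Illustrative sketch: the raw-event masks above act as allow-lists
 * for user-supplied PERF_TYPE_RAW configs; bits outside the mask are
 * dropped before the value ever reaches an EVENTSEL MSR. Hypothetical
 * usage:
 *
 *	config = event->attr.config & X86_RAW_EVENT_MASK;
 */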

#define AMD64_PERFMON_V2_EVENTSEL_EVENT_NB	\
	(AMD64_EVENTSEL_EVENT |			\
	 GENMASK_ULL(37, 36))

#define AMD64_PERFMON_V2_EVENTSEL_UMASK_NB	\
	(ARCH_PERFMON_EVENTSEL_UMASK |		\
	 GENMASK_ULL(27, 24))

#define AMD64_PERFMON_V2_RAW_EVENT_MASK_NB	\
	(AMD64_PERFMON_V2_EVENTSEL_EVENT_NB |	\
	 AMD64_PERFMON_V2_EVENTSEL_UMASK_NB)

#define AMD64_PERFMON_V2_ENABLE_UMC		BIT_ULL(31)
#define AMD64_PERFMON_V2_EVENTSEL_EVENT_UMC	GENMASK_ULL(7, 0)
#define AMD64_PERFMON_V2_EVENTSEL_RDWRMASK_UMC	GENMASK_ULL(9, 8)
#define AMD64_PERFMON_V2_RAW_EVENT_MASK_UMC	\
	(AMD64_PERFMON_V2_EVENTSEL_EVENT_UMC |	\
	 AMD64_PERFMON_V2_EVENTSEL_RDWRMASK_UMC)

#define AMD64_NUM_COUNTERS			4
#define AMD64_NUM_COUNTERS_CORE			6
#define AMD64_NUM_COUNTERS_NB			4

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL	0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK	(0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX	0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED	6
#define ARCH_PERFMON_EVENTS_COUNT		7

#define PEBS_DATACFG_MEMINFO	BIT_ULL(0)
#define PEBS_DATACFG_GP		BIT_ULL(1)
#define PEBS_DATACFG_XMMS	BIT_ULL(2)
#define PEBS_DATACFG_LBRS	BIT_ULL(3)
#define PEBS_DATACFG_CNTR	BIT_ULL(4)
#define PEBS_DATACFG_METRICS	BIT_ULL(5)
#define PEBS_DATACFG_LBR_SHIFT	24
#define PEBS_DATACFG_CNTR_SHIFT	32
#define PEBS_DATACFG_CNTR_MASK	GENMASK_ULL(15, 0)
#define PEBS_DATACFG_FIX_SHIFT	48
#define PEBS_DATACFG_FIX_MASK	GENMASK_ULL(7, 0)

/* Steal the highest bit of pebs_data_cfg for SW usage */
#define PEBS_UPDATE_DS_SW	BIT_ULL(63)
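
/*
 * Illustrative sketch: an adaptive-PEBS data configuration asking each
 * record to carry memory info, GPRs and LBR entries. nr_lbr_entries is
 * a hypothetical count; the exact encoding of the field placed at
 * PEBS_DATACFG_LBR_SHIFT is defined by the SDM, not by this sketch:
 *
 *	u64 data_cfg = PEBS_DATACFG_MEMINFO | PEBS_DATACFG_GP |
 *		       PEBS_DATACFG_LBRS |
 *		       ((u64)nr_lbr_entries << PEBS_DATACFG_LBR_SHIFT);
 */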

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_ebx {
	struct {
		unsigned int no_unhalted_core_cycles:1;
		unsigned int no_instructions_retired:1;
		unsigned int no_unhalted_reference_cycles:1;
		unsigned int no_llc_reference:1;
		unsigned int no_llc_misses:1;
		unsigned int no_branch_instruction_retired:1;
		unsigned int no_branch_misses_retired:1;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved1:2;
		unsigned int anythread_deprecated:1;
		unsigned int reserved2:16;
	} split;
	unsigned int full;
};
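
/*
 * Illustrative sketch: decoding CPUID leaf 0xa through the unions
 * above, assuming the kernel's cpuid() helper:
 *
 *	union cpuid10_eax eax;
 *	union cpuid10_ebx ebx;
 *	union cpuid10_edx edx;
 *	unsigned int unused;
 *
 *	cpuid(0xa, &eax.full, &ebx.full, &unused, &edx.full);
 *	if (eax.split.version_id >= 2)
 *		pr_info("%d fixed counters\n", edx.split.num_counters_fixed);
 */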

/*
 * Intel "Architectural Performance Monitoring extension" CPUID
 * detection/enumeration details:
 */
#define ARCH_PERFMON_EXT_LEAF			0x00000023
#define ARCH_PERFMON_NUM_COUNTER_LEAF		0x1
#define ARCH_PERFMON_ACR_LEAF			0x2
#define ARCH_PERFMON_PEBS_CAP_LEAF		0x4
#define ARCH_PERFMON_PEBS_COUNTER_LEAF		0x5

union cpuid35_eax {
	struct {
		unsigned int	leaf0:1;
		/* Counters Sub-Leaf */
		unsigned int	cntr_subleaf:1;
		/* Auto Counter Reload Sub-Leaf */
		unsigned int	acr_subleaf:1;
		/* Events Sub-Leaf */
		unsigned int	events_subleaf:1;
		/* arch-PEBS Sub-Leaves */
		unsigned int	pebs_caps_subleaf:1;
		unsigned int	pebs_cnts_subleaf:1;
		unsigned int	reserved:26;
	} split;
	unsigned int		full;
};

union cpuid35_ebx {
	struct {
		/* UnitMask2 Supported */
		unsigned int	umask2:1;
		/* EQ-bit Supported */
		unsigned int	eq:1;
		/* rdpmc user disable Supported */
		unsigned int	rdpmc_user_disable:1;
		unsigned int	reserved:29;
	} split;
	unsigned int		full;
};
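
/*
 * Illustrative sketch: sub-leaf 0 of ARCH_PERFMON_EXT_LEAF advertises
 * which other sub-leaves are valid, so each one is gated on its eax
 * bit before being queried. Assumes the kernel's cpuid_count() helper:
 *
 *	union cpuid35_eax eax;
 *	unsigned int ebx, ecx, edx;
 *
 *	cpuid_count(ARCH_PERFMON_EXT_LEAF, 0, &eax.full, &ebx, &ecx, &edx);
 *	if (eax.split.cntr_subleaf)
 *		cpuid_count(ARCH_PERFMON_EXT_LEAF,
 *			    ARCH_PERFMON_NUM_COUNTER_LEAF,
 *			    &eax.full, &ebx, &ecx, &edx);
 */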

/*
 * Intel Architectural LBR CPUID detection/enumeration details:
 */
union cpuid28_eax {
	struct {
		/* Supported LBR depth values */
		unsigned int	lbr_depth_mask:8;
		unsigned int	reserved:22;
		/* Deep C-state Reset */
		unsigned int	lbr_deep_c_reset:1;
		/* IP values contain LIP */
		unsigned int	lbr_lip:1;
	} split;
	unsigned int		full;
};

union cpuid28_ebx {
	struct {
		/* CPL Filtering Supported */
		unsigned int	lbr_cpl:1;
		/* Branch Filtering Supported */
		unsigned int	lbr_filter:1;
		/* Call-stack Mode Supported */
		unsigned int	lbr_call_stack:1;
	} split;
	unsigned int		full;
};

union cpuid28_ecx {
	struct {
		/* Mispredict Bit Supported */
		unsigned int	lbr_mispred:1;
		/* Timed LBRs Supported */
		unsigned int	lbr_timed_lbr:1;
		/* Branch Type Field Supported */
		unsigned int	lbr_br_type:1;
		unsigned int	reserved:13;
		/* Branch counters (Event Logging) Supported */
		unsigned int	lbr_counters:4;
	} split;
	unsigned int		full;
};

/*
 * AMD "Extended Performance Monitoring and Debug" CPUID
 * detection/enumeration details:
 */
union cpuid_0x80000022_ebx {
	struct {
		/* Number of Core Performance Counters */
		unsigned int	num_core_pmc:4;
		/* Number of available LBR Stack Entries */
		unsigned int	lbr_v2_stack_sz:6;
		/* Number of Data Fabric Counters */
		unsigned int	num_df_pmc:6;
		/* Number of Unified Memory Controller Counters */
		unsigned int	num_umc_pmc:6;
	} split;
	unsigned int		full;
};
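
/*
 * Illustrative sketch: pulling the core PMC count out of leaf
 * 0x80000022 (EXT_PERFMON_DEBUG_FEATURES below), assuming the kernel's
 * cpuid_ebx() helper, which returns only the EBX word:
 *
 *	union cpuid_0x80000022_ebx ebx;
 *
 *	ebx.full = cpuid_ebx(EXT_PERFMON_DEBUG_FEATURES);
 *	num_counters = ebx.split.num_core_pmc;
 */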

struct x86_pmu_capability {
	int		version;
	int		num_counters_gp;
	int		num_counters_fixed;
	int		bit_width_gp;
	int		bit_width_fixed;
	unsigned int	events_mask;
	int		events_mask_len;
	unsigned int	pebs_ept	:1;
	unsigned int	mediated	:1;
};

/*
 * Fixed-purpose performance events:
 */

/* RDPMC offset for Fixed PMCs */
#define INTEL_PMC_FIXED_RDPMC_BASE	(1 << 30)
#define INTEL_PMC_FIXED_RDPMC_METRICS	(1 << 29)
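
/*
 * Illustrative sketch: RDPMC addresses a fixed counter by OR-ing the
 * fixed-counter index with bit 30. Reading fixed counter 1
 * (CPU_CLK_Unhalted.Core) would look like this, assuming the rdpmc()
 * helper from <asm/msr.h>:
 *
 *	u64 cycles = rdpmc(INTEL_PMC_FIXED_RDPMC_BASE | 1);
 */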

/*
 * All the fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL		0x38d

/*
 * There is no event-code assigned to the fixed-mode PMCs.
 *
 * For a fixed-mode PMC, which has an equivalent event on a general-purpose
 * PMC, the event-code of the equivalent event is used for the fixed-mode PMC,
 * e.g., Instr_Retired.Any and CPU_CLK_Unhalted.Core.
 *
 * For a fixed-mode PMC, which doesn't have an equivalent event, a
 * pseudo-encoding is used, e.g., CPU_CLK_Unhalted.Ref and TOPDOWN.SLOTS.
 * The pseudo event-code for a fixed-mode PMC must be 0x00.
 * The pseudo umask-code is 0xX. The X equals the index of the fixed
 * counter + 1, e.g., the fixed counter 2 has the pseudo-encoding 0x0300.
 *
 * The counts are available in separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0		0x309
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS	(INTEL_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1		0x30a
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES		(INTEL_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: event=0x00,umask=0x3 (pseudo-encoding) */
#define MSR_ARCH_PERFMON_FIXED_CTR2		0x30b
#define INTEL_PMC_IDX_FIXED_REF_CYCLES		(INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES		(1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)

/* TOPDOWN.SLOTS: event=0x00,umask=0x4 (pseudo-encoding) */
#define MSR_ARCH_PERFMON_FIXED_CTR3		0x30c
#define INTEL_PMC_IDX_FIXED_SLOTS		(INTEL_PMC_IDX_FIXED + 3)
#define INTEL_PMC_MSK_FIXED_SLOTS		(1ULL << INTEL_PMC_IDX_FIXED_SLOTS)

/* TOPDOWN_BAD_SPECULATION.ALL: fixed counter 4 (Atom only) */
/* TOPDOWN_FE_BOUND.ALL: fixed counter 5 (Atom only) */
/* TOPDOWN_RETIRING.ALL: fixed counter 6 (Atom only) */

static inline bool use_fixed_pseudo_encoding(u64 code)
{
	return !(code & 0xff);
}
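
/*
 * Illustrative sketch of the pseudo-encoding rule above: event-code
 * 0x00 with umask = fixed-counter index + 1. For fixed counter 2
 * (CPU_CLK_Unhalted.Ref) this reproduces the 0x0300 encoding:
 *
 *	u64 config = (u64)(idx + 1) << 8;
 *
 * use_fixed_pseudo_encoding(config) is then true, since the event-code
 * byte is 0x00.
 */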

/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose the value 47 for the fixed index of BTS, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define INTEL_PMC_IDX_FIXED_BTS		(INTEL_PMC_IDX_FIXED + 15)

/*
 * The PERF_METRICS MSR is modeled as several magic fixed-mode PMCs, one for
 * each TopDown metric event.
 *
 * Internally the TopDown metric events are mapped to the FxCtr 3 (SLOTS).
 */
#define INTEL_PMC_IDX_METRIC_BASE	(INTEL_PMC_IDX_FIXED + 16)
#define INTEL_PMC_IDX_TD_RETIRING	(INTEL_PMC_IDX_METRIC_BASE + 0)
#define INTEL_PMC_IDX_TD_BAD_SPEC	(INTEL_PMC_IDX_METRIC_BASE + 1)
#define INTEL_PMC_IDX_TD_FE_BOUND	(INTEL_PMC_IDX_METRIC_BASE + 2)
#define INTEL_PMC_IDX_TD_BE_BOUND	(INTEL_PMC_IDX_METRIC_BASE + 3)
#define INTEL_PMC_IDX_TD_HEAVY_OPS	(INTEL_PMC_IDX_METRIC_BASE + 4)
#define INTEL_PMC_IDX_TD_BR_MISPREDICT	(INTEL_PMC_IDX_METRIC_BASE + 5)
#define INTEL_PMC_IDX_TD_FETCH_LAT	(INTEL_PMC_IDX_METRIC_BASE + 6)
#define INTEL_PMC_IDX_TD_MEM_BOUND	(INTEL_PMC_IDX_METRIC_BASE + 7)
#define INTEL_PMC_IDX_METRIC_END	INTEL_PMC_IDX_TD_MEM_BOUND
#define INTEL_PMC_MSK_TOPDOWN		((0xffull << INTEL_PMC_IDX_METRIC_BASE) | \
					 INTEL_PMC_MSK_FIXED_SLOTS)

/*
 * There is no event-code assigned to the TopDown events.
 *
 * For the slots event, use the pseudo code of the fixed counter 3.
 *
 * For the metric events, the pseudo event-code is 0x00.
 * The pseudo umask-code starts from the middle of the pseudo event
 * space, 0x80.
 */
#define INTEL_TD_SLOTS			0x0400	/* TOPDOWN.SLOTS */
/* Level 1 metrics */
#define INTEL_TD_METRIC_RETIRING	0x8000	/* Retiring metric */
#define INTEL_TD_METRIC_BAD_SPEC	0x8100	/* Bad speculation metric */
#define INTEL_TD_METRIC_FE_BOUND	0x8200	/* FE bound metric */
#define INTEL_TD_METRIC_BE_BOUND	0x8300	/* BE bound metric */
/* Level 2 metrics */
#define INTEL_TD_METRIC_HEAVY_OPS	0x8400	/* Heavy Operations metric */
#define INTEL_TD_METRIC_BR_MISPREDICT	0x8500	/* Branch Mispredict metric */
#define INTEL_TD_METRIC_FETCH_LAT	0x8600	/* Fetch Latency metric */
#define INTEL_TD_METRIC_MEM_BOUND	0x8700	/* Memory bound metric */

#define INTEL_TD_METRIC_MAX		INTEL_TD_METRIC_MEM_BOUND
#define INTEL_TD_METRIC_NUM		8

#define INTEL_TD_CFG_METRIC_CLEAR_BIT	0
#define INTEL_TD_CFG_METRIC_CLEAR	BIT_ULL(INTEL_TD_CFG_METRIC_CLEAR_BIT)

static inline bool is_metric_idx(int idx)
{
	return (unsigned)(idx - INTEL_PMC_IDX_METRIC_BASE) < INTEL_TD_METRIC_NUM;
}

static inline bool is_topdown_idx(int idx)
{
	return is_metric_idx(idx) || idx == INTEL_PMC_IDX_FIXED_SLOTS;
}

#define INTEL_PMC_OTHER_TOPDOWN_BITS(bit)	\
	(~(0x1ull << (bit)) & INTEL_PMC_MSK_TOPDOWN)
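
/*
 * Illustrative sketch: once one TopDown bit of an overflow status word
 * has been handled, the remaining SLOTS/metric bits can be isolated
 * with the macro above. Hypothetical usage:
 *
 *	if (is_topdown_idx(bit) &&
 *	    (status & INTEL_PMC_OTHER_TOPDOWN_BITS(bit)))
 *		handled the other pending TopDown bits too
 */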

#define GLOBAL_STATUS_COND_CHG			BIT_ULL(63)
#define GLOBAL_STATUS_BUFFER_OVF_BIT		62
#define GLOBAL_STATUS_BUFFER_OVF		BIT_ULL(GLOBAL_STATUS_BUFFER_OVF_BIT)
#define GLOBAL_STATUS_UNC_OVF			BIT_ULL(61)
#define GLOBAL_STATUS_ASIF			BIT_ULL(60)
#define GLOBAL_STATUS_COUNTERS_FROZEN		BIT_ULL(59)
#define GLOBAL_STATUS_LBRS_FROZEN_BIT		58
#define GLOBAL_STATUS_LBRS_FROZEN		BIT_ULL(GLOBAL_STATUS_LBRS_FROZEN_BIT)
#define GLOBAL_STATUS_TRACE_TOPAPMI_BIT		55
#define GLOBAL_STATUS_TRACE_TOPAPMI		BIT_ULL(GLOBAL_STATUS_TRACE_TOPAPMI_BIT)
#define GLOBAL_STATUS_ARCH_PEBS_THRESHOLD_BIT	54
#define GLOBAL_STATUS_ARCH_PEBS_THRESHOLD	BIT_ULL(GLOBAL_STATUS_ARCH_PEBS_THRESHOLD_BIT)
#define GLOBAL_STATUS_PERF_METRICS_OVF_BIT	48

#define GLOBAL_CTRL_EN_PERF_METRICS		BIT_ULL(48)
/*
 * We model guest LBR event tracing as another fixed-mode PMC like BTS.
 *
 * We choose bit 58 because it's used to indicate LBR stack frozen state
 * for architectural perfmon v4, and we unconditionally mask that bit in
 * handle_pmi_common(), so it'll never be set in the overflow handling.
 *
 * With this fake counter assigned, the guest LBR event user (such as KVM)
 * can program the LBR registers on its own, and we don't actually do
 * anything with them in the host context.
 */
#define INTEL_PMC_IDX_FIXED_VLBR	(GLOBAL_STATUS_LBRS_FROZEN_BIT)

/*
 * Pseudo-encoding the guest LBR event as event=0x00,umask=0x1b,
 * since it would claim bit 58 which is effectively Fixed26.
 */
#define INTEL_FIXED_VLBR_EVENT	0x1b00

/*
 * Adaptive PEBS v4
 */

struct pebs_basic {
	u64 format_group:32,
	    retire_latency:16,
	    format_size:16;
	u64 ip;
	u64 applicable_counters;
	u64 tsc;
};

struct pebs_meminfo {
	u64 address;
	u64 aux;
	union {
		/* pre Alder Lake */
		u64 mem_latency;
		/* Alder Lake and later */
		struct {
			u64 instr_latency:16;
			u64 pad2:16;
			u64 cache_latency:16;
			u64 pad3:16;
		};
	};
	u64 tsx_tuning;
};

struct pebs_gprs {
	u64 flags, ip, ax, cx, dx, bx, sp, bp, si, di;
	u64 r8, r9, r10, r11, r12, r13, r14, r15;
};

struct pebs_xmm {
	u64 xmm[16*2];	/* two entries for each register */
};

struct pebs_cntr_header {
	u32 cntr;
	u32 fixed;
	u32 metrics;
	u32 reserved;
};

#define INTEL_CNTR_METRICS	0x3

/*
 * Arch PEBS
 */
union arch_pebs_index {
	struct {
		u64 rsvd:4,
		    wr:23,
		    rsvd2:4,
		    full:1,
		    en:1,
		    rsvd3:3,
		    thresh:23,
		    rsvd4:5;
	};
	u64 whole;
};

struct arch_pebs_header {
	union {
		u64 format;
		struct {
			u64 size:16,	/* Record size */
			    rsvd:14,
			    mode:1,	/* 64BIT_MODE */
			    cont:1,
			    rsvd2:3,
			    cntr:5,
			    lbr:2,
			    rsvd3:7,
			    xmm:1,
			    ymmh:1,
			    rsvd4:2,
			    opmask:1,
			    zmmh:1,
			    h16zmm:1,
			    rsvd5:5,
			    gpr:1,
			    aux:1,
			    basic:1;
		};
	};
	u64 rsvd6;
};

struct arch_pebs_basic {
	u64 ip;
	u64 applicable_counters;
	u64 tsc;
	u64 retire	:16,	/* Retire Latency */
	    valid	:1,
	    rsvd	:47;
	u64 rsvd2;
	u64 rsvd3;
};

struct arch_pebs_aux {
	u64 address;
	u64 rsvd;
	u64 rsvd2;
	u64 rsvd3;
	u64 rsvd4;
	u64 aux;
	u64 instr_latency	:16,
	    pad2		:16,
	    cache_latency	:16,
	    pad3		:16;
	u64 tsx_tuning;
};

struct arch_pebs_gprs {
	u64 flags, ip, ax, cx, dx, bx, sp, bp, si, di;
	u64 r8, r9, r10, r11, r12, r13, r14, r15, ssp;
	u64 rsvd;
};

struct arch_pebs_xer_header {
	u64 xstate;
	u64 rsvd;
};

#define ARCH_PEBS_LBR_NAN		0x0
#define ARCH_PEBS_LBR_NUM_8		0x1
#define ARCH_PEBS_LBR_NUM_16		0x2
#define ARCH_PEBS_LBR_NUM_VAR		0x3
#define ARCH_PEBS_BASE_LBR_ENTRIES	8
struct arch_pebs_lbr_header {
	u64 rsvd;
	u64 ctl;
	u64 depth;
	u64 ler_from;
	u64 ler_to;
	u64 ler_info;
};

struct arch_pebs_cntr_header {
	u32 cntr;
	u32 fixed;
	u32 metrics;
	u32 reserved;
};

/*
 * AMD Extended Performance Monitoring and Debug cpuid feature detection
 */
#define EXT_PERFMON_DEBUG_FEATURES	0x80000022

/*
 * IBS cpuid feature detection
 */

#define IBS_CPUID_FEATURES	0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL			(1U<<0)
#define IBS_CAPS_FETCHSAM		(1U<<1)
#define IBS_CAPS_OPSAM			(1U<<2)
#define IBS_CAPS_RDWROPCNT		(1U<<3)
#define IBS_CAPS_OPCNT			(1U<<4)
#define IBS_CAPS_BRNTRGT		(1U<<5)
#define IBS_CAPS_OPCNTEXT		(1U<<6)
#define IBS_CAPS_RIPINVALIDCHK		(1U<<7)
#define IBS_CAPS_OPBRNFUSE		(1U<<8)
#define IBS_CAPS_FETCHCTLEXTD		(1U<<9)
#define IBS_CAPS_OPDATA4		(1U<<10)
#define IBS_CAPS_ZEN4			(1U<<11)
#define IBS_CAPS_OPLDLAT		(1U<<12)
#define IBS_CAPS_OPDTLBPGSIZE		(1U<<19)

#define IBS_CAPS_DEFAULT		(IBS_CAPS_AVAIL		\
					 | IBS_CAPS_FETCHSAM	\
					 | IBS_CAPS_OPSAM)
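
/*
 * Illustrative sketch: feature checks against the capability word
 * returned by get_ibs_caps() (declared further down). Hypothetical
 * usage:
 *
 *	u32 caps = get_ibs_caps();
 *
 *	if (!(caps & IBS_CAPS_OPSAM))
 *		return -ENODEV;
 *	if (caps & IBS_CAPS_OPCNTEXT)
 *		the extended op max count field is available
 */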

/*
 * IBS APIC setup
 */
#define IBSCTL				0x1cc
#define IBSCTL_LVT_OFFSET_VALID		(1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK		0x0F

/* IBS fetch bits/masks */
#define IBS_FETCH_L3MISSONLY	(1ULL<<59)
#define IBS_FETCH_RAND_EN	(1ULL<<57)
#define IBS_FETCH_VAL		(1ULL<<49)
#define IBS_FETCH_ENABLE	(1ULL<<48)
#define IBS_FETCH_CNT		0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT	0x0000FFFFULL

/*
 * IBS op bits/masks
 * The lower 7 bits of the current count are random bits
 * preloaded by hardware and ignored in software.
 */
#define IBS_OP_LDLAT_EN		(1ULL<<63)
#define IBS_OP_LDLAT_THRSH	(0xFULL<<59)
#define IBS_OP_CUR_CNT		(0xFFF80ULL<<32)
#define IBS_OP_CUR_CNT_RAND	(0x0007FULL<<32)
#define IBS_OP_CUR_CNT_EXT_MASK	(0x7FULL<<52)
#define IBS_OP_CNT_CTL		(1ULL<<19)
#define IBS_OP_VAL		(1ULL<<18)
#define IBS_OP_ENABLE		(1ULL<<17)
#define IBS_OP_L3MISSONLY	(1ULL<<16)
#define IBS_OP_MAX_CNT		0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL	/* not a register bit mask */
#define IBS_OP_MAX_CNT_EXT_MASK	(0x7FULL<<20)	/* separate upper 7 bits */
#define IBS_RIP_INVALID		(1ULL<<38)
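
/*
 * Illustrative sketch: arming IBS op sampling. The max count field
 * holds the sample period divided by 16 (the hardware implies the low
 * four bits); IBS_OP_MAX_CNT_EXT_MASK only comes into play when
 * IBS_CAPS_OPCNTEXT is set. A sketch with a hypothetical period:
 *
 *	u64 op_ctl = ((period >> 4) & IBS_OP_MAX_CNT) | IBS_OP_ENABLE;
 */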

#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
extern int forward_event_to_ibs(struct perf_event *event);
#else
static inline u32 get_ibs_caps(void) { return 0; }
static inline int forward_event_to_ibs(struct perf_event *event) { return -ENOENT; }
#endif

#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
 * unused and ABI specified to be 0, so nobody should care what we do with
 * them.
 *
 * EXACT - the IP points to the exact instruction that triggered the
 *	   event (HW bugs exempt).
 * VM    - original X86_VM_MASK; see set_linear_ip().
 */
#define PERF_EFLAGS_EXACT	(1UL << 3)
#define PERF_EFLAGS_VM		(1UL << 5)

struct pt_regs;
struct x86_perf_regs {
	struct pt_regs	regs;
	u64		*xmm_regs;
};

extern unsigned long perf_arch_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_arch_misc_flags(struct pt_regs *regs);
extern unsigned long perf_arch_guest_misc_flags(struct pt_regs *regs);
#define perf_arch_misc_flags(regs)	perf_arch_misc_flags(regs)
#define perf_arch_guest_misc_flags(regs)	perf_arch_guest_misc_flags(regs)

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 from flags to pass exact information, see
 * perf_arch_misc_flags() and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip)	{		\
	(regs)->ip = (__ip);					\
	(regs)->sp = (unsigned long)__builtin_frame_address(0);	\
	(regs)->cs = __KERNEL_CS;				\
	(regs)->flags = 0;					\
}

struct perf_guest_switch_msr {
	unsigned msr;
	u64 host, guest;
};

struct x86_pmu_lbr {
	unsigned int	nr;
	unsigned int	from;
	unsigned int	to;
	unsigned int	info;
	bool		has_callstack;
};

extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern u64 perf_get_hw_event_config(int hw_event);
extern void perf_check_microcode(void);
extern void perf_clear_dirty_counters(void);
extern int x86_perf_rdpmc_index(struct perf_event *event);
#else
static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	memset(cap, 0, sizeof(*cap));
}

static inline u64 perf_get_hw_event_config(int hw_event)
{
	return 0;
}

static inline void perf_events_lapic_init(void)	{ }
static inline void perf_check_microcode(void) { }
#endif

#ifdef CONFIG_PERF_GUEST_MEDIATED_PMU
extern void perf_load_guest_lvtpc(u32 guest_lvtpc);
extern void perf_put_guest_lvtpc(void);
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data);
extern void x86_perf_get_lbr(struct x86_pmu_lbr *lbr);
#else
struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data);
static inline void x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
{
	memset(lbr, 0, sizeof(*lbr));
}
#endif

#ifdef CONFIG_CPU_SUP_INTEL
extern void intel_pt_handle_vmx(int on);
#else
static inline void intel_pt_handle_vmx(int on)
{

}
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
extern void amd_pmu_enable_virt(void);
extern void amd_pmu_disable_virt(void);

#if defined(CONFIG_PERF_EVENTS_AMD_BRS)

#define PERF_NEEDS_LOPWR_CB 1

/*
 * architectural low power callback impacts
 * drivers/acpi/processor_idle.c
 * drivers/acpi/acpi_pad.c
 */
extern void perf_amd_brs_lopwr_cb(bool lopwr_in);

DECLARE_STATIC_CALL(perf_lopwr_cb, perf_amd_brs_lopwr_cb);

static __always_inline void perf_lopwr_cb(bool lopwr_in)
{
	static_call_mod(perf_lopwr_cb)(lopwr_in);
}

#endif /* CONFIG_PERF_EVENTS_AMD_BRS */

#else
static inline void amd_pmu_enable_virt(void) { }
static inline void amd_pmu_disable_virt(void) { }
#endif

#define arch_perf_out_copy_user copy_from_user_nmi

#endif /* _ASM_X86_PERF_EVENT_H */