1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Intel(R) Processor Trace PMU driver for perf
4 * Copyright (c) 2013-2014, Intel Corporation.
5 *
6 * Intel PT is specified in the Intel Architecture Instruction Set Extensions
7 * Programming Reference:
8 * http://software.intel.com/en-us/intel-isa-extensions
9 */
10
11 #undef DEBUG
12
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15 #include <linux/types.h>
16 #include <linux/bits.h>
17 #include <linux/limits.h>
18 #include <linux/slab.h>
19 #include <linux/device.h>
20 #include <linux/kvm_types.h>
21
22 #include <asm/cpuid/api.h>
23 #include <asm/perf_event.h>
24 #include <asm/insn.h>
25 #include <asm/io.h>
26 #include <asm/intel_pt.h>
27 #include <asm/cpu_device_id.h>
28 #include <asm/msr.h>
29
30 #include "../perf_event.h"
31 #include "pt.h"
32
33 static DEFINE_PER_CPU(struct pt, pt_ctx);
34
35 static struct pt_pmu pt_pmu;
36
37 /*
38 * Capabilities of Intel PT hardware, such as number of address bits or
39 * supported output schemes, are cached and exported to userspace as "caps"
40 * attribute group of pt pmu device
41 * (/sys/bus/event_source/devices/intel_pt/caps/) so that userspace can store
42 * relevant bits together with intel_pt traces.
43 *
44 * These are necessary both for trace decoding (e.g. payloads_lip, which
45 * determines the address width encoded in IP-related packets) and for event
46 * configuration (bitmasks with permitted values for certain bit fields).
47 */
48 #define PT_CAP(_n, _l, _r, _m) \
49 [PT_CAP_ ## _n] = { .name = __stringify(_n), .leaf = _l, \
50 .reg = _r, .mask = _m }
51
52 static struct pt_cap_desc {
53 const char *name;
54 u32 leaf;
55 u8 reg;
56 u32 mask;
57 } pt_caps[] = {
58 PT_CAP(max_subleaf, 0, CPUID_EAX, 0xffffffff),
59 PT_CAP(cr3_filtering, 0, CPUID_EBX, BIT(0)),
60 PT_CAP(psb_cyc, 0, CPUID_EBX, BIT(1)),
61 PT_CAP(ip_filtering, 0, CPUID_EBX, BIT(2)),
62 PT_CAP(mtc, 0, CPUID_EBX, BIT(3)),
63 PT_CAP(ptwrite, 0, CPUID_EBX, BIT(4)),
64 PT_CAP(power_event_trace, 0, CPUID_EBX, BIT(5)),
65 PT_CAP(event_trace, 0, CPUID_EBX, BIT(7)),
66 PT_CAP(tnt_disable, 0, CPUID_EBX, BIT(8)),
67 PT_CAP(topa_output, 0, CPUID_ECX, BIT(0)),
68 PT_CAP(topa_multiple_entries, 0, CPUID_ECX, BIT(1)),
69 PT_CAP(single_range_output, 0, CPUID_ECX, BIT(2)),
70 PT_CAP(output_subsys, 0, CPUID_ECX, BIT(3)),
71 PT_CAP(payloads_lip, 0, CPUID_ECX, BIT(31)),
72 PT_CAP(num_address_ranges, 1, CPUID_EAX, 0x7),
73 PT_CAP(mtc_periods, 1, CPUID_EAX, 0xffff0000),
74 PT_CAP(cycle_thresholds, 1, CPUID_EBX, 0xffff),
75 PT_CAP(psb_periods, 1, CPUID_EBX, 0xffff0000),
76 };
77
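/*
 * Look up a capability bit field in a raw CPUID register dump (@caps),
 * indexed by leaf * PT_CPUID_REGS_NUM + register, and return its value
 * shifted down to bit 0.
 */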
78 u32 intel_pt_validate_cap(u32 *caps, enum pt_capabilities capability)
79 {
80 struct pt_cap_desc *cd = &pt_caps[capability];
81 u32 c = caps[cd->leaf * PT_CPUID_REGS_NUM + cd->reg];
82 unsigned int shift = __ffs(cd->mask);
83
84 return (c & cd->mask) >> shift;
85 }
86 EXPORT_SYMBOL_FOR_KVM(intel_pt_validate_cap);
87
88 u32 intel_pt_validate_hw_cap(enum pt_capabilities cap)
89 {
90 return intel_pt_validate_cap(pt_pmu.caps, cap);
91 }
92 EXPORT_SYMBOL_FOR_KVM(intel_pt_validate_hw_cap);
93
94 static ssize_t pt_cap_show(struct device *cdev,
95 struct device_attribute *attr,
96 char *buf)
97 {
98 struct dev_ext_attribute *ea =
99 container_of(attr, struct dev_ext_attribute, attr);
100 enum pt_capabilities cap = (long)ea->var;
101
102 return snprintf(buf, PAGE_SIZE, "%x\n", intel_pt_validate_hw_cap(cap));
103 }
104
105 static struct attribute_group pt_cap_group __ro_after_init = {
106 .name = "caps",
107 };
108
109 PMU_FORMAT_ATTR(pt, "config:0" );
110 PMU_FORMAT_ATTR(cyc, "config:1" );
111 PMU_FORMAT_ATTR(pwr_evt, "config:4" );
112 PMU_FORMAT_ATTR(fup_on_ptw, "config:5" );
113 PMU_FORMAT_ATTR(mtc, "config:9" );
114 PMU_FORMAT_ATTR(tsc, "config:10" );
115 PMU_FORMAT_ATTR(noretcomp, "config:11" );
116 PMU_FORMAT_ATTR(ptw, "config:12" );
117 PMU_FORMAT_ATTR(branch, "config:13" );
118 PMU_FORMAT_ATTR(event, "config:31" );
119 PMU_FORMAT_ATTR(notnt, "config:55" );
120 PMU_FORMAT_ATTR(mtc_period, "config:14-17" );
121 PMU_FORMAT_ATTR(cyc_thresh, "config:19-22" );
122 PMU_FORMAT_ATTR(psb_period, "config:24-27" );
123
124 static struct attribute *pt_formats_attr[] = {
125 &format_attr_pt.attr,
126 &format_attr_cyc.attr,
127 &format_attr_pwr_evt.attr,
128 &format_attr_event.attr,
129 &format_attr_notnt.attr,
130 &format_attr_fup_on_ptw.attr,
131 &format_attr_mtc.attr,
132 &format_attr_tsc.attr,
133 &format_attr_noretcomp.attr,
134 &format_attr_ptw.attr,
135 &format_attr_branch.attr,
136 &format_attr_mtc_period.attr,
137 &format_attr_cyc_thresh.attr,
138 &format_attr_psb_period.attr,
139 NULL,
140 };
141
142 static struct attribute_group pt_format_group = {
143 .name = "format",
144 .attrs = pt_formats_attr,
145 };
146
147 static ssize_t
148 pt_timing_attr_show(struct device *dev, struct device_attribute *attr,
149 char *page)
150 {
151 struct perf_pmu_events_attr *pmu_attr =
152 container_of(attr, struct perf_pmu_events_attr, attr);
153
154 switch (pmu_attr->id) {
155 case 0:
156 return sprintf(page, "%lu\n", pt_pmu.max_nonturbo_ratio);
157 case 1:
158 return sprintf(page, "%u:%u\n",
159 pt_pmu.tsc_art_num,
160 pt_pmu.tsc_art_den);
161 default:
162 break;
163 }
164
165 return -EINVAL;
166 }
167
168 PMU_EVENT_ATTR(max_nonturbo_ratio, timing_attr_max_nonturbo_ratio, 0,
169 pt_timing_attr_show);
170 PMU_EVENT_ATTR(tsc_art_ratio, timing_attr_tsc_art_ratio, 1,
171 pt_timing_attr_show);
172
173 static struct attribute *pt_timing_attr[] = {
174 &timing_attr_max_nonturbo_ratio.attr.attr,
175 &timing_attr_tsc_art_ratio.attr.attr,
176 NULL,
177 };
178
179 static struct attribute_group pt_timing_group = {
180 .attrs = pt_timing_attr,
181 };
182
183 static const struct attribute_group *pt_attr_groups[] = {
184 &pt_cap_group,
185 &pt_format_group,
186 &pt_timing_group,
187 NULL,
188 };
189
190 static int __init pt_pmu_hw_init(void)
191 {
192 struct dev_ext_attribute *de_attrs;
193 struct attribute **attrs;
194 size_t size;
195 u64 reg;
196 int ret;
197 long i;
198
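/* The maximum non-turbo ratio is enumerated in MSR_PLATFORM_INFO[15:8]. */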
199 rdmsrq(MSR_PLATFORM_INFO, reg);
200 pt_pmu.max_nonturbo_ratio = (reg & 0xff00) >> 8;
201
202 /*
203  * If available, read in the TSC to core crystal clock ratio;
204  * otherwise, a zero value of the numerator stands for
205  * "not enumerated", as per the SDM.
206  */
207 if (boot_cpu_data.cpuid_level >= CPUID_LEAF_TSC) {
208 u32 eax, ebx, ecx, edx;
209
210 cpuid(CPUID_LEAF_TSC, &eax, &ebx, &ecx, &edx);
211
212 pt_pmu.tsc_art_num = ebx;
213 pt_pmu.tsc_art_den = eax;
214 }
215
216 /* model-specific quirks */
217 switch (boot_cpu_data.x86_vfm) {
218 case INTEL_BROADWELL:
219 case INTEL_BROADWELL_D:
220 case INTEL_BROADWELL_G:
221 case INTEL_BROADWELL_X:
222 /* not setting BRANCH_EN will #GP, erratum BDM106 */
223 pt_pmu.branch_en_always_on = true;
224 break;
225 default:
226 break;
227 }
228
229 if (boot_cpu_has(X86_FEATURE_VMX)) {
230 /*
231 * Intel SDM, 36.5 "Tracing post-VMXON" says that
232 * "IA32_VMX_MISC[bit 14]" being 1 means PT can trace
233 * post-VMXON.
234 */
235 rdmsrq(MSR_IA32_VMX_MISC, reg);
236 if (reg & BIT(14))
237 pt_pmu.vmx = true;
238 }
239
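/* Cache all Intel PT enumeration sub-leaves of CPUID leaf 0x14. */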
240 for (i = 0; i < PT_CPUID_LEAVES; i++) {
241 cpuid_count(20, i,
242 &pt_pmu.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM],
243 &pt_pmu.caps[CPUID_EBX + i*PT_CPUID_REGS_NUM],
244 &pt_pmu.caps[CPUID_ECX + i*PT_CPUID_REGS_NUM],
245 &pt_pmu.caps[CPUID_EDX + i*PT_CPUID_REGS_NUM]);
246 }
247
248 ret = -ENOMEM;
249 size = sizeof(struct attribute *) * (ARRAY_SIZE(pt_caps)+1);
250 attrs = kzalloc(size, GFP_KERNEL);
251 if (!attrs)
252 goto fail;
253
254 size = sizeof(struct dev_ext_attribute) * (ARRAY_SIZE(pt_caps)+1);
255 de_attrs = kzalloc(size, GFP_KERNEL);
256 if (!de_attrs)
257 goto fail;
258
259 for (i = 0; i < ARRAY_SIZE(pt_caps); i++) {
260 struct dev_ext_attribute *de_attr = de_attrs + i;
261
262 de_attr->attr.attr.name = pt_caps[i].name;
263
264 sysfs_attr_init(&de_attr->attr.attr);
265
266 de_attr->attr.attr.mode = S_IRUGO;
267 de_attr->attr.show = pt_cap_show;
268 de_attr->var = (void *)i;
269
270 attrs[i] = &de_attr->attr.attr;
271 }
272
273 pt_cap_group.attrs = attrs;
274
275 return 0;
276
277 fail:
278 kfree(attrs);
279
280 return ret;
281 }
282
283 #define RTIT_CTL_CYC_PSB (RTIT_CTL_CYCLEACC | \
284 RTIT_CTL_CYC_THRESH | \
285 RTIT_CTL_PSB_FREQ)
286
287 #define RTIT_CTL_MTC (RTIT_CTL_MTC_EN | \
288 RTIT_CTL_MTC_RANGE)
289
290 #define RTIT_CTL_PTW (RTIT_CTL_PTW_EN | \
291 RTIT_CTL_FUP_ON_PTW)
292
293 /*
294 * Bit 0 (TraceEn) in the attr.config is meaningless as the
295 * corresponding bit in the RTIT_CTL can only be controlled
296 * by the driver; therefore, repurpose it to mean: pass
297 * through the bit that was previously assumed to be always
298 * on for PT, thereby allowing the user to *not* set it if
299 * they so wish. See also pt_event_valid() and pt_config().
300 */
301 #define RTIT_CTL_PASSTHROUGH RTIT_CTL_TRACEEN
302
303 #define PT_CONFIG_MASK (RTIT_CTL_TRACEEN | \
304 RTIT_CTL_TSC_EN | \
305 RTIT_CTL_DISRETC | \
306 RTIT_CTL_BRANCH_EN | \
307 RTIT_CTL_CYC_PSB | \
308 RTIT_CTL_MTC | \
309 RTIT_CTL_PWR_EVT_EN | \
310 RTIT_CTL_EVENT_EN | \
311 RTIT_CTL_NOTNT | \
312 RTIT_CTL_FUP_ON_PTW | \
313 RTIT_CTL_PTW_EN)
314
315 static bool pt_event_valid(struct perf_event *event)
316 {
317 u64 config = event->attr.config;
318 u64 allowed, requested;
319
320 if ((config & PT_CONFIG_MASK) != config)
321 return false;
322
323 if (config & RTIT_CTL_CYC_PSB) {
324 if (!intel_pt_validate_hw_cap(PT_CAP_psb_cyc))
325 return false;
326
327 allowed = intel_pt_validate_hw_cap(PT_CAP_psb_periods);
328 requested = (config & RTIT_CTL_PSB_FREQ) >>
329 RTIT_CTL_PSB_FREQ_OFFSET;
330 if (requested && (!(allowed & BIT(requested))))
331 return false;
332
333 allowed = intel_pt_validate_hw_cap(PT_CAP_cycle_thresholds);
334 requested = (config & RTIT_CTL_CYC_THRESH) >>
335 RTIT_CTL_CYC_THRESH_OFFSET;
336 if (requested && (!(allowed & BIT(requested))))
337 return false;
338 }
339
340 if (config & RTIT_CTL_MTC) {
341 /*
342 * In the unlikely case that CPUID lists valid mtc periods,
343 * but not the mtc capability, drop out here.
344 *
345 * Spec says that setting mtc period bits while mtc bit in
346 * CPUID is 0 will #GP, so better safe than sorry.
347 */
348 if (!intel_pt_validate_hw_cap(PT_CAP_mtc))
349 return false;
350
351 allowed = intel_pt_validate_hw_cap(PT_CAP_mtc_periods);
352 if (!allowed)
353 return false;
354
355 requested = (config & RTIT_CTL_MTC_RANGE) >>
356 RTIT_CTL_MTC_RANGE_OFFSET;
357
358 if (!(allowed & BIT(requested)))
359 return false;
360 }
361
362 if (config & RTIT_CTL_PWR_EVT_EN &&
363 !intel_pt_validate_hw_cap(PT_CAP_power_event_trace))
364 return false;
365
366 if (config & RTIT_CTL_EVENT_EN &&
367 !intel_pt_validate_hw_cap(PT_CAP_event_trace))
368 return false;
369
370 if (config & RTIT_CTL_NOTNT &&
371 !intel_pt_validate_hw_cap(PT_CAP_tnt_disable))
372 return false;
373
374 if (config & RTIT_CTL_PTW) {
375 if (!intel_pt_validate_hw_cap(PT_CAP_ptwrite))
376 return false;
377
378 /* FUPonPTW without PTW doesn't make sense */
379 if ((config & RTIT_CTL_FUP_ON_PTW) &&
380 !(config & RTIT_CTL_PTW_EN))
381 return false;
382 }
383
384 /*
385 * Setting bit 0 (TraceEn in RTIT_CTL MSR) in the attr.config
386 * clears the assumption that BranchEn must always be enabled,
387 * as was the case with the first implementation of PT.
388 * If this bit is not set, the legacy behavior is preserved
389 * for compatibility with the older userspace.
390 *
391 * Re-using bit 0 for this purpose is fine because it is never
392 * directly set by the user; previous attempts at setting it in
393 * the attr.config resulted in -EINVAL.
394 */
395 if (config & RTIT_CTL_PASSTHROUGH) {
396 /*
397 * Disallow not setting BRANCH_EN where BRANCH_EN is
398 * always required.
399 */
400 if (pt_pmu.branch_en_always_on &&
401 !(config & RTIT_CTL_BRANCH_EN))
402 return false;
403 } else {
404 /*
405 * Disallow BRANCH_EN without the PASSTHROUGH.
406 */
407 if (config & RTIT_CTL_BRANCH_EN)
408 return false;
409 }
410
411 return true;
412 }
413
414 /*
415 * PT configuration helpers
416 * These all are cpu affine and operate on a local PT
417 */
418
419 static void pt_config_start(struct perf_event *event)
420 {
421 struct pt *pt = this_cpu_ptr(&pt_ctx);
422 u64 ctl = event->hw.aux_config;
423
424 if (READ_ONCE(event->hw.aux_paused))
425 return;
426
427 ctl |= RTIT_CTL_TRACEEN;
428 if (READ_ONCE(pt->vmx_on))
429 perf_aux_output_flag(&pt->handle, PERF_AUX_FLAG_PARTIAL);
430 else
431 wrmsrq(MSR_IA32_RTIT_CTL, ctl);
432
433 WRITE_ONCE(event->hw.aux_config, ctl);
434 }
435
436 /* Address ranges and their corresponding msr configuration registers */
437 static const struct pt_address_range {
438 unsigned long msr_a;
439 unsigned long msr_b;
440 unsigned int reg_off;
441 } pt_address_ranges[] = {
442 {
443 .msr_a = MSR_IA32_RTIT_ADDR0_A,
444 .msr_b = MSR_IA32_RTIT_ADDR0_B,
445 .reg_off = RTIT_CTL_ADDR0_OFFSET,
446 },
447 {
448 .msr_a = MSR_IA32_RTIT_ADDR1_A,
449 .msr_b = MSR_IA32_RTIT_ADDR1_B,
450 .reg_off = RTIT_CTL_ADDR1_OFFSET,
451 },
452 {
453 .msr_a = MSR_IA32_RTIT_ADDR2_A,
454 .msr_b = MSR_IA32_RTIT_ADDR2_B,
455 .reg_off = RTIT_CTL_ADDR2_OFFSET,
456 },
457 {
458 .msr_a = MSR_IA32_RTIT_ADDR3_A,
459 .msr_b = MSR_IA32_RTIT_ADDR3_B,
460 .reg_off = RTIT_CTL_ADDR3_OFFSET,
461 }
462 };
463
464 static u64 pt_config_filters(struct perf_event *event)
465 {
466 struct pt_filters *filters = event->hw.addr_filters;
467 struct pt *pt = this_cpu_ptr(&pt_ctx);
468 unsigned int range = 0;
469 u64 rtit_ctl = 0;
470
471 if (!filters)
472 return 0;
473
474 perf_event_addr_filters_sync(event);
475
476 for (range = 0; range < filters->nr_filters; range++) {
477 struct pt_filter *filter = &filters->filter[range];
478
479 /*
480 * Note, if the range has zero start/end addresses due
481 * to its dynamic object not being loaded yet, we just
482 * go ahead and program a zeroed range, which will simply
483 * produce no data. Note^2: if executable code at 0x0
484 * is a concern, we can set up an "invalid" configuration
485 * such as msr_b < msr_a.
486 */
487
488 /* avoid redundant msr writes */
489 if (pt->filters.filter[range].msr_a != filter->msr_a) {
490 wrmsrq(pt_address_ranges[range].msr_a, filter->msr_a);
491 pt->filters.filter[range].msr_a = filter->msr_a;
492 }
493
494 if (pt->filters.filter[range].msr_b != filter->msr_b) {
495 wrmsrq(pt_address_ranges[range].msr_b, filter->msr_b);
496 pt->filters.filter[range].msr_b = filter->msr_b;
497 }
498
499 rtit_ctl |= (u64)filter->config << pt_address_ranges[range].reg_off;
500 }
501
502 return rtit_ctl;
503 }
504
505 static void pt_config(struct perf_event *event)
506 {
507 struct pt *pt = this_cpu_ptr(&pt_ctx);
508 struct pt_buffer *buf = perf_get_aux(&pt->handle);
509 u64 reg;
510
511 /* First round: clear STATUS, in particular the PSB byte counter. */
512 if (!event->hw.aux_config) {
513 perf_event_itrace_started(event);
514 wrmsrq(MSR_IA32_RTIT_STATUS, 0);
515 }
516
517 reg = pt_config_filters(event);
518 reg |= RTIT_CTL_TRACEEN;
519 if (!buf->single)
520 reg |= RTIT_CTL_TOPA;
521
522 /*
523 * Previously, we had BRANCH_EN on by default, but now that PT has
524 * grown features outside of branch tracing, it is useful to allow
525 * the user to disable it. Setting bit 0 in the event's attr.config
526 * allows BRANCH_EN to pass through instead of being always on. See
527 * also the comment in pt_event_valid().
528 */
529 if (event->attr.config & BIT(0)) {
530 reg |= event->attr.config & RTIT_CTL_BRANCH_EN;
531 } else {
532 reg |= RTIT_CTL_BRANCH_EN;
533 }
534
535 if (!event->attr.exclude_kernel)
536 reg |= RTIT_CTL_OS;
537 if (!event->attr.exclude_user)
538 reg |= RTIT_CTL_USR;
539
540 reg |= (event->attr.config & PT_CONFIG_MASK);
541
542 event->hw.aux_config = reg;
543
544 /*
545 * Allow resume before starting so as not to overwrite a value set by a
546 * PMI.
547 */
548 barrier();
549 WRITE_ONCE(pt->resume_allowed, 1);
550 /* Configuration is complete, it is now OK to handle an NMI */
551 barrier();
552 WRITE_ONCE(pt->handle_nmi, 1);
553 barrier();
554 pt_config_start(event);
555 barrier();
556 /*
557 * Allow pause after starting so its pt_config_stop() doesn't race with
558 * pt_config_start().
559 */
560 WRITE_ONCE(pt->pause_allowed, 1);
561 }
562
563 static void pt_config_stop(struct perf_event *event)
564 {
565 struct pt *pt = this_cpu_ptr(&pt_ctx);
566 u64 ctl = READ_ONCE(event->hw.aux_config);
567
568 /* may be already stopped by a PMI */
569 if (!(ctl & RTIT_CTL_TRACEEN))
570 return;
571
572 ctl &= ~RTIT_CTL_TRACEEN;
573 if (!READ_ONCE(pt->vmx_on))
574 wrmsrq(MSR_IA32_RTIT_CTL, ctl);
575
576 WRITE_ONCE(event->hw.aux_config, ctl);
577
578 /*
579 * A wrmsr that disables trace generation serializes other PT
580 * registers and causes all data packets to be written to memory,
581 * but a fence is required for the data to become globally visible.
582 *
583 * The below WMB, separating data store and aux_head store matches
584 * the consumer's RMB that separates aux_head load and data load.
585 */
586 wmb();
587 }
588
589 /**
590 * struct topa - ToPA metadata
591 * @list: linkage to struct pt_buffer's list of tables
592 * @offset: offset of the first entry in this table in the buffer
593 * @size: total size of all entries in this table
594 * @last: index of the last initialized entry in this table
595 * @z_count: how many times the first entry repeats
596 */
597 struct topa {
598 struct list_head list;
599 u64 offset;
600 size_t size;
601 int last;
602 unsigned int z_count;
603 };
604
605 /*
606 * Keep ToPA table-related metadata on the same page as the actual table,
607 * taking up a few words from the top
608 */
609
610 #define TENTS_PER_PAGE \
611 ((PAGE_SIZE - sizeof(struct topa)) / sizeof(struct topa_entry))
612
613 /**
614 * struct topa_page - page-sized ToPA table with metadata at the top
615 * @table: actual ToPA table entries, as understood by PT hardware
616 * @topa: metadata
617 */
618 struct topa_page {
619 struct topa_entry table[TENTS_PER_PAGE];
620 struct topa topa;
621 };
622
623 static inline struct topa_page *topa_to_page(struct topa *topa)
624 {
625 return container_of(topa, struct topa_page, topa);
626 }
627
628 static inline struct topa_page *topa_entry_to_page(struct topa_entry *te)
629 {
630 return (struct topa_page *)((unsigned long)te & PAGE_MASK);
631 }
632
633 static inline phys_addr_t topa_pfn(struct topa *topa)
634 {
635 return PFN_DOWN(virt_to_phys(topa_to_page(topa)));
636 }
637
638 /* make -1 stand for the last table entry */
639 #define TOPA_ENTRY(t, i) \
640 ((i) == -1 \
641 ? &topa_to_page(t)->table[(t)->last] \
642 : &topa_to_page(t)->table[(i)])
643 #define TOPA_ENTRY_SIZE(t, i) (sizes(TOPA_ENTRY((t), (i))->size))
644 #define TOPA_ENTRY_PAGES(t, i) (1 << TOPA_ENTRY((t), (i))->size)
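/* A ToPA entry's size field encodes its output region as 4KB << size, i.e. (1 << size) pages. */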
645
646 static void pt_config_buffer(struct pt_buffer *buf)
647 {
648 struct pt *pt = this_cpu_ptr(&pt_ctx);
649 u64 reg, mask;
650 void *base;
651
652 if (buf->single) {
653 base = buf->data_pages[0];
654 mask = (buf->nr_pages * PAGE_SIZE - 1) >> 7;
655 } else {
656 base = topa_to_page(buf->cur)->table;
657 mask = (u64)buf->cur_idx;
658 }
659
660 reg = virt_to_phys(base);
661 if (pt->output_base != reg) {
662 pt->output_base = reg;
663 wrmsrq(MSR_IA32_RTIT_OUTPUT_BASE, reg);
664 }
665
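/*
 * IA32_RTIT_OUTPUT_MASK_PTRS layout (per the SDM): bits 6:0 are always set,
 * bits 31:7 hold the single-range mask or the ToPA table offset, and bits
 * 63:32 hold the offset into the current output region.
 */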
666 reg = 0x7f | (mask << 7) | ((u64)buf->output_off << 32);
667 if (pt->output_mask != reg) {
668 pt->output_mask = reg;
669 wrmsrq(MSR_IA32_RTIT_OUTPUT_MASK, reg);
670 }
671 }
672
673 /**
674 * topa_alloc() - allocate page-sized ToPA table
675 * @cpu: CPU on which to allocate.
676 * @gfp: Allocation flags.
677 *
678 * Return: On success, return the pointer to ToPA table page.
679 */
680 static struct topa *topa_alloc(int cpu, gfp_t gfp)
681 {
682 int node = cpu_to_node(cpu);
683 struct topa_page *tp;
684 struct page *p;
685
686 p = alloc_pages_node(node, gfp | __GFP_ZERO, 0);
687 if (!p)
688 return NULL;
689
690 tp = page_address(p);
691 tp->topa.last = 0;
692
693 /*
694 * In case of single-entry ToPA, always put the self-referencing END
695 * link as the 2nd entry in the table
696 */
697 if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
698 TOPA_ENTRY(&tp->topa, 1)->base = page_to_phys(p) >> TOPA_SHIFT;
699 TOPA_ENTRY(&tp->topa, 1)->end = 1;
700 }
701
702 return &tp->topa;
703 }
704
705 /**
706 * topa_free() - free a page-sized ToPA table
707 * @topa: Table to deallocate.
708 */
709 static void topa_free(struct topa *topa)
710 {
711 free_page((unsigned long)topa);
712 }
713
714 /**
715 * topa_insert_table() - insert a ToPA table into a buffer
716 * @buf: PT buffer that's being extended.
717 * @topa: New topa table to be inserted.
718 *
719 * If it's the first table in this buffer, set up buffer's pointers
720 * accordingly; otherwise, add an END=1 link entry pointing to @topa in the
721 * current "last" table and adjust the last table pointer to @topa.
722 */
723 static void topa_insert_table(struct pt_buffer *buf, struct topa *topa)
724 {
725 struct topa *last = buf->last;
726
727 list_add_tail(&topa->list, &buf->tables);
728
729 if (!buf->first) {
730 buf->first = buf->last = buf->cur = topa;
731 return;
732 }
733
734 topa->offset = last->offset + last->size;
735 buf->last = topa;
736
737 if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
738 return;
739
740 BUG_ON(last->last != TENTS_PER_PAGE - 1);
741
742 TOPA_ENTRY(last, -1)->base = topa_pfn(topa);
743 TOPA_ENTRY(last, -1)->end = 1;
744 }
745
746 /**
747 * topa_table_full() - check if a ToPA table is filled up
748 * @topa: ToPA table.
749 */
750 static bool topa_table_full(struct topa *topa)
751 {
752 /* single-entry ToPA is a special case */
753 if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
754 return !!topa->last;
755
756 return topa->last == TENTS_PER_PAGE - 1;
757 }
758
759 /**
760 * topa_insert_pages() - create a list of ToPA tables
761 * @buf: PT buffer being initialized.
762 * @cpu: CPU on which to allocate.
763 * @gfp: Allocation flags.
764 *
765 * This initializes a list of ToPA tables with entries from
766 * the data_pages provided by rb_alloc_aux().
767 *
768 * Return: 0 on success or error code.
769 */
770 static int topa_insert_pages(struct pt_buffer *buf, int cpu, gfp_t gfp)
771 {
772 struct topa *topa = buf->last;
773 int order = 0;
774 struct page *p;
775
776 p = virt_to_page(buf->data_pages[buf->nr_pages]);
777 if (PagePrivate(p))
778 order = page_private(p);
779
780 if (topa_table_full(topa)) {
781 topa = topa_alloc(cpu, gfp);
782 if (!topa)
783 return -ENOMEM;
784
785 topa_insert_table(buf, topa);
786 }
787
788 if (topa->z_count == topa->last - 1) {
789 if (order == TOPA_ENTRY(topa, topa->last - 1)->size)
790 topa->z_count++;
791 }
792
793 TOPA_ENTRY(topa, -1)->base = page_to_phys(p) >> TOPA_SHIFT;
794 TOPA_ENTRY(topa, -1)->size = order;
795 if (!buf->snapshot &&
796 !intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
797 TOPA_ENTRY(topa, -1)->intr = 1;
798 TOPA_ENTRY(topa, -1)->stop = 1;
799 }
800
801 topa->last++;
802 topa->size += sizes(order);
803
804 buf->nr_pages += 1ul << order;
805
806 return 0;
807 }
808
809 /**
810 * pt_topa_dump() - print ToPA tables and their entries
811 * @buf: PT buffer.
812 */
813 static void pt_topa_dump(struct pt_buffer *buf)
814 {
815 struct topa *topa;
816
817 list_for_each_entry(topa, &buf->tables, list) {
818 struct topa_page *tp = topa_to_page(topa);
819 int i;
820
821 pr_debug("# table @%p, off %llx size %zx\n", tp->table,
822 topa->offset, topa->size);
823 for (i = 0; i < TENTS_PER_PAGE; i++) {
824 pr_debug("# entry @%p (%lx sz %u %c%c%c) raw=%16llx\n",
825 &tp->table[i],
826 (unsigned long)tp->table[i].base << TOPA_SHIFT,
827 sizes(tp->table[i].size),
828 tp->table[i].end ? 'E' : ' ',
829 tp->table[i].intr ? 'I' : ' ',
830 tp->table[i].stop ? 'S' : ' ',
831 *(u64 *)&tp->table[i]);
832 if ((intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) &&
833 tp->table[i].stop) ||
834 tp->table[i].end)
835 break;
836 if (!i && topa->z_count)
837 i += topa->z_count;
838 }
839 }
840 }
841
842 /**
843 * pt_buffer_advance() - advance to the next output region
844 * @buf: PT buffer.
845 *
846 * Advance the current pointers in the buffer to the next ToPA entry.
847 */
848 static void pt_buffer_advance(struct pt_buffer *buf)
849 {
850 buf->output_off = 0;
851 buf->cur_idx++;
852
853 if (buf->cur_idx == buf->cur->last) {
854 if (buf->cur == buf->last) {
855 buf->cur = buf->first;
856 buf->wrapped = true;
857 } else {
858 buf->cur = list_entry(buf->cur->list.next, struct topa,
859 list);
860 }
861 buf->cur_idx = 0;
862 }
863 }
864
865 /**
866 * pt_update_head() - calculate current offsets and sizes
867 * @pt: Per-cpu pt context.
868 *
869 * Update buffer's current write pointer position and data size.
870 */
871 static void pt_update_head(struct pt *pt)
872 {
873 struct pt_buffer *buf = perf_get_aux(&pt->handle);
874 bool wrapped = buf->wrapped;
875 u64 topa_idx, base, old;
876
877 buf->wrapped = false;
878
879 if (buf->single) {
880 local_set(&buf->data_size, buf->output_off);
881 return;
882 }
883
884 /* offset of the first region in this table from the beginning of buf */
885 base = buf->cur->offset + buf->output_off;
886
887 /* offset of the current output region within this table */
888 for (topa_idx = 0; topa_idx < buf->cur_idx; topa_idx++)
889 base += TOPA_ENTRY_SIZE(buf->cur, topa_idx);
890
891 if (buf->snapshot) {
892 local_set(&buf->data_size, base);
893 } else {
894 old = (local64_xchg(&buf->head, base) &
895 ((buf->nr_pages << PAGE_SHIFT) - 1));
896 if (base < old || (base == old && wrapped))
897 base += buf->nr_pages << PAGE_SHIFT;
898
899 local_add(base - old, &buf->data_size);
900 }
901 }
902
903 /**
904 * pt_buffer_region() - obtain current output region's address
905 * @buf: PT buffer.
906 */
907 static void *pt_buffer_region(struct pt_buffer *buf)
908 {
909 return phys_to_virt((phys_addr_t)TOPA_ENTRY(buf->cur, buf->cur_idx)->base << TOPA_SHIFT);
910 }
911
912 /**
913 * pt_buffer_region_size() - obtain current output region's size
914 * @buf: PT buffer.
915 */
916 static size_t pt_buffer_region_size(struct pt_buffer *buf)
917 {
918 return TOPA_ENTRY_SIZE(buf->cur, buf->cur_idx);
919 }
920
921 /**
922 * pt_handle_status() - take care of possible status conditions
923 * @pt: Per-cpu pt context.
924 */
925 static void pt_handle_status(struct pt *pt)
926 {
927 struct pt_buffer *buf = perf_get_aux(&pt->handle);
928 int advance = 0;
929 u64 status;
930
931 rdmsrq(MSR_IA32_RTIT_STATUS, status);
932
933 if (status & RTIT_STATUS_ERROR) {
934 pr_err_ratelimited("ToPA ERROR encountered, trying to recover\n");
935 pt_topa_dump(buf);
936 status &= ~RTIT_STATUS_ERROR;
937 }
938
939 if (status & RTIT_STATUS_STOPPED) {
940 status &= ~RTIT_STATUS_STOPPED;
941
942 /*
943 * On systems that only do single-entry ToPA, hitting STOP
944 * means we are already losing data; need to let the decoder
945 * know.
946 */
947 if (!buf->single &&
948 (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) ||
949 buf->output_off == pt_buffer_region_size(buf))) {
950 perf_aux_output_flag(&pt->handle,
951 PERF_AUX_FLAG_TRUNCATED);
952 advance++;
953 }
954 }
955
956 /*
957 * Also on single-entry ToPA implementations, interrupt will come
958 * before the output reaches its output region's boundary.
959 */
960 if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) &&
961 !buf->snapshot &&
962 pt_buffer_region_size(buf) - buf->output_off <= TOPA_PMI_MARGIN) {
963 void *head = pt_buffer_region(buf);
964
965 /* everything within this margin needs to be zeroed out */
966 memset(head + buf->output_off, 0,
967 pt_buffer_region_size(buf) -
968 buf->output_off);
969 advance++;
970 }
971
972 if (advance)
973 pt_buffer_advance(buf);
974
975 wrmsrq(MSR_IA32_RTIT_STATUS, status);
976 }
977
978 /**
979 * pt_read_offset() - translate registers into buffer pointers
980 * @buf: PT buffer.
981 *
982 * Set buffer's output pointers from MSR values.
983 */
984 static void pt_read_offset(struct pt_buffer *buf)
985 {
986 struct pt *pt = this_cpu_ptr(&pt_ctx);
987 struct topa_page *tp;
988
989 if (!buf->single) {
990 rdmsrq(MSR_IA32_RTIT_OUTPUT_BASE, pt->output_base);
991 tp = phys_to_virt(pt->output_base);
992 buf->cur = &tp->topa;
993 }
994
995 rdmsrq(MSR_IA32_RTIT_OUTPUT_MASK, pt->output_mask);
996 /* offset within current output region */
997 buf->output_off = pt->output_mask >> 32;
998 /* index of current output region within this table */
999 if (!buf->single)
1000 buf->cur_idx = (pt->output_mask & 0xffffff80) >> 7;
1001 }
1002
1003 static struct topa_entry *
1004 pt_topa_entry_for_page(struct pt_buffer *buf, unsigned int pg)
1005 {
1006 struct topa_page *tp;
1007 struct topa *topa;
1008 unsigned int idx, cur_pg = 0, z_pg = 0, start_idx = 0;
1009
1010 /*
1011 * Indicates a bug in the caller.
1012 */
1013 if (WARN_ON_ONCE(pg >= buf->nr_pages))
1014 return NULL;
1015
1016 /*
1017 * First, find the ToPA table where @pg fits. With high
1018 * order allocations, there shouldn't be many of these.
1019 */
1020 list_for_each_entry(topa, &buf->tables, list) {
1021 if (topa->offset + topa->size > (unsigned long)pg << PAGE_SHIFT)
1022 goto found;
1023 }
1024
1025 /*
1026 * Hitting this means we have a problem in the ToPA
1027 * allocation code.
1028 */
1029 WARN_ON_ONCE(1);
1030
1031 return NULL;
1032
1033 found:
1034 /*
1035 * Indicates a problem in the ToPA allocation code.
1036 */
1037 if (WARN_ON_ONCE(topa->last == -1))
1038 return NULL;
1039
1040 tp = topa_to_page(topa);
1041 cur_pg = PFN_DOWN(topa->offset);
1042 if (topa->z_count) {
1043 z_pg = TOPA_ENTRY_PAGES(topa, 0) * (topa->z_count + 1);
1044 start_idx = topa->z_count + 1;
1045 }
1046
1047 /*
1048 * Multiple entries at the beginning of the table have the same size,
1049 * ideally all of them; if @pg falls there, the search is done.
1050 */
1051 if (pg >= cur_pg && pg < cur_pg + z_pg) {
1052 idx = (pg - cur_pg) / TOPA_ENTRY_PAGES(topa, 0);
1053 return &tp->table[idx];
1054 }
1055
1056 /*
1057 * Otherwise, slow path: iterate through the remaining entries.
1058 */
1059 for (idx = start_idx, cur_pg += z_pg; idx < topa->last; idx++) {
1060 if (cur_pg + TOPA_ENTRY_PAGES(topa, idx) > pg)
1061 return &tp->table[idx];
1062
1063 cur_pg += TOPA_ENTRY_PAGES(topa, idx);
1064 }
1065
1066 /*
1067 * Getting here means we couldn't find a matching ToPA entry in the table.
1068 */
1069 WARN_ON_ONCE(1);
1070
1071 return NULL;
1072 }
1073
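/*
 * Return the ToPA entry preceding @te, wrapping around to the last entry
 * of the previous table (or of the buffer's last table).
 */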
1074 static struct topa_entry *
1075 pt_topa_prev_entry(struct pt_buffer *buf, struct topa_entry *te)
1076 {
1077 unsigned long table = (unsigned long)te & ~(PAGE_SIZE - 1);
1078 struct topa_page *tp;
1079 struct topa *topa;
1080
1081 tp = (struct topa_page *)table;
1082 if (tp->table != te)
1083 return --te;
1084
1085 topa = &tp->topa;
1086 if (topa == buf->first)
1087 topa = buf->last;
1088 else
1089 topa = list_prev_entry(topa, list);
1090
1091 tp = topa_to_page(topa);
1092
1093 return &tp->table[topa->last - 1];
1094 }
1095
1096 /**
1097 * pt_buffer_reset_markers() - place interrupt and stop bits in the buffer
1098 * @buf: PT buffer.
1099 * @handle: Current output handle.
1100 *
1101 * Place INT and STOP marks to prevent overwriting old data that the consumer
1102 * hasn't yet collected and waking up the consumer after a certain fraction of
1103 * the buffer has filled up. Only needed and sensible for non-snapshot counters.
1104 *
1105 * This obviously relies on buf::head to figure out buffer markers, so it has
1106 * to be called after pt_buffer_reset_offsets() and before the hardware tracing
1107 * is enabled.
1108 */
1109 static int pt_buffer_reset_markers(struct pt_buffer *buf,
1110 struct perf_output_handle *handle)
1111
1112 {
1113 unsigned long head = local64_read(&buf->head);
1114 unsigned long idx, npages, wakeup;
1115
1116 if (buf->single)
1117 return 0;
1118
1119 /* can't stop in the middle of an output region */
1120 if (buf->output_off + handle->size + 1 < pt_buffer_region_size(buf)) {
1121 perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
1122 return -EINVAL;
1123 }
1124
1125
1126 /* single entry ToPA is handled by marking all regions STOP=1 INT=1 */
1127 if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
1128 return 0;
1129
1130 /* clear STOP and INT from current entry */
1131 if (buf->stop_te) {
1132 buf->stop_te->stop = 0;
1133 buf->stop_te->intr = 0;
1134 }
1135
1136 if (buf->intr_te)
1137 buf->intr_te->intr = 0;
1138
1139 /* how many pages till the STOP marker */
1140 npages = handle->size >> PAGE_SHIFT;
1141
1142 /* if it's on a page boundary, fill up one more page */
1143 if (!offset_in_page(head + handle->size + 1))
1144 npages++;
1145
1146 idx = (head >> PAGE_SHIFT) + npages;
1147 idx &= buf->nr_pages - 1;
1148
1149 if (idx != buf->stop_pos) {
1150 buf->stop_pos = idx;
1151 buf->stop_te = pt_topa_entry_for_page(buf, idx);
1152 buf->stop_te = pt_topa_prev_entry(buf, buf->stop_te);
1153 }
1154
1155 wakeup = handle->wakeup >> PAGE_SHIFT;
1156
1157 /* in the worst case, wake up the consumer one page before hard stop */
1158 idx = (head >> PAGE_SHIFT) + npages - 1;
1159 if (idx > wakeup)
1160 idx = wakeup;
1161
1162 idx &= buf->nr_pages - 1;
1163 if (idx != buf->intr_pos) {
1164 buf->intr_pos = idx;
1165 buf->intr_te = pt_topa_entry_for_page(buf, idx);
1166 buf->intr_te = pt_topa_prev_entry(buf, buf->intr_te);
1167 }
1168
1169 buf->stop_te->stop = 1;
1170 buf->stop_te->intr = 1;
1171 buf->intr_te->intr = 1;
1172
1173 return 0;
1174 }
1175
1176 /**
1177 * pt_buffer_reset_offsets() - adjust buffer's write pointers from aux_head
1178 * @buf: PT buffer.
1179 * @head: Write pointer (aux_head) from AUX buffer.
1180 *
1181 * Find the ToPA table and entry corresponding to given @head and set buffer's
1182 * "current" pointers accordingly. This is done after we have obtained the
1183 * current aux_head position from a successful call to perf_aux_output_begin()
1184 * to make sure the hardware is writing to the right place.
1185 *
1186 * This function modifies buf::{cur,cur_idx,output_off} that will be programmed
1187 * into PT msrs when the tracing is enabled and buf::head and buf::data_size,
1188 * which are used to determine INT and STOP markers' locations by a subsequent
1189 * call to pt_buffer_reset_markers().
1190 */
1191 static void pt_buffer_reset_offsets(struct pt_buffer *buf, unsigned long head)
1192 {
1193 struct topa_page *cur_tp;
1194 struct topa_entry *te;
1195 int pg;
1196
1197 if (buf->snapshot)
1198 head &= (buf->nr_pages << PAGE_SHIFT) - 1;
1199
1200 if (!buf->single) {
1201 pg = (head >> PAGE_SHIFT) & (buf->nr_pages - 1);
1202 te = pt_topa_entry_for_page(buf, pg);
1203
1204 cur_tp = topa_entry_to_page(te);
1205 buf->cur = &cur_tp->topa;
1206 buf->cur_idx = te - TOPA_ENTRY(buf->cur, 0);
1207 buf->output_off = head & (pt_buffer_region_size(buf) - 1);
1208 } else {
1209 buf->output_off = head;
1210 }
1211
1212 local64_set(&buf->head, head);
1213 local_set(&buf->data_size, 0);
1214 }
1215
1216 /**
1217 * pt_buffer_fini_topa() - deallocate ToPA structure of a buffer
1218 * @buf: PT buffer.
1219 */
1220 static void pt_buffer_fini_topa(struct pt_buffer *buf)
1221 {
1222 struct topa *topa, *iter;
1223
1224 if (buf->single)
1225 return;
1226
1227 list_for_each_entry_safe(topa, iter, &buf->tables, list) {
1228 /*
1229 * right now, this is in free_aux() path only, so
1230 * no need to unlink this table from the list
1231 */
1232 topa_free(topa);
1233 }
1234 }
1235
1236 /**
1237 * pt_buffer_init_topa() - initialize ToPA table for pt buffer
1238 * @buf: PT buffer.
1239 * @cpu: CPU on which to allocate.
1240 * @nr_pages: No. of pages to allocate.
1241 * @gfp: Allocation flags.
1242 *
1243 * Return: 0 on success or error code.
1244 */
1245 static int pt_buffer_init_topa(struct pt_buffer *buf, int cpu,
1246 unsigned long nr_pages, gfp_t gfp)
1247 {
1248 struct topa *topa;
1249 int err;
1250
1251 topa = topa_alloc(cpu, gfp);
1252 if (!topa)
1253 return -ENOMEM;
1254
1255 topa_insert_table(buf, topa);
1256
1257 while (buf->nr_pages < nr_pages) {
1258 err = topa_insert_pages(buf, cpu, gfp);
1259 if (err) {
1260 pt_buffer_fini_topa(buf);
1261 return -ENOMEM;
1262 }
1263 }
1264
1265 /* link last table to the first one, unless we're double buffering */
1266 if (intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
1267 TOPA_ENTRY(buf->last, -1)->base = topa_pfn(buf->first);
1268 TOPA_ENTRY(buf->last, -1)->end = 1;
1269 }
1270
1271 pt_topa_dump(buf);
1272 return 0;
1273 }
1274
1275 static int pt_buffer_try_single(struct pt_buffer *buf, int nr_pages)
1276 {
1277 struct page *p = virt_to_page(buf->data_pages[0]);
1278 int ret = -ENOTSUPP, order = 0;
1279
1280 /*
1281 * We can use single range output mode
1282 * + in snapshot mode, where we don't need interrupts;
1283 * + if the hardware supports it;
1284 * + if the entire buffer is one contiguous allocation.
1285 */
1286 if (!buf->snapshot)
1287 goto out;
1288
1289 if (!intel_pt_validate_hw_cap(PT_CAP_single_range_output))
1290 goto out;
1291
1292 if (PagePrivate(p))
1293 order = page_private(p);
1294
1295 if (1 << order != nr_pages)
1296 goto out;
1297
1298 /*
1299 * Some processors cannot always support single range for more than
1300 * 4KB - refer errata TGL052, ADL037 and RPL017. Future processors might
1301 * also be affected, so for now rather than trying to keep track of
1302 * which ones, just disable it for all.
1303 */
1304 if (nr_pages > 1)
1305 goto out;
1306
1307 buf->single = true;
1308 buf->nr_pages = nr_pages;
1309 ret = 0;
1310 out:
1311 return ret;
1312 }
1313
1314 /**
1315 * pt_buffer_setup_aux() - set up topa tables for a PT buffer
1316 * @event: Performance event
1317 * @pages: Array of pointers to buffer pages passed from perf core.
1318 * @nr_pages: Number of pages in the buffer.
1319 * @snapshot: If this is a snapshot/overwrite counter.
1320 *
1321 * This is a pmu::setup_aux callback that sets up ToPA tables and all the
1322 * bookkeeping for an AUX buffer.
1323 *
1324 * Return: Our private PT buffer structure.
1325 */
1326 static void *
1327 pt_buffer_setup_aux(struct perf_event *event, void **pages,
1328 int nr_pages, bool snapshot)
1329 {
1330 struct pt_buffer *buf;
1331 int node, ret, cpu = event->cpu;
1332
1333 if (!nr_pages)
1334 return NULL;
1335
1336 /*
1337 * Only support AUX sampling in snapshot mode, where we don't
1338 * generate NMIs.
1339 */
1340 if (event->attr.aux_sample_size && !snapshot)
1341 return NULL;
1342
1343 if (cpu == -1)
1344 cpu = raw_smp_processor_id();
1345 node = cpu_to_node(cpu);
1346
1347 buf = kzalloc_node(sizeof(struct pt_buffer), GFP_KERNEL, node);
1348 if (!buf)
1349 return NULL;
1350
1351 buf->snapshot = snapshot;
1352 buf->data_pages = pages;
1353 buf->stop_pos = -1;
1354 buf->intr_pos = -1;
1355
1356 INIT_LIST_HEAD(&buf->tables);
1357
1358 ret = pt_buffer_try_single(buf, nr_pages);
1359 if (!ret)
1360 return buf;
1361
1362 ret = pt_buffer_init_topa(buf, cpu, nr_pages, GFP_KERNEL);
1363 if (ret) {
1364 kfree(buf);
1365 return NULL;
1366 }
1367
1368 return buf;
1369 }
1370
1371 /**
1372 * pt_buffer_free_aux() - perf AUX deallocation path callback
1373 * @data: PT buffer.
1374 */
1375 static void pt_buffer_free_aux(void *data)
1376 {
1377 struct pt_buffer *buf = data;
1378
1379 pt_buffer_fini_topa(buf);
1380 kfree(buf);
1381 }
1382
1383 static int pt_addr_filters_init(struct perf_event *event)
1384 {
1385 struct pt_filters *filters;
1386 int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);
1387
1388 if (!intel_pt_validate_hw_cap(PT_CAP_num_address_ranges))
1389 return 0;
1390
1391 filters = kzalloc_node(sizeof(struct pt_filters), GFP_KERNEL, node);
1392 if (!filters)
1393 return -ENOMEM;
1394
1395 if (event->parent)
1396 memcpy(filters, event->parent->hw.addr_filters,
1397 sizeof(*filters));
1398
1399 event->hw.addr_filters = filters;
1400
1401 return 0;
1402 }
1403
1404 static void pt_addr_filters_fini(struct perf_event *event)
1405 {
1406 kfree(event->hw.addr_filters);
1407 event->hw.addr_filters = NULL;
1408 }
1409
1410 #ifdef CONFIG_X86_64
1411 /* Clamp to a canonical address greater-than-or-equal-to the address given */
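/* A non-canonical @vaddr is clamped up to the lowest canonical address of the upper half, -BIT_ULL(vaddr_bits - 1). */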
1412 static u64 clamp_to_ge_canonical_addr(u64 vaddr, u8 vaddr_bits)
1413 {
1414 return __is_canonical_address(vaddr, vaddr_bits) ?
1415 vaddr :
1416 -BIT_ULL(vaddr_bits - 1);
1417 }
1418
1419 /* Clamp to a canonical address less-than-or-equal-to the address given */
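/* A non-canonical @vaddr is clamped down to the highest canonical address of the lower half, BIT_ULL(vaddr_bits - 1) - 1. */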
1420 static u64 clamp_to_le_canonical_addr(u64 vaddr, u8 vaddr_bits)
1421 {
1422 return __is_canonical_address(vaddr, vaddr_bits) ?
1423 vaddr :
1424 BIT_ULL(vaddr_bits - 1) - 1;
1425 }
1426 #else
1427 #define clamp_to_ge_canonical_addr(x, y) (x)
1428 #define clamp_to_le_canonical_addr(x, y) (x)
1429 #endif
1430
1431 static int pt_event_addr_filters_validate(struct list_head *filters)
1432 {
1433 struct perf_addr_filter *filter;
1434 int range = 0;
1435
1436 list_for_each_entry(filter, filters, entry) {
1437 /*
1438 * PT doesn't support single address triggers and
1439 * 'start' filters.
1440 */
1441 if (!filter->size ||
1442 filter->action == PERF_ADDR_FILTER_ACTION_START)
1443 return -EOPNOTSUPP;
1444
1445 if (++range > intel_pt_validate_hw_cap(PT_CAP_num_address_ranges))
1446 return -EOPNOTSUPP;
1447 }
1448
1449 return 0;
1450 }
1451
1452 static void pt_event_addr_filters_sync(struct perf_event *event)
1453 {
1454 struct perf_addr_filters_head *head = perf_event_addr_filters(event);
1455 unsigned long msr_a, msr_b;
1456 struct perf_addr_filter_range *fr = event->addr_filter_ranges;
1457 struct pt_filters *filters = event->hw.addr_filters;
1458 struct perf_addr_filter *filter;
1459 int range = 0;
1460
1461 if (!filters)
1462 return;
1463
1464 list_for_each_entry(filter, &head->list, entry) {
1465 if (filter->path.dentry && !fr[range].start) {
1466 msr_a = msr_b = 0;
1467 } else {
1468 unsigned long n = fr[range].size - 1;
1469 unsigned long a = fr[range].start;
1470 unsigned long b;
1471
1472 if (a > ULONG_MAX - n)
1473 b = ULONG_MAX;
1474 else
1475 b = a + n;
1476 /*
1477 * Apply the offset. 64-bit addresses written to the
1478 * MSRs must be canonical, but the range can encompass
1479 * non-canonical addresses. Since software cannot
1480 * execute at non-canonical addresses, adjusting to
1481 * canonical addresses does not affect the result of the
1482 * address filter.
1483 */
1484 msr_a = clamp_to_ge_canonical_addr(a, boot_cpu_data.x86_virt_bits);
1485 msr_b = clamp_to_le_canonical_addr(b, boot_cpu_data.x86_virt_bits);
1486 if (msr_b < msr_a)
1487 msr_a = msr_b = 0;
1488 }
1489
1490 filters->filter[range].msr_a = msr_a;
1491 filters->filter[range].msr_b = msr_b;
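/* ADDRn_CFG values (per the SDM): 1 == trace only inside the range, 2 == TraceStop on entering the range */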
1492 if (filter->action == PERF_ADDR_FILTER_ACTION_FILTER)
1493 filters->filter[range].config = 1;
1494 else
1495 filters->filter[range].config = 2;
1496 range++;
1497 }
1498
1499 filters->nr_filters = range;
1500 }
1501
1502 /**
1503 * intel_pt_interrupt() - PT PMI handler
1504 */
1505 void intel_pt_interrupt(void)
1506 {
1507 struct pt *pt = this_cpu_ptr(&pt_ctx);
1508 struct pt_buffer *buf;
1509 struct perf_event *event = pt->handle.event;
1510
1511 /*
1512 * There may be a dangling PT bit in the interrupt status register
1513 * after PT has been disabled by pt_event_stop(). Make sure we don't
1514 * do anything (particularly, re-enable) for this event here.
1515 */
1516 if (!READ_ONCE(pt->handle_nmi))
1517 return;
1518
1519 if (!event)
1520 return;
1521
1522 pt_config_stop(event);
1523
1524 buf = perf_get_aux(&pt->handle);
1525 if (!buf)
1526 return;
1527
1528 pt_read_offset(buf);
1529
1530 pt_handle_status(pt);
1531
1532 pt_update_head(pt);
1533
1534 perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0));
1535
1536 if (!event->hw.state) {
1537 int ret;
1538
1539 buf = perf_aux_output_begin(&pt->handle, event);
1540 if (!buf) {
1541 event->hw.state = PERF_HES_STOPPED;
1542 WRITE_ONCE(pt->resume_allowed, 0);
1543 return;
1544 }
1545
1546 pt_buffer_reset_offsets(buf, pt->handle.head);
1547 /* snapshot counters don't use PMI, so it's safe */
1548 ret = pt_buffer_reset_markers(buf, &pt->handle);
1549 if (ret) {
1550 perf_aux_output_end(&pt->handle, 0);
1551 WRITE_ONCE(pt->resume_allowed, 0);
1552 return;
1553 }
1554
1555 pt_config_buffer(buf);
1556 pt_config_start(event);
1557 }
1558 }
1559
1560 void intel_pt_handle_vmx(int on)
1561 {
1562 struct pt *pt = this_cpu_ptr(&pt_ctx);
1563 struct perf_event *event;
1564 unsigned long flags;
1565
1566 /* PT plays nice with VMX, do nothing */
1567 if (pt_pmu.vmx)
1568 return;
1569
1570 /*
1571 * VMXON will clear RTIT_CTL.TraceEn; we need to make
1572 * sure to not try to set it while VMX is on. Disable
1573 * interrupts to avoid racing with pmu callbacks;
1574 * concurrent PMI should be handled fine.
1575 */
1576 local_irq_save(flags);
1577 WRITE_ONCE(pt->vmx_on, on);
1578
1579 /*
1580 * If an AUX transaction is in progress, it will contain
1581 * gap(s), so flag it PARTIAL to inform the user.
1582 */
1583 event = pt->handle.event;
1584 if (event)
1585 perf_aux_output_flag(&pt->handle,
1586 PERF_AUX_FLAG_PARTIAL);
1587
1588 /* Turn PT back on */
1589 if (!on && event)
1590 wrmsrq(MSR_IA32_RTIT_CTL, event->hw.aux_config);
1591
1592 local_irq_restore(flags);
1593 }
1594 EXPORT_SYMBOL_FOR_KVM(intel_pt_handle_vmx);
1595
1596 /*
1597 * PMU callbacks
1598 */
1599
1600 static void pt_event_start(struct perf_event *event, int mode)
1601 {
1602 struct hw_perf_event *hwc = &event->hw;
1603 struct pt *pt = this_cpu_ptr(&pt_ctx);
1604 struct pt_buffer *buf;
1605
1606 if (mode & PERF_EF_RESUME) {
1607 if (READ_ONCE(pt->resume_allowed)) {
1608 u64 status;
1609
1610 /*
1611 * Only if the trace is not active and the error and
1612 * stopped bits are clear, is it safe to start, but a
1613 * PMI might have just cleared these, so resume_allowed
1614 * must be checked again also.
1615 */
1616 rdmsrq(MSR_IA32_RTIT_STATUS, status);
1617 if (!(status & (RTIT_STATUS_TRIGGEREN |
1618 RTIT_STATUS_ERROR |
1619 RTIT_STATUS_STOPPED)) &&
1620 READ_ONCE(pt->resume_allowed))
1621 pt_config_start(event);
1622 }
1623 return;
1624 }
1625
1626 buf = perf_aux_output_begin(&pt->handle, event);
1627 if (!buf)
1628 goto fail_stop;
1629
1630 pt_buffer_reset_offsets(buf, pt->handle.head);
1631 if (!buf->snapshot) {
1632 if (pt_buffer_reset_markers(buf, &pt->handle))
1633 goto fail_end_stop;
1634 }
1635
1636 hwc->state = 0;
1637
1638 pt_config_buffer(buf);
1639 pt_config(event);
1640
1641 return;
1642
1643 fail_end_stop:
1644 perf_aux_output_end(&pt->handle, 0);
1645 fail_stop:
1646 hwc->state = PERF_HES_STOPPED;
1647 }
1648
1649 static void pt_event_stop(struct perf_event *event, int mode)
1650 {
1651 struct pt *pt = this_cpu_ptr(&pt_ctx);
1652
1653 if (mode & PERF_EF_PAUSE) {
1654 if (READ_ONCE(pt->pause_allowed))
1655 pt_config_stop(event);
1656 return;
1657 }
1658
1659 /*
1660 * Protect against the PMI racing with disabling wrmsr,
1661 * see comment in intel_pt_interrupt().
1662 */
1663 WRITE_ONCE(pt->handle_nmi, 0);
1664 barrier();
1665
1666 /*
1667 * Prevent a resume from attempting to restart tracing, or a pause
1668 * during a subsequent start. Do this after clearing handle_nmi so that
1669 * pt_event_snapshot_aux() will not re-allow them.
1670 */
1671 WRITE_ONCE(pt->pause_allowed, 0);
1672 WRITE_ONCE(pt->resume_allowed, 0);
1673 barrier();
1674
1675 pt_config_stop(event);
1676
1677 if (event->hw.state == PERF_HES_STOPPED)
1678 return;
1679
1680 event->hw.state = PERF_HES_STOPPED;
1681
1682 if (mode & PERF_EF_UPDATE) {
1683 struct pt_buffer *buf = perf_get_aux(&pt->handle);
1684
1685 if (!buf)
1686 return;
1687
1688 if (WARN_ON_ONCE(pt->handle.event != event))
1689 return;
1690
1691 pt_read_offset(buf);
1692
1693 pt_handle_status(pt);
1694
1695 pt_update_head(pt);
1696
1697 if (buf->snapshot)
1698 pt->handle.head =
1699 local_xchg(&buf->data_size,
1700 buf->nr_pages << PAGE_SHIFT);
1701 perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0));
1702 }
1703 }
1704
1705 static long pt_event_snapshot_aux(struct perf_event *event,
1706 struct perf_output_handle *handle,
1707 unsigned long size)
1708 {
1709 struct pt *pt = this_cpu_ptr(&pt_ctx);
1710 struct pt_buffer *buf = perf_get_aux(&pt->handle);
1711 unsigned long from = 0, to;
1712 long ret;
1713
1714 if (WARN_ON_ONCE(!buf))
1715 return 0;
1716
1717 /*
1718 * Sampling is only allowed on snapshot events;
1719 * see pt_buffer_setup_aux().
1720 */
1721 if (WARN_ON_ONCE(!buf->snapshot))
1722 return 0;
1723
1724 /* Prevent pause/resume from attempting to start/stop tracing */
1725 WRITE_ONCE(pt->pause_allowed, 0);
1726 WRITE_ONCE(pt->resume_allowed, 0);
1727 barrier();
1728 /*
1729 * There is no PT interrupt in this mode, so stop the trace and it will
1730 * remain stopped while the buffer is copied.
1731 */
1732 pt_config_stop(event);
1733 pt_read_offset(buf);
1734 pt_update_head(pt);
1735
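/* Copy the last @size bytes ending at the current write position, wrapping around the start of the snapshot buffer if needed. */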
1736 to = local_read(&buf->data_size);
1737 if (to < size)
1738 from = buf->nr_pages << PAGE_SHIFT;
1739 from += to - size;
1740
1741 ret = perf_output_copy_aux(&pt->handle, handle, from, to);
1742
1743 /*
1744 * Here, handle_nmi tells us if the tracing was on.
1745 * If the tracing was on, restart it.
1746 */
1747 if (READ_ONCE(pt->handle_nmi)) {
1748 WRITE_ONCE(pt->resume_allowed, 1);
1749 barrier();
1750 pt_config_start(event);
1751 barrier();
1752 WRITE_ONCE(pt->pause_allowed, 1);
1753 }
1754
1755 return ret;
1756 }
1757
1758 static void pt_event_del(struct perf_event *event, int mode)
1759 {
1760 pt_event_stop(event, PERF_EF_UPDATE);
1761 }
1762
1763 static int pt_event_add(struct perf_event *event, int mode)
1764 {
1765 struct pt *pt = this_cpu_ptr(&pt_ctx);
1766 struct hw_perf_event *hwc = &event->hw;
1767 int ret = -EBUSY;
1768
1769 if (pt->handle.event)
1770 goto fail;
1771
1772 if (mode & PERF_EF_START) {
1773 pt_event_start(event, 0);
1774 ret = -EINVAL;
1775 if (hwc->state == PERF_HES_STOPPED)
1776 goto fail;
1777 } else {
1778 hwc->state = PERF_HES_STOPPED;
1779 }
1780
1781 ret = 0;
1782 fail:
1783
1784 return ret;
1785 }
1786
1787 static void pt_event_read(struct perf_event *event)
1788 {
1789 }
1790
1791 static void pt_event_destroy(struct perf_event *event)
1792 {
1793 pt_addr_filters_fini(event);
1794 x86_del_exclusive(x86_lbr_exclusive_pt);
1795 }
1796
1797 static int pt_event_init(struct perf_event *event)
1798 {
1799 if (event->attr.type != pt_pmu.pmu.type)
1800 return -ENOENT;
1801
1802 if (!pt_event_valid(event))
1803 return -EINVAL;
1804
1805 if (x86_add_exclusive(x86_lbr_exclusive_pt))
1806 return -EBUSY;
1807
1808 if (pt_addr_filters_init(event)) {
1809 x86_del_exclusive(x86_lbr_exclusive_pt);
1810 return -ENOMEM;
1811 }
1812
1813 event->destroy = pt_event_destroy;
1814
1815 return 0;
1816 }
1817
1818 void cpu_emergency_stop_pt(void)
1819 {
1820 struct pt *pt = this_cpu_ptr(&pt_ctx);
1821
1822 if (pt->handle.event)
1823 pt_event_stop(pt->handle.event, PERF_EF_UPDATE);
1824 }
1825
1826 int is_intel_pt_event(struct perf_event *event)
1827 {
1828 return event->pmu == &pt_pmu.pmu;
1829 }
1830
1831 static __init int pt_init(void)
1832 {
1833 int ret, cpu, prior_warn = 0;
1834
1835 BUILD_BUG_ON(sizeof(struct topa) > PAGE_SIZE);
1836
1837 if (!boot_cpu_has(X86_FEATURE_INTEL_PT))
1838 return -ENODEV;
1839
1840 cpus_read_lock();
1841 for_each_online_cpu(cpu) {
1842 u64 ctl;
1843
1844 ret = rdmsrq_safe_on_cpu(cpu, MSR_IA32_RTIT_CTL, &ctl);
1845 if (!ret && (ctl & RTIT_CTL_TRACEEN))
1846 prior_warn++;
1847 }
1848 cpus_read_unlock();
1849
1850 if (prior_warn) {
1851 x86_add_exclusive(x86_lbr_exclusive_pt);
1852 pr_warn("PT is enabled at boot time, doing nothing\n");
1853
1854 return -EBUSY;
1855 }
1856
1857 ret = pt_pmu_hw_init();
1858 if (ret)
1859 return ret;
1860
1861 if (!intel_pt_validate_hw_cap(PT_CAP_topa_output)) {
1862 pr_warn("ToPA output is not supported on this CPU\n");
1863 return -ENODEV;
1864 }
1865
1866 if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
1867 pt_pmu.pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG;
1868 else
1869 pt_pmu.pmu.capabilities = PERF_PMU_CAP_AUX_PREFER_LARGE;
1870
1871 pt_pmu.pmu.capabilities |= PERF_PMU_CAP_EXCLUSIVE |
1872 PERF_PMU_CAP_ITRACE |
1873 PERF_PMU_CAP_AUX_PAUSE;
1874 pt_pmu.pmu.attr_groups = pt_attr_groups;
1875 pt_pmu.pmu.task_ctx_nr = perf_sw_context;
1876 pt_pmu.pmu.event_init = pt_event_init;
1877 pt_pmu.pmu.add = pt_event_add;
1878 pt_pmu.pmu.del = pt_event_del;
1879 pt_pmu.pmu.start = pt_event_start;
1880 pt_pmu.pmu.stop = pt_event_stop;
1881 pt_pmu.pmu.snapshot_aux = pt_event_snapshot_aux;
1882 pt_pmu.pmu.read = pt_event_read;
1883 pt_pmu.pmu.setup_aux = pt_buffer_setup_aux;
1884 pt_pmu.pmu.free_aux = pt_buffer_free_aux;
1885 pt_pmu.pmu.addr_filters_sync = pt_event_addr_filters_sync;
1886 pt_pmu.pmu.addr_filters_validate = pt_event_addr_filters_validate;
1887 pt_pmu.pmu.nr_addr_filters =
1888 intel_pt_validate_hw_cap(PT_CAP_num_address_ranges);
1889
1890 ret = perf_pmu_register(&pt_pmu.pmu, "intel_pt", -1);
1891
1892 return ret;
1893 }
1894 arch_initcall(pt_init);
1895