// SPDX-License-Identifier: GPL-2.0
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <api/io_dir.h>
#include <subcmd/pager.h>
#include <sys/types.h>
#include <assert.h>
#include <ctype.h>
#include <pthread.h>
#include <string.h>
#include <unistd.h>
#include "cpumap.h"
#include "debug.h"
#include "evsel.h"
#include "pmus.h"
#include "pmu.h"
#include "hwmon_pmu.h"
#include "tool_pmu.h"
#include "print-events.h"
#include "strbuf.h"

/*
 * core_pmus:  A PMU belongs to core_pmus if its name is "cpu" or its sysfs
 *             directory contains a "cpus" file. All PMUs belonging to core_pmus
 *             must have pmu->is_core=1. If there is more than one PMU in
 *             this list, perf interprets it as a heterogeneous platform.
 *             (FWIW, certain ARM platforms with heterogeneous cores use a
 *             homogeneous PMU, and thus perf treats them as a homogeneous
 *             platform because core_pmus will have only one entry.)
 * other_pmus: All other PMUs that are not part of the core_pmus list. It doesn't
 *             matter whether a PMU is present per SMT thread or outside of the
 *             core in the hw. E.g., an instance of the AMD ibs_fetch// and
 *             ibs_op// PMUs is present in each hw SMT thread, yet they
 *             are captured under other_pmus. PMUs belonging to other_pmus
 *             must have pmu->is_core=0 but pmu->is_uncore could be 0 or 1.
 */
static LIST_HEAD(core_pmus);
static LIST_HEAD(other_pmus);
enum perf_tool_pmu_type {
	PERF_TOOL_PMU_TYPE_PE_CORE,
	PERF_TOOL_PMU_TYPE_PE_OTHER,
	PERF_TOOL_PMU_TYPE_TOOL,
	PERF_TOOL_PMU_TYPE_HWMON,

#define PERF_TOOL_PMU_TYPE_PE_CORE_MASK (1 << PERF_TOOL_PMU_TYPE_PE_CORE)
#define PERF_TOOL_PMU_TYPE_PE_OTHER_MASK (1 << PERF_TOOL_PMU_TYPE_PE_OTHER)
#define PERF_TOOL_PMU_TYPE_TOOL_MASK (1 << PERF_TOOL_PMU_TYPE_TOOL)
#define PERF_TOOL_PMU_TYPE_HWMON_MASK (1 << PERF_TOOL_PMU_TYPE_HWMON)

#define PERF_TOOL_PMU_TYPE_ALL_MASK (PERF_TOOL_PMU_TYPE_PE_CORE_MASK |	   \
					PERF_TOOL_PMU_TYPE_PE_OTHER_MASK | \
					PERF_TOOL_PMU_TYPE_TOOL_MASK |	   \
					PERF_TOOL_PMU_TYPE_HWMON_MASK)
};
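
/*
 * Bitmask of PERF_TOOL_PMU_TYPE_*_MASK values recording which classes of PMU
 * have already been read from sysfs (or otherwise created), so that repeated
 * lookups don't rescan them.
 */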
static unsigned int read_pmu_types;

static void pmu_read_sysfs(unsigned int to_read_pmus);

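/*
 * Return the length of a PMU name excluding any "_<instance>" suffix. For
 * example (illustrative), "uncore_imc_0" yields 10, the length of
 * "uncore_imc", while "cpum_cf" yields its full length of 7 because only two
 * trailing hex digits follow the '_' and so are not treated as a suffix.
 */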
size_t pmu_name_len_no_suffix(const char *str)
{
	int orig_len, len;
	bool has_hex_digits = false;

	orig_len = len = strlen(str);

	/* Count trailing digits. */
	while (len > 0 && isxdigit(str[len - 1])) {
		if (!isdigit(str[len - 1]))
			has_hex_digits = true;
		len--;
	}

	if (len > 0 && len != orig_len && str[len - 1] == '_') {
		/*
		 * There is a '_{num}' suffix. For decimal suffixes any length
		 * will do, for hexadecimal ensure more than 2 hex digits so
		 * that S390's cpum_cf PMU doesn't match.
		 */
		if (!has_hex_digits || (orig_len - len) > 2)
			return len - 1;
	}
	/* Use the full length. */
	return orig_len;
}

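/*
 * Order PMU names alphabetically on the suffix-less name, then numerically by
 * any "_<num>" suffix (parsed as hex), so that, illustratively, "uncore_imc_2"
 * sorts before "uncore_imc_10".
 */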
int pmu_name_cmp(const char *lhs_pmu_name, const char *rhs_pmu_name)
{
	unsigned long long lhs_num = 0, rhs_num = 0;
	size_t lhs_pmu_name_len = pmu_name_len_no_suffix(lhs_pmu_name);
	size_t rhs_pmu_name_len = pmu_name_len_no_suffix(rhs_pmu_name);
	int ret = strncmp(lhs_pmu_name, rhs_pmu_name,
			lhs_pmu_name_len < rhs_pmu_name_len ? lhs_pmu_name_len : rhs_pmu_name_len);

	if (lhs_pmu_name_len != rhs_pmu_name_len || ret != 0 || lhs_pmu_name_len == 0)
		return ret;

	if (lhs_pmu_name_len + 1 < strlen(lhs_pmu_name))
		lhs_num = strtoull(&lhs_pmu_name[lhs_pmu_name_len + 1], NULL, 16);
	if (rhs_pmu_name_len + 1 < strlen(rhs_pmu_name))
		rhs_num = strtoull(&rhs_pmu_name[rhs_pmu_name_len + 1], NULL, 16);

	return lhs_num < rhs_num ? -1 : (lhs_num > rhs_num ? 1 : 0);
}

void perf_pmus__destroy(void)
{
	struct perf_pmu *pmu, *tmp;

	list_for_each_entry_safe(pmu, tmp, &core_pmus, list) {
		list_del(&pmu->list);

		perf_pmu__delete(pmu);
	}
	list_for_each_entry_safe(pmu, tmp, &other_pmus, list) {
		list_del(&pmu->list);

		perf_pmu__delete(pmu);
	}
	read_pmu_types = 0;
}

static struct perf_pmu *pmu_find(const char *name)
{
	struct perf_pmu *pmu;

	list_for_each_entry(pmu, &core_pmus, list) {
		if (!strcmp(pmu->name, name) ||
		    (pmu->alias_name && !strcmp(pmu->alias_name, name)))
			return pmu;
	}
	list_for_each_entry(pmu, &other_pmus, list) {
		if (!strcmp(pmu->name, name) ||
		    (pmu->alias_name && !strcmp(pmu->alias_name, name)))
			return pmu;
	}

	return NULL;
}

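/*
 * Find a PMU by name, lazily reading from sysfs only the classes of PMU that
 * could provide it: a name beginning with "hwmon_" triggers reading the hwmon
 * PMUs, "tool" the tool PMU, and otherwise the perf_event core and/or other
 * PMUs.
 */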
struct perf_pmu *perf_pmus__find(const char *name)
{
	struct perf_pmu *pmu;
	int dirfd;
	bool core_pmu;
	unsigned int to_read_pmus = 0;

	/*
	 * Once a PMU is loaded it stays in the list, which keeps us from
	 * reading/parsing the PMU format definitions more than once.
	 */
	pmu = pmu_find(name);
	if (pmu)
		return pmu;

	if (read_pmu_types == PERF_TOOL_PMU_TYPE_ALL_MASK)
		return NULL;

	core_pmu = is_pmu_core(name);
	if (core_pmu && (read_pmu_types & PERF_TOOL_PMU_TYPE_PE_CORE_MASK))
		return NULL;

	dirfd = perf_pmu__event_source_devices_fd();
	pmu = perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name,
			       /*eager_load=*/false);
	close(dirfd);

	if (pmu)
		return pmu;

	/* Looking up an individual perf event PMU failed, check if a tool PMU should be read. */
	if (!strncmp(name, "hwmon_", 6))
		to_read_pmus |= PERF_TOOL_PMU_TYPE_HWMON_MASK;
	else if (!strcmp(name, "tool"))
		to_read_pmus |= PERF_TOOL_PMU_TYPE_TOOL_MASK;

	if (to_read_pmus) {
		pmu_read_sysfs(to_read_pmus);
		pmu = pmu_find(name);
		if (pmu)
			return pmu;
	}
	/* Read all necessary PMUs from sysfs and see if the PMU is found. */
	to_read_pmus = PERF_TOOL_PMU_TYPE_PE_CORE_MASK;
	if (!core_pmu)
		to_read_pmus |= PERF_TOOL_PMU_TYPE_PE_OTHER_MASK;
	pmu_read_sysfs(to_read_pmus);
	return pmu_find(name);
}

static struct perf_pmu *perf_pmu__find2(int dirfd, const char *name)
{
	struct perf_pmu *pmu;
	bool core_pmu;

	/*
	 * Once a PMU is loaded it stays in the list, which keeps us from
	 * reading/parsing the PMU format definitions more than once.
	 */
	pmu = pmu_find(name);
	if (pmu)
		return pmu;

	if (read_pmu_types == PERF_TOOL_PMU_TYPE_ALL_MASK)
		return NULL;

	core_pmu = is_pmu_core(name);
	if (core_pmu && (read_pmu_types & PERF_TOOL_PMU_TYPE_PE_CORE_MASK))
		return NULL;

	return perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name,
				/*eager_load=*/false);
}

static int pmus_cmp(void *priv __maybe_unused,
		    const struct list_head *lhs, const struct list_head *rhs)
{
	struct perf_pmu *lhs_pmu = container_of(lhs, struct perf_pmu, list);
	struct perf_pmu *rhs_pmu = container_of(rhs, struct perf_pmu, list);

	return pmu_name_cmp(lhs_pmu->name ?: "", rhs_pmu->name ?: "");
}

/* Add all pmus in sysfs to pmu list: */
static void pmu_read_sysfs(unsigned int to_read_types)
{
	struct perf_pmu *tool_pmu;

	if ((read_pmu_types & to_read_types) == to_read_types) {
		/* All requested PMU types have been read. */
		return;
	}

	if (to_read_types & (PERF_TOOL_PMU_TYPE_PE_CORE_MASK | PERF_TOOL_PMU_TYPE_PE_OTHER_MASK)) {
		int fd = perf_pmu__event_source_devices_fd();
		struct io_dir dir;
		struct io_dirent64 *dent;
		bool core_only = (to_read_types & PERF_TOOL_PMU_TYPE_PE_OTHER_MASK) == 0;

		if (fd < 0)
			goto skip_pe_pmus;

		io_dir__init(&dir, fd);

		while ((dent = io_dir__readdir(&dir)) != NULL) {
			if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
				continue;
			if (core_only && !is_pmu_core(dent->d_name))
				continue;
			/* add to static LIST_HEAD(core_pmus) or LIST_HEAD(other_pmus): */
			perf_pmu__find2(fd, dent->d_name);
		}

		close(fd);
	}
skip_pe_pmus:
	if ((to_read_types & PERF_TOOL_PMU_TYPE_PE_CORE_MASK) && list_empty(&core_pmus)) {
		if (!perf_pmu__create_placeholder_core_pmu(&core_pmus))
			pr_err("Failure to set up any core PMUs\n");
	}
	list_sort(NULL, &core_pmus, pmus_cmp);

	if ((to_read_types & PERF_TOOL_PMU_TYPE_TOOL_MASK) != 0 &&
	    (read_pmu_types & PERF_TOOL_PMU_TYPE_TOOL_MASK) == 0) {
		tool_pmu = tool_pmu__new();
		if (tool_pmu)
			list_add_tail(&tool_pmu->list, &other_pmus);
	}
	if ((to_read_types & PERF_TOOL_PMU_TYPE_HWMON_MASK) != 0 &&
	    (read_pmu_types & PERF_TOOL_PMU_TYPE_HWMON_MASK) == 0)
		perf_pmus__read_hwmon_pmus(&other_pmus);

	list_sort(NULL, &other_pmus, pmus_cmp);

	read_pmu_types |= to_read_types;
}

static struct perf_pmu *__perf_pmus__find_by_type(unsigned int type)
{
	struct perf_pmu *pmu;

	list_for_each_entry(pmu, &core_pmus, list) {
		if (pmu->type == type)
			return pmu;
	}

	list_for_each_entry(pmu, &other_pmus, list) {
		if (pmu->type == type)
			return pmu;
	}
	return NULL;
}

struct perf_pmu *perf_pmus__find_by_type(unsigned int type)
{
	unsigned int to_read_pmus;
	struct perf_pmu *pmu = __perf_pmus__find_by_type(type);

	if (pmu || (read_pmu_types == PERF_TOOL_PMU_TYPE_ALL_MASK))
		return pmu;

	if (type >= PERF_PMU_TYPE_PE_START && type <= PERF_PMU_TYPE_PE_END) {
		to_read_pmus = PERF_TOOL_PMU_TYPE_PE_CORE_MASK |
			PERF_TOOL_PMU_TYPE_PE_OTHER_MASK;
	} else if (type >= PERF_PMU_TYPE_HWMON_START && type <= PERF_PMU_TYPE_HWMON_END) {
		to_read_pmus = PERF_TOOL_PMU_TYPE_HWMON_MASK;
	} else {
		to_read_pmus = PERF_TOOL_PMU_TYPE_TOOL_MASK;
	}
	pmu_read_sysfs(to_read_pmus);
	pmu = __perf_pmus__find_by_type(type);
	return pmu;
}

/*
 * PMU iterator: if pmu is NULL, start at the beginning, otherwise return the
 * next PMU. Returns NULL at the end.
 */
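/*
 * Typical usage (matching the loops later in this file):
 *
 *	struct perf_pmu *pmu = NULL;
 *
 *	while ((pmu = perf_pmus__scan(pmu)) != NULL)
 *		...use pmu...
 */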
struct perf_pmu *perf_pmus__scan(struct perf_pmu *pmu)
{
	bool use_core_pmus = !pmu || pmu->is_core;

	if (!pmu) {
		pmu_read_sysfs(PERF_TOOL_PMU_TYPE_ALL_MASK);
		pmu = list_prepare_entry(pmu, &core_pmus, list);
	}
	if (use_core_pmus) {
		list_for_each_entry_continue(pmu, &core_pmus, list)
			return pmu;

		pmu = NULL;
		pmu = list_prepare_entry(pmu, &other_pmus, list);
	}
	list_for_each_entry_continue(pmu, &other_pmus, list)
		return pmu;
	return NULL;
}

struct perf_pmu *perf_pmus__scan_core(struct perf_pmu *pmu)
{
	if (!pmu) {
		pmu_read_sysfs(PERF_TOOL_PMU_TYPE_PE_CORE_MASK);
		return list_first_entry_or_null(&core_pmus, typeof(*pmu), list);
	}
	list_for_each_entry_continue(pmu, &core_pmus, list)
		return pmu;

	return NULL;
}

static struct perf_pmu *perf_pmus__scan_skip_duplicates(struct perf_pmu *pmu)
{
	bool use_core_pmus = !pmu || pmu->is_core;
	int last_pmu_name_len = 0;
	const char *last_pmu_name = (pmu && pmu->name) ? pmu->name : "";

	if (!pmu) {
		pmu_read_sysfs(PERF_TOOL_PMU_TYPE_ALL_MASK);
		pmu = list_prepare_entry(pmu, &core_pmus, list);
	} else
		last_pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "");

	if (use_core_pmus) {
		list_for_each_entry_continue(pmu, &core_pmus, list) {
			int pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "");

			if (last_pmu_name_len == pmu_name_len &&
			    !strncmp(last_pmu_name, pmu->name ?: "", pmu_name_len))
				continue;

			return pmu;
		}
		pmu = NULL;
		pmu = list_prepare_entry(pmu, &other_pmus, list);
	}
	list_for_each_entry_continue(pmu, &other_pmus, list) {
		int pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "");

		if (last_pmu_name_len == pmu_name_len &&
		    !strncmp(last_pmu_name, pmu->name ?: "", pmu_name_len))
			continue;

		return pmu;
	}
	return NULL;
}

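/*
 * Find the PMU, if any, that a "perf list" PMU filter string refers to. The
 * comparison also tries the PMU name with its "uncore_" or "cpu_" prefix
 * dropped, so, illustratively, "atom" matches the Intel hybrid PMU named
 * "cpu_atom".
 */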
const struct perf_pmu *perf_pmus__pmu_for_pmu_filter(const char *str)
{
	struct perf_pmu *pmu = NULL;

	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		if (!strcmp(pmu->name, str))
			return pmu;
		/* Ignore "uncore_" prefix. */
		if (!strncmp(pmu->name, "uncore_", 7)) {
			if (!strcmp(pmu->name + 7, str))
				return pmu;
		}
		/* Ignore "cpu_" prefix on Intel hybrid PMUs. */
		if (!strncmp(pmu->name, "cpu_", 4)) {
			if (!strcmp(pmu->name + 4, str))
				return pmu;
		}
	}
	return NULL;
}

/** Struct for ordering events as output in perf list. */
struct sevent {
	/** PMU for event. */
	const struct perf_pmu *pmu;
	const char *name;
	const char *alias;
	const char *scale_unit;
	const char *desc;
	const char *long_desc;
	const char *encoding_desc;
	const char *topic;
	const char *pmu_name;
	const char *event_type_desc;
	bool deprecated;
};

static int cmp_sevent(const void *a, const void *b)
{
	const struct sevent *as = a;
	const struct sevent *bs = b;
	bool a_iscpu, b_iscpu;
	int ret;

	/* Put extra events last. */
	if (!!as->desc != !!bs->desc)
		return !!as->desc - !!bs->desc;

	/* Order by topics. */
	ret = strcmp(as->topic ?: "", bs->topic ?: "");
	if (ret)
		return ret;

	/* Order CPU core events to be first */
	a_iscpu = as->pmu ? as->pmu->is_core : true;
	b_iscpu = bs->pmu ? bs->pmu->is_core : true;
	if (a_iscpu != b_iscpu)
		return a_iscpu ? -1 : 1;

	/* Order by PMU name. */
	if (as->pmu != bs->pmu) {
		ret = strcmp(as->pmu_name ?: "", bs->pmu_name ?: "");
		if (ret)
			return ret;
	}

	/* Order by event name. */
	return strcmp(as->name, bs->name);
}

static bool pmu_alias_is_duplicate(struct sevent *a, struct sevent *b)
{
	/* Different names -> never duplicates */
	if (strcmp(a->name ?: "//", b->name ?: "//"))
		return false;

	/* Don't remove duplicates for different PMUs */
	return strcmp(a->pmu_name, b->pmu_name) == 0;
}

struct events_callback_state {
	struct sevent *aliases;
	size_t aliases_len;
	size_t index;
};

static int perf_pmus__print_pmu_events__callback(void *vstate,
						 struct pmu_event_info *info)
{
	struct events_callback_state *state = vstate;
	struct sevent *s;

	if (state->index >= state->aliases_len) {
		pr_err("Unexpected event %s/%s/\n", info->pmu->name, info->name);
		return 1;
	}
	assert(info->pmu != NULL || info->name != NULL);
	s = &state->aliases[state->index];
	s->pmu = info->pmu;
#define COPY_STR(str) s->str = info->str ? strdup(info->str) : NULL
	COPY_STR(name);
	COPY_STR(alias);
	COPY_STR(scale_unit);
	COPY_STR(desc);
	COPY_STR(long_desc);
	COPY_STR(encoding_desc);
	COPY_STR(topic);
	COPY_STR(pmu_name);
	COPY_STR(event_type_desc);
#undef COPY_STR
	s->deprecated = info->deprecated;
	state->index++;
	return 0;
}

void perf_pmus__print_pmu_events(const struct print_callbacks *print_cb, void *print_state)
{
	struct perf_pmu *pmu;
	int printed = 0;
	int len;
	struct sevent *aliases;
	struct events_callback_state state;
	bool skip_duplicate_pmus = print_cb->skip_duplicate_pmus(print_state);
	struct perf_pmu *(*scan_fn)(struct perf_pmu *);

	if (skip_duplicate_pmus)
		scan_fn = perf_pmus__scan_skip_duplicates;
	else
		scan_fn = perf_pmus__scan;

	pmu = NULL;
	len = 0;
	while ((pmu = scan_fn(pmu)) != NULL)
		len += perf_pmu__num_events(pmu);

	aliases = zalloc(sizeof(struct sevent) * len);
	if (!aliases) {
		pr_err("FATAL: not enough memory to print PMU events\n");
		return;
	}
	pmu = NULL;
	state = (struct events_callback_state) {
		.aliases = aliases,
		.aliases_len = len,
		.index = 0,
	};
	while ((pmu = scan_fn(pmu)) != NULL) {
		perf_pmu__for_each_event(pmu, skip_duplicate_pmus, &state,
					 perf_pmus__print_pmu_events__callback);
	}
	qsort(aliases, len, sizeof(struct sevent), cmp_sevent);
	for (int j = 0; j < len; j++) {
		/* Skip duplicates */
		if (j < len - 1 && pmu_alias_is_duplicate(&aliases[j], &aliases[j + 1]))
			goto free;

		print_cb->print_event(print_state,
				      aliases[j].topic,
				      aliases[j].pmu_name,
				      aliases[j].name,
				      aliases[j].alias,
				      aliases[j].scale_unit,
				      aliases[j].deprecated,
				      aliases[j].event_type_desc,
				      aliases[j].desc,
				      aliases[j].long_desc,
				      aliases[j].encoding_desc);
free:
		zfree(&aliases[j].name);
		zfree(&aliases[j].alias);
		zfree(&aliases[j].scale_unit);
		zfree(&aliases[j].desc);
		zfree(&aliases[j].long_desc);
		zfree(&aliases[j].encoding_desc);
		zfree(&aliases[j].topic);
		zfree(&aliases[j].pmu_name);
		zfree(&aliases[j].event_type_desc);
	}
	if (printed && pager_in_use())
		printf("\n");

	zfree(&aliases);
}

struct build_format_string_args {
	struct strbuf short_string;
	struct strbuf long_string;
	int num_formats;
};

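/*
 * perf_pmu__for_each_format() callback that builds two strings describing a
 * PMU's raw event syntax: long_string lists every format term while
 * short_string stops after the first three (the caller appends ",..." when
 * more exist). Illustratively, 8-bit event/umask formats plus a 1-bit edge
 * format would produce "event=0..255,umask=0..255,edge".
 */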
static int build_format_string(void *state, const char *name, int config,
			       const unsigned long *bits)
{
	struct build_format_string_args *args = state;
	unsigned int num_bits;
	int ret1, ret2 = 0;

	(void)config;
	args->num_formats++;
	if (args->num_formats > 1) {
		strbuf_addch(&args->long_string, ',');
		if (args->num_formats < 4)
			strbuf_addch(&args->short_string, ',');
	}
	num_bits = bits ? bitmap_weight(bits, PERF_PMU_FORMAT_BITS) : 0;
	if (num_bits <= 1) {
		ret1 = strbuf_addf(&args->long_string, "%s", name);
		if (args->num_formats < 4)
			ret2 = strbuf_addf(&args->short_string, "%s", name);
	} else if (num_bits > 8) {
		ret1 = strbuf_addf(&args->long_string, "%s=0..0x%llx", name,
				   ULLONG_MAX >> (64 - num_bits));
		if (args->num_formats < 4) {
			ret2 = strbuf_addf(&args->short_string, "%s=0..0x%llx", name,
					   ULLONG_MAX >> (64 - num_bits));
		}
	} else {
		ret1 = strbuf_addf(&args->long_string, "%s=0..%llu", name,
				   ULLONG_MAX >> (64 - num_bits));
		if (args->num_formats < 4) {
			ret2 = strbuf_addf(&args->short_string, "%s=0..%llu", name,
					   ULLONG_MAX >> (64 - num_bits));
		}
	}
	return ret1 < 0 ? ret1 : (ret2 < 0 ? ret2 : 0);
}

void perf_pmus__print_raw_pmu_events(const struct print_callbacks *print_cb, void *print_state)
{
	bool skip_duplicate_pmus = print_cb->skip_duplicate_pmus(print_state);
	struct perf_pmu *(*scan_fn)(struct perf_pmu *);
	struct perf_pmu *pmu = NULL;

	if (skip_duplicate_pmus)
		scan_fn = perf_pmus__scan_skip_duplicates;
	else
		scan_fn = perf_pmus__scan;

	while ((pmu = scan_fn(pmu)) != NULL) {
		struct build_format_string_args format_args = {
			.short_string = STRBUF_INIT,
			.long_string = STRBUF_INIT,
			.num_formats = 0,
		};
		int len = pmu_name_len_no_suffix(pmu->name);
		const char *desc = "(see 'man perf-list' or 'man perf-record' on how to encode it)";

		if (!pmu->is_core)
			desc = NULL;

		strbuf_addf(&format_args.short_string, "%.*s/", len, pmu->name);
		strbuf_addf(&format_args.long_string, "%.*s/", len, pmu->name);
		perf_pmu__for_each_format(pmu, &format_args, build_format_string);

		if (format_args.num_formats > 3)
			strbuf_addf(&format_args.short_string, ",.../modifier");
		else
			strbuf_addf(&format_args.short_string, "/modifier");

		strbuf_addf(&format_args.long_string, "/modifier");
		print_cb->print_event(print_state,
				      /*topic=*/NULL,
				      /*pmu_name=*/NULL,
				      format_args.short_string.buf,
				      /*event_alias=*/NULL,
				      /*scale_unit=*/NULL,
				      /*deprecated=*/false,
				      "Raw event descriptor",
				      desc,
				      /*long_desc=*/NULL,
				      format_args.long_string.buf);

		strbuf_release(&format_args.short_string);
		strbuf_release(&format_args.long_string);
	}
}

bool perf_pmus__have_event(const char *pname, const char *name)
{
	struct perf_pmu *pmu = perf_pmus__find(pname);

	return pmu && perf_pmu__have_event(pmu, name);
}

int perf_pmus__num_core_pmus(void)
{
	static int count;

	if (!count) {
		struct perf_pmu *pmu = NULL;

		while ((pmu = perf_pmus__scan_core(pmu)) != NULL)
			count++;
	}
	return count;
}

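/*
 * On systems with more than one core PMU, legacy hardware and cache events
 * can encode the PMU to use in the upper bits of attr.config (above
 * PERF_PMU_TYPE_SHIFT). Check that every core PMU accepts cycles encoded this
 * way before relying on it.
 */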
static bool __perf_pmus__supports_extended_type(void)
{
	struct perf_pmu *pmu = NULL;

	if (perf_pmus__num_core_pmus() <= 1)
		return false;

	while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
		if (!is_event_supported(PERF_TYPE_HARDWARE,
					PERF_COUNT_HW_CPU_CYCLES |
					((__u64)pmu->type << PERF_PMU_TYPE_SHIFT)))
			return false;
	}

	return true;
}

static bool perf_pmus__do_support_extended_type;

static void perf_pmus__init_supports_extended_type(void)
{
	perf_pmus__do_support_extended_type = __perf_pmus__supports_extended_type();
}

bool perf_pmus__supports_extended_type(void)
{
	static pthread_once_t extended_type_once = PTHREAD_ONCE_INIT;

	pthread_once(&extended_type_once, perf_pmus__init_supports_extended_type);

	return perf_pmus__do_support_extended_type;
}

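/*
 * Resolve and cache the PMU for an evsel. For legacy hardware/cache event
 * types on platforms that support extended types, the PMU is recovered from
 * the high bits of attr.config; otherwise the first core PMU is assumed.
 */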
struct perf_pmu *evsel__find_pmu(const struct evsel *evsel)
{
	struct perf_pmu *pmu = evsel->pmu;
	bool legacy_core_type;

	if (pmu)
		return pmu;

	pmu = perf_pmus__find_by_type(evsel->core.attr.type);
	legacy_core_type =
		evsel->core.attr.type == PERF_TYPE_HARDWARE ||
		evsel->core.attr.type == PERF_TYPE_HW_CACHE;
	if (!pmu && legacy_core_type) {
		if (perf_pmus__supports_extended_type()) {
			u32 type = evsel->core.attr.config >> PERF_PMU_TYPE_SHIFT;

			pmu = perf_pmus__find_by_type(type);
		} else {
			pmu = perf_pmus__find_core_pmu();
		}
	}
	((struct evsel *)evsel)->pmu = pmu;
	return pmu;
}

struct perf_pmu *perf_pmus__find_core_pmu(void)
{
	return perf_pmus__scan_core(NULL);
}

struct perf_pmu *perf_pmus__add_test_pmu(int test_sysfs_dirfd, const char *name)
{
	/*
	 * Some PMU functions read from the sysfs mount point, so care is
	 * needed, hence passing the eager_load flag to load things like the
	 * format files.
	 */
	return perf_pmu__lookup(&other_pmus, test_sysfs_dirfd, name, /*eager_load=*/true);
}

struct perf_pmu *perf_pmus__add_test_hwmon_pmu(int hwmon_dir,
					       const char *sysfs_name,
					       const char *name)
{
	return hwmon_pmu__new(&other_pmus, hwmon_dir, sysfs_name, name);
}

struct perf_pmu *perf_pmus__fake_pmu(void)
{
	static struct perf_pmu fake = {
		.name = "fake",
		.type = PERF_PMU_TYPE_FAKE,
		.format = LIST_HEAD_INIT(fake.format),
	};

	return &fake;
}