// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017, Intel Corporation.
 */

/* Manage metrics and groups of metrics from JSON files */

#include "metricgroup.h"
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "strbuf.h"
#include "pmu.h"
#include "pmus.h"
#include "print-events.h"
#include "smt.h"
#include "tool_pmu.h"
#include "expr.h"
#include "rblist.h"
#include <string.h>
#include <errno.h>
#include "strlist.h"
#include <assert.h>
#include <linux/ctype.h>
#include <linux/list_sort.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <perf/cpumap.h>
#include <subcmd/parse-options.h>
#include <api/fs/fs.h>
#include "util.h"
#include <asm/bug.h>
#include "cgroup.h"
#include "util/hashmap.h"

struct metric_event *metricgroup__lookup(struct rblist *metric_events,
					 struct evsel *evsel,
					 bool create)
{
	struct rb_node *nd;
	struct metric_event me = {
		.evsel = evsel
	};

	if (!metric_events)
		return NULL;

	if (evsel && evsel->metric_leader)
		me.evsel = evsel->metric_leader;
	nd = rblist__find(metric_events, &me);
	if (nd)
		return container_of(nd, struct metric_event, nd);
	if (create) {
		rblist__add_node(metric_events, &me);
		nd = rblist__find(metric_events, &me);
		if (nd)
			return container_of(nd, struct metric_event, nd);
	}
	return NULL;
}

static int metric_event_cmp(struct rb_node *rb_node, const void *entry)
{
	struct metric_event *a = container_of(rb_node,
					      struct metric_event,
					      nd);
	const struct metric_event *b = entry;

	if (a->evsel == b->evsel)
		return 0;
	if ((char *)a->evsel < (char *)b->evsel)
		return -1;
	return +1;
}

static struct rb_node *metric_event_new(struct rblist *rblist __maybe_unused,
					const void *entry)
{
	struct metric_event *me = malloc(sizeof(struct metric_event));

	if (!me)
		return NULL;
	memcpy(me, entry, sizeof(struct metric_event));
	me->evsel = ((struct metric_event *)entry)->evsel;
	me->is_default = false;
	INIT_LIST_HEAD(&me->head);
	return &me->nd;
}

static void metric_event_delete(struct rblist *rblist __maybe_unused,
				struct rb_node *rb_node)
{
	struct metric_event *me = container_of(rb_node, struct metric_event, nd);
	struct metric_expr *expr, *tmp;

	list_for_each_entry_safe(expr, tmp, &me->head, nd) {
		zfree(&expr->metric_name);
		zfree(&expr->metric_refs);
		zfree(&expr->metric_events);
		free(expr);
	}

	free(me);
}

static void metricgroup__rblist_init(struct rblist *metric_events)
{
	rblist__init(metric_events);
	metric_events->node_cmp = metric_event_cmp;
	metric_events->node_new = metric_event_new;
	metric_events->node_delete = metric_event_delete;
}

void metricgroup__rblist_exit(struct rblist *metric_events)
{
	rblist__exit(metric_events);
}

/**
 * The metric under construction. The data held here will be placed in a
 * metric_expr.
 */
struct metric {
	struct list_head nd;
	/**
	 * The expression parse context importantly holding the IDs contained
	 * within the expression.
	 */
	struct expr_parse_ctx *pctx;
	const char *pmu;
	/** The name of the metric such as "IPC". */
	const char *metric_name;
	/** Modifier on the metric such as "u" or NULL for none. */
	const char *modifier;
	/** The expression to parse, for example, "instructions/cycles". */
	const char *metric_expr;
	/** Optional threshold expression where zero value is green, otherwise red. */
	const char *metric_threshold;
	/**
	 * The "ScaleUnit" that scales and adds a unit to the metric during
	 * output.
	 */
	const char *metric_unit;
	/**
	 * Optional name of the metric group reported
	 * if the Default metric group is being processed.
	 */
	const char *default_metricgroup_name;
	/** Optional null terminated array of referenced metrics. */
	struct metric_ref *metric_refs;
	/**
	 * Should events of the metric be grouped?
	 */
	bool group_events;
	/**
	 * Parsed events for the metric. Optional as events may be taken from a
	 * different metric whose group contains all the IDs necessary for this
	 * one.
	 */
	struct evlist *evlist;
};

static void metric__watchdog_constraint_hint(const char *name, bool foot)
{
	static bool violate_nmi_constraint;

	if (!foot) {
		pr_warning("Not grouping metric %s's events.\n", name);
		violate_nmi_constraint = true;
		return;
	}

	if (!violate_nmi_constraint)
		return;

	pr_warning("Try disabling the NMI watchdog to comply with the NO_NMI_WATCHDOG metric constraint:\n"
		   "    echo 0 > /proc/sys/kernel/nmi_watchdog\n"
		   "    perf stat ...\n"
		   "    echo 1 > /proc/sys/kernel/nmi_watchdog\n");
}

static bool metric__group_events(const struct pmu_metric *pm)
{
	switch (pm->event_grouping) {
	case MetricNoGroupEvents:
		return false;
	case MetricNoGroupEventsNmi:
		if (!sysctl__nmi_watchdog_enabled())
			return true;
		metric__watchdog_constraint_hint(pm->metric_name, /*foot=*/false);
		return false;
	case MetricNoGroupEventsSmt:
		return !smt_on();
	case MetricGroupEvents:
	default:
		return true;
	}
}

static void metric__free(struct metric *m)
{
	if (!m)
		return;

	zfree(&m->metric_refs);
	expr__ctx_free(m->pctx);
	zfree(&m->modifier);
	evlist__delete(m->evlist);
	free(m);
}

static struct metric *metric__new(const struct pmu_metric *pm,
				  const char *modifier,
				  bool metric_no_group,
				  int runtime,
				  const char *user_requested_cpu_list,
				  bool system_wide)
{
	struct metric *m;

	m = zalloc(sizeof(*m));
	if (!m)
		return NULL;

	m->pctx = expr__ctx_new();
	if (!m->pctx)
		goto out_err;

	m->pmu = pm->pmu ?: "cpu";
	m->metric_name = pm->metric_name;
	m->default_metricgroup_name = pm->default_metricgroup_name ?: "";
	m->modifier = NULL;
	if (modifier) {
		m->modifier = strdup(modifier);
		if (!m->modifier)
			goto out_err;
	}
	m->metric_expr = pm->metric_expr;
	m->metric_threshold = pm->metric_threshold;
	m->metric_unit = pm->unit;
	m->pctx->sctx.user_requested_cpu_list = NULL;
	if (user_requested_cpu_list) {
		m->pctx->sctx.user_requested_cpu_list = strdup(user_requested_cpu_list);
		if (!m->pctx->sctx.user_requested_cpu_list)
			goto out_err;
	}
	m->pctx->sctx.runtime = runtime;
	m->pctx->sctx.system_wide = system_wide;
	m->group_events = !metric_no_group && metric__group_events(pm);
	m->metric_refs = NULL;
	m->evlist = NULL;

	return m;
out_err:
	metric__free(m);
	return NULL;
}

static bool contains_metric_id(struct evsel **metric_events, int num_events,
			       const char *metric_id)
{
	int i;

	for (i = 0; i < num_events; i++) {
		if (!strcmp(evsel__metric_id(metric_events[i]), metric_id))
			return true;
	}
	return false;
}

/**
 * setup_metric_events - Find a group of events in metric_evlist that correspond
 *                       to the IDs from a parsed metric expression.
 * @pmu: The PMU for the IDs.
 * @ids: the metric IDs to match.
 * @metric_evlist: the list of perf events.
 * @out_metric_events: holds the created metric events array.
 */
static int setup_metric_events(const char *pmu, struct hashmap *ids,
			       struct evlist *metric_evlist,
			       struct evsel ***out_metric_events)
{
	struct evsel **metric_events;
	const char *metric_id;
	struct evsel *ev;
	size_t ids_size, matched_events, i;
	bool all_pmus = !strcmp(pmu, "all") || perf_pmus__num_core_pmus() == 1 || !is_pmu_core(pmu);

	*out_metric_events = NULL;
	ids_size = hashmap__size(ids);

	metric_events = calloc(ids_size + 1, sizeof(void *));
	if (!metric_events)
		return -ENOMEM;

	matched_events = 0;
	evlist__for_each_entry(metric_evlist, ev) {
		struct expr_id_data *val_ptr;

		/* Don't match events for the wrong hybrid PMU. */
		if (!all_pmus && ev->pmu && evsel__is_hybrid(ev) &&
		    strcmp(ev->pmu->name, pmu))
			continue;
		/*
		 * Check for duplicate events with the same name. For
		 * example, uncore_imc/cas_count_read/ will turn into 6
		 * events per socket on skylakex. Only the first such
		 * event is placed in metric_events.
		 */
		metric_id = evsel__metric_id(ev);
		if (contains_metric_id(metric_events, matched_events, metric_id))
			continue;
		/*
		 * Does this event belong to the parse context? For
		 * combined or shared groups, this metric may not care
		 * about this event.
		 */
		if (hashmap__find(ids, metric_id, &val_ptr)) {
			pr_debug("Matched metric-id %s to %s\n", metric_id, evsel__name(ev));
			metric_events[matched_events++] = ev;

			if (matched_events >= ids_size)
				break;
		}
	}
	if (matched_events < ids_size) {
		free(metric_events);
		return -EINVAL;
	}
	for (i = 0; i < ids_size; i++) {
		ev = metric_events[i];
		ev->collect_stat = true;

		/*
		 * The metric leader points to the identically named
		 * event in metric_events.
		 */
		ev->metric_leader = ev;
		/*
		 * Mark two events with identical names in the same
		 * group (or globally) as being in use as uncore events
		 * may be duplicated for each pmu. Set the metric leader
		 * of such events to be the event that appears in
		 * metric_events.
		 */
		metric_id = evsel__metric_id(ev);
		evlist__for_each_entry_continue(metric_evlist, ev) {
			if (!strcmp(evsel__metric_id(ev), metric_id))
				ev->metric_leader = metric_events[i];
		}
	}
	*out_metric_events = metric_events;
	return 0;
}

static bool match_metric_or_groups(const char *metric_or_groups, const char *sought)
{
	int len;
	char *m;

	if (!sought)
		return false;
	if (!strcmp(sought, "all"))
		return true;
	if (!metric_or_groups)
		return !strcasecmp(sought, "No_group");
	len = strlen(sought);
	if (!strncasecmp(metric_or_groups, sought, len) &&
	    (metric_or_groups[len] == 0 || metric_or_groups[len] == ';'))
		return true;
	m = strchr(metric_or_groups, ';');
	return m && match_metric_or_groups(m + 1, sought);
}

static bool match_pm_metric_or_groups(const struct pmu_metric *pm, const char *pmu,
				      const char *metric_or_groups)
{
	const char *pm_pmu = pm->pmu ?: "cpu";

	if (strcmp(pmu, "all") && strcmp(pm_pmu, pmu))
		return false;

	return match_metric_or_groups(pm->metric_group, metric_or_groups) ||
	       match_metric_or_groups(pm->metric_name, metric_or_groups);
}

/** struct mep - RB-tree node for building printing information. */
struct mep {
	/** nd - RB-tree element. */
	struct rb_node nd;
	/** @metric_group: Owned metric group name; multiple groups are separated by ';'. */
	char *metric_group;
	const char *metric_name;
	const char *metric_desc;
	const char *metric_long_desc;
	const char *metric_expr;
	const char *metric_threshold;
	const char *metric_unit;
	const char *pmu_name;
};

static int mep_cmp(struct rb_node *rb_node, const void *entry)
{
	struct mep *a = container_of(rb_node, struct mep, nd);
	struct mep *b = (struct mep *)entry;
	int ret;

	ret = strcmp(a->metric_group, b->metric_group);
	if (ret)
		return ret;

	return strcmp(a->metric_name, b->metric_name);
}

static struct rb_node *mep_new(struct rblist *rl __maybe_unused, const void *entry)
{
	struct mep *me = malloc(sizeof(struct mep));

	if (!me)
		return NULL;

	memcpy(me, entry, sizeof(struct mep));
	return &me->nd;
}

static void mep_delete(struct rblist *rl __maybe_unused,
		       struct rb_node *nd)
{
	struct mep *me = container_of(nd, struct mep, nd);

	zfree(&me->metric_group);
	free(me);
}

static struct mep *mep_lookup(struct rblist *groups, const char *metric_group,
			      const char *metric_name)
{
	struct rb_node *nd;
	struct mep me = {
		.metric_group = strdup(metric_group),
		.metric_name = metric_name,
	};
	nd = rblist__find(groups, &me);
	if (nd) {
		free(me.metric_group);
		return container_of(nd, struct mep, nd);
	}
	rblist__add_node(groups, &me);
	nd = rblist__find(groups, &me);
	if (nd)
		return container_of(nd, struct mep, nd);
	return NULL;
}

static int metricgroup__add_to_mep_groups(const struct pmu_metric *pm,
					  struct rblist *groups)
{
	const char *g;
	char *omg, *mg;

	mg = strdup(pm->metric_group ?: pm->metric_name);
	if (!mg)
		return -ENOMEM;
	omg = mg;
	while ((g = strsep(&mg, ";")) != NULL) {
		struct mep *me;

		g = skip_spaces(g);
		if (strlen(g))
			me = mep_lookup(groups, g, pm->metric_name);
		else
			me = mep_lookup(groups, pm->metric_name, pm->metric_name);

		if (me) {
			me->metric_desc = pm->desc;
			me->metric_long_desc = pm->long_desc;
			me->metric_expr = pm->metric_expr;
			me->metric_threshold = pm->metric_threshold;
			me->metric_unit = pm->unit;
			me->pmu_name = pm->pmu;
		}
	}
	free(omg);

	return 0;
}

struct metricgroup_iter_data {
	pmu_metric_iter_fn fn;
	void *data;
};

static int metricgroup__sys_event_iter(const struct pmu_metric *pm,
				       const struct pmu_metrics_table *table,
				       void *data)
{
	struct metricgroup_iter_data *d = data;
	struct perf_pmu *pmu = NULL;

	if (!pm->metric_expr || !pm->compat)
		return 0;

	while ((pmu = perf_pmus__scan(pmu))) {

		if (!pmu->id || !pmu_uncore_identifier_match(pm->compat, pmu->id))
			continue;

		return d->fn(pm, table, d->data);
	}
	return 0;
}

static int metricgroup__add_to_mep_groups_callback(const struct pmu_metric *pm,
						   const struct pmu_metrics_table *table __maybe_unused,
						   void *vdata)
{
	struct rblist *groups = vdata;

	return metricgroup__add_to_mep_groups(pm, groups);
}

void metricgroup__print(const struct print_callbacks *print_cb, void *print_state)
{
	struct rblist groups;
	const struct pmu_metrics_table *table;
	struct rb_node *node, *next;

	rblist__init(&groups);
	groups.node_new = mep_new;
	groups.node_cmp = mep_cmp;
	groups.node_delete = mep_delete;
	table = pmu_metrics_table__find();
	if (table) {
		pmu_metrics_table__for_each_metric(table,
						   metricgroup__add_to_mep_groups_callback,
						   &groups);
	}
	{
		struct metricgroup_iter_data data = {
			.fn = metricgroup__add_to_mep_groups_callback,
			.data = &groups,
		};
		pmu_for_each_sys_metric(metricgroup__sys_event_iter, &data);
	}

	for (node = rb_first_cached(&groups.entries); node; node = next) {
		struct mep *me = container_of(node, struct mep, nd);

		print_cb->print_metric(print_state,
				       me->metric_group,
				       me->metric_name,
				       me->metric_desc,
				       me->metric_long_desc,
				       me->metric_expr,
				       me->metric_threshold,
				       me->metric_unit,
				       me->pmu_name);
		next = rb_next(node);
		rblist__remove_node(&groups, node);
	}
}

static const char *code_characters = ",-=@";

static int encode_metric_id(struct strbuf *sb, const char *x)
{
	char *c;
	int ret = 0;

	for (; *x; x++) {
		c = strchr(code_characters, *x);
		if (c) {
			ret = strbuf_addch(sb, '!');
			if (ret)
				break;

			ret = strbuf_addch(sb, '0' + (c - code_characters));
			if (ret)
				break;
		} else {
			ret = strbuf_addch(sb, *x);
			if (ret)
				break;
		}
	}
	return ret;
}

static int decode_metric_id(struct strbuf *sb, const char *x)
{
	const char *orig = x;
	size_t i;
	char c;
	int ret;

	for (; *x; x++) {
		c = *x;
		if (*x == '!') {
			x++;
			i = *x - '0';
			if (i >= strlen(code_characters)) {
				pr_err("Bad metric-id encoding in: '%s'", orig);
				return -1;
			}
			c = code_characters[i];
		}
		ret = strbuf_addch(sb, c);
		if (ret)
			return ret;
	}
	return 0;
}

static int decode_all_metric_ids(struct evlist *perf_evlist, const char *modifier)
{
	struct evsel *ev;
	struct strbuf sb = STRBUF_INIT;
	char *cur;
	int ret = 0;

	evlist__for_each_entry(perf_evlist, ev) {
		if (!ev->metric_id)
			continue;

		ret = strbuf_setlen(&sb, 0);
		if (ret)
			break;

		ret = decode_metric_id(&sb, ev->metric_id);
		if (ret)
			break;

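		/*
		 * Illustration (added note, not from the original source): an
		 * encoded metric-id such as "msr!3tsc!3" decodes back to
		 * "msr@tsc@" above; below, each '@' is rewritten to '/' so
		 * the displayed event name reads like "msr/tsc/".
		 */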
		free((char *)ev->metric_id);
		ev->metric_id = strdup(sb.buf);
		if (!ev->metric_id) {
			ret = -ENOMEM;
			break;
		}
		/*
		 * If the name is just the parsed event, use the metric-id to
		 * give a more friendly display version.
		 */
		if (strstr(ev->name, "metric-id=")) {
			bool has_slash = false;

			zfree(&ev->name);
			for (cur = strchr(sb.buf, '@') ; cur; cur = strchr(++cur, '@')) {
				*cur = '/';
				has_slash = true;
			}

			if (modifier) {
				if (!has_slash && !strchr(sb.buf, ':')) {
					ret = strbuf_addch(&sb, ':');
					if (ret)
						break;
				}
				ret = strbuf_addstr(&sb, modifier);
				if (ret)
					break;
			}
			ev->name = strdup(sb.buf);
			if (!ev->name) {
				ret = -ENOMEM;
				break;
			}
		}
	}
	strbuf_release(&sb);
	return ret;
}

static int metricgroup__build_event_string(struct strbuf *events,
					   const struct expr_parse_ctx *ctx,
					   const char *modifier,
					   bool group_events)
{
	struct hashmap_entry *cur;
	size_t bkt;
	bool no_group = true, has_tool_events = false;
	bool tool_events[TOOL_PMU__EVENT_MAX] = {false};
	int ret = 0;

#define RETURN_IF_NON_ZERO(x) do { if (x) return x; } while (0)

	hashmap__for_each_entry(ctx->ids, cur, bkt) {
		const char *sep, *rsep, *id = cur->pkey;
		enum tool_pmu_event ev;

		pr_debug("found event %s\n", id);

		/* Always move tool events outside of the group. */
		ev = tool_pmu__str_to_event(id);
		if (ev != TOOL_PMU__EVENT_NONE) {
			has_tool_events = true;
			tool_events[ev] = true;
			continue;
		}
		/* Separate events with commas and open the group if necessary. */
		if (no_group) {
			if (group_events) {
				ret = strbuf_addch(events, '{');
				RETURN_IF_NON_ZERO(ret);
			}

			no_group = false;
		} else {
			ret = strbuf_addch(events, ',');
			RETURN_IF_NON_ZERO(ret);
		}
		/*
		 * Encode the ID as an event string. Add a qualifier for
		 * metric_id that is the original name except with characters
		 * that parse-events can't parse replaced. For example,
		 * 'msr@tsc@' gets added as msr/tsc,metric-id=msr!3tsc!3/
		 */
		sep = strchr(id, '@');
		if (sep != NULL) {
			ret = strbuf_add(events, id, sep - id);
			RETURN_IF_NON_ZERO(ret);
			ret = strbuf_addch(events, '/');
			RETURN_IF_NON_ZERO(ret);
			rsep = strrchr(sep, '@');
			ret = strbuf_add(events, sep + 1, rsep - sep - 1);
			RETURN_IF_NON_ZERO(ret);
			ret = strbuf_addstr(events, ",metric-id=");
			RETURN_IF_NON_ZERO(ret);
			sep = rsep;
		} else {
			sep = strchr(id, ':');
			if (sep != NULL) {
				ret = strbuf_add(events, id, sep - id);
				RETURN_IF_NON_ZERO(ret);
			} else {
				ret = strbuf_addstr(events, id);
				RETURN_IF_NON_ZERO(ret);
			}
			ret = strbuf_addstr(events, "/metric-id=");
			RETURN_IF_NON_ZERO(ret);
		}
		ret = encode_metric_id(events, id);
		RETURN_IF_NON_ZERO(ret);
		ret = strbuf_addstr(events, "/");
		RETURN_IF_NON_ZERO(ret);

		if (sep != NULL) {
			ret = strbuf_addstr(events, sep + 1);
			RETURN_IF_NON_ZERO(ret);
		}
		if (modifier) {
			ret = strbuf_addstr(events, modifier);
			RETURN_IF_NON_ZERO(ret);
		}
	}
	if (!no_group && group_events) {
		ret = strbuf_addf(events, "}:W");
		RETURN_IF_NON_ZERO(ret);
	}
	if (has_tool_events) {
		int i;

		tool_pmu__for_each_event(i) {
			if (tool_events[i]) {
				if (!no_group) {
					ret = strbuf_addch(events, ',');
					RETURN_IF_NON_ZERO(ret);
				}
				no_group = false;
				ret = strbuf_addstr(events, tool_pmu__event_to_str(i));
				RETURN_IF_NON_ZERO(ret);
			}
		}
	}

	return ret;
#undef RETURN_IF_NON_ZERO
}

int __weak arch_get_runtimeparam(const struct pmu_metric *pm __maybe_unused)
{
	return 1;
}

/*
 * A singly linked list on the stack of the names of metrics being
 * processed. Used to identify recursion.
 */
struct visited_metric {
	const char *name;
	const struct visited_metric *parent;
};

struct metricgroup_add_iter_data {
	struct list_head *metric_list;
	const char *pmu;
	const char *metric_name;
	const char *modifier;
	int *ret;
	bool *has_match;
	bool metric_no_group;
	bool metric_no_threshold;
	const char *user_requested_cpu_list;
	bool system_wide;
	struct metric *root_metric;
	const struct visited_metric *visited;
	const struct pmu_metrics_table *table;
};

static int add_metric(struct list_head *metric_list,
		      const struct pmu_metric *pm,
		      const char *modifier,
		      bool metric_no_group,
		      bool metric_no_threshold,
		      const char *user_requested_cpu_list,
		      bool system_wide,
		      struct metric *root_metric,
		      const struct visited_metric *visited,
		      const struct pmu_metrics_table *table);

static int metricgroup__find_metric_callback(const struct pmu_metric *pm,
					     const struct pmu_metrics_table *table __maybe_unused,
					     void *vdata)
{
	struct pmu_metric *copied_pm = vdata;

	memcpy(copied_pm, pm, sizeof(*pm));
	return 0;
}

/**
 * resolve_metric - Locate metrics within the root metric and recursively add
 *                  references to them.
 * @metric_list: The list the metric is added to.
 * @pmu: The PMU name to resolve metrics on, or "all" for all PMUs.
 * @modifier: if non-null event modifiers like "u".
 * @metric_no_group: Should events written to events be grouped "{}" or
 *                   global. Grouping is the default but due to multiplexing the
 *                   user may override.
 * @user_requested_cpu_list: Command line specified CPUs to record on.
 * @system_wide: Are events for all processes recorded.
 * @root_metric: Metrics may reference other metrics to form a tree. In this
 *               case the root_metric holds all the IDs and a list of referenced
 *               metrics. When adding a root this argument is NULL.
 * @visited: A singly linked list of metric names being added that is used to
 *           detect recursion.
 * @table: The table that is searched for metrics, most commonly the table for the
 *         architecture perf is running upon.
 */
static int resolve_metric(struct list_head *metric_list,
			  struct perf_pmu *pmu,
			  const char *modifier,
			  bool metric_no_group,
			  bool metric_no_threshold,
			  const char *user_requested_cpu_list,
			  bool system_wide,
			  struct metric *root_metric,
			  const struct visited_metric *visited,
			  const struct pmu_metrics_table *table)
{
	struct hashmap_entry *cur;
	size_t bkt;
	struct to_resolve {
		/* The metric to resolve. */
		struct pmu_metric pm;
		/*
		 * The key in the IDs map, which may differ in case, etc.
		 * from pm->metric_name.
		 */
		const char *key;
	} *pending = NULL;
	int i, ret = 0, pending_cnt = 0;

	/*
	 * Iterate over all the parsed IDs and, if there's a matching metric,
	 * add it to the pending array.
	 */
	hashmap__for_each_entry(root_metric->pctx->ids, cur, bkt) {
		struct pmu_metric pm;

		if (pmu_metrics_table__find_metric(table, pmu, cur->pkey,
						   metricgroup__find_metric_callback,
						   &pm) != PMU_METRICS__NOT_FOUND) {
			pending = realloc(pending,
					  (pending_cnt + 1) * sizeof(struct to_resolve));
			if (!pending)
				return -ENOMEM;

			memcpy(&pending[pending_cnt].pm, &pm, sizeof(pm));
			pending[pending_cnt].key = cur->pkey;
			pending_cnt++;
		}
	}

	/* Remove the metric IDs from the context. */
	for (i = 0; i < pending_cnt; i++)
		expr__del_id(root_metric->pctx, pending[i].key);

	/*
	 * Recursively add all the metrics, IDs are added to the root metric's
	 * context.
	 */
	for (i = 0; i < pending_cnt; i++) {
		ret = add_metric(metric_list, &pending[i].pm, modifier, metric_no_group,
				 metric_no_threshold, user_requested_cpu_list, system_wide,
				 root_metric, visited, table);
		if (ret)
			break;
	}

	free(pending);
	return ret;
}

/**
 * __add_metric - Add a metric to metric_list.
 * @metric_list: The list the metric is added to.
 * @pm: The pmu_metric containing the metric to be added.
 * @modifier: if non-null event modifiers like "u".
 * @metric_no_group: Should events written to events be grouped "{}" or
 *                   global. Grouping is the default but due to multiplexing the
 *                   user may override.
 * @metric_no_threshold: Should threshold expressions be ignored?
 * @runtime: A special argument for the parser only known at runtime.
 * @user_requested_cpu_list: Command line specified CPUs to record on.
 * @system_wide: Are events for all processes recorded.
 * @root_metric: Metrics may reference other metrics to form a tree. In this
 *               case the root_metric holds all the IDs and a list of referenced
 *               metrics. When adding a root this argument is NULL.
 * @visited: A singly linked list of metric names being added that is used to
 *           detect recursion.
 * @table: The table that is searched for metrics, most commonly the table for the
 *         architecture perf is running upon.
 */
static int __add_metric(struct list_head *metric_list,
			const struct pmu_metric *pm,
			const char *modifier,
			bool metric_no_group,
			bool metric_no_threshold,
			int runtime,
			const char *user_requested_cpu_list,
			bool system_wide,
			struct metric *root_metric,
			const struct visited_metric *visited,
			const struct pmu_metrics_table *table)
{
	const struct visited_metric *vm;
	int ret;
	bool is_root = !root_metric;
	const char *expr;
	struct visited_metric visited_node = {
		.name = pm->metric_name,
		.parent = visited,
	};

	for (vm = visited; vm; vm = vm->parent) {
		if (!strcmp(pm->metric_name, vm->name)) {
			pr_err("failed: recursion detected for %s\n", pm->metric_name);
			return -1;
		}
	}

	if (is_root) {
		/*
		 * This metric is the root of a tree and may reference other
		 * metrics that are added recursively.
		 */
		root_metric = metric__new(pm, modifier, metric_no_group, runtime,
					  user_requested_cpu_list, system_wide);
		if (!root_metric)
			return -ENOMEM;

	} else {
		int cnt = 0;

		/*
		 * This metric was referenced in a metric higher in the
		 * tree. Check if the same metric is already resolved in the
		 * metric_refs list.
		 */
		if (root_metric->metric_refs) {
			for (; root_metric->metric_refs[cnt].metric_name; cnt++) {
				if (!strcmp(pm->metric_name,
					    root_metric->metric_refs[cnt].metric_name))
					return 0;
			}
		}

		/* Create reference. Need space for the entry and the terminator. */
		root_metric->metric_refs = realloc(root_metric->metric_refs,
						   (cnt + 2) * sizeof(struct metric_ref));
		if (!root_metric->metric_refs)
			return -ENOMEM;

		/*
		 * Intentionally passing just const char pointers,
		 * from the 'pm' object, so they never go away. We don't
		 * need to change them, so there's no need to create
		 * our own copy.
		 */
		root_metric->metric_refs[cnt].metric_name = pm->metric_name;
		root_metric->metric_refs[cnt].metric_expr = pm->metric_expr;

		/* Null terminate array. */
		root_metric->metric_refs[cnt+1].metric_name = NULL;
		root_metric->metric_refs[cnt+1].metric_expr = NULL;
	}

	/*
	 * For both the parent and referenced metrics, we parse
	 * all the metric's IDs and add it to the root context.
	 */
	ret = 0;
	expr = pm->metric_expr;
	if (is_root && pm->metric_threshold) {
		/*
		 * Threshold expressions are built off the actual metric. Switch
		 * to use that in case of additional necessary events. Change
		 * the visited node name to avoid this being flagged as
		 * recursion. If the threshold events are disabled, just use the
		 * metric's name as a reference. This allows metric threshold
		 * computation if there are sufficient events.
		 */
		assert(strstr(pm->metric_threshold, pm->metric_name));
		expr = metric_no_threshold ? pm->metric_name : pm->metric_threshold;
		visited_node.name = "__threshold__";
	}
	if (expr__find_ids(expr, NULL, root_metric->pctx) < 0) {
		/* Broken metric. */
		ret = -EINVAL;
	}
	if (!ret) {
		/* Resolve referenced metrics. */
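		/*
		 * Added note: referenced metrics are looked up on the
		 * metric's own PMU when the JSON names one; otherwise the
		 * first core PMU is used as the fallback below.
		 */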
		struct perf_pmu *pmu;

		if (pm->pmu && pm->pmu[0] != '\0')
			pmu = perf_pmus__find(pm->pmu);
		else
			pmu = perf_pmus__scan_core(/*pmu=*/NULL);

		ret = resolve_metric(metric_list, pmu, modifier, metric_no_group,
				     metric_no_threshold, user_requested_cpu_list,
				     system_wide, root_metric, &visited_node,
				     table);
	}
	if (ret) {
		if (is_root)
			metric__free(root_metric);

	} else if (is_root)
		list_add(&root_metric->nd, metric_list);

	return ret;
}

static int add_metric(struct list_head *metric_list,
		      const struct pmu_metric *pm,
		      const char *modifier,
		      bool metric_no_group,
		      bool metric_no_threshold,
		      const char *user_requested_cpu_list,
		      bool system_wide,
		      struct metric *root_metric,
		      const struct visited_metric *visited,
		      const struct pmu_metrics_table *table)
{
	int ret = 0;

	pr_debug("metric expr %s for %s\n", pm->metric_expr, pm->metric_name);

	if (!strstr(pm->metric_expr, "?")) {
		ret = __add_metric(metric_list, pm, modifier, metric_no_group,
				   metric_no_threshold, 0, user_requested_cpu_list,
				   system_wide, root_metric, visited, table);
	} else {
		int j, count;

		count = arch_get_runtimeparam(pm);

		/*
		 * This loop creates multiple events, depending on the count
		 * value, and adds those events to metric_list.
		 */
		for (j = 0; j < count && !ret; j++)
			ret = __add_metric(metric_list, pm, modifier, metric_no_group,
					   metric_no_threshold, j, user_requested_cpu_list,
					   system_wide, root_metric, visited, table);
	}

	return ret;
}

static int metricgroup__add_metric_sys_event_iter(const struct pmu_metric *pm,
						  const struct pmu_metrics_table *table __maybe_unused,
						  void *data)
{
	struct metricgroup_add_iter_data *d = data;
	int ret;

	if (!match_pm_metric_or_groups(pm, d->pmu, d->metric_name))
		return 0;

	ret = add_metric(d->metric_list, pm, d->modifier, d->metric_no_group,
			 d->metric_no_threshold, d->user_requested_cpu_list,
			 d->system_wide, d->root_metric, d->visited, d->table);
	if (ret)
		goto out;

	*(d->has_match) = true;

out:
	*(d->ret) = ret;
	return ret;
}

/**
 * metric_list_cmp - list_sort comparator that sorts metrics with more events to
 *                   the front. Tool events are excluded from the count.
 */
static int metric_list_cmp(void *priv __maybe_unused, const struct list_head *l,
			   const struct list_head *r)
{
	const struct metric *left = container_of(l, struct metric, nd);
	const struct metric *right = container_of(r, struct metric, nd);
	struct expr_id_data *data;
	int i, left_count, right_count;

	left_count = hashmap__size(left->pctx->ids);
	tool_pmu__for_each_event(i) {
		if (!expr__get_id(left->pctx, tool_pmu__event_to_str(i), &data))
			left_count--;
	}

	right_count = hashmap__size(right->pctx->ids);
	tool_pmu__for_each_event(i) {
		if (!expr__get_id(right->pctx, tool_pmu__event_to_str(i), &data))
			right_count--;
	}

	return right_count - left_count;
}

/**
 * default_metricgroup_cmp - Implements complex key for the Default metricgroup
 *			     that first sorts by default_metricgroup_name, then
 *			     metric_name.
 */
static int default_metricgroup_cmp(void *priv __maybe_unused,
				   const struct list_head *l,
				   const struct list_head *r)
{
	const struct metric *left = container_of(l, struct metric, nd);
	const struct metric *right = container_of(r, struct metric, nd);
	int diff = strcmp(right->default_metricgroup_name, left->default_metricgroup_name);

	if (diff)
		return diff;

	return strcmp(right->metric_name, left->metric_name);
}

struct metricgroup__add_metric_data {
	struct list_head *list;
	const char *pmu;
	const char *metric_name;
	const char *modifier;
	const char *user_requested_cpu_list;
	bool metric_no_group;
	bool metric_no_threshold;
	bool system_wide;
	bool has_match;
};

static int metricgroup__add_metric_callback(const struct pmu_metric *pm,
					    const struct pmu_metrics_table *table,
					    void *vdata)
{
	struct metricgroup__add_metric_data *data = vdata;
	int ret = 0;

	if (pm->metric_expr && match_pm_metric_or_groups(pm, data->pmu, data->metric_name)) {
		bool metric_no_group = data->metric_no_group ||
			match_metric_or_groups(pm->metricgroup_no_group, data->metric_name);

		data->has_match = true;
		ret = add_metric(data->list, pm, data->modifier, metric_no_group,
				 data->metric_no_threshold, data->user_requested_cpu_list,
				 data->system_wide, /*root_metric=*/NULL,
				 /*visited_metrics=*/NULL, table);
	}
	return ret;
}

/**
 * metricgroup__add_metric - Find and add a metric, or a metric group.
 * @pmu: The PMU name to search for metrics on, or "all" for all PMUs.
 * @metric_name: The name of the metric or metric group. For example, "IPC"
 *               could be the name of a metric and "TopDownL1" the name of a
 *               metric group.
 * @modifier: if non-null event modifiers like "u".
 * @metric_no_group: Should events written to events be grouped "{}" or
 *                   global. Grouping is the default but due to multiplexing the
 *                   user may override.
 * @user_requested_cpu_list: Command line specified CPUs to record on.
 * @system_wide: Are events for all processes recorded.
 * @metric_list: The list that the metric or metric group are added to.
 * @table: The table that is searched for metrics, most commonly the table for the
 *         architecture perf is running upon.
 */
static int metricgroup__add_metric(const char *pmu, const char *metric_name, const char *modifier,
				   bool metric_no_group, bool metric_no_threshold,
				   const char *user_requested_cpu_list,
				   bool system_wide,
				   struct list_head *metric_list,
				   const struct pmu_metrics_table *table)
{
	LIST_HEAD(list);
	int ret;
	bool has_match = false;

	{
		struct metricgroup__add_metric_data data = {
			.list = &list,
			.pmu = pmu,
			.metric_name = metric_name,
			.modifier = modifier,
			.metric_no_group = metric_no_group,
			.metric_no_threshold = metric_no_threshold,
			.user_requested_cpu_list = user_requested_cpu_list,
			.system_wide = system_wide,
			.has_match = false,
		};
		/*
		 * Iterate over all metrics seeing if metric matches either the
		 * name or group. When it does add the metric to the list.
		 */
		ret = pmu_metrics_table__for_each_metric(table, metricgroup__add_metric_callback,
							 &data);
		if (ret)
			goto out;

		has_match = data.has_match;
	}
	{
		struct metricgroup_iter_data data = {
			.fn = metricgroup__add_metric_sys_event_iter,
			.data = (void *) &(struct metricgroup_add_iter_data) {
				.metric_list = &list,
				.pmu = pmu,
				.metric_name = metric_name,
				.modifier = modifier,
				.metric_no_group = metric_no_group,
				.user_requested_cpu_list = user_requested_cpu_list,
				.system_wide = system_wide,
				.has_match = &has_match,
				.ret = &ret,
				.table = table,
			},
		};

		pmu_for_each_sys_metric(metricgroup__sys_event_iter, &data);
	}
	/* End of pmu events. */
	if (!has_match)
		ret = -EINVAL;

out:
	/*
	 * Splice onto metric_list so that the metrics can be released
	 * even if adding them failed.
	 */
	list_splice(&list, metric_list);
	return ret;
}

/**
 * metricgroup__add_metric_list - Find and add metrics, or metric groups,
 *                                specified in a list.
 * @pmu: A pmu to restrict the metrics to, or "all" for all PMUS.
 * @list: the list of metrics or metric groups. For example, "IPC,CPI,TopDownL1"
 *        would match the IPC and CPI metrics, and TopDownL1 would match all
 *        the metrics in the TopDownL1 group.
 * @metric_no_group: Should events written to events be grouped "{}" or
 *                   global. Grouping is the default but due to multiplexing the
 *                   user may override.
 * @user_requested_cpu_list: Command line specified CPUs to record on.
 * @system_wide: Are events for all processes recorded.
 * @metric_list: The list that metrics are added to.
 * @table: The table that is searched for metrics, most commonly the table for the
 *         architecture perf is running upon.
 */
static int metricgroup__add_metric_list(const char *pmu, const char *list,
					bool metric_no_group,
					bool metric_no_threshold,
					const char *user_requested_cpu_list,
					bool system_wide, struct list_head *metric_list,
					const struct pmu_metrics_table *table)
{
	char *list_itr, *list_copy, *metric_name, *modifier;
	int ret, count = 0;

	list_copy = strdup(list);
	if (!list_copy)
		return -ENOMEM;
	list_itr = list_copy;

	while ((metric_name = strsep(&list_itr, ",")) != NULL) {
		modifier = strchr(metric_name, ':');
		if (modifier)
			*modifier++ = '\0';

		ret = metricgroup__add_metric(pmu, metric_name, modifier,
					      metric_no_group, metric_no_threshold,
					      user_requested_cpu_list,
					      system_wide, metric_list, table);
		if (ret == -EINVAL)
			pr_err("Cannot find metric or group `%s'\n", metric_name);

		if (ret)
			break;

		count++;
	}
	free(list_copy);

	if (!ret) {
		/*
		 * Warn about nmi_watchdog if any parsed metrics had the
		 * NO_NMI_WATCHDOG constraint.
		 */
		metric__watchdog_constraint_hint(NULL, /*foot=*/true);
		/* No metrics. */
		if (count == 0)
			return -EINVAL;
	}
	return ret;
}

static void metricgroup__free_metrics(struct list_head *metric_list)
{
	struct metric *m, *tmp;

	list_for_each_entry_safe (m, tmp, metric_list, nd) {
		list_del_init(&m->nd);
		metric__free(m);
	}
}

/**
 * find_tool_events - Search for the presence of tool events in metric_list.
 * @metric_list: List to take metrics from.
 * @tool_events: Array of false values, indices corresponding to tool events set
 *               to true if tool event is found.
 */
static void find_tool_events(const struct list_head *metric_list,
			     bool tool_events[TOOL_PMU__EVENT_MAX])
{
	struct metric *m;

	list_for_each_entry(m, metric_list, nd) {
		int i;

		tool_pmu__for_each_event(i) {
			struct expr_id_data *data;

			if (!tool_events[i] &&
			    !expr__get_id(m->pctx, tool_pmu__event_to_str(i), &data))
				tool_events[i] = true;
		}
	}
}

/**
 * build_combined_expr_ctx - Make an expr_parse_ctx with all !group_events
 *                           metric IDs, as the IDs are held in a set,
 *                           duplicates will be removed.
 * @metric_list: List to take metrics from.
 * @combined: Out argument for result.
 */
static int build_combined_expr_ctx(const struct list_head *metric_list,
				   struct expr_parse_ctx **combined)
{
	struct hashmap_entry *cur;
	size_t bkt;
	struct metric *m;
	char *dup;
	int ret;

	*combined = expr__ctx_new();
	if (!*combined)
		return -ENOMEM;

	list_for_each_entry(m, metric_list, nd) {
		if (!m->group_events && !m->modifier) {
			hashmap__for_each_entry(m->pctx->ids, cur, bkt) {
				dup = strdup(cur->pkey);
				if (!dup) {
					ret = -ENOMEM;
					goto err_out;
				}
				ret = expr__add_id(*combined, dup);
				if (ret)
					goto err_out;
			}
		}
	}
	return 0;
err_out:
	expr__ctx_free(*combined);
	*combined = NULL;
	return ret;
}

/**
 * parse_ids - Build the event string for the ids and parse them creating an
 *             evlist. The encoded metric_ids are decoded.
 * @metric_no_merge: is metric sharing explicitly disabled.
 * @fake_pmu: use a fake PMU when testing metrics not supported by the current CPU.
 * @ids: the event identifiers parsed from a metric.
 * @modifier: any modifiers added to the events.
 * @group_events: should events be placed in a weak group.
 * @tool_events: entries set true if the tool event of index could be present in
 *               the overall list of metrics.
 * @out_evlist: the created list of events.
 */
static int parse_ids(bool metric_no_merge, bool fake_pmu,
		     struct expr_parse_ctx *ids, const char *modifier,
		     bool group_events, const bool tool_events[TOOL_PMU__EVENT_MAX],
		     struct evlist **out_evlist)
{
	struct parse_events_error parse_error;
	struct evlist *parsed_evlist;
	struct strbuf events = STRBUF_INIT;
	int ret;

	*out_evlist = NULL;
	if (!metric_no_merge || hashmap__size(ids->ids) == 0) {
		bool added_event = false;
		int i;
		/*
		 * We may fail to share events between metrics because a tool
		 * event isn't present in one metric. For example, a ratio of
		 * cache misses doesn't need duration_time but the same events
		 * may be used for a misses per second. Events without sharing
		 * implies multiplexing, that is best avoided, so place
		 * all tool events in every group.
		 *
		 * Also, there may be no ids/events in the expression parsing
		 * context because of constant evaluation, e.g.:
		 *    event1 if #smt_on else 0
		 * Add a tool event to avoid a parse error on an empty string.
		 */
		tool_pmu__for_each_event(i) {
			if (tool_events[i]) {
				char *tmp = strdup(tool_pmu__event_to_str(i));

				if (!tmp)
					return -ENOMEM;
				ids__insert(ids->ids, tmp);
				added_event = true;
			}
		}
		if (!added_event && hashmap__size(ids->ids) == 0) {
			char *tmp = strdup("duration_time");

			if (!tmp)
				return -ENOMEM;
			ids__insert(ids->ids, tmp);
		}
	}
	ret = metricgroup__build_event_string(&events, ids, modifier,
					      group_events);
	if (ret)
		return ret;

	parsed_evlist = evlist__new();
	if (!parsed_evlist) {
		ret = -ENOMEM;
		goto err_out;
	}
	pr_debug("Parsing metric events '%s'\n", events.buf);
	parse_events_error__init(&parse_error);
	ret = __parse_events(parsed_evlist, events.buf, /*pmu_filter=*/NULL,
			     &parse_error, fake_pmu, /*warn_if_reordered=*/false,
			     /*fake_tp=*/false);
	if (ret) {
		parse_events_error__print(&parse_error, events.buf);
		goto err_out;
	}
	ret = decode_all_metric_ids(parsed_evlist, modifier);
	if (ret)
		goto err_out;

	*out_evlist = parsed_evlist;
	parsed_evlist = NULL;
err_out:
	parse_events_error__exit(&parse_error);
	evlist__delete(parsed_evlist);
	strbuf_release(&events);
	return ret;
}

static int parse_groups(struct evlist *perf_evlist,
			const char *pmu, const char *str,
			bool metric_no_group,
			bool metric_no_merge,
			bool metric_no_threshold,
			const char *user_requested_cpu_list,
			bool system_wide,
			bool fake_pmu,
			struct rblist *metric_events_list,
			const struct pmu_metrics_table *table)
{
	struct evlist *combined_evlist = NULL;
	LIST_HEAD(metric_list);
	struct metric *m;
	bool tool_events[TOOL_PMU__EVENT_MAX] = {false};
	bool is_default = !strcmp(str, "Default");
	int ret;

	if (metric_events_list->nr_entries == 0)
		metricgroup__rblist_init(metric_events_list);
	ret = metricgroup__add_metric_list(pmu, str, metric_no_group, metric_no_threshold,
					   user_requested_cpu_list,
					   system_wide, &metric_list, table);
	if (ret)
		goto out;

	/* Sort metrics from largest to smallest. */
	list_sort(NULL, &metric_list, metric_list_cmp);

	if (!metric_no_merge) {
		struct expr_parse_ctx *combined = NULL;

		find_tool_events(&metric_list, tool_events);

		ret = build_combined_expr_ctx(&metric_list, &combined);

		if (!ret && combined && hashmap__size(combined->ids)) {
			ret = parse_ids(metric_no_merge, fake_pmu, combined,
					/*modifier=*/NULL,
					/*group_events=*/false,
					tool_events,
					&combined_evlist);
		}
		if (combined)
			expr__ctx_free(combined);

		if (ret)
			goto out;
	}

	if (is_default)
		list_sort(NULL, &metric_list, default_metricgroup_cmp);

	list_for_each_entry(m, &metric_list, nd) {
		struct metric_event *me;
		struct evsel **metric_events;
		struct evlist *metric_evlist = NULL;
		struct metric *n;
		struct metric_expr *expr;

		if (combined_evlist && !m->group_events) {
			metric_evlist = combined_evlist;
		} else if (!metric_no_merge) {
			/*
			 * See if the IDs for this metric are a subset of an
			 * earlier metric.
			 */
			list_for_each_entry(n, &metric_list, nd) {
				if (m == n)
					break;

				if (n->evlist == NULL)
					continue;

				if ((!m->modifier && n->modifier) ||
				    (m->modifier && !n->modifier) ||
				    (m->modifier && n->modifier &&
				     strcmp(m->modifier, n->modifier)))
					continue;

				if ((!m->pmu && n->pmu) ||
				    (m->pmu && !n->pmu) ||
				    (m->pmu && n->pmu && strcmp(m->pmu, n->pmu)))
					continue;

				if (expr__subset_of_ids(n->pctx, m->pctx)) {
					pr_debug("Events in '%s' fully contained within '%s'\n",
						 m->metric_name, n->metric_name);
					metric_evlist = n->evlist;
					break;
				}

			}
		}
		if (!metric_evlist) {
			ret = parse_ids(metric_no_merge, fake_pmu, m->pctx, m->modifier,
					m->group_events, tool_events, &m->evlist);
			if (ret)
				goto out;

			metric_evlist = m->evlist;
		}
		ret = setup_metric_events(fake_pmu ? "all" : m->pmu, m->pctx->ids,
					  metric_evlist, &metric_events);
		if (ret) {
			pr_err("Cannot resolve IDs for %s: %s\n",
			       m->metric_name, m->metric_expr);
			goto out;
		}

		me = metricgroup__lookup(metric_events_list, metric_events[0], true);

		expr = malloc(sizeof(struct metric_expr));
		if (!expr) {
			ret = -ENOMEM;
			free(metric_events);
			goto out;
		}

		expr->metric_refs = m->metric_refs;
		m->metric_refs = NULL;
		expr->metric_expr = m->metric_expr;
		if (m->modifier) {
			char *tmp;

			if (asprintf(&tmp, "%s:%s", m->metric_name, m->modifier) < 0)
				expr->metric_name = NULL;
			else
				expr->metric_name = tmp;
		} else
			expr->metric_name = strdup(m->metric_name);

		if (!expr->metric_name) {
			ret = -ENOMEM;
			free(metric_events);
			goto out;
		}
		expr->metric_threshold = m->metric_threshold;
		expr->metric_unit = m->metric_unit;
		expr->metric_events = metric_events;
		expr->runtime = m->pctx->sctx.runtime;
		expr->default_metricgroup_name = m->default_metricgroup_name;
		me->is_default = is_default;
		list_add(&expr->nd, &me->head);
	}

	if (combined_evlist) {
		evlist__splice_list_tail(perf_evlist, &combined_evlist->core.entries);
		evlist__delete(combined_evlist);
	}

	list_for_each_entry(m, &metric_list, nd) {
		if (m->evlist)
			evlist__splice_list_tail(perf_evlist, &m->evlist->core.entries);
	}

out:
	metricgroup__free_metrics(&metric_list);
	return ret;
}

int metricgroup__parse_groups(struct evlist *perf_evlist,
			      const char *pmu,
			      const char *str,
			      bool metric_no_group,
			      bool metric_no_merge,
			      bool metric_no_threshold,
			      const char *user_requested_cpu_list,
			      bool system_wide,
			      bool hardware_aware_grouping,
			      struct rblist *metric_events)
{
	const struct pmu_metrics_table *table = pmu_metrics_table__find();

	if (!table)
		return -EINVAL;
	if (hardware_aware_grouping)
		pr_debug("Use hardware aware grouping instead of traditional metric grouping method\n");

	return parse_groups(perf_evlist, pmu, str, metric_no_group, metric_no_merge,
			    metric_no_threshold, user_requested_cpu_list, system_wide,
			    /*fake_pmu=*/false, metric_events, table);
}

int metricgroup__parse_groups_test(struct evlist *evlist,
				   const struct pmu_metrics_table *table,
				   const char *str,
				   struct rblist *metric_events)
{
	return parse_groups(evlist, "all", str,
			    /*metric_no_group=*/false,
			    /*metric_no_merge=*/false,
			    /*metric_no_threshold=*/false,
			    /*user_requested_cpu_list=*/NULL,
			    /*system_wide=*/false,
			    /*fake_pmu=*/true, metric_events, table);
}

struct metricgroup__has_metric_data {
	const char *pmu;
	const char *metric_or_groups;
};
static int metricgroup__has_metric_or_groups_callback(const struct pmu_metric *pm,
						      const struct pmu_metrics_table *table
							__maybe_unused,
						      void *vdata)
{
	struct metricgroup__has_metric_data *data = vdata;

	return match_pm_metric_or_groups(pm, data->pmu, data->metric_or_groups) ? 1 : 0;
}

bool metricgroup__has_metric_or_groups(const char *pmu, const char *metric_or_groups)
{
	const struct pmu_metrics_table *table = pmu_metrics_table__find();
	struct metricgroup__has_metric_data data = {
		.pmu = pmu,
		.metric_or_groups = metric_or_groups,
	};

	if (!table)
		return false;

	return pmu_metrics_table__for_each_metric(table,
						  metricgroup__has_metric_or_groups_callback,
						  &data)
		? true : false;
}

static int metricgroup__topdown_max_level_callback(const struct pmu_metric *pm,
						   const struct pmu_metrics_table *table __maybe_unused,
						   void *data)
{
	unsigned int *max_level = data;
	unsigned int level;
	const char *p = strstr(pm->metric_group ?: "", "TopdownL");

	if (!p || p[8] == '\0')
		return 0;

	level = p[8] - '0';
	if (level > *max_level)
		*max_level = level;

	return 0;
}

unsigned int metricgroups__topdown_max_level(void)
{
	unsigned int max_level = 0;
	const struct pmu_metrics_table *table = pmu_metrics_table__find();

	if (!table)
		return false;

	pmu_metrics_table__for_each_metric(table, metricgroup__topdown_max_level_callback,
					   &max_level);
	return max_level;
}

int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
				    struct rblist *new_metric_events,
				    struct rblist *old_metric_events)
{
	unsigned int i;

	for (i = 0; i < rblist__nr_entries(old_metric_events); i++) {
		struct rb_node *nd;
		struct metric_event *old_me, *new_me;
		struct metric_expr *old_expr, *new_expr;
		struct evsel *evsel;
		size_t alloc_size;
		int idx, nr;

		nd = rblist__entry(old_metric_events, i);
		old_me = container_of(nd, struct metric_event, nd);

		evsel = evlist__find_evsel(evlist, old_me->evsel->core.idx);
		if (!evsel)
			return -EINVAL;
		new_me = metricgroup__lookup(new_metric_events, evsel, true);
		if (!new_me)
			return -ENOMEM;

		pr_debug("copying metric event for cgroup '%s': %s (idx=%d)\n",
			 cgrp ? cgrp->name : "root", evsel->name, evsel->core.idx);
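		/*
		 * Added note: duplicate each metric_expr of the old
		 * metric_event, remapping its metric_refs and metric_events
		 * to the evsels at the same indices in the new evlist.
		 */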
		list_for_each_entry(old_expr, &old_me->head, nd) {
			new_expr = malloc(sizeof(*new_expr));
			if (!new_expr)
				return -ENOMEM;

			new_expr->metric_expr = old_expr->metric_expr;
			new_expr->metric_threshold = old_expr->metric_threshold;
			new_expr->metric_name = strdup(old_expr->metric_name);
			if (!new_expr->metric_name)
				return -ENOMEM;

			new_expr->metric_unit = old_expr->metric_unit;
			new_expr->runtime = old_expr->runtime;

			if (old_expr->metric_refs) {
				/* calculate number of metric_refs */
				for (nr = 0; old_expr->metric_refs[nr].metric_name; nr++)
					continue;
				alloc_size = sizeof(*new_expr->metric_refs);
				new_expr->metric_refs = calloc(nr + 1, alloc_size);
				if (!new_expr->metric_refs) {
					free(new_expr);
					return -ENOMEM;
				}

				memcpy(new_expr->metric_refs, old_expr->metric_refs,
				       nr * alloc_size);
			} else {
				new_expr->metric_refs = NULL;
			}

			/* calculate number of metric_events */
			for (nr = 0; old_expr->metric_events[nr]; nr++)
				continue;
			alloc_size = sizeof(*new_expr->metric_events);
			new_expr->metric_events = calloc(nr + 1, alloc_size);
			if (!new_expr->metric_events) {
				zfree(&new_expr->metric_refs);
				free(new_expr);
				return -ENOMEM;
			}

			/* copy evsel in the same position */
			for (idx = 0; idx < nr; idx++) {
				evsel = old_expr->metric_events[idx];
				evsel = evlist__find_evsel(evlist, evsel->core.idx);
				if (evsel == NULL) {
					zfree(&new_expr->metric_events);
					zfree(&new_expr->metric_refs);
					free(new_expr);
					return -EINVAL;
				}
				new_expr->metric_events[idx] = evsel;
			}

			list_add(&new_expr->nd, &new_me->head);
		}
	}
	return 0;
}