// SPDX-License-Identifier: GPL-2.0
#include <dirent.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/param.h>
#include <fcntl.h>
#include <unistd.h>
#include <inttypes.h>
#include "annotate.h"
#include "build-id.h"
#include "cap.h"
#include "cpumap.h"
#include "dso.h"
#include "util.h" // lsdir()
#include "debug.h"
#include "event.h"
#include "machine.h"
#include "map.h"
#include "symbol.h"
#include "map_symbol.h"
#include "mem-events.h"
#include "mem-info.h"
#include "symsrc.h"
#include "strlist.h"
#include "intlist.h"
#include "namespaces.h"
#include "header.h"
#include "path.h"
#include <linux/ctype.h>
#include <linux/zalloc.h>

#include <elf.h>
#include <limits.h>
#include <symbol/kallsyms.h>
#include <sys/utsname.h>

static int dso__load_kernel_sym(struct dso *dso, struct map *map);
static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map);
static bool symbol__is_idle(const char *name);

int vmlinux_path__nr_entries;
char **vmlinux_path;

struct symbol_conf symbol_conf = {
	.nanosecs		= false,
	.use_modules		= true,
	.try_vmlinux_path	= true,
	.demangle		= true,
	.demangle_kernel	= false,
	.cumulate_callchain	= true,
	.time_quantum		= 100 * NSEC_PER_MSEC, /* 100ms */
	.show_hist_headers	= true,
	.symfs			= "",
	.event_group		= true,
	.inline_name		= true,
	.res_sample		= 0,
};

struct map_list_node {
	struct list_head node;
	struct map *map;
};

static struct map_list_node *map_list_node__new(void)
{
	return malloc(sizeof(struct map_list_node));
}

static enum dso_binary_type binary_type_symtab[] = {
	DSO_BINARY_TYPE__KALLSYMS,
	DSO_BINARY_TYPE__GUEST_KALLSYMS,
	DSO_BINARY_TYPE__JAVA_JIT,
	DSO_BINARY_TYPE__DEBUGLINK,
	DSO_BINARY_TYPE__BUILD_ID_CACHE,
	DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO,
	DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
	DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
	DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
	DSO_BINARY_TYPE__GNU_DEBUGDATA,
	DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
	DSO_BINARY_TYPE__GUEST_KMODULE,
	DSO_BINARY_TYPE__GUEST_KMODULE_COMP,
	DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
	DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP,
	DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
	DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO,
	DSO_BINARY_TYPE__NOT_FOUND,
};

#define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)

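/*
 * dso__load() walks binary_type_symtab[] above in array order, skipping
 * entries that dso__is_compatible_symtab_type() rejects for a given dso,
 * until it finds a usable symbol table and runtime image.
 */
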
static bool symbol_type__filter(char __symbol_type)
{
	// Since 'U' == undefined and 'u' == unique global symbol, we can't use toupper there
	char symbol_type = toupper(__symbol_type);

	return symbol_type == 'T' || symbol_type == 'W' || symbol_type == 'D' || symbol_type == 'B' ||
	       __symbol_type == 'u' || __symbol_type == 'l';
}

static int prefix_underscores_count(const char *str)
{
	const char *tail = str;

	while (*tail == '_')
		tail++;

	return tail - str;
}

const char * __weak arch__normalize_symbol_name(const char *name)
{
	return name;
}

int __weak arch__compare_symbol_names(const char *namea, const char *nameb)
{
	return strcmp(namea, nameb);
}

int __weak arch__compare_symbol_names_n(const char *namea, const char *nameb,
					unsigned int n)
{
	return strncmp(namea, nameb, n);
}

int __weak arch__choose_best_symbol(struct symbol *syma,
				    struct symbol *symb __maybe_unused)
{
	/* Avoid "SyS" kernel syscall aliases */
	if (strlen(syma->name) >= 3 && !strncmp(syma->name, "SyS", 3))
		return SYMBOL_B;
	if (strlen(syma->name) >= 10 && !strncmp(syma->name, "compat_SyS", 10))
		return SYMBOL_B;

	return SYMBOL_A;
}

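/*
 * Tie-break for two symbols at the same address: prefer non-zero size,
 * then a typed symbol over STT_NOTYPE, then non-weak over weak, then
 * global over local, then fewer leading underscores, then the longer
 * name, and finally let the architecture code decide.
 */
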
static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
{
	s64 a;
	s64 b;
	size_t na, nb;

	/* Prefer a symbol with non zero length */
	a = syma->end - syma->start;
	b = symb->end - symb->start;
	if ((b == 0) && (a > 0))
		return SYMBOL_A;
	else if ((a == 0) && (b > 0))
		return SYMBOL_B;

	if (syma->type != symb->type) {
		if (syma->type == STT_NOTYPE)
			return SYMBOL_B;
		if (symb->type == STT_NOTYPE)
			return SYMBOL_A;
	}

	/* Prefer a non weak symbol over a weak one */
	a = syma->binding == STB_WEAK;
	b = symb->binding == STB_WEAK;
	if (b && !a)
		return SYMBOL_A;
	if (a && !b)
		return SYMBOL_B;

	/* Prefer a global symbol over a non global one */
	a = syma->binding == STB_GLOBAL;
	b = symb->binding == STB_GLOBAL;
	if (a && !b)
		return SYMBOL_A;
	if (b && !a)
		return SYMBOL_B;

	/* Prefer a symbol with less underscores */
	a = prefix_underscores_count(syma->name);
	b = prefix_underscores_count(symb->name);
	if (b > a)
		return SYMBOL_A;
	else if (a > b)
		return SYMBOL_B;

	/* Choose the symbol with the longest name */
	na = strlen(syma->name);
	nb = strlen(symb->name);
	if (na > nb)
		return SYMBOL_A;
	else if (na < nb)
		return SYMBOL_B;

	return arch__choose_best_symbol(syma, symb);
}

void symbols__fixup_duplicate(struct rb_root_cached *symbols)
{
	struct rb_node *nd;
	struct symbol *curr, *next;

	if (symbol_conf.allow_aliases)
		return;

	nd = rb_first_cached(symbols);

	while (nd) {
		curr = rb_entry(nd, struct symbol, rb_node);
again:
		nd = rb_next(&curr->rb_node);
		if (!nd)
			break;

		next = rb_entry(nd, struct symbol, rb_node);
		if (curr->start != next->start)
			continue;

		if (choose_best_symbol(curr, next) == SYMBOL_A) {
			if (next->type == STT_GNU_IFUNC)
				curr->ifunc_alias = true;
			rb_erase_cached(&next->rb_node, symbols);
			symbol__delete(next);
			goto again;
		} else {
			if (curr->type == STT_GNU_IFUNC)
				next->ifunc_alias = true;
			nd = rb_next(&curr->rb_node);
			rb_erase_cached(&curr->rb_node, symbols);
			symbol__delete(curr);
		}
	}
}

/* Update zero-sized symbols using the address of the next symbol */
void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms)
{
	struct rb_node *nd, *prevnd = rb_first_cached(symbols);
	struct symbol *curr, *prev;

	if (prevnd == NULL)
		return;

	curr = rb_entry(prevnd, struct symbol, rb_node);

	for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
		prev = curr;
		curr = rb_entry(nd, struct symbol, rb_node);

		/*
		 * On some architectures the kernel text segment starts at a
		 * low memory address, while modules are located at high
		 * memory addresses (or vice versa). The gap between the end
		 * of the kernel text segment and the beginning of the first
		 * module's text segment is very big, so do not fill that gap
		 * and do not assign it to the kernel dso map (kallsyms).
		 *
		 * Also BPF code can be allocated separately from text
		 * segments and modules, so the last entry in a module should
		 * not fill the gap either.
		 *
		 * In kallsyms, module symbols carry the module name in
		 * brackets, like in:
		 *   ffffffffc1937000 T hdmi_driver_init [snd_hda_codec_hdmi]
		 */
		if (prev->end == prev->start) {
			const char *prev_mod;
			const char *curr_mod;

			if (!is_kallsyms) {
				prev->end = curr->start;
				continue;
			}

			prev_mod = strchr(prev->name, '[');
			curr_mod = strchr(curr->name, '[');

			/* Last kernel/module symbol mapped to end of page */
			if (!prev_mod != !curr_mod)
				prev->end = roundup(prev->end + 4096, 4096);
			/* Last symbol in the previous module */
			else if (prev_mod && strcmp(prev_mod, curr_mod))
				prev->end = roundup(prev->end + 4096, 4096);
			else
				prev->end = curr->start;

			pr_debug4("%s sym:%s end:%#" PRIx64 "\n",
				  __func__, prev->name, prev->end);
		}
	}

	/* Last entry */
	if (curr->end == curr->start)
		curr->end = roundup(curr->start, 4096) + 4096;
}

struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *name)
{
	size_t namelen = strlen(name) + 1;
	struct symbol *sym = calloc(1, (symbol_conf.priv_size +
					sizeof(*sym) + namelen));
	if (sym == NULL)
		return NULL;

	if (symbol_conf.priv_size) {
		if (symbol_conf.init_annotation) {
			struct annotation *notes = (void *)sym;
			annotation__init(notes);
		}
		sym = ((void *)sym) + symbol_conf.priv_size;
	}

	sym->start = start;
	sym->end = len ? start + len : start;
	sym->type = type;
	sym->binding = binding;
	sym->namelen = namelen - 1;

	pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n",
		  __func__, name, start, sym->end);
	memcpy(sym->name, name, namelen);

	return sym;
}

void symbol__delete(struct symbol *sym)
{
	if (symbol_conf.priv_size) {
		if (symbol_conf.init_annotation) {
			struct annotation *notes = symbol__annotation(sym);

			annotation__exit(notes);
		}
	}
	free(((void *)sym) - symbol_conf.priv_size);
}

void symbols__delete(struct rb_root_cached *symbols)
{
	struct symbol *pos;
	struct rb_node *next = rb_first_cached(symbols);

	while (next) {
		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);
		rb_erase_cached(&pos->rb_node, symbols);
		symbol__delete(pos);
	}
}

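/*
 * Insert into an rb tree keyed by sym->start. For kernel symbols the
 * well-known idle routines are flagged up front so they can be filtered
 * out later without string comparisons.
 */
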
void __symbols__insert(struct rb_root_cached *symbols,
		       struct symbol *sym, bool kernel)
{
	struct rb_node **p = &symbols->rb_root.rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = sym->start;
	struct symbol *s;
	bool leftmost = true;

	if (kernel) {
		const char *name = sym->name;
		/*
		 * ppc64 uses function descriptors and prefixes the entry
		 * point symbol name with a '.'. Remove it.
		 */
		if (name[0] == '.')
			name++;
		sym->idle = symbol__is_idle(name);
	}

	while (*p != NULL) {
		parent = *p;
		s = rb_entry(parent, struct symbol, rb_node);
		if (ip < s->start)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}
	rb_link_node(&sym->rb_node, parent, p);
	rb_insert_color_cached(&sym->rb_node, symbols, leftmost);
}

void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym)
{
	__symbols__insert(symbols, sym, false);
}

static struct symbol *symbols__find(struct rb_root_cached *symbols, u64 ip)
{
	struct rb_node *n;

	if (symbols == NULL)
		return NULL;

	n = symbols->rb_root.rb_node;

	while (n) {
		struct symbol *s = rb_entry(n, struct symbol, rb_node);

		if (ip < s->start)
			n = n->rb_left;
		else if (ip > s->end || (ip == s->end && ip != s->start))
			n = n->rb_right;
		else
			return s;
	}

	return NULL;
}

static struct symbol *symbols__first(struct rb_root_cached *symbols)
{
	struct rb_node *n = rb_first_cached(symbols);

	if (n)
		return rb_entry(n, struct symbol, rb_node);

	return NULL;
}

static struct symbol *symbols__last(struct rb_root_cached *symbols)
{
	struct rb_node *n = rb_last(&symbols->rb_root);

	if (n)
		return rb_entry(n, struct symbol, rb_node);

	return NULL;
}

static struct symbol *symbols__next(struct symbol *sym)
{
	struct rb_node *n = rb_next(&sym->rb_node);

	if (n)
		return rb_entry(n, struct symbol, rb_node);

	return NULL;
}

static int symbols__sort_name_cmp(const void *vlhs, const void *vrhs)
{
	const struct symbol *lhs = *((const struct symbol **)vlhs);
	const struct symbol *rhs = *((const struct symbol **)vrhs);

	return strcmp(lhs->name, rhs->name);
}

static struct symbol **symbols__sort_by_name(struct rb_root_cached *source, size_t *len)
{
	struct rb_node *nd;
	struct symbol **result;
	size_t i = 0, size = 0;

	for (nd = rb_first_cached(source); nd; nd = rb_next(nd))
		size++;

	result = malloc(sizeof(*result) * size);
	if (!result)
		return NULL;

	for (nd = rb_first_cached(source); nd; nd = rb_next(nd)) {
		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);

		result[i++] = pos;
	}
	qsort(result, size, sizeof(*result), symbols__sort_name_cmp);
	*len = size;
	return result;
}

int symbol__match_symbol_name(const char *name, const char *str,
			      enum symbol_tag_include includes)
{
	const char *versioning;

	if (includes == SYMBOL_TAG_INCLUDE__DEFAULT_ONLY &&
	    (versioning = strstr(name, "@@"))) {
		int len = strlen(str);

		if (len < versioning - name)
			len = versioning - name;

		return arch__compare_symbol_names_n(name, str, len);
	} else
		return arch__compare_symbol_names(name, str);
}

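/*
 * Binary search over the name-sorted array built by
 * symbols__sort_by_name(). Unless only the default-versioned symbol is
 * wanted, walk back to the first entry with the same name so that
 * aliases are returned consistently.
 */
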
static struct symbol *symbols__find_by_name(struct symbol *symbols[],
					    size_t symbols_len,
					    const char *name,
					    enum symbol_tag_include includes,
					    size_t *found_idx)
{
	size_t i, lower = 0, upper = symbols_len;
	struct symbol *s = NULL;

	if (found_idx)
		*found_idx = SIZE_MAX;

	if (!symbols_len)
		return NULL;

	while (lower < upper) {
		int cmp;

		i = (lower + upper) / 2;
		cmp = symbol__match_symbol_name(symbols[i]->name, name, includes);

		if (cmp > 0)
			upper = i;
		else if (cmp < 0)
			lower = i + 1;
		else {
			if (found_idx)
				*found_idx = i;
			s = symbols[i];
			break;
		}
	}
	if (s && includes != SYMBOL_TAG_INCLUDE__DEFAULT_ONLY) {
		/* return first symbol that has same name (if any) */
		for (; i > 0; i--) {
			struct symbol *tmp = symbols[i - 1];

			if (!arch__compare_symbol_names(tmp->name, s->name)) {
				if (found_idx)
					*found_idx = i - 1;
				s = tmp;
			} else
				break;
		}
	}
	assert(!found_idx || !s || s == symbols[*found_idx]);
	return s;
}

void dso__reset_find_symbol_cache(struct dso *dso)
{
	dso__set_last_find_result_addr(dso, 0);
	dso__set_last_find_result_symbol(dso, NULL);
}

void dso__insert_symbol(struct dso *dso, struct symbol *sym)
{
	__symbols__insert(dso__symbols(dso), sym, dso__kernel(dso));

	/* update the symbol cache if necessary */
	if (dso__last_find_result_addr(dso) >= sym->start &&
	    (dso__last_find_result_addr(dso) < sym->end ||
	     sym->start == sym->end)) {
		dso__set_last_find_result_symbol(dso, sym);
	}
}

void dso__delete_symbol(struct dso *dso, struct symbol *sym)
{
	rb_erase_cached(&sym->rb_node, dso__symbols(dso));
	symbol__delete(sym);
	dso__reset_find_symbol_cache(dso);
}

struct symbol *dso__find_symbol(struct dso *dso, u64 addr)
{
	if (dso__last_find_result_addr(dso) != addr || dso__last_find_result_symbol(dso) == NULL) {
		dso__set_last_find_result_addr(dso, addr);
		dso__set_last_find_result_symbol(dso, symbols__find(dso__symbols(dso), addr));
	}

	return dso__last_find_result_symbol(dso);
}

struct symbol *dso__find_symbol_nocache(struct dso *dso, u64 addr)
{
	return symbols__find(dso__symbols(dso), addr);
}

struct symbol *dso__first_symbol(struct dso *dso)
{
	return symbols__first(dso__symbols(dso));
}

struct symbol *dso__last_symbol(struct dso *dso)
{
	return symbols__last(dso__symbols(dso));
}

struct symbol *dso__next_symbol(struct symbol *sym)
{
	return symbols__next(sym);
}

struct symbol *dso__next_symbol_by_name(struct dso *dso, size_t *idx)
{
	if (*idx + 1 >= dso__symbol_names_len(dso))
		return NULL;

	++*idx;
	return dso__symbol_names(dso)[*idx];
}

/*
 * Returns first symbol that matched with @name.
 */
struct symbol *dso__find_symbol_by_name(struct dso *dso, const char *name, size_t *idx)
{
	struct symbol *s = symbols__find_by_name(dso__symbol_names(dso),
						 dso__symbol_names_len(dso),
						 name, SYMBOL_TAG_INCLUDE__NONE, idx);
	if (!s) {
		s = symbols__find_by_name(dso__symbol_names(dso), dso__symbol_names_len(dso),
					  name, SYMBOL_TAG_INCLUDE__DEFAULT_ONLY, idx);
	}
	return s;
}

void dso__sort_by_name(struct dso *dso)
{
	mutex_lock(dso__lock(dso));
	if (!dso__sorted_by_name(dso)) {
		size_t len;

		dso__set_symbol_names(dso, symbols__sort_by_name(dso__symbols(dso), &len));
		if (dso__symbol_names(dso)) {
			dso__set_symbol_names_len(dso, len);
			dso__set_sorted_by_name(dso);
		}
	}
	mutex_unlock(dso__lock(dso));
}

/*
 * While we find nice hex chars, build a long_val.
 * Return the number of chars processed.
 */
static int hex2u64(const char *ptr, u64 *long_val)
{
	char *p;

	*long_val = strtoull(ptr, &p, 16);

	return p - ptr;
}

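/*
 * Parse /proc/modules style lines, e.g.:
 *
 *   snd_hda_codec_hdmi 98304 1 - Live 0xffffffffc1937000
 *
 * The module name is everything up to the first space, the size follows
 * it, and the load address is the hex value after the last 'x'.
 */
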
int modules__parse(const char *filename, void *arg,
		   int (*process_module)(void *arg, const char *name,
					 u64 start, u64 size))
{
	char *line = NULL;
	size_t n;
	FILE *file;
	int err = 0;

	file = fopen(filename, "r");
	if (file == NULL)
		return -1;

	while (1) {
		char name[PATH_MAX];
		u64 start, size;
		char *sep, *endptr;
		ssize_t line_len;

		line_len = getline(&line, &n, file);
		if (line_len < 0) {
			if (feof(file))
				break;
			err = -1;
			goto out;
		}

		if (!line) {
			err = -1;
			goto out;
		}

		line[--line_len] = '\0'; /* \n */

		sep = strrchr(line, 'x');
		if (sep == NULL)
			continue;

		hex2u64(sep + 1, &start);

		sep = strchr(line, ' ');
		if (sep == NULL)
			continue;

		*sep = '\0';

		scnprintf(name, sizeof(name), "[%s]", line);

		size = strtoul(sep + 1, &endptr, 0);
		if (*endptr != ' ' && *endptr != '\t')
			continue;

		err = process_module(arg, name, start, size);
		if (err)
			break;
	}
out:
	free(line);
	fclose(file);
	return err;
}

/*
 * These are symbols in the kernel image, so make sure that
 * sym is from a kernel DSO.
 */
static bool symbol__is_idle(const char *name)
{
	const char * const idle_symbols[] = {
		"acpi_idle_do_entry",
		"acpi_processor_ffh_cstate_enter",
		"arch_cpu_idle",
		"cpu_idle",
		"cpu_startup_entry",
		"idle_cpu",
		"intel_idle",
		"intel_idle_ibrs",
		"default_idle",
		"native_safe_halt",
		"enter_idle",
		"exit_idle",
		"mwait_idle",
		"mwait_idle_with_hints",
		"mwait_idle_with_hints.constprop.0",
		"poll_idle",
		"ppc64_runlatch_off",
		"pseries_dedicated_idle_sleep",
		"psw_idle",
		"psw_idle_exit",
		NULL
	};
	int i;
	static struct strlist *idle_symbols_list;

	if (idle_symbols_list)
		return strlist__has_entry(idle_symbols_list, name);

	idle_symbols_list = strlist__new(NULL, NULL);

	for (i = 0; idle_symbols[i]; i++)
		strlist__add(idle_symbols_list, idle_symbols[i]);

	return strlist__has_entry(idle_symbols_list, name);
}

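/*
 * kallsyms__parse() callback, called once per /proc/kallsyms line, e.g.:
 *
 *   ffffffff81000000 T _text
 *   ffffffffc1937000 t hdmi_pin_setup	[snd_hda_codec_hdmi]
 *
 * (symbol names above are illustrative; module symbols keep the
 * "\t[module]" suffix in sym->name until the maps are split)
 */
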
static int map__process_kallsym_symbol(void *arg, const char *name,
				       char type, u64 start)
{
	struct symbol *sym;
	struct dso *dso = arg;
	struct rb_root_cached *root = dso__symbols(dso);

	if (!symbol_type__filter(type))
		return 0;

	/* Ignore local symbols for ARM modules */
	if (name[0] == '$')
		return 0;

	/*
	 * module symbols are not sorted so we add all
	 * symbols, setting length to 0, and rely on
	 * symbols__fixup_end() to fix it up.
	 */
	sym = symbol__new(start, 0, kallsyms2elf_binding(type), kallsyms2elf_type(type), name);
	if (sym == NULL)
		return -ENOMEM;
	/*
	 * We will pass the symbols to the filter later, in
	 * maps__split_kallsyms, when we have split the maps per module
	 */
	__symbols__insert(root, sym, !strchr(name, '['));

	return 0;
}

/*
 * Loads the function entries in /proc/kallsyms into kernel_map->dso,
 * so that we can in the next step set the symbol ->end address and then
 * call maps__split_kallsyms.
 */
static int dso__load_all_kallsyms(struct dso *dso, const char *filename)
{
	return kallsyms__parse(filename, dso, map__process_kallsym_symbol);
}

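/*
 * Re-file each kallsyms symbol into whichever kcore map contains it,
 * making its addresses relative to that map. Symbols that fall outside
 * every map are dropped.
 */
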
static int maps__split_kallsyms_for_kcore(struct maps *kmaps, struct dso *dso)
{
	struct symbol *pos;
	int count = 0;
	struct rb_root_cached *root = dso__symbols(dso);
	struct rb_root_cached old_root = *root;
	struct rb_node *next = rb_first_cached(root);

	if (!kmaps)
		return -1;

	*root = RB_ROOT_CACHED;

	while (next) {
		struct map *curr_map;
		struct dso *curr_map_dso;
		char *module;

		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);

		rb_erase_cached(&pos->rb_node, &old_root);
		RB_CLEAR_NODE(&pos->rb_node);
		module = strchr(pos->name, '\t');
		if (module)
			*module = '\0';

		curr_map = maps__find(kmaps, pos->start);

		if (!curr_map) {
			symbol__delete(pos);
			continue;
		}
		curr_map_dso = map__dso(curr_map);
		pos->start -= map__start(curr_map) - map__pgoff(curr_map);
		if (pos->end > map__end(curr_map))
			pos->end = map__end(curr_map);
		if (pos->end)
			pos->end -= map__start(curr_map) - map__pgoff(curr_map);
		symbols__insert(dso__symbols(curr_map_dso), pos);
		++count;
		map__put(curr_map);
	}

	/* Symbols have been adjusted */
	dso__set_adjust_symbols(dso, true);

	return count;
}

/*
 * Split the symbols into maps, making sure there are no overlaps, i.e. the
 * kernel range is broken in several maps, named [kernel].N, as we don't have
 * the original ELF section names that vmlinux has.
 */
static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
				struct map *initial_map)
{
	struct machine *machine;
	struct map *curr_map = map__get(initial_map);
	struct symbol *pos;
	int count = 0, moved = 0;
	struct rb_root_cached *root = dso__symbols(dso);
	struct rb_node *next = rb_first_cached(root);
	int kernel_range = 0;
	bool x86_64;

	if (!kmaps)
		return -1;

	machine = maps__machine(kmaps);

	x86_64 = machine__is(machine, "x86_64");

	while (next) {
		char *module;

		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);

		module = strchr(pos->name, '\t');
		if (module) {
			struct dso *curr_map_dso;

			if (!symbol_conf.use_modules)
				goto discard_symbol;

			*module++ = '\0';
			curr_map_dso = map__dso(curr_map);
			if (strcmp(dso__short_name(curr_map_dso), module)) {
				if (!RC_CHK_EQUAL(curr_map, initial_map) &&
				    dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST &&
				    machine__is_default_guest(machine)) {
					/*
					 * We assume all symbols of a module are
					 * contiguous in kallsyms, so curr_map
					 * points to a module and all its
					 * symbols are in its kmap. Mark it as
					 * loaded.
					 */
					dso__set_loaded(curr_map_dso);
				}

				map__zput(curr_map);
				curr_map = maps__find_by_name(kmaps, module);
				if (curr_map == NULL) {
					pr_debug("%s/proc/{kallsyms,modules} "
						 "inconsistency while looking "
						 "for \"%s\" module!\n",
						 machine->root_dir, module);
					curr_map = map__get(initial_map);
					goto discard_symbol;
				}
				curr_map_dso = map__dso(curr_map);
				if (dso__loaded(curr_map_dso) &&
				    !machine__is_default_guest(machine))
					goto discard_symbol;
			}
			/*
			 * So that we look just like we get from .ko files,
			 * i.e. not prelinked, relative to initial_map->start.
			 */
			pos->start = map__map_ip(curr_map, pos->start);
			pos->end = map__map_ip(curr_map, pos->end);
		} else if (x86_64 && is_entry_trampoline(pos->name)) {
			/*
			 * These symbols are not needed anymore since the
			 * trampoline maps refer to the text section and its
			 * symbols instead. Avoid having to deal with
			 * relocations, and the assumption that the first symbol
			 * is the start of kernel text, by simply removing the
			 * symbols at this point.
			 */
			goto discard_symbol;
		} else if (!RC_CHK_EQUAL(curr_map, initial_map)) {
			char dso_name[PATH_MAX];
			struct dso *ndso;

			if (delta) {
				/* Kernel was relocated at boot time */
				pos->start -= delta;
				pos->end -= delta;
			}

			if (count == 0) {
				map__zput(curr_map);
				curr_map = map__get(initial_map);
				goto add_symbol;
			}

			if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST)
				snprintf(dso_name, sizeof(dso_name),
					 "[guest.kernel].%d",
					 kernel_range++);
			else
				snprintf(dso_name, sizeof(dso_name),
					 "[kernel].%d",
					 kernel_range++);

			ndso = dso__new(dso_name);
			map__zput(curr_map);
			if (ndso == NULL)
				return -1;

			dso__set_kernel(ndso, dso__kernel(dso));

			curr_map = map__new2(pos->start, ndso);
			if (curr_map == NULL) {
				dso__put(ndso);
				return -1;
			}

			map__set_mapping_type(curr_map, MAPPING_TYPE__IDENTITY);
			if (maps__insert(kmaps, curr_map)) {
				map__zput(curr_map);
				dso__put(ndso);
				return -1;
			}
			++kernel_range;
		} else if (delta) {
			/* Kernel was relocated at boot time */
			pos->start -= delta;
			pos->end -= delta;
		}
add_symbol:
		if (!RC_CHK_EQUAL(curr_map, initial_map)) {
			struct dso *curr_map_dso = map__dso(curr_map);

			rb_erase_cached(&pos->rb_node, root);
			symbols__insert(dso__symbols(curr_map_dso), pos);
			++moved;
		} else
			++count;

		continue;
discard_symbol:
		rb_erase_cached(&pos->rb_node, root);
		symbol__delete(pos);
	}

	if (!RC_CHK_EQUAL(curr_map, initial_map) &&
	    dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST &&
	    machine__is_default_guest(maps__machine(kmaps))) {
		dso__set_loaded(map__dso(curr_map));
	}
	map__put(curr_map);
	return count + moved;
}

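/*
 * With kptr_restrict in effect the addresses in /proc/kallsyms and
 * /proc/modules read back as zeros, so files resolving to the restricted
 * path are treated as unusable.
 */
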
bool symbol__restricted_filename(const char *filename,
				 const char *restricted_filename)
{
	bool restricted = false;

	if (symbol_conf.kptr_restrict) {
		char *r = realpath(filename, NULL);

		if (r != NULL) {
			restricted = strcmp(r, restricted_filename) == 0;
			free(r);
			return restricted;
		}
	}

	return restricted;
}

struct module_info {
	struct rb_node rb_node;
	char *name;
	u64 start;
};

static void add_module(struct module_info *mi, struct rb_root *modules)
{
	struct rb_node **p = &modules->rb_node;
	struct rb_node *parent = NULL;
	struct module_info *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct module_info, rb_node);
		if (strcmp(mi->name, m->name) < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&mi->rb_node, parent, p);
	rb_insert_color(&mi->rb_node, modules);
}

static void delete_modules(struct rb_root *modules)
{
	struct module_info *mi;
	struct rb_node *next = rb_first(modules);

	while (next) {
		mi = rb_entry(next, struct module_info, rb_node);
		next = rb_next(&mi->rb_node);
		rb_erase(&mi->rb_node, modules);
		zfree(&mi->name);
		free(mi);
	}
}

static struct module_info *find_module(const char *name,
				       struct rb_root *modules)
{
	struct rb_node *n = modules->rb_node;

	while (n) {
		struct module_info *m;
		int cmp;

		m = rb_entry(n, struct module_info, rb_node);
		cmp = strcmp(name, m->name);
		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return m;
	}

	return NULL;
}

static int __read_proc_modules(void *arg, const char *name, u64 start,
			       u64 size __maybe_unused)
{
	struct rb_root *modules = arg;
	struct module_info *mi;

	mi = zalloc(sizeof(struct module_info));
	if (!mi)
		return -ENOMEM;

	mi->name = strdup(name);
	mi->start = start;

	if (!mi->name) {
		free(mi);
		return -ENOMEM;
	}

	add_module(mi, modules);

	return 0;
}

static int read_proc_modules(const char *filename, struct rb_root *modules)
{
	if (symbol__restricted_filename(filename, "/proc/modules"))
		return -1;

	if (modules__parse(filename, modules, __read_proc_modules)) {
		delete_modules(modules);
		return -1;
	}

	return 0;
}

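/*
 * Compare two /proc/modules style files: returns 0 only when both list
 * the same modules at the same load addresses.
 */
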
int compare_proc_modules(const char *from, const char *to)
{
	struct rb_root from_modules = RB_ROOT;
	struct rb_root to_modules = RB_ROOT;
	struct rb_node *from_node, *to_node;
	struct module_info *from_m, *to_m;
	int ret = -1;

	if (read_proc_modules(from, &from_modules))
		return -1;

	if (read_proc_modules(to, &to_modules))
		goto out_delete_from;

	from_node = rb_first(&from_modules);
	to_node = rb_first(&to_modules);
	while (from_node) {
		if (!to_node)
			break;

		from_m = rb_entry(from_node, struct module_info, rb_node);
		to_m = rb_entry(to_node, struct module_info, rb_node);

		if (from_m->start != to_m->start ||
		    strcmp(from_m->name, to_m->name))
			break;

		from_node = rb_next(from_node);
		to_node = rb_next(to_node);
	}

	if (!from_node && !to_node)
		ret = 0;

	delete_modules(&to_modules);
out_delete_from:
	delete_modules(&from_modules);

	return ret;
}

static int do_validate_kcore_modules_cb(struct map *old_map, void *data)
{
	struct rb_root *modules = data;
	struct module_info *mi;
	struct dso *dso;

	if (!__map__is_kmodule(old_map))
		return 0;

	dso = map__dso(old_map);
	/* Module must be in memory at the same address */
	mi = find_module(dso__short_name(dso), modules);
	if (!mi || mi->start != map__start(old_map))
		return -EINVAL;

	return 0;
}

static int do_validate_kcore_modules(const char *filename, struct maps *kmaps)
{
	struct rb_root modules = RB_ROOT;
	int err;

	err = read_proc_modules(filename, &modules);
	if (err)
		return err;

	err = maps__for_each_map(kmaps, do_validate_kcore_modules_cb, &modules);

	delete_modules(&modules);
	return err;
}

/*
 * If kallsyms is referenced by name then we look for base_name in the same
 * directory.
 */
static bool filename_from_kallsyms_filename(char *filename,
					    const char *base_name,
					    const char *kallsyms_filename)
{
	char *name;

	strcpy(filename, kallsyms_filename);
	name = strrchr(filename, '/');
	if (!name)
		return false;

	name += 1;

	if (!strcmp(name, "kallsyms")) {
		strcpy(name, base_name);
		return true;
	}

	return false;
}

static int validate_kcore_modules(const char *kallsyms_filename,
				  struct map *map)
{
	struct maps *kmaps = map__kmaps(map);
	char modules_filename[PATH_MAX];

	if (!kmaps)
		return -EINVAL;

	if (!filename_from_kallsyms_filename(modules_filename, "modules",
					     kallsyms_filename))
		return -EINVAL;

	if (do_validate_kcore_modules(modules_filename, kmaps))
		return -EINVAL;

	return 0;
}

static int validate_kcore_addresses(const char *kallsyms_filename,
				    struct map *map)
{
	struct kmap *kmap = map__kmap(map);

	if (!kmap)
		return -EINVAL;

	if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
		u64 start;

		if (kallsyms__get_function_start(kallsyms_filename,
						 kmap->ref_reloc_sym->name, &start))
			return -ENOENT;
		if (start != kmap->ref_reloc_sym->addr)
			return -EINVAL;
	}

	return validate_kcore_modules(kallsyms_filename, map);
}

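/*
 * State for the file__read_maps() callback below: one map is collected
 * per kcore program header into a temporary list before deciding how to
 * merge them into the kernel maps.
 */
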
struct kcore_mapfn_data {
	struct dso *dso;
	struct list_head maps;
};

static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
{
	struct kcore_mapfn_data *md = data;
	struct map_list_node *list_node = map_list_node__new();

	if (!list_node)
		return -ENOMEM;

	list_node->map = map__new2(start, md->dso);
	if (!list_node->map) {
		free(list_node);
		return -ENOMEM;
	}

	map__set_end(list_node->map, map__start(list_node->map) + len);
	map__set_pgoff(list_node->map, pgoff);

	list_add(&list_node->node, &md->maps);

	return 0;
}

static bool remove_old_maps(struct map *map, void *data)
{
	const struct map *map_to_save = data;

	/*
	 * We need to preserve eBPF maps even if they are covered by kcore,
	 * because we need to access eBPF dso for source data.
	 */
	return !RC_CHK_EQUAL(map, map_to_save) && !__map__is_bpf_prog(map);
}

static int dso__load_kcore(struct dso *dso, struct map *map,
			   const char *kallsyms_filename)
{
	struct maps *kmaps = map__kmaps(map);
	struct kcore_mapfn_data md;
	struct map *map_ref, *replacement_map = NULL;
	struct machine *machine;
	bool is_64_bit;
	int err, fd;
	char kcore_filename[PATH_MAX];
	u64 stext;

	if (!kmaps)
		return -EINVAL;

	machine = maps__machine(kmaps);

	/* This function requires that the map is the kernel map */
	if (!__map__is_kernel(map))
		return -EINVAL;

	if (!filename_from_kallsyms_filename(kcore_filename, "kcore",
					     kallsyms_filename))
		return -EINVAL;

	/* Modules and kernel must be present at their original addresses */
	if (validate_kcore_addresses(kallsyms_filename, map))
		return -EINVAL;

	md.dso = dso;
	INIT_LIST_HEAD(&md.maps);

	fd = open(kcore_filename, O_RDONLY);
	if (fd < 0) {
		pr_debug("Failed to open %s. Note /proc/kcore requires CAP_SYS_RAWIO capability to access.\n",
			 kcore_filename);
		return -EINVAL;
	}

	/* Read new maps into temporary lists */
	err = file__read_maps(fd, map__prot(map) & PROT_EXEC, kcore_mapfn, &md,
			      &is_64_bit);
	if (err)
		goto out_err;
	dso__set_is_64_bit(dso, is_64_bit);

	if (list_empty(&md.maps)) {
		err = -EINVAL;
		goto out_err;
	}

	/* Remove old maps */
	maps__remove_maps(kmaps, remove_old_maps, map);
	machine->trampolines_mapped = false;

	/* Find the kernel map using the '_stext' symbol */
	if (!kallsyms__get_function_start(kallsyms_filename, "_stext", &stext)) {
		u64 replacement_size = 0;
		struct map_list_node *new_node;

		list_for_each_entry(new_node, &md.maps, node) {
			struct map *new_map = new_node->map;
			u64 new_size = map__size(new_map);

			if (!(stext >= map__start(new_map) && stext < map__end(new_map)))
				continue;

			/*
			 * On some architectures, ARM64 for example, the kernel
			 * text can get allocated inside of the vmalloc segment.
			 * Select the smallest matching segment, in case stext
			 * falls within more than one in the list.
			 */
			if (!replacement_map || new_size < replacement_size) {
				replacement_map = new_map;
				replacement_size = new_size;
			}
		}
	}

	if (!replacement_map)
		replacement_map = list_entry(md.maps.next, struct map_list_node, node)->map;

	/*
	 * Update addresses of vmlinux map. Re-insert it to ensure maps are
	 * correctly ordered. Do this before using maps__merge_in() for the
	 * remaining maps so vmlinux gets split if necessary.
	 */
	map_ref = map__get(map);
	maps__remove(kmaps, map_ref);

	map__set_start(map_ref, map__start(replacement_map));
	map__set_end(map_ref, map__end(replacement_map));
	map__set_pgoff(map_ref, map__pgoff(replacement_map));
	map__set_mapping_type(map_ref, map__mapping_type(replacement_map));

	err = maps__insert(kmaps, map_ref);
	map__put(map_ref);
	if (err)
		goto out_err;

	/* Add new maps */
	while (!list_empty(&md.maps)) {
		struct map_list_node *new_node = list_entry(md.maps.next, struct map_list_node, node);
		struct map *new_map = new_node->map;

		list_del_init(&new_node->node);

		/* skip if replacement_map, already inserted above */
		if (!RC_CHK_EQUAL(new_map, replacement_map)) {
			/*
			 * Merge kcore map into existing maps,
			 * and ensure that current maps (eBPF)
			 * stay intact.
			 */
			if (maps__merge_in(kmaps, new_map)) {
				err = -EINVAL;
				goto out_err;
			}
		}
		free(new_node);
	}

	if (machine__is(machine, "x86_64")) {
		u64 addr;

		/*
		 * If one of the corresponding symbols is there, assume the
		 * entry trampoline maps are too.
		 */
		if (!kallsyms__get_function_start(kallsyms_filename,
						  ENTRY_TRAMPOLINE_NAME,
						  &addr))
			machine->trampolines_mapped = true;
	}

	/*
	 * Set the data type and long name so that kcore can be read via
	 * dso__data_read_addr().
	 */
	if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST)
		dso__set_binary_type(dso, DSO_BINARY_TYPE__GUEST_KCORE);
	else
		dso__set_binary_type(dso, DSO_BINARY_TYPE__KCORE);
	dso__set_long_name(dso, strdup(kcore_filename), true);

	close(fd);

	if (map__prot(map) & PROT_EXEC)
		pr_debug("Using %s for kernel object code\n", kcore_filename);
	else
		pr_debug("Using %s for kernel data\n", kcore_filename);

	return 0;

out_err:
	while (!list_empty(&md.maps)) {
		struct map_list_node *list_node;

		list_node = list_entry(md.maps.next, struct map_list_node, node);
		list_del_init(&list_node->node);
		map__zput(list_node->map);
		free(list_node);
	}
	close(fd);
	return err;
}

/*
 * If the kernel is relocated at boot time, kallsyms won't match. Compute the
 * delta based on the relocation reference symbol.
 */
static int kallsyms__delta(struct kmap *kmap, const char *filename, u64 *delta)
{
	u64 addr;

	if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
		return 0;

	if (kallsyms__get_function_start(filename, kmap->ref_reloc_sym->name, &addr))
		return -1;

	*delta = addr - kmap->ref_reloc_sym->addr;
	return 0;
}

int __dso__load_kallsyms(struct dso *dso, const char *filename,
			 struct map *map, bool no_kcore)
{
	struct kmap *kmap = map__kmap(map);
	u64 delta = 0;

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return -1;

	if (!kmap || !kmap->kmaps)
		return -1;

	if (dso__load_all_kallsyms(dso, filename) < 0)
		return -1;

	if (kallsyms__delta(kmap, filename, &delta))
		return -1;

	symbols__fixup_end(dso__symbols(dso), true);
	symbols__fixup_duplicate(dso__symbols(dso));

	if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST)
		dso__set_symtab_type(dso, DSO_BINARY_TYPE__GUEST_KALLSYMS);
	else
		dso__set_symtab_type(dso, DSO_BINARY_TYPE__KALLSYMS);

	if (!no_kcore && !dso__load_kcore(dso, map, filename))
		return maps__split_kallsyms_for_kcore(kmap->kmaps, dso);
	else
		return maps__split_kallsyms(kmap->kmaps, dso, delta, map);
}

int dso__load_kallsyms(struct dso *dso, const char *filename,
		       struct map *map)
{
	return __dso__load_kallsyms(dso, filename, map, false);
}

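/*
 * Parse a /tmp/perf-<pid>.map style file: one "<start> <size> <name>"
 * line per JITed function, with start and size in hex, e.g.:
 *
 *   40b12000 28 jitted_function_1
 *
 * (the symbol name above is illustrative)
 */
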
static int dso__load_perf_map(const char *map_path, struct dso *dso)
{
	char *line = NULL;
	size_t n;
	FILE *file;
	int nr_syms = 0;

	file = fopen(map_path, "r");
	if (file == NULL)
		goto out_failure;

	while (!feof(file)) {
		u64 start, size;
		struct symbol *sym;
		int line_len, len;

		line_len = getline(&line, &n, file);
		if (line_len < 0)
			break;

		if (!line)
			goto out_failure;

		line[--line_len] = '\0'; /* \n */

		len = hex2u64(line, &start);

		len++;
		if (len + 2 >= line_len)
			continue;

		len += hex2u64(line + len, &size);

		len++;
		if (len + 2 >= line_len)
			continue;

		sym = symbol__new(start, size, STB_GLOBAL, STT_FUNC, line + len);

		if (sym == NULL)
			goto out_delete_line;

		symbols__insert(dso__symbols(dso), sym);
		nr_syms++;
	}

	free(line);
	fclose(file);

	return nr_syms;

out_delete_line:
	free(line);
out_failure:
	return -1;
}

#ifdef HAVE_LIBBFD_SUPPORT
#define PACKAGE 'perf'
#include <bfd.h>

static int bfd_symbols__cmpvalue(const void *a, const void *b)
{
	const asymbol *as = *(const asymbol **)a, *bs = *(const asymbol **)b;

	if (bfd_asymbol_value(as) != bfd_asymbol_value(bs))
		return bfd_asymbol_value(as) - bfd_asymbol_value(bs);

	return bfd_asymbol_name(as)[0] - bfd_asymbol_name(bs)[0];
}

static int bfd2elf_binding(asymbol *symbol)
{
	if (symbol->flags & BSF_WEAK)
		return STB_WEAK;
	if (symbol->flags & BSF_GLOBAL)
		return STB_GLOBAL;
	if (symbol->flags & BSF_LOCAL)
		return STB_LOCAL;
	return -1;
}

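/*
 * Load symbols via libbfd. ELF images are rejected here and left to the
 * regular symsrc path; this is used for other flavours bfd understands,
 * e.g. PE/COFF binaries.
 */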
int dso__load_bfd_symbols(struct dso *dso, const char *debugfile)
{
	int err = -1;
	long symbols_size, symbols_count, i;
	asection *section;
	asymbol **symbols, *sym;
	struct symbol *symbol;
	bfd *abfd;
	u64 start, len;

	abfd = bfd_openr(debugfile, NULL);
	if (!abfd)
		return -1;

	if (!bfd_check_format(abfd, bfd_object)) {
		pr_debug2("%s: cannot read %s bfd file.\n", __func__,
			  dso__long_name(dso));
		goto out_close;
	}

	if (bfd_get_flavour(abfd) == bfd_target_elf_flavour)
		goto out_close;

	symbols_size = bfd_get_symtab_upper_bound(abfd);
	if (symbols_size == 0) {
		bfd_close(abfd);
		return 0;
	}

	if (symbols_size < 0)
		goto out_close;

	symbols = malloc(symbols_size);
	if (!symbols)
		goto out_close;

	symbols_count = bfd_canonicalize_symtab(abfd, symbols);
	if (symbols_count < 0)
		goto out_free;

	section = bfd_get_section_by_name(abfd, ".text");
	if (section) {
		for (i = 0; i < symbols_count; ++i) {
			if (!strcmp(bfd_asymbol_name(symbols[i]), "__ImageBase") ||
			    !strcmp(bfd_asymbol_name(symbols[i]), "__image_base__"))
				break;
		}
		if (i < symbols_count) {
			/* PE symbols can only have 4 bytes, so use .text high bits */
			u64 text_offset = (section->vma - (u32)section->vma)
				+ (u32)bfd_asymbol_value(symbols[i]);
			dso__set_text_offset(dso, text_offset);
			dso__set_text_end(dso, (section->vma - text_offset) + section->size);
		} else {
			dso__set_text_offset(dso, section->vma - section->filepos);
			dso__set_text_end(dso, section->filepos + section->size);
		}
	}

	qsort(symbols, symbols_count, sizeof(asymbol *), bfd_symbols__cmpvalue);

#ifdef bfd_get_section
#define bfd_asymbol_section bfd_get_section
#endif
	for (i = 0; i < symbols_count; ++i) {
		sym = symbols[i];
		section = bfd_asymbol_section(sym);
		if (bfd2elf_binding(sym) < 0)
			continue;

		while (i + 1 < symbols_count &&
		       bfd_asymbol_section(symbols[i + 1]) == section &&
		       bfd2elf_binding(symbols[i + 1]) < 0)
			i++;

		if (i + 1 < symbols_count &&
		    bfd_asymbol_section(symbols[i + 1]) == section)
			len = symbols[i + 1]->value - sym->value;
		else
			len = section->size - sym->value;

		start = bfd_asymbol_value(sym) - dso__text_offset(dso);
		symbol = symbol__new(start, len, bfd2elf_binding(sym), STT_FUNC,
				     bfd_asymbol_name(sym));
		if (!symbol)
			goto out_free;

		symbols__insert(dso__symbols(dso), symbol);
	}
#ifdef bfd_get_section
#undef bfd_asymbol_section
#endif

	symbols__fixup_end(dso__symbols(dso), false);
	symbols__fixup_duplicate(dso__symbols(dso));
	dso__set_adjust_symbols(dso, true);

	err = 0;
out_free:
	free(symbols);
out_close:
	bfd_close(abfd);
	return err;
}
#endif

static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
					   enum dso_binary_type type)
{
	switch (type) {
	case DSO_BINARY_TYPE__JAVA_JIT:
	case DSO_BINARY_TYPE__DEBUGLINK:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
	case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
	case DSO_BINARY_TYPE__GNU_DEBUGDATA:
		return !kmod && dso__kernel(dso) == DSO_SPACE__USER;

	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__KCORE:
		return dso__kernel(dso) == DSO_SPACE__KERNEL;

	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__GUEST_KCORE:
		return dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST;

	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
		/*
		 * kernel modules know their symtab type - it's set when
		 * creating a module dso in machine__addnew_module_map().
		 */
		return kmod && dso__symtab_type(dso) == type;

	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
	case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
		return true;

	case DSO_BINARY_TYPE__BPF_PROG_INFO:
	case DSO_BINARY_TYPE__BPF_IMAGE:
	case DSO_BINARY_TYPE__OOL:
	case DSO_BINARY_TYPE__NOT_FOUND:
	default:
		return false;
	}
}

/*
 * Checks for the existence of the perf-<pid>.map file in two different
 * locations. First, if the process is in a separate mount namespace, check in
 * that namespace using the pid of the innermost pid namespace. If it's not in
 * a namespace, or the file can't be found there, try in the mount namespace
 * of the tracing process using our view of its pid.
 */
static int dso__find_perf_map(char *filebuf, size_t bufsz,
			      struct nsinfo **nsip)
{
	struct nscookie nsc;
	struct nsinfo *nsi;
	struct nsinfo *nnsi;
	int rc = -1;

	nsi = *nsip;

	if (nsinfo__need_setns(nsi)) {
		snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nsinfo__nstgid(nsi));
		nsinfo__mountns_enter(nsi, &nsc);
		rc = access(filebuf, R_OK);
		nsinfo__mountns_exit(&nsc);
		if (rc == 0)
			return rc;
	}

	nnsi = nsinfo__copy(nsi);
	if (nnsi) {
		nsinfo__put(nsi);

		nsinfo__clear_need_setns(nnsi);
		snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nsinfo__tgid(nnsi));
		*nsip = nnsi;
		rc = 0;
	}

	return rc;
}

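/*
 * Load symbols for @dso. Broadly: returns the number of symbols found
 * (> 0), 1 when the dso had already been loaded, and 0 or a negative
 * value when no symbols could be loaded.
 */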
int dso__load(struct dso *dso, struct map *map)
{
	char *name;
	int ret = -1;
	u_int i;
	struct machine *machine = NULL;
	char *root_dir = (char *) "";
	int ss_pos = 0;
	struct symsrc ss_[2];
	struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
	bool kmod;
	bool perfmap;
	struct build_id bid;
	struct nscookie nsc;
	char newmapname[PATH_MAX];
	const char *map_path = dso__long_name(dso);

	mutex_lock(dso__lock(dso));
	perfmap = is_perf_pid_map_name(map_path);

	if (perfmap) {
		if (dso__nsinfo(dso) &&
		    (dso__find_perf_map(newmapname, sizeof(newmapname),
					dso__nsinfo_ptr(dso)) == 0)) {
			map_path = newmapname;
		}
	}

	nsinfo__mountns_enter(dso__nsinfo(dso), &nsc);

	/* check again under the dso->lock */
	if (dso__loaded(dso)) {
		ret = 1;
		goto out;
	}

	kmod = dso__is_kmod(dso);

	if (dso__kernel(dso) && !kmod) {
		if (dso__kernel(dso) == DSO_SPACE__KERNEL)
			ret = dso__load_kernel_sym(dso, map);
		else if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST)
			ret = dso__load_guest_kernel_sym(dso, map);

		machine = maps__machine(map__kmaps(map));
		if (machine__is(machine, "x86_64"))
			machine__map_x86_64_entry_trampolines(machine, dso);
		goto out;
	}

	dso__set_adjust_symbols(dso, false);

	if (perfmap) {
		ret = dso__load_perf_map(map_path, dso);
		dso__set_symtab_type(dso, ret > 0
				? DSO_BINARY_TYPE__JAVA_JIT
				: DSO_BINARY_TYPE__NOT_FOUND);
		goto out;
	}

	if (machine)
		root_dir = machine->root_dir;

	name = malloc(PATH_MAX);
	if (!name)
		goto out;

	/*
	 * Read the build id if possible. This is required for
	 * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
	 */
	if (!dso__has_build_id(dso) &&
	    is_regular_file(dso__long_name(dso))) {
		__symbol__join_symfs(name, PATH_MAX, dso__long_name(dso));
		if (filename__read_build_id(name, &bid) > 0)
			dso__set_build_id(dso, &bid);
	}

	/*
	 * Iterate over candidate debug images.
	 * Keep track of "interesting" ones (those which have a symtab, dynsym,
	 * and/or opd section) for processing.
	 */
	for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) {
		struct symsrc *ss = &ss_[ss_pos];
		bool next_slot = false;
		bool is_reg;
		bool nsexit;
		int bfdrc = -1;
		int sirc = -1;

		enum dso_binary_type symtab_type = binary_type_symtab[i];

		nsexit = (symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE ||
		    symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO);

		if (!dso__is_compatible_symtab_type(dso, kmod, symtab_type))
			continue;

		if (dso__read_binary_type_filename(dso, symtab_type,
						   root_dir, name, PATH_MAX))
			continue;

		if (nsexit)
			nsinfo__mountns_exit(&nsc);

		is_reg = is_regular_file(name);
		if (!is_reg && errno == ENOENT && dso__nsinfo(dso)) {
			char *new_name = dso__filename_with_chroot(dso, name);
			if (new_name) {
				is_reg = is_regular_file(new_name);
				strlcpy(name, new_name, PATH_MAX);
				free(new_name);
			}
		}

#ifdef HAVE_LIBBFD_SUPPORT
		if (is_reg)
			bfdrc = dso__load_bfd_symbols(dso, name);
#endif
		if (is_reg && bfdrc < 0)
			sirc = symsrc__init(ss, dso, name, symtab_type);

		if (nsexit)
			nsinfo__mountns_enter(dso__nsinfo(dso), &nsc);

		if (bfdrc == 0) {
			ret = 0;
			break;
		}

		if (!is_reg || sirc < 0)
			continue;

		if (!syms_ss && symsrc__has_symtab(ss)) {
			syms_ss = ss;
			next_slot = true;
			if (!dso__symsrc_filename(dso))
				dso__set_symsrc_filename(dso, strdup(name));
		}

		if (!runtime_ss && symsrc__possibly_runtime(ss)) {
			runtime_ss = ss;
			next_slot = true;
		}

		if (next_slot) {
			ss_pos++;

			if (dso__binary_type(dso) == DSO_BINARY_TYPE__NOT_FOUND)
				dso__set_binary_type(dso, symtab_type);

			if (syms_ss && runtime_ss)
				break;
		} else {
			symsrc__destroy(ss);
		}

	}

	if (!runtime_ss && !syms_ss)
		goto out_free;

	if (runtime_ss && !syms_ss) {
		syms_ss = runtime_ss;
	}

	/* We'll have to hope for the best */
	if (!runtime_ss && syms_ss)
		runtime_ss = syms_ss;

	if (syms_ss)
		ret = dso__load_sym(dso, map, syms_ss, runtime_ss, kmod);
	else
		ret = -1;

	if (ret > 0) {
		int nr_plt;

		nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss);
		if (nr_plt > 0)
			ret += nr_plt;
	}

	for (; ss_pos > 0; ss_pos--)
		symsrc__destroy(&ss_[ss_pos - 1]);
out_free:
	free(name);
	if (ret < 0 && strstr(dso__name(dso), " (deleted)") != NULL)
		ret = 0;
out:
	dso__set_loaded(dso);
	mutex_unlock(dso__lock(dso));
	nsinfo__mountns_exit(&nsc);

	return ret;
}

/*
 * Always takes ownership of vmlinux when vmlinux_allocated == true, even if
 * it returns an error.
 */
int dso__load_vmlinux(struct dso *dso, struct map *map,
		      const char *vmlinux, bool vmlinux_allocated)
{
	int err = -1;
	struct symsrc ss;
	char symfs_vmlinux[PATH_MAX];
	enum dso_binary_type symtab_type;

	if (vmlinux[0] == '/')
		snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux);
	else
		symbol__join_symfs(symfs_vmlinux, vmlinux);

	if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST)
		symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
	else
		symtab_type = DSO_BINARY_TYPE__VMLINUX;

	if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type)) {
		if (vmlinux_allocated)
			free((char *) vmlinux);
		return -1;
	}

	/*
	 * dso__load_sym() may copy 'dso' which will result in the copies having
	 * an incorrect long name unless we set it here first.
	 */
	dso__set_long_name(dso, vmlinux, vmlinux_allocated);
	if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST)
		dso__set_binary_type(dso, DSO_BINARY_TYPE__GUEST_VMLINUX);
	else
		dso__set_binary_type(dso, DSO_BINARY_TYPE__VMLINUX);

	err = dso__load_sym(dso, map, &ss, &ss, 0);
	symsrc__destroy(&ss);

	if (err > 0) {
		dso__set_loaded(dso);
		pr_debug("Using %s for symbols\n", symfs_vmlinux);
	}

	return err;
}

int dso__load_vmlinux_path(struct dso *dso, struct map *map)
{
	int i, err = 0;
	char *filename = NULL;

	pr_debug("Looking at the vmlinux_path (%d entries long)\n",
		 vmlinux_path__nr_entries + 1);

	for (i = 0; i < vmlinux_path__nr_entries; ++i) {
		err = dso__load_vmlinux(dso, map, vmlinux_path[i], false);
		if (err > 0)
			goto out;
	}

	if (!symbol_conf.ignore_vmlinux_buildid)
		filename = dso__build_id_filename(dso, NULL, 0, false);
	if (filename != NULL) {
		err = dso__load_vmlinux(dso, map, filename, true);
		if (err > 0)
			goto out;
	}
out:
	return err;
}

static bool visible_dir_filter(const char *name, struct dirent *d)
{
	if (d->d_type != DT_DIR)
		return false;
	return lsdir_no_dot_filter(name, d);
}

static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
{
	char kallsyms_filename[PATH_MAX];
	int ret = -1;
	struct strlist *dirs;
	struct str_node *nd;

	dirs = lsdir(dir, visible_dir_filter);
	if (!dirs)
		return -1;

	strlist__for_each_entry(nd, dirs) {
		scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
			  "%s/%s/kallsyms", dir, nd->s);
		if (!validate_kcore_addresses(kallsyms_filename, map)) {
			strlcpy(dir, kallsyms_filename, dir_sz);
			ret = 0;
			break;
		}
	}

	strlist__delete(dirs);

	return ret;
}

/*
 * Use open(O_RDONLY) to check readability directly instead of access(R_OK)
 * since access(R_OK) only checks with the real UID/GID but open() uses the
 * effective UID/GID and actual capabilities (e.g. /proc/kcore requires
 * CAP_SYS_RAWIO).
 */
static bool filename__readable(const char *file)
{
	int fd = open(file, O_RDONLY);
	if (fd < 0)
		return false;
	close(fd);
	return true;
}

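/*
 * Pick a kallsyms source for the kernel dso, trying in order: the
 * running kernel's /proc/kallsyms when the build-id matches and
 * /proc/kcore is usable, a kcore + kallsyms pair from the build-id
 * cache, plain /proc/kallsyms for a matching host kernel, and finally a
 * cached kallsyms copy looked up by build-id.
 */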
static int dso__load_kernel_sym(struct dso *dso, struct map *map)
{
	int err;
	const char *kallsyms_filename = NULL;
	char *kallsyms_allocated_filename = NULL;
	char *filename = NULL;

	/*
	 * Step 1: if the user specified a kallsyms or vmlinux filename, use
	 * it and only it, reporting errors to the user if it cannot be used.
	 *
	 * For instance, try to analyse an ARM perf.data file _without_ a
	 * build-id, or if the user specifies the wrong path to the right
	 * vmlinux file, obviously we can't fall back to another vmlinux (an
	 * x86_64 one, on the machine where analysis is being performed, say),
	 * or worse, /proc/kallsyms.
	 *
	 * If the specified file _has_ a build-id and there is a build-id
	 * section in the perf.data file, we will still do the expected
	 * validation in dso__load_vmlinux and will bail out if they don't
	 * match.
	 */
	if (symbol_conf.kallsyms_name != NULL) {
		kallsyms_filename = symbol_conf.kallsyms_name;
		goto do_kallsyms;
	}

	if (!symbol_conf.ignore_vmlinux && symbol_conf.vmlinux_name != NULL)
		return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name, false);

	/*
	 * Before checking the common vmlinux locations, check if it's
	 * stored as a standard build-id binary (not kallsyms) under the
	 * .debug cache.
	 */
	if (!symbol_conf.ignore_vmlinux_buildid)
		filename = __dso__build_id_filename(dso, NULL, 0, false, false);
	if (filename != NULL) {
		err = dso__load_vmlinux(dso, map, filename, true);
		if (err > 0)
			return err;
	}

	if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) {
		err = dso__load_vmlinux_path(dso, map);
		if (err > 0)
			return err;
	}

	/* do not try local files if a symfs was given */
	if (symbol_conf.symfs[0] != 0)
		return -1;

	kallsyms_allocated_filename = dso__find_kallsyms(dso, map);
	if (!kallsyms_allocated_filename)
		return -1;

	kallsyms_filename = kallsyms_allocated_filename;

do_kallsyms:
	err = dso__load_kallsyms(dso, kallsyms_filename, map);
	if (err > 0)
		pr_debug("Using %s for symbols\n", kallsyms_filename);
	free(kallsyms_allocated_filename);

	if (err > 0 && !dso__is_kcore(dso)) {
		dso__set_binary_type(dso, DSO_BINARY_TYPE__KALLSYMS);
		dso__set_long_name(dso, DSO__NAME_KALLSYMS, false);
		map__fixup_start(map);
		map__fixup_end(map);
	}

	return err;
}

static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map)
{
	int err;
	const char *kallsyms_filename;
	struct machine *machine = maps__machine(map__kmaps(map));
	char path[PATH_MAX];

	if (machine->kallsyms_filename) {
		kallsyms_filename = machine->kallsyms_filename;
	} else if (machine__is_default_guest(machine)) {
		/*
		 * If the user specified a vmlinux filename, use it and only
		 * it, reporting errors to the user if it cannot be used.
		 * Otherwise use the guest_kallsyms file supplied by the user
		 * on the command line.
		 */
		if (symbol_conf.default_guest_vmlinux_name != NULL) {
			err = dso__load_vmlinux(dso, map,
						symbol_conf.default_guest_vmlinux_name,
						false);
			return err;
		}

		kallsyms_filename = symbol_conf.default_guest_kallsyms;
		if (!kallsyms_filename)
			return -1;
	} else {
		sprintf(path, "%s/proc/kallsyms", machine->root_dir);
		kallsyms_filename = path;
	}

	err = dso__load_kallsyms(dso, kallsyms_filename, map);
	if (err > 0)
		pr_debug("Using %s for symbols\n", kallsyms_filename);
	if (err > 0 && !dso__is_kcore(dso)) {
		dso__set_binary_type(dso, DSO_BINARY_TYPE__GUEST_KALLSYMS);
		dso__set_long_name(dso, machine->mmap_name, false);
		map__fixup_start(map);
		map__fixup_end(map);
	}

	return err;
}

static void vmlinux_path__exit(void)
{
	while (--vmlinux_path__nr_entries >= 0)
		zfree(&vmlinux_path[vmlinux_path__nr_entries]);
	vmlinux_path__nr_entries = 0;

	zfree(&vmlinux_path);
}

static const char * const vmlinux_paths[] = {
	"vmlinux",
	"/boot/vmlinux"
};

static const char * const vmlinux_paths_upd[] = {
	"/boot/vmlinux-%s",
	"/usr/lib/debug/boot/vmlinux-%s",
	"/lib/modules/%s/build/vmlinux",
	"/usr/lib/debug/lib/modules/%s/vmlinux",
	"/usr/lib/debug/boot/vmlinux-%s.debug"
};

static int vmlinux_path__add(const char *new_entry)
{
	vmlinux_path[vmlinux_path__nr_entries] = strdup(new_entry);
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		return -1;
	++vmlinux_path__nr_entries;

	return 0;
}
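/*
 * Build the vmlinux_path candidate list: the fixed entries from
 * vmlinux_paths[], plus, when no symfs is in use, the vmlinux_paths_upd[]
 * templates expanded with the kernel version taken from the perf.data
 * header (env->os_release) or, failing that, from uname().
 */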
static int vmlinux_path__init(struct perf_env *env)
{
	struct utsname uts;
	char bf[PATH_MAX];
	char *kernel_version;
	unsigned int i;

	vmlinux_path = malloc(sizeof(char *) * (ARRAY_SIZE(vmlinux_paths) +
			      ARRAY_SIZE(vmlinux_paths_upd)));
	if (vmlinux_path == NULL)
		return -1;

	for (i = 0; i < ARRAY_SIZE(vmlinux_paths); i++)
		if (vmlinux_path__add(vmlinux_paths[i]) < 0)
			goto out_fail;

	/* only try kernel version if no symfs was given */
	if (symbol_conf.symfs[0] != 0)
		return 0;

	if (env) {
		kernel_version = env->os_release;
	} else {
		if (uname(&uts) < 0)
			goto out_fail;

		kernel_version = uts.release;
	}

	for (i = 0; i < ARRAY_SIZE(vmlinux_paths_upd); i++) {
		snprintf(bf, sizeof(bf), vmlinux_paths_upd[i], kernel_version);
		if (vmlinux_path__add(bf) < 0)
			goto out_fail;
	}

	return 0;

out_fail:
	vmlinux_path__exit();
	return -1;
}

int setup_list(struct strlist **list, const char *list_str,
	       const char *list_name)
{
	if (list_str == NULL)
		return 0;

	*list = strlist__new(list_str, NULL);
	if (!*list) {
		pr_err("problems parsing %s list\n", list_name);
		return -1;
	}

	symbol_conf.has_filter = true;
	return 0;
}

int setup_intlist(struct intlist **list, const char *list_str,
		  const char *list_name)
{
	if (list_str == NULL)
		return 0;

	*list = intlist__new(list_str);
	if (!*list) {
		pr_err("problems parsing %s list\n", list_name);
		return -1;
	}
	return 0;
}

static int setup_addrlist(struct intlist **addr_list, struct strlist *sym_list)
{
	struct str_node *pos, *tmp;
	unsigned long val;
	char *sep;
	const char *end;
	int i = 0, err;

	*addr_list = intlist__new(NULL);
	if (!*addr_list)
		return -1;

	strlist__for_each_entry_safe(pos, tmp, sym_list) {
		errno = 0;
		val = strtoul(pos->s, &sep, 16);
		if (errno || (sep == pos->s))
			continue;

		if (*sep != '\0') {
			end = pos->s + strlen(pos->s) - 1;
			while (end >= sep && isspace(*end))
				end--;

			if (end >= sep)
				continue;
		}

		err = intlist__add(*addr_list, val);
		if (err)
			break;

		strlist__remove(sym_list, pos);
		i++;
	}

	if (i == 0) {
		intlist__delete(*addr_list);
		*addr_list = NULL;
	}

	return 0;
}
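/*
 * Mirror the kernel's kptr_restrict policy: without CAP_SYSLOG any non-zero
 * /proc/sys/kernel/kptr_restrict value hides kernel addresses from us, with
 * it only a value >= 2 does, and a perf_event_paranoid level above 1
 * restricts us as well unless we hold CAP_SYSLOG.
 */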
static bool symbol__read_kptr_restrict(void)
{
	bool value = false;
	FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
	bool used_root;
	bool cap_syslog = perf_cap__capable(CAP_SYSLOG, &used_root);

	if (fp != NULL) {
		char line[8];

		if (fgets(line, sizeof(line), fp) != NULL)
			value = cap_syslog ?
				(atoi(line) >= 2) :
				(atoi(line) != 0);

		fclose(fp);
	}

	/*
	 * Per kernel/kallsyms.c: we are also restricted when
	 * perf_event_paranoid > 1 w/o CAP_SYSLOG.
	 */
	if (perf_event_paranoid() > 1 && !cap_syslog)
		value = true;

	return value;
}

int symbol__annotation_init(void)
{
	if (symbol_conf.init_annotation)
		return 0;

	if (symbol_conf.initialized) {
		pr_err("Annotation needs to be initialized before symbol__init()\n");
		return -1;
	}

	symbol_conf.priv_size += sizeof(struct annotation);
	symbol_conf.init_annotation = true;
	return 0;
}

static int setup_parallelism_bitmap(void)
{
	struct perf_cpu_map *map;
	struct perf_cpu cpu;
	int i, err = -1;

	if (symbol_conf.parallelism_list_str == NULL)
		return 0;

	map = perf_cpu_map__new(symbol_conf.parallelism_list_str);
	if (map == NULL) {
		pr_err("failed to parse parallelism filter list\n");
		return -1;
	}

	bitmap_fill(symbol_conf.parallelism_filter, MAX_NR_CPUS + 1);
	perf_cpu_map__for_each_cpu(cpu, i, map) {
		if (cpu.cpu <= 0 || cpu.cpu > MAX_NR_CPUS) {
			pr_err("Requested parallelism level %d is invalid.\n", cpu.cpu);
			goto out_delete_map;
		}
		__clear_bit(cpu.cpu, symbol_conf.parallelism_filter);
	}

	err = 0;
out_delete_map:
	perf_cpu_map__put(map);
	return err;
}
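/*
 * One-time initialization of the symbol subsystem from symbol_conf: sets up
 * the vmlinux search path, the dso/comm/pid/tid/symbol filter lists and the
 * parallelism bitmap, and normalizes the symfs path.  Safe to call more than
 * once; subsequent calls are no-ops until symbol__exit().
 */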
int symbol__init(struct perf_env *env)
{
	const char *symfs;

	if (symbol_conf.initialized)
		return 0;

	symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64));

	symbol__elf_init();

	if (symbol_conf.try_vmlinux_path && vmlinux_path__init(env) < 0)
		return -1;

	if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') {
		pr_err("'.' is the only invalid --field-separator argument\n");
		return -1;
	}

	if (setup_parallelism_bitmap())
		return -1;

	if (setup_list(&symbol_conf.dso_list,
		       symbol_conf.dso_list_str, "dso") < 0)
		return -1;

	if (setup_list(&symbol_conf.comm_list,
		       symbol_conf.comm_list_str, "comm") < 0)
		goto out_free_dso_list;

	if (setup_intlist(&symbol_conf.pid_list,
			  symbol_conf.pid_list_str, "pid") < 0)
		goto out_free_comm_list;

	if (setup_intlist(&symbol_conf.tid_list,
			  symbol_conf.tid_list_str, "tid") < 0)
		goto out_free_pid_list;

	if (setup_list(&symbol_conf.sym_list,
		       symbol_conf.sym_list_str, "symbol") < 0)
		goto out_free_tid_list;

	if (symbol_conf.sym_list &&
	    setup_addrlist(&symbol_conf.addr_list, symbol_conf.sym_list) < 0)
		goto out_free_sym_list;

	if (setup_list(&symbol_conf.bt_stop_list,
		       symbol_conf.bt_stop_list_str, "symbol") < 0)
		goto out_free_sym_list;

	/*
	 * A symfs path of "/" is identical to "", so reset it here for
	 * simplicity.
	 */
	symfs = realpath(symbol_conf.symfs, NULL);
	if (symfs == NULL)
		symfs = symbol_conf.symfs;
	if (strcmp(symfs, "/") == 0)
		symbol_conf.symfs = "";
	if (symfs != symbol_conf.symfs)
		free((void *)symfs);

	symbol_conf.kptr_restrict = symbol__read_kptr_restrict();

	symbol_conf.initialized = true;
	return 0;

out_free_sym_list:
	strlist__delete(symbol_conf.sym_list);
	intlist__delete(symbol_conf.addr_list);
out_free_tid_list:
	intlist__delete(symbol_conf.tid_list);
out_free_pid_list:
	intlist__delete(symbol_conf.pid_list);
out_free_comm_list:
	strlist__delete(symbol_conf.comm_list);
out_free_dso_list:
	strlist__delete(symbol_conf.dso_list);
	return -1;
}

void symbol__exit(void)
{
	if (!symbol_conf.initialized)
		return;
	strlist__delete(symbol_conf.bt_stop_list);
	strlist__delete(symbol_conf.sym_list);
	strlist__delete(symbol_conf.dso_list);
	strlist__delete(symbol_conf.comm_list);
	intlist__delete(symbol_conf.tid_list);
	intlist__delete(symbol_conf.pid_list);
	intlist__delete(symbol_conf.addr_list);
	vmlinux_path__exit();
	symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
	symbol_conf.bt_stop_list = NULL;
	symbol_conf.initialized = false;
}

int symbol__config_symfs(const struct option *opt __maybe_unused,
			 const char *dir, int unset __maybe_unused)
{
	char *bf = NULL;
	int ret;

	symbol_conf.symfs = strdup(dir);
	if (symbol_conf.symfs == NULL)
		return -ENOMEM;

	/*
	 * Skip the locally configured cache if a symfs is given, and point
	 * the buildid dir at symfs/.debug.
	 */
	ret = asprintf(&bf, "%s/%s", dir, ".debug");
	if (ret < 0)
		return -ENOMEM;

	set_buildid_dir(bf);

	free(bf);
	return 0;
}

/*
 * Check that user-supplied kernel symbol files are accessible, because the
 * default mechanism for accessing ELF files fails silently: if debug symbols
 * for a build-id aren't found, perf carries on normally.  When the files are
 * user-supplied, we should assume the user doesn't want that failure to be
 * silent.
 */
int symbol__validate_sym_arguments(void)
{
	if (symbol_conf.vmlinux_name &&
	    access(symbol_conf.vmlinux_name, R_OK)) {
		pr_err("Invalid file: %s\n", symbol_conf.vmlinux_name);
		return -EINVAL;
	}
	if (symbol_conf.kallsyms_name &&
	    access(symbol_conf.kallsyms_name, R_OK)) {
		pr_err("Invalid file: %s\n", symbol_conf.kallsyms_name);
		return -EINVAL;
	}
	return 0;
}
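/*
 * Illustrative call sequence from a hypothetical perf builtin (a sketch, not
 * code from any actual tool): initialize once, validate any user-supplied
 * symbol files, and tear down on exit:
 *
 *	if (symbol__init(NULL) < 0)
 *		return -1;
 *	if (symbol__validate_sym_arguments())
 *		goto out;
 *	...
 * out:
 *	symbol__exit();
 */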