// SPDX-License-Identifier: GPL-2.0
#include <elf.h>
#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include "dso.h"
#include "session.h"
#include "thread.h"
#include "thread-stack.h"
#include "debug.h"
#include "namespaces.h"
#include "comm.h"
#include "map.h"
#include "symbol.h"
#include "unwind.h"
#include "callchain.h"
#include "dwarf-regs.h"

#include <api/fs/fs.h>

int thread__init_maps(struct thread *thread, struct machine *machine)
{
	pid_t pid = thread__pid(thread);

	if (pid == thread__tid(thread) || pid == -1) {
		thread__set_maps(thread, maps__new(machine));
	} else {
		struct thread *leader = machine__findnew_thread(machine, pid, pid);

		if (leader) {
			thread__set_maps(thread, maps__get(thread__maps(leader)));
			thread__put(leader);
		}
	}

	return thread__maps(thread) ? 0 : -1;
}

struct thread *thread__new(pid_t pid, pid_t tid)
{
	RC_STRUCT(thread) *_thread = zalloc(sizeof(*_thread));
	struct thread *thread;

	if (ADD_RC_CHK(thread, _thread) != NULL) {
		struct comm *comm;
		char comm_str[32];

		thread__set_pid(thread, pid);
		thread__set_tid(thread, tid);
		thread__set_ppid(thread, -1);
		thread__set_cpu(thread, -1);
		thread__set_guest_cpu(thread, -1);
		thread__set_e_machine(thread, EM_NONE);
		thread__set_lbr_stitch_enable(thread, false);
		INIT_LIST_HEAD(thread__namespaces_list(thread));
		INIT_LIST_HEAD(thread__comm_list(thread));
		init_rwsem(thread__namespaces_lock(thread));
		init_rwsem(thread__comm_lock(thread));

		snprintf(comm_str, sizeof(comm_str), ":%d", tid);
		comm = comm__new(comm_str, 0, false);
		if (!comm)
			goto err_thread;

		list_add(&comm->list, thread__comm_list(thread));
		refcount_set(thread__refcnt(thread), 1);
		/* Thread holds first ref to nsdata. */
		RC_CHK_ACCESS(thread)->nsinfo = nsinfo__new(pid);
		srccode_state_init(thread__srccode_state(thread));
	}

	return thread;

err_thread:
	thread__delete(thread);
	return NULL;
}

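/*
 * Example lifecycle for the constructor above (illustrative only; most code
 * obtains threads via machine__findnew_thread() instead):
 *
 *	struct thread *t = thread__new(pid, tid);
 *
 *	if (t != NULL) {
 *		if (thread__init_maps(t, machine) == 0) {
 *			... look up maps/symbols, set the comm, etc. ...
 *		}
 *		thread__put(t);
 *	}
 */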

static void (*thread__priv_destructor)(void *priv);

void thread__set_priv_destructor(void (*destructor)(void *priv))
{
	assert(thread__priv_destructor == NULL);

	thread__priv_destructor = destructor;
}

void thread__delete(struct thread *thread)
{
	struct namespaces *namespaces, *tmp_namespaces;
	struct comm *comm, *tmp_comm;

	thread_stack__free(thread);

	if (thread__maps(thread)) {
		maps__put(thread__maps(thread));
		thread__set_maps(thread, NULL);
	}
	down_write(thread__namespaces_lock(thread));
	list_for_each_entry_safe(namespaces, tmp_namespaces,
				 thread__namespaces_list(thread), list) {
		list_del_init(&namespaces->list);
		namespaces__free(namespaces);
	}
	up_write(thread__namespaces_lock(thread));

	down_write(thread__comm_lock(thread));
	list_for_each_entry_safe(comm, tmp_comm, thread__comm_list(thread), list) {
		list_del_init(&comm->list);
		comm__free(comm);
	}
	up_write(thread__comm_lock(thread));

	nsinfo__zput(RC_CHK_ACCESS(thread)->nsinfo);
	srccode_state_free(thread__srccode_state(thread));

	exit_rwsem(thread__namespaces_lock(thread));
	exit_rwsem(thread__comm_lock(thread));
	thread__free_stitch_list(thread);

	if (thread__priv_destructor)
		thread__priv_destructor(thread__priv(thread));

	RC_CHK_FREE(thread);
}

struct thread *thread__get(struct thread *thread)
{
	struct thread *result;

	if (RC_CHK_GET(result, thread))
		refcount_inc(thread__refcnt(thread));

	return result;
}

void thread__put(struct thread *thread)
{
	if (thread && refcount_dec_and_test(thread__refcnt(thread)))
		thread__delete(thread);
	else
		RC_CHK_PUT(thread);
}

static struct namespaces *__thread__namespaces(struct thread *thread)
{
	if (list_empty(thread__namespaces_list(thread)))
		return NULL;

	return list_first_entry(thread__namespaces_list(thread), struct namespaces, list);
}

struct namespaces *thread__namespaces(struct thread *thread)
{
	struct namespaces *ns;

	down_read(thread__namespaces_lock(thread));
	ns = __thread__namespaces(thread);
	up_read(thread__namespaces_lock(thread));

	return ns;
}

static int __thread__set_namespaces(struct thread *thread, u64 timestamp,
				    struct perf_record_namespaces *event)
{
	struct namespaces *new, *curr = __thread__namespaces(thread);

	new = namespaces__new(event);
	if (!new)
		return -ENOMEM;

	list_add(&new->list, thread__namespaces_list(thread));

	if (timestamp && curr) {
		/*
		 * setns syscall must have changed few or all the namespaces
		 * of this thread. Update end time for the namespaces
		 * previously used.
		 */
		curr = list_next_entry(new, list);
		curr->end_time = timestamp;
	}

	return 0;
}

int thread__set_namespaces(struct thread *thread, u64 timestamp,
			   struct perf_record_namespaces *event)
{
	int ret;

	down_write(thread__namespaces_lock(thread));
	ret = __thread__set_namespaces(thread, timestamp, event);
	up_write(thread__namespaces_lock(thread));
	return ret;
}

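/*
 * The comm list is kept newest-first: ____thread__set_comm() adds each new
 * comm at the list head, so thread__comm() returns the most recent name and
 * thread__exec_comm() walks from newest towards older entries.
 */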
struct comm *thread__comm(struct thread *thread)
{
	if (list_empty(thread__comm_list(thread)))
		return NULL;

	return list_first_entry(thread__comm_list(thread), struct comm, list);
}

struct comm *thread__exec_comm(struct thread *thread)
{
	struct comm *comm, *last = NULL, *second_last = NULL;

	list_for_each_entry(comm, thread__comm_list(thread), list) {
		if (comm->exec)
			return comm;
		second_last = last;
		last = comm;
	}

	/*
	 * 'last' with no start time might be the parent's comm of a synthesized
	 * thread (created by processing a synthesized fork event). For a main
	 * thread, that is very probably wrong. Prefer a later comm to avoid
	 * that case.
	 */
	if (second_last && !last->start && thread__pid(thread) == thread__tid(thread))
		return second_last;

	return last;
}

static int ____thread__set_comm(struct thread *thread, const char *str,
				u64 timestamp, bool exec)
{
	struct comm *new, *curr = thread__comm(thread);

	/* Override the default :tid entry */
	if (!thread__comm_set(thread)) {
		int err = comm__override(curr, str, timestamp, exec);
		if (err)
			return err;
	} else {
		new = comm__new(str, timestamp, exec);
		if (!new)
			return -ENOMEM;
		list_add(&new->list, thread__comm_list(thread));

		if (exec)
			unwind__flush_access(thread__maps(thread));
	}

	thread__set_comm_set(thread, true);

	return 0;
}

int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp,
		       bool exec)
{
	int ret;

	down_write(thread__comm_lock(thread));
	ret = ____thread__set_comm(thread, str, timestamp, exec);
	up_write(thread__comm_lock(thread));
	return ret;
}

int thread__set_comm_from_proc(struct thread *thread)
{
	char path[64];
	char *comm = NULL;
	size_t sz;
	int err = -1;

	if (!(snprintf(path, sizeof(path), "%d/task/%d/comm",
		       thread__pid(thread), thread__tid(thread)) >= (int)sizeof(path)) &&
	    procfs__read_str(path, &comm, &sz) == 0) {
		comm[sz - 1] = '\0';
		err = thread__set_comm(thread, comm, 0);
	}

	return err;
}

static const char *__thread__comm_str(struct thread *thread)
{
	const struct comm *comm = thread__comm(thread);

	if (!comm)
		return NULL;

	return comm__str(comm);
}

const char *thread__comm_str(struct thread *thread)
{
	const char *str;

	down_read(thread__comm_lock(thread));
	str = __thread__comm_str(thread);
	up_read(thread__comm_lock(thread));

	return str;
}

static int __thread__comm_len(struct thread *thread, const char *comm)
{
	if (!comm)
		return 0;
	thread__set_comm_len(thread, strlen(comm));

	return thread__var_comm_len(thread);
}

/* CHECKME: it should probably better return the max comm len from its comm list */
int thread__comm_len(struct thread *thread)
{
	int comm_len = thread__var_comm_len(thread);

	if (!comm_len) {
		const char *comm;

		down_read(thread__comm_lock(thread));
		comm = __thread__comm_str(thread);
		comm_len = __thread__comm_len(thread, comm);
		up_read(thread__comm_lock(thread));
	}

	return comm_len;
}

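/* Dump the tid, the current comm and the memory maps of @thread, for debug output. */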
size_t thread__fprintf(struct thread *thread, FILE *fp)
{
	return fprintf(fp, "Thread %d %s\n", thread__tid(thread), thread__comm_str(thread)) +
	       maps__fprintf(thread__maps(thread), fp);
}

int thread__insert_map(struct thread *thread, struct map *map)
{
	int ret;

	ret = unwind__prepare_access(thread__maps(thread), map, NULL);
	if (ret)
		return ret;

	return maps__fixup_overlap_and_insert(thread__maps(thread), map);
}

struct thread__prepare_access_maps_cb_args {
	int err;
	struct maps *maps;
};

static int thread__prepare_access_maps_cb(struct map *map, void *data)
{
	bool initialized = false;
	struct thread__prepare_access_maps_cb_args *args = data;

	args->err = unwind__prepare_access(args->maps, map, &initialized);

	return (args->err || initialized) ? 1 : 0;
}

static int thread__prepare_access(struct thread *thread)
{
	struct thread__prepare_access_maps_cb_args args = {
		.err = 0,
	};

	if (dwarf_callchain_users) {
		args.maps = thread__maps(thread);
		maps__for_each_map(thread__maps(thread), thread__prepare_access_maps_cb, &args);
	}

	return args.err;
}

static int thread__clone_maps(struct thread *thread, struct thread *parent, bool do_maps_clone)
{
	/* This is new thread, we share map groups for process. */
	if (thread__pid(thread) == thread__pid(parent))
		return thread__prepare_access(thread);

	if (maps__equal(thread__maps(thread), thread__maps(parent))) {
		pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
			 thread__pid(thread), thread__tid(thread),
			 thread__pid(parent), thread__tid(parent));
		return 0;
	}
	/* But this one is new process, copy maps. */
	return do_maps_clone ? maps__copy_from(thread__maps(thread), thread__maps(parent)) : 0;
}

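/*
 * Initialise a newly forked @thread from @parent: inherit the parent's comm
 * (stamped with the fork @timestamp), record the ppid and then share or copy
 * the parent's maps via thread__clone_maps().
 */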
int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp, bool do_maps_clone)
{
	if (thread__comm_set(parent)) {
		const char *comm = thread__comm_str(parent);
		int err;
		if (!comm)
			return -ENOMEM;
		err = thread__set_comm(thread, comm, timestamp);
		if (err)
			return err;
	}

	thread__set_ppid(thread, thread__tid(parent));
	return thread__clone_maps(thread, parent, do_maps_clone);
}

void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
					struct addr_location *al)
{
	size_t i;
	const u8 cpumodes[] = {
		PERF_RECORD_MISC_USER,
		PERF_RECORD_MISC_KERNEL,
		PERF_RECORD_MISC_GUEST_USER,
		PERF_RECORD_MISC_GUEST_KERNEL
	};

	for (i = 0; i < ARRAY_SIZE(cpumodes); i++) {
		thread__find_symbol(thread, cpumodes[i], addr, al);
		if (al->map)
			break;
	}
}

static uint16_t read_proc_e_machine_for_pid(pid_t pid)
{
	char path[6 /* "/proc/" */ + 11 /* max length of pid */ + 5 /* "/exe\0" */];
	int fd;
	uint16_t e_machine = EM_NONE;

	snprintf(path, sizeof(path), "/proc/%d/exe", pid);
	fd = open(path, O_RDONLY);
	if (fd >= 0) {
		_Static_assert(offsetof(Elf32_Ehdr, e_machine) == 18, "Unexpected offset");
		_Static_assert(offsetof(Elf64_Ehdr, e_machine) == 18, "Unexpected offset");
		if (pread(fd, &e_machine, sizeof(e_machine), 18) != sizeof(e_machine))
			e_machine = EM_NONE;
		close(fd);
	}
	return e_machine;
}

static int thread__e_machine_callback(struct map *map, void *machine)
{
	struct dso *dso = map__dso(map);

	_Static_assert(0 == EM_NONE, "Unexpected EM_NONE");
	if (!dso)
		return EM_NONE;

	return dso__e_machine(dso, machine);
}

uint16_t thread__e_machine(struct thread *thread, struct machine *machine)
{
	pid_t tid, pid;
	uint16_t e_machine = RC_CHK_ACCESS(thread)->e_machine;

	if (e_machine != EM_NONE)
		return e_machine;

	tid = thread__tid(thread);
	pid = thread__pid(thread);
	if (pid != tid) {
		struct thread *parent = machine__findnew_thread(machine, pid, pid);

		if (parent) {
			e_machine = thread__e_machine(parent, machine);
			thread__set_e_machine(thread, e_machine);
			return e_machine;
		}
		/* Something went wrong, fallback. */
	}
	/* Reading on the PID thread. First try to find from the maps. */
	e_machine = maps__for_each_map(thread__maps(thread),
				       thread__e_machine_callback,
				       machine);
	if (e_machine == EM_NONE) {
		/* Maps failed, perhaps we're live with map events disabled. */
		bool is_live = machine->machines == NULL;

		if (!is_live) {
			/* The session is live only if it has no data file. */
			struct perf_session *session = container_of(machine->machines,
								    struct perf_session,
								    machines);

			is_live = !session->data;
		}
		/* Read from /proc/pid/exe if live. */
		if (is_live)
			e_machine = read_proc_e_machine_for_pid(pid);
	}
	if (e_machine != EM_NONE)
		thread__set_e_machine(thread, e_machine);
	else
		e_machine = EM_HOST;
	return e_machine;
}

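/*
 * Look up the main (pid == tid) thread for @thread: a new reference to
 * @thread itself when it already is the main thread, NULL when no pid is
 * known, otherwise the pid/pid thread found in @machine.
 */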
struct thread *thread__main_thread(struct machine *machine, struct thread *thread)
{
	if (thread__pid(thread) == thread__tid(thread))
		return thread__get(thread);

	if (thread__pid(thread) == -1)
		return NULL;

	return machine__find_thread(machine, thread__pid(thread), thread__pid(thread));
}

int thread__memcpy(struct thread *thread, struct machine *machine,
		   void *buf, u64 ip, int len, bool *is64bit)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	struct addr_location al;
	struct dso *dso;
	long offset;

	if (machine__kernel_ip(machine, ip))
		cpumode = PERF_RECORD_MISC_KERNEL;

	addr_location__init(&al);
	if (!thread__find_map(thread, cpumode, ip, &al)) {
		addr_location__exit(&al);
		return -1;
	}

	dso = map__dso(al.map);

	if (!dso || dso__data(dso)->status == DSO_DATA_STATUS_ERROR || map__load(al.map) < 0) {
		addr_location__exit(&al);
		return -1;
	}

	offset = map__map_ip(al.map, ip);
	if (is64bit)
		*is64bit = dso__is_64_bit(dso);

	addr_location__exit(&al);

	return dso__data_read_offset(dso, machine, offset, buf, len);
}

void thread__free_stitch_list(struct thread *thread)
{
	struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
	struct stitch_list *pos, *tmp;

	if (!lbr_stitch)
		return;

	list_for_each_entry_safe(pos, tmp, &lbr_stitch->lists, node) {
		map_symbol__exit(&pos->cursor.ms);
		list_del_init(&pos->node);
		free(pos);
	}

	list_for_each_entry_safe(pos, tmp, &lbr_stitch->free_lists, node) {
		list_del_init(&pos->node);
		free(pos);
	}

	for (unsigned int i = 0; i < lbr_stitch->prev_lbr_cursor_size; i++)
		map_symbol__exit(&lbr_stitch->prev_lbr_cursor[i].ms);

	zfree(&lbr_stitch->prev_lbr_cursor);
	free(thread__lbr_stitch(thread));
	thread__set_lbr_stitch(thread, NULL);
}