Lines matching full:thread in tools/perf/util/machine.c
25 #include "thread.h"
47 struct thread *th, bool lock);
78 return to_find - (int)thread__tid(rb_entry(nd, struct thread_rb_node, rb_node)->thread); in thread_rb_node__cmp_tid()
81 static struct thread_rb_node *thread_rb_node__find(const struct thread *th, in thread_rb_node__find()
103 static void thread__set_guest_comm(struct thread *thread, pid_t pid) in thread__set_guest_comm() argument
108 thread__set_comm(thread, comm, 0); in thread__set_guest_comm()
144 struct thread *thread = machine__findnew_thread(machine, -1, in machine__init() local
147 if (thread == NULL) in machine__init()
150 thread__set_guest_comm(thread, pid); in machine__init()
151 thread__put(thread); in machine__init()
235 __machine__remove_thread(machine, trb, trb->thread, false); in machine__delete_threads()
417 * To support that, copy the host thread's maps to the guest thread's maps.
420 * thread's maps have been set up.
422 * This function returns the guest thread. Apart from keeping the data
423 * structures sane, using a thread belonging to the guest machine, instead
424 * of the host thread, allows it to have its own comm (refer
427 static struct thread *findnew_guest_code(struct machine *machine, in findnew_guest_code()
431 struct thread *host_thread; in findnew_guest_code()
432 struct thread *thread; in findnew_guest_code() local
438 thread = machine__findnew_thread(machine, -1, pid); in findnew_guest_code()
439 if (!thread) in findnew_guest_code()
443 if (maps__nr_maps(thread__maps(thread))) in findnew_guest_code()
444 return thread; in findnew_guest_code()
450 thread__set_guest_comm(thread, pid); in findnew_guest_code()
456 err = maps__copy_from(thread__maps(thread), thread__maps(host_thread)); in findnew_guest_code()
461 return thread; in findnew_guest_code()
464 thread__zput(thread); in findnew_guest_code()
468 struct thread *machines__findnew_guest_code(struct machines *machines, pid_t pid) in machines__findnew_guest_code()
476 struct thread *machine__findnew_guest_code(struct machine *machine, pid_t pid) in machine__findnew_guest_code()
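The comment above findnew_guest_code() explains the design: a guest-code thread lives in the guest machine, gets its own comm, and receives a copy of the host thread's maps. Below is a minimal caller sketch for the public wrappers, assuming only the declarations visible in this listing plus the usual perf util headers; the helper name and the pr_debug() include are illustrative, not from machine.c.

#include "debug.h"
#include "machine.h"
#include "thread.h"

/* Hypothetical helper: look up the guest-code thread for @pid and release it. */
static int inspect_guest_code_thread(struct machines *machines, pid_t pid)
{
	struct thread *guest = machines__findnew_guest_code(machines, pid);

	if (guest == NULL)
		return -1;

	/* The guest thread's maps were copied from the host thread. */
	pr_debug("guest code thread tid=%d\n", thread__tid(guest));

	thread__put(guest);	/* drop the reference returned by the lookup */
	return 0;
}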
517 struct thread *th, pid_t pid) in machine__update_thread_pid()
519 struct thread *leader; in machine__update_thread_pid()
545 * tid. Consequently there never should be any maps on a thread in machine__update_thread_pid()
549 pr_err("Discarding thread maps for %d:%d\n", in machine__update_thread_pid()
568 static struct thread*
572 struct thread *th; in __threads__get_last_match()
587 static struct thread*
591 struct thread *th = NULL; in threads__get_last_match()
600 __threads__set_last_match(struct threads *threads, struct thread *th) in __threads__set_last_match()
607 threads__set_last_match(struct threads *threads, struct thread *th) in threads__set_last_match()
614 * Caller must eventually drop thread->refcnt returned with a successful
615 * lookup/new thread inserted.
617 static struct thread *____machine__findnew_thread(struct machine *machine, in ____machine__findnew_thread()
624 struct thread *th; in ____machine__findnew_thread()
634 th = rb_entry(parent, struct thread_rb_node, rb_node)->thread; in ____machine__findnew_thread()
662 nd->thread = th; in ____machine__findnew_thread()
670 * thread__init_maps to find the thread leader and that would screw up in ____machine__findnew_thread()
674 pr_err("Thread init failed thread %d\n", pid); in ____machine__findnew_thread()
690 struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid) in __machine__findnew_thread()
695 struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, in machine__findnew_thread()
699 struct thread *th; in machine__findnew_thread()
707 struct thread *machine__find_thread(struct machine *machine, pid_t pid, in machine__find_thread()
711 struct thread *th; in machine__find_thread()
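The refcount comment above applies to the whole lookup family: a successful machine__findnew_thread() or machine__find_thread() returns a reference that the caller must eventually drop. A minimal sketch of that pattern (hypothetical caller, not part of machine.c):

static void touch_thread(struct machine *machine, pid_t pid, pid_t tid)
{
	struct thread *th = machine__findnew_thread(machine, pid, tid);

	if (th == NULL)
		return;

	/* ... read or update th here, e.g. thread__tid(th) or thread__comm(th) ... */

	thread__put(th);	/* balance the reference taken by the lookup */
}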
721 * So here a single thread is created for that, but actually there is a separate
722 * idle task per cpu, so there should be one 'struct thread' per cpu, but there
726 struct thread *machine__idle_thread(struct machine *machine) in machine__idle_thread()
728 struct thread *thread = machine__findnew_thread(machine, 0, 0); in machine__idle_thread() local
730 if (!thread || thread__set_comm(thread, "swapper", 0) || in machine__idle_thread()
731 thread__set_namespaces(thread, 0, NULL)) in machine__idle_thread()
734 return thread; in machine__idle_thread()
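As the comment above notes, a single swapper thread at pid 0/tid 0 stands in for every per-cpu idle task. Fetching it goes through the same refcounted lookup, so the caller still releases it; a hypothetical caller sketch:

static void with_idle_thread(struct machine *machine)
{
	struct thread *idle = machine__idle_thread(machine);

	if (idle == NULL)
		return;

	/* machine__idle_thread() already set the comm to "swapper" */
	thread__put(idle);
}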
738 struct thread *thread) in machine__thread_exec_comm() argument
741 return thread__exec_comm(thread); in machine__thread_exec_comm()
743 return thread__comm(thread); in machine__thread_exec_comm()
749 struct thread *thread = machine__findnew_thread(machine, in machine__process_comm_event() local
761 if (thread == NULL || in machine__process_comm_event()
762 __thread__set_comm(thread, event->comm.comm, sample->time, exec)) { in machine__process_comm_event()
767 thread__put(thread); in machine__process_comm_event()
776 struct thread *thread = machine__findnew_thread(machine, in machine__process_namespaces_event() local
792 if (thread == NULL || in machine__process_namespaces_event()
793 thread__set_namespaces(thread, sample->time, &event->namespaces)) { in machine__process_namespaces_event()
798 thread__put(thread); in machine__process_namespaces_event()
1138 struct thread *pos = rb_entry(nd, struct thread_rb_node, rb_node)->thread; in machine__fprintf()
1940 struct thread *thread; in machine__process_mmap2_event() local
1974 thread = machine__findnew_thread(machine, event->mmap2.pid, in machine__process_mmap2_event()
1976 if (thread == NULL) in machine__process_mmap2_event()
1983 event->mmap2.filename, thread); in machine__process_mmap2_event()
1988 ret = thread__insert_map(thread, map); in machine__process_mmap2_event()
1992 thread__put(thread); in machine__process_mmap2_event()
1999 thread__put(thread); in machine__process_mmap2_event()
2008 struct thread *thread; in machine__process_mmap_event() local
2031 thread = machine__findnew_thread(machine, event->mmap.pid, in machine__process_mmap_event()
2033 if (thread == NULL) in machine__process_mmap_event()
2041 NULL, prot, 0, NULL, event->mmap.filename, thread); in machine__process_mmap_event()
2046 ret = thread__insert_map(thread, map); in machine__process_mmap_event()
2050 thread__put(thread); in machine__process_mmap_event()
2057 thread__put(thread); in machine__process_mmap_event()
2064 struct thread *th, bool lock) in __machine__remove_thread()
2079 thread__put(nd->thread); in __machine__remove_thread()
2090 void machine__remove_thread(struct machine *machine, struct thread *th) in machine__remove_thread()
2098 struct thread *thread = machine__find_thread(machine, in machine__process_fork_event() local
2101 struct thread *parent = machine__findnew_thread(machine, in machine__process_fork_event()
2111 * There may be an existing thread that is not actually the parent, in machine__process_fork_event()
2113 * (fork) event that would have removed the thread was lost. Assume the in machine__process_fork_event()
2117 dump_printf("removing erroneous parent thread %d/%d\n", in machine__process_fork_event()
2125 /* if a thread currently exists for the thread id remove it */ in machine__process_fork_event()
2126 if (thread != NULL) { in machine__process_fork_event()
2127 machine__remove_thread(machine, thread); in machine__process_fork_event()
2128 thread__put(thread); in machine__process_fork_event()
2131 thread = machine__findnew_thread(machine, event->fork.pid, in machine__process_fork_event()
2134 * When synthesizing FORK events, we are trying to create thread in machine__process_fork_event()
2150 if (thread == NULL || parent == NULL || in machine__process_fork_event()
2151 thread__fork(thread, parent, sample->time, do_maps_clone) < 0) { in machine__process_fork_event()
2155 thread__put(thread); in machine__process_fork_event()
2164 struct thread *thread = machine__find_thread(machine, in machine__process_exit_event() local
2171 if (thread != NULL) { in machine__process_exit_event()
2173 thread__set_exited(thread, /*exited=*/true); in machine__process_exit_event()
2175 machine__remove_thread(machine, thread); in machine__process_exit_event()
2177 thread__put(thread); in machine__process_exit_event()
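The fork and exit handlers above manage struct thread lifetime: a FORK event removes any stale thread for the tid, creates the child, and links it to its parent via thread__fork(), while an EXIT event marks the thread exited and removes it from the machine. Below is a hedged sketch of routing task records to these handlers; the dispatcher, the include path, and the assumption that both handlers take (machine, event, sample) are illustrative, only the handler names come from the listing.

#include "event.h"	/* union perf_event, PERF_RECORD_* (assumed include path) */
#include "machine.h"

static int dispatch_task_event(struct machine *machine,
			       union perf_event *event,
			       struct perf_sample *sample)
{
	switch (event->header.type) {
	case PERF_RECORD_FORK:
		return machine__process_fork_event(machine, event, sample);
	case PERF_RECORD_EXIT:
		return machine__process_exit_event(machine, event, sample);
	default:
		return 0;	/* other record types are handled elsewhere */
	}
}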
2233 static void ip__resolve_ams(struct thread *thread, in ip__resolve_ams() argument
2247 thread__find_cpumode_addr_location(thread, ip, &al); in ip__resolve_ams()
2260 static void ip__resolve_data(struct thread *thread, in ip__resolve_data() argument
2268 thread__find_symbol(thread, m, addr, &al); in ip__resolve_data()
2289 ip__resolve_ams(al->thread, &mi->iaddr, sample->ip); in sample__resolve_mem()
2290 ip__resolve_data(al->thread, al->cpumode, &mi->daddr, in sample__resolve_mem()
2326 static int add_callchain_ip(struct thread *thread, in add_callchain_ip() argument
2348 thread__find_cpumode_addr_location(thread, ip, &al); in add_callchain_ip()
2374 thread__find_symbol(thread, *cpumode, ip, &al); in add_callchain_ip()
2423 ip__resolve_ams(al->thread, &bi[i].to, entries[i].to); in sample__resolve_bstack()
2424 ip__resolve_ams(al->thread, &bi[i].from, entries[i].from); in sample__resolve_bstack()
2494 static int lbr_callchain_add_kernel_ip(struct thread *thread, in lbr_callchain_add_kernel_ip() argument
2508 err = add_callchain_ip(thread, cursor, parent, in lbr_callchain_add_kernel_ip()
2518 err = add_callchain_ip(thread, cursor, parent, in lbr_callchain_add_kernel_ip()
2528 static void save_lbr_cursor_node(struct thread *thread, in save_lbr_cursor_node() argument
2532 struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread); in save_lbr_cursor_node()
2553 static int lbr_callchain_add_lbr_ip(struct thread *thread, in lbr_callchain_add_lbr_ip() argument
2574 if (thread__lbr_stitch(thread)) { in lbr_callchain_add_lbr_ip()
2589 err = add_callchain_ip(thread, cursor, parent, in lbr_callchain_add_lbr_ip()
2602 if (thread__lbr_stitch(thread) && (cursor->pos != cursor->nr)) { in lbr_callchain_add_lbr_ip()
2614 err = add_callchain_ip(thread, cursor, parent, in lbr_callchain_add_lbr_ip()
2620 save_lbr_cursor_node(thread, cursor, i); in lbr_callchain_add_lbr_ip()
2629 err = add_callchain_ip(thread, cursor, parent, in lbr_callchain_add_lbr_ip()
2635 save_lbr_cursor_node(thread, cursor, i); in lbr_callchain_add_lbr_ip()
2643 err = add_callchain_ip(thread, cursor, parent, in lbr_callchain_add_lbr_ip()
2654 static int lbr_callchain_add_stitched_lbr_ip(struct thread *thread, in lbr_callchain_add_stitched_lbr_ip() argument
2657 struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread); in lbr_callchain_add_stitched_lbr_ip()
2679 static struct stitch_list *get_stitch_node(struct thread *thread) in get_stitch_node() argument
2681 struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread); in get_stitch_node()
2695 static bool has_stitched_lbr(struct thread *thread, in has_stitched_lbr() argument
2705 struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread); in has_stitched_lbr()
2751 stitch_node = get_stitch_node(thread); in has_stitched_lbr()
2767 static bool alloc_lbr_stitch(struct thread *thread, unsigned int max_lbr) in alloc_lbr_stitch() argument
2769 if (thread__lbr_stitch(thread)) in alloc_lbr_stitch()
2772 thread__set_lbr_stitch(thread, zalloc(sizeof(struct lbr_stitch))); in alloc_lbr_stitch()
2773 if (!thread__lbr_stitch(thread)) in alloc_lbr_stitch()
2776 thread__lbr_stitch(thread)->prev_lbr_cursor = in alloc_lbr_stitch()
2778 if (!thread__lbr_stitch(thread)->prev_lbr_cursor) in alloc_lbr_stitch()
2781 INIT_LIST_HEAD(&thread__lbr_stitch(thread)->lists); in alloc_lbr_stitch()
2782 INIT_LIST_HEAD(&thread__lbr_stitch(thread)->free_lists); in alloc_lbr_stitch()
2787 free(thread__lbr_stitch(thread)); in alloc_lbr_stitch()
2788 thread__set_lbr_stitch(thread, NULL); in alloc_lbr_stitch()
2791 thread__set_lbr_stitch_enable(thread, false); in alloc_lbr_stitch()
2802 static int resolve_lbr_callchain_sample(struct thread *thread, in resolve_lbr_callchain_sample() argument
2827 if (thread__lbr_stitch_enable(thread) && !sample->no_hw_idx && in resolve_lbr_callchain_sample()
2828 (max_lbr > 0) && alloc_lbr_stitch(thread, max_lbr)) { in resolve_lbr_callchain_sample()
2829 lbr_stitch = thread__lbr_stitch(thread); in resolve_lbr_callchain_sample()
2831 stitched_lbr = has_stitched_lbr(thread, sample, in resolve_lbr_callchain_sample()
2844 err = lbr_callchain_add_kernel_ip(thread, cursor, sample, in resolve_lbr_callchain_sample()
2850 err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent, in resolve_lbr_callchain_sample()
2856 err = lbr_callchain_add_stitched_lbr_ip(thread, cursor); in resolve_lbr_callchain_sample()
2863 err = lbr_callchain_add_stitched_lbr_ip(thread, cursor); in resolve_lbr_callchain_sample()
2867 err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent, in resolve_lbr_callchain_sample()
2873 err = lbr_callchain_add_kernel_ip(thread, cursor, sample, in resolve_lbr_callchain_sample()
2885 static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread, in find_prev_cpumode() argument
2897 err = add_callchain_ip(thread, cursor, parent, in find_prev_cpumode()
2907 struct thread *thread, int usr_idx) in get_leaf_frame_caller() argument
2909 if (machine__normalized_is(maps__machine(thread__maps(thread)), "arm64")) in get_leaf_frame_caller()
2910 return get_leaf_frame_caller_aarch64(sample, thread, usr_idx); in get_leaf_frame_caller()
2915 static int thread__resolve_callchain_sample(struct thread *thread, in thread__resolve_callchain_sample() argument
2939 err = resolve_lbr_callchain_sample(thread, cursor, sample, parent, in thread__resolve_callchain_sample()
2950 skip_idx = arch_skip_callchain_idx(thread, chain); in thread__resolve_callchain_sample()
3002 err = add_callchain_ip(thread, cursor, parent, in thread__resolve_callchain_sample()
3009 err = add_callchain_ip(thread, cursor, parent, root_al, in thread__resolve_callchain_sample()
3027 err = find_prev_cpumode(chain, thread, cursor, parent, root_al, in thread__resolve_callchain_sample()
3049 err = find_prev_cpumode(chain, thread, cursor, parent, in thread__resolve_callchain_sample()
3067 leaf_frame_caller = get_leaf_frame_caller(sample, thread, usr_idx); in thread__resolve_callchain_sample()
3076 err = add_callchain_ip(thread, cursor, parent, in thread__resolve_callchain_sample()
3084 err = add_callchain_ip(thread, cursor, parent, in thread__resolve_callchain_sample()
3162 static int thread__resolve_callchain_unwind(struct thread *thread, in thread__resolve_callchain_unwind() argument
3179 thread, sample, max_stack, false); in thread__resolve_callchain_unwind()
3182 int thread__resolve_callchain(struct thread *thread, in thread__resolve_callchain() argument
3198 ret = thread__resolve_callchain_sample(thread, cursor, in thread__resolve_callchain()
3204 ret = thread__resolve_callchain_unwind(thread, cursor, in thread__resolve_callchain()
3208 ret = thread__resolve_callchain_unwind(thread, cursor, in thread__resolve_callchain()
3213 ret = thread__resolve_callchain_sample(thread, cursor, in thread__resolve_callchain()
3223 int (*fn)(struct thread *thread, void *p), in machine__for_each_thread() argument
3237 rc = fn(trb->thread, priv); in machine__for_each_thread()
3246 int (*fn)(struct thread *thread, void *p), in machines__for_each_thread() argument
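machine__for_each_thread() and machines__for_each_thread() walk every thread and pass each one to the callback together with the caller's private pointer; the rc = fn(trb->thread, priv) line above suggests that a non-zero return stops the walk and is propagated. A minimal counting example (hypothetical callback, not part of machine.c):

static int count_one(struct thread *thread __maybe_unused, void *p)
{
	(*(int *)p)++;
	return 0;	/* keep walking; non-zero would stop the iteration */
}

static int count_threads(struct machine *machine)
{
	int n = 0;

	machine__for_each_thread(machine, count_one, &n);
	return n;
}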
3277 struct thread *thread; in machine__set_current_tid() local
3291 thread = machine__findnew_thread(machine, pid, tid); in machine__set_current_tid()
3292 if (!thread) in machine__set_current_tid()
3295 thread__set_cpu(thread, cpu); in machine__set_current_tid()
3296 thread__put(thread); in machine__set_current_tid()