Lines Matching +full:0 +full:x2c000 (search hits in tools/perf/util/machine.c)
65 for (i = 0; i < THREADS__TABLE_SIZE; i++) { in machine__threads_init()
69 threads->nr = 0; in machine__threads_init()
97 machine->pid) < 0) in machine__set_mmap_name()
100 return machine->mmap_name ? 0 : -ENOMEM; in machine__set_mmap_name()
108 thread__set_comm(thread, comm, 0); in thread__set_guest_comm()
115 memset(machine, 0, sizeof(*machine)); in machine__init()
130 machine->id_hdr_size = 0; in machine__init()
133 machine->kernel_start = 0; in machine__init()
155 err = 0; in machine__init()
163 return 0; in machine__init()
173 if (machine__create_kernel_maps(machine) < 0) in machine__new_host()
192 if (machine && machine__load_kallsyms(machine, "/proc/kallsyms") <= 0) { in machine__new_kallsyms()
227 for (i = 0; i < THREADS__TABLE_SIZE; i++) { in machine__delete_threads()
258 for (i = 0; i < THREADS__TABLE_SIZE; i++) { in machine__exit()
296 if (machine__init(machine, root_dir, pid) != 0) { in machines__add()
720 * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
728 struct thread *thread = machine__findnew_thread(machine, 0, 0); in machine__idle_thread()
730 if (!thread || thread__set_comm(thread, "swapper", 0) || in machine__idle_thread()
731 thread__set_namespaces(thread, 0, NULL)) in machine__idle_thread()
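Read together, the cluster at 720-731 documents machine__idle_thread(): the idle task is keyed by pid == tid == 0 and given the comm "swapper". Below is a sketch reassembled from those fragments, assuming perf's internal machine.h/thread.h declarations; the error message is illustrative, not quoted from the file.

struct thread *machine__idle_thread(struct machine *machine)
{
	/* Idle task: pid == tid == 0, traditionally named "swapper". */
	struct thread *thread = machine__findnew_thread(machine, 0, 0);

	if (!thread || thread__set_comm(thread, "swapper", 0) ||
	    thread__set_namespaces(thread, 0, NULL))
		pr_err("problem inserting idle task for machine pid %d\n",
		       machine->pid);	/* wording is an assumption */

	return thread;
}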
753 int err = 0; in machine__process_comm_event()
779 int err = 0; in machine__process_namespaces_event()
816 return 0; in machine__process_cgroup_event()
824 return 0; in machine__process_lost_event()
832 return 0; in machine__process_lost_samples_event()
865 return 0; in machine__process_aux_event()
873 return 0; in machine__process_itrace_start_event()
881 return 0; in machine__process_aux_output_hw_id_event()
889 return 0; in machine__process_switch_event()
900 int err = 0; in machine__process_ksymbol_register()
910 map = map__new2(0, dso); in machine__process_ksymbol_register()
948 0, 0, event->ksymbol.name); in machine__process_ksymbol_register()
969 return 0; in machine__process_ksymbol_unregister()
981 return 0; in machine__process_ksymbol_unregister()
1008 return 0; in machine__process_text_poke()
1012 return 0; in machine__process_text_poke()
1036 return 0; in machine__process_text_poke()
1106 size_t printed = 0; in machine__fprintf_vmlinux_path()
1113 printed += fprintf(fp, "[0] %s\n", filename); in machine__fprintf_vmlinux_path()
1116 for (i = 0; i < vmlinux_path__nr_entries; ++i) in machine__fprintf_vmlinux_path()
1129 for (i = 0; i < THREADS__TABLE_SIZE; i++) { in machine__fprintf()
1196 u64 addr = 0; in machine__get_running_kernel_start()
1201 return 0; in machine__get_running_kernel_start()
1203 for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) { in machine__get_running_kernel_start()
1223 return 0; in machine__get_running_kernel_start()
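The loop at 1203 walks a small NULL-terminated table of candidate reference symbols and returns the address of the first one found in kallsyms as the running kernel's start. Assuming the conventional candidates, the table looks roughly like this (a sketch, not quoted from the file):

/* Candidate start symbols tried in order by machine__get_running_kernel_start();
 * the exact contents are an assumption based on the usual kernel layout. */
static const char *const ref_reloc_sym_names[] = { "_text", "_stext", NULL };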
1271 for (i = 0; i < ARRAY_SIZE(syms); i++) { in find_entry_trampoline()
1277 return 0; in find_entry_trampoline()
1284 #define X86_64_CPU_ENTRY_AREA_PER_CPU 0xfffffe0000000000ULL
1285 #define X86_64_CPU_ENTRY_AREA_SIZE 0x2c000
1286 #define X86_64_ENTRY_TRAMPOLINE 0x6000
1300 return 0; in machine__map_x86_64_entry_trampolines_cb()
1307 return 0; in machine__map_x86_64_entry_trampolines_cb()
1328 return 0; in machine__map_x86_64_entry_trampolines()
1332 return 0; in machine__map_x86_64_entry_trampolines()
1337 for (cpu = 0; cpu < nr_cpus_avail; cpu++) { in machine__map_x86_64_entry_trampolines()
1349 if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0) in machine__map_x86_64_entry_trampolines()
1355 return 0; in machine__map_x86_64_entry_trampolines()
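The constants at 1284-1286 describe the x86-64 cpu_entry_area layout, and the loop at 1337 adds one synthetic kernel map per CPU for the entry trampoline (via machine__create_extra_kernel_map at 1349) so samples landing there still resolve against the kernel DSO. The standalone sketch below shows the per-CPU address arithmetic those constants imply; the formula base + cpu * area_size + trampoline_offset is drawn from how the constants are typically combined, not quoted from the file.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define X86_64_CPU_ENTRY_AREA_PER_CPU	0xfffffe0000000000ULL
#define X86_64_CPU_ENTRY_AREA_SIZE	0x2c000
#define X86_64_ENTRY_TRAMPOLINE		0x6000

/* Virtual address of cpu N's entry trampoline inside its cpu_entry_area. */
static uint64_t entry_trampoline_addr(unsigned int cpu)
{
	return X86_64_CPU_ENTRY_AREA_PER_CPU +
	       (uint64_t)cpu * X86_64_CPU_ENTRY_AREA_SIZE +
	       X86_64_ENTRY_TRAMPOLINE;
}

int main(void)
{
	for (unsigned int cpu = 0; cpu < 4; cpu++)
		printf("cpu%u: %#018" PRIx64 "\n", cpu, entry_trampoline_addr(cpu));
	return 0;
}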
1361 return 0; in machine__create_extra_kernel_maps()
1371 machine->vmlinux_map = map__new2(0, kernel); in __machine__create_kernel_maps()
1399 int ret = 0; in machines__create_guest_kernel_maps()
1401 int i, items = 0; in machines__create_guest_kernel_maps()
1414 if (items <= 0) in machines__create_guest_kernel_maps()
1416 for (i = 0; i < items; i++) { in machines__create_guest_kernel_maps()
1417 if (!isdigit(namelist[i]->d_name[0])) { in machines__create_guest_kernel_maps()
1422 if ((*endp != '\0') || in machines__create_guest_kernel_maps()
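Lines 1416-1422 show machines__create_guest_kernel_maps() filtering scandir() results: only purely numeric directory names are treated as guest pids. A standalone sketch of that filter follows; the helper name and the surrounding /proc-scanning context are assumptions.

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

/* Return the pid encoded in a directory name, or -1 if the name is not
 * purely numeric (mirrors the isdigit()/strtol()/endp checks above). */
static pid_t parse_guest_pid(const char *d_name)
{
	char *endp;
	long pid;

	if (!isdigit((unsigned char)d_name[0]))
		return -1;
	pid = strtol(d_name, &endp, 10);
	if (*endp != '\0')		/* e.g. "123abc" is rejected */
		return -1;
	return (pid_t)pid;
}

int main(void)
{
	printf("%d %d\n", parse_guest_pid("4242"), parse_guest_pid("self"));
	return 0;
}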
1477 if (ret > 0) { in machine__load_kallsyms()
1496 if (ret > 0) in machine__load_vmlinux_path()
1525 *tmp = '\0'; in get_kernel_version()
1543 return 0; in maps__set_module_path()
1562 return 0; in maps__set_module_path()
1569 int ret = 0; in maps__set_modules_path_dir()
1591 if (depth == 0) { in maps__set_modules_path_dir()
1598 if (ret < 0) in maps__set_modules_path_dir()
1635 return maps__set_modules_path_dir(machine__kernel_maps(machine), modules_path, 0); in machine__set_modules_path()
1641 return 0; in arch__fix_module_text_start()
1650 if (arch__fix_module_text_start(&start, &size, name) < 0) in machine__create_module()
1660 return 0; in machine__create_module()
1682 return 0; in machine__create_modules()
1686 return 0; in machine__create_modules()
1698 if (start == 0 && end == 0) in machine__set_kernel_mmap()
1699 map__set_end(machine->vmlinux_map, ~0ULL); in machine__set_kernel_mmap()
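Lines 1698-1699 carry the interesting detail of machine__set_kernel_mmap(): a zero start and end (as produced by some synthesized MMAP events) is treated as "bounds unknown", and the kernel map is extended to ~0ULL so address lookups still land in it. A sketch reassembled from those fragments, assuming perf's map accessor API (map__set_start()/map__set_end()):

static void machine__set_kernel_mmap(struct machine *machine, u64 start, u64 end)
{
	map__set_start(machine->vmlinux_map, start);
	map__set_end(machine->vmlinux_map, end);
	/*
	 * Some perf.data files carry a zero-sized synthesized MMAP event
	 * for the kernel; fall back to an open-ended map in that case.
	 */
	if (start == 0 && end == 0)
		map__set_end(machine->vmlinux_map, ~0ULL);
}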
1724 u64 start = 0, end = ~0ULL; in machine__create_kernel_maps()
1731 if (ret < 0) in machine__create_kernel_maps()
1734 if (symbol_conf.use_modules && machine__create_modules(machine) < 0) { in machine__create_kernel_maps()
1756 if (ret < 0) in machine__create_kernel_maps()
1763 if (end == ~0ULL) { in machine__create_kernel_maps()
1817 return 0; in machine__process_kernel_mmap_event()
1824 is_kernel_mmap = memcmp(xm->name, mmap_name, strlen(mmap_name) - 1) == 0; in machine__process_kernel_mmap_event()
1832 is_kernel_mmap = memcmp(xm->name, mmap_name, strlen(mmap_name) - 1) == 0; in machine__process_kernel_mmap_event()
1834 if (xm->name[0] == '/' || in machine__process_kernel_mmap_event()
1835 (!is_kernel_mmap && xm->name[0] == '[')) { in machine__process_kernel_mmap_event()
1894 if (__machine__create_kernel_maps(machine, kernel) < 0) { in machine__process_kernel_mmap_event()
1902 if (machine__update_kernel_mmap(machine, xm->start, xm->end) < 0) { in machine__process_kernel_mmap_event()
1915 if (xm->pgoff != 0) { in machine__process_kernel_mmap_event()
1931 return 0; in machine__process_kernel_mmap_event()
1949 int ret = 0; in machine__process_mmap2_event()
1969 if (ret < 0) in machine__process_mmap2_event()
1971 return 0; in machine__process_mmap2_event()
1994 return 0; in machine__process_mmap2_event()
2002 return 0; in machine__process_mmap2_event()
2010 u32 prot = 0; in machine__process_mmap_event()
2011 int ret = 0; in machine__process_mmap_event()
2026 if (ret < 0) in machine__process_mmap_event()
2028 return 0; in machine__process_mmap_event()
2041 NULL, prot, 0, NULL, event->mmap.filename, thread); in machine__process_mmap_event()
2052 return 0; in machine__process_mmap_event()
2060 return 0; in machine__process_mmap_event()
2077 BUG_ON(refcount_read(thread__refcnt(th)) == 0); in __machine__remove_thread()
2105 int err = 0; in machine__process_fork_event()
2151 thread__fork(thread, parent, sample->time, do_maps_clone) < 0) { in machine__process_fork_event()
2178 return 0; in machine__process_exit_event()
2230 return regexec(regex, sym->name, 0, NULL, 0) == 0; in symbol__match_regex()
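Line 2230 is essentially the whole body of symbol__match_regex(): a symbol matches when regexec() returns 0. A standalone illustration of that POSIX regex call; the pattern and symbol names here are made up.

#include <regex.h>
#include <stdbool.h>
#include <stdio.h>

static bool symbol_name_matches(const regex_t *regex, const char *name)
{
	/* regexec() returns 0 on match, REG_NOMATCH otherwise. */
	return regexec(regex, name, 0, NULL, 0) == 0;
}

int main(void)
{
	regex_t re;

	if (regcomp(&re, "^sys_", REG_EXTENDED))
		return 1;
	printf("%d %d\n", symbol_name_matches(&re, "sys_read"),
	       symbol_name_matches(&re, "schedule"));
	regfree(&re);
	return 0;
}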
2255 ams->phys_addr = 0; in ip__resolve_ams()
2256 ams->data_page_size = 0; in ip__resolve_ams()
2339 int nr_loop_iter = 0, err = 0; in add_callchain_ip()
2340 u64 iter_cycles = 0; in add_callchain_ip()
2344 al.filtered = 0; in add_callchain_ip()
2422 for (i = 0; i < bs->nr; i++) { in sample__resolve_bstack()
2436 iter->cycles = 0; in save_iterations()
2438 for (i = 0; i < nr; i++) in save_iterations()
2444 #define NO_ENTRY 0xff
2459 for (i = 0; i < nr; i++) { in remove_loops()
2468 off = 0; in remove_loops()
2476 if (j > 0) { in remove_loops()
2507 for (i = 0; i < end + 1; i++) { in lbr_callchain_add_kernel_ip()
2514 return 0; in lbr_callchain_add_kernel_ip()
2517 for (i = end; i >= 0; i--) { in lbr_callchain_add_kernel_ip()
2525 return 0; in lbr_callchain_add_kernel_ip()
2579 for (i = 0; i < (int)(cursor->nr - 1); i++) in lbr_callchain_add_lbr_ip()
2586 ip = entries[0].to; in lbr_callchain_add_lbr_ip()
2587 flags = &entries[0].flags; in lbr_callchain_add_lbr_ip()
2588 *branch_from = entries[0].from; in lbr_callchain_add_lbr_ip()
2599 * But does not need to save current cursor node for entry 0. in lbr_callchain_add_lbr_ip()
2611 for (i = 0; i < lbr_nr; i++) { in lbr_callchain_add_lbr_ip()
2622 return 0; in lbr_callchain_add_lbr_ip()
2626 for (i = lbr_nr - 1; i >= 0; i--) { in lbr_callchain_add_lbr_ip()
2638 if (lbr_nr > 0) { in lbr_callchain_add_lbr_ip()
2640 ip = entries[0].to; in lbr_callchain_add_lbr_ip()
2641 flags = &entries[0].flags; in lbr_callchain_add_lbr_ip()
2642 *branch_from = entries[0].from; in lbr_callchain_add_lbr_ip()
2651 return 0; in lbr_callchain_add_lbr_ip()
2676 return 0; in lbr_callchain_add_stitched_lbr_ip()
2706 int i, j, nr_identical_branches = 0; in has_stitched_lbr()
2730 for (i = distance, j = cur_stack->nr - 1; (i >= 0) && (j >= 0); i--, j--) { in has_stitched_lbr()
2799 * 0 no available LBR callchain information, should try fp
2815 u64 branch_from = 0; in resolve_lbr_callchain_sample()
2818 for (i = 0; i < chain_nr; i++) { in resolve_lbr_callchain_sample()
2825 return 0; in resolve_lbr_callchain_sample()
2828 (max_lbr > 0) && alloc_lbr_stitch(thread, max_lbr)) { in resolve_lbr_callchain_sample()
2882 return (err < 0) ? err : 0; in resolve_lbr_callchain_sample()
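The comment fragment at 2799 spells out the return contract of resolve_lbr_callchain_sample(): a positive value means the LBR callchain was resolved, 0 means no LBR data was available and the caller should fall back to frame pointers, and a negative value is an error. The caller-side pattern visible at 2941-2943 uses it roughly like this (fragment only, perf-internal context assumed):

	err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
					   root_al, max_stack,
					   !env ? 0 : env->max_branches);
	if (err)
		return (err < 0) ? err : 0;
	/* err == 0: no LBR data, continue with the frame-pointer chain */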
2891 int err = 0; in find_prev_cpumode()
2893 while (--ent >= 0) { in find_prev_cpumode()
2899 false, NULL, NULL, 0); in find_prev_cpumode()
2912 return 0; in get_leaf_frame_caller()
2926 int chain_nr = 0; in thread__resolve_callchain_sample()
2930 int first_call = 0; in thread__resolve_callchain_sample()
2941 !env ? 0 : env->max_branches); in thread__resolve_callchain_sample()
2943 return (err < 0) ? err : 0; in thread__resolve_callchain_sample()
2974 for (i = 0; i < nr; i++) { in thread__resolve_callchain_sample()
2998 memset(iter, 0, sizeof(struct iterations) * nr); in thread__resolve_callchain_sample()
3001 for (i = 0; i < nr; i++) { in thread__resolve_callchain_sample()
3012 &iter[i], 0); in thread__resolve_callchain_sample()
3019 if (chain_nr == 0) in thread__resolve_callchain_sample()
3020 return 0; in thread__resolve_callchain_sample()
3030 return (err < 0) ? err : 0; in thread__resolve_callchain_sample()
3032 for (i = first_call, nr_entries = 0; in thread__resolve_callchain_sample()
3052 return (err < 0) ? err : 0; in thread__resolve_callchain_sample()
3065 if (usr_idx >= 0 && chain->ips[usr_idx] == PERF_CONTEXT_USER) { in thread__resolve_callchain_sample()
3078 false, NULL, NULL, 0); in thread__resolve_callchain_sample()
3080 return (err < 0) ? err : 0; in thread__resolve_callchain_sample()
3086 false, NULL, NULL, 0); in thread__resolve_callchain_sample()
3089 return (err < 0) ? err : 0; in thread__resolve_callchain_sample()
3092 return 0; in thread__resolve_callchain_sample()
3128 NULL, 0, 0, 0, ilist->srcline); in append_inlines()
3130 if (ret != 0) in append_inlines()
3145 return 0; in unwind_entry()
3147 if (append_inlines(cursor, &entry->ms, entry->ip) == 0) in unwind_entry()
3148 return 0; in unwind_entry()
3159 false, NULL, 0, 0, 0, srcline); in unwind_entry()
3171 return 0; in thread__resolve_callchain_unwind()
3176 return 0; in thread__resolve_callchain_unwind()
3190 int ret = 0; in thread__resolve_callchain()
3228 int rc = 0; in machine__for_each_thread()
3231 for (i = 0; i < THREADS__TABLE_SIZE; i++) { in machine__for_each_thread()
3238 if (rc != 0) in machine__for_each_thread()
3250 int rc = 0; in machines__for_each_thread()
3253 if (rc != 0) in machines__for_each_thread()
3260 if (rc != 0) in machines__for_each_thread()
3268 if (cpu < 0 || (size_t)cpu >= machine->current_tid_sz) in machine__get_current_tid()
3280 if (cpu < 0) in machine__set_current_tid()
3298 return 0; in machine__set_current_tid()
3317 return machine ? perf_env__nr_cpus_avail(machine->env) : 0; in machine__nr_cpus_avail()
3323 int err = 0; in machine__get_kernel_start()
3401 int err = 0; in machine__for_each_dso()