Lines Matching "+full:cpu +full:- +full:core"

1 // SPDX-License-Identifier: GPL-2.0
42 #define FD(evt, cpu) (*(int *)xyarray__entry(evt->core.fd, cpu, 0)) argument
50 struct perf_cpu cpu; in bperf_load_program() local
51 int total_cpus = cpu__max_cpu().cpu; in bperf_load_program()
58 return -1; in bperf_load_program()
61 skel->rodata->num_cpus = total_cpus; in bperf_load_program()
62 skel->rodata->num_events = evlist->core.nr_entries / nr_cgroups; in bperf_load_program()
65 skel->rodata->use_cgroup_v2 = 1; in bperf_load_program()
67 BUG_ON(evlist->core.nr_entries % nr_cgroups != 0); in bperf_load_program()
69 /* we need one copy of events per cpu for reading */ in bperf_load_program()
70 map_size = total_cpus * evlist->core.nr_entries / nr_cgroups; in bperf_load_program()
71 bpf_map__set_max_entries(skel->maps.events, map_size); in bperf_load_program()
72 bpf_map__set_max_entries(skel->maps.cgrp_idx, nr_cgroups); in bperf_load_program()
73 /* previous result is saved in a per-cpu array */ in bperf_load_program()
74 map_size = evlist->core.nr_entries / nr_cgroups; in bperf_load_program()
75 bpf_map__set_max_entries(skel->maps.prev_readings, map_size); in bperf_load_program()
76 /* cgroup result needs all events (per-cpu) */ in bperf_load_program()
77 map_size = evlist->core.nr_entries; in bperf_load_program()
78 bpf_map__set_max_entries(skel->maps.cgrp_readings, map_size); in bperf_load_program()
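The three bpf_map__set_max_entries() calls above size the skeleton's maps from the event and cgroup counts. A minimal standalone sketch of that arithmetic; the concrete numbers (4 CPUs, 2 cgroups, 6 evlist entries) are assumptions for illustration, not values from the source:

    #include <stdio.h>

    int main(void)
    {
        /* assumed example values, not taken from the source */
        int total_cpus = 4;      /* cpu__max_cpu().cpu */
        int nr_cgroups = 2;
        int nr_entries = 6;      /* evlist->core.nr_entries */

        /* events: one copy of the events per cpu for reading */
        printf("events:        %d\n", total_cpus * nr_entries / nr_cgroups); /* 12 */
        /* prev_readings: previous result, one slot per event */
        printf("prev_readings: %d\n", nr_entries / nr_cgroups);              /* 3 */
        /* cgrp_readings: all events, per cgroup (values held per cpu) */
        printf("cgrp_readings: %d\n", nr_entries);                           /* 6 */
        return 0;
    }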
88 err = -1; in bperf_load_program()
91 if (evsel__open_per_cpu(cgrp_switch, evlist->core.all_cpus, -1) < 0) { in bperf_load_program()
96 perf_cpu_map__for_each_cpu(cpu, i, evlist->core.all_cpus) { in bperf_load_program()
97 link = bpf_program__attach_perf_event(skel->progs.on_cgrp_switch, in bperf_load_program()
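The loop above opens the cgroup-switch event on each cpu and hooks skel->progs.on_cgrp_switch to it. A minimal sketch of one such attach, assuming the PERF_COUNT_SW_CGROUP_SWITCHES software event that the program name suggests; the helper name is illustrative and error handling is trimmed:

    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>
    #include <bpf/libbpf.h>

    /* open a software event on one cpu and attach a BPF program to it;
     * 'prog' is assumed to come from an already-loaded skeleton */
    static struct bpf_link *attach_on_cpu(struct bpf_program *prog, int cpu)
    {
        struct perf_event_attr attr = {
            .type = PERF_TYPE_SOFTWARE,
            .config = PERF_COUNT_SW_CGROUP_SWITCHES, /* assumed config */
            .size = sizeof(attr),
        };
        int fd = syscall(SYS_perf_event_open, &attr, -1 /* pid */, cpu,
                         -1 /* group_fd */, 0 /* flags */);

        if (fd < 0)
            return NULL;
        return bpf_program__attach_perf_event(prog, fd);
    }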
107 * Update cgrp_idx map from cgroup-id to event index. in bperf_load_program()
113 if (cgrp == NULL || evsel->cgrp == leader_cgrp) { in bperf_load_program()
114 leader_cgrp = evsel->cgrp; in bperf_load_program()
115 evsel->cgrp = NULL; in bperf_load_program()
118 err = evsel__open_per_cpu(evsel, evsel->core.cpus, -1); in bperf_load_program()
120 evsel->supported = true; in bperf_load_program()
122 map_fd = bpf_map__fd(skel->maps.events); in bperf_load_program()
123 perf_cpu_map__for_each_cpu(cpu, j, evsel->core.cpus) { in bperf_load_program()
125 __u32 idx = evsel->core.idx * total_cpus + cpu.cpu; in bperf_load_program()
130 evsel->cgrp = leader_cgrp; in bperf_load_program()
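The idx expression above flattens a two-dimensional (event, cpu) pair into the one-dimensional events map. A tiny sketch of that indexing scheme; the helper name is illustrative:

    #include <linux/types.h>

    /* entry for event 'evt' on cpu 'cpu' lives at evt * total_cpus + cpu,
     * matching: __u32 idx = evsel->core.idx * total_cpus + cpu.cpu */
    static __u32 event_cpu_idx(__u32 evt, __u32 total_cpus, __u32 cpu)
    {
        return evt * total_cpus + cpu;
    }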
133 if (evsel->cgrp == cgrp) in bperf_load_program()
136 cgrp = evsel->cgrp; in bperf_load_program()
139 pr_debug("Failed to get cgroup id for %s\n", cgrp->name); in bperf_load_program()
140 cgrp->id = 0; in bperf_load_program()
143 map_fd = bpf_map__fd(skel->maps.cgrp_idx); in bperf_load_program()
144 err = bpf_map_update_elem(map_fd, &cgrp->id, &i, BPF_ANY); in bperf_load_program()
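The update above fills the cgrp_idx map, keyed by the 64-bit cgroup id with the cgroup's index as the value. A minimal sketch of that fill loop; the 'ids' array and the helper name are illustrative, not from the source:

    #include <linux/types.h>
    #include <bpf/bpf.h>

    /* map each cgroup id to its position in the list */
    static int fill_cgrp_idx(int map_fd, const __u64 *ids, __u32 nr)
    {
        __u32 i;

        for (i = 0; i < nr; i++) {
            if (bpf_map_update_elem(map_fd, &ids[i], &i, BPF_ANY) < 0)
                return -1;
        }
        return 0;
    }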
157 prog_fd = bpf_program__fd(skel->progs.trigger_read); in bperf_load_program()
161 "Therefore, --for-each-cgroup might show inaccurate readings\n"); in bperf_load_program()
174 evsel->bperf_leader_prog_fd = -1; in bperf_cgrp__load()
175 evsel->bperf_leader_link_fd = -1; in bperf_cgrp__load()
177 if (!bperf_loaded && bperf_load_program(evsel->evlist)) in bperf_cgrp__load()
178 return -1; in bperf_cgrp__load()
182 evsel->follower_skel = (struct bperf_follower_bpf *)skel; in bperf_cgrp__load()
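bperf_cgrp__load() runs the expensive program load only once across all evsels, guarded by a flag, and then points each evsel at the shared skeleton. A minimal self-contained sketch of that load-once pattern; load_program() is a stand-in for bperf_load_program(evsel->evlist):

    #include <stdbool.h>

    static bool bperf_loaded;

    static int load_program(void)
    {
        return 0;    /* stand-in for bperf_load_program() */
    }

    /* only the first caller loads; later callers see the flag set */
    static int load_once(void)
    {
        if (!bperf_loaded && load_program())
            return -1;

        bperf_loaded = true;
        return 0;
    }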
188 int cpu __maybe_unused, int fd __maybe_unused) in bperf_cgrp__install_pe()
195 * trigger the leader prog on each cpu, so the cgrp_reading map could get
200 struct perf_cpu cpu; in bperf_cgrp__sync_counters() local
202 int prog_fd = bpf_program__fd(skel->progs.trigger_read); in bperf_cgrp__sync_counters()
204 perf_cpu_map__for_each_cpu(cpu, idx, evlist->core.all_cpus) in bperf_cgrp__sync_counters()
205 bperf_trigger_reading(prog_fd, cpu.cpu); in bperf_cgrp__sync_counters()
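The sync loop above runs bperf_trigger_reading() once per cpu so the BPF program samples that cpu's counters into the maps. A minimal sketch of what such a trigger is understood to do, using BPF_PROG_TEST_RUN pinned to one cpu; the helper name is illustrative:

    #include <linux/bpf.h>
    #include <bpf/bpf.h>

    /* run the trigger program once on the given cpu */
    static int trigger_on_cpu(int prog_fd, int cpu)
    {
        LIBBPF_OPTS(bpf_test_run_opts, opts,
            .flags = BPF_F_TEST_RUN_ON_CPU,
            .cpu = cpu,
        );

        return bpf_prog_test_run_opts(prog_fd, &opts);
    }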
212 if (evsel->core.idx) in bperf_cgrp__enable()
215 bperf_cgrp__sync_counters(evsel->evlist); in bperf_cgrp__enable()
217 skel->bss->enabled = 1; in bperf_cgrp__enable()
223 if (evsel->core.idx) in bperf_cgrp__disable()
226 bperf_cgrp__sync_counters(evsel->evlist); in bperf_cgrp__disable()
228 skel->bss->enabled = 0; in bperf_cgrp__disable()
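Enable and disable act only through the leader (the `if (evsel->core.idx)` early return skips every other evsel) and simply flip a global in the BPF object's .bss, which skel->bss->enabled mirrors. A minimal sketch of the BPF-side view; the section and handler body are assumptions, not the real bperf_cgroup.bpf.c:

    // SPDX-License-Identifier: GPL-2.0
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    /* zero-initialized global lands in .bss; userspace flips it
     * via skel->bss->enabled */
    int enabled = 0;

    SEC("perf_event")
    int BPF_PROG(on_cgrp_switch)
    {
        if (!enabled)
            return 0;
        /* ...read counters, accumulate per-cgroup deltas... */
        return 0;
    }

    char LICENSE[] SEC("license") = "GPL";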
234 struct evlist *evlist = evsel->evlist; in bperf_cgrp__read()
235 int total_cpus = cpu__max_cpu().cpu; in bperf_cgrp__read()
240 if (evsel->core.idx) in bperf_cgrp__read()
243 bperf_cgrp__sync_counters(evsel->evlist); in bperf_cgrp__read()
247 return -ENOMEM; in bperf_cgrp__read()
249 reading_map_fd = bpf_map__fd(skel->maps.cgrp_readings); in bperf_cgrp__read()
252 __u32 idx = evsel->core.idx; in bperf_cgrp__read()
254 struct perf_cpu cpu; in bperf_cgrp__read() local
259 idx, evsel__name(evsel), evsel->cgrp->name); in bperf_cgrp__read()
263 perf_cpu_map__for_each_cpu(cpu, i, evsel->core.cpus) { in bperf_cgrp__read()
264 counts = perf_counts(evsel->counts, i, 0); in bperf_cgrp__read()
265 counts->val = values[cpu.cpu].counter; in bperf_cgrp__read()
266 counts->ena = values[cpu.cpu].enabled; in bperf_cgrp__read()
267 counts->run = values[cpu.cpu].running; in bperf_cgrp__read()
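The read path above relies on per-cpu map semantics: a single bpf_map_lookup_elem() on a per-cpu map fills one value slot for every possible cpu, which is why the buffer is sized by total_cpus (cpu__max_cpu().cpu). A minimal sketch of that lookup; the helper name is illustrative:

    #include <errno.h>
    #include <stdlib.h>
    #include <bpf/bpf.h>    /* pulls in struct bpf_perf_event_value */

    /* read one event's per-cpu values in a single lookup */
    static int read_one_event(int map_fd, __u32 idx, int total_cpus)
    {
        struct bpf_perf_event_value *values;
        int err;

        values = calloc(total_cpus, sizeof(*values));
        if (!values)
            return -ENOMEM;

        err = bpf_map_lookup_elem(map_fd, &idx, values);
        /* on success, values[cpu] holds counter/enabled/running for 'cpu' */

        free(values);
        return err;
    }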
278 if (evsel->core.idx) in bperf_cgrp__destroy()