Lines Matching +full:cpu +full:-core (all hits below are from tools/perf/util/bpf_counter_cgroup.c, perf's BPF-based per-cgroup counters)

1 // SPDX-License-Identifier: GPL-2.0
42 #define FD(evt, cpu) (*(int *)xyarray__entry(evt->core.fd, cpu, 0)) argument
50 struct perf_cpu cpu; in bperf_load_program() local
51 int total_cpus = cpu__max_cpu().cpu; in bperf_load_program()
58 return -1; in bperf_load_program()
61 skel->rodata->num_cpus = total_cpus; in bperf_load_program()
62 skel->rodata->num_events = evlist->core.nr_entries / nr_cgroups; in bperf_load_program()
64 BUG_ON(evlist->core.nr_entries % nr_cgroups != 0); in bperf_load_program()
66 /* we need one copy of events per cpu for reading */ in bperf_load_program()
67 map_size = total_cpus * evlist->core.nr_entries / nr_cgroups; in bperf_load_program()
68 bpf_map__set_max_entries(skel->maps.events, map_size); in bperf_load_program()
69 bpf_map__set_max_entries(skel->maps.cgrp_idx, nr_cgroups); in bperf_load_program()
70 /* previous result is saved in a per-cpu array */ in bperf_load_program()
71 map_size = evlist->core.nr_entries / nr_cgroups; in bperf_load_program()
72 bpf_map__set_max_entries(skel->maps.prev_readings, map_size); in bperf_load_program()
73 /* cgroup result needs all events (per-cpu) */ in bperf_load_program()
74 map_size = evlist->core.nr_entries; in bperf_load_program()
75 bpf_map__set_max_entries(skel->maps.cgrp_readings, map_size); in bperf_load_program()
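
Lines 61-75 derive all four map sizes from the evlist geometry; note num_events is events per cgroup, not the evlist total. A worked example with hypothetical numbers (8 possible CPUs, 2 cgroups, 6 evsels, i.e. 3 distinct events duplicated per cgroup):

	int total_cpus = 8, nr_entries = 6, nr_cgroups = 2;

	int events_sz   = total_cpus * nr_entries / nr_cgroups; /* 24: one perf fd per event per cpu          */
	int cgrp_idx_sz = nr_cgroups;                           /*  2: cgroup id -> sequential index          */
	int prev_sz     = nr_entries / nr_cgroups;              /*  3: previous reading per event (per-cpu)   */
	int cgrp_sz     = nr_entries;                           /*  6: result slots, event x cgroup (per-cpu) */
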
86 skel->bss->use_cgroup_v2 = 1; in bperf_load_program()
88 err = -1; in bperf_load_program()
91 if (evsel__open_per_cpu(cgrp_switch, evlist->core.all_cpus, -1) < 0) { in bperf_load_program()
96 perf_cpu_map__for_each_cpu(cpu, i, evlist->core.all_cpus) { in bperf_load_program()
97 link = bpf_program__attach_perf_event(skel->progs.on_cgrp_switch, in bperf_load_program()
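
Lines 91-97 pair evsel__open_per_cpu() with bpf_program__attach_perf_event(): one dummy perf event is opened per cpu and the on_cgrp_switch program is hooked to each resulting fd (the FD() macro from line 42 digs the fd out). A standalone libbpf sketch of the same pattern; the helper name and the SW_DUMMY config are illustrative stand-ins, the tool's own event tracks cgroup switches:

	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>
	#include <bpf/libbpf.h>

	/* hypothetical helper: attach @prog to a software event on every cpu */
	static int attach_on_each_cpu(struct bpf_program *prog, int ncpus)
	{
		struct perf_event_attr attr = {
			.type = PERF_TYPE_SOFTWARE,
			.config = PERF_COUNT_SW_DUMMY,	/* stand-in config */
			.size = sizeof(attr),
			.sample_period = 1,
		};

		for (int cpu = 0; cpu < ncpus; cpu++) {
			int fd = syscall(__NR_perf_event_open, &attr,
					 -1 /* all tasks */, cpu, -1, 0);

			/* NULL link on error, per libbpf 1.0 conventions */
			if (fd < 0 || !bpf_program__attach_perf_event(prog, fd))
				return -1;
		}
		return 0;
	}
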
107 * Update cgrp_idx map from cgroup-id to event index. in bperf_load_program()
113 if (cgrp == NULL || evsel->cgrp == leader_cgrp) { in bperf_load_program()
114 leader_cgrp = evsel->cgrp; in bperf_load_program()
115 evsel->cgrp = NULL; in bperf_load_program()
118 err = evsel__open_per_cpu(evsel, evsel->core.cpus, -1); in bperf_load_program()
120 evsel->supported = true; in bperf_load_program()
122 map_fd = bpf_map__fd(skel->maps.events); in bperf_load_program()
123 perf_cpu_map__for_each_cpu(cpu, j, evsel->core.cpus) { in bperf_load_program()
125 __u32 idx = evsel->core.idx * total_cpus + cpu.cpu; in bperf_load_program()
130 evsel->cgrp = leader_cgrp; in bperf_load_program()
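
Line 125 flattens (event index, cpu) into one key, so the events map acts as a row-major 2-D table of perf event fds. A sketch of the update loop around it; event_fd_on() is a hypothetical stand-in for FD(evsel, j):

	for (int c = 0; c < total_cpus; c++) {
		__u32 idx = event_idx * total_cpus + c;	/* row-major [event][cpu] */
		int fd = event_fd_on(c);	/* hypothetical: perf fd for this event on cpu c */

		if (bpf_map_update_elem(map_fd, &idx, &fd, BPF_ANY) < 0)
			return -1;
	}
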
133 if (evsel->cgrp == cgrp) in bperf_load_program()
136 cgrp = evsel->cgrp; in bperf_load_program()
140 err = -1; in bperf_load_program()
144 map_fd = bpf_map__fd(skel->maps.cgrp_idx); in bperf_load_program()
145 err = bpf_map_update_elem(map_fd, &cgrp->id, &i, BPF_ANY); in bperf_load_program()
158 prog_fd = bpf_program__fd(skel->progs.trigger_read); in bperf_load_program()
162 "Therefore, --for-each-cgroup might show inaccurate readings\n"); in bperf_load_program()
175 evsel->bperf_leader_prog_fd = -1; in bperf_cgrp__load()
176 evsel->bperf_leader_link_fd = -1; in bperf_cgrp__load()
178 if (!bperf_loaded && bperf_load_program(evsel->evlist)) in bperf_cgrp__load()
179 return -1; in bperf_cgrp__load()
183 evsel->follower_skel = (struct bperf_follower_bpf *)skel; in bperf_cgrp__load()
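
bperf_cgrp__load() is invoked once per evsel, yet line 178 shows the skeleton being loaded at most once for the whole evlist: bperf_loaded is a static flag local to the function. A minimal sketch of that load-once idiom:

	static int load_once(struct evlist *evlist)
	{
		static bool bperf_loaded;	/* persists across per-evsel calls */

		if (!bperf_loaded && bperf_load_program(evlist))
			return -1;

		bperf_loaded = true;
		return 0;
	}
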
189 int cpu __maybe_unused, int fd __maybe_unused) in bperf_cgrp__install_pe()
196 * trigger the leader prog on each cpu, so the cgrp_reading map could get
201 struct perf_cpu cpu; in bperf_cgrp__sync_counters() local
203 int prog_fd = bpf_program__fd(skel->progs.trigger_read); in bperf_cgrp__sync_counters()
205 perf_cpu_map__for_each_cpu(cpu, idx, evlist->core.all_cpus) in bperf_cgrp__sync_counters()
206 bperf_trigger_reading(prog_fd, cpu.cpu); in bperf_cgrp__sync_counters()
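
bperf_trigger_reading() is defined outside this file (in perf's bpf_counter.c); the mechanism is BPF_PROG_TEST_RUN pinned to a given cpu, which executes the program there so it can snapshot that cpu's counters into the map. A sketch assuming that mechanism:

	#include <bpf/bpf.h>

	static int trigger_on_cpu(int prog_fd, int cpu)
	{
		LIBBPF_OPTS(bpf_test_run_opts, opts,
			.flags = BPF_F_TEST_RUN_ON_CPU,	/* run on @cpu, not the caller's cpu */
			.cpu = cpu,
		);

		return bpf_prog_test_run_opts(prog_fd, &opts);
	}
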
213 if (evsel->core.idx) in bperf_cgrp__enable()
216 bperf_cgrp__sync_counters(evsel->evlist); in bperf_cgrp__enable()
218 skel->bss->enabled = 1; in bperf_cgrp__enable()
224 if (evsel->core.idx) in bperf_cgrp__disable()
227 bperf_cgrp__sync_counters(evsel->evlist); in bperf_cgrp__disable()
229 skel->bss->enabled = 0; in bperf_cgrp__disable()
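
Both bperf_cgrp__enable() and bperf_cgrp__disable() bail out unless evsel->core.idx is 0, so only the leader evsel flips the shared flag, and they sync counters first so the toggle lands on fresh values. On the BPF side the counting path can gate on that .bss flag; a plausible sketch of the program (its body is not shown by this search, so the gate here is an assumption):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	int enabled;	/* .bss global, written from userspace as skel->bss->enabled */

	SEC("perf_event")
	int BPF_PROG(on_cgrp_switch)
	{
		if (!enabled)	/* assumed gate, mirroring the userspace toggle */
			return 0;
		/* read PMU counters and accumulate per-cgroup deltas here */
		return 0;
	}
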
235 struct evlist *evlist = evsel->evlist; in bperf_cgrp__read()
236 int total_cpus = cpu__max_cpu().cpu; in bperf_cgrp__read()
241 if (evsel->core.idx) in bperf_cgrp__read()
244 bperf_cgrp__sync_counters(evsel->evlist); in bperf_cgrp__read()
248 return -ENOMEM; in bperf_cgrp__read()
250 reading_map_fd = bpf_map__fd(skel->maps.cgrp_readings); in bperf_cgrp__read()
253 __u32 idx = evsel->core.idx; in bperf_cgrp__read()
255 struct perf_cpu cpu; in bperf_cgrp__read() local
260 idx, evsel__name(evsel), evsel->cgrp->name); in bperf_cgrp__read()
264 perf_cpu_map__for_each_cpu(cpu, i, evsel->core.cpus) { in bperf_cgrp__read()
265 counts = perf_counts(evsel->counts, i, 0); in bperf_cgrp__read()
266 counts->val = values[cpu.cpu].counter; in bperf_cgrp__read()
267 counts->ena = values[cpu.cpu].enabled; in bperf_cgrp__read()
268 counts->run = values[cpu.cpu].running; in bperf_cgrp__read()
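
The values buffer is sized by total_cpus (hence the -ENOMEM path at line 248) and indexed by cpu.cpu rather than the iterator, which implies cgrp_readings is a per-cpu array map: one lookup per key fills an entry for every possible cpu, and the {counter, enabled, running} layout matches the UAPI struct bpf_perf_event_value. A sketch of the copy-out under those assumptions:

	#include <errno.h>
	#include <stdlib.h>
	#include <bpf/bpf.h>

	struct bpf_perf_event_value *values;	/* {counter, enabled, running} per cpu */
	__u32 idx = 0;				/* key: the event's index in the map */

	values = calloc(total_cpus, sizeof(*values));
	if (!values)
		return -ENOMEM;

	/* per-cpu map: one lookup returns total_cpus values for this key */
	if (bpf_map_lookup_elem(reading_map_fd, &idx, values) == 0) {
		for (int c = 0; c < total_cpus; c++)
			consume(c, values[c].counter, values[c].enabled,
				values[c].running);	/* hypothetical consumer */
	}
	free(values);
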
279 if (evsel->core.idx) in bperf_cgrp__destroy()