Lines matching +full:cpu +full:-nr

1 // SPDX-License-Identifier: GPL-2.0
22 * CPU number.
34 return (data->mask32_data.long_size == 4) in perf_record_cpu_map_data__test_bit()
35 ? (bit_word32 < data->mask32_data.nr) && in perf_record_cpu_map_data__test_bit()
36 (data->mask32_data.mask[bit_word32] & bit_mask32) != 0 in perf_record_cpu_map_data__test_bit()
37 : (bit_word64 < data->mask64_data.nr) && in perf_record_cpu_map_data__test_bit()
38 (data->mask64_data.mask[bit_word64] & bit_mask64) != 0; in perf_record_cpu_map_data__test_bit()
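
The test_bit fragment above dispatches on long_size: the mask is stored either as 32-bit or as 64-bit words, so the code computes a word index and an intra-word bit mask for each width, bounds-checks the index against nr, then ANDs. A minimal standalone sketch of the 32-bit arm, using a simplified stand-in struct rather than perf's perf_record_cpu_map_data:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for perf's 32-bit mask record layout. */
struct mask32 {
        uint16_t nr;            /* number of 32-bit words in mask[] */
        uint16_t long_size;     /* 4 here; 8 selects the 64-bit layout */
        uint32_t mask[4];
};

/* Same logic as the fragment's 32-bit arm: locate the word, then
 * bounds-check and test the bit within it. */
static bool mask32_test_bit(const struct mask32 *d, unsigned int bit)
{
        unsigned int word = bit / 32;
        uint32_t bit_mask = 1U << (bit % 32);

        return word < d->nr && (d->mask[word] & bit_mask) != 0;
}

int main(void)
{
        struct mask32 d = { .nr = 2, .long_size = 4, .mask = { 0, 1 } };

        /* Bit 32 is bit 0 of word 1 (set); bit 5 is in word 0 (clear). */
        printf("bit 32: %d, bit 5: %d\n",
               mask32_test_bit(&d, 32), mask32_test_bit(&d, 5));
        return 0;
}
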
41 /* Read ith mask value from data into the given 64-bit sized bitmap */
46 if (data->mask32_data.long_size == 4) in perf_record_cpu_map_data__read_one_mask()
47 bitmap[0] = data->mask32_data.mask[i]; in perf_record_cpu_map_data__read_one_mask()
49 bitmap[0] = data->mask64_data.mask[i]; in perf_record_cpu_map_data__read_one_mask()
51 if (data->mask32_data.long_size == 4) { in perf_record_cpu_map_data__read_one_mask()
52 bitmap[0] = data->mask32_data.mask[i]; in perf_record_cpu_map_data__read_one_mask()
56 bitmap[0] = (unsigned long)(data->mask64_data.mask[i] >> 32); in perf_record_cpu_map_data__read_one_mask()
57 bitmap[1] = (unsigned long)data->mask64_data.mask[i]; in perf_record_cpu_map_data__read_one_mask()
59 bitmap[0] = (unsigned long)data->mask64_data.mask[i]; in perf_record_cpu_map_data__read_one_mask()
60 bitmap[1] = (unsigned long)(data->mask64_data.mask[i] >> 32); in perf_record_cpu_map_data__read_one_mask()
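
The two assignment orders in read_one_mask are the big-endian and little-endian halves of splitting one 64-bit mask word across two 32-bit unsigned longs, so that bitmap[0] always holds bits 0..31. A worked example of the little-endian split, written with fixed-width types so it runs on any host:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t word = 0x0000000100000080ULL;  /* bits 7 and 32 set */
        uint32_t bitmap[2];

        /* Little-endian split, as in the fragment's final branch:
         * bitmap[0] holds bits 0..31, bitmap[1] holds bits 32..63. */
        bitmap[0] = (uint32_t)word;             /* 0x00000080: bit 7 */
        bitmap[1] = (uint32_t)(word >> 32);     /* 0x00000001: bit 32 */

        printf("low=0x%08x high=0x%08x\n",
               (unsigned)bitmap[0], (unsigned)bitmap[1]);
        return 0;
}
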
69 map = perf_cpu_map__empty_new(data->cpus_data.nr); in cpu_map__from_entries()
73 for (unsigned int i = 0; i < data->cpus_data.nr; i++) { in cpu_map__from_entries()
75 * Special treatment for -1, which is not real cpu number, in cpu_map__from_entries()
76 * and we need to use (int) -1 to initialize map[i], in cpu_map__from_entries()
79 if (data->cpus_data.cpu[i] == (u16) -1) { in cpu_map__from_entries()
80 RC_CHK_ACCESS(map)->map[i].cpu = -1; in cpu_map__from_entries()
81 } else if (data->cpus_data.cpu[i] < INT16_MAX) { in cpu_map__from_entries()
82 RC_CHK_ACCESS(map)->map[i].cpu = (int16_t) data->cpus_data.cpu[i]; in cpu_map__from_entries()
84 pr_err("Invalid cpumap entry %u\n", data->cpus_data.cpu[i]); in cpu_map__from_entries()
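
The entries arrive as u16, so the "any CPU" value -1 shows up as 0xffff and must be translated back to a signed -1, while real CPU numbers must fit the 16-bit signed map[i].cpu field. A standalone sketch of that conversion rule (entry_to_cpu is a hypothetical helper, not a perf API):

#include <stdint.h>
#include <stdio.h>

/* Convert one u16 wire entry to a signed in-memory CPU number:
 * 0xffff is the "any CPU" sentinel, values below INT16_MAX are real
 * CPU numbers, and anything else is rejected, as in the fragment. */
static int entry_to_cpu(uint16_t entry, int16_t *cpu)
{
        if (entry == (uint16_t)-1) {
                *cpu = -1;              /* sentinel, not a real CPU */
                return 0;
        }
        if (entry < INT16_MAX) {
                *cpu = (int16_t)entry;
                return 0;
        }
        return -1;                      /* would overflow int16_t */
}

int main(void)
{
        uint16_t samples[] = { 0, 7, 0xffff, 0x8000 };

        for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                int16_t cpu;

                if (entry_to_cpu(samples[i], &cpu) == 0)
                        printf("%#x -> %d\n", (unsigned)samples[i], cpu);
                else
                        printf("%#x -> invalid\n", (unsigned)samples[i]);
        }
        return 0;
}
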
96 int weight = 0, mask_nr = data->mask32_data.nr; in cpu_map__from_mask()
109 int cpus_per_i = (i * data->mask32_data.long_size * BITS_PER_BYTE); in cpu_map__from_mask()
110 int cpu; in cpu_map__from_mask() local
113 for_each_set_bit(cpu, local_copy, 64) { in cpu_map__from_mask()
114 if (cpu + cpus_per_i < INT16_MAX) { in cpu_map__from_mask()
115 RC_CHK_ACCESS(map)->map[j++].cpu = cpu + cpus_per_i; in cpu_map__from_mask()
117 pr_err("Invalid cpumap entry %d\n", cpu + cpus_per_i); in cpu_map__from_mask()
132 map = perf_cpu_map__empty_new(data->range_cpu_data.end_cpu - in cpu_map__from_range()
133 data->range_cpu_data.start_cpu + 1 + data->range_cpu_data.any_cpu); in cpu_map__from_range()
137 if (data->range_cpu_data.any_cpu) in cpu_map__from_range()
138 RC_CHK_ACCESS(map)->map[i++].cpu = -1; in cpu_map__from_range()
140 for (int cpu = data->range_cpu_data.start_cpu; cpu <= data->range_cpu_data.end_cpu; in cpu_map__from_range() local
141 i++, cpu++) { in cpu_map__from_range()
142 if (cpu < INT16_MAX) { in cpu_map__from_range()
143 RC_CHK_ACCESS(map)->map[i].cpu = cpu; in cpu_map__from_range()
145 pr_err("Invalid cpumap entry %d\n", cpu); in cpu_map__from_range()
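
A range-encoded map stores only start_cpu, end_cpu, and an any_cpu flag; it expands to end_cpu - start_cpu + 1 + any_cpu entries, with the optional -1 "any CPU" slot placed first. A small worked example with a simplified stand-in struct:

#include <stdio.h>

struct range_cpu_data { int start_cpu, end_cpu, any_cpu; };

int main(void)
{
        /* start=2, end=5, any_cpu=1 expands to five entries: -1 2 3 4 5 */
        struct range_cpu_data r = { .start_cpu = 2, .end_cpu = 5, .any_cpu = 1 };
        int nr = r.end_cpu - r.start_cpu + 1 + r.any_cpu;
        int map[16], i = 0;

        if (r.any_cpu)
                map[i++] = -1;  /* the "any CPU" slot comes first */
        for (int cpu = r.start_cpu; cpu <= r.end_cpu; cpu++)
                map[i++] = cpu;

        for (i = 0; i < nr; i++)
                printf("%d ", map[i]);
        printf("\n");
        return 0;
}
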
156 switch (data->type) { in cpu_map__new_data()
164 pr_err("cpu_map__new_data unknown type %d\n", data->type); in cpu_map__new_data()
179 struct perf_cpu_map *perf_cpu_map__empty_new(int nr) in perf_cpu_map__empty_new() argument
181 struct perf_cpu_map *cpus = perf_cpu_map__alloc(nr); in perf_cpu_map__empty_new()
184 for (int i = 0; i < nr; i++) in perf_cpu_map__empty_new()
185 RC_CHK_ACCESS(cpus)->map[i].cpu = -1; in perf_cpu_map__empty_new()
191 struct cpu_aggr_map *cpu_aggr_map__empty_new(int nr) in cpu_aggr_map__empty_new() argument
193 struct cpu_aggr_map *cpus = malloc(sizeof(*cpus) + sizeof(struct aggr_cpu_id) * nr); in cpu_aggr_map__empty_new()
198 cpus->nr = nr; in cpu_aggr_map__empty_new()
199 for (i = 0; i < nr; i++) in cpu_aggr_map__empty_new()
200 cpus->map[i] = aggr_cpu_id__empty(); in cpu_aggr_map__empty_new()
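
cpu_aggr_map__empty_new() uses the single-allocation flexible-array pattern: one malloc() covers the header plus nr trailing aggr_cpu_id slots, which are then initialized to the empty ID. A standalone sketch of the same pattern with simplified stand-in types:

#include <stdio.h>
#include <stdlib.h>

struct id { int socket, core; };

/* Header plus flexible array member, sized in a single malloc() as
 * in cpu_aggr_map__empty_new(). */
struct id_map {
        int nr;
        struct id map[];
};

static struct id_map *id_map__empty_new(int nr)
{
        struct id_map *m = malloc(sizeof(*m) + sizeof(struct id) * nr);

        if (m) {
                m->nr = nr;
                for (int i = 0; i < nr; i++)
                        m->map[i] = (struct id){ .socket = -1, .core = -1 };
        }
        return m;
}

int main(void)
{
        struct id_map *m = id_map__empty_new(4);

        if (!m)
                return 1;
        printf("nr=%d first socket=%d\n", m->nr, m->map[0].socket);
        free(m);
        return 0;
}
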
206 static int cpu__get_topology_int(int cpu, const char *name, int *value) in cpu__get_topology_int() argument
211 "devices/system/cpu/cpu%d/topology/%s", cpu, name); in cpu__get_topology_int()
216 int cpu__get_socket_id(struct perf_cpu cpu) in cpu__get_socket_id() argument
218 int value, ret = cpu__get_topology_int(cpu.cpu, "physical_package_id", &value); in cpu__get_socket_id()
222 struct aggr_cpu_id aggr_cpu_id__socket(struct perf_cpu cpu, void *data __maybe_unused) in aggr_cpu_id__socket() argument
226 id.socket = cpu__get_socket_id(cpu); in aggr_cpu_id__socket()
235 if (a->node != b->node) in aggr_cpu_id__cmp()
236 return a->node - b->node; in aggr_cpu_id__cmp()
237 else if (a->socket != b->socket) in aggr_cpu_id__cmp()
238 return a->socket - b->socket; in aggr_cpu_id__cmp()
239 else if (a->die != b->die) in aggr_cpu_id__cmp()
240 return a->die - b->die; in aggr_cpu_id__cmp()
241 else if (a->cluster != b->cluster) in aggr_cpu_id__cmp()
242 return a->cluster - b->cluster; in aggr_cpu_id__cmp()
243 else if (a->cache_lvl != b->cache_lvl) in aggr_cpu_id__cmp()
244 return a->cache_lvl - b->cache_lvl; in aggr_cpu_id__cmp()
245 else if (a->cache != b->cache) in aggr_cpu_id__cmp()
246 return a->cache - b->cache; in aggr_cpu_id__cmp()
247 else if (a->core != b->core) in aggr_cpu_id__cmp()
248 return a->core - b->core; in aggr_cpu_id__cmp()
250 return a->thread_idx - b->thread_idx; in aggr_cpu_id__cmp()
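
aggr_cpu_id__cmp() compares lexicographically by topology level, outermost first (node, socket, die, cluster, cache level, cache, core, thread index), so a qsort() with it groups CPUs that share outer levels next to each other. A reduced two-level sketch of the same comparator shape:

#include <stdio.h>
#include <stdlib.h>

struct id { int socket, core; };

/* Lexicographic compare, outermost level first: the same shape as
 * aggr_cpu_id__cmp(), reduced to two levels. */
static int id_cmp(const void *pa, const void *pb)
{
        const struct id *a = pa, *b = pb;

        if (a->socket != b->socket)
                return a->socket - b->socket;
        return a->core - b->core;
}

int main(void)
{
        struct id ids[] = { { 1, 0 }, { 0, 2 }, { 0, 1 }, { 1, 1 } };

        qsort(ids, 4, sizeof(ids[0]), id_cmp);
        for (int i = 0; i < 4; i++)
                printf("(%d,%d) ", ids[i].socket, ids[i].core);
        printf("\n");   /* (0,1) (0,2) (1,0) (1,1) */
        return 0;
}
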
258 struct perf_cpu cpu; in cpu_aggr_map__new() local
265 c->nr = 0; in cpu_aggr_map__new()
267 perf_cpu_map__for_each_cpu(cpu, idx, cpus) { in cpu_aggr_map__new()
269 struct aggr_cpu_id cpu_id = get_id(cpu, data); in cpu_aggr_map__new()
271 for (int j = 0; j < c->nr; j++) { in cpu_aggr_map__new()
272 if (aggr_cpu_id__equal(&cpu_id, &c->map[j])) { in cpu_aggr_map__new()
278 c->map[c->nr] = cpu_id; in cpu_aggr_map__new()
279 c->nr++; in cpu_aggr_map__new()
283 if (c->nr != perf_cpu_map__nr(cpus)) { in cpu_aggr_map__new()
286 sizeof(struct cpu_aggr_map) + sizeof(struct aggr_cpu_id) * c->nr); in cpu_aggr_map__new()
294 qsort(c->map, c->nr, sizeof(struct aggr_cpu_id), aggr_cpu_id__cmp); in cpu_aggr_map__new()
300 int cpu__get_die_id(struct perf_cpu cpu) in cpu__get_die_id() argument
302 int value, ret = cpu__get_topology_int(cpu.cpu, "die_id", &value); in cpu__get_die_id()
307 struct aggr_cpu_id aggr_cpu_id__die(struct perf_cpu cpu, void *data) in aggr_cpu_id__die() argument
312 die = cpu__get_die_id(cpu); in aggr_cpu_id__die()
322 id = aggr_cpu_id__socket(cpu, data); in aggr_cpu_id__die()
330 int cpu__get_cluster_id(struct perf_cpu cpu) in cpu__get_cluster_id() argument
332 int value, ret = cpu__get_topology_int(cpu.cpu, "cluster_id", &value); in cpu__get_cluster_id()
337 struct aggr_cpu_id aggr_cpu_id__cluster(struct perf_cpu cpu, void *data) in aggr_cpu_id__cluster() argument
339 int cluster = cpu__get_cluster_id(cpu); in aggr_cpu_id__cluster()
346 id = aggr_cpu_id__die(cpu, data); in aggr_cpu_id__cluster()
354 int cpu__get_core_id(struct perf_cpu cpu) in cpu__get_core_id() argument
356 int value, ret = cpu__get_topology_int(cpu.cpu, "core_id", &value); in cpu__get_core_id()
360 struct aggr_cpu_id aggr_cpu_id__core(struct perf_cpu cpu, void *data) in aggr_cpu_id__core() argument
363 int core = cpu__get_core_id(cpu); in aggr_cpu_id__core()
366 id = aggr_cpu_id__cluster(cpu, data); in aggr_cpu_id__core()
379 struct aggr_cpu_id aggr_cpu_id__cpu(struct perf_cpu cpu, void *data) in aggr_cpu_id__cpu() argument
384 id = aggr_cpu_id__core(cpu, data); in aggr_cpu_id__cpu()
388 id.cpu = cpu; in aggr_cpu_id__cpu()
393 struct aggr_cpu_id aggr_cpu_id__node(struct perf_cpu cpu, void *data __maybe_unused) in aggr_cpu_id__node() argument
397 id.node = cpu__get_node(cpu); in aggr_cpu_id__node()
401 struct aggr_cpu_id aggr_cpu_id__global(struct perf_cpu cpu, void *data __maybe_unused) in aggr_cpu_id__global() argument
405 /* it always aggregates to the cpu 0 */ in aggr_cpu_id__global()
406 cpu.cpu = 0; in aggr_cpu_id__global()
407 id.cpu = cpu; in aggr_cpu_id__global()
411 /* setup simple routines to easily access node numbers given a cpu number */
419 return -1; in get_max_num()
424 while (--num) { in get_max_num()
425 if ((buf[num] == ',') || (buf[num] == '-')) { in get_max_num()
431 err = -1; in get_max_num()
435 /* convert from 0-based to 1-based */ in get_max_num()
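
get_max_num() finds the highest ID in a sysfs range list such as 0-7,16-23 by scanning backwards to the last ',' or '-' delimiter, parsing the number after it, and finally adding one to turn the 0-based ID into a count. A standalone sketch of the backward scan (list_max is a hypothetical helper, and it uses atoi where the real code uses sscanf):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Find the highest ID in a sysfs range list such as "0-7,16-23":
 * scan backwards to the last ',' or '-', then parse what follows. */
static int list_max(const char *list)
{
        size_t num = strlen(list);

        if (num == 0)
                return -1;
        while (--num) {
                if (list[num] == ',' || list[num] == '-') {
                        num++;
                        break;
                }
        }
        return atoi(list + num);
}

int main(void)
{
        printf("%d\n", list_max("0-7,16-23"));  /* 23 */
        printf("%d\n", list_max("0"));          /* 0 */
        return 0;
}
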
443 /* Determine highest possible cpu in the system for sparse allocation */
448 int max, ret = -1; in set_max_cpu_num()
451 max_cpu_num.cpu = 4096; in set_max_cpu_num()
452 max_present_cpu_num.cpu = 4096; in set_max_cpu_num()
458 /* get the highest possible cpu number for a sparse allocation */ in set_max_cpu_num()
459 ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/possible", mnt); in set_max_cpu_num()
469 max_cpu_num.cpu = max; in set_max_cpu_num()
471 /* get the highest present cpu number for a sparse allocation */ in set_max_cpu_num()
472 ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/present", mnt); in set_max_cpu_num()
482 ret = -1; in set_max_cpu_num()
485 max_present_cpu_num.cpu = (int16_t)max; in set_max_cpu_num()
488 pr_err("Failed to read max cpus, using default of %d\n", max_cpu_num.cpu); in set_max_cpu_num()
496 int ret = -1; in set_max_node_num()
505 /* get the highest possible node number for a sparse allocation */ in set_max_node_num()
529 if (unlikely(!max_cpu_num.cpu)) in cpu__max_cpu()
537 if (unlikely(!max_present_cpu_num.cpu)) in cpu__max_present_cpu()
544 int cpu__get_node(struct perf_cpu cpu) in cpu__get_node() argument
548 return -1; in cpu__get_node()
551 return cpunode_map[cpu.cpu]; in cpu__get_node()
561 cpunode_map = calloc(max_cpu_num.cpu, sizeof(int)); in init_cpunode_map()
564 return -1; in init_cpunode_map()
567 for (i = 0; i < max_cpu_num.cpu; i++) in init_cpunode_map()
568 cpunode_map[i] = -1; in init_cpunode_map()
577 unsigned int cpu, mem; in cpu__setup_cpunode_map() local
585 return -1; in cpu__setup_cpunode_map()
594 return -1; in cpu__setup_cpunode_map()
603 if (dent1->d_type != DT_DIR || sscanf(dent1->d_name, "node%u", &mem) < 1) in cpu__setup_cpunode_map()
606 n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name); in cpu__setup_cpunode_map()
616 if (dent2->d_type != DT_LNK || sscanf(dent2->d_name, "cpu%u", &cpu) < 1) in cpu__setup_cpunode_map()
618 cpunode_map[cpu] = mem; in cpu__setup_cpunode_map()
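
cpu__setup_cpunode_map() walks /sys/devices/system/node/, and inside each nodeN directory scans the cpuM entries, recording the owning node for every CPU. A standalone sketch of the same two-level walk (minus the d_type checks the real code performs):

#include <dirent.h>
#include <stdio.h>

/* Walk /sys/devices/system/node/nodeN/cpuM entries and print which
 * node each CPU belongs to; the real code fills cpunode_map[] from
 * the same layout. */
int main(void)
{
        const char *base = "/sys/devices/system/node";
        struct dirent *d1, *d2;
        unsigned int node, cpu;
        char path[512];
        DIR *nodes, *cpus;

        nodes = opendir(base);
        if (!nodes)
                return 1;
        while ((d1 = readdir(nodes)) != NULL) {
                if (sscanf(d1->d_name, "node%u", &node) != 1)
                        continue;
                snprintf(path, sizeof(path), "%s/%s", base, d1->d_name);
                cpus = opendir(path);
                if (!cpus)
                        continue;
                while ((d2 = readdir(cpus)) != NULL) {
                        if (sscanf(d2->d_name, "cpu%u", &cpu) == 1)
                                printf("cpu%u -> node%u\n", cpu, node);
                }
                closedir(cpus);
        }
        closedir(nodes);
        return 0;
}
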
628 int i, start = -1; in cpu_map__snprint()
635 struct perf_cpu cpu = { .cpu = INT16_MAX }; in cpu_map__snprint() local
639 cpu = perf_cpu_map__cpu(map, i); in cpu_map__snprint()
641 if (start == -1) { in cpu_map__snprint()
644 ret += snprintf(buf + ret, size - ret, in cpu_map__snprint()
646 perf_cpu_map__cpu(map, i).cpu); in cpu_map__snprint()
648 } else if (((i - start) != (cpu.cpu - perf_cpu_map__cpu(map, start).cpu)) || last) { in cpu_map__snprint()
649 int end = i - 1; in cpu_map__snprint()
652 ret += snprintf(buf + ret, size - ret, in cpu_map__snprint()
654 perf_cpu_map__cpu(map, start).cpu); in cpu_map__snprint()
656 ret += snprintf(buf + ret, size - ret, in cpu_map__snprint()
657 "%s%d-%d", COMMA, in cpu_map__snprint()
658 perf_cpu_map__cpu(map, start).cpu, perf_cpu_map__cpu(map, end).cpu); in cpu_map__snprint()
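
cpu_map__snprint() compresses a sorted map into the usual CPU-list syntax: runs of consecutive CPUs print as start-end, isolated CPUs print alone, comma-separated. A standalone sketch of that run-length compression over a plain array:

#include <stdio.h>

/* Compress a sorted CPU array into the "0-2,4" list form: runs of
 * consecutive values print as "start-end", singletons print alone. */
int main(void)
{
        int cpus[] = { 0, 1, 2, 4, 7, 8 };
        int n = sizeof(cpus) / sizeof(cpus[0]);

        for (int i = 0; i < n; ) {
                int j = i;

                while (j + 1 < n && cpus[j + 1] == cpus[j] + 1)
                        j++;
                if (i > 0)
                        printf(",");
                if (j == i)
                        printf("%d", cpus[i]);
                else
                        printf("%d-%d", cpus[i], cpus[j]);
                i = j + 1;
        }
        printf("\n");   /* 0-2,4,7-8 */
        return 0;
}
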
676 return val - 10 + 'a'; in hex_char()
690 bitmap = zalloc(last_cpu.cpu / 8 + 1); in cpu_map__snprint_mask()
697 bitmap[c.cpu / 8] |= 1 << (c.cpu % 8); in cpu_map__snprint_mask()
699 for (int cpu = last_cpu.cpu / 4 * 4; cpu >= 0; cpu -= 4) { in cpu_map__snprint_mask() local
700 unsigned char bits = bitmap[cpu / 8]; in cpu_map__snprint_mask()
702 if (cpu % 8) in cpu_map__snprint_mask()
708 if ((cpu % 32) == 0 && cpu > 0) in cpu_map__snprint_mask()
714 buf[size - 1] = '\0'; in cpu_map__snprint_mask()
715 return ptr - buf; in cpu_map__snprint_mask()
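
cpu_map__snprint_mask() renders the map as a kernel-style hex mask string: one nibble per four CPUs, walking from the highest CPU down to 0, with a comma after every 32 bits. A standalone sketch of the same nibble walk, printing instead of filling a buffer:

#include <stdio.h>

/* Render a CPU bitmap as a kernel-style hex mask: one nibble per
 * four CPUs, highest nibble first, a comma after every 32 bits. */
int main(void)
{
        unsigned char bitmap[8] = { 0 };        /* CPUs 0..63 */
        int last_cpu = 35;

        bitmap[0] = 0xff;                       /* CPUs 0-7 set */
        bitmap[4] = 0x08;                       /* CPU 35 set */

        for (int cpu = last_cpu / 4 * 4; cpu >= 0; cpu -= 4) {
                unsigned char bits = bitmap[cpu / 8];

                if (cpu % 8)
                        bits >>= 4;             /* high nibble of byte */
                printf("%x", bits & 0xf);
                if ((cpu % 32) == 0 && cpu > 0)
                        printf(",");
        }
        printf("\n");   /* 8,000000ff */
        return 0;
}
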
723 online = perf_cpu_map__new_online_cpus(); /* from /sys/devices/system/cpu/online */ in cpu_map__online()
730 return a->thread_idx == b->thread_idx && in aggr_cpu_id__equal()
731 a->node == b->node && in aggr_cpu_id__equal()
732 a->socket == b->socket && in aggr_cpu_id__equal()
733 a->die == b->die && in aggr_cpu_id__equal()
734 a->cluster == b->cluster && in aggr_cpu_id__equal()
735 a->cache_lvl == b->cache_lvl && in aggr_cpu_id__equal()
736 a->cache == b->cache && in aggr_cpu_id__equal()
737 a->core == b->core && in aggr_cpu_id__equal()
738 a->cpu.cpu == b->cpu.cpu; in aggr_cpu_id__equal()
743 return a->thread_idx == -1 && in aggr_cpu_id__is_empty()
744 a->node == -1 && in aggr_cpu_id__is_empty()
745 a->socket == -1 && in aggr_cpu_id__is_empty()
746 a->die == -1 && in aggr_cpu_id__is_empty()
747 a->cluster == -1 && in aggr_cpu_id__is_empty()
748 a->cache_lvl == -1 && in aggr_cpu_id__is_empty()
749 a->cache == -1 && in aggr_cpu_id__is_empty()
750 a->core == -1 && in aggr_cpu_id__is_empty()
751 a->cpu.cpu == -1; in aggr_cpu_id__is_empty()
757 .thread_idx = -1, in aggr_cpu_id__empty()
758 .node = -1, in aggr_cpu_id__empty()
759 .socket = -1, in aggr_cpu_id__empty()
760 .die = -1, in aggr_cpu_id__empty()
761 .cluster = -1, in aggr_cpu_id__empty()
762 .cache_lvl = -1, in aggr_cpu_id__empty()
763 .cache = -1, in aggr_cpu_id__empty()
764 .core = -1, in aggr_cpu_id__empty()
765 .cpu = (struct perf_cpu){ .cpu = -1 }, in aggr_cpu_id__empty()