Lines matching "cpu" and "core" in tools/perf/util/mmap.c (Linux perf tool)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */
In mmap_cpu_mask__scnprintf():

	len = bitmap_scnprintf(mask->bits, mask->nbits, buf, MASK_SIZE);
	pr_debug("%p: %s mask[%zd]: %s\n", mask, tag, mask->nbits, buf);
In mmap__mmap_len():
	return perf_mmap__mmap_len(&map->core);
In perf_mmap__aio_enabled():
	return map->aio.nr_cblocks > 0;
In perf_mmap__aio_alloc():

	map->aio.data[idx] = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE, ...);
	if (map->aio.data[idx] == MAP_FAILED) {
		map->aio.data[idx] = NULL;
		return -1;
	}
In perf_mmap__aio_free():

	if (map->aio.data[idx]) {
		munmap(map->aio.data[idx], mmap__mmap_len(map));
		map->aio.data[idx] = NULL;
	}
static int perf_mmap__aio_bind(struct mmap *map, int idx, struct perf_cpu cpu, int affinity)
{
	...
	data = map->aio.data[idx];
	...
	node_index = cpu__get_node(cpu);
	...
		return -1;
	...
	pr_err("Failed to bind [%p-%p] AIO buffer to node %lu: error %m\n", ...);
	err = -1;
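perf_mmap__aio_bind() above places each AIO data buffer on the NUMA node of the ring buffer's CPU by building a one-node mask and calling mbind(2). A minimal standalone sketch of that placement technique, assuming libnuma's <numaif.h> wrapper is available (link with -lnuma); the helper name, buffer size and node number below are invented for illustration:

#include <numaif.h>		/* mbind(), MPOL_BIND; link with -lnuma */
#include <sys/mman.h>
#include <stdio.h>
#include <stdlib.h>

/* Bind an anonymous mapping to a single NUMA node (hypothetical helper). */
static int bind_buf_to_node(void *buf, size_t len, unsigned long node)
{
	unsigned long maxnode = node + 2;	/* bits 0..node, plus one spare bit */
	size_t bits_per_long = 8 * sizeof(unsigned long);
	size_t nlongs = (maxnode + bits_per_long - 1) / bits_per_long;
	unsigned long *nodemask = calloc(nlongs, sizeof(unsigned long));

	if (!nodemask)
		return -1;
	nodemask[node / bits_per_long] |= 1UL << (node % bits_per_long);
	if (mbind(buf, len, MPOL_BIND, nodemask, maxnode, 0)) {
		perror("mbind");
		free(nodemask);
		return -1;
	}
	free(nodemask);
	return 0;
}

int main(void)
{
	size_t len = 4 * 1024 * 1024;		/* illustrative 4 MiB buffer */
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;
	return bind_buf_to_node(buf, len, 0) ? 1 : 0;	/* node 0 as an example */
}

mbind() only constrains where the buffer's pages may be allocated; it does not move the calling thread.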
In perf_mmap__aio_alloc() (fallback variant when libnuma support is not built in):

	map->aio.data[idx] = malloc(mmap__mmap_len(map));
	if (map->aio.data[idx] == NULL)
		return -1;
In perf_mmap__aio_free() (fallback variant without libnuma support):
	zfree(&(map->aio.data[idx]));
In perf_mmap__aio_bind() (no-op stub without libnuma support; second half of its parameter list):
	... struct perf_cpu cpu __maybe_unused, int affinity __maybe_unused)
In perf_mmap__aio_mmap():

	map->aio.nr_cblocks = mp->nr_cblocks;
	if (map->aio.nr_cblocks) {
		map->aio.aiocb = calloc(map->aio.nr_cblocks, sizeof(struct aiocb *));
		if (!map->aio.aiocb) {
			...
			return -1;
		}
		map->aio.cblocks = calloc(map->aio.nr_cblocks, sizeof(struct aiocb));
		if (!map->aio.cblocks) {
			...
			return -1;
		}
		map->aio.data = calloc(map->aio.nr_cblocks, sizeof(void *));
		if (!map->aio.data) {
			...
			return -1;
		}
		...
		for (i = 0; i < map->aio.nr_cblocks; ++i) {
			...
			if (ret == -1) {
				...
				return -1;
			}
			ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity);
			if (ret == -1)
				return -1;
			/*
			 * Use cblock.aio_fildes value different from -1
			 * ...
			 */
			map->aio.cblocks[i].aio_fildes = -1;
			/*
			 * ...
			 * are kept in separate per-prio queues and adding
			 * a new request will iterate thru shorter per-prio
			 * ...
			 */
			prio = delta_max - i;
			map->aio.cblocks[i].aio_reqprio = prio >= 0 ? prio : 0;
		}
	}
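The loop above hands each control block its own aio_reqprio so that, as the comment notes, queued requests end up on short per-priority lists. A minimal sketch of queueing writes that way with POSIX AIO, assuming glibc's implementation (<aio.h>, -lrt on older glibc); the output file name, buffer sizes and request count are placeholders:

#include <aio.h>		/* aio_write(), aio_suspend(), struct aiocb */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <stdio.h>

#define NR_CBLOCKS 4

int main(void)
{
	static char bufs[NR_CBLOCKS][4096];
	struct aiocb cblocks[NR_CBLOCKS];
	int delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX);
	int fd = open("aio-demo.out", O_CREAT | O_WRONLY | O_TRUNC, 0644);
	int i;

	if (fd < 0)
		return 1;

	memset(cblocks, 0, sizeof(cblocks));
	for (i = 0; i < NR_CBLOCKS; i++) {
		int prio = delta_max - i;	/* distinct priority delta per cblock, mirroring the perf code */

		memset(bufs[i], 'A' + i, sizeof(bufs[i]));
		cblocks[i].aio_fildes = fd;
		cblocks[i].aio_buf = bufs[i];
		cblocks[i].aio_nbytes = sizeof(bufs[i]);
		cblocks[i].aio_offset = (off_t)i * sizeof(bufs[i]);
		cblocks[i].aio_reqprio = prio >= 0 ? prio : 0;
		if (aio_write(&cblocks[i])) {
			perror("aio_write");
			return 1;
		}
	}

	/* Wait for each request; aio_suspend() on a completed cblock returns immediately. */
	for (i = 0; i < NR_CBLOCKS; i++) {
		const struct aiocb * const list[1] = { &cblocks[i] };

		aio_suspend(list, 1, NULL);
		aio_return(&cblocks[i]);
	}
	close(fd);
	return 0;
}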
In perf_mmap__aio_munmap():

	for (i = 0; i < map->aio.nr_cblocks; ++i)
		...
	if (map->aio.data)
		zfree(&map->aio.data);
	zfree(&map->aio.cblocks);
	zfree(&map->aio.aiocb);
In mmap__munmap():

	bitmap_free(map->affinity_mask.bits);
	...
	zstd_fini(&map->zstd_data);
	...
	if (map->data != NULL) {
		munmap(map->data, mmap__mmap_len(map));
		map->data = NULL;
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
In build_node_mask():

	struct perf_cpu cpu;
	...
	cpu = perf_cpu_map__cpu(cpu_map, idx); /* map c index to online cpu index */
	if (cpu__get_node(cpu) == node)
		__set_bit(cpu.cpu, mask->bits);
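build_node_mask() asks cpu__get_node() for every online CPU and sets the matching bits for the requested node. Outside the perf tree, the same node-to-CPU mapping can be queried from libnuma; a small sketch assuming libnuma is installed (the perf code itself relies on its own cpu__get_node() helper rather than libnuma here), with the node number chosen for illustration:

#include <numa.h>		/* numa_node_to_cpus(), struct bitmask; link with -lnuma */
#include <stdio.h>

int main(void)
{
	int node = 0;			/* example node */
	struct bitmask *cpus;
	int cpu;

	if (numa_available() < 0) {
		fprintf(stderr, "no NUMA support\n");
		return 1;
	}
	cpus = numa_allocate_cpumask();
	if (numa_node_to_cpus(node, cpus) < 0) {
		perror("numa_node_to_cpus");
		return 1;
	}
	printf("CPUs on node %d:", node);
	for (cpu = 0; cpu < numa_num_possible_cpus(); cpu++)
		if (numa_bitmask_isbitset(cpus, cpu))
			printf(" %d", cpu);
	printf("\n");
	numa_free_cpumask(cpus);
	return 0;
}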
In perf_mmap__setup_affinity_mask():

	map->affinity_mask.nbits = cpu__max_cpu().cpu;
	map->affinity_mask.bits = bitmap_zalloc(map->affinity_mask.nbits);
	if (!map->affinity_mask.bits)
		return -1;

	if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
		build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask);
	else if (mp->affinity == PERF_AFFINITY_CPU)
		__set_bit(map->core.cpu.cpu, map->affinity_mask.bits);
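The resulting mask covers either all CPUs of the buffer's NUMA node or just the buffer's own CPU. perf record later applies such a mask to its own thread with sched_setaffinity(2) so the thread draining a ring buffer runs near that buffer. A minimal sketch of applying a CPU mask that way; the CPU numbers are chosen only for illustration:

#define _GNU_SOURCE
#include <sched.h>		/* CPU_ZERO, CPU_SET, sched_setaffinity() */
#include <stdio.h>

int main(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);	/* illustrative: restrict to CPUs 0 and 1 */
	CPU_SET(1, &set);
	if (sched_setaffinity(0, sizeof(set), &set)) {	/* pid 0 == calling thread */
		perror("sched_setaffinity");
		return 1;
	}
	printf("now restricted to CPUs 0-1\n");
	return 0;
}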
int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, struct perf_cpu cpu)
{
	if (perf_mmap__mmap(&map->core, &mp->core, fd, cpu)) {
		...
		return -1;
	}
	if (mp->affinity != PERF_AFFINITY_SYS &&
	    ...) {
		...
		return -1;
	}
	...
	mmap_cpu_mask__scnprintf(&map->affinity_mask, "mmap");
	map->core.flush = mp->flush;
	if (zstd_init(&map->zstd_data, mp->comp_level)) {
		...
		return -1;
	}
	if (mp->comp_level && !perf_mmap__aio_enabled(map)) {
		map->data = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE, ...);
		if (map->data == MAP_FAILED) {
			...
			map->data = NULL;
			return -1;
		}
	}
	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->core.base, fd))
		return -1;
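mmap__mmap() delegates the actual ring-buffer mapping to libperf's perf_mmap__mmap(). Underneath, the kernel interface is perf_event_open(2) followed by an mmap of 1 + 2^n pages, the first page being the struct perf_event_mmap_page control page. A rough standalone sketch of that interface, opening a software CPU-clock event for the calling process and mapping its ring buffer; the page count and sampling parameters are arbitrary and error handling is minimal:

#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/mman.h>
#include <string.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	long page_size = sysconf(_SC_PAGESIZE);
	size_t mmap_len = (1 + 8) * page_size;	/* control page + 8 data pages (power of two) */
	struct perf_event_mmap_page *meta;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
	attr.disabled = 1;

	/* No glibc wrapper exists: perf_event_open is reached through syscall(2). */
	fd = syscall(SYS_perf_event_open, &attr, 0 /* self */, -1 /* any cpu */,
		     -1 /* no group */, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	meta = mmap(NULL, mmap_len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (meta == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	/* meta->data_head and meta->data_tail delimit the records in the data pages. */
	printf("ring buffer mapped, data_head=%llu\n",
	       (unsigned long long)meta->data_head);
	munmap(meta, mmap_len);
	close(fd);
	return 0;
}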
In perf_mmap__push():

	u64 head = perf_mmap__read_head(&md->core);
	unsigned char *data = md->core.base + page_size;
	...
	rc = perf_mmap__read_init(&md->core);
	...
		return (rc == -EAGAIN) ? 1 : -1;

	size = md->core.end - md->core.start;

	if ((md->core.start & md->core.mask) + size != (md->core.end & md->core.mask)) {
		buf = &data[md->core.start & md->core.mask];
		size = md->core.mask + 1 - (md->core.start & md->core.mask);
		md->core.start += size;
		...
			rc = -1;
		...
	}

	buf = &data[md->core.start & md->core.mask];
	size = md->core.end - md->core.start;
	md->core.start += size;
	...
		rc = -1;
	...
	md->core.prev = head;
	perf_mmap__consume(&md->core);
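The wrap handling above splits one logical range [start, end) into at most two contiguous chunks, relying on the buffer size (mask + 1) being a power of two. A self-contained sketch of the same masking arithmetic on a plain byte buffer; the buffer size, offsets and helper name are invented for illustration:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define RB_SIZE 16				/* power of two, so mask = RB_SIZE - 1 */

/* Copy the logical range [start, end) out of a power-of-two ring buffer,
 * splitting it into two memcpy()s when it wraps (hypothetical helper). */
static void ring_copy(const unsigned char *data, uint64_t start, uint64_t end, unsigned char *out)
{
	uint64_t mask = RB_SIZE - 1;
	uint64_t size = end - start;

	if ((start & mask) + size != (end & mask)) {	/* range wraps past the end */
		uint64_t chunk = mask + 1 - (start & mask);

		memcpy(out, &data[start & mask], chunk);
		out += chunk;
		start += chunk;
		size = end - start;
	}
	memcpy(out, &data[start & mask], size);
}

int main(void)
{
	unsigned char rb[RB_SIZE], out[8];
	int i;

	for (i = 0; i < RB_SIZE; i++)
		rb[i] = 'a' + i;
	ring_copy(rb, 12, 20, out);		/* wraps: copies rb[12..15] then rb[0..3] */
	fwrite(out, 1, sizeof(out), stdout);
	putchar('\n');
	return 0;
}

With start = 12 and end = 20 the range wraps, so the copy is performed as rb[12..15] followed by rb[0..3], just as the two-chunk path above does for the perf ring buffer.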
In mmap_cpu_mask__duplicate():

	clone->nbits = original->nbits;
	clone->bits = bitmap_zalloc(original->nbits);
	if (!clone->bits)
		return -ENOMEM;
	memcpy(clone->bits, original->bits, MMAP_CPU_MASK_BYTES(original));