Lines Matching +full:cpu +full:- +full:core

1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * In-Memory Collection (IMC) Performance Monitor counter support.
12 #include <asm/imc-pmu.h>
20 * Used to avoid races in counting the nest-pmu units during hotplug
30 /* Core IMC data structures and variables */
49 * core and trace-imc
59 return container_of(event->pmu, struct imc_pmu, pmu); in imc_event_to_pmu()
62 PMU_FORMAT_ATTR(event, "config:0-61");
63 PMU_FORMAT_ATTR(offset, "config:0-31");
65 PMU_FORMAT_ATTR(mode, "config:33-40");
79 /* Format attribute for imc trace-mode */
80 PMU_FORMAT_ATTR(cpmc_reserved, "config:0-19");
81 PMU_FORMAT_ATTR(cpmc_event, "config:20-27");
82 PMU_FORMAT_ATTR(cpmc_samplesel, "config:28-29");
83 PMU_FORMAT_ATTR(cpmc_load, "config:30-61");
107 switch (imc_pmu->domain) { in imc_pmu_cpumask_get_attr()
140 sysfs_attr_init(&attr->attr.attr); in device_str_attr_create()
142 attr->event_str = str; in device_str_attr_create()
143 attr->attr.attr.name = name; in device_str_attr_create()
144 attr->attr.attr.mode = 0444; in device_str_attr_create()
145 attr->attr.show = perf_event_sysfs_show; in device_str_attr_create()
147 return &attr->attr.attr; in device_str_attr_create()
160 event->value = base + reg; in imc_parse_event()
162 if (of_property_read_string(np, "event-name", &s)) in imc_parse_event()
165 event->name = kasprintf(GFP_KERNEL, "%s%s", prefix, s); in imc_parse_event()
166 if (!event->name) in imc_parse_event()
173 event->scale = kstrdup(s, GFP_KERNEL); in imc_parse_event()
174 if (!event->scale) in imc_parse_event()
182 event->unit = kstrdup(s, GFP_KERNEL); in imc_parse_event()
183 if (!event->unit) in imc_parse_event()
189 kfree(event->unit); in imc_parse_event()
190 kfree(event->scale); in imc_parse_event()
191 kfree(event->name); in imc_parse_event()
192 return -EINVAL; in imc_parse_event()
242 if (of_property_read_string(node, "events-prefix", &prefix)) in update_events_in_group()
256 pmu->events = kcalloc(ct, sizeof(struct imc_events), GFP_KERNEL); in update_events_in_group()
257 if (!pmu->events) in update_events_in_group()
258 return -ENOMEM; in update_events_in_group()
263 ret = imc_parse_event(np, g_scale, g_unit, prefix, base_reg, &pmu->events[ct]); in update_events_in_group()
271 imc_free_events(pmu->events, ct); in update_events_in_group()
272 return -ENOMEM; in update_events_in_group()
279 * "ct" has the total event structs added from the events-parent node. in update_events_in_group()
286 imc_free_events(pmu->events, ct); in update_events_in_group()
287 return -ENOMEM; in update_events_in_group()
290 attr_group->name = "events"; in update_events_in_group()
291 attr_group->attrs = attrs; in update_events_in_group()
293 ev_val_str = kasprintf(GFP_KERNEL, "event=0x%x", pmu->events[i].value); in update_events_in_group()
294 dev_str = device_str_attr_create(pmu->events[i].name, ev_val_str); in update_events_in_group()
299 if (pmu->events[i].scale) { in update_events_in_group()
300 ev_scale_str = kasprintf(GFP_KERNEL, "%s.scale", pmu->events[i].name); in update_events_in_group()
301 dev_str = device_str_attr_create(ev_scale_str, pmu->events[i].scale); in update_events_in_group()
308 if (pmu->events[i].unit) { in update_events_in_group()
309 ev_unit_str = kasprintf(GFP_KERNEL, "%s.unit", pmu->events[i].name); in update_events_in_group()
310 dev_str = device_str_attr_create(ev_unit_str, pmu->events[i].unit); in update_events_in_group()
319 pmu->attr_groups[IMC_EVENT_ATTR] = attr_group; in update_events_in_group()
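The update_events_in_group() lines above build, for each event parsed from the device tree, a sysfs attribute whose value is the string "event=0x<reg offset>", plus optional "<name>.scale" and "<name>.unit" attributes. A minimal userspace sketch of the same string construction, with kasprintf() replaced by asprintf() and purely hypothetical event data:

```c
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *name = "PM_EXAMPLE_EVENT";	/* hypothetical event name */
	unsigned int value = 0x118;		/* hypothetical register offset */
	char *ev_val_str, *ev_scale_str, *ev_unit_str;

	/* The "events" attribute group maps <name> -> "event=0x<offset>". */
	if (asprintf(&ev_val_str, "event=0x%x", value) < 0 ||
	    asprintf(&ev_scale_str, "%s.scale", name) < 0 ||
	    asprintf(&ev_unit_str, "%s.unit", name) < 0)
		return 1;

	printf("%s = %s\n", name, ev_val_str);
	printf("optional attrs: %s, %s\n", ev_scale_str, ev_unit_str);

	free(ev_val_str);
	free(ev_scale_str);
	free(ev_unit_str);
	return 0;
}
```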
325 static struct imc_pmu_ref *get_nest_pmu_ref(int cpu) in get_nest_pmu_ref() argument
327 return per_cpu(local_nest_imc_refc, cpu); in get_nest_pmu_ref()
338 perf_pmu_migrate_context(&(*pn)->pmu, old_cpu, new_cpu); in nest_change_cpu_context()
343 static int ppc_nest_imc_cpu_offline(unsigned int cpu) in ppc_nest_imc_cpu_offline() argument
345 int nid, target = -1; in ppc_nest_imc_cpu_offline()
350 * Check in the designated list for this cpu. Don't bother in ppc_nest_imc_cpu_offline()
353 if (!cpumask_test_and_clear_cpu(cpu, &nest_imc_cpumask)) in ppc_nest_imc_cpu_offline()
370 * Now that this cpu is one of the designated cpus, in ppc_nest_imc_cpu_offline()
371 * find the next cpu which is a) online and b) in the same chip. in ppc_nest_imc_cpu_offline()
373 nid = cpu_to_node(cpu); in ppc_nest_imc_cpu_offline()
378 * If this (target) is the last cpu in the cpumask for this chip, in ppc_nest_imc_cpu_offline()
379 * check for any possible online cpu in the chip. in ppc_nest_imc_cpu_offline()
381 if (unlikely(target == cpu)) in ppc_nest_imc_cpu_offline()
382 target = cpumask_any_but(l_cpumask, cpu); in ppc_nest_imc_cpu_offline()
385 * Update the cpumask with the target cpu and in ppc_nest_imc_cpu_offline()
390 nest_change_cpu_context(cpu, target); in ppc_nest_imc_cpu_offline()
393 get_hard_smp_processor_id(cpu)); in ppc_nest_imc_cpu_offline()
395 * If this is the last cpu in this chip, then skip the reference in ppc_nest_imc_cpu_offline()
398 ref = get_nest_pmu_ref(cpu); in ppc_nest_imc_cpu_offline()
400 return -EINVAL; in ppc_nest_imc_cpu_offline()
402 ref->refc = 0; in ppc_nest_imc_cpu_offline()
407 static int ppc_nest_imc_cpu_online(unsigned int cpu) in ppc_nest_imc_cpu_online() argument
414 l_cpumask = cpumask_of_node(cpu_to_node(cpu)); in ppc_nest_imc_cpu_online()
417 * If this is not the first online CPU on this node, then in ppc_nest_imc_cpu_online()
424 * If this is the first online cpu on this node in ppc_nest_imc_cpu_online()
428 get_hard_smp_processor_id(cpu)); in ppc_nest_imc_cpu_online()
432 /* Make this CPU the designated target for counter collection */ in ppc_nest_imc_cpu_online()
433 cpumask_set_cpu(cpu, &nest_imc_cpumask); in ppc_nest_imc_cpu_online()
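The ppc_nest_imc_cpu_offline()/ppc_nest_imc_cpu_online() lines above keep exactly one designated CPU per chip in nest_imc_cpumask; when that CPU goes offline, another online CPU of the same node takes over and the perf context is migrated to it. A simplified, self-contained C sketch of that offline hand-over (the fake topology arrays and migrate_context() are hypothetical stand-ins for the kernel's cpumask and perf_pmu_migrate_context() machinery):

```c
#include <stdio.h>

#define MAX_CPUS 8

static int cpu_node[MAX_CPUS]   = { 0, 0, 0, 0, 1, 1, 1, 1 }; /* fake topology */
static int cpu_online[MAX_CPUS] = { 1, 1, 1, 1, 1, 1, 1, 1 };
static int designated[MAX_CPUS];                              /* per-CPU flag  */

static void migrate_context(int from, int to)
{
	printf("migrate PMU context: cpu%d -> cpu%d\n", from, to);
}

static void nest_cpu_offline(int cpu)
{
	int target = -1, i;

	if (!designated[cpu])
		return;			/* not the designated CPU, nothing to do */
	designated[cpu] = 0;
	cpu_online[cpu] = 0;

	/* Find another online CPU on the same node, if any. */
	for (i = 0; i < MAX_CPUS; i++) {
		if (i != cpu && cpu_online[i] && cpu_node[i] == cpu_node[cpu]) {
			target = i;
			break;
		}
	}
	if (target < 0) {
		printf("cpu%d was the last CPU of node %d: stop the engine\n",
		       cpu, cpu_node[cpu]);
		return;
	}
	designated[target] = 1;		/* new designated CPU for this node */
	migrate_context(cpu, target);
}

int main(void)
{
	designated[0] = designated[4] = 1;	/* one designated CPU per node */
	nest_cpu_offline(0);
	nest_cpu_offline(4);
	return 0;
}
```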
450 if (event->cpu < 0) in nest_imc_counters_release()
453 node_id = cpu_to_node(event->cpu); in nest_imc_counters_release()
461 ref = get_nest_pmu_ref(event->cpu); in nest_imc_counters_release()
466 mutex_lock(&ref->lock); in nest_imc_counters_release()
467 if (ref->refc == 0) { in nest_imc_counters_release()
473 * function sets ref->refc to zero if the cpu which is in nest_imc_counters_release()
474 * about to go offline is the last cpu in a given node, and makes in nest_imc_counters_release()
478 mutex_unlock(&ref->lock); in nest_imc_counters_release()
481 ref->refc--; in nest_imc_counters_release()
482 if (ref->refc == 0) { in nest_imc_counters_release()
484 get_hard_smp_processor_id(event->cpu)); in nest_imc_counters_release()
486 mutex_unlock(&ref->lock); in nest_imc_counters_release()
487 pr_err("nest-imc: Unable to stop the counters for core %d\n", node_id); in nest_imc_counters_release()
490 } else if (ref->refc < 0) { in nest_imc_counters_release()
491 WARN(1, "nest-imc: Invalid event reference count\n"); in nest_imc_counters_release()
492 ref->refc = 0; in nest_imc_counters_release()
494 mutex_unlock(&ref->lock); in nest_imc_counters_release()
500 u32 l_config, config = event->attr.config; in nest_imc_event_init()
506 if (event->attr.type != event->pmu->type) in nest_imc_event_init()
507 return -ENOENT; in nest_imc_event_init()
510 if (event->hw.sample_period) in nest_imc_event_init()
511 return -EINVAL; in nest_imc_event_init()
513 if (event->cpu < 0) in nest_imc_event_init()
514 return -EINVAL; in nest_imc_event_init()
519 if ((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size) in nest_imc_event_init()
520 return -EINVAL; in nest_imc_event_init()
523 * Nest HW counter memory resides in a per-chip reserved memory region (HOMER). in nest_imc_event_init()
524 * Get the base memory address for this cpu. in nest_imc_event_init()
526 chip_id = cpu_to_chip_id(event->cpu); in nest_imc_event_init()
530 return -ENODEV; in nest_imc_event_init()
532 pcni = pmu->mem_info; in nest_imc_event_init()
534 if (pcni->id == chip_id) { in nest_imc_event_init()
539 } while (pcni->vbase != 0); in nest_imc_event_init()
542 return -ENODEV; in nest_imc_event_init()
548 event->hw.event_base = (u64)pcni->vbase + l_config; in nest_imc_event_init()
549 node_id = cpu_to_node(event->cpu); in nest_imc_event_init()
556 ref = get_nest_pmu_ref(event->cpu); in nest_imc_event_init()
558 return -EINVAL; in nest_imc_event_init()
560 mutex_lock(&ref->lock); in nest_imc_event_init()
561 if (ref->refc == 0) { in nest_imc_event_init()
563 get_hard_smp_processor_id(event->cpu)); in nest_imc_event_init()
565 mutex_unlock(&ref->lock); in nest_imc_event_init()
566 pr_err("nest-imc: Unable to start the counters for node %d\n", in nest_imc_event_init()
571 ++ref->refc; in nest_imc_event_init()
572 mutex_unlock(&ref->lock); in nest_imc_event_init()
574 event->destroy = nest_imc_counters_release; in nest_imc_event_init()
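The nest_imc_event_init()/nest_imc_counters_release() lines show the per-node reference-counting scheme: the first event on a node starts the counter engine through a firmware (OPAL) call, later events only increment refc, and the last release stops the engine again, with the special case that a hotplug callback may already have zeroed refc. A compact userspace sketch of that pattern, assuming hypothetical hw_counters_start()/hw_counters_stop() stand-ins for the firmware calls and a pthread mutex in place of the kernel mutex:

```c
#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-ins for the firmware start/stop calls. */
static int hw_counters_start(int node) { printf("start node %d\n", node); return 0; }
static int hw_counters_stop(int node)  { printf("stop node %d\n", node);  return 0; }

struct imc_ref {
	pthread_mutex_t lock;
	int refc;			/* number of active events on this node */
	int id;				/* node id */
};

/* event_init path: the first event on the node starts the engine. */
static int imc_ref_get(struct imc_ref *ref)
{
	int rc = 0;

	pthread_mutex_lock(&ref->lock);
	if (ref->refc == 0 && hw_counters_start(ref->id))
		rc = -1;		/* could not start: leave refc at 0 */
	else
		ref->refc++;
	pthread_mutex_unlock(&ref->lock);
	return rc;
}

/* event->destroy path: the last event stops the engine again. */
static void imc_ref_put(struct imc_ref *ref)
{
	pthread_mutex_lock(&ref->lock);
	if (ref->refc == 0) {		/* hotplug already stopped and zeroed it */
		pthread_mutex_unlock(&ref->lock);
		return;
	}
	if (--ref->refc == 0)
		hw_counters_stop(ref->id);
	pthread_mutex_unlock(&ref->lock);
}

int main(void)
{
	struct imc_ref ref = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

	imc_ref_get(&ref);		/* starts the engine */
	imc_ref_get(&ref);		/* only bumps refc */
	imc_ref_put(&ref);
	imc_ref_put(&ref);		/* stops the engine */
	return 0;
}
```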
579 * core_imc_mem_init : Initializes memory for the current core.
584 * base address at which the core imc counters are populated.
586 static int core_imc_mem_init(int cpu, int size) in core_imc_mem_init() argument
588 int nid, rc = 0, core_id = (cpu / threads_per_core); in core_imc_mem_init()
593 * alloc_pages_node() will allocate memory for core in the in core_imc_mem_init()
596 nid = cpu_to_node(cpu); in core_imc_mem_init()
597 mem_info = &core_imc_pmu->mem_info[core_id]; in core_imc_mem_init()
598 mem_info->id = core_id; in core_imc_mem_init()
600 /* We need only vbase for core counters */ in core_imc_mem_init()
605 return -ENOMEM; in core_imc_mem_init()
606 mem_info->vbase = page_address(page); in core_imc_mem_init()
613 __pa((void *)mem_info->vbase), in core_imc_mem_init()
614 get_hard_smp_processor_id(cpu)); in core_imc_mem_init()
616 free_pages((u64)mem_info->vbase, get_order(size)); in core_imc_mem_init()
617 mem_info->vbase = NULL; in core_imc_mem_init()
623 static bool is_core_imc_mem_inited(int cpu) in is_core_imc_mem_inited() argument
626 int core_id = (cpu / threads_per_core); in is_core_imc_mem_inited()
628 mem_info = &core_imc_pmu->mem_info[core_id]; in is_core_imc_mem_inited()
629 if (!mem_info->vbase) in is_core_imc_mem_inited()
635 static int ppc_core_imc_cpu_online(unsigned int cpu) in ppc_core_imc_cpu_online() argument
641 /* Get the cpumask for this core */ in ppc_core_imc_cpu_online()
642 l_cpumask = cpu_sibling_mask(cpu); in ppc_core_imc_cpu_online()
644 /* If a cpu for this core is already set, then don't do anything */ in ppc_core_imc_cpu_online()
648 if (!is_core_imc_mem_inited(cpu)) { in ppc_core_imc_cpu_online()
649 ret = core_imc_mem_init(cpu, core_imc_pmu->counter_mem_size); in ppc_core_imc_cpu_online()
651 pr_info("core_imc memory allocation for cpu %d failed\n", cpu); in ppc_core_imc_cpu_online()
656 /* set the cpu in the mask */ in ppc_core_imc_cpu_online()
657 cpumask_set_cpu(cpu, &core_imc_cpumask); in ppc_core_imc_cpu_online()
661 static int ppc_core_imc_cpu_offline(unsigned int cpu) in ppc_core_imc_cpu_offline() argument
668 * clear this cpu out of the mask, if not present in the mask, in ppc_core_imc_cpu_offline()
671 if (!cpumask_test_and_clear_cpu(cpu, &core_imc_cpumask)) in ppc_core_imc_cpu_offline()
685 if (!core_imc_pmu->pmu.event_init) in ppc_core_imc_cpu_offline()
688 /* Find any online cpu in that core except the current "cpu" */ in ppc_core_imc_cpu_offline()
689 ncpu = cpumask_last(cpu_sibling_mask(cpu)); in ppc_core_imc_cpu_offline()
691 if (unlikely(ncpu == cpu)) in ppc_core_imc_cpu_offline()
692 ncpu = cpumask_any_but(cpu_sibling_mask(cpu), cpu); in ppc_core_imc_cpu_offline()
696 perf_pmu_migrate_context(&core_imc_pmu->pmu, cpu, ncpu); in ppc_core_imc_cpu_offline()
699 * If this is the last cpu in this core, then skip taking the reference in ppc_core_imc_cpu_offline()
700 * count mutex lock for this core and directly zero "refc" for in ppc_core_imc_cpu_offline()
701 * this core. in ppc_core_imc_cpu_offline()
704 get_hard_smp_processor_id(cpu)); in ppc_core_imc_cpu_offline()
705 core_id = cpu / threads_per_core; in ppc_core_imc_cpu_offline()
708 return -EINVAL; in ppc_core_imc_cpu_offline()
710 ref->refc = 0; in ppc_core_imc_cpu_offline()
713 * last cpu in this core and core-imc event running in ppc_core_imc_cpu_offline()
714 * in this cpu. in ppc_core_imc_cpu_offline()
718 imc_global_refc.refc--; in ppc_core_imc_cpu_offline()
736 imc_global_refc.refc--; in reset_global_refc()
740 * event for this domain(thread/core/trace), in reset_global_refc()
755 if (event->cpu < 0) in core_imc_counters_release()
761 * enable or disable the core counters. in core_imc_counters_release()
763 core_id = event->cpu / threads_per_core; in core_imc_counters_release()
765 /* Take the mutex lock and decrement the reference count for this core */ in core_imc_counters_release()
770 mutex_lock(&ref->lock); in core_imc_counters_release()
771 if (ref->refc == 0) { in core_imc_counters_release()
774 * started, followed by offlining of all cpus in a given core. in core_imc_counters_release()
777 * function sets ref->refc to zero if the cpu which is in core_imc_counters_release()
778 * about to go offline is the last cpu in a given core, and makes in core_imc_counters_release()
779 * an OPAL call to disable the engine in that core. in core_imc_counters_release()
782 mutex_unlock(&ref->lock); in core_imc_counters_release()
785 ref->refc--; in core_imc_counters_release()
786 if (ref->refc == 0) { in core_imc_counters_release()
788 get_hard_smp_processor_id(event->cpu)); in core_imc_counters_release()
790 mutex_unlock(&ref->lock); in core_imc_counters_release()
791 pr_err("IMC: Unable to stop the counters for core %d\n", core_id); in core_imc_counters_release()
794 } else if (ref->refc < 0) { in core_imc_counters_release()
795 WARN(1, "core-imc: Invalid event reference count\n"); in core_imc_counters_release()
796 ref->refc = 0; in core_imc_counters_release()
798 mutex_unlock(&ref->lock); in core_imc_counters_release()
806 u64 config = event->attr.config; in core_imc_event_init()
811 if (event->attr.type != event->pmu->type) in core_imc_event_init()
812 return -ENOENT; in core_imc_event_init()
815 if (event->hw.sample_period) in core_imc_event_init()
816 return -EINVAL; in core_imc_event_init()
818 if (event->cpu < 0) in core_imc_event_init()
819 return -EINVAL; in core_imc_event_init()
821 event->hw.idx = -1; in core_imc_event_init()
825 if (((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size)) in core_imc_event_init()
826 return -EINVAL; in core_imc_event_init()
828 if (!is_core_imc_mem_inited(event->cpu)) in core_imc_event_init()
829 return -ENODEV; in core_imc_event_init()
831 core_id = event->cpu / threads_per_core; in core_imc_event_init()
832 pcmi = &core_imc_pmu->mem_info[core_id]; in core_imc_event_init()
833 if ((!pcmi->vbase)) in core_imc_event_init()
834 return -ENODEV; in core_imc_event_init()
836 /* Get the core_imc mutex for this core */ in core_imc_event_init()
839 return -EINVAL; in core_imc_event_init()
842 * Core pmu units are enabled only when they are used. in core_imc_event_init()
844 * If yes, take the mutex lock and enable the core counters. in core_imc_event_init()
847 mutex_lock(&ref->lock); in core_imc_event_init()
848 if (ref->refc == 0) { in core_imc_event_init()
850 get_hard_smp_processor_id(event->cpu)); in core_imc_event_init()
852 mutex_unlock(&ref->lock); in core_imc_event_init()
853 pr_err("core-imc: Unable to start the counters for core %d\n", in core_imc_event_init()
858 ++ref->refc; in core_imc_event_init()
859 mutex_unlock(&ref->lock); in core_imc_event_init()
862 * Since the system can run either in accumulation or trace-mode in core_imc_event_init()
863 * of IMC at a time, core-imc events are allowed only if no other in core_imc_event_init()
874 * the system, so set the refc.id to core-imc. in core_imc_event_init()
880 return -EBUSY; in core_imc_event_init()
884 event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK); in core_imc_event_init()
885 event->destroy = core_imc_counters_release; in core_imc_event_init()
892 * The physical base address of the page allocated for a cpu will be
893 * written to the LDBAR for that cpu, when the thread-imc event
899 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
906 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
919 * free the memory in cpu offline path. in thread_imc_mem_alloc()
925 return -ENOMEM; in thread_imc_mem_alloc()
935 static int ppc_thread_imc_cpu_online(unsigned int cpu) in ppc_thread_imc_cpu_online() argument
937 return thread_imc_mem_alloc(cpu, thread_imc_mem_size); in ppc_thread_imc_cpu_online()
940 static int ppc_thread_imc_cpu_offline(unsigned int cpu) in ppc_thread_imc_cpu_offline() argument
947 * For thread-imc, bit 0 of LDBAR will be set to 1 in the in ppc_thread_imc_cpu_offline()
953 /* Reduce the refc if thread-imc event running on this cpu */ in ppc_thread_imc_cpu_offline()
956 imc_global_refc.refc--; in ppc_thread_imc_cpu_offline()
972 u32 config = event->attr.config; in thread_imc_event_init()
976 if (event->attr.type != event->pmu->type) in thread_imc_event_init()
977 return -ENOENT; in thread_imc_event_init()
980 return -EACCES; in thread_imc_event_init()
983 if (event->hw.sample_period) in thread_imc_event_init()
984 return -EINVAL; in thread_imc_event_init()
986 event->hw.idx = -1; in thread_imc_event_init()
990 if (((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size)) in thread_imc_event_init()
991 return -EINVAL; in thread_imc_event_init()
993 target = event->hw.target; in thread_imc_event_init()
995 return -EINVAL; in thread_imc_event_init()
999 * Check if any other trace/core imc events are running in the in thread_imc_event_init()
1000 * system, if not set the global id to thread-imc. in thread_imc_event_init()
1007 return -EBUSY; in thread_imc_event_init()
1011 event->pmu->task_ctx_nr = perf_sw_context; in thread_imc_event_init()
1012 event->destroy = reset_global_refc; in thread_imc_event_init()
1018 if (!strncmp(event->pmu->name, "thread_imc", strlen("thread_imc"))) in is_thread_imc_pmu()
1030 return (u64 *)(addr + (event->attr.config & IMC_EVENT_OFFSET_MASK)); in get_event_base_addr()
1033 return (u64 *)event->hw.event_base; in get_event_base_addr()
1060 * In-Memory Collection (IMC) counters are free flowing counters. in imc_read_counter()
1067 local64_set(&event->hw.prev_count, data); in imc_read_counter()
1076 counter_prev = local64_read(&event->hw.prev_count); in imc_event_update()
1078 final_count = counter_new - counter_prev; in imc_event_update()
1081 local64_add(final_count, &event->count); in imc_event_update()
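The imc_read_counter()/imc_event_update() lines above implement the standard read path for free-flowing hardware counters: snapshot the raw value into prev_count, and on every update accumulate only the delta since the previous snapshot. A minimal userspace C sketch of that accumulation (read_hw_counter() is a hypothetical stand-in for the memory-mapped counter read):

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for reading a free-running 64-bit hardware counter. */
static uint64_t read_hw_counter(void)
{
	static uint64_t fake;		/* simulated monotonically increasing value */
	return fake += 12345;
}

struct event_count {
	uint64_t prev;			/* last snapshot (like event->hw.prev_count) */
	uint64_t total;			/* accumulated count (like event->count)     */
};

static void event_start(struct event_count *ev)
{
	ev->prev = read_hw_counter();	/* baseline snapshot, nothing counted yet */
}

static void event_update(struct event_count *ev)
{
	uint64_t now = read_hw_counter();

	ev->total += now - ev->prev;	/* unsigned subtraction also covers wraparound */
	ev->prev = now;
}

int main(void)
{
	struct event_count ev = { 0, 0 };

	event_start(&ev);
	event_update(&ev);
	event_update(&ev);
	printf("accumulated: %llu\n", (unsigned long long)ev.total);
	return 0;
}
```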
1122 return -EINVAL; in thread_imc_event_add()
1136 return -EINVAL; in thread_imc_event_add()
1138 mutex_lock(&ref->lock); in thread_imc_event_add()
1139 if (ref->refc == 0) { in thread_imc_event_add()
1142 mutex_unlock(&ref->lock); in thread_imc_event_add()
1143 pr_err("thread-imc: Unable to start the counter\ in thread_imc_event_add()
1144 for core %d\n", core_id); in thread_imc_event_add()
1145 return -EINVAL; in thread_imc_event_add()
1148 ++ref->refc; in thread_imc_event_add()
1149 mutex_unlock(&ref->lock); in thread_imc_event_add()
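As the LDBAR comment block earlier in the file describes, thread_imc_event_add() points LDBAR at the per-CPU counter page and sets its enable bit (bit 0 in IBM numbering), while the offline/disable paths write 0 to clear it. A hedged sketch of composing such a register value; the mask and enable constants below are illustrative only, the real macros live in asm/imc-pmu.h and may differ:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative values, not the driver's actual macro definitions. */
#define IMC_LDBAR_ADDR_MASK	0x0003ffffffffe000ULL	/* counter address field   */
#define IMC_LDBAR_ENABLE	0x8000000000000000ULL	/* bit 0 in IBM numbering  */

static uint64_t compose_ldbar(uint64_t page_phys_addr)
{
	/* Physical base of the per-thread counter page, plus the enable bit. */
	return (page_phys_addr & IMC_LDBAR_ADDR_MASK) | IMC_LDBAR_ENABLE;
}

int main(void)
{
	uint64_t phys = 0x2001230000ULL;	/* hypothetical page address */

	printf("enable:  LDBAR = 0x%016llx\n",
	       (unsigned long long)compose_ldbar(phys));
	printf("disable: LDBAR = 0x%016llx\n", 0ULL);	/* offline path writes 0 */
	return 0;
}
```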
1166 mutex_lock(&ref->lock); in thread_imc_event_del()
1167 ref->refc--; in thread_imc_event_del()
1168 if (ref->refc == 0) { in thread_imc_event_del()
1171 mutex_unlock(&ref->lock); in thread_imc_event_del()
1172 pr_err("thread-imc: Unable to stop the counters\ in thread_imc_event_del()
1173 for core %d\n", core_id); in thread_imc_event_del()
1176 } else if (ref->refc < 0) { in thread_imc_event_del()
1177 ref->refc = 0; in thread_imc_event_del()
1179 mutex_unlock(&ref->lock); in thread_imc_event_del()
1192 * Allocate a page of memory for each cpu, and load LDBAR with 0.
1207 return -ENOMEM; in trace_imc_mem_alloc()
1228 static int ppc_trace_imc_cpu_online(unsigned int cpu) in ppc_trace_imc_cpu_online() argument
1230 return trace_imc_mem_alloc(cpu, trace_imc_mem_size); in ppc_trace_imc_cpu_online()
1233 static int ppc_trace_imc_cpu_offline(unsigned int cpu) in ppc_trace_imc_cpu_offline() argument
1237 * it is set to zero for imc trace-mode in ppc_trace_imc_cpu_offline()
1239 * Reduce the refc if any trace-imc event running in ppc_trace_imc_cpu_offline()
1240 * on this cpu. in ppc_trace_imc_cpu_offline()
1244 imc_global_refc.refc--; in ppc_trace_imc_cpu_offline()
1264 * Function to parse trace-imc data obtained
1274 if (be64_to_cpu(READ_ONCE(mem->tb1)) > *prev_tb) in trace_imc_prepare_sample()
1275 *prev_tb = be64_to_cpu(READ_ONCE(mem->tb1)); in trace_imc_prepare_sample()
1277 return -EINVAL; in trace_imc_prepare_sample()
1279 if ((be64_to_cpu(READ_ONCE(mem->tb1)) & IMC_TRACE_RECORD_TB1_MASK) != in trace_imc_prepare_sample()
1280 be64_to_cpu(READ_ONCE(mem->tb2))) in trace_imc_prepare_sample()
1281 return -EINVAL; in trace_imc_prepare_sample()
1284 data->ip = be64_to_cpu(READ_ONCE(mem->ip)); in trace_imc_prepare_sample()
1285 data->period = event->hw.last_period; in trace_imc_prepare_sample()
1287 header->type = PERF_RECORD_SAMPLE; in trace_imc_prepare_sample()
1288 header->size = sizeof(*header) + event->header_size; in trace_imc_prepare_sample()
1289 header->misc = 0; in trace_imc_prepare_sample()
1292 switch (IMC_TRACE_RECORD_VAL_HVPR(be64_to_cpu(READ_ONCE(mem->val)))) { in trace_imc_prepare_sample()
1293 case 0:/* when MSR HV and PR not set in the trace-record */ in trace_imc_prepare_sample()
1294 header->misc |= PERF_RECORD_MISC_GUEST_KERNEL; in trace_imc_prepare_sample()
1297 header->misc |= PERF_RECORD_MISC_GUEST_USER; in trace_imc_prepare_sample()
1300 header->misc |= PERF_RECORD_MISC_KERNEL; in trace_imc_prepare_sample()
1303 header->misc |= PERF_RECORD_MISC_USER; in trace_imc_prepare_sample()
1310 if (is_kernel_addr(data->ip)) in trace_imc_prepare_sample()
1311 header->misc |= PERF_RECORD_MISC_KERNEL; in trace_imc_prepare_sample()
1313 header->misc |= PERF_RECORD_MISC_USER; in trace_imc_prepare_sample()
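The trace_imc_prepare_sample() lines above classify each trace record from the captured MSR HV/PR bits: both clear means guest kernel, PR only means guest user, HV only means host kernel, and both set means host user. A small sketch of that two-bit decode (the enum names are illustrative, not the perf_event_header misc flags themselves, and the bit assignment shown is an assumption):

```c
#include <stdio.h>

/* Illustrative classification of a trace record from its MSR HV/PR bits. */
enum sample_ctx { GUEST_KERNEL, GUEST_USER, HOST_KERNEL, HOST_USER };

static enum sample_ctx decode_hvpr(unsigned int hvpr)
{
	switch (hvpr & 0x3) {		/* two-bit HV/PR field; bit order assumed */
	case 0:  return GUEST_KERNEL;	/* HV=0, PR=0 */
	case 1:  return GUEST_USER;	/* HV=0, PR=1 */
	case 2:  return HOST_KERNEL;	/* HV=1, PR=0 */
	default: return HOST_USER;	/* HV=1, PR=1 */
	}
}

int main(void)
{
	static const char * const names[] = {
		"guest kernel", "guest user", "host kernel", "host user"
	};
	unsigned int v;

	for (v = 0; v < 4; v++)
		printf("hvpr=%u -> %s\n", v, names[decode_hvpr(v)]);
	return 0;
}
```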
1354 /* Set trace-imc bit in ldbar and load ldbar with per-thread memory address */ in trace_imc_event_add()
1358 /* trace-imc reference count */ in trace_imc_event_add()
1363 return -EINVAL; in trace_imc_event_add()
1367 mutex_lock(&ref->lock); in trace_imc_event_add()
1368 if (ref->refc == 0) { in trace_imc_event_add()
1371 mutex_unlock(&ref->lock); in trace_imc_event_add()
1372 pr_err("trace-imc: Unable to start the counters for core %d\n", core_id); in trace_imc_event_add()
1373 return -EINVAL; in trace_imc_event_add()
1376 ++ref->refc; in trace_imc_event_add()
1377 mutex_unlock(&ref->lock); in trace_imc_event_add()
1410 mutex_lock(&ref->lock); in trace_imc_event_del()
1411 ref->refc--; in trace_imc_event_del()
1412 if (ref->refc == 0) { in trace_imc_event_del()
1415 mutex_unlock(&ref->lock); in trace_imc_event_del()
1416 pr_err("trace-imc: Unable to stop the counters for core %d\n", core_id); in trace_imc_event_del()
1419 } else if (ref->refc < 0) { in trace_imc_event_del()
1420 ref->refc = 0; in trace_imc_event_del()
1422 mutex_unlock(&ref->lock); in trace_imc_event_del()
1429 if (event->attr.type != event->pmu->type) in trace_imc_event_init()
1430 return -ENOENT; in trace_imc_event_init()
1433 return -EACCES; in trace_imc_event_init()
1436 if (event->attr.sample_period == 0) in trace_imc_event_init()
1437 return -ENOENT; in trace_imc_event_init()
1441 * no other thread is running any core/thread imc in trace_imc_event_init()
1447 * No core/thread imc events are running in the in trace_imc_event_init()
1448 * system, so set the refc.id to trace-imc. in trace_imc_event_init()
1454 return -EBUSY; in trace_imc_event_init()
1458 event->hw.idx = -1; in trace_imc_event_init()
1460 event->pmu->task_ctx_nr = perf_hw_context; in trace_imc_event_init()
1461 event->destroy = reset_global_refc; in trace_imc_event_init()
1468 pmu->pmu.task_ctx_nr = perf_invalid_context; in update_pmu_ops()
1469 pmu->pmu.add = imc_event_add; in update_pmu_ops()
1470 pmu->pmu.del = imc_event_stop; in update_pmu_ops()
1471 pmu->pmu.start = imc_event_start; in update_pmu_ops()
1472 pmu->pmu.stop = imc_event_stop; in update_pmu_ops()
1473 pmu->pmu.read = imc_event_update; in update_pmu_ops()
1474 pmu->pmu.attr_groups = pmu->attr_groups; in update_pmu_ops()
1475 pmu->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE; in update_pmu_ops()
1476 pmu->attr_groups[IMC_FORMAT_ATTR] = &imc_format_group; in update_pmu_ops()
1478 switch (pmu->domain) { in update_pmu_ops()
1480 pmu->pmu.event_init = nest_imc_event_init; in update_pmu_ops()
1481 pmu->attr_groups[IMC_CPUMASK_ATTR] = &imc_pmu_cpumask_attr_group; in update_pmu_ops()
1484 pmu->pmu.event_init = core_imc_event_init; in update_pmu_ops()
1485 pmu->attr_groups[IMC_CPUMASK_ATTR] = &imc_pmu_cpumask_attr_group; in update_pmu_ops()
1488 pmu->pmu.event_init = thread_imc_event_init; in update_pmu_ops()
1489 pmu->pmu.add = thread_imc_event_add; in update_pmu_ops()
1490 pmu->pmu.del = thread_imc_event_del; in update_pmu_ops()
1491 pmu->pmu.start_txn = thread_imc_pmu_start_txn; in update_pmu_ops()
1492 pmu->pmu.cancel_txn = thread_imc_pmu_cancel_txn; in update_pmu_ops()
1493 pmu->pmu.commit_txn = thread_imc_pmu_commit_txn; in update_pmu_ops()
1496 pmu->pmu.event_init = trace_imc_event_init; in update_pmu_ops()
1497 pmu->pmu.add = trace_imc_event_add; in update_pmu_ops()
1498 pmu->pmu.del = trace_imc_event_del; in update_pmu_ops()
1499 pmu->pmu.start = trace_imc_event_start; in update_pmu_ops()
1500 pmu->pmu.stop = trace_imc_event_stop; in update_pmu_ops()
1501 pmu->pmu.read = trace_imc_event_read; in update_pmu_ops()
1502 pmu->attr_groups[IMC_FORMAT_ATTR] = &trace_imc_format_group; in update_pmu_ops()
1513 int nid, i, cpu; in init_nest_pmu_ref() local
1519 return -ENOMEM; in init_nest_pmu_ref()
1542 for_each_possible_cpu(cpu) { in init_nest_pmu_ref()
1543 nid = cpu_to_node(cpu); in init_nest_pmu_ref()
1546 per_cpu(local_nest_imc_refc, cpu) = &nest_imc_refc[i]; in init_nest_pmu_ref()
1557 struct imc_mem_info *ptr = core_imc_pmu->mem_info; in cleanup_all_core_imc_memory()
1558 int size = core_imc_pmu->counter_mem_size; in cleanup_all_core_imc_memory()
1573 * By setting the 0th bit of LDBAR to zero, we disable thread-imc in thread_imc_ldbar_disable()
1610 if (pmu_ptr->attr_groups[IMC_EVENT_ATTR]) in imc_common_mem_free()
1611 kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs); in imc_common_mem_free()
1612 kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]); in imc_common_mem_free()
1616 * Common function to unregister cpu hotplug callback and
1623 if (pmu_ptr->domain == IMC_DOMAIN_NEST) { in imc_common_cpuhp_mem_free()
1633 nest_pmus--; in imc_common_cpuhp_mem_free()
1638 if (pmu_ptr->domain == IMC_DOMAIN_CORE) { in imc_common_cpuhp_mem_free()
1644 if (pmu_ptr->domain == IMC_DOMAIN_THREAD) { in imc_common_cpuhp_mem_free()
1649 if (pmu_ptr->domain == IMC_DOMAIN_TRACE) { in imc_common_cpuhp_mem_free()
1656 * Function to unregister thread-imc if core-imc
1663 perf_pmu_unregister(&thread_imc_pmu->pmu); in unregister_thread_imc()
1667 * imc_mem_init : Function to support memory allocation for core imc.
1673 int nr_cores, cpu, res = -ENOMEM; in imc_mem_init() local
1676 return -ENODEV; in imc_mem_init()
1678 switch (pmu_ptr->domain) { in imc_mem_init()
1681 pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s_imc", "nest_", s); in imc_mem_init()
1682 if (!pmu_ptr->pmu.name) in imc_mem_init()
1697 pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc"); in imc_mem_init()
1698 if (!pmu_ptr->pmu.name) in imc_mem_init()
1702 pmu_ptr->mem_info = kcalloc(nr_cores, sizeof(struct imc_mem_info), in imc_mem_init()
1705 if (!pmu_ptr->mem_info) in imc_mem_init()
1712 kfree(pmu_ptr->mem_info); in imc_mem_init()
1720 pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc"); in imc_mem_init()
1721 if (!pmu_ptr->pmu.name) in imc_mem_init()
1724 thread_imc_mem_size = pmu_ptr->counter_mem_size; in imc_mem_init()
1725 for_each_online_cpu(cpu) { in imc_mem_init()
1726 res = thread_imc_mem_alloc(cpu, pmu_ptr->counter_mem_size); in imc_mem_init()
1737 pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc"); in imc_mem_init()
1738 if (!pmu_ptr->pmu.name) in imc_mem_init()
1739 return -ENOMEM; in imc_mem_init()
1745 return -ENOMEM; in imc_mem_init()
1747 trace_imc_mem_size = pmu_ptr->counter_mem_size; in imc_mem_init()
1748 for_each_online_cpu(cpu) { in imc_mem_init()
1749 res = trace_imc_mem_alloc(cpu, trace_imc_mem_size); in imc_mem_init()
1757 return -EINVAL; in imc_mem_init()
1772 * init_imc_pmu() sets up the pmu cpumask and registers a cpu hotplug callback. in init_imc_pmu()
1783 switch (pmu_ptr->domain) { in init_imc_pmu()
1786 * Nest imc pmu needs only one cpu per chip; we initialize the in init_imc_pmu()
1800 /* Register for cpu hotplug notification. */ in init_imc_pmu()
1838 return -EINVAL; /* Unknown domain */ in init_imc_pmu()
1849 ret = perf_pmu_register(&pmu_ptr->pmu, pmu_ptr->pmu.name, -1); in init_imc_pmu()
1854 pmu_ptr->pmu.name); in init_imc_pmu()