Lines matching 'pmu' in drivers/perf/fsl_imx8_ddr_perf.c (i.MX8 DDR performance monitor driver); a short illustrative sketch of the container_of() accessor and counter-allocation patterns follows the listing.
40 #define to_ddr_pmu(p) container_of(p, struct ddr_pmu, pmu)
66 { .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data},
67 { .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data},
68 { .compatible = "fsl,imx8mp-ddr-pmu", .data = &imx8mp_devtype_data},
74 struct pmu pmu; member
93 static u32 ddr_perf_filter_cap_get(struct ddr_pmu *pmu, int cap) in ddr_perf_filter_cap_get() argument
95 u32 quirks = pmu->devtype_data->quirks; in ddr_perf_filter_cap_get()
114 struct ddr_pmu *pmu = dev_get_drvdata(dev); in ddr_perf_filter_cap_show() local
120 ddr_perf_filter_cap_get(pmu, cap)); in ddr_perf_filter_cap_show()
145 struct ddr_pmu *pmu = dev_get_drvdata(dev); in ddr_perf_cpumask_show() local
147 return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu)); in ddr_perf_cpumask_show()
266 struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); in ddr_perf_is_enhanced_filtered() local
268 filt = pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED; in ddr_perf_is_enhanced_filtered()
273 static u32 ddr_perf_alloc_counter(struct ddr_pmu *pmu, int event) in ddr_perf_alloc_counter() argument
283 if (pmu->events[EVENT_CYCLES_COUNTER] == NULL) in ddr_perf_alloc_counter()
290 if (pmu->events[i] == NULL) in ddr_perf_alloc_counter()
297 static void ddr_perf_free_counter(struct ddr_pmu *pmu, int counter) in ddr_perf_free_counter() argument
299 pmu->events[counter] = NULL; in ddr_perf_free_counter()
302 static u32 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter) in ddr_perf_read_counter() argument
304 struct perf_event *event = pmu->events[counter]; in ddr_perf_read_counter()
305 void __iomem *base = pmu->base; in ddr_perf_read_counter()
309 * axid-read and axid-write event if PMU core supports enhanced in ddr_perf_read_counter()
319 struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); in ddr_perf_event_init() local
323 if (event->attr.type != event->pmu->type) in ddr_perf_event_init()
330 dev_warn(pmu->dev, "Can't provide per-task data!\n"); in ddr_perf_event_init()
339 if (event->group_leader->pmu != event->pmu && in ddr_perf_event_init()
343 if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) { in ddr_perf_event_init()
353 if (sibling->pmu != event->pmu && in ddr_perf_event_init()
358 event->cpu = pmu->cpu; in ddr_perf_event_init()
367 struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); in ddr_perf_event_update() local
374 new_raw_count = ddr_perf_read_counter(pmu, counter); in ddr_perf_event_update()
383 static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config, in ddr_perf_counter_enable() argument
396 writel(0, pmu->base + reg); in ddr_perf_counter_enable()
399 writel(val, pmu->base + reg); in ddr_perf_counter_enable()
402 val = readl_relaxed(pmu->base + reg) & CNTL_EN_MASK; in ddr_perf_counter_enable()
403 writel(val, pmu->base + reg); in ddr_perf_counter_enable()
409 struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); in ddr_perf_event_start() local
415 ddr_perf_counter_enable(pmu, event->attr.config, counter, true); in ddr_perf_event_start()
422 struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); in ddr_perf_event_add() local
428 if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) { in ddr_perf_event_add()
432 if (pmu->events[i] && in ddr_perf_event_add()
433 !ddr_perf_filters_compatible(event, pmu->events[i])) in ddr_perf_event_add()
440 writel(cfg1, pmu->base + COUNTER_DPCR1); in ddr_perf_event_add()
444 counter = ddr_perf_alloc_counter(pmu, cfg); in ddr_perf_event_add()
446 dev_dbg(pmu->dev, "There are not enough counters\n"); in ddr_perf_event_add()
450 pmu->events[counter] = event; in ddr_perf_event_add()
451 pmu->active_events++; in ddr_perf_event_add()
464 struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); in ddr_perf_event_stop() local
468 ddr_perf_counter_enable(pmu, event->attr.config, counter, false); in ddr_perf_event_stop()
476 struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); in ddr_perf_event_del() local
482 ddr_perf_free_counter(pmu, counter); in ddr_perf_event_del()
483 pmu->active_events--; in ddr_perf_event_del()
487 static void ddr_perf_pmu_enable(struct pmu *pmu) in ddr_perf_pmu_enable() argument
489 struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu); in ddr_perf_pmu_enable()
499 static void ddr_perf_pmu_disable(struct pmu *pmu) in ddr_perf_pmu_disable() argument
501 struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu); in ddr_perf_pmu_disable()
510 static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base, in ddr_perf_init() argument
513 *pmu = (struct ddr_pmu) { in ddr_perf_init()
514 .pmu = (struct pmu) { in ddr_perf_init()
532 pmu->id = ida_simple_get(&ddr_ida, 0, 0, GFP_KERNEL); in ddr_perf_init()
533 return pmu->id; in ddr_perf_init()
539 struct ddr_pmu *pmu = (struct ddr_pmu *) p; in ddr_perf_irq_handler() local
543 ddr_perf_counter_enable(pmu, in ddr_perf_irq_handler()
559 if (!pmu->events[i]) in ddr_perf_irq_handler()
562 event = pmu->events[i]; in ddr_perf_irq_handler()
570 ddr_perf_counter_enable(pmu, in ddr_perf_irq_handler()
582 struct ddr_pmu *pmu = hlist_entry_safe(node, struct ddr_pmu, node); in ddr_perf_offline_cpu() local
585 if (cpu != pmu->cpu) in ddr_perf_offline_cpu()
592 perf_pmu_migrate_context(&pmu->pmu, cpu, target); in ddr_perf_offline_cpu()
593 pmu->cpu = target; in ddr_perf_offline_cpu()
595 WARN_ON(irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu))); in ddr_perf_offline_cpu()
602 struct ddr_pmu *pmu; in ddr_perf_probe() local
616 pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL); in ddr_perf_probe()
617 if (!pmu) in ddr_perf_probe()
620 num = ddr_perf_init(pmu, base, &pdev->dev); in ddr_perf_probe()
622 platform_set_drvdata(pdev, pmu); in ddr_perf_probe()
629 pmu->devtype_data = of_device_get_match_data(&pdev->dev); in ddr_perf_probe()
631 pmu->cpu = raw_smp_processor_id(); in ddr_perf_probe()
642 pmu->cpuhp_state = ret; in ddr_perf_probe()
644 /* Register the pmu instance for cpu hotplug */ in ddr_perf_probe()
645 ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node); in ddr_perf_probe()
663 pmu); in ddr_perf_probe()
669 pmu->irq = irq; in ddr_perf_probe()
670 ret = irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu)); in ddr_perf_probe()
672 dev_err(pmu->dev, "Failed to set interrupt affinity!\n"); in ddr_perf_probe()
676 ret = perf_pmu_register(&pmu->pmu, name, -1); in ddr_perf_probe()
683 cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node); in ddr_perf_probe()
685 cpuhp_remove_multi_state(pmu->cpuhp_state); in ddr_perf_probe()
687 ida_simple_remove(&ddr_ida, pmu->id); in ddr_perf_probe()
688 dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret); in ddr_perf_probe()
694 struct ddr_pmu *pmu = platform_get_drvdata(pdev); in ddr_perf_remove() local
696 cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node); in ddr_perf_remove()
697 cpuhp_remove_multi_state(pmu->cpuhp_state); in ddr_perf_remove()
698 irq_set_affinity_hint(pmu->irq, NULL); in ddr_perf_remove()
700 perf_pmu_unregister(&pmu->pmu); in ddr_perf_remove()
702 ida_simple_remove(&ddr_ida, pmu->id); in ddr_perf_remove()
708 .name = "imx-ddr-pmu",