// SPDX-License-Identifier: GPL-2.0
/*
 * ARM CoreSight Architecture PMU driver.
 *
 * This driver adds support for uncore PMUs based on the ARM CoreSight
 * Performance Monitoring Unit Architecture. The PMU is accessible via MMIO
 * registers and, like other uncore PMUs, it does not support process-specific
 * events and cannot be used in sampling mode.
 *
 * This code is based on other uncore PMUs like the ARM DSU PMU. It provides a
 * generic implementation to operate the PMU according to the CoreSight PMU
 * architecture and ACPI ARM PMU table (APMT) documents below:
 * - ARM CoreSight PMU architecture document number: ARM IHI 0091 A.a-00bet0.
 * - APMT document number: ARM DEN0117.
 *
 * The user should refer to the vendor technical documentation for details
 * about the supported events.
 *
 * Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 */

#include <linux/acpi.h>
#include <linux/cacheinfo.h>
#include <linux/ctype.h>
#include <linux/interrupt.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>

#include "arm_cspmu.h"

#define PMUNAME "arm_cspmu"
#define DRVNAME "arm-cs-arch-pmu"

#define ARM_CSPMU_CPUMASK_ATTR(_name, _config)			\
	ARM_CSPMU_EXT_ATTR(_name, arm_cspmu_cpumask_show,	\
			   (unsigned long)_config)

/* Each SET/CLR register supports up to 32 counters. */
#define ARM_CSPMU_SET_CLR_COUNTER_SHIFT		5
#define ARM_CSPMU_SET_CLR_COUNTER_NUM		\
	(1 << ARM_CSPMU_SET_CLR_COUNTER_SHIFT)

/* Convert counter idx into SET/CLR register number. */
#define COUNTER_TO_SET_CLR_ID(idx)		\
	(idx >> ARM_CSPMU_SET_CLR_COUNTER_SHIFT)

/* Convert counter idx into SET/CLR register bit. */
#define COUNTER_TO_SET_CLR_BIT(idx)		\
	(idx & (ARM_CSPMU_SET_CLR_COUNTER_NUM - 1))

#define ARM_CSPMU_ACTIVE_CPU_MASK		0x0
#define ARM_CSPMU_ASSOCIATED_CPU_MASK		0x1

/*
 * Maximum poll count for reading counter value using high-low-high sequence.
 */
#define HILOHI_MAX_POLL	1000

static unsigned long arm_cspmu_cpuhp_state;

static DEFINE_MUTEX(arm_cspmu_lock);

static void arm_cspmu_set_ev_filter(struct arm_cspmu *cspmu,
				    const struct perf_event *event);
static void arm_cspmu_set_cc_filter(struct arm_cspmu *cspmu,
				    const struct perf_event *event);

static struct acpi_apmt_node *arm_cspmu_apmt_node(struct device *dev)
{
	struct acpi_apmt_node **ptr = dev_get_platdata(dev);

	return ptr ? *ptr : NULL;
}

/*
 * In the CoreSight PMU architecture, all of the MMIO registers are 32-bit
 * except for the counter register, which can be implemented as a 32-bit or
 * 64-bit register depending on the value of the PMCFGR.SIZE field. For 64-bit
 * access, single-copy 64-bit atomic support is implementation defined. An
 * APMT node flag is used to identify whether the PMU supports 64-bit
 * single-copy atomics. If it does not, the driver treats the register as a
 * pair of 32-bit registers.
 */

/*
 * Read 64-bit register as a pair of 32-bit registers using hi-lo-hi sequence.
 */
static u64 read_reg64_hilohi(const void __iomem *addr, u32 max_poll_count)
{
	u32 val_lo, val_hi;
	u64 val;

	/* Use high-low-high sequence to avoid tearing */
	do {
		if (max_poll_count-- == 0) {
			pr_err("ARM CSPMU: timeout hi-lo-hi sequence\n");
			return 0;
		}

		val_hi = readl(addr + 4);
		val_lo = readl(addr);
	} while (val_hi != readl(addr + 4));

	val = (((u64)val_hi << 32) | val_lo);

	return val;
}

/* Check if cycle counter is supported. */
static inline bool supports_cycle_counter(const struct arm_cspmu *cspmu)
{
	return (cspmu->pmcfgr & PMCFGR_CC);
}

/* Get counter size, which is (PMCFGR_SIZE + 1). */
static inline u32 counter_size(const struct arm_cspmu *cspmu)
{
	return FIELD_GET(PMCFGR_SIZE, cspmu->pmcfgr) + 1;
}

/* Get counter mask. */
static inline u64 counter_mask(const struct arm_cspmu *cspmu)
{
	return GENMASK_ULL(counter_size(cspmu) - 1, 0);
}

/* Check if counter is implemented as 64-bit register. */
static inline bool use_64b_counter_reg(const struct arm_cspmu *cspmu)
{
	return (counter_size(cspmu) > 32);
}

ssize_t arm_cspmu_sysfs_event_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, typeof(*pmu_attr), attr);
	return sysfs_emit(buf, "event=0x%llx\n", pmu_attr->id);
}
EXPORT_SYMBOL_GPL(arm_cspmu_sysfs_event_show);

/* Default event list. */
static struct attribute *arm_cspmu_event_attrs[] = {
	ARM_CSPMU_EVENT_ATTR(cycles, ARM_CSPMU_EVT_CYCLES_DEFAULT),
	NULL,
};

static struct attribute **
arm_cspmu_get_event_attrs(const struct arm_cspmu *cspmu)
{
	struct attribute **attrs;

	attrs = devm_kmemdup(cspmu->dev, arm_cspmu_event_attrs,
			     sizeof(arm_cspmu_event_attrs), GFP_KERNEL);

	return attrs;
}

static umode_t
arm_cspmu_event_attr_is_visible(struct kobject *kobj,
				struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct arm_cspmu *cspmu = to_arm_cspmu(dev_get_drvdata(dev));
	struct perf_pmu_events_attr *eattr;

	eattr = container_of(attr, typeof(*eattr), attr.attr);

	/* Hide cycle event if not supported */
	if (!supports_cycle_counter(cspmu) &&
	    eattr->id == ARM_CSPMU_EVT_CYCLES_DEFAULT)
		return 0;

	return attr->mode;
}

static struct attribute *arm_cspmu_format_attrs[] = {
	ARM_CSPMU_FORMAT_EVENT_ATTR,
	ARM_CSPMU_FORMAT_FILTER_ATTR,
	ARM_CSPMU_FORMAT_FILTER2_ATTR,
	NULL,
};

static struct attribute **
arm_cspmu_get_format_attrs(const struct arm_cspmu *cspmu)
{
	struct attribute **attrs;

	attrs = devm_kmemdup(cspmu->dev, arm_cspmu_format_attrs,
			     sizeof(arm_cspmu_format_attrs), GFP_KERNEL);

	return attrs;
}

static u32 arm_cspmu_event_type(const struct perf_event *event)
{
	return event->attr.config & ARM_CSPMU_EVENT_MASK;
}

static bool arm_cspmu_is_cycle_counter_event(const struct perf_event *event)
{
	return (event->attr.config == ARM_CSPMU_EVT_CYCLES_DEFAULT);
}

static ssize_t arm_cspmu_identifier_show(struct device *dev,
					 struct device_attribute *attr,
					 char *page)
{
	struct arm_cspmu *cspmu = to_arm_cspmu(dev_get_drvdata(dev));

	return sysfs_emit(page, "%s\n", cspmu->identifier);
}

static struct device_attribute arm_cspmu_identifier_attr =
	__ATTR(identifier, 0444, arm_cspmu_identifier_show, NULL);

static struct attribute *arm_cspmu_identifier_attrs[] = {
	&arm_cspmu_identifier_attr.attr,
	NULL,
};

static struct attribute_group arm_cspmu_identifier_attr_group = {
	.attrs = arm_cspmu_identifier_attrs,
};

static const char *arm_cspmu_get_identifier(const struct arm_cspmu *cspmu)
{
	const char *identifier =
		devm_kasprintf(cspmu->dev, GFP_KERNEL, "%x",
			       cspmu->impl.pmiidr);
	return identifier;
}

static const char *arm_cspmu_type_str[ACPI_APMT_NODE_TYPE_COUNT] = {
	"mc",
	"smmu",
	"pcie",
	"acpi",
	"cache",
};

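/*
 * Compose a unique PMU name from the APMT node type, or from a plain
 * running index when the PMU is not described by an APMT node.
 */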
static const char *arm_cspmu_get_name(const struct arm_cspmu *cspmu)
{
	struct device *dev;
	struct acpi_apmt_node *apmt_node;
	u8 pmu_type;
	char *name;
	char acpi_hid_string[ACPI_ID_LEN] = { 0 };
	static atomic_t pmu_idx[ACPI_APMT_NODE_TYPE_COUNT] = { 0 };

	dev = cspmu->dev;
	apmt_node = arm_cspmu_apmt_node(dev);
	if (!apmt_node)
		return devm_kasprintf(dev, GFP_KERNEL, PMUNAME "_%u",
				      atomic_fetch_inc(&pmu_idx[0]));

	pmu_type = apmt_node->type;

	if (pmu_type >= ACPI_APMT_NODE_TYPE_COUNT) {
		dev_err(dev, "unsupported PMU type-%u\n", pmu_type);
		return NULL;
	}

	if (pmu_type == ACPI_APMT_NODE_TYPE_ACPI) {
		memcpy(acpi_hid_string,
		       &apmt_node->inst_primary,
		       sizeof(apmt_node->inst_primary));
		name = devm_kasprintf(dev, GFP_KERNEL, "%s_%s_%s_%u", PMUNAME,
				      arm_cspmu_type_str[pmu_type],
				      acpi_hid_string,
				      apmt_node->inst_secondary);
	} else {
		name = devm_kasprintf(dev, GFP_KERNEL, "%s_%s_%d", PMUNAME,
				      arm_cspmu_type_str[pmu_type],
				      atomic_fetch_inc(&pmu_idx[pmu_type]));
	}

	return name;
}

static ssize_t arm_cspmu_cpumask_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct pmu *pmu = dev_get_drvdata(dev);
	struct arm_cspmu *cspmu = to_arm_cspmu(pmu);
	struct dev_ext_attribute *eattr =
		container_of(attr, struct dev_ext_attribute, attr);
	unsigned long mask_id = (unsigned long)eattr->var;
	const cpumask_t *cpumask;

	switch (mask_id) {
	case ARM_CSPMU_ACTIVE_CPU_MASK:
		cpumask = &cspmu->active_cpu;
		break;
	case ARM_CSPMU_ASSOCIATED_CPU_MASK:
		cpumask = &cspmu->associated_cpus;
		break;
	default:
		return 0;
	}
	return cpumap_print_to_pagebuf(true, buf, cpumask);
}

static struct attribute *arm_cspmu_cpumask_attrs[] = {
	ARM_CSPMU_CPUMASK_ATTR(cpumask, ARM_CSPMU_ACTIVE_CPU_MASK),
	ARM_CSPMU_CPUMASK_ATTR(associated_cpus, ARM_CSPMU_ASSOCIATED_CPU_MASK),
	NULL,
};

static struct attribute_group arm_cspmu_cpumask_attr_group = {
	.attrs = arm_cspmu_cpumask_attrs,
};

static struct arm_cspmu_impl_match impl_match[] = {
	{
		.module_name = "nvidia_cspmu",
		.pmiidr_val = ARM_CSPMU_IMPL_ID_NVIDIA,
		.pmiidr_mask = PMIIDR_IMPLEMENTER,
		.module = NULL,
		.impl_init_ops = NULL,
	},
	{
		.module_name = "ampere_cspmu",
		.pmiidr_val = ARM_CSPMU_IMPL_ID_AMPERE,
		.pmiidr_mask = PMIIDR_IMPLEMENTER,
		.module = NULL,
		.impl_init_ops = NULL,
	},

	{0}
};

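/* Find the implementer entry whose PMIIDR value matches under its mask. */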
static struct arm_cspmu_impl_match *arm_cspmu_impl_match_get(u32 pmiidr)
{
	struct arm_cspmu_impl_match *match = impl_match;

	for (; match->pmiidr_val; match++) {
		u32 mask = match->pmiidr_mask;

		if ((match->pmiidr_val & mask) == (pmiidr & mask))
			return match;
	}

	return NULL;
}

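/*
 * Read PMIIDR; if it reads as zero, construct an equivalent value from the
 * PMPIDR0-4 registers.
 */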
353
arm_cspmu_get_pmiidr(struct arm_cspmu * cspmu)354 static u32 arm_cspmu_get_pmiidr(struct arm_cspmu *cspmu)
355 {
356 u32 pmiidr, pmpidr;
357
358 pmiidr = readl(cspmu->base0 + PMIIDR);
359
360 if (pmiidr != 0)
361 return pmiidr;
362
363 /* Construct PMIIDR value from PMPIDRs. */
364
365 pmpidr = readl(cspmu->base0 + PMPIDR0);
366 pmiidr |= FIELD_PREP(PMIIDR_PRODUCTID_PART_0,
367 FIELD_GET(PMPIDR0_PART_0, pmpidr));
368
369 pmpidr = readl(cspmu->base0 + PMPIDR1);
370 pmiidr |= FIELD_PREP(PMIIDR_PRODUCTID_PART_1,
371 FIELD_GET(PMPIDR1_PART_1, pmpidr));
372 pmiidr |= FIELD_PREP(PMIIDR_IMPLEMENTER_DES_0,
373 FIELD_GET(PMPIDR1_DES_0, pmpidr));
374
375 pmpidr = readl(cspmu->base0 + PMPIDR2);
376 pmiidr |= FIELD_PREP(PMIIDR_VARIANT,
377 FIELD_GET(PMPIDR2_REVISION, pmpidr));
378 pmiidr |= FIELD_PREP(PMIIDR_IMPLEMENTER_DES_1,
379 FIELD_GET(PMPIDR2_DES_1, pmpidr));
380
381 pmpidr = readl(cspmu->base0 + PMPIDR3);
382 pmiidr |= FIELD_PREP(PMIIDR_REVISION,
383 FIELD_GET(PMPIDR3_REVAND, pmpidr));
384
385 pmpidr = readl(cspmu->base0 + PMPIDR4);
386 pmiidr |= FIELD_PREP(PMIIDR_IMPLEMENTER_DES_2,
387 FIELD_GET(PMPIDR4_DES_2, pmpidr));
388
389 return pmiidr;
390 }
391
392 #define DEFAULT_IMPL_OP(name) .name = arm_cspmu_##name
393
arm_cspmu_init_impl_ops(struct arm_cspmu * cspmu)394 static int arm_cspmu_init_impl_ops(struct arm_cspmu *cspmu)
395 {
396 int ret = 0;
397 struct acpi_apmt_node *apmt_node = arm_cspmu_apmt_node(cspmu->dev);
398 struct arm_cspmu_impl_match *match;
399
400 /* Start with a default PMU implementation */
401 cspmu->impl.module = THIS_MODULE;
402 cspmu->impl.pmiidr = arm_cspmu_get_pmiidr(cspmu);
403 cspmu->impl.ops = (struct arm_cspmu_impl_ops) {
404 DEFAULT_IMPL_OP(get_event_attrs),
405 DEFAULT_IMPL_OP(get_format_attrs),
406 DEFAULT_IMPL_OP(get_identifier),
407 DEFAULT_IMPL_OP(get_name),
408 DEFAULT_IMPL_OP(is_cycle_counter_event),
409 DEFAULT_IMPL_OP(event_type),
410 DEFAULT_IMPL_OP(set_cc_filter),
411 DEFAULT_IMPL_OP(set_ev_filter),
412 DEFAULT_IMPL_OP(event_attr_is_visible),
413 };
414
415 /* Firmware may override implementer/product ID from PMIIDR */
416 if (apmt_node && apmt_node->impl_id)
417 cspmu->impl.pmiidr = apmt_node->impl_id;
418
419 /* Find implementer specific attribute ops. */
420 match = arm_cspmu_impl_match_get(cspmu->impl.pmiidr);
421
422 /* Load implementer module and initialize the callbacks. */
423 if (match) {
424 mutex_lock(&arm_cspmu_lock);
425
426 if (match->impl_init_ops) {
427 /* Prevent unload until PMU registration is done. */
428 if (try_module_get(match->module)) {
429 cspmu->impl.module = match->module;
430 cspmu->impl.match = match;
431 ret = match->impl_init_ops(cspmu);
432 if (ret)
433 module_put(match->module);
434 } else {
435 WARN(1, "arm_cspmu failed to get module: %s\n",
436 match->module_name);
437 ret = -EINVAL;
438 }
439 } else {
440 request_module_nowait(match->module_name);
441 ret = -EPROBE_DEFER;
442 }
443
444 mutex_unlock(&arm_cspmu_lock);
445 }
446
447 return ret;
448 }

static struct attribute_group *
arm_cspmu_alloc_event_attr_group(struct arm_cspmu *cspmu)
{
	struct attribute_group *event_group;
	struct device *dev = cspmu->dev;
	const struct arm_cspmu_impl_ops *impl_ops = &cspmu->impl.ops;

	event_group =
		devm_kzalloc(dev, sizeof(struct attribute_group), GFP_KERNEL);
	if (!event_group)
		return NULL;

	event_group->name = "events";
	event_group->is_visible = impl_ops->event_attr_is_visible;
	event_group->attrs = impl_ops->get_event_attrs(cspmu);

	if (!event_group->attrs)
		return NULL;

	return event_group;
}

static struct attribute_group *
arm_cspmu_alloc_format_attr_group(struct arm_cspmu *cspmu)
{
	struct attribute_group *format_group;
	struct device *dev = cspmu->dev;

	format_group =
		devm_kzalloc(dev, sizeof(struct attribute_group), GFP_KERNEL);
	if (!format_group)
		return NULL;

	format_group->name = "format";
	format_group->attrs = cspmu->impl.ops.get_format_attrs(cspmu);

	if (!format_group->attrs)
		return NULL;

	return format_group;
}

static int arm_cspmu_alloc_attr_groups(struct arm_cspmu *cspmu)
{
	const struct attribute_group **attr_groups = cspmu->attr_groups;
	const struct arm_cspmu_impl_ops *impl_ops = &cspmu->impl.ops;

	cspmu->identifier = impl_ops->get_identifier(cspmu);
	cspmu->name = impl_ops->get_name(cspmu);

	if (!cspmu->identifier || !cspmu->name)
		return -ENOMEM;

	attr_groups[0] = arm_cspmu_alloc_event_attr_group(cspmu);
	attr_groups[1] = arm_cspmu_alloc_format_attr_group(cspmu);
	attr_groups[2] = &arm_cspmu_identifier_attr_group;
	attr_groups[3] = &arm_cspmu_cpumask_attr_group;

	if (!attr_groups[0] || !attr_groups[1])
		return -ENOMEM;

	return 0;
}

static inline void arm_cspmu_reset_counters(struct arm_cspmu *cspmu)
{
	writel(PMCR_C | PMCR_P, cspmu->base0 + PMCR);
}

static inline void arm_cspmu_start_counters(struct arm_cspmu *cspmu)
{
	writel(PMCR_E, cspmu->base0 + PMCR);
}

static inline void arm_cspmu_stop_counters(struct arm_cspmu *cspmu)
{
	writel(0, cspmu->base0 + PMCR);
}

static void arm_cspmu_enable(struct pmu *pmu)
{
	bool disabled;
	struct arm_cspmu *cspmu = to_arm_cspmu(pmu);

	disabled = bitmap_empty(cspmu->hw_events.used_ctrs,
				cspmu->num_logical_ctrs);

	if (disabled)
		return;

	arm_cspmu_start_counters(cspmu);
}

static void arm_cspmu_disable(struct pmu *pmu)
{
	struct arm_cspmu *cspmu = to_arm_cspmu(pmu);

	arm_cspmu_stop_counters(cspmu);
}

static int arm_cspmu_get_event_idx(struct arm_cspmu_hw_events *hw_events,
				   struct perf_event *event)
{
	int idx, ret;
	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);

	if (supports_cycle_counter(cspmu)) {
		if (cspmu->impl.ops.is_cycle_counter_event(event)) {
			/* Search for available cycle counter. */
			if (test_and_set_bit(cspmu->cycle_counter_logical_idx,
					     hw_events->used_ctrs))
				return -EAGAIN;

			return cspmu->cycle_counter_logical_idx;
		}

		/*
		 * Search a regular counter from the used counter bitmap.
		 * The cycle counter divides the bitmap into two parts. Search
		 * the first then second half to exclude the cycle counter bit.
		 */
		idx = find_first_zero_bit(hw_events->used_ctrs,
					  cspmu->cycle_counter_logical_idx);
		if (idx >= cspmu->cycle_counter_logical_idx) {
			idx = find_next_zero_bit(
				hw_events->used_ctrs,
				cspmu->num_logical_ctrs,
				cspmu->cycle_counter_logical_idx + 1);
		}
	} else {
		idx = find_first_zero_bit(hw_events->used_ctrs,
					  cspmu->num_logical_ctrs);
	}

	if (idx >= cspmu->num_logical_ctrs)
		return -EAGAIN;

	if (cspmu->impl.ops.validate_event) {
		ret = cspmu->impl.ops.validate_event(cspmu, event);
		if (ret)
			return ret;
	}

	set_bit(idx, hw_events->used_ctrs);

	return idx;
}

static bool arm_cspmu_validate_event(struct pmu *pmu,
				     struct arm_cspmu_hw_events *hw_events,
				     struct perf_event *event)
{
	if (is_software_event(event))
		return true;

	/* Reject groups spanning multiple HW PMUs. */
	if (event->pmu != pmu)
		return false;

	return (arm_cspmu_get_event_idx(hw_events, event) >= 0);
}

/*
 * Make sure the group of events can be scheduled at once
 * on the PMU.
 */
static bool arm_cspmu_validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct arm_cspmu_hw_events fake_hw_events;

	if (event->group_leader == event)
		return true;

	memset(&fake_hw_events, 0, sizeof(fake_hw_events));

	if (!arm_cspmu_validate_event(event->pmu, &fake_hw_events, leader))
		return false;

	for_each_sibling_event(sibling, leader) {
		if (!arm_cspmu_validate_event(event->pmu, &fake_hw_events,
					      sibling))
			return false;
	}

	return arm_cspmu_validate_event(event->pmu, &fake_hw_events, event);
}

static int arm_cspmu_event_init(struct perf_event *event)
{
	struct arm_cspmu *cspmu;
	struct hw_perf_event *hwc = &event->hw;

	cspmu = to_arm_cspmu(event->pmu);

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * Following other "uncore" PMUs, we do not support sampling mode or
	 * attaching to a task (per-process mode).
	 */
	if (is_sampling_event(event)) {
		dev_dbg(cspmu->pmu.dev,
			"Can't support sampling events\n");
		return -EOPNOTSUPP;
	}

	if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK) {
		dev_dbg(cspmu->pmu.dev,
			"Can't support per-task counters\n");
		return -EINVAL;
	}

	/*
	 * Make sure the CPU assignment is on one of the CPUs associated with
	 * this PMU.
	 */
	if (!cpumask_test_cpu(event->cpu, &cspmu->associated_cpus)) {
		dev_dbg(cspmu->pmu.dev,
			"Requested cpu is not associated with the PMU\n");
		return -EINVAL;
	}

	/* Enforce the current active CPU to handle the events in this PMU. */
	event->cpu = cpumask_first(&cspmu->active_cpu);
	if (event->cpu >= nr_cpu_ids)
		return -EINVAL;

	if (!arm_cspmu_validate_group(event))
		return -EINVAL;

	/*
	 * The logical counter id is tracked with hw_perf_event.extra_reg.idx.
	 * The physical counter id is tracked with hw_perf_event.idx.
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet.
	 */
	hwc->idx = -1;
	hwc->extra_reg.idx = -1;
	hwc->config = cspmu->impl.ops.event_type(event);

	return 0;
}

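/* Byte offset of the PMEVCNTR register for a counter of the given size. */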
static inline u32 counter_offset(u32 reg_sz, u32 ctr_idx)
{
	return (PMEVCNTR_LO + (reg_sz * ctr_idx));
}

static void arm_cspmu_write_counter(struct perf_event *event, u64 val)
{
	u32 offset;
	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);

	if (use_64b_counter_reg(cspmu)) {
		offset = counter_offset(sizeof(u64), event->hw.idx);

		if (cspmu->has_atomic_dword)
			writeq(val, cspmu->base1 + offset);
		else
			lo_hi_writeq(val, cspmu->base1 + offset);
	} else {
		offset = counter_offset(sizeof(u32), event->hw.idx);

		writel(lower_32_bits(val), cspmu->base1 + offset);
	}
}

static u64 arm_cspmu_read_counter(struct perf_event *event)
{
	u32 offset;
	const void __iomem *counter_addr;
	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);

	if (use_64b_counter_reg(cspmu)) {
		offset = counter_offset(sizeof(u64), event->hw.idx);
		counter_addr = cspmu->base1 + offset;

		return cspmu->has_atomic_dword ?
			       readq(counter_addr) :
			       read_reg64_hilohi(counter_addr, HILOHI_MAX_POLL);
	}

	offset = counter_offset(sizeof(u32), event->hw.idx);
	return readl(cspmu->base1 + offset);
}

/*
 * arm_cspmu_set_event_period: Set the period for the counter.
 *
 * To handle cases of extreme interrupt latency, we program
 * the counter with half of the max count for the counters.
 */
static void arm_cspmu_set_event_period(struct perf_event *event)
{
	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
	u64 val = counter_mask(cspmu) >> 1ULL;

	local64_set(&event->hw.prev_count, val);
	arm_cspmu_write_counter(event, val);
}

static void arm_cspmu_enable_counter(struct arm_cspmu *cspmu, int idx)
{
	u32 reg_id, reg_bit, inten_off, cnten_off;

	reg_id = COUNTER_TO_SET_CLR_ID(idx);
	reg_bit = COUNTER_TO_SET_CLR_BIT(idx);

	inten_off = PMINTENSET + (4 * reg_id);
	cnten_off = PMCNTENSET + (4 * reg_id);

	writel(BIT(reg_bit), cspmu->base0 + inten_off);
	writel(BIT(reg_bit), cspmu->base0 + cnten_off);
}

static void arm_cspmu_disable_counter(struct arm_cspmu *cspmu, int idx)
{
	u32 reg_id, reg_bit, inten_off, cnten_off;

	reg_id = COUNTER_TO_SET_CLR_ID(idx);
	reg_bit = COUNTER_TO_SET_CLR_BIT(idx);

	inten_off = PMINTENCLR + (4 * reg_id);
	cnten_off = PMCNTENCLR + (4 * reg_id);

	writel(BIT(reg_bit), cspmu->base0 + cnten_off);
	writel(BIT(reg_bit), cspmu->base0 + inten_off);
}

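/*
 * Fold the change in counter value into event->count, masking with the
 * implemented counter width so that wrap-around is handled.
 */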
static void arm_cspmu_event_update(struct perf_event *event)
{
	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev, now;

	do {
		prev = local64_read(&hwc->prev_count);
		now = arm_cspmu_read_counter(event);
	} while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);

	delta = (now - prev) & counter_mask(cspmu);
	local64_add(delta, &event->count);
}

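/* Select what the counter counts by programming its PMEVTYPER register. */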
static inline void arm_cspmu_set_event(struct arm_cspmu *cspmu,
				       struct hw_perf_event *hwc)
{
	u32 offset = PMEVTYPER + (4 * hwc->idx);

	writel(hwc->config, cspmu->base0 + offset);
}

static void arm_cspmu_set_ev_filter(struct arm_cspmu *cspmu,
				    const struct perf_event *event)
{
	u32 filter = event->attr.config1 & ARM_CSPMU_FILTER_MASK;
	u32 filter2 = event->attr.config2 & ARM_CSPMU_FILTER_MASK;
	u32 offset = 4 * event->hw.idx;

	writel(filter, cspmu->base0 + PMEVFILTR + offset);
	writel(filter2, cspmu->base0 + PMEVFILT2R + offset);
}

static void arm_cspmu_set_cc_filter(struct arm_cspmu *cspmu,
				    const struct perf_event *event)
{
	u32 filter = event->attr.config1 & ARM_CSPMU_FILTER_MASK;

	writel(filter, cspmu->base0 + PMCCFILTR);
}

static void arm_cspmu_start(struct perf_event *event, int pmu_flags)
{
	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/* We always reprogram the counter */
	if (pmu_flags & PERF_EF_RELOAD)
		WARN_ON(!(hwc->state & PERF_HES_UPTODATE));

	arm_cspmu_set_event_period(event);

	if (event->hw.extra_reg.idx == cspmu->cycle_counter_logical_idx) {
		cspmu->impl.ops.set_cc_filter(cspmu, event);
	} else {
		arm_cspmu_set_event(cspmu, hwc);
		cspmu->impl.ops.set_ev_filter(cspmu, event);
	}

	hwc->state = 0;

	arm_cspmu_enable_counter(cspmu, hwc->idx);
}

static void arm_cspmu_stop(struct perf_event *event, int pmu_flags)
{
	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->state & PERF_HES_STOPPED)
		return;

	arm_cspmu_disable_counter(cspmu, hwc->idx);

	if (cspmu->impl.ops.reset_ev_filter)
		cspmu->impl.ops.reset_ev_filter(cspmu, event);

	arm_cspmu_event_update(event);

	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}

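/*
 * Map a logical counter index to its physical index; the cycle counter
 * always occupies the fixed ARM_CSPMU_CYCLE_CNTR_IDX slot in hardware.
 */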
static inline u32 to_phys_idx(struct arm_cspmu *cspmu, u32 idx)
{
	return (idx == cspmu->cycle_counter_logical_idx) ?
		ARM_CSPMU_CYCLE_CNTR_IDX : idx;
}

static int arm_cspmu_add(struct perf_event *event, int flags)
{
	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
	struct arm_cspmu_hw_events *hw_events = &cspmu->hw_events;
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	if (WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(),
					   &cspmu->associated_cpus)))
		return -ENOENT;

	idx = arm_cspmu_get_event_idx(hw_events, event);
	if (idx < 0)
		return idx;

	hw_events->events[idx] = event;
	hwc->idx = to_phys_idx(cspmu, idx);
	hwc->extra_reg.idx = idx;
	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;

	if (flags & PERF_EF_START)
		arm_cspmu_start(event, PERF_EF_RELOAD);

	/* Propagate changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}

static void arm_cspmu_del(struct perf_event *event, int flags)
{
	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
	struct arm_cspmu_hw_events *hw_events = &cspmu->hw_events;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->extra_reg.idx;

	arm_cspmu_stop(event, PERF_EF_UPDATE);

	hw_events->events[idx] = NULL;

	clear_bit(idx, hw_events->used_ctrs);

	perf_event_update_userpage(event);
}

static void arm_cspmu_read(struct perf_event *event)
{
	arm_cspmu_event_update(event);
}

static struct arm_cspmu *arm_cspmu_alloc(struct platform_device *pdev)
{
	struct acpi_apmt_node *apmt_node;
	struct arm_cspmu *cspmu;
	struct device *dev = &pdev->dev;

	cspmu = devm_kzalloc(dev, sizeof(*cspmu), GFP_KERNEL);
	if (!cspmu)
		return NULL;

	cspmu->dev = dev;
	platform_set_drvdata(pdev, cspmu);

	apmt_node = arm_cspmu_apmt_node(dev);
	if (apmt_node) {
		cspmu->has_atomic_dword = apmt_node->flags & ACPI_APMT_FLAGS_ATOMIC;
	} else {
		u32 width = 0;

		device_property_read_u32(dev, "reg-io-width", &width);
		cspmu->has_atomic_dword = (width == 8);
	}

	return cspmu;
}

static int arm_cspmu_init_mmio(struct arm_cspmu *cspmu)
{
	struct device *dev;
	struct platform_device *pdev;

	dev = cspmu->dev;
	pdev = to_platform_device(dev);

	/* Base address for page 0. */
	cspmu->base0 = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(cspmu->base0)) {
		dev_err(dev, "ioremap failed for page-0 resource\n");
		return PTR_ERR(cspmu->base0);
	}

	/* Base address for page 1 if supported. Otherwise point to page 0. */
	cspmu->base1 = cspmu->base0;
	if (platform_get_resource(pdev, IORESOURCE_MEM, 1)) {
		cspmu->base1 = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(cspmu->base1)) {
			dev_err(dev, "ioremap failed for page-1 resource\n");
			return PTR_ERR(cspmu->base1);
		}
	}

	cspmu->pmcfgr = readl(cspmu->base0 + PMCFGR);

	cspmu->num_logical_ctrs = FIELD_GET(PMCFGR_N, cspmu->pmcfgr) + 1;

	cspmu->cycle_counter_logical_idx = ARM_CSPMU_MAX_HW_CNTRS;

	if (supports_cycle_counter(cspmu)) {
		/*
		 * The last logical counter is mapped to the cycle counter if
		 * there is a gap between the regular and cycle counters.
		 * Otherwise, logical and physical counters have a 1-to-1
		 * mapping.
		 */
		cspmu->cycle_counter_logical_idx =
			(cspmu->num_logical_ctrs <= ARM_CSPMU_CYCLE_CNTR_IDX) ?
				cspmu->num_logical_ctrs - 1 :
				ARM_CSPMU_CYCLE_CNTR_IDX;
	}

	cspmu->num_set_clr_reg =
		DIV_ROUND_UP(cspmu->num_logical_ctrs,
			     ARM_CSPMU_SET_CLR_COUNTER_NUM);

	cspmu->hw_events.events =
		devm_kcalloc(dev, cspmu->num_logical_ctrs,
			     sizeof(*cspmu->hw_events.events), GFP_KERNEL);

	if (!cspmu->hw_events.events)
		return -ENOMEM;

	return 0;
}

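/*
 * Read and clear the overflow status registers. Returns nonzero if any
 * counter has overflowed; the per-register status is returned in pmovs.
 */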
static inline int arm_cspmu_get_reset_overflow(struct arm_cspmu *cspmu,
					       u32 *pmovs)
{
	int i;
	u32 pmovclr_offset = PMOVSCLR;
	u32 has_overflowed = 0;

	for (i = 0; i < cspmu->num_set_clr_reg; ++i) {
		pmovs[i] = readl(cspmu->base1 + pmovclr_offset);
		has_overflowed |= pmovs[i];
		writel(pmovs[i], cspmu->base1 + pmovclr_offset);
		pmovclr_offset += sizeof(u32);
	}

	return has_overflowed != 0;
}

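/*
 * Overflow interrupt handler: stop the counters, fold in and re-arm every
 * event whose counter overflowed, then restart counting.
 */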
static irqreturn_t arm_cspmu_handle_irq(int irq_num, void *dev)
{
	int idx, has_overflowed;
	struct perf_event *event;
	struct arm_cspmu *cspmu = dev;
	DECLARE_BITMAP(pmovs, ARM_CSPMU_MAX_HW_CNTRS);
	bool handled = false;

	arm_cspmu_stop_counters(cspmu);

	has_overflowed = arm_cspmu_get_reset_overflow(cspmu, (u32 *)pmovs);
	if (!has_overflowed)
		goto done;

	for_each_set_bit(idx, cspmu->hw_events.used_ctrs,
			 cspmu->num_logical_ctrs) {
		event = cspmu->hw_events.events[idx];

		if (!event)
			continue;

		if (!test_bit(event->hw.idx, pmovs))
			continue;

		arm_cspmu_event_update(event);
		arm_cspmu_set_event_period(event);

		handled = true;
	}

done:
	arm_cspmu_start_counters(cspmu);
	return IRQ_RETVAL(handled);
}

static int arm_cspmu_request_irq(struct arm_cspmu *cspmu)
{
	int irq, ret;
	struct device *dev;
	struct platform_device *pdev;

	dev = cspmu->dev;
	pdev = to_platform_device(dev);

	/* Skip IRQ request if the PMU does not support overflow interrupt. */
	irq = platform_get_irq_optional(pdev, 0);
	if (irq < 0)
		return irq == -ENXIO ? 0 : irq;

	ret = devm_request_irq(dev, irq, arm_cspmu_handle_irq,
			       IRQF_NOBALANCING | IRQF_NO_THREAD, dev_name(dev),
			       cspmu);
	if (ret) {
		dev_err(dev, "Could not request IRQ %d\n", irq);
		return ret;
	}

	cspmu->irq = irq;

	return 0;
}

#if defined(CONFIG_ACPI) && defined(CONFIG_ARM64)
#include <acpi/processor.h>

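/*
 * Check whether the CPU belongs to the ACPI processor container with the
 * given UID by walking up its ACPI parent devices. Returns 0 on a match.
 */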
static inline int arm_cspmu_find_cpu_container(int cpu, u32 container_uid)
{
	struct device *cpu_dev;
	struct acpi_device *acpi_dev;

	cpu_dev = get_cpu_device(cpu);
	if (!cpu_dev)
		return -ENODEV;

	acpi_dev = ACPI_COMPANION(cpu_dev);
	while (acpi_dev) {
		if (acpi_dev_hid_uid_match(acpi_dev, ACPI_PROCESSOR_CONTAINER_HID, container_uid))
			return 0;

		acpi_dev = acpi_dev_parent(acpi_dev);
	}

	return -ENODEV;
}

static int arm_cspmu_acpi_get_cpus(struct arm_cspmu *cspmu)
{
	struct acpi_apmt_node *apmt_node;
	int affinity_flag;
	int cpu;

	apmt_node = arm_cspmu_apmt_node(cspmu->dev);
	affinity_flag = apmt_node->flags & ACPI_APMT_FLAGS_AFFINITY;

	if (affinity_flag == ACPI_APMT_FLAGS_AFFINITY_PROC) {
		for_each_possible_cpu(cpu) {
			if (apmt_node->proc_affinity ==
			    get_acpi_id_for_cpu(cpu)) {
				cpumask_set_cpu(cpu, &cspmu->associated_cpus);
				break;
			}
		}
	} else {
		for_each_possible_cpu(cpu) {
			if (arm_cspmu_find_cpu_container(
				    cpu, apmt_node->proc_affinity))
				continue;

			cpumask_set_cpu(cpu, &cspmu->associated_cpus);
		}
	}

	return 0;
}
#else
static int arm_cspmu_acpi_get_cpus(struct arm_cspmu *cspmu)
{
	return -ENODEV;
}
#endif

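/* Collect the associated CPUs listed by the devicetree "cpus" phandles. */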
static int arm_cspmu_of_get_cpus(struct arm_cspmu *cspmu)
{
	struct of_phandle_iterator it;
	int ret, cpu;

	of_for_each_phandle(&it, ret, dev_of_node(cspmu->dev), "cpus", NULL, 0) {
		cpu = of_cpu_node_to_id(it.node);
		if (cpu < 0)
			continue;
		cpumask_set_cpu(cpu, &cspmu->associated_cpus);
	}
	return ret == -ENOENT ? 0 : ret;
}

static int arm_cspmu_get_cpus(struct arm_cspmu *cspmu)
{
	int ret = 0;

	if (arm_cspmu_apmt_node(cspmu->dev))
		ret = arm_cspmu_acpi_get_cpus(cspmu);
	else if (device_property_present(cspmu->dev, "cpus"))
		ret = arm_cspmu_of_get_cpus(cspmu);
	else
		cpumask_copy(&cspmu->associated_cpus, cpu_possible_mask);

	if (!ret && cpumask_empty(&cspmu->associated_cpus)) {
		dev_dbg(cspmu->dev, "No cpu associated with the PMU\n");
		ret = -ENODEV;
	}
	return ret;
}

static int arm_cspmu_register_pmu(struct arm_cspmu *cspmu)
{
	int ret, capabilities;

	ret = arm_cspmu_alloc_attr_groups(cspmu);
	if (ret)
		return ret;

	ret = cpuhp_state_add_instance(arm_cspmu_cpuhp_state,
				       &cspmu->cpuhp_node);
	if (ret)
		return ret;

	capabilities = PERF_PMU_CAP_NO_EXCLUDE;
	if (cspmu->irq == 0)
		capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

	cspmu->pmu = (struct pmu){
		.task_ctx_nr = perf_invalid_context,
		.module = cspmu->impl.module,
		.parent = cspmu->dev,
		.pmu_enable = arm_cspmu_enable,
		.pmu_disable = arm_cspmu_disable,
		.event_init = arm_cspmu_event_init,
		.add = arm_cspmu_add,
		.del = arm_cspmu_del,
		.start = arm_cspmu_start,
		.stop = arm_cspmu_stop,
		.read = arm_cspmu_read,
		.attr_groups = cspmu->attr_groups,
		.capabilities = capabilities,
	};

	/* Hardware counter init */
	arm_cspmu_reset_counters(cspmu);

	ret = perf_pmu_register(&cspmu->pmu, cspmu->name, -1);
	if (ret) {
		cpuhp_state_remove_instance(arm_cspmu_cpuhp_state,
					    &cspmu->cpuhp_node);
	}

	return ret;
}

static int arm_cspmu_device_probe(struct platform_device *pdev)
{
	int ret;
	struct arm_cspmu *cspmu;

	cspmu = arm_cspmu_alloc(pdev);
	if (!cspmu)
		return -ENOMEM;

	ret = arm_cspmu_init_mmio(cspmu);
	if (ret)
		return ret;

	ret = arm_cspmu_request_irq(cspmu);
	if (ret)
		return ret;

	ret = arm_cspmu_get_cpus(cspmu);
	if (ret)
		return ret;

	ret = arm_cspmu_init_impl_ops(cspmu);
	if (ret)
		return ret;

	ret = arm_cspmu_register_pmu(cspmu);

	/* Matches the try_module_get() in arm_cspmu_init_impl_ops() above. */
	if (cspmu->impl.module != THIS_MODULE)
		module_put(cspmu->impl.module);

	return ret;
}

static void arm_cspmu_device_remove(struct platform_device *pdev)
{
	struct arm_cspmu *cspmu = platform_get_drvdata(pdev);

	perf_pmu_unregister(&cspmu->pmu);
	cpuhp_state_remove_instance(arm_cspmu_cpuhp_state, &cspmu->cpuhp_node);
}

static const struct platform_device_id arm_cspmu_id[] = {
	{DRVNAME, 0},
	{ },
};
MODULE_DEVICE_TABLE(platform, arm_cspmu_id);

static const struct of_device_id arm_cspmu_of_match[] = {
	{ .compatible = "arm,coresight-pmu" },
	{}
};
MODULE_DEVICE_TABLE(of, arm_cspmu_of_match);

static struct platform_driver arm_cspmu_driver = {
	.driver = {
		.name = DRVNAME,
		.of_match_table = arm_cspmu_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = arm_cspmu_device_probe,
	.remove = arm_cspmu_device_remove,
	.id_table = arm_cspmu_id,
};

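/*
 * Nominate a CPU to handle this PMU's events and steer the overflow
 * interrupt to it.
 */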
static void arm_cspmu_set_active_cpu(int cpu, struct arm_cspmu *cspmu)
{
	cpumask_set_cpu(cpu, &cspmu->active_cpu);
	if (cspmu->irq)
		WARN_ON(irq_set_affinity(cspmu->irq, &cspmu->active_cpu));
}

static int arm_cspmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct arm_cspmu *cspmu =
		hlist_entry_safe(node, struct arm_cspmu, cpuhp_node);

	if (!cpumask_test_cpu(cpu, &cspmu->associated_cpus))
		return 0;

	/* If the PMU is already managed, there is nothing to do */
	if (!cpumask_empty(&cspmu->active_cpu))
		return 0;

	/* Use this CPU for event counting */
	arm_cspmu_set_active_cpu(cpu, cspmu);

	return 0;
}

static int arm_cspmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
{
	unsigned int dst;

	struct arm_cspmu *cspmu =
		hlist_entry_safe(node, struct arm_cspmu, cpuhp_node);

	/* Nothing to do if this CPU doesn't own the PMU */
	if (!cpumask_test_and_clear_cpu(cpu, &cspmu->active_cpu))
		return 0;

	/* Choose a new CPU to migrate ownership of the PMU to */
	dst = cpumask_any_and_but(&cspmu->associated_cpus,
				  cpu_online_mask, cpu);
	if (dst >= nr_cpu_ids)
		return 0;

	/* Use this CPU for event counting */
	perf_pmu_migrate_context(&cspmu->pmu, cpu, dst);
	arm_cspmu_set_active_cpu(dst, cspmu);

	return 0;
}

static int __init arm_cspmu_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "perf/arm/cspmu:online",
				      arm_cspmu_cpu_online,
				      arm_cspmu_cpu_teardown);
	if (ret < 0)
		return ret;
	arm_cspmu_cpuhp_state = ret;
	return platform_driver_register(&arm_cspmu_driver);
}

static void __exit arm_cspmu_exit(void)
{
	platform_driver_unregister(&arm_cspmu_driver);
	cpuhp_remove_multi_state(arm_cspmu_cpuhp_state);
}

int arm_cspmu_impl_register(const struct arm_cspmu_impl_match *impl_match)
{
	struct arm_cspmu_impl_match *match;
	int ret = 0;

	match = arm_cspmu_impl_match_get(impl_match->pmiidr_val);

	if (match) {
		mutex_lock(&arm_cspmu_lock);

		if (!match->impl_init_ops) {
			match->module = impl_match->module;
			match->impl_init_ops = impl_match->impl_init_ops;
		} else {
			/* Broken match table may contain non-unique entries */
			WARN(1, "arm_cspmu backend already registered for module: %s, pmiidr: 0x%x, mask: 0x%x\n",
			     match->module_name,
			     match->pmiidr_val,
			     match->pmiidr_mask);

			ret = -EINVAL;
		}

		mutex_unlock(&arm_cspmu_lock);

		if (!ret)
			ret = driver_attach(&arm_cspmu_driver.driver);
	} else {
		pr_err("arm_cspmu reg failed, unable to find a match for pmiidr: 0x%x\n",
		       impl_match->pmiidr_val);

		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(arm_cspmu_impl_register);

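/* Match a bound device to the implementer backend it was probed with. */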
static int arm_cspmu_match_device(struct device *dev, const void *match)
{
	struct arm_cspmu *cspmu = platform_get_drvdata(to_platform_device(dev));

	return (cspmu && cspmu->impl.match == match) ? 1 : 0;
}

void arm_cspmu_impl_unregister(const struct arm_cspmu_impl_match *impl_match)
{
	struct device *dev;
	struct arm_cspmu_impl_match *match;

	match = arm_cspmu_impl_match_get(impl_match->pmiidr_val);

	if (WARN_ON(!match))
		return;

	/* Unbind the driver from all matching backend devices. */
	while ((dev = driver_find_device(&arm_cspmu_driver.driver, NULL,
					 match, arm_cspmu_match_device))) {
		device_release_driver(dev);
		put_device(dev);
	}

	mutex_lock(&arm_cspmu_lock);

	match->module = NULL;
	match->impl_init_ops = NULL;

	mutex_unlock(&arm_cspmu_lock);
}
EXPORT_SYMBOL_GPL(arm_cspmu_impl_unregister);

module_init(arm_cspmu_init);
module_exit(arm_cspmu_exit);

MODULE_DESCRIPTION("ARM CoreSight Architecture Performance Monitor Driver");
MODULE_LICENSE("GPL v2");