1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * HiSilicon PA uncore Hardware event counters support
4 *
5 * Copyright (C) 2020 HiSilicon Limited
6 * Author: Shaokun Zhang <zhangshaokun@hisilicon.com>
7 *
8 * This code is based on the uncore PMUs like arm-cci and arm-ccn.
9 */
10 #include <linux/acpi.h>
11 #include <linux/cpuhotplug.h>
12 #include <linux/interrupt.h>
13 #include <linux/irq.h>
14 #include <linux/list.h>
15 #include <linux/smp.h>
16
17 #include "hisi_uncore_pmu.h"
18
/* PA register definition */
#define PA_PERF_CTRL 0x1c00
#define PA_EVENT_CTRL 0x1c04
#define PA_TT_CTRL 0x1c08
#define PA_TGTID_CTRL 0x1c14
#define PA_SRCID_CTRL 0x1c18

/* H32 PA interrupt registers */
#define PA_INT_MASK 0x1c70
#define PA_INT_STATUS 0x1c78
#define PA_INT_CLEAR 0x1c7c

/* H60PA interrupt registers (no separate clear register, see below) */
#define H60PA_INT_STATUS 0x1c70
#define H60PA_INT_MASK 0x1c74

#define PA_EVENT_TYPE0 0x1c80
#define PA_PMU_VERSION 0x1cf0
#define PA_EVENT_CNT0_L 0x1d00

#define PA_EVTYPE_MASK 0xff
#define PA_NR_COUNTERS 0x8
#define PA_PERF_CTRL_EN BIT(0)
#define PA_TRACETAG_EN BIT(4)
#define PA_TGTID_EN BIT(11)
#define PA_SRCID_EN BIT(11)
#define PA_TGTID_NONE 0
#define PA_SRCID_NONE 0
#define PA_TGTID_MSK_SHIFT 12
#define PA_SRCID_MSK_SHIFT 12

/*
 * Filter fields packed into perf_event_attr::config1.  Each extractor
 * provides a hisi_get_<name>(event) accessor for the given bit range
 * (used by the filter helpers below).
 */
HISI_PMU_EVENT_ATTR_EXTRACTOR(tgtid_cmd, config1, 10, 0);
HISI_PMU_EVENT_ATTR_EXTRACTOR(tgtid_msk, config1, 21, 11);
HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_cmd, config1, 32, 22);
HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_msk, config1, 43, 33);
HISI_PMU_EVENT_ATTR_EXTRACTOR(tracetag_en, config1, 44, 44);
54
/*
 * Per-variant interrupt register layout: offsets from the PMU MMIO base
 * of the interrupt mask, clear and status registers.  Stored in
 * hisi_pmu_dev_info::private and selected via the ACPI match data.
 */
struct hisi_pa_pmu_int_regs {
	u32 mask_offset;	/* write 1 to mask, 0 to unmask a counter IRQ */
	u32 clear_offset;	/* write 1 to acknowledge a pending counter IRQ */
	u32 status_offset;	/* pending-interrupt status, one bit per counter */
};
60
hisi_pa_pmu_enable_tracetag(struct perf_event * event)61 static void hisi_pa_pmu_enable_tracetag(struct perf_event *event)
62 {
63 struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu);
64 u32 tt_en = hisi_get_tracetag_en(event);
65
66 if (tt_en) {
67 u32 val;
68
69 val = readl(pa_pmu->base + PA_TT_CTRL);
70 val |= PA_TRACETAG_EN;
71 writel(val, pa_pmu->base + PA_TT_CTRL);
72 }
73 }
74
hisi_pa_pmu_clear_tracetag(struct perf_event * event)75 static void hisi_pa_pmu_clear_tracetag(struct perf_event *event)
76 {
77 struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu);
78 u32 tt_en = hisi_get_tracetag_en(event);
79
80 if (tt_en) {
81 u32 val;
82
83 val = readl(pa_pmu->base + PA_TT_CTRL);
84 val &= ~PA_TRACETAG_EN;
85 writel(val, pa_pmu->base + PA_TT_CTRL);
86 }
87 }
88
hisi_pa_pmu_config_tgtid(struct perf_event * event)89 static void hisi_pa_pmu_config_tgtid(struct perf_event *event)
90 {
91 struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu);
92 u32 cmd = hisi_get_tgtid_cmd(event);
93
94 if (cmd) {
95 u32 msk = hisi_get_tgtid_msk(event);
96 u32 val = cmd | PA_TGTID_EN | (msk << PA_TGTID_MSK_SHIFT);
97
98 writel(val, pa_pmu->base + PA_TGTID_CTRL);
99 }
100 }
101
hisi_pa_pmu_clear_tgtid(struct perf_event * event)102 static void hisi_pa_pmu_clear_tgtid(struct perf_event *event)
103 {
104 struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu);
105 u32 cmd = hisi_get_tgtid_cmd(event);
106
107 if (cmd)
108 writel(PA_TGTID_NONE, pa_pmu->base + PA_TGTID_CTRL);
109 }
110
hisi_pa_pmu_config_srcid(struct perf_event * event)111 static void hisi_pa_pmu_config_srcid(struct perf_event *event)
112 {
113 struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu);
114 u32 cmd = hisi_get_srcid_cmd(event);
115
116 if (cmd) {
117 u32 msk = hisi_get_srcid_msk(event);
118 u32 val = cmd | PA_SRCID_EN | (msk << PA_SRCID_MSK_SHIFT);
119
120 writel(val, pa_pmu->base + PA_SRCID_CTRL);
121 }
122 }
123
hisi_pa_pmu_clear_srcid(struct perf_event * event)124 static void hisi_pa_pmu_clear_srcid(struct perf_event *event)
125 {
126 struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu);
127 u32 cmd = hisi_get_srcid_cmd(event);
128
129 if (cmd)
130 writel(PA_SRCID_NONE, pa_pmu->base + PA_SRCID_CTRL);
131 }
132
hisi_pa_pmu_enable_filter(struct perf_event * event)133 static void hisi_pa_pmu_enable_filter(struct perf_event *event)
134 {
135 if (event->attr.config1 != 0x0) {
136 hisi_pa_pmu_enable_tracetag(event);
137 hisi_pa_pmu_config_srcid(event);
138 hisi_pa_pmu_config_tgtid(event);
139 }
140 }
141
hisi_pa_pmu_disable_filter(struct perf_event * event)142 static void hisi_pa_pmu_disable_filter(struct perf_event *event)
143 {
144 if (event->attr.config1 != 0x0) {
145 hisi_pa_pmu_clear_tgtid(event);
146 hisi_pa_pmu_clear_srcid(event);
147 hisi_pa_pmu_clear_tracetag(event);
148 }
149 }
150
hisi_pa_pmu_get_counter_offset(int idx)151 static u32 hisi_pa_pmu_get_counter_offset(int idx)
152 {
153 return (PA_EVENT_CNT0_L + idx * 8);
154 }
155
hisi_pa_pmu_read_counter(struct hisi_pmu * pa_pmu,struct hw_perf_event * hwc)156 static u64 hisi_pa_pmu_read_counter(struct hisi_pmu *pa_pmu,
157 struct hw_perf_event *hwc)
158 {
159 return readq(pa_pmu->base + hisi_pa_pmu_get_counter_offset(hwc->idx));
160 }
161
/* Write @val into the 64-bit hardware counter assigned to @hwc. */
static void hisi_pa_pmu_write_counter(struct hisi_pmu *pa_pmu,
				      struct hw_perf_event *hwc, u64 val)
{
	u32 offset = hisi_pa_pmu_get_counter_offset(hwc->idx);

	writeq(val, pa_pmu->base + offset);
}
167
/* Program the 8-bit event code for counter @idx. */
static void hisi_pa_pmu_write_evtype(struct hisi_pmu *pa_pmu, int idx,
				     u32 type)
{
	u32 offset, shift, val;

	/*
	 * Two event select registers serve the 8 hardware counters, each
	 * holding four 8-bit event-code fields: counters 0-3 live in
	 * PA_EVENT_TYPE0, counters 4-7 in PA_EVENT_TYPE1.
	 */
	offset = PA_EVENT_TYPE0 + (idx / 4) * 4;
	shift = (idx % 4) * 8;

	/* Replace only this counter's field, preserving its neighbours. */
	val = readl(pa_pmu->base + offset);
	val &= ~(PA_EVTYPE_MASK << shift);
	val |= type << shift;
	writel(val, pa_pmu->base + offset);
}
190
hisi_pa_pmu_start_counters(struct hisi_pmu * pa_pmu)191 static void hisi_pa_pmu_start_counters(struct hisi_pmu *pa_pmu)
192 {
193 u32 val;
194
195 val = readl(pa_pmu->base + PA_PERF_CTRL);
196 val |= PA_PERF_CTRL_EN;
197 writel(val, pa_pmu->base + PA_PERF_CTRL);
198 }
199
hisi_pa_pmu_stop_counters(struct hisi_pmu * pa_pmu)200 static void hisi_pa_pmu_stop_counters(struct hisi_pmu *pa_pmu)
201 {
202 u32 val;
203
204 val = readl(pa_pmu->base + PA_PERF_CTRL);
205 val &= ~(PA_PERF_CTRL_EN);
206 writel(val, pa_pmu->base + PA_PERF_CTRL);
207 }
208
hisi_pa_pmu_enable_counter(struct hisi_pmu * pa_pmu,struct hw_perf_event * hwc)209 static void hisi_pa_pmu_enable_counter(struct hisi_pmu *pa_pmu,
210 struct hw_perf_event *hwc)
211 {
212 u32 val;
213
214 /* Enable counter index in PA_EVENT_CTRL register */
215 val = readl(pa_pmu->base + PA_EVENT_CTRL);
216 val |= 1 << hwc->idx;
217 writel(val, pa_pmu->base + PA_EVENT_CTRL);
218 }
219
hisi_pa_pmu_disable_counter(struct hisi_pmu * pa_pmu,struct hw_perf_event * hwc)220 static void hisi_pa_pmu_disable_counter(struct hisi_pmu *pa_pmu,
221 struct hw_perf_event *hwc)
222 {
223 u32 val;
224
225 /* Clear counter index in PA_EVENT_CTRL register */
226 val = readl(pa_pmu->base + PA_EVENT_CTRL);
227 val &= ~(1 << hwc->idx);
228 writel(val, pa_pmu->base + PA_EVENT_CTRL);
229 }
230
hisi_pa_pmu_enable_counter_int(struct hisi_pmu * pa_pmu,struct hw_perf_event * hwc)231 static void hisi_pa_pmu_enable_counter_int(struct hisi_pmu *pa_pmu,
232 struct hw_perf_event *hwc)
233 {
234 struct hisi_pa_pmu_int_regs *regs = pa_pmu->dev_info->private;
235 u32 val;
236
237 /* Write 0 to enable interrupt */
238 val = readl(pa_pmu->base + regs->mask_offset);
239 val &= ~(1 << hwc->idx);
240 writel(val, pa_pmu->base + regs->mask_offset);
241 }
242
hisi_pa_pmu_disable_counter_int(struct hisi_pmu * pa_pmu,struct hw_perf_event * hwc)243 static void hisi_pa_pmu_disable_counter_int(struct hisi_pmu *pa_pmu,
244 struct hw_perf_event *hwc)
245 {
246 struct hisi_pa_pmu_int_regs *regs = pa_pmu->dev_info->private;
247 u32 val;
248
249 /* Write 1 to mask interrupt */
250 val = readl(pa_pmu->base + regs->mask_offset);
251 val |= 1 << hwc->idx;
252 writel(val, pa_pmu->base + regs->mask_offset);
253 }
254
hisi_pa_pmu_get_int_status(struct hisi_pmu * pa_pmu)255 static u32 hisi_pa_pmu_get_int_status(struct hisi_pmu *pa_pmu)
256 {
257 struct hisi_pa_pmu_int_regs *regs = pa_pmu->dev_info->private;
258
259 return readl(pa_pmu->base + regs->status_offset);
260 }
261
hisi_pa_pmu_clear_int_status(struct hisi_pmu * pa_pmu,int idx)262 static void hisi_pa_pmu_clear_int_status(struct hisi_pmu *pa_pmu, int idx)
263 {
264 struct hisi_pa_pmu_int_regs *regs = pa_pmu->dev_info->private;
265
266 writel(1 << idx, pa_pmu->base + regs->clear_offset);
267 }
268
hisi_pa_pmu_init_data(struct platform_device * pdev,struct hisi_pmu * pa_pmu)269 static int hisi_pa_pmu_init_data(struct platform_device *pdev,
270 struct hisi_pmu *pa_pmu)
271 {
272 hisi_uncore_pmu_init_topology(pa_pmu, &pdev->dev);
273
274 /*
275 * As PA PMU is in a SICL, use the SICL_ID and the index ID
276 * to identify the PA PMU.
277 */
278 if (pa_pmu->topo.sicl_id < 0) {
279 dev_err(&pdev->dev, "Cannot read sicl-id!\n");
280 return -EINVAL;
281 }
282
283 if (pa_pmu->topo.index_id < 0) {
284 dev_err(&pdev->dev, "Cannot read idx-id!\n");
285 return -EINVAL;
286 }
287
288 pa_pmu->dev_info = device_get_match_data(&pdev->dev);
289 if (!pa_pmu->dev_info)
290 return -ENODEV;
291
292 pa_pmu->base = devm_platform_ioremap_resource(pdev, 0);
293 if (IS_ERR(pa_pmu->base)) {
294 dev_err(&pdev->dev, "ioremap failed for pa_pmu resource.\n");
295 return PTR_ERR(pa_pmu->base);
296 }
297
298 pa_pmu->identifier = readl(pa_pmu->base + PA_PMU_VERSION);
299
300 return 0;
301 }
302
/* "format" sysfs group: layout of the config/config1 bit-fields */
static struct attribute *hisi_pa_pmu_v2_format_attr[] = {
	HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
	HISI_PMU_FORMAT_ATTR(tgtid_cmd, "config1:0-10"),
	HISI_PMU_FORMAT_ATTR(tgtid_msk, "config1:11-21"),
	HISI_PMU_FORMAT_ATTR(srcid_cmd, "config1:22-32"),
	HISI_PMU_FORMAT_ATTR(srcid_msk, "config1:33-43"),
	HISI_PMU_FORMAT_ATTR(tracetag_en, "config1:44"),
	NULL,
};

static const struct attribute_group hisi_pa_pmu_v2_format_group = {
	.name = "format",
	.attrs = hisi_pa_pmu_v2_format_attr,
};

/* Named events exposed by the v2 H32 PA PMU */
static struct attribute *hisi_pa_pmu_v2_events_attr[] = {
	HISI_PMU_EVENT_ATTR(rx_req, 0x40),
	HISI_PMU_EVENT_ATTR(tx_req, 0x5c),
	HISI_PMU_EVENT_ATTR(cycle, 0x78),
	NULL
};

static const struct attribute_group hisi_pa_pmu_v2_events_group = {
	.name = "events",
	.attrs = hisi_pa_pmu_v2_events_attr,
};

/* Named events exposed by the v3 H32 PA PMU */
static struct attribute *hisi_pa_pmu_v3_events_attr[] = {
	HISI_PMU_EVENT_ATTR(tx_req, 0x0),
	HISI_PMU_EVENT_ATTR(tx_dat, 0x1),
	HISI_PMU_EVENT_ATTR(tx_snp, 0x2),
	HISI_PMU_EVENT_ATTR(rx_req, 0x7),
	HISI_PMU_EVENT_ATTR(rx_dat, 0x8),
	HISI_PMU_EVENT_ATTR(rx_snp, 0x9),
	NULL
};

static const struct attribute_group hisi_pa_pmu_v3_events_group = {
	.name = "events",
	.attrs = hisi_pa_pmu_v3_events_attr,
};

/* Named events exposed by the H60PA PMU */
static struct attribute *hisi_h60pa_pmu_events_attr[] = {
	HISI_PMU_EVENT_ATTR(rx_flit, 0x50),
	HISI_PMU_EVENT_ATTR(tx_flit, 0x65),
	NULL
};

static const struct attribute_group hisi_h60pa_pmu_events_group = {
	.name = "events",
	.attrs = hisi_h60pa_pmu_events_attr,
};
355
/* Interrupt register layout shared by the H32 PA variants (v2/v3) */
static struct hisi_pa_pmu_int_regs hisi_pa_pmu_regs = {
	.mask_offset = PA_INT_MASK,
	.clear_offset = PA_INT_CLEAR,
	.status_offset = PA_INT_STATUS,
};

static const struct attribute_group *hisi_pa_pmu_v2_attr_groups[] = {
	&hisi_pa_pmu_v2_format_group,
	&hisi_pa_pmu_v2_events_group,
	&hisi_pmu_cpumask_attr_group,
	&hisi_pmu_identifier_group,
	NULL
};

/* Match data for the v2 H32 PA ("HISI0273") */
static const struct hisi_pmu_dev_info hisi_h32pa_v2 = {
	.name = "pa",
	.attr_groups = hisi_pa_pmu_v2_attr_groups,
	.private = &hisi_pa_pmu_regs,
};

static const struct attribute_group *hisi_pa_pmu_v3_attr_groups[] = {
	&hisi_pa_pmu_v2_format_group,
	&hisi_pa_pmu_v3_events_group,
	&hisi_pmu_cpumask_attr_group,
	&hisi_pmu_identifier_group,
	NULL
};

/* Match data for the v3 H32 PA ("HISI0275") */
static const struct hisi_pmu_dev_info hisi_h32pa_v3 = {
	.name = "pa",
	.attr_groups = hisi_pa_pmu_v3_attr_groups,
	.private = &hisi_pa_pmu_regs,
};

/* H60PA has no dedicated clear register: writing status clears it */
static struct hisi_pa_pmu_int_regs hisi_h60pa_pmu_regs = {
	.mask_offset = H60PA_INT_MASK,
	.clear_offset = H60PA_INT_STATUS, /* Clear on write */
	.status_offset = H60PA_INT_STATUS,
};

static const struct attribute_group *hisi_h60pa_pmu_attr_groups[] = {
	&hisi_pa_pmu_v2_format_group,
	&hisi_h60pa_pmu_events_group,
	&hisi_pmu_cpumask_attr_group,
	&hisi_pmu_identifier_group,
	NULL
};

/* Match data for the H60PA ("HISI0274") */
static const struct hisi_pmu_dev_info hisi_h60pa = {
	.name = "h60pa",
	.attr_groups = hisi_h60pa_pmu_attr_groups,
	.private = &hisi_h60pa_pmu_regs,
};
409
/* PA-specific callbacks plugged into the common hisi_uncore_pmu core */
static const struct hisi_uncore_ops hisi_uncore_pa_ops = {
	.write_evtype = hisi_pa_pmu_write_evtype,
	.get_event_idx = hisi_uncore_pmu_get_event_idx,
	.start_counters = hisi_pa_pmu_start_counters,
	.stop_counters = hisi_pa_pmu_stop_counters,
	.enable_counter = hisi_pa_pmu_enable_counter,
	.disable_counter = hisi_pa_pmu_disable_counter,
	.enable_counter_int = hisi_pa_pmu_enable_counter_int,
	.disable_counter_int = hisi_pa_pmu_disable_counter_int,
	.write_counter = hisi_pa_pmu_write_counter,
	.read_counter = hisi_pa_pmu_read_counter,
	.get_int_status = hisi_pa_pmu_get_int_status,
	.clear_int_status = hisi_pa_pmu_clear_int_status,
	.enable_filter = hisi_pa_pmu_enable_filter,
	.disable_filter = hisi_pa_pmu_disable_filter,
};
426
/*
 * Initialise device data (topology, match data, MMIO) and the IRQ, then
 * fill in the PA-specific hisi_pmu parameters.  Returns 0 or a negative
 * errno.
 */
static int hisi_pa_pmu_dev_probe(struct platform_device *pdev,
				 struct hisi_pmu *pa_pmu)
{
	int ret;

	ret = hisi_pa_pmu_init_data(pdev, pa_pmu);
	if (ret)
		return ret;

	ret = hisi_uncore_pmu_init_irq(pa_pmu, pdev);
	if (ret)
		return ret;

	pa_pmu->pmu_events.attr_groups = pa_pmu->dev_info->attr_groups;
	pa_pmu->num_counters = PA_NR_COUNTERS;
	pa_pmu->ops = &hisi_uncore_pa_ops;
	/* NOTE(review): 0xB0 looks like the event-code upper bound checked
	 * by the common framework — confirm against hisi_uncore_pmu.h */
	pa_pmu->check_event = 0xB0;
	pa_pmu->counter_bits = 64;
	pa_pmu->dev = &pdev->dev;
	pa_pmu->on_cpu = -1;	/* no event-homing CPU chosen yet */

	return 0;
}
450
/*
 * Platform probe: allocate the PMU, set it up, register the CPU hotplug
 * instance and finally register with the perf core.  The hotplug
 * instance is rolled back if perf registration fails.
 */
static int hisi_pa_pmu_probe(struct platform_device *pdev)
{
	struct hisi_pmu *pa_pmu;
	char *name;
	int ret;

	pa_pmu = devm_kzalloc(&pdev->dev, sizeof(*pa_pmu), GFP_KERNEL);
	if (!pa_pmu)
		return -ENOMEM;

	ret = hisi_pa_pmu_dev_probe(pdev, pa_pmu);
	if (ret)
		return ret;

	/* PMU name unique per SICL and index, e.g. "hisi_sicl0_pa1" */
	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%d_%s%d",
			      pa_pmu->topo.sicl_id, pa_pmu->dev_info->name,
			      pa_pmu->topo.index_id);
	if (!name)
		return -ENOMEM;

	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
				       &pa_pmu->node);
	if (ret) {
		dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
		return ret;
	}

	hisi_pmu_init(pa_pmu, THIS_MODULE);
	ret = perf_pmu_register(&pa_pmu->pmu, name, -1);
	if (ret) {
		dev_err(pa_pmu->dev, "PMU register failed, ret = %d\n", ret);
		/* Undo the hotplug registration without running callbacks */
		cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
						    &pa_pmu->node);
		return ret;
	}

	platform_set_drvdata(pdev, pa_pmu);
	return ret;
}
490
/* Tear down in reverse probe order: perf PMU first, hotplug instance last */
static void hisi_pa_pmu_remove(struct platform_device *pdev)
{
	struct hisi_pmu *pa_pmu = platform_get_drvdata(pdev);

	perf_pmu_unregister(&pa_pmu->pmu);
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
					    &pa_pmu->node);
}
499
/* ACPI HIDs, each carrying the matching hisi_pmu_dev_info as driver data */
static const struct acpi_device_id hisi_pa_pmu_acpi_match[] = {
	{ "HISI0273", (kernel_ulong_t)&hisi_h32pa_v2 },
	{ "HISI0275", (kernel_ulong_t)&hisi_h32pa_v3 },
	{ "HISI0274", (kernel_ulong_t)&hisi_h60pa },
	{}
};
MODULE_DEVICE_TABLE(acpi, hisi_pa_pmu_acpi_match);
507
static struct platform_driver hisi_pa_pmu_driver = {
	.driver = {
		.name = "hisi_pa_pmu",
		.acpi_match_table = hisi_pa_pmu_acpi_match,
		/* Hide sysfs bind/unbind; manual unbind makes no sense here */
		.suppress_bind_attrs = true,
	},
	.probe = hisi_pa_pmu_probe,
	.remove = hisi_pa_pmu_remove,
};
517
/*
 * Set up the multi-instance CPU hotplug state before registering the
 * platform driver, so any probe triggered by registration finds it
 * ready; roll the state back if driver registration fails.
 */
static int __init hisi_pa_pmu_module_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
				      "AP_PERF_ARM_HISI_PA_ONLINE",
				      hisi_uncore_pmu_online_cpu,
				      hisi_uncore_pmu_offline_cpu);
	if (ret) {
		pr_err("PA PMU: cpuhp state setup failed, ret = %d\n", ret);
		return ret;
	}

	ret = platform_driver_register(&hisi_pa_pmu_driver);
	if (ret)
		cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE);

	return ret;
}
module_init(hisi_pa_pmu_module_init);
538
/* Mirror of module_init: unregister the driver, then drop the cpuhp state */
static void __exit hisi_pa_pmu_module_exit(void)
{
	platform_driver_unregister(&hisi_pa_pmu_driver);
	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE);
}
module_exit(hisi_pa_pmu_module_exit);
545
/* Pull in symbols exported under the HISI_PMU module namespace */
MODULE_IMPORT_NS("HISI_PMU");
MODULE_DESCRIPTION("HiSilicon Protocol Adapter uncore PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
MODULE_AUTHOR("Qi Liu <liuqi115@huawei.com>");
551