// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2017 NXP
 * Copyright 2016 Freescale Semiconductor, Inc.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define COUNTER_CNTL		0x0
#define COUNTER_READ		0x20

#define COUNTER_DPCR1		0x30
#define COUNTER_MUX_CNTL	0x50
#define COUNTER_MASK_COMP	0x54

#define CNTL_OVER		0x1
#define CNTL_CLEAR		0x2
#define CNTL_EN			0x4
#define CNTL_EN_MASK		0xFFFFFFFB
#define CNTL_CLEAR_MASK		0xFFFFFFFD
#define CNTL_OVER_MASK		0xFFFFFFFE

#define CNTL_CP_SHIFT		16
#define CNTL_CP_MASK		(0xFF << CNTL_CP_SHIFT)
#define CNTL_CSV_SHIFT		24
#define CNTL_CSV_MASK		(0xFFU << CNTL_CSV_SHIFT)

#define READ_PORT_SHIFT		0
#define READ_PORT_MASK		(0x7 << READ_PORT_SHIFT)
#define READ_CHANNEL_REVERT	0x00000008	/* bit 3 for read channel select */
#define WRITE_PORT_SHIFT	8
#define WRITE_PORT_MASK		(0x7 << WRITE_PORT_SHIFT)
#define WRITE_CHANNEL_REVERT	0x00000800	/* bit 11 for write channel select */

#define EVENT_CYCLES_ID		0
#define EVENT_CYCLES_COUNTER	0
#define NUM_COUNTERS		4

/* For removing bias if cycle counter CNTL.CP is set to 0xf0 */
#define CYCLES_COUNTER_MASK	0x0FFFFFFF
#define AXI_MASKING_REVERT	0xffff0000	/* AXI_MASKING(MSB 16bits) + AXI_ID(LSB 16bits) */
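/*
 * Per-counter register layout, as inferred from the accesses in this driver
 * (not from a reference manual): counter i has its control word at
 * COUNTER_CNTL + i * 4 and its count at COUNTER_READ + i * 4 (or at
 * COUNTER_DPCR1 + i * 4 when the enhanced AXI ID filter reports bytes);
 * the MASK_COMP/MUX_CNTL filter registers used on i.MX8DXL are programmed
 * in ddr_perf_event_add().
 */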

#define to_ddr_pmu(p)		container_of(p, struct ddr_pmu, pmu)

#define DDR_PERF_DEV_NAME	"imx8_ddr"
#define DB_PERF_DEV_NAME	"imx8_db"
#define DDR_CPUHP_CB_NAME	DDR_PERF_DEV_NAME "_perf_pmu"

static DEFINE_IDA(ddr_ida);
static DEFINE_IDA(db_ida);

/* DDR Perf hardware feature */
#define DDR_CAP_AXI_ID_FILTER			0x1	/* support AXI ID filter */
#define DDR_CAP_AXI_ID_FILTER_ENHANCED		0x3	/* support enhanced AXI ID filter */
#define DDR_CAP_AXI_ID_PORT_CHANNEL_FILTER	0x4	/* support AXI ID PORT CHANNEL filter */
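/*
 * Note: DDR_CAP_AXI_ID_FILTER_ENHANCED (0x3) deliberately includes the
 * DDR_CAP_AXI_ID_FILTER bit, so an enhanced-filter PMU also advertises the
 * basic filter; see the mask-and-compare checks in ddr_perf_filter_cap_get()
 * and ddr_perf_is_enhanced_filtered().
 */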

/* Perf type */
enum fsl_ddr_type {
	DDR_PERF_TYPE = 0,	/* ddr Perf (default) */
	DB_PERF_TYPE,		/* db Perf */
};

struct fsl_ddr_devtype_data {
	unsigned int quirks;	/* quirks needed for different DDR Perf core */
	const char *identifier;	/* system PMU identifier for userspace */
	enum fsl_ddr_type type;	/* types of Perf, ddr or db */
};

static const struct fsl_ddr_devtype_data imx8_devtype_data;

static const struct fsl_ddr_devtype_data imx8m_devtype_data = {
	.quirks = DDR_CAP_AXI_ID_FILTER,
};

static const struct fsl_ddr_devtype_data imx8mq_devtype_data = {
	.quirks = DDR_CAP_AXI_ID_FILTER,
	.identifier = "i.MX8MQ",
};

static const struct fsl_ddr_devtype_data imx8mm_devtype_data = {
	.quirks = DDR_CAP_AXI_ID_FILTER,
	.identifier = "i.MX8MM",
};

static const struct fsl_ddr_devtype_data imx8mn_devtype_data = {
	.quirks = DDR_CAP_AXI_ID_FILTER,
	.identifier = "i.MX8MN",
};

static const struct fsl_ddr_devtype_data imx8mp_devtype_data = {
	.quirks = DDR_CAP_AXI_ID_FILTER_ENHANCED,
	.identifier = "i.MX8MP",
};

static const struct fsl_ddr_devtype_data imx8dxl_devtype_data = {
	.quirks = DDR_CAP_AXI_ID_PORT_CHANNEL_FILTER,
	.identifier = "i.MX8DXL",
};

static const struct fsl_ddr_devtype_data imx8dxl_db_devtype_data = {
	.quirks = DDR_CAP_AXI_ID_PORT_CHANNEL_FILTER,
	.identifier = "i.MX8DXL",
	.type = DB_PERF_TYPE,
};

static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
	{ .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data},
	{ .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data},
	{ .compatible = "fsl,imx8mq-ddr-pmu", .data = &imx8mq_devtype_data},
	{ .compatible = "fsl,imx8mm-ddr-pmu", .data = &imx8mm_devtype_data},
	{ .compatible = "fsl,imx8mn-ddr-pmu", .data = &imx8mn_devtype_data},
	{ .compatible = "fsl,imx8mp-ddr-pmu", .data = &imx8mp_devtype_data},
	{ .compatible = "fsl,imx8dxl-ddr-pmu", .data = &imx8dxl_devtype_data},
	{ .compatible = "fsl,imx8dxl-db-pmu", .data = &imx8dxl_db_devtype_data},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_ddr_pmu_dt_ids);

struct ddr_pmu {
	struct pmu pmu;
	void __iomem *base;
	unsigned int cpu;
	struct hlist_node node;
	struct device *dev;
	struct perf_event *events[NUM_COUNTERS];
	enum cpuhp_state cpuhp_state;
	const struct fsl_ddr_devtype_data *devtype_data;
	int irq;
	int id;
	int active_counter;
};

static ssize_t ddr_perf_identifier_show(struct device *dev,
					struct device_attribute *attr,
					char *page)
{
	struct ddr_pmu *pmu = dev_get_drvdata(dev);

	return sysfs_emit(page, "%s\n", pmu->devtype_data->identifier);
}

static umode_t ddr_perf_identifier_attr_visible(struct kobject *kobj,
						struct attribute *attr,
						int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct ddr_pmu *pmu = dev_get_drvdata(dev);

	if (!pmu->devtype_data->identifier)
		return 0;
	return attr->mode;
};

static struct device_attribute ddr_perf_identifier_attr =
	__ATTR(identifier, 0444, ddr_perf_identifier_show, NULL);

static struct attribute *ddr_perf_identifier_attrs[] = {
	&ddr_perf_identifier_attr.attr,
	NULL,
};

static const struct attribute_group ddr_perf_identifier_attr_group = {
	.attrs = ddr_perf_identifier_attrs,
	.is_visible = ddr_perf_identifier_attr_visible,
};

enum ddr_perf_filter_capabilities {
	PERF_CAP_AXI_ID_FILTER = 0,
	PERF_CAP_AXI_ID_FILTER_ENHANCED,
	PERF_CAP_AXI_ID_PORT_CHANNEL_FILTER,
	PERF_CAP_AXI_ID_FEAT_MAX,
};

static u32 ddr_perf_filter_cap_get(struct ddr_pmu *pmu, int cap)
{
	u32 quirks = pmu->devtype_data->quirks;

	switch (cap) {
	case PERF_CAP_AXI_ID_FILTER:
		return !!(quirks & DDR_CAP_AXI_ID_FILTER);
	case PERF_CAP_AXI_ID_FILTER_ENHANCED:
		quirks &= DDR_CAP_AXI_ID_FILTER_ENHANCED;
		return quirks == DDR_CAP_AXI_ID_FILTER_ENHANCED;
	case PERF_CAP_AXI_ID_PORT_CHANNEL_FILTER:
		return !!(quirks & DDR_CAP_AXI_ID_PORT_CHANNEL_FILTER);
	default:
		WARN(1, "unknown filter cap %d\n", cap);
	}

	return 0;
}

static ssize_t ddr_perf_filter_cap_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct ddr_pmu *pmu = dev_get_drvdata(dev);
	struct dev_ext_attribute *ea =
		container_of(attr, struct dev_ext_attribute, attr);
	int cap = (long)ea->var;

	return sysfs_emit(buf, "%u\n", ddr_perf_filter_cap_get(pmu, cap));
}

#define PERF_EXT_ATTR_ENTRY(_name, _func, _var)			\
	(&((struct dev_ext_attribute) {					\
		__ATTR(_name, 0444, _func, NULL), (void *)_var		\
	}).attr.attr)

#define PERF_FILTER_EXT_ATTR_ENTRY(_name, _var)			\
	PERF_EXT_ATTR_ENTRY(_name, ddr_perf_filter_cap_show, _var)

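/*
 * These capability flags are exported read-only through the PMU's "caps"
 * sysfs group. Assuming the usual perf sysfs layout and an instance name of
 * imx8_ddr0, they would appear as
 * /sys/bus/event_source/devices/imx8_ddr0/caps/{filter,enhanced_filter,super_filter},
 * each reading back 0 or 1.
 */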
static struct attribute *ddr_perf_filter_cap_attr[] = {
	PERF_FILTER_EXT_ATTR_ENTRY(filter, PERF_CAP_AXI_ID_FILTER),
	PERF_FILTER_EXT_ATTR_ENTRY(enhanced_filter, PERF_CAP_AXI_ID_FILTER_ENHANCED),
	PERF_FILTER_EXT_ATTR_ENTRY(super_filter, PERF_CAP_AXI_ID_PORT_CHANNEL_FILTER),
	NULL,
};

static const struct attribute_group ddr_perf_filter_cap_attr_group = {
	.name = "caps",
	.attrs = ddr_perf_filter_cap_attr,
};

static ssize_t ddr_perf_cpumask_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct ddr_pmu *pmu = dev_get_drvdata(dev);

	return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
}

static struct device_attribute ddr_perf_cpumask_attr =
	__ATTR(cpumask, 0444, ddr_perf_cpumask_show, NULL);

static struct attribute *ddr_perf_cpumask_attrs[] = {
	&ddr_perf_cpumask_attr.attr,
	NULL,
};

static const struct attribute_group ddr_perf_cpumask_attr_group = {
	.attrs = ddr_perf_cpumask_attrs,
};

static ssize_t
ddr_pmu_event_show(struct device *dev, struct device_attribute *attr,
		   char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
	return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}

#define IMX8_DDR_PMU_EVENT_ATTR(_name, _id)				\
	PMU_EVENT_ATTR_ID(_name, ddr_pmu_event_show, _id)

static struct attribute *ddr_perf_events_attrs[] = {
	IMX8_DDR_PMU_EVENT_ATTR(cycles, EVENT_CYCLES_ID),
	IMX8_DDR_PMU_EVENT_ATTR(selfresh, 0x01),
	IMX8_DDR_PMU_EVENT_ATTR(read-accesses, 0x04),
	IMX8_DDR_PMU_EVENT_ATTR(write-accesses, 0x05),
	IMX8_DDR_PMU_EVENT_ATTR(read-queue-depth, 0x08),
	IMX8_DDR_PMU_EVENT_ATTR(write-queue-depth, 0x09),
	IMX8_DDR_PMU_EVENT_ATTR(lp-read-credit-cnt, 0x10),
	IMX8_DDR_PMU_EVENT_ATTR(hp-read-credit-cnt, 0x11),
	IMX8_DDR_PMU_EVENT_ATTR(write-credit-cnt, 0x12),
	IMX8_DDR_PMU_EVENT_ATTR(read-command, 0x20),
	IMX8_DDR_PMU_EVENT_ATTR(write-command, 0x21),
	IMX8_DDR_PMU_EVENT_ATTR(read-modify-write-command, 0x22),
	IMX8_DDR_PMU_EVENT_ATTR(hp-read, 0x23),
	IMX8_DDR_PMU_EVENT_ATTR(hp-req-nocredit, 0x24),
	IMX8_DDR_PMU_EVENT_ATTR(hp-xact-credit, 0x25),
	IMX8_DDR_PMU_EVENT_ATTR(lp-req-nocredit, 0x26),
	IMX8_DDR_PMU_EVENT_ATTR(lp-xact-credit, 0x27),
	IMX8_DDR_PMU_EVENT_ATTR(wr-xact-credit, 0x29),
	IMX8_DDR_PMU_EVENT_ATTR(read-cycles, 0x2a),
	IMX8_DDR_PMU_EVENT_ATTR(write-cycles, 0x2b),
	IMX8_DDR_PMU_EVENT_ATTR(read-write-transition, 0x30),
	IMX8_DDR_PMU_EVENT_ATTR(precharge, 0x31),
	IMX8_DDR_PMU_EVENT_ATTR(activate, 0x32),
	IMX8_DDR_PMU_EVENT_ATTR(load-mode, 0x33),
	IMX8_DDR_PMU_EVENT_ATTR(perf-mwr, 0x34),
	IMX8_DDR_PMU_EVENT_ATTR(read, 0x35),
	IMX8_DDR_PMU_EVENT_ATTR(read-activate, 0x36),
	IMX8_DDR_PMU_EVENT_ATTR(refresh, 0x37),
	IMX8_DDR_PMU_EVENT_ATTR(write, 0x38),
	IMX8_DDR_PMU_EVENT_ATTR(raw-hazard, 0x39),
	IMX8_DDR_PMU_EVENT_ATTR(axid-read, 0x41),
	IMX8_DDR_PMU_EVENT_ATTR(axid-write, 0x42),
	NULL,
};

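/*
 * Events visible on DB-type PMU instances; everything else in
 * ddr_perf_events_attrs[] is hidden for them by
 * ddr_perf_events_attrs_is_visible() below.
 */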
static const int ddr_perf_db_visible_event_list[] = {
	EVENT_CYCLES_ID,
	0x41,
	0x42,
};

static umode_t ddr_perf_events_attrs_is_visible(struct kobject *kobj,
						struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct ddr_pmu *pmu = dev_get_drvdata(dev);
	struct perf_pmu_events_attr *pmu_attr;
	unsigned int i;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);

	if (pmu->devtype_data->type == DDR_PERF_TYPE)
		return attr->mode;

	/* DB Type */
	for (i = 0; i < ARRAY_SIZE(ddr_perf_db_visible_event_list); i++)
		if (pmu_attr->id == ddr_perf_db_visible_event_list[i])
			return attr->mode;

	return 0;
}

static const struct attribute_group ddr_perf_events_attr_group = {
	.name = "events",
	.attrs = ddr_perf_events_attrs,
	.is_visible = ddr_perf_events_attrs_is_visible,
};

PMU_FORMAT_ATTR(event, "config:0-7");
PMU_FORMAT_ATTR(axi_id, "config1:0-15");
PMU_FORMAT_ATTR(axi_mask, "config1:16-31");
PMU_FORMAT_ATTR(axi_port, "config2:0-2");
PMU_FORMAT_ATTR(axi_channel, "config2:3-3");
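/*
 * Example of how these format fields are typically used from userspace
 * (PMU instance name "imx8_ddr0" assumed; see
 * Documentation/admin-guide/perf/imx-ddr.rst for the authoritative syntax):
 *
 *   perf stat -a -e imx8_ddr0/axid-read,axi_mask=0xffff,axi_id=0x0000/ cmd
 *
 * axi_id/axi_mask are packed into attr.config1 and, on parts with the
 * port/channel filter, axi_port/axi_channel into attr.config2.
 */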

static struct attribute *ddr_perf_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_axi_id.attr,
	&format_attr_axi_mask.attr,
	&format_attr_axi_port.attr,
	&format_attr_axi_channel.attr,
	NULL,
};

static const struct attribute_group ddr_perf_format_attr_group = {
	.name = "format",
	.attrs = ddr_perf_format_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&ddr_perf_events_attr_group,
	&ddr_perf_format_attr_group,
	&ddr_perf_cpumask_attr_group,
	&ddr_perf_filter_cap_attr_group,
	&ddr_perf_identifier_attr_group,
	NULL,
};

static bool ddr_perf_is_filtered(struct perf_event *event)
{
	return event->attr.config == 0x41 || event->attr.config == 0x42;
}

static u32 ddr_perf_filter_val(struct perf_event *event)
{
	return event->attr.config1;
}

static bool ddr_perf_filters_compatible(struct perf_event *a,
					struct perf_event *b)
{
	if (!ddr_perf_is_filtered(a))
		return true;
	if (!ddr_perf_is_filtered(b))
		return true;
	return ddr_perf_filter_val(a) == ddr_perf_filter_val(b);
}

static bool ddr_perf_is_enhanced_filtered(struct perf_event *event)
{
	unsigned int filt;
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);

	filt = pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED;
	return (filt == DDR_CAP_AXI_ID_FILTER_ENHANCED) &&
		ddr_perf_is_filtered(event);
}

static u32 ddr_perf_alloc_counter(struct ddr_pmu *pmu, int event)
{
	int i;

	/*
	 * Always map the cycle event to counter 0: the cycles counter is
	 * dedicated to the cycle event and cannot be used for other events.
	 */
	if (event == EVENT_CYCLES_ID) {
		if (pmu->events[EVENT_CYCLES_COUNTER] == NULL)
			return EVENT_CYCLES_COUNTER;
		else
			return -ENOENT;
	}

	for (i = 1; i < NUM_COUNTERS; i++) {
		if (pmu->events[i] == NULL)
			return i;
	}

	return -ENOENT;
}

static void ddr_perf_free_counter(struct ddr_pmu *pmu, int counter)
{
	pmu->events[counter] = NULL;
}

static u32 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter)
{
	struct perf_event *event = pmu->events[counter];
	void __iomem *base = pmu->base;

	/*
	 * Return bytes instead of bursts for the axid-read and axid-write
	 * events if the PMU core supports the enhanced filter.
	 */
	base += ddr_perf_is_enhanced_filtered(event) ? COUNTER_DPCR1 :
						       COUNTER_READ;
	return readl_relaxed(base + counter * 4);
}

static int ddr_perf_event_init(struct perf_event *event)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct perf_event *sibling;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EOPNOTSUPP;

	if (event->cpu < 0) {
		dev_warn(pmu->dev, "Can't provide per-task data!\n");
		return -EOPNOTSUPP;
	}

	/*
	 * We must NOT create groups containing mixed PMUs, although software
	 * events are acceptable (for example to create a CCN group
	 * periodically read when a hrtimer aka cpu-clock leader triggers).
	 */
	if (event->group_leader->pmu != event->pmu &&
	    !is_software_event(event->group_leader))
		return -EINVAL;

	if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
		if (!ddr_perf_filters_compatible(event, event->group_leader))
			return -EINVAL;
		for_each_sibling_event(sibling, event->group_leader) {
			if (!ddr_perf_filters_compatible(event, sibling))
				return -EINVAL;
		}
	}

	for_each_sibling_event(sibling, event->group_leader) {
		if (sibling->pmu != event->pmu &&
		    !is_software_event(sibling))
			return -EINVAL;
	}

	event->cpu = pmu->cpu;
	hwc->idx = -1;

	return 0;
}

static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
				    int counter, bool enable)
{
	u8 reg = counter * 4 + COUNTER_CNTL;
	int val;

	if (enable) {
		/*
		 * The cycle counter is special: clearing it requires writing
		 * 0 and then 1 to the CLEAR bit. Other counters only need a
		 * 0 written to the CLEAR bit, which the hardware then flips
		 * back to 1 on its own. The enable flow below is harmless
		 * for all counters.
		 */
		writel(0, pmu->base + reg);
		val = CNTL_EN | CNTL_CLEAR;
		val |= FIELD_PREP(CNTL_CSV_MASK, config);

		/*
		 * On i.MX8MP we need to bias the cycle counter to overflow more often.
		 * We do this by initializing bits [23:16] of the counter value via the
		 * COUNTER_CTRL Counter Parameter (CP) field.
		 */
		if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED) {
			if (counter == EVENT_CYCLES_COUNTER)
				val |= FIELD_PREP(CNTL_CP_MASK, 0xf0);
		}
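		/*
		 * Effect of the bias: the cycle counter starts partway
		 * through its range, so it overflows and raises its IRQ
		 * sooner. ddr_perf_event_update() removes the bias again
		 * with CYCLES_COUNTER_MASK before accumulating the count.
		 */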

		writel(val, pmu->base + reg);
	} else {
		/* Disable counter */
		val = readl_relaxed(pmu->base + reg) & CNTL_EN_MASK;
		writel(val, pmu->base + reg);
	}
}

static bool ddr_perf_counter_overflow(struct ddr_pmu *pmu, int counter)
{
	int val;

	val = readl_relaxed(pmu->base + counter * 4 + COUNTER_CNTL);

	return val & CNTL_OVER;
}

static void ddr_perf_counter_clear(struct ddr_pmu *pmu, int counter)
{
	u8 reg = counter * 4 + COUNTER_CNTL;
	int val;

	val = readl_relaxed(pmu->base + reg);
	val &= ~CNTL_CLEAR;
	writel(val, pmu->base + reg);

	val |= CNTL_CLEAR;
	writel(val, pmu->base + reg);
}

static void ddr_perf_event_update(struct perf_event *event)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 new_raw_count;
	int counter = hwc->idx;
	int ret;

	new_raw_count = ddr_perf_read_counter(pmu, counter);
	/* Remove the bias applied in ddr_perf_counter_enable(). */
	if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED) {
		if (counter == EVENT_CYCLES_COUNTER)
			new_raw_count &= CYCLES_COUNTER_MASK;
	}

	local64_add(new_raw_count, &event->count);

	/*
	 * On legacy SoCs the event counters keep counting after an overflow,
	 * so there is no need to clear them. On newer SoCs an event counter
	 * stops when it overflows and must be cleared before it can count
	 * again.
	 */
	if (counter != EVENT_CYCLES_COUNTER) {
		ret = ddr_perf_counter_overflow(pmu, counter);
		if (ret)
			dev_warn_ratelimited(pmu->dev, "events lost due to counter overflow (config 0x%llx)\n",
					     event->attr.config);
	}

	/* clear counter every time for both cycle counter and event counter */
	ddr_perf_counter_clear(pmu, counter);
}

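/*
 * The cycle counter doubles as a master enable: it is started when the first
 * event is added (pmu->active_counter goes 0 -> 1) and stopped again when the
 * last event is removed, since all counters stop while the cycle counter is
 * disabled (see ddr_perf_irq_handler()).
 */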
static void ddr_perf_event_start(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	local64_set(&hwc->prev_count, 0);

	ddr_perf_counter_enable(pmu, event->attr.config, counter, true);

	if (!pmu->active_counter++)
		ddr_perf_counter_enable(pmu, EVENT_CYCLES_ID,
					EVENT_CYCLES_COUNTER, true);

	hwc->state = 0;
}

static int ddr_perf_event_add(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter;
	int cfg = event->attr.config;
	int cfg1 = event->attr.config1;
	int cfg2 = event->attr.config2;

	if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
		int i;

		for (i = 1; i < NUM_COUNTERS; i++) {
			if (pmu->events[i] &&
			    !ddr_perf_filters_compatible(event, pmu->events[i]))
				return -EINVAL;
		}

		if (ddr_perf_is_filtered(event)) {
			/* invert the AXI ID masking (axi_mask) value */
			cfg1 ^= AXI_MASKING_REVERT;
			writel(cfg1, pmu->base + COUNTER_DPCR1);
		}
	}

	counter = ddr_perf_alloc_counter(pmu, cfg);
	if (counter < 0) {
		dev_dbg(pmu->dev, "There are not enough counters\n");
		return -EOPNOTSUPP;
	}

	if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_PORT_CHANNEL_FILTER) {
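		/*
		 * Only event counters 1..3 ever carry the filtered axid-read/
		 * axid-write events, and per the offsets used here each of
		 * them has its own MASK_COMP and MUX_CNTL registers at a
		 * 16-byte stride, hence the (counter - 1) << 4 below.
		 */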
		if (ddr_perf_is_filtered(event)) {
			/* invert the AXI ID masking (axi_mask) value */
			cfg1 ^= AXI_MASKING_REVERT;
			writel(cfg1, pmu->base + COUNTER_MASK_COMP + ((counter - 1) << 4));

			if (cfg == 0x41) {
				/* invert the AXI read channel (axi_channel) select bit */
				cfg2 ^= READ_CHANNEL_REVERT;
				cfg2 |= FIELD_PREP(READ_PORT_MASK, cfg2);
			} else {
				/* invert the AXI write channel (axi_channel) select bit */
				cfg2 ^= WRITE_CHANNEL_REVERT;
				cfg2 |= FIELD_PREP(WRITE_PORT_MASK, cfg2);
			}

			writel(cfg2, pmu->base + COUNTER_MUX_CNTL + ((counter - 1) << 4));
		}
	}

	pmu->events[counter] = event;
	hwc->idx = counter;

	hwc->state |= PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		ddr_perf_event_start(event, flags);

	return 0;
}

static void ddr_perf_event_stop(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	ddr_perf_counter_enable(pmu, event->attr.config, counter, false);
	ddr_perf_event_update(event);

	if (!--pmu->active_counter)
		ddr_perf_counter_enable(pmu, EVENT_CYCLES_ID,
					EVENT_CYCLES_COUNTER, false);

	hwc->state |= PERF_HES_STOPPED;
}

static void ddr_perf_event_del(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	ddr_perf_event_stop(event, PERF_EF_UPDATE);

	ddr_perf_free_counter(pmu, counter);
	hwc->idx = -1;
}

static void ddr_perf_pmu_enable(struct pmu *pmu)
{
}

static void ddr_perf_pmu_disable(struct pmu *pmu)
{
}

static void ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
			  struct device *dev)
{
	*pmu = (struct ddr_pmu) {
		.pmu = (struct pmu) {
			.module       = THIS_MODULE,
			.parent       = dev,
			.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
			.task_ctx_nr  = perf_invalid_context,
			.attr_groups  = attr_groups,
			.event_init   = ddr_perf_event_init,
			.add          = ddr_perf_event_add,
			.del          = ddr_perf_event_del,
			.start        = ddr_perf_event_start,
			.stop         = ddr_perf_event_stop,
			.read         = ddr_perf_event_update,
			.pmu_enable   = ddr_perf_pmu_enable,
			.pmu_disable  = ddr_perf_pmu_disable,
		},
		.base = base,
		.dev = dev,
	};
}

static irqreturn_t ddr_perf_irq_handler(int irq, void *p)
{
	int i;
	struct ddr_pmu *pmu = (struct ddr_pmu *) p;
	struct perf_event *event;

	/* all counters stop while the cycle counter is disabled */
	ddr_perf_counter_enable(pmu,
				EVENT_CYCLES_ID,
				EVENT_CYCLES_COUNTER,
				false);
	/*
	 * When the cycle counter overflows, all counters are stopped and an
	 * IRQ is raised. If any other counter overflows, it keeps counting
	 * and no IRQ is raised. On newer SoCs such as i.MX8MP, however, an
	 * event counter stops when it overflows, so the (biased) cycle
	 * counter is used to service the event counters before they
	 * overflow.
	 *
	 * Cycles occur at least 4 times as often as other events, so we
	 * can update all events on a cycle counter overflow and not
	 * lose events.
	 */
	for (i = 0; i < NUM_COUNTERS; i++) {
		if (!pmu->events[i])
			continue;

		event = pmu->events[i];

		ddr_perf_event_update(event);
	}

	ddr_perf_counter_enable(pmu,
				EVENT_CYCLES_ID,
				EVENT_CYCLES_COUNTER,
				true);

	return IRQ_HANDLED;
}

static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct ddr_pmu *pmu = hlist_entry_safe(node, struct ddr_pmu, node);
	int target;

	if (cpu != pmu->cpu)
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&pmu->pmu, cpu, target);
	pmu->cpu = target;

	WARN_ON(irq_set_affinity(pmu->irq, cpumask_of(pmu->cpu)));

	return 0;
}

static int ddr_perf_probe(struct platform_device *pdev)
{
	struct clk_bulk_data *clks;
	struct ddr_pmu *pmu;
	struct device_node *np;
	void __iomem *base;
	struct ida *ida;
	char *name;
	int nclks;
	int num;
	int ret;
	int irq;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	np = pdev->dev.of_node;

	pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		return -ENOMEM;

	ddr_perf_init(pmu, base, &pdev->dev);

	platform_set_drvdata(pdev, pmu);

	nclks = devm_clk_bulk_get_all_enabled(&pdev->dev, &clks);
	if (nclks < 0)
		return dev_err_probe(&pdev->dev, nclks, "Failed to get clks\n");

	pmu->devtype_data = of_device_get_match_data(&pdev->dev);

	ida = pmu->devtype_data->type == DDR_PERF_TYPE ? &ddr_ida : &db_ida;
	num = ida_alloc(ida, GFP_KERNEL);
	if (num < 0)
		return num;

	pmu->id = num;

	if (pmu->devtype_data->type == DDR_PERF_TYPE)
		name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d", num);
	else
		name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DB_PERF_DEV_NAME "%d", num);

	if (!name) {
		ret = -ENOMEM;
		goto idr_free;
	}

	pmu->cpu = raw_smp_processor_id();
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      DDR_CPUHP_CB_NAME,
				      NULL,
				      ddr_perf_offline_cpu);

	if (ret < 0) {
		dev_err(&pdev->dev, "cpuhp_setup_state_multi failed\n");
		goto idr_free;
	}

	pmu->cpuhp_state = ret;

	/* Register the pmu instance for cpu hotplug */
	ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
	if (ret) {
		dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
		goto cpuhp_instance_err;
	}

	/* Request irq */
	irq = of_irq_get(np, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "Failed to get irq: %d", irq);
		ret = irq;
		goto ddr_perf_err;
	}

	ret = devm_request_irq(&pdev->dev, irq,
			       ddr_perf_irq_handler,
			       IRQF_NOBALANCING | IRQF_NO_THREAD,
			       DDR_CPUHP_CB_NAME,
			       pmu);
	if (ret < 0) {
		dev_err(&pdev->dev, "Request irq failed: %d", ret);
		goto ddr_perf_err;
	}

	pmu->irq = irq;
	ret = irq_set_affinity(pmu->irq, cpumask_of(pmu->cpu));
	if (ret) {
		dev_err(pmu->dev, "Failed to set interrupt affinity!\n");
		goto ddr_perf_err;
	}

	ret = perf_pmu_register(&pmu->pmu, name, -1);
	if (ret)
		goto ddr_perf_err;

	return 0;

ddr_perf_err:
	cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
cpuhp_instance_err:
	cpuhp_remove_multi_state(pmu->cpuhp_state);
idr_free:
	ida_free(ida, pmu->id);
	dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret);
	return ret;
}

static void ddr_perf_remove(struct platform_device *pdev)
{
	struct ddr_pmu *pmu = platform_get_drvdata(pdev);

	cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
	cpuhp_remove_multi_state(pmu->cpuhp_state);

	perf_pmu_unregister(&pmu->pmu);

	if (pmu->devtype_data->type == DDR_PERF_TYPE)
		ida_free(&ddr_ida, pmu->id);
	else
		ida_free(&db_ida, pmu->id);
}

static struct platform_driver imx_ddr_pmu_driver = {
	.driver = {
		.name                = "imx-ddr-pmu",
		.of_match_table      = imx_ddr_pmu_dt_ids,
		.suppress_bind_attrs = true,
	},
	.probe  = ddr_perf_probe,
	.remove = ddr_perf_remove,
};

module_platform_driver(imx_ddr_pmu_driver);
MODULE_DESCRIPTION("Freescale i.MX8 DDR Performance Monitor Driver");
MODULE_LICENSE("GPL v2");