Lines Matching +full:ignore +full:- +full:power +full:- +full:on +full:- +full:sel

1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (C) 2016-2020 Arm Limited
3 // CMN-600 Coherent Mesh Network PMU driver
11 #include <linux/io-64-nonatomic-lo-hi.h>
33 #define CMN_NODEID_Y(reg, bits) (((reg) >> 3) & ((1U << (bits)) - 1))
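	(Aside: CMN_NODEID_Y() pulls the Y coordinate out of a node ID starting at bit 3, with the X coordinate sitting in the bits above it; the companion X macro and the exact meaning of the low three bits are not visible in this excerpt, so they are assumptions in the standalone sketch below, shown here for a mesh using 2-bit coordinates.)

	#include <stdio.h>

	#define NODEID_Y(reg, bits)	(((reg) >> 3) & ((1U << (bits)) - 1))
	#define NODEID_X(reg, bits)	((reg) >> (3 + (bits)))	/* assumed companion layout */

	int main(void)
	{
		unsigned int id = 0x68;	/* 0b11_01_000: X = 3, Y = 1, low bits 0 */

		printf("X=%u Y=%u\n", NODEID_X(id, 2), NODEID_Y(id, 2));
		return 0;
	}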
44 #define CMN_MAX_DTMS (CMN_MAX_XPS + (CMN_MAX_DIMENSION - 1) * 4)
78 /* Technically this is 4 bits wide on DNs, but we only use 2 there anyway */
81 /* HN-Ps are weird... */
127 /* DTC counters are paired in 64-bit registers on a 16-byte stride. Yuck */
157 /* Similarly for the 40-bit cycle counter */
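	(Aside: from the two comments above, the per-counter byte offsets follow directly — each pair of 32-bit counters shares one 64-bit register, and successive registers sit 16 bytes apart. A minimal sketch of that layout; the driver's actual offset macro is not shown in this excerpt.)

	/* Byte offset of 32-bit event counter n: 0, 4, 16, 20, 32, 36, ... */
	static inline unsigned int dt_evcnt_offset(unsigned int n)
	{
		return (n / 2) * 16 + (n % 2) * 4;
	}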
168 #define CMN_EVENT_TYPE(event) FIELD_GET(CMN_CONFIG_TYPE, (event)->attr.config)
169 #define CMN_EVENT_EVENTID(event) FIELD_GET(CMN_CONFIG_EVENTID, (event)->attr.config)
170 #define CMN_EVENT_OCCUPID(event) FIELD_GET(CMN_CONFIG_OCCUPID, (event)->attr.config)
171 #define CMN_EVENT_BYNODEID(event) FIELD_GET(CMN_CONFIG_BYNODEID, (event)->attr.config)
172 #define CMN_EVENT_NODEID(event) FIELD_GET(CMN_CONFIG_NODEID, (event)->attr.config)
177 /* Note that we don't yet support the tertiary match group on newer IPs */
183 #define CMN_EVENT_WP_COMBINE(event) FIELD_GET(CMN_CONFIG_WP_COMBINE, (event)->attr.config)
184 #define CMN_EVENT_WP_DEV_SEL(event) FIELD_GET(CMN_CONFIG_WP_DEV_SEL, (event)->attr.config)
185 #define CMN_EVENT_WP_CHN_SEL(event) FIELD_GET(CMN_CONFIG_WP_CHN_SEL, (event)->attr.config)
186 #define CMN_EVENT_WP_GRP(event) FIELD_GET(CMN_CONFIG_WP_GRP, (event)->attr.config)
187 #define CMN_EVENT_WP_EXCLUSIVE(event) FIELD_GET(CMN_CONFIG_WP_EXCLUSIVE, (event)->attr.config)
188 #define CMN_EVENT_WP_VAL(event) FIELD_GET(CMN_CONFIG1_WP_VAL, (event)->attr.config1)
189 #define CMN_EVENT_WP_MASK(event) FIELD_GET(CMN_CONFIG2_WP_MASK, (event)->attr.config2)
191 /* Made-up event IDs for watchpoint direction */
203 CMN_ANY = -1,
204 NOT_CMN600 = -2,
216 /* CMN-600 r0px shouldn't exist in silicon, thankfully */
270 SEL_NONE = -1,
286 /* DN/HN-F/CXHA */
369 return fls((cmn->mesh_x - 1) | (cmn->mesh_y - 1) | 2); in arm_cmn_xyidbits()
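	(Aside: the expression above sizes the per-coordinate ID field for the mesh: fls() of (largest X index | largest Y index | 2), i.e. the number of bits needed for the larger dimension, with a floor of 2 from the "| 2" term. A standalone sketch with a few worked values, assuming the usual 1-based fls() semantics.)

	static int xyidbits(unsigned int mesh_x, unsigned int mesh_y)
	{
		unsigned int v = (mesh_x - 1) | (mesh_y - 1) | 2;
		int bits = 0;

		while (v) {		/* open-coded fls(): position of highest set bit */
			bits++;
			v >>= 1;
		}
		return bits;		/* 4x2 -> 2, 8x6 -> 3, 12x12 -> 4 */
	}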
376 if (cmn->num_xps == 1) { in arm_cmn_nid()
386 if (cmn->ports_used & 0xc) { in arm_cmn_nid()
400 struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id); in arm_cmn_node_to_xp()
401 int xp_idx = cmn->mesh_x * nid.y + nid.x; in arm_cmn_node_to_xp()
403 return cmn->xps + xp_idx; in arm_cmn_node_to_xp()
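	(Aside: XPs are stored row by row, so this flattening puts the XP at (x, y) at index mesh_x * y + x; for example, in a mesh with mesh_x = 4, the XP at (2, 1) is cmn->xps[4 * 1 + 2] = cmn->xps[6].)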
410 for (dn = cmn->dns; dn->type; dn++) in arm_cmn_node()
411 if (dn->type == type) in arm_cmn_node()
418 switch (cmn->part) { in arm_cmn_model()
438 if (cmn->part == PART_CMN600 || cmn->part == PART_CMN650) in arm_cmn_device_connect_info()
441 * CI-700 may have extra ports, but still has the in arm_cmn_device_connect_info()
444 if (cmn->part == PART_CI700) in arm_cmn_device_connect_info()
448 return readl_relaxed(xp->pmu_base - CMN_PMU_OFFSET + offset); in arm_cmn_device_connect_info()
458 case 0x01: return " RN-I |"; in arm_cmn_device_type()
459 case 0x02: return " RN-D |"; in arm_cmn_device_type()
460 case 0x04: return " RN-F_B |"; in arm_cmn_device_type()
461 case 0x05: return "RN-F_B_E|"; in arm_cmn_device_type()
462 case 0x06: return " RN-F_A |"; in arm_cmn_device_type()
463 case 0x07: return "RN-F_A_E|"; in arm_cmn_device_type()
464 case 0x08: return " HN-T |"; in arm_cmn_device_type()
465 case 0x09: return " HN-I |"; in arm_cmn_device_type()
466 case 0x0a: return " HN-D |"; in arm_cmn_device_type()
467 case 0x0b: return " HN-P |"; in arm_cmn_device_type()
468 case 0x0c: return " SN-F |"; in arm_cmn_device_type()
470 case 0x0e: return " HN-F |"; in arm_cmn_device_type()
471 case 0x0f: return " SN-F_E |"; in arm_cmn_device_type()
472 case 0x10: return " SN-F_D |"; in arm_cmn_device_type()
476 case 0x14: return " RN-F_D |"; in arm_cmn_device_type()
477 case 0x15: return "RN-F_D_E|"; in arm_cmn_device_type()
478 case 0x16: return " RN-F_C |"; in arm_cmn_device_type()
479 case 0x17: return "RN-F_C_E|"; in arm_cmn_device_type()
480 case 0x18: return " RN-F_E |"; in arm_cmn_device_type()
481 case 0x19: return "RN-F_E_E|"; in arm_cmn_device_type()
483 case 0x1d: return " HN-V |"; in arm_cmn_device_type()
491 struct arm_cmn *cmn = s->private; in arm_cmn_show_logid()
494 for (dn = cmn->dns; dn->type; dn++) { in arm_cmn_show_logid()
495 struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id); in arm_cmn_show_logid()
497 if (dn->type == CMN_TYPE_XP) in arm_cmn_show_logid()
499 /* Ignore the extra components that will overlap on some ports */ in arm_cmn_show_logid()
500 if (dn->type < CMN_TYPE_HNI) in arm_cmn_show_logid()
506 seq_printf(s, " #%-2d |", dn->logid); in arm_cmn_show_logid()
514 struct arm_cmn *cmn = s->private; in arm_cmn_map_show()
515 int x, y, p, pmax = fls(cmn->ports_used); in arm_cmn_map_show()
518 for (x = 0; x < cmn->mesh_x; x++) in arm_cmn_map_show()
521 y = cmn->mesh_y; in arm_cmn_map_show()
522 while (y--) { in arm_cmn_map_show()
523 int xp_base = cmn->mesh_x * y; in arm_cmn_map_show()
526 for (x = 0; x < cmn->mesh_x; x++) in arm_cmn_map_show()
527 seq_puts(s, "--------+"); in arm_cmn_map_show()
530 for (x = 0; x < cmn->mesh_x; x++) { in arm_cmn_map_show()
531 struct arm_cmn_node *xp = cmn->xps + xp_base + x; in arm_cmn_map_show()
535 seq_printf(s, " XP #%-2d |", xp_base + x); in arm_cmn_map_show()
539 for (x = 0; x < cmn->mesh_x; x++) { in arm_cmn_map_show()
540 s8 dtc = cmn->xps[xp_base + x].dtc; in arm_cmn_map_show()
548 for (x = 0; x < cmn->mesh_x; x++) in arm_cmn_map_show()
553 for (x = 0; x < cmn->mesh_x; x++) in arm_cmn_map_show()
556 for (x = 0; x < cmn->mesh_x; x++) in arm_cmn_map_show()
559 for (x = 0; x < cmn->mesh_x; x++) in arm_cmn_map_show()
562 seq_puts(s, "\n-----+"); in arm_cmn_map_show()
564 for (x = 0; x < cmn->mesh_x; x++) in arm_cmn_map_show()
565 seq_puts(s, "--------+"); in arm_cmn_map_show()
576 name = devm_kasprintf(cmn->dev, GFP_KERNEL, "map_%d", id); in arm_cmn_debugfs_init()
580 cmn->debug = debugfs_create_file(name, 0444, arm_cmn_debugfs, cmn, &arm_cmn_map_fops); in arm_cmn_debugfs_init()
597 for (i = 0, dn = hw->dn; i < hw->num_dns; i++, dn++)
599 /* @i is the DTC number, @idx is the counter index on that DTC */
601 for (int i = 0, idx; i < CMN_MAX_DTCS; i++) if ((idx = hw->dtc_idx[i]) >= 0)
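	(Aside: the second loop body above only visits DTCs that the event actually uses — unused slots in dtc_idx[] are initialised to -1 by the memset(hw->dtc_idx, -1, ...) in arm_cmn_event_init() further down, so the ">= 0" test skips them. A small standalone sketch of the same pattern; the macro names these bodies belong to are not visible in this excerpt.)

	#include <stdio.h>

	#define MAX_DTCS 4	/* illustrative; the driver uses CMN_MAX_DTCS */

	static void visit_used_dtcs(const signed char dtc_idx[MAX_DTCS])
	{
		for (int i = 0, idx; i < MAX_DTCS; i++)
			if ((idx = dtc_idx[i]) >= 0)
				printf("DTC %d holds counter %d for this event\n", i, idx);
	}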
606 return (struct arm_cmn_hw_event *)&event->hw; in to_cmn_hw()
653 if (eattr->type == CMN_TYPE_DTC) in arm_cmn_event_show()
654 return sysfs_emit(buf, "type=0x%x\n", eattr->type); in arm_cmn_event_show()
656 if (eattr->type == CMN_TYPE_WP) in arm_cmn_event_show()
659 eattr->type, eattr->eventid); in arm_cmn_event_show()
661 if (eattr->fsel > SEL_NONE) in arm_cmn_event_show()
663 eattr->type, eattr->eventid, eattr->occupid); in arm_cmn_event_show()
665 return sysfs_emit(buf, "type=0x%x,eventid=0x%x\n", eattr->type, in arm_cmn_event_show()
666 eattr->eventid); in arm_cmn_event_show()
681 if (!(eattr->model & arm_cmn_model(cmn))) in arm_cmn_event_attr_is_visible()
684 type = eattr->type; in arm_cmn_event_attr_is_visible()
685 eventid = eattr->eventid; in arm_cmn_event_attr_is_visible()
689 return attr->mode; in arm_cmn_event_attr_is_visible()
696 if ((intf & 4) && !(cmn->ports_used & BIT(intf & 3))) in arm_cmn_event_attr_is_visible()
699 if (chan == 4 && cmn->part == PART_CMN600) in arm_cmn_event_attr_is_visible()
702 if ((chan == 5 && cmn->rsp_vc_num < 2) || in arm_cmn_event_attr_is_visible()
703 (chan == 6 && cmn->dat_vc_num < 2) || in arm_cmn_event_attr_is_visible()
704 (chan == 7 && cmn->snp_vc_num < 2) || in arm_cmn_event_attr_is_visible()
705 (chan == 8 && cmn->req_vc_num < 2)) in arm_cmn_event_attr_is_visible()
709 /* Revision-specific differences */ in arm_cmn_event_attr_is_visible()
710 if (cmn->part == PART_CMN600) { in arm_cmn_event_attr_is_visible()
711 if (cmn->rev < REV_CMN600_R1P3) { in arm_cmn_event_attr_is_visible()
715 if (cmn->rev < REV_CMN600_R1P2) { in arm_cmn_event_attr_is_visible()
721 } else if (cmn->part == PART_CMN650) { in arm_cmn_event_attr_is_visible()
722 if (cmn->rev < REV_CMN650_R2P0 || cmn->rev == REV_CMN650_R1P2) { in arm_cmn_event_attr_is_visible()
730 } else if (cmn->part == PART_CMN700) { in arm_cmn_event_attr_is_visible()
731 if (cmn->rev < REV_CMN700_R2P0) { in arm_cmn_event_attr_is_visible()
739 if (cmn->rev < REV_CMN700_R1P0) { in arm_cmn_event_attr_is_visible()
748 return attr->mode; in arm_cmn_event_attr_is_visible()
873 * DVM node events conflict with HN-I events in the equivalent PMU
874 * slot, but our lazy short-cut of using the DTM counter index for
972 * HN-P events squat on top of the HN-I similarly to DVM events, except
995 /* We treat watchpoints as a special made-up class of XP events */
1256 int lo = __ffs(fmt->field), hi = __fls(fmt->field); in arm_cmn_format_show()
1261 if (!fmt->config) in arm_cmn_format_show()
1262 return sysfs_emit(buf, "config:%d-%d\n", lo, hi); in arm_cmn_format_show()
1264 return sysfs_emit(buf, "config%d:%d-%d\n", fmt->config, lo, hi); in arm_cmn_format_show()
1304 return cpumap_print_to_pagebuf(true, buf, cpumask_of(cmn->cpu)); in arm_cmn_cpumask_show()
1315 return sysfs_emit(buf, "%03x%02x\n", cmn->part, cmn->rev); in arm_cmn_identifier_show()
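	(Aside: the "%03x%02x" format packs three hex digits of part number and two of revision, so a part whose PID registers report part 0x434 and revision 2 would read back through this identifier attribute as "43402".)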
1351 bool is_cmn600 = to_cmn(event->pmu)->part == PART_CMN600; in arm_cmn_wp_config()
1368 if (!cmn->state) in arm_cmn_set_state()
1369 writel_relaxed(0, cmn->dtc[0].base + CMN_DT_PMCR); in arm_cmn_set_state()
1370 cmn->state |= state; in arm_cmn_set_state()
1375 cmn->state &= ~state; in arm_cmn_clear_state()
1376 if (!cmn->state) in arm_cmn_clear_state()
1378 cmn->dtc[0].base + CMN_DT_PMCR); in arm_cmn_clear_state()
1401 if (dtm != &cmn->dtms[dn->dtm]) { in arm_cmn_read_dtm()
1402 dtm = &cmn->dtms[dn->dtm] + hw->dtm_offset; in arm_cmn_read_dtm()
1403 reg = readq_relaxed(dtm->base + offset); in arm_cmn_read_dtm()
1405 dtm_idx = arm_cmn_get_index(hw->dtm_idx, i); in arm_cmn_read_dtm()
1413 u64 val = readq_relaxed(dtc->base + CMN_DT_PMCCNTR); in arm_cmn_read_cc()
1415 writeq_relaxed(CMN_CC_INIT, dtc->base + CMN_DT_PMCCNTR); in arm_cmn_read_cc()
1416 return (val - CMN_CC_INIT) & ((CMN_CC_INIT << 1) - 1); in arm_cmn_read_cc()
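	(Aside: putting the earlier 40-bit comment together with this expression — the cycle counter is re-armed to CMN_CC_INIT after every read, and the delta is taken modulo the counter width. For the mask (CMN_CC_INIT << 1) - 1 to be 40 bits of ones, CMN_CC_INIT must be the mid-range value 1 << 39. A standalone sketch of the same arithmetic.)

	#include <stdint.h>

	#define CC_INIT		(1ULL << 39)		/* inferred mid-range re-arm value */
	#define CC_MASK		((CC_INIT << 1) - 1)	/* 40 bits of ones */

	/* Cycles elapsed since the counter was last re-armed to CC_INIT */
	static uint64_t cc_delta(uint64_t raw)
	{
		return (raw - CC_INIT) & CC_MASK;
	}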
1423 val = readl_relaxed(dtc->base + pmevcnt); in arm_cmn_read_counter()
1424 writel_relaxed(CMN_COUNTER_INIT, dtc->base + pmevcnt); in arm_cmn_read_counter()
1425 return val - CMN_COUNTER_INIT; in arm_cmn_read_counter()
1430 struct arm_cmn *cmn = to_cmn(event->pmu); in arm_cmn_init_counter()
1435 writel_relaxed(CMN_COUNTER_INIT, cmn->dtc[i].base + CMN_DT_PMEVCNT(idx)); in arm_cmn_init_counter()
1436 cmn->dtc[i].counters[idx] = event; in arm_cmn_init_counter()
1440 local64_set(&event->hw.prev_count, count); in arm_cmn_init_counter()
1445 struct arm_cmn *cmn = to_cmn(event->pmu); in arm_cmn_event_read()
1451 delta = arm_cmn_read_cc(cmn->dtc + hw->dtc_idx[0]); in arm_cmn_event_read()
1452 local64_add(delta, &event->count); in arm_cmn_event_read()
1456 prev = local64_xchg(&event->hw.prev_count, new); in arm_cmn_event_read()
1458 delta = new - prev; in arm_cmn_event_read()
1462 new = arm_cmn_read_counter(cmn->dtc + i, idx); in arm_cmn_event_read()
1466 local64_add(delta, &event->count); in arm_cmn_event_read()
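	(Aside: both paths above follow the standard perf accounting pattern — the cycle path adds the wrapped delta directly, while the event path exchanges prev_count for the new raw value and adds only the difference. A minimal sketch of that second pattern outside the perf framework.)

	#include <stdint.h>

	struct sw_counter {
		uint64_t prev;		/* last raw hardware value seen */
		uint64_t total;		/* accumulated event count */
	};

	static void sw_counter_update(struct sw_counter *c, uint64_t raw)
	{
		uint64_t delta = raw - c->prev;	/* unsigned subtraction tolerates wrap */

		c->prev = raw;
		c->total += delta;
	}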
1477 if (!dn->occupid[fsel].count) { in arm_cmn_set_event_sel_hi()
1478 dn->occupid[fsel].val = occupid; in arm_cmn_set_event_sel_hi()
1480 dn->occupid[SEL_CBUSY_SNTHROTTLE_SEL].val) | in arm_cmn_set_event_sel_hi()
1482 dn->occupid[SEL_SN_HOME_SEL].val) | in arm_cmn_set_event_sel_hi()
1484 dn->occupid[SEL_HBT_LBT_SEL].val) | in arm_cmn_set_event_sel_hi()
1486 dn->occupid[SEL_CLASS_OCCUP_ID].val) | in arm_cmn_set_event_sel_hi()
1488 dn->occupid[SEL_OCCUP1ID].val); in arm_cmn_set_event_sel_hi()
1489 writel_relaxed(reg >> 32, dn->pmu_base + CMN_PMU_EVENT_SEL + 4); in arm_cmn_set_event_sel_hi()
1490 } else if (dn->occupid[fsel].val != occupid) { in arm_cmn_set_event_sel_hi()
1491 return -EBUSY; in arm_cmn_set_event_sel_hi()
1493 dn->occupid[fsel].count++; in arm_cmn_set_event_sel_hi()
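	(Aside: the occupid bookkeeping above is a small refcounted-filter scheme — the first user of a given filter select programs its value, later users may share it only if they request the same value, and a conflicting request is refused with -EBUSY; the matching count-- lives in arm_cmn_event_clear() further down. A standalone sketch of that sharing rule.)

	#include <errno.h>

	struct shared_filter {
		unsigned int count;	/* how many active events use this filter */
		unsigned int val;	/* the value they all agreed on */
	};

	static int shared_filter_get(struct shared_filter *f, unsigned int val)
	{
		if (!f->count)
			f->val = val;		/* first user picks the value */
		else if (f->val != val)
			return -EBUSY;		/* conflicting filter value */
		f->count++;
		return 0;
	}

	static void shared_filter_put(struct shared_filter *f)
	{
		f->count--;
	}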
1501 dn->event_w[dtm_idx] = eventid; in arm_cmn_set_event_sel_lo()
1502 writeq_relaxed(le64_to_cpu(dn->event_sel_w), dn->pmu_base + CMN_PMU_EVENT_SEL); in arm_cmn_set_event_sel_lo()
1504 dn->event[dtm_idx] = eventid; in arm_cmn_set_event_sel_lo()
1505 writel_relaxed(le32_to_cpu(dn->event_sel), dn->pmu_base + CMN_PMU_EVENT_SEL); in arm_cmn_set_event_sel_lo()
1511 struct arm_cmn *cmn = to_cmn(event->pmu); in arm_cmn_event_start()
1518 i = hw->dtc_idx[0]; in arm_cmn_event_start()
1519 writeq_relaxed(CMN_CC_INIT, cmn->dtc[i].base + CMN_DT_PMCCNTR); in arm_cmn_event_start()
1520 cmn->dtc[i].cc_active = true; in arm_cmn_event_start()
1527 void __iomem *base = dn->pmu_base + CMN_DTM_OFFSET(hw->dtm_offset); in arm_cmn_event_start()
1533 int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i); in arm_cmn_event_start()
1536 hw->wide_sel); in arm_cmn_event_start()
1542 struct arm_cmn *cmn = to_cmn(event->pmu); in arm_cmn_event_stop()
1549 i = hw->dtc_idx[0]; in arm_cmn_event_stop()
1550 cmn->dtc[i].cc_active = false; in arm_cmn_event_stop()
1555 void __iomem *base = dn->pmu_base + CMN_DTM_OFFSET(hw->dtm_offset); in arm_cmn_event_stop()
1561 int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i); in arm_cmn_event_stop()
1563 arm_cmn_set_event_sel_lo(dn, dtm_idx, 0, hw->wide_sel); in arm_cmn_event_stop()
1590 val->cycles = true; in arm_cmn_val_add_event()
1595 val->dtc_count[dtc]++; in arm_cmn_val_add_event()
1598 int wp_idx, dtm = dn->dtm, sel = hw->filter_sel; in arm_cmn_val_add_event() local
1600 val->dtm_count[dtm]++; in arm_cmn_val_add_event()
1602 if (sel > SEL_NONE) in arm_cmn_val_add_event()
1603 val->occupid[dtm][sel] = CMN_EVENT_OCCUPID(event) + 1; in arm_cmn_val_add_event()
1609 val->wp[dtm][wp_idx] = CMN_EVENT_WP_COMBINE(event) + 1; in arm_cmn_val_add_event()
1617 struct perf_event *sibling, *leader = event->group_leader; in arm_cmn_validate_group()
1620 int i, ret = -EINVAL; in arm_cmn_validate_group()
1625 if (event->pmu != leader->pmu && !is_software_event(leader)) in arm_cmn_validate_group()
1626 return -EINVAL; in arm_cmn_validate_group()
1630 return -ENOMEM; in arm_cmn_validate_group()
1638 ret = val->cycles ? -EINVAL : 0; in arm_cmn_validate_group()
1643 if (val->dtc_count[i] == CMN_DT_NUM_COUNTERS) in arm_cmn_validate_group()
1647 int wp_idx, wp_cmb, dtm = dn->dtm, sel = hw->filter_sel; in arm_cmn_validate_group() local
1649 if (val->dtm_count[dtm] == CMN_DTM_NUM_COUNTERS) in arm_cmn_validate_group()
1652 if (sel > SEL_NONE && val->occupid[dtm][sel] && in arm_cmn_validate_group()
1653 val->occupid[dtm][sel] != CMN_EVENT_OCCUPID(event) + 1) in arm_cmn_validate_group()
1660 if (val->wp[dtm][wp_idx]) in arm_cmn_validate_group()
1663 wp_cmb = val->wp[dtm][wp_idx ^ 1]; in arm_cmn_validate_group()
1681 for (int i = 0; i < ARRAY_SIZE(arm_cmn_event_attrs) - 1; i++) { in arm_cmn_filter_sel()
1683 if (e->model & model && e->type == type && e->eventid == eventid) in arm_cmn_filter_sel()
1684 return e->fsel; in arm_cmn_filter_sel()
1692 struct arm_cmn *cmn = to_cmn(event->pmu); in arm_cmn_event_init()
1699 if (event->attr.type != event->pmu->type) in arm_cmn_event_init()
1700 return -ENOENT; in arm_cmn_event_init()
1702 if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) in arm_cmn_event_init()
1703 return -EINVAL; in arm_cmn_event_init()
1705 event->cpu = cmn->cpu; in arm_cmn_event_init()
1706 if (event->cpu < 0) in arm_cmn_event_init()
1707 return -EINVAL; in arm_cmn_event_init()
1720 return -EINVAL; in arm_cmn_event_init()
1721 /* ...but the DTM may depend on which port we're watching */ in arm_cmn_event_init()
1722 if (cmn->multi_dtm) in arm_cmn_event_init()
1723 hw->dtm_offset = CMN_EVENT_WP_DEV_SEL(event) / 2; in arm_cmn_event_init()
1724 } else if (type == CMN_TYPE_XP && cmn->part == PART_CMN700) { in arm_cmn_event_init()
1725 hw->wide_sel = true; in arm_cmn_event_init()
1729 hw->filter_sel = arm_cmn_filter_sel(cmn, type, eventid); in arm_cmn_event_init()
1734 hw->dn = arm_cmn_node(cmn, type); in arm_cmn_event_init()
1735 if (!hw->dn) in arm_cmn_event_init()
1736 return -EINVAL; in arm_cmn_event_init()
1738 memset(hw->dtc_idx, -1, sizeof(hw->dtc_idx)); in arm_cmn_event_init()
1739 for (dn = hw->dn; dn->type == type; dn++) { in arm_cmn_event_init()
1740 if (bynodeid && dn->id != nodeid) { in arm_cmn_event_init()
1741 hw->dn++; in arm_cmn_event_init()
1744 hw->num_dns++; in arm_cmn_event_init()
1745 if (dn->dtc < 0) in arm_cmn_event_init()
1746 memset(hw->dtc_idx, 0, cmn->num_dtcs); in arm_cmn_event_init()
1748 hw->dtc_idx[dn->dtc] = 0; in arm_cmn_event_init()
1754 if (!hw->num_dns) { in arm_cmn_event_init()
1757 dev_dbg(cmn->dev, "invalid node 0x%x (%d,%d,%d,%d) type 0x%x\n", in arm_cmn_event_init()
1759 return -EINVAL; in arm_cmn_event_init()
1771 while (i--) { in arm_cmn_event_clear()
1772 struct arm_cmn_dtm *dtm = &cmn->dtms[hw->dn[i].dtm] + hw->dtm_offset; in arm_cmn_event_clear()
1773 unsigned int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i); in arm_cmn_event_clear()
1776 dtm->wp_event[arm_cmn_wp_idx(event)] = -1; in arm_cmn_event_clear()
1778 if (hw->filter_sel > SEL_NONE) in arm_cmn_event_clear()
1779 hw->dn[i].occupid[hw->filter_sel].count--; in arm_cmn_event_clear()
1781 dtm->pmu_config_low &= ~CMN__PMEVCNT_PAIRED(dtm_idx); in arm_cmn_event_clear()
1782 writel_relaxed(dtm->pmu_config_low, dtm->base + CMN_DTM_PMU_CONFIG); in arm_cmn_event_clear()
1784 memset(hw->dtm_idx, 0, sizeof(hw->dtm_idx)); in arm_cmn_event_clear()
1787 cmn->dtc[j].counters[idx] = NULL; in arm_cmn_event_clear()
1792 struct arm_cmn *cmn = to_cmn(event->pmu); in arm_cmn_event_add()
1799 while (cmn->dtc[i].cycles) in arm_cmn_event_add()
1800 if (++i == cmn->num_dtcs) in arm_cmn_event_add()
1801 return -ENOSPC; in arm_cmn_event_add()
1803 cmn->dtc[i].cycles = event; in arm_cmn_event_add()
1804 hw->dtc_idx[0] = i; in arm_cmn_event_add()
1813 if (cmn->part == PART_CMN600 && j > 0) { in arm_cmn_event_add()
1814 idx = hw->dtc_idx[0]; in arm_cmn_event_add()
1817 while (cmn->dtc[j].counters[idx]) in arm_cmn_event_add()
1819 return -ENOSPC; in arm_cmn_event_add()
1821 hw->dtc_idx[j] = idx; in arm_cmn_event_add()
1826 struct arm_cmn_dtm *dtm = &cmn->dtms[dn->dtm] + hw->dtm_offset; in arm_cmn_event_add()
1827 unsigned int dtm_idx, shift, d = max_t(int, dn->dtc, 0); in arm_cmn_event_add()
1831 while (dtm->pmu_config_low & CMN__PMEVCNT_PAIRED(dtm_idx)) in arm_cmn_event_add()
1841 if (dtm->wp_event[wp_idx] >= 0) in arm_cmn_event_add()
1844 tmp = dtm->wp_event[wp_idx ^ 1]; in arm_cmn_event_add()
1846 CMN_EVENT_WP_COMBINE(cmn->dtc[d].counters[tmp])) in arm_cmn_event_add()
1850 dtm->wp_event[wp_idx] = hw->dtc_idx[d]; in arm_cmn_event_add()
1851 writel_relaxed(cfg, dtm->base + CMN_DTM_WPn_CONFIG(wp_idx)); in arm_cmn_event_add()
1853 struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id); in arm_cmn_event_add()
1855 if (cmn->multi_dtm) in arm_cmn_event_add()
1861 if (arm_cmn_set_event_sel_hi(dn, hw->filter_sel, CMN_EVENT_OCCUPID(event))) in arm_cmn_event_add()
1865 arm_cmn_set_index(hw->dtm_idx, i, dtm_idx); in arm_cmn_event_add()
1867 dtm->input_sel[dtm_idx] = input_sel; in arm_cmn_event_add()
1869 dtm->pmu_config_low &= ~(CMN__PMEVCNT0_GLOBAL_NUM << shift); in arm_cmn_event_add()
1870 dtm->pmu_config_low |= FIELD_PREP(CMN__PMEVCNT0_GLOBAL_NUM, hw->dtc_idx[d]) << shift; in arm_cmn_event_add()
1871 dtm->pmu_config_low |= CMN__PMEVCNT_PAIRED(dtm_idx); in arm_cmn_event_add()
1872 reg = (u64)le32_to_cpu(dtm->pmu_config_high) << 32 | dtm->pmu_config_low; in arm_cmn_event_add()
1873 writeq_relaxed(reg, dtm->base + CMN_DTM_PMU_CONFIG); in arm_cmn_event_add()
1886 return -ENOSPC; in arm_cmn_event_add()
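	(Aside: the sequence just above ties a local DTM counter to a global DTC counter — the GLOBAL_NUM field for that DTM slot is rewritten to the DTC counter number and the PAIRED bit for the slot is set, all in one read-modify-write of CMN_DTM_PMU_CONFIG. A sketch of that read-modify-write; the field positions below are placeholders, not the real register layout.)

	#include <stdint.h>

	#define PAIRED_BIT(slot)	(1U << (slot))	/* placeholder position */
	#define GLOBAL_NUM_MASK		0x7U		/* placeholder width */

	/* Route DTM counter 'slot' (field at 'shift') to global DTC counter 'dtc_idx' */
	static uint32_t pair_dtm_counter(uint32_t config, unsigned int slot,
					 unsigned int shift, unsigned int dtc_idx)
	{
		config &= ~(GLOBAL_NUM_MASK << shift);	/* clear any stale routing */
		config |= dtc_idx << shift;		/* select the DTC counter */
		config |= PAIRED_BIT(slot);		/* arm this DTM slot */
		return config;
	}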
1891 struct arm_cmn *cmn = to_cmn(event->pmu); in arm_cmn_event_del()
1898 cmn->dtc[hw->dtc_idx[0]].cycles = NULL; in arm_cmn_event_del()
1900 arm_cmn_event_clear(cmn, event, hw->num_dns); in arm_cmn_event_del()
1907 * plus it seems they don't work properly on some hardware anyway :(
1929 perf_pmu_migrate_context(&cmn->pmu, cmn->cpu, cpu); in arm_cmn_migrate()
1930 for (i = 0; i < cmn->num_dtcs; i++) in arm_cmn_migrate()
1931 irq_set_affinity(cmn->dtc[i].irq, cpumask_of(cpu)); in arm_cmn_migrate()
1932 cmn->cpu = cpu; in arm_cmn_migrate()
1941 node = dev_to_node(cmn->dev); in arm_cmn_pmu_online_cpu()
1942 if (node != NUMA_NO_NODE && cpu_to_node(cmn->cpu) != node && cpu_to_node(cpu) == node) in arm_cmn_pmu_online_cpu()
1955 if (cpu != cmn->cpu) in arm_cmn_pmu_offline_cpu()
1958 node = dev_to_node(cmn->dev); in arm_cmn_pmu_offline_cpu()
1975 u32 status = readl_relaxed(dtc->base + CMN_DT_PMOVSR); in arm_cmn_handle_irq()
1982 if (WARN_ON(!dtc->counters[i])) in arm_cmn_handle_irq()
1985 local64_add(delta, &dtc->counters[i]->count); in arm_cmn_handle_irq()
1991 if (dtc->cc_active && !WARN_ON(!dtc->cycles)) { in arm_cmn_handle_irq()
1993 local64_add(delta, &dtc->cycles->count); in arm_cmn_handle_irq()
1997 writel_relaxed(status, dtc->base + CMN_DT_PMOVSR_CLR); in arm_cmn_handle_irq()
1999 if (!dtc->irq_friend) in arm_cmn_handle_irq()
2001 dtc += dtc->irq_friend; in arm_cmn_handle_irq()
2010 for (i = 0; i < cmn->num_dtcs; i++) { in arm_cmn_init_irqs()
2011 irq = cmn->dtc[i].irq; in arm_cmn_init_irqs()
2012 for (j = i; j--; ) { in arm_cmn_init_irqs()
2013 if (cmn->dtc[j].irq == irq) { in arm_cmn_init_irqs()
2014 cmn->dtc[j].irq_friend = i - j; in arm_cmn_init_irqs()
2018 err = devm_request_irq(cmn->dev, irq, arm_cmn_handle_irq, in arm_cmn_init_irqs()
2020 dev_name(cmn->dev), &cmn->dtc[i]); in arm_cmn_init_irqs()
2024 err = irq_set_affinity(irq, cpumask_of(cmn->cpu)); in arm_cmn_init_irqs()
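	(Aside: the j-loop above implements IRQ sharing between DTCs — when a later DTC i reuses the IRQ of an earlier DTC j, only one handler is registered and dtc[j].irq_friend records the forward distance, which is what lets the interrupt handler hop along with the "dtc += dtc->irq_friend" step seen earlier. A standalone sketch of just the linking step.)

	struct dtc { int irq; int irq_friend; };

	static void link_shared_irqs(struct dtc *dtc, int num)
	{
		for (int i = 0; i < num; i++) {
			for (int j = i; j--; ) {
				if (dtc[j].irq == dtc[i].irq) {
					/* nearest earlier sharer points forward to i */
					dtc[j].irq_friend = i - j;
					break;	/* the real code also skips requesting a second handler */
				}
			}
		}
	}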
2037 dtm->base = xp->pmu_base + CMN_DTM_OFFSET(idx); in arm_cmn_init_dtm()
2038 dtm->pmu_config_low = CMN_DTM_PMU_CONFIG_PMU_EN; in arm_cmn_init_dtm()
2039 writeq_relaxed(dtm->pmu_config_low, dtm->base + CMN_DTM_PMU_CONFIG); in arm_cmn_init_dtm()
2041 dtm->wp_event[i] = -1; in arm_cmn_init_dtm()
2042 writeq_relaxed(0, dtm->base + CMN_DTM_WPn_MASK(i)); in arm_cmn_init_dtm()
2043 writeq_relaxed(~0ULL, dtm->base + CMN_DTM_WPn_VAL(i)); in arm_cmn_init_dtm()
2049 struct arm_cmn_dtc *dtc = cmn->dtc + idx; in arm_cmn_init_dtc()
2051 dtc->base = dn->pmu_base - CMN_PMU_OFFSET; in arm_cmn_init_dtc()
2052 dtc->irq = platform_get_irq(to_platform_device(cmn->dev), idx); in arm_cmn_init_dtc()
2053 if (dtc->irq < 0) in arm_cmn_init_dtc()
2054 return dtc->irq; in arm_cmn_init_dtc()
2056 writel_relaxed(CMN_DT_DTC_CTL_DT_EN, dtc->base + CMN_DT_DTC_CTL); in arm_cmn_init_dtc()
2057 writel_relaxed(CMN_DT_PMCR_PMU_EN | CMN_DT_PMCR_OVFL_INTR_EN, dtc->base + CMN_DT_PMCR); in arm_cmn_init_dtc()
2058 writeq_relaxed(0, dtc->base + CMN_DT_PMCCNTR); in arm_cmn_init_dtc()
2059 writel_relaxed(0x1ff, dtc->base + CMN_DT_PMOVSR_CLR); in arm_cmn_init_dtc()
2069 cmp = dna->type - dnb->type; in arm_cmn_node_cmp()
2071 cmp = dna->logid - dnb->logid; in arm_cmn_node_cmp()
2080 cmn->dtc = devm_kcalloc(cmn->dev, cmn->num_dtcs, sizeof(cmn->dtc[0]), GFP_KERNEL); in arm_cmn_init_dtcs()
2081 if (!cmn->dtc) in arm_cmn_init_dtcs()
2082 return -ENOMEM; in arm_cmn_init_dtcs()
2084 sort(cmn->dns, cmn->num_dns, sizeof(cmn->dns[0]), arm_cmn_node_cmp, NULL); in arm_cmn_init_dtcs()
2086 cmn->xps = arm_cmn_node(cmn, CMN_TYPE_XP); in arm_cmn_init_dtcs()
2088 if (cmn->part == PART_CMN600 && cmn->num_dtcs > 1) { in arm_cmn_init_dtcs()
2091 for (int i = 0; i < cmn->num_dtcs; i++) in arm_cmn_init_dtcs()
2092 arm_cmn_node_to_xp(cmn, dn + i)->dtc = i; in arm_cmn_init_dtcs()
2095 for (dn = cmn->dns; dn->type; dn++) { in arm_cmn_init_dtcs()
2096 if (dn->type == CMN_TYPE_XP) in arm_cmn_init_dtcs()
2100 dn->dtc = xp->dtc; in arm_cmn_init_dtcs()
2101 dn->dtm = xp->dtm; in arm_cmn_init_dtcs()
2102 if (cmn->multi_dtm) in arm_cmn_init_dtcs()
2103 dn->dtm += arm_cmn_nid(cmn, dn->id).port / 2; in arm_cmn_init_dtcs()
2105 if (dn->type == CMN_TYPE_DTC) { in arm_cmn_init_dtcs()
2112 /* To the PMU, RN-Ds don't add anything over RN-Is, so smoosh them together */ in arm_cmn_init_dtcs()
2113 if (dn->type == CMN_TYPE_RND) in arm_cmn_init_dtcs()
2114 dn->type = CMN_TYPE_RNI; in arm_cmn_init_dtcs()
2116 /* We split the RN-I off already, so let the CCLA part match CCLA events */ in arm_cmn_init_dtcs()
2117 if (dn->type == CMN_TYPE_CCLA_RNI) in arm_cmn_init_dtcs()
2118 dn->type = CMN_TYPE_CCLA; in arm_cmn_init_dtcs()
2130 if (cmn->part == PART_CMN650 || cmn->part == PART_CI700) in arm_cmn_dtc_domain()
2139 u64 reg = readq_relaxed(cmn->base + offset + CMN_NODE_INFO); in arm_cmn_init_node_info()
2141 node->type = FIELD_GET(CMN_NI_NODE_TYPE, reg); in arm_cmn_init_node_info()
2142 node->id = FIELD_GET(CMN_NI_NODE_ID, reg); in arm_cmn_init_node_info()
2143 node->logid = FIELD_GET(CMN_NI_LOGICAL_ID, reg); in arm_cmn_init_node_info()
2145 node->pmu_base = cmn->base + offset + CMN_PMU_OFFSET; in arm_cmn_init_node_info()
2147 if (node->type == CMN_TYPE_CFG) in arm_cmn_init_node_info()
2149 else if (node->type == CMN_TYPE_XP) in arm_cmn_init_node_info()
2154 dev_dbg(cmn->dev, "node%*c%#06hx%*ctype:%-#6x id:%-4hd off:%#x\n", in arm_cmn_init_node_info()
2155 (level * 2) + 1, ' ', node->id, 5 - (level * 2), ' ', in arm_cmn_init_node_info()
2156 node->type, node->logid, offset); in arm_cmn_init_node_info()
2185 return -ENODEV; in arm_cmn_discover()
2187 cfg_region = cmn->base + rgn_offset; in arm_cmn_discover()
2192 if (cmn->part && cmn->part != part) in arm_cmn_discover()
2193 dev_warn(cmn->dev, in arm_cmn_discover()
2195 cmn->part, part); in arm_cmn_discover()
2196 cmn->part = part; in arm_cmn_discover()
2198 dev_warn(cmn->dev, "Unknown part number: 0x%x\n", part); in arm_cmn_discover()
2201 cmn->rev = FIELD_GET(CMN_CFGM_PID2_REVISION, reg); in arm_cmn_discover()
2204 cmn->multi_dtm = reg & CMN_INFO_MULTIPLE_DTM_EN; in arm_cmn_discover()
2205 cmn->rsp_vc_num = FIELD_GET(CMN_INFO_RSP_VC_NUM, reg); in arm_cmn_discover()
2206 cmn->dat_vc_num = FIELD_GET(CMN_INFO_DAT_VC_NUM, reg); in arm_cmn_discover()
2209 cmn->snp_vc_num = FIELD_GET(CMN_INFO_SNP_VC_NUM, reg); in arm_cmn_discover()
2210 cmn->req_vc_num = FIELD_GET(CMN_INFO_REQ_VC_NUM, reg); in arm_cmn_discover()
2216 cmn->num_xps = child_count; in arm_cmn_discover()
2217 cmn->num_dns = cmn->num_xps; in arm_cmn_discover()
2220 for (i = 0; i < cmn->num_xps; i++) { in arm_cmn_discover()
2224 reg = readq_relaxed(cmn->base + xp_offset[i] + CMN_CHILD_INFO); in arm_cmn_discover()
2225 cmn->num_dns += FIELD_GET(CMN_CI_CHILD_COUNT, reg); in arm_cmn_discover()
2231 * bound, account for double the number of non-XP nodes. in arm_cmn_discover()
2233 dn = devm_kcalloc(cmn->dev, cmn->num_dns * 2 - cmn->num_xps, in arm_cmn_discover()
2236 return -ENOMEM; in arm_cmn_discover()
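	(Aside: the over-allocation above is easy to account for — at this point num_dns counts the XPs plus every child node, and since each non-XP child may later be split into two entries (the dn[1].type = arm_cmn_subtype(dn->type) assignment near line 2361), a safe upper bound is num_xps + 2 * (num_dns - num_xps) = num_dns * 2 - num_xps, which is exactly the kcalloc() size used.)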
2238 /* Initial safe upper bound on DTMs for any possible mesh layout */ in arm_cmn_discover()
2239 i = cmn->num_xps; in arm_cmn_discover()
2240 if (cmn->multi_dtm) in arm_cmn_discover()
2241 i += cmn->num_xps + 1; in arm_cmn_discover()
2242 dtm = devm_kcalloc(cmn->dev, i, sizeof(*dtm), GFP_KERNEL); in arm_cmn_discover()
2244 return -ENOMEM; in arm_cmn_discover()
2247 cmn->dns = dn; in arm_cmn_discover()
2248 cmn->dtms = dtm; in arm_cmn_discover()
2249 for (i = 0; i < cmn->num_xps; i++) { in arm_cmn_discover()
2250 void __iomem *xp_region = cmn->base + xp_offset[i]; in arm_cmn_discover()
2261 if (xp->id == (1 << 3)) in arm_cmn_discover()
2262 cmn->mesh_x = xp->logid; in arm_cmn_discover()
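	(Aside: this is a compact trick for discovering the mesh width — given the node ID layout near the top of the file, where the Y field starts at bit 3, an ID of exactly 1 << 3 belongs to the XP at (x, y) = (0, 1), and since XP logical IDs follow the same row-major order used by arm_cmn_node_to_xp() above, that XP's logid is mesh_x * 1 + 0 = mesh_x.)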
2264 if (cmn->part == PART_CMN600) in arm_cmn_discover()
2265 xp->dtc = -1; in arm_cmn_discover()
2267 xp->dtc = arm_cmn_dtc_domain(cmn, xp_region); in arm_cmn_discover()
2269 xp->dtm = dtm - cmn->dtms; in arm_cmn_discover()
2276 * with port 2 connected, for the HN-D. in arm_cmn_discover()
2282 if (cmn->multi_dtm && (xp_ports & 0xc)) in arm_cmn_discover()
2284 if (cmn->multi_dtm && (xp_ports & 0x30)) in arm_cmn_discover()
2287 cmn->ports_used |= xp_ports; in arm_cmn_discover()
2297 * we haven't a clue how to power up arbitrary CHI requesters. in arm_cmn_discover()
2298 * As of CMN-600r1 these could only be RN-SAMs or CXLAs, in arm_cmn_discover()
2301 * but they don't go to regular XP DTMs, and they depend on in arm_cmn_discover()
2305 dev_dbg(cmn->dev, "ignoring external node %llx\n", reg); in arm_cmn_discover()
2311 * A child offset of 0 can only occur on CMN-600; otherwise it in arm_cmn_discover()
2315 if (reg == 0 && cmn->part != PART_CMN600) { in arm_cmn_discover()
2316 dev_dbg(cmn->dev, "bogus child pointer?\n"); in arm_cmn_discover()
2322 switch (dn->type) { in arm_cmn_discover()
2324 cmn->num_dtcs++; in arm_cmn_discover()
2361 dn[1].type = arm_cmn_subtype(dn->type); in arm_cmn_discover()
2366 dev_err(cmn->dev, "invalid device node type: 0x%x\n", dn->type); in arm_cmn_discover()
2367 return -ENODEV; in arm_cmn_discover()
2373 cmn->num_dns = dn - cmn->dns; in arm_cmn_discover()
2375 /* Cheeky +1 to help terminate pointer-based iteration later */ in arm_cmn_discover()
2376 sz = (void *)(dn + 1) - (void *)cmn->dns; in arm_cmn_discover()
2377 dn = devm_krealloc(cmn->dev, cmn->dns, sz, GFP_KERNEL); in arm_cmn_discover()
2379 cmn->dns = dn; in arm_cmn_discover()
2381 sz = (void *)dtm - (void *)cmn->dtms; in arm_cmn_discover()
2382 dtm = devm_krealloc(cmn->dev, cmn->dtms, sz, GFP_KERNEL); in arm_cmn_discover()
2384 cmn->dtms = dtm; in arm_cmn_discover()
2390 if (!cmn->mesh_x) in arm_cmn_discover()
2391 cmn->mesh_x = cmn->num_xps; in arm_cmn_discover()
2392 cmn->mesh_y = cmn->num_xps / cmn->mesh_x; in arm_cmn_discover()
2395 if (cmn->num_xps == 1) in arm_cmn_discover()
2396 dev_warn(cmn->dev, "1x1 config not fully supported, translate XP events manually\n"); in arm_cmn_discover()
2398 dev_dbg(cmn->dev, "periph_id part 0x%03x revision %d\n", cmn->part, cmn->rev); in arm_cmn_discover()
2399 reg = cmn->ports_used; in arm_cmn_discover()
2400 dev_dbg(cmn->dev, "mesh %dx%d, ID width %d, ports %6pbl%s\n", in arm_cmn_discover()
2401 cmn->mesh_x, cmn->mesh_y, arm_cmn_xyidbits(cmn), &reg, in arm_cmn_discover()
2402 cmn->multi_dtm ? ", multi-DTM" : ""); in arm_cmn_discover()
2413 return -EINVAL; in arm_cmn600_acpi_probe()
2417 return -EINVAL; in arm_cmn600_acpi_probe()
2427 cmn->base = devm_ioremap(cmn->dev, cfg->start, resource_size(cfg)); in arm_cmn600_acpi_probe()
2428 if (!cmn->base) in arm_cmn600_acpi_probe()
2429 return -ENOMEM; in arm_cmn600_acpi_probe()
2431 return root->start - cfg->start; in arm_cmn600_acpi_probe()
2438 return of_property_read_u32(np, "arm,root-node", &rootnode) ?: rootnode; in arm_cmn600_of_probe()
2448 cmn = devm_kzalloc(&pdev->dev, sizeof(*cmn), GFP_KERNEL); in arm_cmn_probe()
2450 return -ENOMEM; in arm_cmn_probe()
2452 cmn->dev = &pdev->dev; in arm_cmn_probe()
2453 cmn->part = (unsigned long)device_get_match_data(cmn->dev); in arm_cmn_probe()
2456 if (cmn->part == PART_CMN600 && has_acpi_companion(cmn->dev)) { in arm_cmn_probe()
2460 cmn->base = devm_platform_ioremap_resource(pdev, 0); in arm_cmn_probe()
2461 if (IS_ERR(cmn->base)) in arm_cmn_probe()
2462 return PTR_ERR(cmn->base); in arm_cmn_probe()
2463 if (cmn->part == PART_CMN600) in arm_cmn_probe()
2464 rootnode = arm_cmn600_of_probe(pdev->dev.of_node); in arm_cmn_probe()
2481 cmn->cpu = cpumask_local_spread(0, dev_to_node(cmn->dev)); in arm_cmn_probe()
2482 cmn->pmu = (struct pmu) { in arm_cmn_probe()
2501 name = devm_kasprintf(cmn->dev, GFP_KERNEL, "arm_cmn_%d", this_id); in arm_cmn_probe()
2503 return -ENOMEM; in arm_cmn_probe()
2505 err = cpuhp_state_add_instance(arm_cmn_hp_state, &cmn->cpuhp_node); in arm_cmn_probe()
2509 err = perf_pmu_register(&cmn->pmu, name, -1); in arm_cmn_probe()
2511 cpuhp_state_remove_instance_nocalls(arm_cmn_hp_state, &cmn->cpuhp_node); in arm_cmn_probe()
2522 writel_relaxed(0, cmn->dtc[0].base + CMN_DT_DTC_CTL); in arm_cmn_remove()
2524 perf_pmu_unregister(&cmn->pmu); in arm_cmn_remove()
2525 cpuhp_state_remove_instance_nocalls(arm_cmn_hp_state, &cmn->cpuhp_node); in arm_cmn_remove()
2526 debugfs_remove(cmn->debug); in arm_cmn_remove()
2532 { .compatible = "arm,cmn-600", .data = (void *)PART_CMN600 },
2533 { .compatible = "arm,cmn-650" },
2534 { .compatible = "arm,cmn-700" },
2535 { .compatible = "arm,ci-700" },
2553 .name = "arm-cmn",
2573 arm_cmn_debugfs = debugfs_create_dir("arm-cmn", NULL); in arm_cmn_init()
2594 MODULE_DESCRIPTION("Arm CMN-600 PMU driver");