Lines Matching +full:two +full:- +full:lane
1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 2021-2023 Alibaba Inc.
27 * Event Counter Data Select includes two parts:
28 * - 27-24: Group number (4-bit: 0..0x7)
29 * - 23-16: Event number (8-bit: 0..0x13) within the Group
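
Because bits 27-16 form one contiguous select field, an eventid that carries the group number in its high byte can be placed with a single FIELD_PREP(), as the driver does when programming the event counter control register. A minimal sketch, assuming the mask is defined as GENMASK(27, 16) to match the comment; the eventid value 0x602 (Group #6, event 0x02) is illustrative:

#include <linux/bitfield.h>

#define DWC_PCIE_CNT_EVENT_SEL	GENMASK(27, 16)	/* 27-24 group, 23-16 event */

/* 0x602 lands with 0x6 in bits 27-24 and 0x02 in bits 23-16 */
u32 sel = FIELD_PREP(DWC_PCIE_CNT_EVENT_SEL, 0x602);
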
66 #define DWC_PCIE_EVENT_ID(event) FIELD_GET(DWC_PCIE_CONFIG_EVENTID, (event)->attr.config)
67 #define DWC_PCIE_EVENT_TYPE(event) FIELD_GET(DWC_PCIE_CONFIG_TYPE, (event)->attr.config)
68 #define DWC_PCIE_EVENT_LANE(event) FIELD_GET(DWC_PCIE_CONFIG_LANE, (event)->attr.config)
In cpumask_show():
119 return cpumap_print_to_pagebuf(true, buf, cpumask_of(pcie_pmu->on_cpu));
138 PMU_FORMAT_ATTR(eventid, "config:0-15");
139 PMU_FORMAT_ATTR(type, "config:16-19");
140 PMU_FORMAT_ATTR(lane, "config:20-27");
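
The three format strings map the sysfs event parameters onto one perf config word, and the FIELD_GET() macros at lines 66-68 decode it again. A sketch of the round trip, assuming the DWC_PCIE_CONFIG_* masks mirror the format strings (eventid in bits 0-15, type in 16-19, lane in 20-27); the PMU instance name and event values are illustrative:

#include <linux/bitfield.h>

#define DWC_PCIE_CONFIG_EVENTID	GENMASK(15, 0)
#define DWC_PCIE_CONFIG_TYPE	GENMASK(19, 16)
#define DWC_PCIE_CONFIG_LANE	GENMASK(27, 20)

/* What, e.g., perf stat -e dwc_rootport_3018/eventid=0x602,type=0x1,lane=0/
 * encodes into event->attr.config: */
u64 config = FIELD_PREP(DWC_PCIE_CONFIG_EVENTID, 0x602) |
	     FIELD_PREP(DWC_PCIE_CONFIG_TYPE, 0x1) |
	     FIELD_PREP(DWC_PCIE_CONFIG_LANE, 0);

u16 eventid = FIELD_GET(DWC_PCIE_CONFIG_EVENTID, config);	/* 0x602 */
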
158 u8 lane;
In dwc_pcie_event_show():
168 if (eattr->type == DWC_PCIE_LANE_EVENT)
169 return sysfs_emit(buf, "eventid=0x%x,type=0x%x,lane=?\n",
170 eattr->eventid, eattr->type);
171 else if (eattr->type == DWC_PCIE_TIME_BASE_EVENT)
172 return sysfs_emit(buf, "eventid=0x%x,type=0x%x\n",
173 eattr->eventid, eattr->type);
183 .lane = _lane, \
211 * Leave it to the user to specify the lane ID to avoid generating
212 * a list of hundreds of events.
In dwc_pcie_pmu_lane_event_enable():
261 struct pci_dev *pdev = pcie_pmu->pdev;
262 u16 ras_des_offset = pcie_pmu->ras_des_offset;
In dwc_pcie_pmu_time_based_event_enable():
277 struct pci_dev *pdev = pcie_pmu->pdev;
278 u16 ras_des_offset = pcie_pmu->ras_des_offset;
In dwc_pcie_pmu_read_lane_event_counter():
287 struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
288 struct pci_dev *pdev = pcie_pmu->pdev;
289 u16 ras_des_offset = pcie_pmu->ras_des_offset;
In dwc_pcie_pmu_read_time_based_counter():
299 struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
300 struct pci_dev *pdev = pcie_pmu->pdev;
302 u16 ras_des_offset = pcie_pmu->ras_des_offset;
307 * The 64-bit value of the data counter is spread across two
328 * The Group#1 event measures the amount of data processed in 16-byte
329 * units. Simplify the end-user interface by multiplying the counter
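
The two comments describe the read protocol: the high half is read before and after the low half and the read is retried until it is stable, and Group#1 data counts (reported by hardware in 16-byte units) are scaled up to bytes. A sketch of the pattern as a fragment using the pdev/ras_des_offset variables from the listing above; the DATA_REG_LOW/DATA_REG_HIGH offsets and the event_group() helper are illustrative names, not taken from the driver:

u32 lo, hi, tmp;
u64 val;

do {
	pci_read_config_dword(pdev, ras_des_offset + DATA_REG_HIGH, &hi);
	pci_read_config_dword(pdev, ras_des_offset + DATA_REG_LOW, &lo);
	pci_read_config_dword(pdev, ras_des_offset + DATA_REG_HIGH, &tmp);
} while (tmp != hi);	/* high half moved: the low read straddled a carry */

val = ((u64)hi << 32) | lo;
if (event_group(event) == 1)	/* hypothetical helper: Group#1 only */
	val *= 16;		/* 16-byte units -> bytes */
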
In dwc_pcie_pmu_event_update():
340 struct hw_perf_event *hwc = &event->hw;
345 prev = local64_read(&hwc->prev_count);
352 } while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);
354 delta = (now - prev) & DWC_PCIE_MAX_PERIOD;
355 /* 32-bit counter for Lane Event Counting */
359 local64_add(delta, &event->count);
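
The masked subtraction makes the delta wrap-safe in unsigned arithmetic. A worked example for the 32-bit lane counter, assuming its period mask is GENMASK_ULL(31, 0):

u64 prev = 0xfffffff0, now = 0x10;

/* now - prev underflows to 0xffffffff00000020; masking to the
 * counter width recovers the 0x20 counts that spanned the wrap. */
u64 delta = (now - prev) & GENMASK_ULL(31, 0);	/* delta == 0x20 */
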
In dwc_pcie_pmu_event_init():
364 struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
367 u32 lane;
369 if (event->attr.type != event->pmu->type)
370 return -ENOENT;
374 return -EINVAL;
377 if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK)
378 return -EINVAL;
380 if (event->group_leader != event &&
381 !is_software_event(event->group_leader))
382 return -EINVAL;
384 for_each_sibling_event(sibling, event->group_leader) {
385 if (sibling->pmu != event->pmu && !is_software_event(sibling))
386 return -EINVAL;
390 return -EINVAL;
393 lane = DWC_PCIE_EVENT_LANE(event);
394 if (lane < 0 || lane >= pcie_pmu->nr_lanes)
395 return -EINVAL;
398 event->cpu = pcie_pmu->on_cpu;
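
Taken together, these checks require a per-CPU, non-sampling, non-task event whose group contains only this PMU's events and software events, with the lane ID in range for the port's link width. A user-space sketch of a perf_event_attr that would pass them; the dynamic PMU type number and the type-field value for lane events (assumed to be 0x1, matching the driver's enum order) are illustrative:

#include <string.h>
#include <linux/perf_event.h>

static void fill_attr(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->type = 24;		/* read from .../dwc_rootport_3018/type */
	attr->config = 0x602 |		/* eventid, config:0-15 */
		       (1ULL << 16) |	/* lane event type, config:16-19 (assumed) */
		       (0ULL << 20);	/* lane 0, config:20-27 */
	/* leave sample_period at 0; open with cpu >= 0 and pid == -1 */
}
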
In dwc_pcie_pmu_event_start():
405 struct hw_perf_event *hwc = &event->hw;
406 struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
409 hwc->state = 0;
410 local64_set(&hwc->prev_count, 0);
In dwc_pcie_pmu_event_stop():
420 struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
422 struct hw_perf_event *hwc = &event->hw;
424 if (event->hw.state & PERF_HES_STOPPED)
433 hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
In dwc_pcie_pmu_event_add():
438 struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
439 struct pci_dev *pdev = pcie_pmu->pdev;
440 struct hw_perf_event *hwc = &event->hw;
443 int lane = DWC_PCIE_EVENT_LANE(event);
444 u16 ras_des_offset = pcie_pmu->ras_des_offset;
448 if (pcie_pmu->event[type])
449 return -ENOSPC;
451 pcie_pmu->event[type] = event;
452 hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
457 FIELD_PREP(DWC_PCIE_CNT_LANE_SEL, lane) |
In dwc_pcie_pmu_event_del():
486 struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
491 pcie_pmu->event[type] = NULL;
In dwc_pcie_find_dev_info():
508 if (dev_info->pdev == pdev)
In dwc_pcie_unregister_pmu():
518 perf_pmu_unregister(&pcie_pmu->pmu);
In dwc_pcie_match_des_cap():
530 for (vid = dwc_pcie_vendor_ids; vid->vendor_id; vid++) {
531 vsec = pci_find_vsec_capability(pdev, vid->vendor_id,
544 "Detected PCIe Vendor-Specific Extended Capability RAS DES\n");
In dwc_pcie_unregister_dev():
550 platform_device_unregister(dev_info->plat_dev);
551 list_del(&dev_info->dev_node);
In dwc_pcie_register_dev():
561 bdf = PCI_DEVID(pdev->bus->number, pdev->devfn);
570 return -ENOMEM;
573 dev_info->plat_dev = plat_dev;
574 dev_info->pdev = pdev;
575 list_add(&dev_info->dev_node, &dwc_pcie_dev_info_head);
In dwc_pcie_pmu_probe():
611 struct pci_dev *pdev = plat_dev->dev.platform_data;
618 vsec = pci_find_vsec_capability(pdev, pdev->vendor,
621 bdf = PCI_DEVID(pdev->bus->number, pdev->devfn);
622 name = devm_kasprintf(&plat_dev->dev, GFP_KERNEL, "dwc_rootport_%x", bdf);
624 return -ENOMEM;
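
PCI_DEVID() packs bus and devfn into a 16-bit ID, (bus << 8) | devfn, so the PMU name encodes the Root Port's BDF. A worked example; the 0000:30:03.0 address is illustrative:

/* Root Port 0000:30:03.0 */
u16 bdf = PCI_DEVID(0x30, PCI_DEVFN(3, 0));	/* (0x30 << 8) | 0x18 = 0x3018 */
/* devm_kasprintf(..., "dwc_rootport_%x", bdf) -> "dwc_rootport_3018" */
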
In dwc_pcie_pmu_probe(), continued:
626 pcie_pmu = devm_kzalloc(&plat_dev->dev, sizeof(*pcie_pmu), GFP_KERNEL);
628 return -ENOMEM;
630 pcie_pmu->pdev = pdev;
631 pcie_pmu->ras_des_offset = vsec;
632 pcie_pmu->nr_lanes = pcie_get_width_cap(pdev);
633 pcie_pmu->on_cpu = -1;
634 pcie_pmu->pmu = (struct pmu){
636 .parent = &pdev->dev,
651 &pcie_pmu->cpuhp_node);
658 ret = devm_add_action_or_reset(&plat_dev->dev,
660 &pcie_pmu->cpuhp_node);
664 ret = perf_pmu_register(&pcie_pmu->pmu, name, -1);
669 ret = devm_add_action_or_reset(&plat_dev->dev, dwc_pcie_unregister_pmu,
In dwc_pcie_pmu_online_cpu():
682 if (pcie_pmu->on_cpu == -1)
683 pcie_pmu->on_cpu = cpumask_local_spread(
684 0, dev_to_node(&pcie_pmu->pdev->dev));
In dwc_pcie_pmu_offline_cpu():
699 if (cpu != pcie_pmu->on_cpu)
702 pcie_pmu->on_cpu = -1;
703 pdev = pcie_pmu->pdev;
704 node = dev_to_node(&pdev->dev);
717 perf_pmu_migrate_context(&pcie_pmu->pmu, cpu, target);
718 pcie_pmu->on_cpu = target;
In dwc_pcie_pmu_init():
747 return -ENODEV;
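
The online/offline callbacks above are wired into the CPU hotplug core from the module init path. A minimal sketch of how such a multi-instance hotplug state is typically registered; the state name string is illustrative, and each PMU instance would later attach its cpuhp_node via cpuhp_state_add_instance():

#include <linux/cpuhotplug.h>

static enum cpuhp_state dwc_pcie_pmu_hp_state;

static int __init hotplug_setup(void)
{
	int ret;

	/* Dynamic state: returns the allocated state number on success */
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "perf/dwc_pcie_pmu:online",
				      dwc_pcie_pmu_online_cpu,
				      dwc_pcie_pmu_offline_cpu);
	if (ret < 0)
		return ret;

	dwc_pcie_pmu_hp_state = ret;
	return 0;
}
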