Lines Matching +full:rpm +full:- +full:stats (all hits below are from the Marvell CGX/RPM driver, drivers/net/ethernet/marvell/octeontx2/af/cgx.c)

1 // SPDX-License-Identifier: GPL-2.0
24 #define DRV_NAME "Marvell-CGX/RPM"
25 #define DRV_STRING "Marvell CGX/RPM Driver"
90 return (cgx->pdev->device == PCI_DEVID_CN10K_RPM) || in is_dev_rpm()
91 (cgx->pdev->device == PCI_DEVID_CN10KB_RPM); in is_dev_rpm()
96 if (!cgx || lmac_id < 0 || lmac_id >= cgx->max_lmac_per_mac) in is_lmac_valid()
98 return test_bit(lmac_id, &cgx->lmac_bmap); in is_lmac_valid()
108 for_each_set_bit(tmp, &cgx->lmac_bmap, cgx->max_lmac_per_mac) { in get_sequence_id_of_lmac()
122 return ((struct cgx *)cgxd)->mac_ops; in get_mac_ops()
127 return ((struct cgx *)cgxd)->fifo_len; in cgx_get_fifo_len()
132 writeq(val, cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) + in cgx_write()
138 return readq(cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) + in cgx_read()
144 if (!cgx || lmac_id >= cgx->max_lmac_per_mac) in lmac_pdata()
147 return cgx->lmac_idmap[lmac_id]; in lmac_pdata()
153 int idmax = -ENODEV; in cgx_get_cgxcnt_max()
156 if (cgx_dev->cgx_id > idmax) in cgx_get_cgxcnt_max()
157 idmax = cgx_dev->cgx_id; in cgx_get_cgxcnt_max()
170 return -ENODEV; in cgx_get_lmac_cnt()
172 return cgx->lmac_count; in cgx_get_lmac_cnt()
180 if (cgx_dev->cgx_id == cgx_id) in cgx_get_pdata()
212 return -EINVAL; in cgx_get_cgxid()
214 return cgx->cgx_id; in cgx_get_cgxid()
233 if (cgx->pdev->subsystem_device != PCI_SUBSYS_DEVID_98XX) in cgx_get_nix_resetbit()
236 first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac); in cgx_get_nix_resetbit()
237 p2x = cgx_lmac_get_p2x(cgx->cgx_id, first_lmac); in cgx_get_nix_resetbit()
256 return -ENODEV; in cgx_get_link_info()
258 *linfo = lmac->link_info; in cgx_get_link_info()
271 return -ENODEV; in cgx_lmac_addr_set()
274 mac_ops = cgx_dev->mac_ops; in cgx_lmac_addr_set()
283 index = id * lmac->mac_to_index_bmap.max; in cgx_lmac_addr_set()
306 mac_ops = cgx->mac_ops; in cgx_read_dmac_ctrl()
320 mac_ops = cgx->mac_ops; in cgx_read_dmac_entry()
334 return -ENODEV; in cgx_lmac_addr_add()
336 mac_ops = cgx_dev->mac_ops; in cgx_lmac_addr_add()
338 idx = rvu_alloc_rsrc(&lmac->mac_to_index_bmap); in cgx_lmac_addr_add()
344 index = id * lmac->mac_to_index_bmap.max + idx; in cgx_lmac_addr_add()
357 lmac->mcast_filters_count++; in cgx_lmac_addr_add()
358 } else if (!lmac->mcast_filters_count) { in cgx_lmac_addr_add()
376 return -ENODEV; in cgx_lmac_addr_reset()
378 mac_ops = cgx_dev->mac_ops; in cgx_lmac_addr_reset()
382 set_bit(0, lmac->mac_to_index_bmap.bmap); in cgx_lmac_addr_reset()
386 index = id * lmac->mac_to_index_bmap.max + index; in cgx_lmac_addr_reset()
412 return -ENODEV; in cgx_lmac_addr_update()
414 mac_ops = cgx_dev->mac_ops; in cgx_lmac_addr_update()
416 if (index >= lmac->mac_to_index_bmap.max) in cgx_lmac_addr_update()
417 return -EINVAL; in cgx_lmac_addr_update()
420 if (!test_bit(index, lmac->mac_to_index_bmap.bmap)) in cgx_lmac_addr_update()
421 return -EINVAL; in cgx_lmac_addr_update()
425 index = id * lmac->mac_to_index_bmap.max + index; in cgx_lmac_addr_update()
445 return -ENODEV; in cgx_lmac_addr_del()
447 mac_ops = cgx_dev->mac_ops; in cgx_lmac_addr_del()
449 if (index >= lmac->mac_to_index_bmap.max) in cgx_lmac_addr_del()
450 return -EINVAL; in cgx_lmac_addr_del()
456 rvu_free_rsrc(&lmac->mac_to_index_bmap, index); in cgx_lmac_addr_del()
460 index = id * lmac->mac_to_index_bmap.max + index; in cgx_lmac_addr_del()
467 lmac->mcast_filters_count--; in cgx_lmac_addr_del()
469 if (!lmac->mcast_filters_count) { in cgx_lmac_addr_del()
487 return lmac->mac_to_index_bmap.max; in cgx_lmac_addr_max_entries_get()
501 mac_ops = cgx_dev->mac_ops; in cgx_lmac_addr_get()
505 index = id * lmac->mac_to_index_bmap.max; in cgx_lmac_addr_get()
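
Note on the recurring expression id * lmac->mac_to_index_bmap.max + index in the cgx_lmac_addr_*() hits above: it partitions the shared DMAC filter CAM into equal per-LMAC windows, with the window size set in cgx_lmac_init() further down as dmac_filter_count / lmac_count. As a worked example under assumed numbers: with 32 filter entries and 4 LMACs, each LMAC owns 8 slots, so slot 3 of the LMAC with sequence id 2 maps to CAM index 2 * 8 + 3 = 19.
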
516 return -ENODEV; in cgx_set_pkind()
518 cgx_write(cgx, lmac_id, cgx->mac_ops->rxid_map_offset, (pkind & 0x3F)); in cgx_set_pkind()
537 fifo_len = cgx->fifo_len; in cgx_get_lmac_fifo_len()
538 num_lmacs = cgx->mac_ops->get_nr_lmacs(cgx); in cgx_get_lmac_fifo_len()
565 return -ENODEV; in cgx_lmac_internal_loopback()
568 if (lmac->lmac_type == LMAC_MODE_SGMII || in cgx_lmac_internal_loopback()
569 lmac->lmac_type == LMAC_MODE_QSGMII) { in cgx_lmac_internal_loopback()
600 max_dmac = lmac->mac_to_index_bmap.max; in cgx_lmac_promisc_config()
603 mac_ops = cgx->mac_ops; in cgx_lmac_promisc_config()
649 return -ENODEV; in cgx_lmac_get_pause_frm_status()
676 if (!bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max)) in cgx_lmac_enadis_rx_pause_fwding()
719 return -ENODEV; in cgx_get_rx_stats()
721 /* pass lmac as 0 for CGX_CMR_RX_STAT9-12 */ in cgx_get_rx_stats()
734 return -ENODEV; in cgx_get_tx_stats()
741 return ((struct cgx *)cgxd)->hw_features; in cgx_features_get()
750 return -ENODEV; in cgx_stats_reset()
754 /* pass lmac as 0 for CGX_CMR_RX_STAT9-12 */ in cgx_stats_reset()
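
The comment repeated in cgx_get_rx_stats() and cgx_stats_reset() reflects that CGX_CMR_RX_STAT9-12 are counters global to the whole CGX block, so they must be accessed with the LMAC index forced to 0. A minimal sketch of the read helper implied by these hits; the register macro CGX_CMRX_RX_STAT0 and the threshold CGX_RX_STAT_GLOBAL_INDEX are assumed names from the driver headers:

int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
{
	struct cgx *cgx = cgxd;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	/* STAT9-12 are per-CGX counters, so ignore the per-LMAC index */
	if (idx >= CGX_RX_STAT_GLOBAL_INDEX)
		lmac_id = 0;

	*rx_stat = cgx_read(cgx, lmac_id, CGX_CMRX_RX_STAT0 + (idx * 8));
	return 0;
}
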
770 if (!linfo->fec) in cgx_set_fec_stats_count()
773 switch (linfo->lmac_type_id) { in cgx_set_fec_stats_count()
787 if (linfo->fec == OTX2_FEC_BASER) in cgx_set_fec_stats_count()
798 int stats, fec_stats_count = 0; in cgx_get_fec_stats() local
803 return -ENODEV; in cgx_get_fec_stats()
805 if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_NONE) in cgx_get_fec_stats()
809 cgx_set_fec_stats_count(&cgx->lmac_idmap[lmac_id]->link_info); in cgx_get_fec_stats()
810 if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) { in cgx_get_fec_stats()
817 for (stats = 0; stats < fec_stats_count; stats++) { in cgx_get_fec_stats()
818 rsp->fec_corr_blks += in cgx_get_fec_stats()
819 cgx_read(cgx, lmac_id, corr_reg + (stats * 8)); in cgx_get_fec_stats()
820 rsp->fec_uncorr_blks += in cgx_get_fec_stats()
821 cgx_read(cgx, lmac_id, uncorr_reg + (stats * 8)); in cgx_get_fec_stats()
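
Taken together, the cgx_get_fec_stats() hits show the corrected and uncorrected FEC block counters being summed per lane, with a different register base for BASE-R FEC than for RS-FEC. A hedged reconstruction of that selection and accumulation; the CGXX_SPUX_* macro names are assumptions taken from the driver's register header:

int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
{
	int stats, fec_stats_count;
	u64 corr_reg, uncorr_reg;
	struct cgx *cgx = cgxd;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_NONE)
		return 0;

	fec_stats_count =
		cgx_set_fec_stats_count(&cgx->lmac_idmap[lmac_id]->link_info);
	if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) {
		corr_reg = CGXX_SPUX_LNX_FEC_CORR_BLOCKS;
		uncorr_reg = CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS;
	} else {
		corr_reg = CGXX_SPUX_RSFEC_CORR;
		uncorr_reg = CGXX_SPUX_RSFEC_UNCORR;
	}
	for (stats = 0; stats < fec_stats_count; stats++) {
		rsp->fec_corr_blks +=
			cgx_read(cgx, lmac_id, corr_reg + (stats * 8));
		rsp->fec_uncorr_blks +=
			cgx_read(cgx, lmac_id, uncorr_reg + (stats * 8));
	}
	return 0;
}
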
832 return -ENODEV; in cgx_lmac_rx_tx_enable()
849 return -ENODEV; in cgx_lmac_tx_enable()
873 return -ENODEV; in cgx_lmac_enadis_pause_frm()
961 return -ENODEV; in verify_lmac_fc_cfg()
964 clear_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap); in verify_lmac_fc_cfg()
966 set_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap); in verify_lmac_fc_cfg()
969 clear_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap); in verify_lmac_fc_cfg()
971 set_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap); in verify_lmac_fc_cfg()
974 if (!rx_pause && bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max)) { in verify_lmac_fc_cfg()
975 dev_warn(&cgx->pdev->dev, in verify_lmac_fc_cfg()
977 return -EPERM; in verify_lmac_fc_cfg()
980 if (!tx_pause && bitmap_weight(lmac->tx_fc_pfvf_bmap.bmap, lmac->tx_fc_pfvf_bmap.max)) { in verify_lmac_fc_cfg()
981 dev_warn(&cgx->pdev->dev, in verify_lmac_fc_cfg()
983 return -EPERM; in verify_lmac_fc_cfg()
996 return -ENODEV; in cgx_lmac_pfc_config()
1026 cfg = cgx_lmac_addr_get(cgx->cgx_id, lmac_id); in cgx_lmac_pfc_config()
1039 return -ENODEV; in cgx_lmac_get_pfc_frm_cfg()
1081 struct cgx *cgx = lmac->cgx; in cgx_fwi_cmd_send()
1087 err = mutex_lock_interruptible(&lmac->cmd_lock); in cgx_fwi_cmd_send()
1092 cmd = cgx_read(cgx, lmac->lmac_id, CGX_COMMAND_REG); in cgx_fwi_cmd_send()
1094 err = -EBUSY; in cgx_fwi_cmd_send()
1102 lmac->cmd_pend = true; in cgx_fwi_cmd_send()
1105 cgx_write(cgx, lmac->lmac_id, CGX_COMMAND_REG, req); in cgx_fwi_cmd_send()
1108 if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend, in cgx_fwi_cmd_send()
1110 dev = &cgx->pdev->dev; in cgx_fwi_cmd_send()
1112 cgx->cgx_id, lmac->lmac_id, FIELD_GET(CMDREG_ID, req)); in cgx_fwi_cmd_send()
1119 *resp = lmac->resp; in cgx_fwi_cmd_send()
1122 mutex_unlock(&lmac->cmd_lock); in cgx_fwi_cmd_send()
1134 return -ENODEV; in cgx_fwi_cmd_generic()
1141 return -EIO; in cgx_fwi_cmd_generic()
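
The cgx_fwi_cmd_send() hits, read alongside the cgx_fwi_event_handler() hits further down, outline the firmware mailbox handshake: the caller takes cmd_lock, checks that no command is already pending in CGX_COMMAND_REG, writes the request, and sleeps on wq_cmd_cmplt; the firmware interrupt handler stores the response in lmac->resp, clears cmd_pend and wakes the waiter, otherwise the wait times out. A condensed sketch of the waiting side (the busy check and error logging trimmed; CGX_CMD_TIMEOUT and the -EIO on timeout are assumptions of this sketch):

static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{
	struct cgx *cgx = lmac->cgx;
	int err;

	/* one firmware command at a time per LMAC */
	err = mutex_lock_interruptible(&lmac->cmd_lock);
	if (err)
		return err;

	lmac->cmd_pend = true;
	cgx_write(cgx, lmac->lmac_id, CGX_COMMAND_REG, req);

	/* the event handler fills lmac->resp and clears cmd_pend before waking us */
	if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend,
				msecs_to_jiffies(CGX_CMD_TIMEOUT)))
		err = -EIO;
	else
		*resp = lmac->resp;

	mutex_unlock(&lmac->cmd_lock);
	return err;
}
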
1188 if (args->duplex == DUPLEX_UNKNOWN) in set_mod_args()
1189 args->duplex = duplex; in set_mod_args()
1190 if (args->speed == SPEED_UNKNOWN) in set_mod_args()
1191 args->speed = speed; in set_mod_args()
1192 if (args->an == AUTONEG_UNKNOWN) in set_mod_args()
1193 args->an = autoneg; in set_mod_args()
1194 args->mode = mode; in set_mod_args()
1195 args->ports = 0; in set_mod_args()
1290 linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat); in link_status_user_format()
1291 linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat); in link_status_user_format()
1292 linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)]; in link_status_user_format()
1293 linfo->an = FIELD_GET(RESP_LINKSTAT_AN, lstat); in link_status_user_format()
1294 linfo->fec = FIELD_GET(RESP_LINKSTAT_FEC, lstat); in link_status_user_format()
1295 linfo->lmac_type_id = FIELD_GET(RESP_LINKSTAT_LMAC_TYPE, lstat); in link_status_user_format()
1297 if (linfo->lmac_type_id >= LMAC_MODE_MAX) { in link_status_user_format()
1298 dev_err(&cgx->pdev->dev, "Unknown lmac_type_id %d reported by firmware on cgx port%d:%d", in link_status_user_format()
1299 linfo->lmac_type_id, cgx->cgx_id, lmac_id); in link_status_user_format()
1300 strscpy(linfo->lmac_type, "Unknown", sizeof(linfo->lmac_type)); in link_status_user_format()
1304 strscpy(linfo->lmac_type, cgx_lmactype_string[linfo->lmac_type_id], in link_status_user_format()
1305 sizeof(linfo->lmac_type)); in link_status_user_format()
1313 struct cgx *cgx = lmac->cgx; in cgx_link_change_handler()
1318 dev = &cgx->pdev->dev; in cgx_link_change_handler()
1320 link_status_user_format(lstat, &event.link_uinfo, cgx, lmac->lmac_id); in cgx_link_change_handler()
1323 event.cgx_id = cgx->cgx_id; in cgx_link_change_handler()
1324 event.lmac_id = lmac->lmac_id; in cgx_link_change_handler()
1327 lmac->link_info = event.link_uinfo; in cgx_link_change_handler()
1328 linfo = &lmac->link_info; in cgx_link_change_handler()
1334 spin_lock(&lmac->event_cb_lock); in cgx_link_change_handler()
1336 if (!lmac->event_cb.notify_link_chg) { in cgx_link_change_handler()
1338 cgx->cgx_id, lmac->lmac_id); in cgx_link_change_handler()
1341 cgx->cgx_id, lmac->lmac_id, err_type); in cgx_link_change_handler()
1344 cgx->cgx_id, lmac->lmac_id, in cgx_link_change_handler()
1345 linfo->link_up ? "UP" : "DOWN", linfo->speed); in cgx_link_change_handler()
1349 if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data)) in cgx_link_change_handler()
1352 spin_unlock(&lmac->event_cb_lock); in cgx_link_change_handler()
1382 cgx = lmac->cgx; in cgx_fwi_event_handler()
1384 /* Clear SW_INT for RPM and CMR_INT for CGX */ in cgx_fwi_event_handler()
1385 offset = cgx->mac_ops->int_register; in cgx_fwi_event_handler()
1386 clear_bit = cgx->mac_ops->int_ena_bit; in cgx_fwi_event_handler()
1388 event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG); in cgx_fwi_event_handler()
1398 lmac->resp = event; in cgx_fwi_event_handler()
1409 lmac->cmd_pend = false; in cgx_fwi_event_handler()
1410 wake_up(&lmac->wq_cmd_cmplt); in cgx_fwi_event_handler()
1422 cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0); in cgx_fwi_event_handler()
1423 cgx_write(lmac->cgx, lmac->lmac_id, offset, clear_bit); in cgx_fwi_event_handler()
1438 return -ENODEV; in cgx_lmac_evh_register()
1440 lmac->event_cb = *cb; in cgx_lmac_evh_register()
1453 return -ENODEV; in cgx_lmac_evh_unregister()
1455 spin_lock_irqsave(&lmac->event_cb_lock, flags); in cgx_lmac_evh_unregister()
1456 lmac->event_cb.notify_link_chg = NULL; in cgx_lmac_evh_unregister()
1457 lmac->event_cb.data = NULL; in cgx_lmac_evh_unregister()
1458 spin_unlock_irqrestore(&lmac->event_cb_lock, flags); in cgx_lmac_evh_unregister()
1472 return -ENXIO; in cgx_get_fwdata_base()
1474 first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac); in cgx_get_fwdata_base()
1490 return -ENODEV; in cgx_set_link_mode()
1495 return -EINVAL; in cgx_set_link_mode()
1515 return -ENXIO; in cgx_set_fec()
1523 cgx->lmac_idmap[lmac_id]->link_info.fec = in cgx_set_fec()
1525 return cgx->lmac_idmap[lmac_id]->link_info.fec; in cgx_set_fec()
1534 return -ENODEV; in cgx_get_phy_fec_stats()
1563 int first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac); in cgx_fwi_read_version()
1572 struct device *dev = &cgx->pdev->dev; in cgx_lmac_verify_fwi_version()
1577 if (!cgx->lmac_count) in cgx_lmac_verify_fwi_version()
1589 return -EIO; in cgx_lmac_verify_fwi_version()
1597 struct device *dev = &cgx->pdev->dev; in cgx_lmac_linkup_work()
1601 for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) { in cgx_lmac_linkup_work()
1605 cgx->cgx_id, i); in cgx_lmac_linkup_work()
1614 return -ENODEV; in cgx_lmac_linkup_start()
1616 queue_work(cgx->cgx_cmd_workq, &cgx->cgx_cmd_work); in cgx_lmac_linkup_start()
1627 return -ENODEV; in cgx_lmac_reset()
1641 struct mac_ops *mac_ops = cgx->mac_ops; in cgx_configure_interrupt()
1646 irq = pci_irq_vector(cgx->pdev, mac_ops->lmac_fwi + in cgx_configure_interrupt()
1647 cnt * mac_ops->irq_offset); in cgx_configure_interrupt()
1648 offset = mac_ops->int_set_reg; in cgx_configure_interrupt()
1649 ena_bit = mac_ops->int_ena_bit; in cgx_configure_interrupt()
1656 err = request_irq(irq, cgx_fwi_event_handler, 0, lmac->name, lmac); in cgx_configure_interrupt()
1661 cgx_write(cgx, lmac->lmac_id, offset, ena_bit); in cgx_configure_interrupt()
1676 return cgx->lmac_idmap[lmac_index]->lmac_id; in cgx_get_lmacid()
1683 return cgx->lmac_bmap; in cgx_get_lmac_bmap()
1695 if (cgx->mac_ops->non_contiguous_serdes_lane) { in cgx_lmac_init()
1704 if (cgx->lmac_count > cgx->max_lmac_per_mac) in cgx_lmac_init()
1705 cgx->lmac_count = cgx->max_lmac_per_mac; in cgx_lmac_init()
1707 for (i = 0; i < cgx->lmac_count; i++) { in cgx_lmac_init()
1710 return -ENOMEM; in cgx_lmac_init()
1711 lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL); in cgx_lmac_init()
1712 if (!lmac->name) { in cgx_lmac_init()
1713 err = -ENOMEM; in cgx_lmac_init()
1716 sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i); in cgx_lmac_init()
1717 if (cgx->mac_ops->non_contiguous_serdes_lane) { in cgx_lmac_init()
1718 lmac->lmac_id = __ffs64(lmac_list); in cgx_lmac_init()
1719 lmac_list &= ~BIT_ULL(lmac->lmac_id); in cgx_lmac_init()
1721 lmac->lmac_id = i; in cgx_lmac_init()
1724 lmac->cgx = cgx; in cgx_lmac_init()
1725 lmac->mac_to_index_bmap.max = in cgx_lmac_init()
1726 cgx->mac_ops->dmac_filter_count / in cgx_lmac_init()
1727 cgx->lmac_count; in cgx_lmac_init()
1729 err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap); in cgx_lmac_init()
1734 set_bit(0, lmac->mac_to_index_bmap.bmap); in cgx_lmac_init()
1736 lmac->rx_fc_pfvf_bmap.max = 128; in cgx_lmac_init()
1737 err = rvu_alloc_bitmap(&lmac->rx_fc_pfvf_bmap); in cgx_lmac_init()
1741 lmac->tx_fc_pfvf_bmap.max = 128; in cgx_lmac_init()
1742 err = rvu_alloc_bitmap(&lmac->tx_fc_pfvf_bmap); in cgx_lmac_init()
1746 init_waitqueue_head(&lmac->wq_cmd_cmplt); in cgx_lmac_init()
1747 mutex_init(&lmac->cmd_lock); in cgx_lmac_init()
1748 spin_lock_init(&lmac->event_cb_lock); in cgx_lmac_init()
1749 err = cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, false); in cgx_lmac_init()
1754 cgx->lmac_idmap[lmac->lmac_id] = lmac; in cgx_lmac_init()
1755 set_bit(lmac->lmac_id, &cgx->lmac_bmap); in cgx_lmac_init()
1756 cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true); in cgx_lmac_init()
1757 lmac->lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac->lmac_id); in cgx_lmac_init()
1761 cgx->mac_ops->mac_x2p_reset(cgx, true); in cgx_lmac_init()
1765 rvu_free_bitmap(&lmac->tx_fc_pfvf_bmap); in cgx_lmac_init()
1767 rvu_free_bitmap(&lmac->rx_fc_pfvf_bmap); in cgx_lmac_init()
1769 rvu_free_bitmap(&lmac->mac_to_index_bmap); in cgx_lmac_init()
1771 kfree(lmac->name); in cgx_lmac_init()
1782 if (cgx->cgx_cmd_workq) { in cgx_lmac_exit()
1783 destroy_workqueue(cgx->cgx_cmd_workq); in cgx_lmac_exit()
1784 cgx->cgx_cmd_workq = NULL; in cgx_lmac_exit()
1788 for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) { in cgx_lmac_exit()
1789 lmac = cgx->lmac_idmap[i]; in cgx_lmac_exit()
1792 cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, false); in cgx_lmac_exit()
1793 cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true); in cgx_lmac_exit()
1794 kfree(lmac->mac_to_index_bmap.bmap); in cgx_lmac_exit()
1795 kfree(lmac->name); in cgx_lmac_exit()
1807 cgx->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg); in cgx_populate_features()
1808 cgx->max_lmac_per_mac = FIELD_GET(CGX_CONST_MAX_LMACS, cfg); in cgx_populate_features()
1811 cgx->hw_features = (RVU_LMAC_FEAT_DMACF | RVU_MAC_RPM | in cgx_populate_features()
1814 cgx->hw_features = (RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_HIGIG2 | in cgx_populate_features()
1820 if (cgx->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10KB_RPM || in cgx_get_rxid_mapoffset()
1834 for_each_set_bit(lmac_id, &cgx->lmac_bmap, cgx->max_lmac_per_mac) in cgx_x2p_reset()
1835 cgx->mac_ops->mac_enadis_rx(cgx, lmac_id, false); in cgx_x2p_reset()
1855 return -ENODEV; in cgx_enadis_rx()
1903 struct device *dev = &pdev->dev; in cgx_probe()
1909 return -ENOMEM; in cgx_probe()
1910 cgx->pdev = pdev; in cgx_probe()
1916 cgx->mac_ops = rpm_get_mac_ops(cgx); in cgx_probe()
1918 cgx->mac_ops = &cgx_mac_ops; in cgx_probe()
1920 cgx->mac_ops->rxid_map_offset = cgx_get_rxid_mapoffset(cgx); in cgx_probe()
1936 cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); in cgx_probe()
1937 if (!cgx->reg_base) { in cgx_probe()
1939 err = -ENOMEM; in cgx_probe()
1943 cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx); in cgx_probe()
1944 if (!cgx->lmac_count) { in cgx_probe()
1945 dev_notice(dev, "CGX %d LMAC count is zero, skipping probe\n", cgx->cgx_id); in cgx_probe()
1946 err = -EOPNOTSUPP; in cgx_probe()
1950 nvec = pci_msix_vec_count(cgx->pdev); in cgx_probe()
1958 cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) in cgx_probe()
1962 INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work); in cgx_probe()
1963 cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0); in cgx_probe()
1964 if (!cgx->cgx_cmd_workq) { in cgx_probe()
1966 err = -ENOMEM; in cgx_probe()
1970 list_add(&cgx->cgx_list, &cgx_list); in cgx_probe()
1975 mutex_init(&cgx->lock); in cgx_probe()
1985 list_del(&cgx->cgx_list); in cgx_probe()
2002 list_del(&cgx->cgx_list); in cgx_remove()