
// SPDX-License-Identifier: GPL-2.0-only

* Copyright (c) 2014-2025 Broadcom

#include <linux/dma-mapping.h>

(TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)

(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)

#define WORDS_PER_BD(p) (p->hw_params->words_per_bd)

#define GENET_TDMA_REG_OFF (priv->hw_params->tdma_offset + \

#define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \

* peripheral registers for CPU-native byte order.
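
/* Hedged sketch (not the driver's code) of why the bcmgenet_writel() comment
 * above matters: on MIPS strapped for big-endian the bus already presents
 * registers in CPU-native byte order, so a raw store is correct, while other
 * platforms need an explicit little-endian store. The helpers below are
 * illustrative stand-ins for the kernel's __raw_writel()/writel().
 */
#include <stdint.h>

static inline uint32_t to_le32_sketch(uint32_t v)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return __builtin_bswap32(v);	/* swap on big-endian hosts */
#else
	return v;			/* already little-endian */
#endif
}

static inline void sketch_writel(uint32_t value, volatile uint32_t *reg,
				 int bus_is_cpu_native)
{
	if (bus_is_cpu_native)
		*reg = value;			/* __raw_writel() analogue */
	else
		*reg = to_le32_sketch(value);	/* writel() analogue */
}
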
* the platform is explicitly configured for 64-bits/LPAE.

return bcmgenet_readl(priv->base +
priv->hw_params->tbuf_offset + TBUF_CTRL);

bcmgenet_writel(val, priv->base +
priv->hw_params->tbuf_offset + TBUF_CTRL);

return bcmgenet_readl(priv->base +
priv->hw_params->tbuf_offset + TBUF_BP_MC);

bcmgenet_writel(val, priv->base +
priv->hw_params->tbuf_offset + TBUF_BP_MC);

return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF +

bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF +

return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF +

bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF +

/* GENET v4 supports 40-bits pointer addressing
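
/* Hedged sketch of what 40-bit descriptor addressing implies: the low 32
 * bits of the DMA address go in one descriptor word and the remaining high
 * bits in a second word. Word names are illustrative, not the hardware's.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dma_addr = (0x12ULL << 32) | 0x34567890ULL; /* 40-bit address */
	uint32_t lo = (uint32_t)(dma_addr & 0xffffffffULL);
	uint32_t hi = (uint32_t)(dma_addr >> 32) & 0xff;     /* upper 8 bits */

	printf("ADDR_LO=0x%08x ADDR_HI=0x%02x\n", lo, hi);
	return 0;
}
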
return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF +

bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF +

return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF +

bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF +

((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4);

size--;

return -EINVAL;

index = f_index * priv->hw_params->hfb_filter_size + offset / 2;

while (size--) {
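
/* Hedged sketch of the indexing used by bcmgenet_hfb_insert_data() above:
 * each 32-bit word of HFB filter RAM covers two packet bytes, so byte
 * `offset` of filter `f_index` lands in word f_index * filter_size +
 * offset / 2, with offset's low bit selecting the byte slot in the word.
 * The filter_size value is an assumed example.
 */
#include <stdio.h>

int main(void)
{
	unsigned int filter_size = 128;		/* words per filter (example) */
	unsigned int f_index = 3, offset = 17;
	unsigned int word = f_index * filter_size + offset / 2;

	printf("byte %u of filter %u -> word %u, slot %u\n",
	       offset, f_index, word, offset & 1);
	return 0;
}
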
struct ethtool_rx_flow_spec *fs = &rule->fs;

f = fs->location + 1;
if (fs->flow_type & FLOW_MAC_EXT) {

&fs->h_ext.h_dest, &fs->m_ext.h_dest,
sizeof(fs->h_ext.h_dest));

if (fs->flow_type & FLOW_EXT) {
if (fs->m_ext.vlan_etype ||
fs->m_ext.vlan_tci) {

&fs->h_ext.vlan_etype,
&fs->m_ext.vlan_etype,
sizeof(fs->h_ext.vlan_etype));

&fs->h_ext.vlan_tci,
&fs->m_ext.vlan_tci,
sizeof(fs->h_ext.vlan_tci));

switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {

&fs->h_u.ether_spec.h_dest,
&fs->m_u.ether_spec.h_dest,
sizeof(fs->h_u.ether_spec.h_dest));

&fs->h_u.ether_spec.h_source,
&fs->m_u.ether_spec.h_source,
sizeof(fs->h_u.ether_spec.h_source));

&fs->h_u.ether_spec.h_proto,
&fs->m_u.ether_spec.h_proto,
sizeof(fs->h_u.ether_spec.h_proto));

&fs->h_u.usr_ip4_spec.tos,
&fs->m_u.usr_ip4_spec.tos,
sizeof(fs->h_u.usr_ip4_spec.tos));

&fs->h_u.usr_ip4_spec.proto,
&fs->m_u.usr_ip4_spec.proto,
sizeof(fs->h_u.usr_ip4_spec.proto));

&fs->h_u.usr_ip4_spec.ip4src,
&fs->m_u.usr_ip4_spec.ip4src,
sizeof(fs->h_u.usr_ip4_spec.ip4src));

&fs->h_u.usr_ip4_spec.ip4dst,
&fs->m_u.usr_ip4_spec.ip4dst,
sizeof(fs->h_u.usr_ip4_spec.ip4dst));
if (!fs->m_u.usr_ip4_spec.l4_4_bytes)

size = sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes);

&fs->h_u.usr_ip4_spec.l4_4_bytes,
&fs->m_u.usr_ip4_spec.l4_4_bytes,

if (fs->ring_cookie == RX_CLS_FLOW_WAKE)

else if (fs->ring_cookie == RX_CLS_FLOW_DISC)
q = priv->hw_params->rx_queues + 1;

q = fs->ring_cookie;

rule->state = BCMGENET_RXNFC_STATE_ENABLED;
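
/* Hedged sketch of the ring_cookie handling visible in the fragment above:
 * the two special ethtool cookies select special destinations, anything
 * else is used as the Rx queue number directly. Mapping RX_CLS_FLOW_WAKE to
 * queue 0 is an assumption here; the driver line that does it is not part
 * of this excerpt.
 */
#include <stdint.h>
#include <stdio.h>

#define RX_CLS_FLOW_DISC	0xffffffffffffffffULL	/* as in ethtool.h */
#define RX_CLS_FLOW_WAKE	0xfffffffffffffffeULL

static unsigned int cookie_to_queue(uint64_t cookie, unsigned int rx_queues)
{
	if (cookie == RX_CLS_FLOW_WAKE)
		return 0;			/* assumed: default queue */
	if (cookie == RX_CLS_FLOW_DISC)
		return rx_queues + 1;		/* one past the last queue */
	return (unsigned int)cookie;		/* plain queue index */
}

int main(void)
{
	printf("wake->%u disc->%u 2->%u\n",
	       cookie_to_queue(RX_CLS_FLOW_WAKE, 4),
	       cookie_to_queue(RX_CLS_FLOW_DISC, 4),
	       cookie_to_queue(2, 4));
	return 0;
}
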
base = f_index * priv->hw_params->hfb_filter_size;
for (i = 0; i < priv->hw_params->hfb_filter_size; i++)

for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++)

INIT_LIST_HEAD(&priv->rxnfc_list);

INIT_LIST_HEAD(&priv->rxnfc_rules[i].list);
priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED;

return clk_prepare_enable(priv->clk);

clk_disable_unprepare(priv->clk);

return -EINVAL;

if (!dev->phydev)
return -ENODEV;

phy_ethtool_ksettings_get(dev->phydev, cmd);

return -EINVAL;

if (!dev->phydev)
return -ENODEV;

return phy_ethtool_ksettings_set(dev->phydev, cmd);

ret = clk_prepare_enable(priv->clk);

priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);

clk_disable_unprepare(priv->clk);

return priv->msg_enable;

priv->msg_enable = level;

struct ethtool_coalesce *ec,

ec->tx_max_coalesced_frames =

ec->rx_max_coalesced_frames =

ec->rx_coalesce_usecs =

for (i = 0; i <= priv->hw_params->rx_queues; i++) {
ring = &priv->rx_rings[i];
ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;

struct bcmgenet_priv *priv = ring->priv;
unsigned int i = ring->index;

struct ethtool_coalesce *ec)

ring->rx_coalesce_usecs = ec->rx_coalesce_usecs;
ring->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
usecs = ring->rx_coalesce_usecs;
pkts = ring->rx_max_coalesced_frames;

if (ec->use_adaptive_rx_coalesce && !ring->dim.use_dim) {
moder = net_dim_get_def_rx_moderation(ring->dim.dim.mode);

ring->dim.use_dim = ec->use_adaptive_rx_coalesce;

struct ethtool_coalesce *ec,

if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
ec->tx_max_coalesced_frames == 0 ||
ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1)
return -EINVAL;

if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0)
return -EINVAL;
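
/* Hedged sketch of the bound checked above: rx_coalesce_usecs is limited to
 * roughly DMA_TIMEOUT_MASK ticks of the ring timeout counter, which
 * suggests a tick of about 8 us (8.192 us on this hardware family).
 * Converting a microsecond request to timer ticks then rounds up like this;
 * the mask value below is an assumed example.
 */
#include <stdio.h>

#define SKETCH_TIMEOUT_MASK 0xffff	/* assumed register field width */

static unsigned int usecs_to_ticks(unsigned int usecs)
{
	/* round up: usecs * 1000 ns, 8192 ns per tick */
	unsigned int ticks = (usecs * 1000u + 8191u) / 8192u;

	return ticks > SKETCH_TIMEOUT_MASK ? SKETCH_TIMEOUT_MASK : ticks;
}

int main(void)
{
	printf("50 us -> %u ticks\n", usecs_to_ticks(50));
	return 0;
}
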
* ethtool knob to do coalescing on a per-queue basis

for (i = 0; i <= priv->hw_params->tx_queues; i++)

ec->tx_max_coalesced_frames,

for (i = 0; i <= priv->hw_params->rx_queues; i++)
bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[i], ec);

epause->autoneg = priv->autoneg_pause;

epause->tx_pause = !(umac_cmd & CMD_TX_PAUSE_IGNORE);
epause->rx_pause = !(umac_cmd & CMD_RX_PAUSE_IGNORE);

epause->tx_pause = priv->tx_pause;
epause->rx_pause = priv->rx_pause;

if (!dev->phydev)
return -ENODEV;

if (!phy_validate_pause(dev->phydev, epause))
return -EINVAL;

priv->autoneg_pause = !!epause->autoneg;
priv->tx_pause = !!epause->tx_pause;
priv->rx_pause = !!epause->rx_pause;

bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause);

BCMGENET_STAT_NETDEV = -1,

.stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \

.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \

.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
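
/* The stat macros above rely on sizeof(((struct foo *)0)->m) to get a
 * member's size without needing an instance; sizeof is an unevaluated
 * context, so the null pointer is never dereferenced. A minimal standalone
 * demonstration of the idiom:
 */
#include <stdio.h>

struct demo_stats {
	unsigned long tx_packets;
	unsigned int rx_errors;
};

#define MEMBER_SIZE(type, m) sizeof(((type *)0)->m)

int main(void)
{
	printf("tx_packets: %zu bytes, rx_errors: %zu bytes\n",
	       MEMBER_SIZE(struct demo_stats, tx_packets),
	       MEMBER_SIZE(struct demo_stats, rx_errors));
	return 0;
}
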
strscpy(info->driver, "bcmgenet", sizeof(info->driver));

return -EOPNOTSUPP;

switch (s->type) {

val = bcmgenet_umac_readl(priv, s->reg_offset);

s->reg_offset);

s->reg_offset);

j += s->stat_sizeof;
p = (char *)priv + s->stat_offset;

dev->netdev_ops->ndo_get_stats(dev);

if (s->type == BCMGENET_STAT_NETDEV)
p = (char *)&dev->stats;

p += s->stat_offset;

s->stat_sizeof == sizeof(unsigned long))

u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;

if (enable && !priv->clk_eee_enabled) {
clk_prepare_enable(priv->clk_eee);
priv->clk_eee_enabled = true;

reg = bcmgenet_readl(priv->base + off);

bcmgenet_writel(reg, priv->base + off);

if (!enable && priv->clk_eee_enabled) {
clk_disable_unprepare(priv->clk_eee);
priv->clk_eee_enabled = false;

priv->eee.eee_enabled = enable;
priv->eee.tx_lpi_enabled = tx_lpi_enabled;

struct ethtool_keee *p = &priv->eee;

return -EOPNOTSUPP;

if (!dev->phydev)
return -ENODEV;

e->tx_lpi_enabled = p->tx_lpi_enabled;
e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);

return phy_ethtool_get_eee(dev->phydev, e);

struct ethtool_keee *p = &priv->eee;

return -EOPNOTSUPP;

if (!dev->phydev)
return -ENODEV;

p->eee_enabled = e->eee_enabled;

if (!p->eee_enabled) {

active = phy_init_eee(dev->phydev, false) >= 0;
bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
bcmgenet_eee_enable_set(dev, active, e->tx_lpi_enabled);

return phy_ethtool_set_eee(dev->phydev, e);

if (cmd->fs.location >= MAX_NUM_OF_FS_RULES &&
cmd->fs.location != RX_CLS_LOC_ANY) {

cmd->fs.location);
return -EINVAL;

switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {

l4_mask = &cmd->fs.m_u.usr_ip4_spec;

if (VALIDATE_MASK(l4_mask->ip4src) ||
VALIDATE_MASK(l4_mask->ip4dst) ||
VALIDATE_MASK(l4_mask->l4_4_bytes) ||
VALIDATE_MASK(l4_mask->proto) ||
VALIDATE_MASK(l4_mask->ip_ver) ||
VALIDATE_MASK(l4_mask->tos)) {

return -EINVAL;

eth_mask = &cmd->fs.m_u.ether_spec;

if (VALIDATE_MASK(eth_mask->h_dest) ||
VALIDATE_MASK(eth_mask->h_source) ||
VALIDATE_MASK(eth_mask->h_proto)) {

return -EINVAL;

cmd->fs.flow_type);
return -EINVAL;

if ((cmd->fs.flow_type & FLOW_EXT)) {

if (VALIDATE_MASK(cmd->fs.m_ext.vlan_etype) ||
VALIDATE_MASK(cmd->fs.m_ext.vlan_tci)) {

return -EINVAL;

if (cmd->fs.m_ext.data[0] || cmd->fs.m_ext.data[1]) {
netdev_err(dev, "rxnfc: user-def not supported\n");
return -EINVAL;

if ((cmd->fs.flow_type & FLOW_MAC_EXT)) {

if (VALIDATE_MASK(cmd->fs.m_ext.h_dest)) {

return -EINVAL;

if (priv->hw_params->hfb_filter_size < 128) {

return -EINVAL;

if (cmd->fs.ring_cookie > priv->hw_params->rx_queues &&
cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE &&
cmd->fs.ring_cookie != RX_CLS_FLOW_DISC) {

cmd->fs.ring_cookie);
return -EINVAL;

if (cmd->fs.location == RX_CLS_LOC_ANY) {
list_for_each_entry(loc_rule, &priv->rxnfc_list, list) {
cmd->fs.location = loc_rule->fs.location;
err = memcmp(&loc_rule->fs, &cmd->fs,

loc_rule = &priv->rxnfc_rules[i];
if (loc_rule->state == BCMGENET_RXNFC_STATE_UNUSED) {
cmd->fs.location = i;

cmd->fs.location = RX_CLS_LOC_ANY;
return -ENOSPC;

loc_rule = &priv->rxnfc_rules[cmd->fs.location];

if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED)
bcmgenet_hfb_disable_filter(priv, cmd->fs.location + 1);
if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
list_del(&loc_rule->list);
bcmgenet_hfb_clear_filter(priv, cmd->fs.location + 1);

loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED;
memcpy(&loc_rule->fs, &cmd->fs,

list_add_tail(&loc_rule->list, &priv->rxnfc_list);

if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
return -EINVAL;

rule = &priv->rxnfc_rules[cmd->fs.location];
if (rule->state == BCMGENET_RXNFC_STATE_UNUSED) {
err = -ENOENT;

if (rule->state == BCMGENET_RXNFC_STATE_ENABLED)
bcmgenet_hfb_disable_filter(priv, cmd->fs.location + 1);
if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
list_del(&rule->list);
bcmgenet_hfb_clear_filter(priv, cmd->fs.location + 1);

rule->state = BCMGENET_RXNFC_STATE_UNUSED;
memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec));

switch (cmd->cmd) {

netdev_warn(priv->dev, "Unsupported ethtool command. (%d)\n",
cmd->cmd);
return -EINVAL;

return -EINVAL;

rule = &priv->rxnfc_rules[loc];
if (rule->state == BCMGENET_RXNFC_STATE_UNUSED)
err = -ENOENT;

memcpy(&cmd->fs, &rule->fs,

list_for_each(pos, &priv->rxnfc_list)

switch (cmd->cmd) {

cmd->data = priv->hw_params->rx_queues ?: 1;

cmd->rule_cnt = bcmgenet_get_num_flows(priv);
cmd->data = MAX_NUM_OF_FS_RULES | RX_CLS_LOC_SPECIAL;

err = bcmgenet_get_flow(dev, cmd, cmd->fs.location);

list_for_each_entry(rule, &priv->rxnfc_list, list)
if (i < cmd->rule_cnt)
rule_locs[i++] = rule->fs.location;
cmd->rule_cnt = i;
cmd->data = MAX_NUM_OF_FS_RULES;

err = -EOPNOTSUPP;

phy_detach(priv->dev->phydev);

bcmgenet_phy_power_set(priv->dev, false);

bcmgenet_phy_power_set(priv->dev, true);

tx_cb_ptr = ring->cbs;
tx_cb_ptr += ring->write_ptr - ring->cb_ptr;

if (ring->write_ptr == ring->end_ptr)
ring->write_ptr = ring->cb_ptr;

ring->write_ptr++;

tx_cb_ptr = ring->cbs;
tx_cb_ptr += ring->write_ptr - ring->cb_ptr;

if (ring->write_ptr == ring->cb_ptr)
ring->write_ptr = ring->end_ptr;

ring->write_ptr--;

bcmgenet_intrl2_1_writel(ring->priv,
1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),

bcmgenet_intrl2_1_writel(ring->priv,
1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),

bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,

bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,

skb = cb->skb;

cb->skb = NULL;
if (cb == GENET_CB(skb)->first_cb)

if (cb == GENET_CB(skb)->last_cb)

skb = cb->skb;
cb->skb = NULL;

bcmgenet_intrl2_1_writel(priv, (1 << ring->index), INTRL2_CPU_CLEAR);

c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX)

txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK;

__func__, ring->index, ring->c_index, c_index, txbds_ready);

skb = bcmgenet_free_tx_cb(&priv->pdev->dev,
&priv->tx_cbs[ring->clean_ptr]);

bytes_compl += GENET_CB(skb)->bytes_sent;

if (likely(ring->clean_ptr < ring->end_ptr))
ring->clean_ptr++;

ring->clean_ptr = ring->cb_ptr;

ring->free_bds += txbds_processed;
ring->c_index = c_index;

ring->packets += pkts_compl;
ring->bytes += bytes_compl;

netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->index),
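
/* Hedged sketch of the reclaim arithmetic above: the hardware consumer
 * index is a free-running 16-bit counter, so the number of newly completed
 * Tx descriptors is the masked difference against the software copy, which
 * remains correct across wraparound. DMA_C_INDEX_MASK is assumed to be
 * 0xffff here.
 */
#include <stdio.h>

#define C_INDEX_MASK 0xffff

int main(void)
{
	unsigned int hw_c_index = 0x0005;	/* wrapped past 0xffff */
	unsigned int sw_c_index = 0xfffe;	/* last value consumed to */
	unsigned int txbds_ready = (hw_c_index - sw_c_index) & C_INDEX_MASK;

	printf("txbds_ready = %u\n", txbds_ready);	/* 7, despite the wrap */
	return 0;
}
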
struct device *kdev = &priv->pdev->dev;

spin_lock_bh(&ring->lock);

drop = (ring->prod_index - ring->c_index) & DMA_C_INDEX_MASK;

ring->prod_index = ring->c_index & DMA_C_INDEX_MASK;
while (drop--) {

skb = cb_ptr->skb;

if (skb && cb_ptr == GENET_CB(skb)->first_cb) {

bcmgenet_tdma_ring_writel(priv, ring->index,
ring->prod_index, TDMA_PROD_INDEX);
wr_ptr = ring->write_ptr * WORDS_PER_BD(priv);
bcmgenet_tdma_ring_writel(priv, ring->index, wr_ptr,

spin_unlock_bh(&ring->lock);

spin_lock(&ring->lock);
work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
txq = netdev_get_tx_queue(ring->priv->dev, ring->index);

spin_unlock(&ring->lock);

bcmgenet_tx_reclaim(dev, &priv->tx_rings[i++], true);
} while (i <= priv->hw_params->tx_queues && netif_is_multiqueue(dev));

priv->mib.tx_realloc_tsb_failed++;
dev->stats.tx_dropped++;

priv->mib.tx_realloc_tsb++;

status = (struct status_64 *)skb->data;

if (skb->ip_summed == CHECKSUM_PARTIAL) {
ip_ver = skb->protocol;

ip_proto = ip_hdr(skb)->protocol;

ip_proto = ipv6_hdr(skb)->nexthdr;

offset = skb_checksum_start_offset(skb) - sizeof(*status);

(offset + skb->csum_offset) |

status->tx_csum_info = tx_csum_info;

struct device *kdev = &priv->pdev->dev;

ring = &priv->tx_rings[index];

nr_frags = skb_shinfo(skb)->nr_frags;

spin_lock(&ring->lock);
if (ring->free_bds <= (nr_frags + 1)) {

GENET_CB(skb)->bytes_sent = skb->len;

GENET_CB(skb)->first_cb = tx_cb_ptr;

mapping = dma_map_single(kdev, skb->data, size,

frag = &skb_shinfo(skb)->frags[i - 1];

priv->mib.tx_dma_failed++;

tx_cb_ptr->skb = skb;

(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT);

if (skb->ip_summed == CHECKSUM_PARTIAL)

dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat);

GENET_CB(skb)->last_cb = tx_cb_ptr;

ring->free_bds -= nr_frags + 1;
ring->prod_index += nr_frags + 1;
ring->prod_index &= DMA_P_INDEX_MASK;

netdev_tx_sent_queue(txq, GENET_CB(skb)->bytes_sent);

if (ring->free_bds <= (MAX_SKB_FRAGS + 1))

bcmgenet_tdma_ring_writel(priv, ring->index,
ring->prod_index, TDMA_PROD_INDEX);

spin_unlock(&ring->lock);

while (i-- > 0) {

struct device *kdev = &priv->pdev->dev;

skb = __netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT,

priv->mib.alloc_rx_buff_failed++;
netif_err(priv, rx_err, priv->dev,

/* DMA-map the new Rx skb */
mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len,

priv->mib.rx_dma_failed++;

netif_err(priv, rx_err, priv->dev,

/* Grab the current Rx skb from the ring and DMA-unmap it */

cb->skb = skb;

dma_unmap_len_set(cb, dma_len, priv->rx_buf_len);
dmadesc_set_addr(priv, cb->bd_addr, mapping);

/* bcmgenet_desc_rx - descriptor based rx process.

struct bcmgenet_priv *priv = ring->priv;
struct net_device *dev = priv->dev;

mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);

p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);

if (discards > ring->old_discards) {
discards = discards - ring->old_discards;
ring->errors += discards;
ring->old_discards += discards;

/* Clear HW register when we reach 75% of maximum 0xFFFF */
if (ring->old_discards >= 0xC000) {
ring->old_discards = 0;
bcmgenet_rdma_ring_writel(priv, ring->index, 0,
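
/* Hedged sketch of the "clear at 75%" policy above: the hardware discard
 * counter saturates at 0xffff, so the driver folds fresh discards into a
 * software total and zeroes the counter once the accumulated value reaches
 * 0xc000 (75% of 0xffff), well before saturation could hide further drops.
 */
#include <stdio.h>

int main(void)
{
	unsigned int old_discards = 0;
	unsigned long errors = 0;
	unsigned int hw_counter[] = { 0x4000, 0x9000, 0xd000 };

	for (int i = 0; i < 3; i++) {
		unsigned int fresh = hw_counter[i] - old_discards;

		errors += fresh;
		old_discards += fresh;
		if (old_discards >= 0xc000)	/* 75% of 0xffff */
			old_discards = 0;	/* and zero the HW counter */
		printf("errors=%lu carried=%u\n", errors, old_discards);
	}
	return 0;
}
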
rxpkttoprocess = (p_index - ring->c_index) & DMA_C_INDEX_MASK;

cb = &priv->rx_cbs[ring->read_ptr];

ring->dropped++;

status = (struct status_64 *)skb->data;
dma_length_status = status->length_status;
if (dev->features & NETIF_F_RXCSUM) {
rx_csum = (__force __be16)(status->rx_csum & 0xffff);

skb->csum = (__force __wsum)ntohs(rx_csum);
skb->ip_summed = CHECKSUM_COMPLETE;

__func__, p_index, ring->c_index,
ring->read_ptr, dma_length_status);

dev->stats.rx_length_errors++;
dev->stats.rx_errors++;

ring->errors++;

dev->stats.rx_crc_errors++;

dev->stats.rx_over_errors++;

dev->stats.rx_frame_errors++;

dev->stats.rx_length_errors++;
dev->stats.rx_errors++;

len -= 66;

if (priv->crc_fwd_en) {
skb_trim(skb, len - ETH_FCS_LEN);
len -= ETH_FCS_LEN;

skb->protocol = eth_type_trans(skb, priv->dev);
ring->packets++;
ring->bytes += len;

dev->stats.multicast++;

napi_gro_receive(&ring->napi, skb);

if (likely(ring->read_ptr < ring->end_ptr))
ring->read_ptr++;

ring->read_ptr = ring->cb_ptr;

ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK;
bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX);

ring->dim.bytes = bytes_processed;
ring->dim.packets = rxpktprocessed;

if (ring->dim.use_dim) {
dim_update_sample(ring->dim.event_ctr, ring->dim.packets,
ring->dim.bytes, &dim_sample);
net_dim(&ring->dim.dim, &dim_sample);

net_dim_get_rx_moderation(dim->mode, dim->profile_ix);

dim->state = DIM_START_MEASURE;

netif_dbg(priv, hw, priv->dev, "%s\n", __func__);

for (i = 0; i < ring->size; i++) {
cb = ring->cbs + i;

if (!cb->skb)
return -ENOMEM;

for (i = 0; i < priv->num_rx_bds; i++) {
cb = &priv->rx_cbs[i];

skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb);

spin_lock_bh(&priv->reg_lock);

spin_unlock_bh(&priv->reg_lock);

spin_unlock_bh(&priv->reg_lock);

/* UniMAC stops on a packet boundary, wait for a full-size packet

spin_lock_bh(&priv->reg_lock);

spin_unlock_bh(&priv->reg_lock);

if (priv->internal_phy) {

} else if (priv->ext_phy) {

} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {

struct device *kdev = &priv->pdev->dev;

dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");

* a valid CHK bit to be set in the per-packet status word

if (priv->crc_fwd_en)

if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {

reg |= BIT(priv->hw_params->bp_in_en_shift);

if (netif_is_multiqueue(priv->dev))
reg |= priv->hw_params->bp_in_mask;

reg &= ~priv->hw_params->bp_in_mask;

struct bcmgenet_net_dim *dim = &ring->dim;

INIT_WORK(&dim->dim.work, cb);
dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
dim->event_ctr = 0;
dim->packets = 0;
dim->bytes = 0;

struct bcmgenet_net_dim *dim = &ring->dim;

usecs = ring->rx_coalesce_usecs;
pkts = ring->rx_max_coalesced_frames;

/* If DIM was enabled, re-apply default parameters */
if (dim->use_dim) {
moder = net_dim_get_def_rx_moderation(dim->dim.mode);

struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];

spin_lock_init(&ring->lock);
ring->priv = priv;
ring->index = index;
ring->cbs = priv->tx_cbs + start_ptr;
ring->size = size;
ring->clean_ptr = start_ptr;
ring->c_index = 0;
ring->free_bds = size;
ring->write_ptr = start_ptr;
ring->cb_ptr = start_ptr;
ring->end_ptr = end_ptr - 1;
ring->prod_index = 0;

bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,

netif_napi_add_tx(priv->dev, &ring->napi, bcmgenet_tx_poll);

struct bcmgenet_rx_ring *ring = &priv->rx_rings[index];

ring->priv = priv;
ring->index = index;
ring->cbs = priv->rx_cbs + start_ptr;
ring->size = size;
ring->c_index = 0;
ring->read_ptr = start_ptr;
ring->cb_ptr = start_ptr;
ring->end_ptr = end_ptr - 1;

netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll);

bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,

for (i = 0; i <= priv->hw_params->tx_queues; ++i) {
ring = &priv->tx_rings[i];
napi_enable(&ring->napi);

for (i = 0; i <= priv->hw_params->tx_queues; ++i) {
ring = &priv->tx_rings[i];
napi_disable(&ring->napi);

for (i = 0; i <= priv->hw_params->tx_queues; ++i) {
ring = &priv->tx_rings[i];
netif_napi_del(&ring->napi);

mask = (1 << (priv->hw_params->tx_queues + 1)) - 1;

return -ETIMEDOUT;

mask = (1 << (priv->hw_params->rx_queues + 1)) - 1;

return -ETIMEDOUT;

* Queues 1-4 are priority-based, each one has 32 descriptors,

* GENET_Q0_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.

* - Tx queue 0 uses tx_cbs[0..127]
* - Tx queue 1 uses tx_cbs[128..159]
* - Tx queue 2 uses tx_cbs[160..191]
* - Tx queue 3 uses tx_cbs[192..223]
* - Tx queue 4 uses tx_cbs[224..255]
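
/* Worked form of the descriptor budget in the comment above, assuming
 * TOTAL_DESC = 256 and four priority queues of 32 descriptors each:
 * queue 0 gets whatever the priority queues leave over.
 */
#include <stdio.h>

int main(void)
{
	unsigned int total_desc = 256, tx_queues = 4, bds_per_q = 32;
	unsigned int q0_bds = total_desc - tx_queues * bds_per_q; /* 128 */
	unsigned int start = q0_bds;

	printf("Tx queue 0 uses tx_cbs[0..%u]\n", q0_bds - 1);
	for (unsigned int q = 1; q <= tx_queues; q++) {
		printf("Tx queue %u uses tx_cbs[%u..%u]\n",
		       q, start, start + bds_per_q - 1);
		start += bds_per_q;
	}
	return 0;
}
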
for (i = 0; i <= priv->hw_params->tx_queues; i++) {
bcmgenet_init_tx_ring(priv, i, end - start, start, end);

end += priv->hw_params->tx_bds_per_q;

ring_mask = (1 << (priv->hw_params->tx_queues + 1)) - 1;

for (i = 0; i <= priv->hw_params->rx_queues; ++i) {
ring = &priv->rx_rings[i];
napi_enable(&ring->napi);

for (i = 0; i <= priv->hw_params->rx_queues; ++i) {
ring = &priv->rx_rings[i];
napi_disable(&ring->napi);
cancel_work_sync(&ring->dim.dim.work);

for (i = 0; i <= priv->hw_params->rx_queues; ++i) {
ring = &priv->rx_rings[i];
netif_napi_del(&ring->napi);

* Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be

for (i = 0; i <= priv->hw_params->rx_queues; i++) {
ret = bcmgenet_init_rx_ring(priv, i, end - start, start, end);

end += priv->hw_params->rx_bds_per_q;

ring_mask = (1 << (priv->hw_params->rx_queues + 1)) - 1;

if (-ETIMEDOUT == bcmgenet_tdma_disable(priv)) {
netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
ret = -ETIMEDOUT;

if (-ETIMEDOUT == bcmgenet_rdma_disable(priv)) {
netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
ret = -ETIMEDOUT;

for (i = 0; i <= priv->hw_params->tx_queues; i++) {
txq = netdev_get_tx_queue(priv->dev, i);

kfree(priv->rx_cbs);
kfree(priv->tx_cbs);

netif_dbg(priv, hw, priv->dev, "%s\n", __func__);

netdev_err(priv->dev, "failed to halt Tx DMA\n");

netdev_err(priv->dev, "failed to halt Rx DMA\n");

priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
priv->num_rx_bds = TOTAL_DESC;
priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),

if (!priv->rx_cbs)
return -ENOMEM;

for (i = 0; i < priv->num_rx_bds; i++) {
cb = priv->rx_cbs + i;
cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE;

priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
priv->num_tx_bds = TOTAL_DESC;
priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),

if (!priv->tx_cbs) {
kfree(priv->rx_cbs);
return -ENOMEM;

for (i = 0; i < priv->num_tx_bds; i++) {
cb = priv->tx_cbs + i;
cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE;

bcmgenet_rdma_writel(priv, priv->dma_max_burst_length,

ret = bcmgenet_init_rx_queues(priv->dev);

netdev_err(priv->dev, "failed to initialize Rx queues\n");

kfree(priv->rx_cbs);
kfree(priv->tx_cbs);

bcmgenet_tdma_writel(priv, priv->dma_max_burst_length,

bcmgenet_init_tx_queues(priv->dev);

netif_dbg(priv, intr, priv->dev, "%s\n", __func__);

spin_lock_irq(&priv->lock);
status = priv->irq0_stat;
priv->irq0_stat = 0;
spin_unlock_irq(&priv->lock);

priv->dev->phydev->autoneg != AUTONEG_ENABLE) {
phy_init_hw(priv->dev->phydev);
genphy_config_aneg(priv->dev->phydev);

phy_mac_interrupt(priv->dev->phydev);

netif_dbg(priv, intr, priv->dev,

for (index = 0; index <= priv->hw_params->rx_queues; index++) {

rx_ring = &priv->rx_rings[index];
rx_ring->dim.event_ctr++;

if (likely(napi_schedule_prep(&rx_ring->napi))) {

__napi_schedule_irqoff(&rx_ring->napi);

for (index = 0; index <= priv->hw_params->tx_queues; index++) {

tx_ring = &priv->tx_rings[index];

if (likely(napi_schedule_prep(&tx_ring->napi))) {

__napi_schedule_irqoff(&tx_ring->napi);

netif_dbg(priv, intr, priv->dev,

wake_up(&priv->wq);

/* Save irq status for bottom-half processing. */
spin_lock_irqsave(&priv->lock, flags);
priv->irq0_stat |= status;
spin_unlock_irqrestore(&priv->lock, flags);

schedule_work(&priv->bcmgenet_irq_work);

phy_start(dev->phydev);

clk_prepare_enable(priv->clk);

if (priv->internal_phy)

bcmgenet_set_features(dev, dev->features);

bcmgenet_set_hw_addr(priv, dev->dev_addr);

ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
dev->name, priv);

netdev_err(dev, "can't request IRQ %d\n", priv->irq0);

ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
dev->name, priv);

netdev_err(dev, "can't request IRQ %d\n", priv->irq1);

bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause);

free_irq(priv->irq1, priv);

free_irq(priv->irq0, priv);

if (priv->internal_phy)

clk_disable_unprepare(priv->clk);

phy_stop(dev->phydev);

cancel_work_sync(&priv->bcmgenet_irq_work);

phy_disconnect(dev->phydev);

free_irq(priv->irq0, priv);
free_irq(priv->irq1, priv);

if (priv->internal_phy)

clk_disable_unprepare(priv->clk);

struct bcmgenet_priv *priv = ring->priv;

txq = netdev_get_tx_queue(priv->dev, ring->index);

spin_lock(&ring->lock);

intmsk = 1 << ring->index;
c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);

free_bds = ring->free_bds;
spin_unlock(&ring->lock);

netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n"

ring->index, ring->index,

free_bds, ring->size,
ring->prod_index, p_index & DMA_P_INDEX_MASK,
ring->c_index, c_index & DMA_C_INDEX_MASK,
ring->clean_ptr, ring->write_ptr,
ring->cb_ptr, ring->end_ptr);

for (q = 0; q <= priv->hw_params->tx_queues; q++)
bcmgenet_dump_tx_queue(&priv->tx_rings[q]);

for (q = 0; q <= priv->hw_params->tx_queues; q++)

/* Re-enable TX interrupts if disabled */

dev->stats.tx_errors++;

netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);

spin_lock(&priv->reg_lock);

if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||

spin_unlock(&priv->reg_lock);

spin_unlock(&priv->reg_lock);

bcmgenet_set_mdf_addr(priv, dev->broadcast, &i);

bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i);

bcmgenet_set_mdf_addr(priv, ha->addr, &i);

bcmgenet_set_mdf_addr(priv, ha->addr, &i);

reg = GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter);
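
/* Hedged sketch of the MDF enable mask above: GENMASK(h, l) sets bits l..h,
 * so with nfilter addresses programmed from the top filter slot downwards,
 * GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter) enables exactly the
 * nfilter filters in use. MAX_MDF_FILTER = 17 is assumed from the driver's
 * header.
 */
#include <stdio.h>

#define GENMASK(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))
#define MAX_MDF_FILTER	17

int main(void)
{
	unsigned int nfilter = 3;	/* e.g. broadcast + own addr + 1 mcast */
	unsigned int reg = GENMASK(MAX_MDF_FILTER - 1,
				   MAX_MDF_FILTER - nfilter);

	printf("MDF_CTRL = 0x%08x\n", reg);	/* bits 14..16 set */
	return 0;
}
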
return -EBUSY;

eth_hw_addr_set(dev, addr->sa_data);

for (q = 0; q <= priv->hw_params->tx_queues; q++) {
tx_ring = &priv->tx_rings[q];
tx_bytes += tx_ring->bytes;
tx_packets += tx_ring->packets;

for (q = 0; q <= priv->hw_params->rx_queues; q++) {
rx_ring = &priv->rx_rings[q];

rx_bytes += rx_ring->bytes;
rx_packets += rx_ring->packets;
rx_errors += rx_ring->errors;
rx_dropped += rx_ring->dropped;

dev->stats.tx_bytes = tx_bytes;
dev->stats.tx_packets = tx_packets;
dev->stats.rx_bytes = rx_bytes;
dev->stats.rx_packets = rx_packets;
dev->stats.rx_errors = rx_errors;
dev->stats.rx_missed_errors = rx_errors;
dev->stats.rx_dropped = rx_dropped;
return &dev->stats;

if (!dev->phydev || !phy_is_pseudo_fixed_link(dev->phydev) ||
priv->phy_interface != PHY_INTERFACE_MODE_MOCA)
return -EOPNOTSUPP;

priv->hw_params = params;

if (major != priv->version) {
dev_err(&priv->pdev->dev,

major, priv->version);

dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,

* heuristic to check for the new GPHY revision and re-arrange things

priv->gphy_rev = gphy_rev << 8;

priv->gphy_rev = gphy_rev;
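
/* Hedged sketch of the revision normalisation around the two assignments
 * above: an old-scheme GPHY reports only a major code in the low byte and
 * is shifted up to leave room for minor/patch digits, while a new-scheme
 * part already reports a 16-bit code and is kept as-is. The exact field
 * test is an assumption read off the fragment, not the driver verbatim.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t normalize_gphy_rev(uint16_t gphy_rev)
{
	if (gphy_rev & 0xff00)		/* new scheme: full 16-bit code */
		return gphy_rev;
	return gphy_rev << 8;		/* old scheme: major only, shift up */
}

int main(void)
{
	printf("0x0014 -> 0x%04x, 0x1302 -> 0x%04x\n",
	       normalize_gphy_rev(0x0014), normalize_gphy_rev(0x1302));
	return 0;
}
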
pr_warn("GENET does not support 40-bits PA\n");

priv->version,
params->tx_queues, params->tx_bds_per_q,
params->rx_queues, params->rx_bds_per_q,
params->bp_in_en_shift, params->bp_in_mask,
params->hfb_filter_cnt, params->qtag_mask,
params->tbuf_offset, params->hfb_offset,
params->hfb_reg_offset,
params->rdma_offset, params->tdma_offset,
params->words_per_bd);

{ .compatible = "brcm,genet-v1", .data = &v1_plat_data },
{ .compatible = "brcm,genet-v2", .data = &v2_plat_data },
{ .compatible = "brcm,genet-v3", .data = &v3_plat_data },
{ .compatible = "brcm,genet-v4", .data = &v4_plat_data },
{ .compatible = "brcm,genet-v5", .data = &v5_plat_data },
{ .compatible = "brcm,bcm2711-genet-v5", .data = &bcm2711_plat_data },
{ .compatible = "brcm,bcm7712-genet-v5", .data = &bcm7712_plat_data },

struct bcmgenet_platform_data *pd = pdev->dev.platform_data;

int err = -EIO;

dev_err(&pdev->dev, "can't allocate net device\n");
return -ENOMEM;

priv->irq0 = platform_get_irq(pdev, 0);
if (priv->irq0 < 0) {
err = priv->irq0;

priv->irq1 = platform_get_irq(pdev, 1);
if (priv->irq1 < 0) {
err = priv->irq1;

priv->wol_irq = platform_get_irq_optional(pdev, 2);
if (priv->wol_irq == -EPROBE_DEFER) {
err = priv->wol_irq;

priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
err = PTR_ERR(priv->base);

spin_lock_init(&priv->reg_lock);
spin_lock_init(&priv->lock);

priv->autoneg_pause = 1;
priv->tx_pause = 1;
priv->rx_pause = 1;

SET_NETDEV_DEV(dev, &pdev->dev);
dev_set_drvdata(&pdev->dev, dev);
dev->watchdog_timeo = 2 * HZ;
dev->ethtool_ops = &bcmgenet_ethtool_ops;
dev->netdev_ops = &bcmgenet_netdev_ops;

priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);

dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |

dev->hw_features |= dev->features;
dev->vlan_features |= dev->features;

priv->wol_irq_disabled = true;
if (priv->wol_irq > 0) {
err = devm_request_irq(&pdev->dev, priv->wol_irq,
bcmgenet_wol_isr, 0, dev->name, priv);

device_set_wakeup_capable(&pdev->dev, 1);

dev->needed_headroom += 64;

priv->dev = dev;
priv->pdev = pdev;

pdata = device_get_match_data(&pdev->dev);

priv->version = pdata->version;
priv->dma_max_burst_length = pdata->dma_max_burst_length;
priv->flags = pdata->flags;

priv->version = pd->genet_version;
priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH;

priv->clk = devm_clk_get_optional(&priv->pdev->dev, "enet");
if (IS_ERR(priv->clk)) {
dev_dbg(&priv->pdev->dev, "failed to get enet clock\n");
err = PTR_ERR(priv->clk);

err = clk_prepare_enable(priv->clk);

err = -EIO;

err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));

err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
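
/* Hedged sketch of the DMA mask negotiation read off the two calls above:
 * ask for the 40-bit mask the DMA engine can address and fall back to
 * 32-bit if the platform rejects it. set_mask_sketch() is a stand-in for
 * dma_set_mask_and_coherent(); the platform limit is illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

static int set_mask_sketch(uint64_t mask, uint64_t platform_limit)
{
	return mask <= platform_limit ? 0 : -5;	/* -EIO */
}

int main(void)
{
	uint64_t platform_limit = DMA_BIT_MASK(32);	/* 32-bit-only bus */
	int err = set_mask_sketch(DMA_BIT_MASK(40), platform_limit);

	if (err)	/* fall back, as the probe path appears to do */
		err = set_mask_sketch(DMA_BIT_MASK(32), platform_limit);
	printf("final err = %d\n", err);
	return 0;
}
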
init_waitqueue_head(&priv->wq);

priv->rx_buf_len = RX_BUF_LENGTH;
INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);

priv->clk_wol = devm_clk_get_optional(&priv->pdev->dev, "enet-wol");
if (IS_ERR(priv->clk_wol)) {
dev_dbg(&priv->pdev->dev, "failed to get enet-wol clock\n");
err = PTR_ERR(priv->clk_wol);

priv->clk_eee = devm_clk_get_optional(&priv->pdev->dev, "enet-eee");
if (IS_ERR(priv->clk_eee)) {
dev_dbg(&priv->pdev->dev, "failed to get enet-eee clock\n");
err = PTR_ERR(priv->clk_eee);

if (device_get_phy_mode(&pdev->dev) == PHY_INTERFACE_MODE_INTERNAL)

if (pd && !IS_ERR_OR_NULL(pd->mac_address))
eth_hw_addr_set(dev, pd->mac_address);

if (device_get_ethdev_address(&pdev->dev, dev))
if (has_acpi_companion(&pdev->dev)) {

if (!is_valid_ether_addr(dev->dev_addr)) {
dev_warn(&pdev->dev, "using random Ethernet MAC\n");

netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);

for (i = 0; i <= priv->hw_params->rx_queues; i++)
priv->rx_rings[i].rx_max_coalesced_frames = 1;

clk_disable_unprepare(priv->clk);

clk_disable_unprepare(priv->clk);

struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);

dev_set_drvdata(&pdev->dev, NULL);
unregister_netdev(priv->dev);
bcmgenet_mii_exit(priv->dev);
free_netdev(priv->dev);

ret = clk_prepare_enable(priv->clk);

if (device_may_wakeup(d) && priv->wolopts) {
/* Account for Wake-on-LAN events and clear those events

pm_wakeup_event(&priv->pdev->dev, 0);

/* From WOL-enabled suspend, switch to regular clock */

if (priv->internal_phy)

if (device_may_wakeup(d) && priv->wolopts) {

list_for_each_entry(rule, &priv->rxnfc_list, list)
if (rule->state == BCMGENET_RXNFC_STATE_ENABLED)

rule->fs.location + 1);

bcmgenet_init_tx_queues(priv->dev);

phy_start_machine(dev->phydev);

enable_irq(priv->irq1);

enable_irq(priv->irq1);

phy_init_hw(dev->phydev);

genphy_config_aneg(dev->phydev);
bcmgenet_mii_config(priv->dev, false);

bcmgenet_set_features(dev, dev->features);

bcmgenet_set_hw_addr(priv, dev->dev_addr);

list_for_each_entry(rule, &priv->rxnfc_list, list)
if (rule->state != BCMGENET_RXNFC_STATE_UNUSED)

phy_resume(dev->phydev);

if (priv->internal_phy)

clk_disable_unprepare(priv->clk);

if (device_may_wakeup(d) && priv->wolopts) {

/* Suspend non-wake Rx data flows */
if (priv->wolopts & WAKE_FILTER)
list_for_each_entry(rule, &priv->rxnfc_list, list)
if (rule->fs.ring_cookie == RX_CLS_FLOW_WAKE &&
rule->state == BCMGENET_RXNFC_STATE_ENABLED)
hfb_enable |= 1 << rule->fs.location;

if (-ETIMEDOUT == bcmgenet_tdma_disable(priv))
netdev_warn(priv->dev,

disable_irq(priv->irq1);

/* Prepare the device for Wake-on-LAN and switch to the slow clock */
if (device_may_wakeup(d) && priv->wolopts)

else if (priv->internal_phy)

clk_disable_unprepare(priv->clk);

MODULE_SOFTDEP("pre: mdio-bcm-unimac");