Lines matching defs:priv in drivers/net/ethernet/broadcom/bcmsysport.c (Broadcom SYSTEMPORT Gigabit Ethernet driver)
33 static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off)
35 if (priv->is_lite && off >= RDMA_STATUS)
37 return readl_relaxed(priv->base + SYS_PORT_RDMA_OFFSET + off);
40 static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off)
42 if (priv->is_lite && off >= RDMA_STATUS)
44 writel_relaxed(val, priv->base + SYS_PORT_RDMA_OFFSET + off);
47 static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit)
49 if (!priv->is_lite) {
63 static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
66 priv->irq##which##_mask &= ~(mask); \
67 intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
69 static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
72 intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET); \
73 priv->irq##which##_mask |= (mask); \
83 static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
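The RDMA accessors at lines 33-44 hinge on one detail the matches above do not show: on SYSTEMPORT Lite the RDMA register block is one word shorter, so offsets at or beyond RDMA_STATUS have to be adjusted before the MMIO access. A minimal sketch of that pattern, assuming the adjustment is a single 4-byte bump:

    /* Sketch only: on SYSTEMPORT Lite one RDMA register is missing, so
     * offsets at or beyond RDMA_STATUS are shifted by one 32-bit word
     * (the +4 is an assumption, not shown in the matched lines).
     */
    static inline u32 rdma_readl_sketch(struct bcm_sysport_priv *priv, u32 off)
    {
            if (priv->is_lite && off >= RDMA_STATUS)
                    off += 4;
            return readl_relaxed(priv->base + SYS_PORT_RDMA_OFFSET + off);
    }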
98 struct bcm_sysport_priv *priv = netdev_priv(dev);
101 priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
102 reg = rxchk_readl(priv, RXCHK_CONTROL);
107 if (priv->rx_chk_en)
115 if (priv->rx_chk_en && priv->crc_fwd)
129 rxchk_writel(priv, reg, RXCHK_CONTROL);
135 struct bcm_sysport_priv *priv = netdev_priv(dev);
141 priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
143 reg = tdma_readl(priv, TDMA_CONTROL);
144 if (priv->tsb_en)
145 reg |= tdma_control_bit(priv, TSB_EN);
147 reg &= ~tdma_control_bit(priv, TSB_EN);
153 reg |= tdma_control_bit(priv, SW_BRCM_TAG);
155 reg &= ~tdma_control_bit(priv, SW_BRCM_TAG);
156 tdma_writel(priv, reg, TDMA_CONTROL);
160 tdma_writel(priv, ETH_P_8021Q, TDMA_TPID);
166 struct bcm_sysport_priv *priv = netdev_priv(dev);
169 ret = clk_prepare_enable(priv->clk);
174 if (!priv->is_lite)
175 priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
177 priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
183 clk_disable_unprepare(priv->clk);
295 struct bcm_sysport_priv *priv = netdev_priv(dev);
297 return priv->msg_enable;
302 struct bcm_sysport_priv *priv = netdev_priv(dev);
304 priv->msg_enable = enable;
324 struct bcm_sysport_priv *priv = netdev_priv(dev);
332 if (priv->is_lite &&
347 struct bcm_sysport_priv *priv = netdev_priv(dev);
355 if (priv->is_lite &&
372 static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
391 if (priv->is_lite)
396 val = umac_readl(priv, UMAC_MIB_START + j + offset);
399 val = rxchk_readl(priv, s->reg_offset);
401 rxchk_writel(priv, 0, s->reg_offset);
404 val = rbuf_readl(priv, s->reg_offset);
406 rbuf_writel(priv, 0, s->reg_offset);
409 if (!priv->is_lite)
412 val = rdma_readl(priv, s->reg_offset);
414 rdma_writel(priv, 0, s->reg_offset);
419 p = (char *)priv + s->stat_offset;
423 netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
426 static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv,
434 for (q = 0; q < priv->netdev->num_tx_queues; q++) {
435 ring = &priv->tx_rings[q];
437 start = u64_stats_fetch_begin(&priv->syncp);
440 } while (u64_stats_fetch_retry(&priv->syncp, start));
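The TX statistics reader at lines 434-440 wraps the counter reads in a u64_stats_sync retry loop so 64-bit counters stay consistent against concurrent writers on 32-bit machines. A sketch of that reader pattern; the helper name and the ring fields read are assumptions for illustration:

    /* Sketch of the u64_stats reader loop used around lines 434-440:
     * retry if a writer updated the counters while we were reading.
     */
    static void read_ring_stats_sketch(struct bcm_sysport_priv *priv,
                                       struct bcm_sysport_tx_ring *ring,
                                       u64 *bytes, u64 *packets)
    {
            unsigned int start;

            do {
                    start = u64_stats_fetch_begin(&priv->syncp);
                    *bytes = ring->bytes;       /* assumed ring counter fields */
                    *packets = ring->packets;
            } while (u64_stats_fetch_retry(&priv->syncp, start));
    }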
450 struct bcm_sysport_priv *priv = netdev_priv(dev);
451 struct bcm_sysport_stats64 *stats64 = &priv->stats64;
452 struct u64_stats_sync *syncp = &priv->syncp;
459 bcm_sysport_update_mib_counters(priv);
460 bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets);
475 p = (char *)priv;
477 if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
501 ring = &priv->tx_rings[i];
512 struct bcm_sysport_priv *priv = netdev_priv(dev);
515 wol->wolopts = priv->wolopts;
517 if (!(priv->wolopts & WAKE_MAGICSECURE))
520 memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
526 struct bcm_sysport_priv *priv = netdev_priv(dev);
527 struct device *kdev = &priv->pdev->dev;
537 memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));
542 if (priv->wol_irq_disabled)
543 enable_irq_wake(priv->wol_irq);
544 priv->wol_irq_disabled = 0;
548 if (!priv->wol_irq_disabled)
549 disable_irq_wake(priv->wol_irq);
550 priv->wol_irq_disabled = 1;
553 priv->wolopts = wol->wolopts;
558 static void bcm_sysport_set_rx_coalesce(struct bcm_sysport_priv *priv,
563 reg = rdma_readl(priv, RDMA_MBDONE_INTR);
568 rdma_writel(priv, reg, RDMA_MBDONE_INTR);
574 struct bcm_sysport_priv *priv = ring->priv;
577 reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(ring->index));
583 tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(ring->index));
591 struct bcm_sysport_priv *priv = netdev_priv(dev);
594 reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));
599 reg = rdma_readl(priv, RDMA_MBDONE_INTR);
603 ec->use_adaptive_rx_coalesce = priv->dim.use_dim;
613 struct bcm_sysport_priv *priv = netdev_priv(dev);
633 bcm_sysport_set_tx_coalesce(&priv->tx_rings[i], ec);
635 priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;
636 priv->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
637 usecs = priv->rx_coalesce_usecs;
638 pkts = priv->rx_max_coalesced_frames;
640 if (ec->use_adaptive_rx_coalesce && !priv->dim.use_dim) {
641 moder = net_dim_get_def_rx_moderation(priv->dim.dim.mode);
646 priv->dim.use_dim = ec->use_adaptive_rx_coalesce;
649 bcm_sysport_set_rx_coalesce(priv, usecs, pkts);
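bcm_sysport_set_rx_coalesce (lines 558-568) packs both the packet threshold and an interrupt timeout into the single RDMA_MBDONE_INTR register with a read-modify-write. A sketch of that packing, assuming the RDMA_INTR_THRESH_MASK / RDMA_TIMEOUT_* field definitions and the 8.192 us timeout granularity from the driver's header:

    /* Sketch: low bits carry the packet threshold, the upper field a
     * timeout expressed in (assumed) 8.192 us hardware ticks.
     */
    static void set_rx_coalesce_sketch(struct bcm_sysport_priv *priv,
                                       u32 usecs, u32 pkts)
    {
            u32 reg = rdma_readl(priv, RDMA_MBDONE_INTR);

            reg &= ~(RDMA_INTR_THRESH_MASK |
                     RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
            reg |= pkts;
            reg |= DIV_ROUND_UP(usecs * 1000, 8192) << RDMA_TIMEOUT_SHIFT;
            rdma_writel(priv, reg, RDMA_MBDONE_INTR);
    }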
661 static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
664 struct device *kdev = &priv->pdev->dev;
665 struct net_device *ndev = priv->netdev;
670 skb = __netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH,
673 priv->mib.alloc_rx_buff_failed++;
674 netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
681 priv->mib.rx_dma_failed++;
683 netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
696 dma_desc_set_addr(priv, cb->bd_addr, mapping);
698 netif_dbg(priv, rx_status, ndev, "RX refill\n");
704 static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
710 for (i = 0; i < priv->num_rx_bds; i++) {
711 cb = &priv->rx_cbs[i];
712 skb = bcm_sysport_rx_refill(priv, cb);
722 static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
725 struct bcm_sysport_stats64 *stats64 = &priv->stats64;
726 struct net_device *ndev = priv->netdev;
736 intrl2_0_writel(priv, INTRL2_0_RDMA_MBDONE, INTRL2_CPU_CLEAR);
742 if (!priv->is_lite)
743 p_index = rdma_readl(priv, RDMA_PROD_INDEX);
745 p_index = rdma_readl(priv, RDMA_CONS_INDEX);
748 to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK;
750 netif_dbg(priv, rx_status, ndev,
752 p_index, priv->rx_c_index, to_process);
755 cb = &priv->rx_cbs[priv->rx_read_ptr];
756 skb = bcm_sysport_rx_refill(priv, cb);
765 netif_err(priv, rx_err, ndev, "out of memory!\n");
777 netif_dbg(priv, rx_status, ndev,
779 p_index, priv->rx_c_index, priv->rx_read_ptr,
783 netif_err(priv, rx_status, ndev, "oversized packet\n");
791 netif_err(priv, rx_status, ndev, "fragmented packet!\n");
799 netif_err(priv, rx_err, ndev, "error packet\n");
823 if (priv->crc_fwd) {
831 u64_stats_update_begin(&priv->syncp);
834 u64_stats_update_end(&priv->syncp);
836 napi_gro_receive(&priv->napi, skb);
839 priv->rx_read_ptr++;
841 if (priv->rx_read_ptr == priv->num_rx_bds)
842 priv->rx_read_ptr = 0;
845 priv->dim.packets = processed;
846 priv->dim.bytes = processed_bytes;
856 struct bcm_sysport_priv *priv = ring->priv;
857 struct device *kdev = &priv->pdev->dev;
876 static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
880 struct net_device *ndev = priv->netdev;
888 if (!ring->priv->is_lite)
889 intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR);
891 intrl2_0_writel(ring->priv, BIT(ring->index +
895 hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
899 netif_dbg(priv, tx_done, ndev,
916 u64_stats_update_begin(&priv->syncp);
919 u64_stats_update_end(&priv->syncp);
923 netif_dbg(priv, tx_done, ndev,
931 static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
938 txq = netdev_get_tx_queue(priv->netdev, ring->index);
941 released = __bcm_sysport_tx_reclaim(priv, ring);
951 static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
957 __bcm_sysport_tx_reclaim(priv, ring);
967 work_done = bcm_sysport_tx_reclaim(ring->priv, ring);
972 if (!ring->priv->is_lite)
973 intrl2_1_mask_clear(ring->priv, BIT(ring->index));
975 intrl2_0_mask_clear(ring->priv, BIT(ring->index +
984 static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
988 for (q = 0; q < priv->netdev->num_tx_queues; q++)
989 bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
994 struct bcm_sysport_priv *priv =
999 work_done = bcm_sysport_desc_rx(priv, budget);
1001 priv->rx_c_index += work_done;
1002 priv->rx_c_index &= RDMA_CONS_INDEX_MASK;
1008 if (!priv->is_lite)
1009 rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
1011 rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX);
1016 intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
1019 if (priv->dim.use_dim) {
1020 dim_update_sample(priv->dim.event_ctr, priv->dim.packets,
1021 priv->dim.bytes, &dim_sample);
1022 net_dim(&priv->dim.dim, &dim_sample);
1028 static void mpd_enable_set(struct bcm_sysport_priv *priv, bool enable)
1032 reg = umac_readl(priv, UMAC_MPD_CTRL);
1037 umac_writel(priv, reg, UMAC_MPD_CTRL);
1039 if (priv->is_lite)
1044 reg = rbuf_readl(priv, RBUF_CONTROL);
1049 rbuf_writel(priv, reg, RBUF_CONTROL);
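mpd_enable_set (lines 1028-1049) toggles magic-packet detection in the UniMAC and the matching wake-pattern enable in the RBUF block, which sits at a different bit position on the Lite variant. A sketch of that toggle; MPD_EN, RBUF_ACPI_EN and RBUF_ACPI_EN_LITE are assumed from the driver's header:

    /* Sketch of the MPD/ACPI enable toggle implied by lines 1028-1049 */
    static void mpd_enable_sketch(struct bcm_sysport_priv *priv, bool enable)
    {
            u32 reg, bit;

            reg = umac_readl(priv, UMAC_MPD_CTRL);
            if (enable)
                    reg |= MPD_EN;
            else
                    reg &= ~MPD_EN;
            umac_writel(priv, reg, UMAC_MPD_CTRL);

            /* The RBUF wake-pattern enable moves on SYSTEMPORT Lite */
            bit = priv->is_lite ? RBUF_ACPI_EN_LITE : RBUF_ACPI_EN;
            reg = rbuf_readl(priv, RBUF_CONTROL);
            if (enable)
                    reg |= bit;
            else
                    reg &= ~bit;
            rbuf_writel(priv, reg, RBUF_CONTROL);
    }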
1052 static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
1058 reg = rxchk_readl(priv, RXCHK_CONTROL);
1061 rxchk_writel(priv, reg, RXCHK_CONTROL);
1066 for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
1067 rxchk_writel(priv, priv->filters_loc[index] <<
1069 rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
1073 mpd_enable_set(priv, false);
1075 reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
1077 netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
1080 reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
1082 netdev_info(priv->netdev,
1086 netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
1094 struct bcm_sysport_priv *priv =
1099 bcm_sysport_set_rx_coalesce(priv, cur_profile.usec, cur_profile.pkts);
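The adaptive-coalescing plumbing splits across two places: NAPI poll feeds samples to net_dim (lines 1019-1022), and a deferred work item (lines 1094-1099) converts the profile net_dim selected back into register values via bcm_sysport_set_rx_coalesce. A sketch of such a work handler, assuming priv->dim embeds a struct dim named "dim" as the container_of chain at lines 994 and 1094 suggests:

    /* Sketch of a net_dim work handler matching lines 1094-1099 */
    static void dim_work_sketch(struct work_struct *work)
    {
            struct dim *dim = container_of(work, struct dim, work);
            struct bcm_sysport_net_dim *ndim =
                    container_of(dim, struct bcm_sysport_net_dim, dim);
            struct bcm_sysport_priv *priv =
                    container_of(ndim, struct bcm_sysport_priv, dim);
            struct dim_cq_moder cur_profile =
                    net_dim_get_rx_moderation(dim->mode, dim->profile_ix);

            bcm_sysport_set_rx_coalesce(priv, cur_profile.usec, cur_profile.pkts);
            dim->state = DIM_START_MEASURE;
    }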
1107 struct bcm_sysport_priv *priv = netdev_priv(dev);
1111 priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
1112 ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
1113 intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
1115 if (unlikely(priv->irq0_stat == 0)) {
1116 netdev_warn(priv->netdev, "spurious RX interrupt\n");
1120 if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
1121 priv->dim.event_ctr++;
1122 if (likely(napi_schedule_prep(&priv->napi))) {
1124 intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
1125 __napi_schedule_irqoff(&priv->napi);
1132 if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
1133 bcm_sysport_tx_reclaim_all(priv);
1135 if (!priv->is_lite)
1140 if (!(priv->irq0_stat & ring_bit))
1143 txr = &priv->tx_rings[ring];
1146 intrl2_0_mask_set(priv, ring_bit);
1158 struct bcm_sysport_priv *priv = netdev_priv(dev);
1162 priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
1163 ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
1164 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1166 if (unlikely(priv->irq1_stat == 0)) {
1167 netdev_warn(priv->netdev, "spurious TX interrupt\n");
1172 if (!(priv->irq1_stat & BIT(ring)))
1175 txr = &priv->tx_rings[ring];
1178 intrl2_1_mask_set(priv, BIT(ring));
1188 struct bcm_sysport_priv *priv = dev_id;
1190 pm_wakeup_event(&priv->pdev->dev, 0);
1198 struct bcm_sysport_priv *priv = netdev_priv(dev);
1200 disable_irq(priv->irq0);
1201 bcm_sysport_rx_isr(priv->irq0, priv);
1202 enable_irq(priv->irq0);
1204 if (!priv->is_lite) {
1205 disable_irq(priv->irq1);
1206 bcm_sysport_tx_isr(priv->irq1, priv);
1207 enable_irq(priv->irq1);
1215 struct bcm_sysport_priv *priv = netdev_priv(dev);
1228 priv->mib.tx_realloc_tsb_failed++;
1235 priv->mib.tx_realloc_tsb++;
1286 struct bcm_sysport_priv *priv = netdev_priv(dev);
1287 struct device *kdev = &priv->pdev->dev;
1300 ring = &priv->tx_rings[queue];
1312 if (priv->tsb_en) {
1324 priv->mib.tx_dma_failed++;
1325 netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
1354 spin_lock_irqsave(&priv->desc_lock, desc_flags);
1355 tdma_writel(priv, len_status, TDMA_WRITE_PORT_HI(ring->index));
1356 tdma_writel(priv, addr_lo, TDMA_WRITE_PORT_LO(ring->index));
1357 spin_unlock_irqrestore(&priv->desc_lock, desc_flags);
1363 netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
1385 struct bcm_sysport_priv *priv = netdev_priv(dev);
1390 if (priv->old_link != phydev->link) {
1392 priv->old_link = phydev->link;
1395 if (priv->old_duplex != phydev->duplex) {
1397 priv->old_duplex = phydev->duplex;
1400 if (priv->is_lite)
1424 if (priv->old_pause != phydev->pause) {
1426 priv->old_pause = phydev->pause;
1436 reg = umac_readl(priv, UMAC_CMD);
1441 umac_writel(priv, reg, UMAC_CMD);
1448 static void bcm_sysport_init_dim(struct bcm_sysport_priv *priv,
1451 struct bcm_sysport_net_dim *dim = &priv->dim;
1460 static void bcm_sysport_init_rx_coalesce(struct bcm_sysport_priv *priv)
1462 struct bcm_sysport_net_dim *dim = &priv->dim;
1466 usecs = priv->rx_coalesce_usecs;
1467 pkts = priv->rx_max_coalesced_frames;
1476 bcm_sysport_set_rx_coalesce(priv, usecs, pkts);
1479 static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
1482 struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
1491 netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
1497 ring->priv = priv;
1498 netif_napi_add_tx(priv->netdev, &ring->napi, bcm_sysport_tx_poll);
1507 tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
1508 tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
1509 tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
1510 tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
1513 reg = tdma_readl(priv, TDMA_DESC_RING_MAPPING(index));
1521 tdma_writel(priv, reg, TDMA_DESC_RING_MAPPING(index));
1526 if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
1528 tdma_writel(priv, reg, TDMA_DESC_RING_PCP_DEI_VID(index));
1531 reg = tdma_readl(priv, TDMA_CONTROL);
1532 reg |= tdma_control_bit(priv, ACB_ALGO);
1533 tdma_writel(priv, reg, TDMA_CONTROL);
1538 reg = tdma_readl(priv, TDMA_CONTROL);
1539 if (priv->is_lite)
1543 reg |= tdma_control_bit(priv, TSB_SWAP0);
1545 reg &= ~tdma_control_bit(priv, TSB_SWAP0);
1546 tdma_writel(priv, reg, TDMA_CONTROL);
1551 tdma_writel(priv, ring->size |
1556 reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
1558 tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);
1562 netif_dbg(priv, hw, priv->netdev,
1570 static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
1573 struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
1577 reg = tdma_readl(priv, TDMA_STATUS);
1579 netdev_warn(priv->netdev, "TDMA not stopped!\n");
1591 bcm_sysport_tx_clean(priv, ring);
1598 netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
1602 static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
1608 reg = rdma_readl(priv, RDMA_CONTROL);
1613 rdma_writel(priv, reg, RDMA_CONTROL);
1617 reg = rdma_readl(priv, RDMA_STATUS);
1623 netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");
1629 static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
1635 reg = tdma_readl(priv, TDMA_CONTROL);
1637 reg |= tdma_control_bit(priv, TDMA_EN);
1639 reg &= ~tdma_control_bit(priv, TDMA_EN);
1640 tdma_writel(priv, reg, TDMA_CONTROL);
1644 reg = tdma_readl(priv, TDMA_STATUS);
1651 netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");
1656 static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
1664 priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC;
1665 priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
1666 priv->rx_c_index = 0;
1667 priv->rx_read_ptr = 0;
1668 priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
1670 if (!priv->rx_cbs) {
1671 netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
1675 for (i = 0; i < priv->num_rx_bds; i++) {
1676 cb = priv->rx_cbs + i;
1677 cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
1680 ret = bcm_sysport_alloc_rx_bufs(priv);
1682 netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
1687 reg = rdma_readl(priv, RDMA_STATUS);
1689 rdma_enable_set(priv, 0);
1691 rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
1692 rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
1693 rdma_writel(priv, 0, RDMA_PROD_INDEX);
1694 rdma_writel(priv, 0, RDMA_CONS_INDEX);
1695 rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
1698 rdma_writel(priv, 0, RDMA_START_ADDR_HI);
1699 rdma_writel(priv, 0, RDMA_START_ADDR_LO);
1700 rdma_writel(priv, 0, RDMA_END_ADDR_HI);
1701 rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO);
1703 netif_dbg(priv, hw, priv->netdev,
1705 priv->num_rx_bds, priv->rx_bds);
1710 static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
1717 reg = rdma_readl(priv, RDMA_STATUS);
1719 netdev_warn(priv->netdev, "RDMA not stopped!\n");
1721 for (i = 0; i < priv->num_rx_bds; i++) {
1722 cb = &priv->rx_cbs[i];
1724 dma_unmap_single(&priv->pdev->dev,
1730 kfree(priv->rx_cbs);
1731 priv->rx_cbs = NULL;
1733 netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
1738 struct bcm_sysport_priv *priv = netdev_priv(dev);
1741 if (priv->is_lite)
1744 reg = umac_readl(priv, UMAC_CMD);
1749 umac_writel(priv, reg, UMAC_CMD);
1756 static inline void umac_enable_set(struct bcm_sysport_priv *priv,
1761 if (!priv->is_lite) {
1762 reg = umac_readl(priv, UMAC_CMD);
1767 umac_writel(priv, reg, UMAC_CMD);
1769 reg = gib_readl(priv, GIB_CONTROL);
1774 gib_writel(priv, reg, GIB_CONTROL);
1784 static inline void umac_reset(struct bcm_sysport_priv *priv)
1788 if (priv->is_lite)
1791 reg = umac_readl(priv, UMAC_CMD);
1793 umac_writel(priv, reg, UMAC_CMD);
1795 reg = umac_readl(priv, UMAC_CMD);
1797 umac_writel(priv, reg, UMAC_CMD);
1800 static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
1807 if (!priv->is_lite) {
1808 umac_writel(priv, mac0, UMAC_MAC0);
1809 umac_writel(priv, mac1, UMAC_MAC1);
1811 gib_writel(priv, mac0, GIB_MAC0);
1812 gib_writel(priv, mac1, GIB_MAC1);
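umac_set_hw_addr (lines 1800-1812) writes the 6-byte station address as two registers, steering to the UniMAC on full SYSTEMPORT and to the GIB on Lite. A sketch of the packing those writes imply; the exact byte ordering is an assumption:

    /* Sketch: first four address bytes packed into MAC0, last two into MAC1 */
    static void set_hw_addr_sketch(struct bcm_sysport_priv *priv,
                                   const unsigned char *addr)
    {
            u32 mac0 = (addr[0] << 24) | (addr[1] << 16) |
                       (addr[2] << 8) | addr[3];
            u32 mac1 = (addr[4] << 8) | addr[5];

            if (!priv->is_lite) {
                    umac_writel(priv, mac0, UMAC_MAC0);
                    umac_writel(priv, mac1, UMAC_MAC1);
            } else {
                    gib_writel(priv, mac0, GIB_MAC0);
                    gib_writel(priv, mac1, GIB_MAC1);
            }
    }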
1816 static void topctrl_flush(struct bcm_sysport_priv *priv)
1818 topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
1819 topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
1821 topctrl_writel(priv, 0, RX_FLUSH_CNTL);
1822 topctrl_writel(priv, 0, TX_FLUSH_CNTL);
1827 struct bcm_sysport_priv *priv = netdev_priv(dev);
1841 umac_set_hw_addr(priv, dev->dev_addr);
1849 struct bcm_sysport_priv *priv = netdev_priv(dev);
1850 struct bcm_sysport_stats64 *stats64 = &priv->stats64;
1855 bcm_sysport_update_tx_stats(priv, &stats->tx_bytes,
1859 start = u64_stats_fetch_begin(&priv->syncp);
1862 } while (u64_stats_fetch_retry(&priv->syncp, start));
1867 struct bcm_sysport_priv *priv = netdev_priv(dev);
1870 bcm_sysport_init_dim(priv, bcm_sysport_dim_work);
1871 bcm_sysport_init_rx_coalesce(priv);
1872 napi_enable(&priv->napi);
1875 intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
1880 if (!priv->is_lite)
1881 intrl2_1_mask_clear(priv, 0xffffffff);
1883 intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);
1886 static void rbuf_init(struct bcm_sysport_priv *priv)
1890 reg = rbuf_readl(priv, RBUF_CONTROL);
1893 if (priv->is_lite)
1901 rbuf_writel(priv, reg, RBUF_CONTROL);
1904 static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv)
1906 intrl2_0_mask_set(priv, 0xffffffff);
1907 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1908 if (!priv->is_lite) {
1909 intrl2_1_mask_set(priv, 0xffffffff);
1910 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1914 static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv)
1918 reg = gib_readl(priv, GIB_CONTROL);
1920 if (netdev_uses_dsa(priv->netdev)) {
1926 gib_writel(priv, reg, GIB_CONTROL);
1931 struct bcm_sysport_priv *priv = netdev_priv(dev);
1936 ret = clk_prepare_enable(priv->clk);
1938 netdev_err(dev, "could not enable priv clock\n");
1943 umac_reset(priv);
1946 topctrl_flush(priv);
1949 umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);
1952 rbuf_init(priv);
1955 if (!priv->is_lite)
1956 umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
1958 gib_set_pad_extension(priv);
1966 umac_set_hw_addr(priv, dev->dev_addr);
1968 phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
1969 0, priv->phy_interface);
1980 priv->old_duplex = -1;
1981 priv->old_link = -1;
1982 priv->old_pause = -1;
1985 bcm_sysport_mask_all_intrs(priv);
1987 ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
1993 if (!priv->is_lite) {
1994 ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0,
2003 spin_lock_init(&priv->desc_lock);
2005 ret = bcm_sysport_init_tx_ring(priv, i);
2014 tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);
2017 ret = bcm_sysport_init_rx_ring(priv);
2024 ret = rdma_enable_set(priv, 1);
2029 ret = tdma_enable_set(priv, 1);
2034 umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);
2043 intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
2045 bcm_sysport_fini_rx_ring(priv);
2048 bcm_sysport_fini_tx_ring(priv, i);
2049 if (!priv->is_lite)
2050 free_irq(priv->irq1, dev);
2052 free_irq(priv->irq0, dev);
2056 clk_disable_unprepare(priv->clk);
2062 struct bcm_sysport_priv *priv = netdev_priv(dev);
2066 napi_disable(&priv->napi);
2067 cancel_work_sync(&priv->dim.dim.work);
2071 bcm_sysport_mask_all_intrs(priv);
2076 struct bcm_sysport_priv *priv = netdev_priv(dev);
2083 umac_enable_set(priv, CMD_RX_EN, 0);
2085 ret = tdma_enable_set(priv, 0);
2094 ret = rdma_enable_set(priv, 0);
2101 umac_enable_set(priv, CMD_TX_EN, 0);
2105 bcm_sysport_fini_tx_ring(priv, i);
2106 bcm_sysport_fini_rx_ring(priv);
2108 free_irq(priv->irq0, dev);
2109 if (!priv->is_lite)
2110 free_irq(priv->irq1, dev);
2115 clk_disable_unprepare(priv->clk);
2120 static int bcm_sysport_rule_find(struct bcm_sysport_priv *priv,
2126 for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
2127 reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
2137 static int bcm_sysport_rule_get(struct bcm_sysport_priv *priv,
2143 index = bcm_sysport_rule_find(priv, nfc->fs.location);
2152 static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
2168 index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
2176 reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
2179 rxchk_writel(priv, reg, RXCHK_BRCM_TAG(index));
2180 rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
2182 priv->filters_loc[index] = nfc->fs.location;
2183 set_bit(index, priv->filters);
2188 static int bcm_sysport_rule_del(struct bcm_sysport_priv *priv,
2194 index = bcm_sysport_rule_find(priv, location);
2201 clear_bit(index, priv->filters);
2202 priv->filters_loc[index] = 0;
2210 struct bcm_sysport_priv *priv = netdev_priv(dev);
2215 ret = bcm_sysport_rule_get(priv, nfc);
2227 struct bcm_sysport_priv *priv = netdev_priv(dev);
2232 ret = bcm_sysport_rule_set(priv, nfc);
2235 ret = bcm_sysport_rule_del(priv, nfc->fs.location);
2268 struct bcm_sysport_priv *priv = netdev_priv(dev);
2279 tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
2306 struct bcm_sysport_priv *priv = netdev_priv(dev);
2325 if (priv->is_lite)
2331 if (priv->per_port_num_tx_queues &&
2332 priv->per_port_num_tx_queues != num_tx_queues)
2335 priv->per_port_num_tx_queues = num_tx_queues;
2339 ring = &priv->tx_rings[q];
2350 priv->ring_map[qp + port * num_tx_queues] = ring;
2361 struct bcm_sysport_priv *priv = netdev_priv(dev);
2371 ring = &priv->tx_rings[q];
2381 priv->ring_map[qp + port * num_tx_queues] = NULL;
2392 struct bcm_sysport_priv *priv;
2395 priv = container_of(nb, struct bcm_sysport_priv, netdev_notifier);
2396 if (priv->netdev != dev)
2445 struct bcm_sysport_priv *priv;
2477 dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
2482 priv = netdev_priv(dev);
2484 priv->clk = devm_clk_get_optional(&pdev->dev, "sw_sysport");
2485 if (IS_ERR(priv->clk)) {
2486 ret = PTR_ERR(priv->clk);
2491 priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
2494 if (!priv->tx_rings) {
2499 priv->is_lite = params->is_lite;
2500 priv->num_rx_desc_words = params->num_rx_desc_words;
2502 priv->irq0 = platform_get_irq(pdev, 0);
2503 if (!priv->is_lite) {
2504 priv->irq1 = platform_get_irq(pdev, 1);
2505 priv->wol_irq = platform_get_irq_optional(pdev, 2);
2507 priv->wol_irq = platform_get_irq_optional(pdev, 1);
2509 if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
2514 priv->base = devm_platform_ioremap_resource(pdev, 0);
2515 if (IS_ERR(priv->base)) {
2516 ret = PTR_ERR(priv->base);
2520 priv->netdev = dev;
2521 priv->pdev = pdev;
2523 ret = of_get_phy_mode(dn, &priv->phy_interface);
2526 priv->phy_interface = PHY_INTERFACE_MODE_GMII;
2538 priv->phy_dn = dn;
2552 netif_napi_add(dev, &priv->napi, bcm_sysport_poll);
2562 priv->wol_irq_disabled = 1;
2563 ret = devm_request_irq(&pdev->dev, priv->wol_irq,
2564 bcm_sysport_wol_isr, 0, dev->name, priv);
2568 priv->wol_clk = devm_clk_get_optional(&pdev->dev, "sw_sysportwol");
2569 if (IS_ERR(priv->wol_clk)) {
2570 ret = PTR_ERR(priv->wol_clk);
2581 priv->rx_max_coalesced_frames = 1;
2582 u64_stats_init(&priv->syncp);
2584 priv->netdev_notifier.notifier_call = bcm_sysport_netdevice_event;
2586 ret = register_netdevice_notifier(&priv->netdev_notifier);
2598 ret = clk_prepare_enable(priv->clk);
2600 dev_err(&pdev->dev, "could not enable priv clock\n");
2604 priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
2608 priv->is_lite ? " Lite" : "",
2609 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
2610 priv->irq0, priv->irq1, txq, rxq);
2612 clk_disable_unprepare(priv->clk);
2619 unregister_netdevice_notifier(&priv->netdev_notifier);
2631 struct bcm_sysport_priv *priv = netdev_priv(dev);
2637 unregister_netdevice_notifier(&priv->netdev_notifier);
2645 static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
2647 struct net_device *ndev = priv->netdev;
2652 reg = umac_readl(priv, UMAC_MPD_CTRL);
2653 if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
2656 if (priv->wolopts & WAKE_MAGICSECURE) {
2658 umac_writel(priv, get_unaligned_be16(&priv->sopass[0]),
2660 umac_writel(priv, get_unaligned_be32(&priv->sopass[2]),
2664 umac_writel(priv, reg, UMAC_MPD_CTRL);
2666 if (priv->wolopts & WAKE_FILTER) {
2668 reg = rbuf_readl(priv, RBUF_CONTROL);
2669 if (priv->is_lite)
2673 rbuf_writel(priv, reg, RBUF_CONTROL);
2676 reg = rxchk_readl(priv, RXCHK_CONTROL);
2679 for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
2684 rxchk_writel(priv, reg, RXCHK_CONTROL);
2689 reg = rbuf_readl(priv, RBUF_STATUS);
2698 mpd_enable_set(priv, false);
2699 netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
2704 umac_enable_set(priv, CMD_RX_EN, 1);
2706 netif_dbg(priv, wol, ndev, "entered WOL mode\n");
2714 struct bcm_sysport_priv *priv = netdev_priv(dev);
2729 umac_enable_set(priv, CMD_RX_EN, 0);
2731 ret = rdma_enable_set(priv, 0);
2738 if (priv->rx_chk_en) {
2739 reg = rxchk_readl(priv, RXCHK_CONTROL);
2741 rxchk_writel(priv, reg, RXCHK_CONTROL);
2745 if (!priv->wolopts)
2746 topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
2748 ret = tdma_enable_set(priv, 0);
2757 umac_enable_set(priv, CMD_TX_EN, 0);
2759 topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
2763 bcm_sysport_fini_tx_ring(priv, i);
2764 bcm_sysport_fini_rx_ring(priv);
2767 if (device_may_wakeup(d) && priv->wolopts) {
2768 clk_prepare_enable(priv->wol_clk);
2769 ret = bcm_sysport_suspend_to_wol(priv);
2772 clk_disable_unprepare(priv->clk);
2780 struct bcm_sysport_priv *priv = netdev_priv(dev);
2787 ret = clk_prepare_enable(priv->clk);
2789 netdev_err(dev, "could not enable priv clock\n");
2793 if (priv->wolopts)
2794 clk_disable_unprepare(priv->wol_clk);
2796 umac_reset(priv);
2799 umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);
2804 bcm_sysport_resume_from_wol(priv);
2808 ret = bcm_sysport_init_tx_ring(priv, i);
2817 tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);
2820 ret = bcm_sysport_init_rx_ring(priv);
2827 topctrl_writel(priv, 0, RX_FLUSH_CNTL);
2829 ret = rdma_enable_set(priv, 1);
2838 rbuf_init(priv);
2841 if (!priv->is_lite)
2842 umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
2844 gib_set_pad_extension(priv);
2847 umac_set_hw_addr(priv, dev->dev_addr);
2849 umac_enable_set(priv, CMD_RX_EN, 1);
2852 topctrl_writel(priv, 0, TX_FLUSH_CNTL);
2854 umac_enable_set(priv, CMD_TX_EN, 1);
2856 ret = tdma_enable_set(priv, 1);
2871 bcm_sysport_fini_rx_ring(priv);
2874 bcm_sysport_fini_tx_ring(priv, i);
2875 clk_disable_unprepare(priv->clk);