Lines matching full:eth — identifier search hits from the MediaTek mtk_eth_soc Ethernet driver; each entry gives the source line number, the matching code fragment, and the enclosing function ("argument"/"local" marks how eth is bound at that site).
283 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg) in mtk_w32() argument
285 __raw_writel(val, eth->base + reg); in mtk_w32()
288 u32 mtk_r32(struct mtk_eth *eth, unsigned reg) in mtk_r32() argument
290 return __raw_readl(eth->base + reg); in mtk_r32()
293 u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg) in mtk_m32() argument
297 val = mtk_r32(eth, reg); in mtk_m32()
300 mtk_w32(eth, val, reg); in mtk_m32()
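The mtk_m32() hits above show a read-modify-write helper built on the two raw accessors. A plausible reconstruction from these fragments (the clear-then-set ordering is an assumption):

    u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg)
    {
        u32 val;

        /* read the current value, clear the masked bits, merge in the new ones */
        val = mtk_r32(eth, reg);
        val &= ~mask;
        val |= set;
        /* write the result back and return it to the caller */
        mtk_w32(eth, val, reg);

        return val;
    }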
304 static int mtk_mdio_busy_wait(struct mtk_eth *eth) in mtk_mdio_busy_wait() argument
309 if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS)) in mtk_mdio_busy_wait()
316 dev_err(eth->dev, "mdio: MDIO timeout\n"); in mtk_mdio_busy_wait()
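mtk_mdio_busy_wait() polls the PHY_IAC_ACCESS busy bit until the MDIO state machine goes idle. A minimal sketch consistent with the fragments, assuming a jiffies-based deadline named PHY_IAC_TIMEOUT:

    static int mtk_mdio_busy_wait(struct mtk_eth *eth)
    {
        unsigned long t_start = jiffies;

        while (1) {
            /* bit cleared means the IAC engine is idle again */
            if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
                return 0;
            if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
                break;
            cond_resched();
        }

        dev_err(eth->dev, "mdio: MDIO timeout\n");
        return -ETIMEDOUT;
    }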
320 static int _mtk_mdio_write_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg, in _mtk_mdio_write_c22() argument
325 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_write_c22()
329 mtk_w32(eth, PHY_IAC_ACCESS | in _mtk_mdio_write_c22()
337 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_write_c22()
344 static int _mtk_mdio_write_c45(struct mtk_eth *eth, u32 phy_addr, in _mtk_mdio_write_c45() argument
349 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_write_c45()
353 mtk_w32(eth, PHY_IAC_ACCESS | in _mtk_mdio_write_c45()
361 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_write_c45()
365 mtk_w32(eth, PHY_IAC_ACCESS | in _mtk_mdio_write_c45()
373 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_write_c45()
380 static int _mtk_mdio_read_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg) in _mtk_mdio_read_c22() argument
384 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_read_c22()
388 mtk_w32(eth, PHY_IAC_ACCESS | in _mtk_mdio_read_c22()
395 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_read_c22()
399 return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK; in _mtk_mdio_read_c22()
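The clause-22 read path is wait, program the IAC command register, wait again, then fetch the data. A sketch assuming the PHY_IAC_* field macros from the driver's header:

    static int _mtk_mdio_read_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
    {
        int ret;

        ret = mtk_mdio_busy_wait(eth);
        if (ret < 0)
            return ret;

        /* kick off a clause-22 read for (phy_addr, phy_reg) */
        mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C22 |
            PHY_IAC_CMD_C22_READ | PHY_IAC_REG(phy_reg) |
            PHY_IAC_ADDR(phy_addr), MTK_PHY_IAC);

        ret = mtk_mdio_busy_wait(eth);
        if (ret < 0)
            return ret;

        /* the low bits of MTK_PHY_IAC now hold the read-back data */
        return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
    }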
402 static int _mtk_mdio_read_c45(struct mtk_eth *eth, u32 phy_addr, in _mtk_mdio_read_c45() argument
407 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_read_c45()
411 mtk_w32(eth, PHY_IAC_ACCESS | in _mtk_mdio_read_c45()
419 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_read_c45()
423 mtk_w32(eth, PHY_IAC_ACCESS | in _mtk_mdio_read_c45()
430 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_read_c45()
434 return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK; in _mtk_mdio_read_c45()
440 struct mtk_eth *eth = bus->priv; in mtk_mdio_write_c22() local
442 return _mtk_mdio_write_c22(eth, phy_addr, phy_reg, val); in mtk_mdio_write_c22()
448 struct mtk_eth *eth = bus->priv; in mtk_mdio_write_c45() local
450 return _mtk_mdio_write_c45(eth, phy_addr, devad, phy_reg, val); in mtk_mdio_write_c45()
455 struct mtk_eth *eth = bus->priv; in mtk_mdio_read_c22() local
457 return _mtk_mdio_read_c22(eth, phy_addr, phy_reg); in mtk_mdio_read_c22()
463 struct mtk_eth *eth = bus->priv; in mtk_mdio_read_c45() local
465 return _mtk_mdio_read_c45(eth, phy_addr, devad, phy_reg); in mtk_mdio_read_c45()
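The mii_bus callbacks seen above are thin wrappers that recover the driver context from bus->priv (set when the bus is allocated in mtk_mdio_init()), e.g.:

    static int mtk_mdio_read_c22(struct mii_bus *bus, int phy_addr, int phy_reg)
    {
        struct mtk_eth *eth = bus->priv;

        return _mtk_mdio_read_c22(eth, phy_addr, phy_reg);
    }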
468 static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth, in mt7621_gmac0_rgmii_adjust() argument
476 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0, in mt7621_gmac0_rgmii_adjust()
482 static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, in mtk_gmac0_rgmii_adjust() argument
488 mtk_w32(eth, TRGMII_MODE, INTF_MODE); in mtk_gmac0_rgmii_adjust()
489 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], 500000000); in mtk_gmac0_rgmii_adjust()
491 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret); in mtk_gmac0_rgmii_adjust()
495 dev_err(eth->dev, "Missing PLL configuration, ethernet may not work\n"); in mtk_gmac0_rgmii_adjust()
498 static void mtk_setup_bridge_switch(struct mtk_eth *eth) in mtk_setup_bridge_switch() argument
501 mtk_m32(eth, 0, MTK_XGMAC_FORCE_LINK(MTK_GMAC1_ID), in mtk_setup_bridge_switch()
505 mtk_m32(eth, GSWTX_IPG_MASK | GSWRX_IPG_MASK, in mtk_setup_bridge_switch()
516 struct mtk_eth *eth = mac->hw; in mtk_mac_select_pcs() local
521 sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ? in mtk_mac_select_pcs()
524 return eth->sgmii_pcs[sid]; in mtk_mac_select_pcs()
535 struct mtk_eth *eth = mac->hw; in mtk_mac_config() local
540 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) && in mtk_mac_config()
550 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) { in mtk_mac_config()
551 err = mtk_gmac_rgmii_path_setup(eth, mac->id); in mtk_mac_config()
559 err = mtk_gmac_sgmii_path_setup(eth, mac->id); in mtk_mac_config()
564 if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) { in mtk_mac_config()
565 err = mtk_gmac_gephy_path_setup(eth, mac->id); in mtk_mac_config()
613 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); in mtk_mac_config()
616 regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val); in mtk_mac_config()
627 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); in mtk_mac_config()
629 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0, in mtk_mac_config()
636 dev_err(eth->dev, in mtk_mac_config()
642 if (mtk_is_netsys_v3_or_greater(eth) && in mtk_mac_config()
647 mtk_setup_bridge_switch(eth); in mtk_mac_config()
653 dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__, in mtk_mac_config()
658 dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__, in mtk_mac_config()
667 struct mtk_eth *eth = mac->hw; in mtk_mac_finish() local
673 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0, in mtk_mac_finish()
701 static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx, in mtk_set_queue_speed() argument
704 const struct mtk_soc_data *soc = eth->soc; in mtk_set_queue_speed()
715 if (mtk_is_netsys_v1(eth)) in mtk_set_queue_speed()
767 mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs); in mtk_set_queue_speed()
818 static int mtk_mdio_init(struct mtk_eth *eth) in mtk_mdio_init() argument
825 mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus"); in mtk_mdio_init()
827 dev_err(eth->dev, "no %s child node found", "mdio-bus"); in mtk_mdio_init()
836 eth->mii_bus = devm_mdiobus_alloc(eth->dev); in mtk_mdio_init()
837 if (!eth->mii_bus) { in mtk_mdio_init()
842 eth->mii_bus->name = "mdio"; in mtk_mdio_init()
843 eth->mii_bus->read = mtk_mdio_read_c22; in mtk_mdio_init()
844 eth->mii_bus->write = mtk_mdio_write_c22; in mtk_mdio_init()
845 eth->mii_bus->read_c45 = mtk_mdio_read_c45; in mtk_mdio_init()
846 eth->mii_bus->write_c45 = mtk_mdio_write_c45; in mtk_mdio_init()
847 eth->mii_bus->priv = eth; in mtk_mdio_init()
848 eth->mii_bus->parent = eth->dev; in mtk_mdio_init()
850 snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np); in mtk_mdio_init()
854 dev_err(eth->dev, "MDIO clock frequency out of range"); in mtk_mdio_init()
863 if (mtk_is_netsys_v3_or_greater(eth)) in mtk_mdio_init()
864 mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3); in mtk_mdio_init()
868 if (!mtk_is_netsys_v3_or_greater(eth)) in mtk_mdio_init()
870 mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC); in mtk_mdio_init()
872 dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / divider); in mtk_mdio_init()
874 ret = of_mdiobus_register(eth->mii_bus, mii_np); in mtk_mdio_init()
881 static void mtk_mdio_cleanup(struct mtk_eth *eth) in mtk_mdio_cleanup() argument
883 if (!eth->mii_bus) in mtk_mdio_cleanup()
886 mdiobus_unregister(eth->mii_bus); in mtk_mdio_cleanup()
889 static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask) in mtk_tx_irq_disable() argument
894 spin_lock_irqsave(&eth->tx_irq_lock, flags); in mtk_tx_irq_disable()
895 val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask); in mtk_tx_irq_disable()
896 mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask); in mtk_tx_irq_disable()
897 spin_unlock_irqrestore(&eth->tx_irq_lock, flags); in mtk_tx_irq_disable()
900 static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask) in mtk_tx_irq_enable() argument
905 spin_lock_irqsave(&eth->tx_irq_lock, flags); in mtk_tx_irq_enable()
906 val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask); in mtk_tx_irq_enable()
907 mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask); in mtk_tx_irq_enable()
908 spin_unlock_irqrestore(&eth->tx_irq_lock, flags); in mtk_tx_irq_enable()
911 static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask) in mtk_rx_irq_disable() argument
916 spin_lock_irqsave(&eth->rx_irq_lock, flags); in mtk_rx_irq_disable()
917 val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask); in mtk_rx_irq_disable()
918 mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask); in mtk_rx_irq_disable()
919 spin_unlock_irqrestore(&eth->rx_irq_lock, flags); in mtk_rx_irq_disable()
922 static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask) in mtk_rx_irq_enable() argument
927 spin_lock_irqsave(&eth->rx_irq_lock, flags); in mtk_rx_irq_enable()
928 val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask); in mtk_rx_irq_enable()
929 mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask); in mtk_rx_irq_enable()
930 spin_unlock_irqrestore(&eth->rx_irq_lock, flags); in mtk_rx_irq_enable()
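All four interrupt-mask helpers follow one pattern: take the matching spinlock, read the mask register from the per-SoC register map, set or clear the requested bits, and write it back. Reconstructed from the hits above (the enable variant; the disable variants AND the bits out instead):

    static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
    {
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&eth->rx_irq_lock, flags);
        val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
        mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
        spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
    }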
937 struct mtk_eth *eth = mac->hw; in mtk_set_mac_address() local
947 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_set_mac_address()
968 struct mtk_eth *eth = mac->hw; in mtk_stats_update_mac() local
972 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_stats_update_mac()
980 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_stats_update_mac()
1003 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_stats_update_mac()
1033 static void mtk_stats_update(struct mtk_eth *eth) in mtk_stats_update() argument
1038 if (!eth->mac[i] || !eth->mac[i]->hw_stats) in mtk_stats_update()
1040 if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) { in mtk_stats_update()
1041 mtk_stats_update_mac(eth->mac[i]); in mtk_stats_update()
1042 spin_unlock(&eth->mac[i]->hw_stats->stats_lock); in mtk_stats_update()
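mtk_stats_update() walks the MACs and uses spin_trylock() so a caller racing with another stats reader simply skips this round instead of blocking. A sketch assuming the mac array is MTK_MAX_DEVS entries long:

    static void mtk_stats_update(struct mtk_eth *eth)
    {
        int i;

        for (i = 0; i < MTK_MAX_DEVS; i++) {
            if (!eth->mac[i] || !eth->mac[i]->hw_stats)
                continue;
            if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
                mtk_stats_update_mac(eth->mac[i]);
                spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
            }
        }
    }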
1101 static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd, in mtk_rx_get_desc() argument
1111 if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_rx_get_desc()
1131 static int mtk_init_fq_dma(struct mtk_eth *eth) in mtk_init_fq_dma() argument
1133 const struct mtk_soc_data *soc = eth->soc; in mtk_init_fq_dma()
1139 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) in mtk_init_fq_dma()
1140 eth->scratch_ring = eth->sram_base; in mtk_init_fq_dma()
1142 eth->scratch_ring = dma_alloc_coherent(eth->dma_dev, in mtk_init_fq_dma()
1144 &eth->phy_scratch_ring, in mtk_init_fq_dma()
1146 if (unlikely(!eth->scratch_ring)) in mtk_init_fq_dma()
1149 eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL); in mtk_init_fq_dma()
1150 if (unlikely(!eth->scratch_head)) in mtk_init_fq_dma()
1153 dma_addr = dma_map_single(eth->dma_dev, in mtk_init_fq_dma()
1154 eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE, in mtk_init_fq_dma()
1156 if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) in mtk_init_fq_dma()
1159 phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1); in mtk_init_fq_dma()
1165 txd = eth->scratch_ring + i * soc->txrx.txd_size; in mtk_init_fq_dma()
1168 txd->txd2 = eth->phy_scratch_ring + in mtk_init_fq_dma()
1175 if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_init_fq_dma()
1183 mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head); in mtk_init_fq_dma()
1184 mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail); in mtk_init_fq_dma()
1185 mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count); in mtk_init_fq_dma()
1186 mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen); in mtk_init_fq_dma()
1215 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf, in mtk_tx_unmap() argument
1218 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_tx_unmap()
1220 dma_unmap_single(eth->dma_dev, in mtk_tx_unmap()
1225 dma_unmap_page(eth->dma_dev, in mtk_tx_unmap()
1232 dma_unmap_page(eth->dma_dev, in mtk_tx_unmap()
1239 dma_unmap_page(eth->dma_dev, in mtk_tx_unmap()
1269 static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf, in setup_tx_buf() argument
1273 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in setup_tx_buf()
1296 struct mtk_eth *eth = mac->hw; in mtk_tx_set_dma_desc_v1() local
1327 struct mtk_eth *eth = mac->hw; in mtk_tx_set_dma_desc_v2() local
1336 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) in mtk_tx_set_dma_desc_v2()
1364 if (mtk_is_netsys_v3_or_greater(eth) && netdev_uses_dsa(dev)) in mtk_tx_set_dma_desc_v2()
1382 struct mtk_eth *eth = mac->hw; in mtk_tx_set_dma_desc() local
1384 if (mtk_is_netsys_v2_or_greater(eth)) in mtk_tx_set_dma_desc()
1405 struct mtk_eth *eth = mac->hw; in mtk_tx_map() local
1406 const struct mtk_soc_data *soc = eth->soc; in mtk_tx_map()
1423 txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size, in mtk_tx_map()
1425 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr))) in mtk_tx_map()
1432 setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size, in mtk_tx_map()
1465 txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag, in mtk_tx_map()
1468 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr))) in mtk_tx_map()
1481 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr, in mtk_tx_map()
1513 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr); in mtk_tx_map()
1519 mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0); in mtk_tx_map()
1529 mtk_tx_unmap(eth, tx_buf, NULL, false); in mtk_tx_map()
1542 static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb) in mtk_cal_txd_req() argument
1551 eth->soc->txrx.dma_max_len); in mtk_cal_txd_req()
1560 static int mtk_queue_stopped(struct mtk_eth *eth) in mtk_queue_stopped() argument
1565 if (!eth->netdev[i]) in mtk_queue_stopped()
1567 if (netif_queue_stopped(eth->netdev[i])) in mtk_queue_stopped()
1574 static void mtk_wake_queue(struct mtk_eth *eth) in mtk_wake_queue() argument
1579 if (!eth->netdev[i]) in mtk_wake_queue()
1581 netif_tx_wake_all_queues(eth->netdev[i]); in mtk_wake_queue()
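Both queue helpers iterate every registered net_device; a sketch of the wake side (loop bound assumed to be MTK_MAX_DEVS, matching the stats loop):

    static void mtk_wake_queue(struct mtk_eth *eth)
    {
        int i;

        for (i = 0; i < MTK_MAX_DEVS; i++) {
            if (!eth->netdev[i])
                continue;
            netif_tx_wake_all_queues(eth->netdev[i]);
        }
    }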
1588 struct mtk_eth *eth = mac->hw; in mtk_start_xmit() local
1589 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_start_xmit()
1598 spin_lock(&eth->page_lock); in mtk_start_xmit()
1600 if (unlikely(test_bit(MTK_RESETTING, &eth->state))) in mtk_start_xmit()
1603 tx_num = mtk_cal_txd_req(eth, skb); in mtk_start_xmit()
1606 netif_err(eth, tx_queued, dev, in mtk_start_xmit()
1608 spin_unlock(&eth->page_lock); in mtk_start_xmit()
1615 netif_warn(eth, tx_err, dev, in mtk_start_xmit()
1633 spin_unlock(&eth->page_lock); in mtk_start_xmit()
1638 spin_unlock(&eth->page_lock); in mtk_start_xmit()
1644 static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth) in mtk_get_rx_ring() argument
1650 if (!eth->hwlro) in mtk_get_rx_ring()
1651 return &eth->rx_ring[0]; in mtk_get_rx_ring()
1656 ring = &eth->rx_ring[i]; in mtk_get_rx_ring()
1658 rxd = ring->dma + idx * eth->soc->txrx.rxd_size; in mtk_get_rx_ring()
1668 static void mtk_update_rx_cpu_idx(struct mtk_eth *eth) in mtk_update_rx_cpu_idx() argument
1673 if (!eth->hwlro) { in mtk_update_rx_cpu_idx()
1674 ring = &eth->rx_ring[0]; in mtk_update_rx_cpu_idx()
1675 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); in mtk_update_rx_cpu_idx()
1678 ring = &eth->rx_ring[i]; in mtk_update_rx_cpu_idx()
1681 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); in mtk_update_rx_cpu_idx()
1687 static bool mtk_page_pool_enabled(struct mtk_eth *eth) in mtk_page_pool_enabled() argument
1689 return mtk_is_netsys_v2_or_greater(eth); in mtk_page_pool_enabled()
1692 static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth, in mtk_create_page_pool() argument
1701 .dev = eth->dma_dev, in mtk_create_page_pool()
1708 pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL in mtk_create_page_pool()
1714 err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, id, in mtk_create_page_pool()
1715 eth->rx_napi.napi_id, PAGE_SIZE); in mtk_create_page_pool()
1755 static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev, in mtk_xdp_frame_map() argument
1760 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_xdp_frame_map()
1765 txd_info->addr = dma_map_single(eth->dma_dev, data, in mtk_xdp_frame_map()
1767 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr))) in mtk_xdp_frame_map()
1776 dma_sync_single_for_device(eth->dma_dev, txd_info->addr, in mtk_xdp_frame_map()
1786 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size, in mtk_xdp_frame_map()
1792 static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf, in mtk_xdp_submit_frame() argument
1796 const struct mtk_soc_data *soc = eth->soc; in mtk_xdp_submit_frame()
1797 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_xdp_submit_frame()
1810 if (unlikely(test_bit(MTK_RESETTING, &eth->state))) in mtk_xdp_submit_frame()
1817 spin_lock(&eth->page_lock); in mtk_xdp_submit_frame()
1821 spin_unlock(&eth->page_lock); in mtk_xdp_submit_frame()
1831 err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf, in mtk_xdp_submit_frame()
1879 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr); in mtk_xdp_submit_frame()
1884 mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size), in mtk_xdp_submit_frame()
1888 spin_unlock(&eth->page_lock); in mtk_xdp_submit_frame()
1895 mtk_tx_unmap(eth, tx_buf, NULL, false); in mtk_xdp_submit_frame()
1907 spin_unlock(&eth->page_lock); in mtk_xdp_submit_frame()
1917 struct mtk_eth *eth = mac->hw; in mtk_xdp_xmit() local
1924 if (mtk_xdp_submit_frame(eth, frames[i], dev, true)) in mtk_xdp_xmit()
1937 static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring, in mtk_xdp_run() argument
1948 prog = rcu_dereference(eth->prog); in mtk_xdp_run()
1968 if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) { in mtk_xdp_run()
2001 struct mtk_eth *eth) in mtk_poll_rx() argument
2020 ring = mtk_get_rx_ring(eth); in mtk_poll_rx()
2025 rxd = ring->dma + idx * eth->soc->txrx.rxd_size; in mtk_poll_rx()
2028 if (!mtk_rx_get_desc(eth, &trxd, rxd)) in mtk_poll_rx()
2032 if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_poll_rx()
2046 } else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) && in mtk_poll_rx()
2052 !eth->netdev[mac])) in mtk_poll_rx()
2055 netdev = eth->netdev[mac]; in mtk_poll_rx()
2057 if (unlikely(test_bit(MTK_RESETTING, &eth->state))) in mtk_poll_rx()
2076 dma_sync_single_for_cpu(eth->dma_dev, in mtk_poll_rx()
2085 ret = mtk_xdp_run(eth, ring, &xdp, netdev); in mtk_poll_rx()
2114 dma_addr = dma_map_single(eth->dma_dev, in mtk_poll_rx()
2115 new_data + NET_SKB_PAD + eth->ip_align, in mtk_poll_rx()
2117 if (unlikely(dma_mapping_error(eth->dma_dev, in mtk_poll_rx()
2124 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) in mtk_poll_rx()
2127 dma_unmap_single(eth->dma_dev, ((u64)trxd.rxd1 | addr64), in mtk_poll_rx()
2144 if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_poll_rx()
2160 if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid) in mtk_poll_rx()
2169 if (mtk_is_netsys_v1(eth) && (trxd.rxd2 & RX_DMA_VTAG) && in mtk_poll_rx()
2173 if (port < ARRAY_SIZE(eth->dsa_meta) && in mtk_poll_rx()
2174 eth->dsa_meta[port]) in mtk_poll_rx()
2175 skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst); in mtk_poll_rx()
2179 mtk_ppe_check_skb(eth->ppe[0], skb, hash); in mtk_poll_rx()
2188 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_poll_rx()
2193 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA) && in mtk_poll_rx()
2207 mtk_update_rx_cpu_idx(eth); in mtk_poll_rx()
2210 eth->rx_packets += done; in mtk_poll_rx()
2211 eth->rx_bytes += bytes; in mtk_poll_rx()
2212 dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes, in mtk_poll_rx()
2214 net_dim(&eth->rx_dim, dim_sample); in mtk_poll_rx()
2230 mtk_poll_tx_done(struct mtk_eth *eth, struct mtk_poll_state *state, u8 mac, in mtk_poll_tx_done() argument
2238 eth->tx_packets++; in mtk_poll_tx_done()
2239 eth->tx_bytes += bytes; in mtk_poll_tx_done()
2241 dev = eth->netdev[mac]; in mtk_poll_tx_done()
2260 static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget, in mtk_poll_tx_qdma() argument
2263 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_poll_tx_qdma()
2264 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_poll_tx_qdma()
2271 dma = mtk_r32(eth, reg_map->qdma.drx_ptr); in mtk_poll_tx_qdma()
2284 eth->soc->txrx.txd_size); in mtk_poll_tx_qdma()
2290 mtk_poll_tx_done(eth, state, tx_buf->mac_id, in mtk_poll_tx_qdma()
2295 mtk_tx_unmap(eth, tx_buf, &bq, true); in mtk_poll_tx_qdma()
2305 mtk_w32(eth, cpu, reg_map->qdma.crx_ptr); in mtk_poll_tx_qdma()
2310 static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget, in mtk_poll_tx_pdma() argument
2313 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_poll_tx_pdma()
2320 dma = mtk_r32(eth, MT7628_TX_DTX_IDX0); in mtk_poll_tx_pdma()
2330 mtk_poll_tx_done(eth, state, 0, tx_buf->data); in mtk_poll_tx_pdma()
2333 mtk_tx_unmap(eth, tx_buf, &bq, true); in mtk_poll_tx_pdma()
2335 desc = ring->dma + cpu * eth->soc->txrx.txd_size; in mtk_poll_tx_pdma()
2348 static int mtk_poll_tx(struct mtk_eth *eth, int budget) in mtk_poll_tx() argument
2350 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_poll_tx()
2354 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_poll_tx()
2355 budget = mtk_poll_tx_qdma(eth, budget, &state); in mtk_poll_tx()
2357 budget = mtk_poll_tx_pdma(eth, budget, &state); in mtk_poll_tx()
2362 dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes, in mtk_poll_tx()
2364 net_dim(&eth->tx_dim, dim_sample); in mtk_poll_tx()
2366 if (mtk_queue_stopped(eth) && in mtk_poll_tx()
2368 mtk_wake_queue(eth); in mtk_poll_tx()
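mtk_poll_tx() dispatches to the QDMA or PDMA reclaim loop, feeds the DIM engine, and wakes stopped queues once enough descriptors are free. A sketch; the mtk_poll_state fields and the free-count threshold are assumptions, as they are not visible in these hits:

    static int mtk_poll_tx(struct mtk_eth *eth, int budget)
    {
        struct mtk_tx_ring *ring = &eth->tx_ring;
        struct dim_sample dim_sample = {};
        struct mtk_poll_state state = {};

        if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
            budget = mtk_poll_tx_qdma(eth, budget, &state);
        else
            budget = mtk_poll_tx_pdma(eth, budget, &state);

        /* feed the interrupt-moderation engine */
        dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
                          &dim_sample);
        net_dim(&eth->tx_dim, dim_sample);

        if (mtk_queue_stopped(eth) &&
            atomic_read(&ring->free_count) > ring->thresh)
            mtk_wake_queue(eth);

        return state.total;
    }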
2373 static void mtk_handle_status_irq(struct mtk_eth *eth) in mtk_handle_status_irq() argument
2375 u32 status2 = mtk_r32(eth, MTK_INT_STATUS2); in mtk_handle_status_irq()
2378 mtk_stats_update(eth); in mtk_handle_status_irq()
2379 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF), in mtk_handle_status_irq()
2386 struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi); in mtk_napi_tx() local
2387 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_napi_tx()
2390 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_napi_tx()
2391 mtk_handle_status_irq(eth); in mtk_napi_tx()
2392 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status); in mtk_napi_tx()
2393 tx_done = mtk_poll_tx(eth, budget); in mtk_napi_tx()
2395 if (unlikely(netif_msg_intr(eth))) { in mtk_napi_tx()
2396 dev_info(eth->dev, in mtk_napi_tx()
2398 mtk_r32(eth, reg_map->tx_irq_status), in mtk_napi_tx()
2399 mtk_r32(eth, reg_map->tx_irq_mask)); in mtk_napi_tx()
2405 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT) in mtk_napi_tx()
2409 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); in mtk_napi_tx()
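The TX NAPI handler shows the usual ack / poll / re-check / complete sequence: the interrupt status is acknowledged before polling, and the IRQ is only unmasked after napi_complete_done() confirms no work is pending. A sketch omitting the netif_msg_intr() debug print:

    static int mtk_napi_tx(struct napi_struct *napi, int budget)
    {
        struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
        const struct mtk_reg_map *reg_map = eth->soc->reg_map;
        int tx_done;

        if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
            mtk_handle_status_irq(eth);
        mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
        tx_done = mtk_poll_tx(eth, budget);

        if (tx_done == budget)
            return budget;      /* stay in polled mode */

        /* more TX completions arrived while polling: keep going */
        if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
            return budget;

        if (napi_complete_done(napi, tx_done))
            mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);

        return tx_done;
    }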
2416 struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi); in mtk_napi_rx() local
2417 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_napi_rx()
2420 mtk_handle_status_irq(eth); in mtk_napi_rx()
2425 mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, in mtk_napi_rx()
2427 rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth); in mtk_napi_rx()
2430 if (unlikely(netif_msg_intr(eth))) { in mtk_napi_rx()
2431 dev_info(eth->dev, in mtk_napi_rx()
2433 mtk_r32(eth, reg_map->pdma.irq_status), in mtk_napi_rx()
2434 mtk_r32(eth, reg_map->pdma.irq_mask)); in mtk_napi_rx()
2440 } while (mtk_r32(eth, reg_map->pdma.irq_status) & in mtk_napi_rx()
2441 eth->soc->txrx.rx_irq_done_mask); in mtk_napi_rx()
2444 mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask); in mtk_napi_rx()
2449 static int mtk_tx_alloc(struct mtk_eth *eth) in mtk_tx_alloc() argument
2451 const struct mtk_soc_data *soc = eth->soc; in mtk_tx_alloc()
2452 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_tx_alloc()
2469 ring->dma = eth->sram_base + ring_size * sz; in mtk_tx_alloc()
2470 ring->phys = eth->phy_scratch_ring + ring_size * (dma_addr_t)sz; in mtk_tx_alloc()
2472 ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz, in mtk_tx_alloc()
2487 if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_tx_alloc()
2500 ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz, in mtk_tx_alloc()
2524 mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr); in mtk_tx_alloc()
2525 mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr); in mtk_tx_alloc()
2526 mtk_w32(eth, in mtk_tx_alloc()
2529 mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr); in mtk_tx_alloc()
2533 mtk_w32(eth, val, soc->reg_map->qdma.qtx_cfg + ofs); in mtk_tx_alloc()
2540 if (mtk_is_netsys_v1(eth)) in mtk_tx_alloc()
2542 mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs); in mtk_tx_alloc()
2546 mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate); in mtk_tx_alloc()
2547 if (mtk_is_netsys_v2_or_greater(eth)) in mtk_tx_alloc()
2548 mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4); in mtk_tx_alloc()
2550 mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0); in mtk_tx_alloc()
2551 mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0); in mtk_tx_alloc()
2552 mtk_w32(eth, 0, MT7628_TX_CTX_IDX0); in mtk_tx_alloc()
2553 mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx); in mtk_tx_alloc()
2562 static void mtk_tx_clean(struct mtk_eth *eth) in mtk_tx_clean() argument
2564 const struct mtk_soc_data *soc = eth->soc; in mtk_tx_clean()
2565 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_tx_clean()
2570 mtk_tx_unmap(eth, &ring->buf[i], NULL, false); in mtk_tx_clean()
2575 dma_free_coherent(eth->dma_dev, in mtk_tx_clean()
2582 dma_free_coherent(eth->dma_dev, in mtk_tx_clean()
2589 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) in mtk_rx_alloc() argument
2591 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_rx_alloc()
2596 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_rx_alloc()
2604 ring = &eth->rx_ring_qdma; in mtk_rx_alloc()
2606 ring = &eth->rx_ring[ring_no]; in mtk_rx_alloc()
2624 if (mtk_page_pool_enabled(eth)) { in mtk_rx_alloc()
2627 pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no, in mtk_rx_alloc()
2635 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) || in mtk_rx_alloc()
2637 ring->dma = dma_alloc_coherent(eth->dma_dev, in mtk_rx_alloc()
2638 rx_dma_size * eth->soc->txrx.rxd_size, in mtk_rx_alloc()
2641 struct mtk_tx_ring *tx_ring = &eth->tx_ring; in mtk_rx_alloc()
2644 eth->soc->txrx.txd_size * (ring_no + 1); in mtk_rx_alloc()
2646 eth->soc->txrx.txd_size * (ring_no + 1); in mtk_rx_alloc()
2657 rxd = ring->dma + i * eth->soc->txrx.rxd_size; in mtk_rx_alloc()
2672 dma_addr = dma_map_single(eth->dma_dev, in mtk_rx_alloc()
2673 data + NET_SKB_PAD + eth->ip_align, in mtk_rx_alloc()
2675 if (unlikely(dma_mapping_error(eth->dma_dev, in mtk_rx_alloc()
2684 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_rx_alloc()
2689 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) in mtk_rx_alloc()
2694 if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_rx_alloc()
2717 mtk_w32(eth, ring->phys, in mtk_rx_alloc()
2719 mtk_w32(eth, rx_dma_size, in mtk_rx_alloc()
2721 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), in mtk_rx_alloc()
2724 mtk_w32(eth, ring->phys, in mtk_rx_alloc()
2726 mtk_w32(eth, rx_dma_size, in mtk_rx_alloc()
2728 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), in mtk_rx_alloc()
2731 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); in mtk_rx_alloc()
2736 static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_sram) in mtk_rx_clean() argument
2748 rxd = ring->dma + i * eth->soc->txrx.rxd_size; in mtk_rx_clean()
2752 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) in mtk_rx_clean()
2755 dma_unmap_single(eth->dma_dev, ((u64)rxd->rxd1 | addr64), in mtk_rx_clean()
2764 dma_free_coherent(eth->dma_dev, in mtk_rx_clean()
2765 ring->dma_size * eth->soc->txrx.rxd_size, in mtk_rx_clean()
2778 static int mtk_hwlro_rx_init(struct mtk_eth *eth) in mtk_hwlro_rx_init() argument
2802 mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i)); in mtk_hwlro_rx_init()
2803 mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i)); in mtk_hwlro_rx_init()
2804 mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i)); in mtk_hwlro_rx_init()
2814 mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2); in mtk_hwlro_rx_init()
2817 mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA); in mtk_hwlro_rx_init()
2820 mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME, in mtk_hwlro_rx_init()
2832 mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3); in mtk_hwlro_rx_init()
2833 mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0); in mtk_hwlro_rx_init()
2838 static void mtk_hwlro_rx_uninit(struct mtk_eth *eth) in mtk_hwlro_rx_uninit() argument
2844 mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0); in mtk_hwlro_rx_uninit()
2848 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0); in mtk_hwlro_rx_uninit()
2858 mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i)); in mtk_hwlro_rx_uninit()
2861 mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0); in mtk_hwlro_rx_uninit()
2864 static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip) in mtk_hwlro_val_ipaddr() argument
2868 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_val_ipaddr()
2871 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_val_ipaddr()
2873 mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx)); in mtk_hwlro_val_ipaddr()
2876 mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_val_ipaddr()
2879 static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx) in mtk_hwlro_inval_ipaddr() argument
2883 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_inval_ipaddr()
2886 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_inval_ipaddr()
2888 mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx)); in mtk_hwlro_inval_ipaddr()
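The invalidate helper mirrors the validate one: drop the MYIP valid bit first so the LRO ring stops matching, then wipe the stored address. Reconstructed directly from the hits above:

    static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
    {
        u32 reg_val;

        reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));

        /* invalidate the ring's IP setting, then clear the address */
        mtk_w32(eth, reg_val & ~MTK_RING_MYIP_VLD, MTK_LRO_CTRL_DW2_CFG(idx));
        mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
    }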
2910 struct mtk_eth *eth = mac->hw; in mtk_hwlro_add_ipaddr() local
2923 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]); in mtk_hwlro_add_ipaddr()
2934 struct mtk_eth *eth = mac->hw; in mtk_hwlro_del_ipaddr() local
2945 mtk_hwlro_inval_ipaddr(eth, hwlro_idx); in mtk_hwlro_del_ipaddr()
2953 struct mtk_eth *eth = mac->hw; in mtk_hwlro_netdev_disable() local
2960 mtk_hwlro_inval_ipaddr(eth, hwlro_idx); in mtk_hwlro_netdev_disable()
3044 static int mtk_dma_busy_wait(struct mtk_eth *eth) in mtk_dma_busy_wait() argument
3050 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_dma_busy_wait()
3051 reg = eth->soc->reg_map->qdma.glo_cfg; in mtk_dma_busy_wait()
3053 reg = eth->soc->reg_map->pdma.glo_cfg; in mtk_dma_busy_wait()
3055 ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val, in mtk_dma_busy_wait()
3059 dev_err(eth->dev, "DMA init timeout\n"); in mtk_dma_busy_wait()
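mtk_dma_busy_wait() picks the QDMA or PDMA global-config register and spins on it atomically. A sketch; the busy-flag names and the timeout constant are assumptions:

    static int mtk_dma_busy_wait(struct mtk_eth *eth)
    {
        unsigned int reg;
        int ret;
        u32 val;

        if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
            reg = eth->soc->reg_map->qdma.glo_cfg;
        else
            reg = eth->soc->reg_map->pdma.glo_cfg;

        /* poll until both RX and TX busy flags drop */
        ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
                                        !(val & (MTK_RX_DMA_BUSY |
                                                 MTK_TX_DMA_BUSY)),
                                        5, MTK_DMA_BUSY_TIMEOUT_US);
        if (ret)
            dev_err(eth->dev, "DMA init timeout\n");

        return ret;
    }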
3064 static int mtk_dma_init(struct mtk_eth *eth) in mtk_dma_init() argument
3069 if (mtk_dma_busy_wait(eth)) in mtk_dma_init()
3072 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_dma_init()
3076 err = mtk_init_fq_dma(eth); in mtk_dma_init()
3081 err = mtk_tx_alloc(eth); in mtk_dma_init()
3085 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_dma_init()
3086 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA); in mtk_dma_init()
3091 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL); in mtk_dma_init()
3095 if (eth->hwlro) { in mtk_dma_init()
3097 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO); in mtk_dma_init()
3101 err = mtk_hwlro_rx_init(eth); in mtk_dma_init()
3106 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_dma_init()
3110 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | in mtk_dma_init()
3111 FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th); in mtk_dma_init()
3112 mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred); in mtk_dma_init()
3118 static void mtk_dma_free(struct mtk_eth *eth) in mtk_dma_free() argument
3120 const struct mtk_soc_data *soc = eth->soc; in mtk_dma_free()
3124 if (eth->netdev[i]) in mtk_dma_free()
3125 netdev_reset_queue(eth->netdev[i]); in mtk_dma_free()
3126 if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) { in mtk_dma_free()
3127 dma_free_coherent(eth->dma_dev, in mtk_dma_free()
3129 eth->scratch_ring, eth->phy_scratch_ring); in mtk_dma_free()
3130 eth->scratch_ring = NULL; in mtk_dma_free()
3131 eth->phy_scratch_ring = 0; in mtk_dma_free()
3133 mtk_tx_clean(eth); in mtk_dma_free()
3134 mtk_rx_clean(eth, &eth->rx_ring[0], MTK_HAS_CAPS(soc->caps, MTK_SRAM)); in mtk_dma_free()
3135 mtk_rx_clean(eth, &eth->rx_ring_qdma, false); in mtk_dma_free()
3137 if (eth->hwlro) { in mtk_dma_free()
3138 mtk_hwlro_rx_uninit(eth); in mtk_dma_free()
3140 mtk_rx_clean(eth, &eth->rx_ring[i], false); in mtk_dma_free()
3143 kfree(eth->scratch_head); in mtk_dma_free()
3146 static bool mtk_hw_reset_check(struct mtk_eth *eth) in mtk_hw_reset_check() argument
3148 u32 val = mtk_r32(eth, MTK_INT_STATUS2); in mtk_hw_reset_check()
3158 struct mtk_eth *eth = mac->hw; in mtk_tx_timeout() local
3160 if (test_bit(MTK_RESETTING, &eth->state)) in mtk_tx_timeout()
3163 if (!mtk_hw_reset_check(eth)) in mtk_tx_timeout()
3166 eth->netdev[mac->id]->stats.tx_errors++; in mtk_tx_timeout()
3167 netif_err(eth, tx_err, dev, "transmit timed out\n"); in mtk_tx_timeout()
3169 schedule_work(&eth->pending_work); in mtk_tx_timeout()
3174 struct mtk_eth *eth = _eth; in mtk_handle_irq_rx() local
3176 eth->rx_events++; in mtk_handle_irq_rx()
3177 if (likely(napi_schedule_prep(&eth->rx_napi))) { in mtk_handle_irq_rx()
3178 mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask); in mtk_handle_irq_rx()
3179 __napi_schedule(&eth->rx_napi); in mtk_handle_irq_rx()
3187 struct mtk_eth *eth = _eth; in mtk_handle_irq_tx() local
3189 eth->tx_events++; in mtk_handle_irq_tx()
3190 if (likely(napi_schedule_prep(&eth->tx_napi))) { in mtk_handle_irq_tx()
3191 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); in mtk_handle_irq_tx()
3192 __napi_schedule(&eth->tx_napi); in mtk_handle_irq_tx()
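Both hard-IRQ handlers use the standard mask-then-schedule NAPI pattern visible in the fragments; the TX handler is identical modulo tx_napi and MTK_TX_DONE_INT. The RX side, reconstructed:

    static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
    {
        struct mtk_eth *eth = _eth;

        eth->rx_events++;
        if (likely(napi_schedule_prep(&eth->rx_napi))) {
            /* mask RX-done interrupts until NAPI re-enables them */
            mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
            __napi_schedule(&eth->rx_napi);
        }

        return IRQ_HANDLED;
    }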
3200 struct mtk_eth *eth = _eth; in mtk_handle_irq() local
3201 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_handle_irq()
3203 if (mtk_r32(eth, reg_map->pdma.irq_mask) & in mtk_handle_irq()
3204 eth->soc->txrx.rx_irq_done_mask) { in mtk_handle_irq()
3205 if (mtk_r32(eth, reg_map->pdma.irq_status) & in mtk_handle_irq()
3206 eth->soc->txrx.rx_irq_done_mask) in mtk_handle_irq()
3209 if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) { in mtk_handle_irq()
3210 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT) in mtk_handle_irq()
3221 struct mtk_eth *eth = mac->hw; in mtk_poll_controller() local
3223 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); in mtk_poll_controller()
3224 mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask); in mtk_poll_controller()
3225 mtk_handle_irq_rx(eth->irq[2], dev); in mtk_poll_controller()
3226 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); in mtk_poll_controller()
3227 mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask); in mtk_poll_controller()
3231 static int mtk_start_dma(struct mtk_eth *eth) in mtk_start_dma() argument
3234 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_start_dma()
3237 err = mtk_dma_init(eth); in mtk_start_dma()
3239 mtk_dma_free(eth); in mtk_start_dma()
3243 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_start_dma()
3244 val = mtk_r32(eth, reg_map->qdma.glo_cfg); in mtk_start_dma()
3249 if (mtk_is_netsys_v2_or_greater(eth)) in mtk_start_dma()
3255 mtk_w32(eth, val, reg_map->qdma.glo_cfg); in mtk_start_dma()
3257 mtk_w32(eth, in mtk_start_dma()
3262 mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN | in mtk_start_dma()
3270 static void mtk_gdm_config(struct mtk_eth *eth, u32 config) in mtk_gdm_config() argument
3274 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_gdm_config()
3280 if (!eth->netdev[i]) in mtk_gdm_config()
3283 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i)); in mtk_gdm_config()
3293 if (netdev_uses_dsa(eth->netdev[i])) in mtk_gdm_config()
3296 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i)); in mtk_gdm_config()
3299 mtk_w32(eth, RST_GL_PSE, MTK_RST_GL); in mtk_gdm_config()
3300 mtk_w32(eth, 0, MTK_RST_GL); in mtk_gdm_config()
3317 struct mtk_eth *eth = mac->hw; in mtk_device_event() local
3351 mtk_set_queue_speed(eth, dp->index + 3, s.base.speed); in mtk_device_event()
3359 struct mtk_eth *eth = mac->hw; in mtk_open() local
3370 if (!refcount_read(&eth->dma_refcnt)) { in mtk_open()
3371 const struct mtk_soc_data *soc = eth->soc; in mtk_open()
3375 err = mtk_start_dma(eth); in mtk_open()
3381 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) in mtk_open()
3382 mtk_ppe_start(eth->ppe[i]); in mtk_open()
3386 mtk_gdm_config(eth, gdm_config); in mtk_open()
3388 napi_enable(&eth->tx_napi); in mtk_open()
3389 napi_enable(&eth->rx_napi); in mtk_open()
3390 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); in mtk_open()
3391 mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask); in mtk_open()
3392 refcount_set(&eth->dma_refcnt, 1); in mtk_open()
3395 refcount_inc(&eth->dma_refcnt); in mtk_open()
3400 if (mtk_is_netsys_v2_or_greater(eth)) in mtk_open()
3403 if (mtk_uses_dsa(dev) && !eth->prog) { in mtk_open()
3404 for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) { in mtk_open()
3405 struct metadata_dst *md_dst = eth->dsa_meta[i]; in mtk_open()
3416 eth->dsa_meta[i] = md_dst; in mtk_open()
3422 u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL); in mtk_open()
3425 mtk_w32(eth, val, MTK_CDMP_IG_CTRL); in mtk_open()
3427 mtk_w32(eth, 0, MTK_CDMP_EG_CTRL); in mtk_open()
3433 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg) in mtk_stop_dma() argument
3439 spin_lock_bh(&eth->page_lock); in mtk_stop_dma()
3440 val = mtk_r32(eth, glo_cfg); in mtk_stop_dma()
3441 mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN), in mtk_stop_dma()
3443 spin_unlock_bh(&eth->page_lock); in mtk_stop_dma()
3447 val = mtk_r32(eth, glo_cfg); in mtk_stop_dma()
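mtk_stop_dma() clears the enable bits under page_lock so the xmit path cannot race the write, then polls the busy bits until in-flight DMA drains. A sketch; the retry count and sleep interval are assumptions:

    static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
    {
        u32 val;
        int i;

        spin_lock_bh(&eth->page_lock);
        val = mtk_r32(eth, glo_cfg);
        mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
                glo_cfg);
        spin_unlock_bh(&eth->page_lock);

        /* wait for the engine to drain */
        for (i = 0; i < 10; i++) {
            val = mtk_r32(eth, glo_cfg);
            if (!(val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)))
                break;
            msleep(20);
        }
    }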
3459 struct mtk_eth *eth = mac->hw; in mtk_stop() local
3469 if (!refcount_dec_and_test(&eth->dma_refcnt)) in mtk_stop()
3472 mtk_gdm_config(eth, MTK_GDMA_DROP_ALL); in mtk_stop()
3474 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); in mtk_stop()
3475 mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask); in mtk_stop()
3476 napi_disable(&eth->tx_napi); in mtk_stop()
3477 napi_disable(&eth->rx_napi); in mtk_stop()
3479 cancel_work_sync(&eth->rx_dim.work); in mtk_stop()
3480 cancel_work_sync(&eth->tx_dim.work); in mtk_stop()
3482 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_stop()
3483 mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg); in mtk_stop()
3484 mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg); in mtk_stop()
3486 mtk_dma_free(eth); in mtk_stop()
3488 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) in mtk_stop()
3489 mtk_ppe_stop(eth->ppe[i]); in mtk_stop()
3498 struct mtk_eth *eth = mac->hw; in mtk_xdp_setup() local
3502 if (eth->hwlro) { in mtk_xdp_setup()
3512 need_update = !!eth->prog != !!prog; in mtk_xdp_setup()
3516 old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held()); in mtk_xdp_setup()
3536 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits) in ethsys_reset() argument
3538 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, in ethsys_reset()
3543 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, in ethsys_reset()
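ethsys_reset() asserts and then releases the requested reset lines through the ethsys syscon regmap. A sketch; the delay values are assumptions:

    static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
    {
        /* assert ... */
        regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
                           reset_bits, reset_bits);
        usleep_range(1000, 1100);
        /* ... then release the reset lines and let the block settle */
        regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
                           reset_bits, ~reset_bits);
        mdelay(10);
    }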
3549 static void mtk_clk_disable(struct mtk_eth *eth) in mtk_clk_disable() argument
3554 clk_disable_unprepare(eth->clks[clk]); in mtk_clk_disable()
3557 static int mtk_clk_enable(struct mtk_eth *eth) in mtk_clk_enable() argument
3562 ret = clk_prepare_enable(eth->clks[clk]); in mtk_clk_enable()
3571 clk_disable_unprepare(eth->clks[clk]); in mtk_clk_enable()
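mtk_clk_enable() enables the whole clock table and unwinds in reverse on failure, which is why mtk_clk_disable() can simply walk the table. Reconstructed from the fragments (loop bound assumed to be MTK_CLK_MAX):

    static int mtk_clk_enable(struct mtk_eth *eth)
    {
        int clk, ret;

        for (clk = 0; clk < MTK_CLK_MAX; clk++) {
            ret = clk_prepare_enable(eth->clks[clk]);
            if (ret)
                goto err_disable_clks;
        }

        return 0;

    err_disable_clks:
        /* disable everything enabled so far, newest first */
        while (--clk >= 0)
            clk_disable_unprepare(eth->clks[clk]);

        return ret;
    }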
3579 struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim); in mtk_dim_rx() local
3580 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_dim_rx()
3584 cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode, in mtk_dim_rx()
3586 spin_lock_bh(&eth->dim_lock); in mtk_dim_rx()
3588 val = mtk_r32(eth, reg_map->pdma.delay_irq); in mtk_dim_rx()
3598 mtk_w32(eth, val, reg_map->pdma.delay_irq); in mtk_dim_rx()
3599 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_dim_rx()
3600 mtk_w32(eth, val, reg_map->qdma.delay_irq); in mtk_dim_rx()
3602 spin_unlock_bh(&eth->dim_lock); in mtk_dim_rx()
3610 struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim); in mtk_dim_tx() local
3611 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_dim_tx()
3615 cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode, in mtk_dim_tx()
3617 spin_lock_bh(&eth->dim_lock); in mtk_dim_tx()
3619 val = mtk_r32(eth, reg_map->pdma.delay_irq); in mtk_dim_tx()
3629 mtk_w32(eth, val, reg_map->pdma.delay_irq); in mtk_dim_tx()
3630 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_dim_tx()
3631 mtk_w32(eth, val, reg_map->qdma.delay_irq); in mtk_dim_tx()
3633 spin_unlock_bh(&eth->dim_lock); in mtk_dim_tx()
3640 struct mtk_eth *eth = mac->hw; in mtk_set_mcr_max_rx() local
3643 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_set_mcr_max_rx()
3662 static void mtk_hw_reset(struct mtk_eth *eth) in mtk_hw_reset() argument
3666 if (mtk_is_netsys_v2_or_greater(eth)) in mtk_hw_reset()
3667 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0); in mtk_hw_reset()
3669 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_hw_reset()
3672 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_hw_reset()
3675 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2)) in mtk_hw_reset()
3679 } else if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_hw_reset()
3682 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_hw_reset()
3688 ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val); in mtk_hw_reset()
3690 if (mtk_is_netsys_v3_or_greater(eth)) in mtk_hw_reset()
3691 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, in mtk_hw_reset()
3693 else if (mtk_is_netsys_v2_or_greater(eth)) in mtk_hw_reset()
3694 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, in mtk_hw_reset()
3698 static u32 mtk_hw_reset_read(struct mtk_eth *eth) in mtk_hw_reset_read() argument
3702 regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val); in mtk_hw_reset_read()
3706 static void mtk_hw_warm_reset(struct mtk_eth *eth) in mtk_hw_warm_reset() argument
3710 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE, in mtk_hw_warm_reset()
3712 if (readx_poll_timeout_atomic(mtk_hw_reset_read, eth, val, in mtk_hw_warm_reset()
3714 dev_err(eth->dev, "warm reset failed\n"); in mtk_hw_warm_reset()
3715 mtk_hw_reset(eth); in mtk_hw_warm_reset()
3719 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_hw_warm_reset()
3721 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_hw_warm_reset()
3723 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2)) in mtk_hw_warm_reset()
3727 } else if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_hw_warm_reset()
3729 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_hw_warm_reset()
3735 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask); in mtk_hw_warm_reset()
3738 val = mtk_hw_reset_read(eth); in mtk_hw_warm_reset()
3740 dev_err(eth->dev, "warm reset stage0 failed %08x (%08x)\n", in mtk_hw_warm_reset()
3744 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, ~rst_mask); in mtk_hw_warm_reset()
3747 val = mtk_hw_reset_read(eth); in mtk_hw_warm_reset()
3749 dev_err(eth->dev, "warm reset stage1 failed %08x (%08x)\n", in mtk_hw_warm_reset()
3753 static bool mtk_hw_check_dma_hang(struct mtk_eth *eth) in mtk_hw_check_dma_hang() argument
3755 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_hw_check_dma_hang()
3763 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_hw_check_dma_hang()
3767 wdidx = mtk_r32(eth, reg_map->wdma_base[0] + 0xc); in mtk_hw_check_dma_hang()
3769 val = mtk_r32(eth, reg_map->wdma_base[0] + 0x204); in mtk_hw_check_dma_hang()
3772 val = mtk_r32(eth, reg_map->wdma_base[0] + 0x230); in mtk_hw_check_dma_hang()
3775 oq_free = (!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(24, 16)) && in mtk_hw_check_dma_hang()
3776 !(mtk_r32(eth, reg_map->pse_oq_sta + 0x4) & GENMASK(8, 0)) && in mtk_hw_check_dma_hang()
3777 !(mtk_r32(eth, reg_map->pse_oq_sta + 0x10) & GENMASK(24, 16))); in mtk_hw_check_dma_hang()
3779 if (wdidx == eth->reset.wdidx && wtx_busy && cdm_full && oq_free) { in mtk_hw_check_dma_hang()
3780 if (++eth->reset.wdma_hang_count > 2) { in mtk_hw_check_dma_hang()
3781 eth->reset.wdma_hang_count = 0; in mtk_hw_check_dma_hang()
3788 qfsm_hang = !!mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x234); in mtk_hw_check_dma_hang()
3789 qfwd_hang = !mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x308); in mtk_hw_check_dma_hang()
3791 gdm1_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM1_FSM)) > 0; in mtk_hw_check_dma_hang()
3792 gdm2_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM2_FSM)) > 0; in mtk_hw_check_dma_hang()
3793 gmac1_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(0))) != 1; in mtk_hw_check_dma_hang()
3794 gmac2_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(1))) != 1; in mtk_hw_check_dma_hang()
3795 gdm1_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x24); in mtk_hw_check_dma_hang()
3796 gdm2_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x64); in mtk_hw_check_dma_hang()
3801 if (++eth->reset.qdma_hang_count > 2) { in mtk_hw_check_dma_hang()
3802 eth->reset.qdma_hang_count = 0; in mtk_hw_check_dma_hang()
3809 oq_hang = !!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(8, 0)); in mtk_hw_check_dma_hang()
3810 cdm1_busy = !!(mtk_r32(eth, MTK_FE_CDM1_FSM) & GENMASK(31, 16)); in mtk_hw_check_dma_hang()
3811 adma_busy = !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & GENMASK(4, 0)) && in mtk_hw_check_dma_hang()
3812 !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & BIT(6)); in mtk_hw_check_dma_hang()
3815 if (++eth->reset.adma_hang_count > 2) { in mtk_hw_check_dma_hang()
3816 eth->reset.adma_hang_count = 0; in mtk_hw_check_dma_hang()
3822 eth->reset.wdma_hang_count = 0; in mtk_hw_check_dma_hang()
3823 eth->reset.qdma_hang_count = 0; in mtk_hw_check_dma_hang()
3824 eth->reset.adma_hang_count = 0; in mtk_hw_check_dma_hang()
3826 eth->reset.wdidx = wdidx; in mtk_hw_check_dma_hang()
3834 struct mtk_eth *eth = container_of(del_work, struct mtk_eth, in mtk_hw_reset_monitor_work() local
3837 if (test_bit(MTK_RESETTING, &eth->state)) in mtk_hw_reset_monitor_work()
3841 if (mtk_hw_check_dma_hang(eth)) in mtk_hw_reset_monitor_work()
3842 schedule_work(&eth->pending_work); in mtk_hw_reset_monitor_work()
3845 schedule_delayed_work(&eth->reset.monitor_work, in mtk_hw_reset_monitor_work()
3849 static int mtk_hw_init(struct mtk_eth *eth, bool reset) in mtk_hw_init() argument
3853 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_hw_init()
3856 if (!reset && test_and_set_bit(MTK_HW_INIT, ð->state)) in mtk_hw_init()
3860 pm_runtime_enable(eth->dev); in mtk_hw_init()
3861 pm_runtime_get_sync(eth->dev); in mtk_hw_init()
3863 ret = mtk_clk_enable(eth); in mtk_hw_init()
3868 if (eth->ethsys) in mtk_hw_init()
3869 regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask, in mtk_hw_init()
3870 of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask); in mtk_hw_init()
3872 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_hw_init()
3873 ret = device_reset(eth->dev); in mtk_hw_init()
3875 dev_err(eth->dev, "MAC reset failed!\n"); in mtk_hw_init()
3880 mtk_dim_rx(&eth->rx_dim.work); in mtk_hw_init()
3881 mtk_dim_tx(&eth->tx_dim.work); in mtk_hw_init()
3884 mtk_tx_irq_disable(eth, ~0); in mtk_hw_init()
3885 mtk_rx_irq_disable(eth, ~0); in mtk_hw_init()
3893 mtk_hw_warm_reset(eth); in mtk_hw_init()
3895 mtk_hw_reset(eth); in mtk_hw_init()
3897 if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_hw_init()
3899 val = mtk_r32(eth, MTK_FE_GLO_MISC); in mtk_hw_init()
3900 mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC); in mtk_hw_init()
3903 if (eth->pctl) { in mtk_hw_init()
3905 regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00); in mtk_hw_init()
3908 regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5); in mtk_hw_init()
3911 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0); in mtk_hw_init()
3919 struct net_device *dev = eth->netdev[i]; in mtk_hw_init()
3924 mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i)); in mtk_hw_init()
3932 val = mtk_r32(eth, MTK_CDMQ_IG_CTRL); in mtk_hw_init()
3933 mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL); in mtk_hw_init()
3934 if (mtk_is_netsys_v1(eth)) { in mtk_hw_init()
3935 val = mtk_r32(eth, MTK_CDMP_IG_CTRL); in mtk_hw_init()
3936 mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL); in mtk_hw_init()
3938 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL); in mtk_hw_init()
3942 mtk_dim_rx(&eth->rx_dim.work); in mtk_hw_init()
3943 mtk_dim_tx(&eth->tx_dim.work); in mtk_hw_init()
3946 mtk_tx_irq_disable(eth, ~0); in mtk_hw_init()
3947 mtk_rx_irq_disable(eth, ~0); in mtk_hw_init()
3950 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp); in mtk_hw_init()
3951 mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4); in mtk_hw_init()
3952 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp); in mtk_hw_init()
3953 mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4); in mtk_hw_init()
3954 mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP); in mtk_hw_init()
3956 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_hw_init()
3958 mtk_w32(eth, 0x00000302, PSE_DROP_CFG); in mtk_hw_init()
3961 mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES); in mtk_hw_init()
3962 mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES); in mtk_hw_init()
3965 mtk_m32(eth, MTK_GDMA_STRP_CRC, 0, MTK_GDMA_FWD_CFG(0)); in mtk_hw_init()
3972 mtk_r32(eth, reg_map->gdm1_cnt + 0x100 + i); in mtk_hw_init()
3973 } else if (!mtk_is_netsys_v1(eth)) { in mtk_hw_init()
3975 mtk_w32(eth, 0x00000300, PSE_DROP_CFG); in mtk_hw_init()
3978 mtk_w32(eth, 0x00000300, PSE_PPE0_DROP); in mtk_hw_init()
3981 mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2); in mtk_hw_init()
3984 mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1)); in mtk_hw_init()
3985 mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2)); in mtk_hw_init()
3986 mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3)); in mtk_hw_init()
3987 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4)); in mtk_hw_init()
3988 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5)); in mtk_hw_init()
3989 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6)); in mtk_hw_init()
3990 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7)); in mtk_hw_init()
3991 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8)); in mtk_hw_init()
3994 mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1)); in mtk_hw_init()
3995 mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2)); in mtk_hw_init()
3996 mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3)); in mtk_hw_init()
3997 mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4)); in mtk_hw_init()
3998 mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5)); in mtk_hw_init()
3999 mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6)); in mtk_hw_init()
4000 mtk_w32(eth, 0x00060006, PSE_OQ_TH(7)); in mtk_hw_init()
4001 mtk_w32(eth, 0x00060006, PSE_OQ_TH(8)); in mtk_hw_init()
4004 mtk_w32(eth, 0x00000004, MTK_GDM2_THRES); in mtk_hw_init()
4005 mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES); in mtk_hw_init()
4006 mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES); in mtk_hw_init()
4007 mtk_w32(eth, 0x00000004, MTK_CDME0_THRES); in mtk_hw_init()
4008 mtk_w32(eth, 0x00000004, MTK_CDME1_THRES); in mtk_hw_init()
4009 mtk_w32(eth, 0x00000004, MTK_CDMM_THRES); in mtk_hw_init()
4016 pm_runtime_put_sync(eth->dev); in mtk_hw_init()
4017 pm_runtime_disable(eth->dev); in mtk_hw_init()
4023 static int mtk_hw_deinit(struct mtk_eth *eth) in mtk_hw_deinit() argument
4025 if (!test_and_clear_bit(MTK_HW_INIT, &eth->state)) in mtk_hw_deinit()
4028 mtk_clk_disable(eth); in mtk_hw_deinit()
4030 pm_runtime_put_sync(eth->dev); in mtk_hw_deinit()
4031 pm_runtime_disable(eth->dev); in mtk_hw_deinit()
4039 struct mtk_eth *eth = mac->hw; in mtk_uninit() local
4042 mtk_tx_irq_disable(eth, ~0); in mtk_uninit()
4043 mtk_rx_irq_disable(eth, ~0); in mtk_uninit()
4050 struct mtk_eth *eth = mac->hw; in mtk_change_mtu() local
4052 if (rcu_access_pointer(eth->prog) && in mtk_change_mtu()
4080 static void mtk_prepare_for_reset(struct mtk_eth *eth) in mtk_prepare_for_reset() argument
4087 i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID); in mtk_prepare_for_reset()
4089 val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) | MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT); in mtk_prepare_for_reset()
4090 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_prepare_for_reset()
4092 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2)) in mtk_prepare_for_reset()
4094 mtk_w32(eth, val, MTK_FE_GLO_CFG(i)); in mtk_prepare_for_reset()
4098 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) in mtk_prepare_for_reset()
4099 mtk_ppe_prepare_reset(eth->ppe[i]); in mtk_prepare_for_reset()
4102 mtk_w32(eth, 0, MTK_FE_INT_ENABLE); in mtk_prepare_for_reset()
4106 val = mtk_r32(eth, MTK_MAC_MCR(i)) & ~MAC_MCR_FORCE_LINK; in mtk_prepare_for_reset()
4107 mtk_w32(eth, val, MTK_MAC_MCR(i)); in mtk_prepare_for_reset()
4113 struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work); in mtk_pending_work() local
4119 set_bit(MTK_RESETTING, &eth->state); in mtk_pending_work()
4121 mtk_prepare_for_reset(eth); in mtk_pending_work()
4126 mtk_prepare_for_reset(eth); in mtk_pending_work()
4130 if (!eth->netdev[i] || !netif_running(eth->netdev[i])) in mtk_pending_work()
4133 mtk_stop(eth->netdev[i]); in mtk_pending_work()
4139 if (eth->dev->pins) in mtk_pending_work()
4140 pinctrl_select_state(eth->dev->pins->p, in mtk_pending_work()
4141 eth->dev->pins->default_state); in mtk_pending_work()
4142 mtk_hw_init(eth, true); in mtk_pending_work()
4146 if (!eth->netdev[i] || !test_bit(i, &restart)) in mtk_pending_work()
4149 if (mtk_open(eth->netdev[i])) { in mtk_pending_work()
4150 netif_alert(eth, ifup, eth->netdev[i], in mtk_pending_work()
4152 dev_close(eth->netdev[i]); in mtk_pending_work()
4158 i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID); in mtk_pending_work()
4160 val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) & ~MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT); in mtk_pending_work()
4161 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_pending_work()
4163 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2)) in mtk_pending_work()
4166 mtk_w32(eth, val, MTK_FE_GLO_CFG(i)); in mtk_pending_work()
4169 clear_bit(MTK_RESETTING, &eth->state); in mtk_pending_work()
4176 static int mtk_free_dev(struct mtk_eth *eth) in mtk_free_dev() argument
4181 if (!eth->netdev[i]) in mtk_free_dev()
4183 free_netdev(eth->netdev[i]); in mtk_free_dev()
4186 for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) { in mtk_free_dev()
4187 if (!eth->dsa_meta[i]) in mtk_free_dev()
4189 metadata_dst_free(eth->dsa_meta[i]); in mtk_free_dev()
4195 static int mtk_unreg_dev(struct mtk_eth *eth) in mtk_unreg_dev() argument
4201 if (!eth->netdev[i]) in mtk_unreg_dev()
4203 mac = netdev_priv(eth->netdev[i]); in mtk_unreg_dev()
4204 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_unreg_dev()
4206 unregister_netdev(eth->netdev[i]); in mtk_unreg_dev()
4212 static void mtk_sgmii_destroy(struct mtk_eth *eth) in mtk_sgmii_destroy() argument
4217 mtk_pcs_lynxi_destroy(eth->sgmii_pcs[i]); in mtk_sgmii_destroy()
4220 static int mtk_cleanup(struct mtk_eth *eth) in mtk_cleanup() argument
4222 mtk_sgmii_destroy(eth); in mtk_cleanup()
4223 mtk_unreg_dev(eth); in mtk_cleanup()
4224 mtk_free_dev(eth); in mtk_cleanup()
4225 cancel_work_sync(&eth->pending_work); in mtk_cleanup()
4226 cancel_delayed_work_sync(&eth->reset.monitor_work); in mtk_cleanup()
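The teardown order matters: the PCS instances and netdevs go first, and the deferred reset/pending work is cancelled last so it cannot run against freed state. Reconstructed from the hits above:

    static int mtk_cleanup(struct mtk_eth *eth)
    {
        mtk_sgmii_destroy(eth);
        mtk_unreg_dev(eth);
        mtk_free_dev(eth);
        cancel_work_sync(&eth->pending_work);
        cancel_delayed_work_sync(&eth->reset.monitor_work);

        return 0;
    }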
4327 static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data) in mtk_ethtool_pp_stats() argument
4332 for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) { in mtk_ethtool_pp_stats()
4333 struct mtk_rx_ring *ring = &eth->rx_ring[i]; in mtk_ethtool_pp_stats()
4485 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) in mtk_add_mac() argument
4496 dev_err(eth->dev, "missing mac id\n"); in mtk_add_mac()
4502 dev_err(eth->dev, "%d is not a valid mac id\n", id); in mtk_add_mac()
4506 if (eth->netdev[id]) { in mtk_add_mac()
4507 dev_err(eth->dev, "duplicate mac id found: %d\n", id); in mtk_add_mac()
4511 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_add_mac()
4514 eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1); in mtk_add_mac()
4515 if (!eth->netdev[id]) { in mtk_add_mac()
4516 dev_err(eth->dev, "alloc_etherdev failed\n"); in mtk_add_mac()
4519 mac = netdev_priv(eth->netdev[id]); in mtk_add_mac()
4520 eth->mac[id] = mac; in mtk_add_mac()
4522 mac->hw = eth; in mtk_add_mac()
4525 err = of_get_ethdev_address(mac->of_node, eth->netdev[id]); in mtk_add_mac()
4531 eth_hw_addr_random(eth->netdev[id]); in mtk_add_mac()
4532 dev_err(eth->dev, "generated random MAC address %pM\n", in mtk_add_mac()
4533 eth->netdev[id]->dev_addr); in mtk_add_mac()
4539 mac->hw_stats = devm_kzalloc(eth->dev, in mtk_add_mac()
4543 dev_err(eth->dev, "failed to allocate counter memory\n"); in mtk_add_mac()
4550 if (mtk_is_netsys_v3_or_greater(eth)) in mtk_add_mac()
4558 dev_err(eth->dev, "incorrect phy-mode\n"); in mtk_add_mac()
4566 mac->phylink_config.dev = &eth->netdev[id]->dev; in mtk_add_mac()
4592 regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val); in mtk_add_mac()
4628 SET_NETDEV_DEV(eth->netdev[id], eth->dev); in mtk_add_mac()
4629 eth->netdev[id]->watchdog_timeo = 5 * HZ; in mtk_add_mac()
4630 eth->netdev[id]->netdev_ops = &mtk_netdev_ops; in mtk_add_mac()
4631 eth->netdev[id]->base_addr = (unsigned long)eth->base; in mtk_add_mac()
4633 eth->netdev[id]->hw_features = eth->soc->hw_features; in mtk_add_mac()
4634 if (eth->hwlro) in mtk_add_mac()
4635 eth->netdev[id]->hw_features |= NETIF_F_LRO; in mtk_add_mac()
4637 eth->netdev[id]->vlan_features = eth->soc->hw_features & in mtk_add_mac()
4639 eth->netdev[id]->features |= eth->soc->hw_features; in mtk_add_mac()
4640 eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops; in mtk_add_mac()
4642 eth->netdev[id]->irq = eth->irq[0]; in mtk_add_mac()
4643 eth->netdev[id]->dev.of_node = np; in mtk_add_mac()
4645 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_add_mac()
4646 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN; in mtk_add_mac()
4648 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN; in mtk_add_mac()
4650 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_add_mac()
4655 if (mtk_page_pool_enabled(eth)) in mtk_add_mac()
4656 eth->netdev[id]->xdp_features = NETDEV_XDP_ACT_BASIC | in mtk_add_mac()
4664 free_netdev(eth->netdev[id]); in mtk_add_mac()
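The allocation path in mtk_add_mac() is the stock netdev bring-up recipe: alloc_etherdev_mqs() sized for the private struct, a DT MAC address with a random fallback, then ops and feature wiring. A condensed, hypothetical version of just that part:

#include <linux/etherdevice.h>
#include <linux/of_net.h>

struct demo_mac_priv {
	int id;	/* hypothetical per-port private data */
};

static struct net_device *demo_alloc_netdev(struct device *dev,
					    struct device_node *np,
					    unsigned int txqs)
{
	struct net_device *ndev;

	ndev = alloc_etherdev_mqs(sizeof(struct demo_mac_priv), txqs, 1);
	if (!ndev)
		return NULL;

	/* prefer the DT "mac-address"; fall back to a random one */
	if (of_get_ethdev_address(np, ndev)) {
		eth_hw_addr_random(ndev);
		dev_warn(dev, "generated random MAC address %pM\n",
			 ndev->dev_addr);
	}

	SET_NETDEV_DEV(ndev, dev);
	ndev->watchdog_timeo = 5 * HZ;

	return ndev;
}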
4668 void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev) in mtk_eth_set_dma_device() argument
4677 dev = eth->netdev[i]; in mtk_eth_set_dma_device()
4687 eth->dma_dev = dma_dev; in mtk_eth_set_dma_device()
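mtk_eth_set_dma_device() exists so WED can substitute its own struct device for DMA mappings. The swap is only safe on quiesced interfaces, so the usual shape is: close every running netdev under rtnl, update the pointer, reopen. A reconstruction of that pattern, not the verbatim driver code:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static void demo_swap_dma_device(struct net_device **netdevs, int n,
				 struct device **dma_dev_slot,
				 struct device *new_dma_dev)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(running);
	int i;

	rtnl_lock();

	for (i = 0; i < n; i++) {
		dev = netdevs[i];
		if (!dev || !(dev->flags & IFF_UP))
			continue;
		list_add_tail(&dev->close_list, &running);
	}

	/* quiesce: no descriptors may stay mapped against the old device */
	dev_close_many(&running, false);

	*dma_dev_slot = new_dma_dev;

	/* reopen; rings are reallocated against the new DMA device */
	list_for_each_entry_safe(dev, tmp, &running, close_list) {
		list_del_init(&dev->close_list);
		dev_open(dev, NULL);
	}

	rtnl_unlock();
}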
4697 static int mtk_sgmii_init(struct mtk_eth *eth) in mtk_sgmii_init() argument
4705 np = of_parse_phandle(eth->dev->of_node, "mediatek,sgmiisys", i); in mtk_sgmii_init()
4719 eth->sgmii_pcs[i] = mtk_pcs_lynxi_create(eth->dev, regmap, in mtk_sgmii_init()
4720 eth->soc->ana_rgc3, in mtk_sgmii_init()
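mtk_sgmii_init() walks the indexed "mediatek,sgmiisys" phandle list, resolving each target node to a syscon regmap before handing it to mtk_pcs_lynxi_create(). The phandle-walking part, as a standalone sketch:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

static int demo_collect_syscons(struct device *dev, struct regmap **maps,
				int max)
{
	struct device_node *np;
	int i;

	for (i = 0; i < max; i++) {
		np = of_parse_phandle(dev->of_node, "mediatek,sgmiisys", i);
		if (!np)
			break;	/* fewer entries than 'max' is fine */

		maps[i] = syscon_node_to_regmap(np);
		of_node_put(np);	/* drop the ref from of_parse_phandle() */
		if (IS_ERR(maps[i]))
			return PTR_ERR(maps[i]);
	}

	return 0;
}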
4731 struct mtk_eth *eth; in mtk_probe() local
4734 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL); in mtk_probe()
4735 if (!eth) in mtk_probe()
4738 eth->soc = of_device_get_match_data(&pdev->dev); in mtk_probe()
4740 eth->dev = &pdev->dev; in mtk_probe()
4741 eth->dma_dev = &pdev->dev; in mtk_probe()
4742 eth->base = devm_platform_ioremap_resource(pdev, 0); in mtk_probe()
4743 if (IS_ERR(eth->base)) in mtk_probe()
4744 return PTR_ERR(eth->base); in mtk_probe()
4746 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_probe()
4747 eth->ip_align = NET_IP_ALIGN; in mtk_probe()
4749 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) { in mtk_probe()
4754 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_probe()
4755 eth->sram_base = (void __force *)devm_platform_ioremap_resource(pdev, 1); in mtk_probe()
4756 if (IS_ERR(eth->sram_base)) in mtk_probe()
4757 return PTR_ERR(eth->sram_base); in mtk_probe()
4759 eth->sram_base = (void __force *)eth->base + MTK_ETH_SRAM_OFFSET; in mtk_probe()
4763 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) { in mtk_probe()
4774 spin_lock_init(&eth->page_lock); in mtk_probe()
4775 spin_lock_init(&eth->tx_irq_lock); in mtk_probe()
4776 spin_lock_init(&eth->rx_irq_lock); in mtk_probe()
4777 spin_lock_init(&eth->dim_lock); in mtk_probe()
4779 eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; in mtk_probe()
4780 INIT_WORK(&eth->rx_dim.work, mtk_dim_rx); in mtk_probe()
4781 INIT_DELAYED_WORK(&eth->reset.monitor_work, mtk_hw_reset_monitor_work); in mtk_probe()
4783 eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; in mtk_probe()
4784 INIT_WORK(&eth->tx_dim.work, mtk_dim_tx); in mtk_probe()
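Note the ordering in mtk_probe(): every spinlock and work item is initialised before any IRQ is requested or work can be scheduled. The same pattern in miniature (the demo_* handlers are hypothetical):

#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_dim_ctx {
	spinlock_t dim_lock;
	struct work_struct rx_dim_work;
	struct delayed_work monitor_work;
};

static void demo_dim_fn(struct work_struct *work)
{
	/* a real handler would recompute interrupt moderation here */
}

static void demo_monitor_fn(struct work_struct *work)
{
	/* a real handler would check DMA health and reschedule itself */
}

static void demo_init_deferred(struct demo_dim_ctx *ctx)
{
	spin_lock_init(&ctx->dim_lock);
	INIT_WORK(&ctx->rx_dim_work, demo_dim_fn);
	INIT_DELAYED_WORK(&ctx->monitor_work, demo_monitor_fn);
}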
4786 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_probe()
4787 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, in mtk_probe()
4789 if (IS_ERR(eth->ethsys)) { in mtk_probe()
4791 return PTR_ERR(eth->ethsys); in mtk_probe()
4795 if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) { in mtk_probe()
4796 eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, in mtk_probe()
4798 if (IS_ERR(eth->infra)) { in mtk_probe()
4800 return PTR_ERR(eth->infra); in mtk_probe()
4814 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) { in mtk_probe()
4815 err = mtk_sgmii_init(eth); in mtk_probe()
4821 if (eth->soc->required_pctl) { in mtk_probe()
4822 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, in mtk_probe()
4824 if (IS_ERR(eth->pctl)) { in mtk_probe()
4826 err = PTR_ERR(eth->pctl); in mtk_probe()
4831 if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_probe()
4837 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) { in mtk_probe()
4838 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_probe()
4844 eth->phy_scratch_ring = res_sram->start; in mtk_probe()
4846 eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET; in mtk_probe()
4851 if (eth->soc->offload_version) { in mtk_probe()
4857 if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base)) in mtk_probe()
4865 wdma_base = eth->soc->reg_map->wdma_base[i]; in mtk_probe()
4867 mtk_wed_add_hw(np, eth, eth->base + wdma_base, in mtk_probe()
4873 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0) in mtk_probe()
4874 eth->irq[i] = eth->irq[0]; in mtk_probe()
4876 eth->irq[i] = platform_get_irq(pdev, i); in mtk_probe()
4877 if (eth->irq[i] < 0) { in mtk_probe()
4883 for (i = 0; i < ARRAY_SIZE(eth->clks); i++) { in mtk_probe()
4884 eth->clks[i] = devm_clk_get(eth->dev, in mtk_probe()
4886 if (IS_ERR(eth->clks[i])) { in mtk_probe()
4887 if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) { in mtk_probe()
4891 if (eth->soc->required_clks & BIT(i)) { in mtk_probe()
4897 eth->clks[i] = NULL; in mtk_probe()
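The clock loop above treats -EPROBE_DEFER as always fatal (retry the probe later), a missing clock as fatal only when the SoC's required_clks bitmap marks it required, and otherwise NULLs the slot, since the clk_* API accepts NULL as a no-op clock. Sketch (names are hypothetical):

#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>

static int demo_get_clks(struct device *dev, struct clk **clks,
			 const char * const *names, int n, u32 required_mask)
{
	int i;

	for (i = 0; i < n; i++) {
		clks[i] = devm_clk_get(dev, names[i]);
		if (IS_ERR(clks[i])) {
			if (PTR_ERR(clks[i]) == -EPROBE_DEFER)
				return -EPROBE_DEFER;
			if (required_mask & BIT(i))
				return PTR_ERR(clks[i]);
			clks[i] = NULL;	/* optional on this SoC */
		}
	}

	return 0;
}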
4901 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE); in mtk_probe()
4902 INIT_WORK(&eth->pending_work, mtk_pending_work); in mtk_probe()
4904 err = mtk_hw_init(eth, false); in mtk_probe()
4908 eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO); in mtk_probe()
4912 "mediatek,eth-mac")) in mtk_probe()
4918 err = mtk_add_mac(eth, mac_np); in mtk_probe()
4925 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) { in mtk_probe()
4926 err = devm_request_irq(eth->dev, eth->irq[0], in mtk_probe()
4928 dev_name(eth->dev), eth); in mtk_probe()
4930 err = devm_request_irq(eth->dev, eth->irq[1], in mtk_probe()
4932 dev_name(eth->dev), eth); in mtk_probe()
4936 err = devm_request_irq(eth->dev, eth->irq[2], in mtk_probe()
4938 dev_name(eth->dev), eth); in mtk_probe()
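SoCs with MTK_SHARED_INT funnel TX and RX through one interrupt line; the others use separate vectors, hence the two branches above. The same split, reduced to stubs:

#include <linux/device.h>
#include <linux/interrupt.h>

static irqreturn_t demo_handle_all(int irq, void *data)
{
	return IRQ_HANDLED;	/* would dispatch both TX and RX NAPI */
}

static irqreturn_t demo_handle_tx(int irq, void *data)
{
	return IRQ_HANDLED;
}

static irqreturn_t demo_handle_rx(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int demo_request_irqs(struct device *dev, const int *irqs,
			     bool shared_int, void *ctx)
{
	int err;

	if (shared_int)
		return devm_request_irq(dev, irqs[0], demo_handle_all, 0,
					dev_name(dev), ctx);

	err = devm_request_irq(dev, irqs[1], demo_handle_tx, 0,
			       dev_name(dev), ctx);
	if (err)
		return err;

	return devm_request_irq(dev, irqs[2], demo_handle_rx, 0,
				dev_name(dev), ctx);
}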
4944 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_probe()
4945 err = mtk_mdio_init(eth); in mtk_probe()
4950 if (eth->soc->offload_version) { in mtk_probe()
4951 u32 num_ppe = mtk_is_netsys_v2_or_greater(eth) ? 2 : 1; in mtk_probe()
4953 num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe); in mtk_probe()
4955 u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400; in mtk_probe()
4957 eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, i); in mtk_probe()
4959 if (!eth->ppe[i]) { in mtk_probe()
4965 err = mtk_eth_offload_init(eth); in mtk_probe()
4971 if (!eth->netdev[i]) in mtk_probe()
4974 err = register_netdev(eth->netdev[i]); in mtk_probe()
4976 dev_err(eth->dev, "error bringing up device\n"); in mtk_probe()
4979 netif_info(eth, probe, eth->netdev[i], in mtk_probe()
4981 eth->netdev[i]->base_addr, eth->irq[0]); in mtk_probe()
4987 init_dummy_netdev(&eth->dummy_dev); in mtk_probe()
4988 netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx); in mtk_probe()
4989 netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx); in mtk_probe()
4991 platform_set_drvdata(pdev, eth); in mtk_probe()
4992 schedule_delayed_work(&eth->reset.monitor_work, in mtk_probe()
4998 mtk_ppe_deinit(eth); in mtk_probe()
4999 mtk_mdio_cleanup(eth); in mtk_probe()
5001 mtk_free_dev(eth); in mtk_probe()
5003 mtk_hw_deinit(eth); in mtk_probe()
5007 mtk_sgmii_destroy(eth); in mtk_probe()
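The err_* tail of mtk_probe() visible above is the classic goto unwind: each failure jumps to the label that undoes exactly what succeeded so far, in reverse order. The skeleton, with hypothetical helpers:

#include <linux/platform_device.h>

static int demo_hw_init(struct platform_device *pdev) { return 0; }
static void demo_hw_deinit(struct platform_device *pdev) { }
static int demo_add_macs(struct platform_device *pdev) { return 0; }
static void demo_free_macs(struct platform_device *pdev) { }
static int demo_mdio_init(struct platform_device *pdev) { return 0; }

static int demo_probe(struct platform_device *pdev)
{
	int err;

	err = demo_hw_init(pdev);
	if (err)
		return err;

	err = demo_add_macs(pdev);
	if (err)
		goto err_hw_deinit;

	err = demo_mdio_init(pdev);
	if (err)
		goto err_free_macs;

	return 0;

	/* unwind strictly in reverse order of initialisation */
err_free_macs:
	demo_free_macs(pdev);
err_hw_deinit:
	demo_hw_deinit(pdev);
	return err;
}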
5014 struct mtk_eth *eth = platform_get_drvdata(pdev); in mtk_remove() local
5020 if (!eth->netdev[i]) in mtk_remove()
5022 mtk_stop(eth->netdev[i]); in mtk_remove()
5023 mac = netdev_priv(eth->netdev[i]); in mtk_remove()
5028 mtk_hw_deinit(eth); in mtk_remove()
5030 netif_napi_del(&eth->tx_napi); in mtk_remove()
5031 netif_napi_del(&eth->rx_napi); in mtk_remove()
5032 mtk_cleanup(eth); in mtk_remove()
5033 mtk_mdio_cleanup(eth); in mtk_remove()
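mtk_remove() also undoes the dummy-netdev NAPI setup from probe (init_dummy_netdev() plus the two netif_napi_add() calls above) with matching netif_napi_del() calls. The pairing in isolation, with a stub poll function:

#include <linux/netdevice.h>

static int demo_poll(struct napi_struct *napi, int budget)
{
	/* a real poll would clean the ring and return the work done */
	napi_complete_done(napi, 0);
	return 0;
}

static void demo_napi_attach(struct net_device *dummy,
			     struct napi_struct *napi)
{
	/* the dummy netdev only exists so NAPI has something to hang off */
	init_dummy_netdev(dummy);
	netif_napi_add(dummy, napi, demo_poll);
}

static void demo_napi_detach(struct napi_struct *napi)
{
	netif_napi_del(napi);
}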
5219 { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data },
5220 { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data },
5221 { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data },
5222 { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data },
5223 { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data },
5224 { .compatible = "mediatek,mt7981-eth", .data = &mt7981_data },
5225 { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data },
5226 { .compatible = "mediatek,mt7988-eth", .data = &mt7988_data },
5227 { .compatible = "ralink,rt5350-eth", .data = &rt5350_data },
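Each .data pointer in the table above carries the per-SoC mtk_soc_data, retrieved in mtk_probe() via of_device_get_match_data(). A self-contained sketch of that idiom (the compatible string and struct are hypothetical):

#include <linux/errno.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct demo_soc_data {
	u32 caps;
};

static const struct demo_soc_data demo_soc = { .caps = 0 };

static const struct of_device_id demo_match[] = {
	{ .compatible = "vendor,demo-eth", .data = &demo_soc },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, demo_match);

static int demo_plat_probe(struct platform_device *pdev)
{
	/* returns the .data of whichever compatible matched the DT node */
	const struct demo_soc_data *soc = of_device_get_match_data(&pdev->dev);

	if (!soc)
		return -EINVAL;

	return 0;
}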