Lines matching references to pp (struct mvneta_port *) in the mvneta Ethernet driver

488 	struct mvneta_port	*pp;
762 static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
764 writel(data, pp->base + offset);
768 static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
770 return readl(pp->base + offset);
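
These two helpers are the driver's only MMIO access points: mvreg_write() and mvreg_read() wrap writel()/readl() against the port's mapped register base, pp->base. Most register updates matched below are built on them as a read-modify-write. A minimal sketch of that pattern follows, assuming MVNETA_GMAC0_PORT_ENABLE is a single-bit flag in MVNETA_GMAC_CTRL_0 (as mvneta_port_enable()/mvneta_port_disable() in this listing suggest); it is an illustration, not a function from the driver:

/* Illustrative read-modify-write using the accessors above.
 * MVNETA_GMAC0_PORT_ENABLE is assumed to be a one-bit field.
 */
static void example_port_enable(struct mvneta_port *pp)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);	/* read current value */
	val |= MVNETA_GMAC0_PORT_ENABLE;		/* set the enable bit */
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);	/* write it back */
}
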
791 static void mvneta_mib_counters_clear(struct mvneta_port *pp)
797 mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
798 mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
799 mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
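
The hardware MIB counters are clear-on-read, so mvneta_mib_counters_clear() resets them by performing dummy reads across the counter window plus the two standalone discard/overrun counters. A sketch of the surrounding loop, assuming a window-length constant (hypothetical name MVNETA_MIB_COUNTERS_LEN; the real bound in the driver may differ):

/* Sketch: dummy-read each 32-bit MIB counter so it resets.
 * MVNETA_MIB_COUNTERS_LEN is an assumed name for the window size.
 */
static void example_mib_counters_clear(struct mvneta_port *pp)
{
	int i;

	for (i = 0; i < MVNETA_MIB_COUNTERS_LEN; i += 4)
		mvreg_read(pp, MVNETA_MIB_COUNTERS_BASE + i);
	mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
	mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
}
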
807 struct mvneta_port *pp = netdev_priv(dev);
820 cpu_stats = per_cpu_ptr(pp->stats, cpu);
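
Lines 807 and 820 are from the ndo_get_stats64 path: the driver keeps per-CPU counters in pp->stats and folds them into the netdev totals under a u64_stats seqcount, so 64-bit values are never read torn on 32-bit CPUs. A minimal sketch of that aggregation pattern, with the counter field names assumed rather than copied from the driver:

/* Sketch of per-CPU stats folding; syncp, rx_packets and rx_bytes are
 * assumed member names for struct mvneta_pcpu_stats.
 */
static void example_fold_stats(struct mvneta_port *pp,
			       struct rtnl_link_stats64 *stats)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvneta_pcpu_stats *cpu_stats;
		u64 rx_packets, rx_bytes;
		unsigned int start;

		cpu_stats = per_cpu_ptr(pp->stats, cpu);
		do {
			start = u64_stats_fetch_begin(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes = cpu_stats->rx_bytes;
		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
	}
}
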
856 static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
864 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
870 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
875 static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
880 val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
887 static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
896 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
916 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
932 static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
936 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
940 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
945 static void mvneta_rxq_offset_set(struct mvneta_port *pp,
951 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
956 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
963 static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
974 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1002 static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
1008 val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));
1013 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
1017 static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
1022 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
1024 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
1028 static void mvneta_rxq_bm_enable(struct mvneta_port *pp,
1033 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
1035 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
1039 static void mvneta_rxq_long_pool_set(struct mvneta_port *pp,
1044 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
1046 val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT);
1048 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
1052 static void mvneta_rxq_short_pool_set(struct mvneta_port *pp,
1057 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
1059 val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT);
1061 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
1065 static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp,
1072 dev_warn(pp->dev->dev.parent,
1078 val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id));
1080 mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val);
1084 static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize,
1090 win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE);
1092 if (pp->bm_win_id < 0) {
1096 pp->bm_win_id = i;
1103 i = pp->bm_win_id;
1106 mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
1107 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
1110 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
1112 mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) |
1115 mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000);
1117 win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE);
1119 mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
1122 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
1127 static int mvneta_bm_port_mbus_init(struct mvneta_port *pp)
1134 err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize,
1139 pp->bm_win_id = -1;
1142 err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize,
1145 netdev_info(pp->dev, "fail to configure mbus window to BM\n");
1155 struct mvneta_port *pp)
1160 if (!pp->neta_armada3700) {
1163 ret = mvneta_bm_port_mbus_init(pp);
1169 netdev_info(pp->dev, "missing long pool id\n");
1174 pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id,
1175 MVNETA_BM_LONG, pp->id,
1176 MVNETA_RX_PKT_SIZE(pp->dev->mtu));
1177 if (!pp->pool_long) {
1178 netdev_info(pp->dev, "fail to obtain long pool for port\n");
1182 pp->pool_long->port_map |= 1 << pp->id;
1184 mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size,
1185 pp->pool_long->id);
1192 pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id,
1193 MVNETA_BM_SHORT, pp->id,
1195 if (!pp->pool_short) {
1196 netdev_info(pp->dev, "fail to obtain short pool for port\n");
1197 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
1202 pp->pool_short->port_map |= 1 << pp->id;
1203 mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size,
1204 pp->pool_short->id);
1211 static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
1213 struct mvneta_bm_pool *bm_pool = pp->pool_long;
1218 mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id);
1237 mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id);
1242 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
1243 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id);
1245 pp->bm_priv = NULL;
1246 pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
1247 mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1);
1248 netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n");
1252 static void mvneta_port_up(struct mvneta_port *pp)
1260 struct mvneta_tx_queue *txq = &pp->txqs[queue];
1264 mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
1269 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
1274 mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
1278 static void mvneta_port_down(struct mvneta_port *pp)
1284 val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;
1288 mvreg_write(pp, MVNETA_RXQ_CMD,
1295 netdev_warn(pp->dev,
1302 val = mvreg_read(pp, MVNETA_RXQ_CMD);
1308 val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;
1311 mvreg_write(pp, MVNETA_TXQ_CMD,
1318 netdev_warn(pp->dev,
1326 val = mvreg_read(pp, MVNETA_TXQ_CMD);
1334 netdev_warn(pp->dev,
1341 val = mvreg_read(pp, MVNETA_PORT_STATUS);
1349 static void mvneta_port_enable(struct mvneta_port *pp)
1354 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
1356 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
1360 static void mvneta_port_disable(struct mvneta_port *pp)
1365 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
1367 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
1375 static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
1388 mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
1392 static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
1405 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
1410 static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
1416 memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
1419 memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
1425 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
1430 struct mvneta_port *pp = arg;
1435 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
1443 struct mvneta_port *pp = arg;
1448 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
1449 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
1450 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
1455 struct mvneta_port *pp = arg;
1460 mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
1461 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
1462 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
1474 static void mvneta_defaults_set(struct mvneta_port *pp)
1482 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
1485 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
1486 mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
1489 mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
1499 if (!pp->neta_armada3700) {
1513 txq_map = (cpu == pp->rxq_def) ?
1521 mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
1525 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
1526 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
1529 mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
1531 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
1532 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
1535 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
1536 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
1539 if (pp->bm_priv)
1545 mvreg_write(pp, MVNETA_ACC_MODE, val);
1547 if (pp->bm_priv)
1548 mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr);
1551 val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
1552 mvreg_write(pp, MVNETA_PORT_CONFIG, val);
1555 mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
1556 mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);
1571 mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
1576 val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
1578 mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
1580 mvneta_set_ucast_table(pp, -1);
1581 mvneta_set_special_mcast_table(pp, -1);
1582 mvneta_set_other_mcast_table(pp, -1);
1585 mvreg_write(pp, MVNETA_INTR_ENABLE,
1589 mvneta_mib_counters_clear(pp);
1593 static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
1604 val = mvreg_read(pp, MVNETA_TX_MTU);
1607 mvreg_write(pp, MVNETA_TX_MTU, val);
1610 val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);
1617 mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
1620 val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));
1627 mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
1633 static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
1649 unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));
1659 mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
1663 static void mvneta_mac_addr_set(struct mvneta_port *pp,
1674 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
1675 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
1679 mvneta_set_ucast_addr(pp, addr[5], queue);
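
mvneta_mac_addr_set() (lines 1663-1679) programs the station address into two registers and then points the unicast filter entry for that address at the chosen RX queue. A sketch of the likely packing, assuming the usual Marvell layout (first four address bytes in MAC_ADDR_HIGH, last two in MAC_ADDR_LOW); verify the byte order against the datasheet before relying on it:

/* Sketch of the MAC address packing; byte order is an assumption
 * inferred from the register names.
 */
u32 mac_h = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
u32 mac_l = (addr[4] << 8) | addr[5];

mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
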
1685 static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
1688 mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
1695 static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
1701 clk_rate = clk_get_rate(pp->clk);
1704 mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
1708 static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
1713 val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));
1718 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
1734 static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
1743 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1748 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1752 static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
1758 val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
1768 static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
1774 sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
1778 mvneta_txq_sent_desc_dec(pp, txq, sent_desc);
1813 static void mvneta_rx_error(struct mvneta_port *pp,
1816 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
1826 netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
1830 netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
1834 netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
1838 netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
1845 static int mvneta_rx_csum(struct mvneta_port *pp, u32 status)
1847 if ((pp->dev->features & NETIF_F_RXCSUM) &&
1859 static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
1864 return &pp->txqs[queue];
1868 static void mvneta_txq_bufs_free(struct mvneta_port *pp,
1889 dma_unmap_single(pp->dev->dev.parent,
1913 static void mvneta_txq_done(struct mvneta_port *pp,
1916 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
1919 tx_done = mvneta_txq_sent_desc_proc(pp, txq);
1923 mvneta_txq_bufs_free(pp, txq, tx_done, nq, true);
1935 static int mvneta_rx_refill(struct mvneta_port *pp,
1948 phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction;
1986 static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
1991 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1993 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1995 if (pp->bm_priv) {
2002 bm_pool = &pp->bm_priv->bm_pools[pool_id];
2004 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2025 mvneta_update_stats(struct mvneta_port *pp,
2028 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2041 int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
2050 if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) {
2056 stats = this_cpu_ptr(pp->stats);
2072 mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2091 mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
2095 struct device *dev = pp->dev->dev.parent;
2174 mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
2176 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2188 txq = &pp->txqs[cpu % txq_number];
2189 nq = netdev_get_tx_queue(pp->dev, txq->id);
2192 ret = mvneta_xdp_submit_frame(pp, txq, xdpf, &nxmit_byte, false);
2200 mvneta_txq_pend_desc_add(pp, txq, 0);
2215 struct mvneta_port *pp = netdev_priv(dev);
2216 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2223 if (unlikely(test_bit(__MVNETA_DOWN, &pp->state)))
2229 txq = &pp->txqs[cpu % txq_number];
2230 nq = netdev_get_tx_queue(pp->dev, txq->id);
2234 ret = mvneta_xdp_submit_frame(pp, txq, frames[i], &nxmit_byte,
2243 mvneta_txq_pend_desc_add(pp, txq, 0);
2257 mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2264 len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
2269 sync = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
2279 err = xdp_do_redirect(pp->dev, xdp, prog);
2281 mvneta_xdp_put_buff(pp, rxq, xdp, sync);
2290 ret = mvneta_xdp_xmit_back(pp, xdp);
2292 mvneta_xdp_put_buff(pp, rxq, xdp, sync);
2295 bpf_warn_invalid_xdp_action(pp->dev, prog, act);
2298 trace_xdp_exception(pp->dev, prog, act);
2301 mvneta_xdp_put_buff(pp, rxq, xdp, sync);
2314 mvneta_swbm_rx_frame(struct mvneta_port *pp,
2322 struct net_device *dev = pp->dev;
2344 xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE,
2349 mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
2356 struct net_device *dev = pp->dev;
2380 pp->rx_offset_correction, data_len);
2395 mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
2416 skb->ip_summed = mvneta_rx_csum(pp, desc_status);
2429 struct mvneta_port *pp, int budget,
2433 struct net_device *dev = pp->dev;
2443 rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
2445 xdp_prog = READ_ONCE(pp->xdp_prog);
2464 mvneta_rx_error(pp, rx_desc);
2472 mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
2482 mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf,
2491 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
2496 mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps))
2499 skb = mvneta_swbm_build_skb(pp, rxq->page_pool, &xdp_buf, desc_status);
2501 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2503 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
2523 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
2529 mvneta_update_stats(pp, &ps);
2532 refill = mvneta_rx_refill_queue(pp, rxq);
2535 mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill);
2542 struct mvneta_port *pp, int rx_todo,
2545 struct net_device *dev = pp->dev;
2551 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
2575 bm_pool = &pp->bm_priv->bm_pools[pool_id];
2581 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2584 mvneta_rx_error(pp, rx_desc);
2595 dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev,
2604 skb->ip_summed = mvneta_rx_csum(pp, rx_status);
2611 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2625 stats = this_cpu_ptr(pp->stats);
2640 dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr,
2653 skb->ip_summed = mvneta_rx_csum(pp, rx_status);
2659 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2668 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
2673 static void mvneta_free_tso_hdrs(struct mvneta_port *pp,
2676 struct device *dev = pp->dev->dev.parent;
2689 static int mvneta_alloc_tso_hdrs(struct mvneta_port *pp,
2692 struct device *dev = pp->dev->dev.parent;
2701 mvneta_free_tso_hdrs(pp, txq);
2778 static void mvneta_release_descs(struct mvneta_port *pp,
2793 dma_unmap_single(pp->dev->dev.parent,
2811 struct mvneta_port *pp = netdev_priv(dev);
2860 mvneta_release_descs(pp, txq, first_desc, desc_count - 1);
2865 static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
2881 dma_map_single(pp->dev->dev.parent, addr,
2884 if (dma_mapping_error(pp->dev->dev.parent,
2909 mvneta_release_descs(pp, txq, first_desc, i - 1);
2916 struct mvneta_port *pp = netdev_priv(dev);
2918 struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
2966 if (mvneta_tx_frag_process(pp, skb, txq)) {
2980 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2990 mvneta_txq_pend_desc_add(pp, txq, frags);
3008 static void mvneta_txq_done_force(struct mvneta_port *pp,
3012 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
3015 mvneta_txq_bufs_free(pp, txq, tx_done, nq, false);
3026 static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
3033 txq = mvneta_tx_done_policy(pp, cause_tx_done);
3035 nq = netdev_get_tx_queue(pp->dev, txq->id);
3039 mvneta_txq_done(pp, txq);
3074 static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
3087 smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
3097 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
3109 static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
3120 omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
3130 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
3142 static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
3148 mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
3154 if (pp->mcast_count[crc_result] == 0) {
3155 netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
3160 pp->mcast_count[crc_result]--;
3161 if (pp->mcast_count[crc_result] != 0) {
3162 netdev_info(pp->dev,
3164 pp->mcast_count[crc_result], crc_result);
3168 pp->mcast_count[crc_result]++;
3170 mvneta_set_other_mcast_addr(pp, crc_result, queue);
3176 static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
3181 port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
3183 val = mvreg_read(pp, MVNETA_TYPE_PRIO);
3190 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
3191 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
3198 mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
3199 mvreg_write(pp, MVNETA_TYPE_PRIO, val);
3205 struct mvneta_port *pp = netdev_priv(dev);
3210 mvneta_rx_unicast_promisc_set(pp, 1);
3211 mvneta_set_ucast_table(pp, pp->rxq_def);
3212 mvneta_set_special_mcast_table(pp, pp->rxq_def);
3213 mvneta_set_other_mcast_table(pp, pp->rxq_def);
3216 mvneta_rx_unicast_promisc_set(pp, 0);
3217 mvneta_set_ucast_table(pp, -1);
3218 mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def);
3222 mvneta_set_special_mcast_table(pp, pp->rxq_def);
3223 mvneta_set_other_mcast_table(pp, pp->rxq_def);
3226 mvneta_set_special_mcast_table(pp, -1);
3227 mvneta_set_other_mcast_table(pp, -1);
3231 mvneta_mcast_addr_set(pp, ha->addr,
3232 pp->rxq_def);
3242 struct mvneta_port *pp = (struct mvneta_port *)dev_id;
3244 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
3245 napi_schedule(&pp->napi);
3255 disable_percpu_irq(port->pp->dev->irq);
3261 static void mvneta_link_change(struct mvneta_port *pp)
3263 u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
3265 phylink_pcs_change(&pp->phylink_pcs,
3281 struct mvneta_port *pp = netdev_priv(napi->dev);
3282 struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
3284 if (!netif_running(pp->dev)) {
3290 cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
3292 u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);
3294 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
3298 mvneta_link_change(pp);
3303 mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
3310 cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx :
3316 if (pp->bm_priv)
3317 rx_done = mvneta_rx_hwbm(napi, pp, budget,
3318 &pp->rxqs[rx_queue]);
3320 rx_done = mvneta_rx_swbm(napi, pp, budget,
3321 &pp->rxqs[rx_queue]);
3328 if (pp->neta_armada3700) {
3332 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
3338 enable_percpu_irq(pp->dev->irq, 0);
3342 if (pp->neta_armada3700)
3343 pp->cause_rx_tx = cause_rx_tx;
3350 static int mvneta_create_page_pool(struct mvneta_port *pp,
3353 struct bpf_prog *xdp_prog = READ_ONCE(pp->xdp_prog);
3359 .dev = pp->dev->dev.parent,
3361 .offset = pp->rx_offset_correction,
3373 err = __xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0,
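
mvneta_create_page_pool() (around line 3350) gives each RX queue its own page_pool and registers it as the queue's XDP memory model, which is why the matches above reference both the page_pool_params fields and __xdp_rxq_info_reg(). A condensed sketch of that sequence using the generic page_pool/XDP helpers; the pool size, flags and DMA direction here are assumptions, not the driver's exact values:

/* Sketch: per-rxq page_pool plus XDP rxq registration. */
struct page_pool_params pp_params = {
	.order = 0,
	.flags = PP_FLAG_DMA_MAP,
	.pool_size = rxq->size,			/* ring-sized pool (assumed) */
	.nid = NUMA_NO_NODE,
	.dev = pp->dev->dev.parent,
	.dma_dir = DMA_FROM_DEVICE,		/* DMA_BIDIRECTIONAL when XDP_TX is possible */
	.offset = pp->rx_offset_correction,
};
int err;

rxq->page_pool = page_pool_create(&pp_params);
if (IS_ERR(rxq->page_pool))
	return PTR_ERR(rxq->page_pool);

err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0);
if (!err)
	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
					 MEM_TYPE_PAGE_POOL, rxq->page_pool);
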
3394 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
3399 err = mvneta_create_page_pool(pp, rxq, num);
3405 if (mvneta_rx_refill(pp, rxq->descs + i, rxq,
3407 netdev_err(pp->dev,
3417 mvneta_rxq_non_occup_desc_add(pp, rxq, i);
3423 static void mvneta_tx_reset(struct mvneta_port *pp)
3429 mvneta_txq_done_force(pp, &pp->txqs[queue]);
3431 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
3432 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
3435 static void mvneta_rx_reset(struct mvneta_port *pp)
3437 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
3438 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
3443 static int mvneta_rxq_sw_init(struct mvneta_port *pp,
3446 rxq->size = pp->rx_ring_size;
3449 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
3460 static void mvneta_rxq_hw_init(struct mvneta_port *pp,
3464 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
3465 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
3468 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
3469 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
3471 if (!pp->bm_priv) {
3473 mvneta_rxq_offset_set(pp, rxq, 0);
3474 mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
3476 MVNETA_RX_BUF_SIZE(pp->pkt_size));
3477 mvneta_rxq_bm_disable(pp, rxq);
3478 mvneta_rxq_fill(pp, rxq, rxq->size);
3481 mvneta_rxq_offset_set(pp, rxq,
3482 NET_SKB_PAD - pp->rx_offset_correction);
3484 mvneta_rxq_bm_enable(pp, rxq);
3486 mvneta_rxq_long_pool_set(pp, rxq);
3487 mvneta_rxq_short_pool_set(pp, rxq);
3488 mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
3493 static int mvneta_rxq_init(struct mvneta_port *pp,
3499 ret = mvneta_rxq_sw_init(pp, rxq);
3503 mvneta_rxq_hw_init(pp, rxq);
3509 static void mvneta_rxq_deinit(struct mvneta_port *pp,
3512 mvneta_rxq_drop_pkts(pp, rxq);
3515 dma_free_coherent(pp->dev->dev.parent,
3528 static int mvneta_txq_sw_init(struct mvneta_port *pp,
3533 txq->size = pp->tx_ring_size;
3543 txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
3556 err = mvneta_alloc_tso_hdrs(pp, txq);
3561 if (pp->neta_armada3700)
3566 cpu = pp->rxq_def % num_present_cpus();
3568 netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);
3573 static void mvneta_txq_hw_init(struct mvneta_port *pp,
3577 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
3578 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
3581 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
3582 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
3584 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
3588 static int mvneta_txq_init(struct mvneta_port *pp,
3593 ret = mvneta_txq_sw_init(pp, txq);
3597 mvneta_txq_hw_init(pp, txq);
3603 static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
3606 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
3610 mvneta_free_tso_hdrs(pp, txq);
3612 dma_free_coherent(pp->dev->dev.parent,
3625 static void mvneta_txq_hw_deinit(struct mvneta_port *pp,
3629 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
3630 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
3633 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
3634 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
3637 static void mvneta_txq_deinit(struct mvneta_port *pp,
3640 mvneta_txq_sw_deinit(pp, txq);
3641 mvneta_txq_hw_deinit(pp, txq);
3645 static void mvneta_cleanup_txqs(struct mvneta_port *pp)
3650 mvneta_txq_deinit(pp, &pp->txqs[queue]);
3654 static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
3659 mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
3664 static int mvneta_setup_rxqs(struct mvneta_port *pp)
3669 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
3672 netdev_err(pp->dev, "%s: can't create rxq=%d\n",
3674 mvneta_cleanup_rxqs(pp);
3683 static int mvneta_setup_txqs(struct mvneta_port *pp)
3688 int err = mvneta_txq_init(pp, &pp->txqs[queue]);
3690 netdev_err(pp->dev, "%s: can't create txq=%d\n",
3692 mvneta_cleanup_txqs(pp);
3700 static int mvneta_comphy_init(struct mvneta_port *pp, phy_interface_t interface)
3704 ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, interface);
3708 return phy_power_on(pp->comphy);
3711 static int mvneta_config_interface(struct mvneta_port *pp,
3716 if (pp->comphy) {
3720 ret = mvneta_comphy_init(pp, interface);
3725 mvreg_write(pp, MVNETA_SERDES_CFG,
3731 mvreg_write(pp, MVNETA_SERDES_CFG,
3736 mvreg_write(pp, MVNETA_SERDES_CFG,
3744 pp->phy_interface = interface;
3749 static void mvneta_start_dev(struct mvneta_port *pp)
3753 WARN_ON(mvneta_config_interface(pp, pp->phy_interface));
3755 mvneta_max_rx_size_set(pp, pp->pkt_size);
3756 mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
3759 mvneta_port_enable(pp);
3761 if (!pp->neta_armada3700) {
3765 per_cpu_ptr(pp->ports, cpu);
3770 napi_enable(&pp->napi);
3774 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3776 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3780 phylink_start(pp->phylink);
3783 phylink_speed_up(pp->phylink);
3785 netif_tx_start_all_queues(pp->dev);
3787 clear_bit(__MVNETA_DOWN, &pp->state);
3790 static void mvneta_stop_dev(struct mvneta_port *pp)
3794 set_bit(__MVNETA_DOWN, &pp->state);
3796 if (device_may_wakeup(&pp->dev->dev))
3797 phylink_speed_down(pp->phylink, false);
3799 phylink_stop(pp->phylink);
3801 if (!pp->neta_armada3700) {
3804 per_cpu_ptr(pp->ports, cpu);
3809 napi_disable(&pp->napi);
3812 netif_carrier_off(pp->dev);
3814 mvneta_port_down(pp);
3815 netif_tx_stop_all_queues(pp->dev);
3818 mvneta_port_disable(pp);
3821 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
3824 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3826 mvneta_tx_reset(pp);
3827 mvneta_rx_reset(pp);
3829 WARN_ON(phy_power_off(pp->comphy));
3834 struct mvneta_port *pp = arg;
3836 enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
3841 struct mvneta_port *pp = arg;
3843 disable_percpu_irq(pp->dev->irq);
3849 struct mvneta_port *pp = netdev_priv(dev);
3850 struct bpf_prog *prog = pp->xdp_prog;
3870 if (pp->bm_priv)
3871 mvneta_bm_update_mtu(pp, mtu);
3880 mvneta_stop_dev(pp);
3881 on_each_cpu(mvneta_percpu_disable, pp, true);
3883 mvneta_cleanup_txqs(pp);
3884 mvneta_cleanup_rxqs(pp);
3886 if (pp->bm_priv)
3887 mvneta_bm_update_mtu(pp, mtu);
3889 pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
3891 ret = mvneta_setup_rxqs(pp);
3897 ret = mvneta_setup_txqs(pp);
3903 on_each_cpu(mvneta_percpu_enable, pp, true);
3904 mvneta_start_dev(pp);
3914 struct mvneta_port *pp = netdev_priv(dev);
3916 if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
3920 pp->tx_csum_limit);
3927 static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
3931 mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
3932 mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
3944 struct mvneta_port *pp = netdev_priv(dev);
3952 mvneta_mac_addr_set(pp, dev->dev_addr, -1);
3955 mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def);
3992 struct mvneta_port *pp = mvneta_pcs_to_port(pcs);
3995 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
4021 struct mvneta_port *pp = mvneta_pcs_to_port(pcs);
4062 old_an = an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4066 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, an);
4074 struct mvneta_port *pp = mvneta_pcs_to_port(pcs);
4075 u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4077 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
4079 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
4094 struct mvneta_port *pp = netdev_priv(ndev);
4096 return &pp->phylink_pcs;
4103 struct mvneta_port *pp = netdev_priv(ndev);
4106 if (pp->phy_interface != interface ||
4113 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4116 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
4119 if (pp->phy_interface != interface)
4120 WARN_ON(phy_power_off(pp->comphy));
4124 unsigned long rate = clk_get_rate(pp->clk);
4126 mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER,
4137 struct mvneta_port *pp = netdev_priv(ndev);
4138 u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
4139 u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
4140 u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4);
4176 mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0);
4178 mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2);
4180 mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4);
4183 while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
4193 struct mvneta_port *pp = netdev_priv(ndev);
4198 clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
4200 mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, clk);
4203 if (pp->phy_interface != interface)
4205 WARN_ON(mvneta_config_interface(pp, interface));
4211 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4213 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
4223 struct mvneta_port *pp = netdev_priv(ndev);
4226 mvneta_port_down(pp);
4229 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4232 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
4243 struct mvneta_port *pp = netdev_priv(ndev);
4247 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4266 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
4272 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4278 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
4281 mvneta_port_up(pp);
4286 struct mvneta_port *pp = netdev_priv(to_net_dev(config->dev));
4289 lpi1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
4293 mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi1);
4299 struct mvneta_port *pp = netdev_priv(to_net_dev(config->dev));
4302 status = mvreg_read(pp, MVNETA_GMAC_STATUS);
4321 lpi0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
4323 mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi0);
4326 lpi1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
4329 mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi1);
4345 static int mvneta_mdio_probe(struct mvneta_port *pp)
4348 int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0);
4351 netdev_err(pp->dev, "could not attach PHY: %d\n", err);
4353 phylink_ethtool_get_wol(pp->phylink, &wol);
4354 device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);
4358 device_set_wakeup_enable(&pp->dev->dev, !!wol.wolopts);
4363 static void mvneta_mdio_remove(struct mvneta_port *pp)
4365 phylink_disconnect_phy(pp->phylink);
4372 static void mvneta_percpu_elect(struct mvneta_port *pp)
4379 if (pp->rxq_def < nr_cpu_ids && cpu_online(pp->rxq_def))
4380 elected_cpu = pp->rxq_def;
4394 rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);
4404 txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
4407 mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
4413 pp, true);
4420 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
4422 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
4427 if (pp->neta_armada3700)
4431 spin_lock(&pp->lock);
4436 if (pp->is_stopped) {
4437 spin_unlock(&pp->lock);
4441 netif_tx_stop_all_queues(pp->dev);
4450 per_cpu_ptr(pp->ports, other_cpu);
4457 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
4464 mvneta_percpu_enable(pp);
4470 mvneta_percpu_elect(pp);
4473 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
4474 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
4477 netif_tx_start_all_queues(pp->dev);
4478 spin_unlock(&pp->lock);
4486 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
4488 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
4494 spin_lock(&pp->lock);
4496 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
4497 spin_unlock(&pp->lock);
4502 mvneta_percpu_disable(pp);
4508 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
4512 spin_lock(&pp->lock);
4513 mvneta_percpu_elect(pp);
4514 spin_unlock(&pp->lock);
4516 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
4517 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
4520 netif_tx_start_all_queues(pp->dev);
4526 struct mvneta_port *pp = netdev_priv(dev);
4529 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
4531 ret = mvneta_setup_rxqs(pp);
4535 ret = mvneta_setup_txqs(pp);
4540 if (pp->neta_armada3700)
4541 ret = request_irq(pp->dev->irq, mvneta_isr, 0,
4542 dev->name, pp);
4544 ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr,
4545 dev->name, pp->ports);
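
mvneta_open() (lines 4526-4547) requests the interrupt differently per SoC: Armada 3700 exposes a single shared IRQ handled by mvneta_isr(), while the other SoCs use a per-CPU IRQ, so the per-CPU area pp->ports is passed as the cookie to request_percpu_irq(). A sketch of that split, close to what the listing shows (the request_irq() flags value is an assumption):

/* Sketch of the per-SoC IRQ request split. */
if (pp->neta_armada3700)
	ret = request_irq(pp->dev->irq, mvneta_isr, 0,
			  pp->dev->name, pp);
else
	ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr,
				 pp->dev->name, pp->ports);
if (ret) {
	netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
	return ret;
}
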
4547 netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
4551 if (!pp->neta_armada3700) {
4555 on_each_cpu(mvneta_percpu_enable, pp, true);
4557 pp->is_stopped = false;
4562 &pp->node_online);
4567 &pp->node_dead);
4572 ret = mvneta_mdio_probe(pp);
4578 mvneta_start_dev(pp);
4583 if (!pp->neta_armada3700)
4585 &pp->node_dead);
4587 if (!pp->neta_armada3700)
4589 &pp->node_online);
4591 if (pp->neta_armada3700) {
4592 free_irq(pp->dev->irq, pp);
4594 on_each_cpu(mvneta_percpu_disable, pp, true);
4595 free_percpu_irq(pp->dev->irq, pp->ports);
4598 mvneta_cleanup_txqs(pp);
4600 mvneta_cleanup_rxqs(pp);
4607 struct mvneta_port *pp = netdev_priv(dev);
4609 if (!pp->neta_armada3700) {
4615 spin_lock(&pp->lock);
4616 pp->is_stopped = true;
4617 spin_unlock(&pp->lock);
4619 mvneta_stop_dev(pp);
4620 mvneta_mdio_remove(pp);
4623 &pp->node_online);
4625 &pp->node_dead);
4626 on_each_cpu(mvneta_percpu_disable, pp, true);
4627 free_percpu_irq(dev->irq, pp->ports);
4629 mvneta_stop_dev(pp);
4630 mvneta_mdio_remove(pp);
4631 free_irq(dev->irq, pp);
4634 mvneta_cleanup_rxqs(pp);
4635 mvneta_cleanup_txqs(pp);
4642 struct mvneta_port *pp = netdev_priv(dev);
4644 return phylink_mii_ioctl(pp->phylink, ifr, cmd);
4651 struct mvneta_port *pp = netdev_priv(dev);
4660 if (pp->bm_priv) {
4666 need_update = !!pp->xdp_prog != !!prog;
4670 old_prog = xchg(&pp->xdp_prog, prog);
4697 struct mvneta_port *pp = netdev_priv(ndev);
4699 return phylink_ethtool_ksettings_set(pp->phylink, cmd);
4707 struct mvneta_port *pp = netdev_priv(ndev);
4709 return phylink_ethtool_ksettings_get(pp->phylink, cmd);
4714 struct mvneta_port *pp = netdev_priv(dev);
4716 return phylink_ethtool_nway_reset(pp->phylink);
4726 struct mvneta_port *pp = netdev_priv(dev);
4730 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
4733 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
4734 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
4738 struct mvneta_tx_queue *txq = &pp->txqs[queue];
4740 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
4753 struct mvneta_port *pp = netdev_priv(dev);
4755 c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
4756 c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;
4758 c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
4781 struct mvneta_port *pp = netdev_priv(netdev);
4785 ring->rx_pending = pp->rx_ring_size;
4786 ring->tx_pending = pp->tx_ring_size;
4795 struct mvneta_port *pp = netdev_priv(dev);
4799 pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
4802 pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
4804 if (pp->tx_ring_size != ring->tx_pending)
4806 pp->tx_ring_size, ring->tx_pending);
4823 struct mvneta_port *pp = netdev_priv(dev);
4825 phylink_ethtool_get_pauseparam(pp->phylink, pause);
4831 struct mvneta_port *pp = netdev_priv(dev);
4833 return phylink_ethtool_set_pauseparam(pp->phylink, pause);
4840 struct mvneta_port *pp = netdev_priv(netdev);
4846 if (!pp->bm_priv) {
4853 mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp,
4871 stats = per_cpu_ptr(pp->stats, cpu);
4897 static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
4901 void __iomem *base = pp->base;
4906 mvneta_ethtool_update_pcpu_stats(pp, &stats);
4913 pp->ethtool_stats[i] += val;
4920 pp->ethtool_stats[i] += val;
4925 val = phylink_get_eee_err(pp->phylink);
4926 pp->ethtool_stats[i] += val;
4929 pp->ethtool_stats[i] = stats.skb_alloc_error;
4932 pp->ethtool_stats[i] = stats.refill_error;
4935 pp->ethtool_stats[i] = stats.ps.xdp_redirect;
4938 pp->ethtool_stats[i] = stats.ps.xdp_pass;
4941 pp->ethtool_stats[i] = stats.ps.xdp_drop;
4944 pp->ethtool_stats[i] = stats.ps.xdp_tx;
4947 pp->ethtool_stats[i] = stats.ps.xdp_tx_err;
4950 pp->ethtool_stats[i] = stats.ps.xdp_xmit;
4953 pp->ethtool_stats[i] = stats.ps.xdp_xmit_err;
4961 static void mvneta_ethtool_pp_stats(struct mvneta_port *pp, u64 *data)
4967 if (pp->rxqs[i].page_pool)
4968 page_pool_get_stats(pp->rxqs[i].page_pool, &stats);
4977 struct mvneta_port *pp = netdev_priv(dev);
4980 mvneta_ethtool_update_stats(pp);
4983 *data++ = pp->ethtool_stats[i];
4985 if (!pp->bm_priv)
4986 mvneta_ethtool_pp_stats(pp, data);
4993 struct mvneta_port *pp = netdev_priv(dev);
4995 if (!pp->bm_priv)
5022 static int mvneta_config_rss(struct mvneta_port *pp)
5027 netif_tx_stop_all_queues(pp->dev);
5029 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
5031 if (!pp->neta_armada3700) {
5035 per_cpu_ptr(pp->ports, cpu);
5041 napi_synchronize(&pp->napi);
5042 napi_disable(&pp->napi);
5045 pp->rxq_def = pp->indir[0];
5048 mvneta_set_rx_mode(pp->dev);
5051 val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
5052 mvreg_write(pp, MVNETA_PORT_CONFIG, val);
5055 spin_lock(&pp->lock);
5056 mvneta_percpu_elect(pp);
5057 spin_unlock(&pp->lock);
5059 if (!pp->neta_armada3700) {
5063 per_cpu_ptr(pp->ports, cpu);
5068 napi_enable(&pp->napi);
5071 netif_tx_start_all_queues(pp->dev);
5080 struct mvneta_port *pp = netdev_priv(dev);
5083 if (pp->neta_armada3700)
5097 memcpy(pp->indir, rxfh->indir, MVNETA_RSS_LU_TABLE_SIZE);
5099 return mvneta_config_rss(pp);
5105 struct mvneta_port *pp = netdev_priv(dev);
5108 if (pp->neta_armada3700)
5116 memcpy(rxfh->indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);
5124 struct mvneta_port *pp = netdev_priv(dev);
5126 phylink_ethtool_get_wol(pp->phylink, wol);
5132 struct mvneta_port *pp = netdev_priv(dev);
5135 ret = phylink_ethtool_set_wol(pp->phylink, wol);
5145 struct mvneta_port *pp = netdev_priv(dev);
5147 return phylink_ethtool_get_eee(pp->phylink, eee);
5153 struct mvneta_port *pp = netdev_priv(dev);
5161 return phylink_ethtool_set_eee(pp->phylink, eee);
5164 static void mvneta_clear_rx_prio_map(struct mvneta_port *pp)
5166 mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, 0);
5169 static void mvneta_map_vlan_prio_to_rxq(struct mvneta_port *pp, u8 pri, u8 rxq)
5171 u32 val = mvreg_read(pp, MVNETA_VLAN_PRIO_TO_RXQ);
5176 mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, val);
5179 static int mvneta_enable_per_queue_rate_limit(struct mvneta_port *pp)
5185 core_clk_rate = clk_get_rate(pp->clk);
5196 val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG);
5198 mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val);
5201 mvreg_write(pp, MVNETA_REFILL_NUM_CLK_REG, refill_cycles);
5206 static void mvneta_disable_per_queue_rate_limit(struct mvneta_port *pp)
5208 u32 val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG);
5211 mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val);
5214 static int mvneta_setup_queue_rates(struct mvneta_port *pp, int queue,
5237 mvreg_write(pp, MVNETA_TXQ_BUCKET_REFILL_REG(queue), val);
5245 struct mvneta_port *pp = netdev_priv(dev);
5257 mvneta_clear_rx_prio_map(pp);
5260 mvneta_disable_per_queue_rate_limit(pp);
5277 mvneta_map_vlan_prio_to_rxq(pp, tc, rxq);
5282 mvneta_disable_per_queue_rate_limit(pp);
5289 ret = mvneta_enable_per_queue_rate_limit(pp);
5300 ret = mvneta_setup_queue_rates(pp, txq,
5365 static int mvneta_init(struct device *dev, struct mvneta_port *pp)
5370 mvneta_port_disable(pp);
5373 mvneta_defaults_set(pp);
5375 pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL);
5376 if (!pp->txqs)
5381 struct mvneta_tx_queue *txq = &pp->txqs[queue];
5383 txq->size = pp->tx_ring_size;
5387 pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL);
5388 if (!pp->rxqs)
5393 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
5395 rxq->size = pp->rx_ring_size;
5399 = devm_kmalloc_array(pp->dev->dev.parent,
5411 static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
5419 mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
5420 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
5423 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
5433 mvreg_write(pp, MVNETA_WIN_BASE(i),
5438 mvreg_write(pp, MVNETA_WIN_SIZE(i),
5445 if (pp->neta_ac5)
5446 mvreg_write(pp, MVNETA_WIN_BASE(0),
5453 mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000);
5458 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
5459 mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
5463 static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
5466 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
5475 mvneta_mac_disable_tx_lpi(&pp->phylink_config);
5485 struct mvneta_port *pp;
5506 pp = netdev_priv(dev);
5507 spin_lock_init(&pp->lock);
5508 pp->dn = dn;
5510 pp->rxq_def = rxq_def;
5511 pp->indir[0] = rxq_def;
5519 pp->phy_interface = phy_mode;
5528 pp->comphy = comphy;
5530 pp->base = devm_platform_ioremap_resource(pdev, 0);
5531 if (IS_ERR(pp->base))
5532 return PTR_ERR(pp->base);
5536 pp->neta_armada3700 = true;
5538 pp->neta_armada3700 = true;
5539 pp->neta_ac5 = true;
5546 pp->clk = devm_clk_get(&pdev->dev, "core");
5547 if (IS_ERR(pp->clk))
5548 pp->clk = devm_clk_get(&pdev->dev, NULL);
5549 if (IS_ERR(pp->clk)) {
5550 err = PTR_ERR(pp->clk);
5554 clk_prepare_enable(pp->clk);
5556 pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
5557 if (!IS_ERR(pp->clk_bus))
5558 clk_prepare_enable(pp->clk_bus);
5560 pp->phylink_pcs.ops = &mvneta_phylink_pcs_ops;
5562 pp->phylink_config.dev = &dev->dev;
5563 pp->phylink_config.type = PHYLINK_NETDEV;
5564 pp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 |
5568 __set_bit(PHY_INTERFACE_MODE_QSGMII, pp->phylink_config.lpi_interfaces);
5569 __set_bit(PHY_INTERFACE_MODE_SGMII, pp->phylink_config.lpi_interfaces);
5570 pp->phylink_config.lpi_capabilities = MAC_100FD | MAC_1000FD;
5571 pp->phylink_config.lpi_timer_default = 250;
5572 pp->phylink_config.eee_enabled_default = true;
5574 phy_interface_set_rgmii(pp->phylink_config.supported_interfaces);
5576 pp->phylink_config.supported_interfaces);
5582 pp->phylink_config.supported_interfaces);
5584 pp->phylink_config.supported_interfaces);
5586 pp->phylink_config.supported_interfaces);
5590 pp->phylink_config.supported_interfaces);
5595 pp->phylink_config.supported_interfaces);
5597 pp->phylink_config.supported_interfaces);
5600 phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode,
5607 pp->phylink = phylink;
5610 pp->ports = alloc_percpu(struct mvneta_pcpu_port);
5611 if (!pp->ports) {
5617 pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
5618 if (!pp->stats) {
5627 mvneta_get_mac_addr(pp, hw_mac_addr);
5651 pp->tx_csum_limit = tx_csum_limit;
5653 pp->dram_target_info = mv_mbus_dram_info();
5658 if (pp->dram_target_info || pp->neta_armada3700)
5659 mvneta_conf_mbus_windows(pp, pp->dram_target_info);
5661 pp->tx_ring_size = MVNETA_MAX_TXD;
5662 pp->rx_ring_size = MVNETA_MAX_RXD;
5664 pp->dev = dev;
5667 pp->id = global_port_id++;
5672 pp->bm_priv = mvneta_bm_get(bm_node);
5673 if (pp->bm_priv) {
5674 err = mvneta_bm_port_init(pdev, pp);
5678 mvneta_bm_put(pp->bm_priv);
5679 pp->bm_priv = NULL;
5686 pp->rx_offset_correction = max(0,
5693 if (!pp->bm_priv)
5694 pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
5696 err = mvneta_init(&pdev->dev, pp);
5700 err = mvneta_port_power_up(pp, pp->phy_interface);
5709 if (pp->neta_armada3700) {
5710 netif_napi_add(dev, &pp->napi, mvneta_poll);
5714 per_cpu_ptr(pp->ports, cpu);
5717 port->pp = pp;
5725 if (!pp->bm_priv)
5748 platform_set_drvdata(pdev, pp->dev);
5753 if (pp->bm_priv) {
5754 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
5755 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
5756 1 << pp->id);
5757 mvneta_bm_put(pp->bm_priv);
5759 free_percpu(pp->stats);
5761 free_percpu(pp->ports);
5763 if (pp->phylink)
5764 phylink_destroy(pp->phylink);
5766 clk_disable_unprepare(pp->clk_bus);
5767 clk_disable_unprepare(pp->clk);
5777 struct mvneta_port *pp = netdev_priv(dev);
5780 clk_disable_unprepare(pp->clk_bus);
5781 clk_disable_unprepare(pp->clk);
5782 free_percpu(pp->ports);
5783 free_percpu(pp->stats);
5785 phylink_destroy(pp->phylink);
5787 if (pp->bm_priv) {
5788 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
5789 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
5790 1 << pp->id);
5791 mvneta_bm_put(pp->bm_priv);
5800 struct mvneta_port *pp = netdev_priv(dev);
5805 if (!pp->neta_armada3700) {
5806 spin_lock(&pp->lock);
5807 pp->is_stopped = true;
5808 spin_unlock(&pp->lock);
5811 &pp->node_online);
5813 &pp->node_dead);
5817 mvneta_stop_dev(pp);
5821 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
5823 mvneta_rxq_drop_pkts(pp, rxq);
5827 struct mvneta_tx_queue *txq = &pp->txqs[queue];
5829 mvneta_txq_hw_deinit(pp, txq);
5834 clk_disable_unprepare(pp->clk_bus);
5835 clk_disable_unprepare(pp->clk);
5844 struct mvneta_port *pp = netdev_priv(dev);
5847 clk_prepare_enable(pp->clk);
5848 if (!IS_ERR(pp->clk_bus))
5849 clk_prepare_enable(pp->clk_bus);
5850 if (pp->dram_target_info || pp->neta_armada3700)
5851 mvneta_conf_mbus_windows(pp, pp->dram_target_info);
5852 if (pp->bm_priv) {
5853 err = mvneta_bm_port_init(pdev, pp);
5856 pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
5857 pp->bm_priv = NULL;
5860 mvneta_defaults_set(pp);
5861 err = mvneta_port_power_up(pp, pp->phy_interface);
5873 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
5876 mvneta_rxq_hw_init(pp, rxq);
5880 struct mvneta_tx_queue *txq = &pp->txqs[queue];
5883 mvneta_txq_hw_init(pp, txq);
5886 if (!pp->neta_armada3700) {
5887 spin_lock(&pp->lock);
5888 pp->is_stopped = false;
5889 spin_unlock(&pp->lock);
5891 &pp->node_online);
5893 &pp->node_dead);
5897 mvneta_start_dev(pp);