Lines Matching +full:eee +full:- +full:broken +full:- +full:100 +full:tx

7  * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
156 #define MVNETA_TX_INTR_MASK(nr_txqs) (((1 << nr_txqs) - 1) << 0)
158 #define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
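These two macros build a contiguous block of nr_txqs (or nr_rxqs) interrupt-cause bits, with the TX block starting at bit 0 and the RX block at bit 8. A small stand-alone sketch of the mask arithmetic (the function name and values are ours, not the driver's):

#include <stdio.h>

/* Illustrative re-statement of the mask construction: a block of
 * 'nr' consecutive bits starting at 'shift'.
 */
static unsigned int queue_mask(unsigned int nr, unsigned int shift)
{
    return ((1u << nr) - 1u) << shift;
}

int main(void)
{
    /* 8 TX queues at bit 0, 8 RX queues at bit 8 */
    printf("tx mask: 0x%04x\n", queue_mask(8, 0)); /* 0x00ff */
    printf("rx mask: 0x%04x\n", queue_mask(8, 8)); /* 0xff00 */
    return 0;
}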
270 * to cover all rate-limit values from 10Kbps up to 5Gbps
274 #define MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS 100
300 (((index) < (q)->last_desc) ? ((index) + 1) : 0)
307 #define MVNETA_RX_COAL_USEC 100
343 /* Max number of Tx descriptors */
347 #define MVNETA_MAX_TSO_SEGS 100
378 #define MVNETA_MAX_RX_BUF_SIZE (PAGE_SIZE - MVNETA_SKB_PAD)
381 (((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)
490 /* Pointer to the CPU-local NAPI struct */
599 u32 reserved2; /* hw_cmd - (for future use, PMT) */
600 u32 reserved3[4]; /* Reserved - (for future use) */
605 u16 reserved1; /* pnc_info - (for future use, PnC) */
613 u16 reserved4; /* csum_l4 - (for future use, PnC) */
623 u32 reserved2; /* hw_cmd - (for future use, PMT) */
625 u32 reserved3[4]; /* Reserved - (for future use) */
630 u16 reserved1; /* pnc_info - (for future use, PnC) */
636 u16 reserved4; /* csum_l4 - (for future use, PnC) */
661 /* Number of this TX queue, in the range 0-7 */
664 /* Number of TX DMA descriptors in the descriptor ring */
667 /* Number of currently used TX DMA descriptor in the
678 /* Index of last TX DMA descriptor that was inserted */
681 /* Index of the TX DMA descriptor to be cleaned up */
686 /* Virtual address of the TX DMA descriptors array */
689 /* DMA address of the TX DMA descriptors array */
692 /* Index of the last TX DMA descriptor */
695 /* Index of the next TX DMA descriptor to process */
709 /* rx queue number, in the range 0-7 */
764 writel(data, pp->base + offset); in mvreg_write()
770 return readl(pp->base + offset); in mvreg_read()
776 txq->txq_get_index++; in mvneta_txq_inc_get()
777 if (txq->txq_get_index == txq->size) in mvneta_txq_inc_get()
778 txq->txq_get_index = 0; in mvneta_txq_inc_get()
784 txq->txq_put_index++; in mvneta_txq_inc_put()
785 if (txq->txq_put_index == txq->size) in mvneta_txq_inc_put()
786 txq->txq_put_index = 0; in mvneta_txq_inc_put()
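The two helpers above advance the TX queue's get and put indices around a circular ring, wrapping back to zero when they reach the ring size. A minimal stand-alone sketch of the same wrap logic (the struct and names are illustrative, not the driver's):

#include <assert.h>

/* Hypothetical ring model: each index walks 0..size-1 and wraps. */
struct ring {
    unsigned int get_index;
    unsigned int put_index;
    unsigned int size;
};

static void ring_inc_get(struct ring *r)
{
    if (++r->get_index == r->size)
        r->get_index = 0;
}

static void ring_inc_put(struct ring *r)
{
    if (++r->put_index == r->size)
        r->put_index = 0;
}

int main(void)
{
    struct ring r = { .size = 4 };

    for (int i = 0; i < 5; i++)
        ring_inc_put(&r);
    assert(r.put_index == 1);   /* five increments modulo four */
    ring_inc_get(&r);
    assert(r.get_index == 1);
    return 0;
}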
820 cpu_stats = per_cpu_ptr(pp->stats, cpu); in mvneta_get_stats64()
822 start = u64_stats_fetch_begin(&cpu_stats->syncp); in mvneta_get_stats64()
823 rx_packets = cpu_stats->es.ps.rx_packets; in mvneta_get_stats64()
824 rx_bytes = cpu_stats->es.ps.rx_bytes; in mvneta_get_stats64()
825 rx_dropped = cpu_stats->rx_dropped; in mvneta_get_stats64()
826 rx_errors = cpu_stats->rx_errors; in mvneta_get_stats64()
827 tx_packets = cpu_stats->es.ps.tx_packets; in mvneta_get_stats64()
828 tx_bytes = cpu_stats->es.ps.tx_bytes; in mvneta_get_stats64()
829 } while (u64_stats_fetch_retry(&cpu_stats->syncp, start)); in mvneta_get_stats64()
831 stats->rx_packets += rx_packets; in mvneta_get_stats64()
832 stats->rx_bytes += rx_bytes; in mvneta_get_stats64()
833 stats->rx_dropped += rx_dropped; in mvneta_get_stats64()
834 stats->rx_errors += rx_errors; in mvneta_get_stats64()
835 stats->tx_packets += tx_packets; in mvneta_get_stats64()
836 stats->tx_bytes += tx_bytes; in mvneta_get_stats64()
839 stats->tx_dropped = dev->stats.tx_dropped; in mvneta_get_stats64()
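mvneta_get_stats64() sums per-CPU counters, re-reading each CPU's block under a u64_stats sequence counter so a concurrent writer never leaves a torn 64-bit value in the snapshot. A simplified user-space model of that aggregation, with the seqcount retry only approximated and all names and the CPU count invented for the example:

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

struct pcpu_stats {
    unsigned int seq;        /* writers bump before and after an update */
    uint64_t rx_packets;
    uint64_t rx_bytes;
};

static struct pcpu_stats percpu[NR_CPUS];

static void sum_stats(uint64_t *rx_packets, uint64_t *rx_bytes)
{
    *rx_packets = 0;
    *rx_bytes = 0;
    for (int cpu = 0; cpu < NR_CPUS; cpu++) {
        unsigned int start;
        uint64_t pkts, bytes;

        do {                               /* retry if a writer raced us */
            start = percpu[cpu].seq;
            pkts  = percpu[cpu].rx_packets;
            bytes = percpu[cpu].rx_bytes;
        } while (start & 1 || start != percpu[cpu].seq);

        *rx_packets += pkts;
        *rx_bytes   += bytes;
    }
}

int main(void)
{
    uint64_t pkts, bytes;

    percpu[0].rx_packets = 10; percpu[0].rx_bytes = 1500;
    percpu[2].rx_packets = 5;  percpu[2].rx_bytes = 600;
    sum_stats(&pkts, &bytes);
    printf("%llu packets, %llu bytes\n",
           (unsigned long long)pkts, (unsigned long long)bytes);
    return 0;
}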
864 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), in mvneta_rxq_non_occup_desc_add()
867 ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX; in mvneta_rxq_non_occup_desc_add()
870 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), in mvneta_rxq_non_occup_desc_add()
880 val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id)); in mvneta_rxq_busy_desc_num_get()
896 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val); in mvneta_rxq_desc_num_update()
907 rx_done -= 0xff; in mvneta_rxq_desc_num_update()
914 rx_filled -= 0xff; in mvneta_rxq_desc_num_update()
916 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val); in mvneta_rxq_desc_num_update()
924 int rx_desc = rxq->next_desc_to_proc; in mvneta_rxq_next_desc_get()
926 rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc); in mvneta_rxq_next_desc_get()
927 prefetch(rxq->descs + rxq->next_desc_to_proc); in mvneta_rxq_next_desc_get()
928 return rxq->descs + rx_desc; in mvneta_rxq_next_desc_get()
938 val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) << in mvneta_max_rx_size_set()
951 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); in mvneta_rxq_offset_set()
956 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); in mvneta_rxq_offset_set()
960 /* Tx descriptors helper methods */
962 /* Update HW with number of TX descriptors to be sent */
969 pend_desc += txq->pending; in mvneta_txq_pend_desc_add()
971 /* Only 255 Tx descriptors can be added at once */ in mvneta_txq_pend_desc_add()
974 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); in mvneta_txq_pend_desc_add()
975 pend_desc -= val; in mvneta_txq_pend_desc_add()
977 txq->pending = 0; in mvneta_txq_pend_desc_add()
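mvneta_txq_pend_desc_add() has to respect the hardware limit noted in the comment: a single write to the TXQ update register can add at most 255 pending descriptors, so larger batches are pushed in chunks. A stand-alone sketch of the chunking, with the register write stubbed out and the constant name ours:

#include <stdio.h>

#define MAX_PER_WRITE 255

static void hw_write_pending(unsigned int val)
{
    printf("TXQ_UPDATE <- %u\n", val);   /* stand-in for the register write */
}

static void push_pending(unsigned int pend_desc)
{
    while (pend_desc) {
        unsigned int val = pend_desc > MAX_PER_WRITE ?
                           MAX_PER_WRITE : pend_desc;

        hw_write_pending(val);
        pend_desc -= val;
    }
}

int main(void)
{
    push_pending(600);   /* emitted as 255 + 255 + 90 */
    return 0;
}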
980 /* Get pointer to next TX descriptor to be processed (sent) by HW */
984 int tx_desc = txq->next_desc_to_proc; in mvneta_txq_next_desc_get()
986 txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc); in mvneta_txq_next_desc_get()
987 return txq->descs + tx_desc; in mvneta_txq_next_desc_get()
990 /* Release the last allocated TX descriptor. Useful to handle DMA
991 * mapping failures in the TX path.
995 if (txq->next_desc_to_proc == 0) in mvneta_txq_desc_put()
996 txq->next_desc_to_proc = txq->last_desc - 1; in mvneta_txq_desc_put()
998 txq->next_desc_to_proc--; in mvneta_txq_desc_put()
1008 val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id)); in mvneta_rxq_buf_size_set()
1013 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val); in mvneta_rxq_buf_size_set()
1022 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); in mvneta_rxq_bm_disable()
1024 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); in mvneta_rxq_bm_disable()
1033 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); in mvneta_rxq_bm_enable()
1035 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); in mvneta_rxq_bm_enable()
1044 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); in mvneta_rxq_long_pool_set()
1046 val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT); in mvneta_rxq_long_pool_set()
1048 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); in mvneta_rxq_long_pool_set()
1057 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); in mvneta_rxq_short_pool_set()
1059 val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT); in mvneta_rxq_short_pool_set()
1061 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); in mvneta_rxq_short_pool_set()
1072 dev_warn(pp->dev->dev.parent, in mvneta_bm_pool_bufsize_set()
1092 if (pp->bm_win_id < 0) { in mvneta_mbus_io_win_set()
1096 pp->bm_win_id = i; in mvneta_mbus_io_win_set()
1101 return -ENOMEM; in mvneta_mbus_io_win_set()
1103 i = pp->bm_win_id; in mvneta_mbus_io_win_set()
1115 mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000); in mvneta_mbus_io_win_set()
1134 err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize, in mvneta_bm_port_mbus_init()
1139 pp->bm_win_id = -1; in mvneta_bm_port_mbus_init()
1141 /* Open NETA -> BM window */ in mvneta_bm_port_mbus_init()
1142 err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize, in mvneta_bm_port_mbus_init()
1145 netdev_info(pp->dev, "fail to configure mbus window to BM\n"); in mvneta_bm_port_mbus_init()
1157 struct device_node *dn = pdev->dev.of_node; in mvneta_bm_port_init()
1160 if (!pp->neta_armada3700) { in mvneta_bm_port_init()
1168 if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) { in mvneta_bm_port_init()
1169 netdev_info(pp->dev, "missing long pool id\n"); in mvneta_bm_port_init()
1170 return -EINVAL; in mvneta_bm_port_init()
1174 pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id, in mvneta_bm_port_init()
1175 MVNETA_BM_LONG, pp->id, in mvneta_bm_port_init()
1176 MVNETA_RX_PKT_SIZE(pp->dev->mtu)); in mvneta_bm_port_init()
1177 if (!pp->pool_long) { in mvneta_bm_port_init()
1178 netdev_info(pp->dev, "fail to obtain long pool for port\n"); in mvneta_bm_port_init()
1179 return -ENOMEM; in mvneta_bm_port_init()
1182 pp->pool_long->port_map |= 1 << pp->id; in mvneta_bm_port_init()
1184 mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size, in mvneta_bm_port_init()
1185 pp->pool_long->id); in mvneta_bm_port_init()
1188 if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id)) in mvneta_bm_port_init()
1192 pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id, in mvneta_bm_port_init()
1193 MVNETA_BM_SHORT, pp->id, in mvneta_bm_port_init()
1195 if (!pp->pool_short) { in mvneta_bm_port_init()
1196 netdev_info(pp->dev, "fail to obtain short pool for port\n"); in mvneta_bm_port_init()
1197 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); in mvneta_bm_port_init()
1198 return -ENOMEM; in mvneta_bm_port_init()
1202 pp->pool_short->port_map |= 1 << pp->id; in mvneta_bm_port_init()
1203 mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size, in mvneta_bm_port_init()
1204 pp->pool_short->id); in mvneta_bm_port_init()
1213 struct mvneta_bm_pool *bm_pool = pp->pool_long; in mvneta_bm_update_mtu()
1214 struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool; in mvneta_bm_update_mtu()
1218 mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id); in mvneta_bm_update_mtu()
1219 if (hwbm_pool->buf_num) { in mvneta_bm_update_mtu()
1221 bm_pool->id); in mvneta_bm_update_mtu()
1225 bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu); in mvneta_bm_update_mtu()
1226 bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size); in mvneta_bm_update_mtu()
1227 hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + in mvneta_bm_update_mtu()
1228 SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size)); in mvneta_bm_update_mtu()
1231 num = hwbm_pool_add(hwbm_pool, hwbm_pool->size); in mvneta_bm_update_mtu()
1232 if (num != hwbm_pool->size) { in mvneta_bm_update_mtu()
1234 bm_pool->id, num, hwbm_pool->size); in mvneta_bm_update_mtu()
1237 mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id); in mvneta_bm_update_mtu()
1242 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); in mvneta_bm_update_mtu()
1243 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id); in mvneta_bm_update_mtu()
1245 pp->bm_priv = NULL; in mvneta_bm_update_mtu()
1246 pp->rx_offset_correction = MVNETA_SKB_HEADROOM; in mvneta_bm_update_mtu()
1248 netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n"); in mvneta_bm_update_mtu()
1251 /* Start the Ethernet port RX and TX activity */
1260 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_port_up()
1261 if (txq->descs) in mvneta_port_up()
1269 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_port_up()
1271 if (rxq->descs) in mvneta_port_up()
1295 netdev_warn(pp->dev, in mvneta_port_down()
1305 /* Stop Tx port activity. Check port Tx activity. Issue stop in mvneta_port_down()
1314 /* Wait for all Tx activity to terminate. */ in mvneta_port_down()
1318 netdev_warn(pp->dev, in mvneta_port_down()
1319 "TIMEOUT for TX stopped status=0x%08x\n", in mvneta_port_down()
1325 /* Check TX Command reg that all Txqs are stopped */ in mvneta_port_down()
1330 /* Double check to verify that TX FIFO is empty */ in mvneta_port_down()
1334 netdev_warn(pp->dev, in mvneta_port_down()
1335 "TX FIFO empty timeout status=0x%08x\n", in mvneta_port_down()
1374 /* Set all entries in Unicast MAC Table; queue==-1 means reject all */
1380 if (queue == -1) { in mvneta_set_ucast_table()
1391 /* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
1397 if (queue == -1) { in mvneta_set_special_mcast_table()
1409 /* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
1415 if (queue == -1) { in mvneta_set_other_mcast_table()
1416 memset(pp->mcast_count, 0, sizeof(pp->mcast_count)); in mvneta_set_other_mcast_table()
1419 memset(pp->mcast_count, 1, sizeof(pp->mcast_count)); in mvneta_set_other_mcast_table()
1469 * Resets RX and TX descriptor rings.
1492 * TX queues modulo their number. If there is only one TX in mvneta_defaults_set()
1499 if (!pp->neta_armada3700) { in mvneta_defaults_set()
1508 /* With only one TX queue we configure a special case in mvneta_defaults_set()
1513 txq_map = (cpu == pp->rxq_def) ? in mvneta_defaults_set()
1524 /* Reset RX and TX DMAs */ in mvneta_defaults_set()
1539 if (pp->bm_priv) in mvneta_defaults_set()
1547 if (pp->bm_priv) in mvneta_defaults_set()
1548 mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr); in mvneta_defaults_set()
1551 val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def); in mvneta_defaults_set()
1580 mvneta_set_ucast_table(pp, -1); in mvneta_defaults_set()
1581 mvneta_set_special_mcast_table(pp, -1); in mvneta_defaults_set()
1582 mvneta_set_other_mcast_table(pp, -1); in mvneta_defaults_set()
1584 /* Set port interrupt enable register - default enable all */ in mvneta_defaults_set()
1592 /* Set max sizes for tx queues */
1609 /* TX token size and all TXQs token size must be larger than MTU */ in mvneta_txq_max_tx_size_set()
1651 if (queue == -1) { in mvneta_set_ucast_addr()
1669 if (queue != -1) { in mvneta_mac_addr_set()
1688 mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id), in mvneta_rx_pkts_coal_set()
1701 clk_rate = clk_get_rate(pp->clk); in mvneta_rx_time_coal_set()
1704 mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val); in mvneta_rx_time_coal_set()
1713 val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id)); in mvneta_tx_done_pkts_coal_set()
1718 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val); in mvneta_tx_done_pkts_coal_set()
1728 rx_desc->buf_phys_addr = phys_addr; in mvneta_rx_desc_fill()
1729 i = rx_desc - rxq->descs; in mvneta_rx_desc_fill()
1730 rxq->buf_virt_addr[i] = virt_addr; in mvneta_rx_desc_fill()
1740 /* Only 255 TX descriptors can be updated at once */ in mvneta_txq_sent_desc_dec()
1743 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); in mvneta_txq_sent_desc_dec()
1744 sent_desc = sent_desc - 0xff; in mvneta_txq_sent_desc_dec()
1748 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); in mvneta_txq_sent_desc_dec()
1751 /* Get number of TX descriptors already sent by HW */
1758 val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id)); in mvneta_txq_sent_desc_num_get()
1816 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_rx_error()
1817 u32 status = rx_desc->status; in mvneta_rx_error()
1819 /* update per-cpu counter */ in mvneta_rx_error()
1820 u64_stats_update_begin(&stats->syncp); in mvneta_rx_error()
1821 stats->rx_errors++; in mvneta_rx_error()
1822 u64_stats_update_end(&stats->syncp); in mvneta_rx_error()
1826 netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n", in mvneta_rx_error()
1827 status, rx_desc->data_size); in mvneta_rx_error()
1830 netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n", in mvneta_rx_error()
1831 status, rx_desc->data_size); in mvneta_rx_error()
1834 netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n", in mvneta_rx_error()
1835 status, rx_desc->data_size); in mvneta_rx_error()
1838 netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n", in mvneta_rx_error()
1839 status, rx_desc->data_size); in mvneta_rx_error()
1847 if ((pp->dev->features & NETIF_F_RXCSUM) && in mvneta_rx_csum()
1855 /* Return tx queue pointer (find last set bit) according to <cause> returned
1862 int queue = fls(cause) - 1; in mvneta_tx_done_policy()
1864 return &pp->txqs[queue]; in mvneta_tx_done_policy()
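mvneta_tx_done_policy() picks the highest-numbered TX queue whose bit is set in the cause bitmap, i.e. fls(cause) - 1. A small sketch with fls re-implemented via a compiler builtin (names are illustrative):

#include <stdio.h>

static int fls32(unsigned int x)
{
    return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
    unsigned int cause = 0x0b;          /* queues 0, 1 and 3 pending */
    int queue = fls32(cause) - 1;

    printf("service queue %d first\n", queue);   /* prints 3 */
    return 0;
}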
1867 /* Free tx queue skbuffs */
1881 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index]; in mvneta_txq_bufs_free()
1882 struct mvneta_tx_desc *tx_desc = txq->descs + in mvneta_txq_bufs_free()
1883 txq->txq_get_index; in mvneta_txq_bufs_free()
1887 if (buf->type == MVNETA_TYPE_XDP_NDO || in mvneta_txq_bufs_free()
1888 buf->type == MVNETA_TYPE_SKB) in mvneta_txq_bufs_free()
1889 dma_unmap_single(pp->dev->dev.parent, in mvneta_txq_bufs_free()
1890 tx_desc->buf_phys_addr, in mvneta_txq_bufs_free()
1891 tx_desc->data_size, DMA_TO_DEVICE); in mvneta_txq_bufs_free()
1892 if ((buf->type == MVNETA_TYPE_TSO || in mvneta_txq_bufs_free()
1893 buf->type == MVNETA_TYPE_SKB) && buf->skb) { in mvneta_txq_bufs_free()
1894 bytes_compl += buf->skb->len; in mvneta_txq_bufs_free()
1896 dev_kfree_skb_any(buf->skb); in mvneta_txq_bufs_free()
1897 } else if ((buf->type == MVNETA_TYPE_XDP_TX || in mvneta_txq_bufs_free()
1898 buf->type == MVNETA_TYPE_XDP_NDO) && buf->xdpf) { in mvneta_txq_bufs_free()
1899 if (napi && buf->type == MVNETA_TYPE_XDP_TX) in mvneta_txq_bufs_free()
1900 xdp_return_frame_rx_napi(buf->xdpf); in mvneta_txq_bufs_free()
1902 xdp_return_frame_bulk(buf->xdpf, &bq); in mvneta_txq_bufs_free()
1916 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_txq_done()
1925 txq->count -= tx_done; in mvneta_txq_done()
1928 if (txq->count <= txq->tx_wake_threshold) in mvneta_txq_done()
1943 page = page_pool_alloc_pages(rxq->page_pool, in mvneta_rx_refill()
1946 return -ENOMEM; in mvneta_rx_refill()
1948 phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction; in mvneta_rx_refill()
1954 /* Handle tx checksum */
1957 if (skb->ip_summed == CHECKSUM_PARTIAL) { in mvneta_skb_tx_csum()
1966 ip_hdr_len = ip4h->ihl; in mvneta_skb_tx_csum()
1967 l4_proto = ip4h->protocol; in mvneta_skb_tx_csum()
1974 l4_proto = ip6h->nexthdr; in mvneta_skb_tx_csum()
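mvneta_skb_tx_csum() only acts on CHECKSUM_PARTIAL skbs and needs the IP header length and the L4 protocol so the descriptor's checksum command can be built. A stand-alone sketch of pulling those two fields out of a raw IPv4 header (the struct and names are ours, not the driver's):

#include <stdint.h>
#include <stdio.h>

struct l3_info {
    unsigned int ip_hdr_len;   /* IHL, in 32-bit words */
    unsigned int l4_proto;     /* e.g. 6 = TCP, 17 = UDP */
};

static struct l3_info parse_ipv4(const uint8_t *hdr)
{
    struct l3_info info;

    info.ip_hdr_len = hdr[0] & 0x0f;   /* low nibble of byte 0 */
    info.l4_proto   = hdr[9];          /* protocol field */
    return info;
}

int main(void)
{
    /* Minimal IPv4 header: version 4, IHL 5, TTL 64, protocol TCP (6) */
    uint8_t hdr[20] = { 0x45, 0, 0, 20, 0, 0, 0, 0, 64, 6 };
    struct l3_info info = parse_ipv4(hdr);

    printf("ihl=%u words, l4=%u\n", info.ip_hdr_len, info.l4_proto);
    return 0;
}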
1995 if (pp->bm_priv) { in mvneta_rxq_drop_pkts()
2002 bm_pool = &pp->bm_priv->bm_pools[pool_id]; in mvneta_rxq_drop_pkts()
2004 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, in mvneta_rxq_drop_pkts()
2005 rx_desc->buf_phys_addr); in mvneta_rxq_drop_pkts()
2010 for (i = 0; i < rxq->size; i++) { in mvneta_rxq_drop_pkts()
2011 struct mvneta_rx_desc *rx_desc = rxq->descs + i; in mvneta_rxq_drop_pkts()
2012 void *data = rxq->buf_virt_addr[i]; in mvneta_rxq_drop_pkts()
2013 if (!data || !(rx_desc->buf_phys_addr)) in mvneta_rxq_drop_pkts()
2016 page_pool_put_full_page(rxq->page_pool, data, false); in mvneta_rxq_drop_pkts()
2018 if (xdp_rxq_info_is_reg(&rxq->xdp_rxq)) in mvneta_rxq_drop_pkts()
2019 xdp_rxq_info_unreg(&rxq->xdp_rxq); in mvneta_rxq_drop_pkts()
2020 page_pool_destroy(rxq->page_pool); in mvneta_rxq_drop_pkts()
2021 rxq->page_pool = NULL; in mvneta_rxq_drop_pkts()
2028 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_update_stats()
2030 u64_stats_update_begin(&stats->syncp); in mvneta_update_stats()
2031 stats->es.ps.rx_packets += ps->rx_packets; in mvneta_update_stats()
2032 stats->es.ps.rx_bytes += ps->rx_bytes; in mvneta_update_stats()
2034 stats->es.ps.xdp_redirect += ps->xdp_redirect; in mvneta_update_stats()
2035 stats->es.ps.xdp_pass += ps->xdp_pass; in mvneta_update_stats()
2036 stats->es.ps.xdp_drop += ps->xdp_drop; in mvneta_update_stats()
2037 u64_stats_update_end(&stats->syncp); in mvneta_update_stats()
2044 int curr_desc = rxq->first_to_refill; in mvneta_rx_refill_queue()
2047 for (i = 0; (i < rxq->refill_num) && (i < 64); i++) { in mvneta_rx_refill_queue()
2048 rx_desc = rxq->descs + curr_desc; in mvneta_rx_refill_queue()
2049 if (!(rx_desc->buf_phys_addr)) { in mvneta_rx_refill_queue()
2054 rxq->id, i, rxq->refill_num); in mvneta_rx_refill_queue()
2056 stats = this_cpu_ptr(pp->stats); in mvneta_rx_refill_queue()
2057 u64_stats_update_begin(&stats->syncp); in mvneta_rx_refill_queue()
2058 stats->es.refill_error++; in mvneta_rx_refill_queue()
2059 u64_stats_update_end(&stats->syncp); in mvneta_rx_refill_queue()
2065 rxq->refill_num -= i; in mvneta_rx_refill_queue()
2066 rxq->first_to_refill = curr_desc; in mvneta_rx_refill_queue()
2081 for (i = 0; i < sinfo->nr_frags; i++) in mvneta_xdp_put_buff()
2082 page_pool_put_full_page(rxq->page_pool, in mvneta_xdp_put_buff()
2083 skb_frag_page(&sinfo->frags[i]), true); in mvneta_xdp_put_buff()
2086 page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data), in mvneta_xdp_put_buff()
2095 struct device *dev = pp->dev->dev.parent; in mvneta_xdp_submit_frame()
2101 num_frames += sinfo->nr_frags; in mvneta_xdp_submit_frame()
2103 if (txq->count + num_frames >= txq->size) in mvneta_xdp_submit_frame()
2107 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; in mvneta_xdp_submit_frame()
2109 int len = xdpf->len; in mvneta_xdp_submit_frame()
2113 frag = &sinfo->frags[i - 1]; in mvneta_xdp_submit_frame()
2123 : xdpf->data; in mvneta_xdp_submit_frame()
2131 buf->type = MVNETA_TYPE_XDP_NDO; in mvneta_xdp_submit_frame()
2134 : virt_to_page(xdpf->data); in mvneta_xdp_submit_frame()
2139 dma_addr += sizeof(*xdpf) + xdpf->headroom; in mvneta_xdp_submit_frame()
2142 buf->type = MVNETA_TYPE_XDP_TX; in mvneta_xdp_submit_frame()
2144 buf->xdpf = unlikely(i) ? NULL : xdpf; in mvneta_xdp_submit_frame()
2146 tx_desc->command = unlikely(i) ? 0 : MVNETA_TXD_F_DESC; in mvneta_xdp_submit_frame()
2147 tx_desc->buf_phys_addr = dma_addr; in mvneta_xdp_submit_frame()
2148 tx_desc->data_size = len; in mvneta_xdp_submit_frame()
2154 tx_desc->command |= MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD; in mvneta_xdp_submit_frame()
2156 txq->pending += num_frames; in mvneta_xdp_submit_frame()
2157 txq->count += num_frames; in mvneta_xdp_submit_frame()
2162 for (i--; i >= 0; i--) { in mvneta_xdp_submit_frame()
2164 tx_desc = txq->descs + txq->next_desc_to_proc; in mvneta_xdp_submit_frame()
2165 dma_unmap_single(dev, tx_desc->buf_phys_addr, in mvneta_xdp_submit_frame()
2166 tx_desc->data_size, in mvneta_xdp_submit_frame()
2176 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_xdp_xmit_back()
2188 txq = &pp->txqs[cpu % txq_number]; in mvneta_xdp_xmit_back()
2189 nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_xdp_xmit_back()
2194 u64_stats_update_begin(&stats->syncp); in mvneta_xdp_xmit_back()
2195 stats->es.ps.tx_bytes += nxmit_byte; in mvneta_xdp_xmit_back()
2196 stats->es.ps.tx_packets++; in mvneta_xdp_xmit_back()
2197 stats->es.ps.xdp_tx++; in mvneta_xdp_xmit_back()
2198 u64_stats_update_end(&stats->syncp); in mvneta_xdp_xmit_back()
2202 u64_stats_update_begin(&stats->syncp); in mvneta_xdp_xmit_back()
2203 stats->es.ps.xdp_tx_err++; in mvneta_xdp_xmit_back()
2204 u64_stats_update_end(&stats->syncp); in mvneta_xdp_xmit_back()
2216 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_xdp_xmit()
2223 if (unlikely(test_bit(__MVNETA_DOWN, &pp->state))) in mvneta_xdp_xmit()
2224 return -ENETDOWN; in mvneta_xdp_xmit()
2227 return -EINVAL; in mvneta_xdp_xmit()
2229 txq = &pp->txqs[cpu % txq_number]; in mvneta_xdp_xmit()
2230 nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_xdp_xmit()
2246 u64_stats_update_begin(&stats->syncp); in mvneta_xdp_xmit()
2247 stats->es.ps.tx_bytes += nxmit_byte; in mvneta_xdp_xmit()
2248 stats->es.ps.tx_packets += nxmit; in mvneta_xdp_xmit()
2249 stats->es.ps.xdp_xmit += nxmit; in mvneta_xdp_xmit()
2250 stats->es.ps.xdp_xmit_err += num_frame - nxmit; in mvneta_xdp_xmit()
2251 u64_stats_update_end(&stats->syncp); in mvneta_xdp_xmit()
2264 len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction; in mvneta_run_xdp()
2265 data_len = xdp->data_end - xdp->data; in mvneta_run_xdp()
2269 sync = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction; in mvneta_run_xdp()
2274 stats->xdp_pass++; in mvneta_run_xdp()
2279 err = xdp_do_redirect(pp->dev, xdp, prog); in mvneta_run_xdp()
2285 stats->xdp_redirect++; in mvneta_run_xdp()
2295 bpf_warn_invalid_xdp_action(pp->dev, prog, act); in mvneta_run_xdp()
2298 trace_xdp_exception(pp->dev, prog, act); in mvneta_run_xdp()
2303 stats->xdp_drop++; in mvneta_run_xdp()
2307 stats->rx_bytes += frame_sz + xdp->data_end - xdp->data - data_len; in mvneta_run_xdp()
2308 stats->rx_packets++; in mvneta_run_xdp()
2321 int data_len = -MVNETA_MH_SIZE, len; in mvneta_swbm_rx_frame()
2322 struct net_device *dev = pp->dev; in mvneta_swbm_rx_frame()
2330 data_len += len - ETH_FCS_LEN; in mvneta_swbm_rx_frame()
2332 *size = *size - len; in mvneta_swbm_rx_frame()
2334 dma_dir = page_pool_get_dma_dir(rxq->page_pool); in mvneta_swbm_rx_frame()
2335 dma_sync_single_for_cpu(dev->dev.parent, in mvneta_swbm_rx_frame()
2336 rx_desc->buf_phys_addr, in mvneta_swbm_rx_frame()
2339 rx_desc->buf_phys_addr = 0; in mvneta_swbm_rx_frame()
2344 xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE, in mvneta_swbm_rx_frame()
2356 struct net_device *dev = pp->dev; in mvneta_swbm_add_rx_fragment()
2365 data_len = len - ETH_FCS_LEN; in mvneta_swbm_add_rx_fragment()
2367 dma_dir = page_pool_get_dma_dir(rxq->page_pool); in mvneta_swbm_add_rx_fragment()
2368 dma_sync_single_for_cpu(dev->dev.parent, in mvneta_swbm_add_rx_fragment()
2369 rx_desc->buf_phys_addr, in mvneta_swbm_add_rx_fragment()
2371 rx_desc->buf_phys_addr = 0; in mvneta_swbm_add_rx_fragment()
2374 sinfo->nr_frags = 0; in mvneta_swbm_add_rx_fragment()
2376 if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) { in mvneta_swbm_add_rx_fragment()
2377 skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags++]; in mvneta_swbm_add_rx_fragment()
2380 pp->rx_offset_correction, data_len); in mvneta_swbm_add_rx_fragment()
2383 sinfo->xdp_frags_size = *size; in mvneta_swbm_add_rx_fragment()
2389 page_pool_put_full_page(rxq->page_pool, page, true); in mvneta_swbm_add_rx_fragment()
2391 *size -= len; in mvneta_swbm_add_rx_fragment()
2399 u32 metasize = xdp->data - xdp->data_meta; in mvneta_swbm_build_skb()
2404 num_frags = sinfo->nr_frags; in mvneta_swbm_build_skb()
2406 skb = build_skb(xdp->data_hard_start, PAGE_SIZE); in mvneta_swbm_build_skb()
2408 return ERR_PTR(-ENOMEM); in mvneta_swbm_build_skb()
2412 skb_reserve(skb, xdp->data - xdp->data_hard_start); in mvneta_swbm_build_skb()
2413 skb_put(skb, xdp->data_end - xdp->data); in mvneta_swbm_build_skb()
2416 skb->ip_summed = mvneta_rx_csum(pp, desc_status); in mvneta_swbm_build_skb()
2420 sinfo->xdp_frags_size, in mvneta_swbm_build_skb()
2421 num_frags * xdp->frame_sz, in mvneta_swbm_build_skb()
2433 struct net_device *dev = pp->dev; in mvneta_rx_swbm()
2439 xdp_init_buff(&xdp_buf, PAGE_SIZE, &rxq->xdp_rxq); in mvneta_rx_swbm()
2445 xdp_prog = READ_ONCE(pp->xdp_prog); in mvneta_rx_swbm()
2454 index = rx_desc - rxq->descs; in mvneta_rx_swbm()
2455 page = (struct page *)rxq->buf_virt_addr[index]; in mvneta_rx_swbm()
2457 rx_status = rx_desc->status; in mvneta_rx_swbm()
2459 rxq->refill_num++; in mvneta_rx_swbm()
2468 size = rx_desc->data_size; in mvneta_rx_swbm()
2469 frame_sz = size - ETH_FCS_LEN; in mvneta_rx_swbm()
2476 rx_desc->buf_phys_addr = 0; in mvneta_rx_swbm()
2477 page_pool_put_full_page(rxq->page_pool, page, in mvneta_rx_swbm()
2491 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1); in mvneta_rx_swbm()
2499 skb = mvneta_swbm_build_skb(pp, rxq->page_pool, &xdp_buf, desc_status); in mvneta_rx_swbm()
2501 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_rx_swbm()
2503 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1); in mvneta_rx_swbm()
2505 u64_stats_update_begin(&stats->syncp); in mvneta_rx_swbm()
2506 stats->es.skb_alloc_error++; in mvneta_rx_swbm()
2507 stats->rx_dropped++; in mvneta_rx_swbm()
2508 u64_stats_update_end(&stats->syncp); in mvneta_rx_swbm()
2513 ps.rx_bytes += skb->len; in mvneta_rx_swbm()
2516 skb->protocol = eth_type_trans(skb, dev); in mvneta_rx_swbm()
2523 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1); in mvneta_rx_swbm()
2545 struct net_device *dev = pp->dev; in mvneta_rx_hwbm()
2570 rx_status = rx_desc->status; in mvneta_rx_hwbm()
2571 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); in mvneta_rx_hwbm()
2572 data = (u8 *)(uintptr_t)rx_desc->buf_cookie; in mvneta_rx_hwbm()
2573 phys_addr = rx_desc->buf_phys_addr; in mvneta_rx_hwbm()
2575 bm_pool = &pp->bm_priv->bm_pools[pool_id]; in mvneta_rx_hwbm()
2581 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, in mvneta_rx_hwbm()
2582 rx_desc->buf_phys_addr); in mvneta_rx_hwbm()
2595 dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev, in mvneta_rx_hwbm()
2596 rx_desc->buf_phys_addr, in mvneta_rx_hwbm()
2603 skb->protocol = eth_type_trans(skb, dev); in mvneta_rx_hwbm()
2604 skb->ip_summed = mvneta_rx_csum(pp, rx_status); in mvneta_rx_hwbm()
2611 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, in mvneta_rx_hwbm()
2612 rx_desc->buf_phys_addr); in mvneta_rx_hwbm()
2619 err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC); in mvneta_rx_hwbm()
2623 netdev_err(dev, "Linux processing - Can't refill\n"); in mvneta_rx_hwbm()
2625 stats = this_cpu_ptr(pp->stats); in mvneta_rx_hwbm()
2626 u64_stats_update_begin(&stats->syncp); in mvneta_rx_hwbm()
2627 stats->es.refill_error++; in mvneta_rx_hwbm()
2628 u64_stats_update_end(&stats->syncp); in mvneta_rx_hwbm()
2633 frag_size = bm_pool->hwbm_pool.frag_size; in mvneta_rx_hwbm()
2640 dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr, in mvneta_rx_hwbm()
2641 bm_pool->buf_size, DMA_FROM_DEVICE); in mvneta_rx_hwbm()
2652 skb->protocol = eth_type_trans(skb, dev); in mvneta_rx_hwbm()
2653 skb->ip_summed = mvneta_rx_csum(pp, rx_status); in mvneta_rx_hwbm()
2659 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_rx_hwbm()
2661 u64_stats_update_begin(&stats->syncp); in mvneta_rx_hwbm()
2662 stats->es.ps.rx_packets += rcvd_pkts; in mvneta_rx_hwbm()
2663 stats->es.ps.rx_bytes += rcvd_bytes; in mvneta_rx_hwbm()
2664 u64_stats_update_end(&stats->syncp); in mvneta_rx_hwbm()
2676 struct device *dev = pp->dev->dev.parent; in mvneta_free_tso_hdrs()
2680 if (txq->tso_hdrs[i]) { in mvneta_free_tso_hdrs()
2682 txq->tso_hdrs[i], in mvneta_free_tso_hdrs()
2683 txq->tso_hdrs_phys[i]); in mvneta_free_tso_hdrs()
2684 txq->tso_hdrs[i] = NULL; in mvneta_free_tso_hdrs()
2692 struct device *dev = pp->dev->dev.parent; in mvneta_alloc_tso_hdrs()
2695 num = DIV_ROUND_UP(txq->size, MVNETA_TSO_PER_PAGE); in mvneta_alloc_tso_hdrs()
2697 txq->tso_hdrs[i] = dma_alloc_coherent(dev, MVNETA_TSO_PAGE_SIZE, in mvneta_alloc_tso_hdrs()
2698 &txq->tso_hdrs_phys[i], in mvneta_alloc_tso_hdrs()
2700 if (!txq->tso_hdrs[i]) { in mvneta_alloc_tso_hdrs()
2702 return -ENOMEM; in mvneta_alloc_tso_hdrs()
2713 index = txq->txq_put_index / MVNETA_TSO_PER_PAGE; in mvneta_get_tso_hdr()
2714 offset = (txq->txq_put_index % MVNETA_TSO_PER_PAGE) * TSO_HEADER_SIZE; in mvneta_get_tso_hdr()
2716 *dma = txq->tso_hdrs_phys[index] + offset; in mvneta_get_tso_hdr()
2718 return txq->tso_hdrs[index] + offset; in mvneta_get_tso_hdr()
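mvneta_get_tso_hdr() locates a TSO header slot inside pre-allocated pages: the put index divided by the number of headers per page selects the page, and the remainder times the header size gives the offset into it. A sketch of that arithmetic with illustrative constants:

#include <stdio.h>

#define HDR_SIZE       128
#define HDRS_PER_PAGE  32      /* 4096 / 128, values for illustration */

static void hdr_slot(unsigned int put_index,
                     unsigned int *page, unsigned int *offset)
{
    *page   = put_index / HDRS_PER_PAGE;
    *offset = (put_index % HDRS_PER_PAGE) * HDR_SIZE;
}

int main(void)
{
    unsigned int page, offset;

    hdr_slot(70, &page, &offset);
    printf("page %u, offset %u\n", page, offset);   /* page 2, offset 768 */
    return 0;
}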
2724 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; in mvneta_tso_put_hdr()
2734 tx_desc->data_size = hdr_len; in mvneta_tso_put_hdr()
2735 tx_desc->command = mvneta_skb_tx_csum(skb); in mvneta_tso_put_hdr()
2736 tx_desc->command |= MVNETA_TXD_F_DESC; in mvneta_tso_put_hdr()
2737 tx_desc->buf_phys_addr = hdr_phys; in mvneta_tso_put_hdr()
2738 buf->type = MVNETA_TYPE_TSO; in mvneta_tso_put_hdr()
2739 buf->skb = NULL; in mvneta_tso_put_hdr()
2749 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; in mvneta_tso_put_data()
2753 tx_desc->data_size = size; in mvneta_tso_put_data()
2754 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data, in mvneta_tso_put_data()
2756 if (unlikely(dma_mapping_error(dev->dev.parent, in mvneta_tso_put_data()
2757 tx_desc->buf_phys_addr))) { in mvneta_tso_put_data()
2759 return -ENOMEM; in mvneta_tso_put_data()
2762 tx_desc->command = 0; in mvneta_tso_put_data()
2763 buf->type = MVNETA_TYPE_SKB; in mvneta_tso_put_data()
2764 buf->skb = NULL; in mvneta_tso_put_data()
2768 tx_desc->command = MVNETA_TXD_L_DESC; in mvneta_tso_put_data()
2772 buf->skb = skb; in mvneta_tso_put_data()
2785 if (desc_idx >= txq->size) in mvneta_release_descs()
2786 desc_idx -= txq->size; in mvneta_release_descs()
2788 for (i = num; i >= 0; i--) { in mvneta_release_descs()
2789 struct mvneta_tx_desc *tx_desc = txq->descs + desc_idx; in mvneta_release_descs()
2790 struct mvneta_tx_buf *buf = &txq->buf[desc_idx]; in mvneta_release_descs()
2792 if (buf->type == MVNETA_TYPE_SKB) in mvneta_release_descs()
2793 dma_unmap_single(pp->dev->dev.parent, in mvneta_release_descs()
2794 tx_desc->buf_phys_addr, in mvneta_release_descs()
2795 tx_desc->data_size, in mvneta_release_descs()
2801 desc_idx = txq->size; in mvneta_release_descs()
2802 desc_idx -= 1; in mvneta_release_descs()
2815 if ((txq->count + tso_count_descs(skb)) >= txq->size) in mvneta_tx_tso()
2823 first_desc = txq->txq_put_index; in mvneta_tx_tso()
2828 total_len = skb->len - hdr_len; in mvneta_tx_tso()
2830 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); in mvneta_tx_tso()
2831 total_len -= data_left; in mvneta_tx_tso()
2848 data_left -= size; in mvneta_tx_tso()
2858 * be DMA-unmapped. in mvneta_tx_tso()
2860 mvneta_release_descs(pp, txq, first_desc, desc_count - 1); in mvneta_tx_tso()
2864 /* Handle tx fragmentation processing */
2869 int i, nr_frags = skb_shinfo(skb)->nr_frags; in mvneta_tx_frag_process()
2870 int first_desc = txq->txq_put_index; in mvneta_tx_frag_process()
2873 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; in mvneta_tx_frag_process()
2874 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in mvneta_tx_frag_process()
2878 tx_desc->data_size = skb_frag_size(frag); in mvneta_tx_frag_process()
2880 tx_desc->buf_phys_addr = in mvneta_tx_frag_process()
2881 dma_map_single(pp->dev->dev.parent, addr, in mvneta_tx_frag_process()
2882 tx_desc->data_size, DMA_TO_DEVICE); in mvneta_tx_frag_process()
2884 if (dma_mapping_error(pp->dev->dev.parent, in mvneta_tx_frag_process()
2885 tx_desc->buf_phys_addr)) { in mvneta_tx_frag_process()
2890 if (i == nr_frags - 1) { in mvneta_tx_frag_process()
2892 tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD; in mvneta_tx_frag_process()
2893 buf->skb = skb; in mvneta_tx_frag_process()
2896 tx_desc->command = 0; in mvneta_tx_frag_process()
2897 buf->skb = NULL; in mvneta_tx_frag_process()
2899 buf->type = MVNETA_TYPE_SKB; in mvneta_tx_frag_process()
2909 mvneta_release_descs(pp, txq, first_desc, i - 1); in mvneta_tx_frag_process()
2910 return -ENOMEM; in mvneta_tx_frag_process()
2913 /* Main tx processing */
2918 struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; in mvneta_tx()
2919 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; in mvneta_tx()
2921 int len = skb->len; in mvneta_tx()
2933 frags = skb_shinfo(skb)->nr_frags + 1; in mvneta_tx()
2940 tx_desc->data_size = skb_headlen(skb); in mvneta_tx()
2942 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data, in mvneta_tx()
2943 tx_desc->data_size, in mvneta_tx()
2945 if (unlikely(dma_mapping_error(dev->dev.parent, in mvneta_tx()
2946 tx_desc->buf_phys_addr))) { in mvneta_tx()
2952 buf->type = MVNETA_TYPE_SKB; in mvneta_tx()
2956 tx_desc->command = tx_cmd; in mvneta_tx()
2957 buf->skb = skb; in mvneta_tx()
2962 buf->skb = NULL; in mvneta_tx()
2964 tx_desc->command = tx_cmd; in mvneta_tx()
2967 dma_unmap_single(dev->dev.parent, in mvneta_tx()
2968 tx_desc->buf_phys_addr, in mvneta_tx()
2969 tx_desc->data_size, in mvneta_tx()
2980 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_tx()
2984 txq->count += frags; in mvneta_tx()
2985 if (txq->count >= txq->tx_stop_threshold) in mvneta_tx()
2989 txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK) in mvneta_tx()
2992 txq->pending += frags; in mvneta_tx()
2994 u64_stats_update_begin(&stats->syncp); in mvneta_tx()
2995 stats->es.ps.tx_bytes += len; in mvneta_tx()
2996 stats->es.ps.tx_packets++; in mvneta_tx()
2997 u64_stats_update_end(&stats->syncp); in mvneta_tx()
2999 dev->stats.tx_dropped++; in mvneta_tx()
3007 /* Free tx resources, when resetting a port */
3012 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_txq_done_force()
3013 int tx_done = txq->count; in mvneta_txq_done_force()
3018 txq->count = 0; in mvneta_txq_done_force()
3019 txq->txq_put_index = 0; in mvneta_txq_done_force()
3020 txq->txq_get_index = 0; in mvneta_txq_done_force()
3023 /* Handle tx done - called in softirq context. The <cause_tx_done> argument
3035 nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_tx_done_gbe()
3038 if (txq->count) in mvneta_tx_done_gbe()
3042 cause_tx_done &= ~((1 << txq->id)); in mvneta_tx_done_gbe()
3058 for (j = 7; j >= 0; j--) { in mvneta_addr_crc()
3069 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
3071 * Table entries in the DA-Filter table. This method sets the Special
3090 if (queue == -1) in mvneta_set_special_mcast_addr()
3103 * A CRC-8 is used as an index to the Other Multicast Table entries
3104 * in the DA-Filter table.
3105 * The method gets the CRC-8 value from the calling routine and
3107 * specified CRC-8.
3122 if (queue == -1) { in mvneta_set_other_mcast_addr()
3135 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
3137 * Table entries in the DA-Filter table.
3138 * 2) Other Multicast Table for multicast of another type. A CRC-8 value
3140 * DA-Filter table.
3153 if (queue == -1) { in mvneta_mcast_addr_set()
3154 if (pp->mcast_count[crc_result] == 0) { in mvneta_mcast_addr_set()
3155 netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n", in mvneta_mcast_addr_set()
3157 return -EINVAL; in mvneta_mcast_addr_set()
3160 pp->mcast_count[crc_result]--; in mvneta_mcast_addr_set()
3161 if (pp->mcast_count[crc_result] != 0) { in mvneta_mcast_addr_set()
3162 netdev_info(pp->dev, in mvneta_mcast_addr_set()
3164 pp->mcast_count[crc_result], crc_result); in mvneta_mcast_addr_set()
3165 return -EINVAL; in mvneta_mcast_addr_set()
3168 pp->mcast_count[crc_result]++; in mvneta_mcast_addr_set()
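Per the comments above, multicast filtering hashes the address with a CRC-8 that indexes the 256-entry DA-Filter table, and mvneta_mcast_addr_set() keeps a reference count per CRC value so an entry is only cleared once the last address hashing to it is removed. A stand-alone sketch of that scheme; the polynomial here is chosen for the example and is not claimed to match the hardware's:

#include <stdint.h>
#include <stdio.h>

static uint8_t crc8(const uint8_t *addr, int len)
{
    uint8_t crc = 0;

    for (int i = 0; i < len; i++) {
        crc ^= addr[i];
        for (int j = 7; j >= 0; j--)
            crc = (crc & 0x80) ? (crc << 1) ^ 0x07 : crc << 1;
    }
    return crc;
}

static unsigned int mcast_count[256];   /* one refcount per table entry */

int main(void)
{
    uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x42 };
    uint8_t idx = crc8(mac, 6);

    mcast_count[idx]++;   /* address added to the filter table */
    printf("crc8 index 0x%02x, refcount %u\n", idx, mcast_count[idx]);
    return 0;
}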
3208 if (dev->flags & IFF_PROMISC) { in mvneta_set_rx_mode()
3211 mvneta_set_ucast_table(pp, pp->rxq_def); in mvneta_set_rx_mode()
3212 mvneta_set_special_mcast_table(pp, pp->rxq_def); in mvneta_set_rx_mode()
3213 mvneta_set_other_mcast_table(pp, pp->rxq_def); in mvneta_set_rx_mode()
3217 mvneta_set_ucast_table(pp, -1); in mvneta_set_rx_mode()
3218 mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def); in mvneta_set_rx_mode()
3220 if (dev->flags & IFF_ALLMULTI) { in mvneta_set_rx_mode()
3222 mvneta_set_special_mcast_table(pp, pp->rxq_def); in mvneta_set_rx_mode()
3223 mvneta_set_other_mcast_table(pp, pp->rxq_def); in mvneta_set_rx_mode()
3226 mvneta_set_special_mcast_table(pp, -1); in mvneta_set_rx_mode()
3227 mvneta_set_other_mcast_table(pp, -1); in mvneta_set_rx_mode()
3231 mvneta_mcast_addr_set(pp, ha->addr, in mvneta_set_rx_mode()
3232 pp->rxq_def); in mvneta_set_rx_mode()
3239 /* Interrupt handling - the callback for request_irq() */
3245 napi_schedule(&pp->napi); in mvneta_isr()
3250 /* Interrupt handling - the callback for request_percpu_irq() */
3255 disable_percpu_irq(port->pp->dev->irq); in mvneta_percpu_isr()
3256 napi_schedule(&port->napi); in mvneta_percpu_isr()
3265 phylink_pcs_change(&pp->phylink_pcs, in mvneta_link_change()
3270 * Bits 0-7 of the causeRxTx register indicate that packets were transmitted
3271 * on the corresponding TXQ (Bit 0 is for TX queue 1).
3272 * Bits 8-15 of the causeRxTx register indicate that packets were received
3281 struct mvneta_port *pp = netdev_priv(napi->dev); in mvneta_poll()
3282 struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports); in mvneta_poll()
3284 if (!netif_running(pp->dev)) { in mvneta_poll()
3301 /* Release Tx descriptors */ in mvneta_poll()
3310 cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx : in mvneta_poll()
3311 port->cause_rx_tx; in mvneta_poll()
3315 rx_queue = rx_queue - 1; in mvneta_poll()
3316 if (pp->bm_priv) in mvneta_poll()
3318 &pp->rxqs[rx_queue]); in mvneta_poll()
3321 &pp->rxqs[rx_queue]); in mvneta_poll()
3328 if (pp->neta_armada3700) { in mvneta_poll()
3338 enable_percpu_irq(pp->dev->irq, 0); in mvneta_poll()
3342 if (pp->neta_armada3700) in mvneta_poll()
3343 pp->cause_rx_tx = cause_rx_tx; in mvneta_poll()
3345 port->cause_rx_tx = cause_rx_tx; in mvneta_poll()
3353 struct bpf_prog *xdp_prog = READ_ONCE(pp->xdp_prog); in mvneta_create_page_pool()
3359 .dev = pp->dev->dev.parent, in mvneta_create_page_pool()
3361 .offset = pp->rx_offset_correction, in mvneta_create_page_pool()
3366 rxq->page_pool = page_pool_create(&pp_params); in mvneta_create_page_pool()
3367 if (IS_ERR(rxq->page_pool)) { in mvneta_create_page_pool()
3368 err = PTR_ERR(rxq->page_pool); in mvneta_create_page_pool()
3369 rxq->page_pool = NULL; in mvneta_create_page_pool()
3373 err = __xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0, in mvneta_create_page_pool()
3378 err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL, in mvneta_create_page_pool()
3379 rxq->page_pool); in mvneta_create_page_pool()
3386 xdp_rxq_info_unreg(&rxq->xdp_rxq); in mvneta_create_page_pool()
3388 page_pool_destroy(rxq->page_pool); in mvneta_create_page_pool()
3389 rxq->page_pool = NULL; in mvneta_create_page_pool()
3404 memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc)); in mvneta_rxq_fill()
3405 if (mvneta_rx_refill(pp, rxq->descs + i, rxq, in mvneta_rxq_fill()
3407 netdev_err(pp->dev, in mvneta_rxq_fill()
3409 __func__, rxq->id, i, num); in mvneta_rxq_fill()
3422 /* Free all packets pending transmit from all TXQs and reset TX port */
3427 /* free the skb's in the tx ring */ in mvneta_tx_reset()
3429 mvneta_txq_done_force(pp, &pp->txqs[queue]); in mvneta_tx_reset()
3441 /* Rx/Tx queue initialization/cleanup methods */
3446 rxq->size = pp->rx_ring_size; in mvneta_rxq_sw_init()
3449 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent, in mvneta_rxq_sw_init()
3450 rxq->size * MVNETA_DESC_ALIGNED_SIZE, in mvneta_rxq_sw_init()
3451 &rxq->descs_phys, GFP_KERNEL); in mvneta_rxq_sw_init()
3452 if (!rxq->descs) in mvneta_rxq_sw_init()
3453 return -ENOMEM; in mvneta_rxq_sw_init()
3455 rxq->last_desc = rxq->size - 1; in mvneta_rxq_sw_init()
3464 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys); in mvneta_rxq_hw_init()
3465 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size); in mvneta_rxq_hw_init()
3468 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); in mvneta_rxq_hw_init()
3469 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); in mvneta_rxq_hw_init()
3471 if (!pp->bm_priv) { in mvneta_rxq_hw_init()
3476 MVNETA_RX_BUF_SIZE(pp->pkt_size)); in mvneta_rxq_hw_init()
3478 mvneta_rxq_fill(pp, rxq, rxq->size); in mvneta_rxq_hw_init()
3482 NET_SKB_PAD - pp->rx_offset_correction); in mvneta_rxq_hw_init()
3488 mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size); in mvneta_rxq_hw_init()
3514 if (rxq->descs) in mvneta_rxq_deinit()
3515 dma_free_coherent(pp->dev->dev.parent, in mvneta_rxq_deinit()
3516 rxq->size * MVNETA_DESC_ALIGNED_SIZE, in mvneta_rxq_deinit()
3517 rxq->descs, in mvneta_rxq_deinit()
3518 rxq->descs_phys); in mvneta_rxq_deinit()
3520 rxq->descs = NULL; in mvneta_rxq_deinit()
3521 rxq->last_desc = 0; in mvneta_rxq_deinit()
3522 rxq->next_desc_to_proc = 0; in mvneta_rxq_deinit()
3523 rxq->descs_phys = 0; in mvneta_rxq_deinit()
3524 rxq->first_to_refill = 0; in mvneta_rxq_deinit()
3525 rxq->refill_num = 0; in mvneta_rxq_deinit()
3533 txq->size = pp->tx_ring_size; in mvneta_txq_sw_init()
3539 txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS; in mvneta_txq_sw_init()
3540 txq->tx_wake_threshold = txq->tx_stop_threshold / 2; in mvneta_txq_sw_init()
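The software TX queue init derives two thresholds from the ring size: the queue is stopped once the in-flight descriptor count reaches the ring size minus the worst-case descriptors of one skb, and woken again only when it drains below half of that, which gives the flow control some hysteresis. A sketch of the check, with made-up constants:

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE      532
#define MAX_SKB_DESCS  20     /* illustrative, not the driver's value */

static const unsigned int stop_threshold = RING_SIZE - MAX_SKB_DESCS;
static const unsigned int wake_threshold = (RING_SIZE - MAX_SKB_DESCS) / 2;

static bool should_stop(unsigned int count) { return count >= stop_threshold; }
static bool should_wake(unsigned int count) { return count <= wake_threshold; }

int main(void)
{
    printf("stop at %u, wake at %u\n", stop_threshold, wake_threshold);
    printf("count=520 -> stop=%d\n", should_stop(520));   /* 1 */
    printf("count=300 -> wake=%d\n", should_wake(300));   /* 0 */
    printf("count=200 -> wake=%d\n", should_wake(200));   /* 1 */
    return 0;
}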
3542 /* Allocate memory for TX descriptors */ in mvneta_txq_sw_init()
3543 txq->descs = dma_alloc_coherent(pp->dev->dev.parent, in mvneta_txq_sw_init()
3544 txq->size * MVNETA_DESC_ALIGNED_SIZE, in mvneta_txq_sw_init()
3545 &txq->descs_phys, GFP_KERNEL); in mvneta_txq_sw_init()
3546 if (!txq->descs) in mvneta_txq_sw_init()
3547 return -ENOMEM; in mvneta_txq_sw_init()
3549 txq->last_desc = txq->size - 1; in mvneta_txq_sw_init()
3551 txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL); in mvneta_txq_sw_init()
3552 if (!txq->buf) in mvneta_txq_sw_init()
3553 return -ENOMEM; in mvneta_txq_sw_init()
3561 if (pp->neta_armada3700) in mvneta_txq_sw_init()
3564 cpu = txq->id % num_present_cpus(); in mvneta_txq_sw_init()
3566 cpu = pp->rxq_def % num_present_cpus(); in mvneta_txq_sw_init()
3567 cpumask_set_cpu(cpu, &txq->affinity_mask); in mvneta_txq_sw_init()
3568 netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id); in mvneta_txq_sw_init()
3577 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff); in mvneta_txq_hw_init()
3578 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff); in mvneta_txq_hw_init()
3580 /* Set Tx descriptors queue starting address */ in mvneta_txq_hw_init()
3581 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys); in mvneta_txq_hw_init()
3582 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size); in mvneta_txq_hw_init()
3584 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); in mvneta_txq_hw_init()
3587 /* Create and initialize a tx queue */
3606 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_txq_sw_deinit()
3608 kfree(txq->buf); in mvneta_txq_sw_deinit()
3611 if (txq->descs) in mvneta_txq_sw_deinit()
3612 dma_free_coherent(pp->dev->dev.parent, in mvneta_txq_sw_deinit()
3613 txq->size * MVNETA_DESC_ALIGNED_SIZE, in mvneta_txq_sw_deinit()
3614 txq->descs, txq->descs_phys); in mvneta_txq_sw_deinit()
3618 txq->buf = NULL; in mvneta_txq_sw_deinit()
3619 txq->descs = NULL; in mvneta_txq_sw_deinit()
3620 txq->last_desc = 0; in mvneta_txq_sw_deinit()
3621 txq->next_desc_to_proc = 0; in mvneta_txq_sw_deinit()
3622 txq->descs_phys = 0; in mvneta_txq_sw_deinit()
3629 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0); in mvneta_txq_hw_deinit()
3630 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0); in mvneta_txq_hw_deinit()
3632 /* Set Tx descriptors queue starting address and size */ in mvneta_txq_hw_deinit()
3633 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0); in mvneta_txq_hw_deinit()
3634 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0); in mvneta_txq_hw_deinit()
3644 /* Cleanup all Tx queues */
3650 mvneta_txq_deinit(pp, &pp->txqs[queue]); in mvneta_cleanup_txqs()
3659 mvneta_rxq_deinit(pp, &pp->rxqs[queue]); in mvneta_cleanup_rxqs()
3669 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]); in mvneta_setup_rxqs()
3672 netdev_err(pp->dev, "%s: can't create rxq=%d\n", in mvneta_setup_rxqs()
3682 /* Init all tx queues */
3688 int err = mvneta_txq_init(pp, &pp->txqs[queue]); in mvneta_setup_txqs()
3690 netdev_err(pp->dev, "%s: can't create txq=%d\n", in mvneta_setup_txqs()
3704 ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, interface); in mvneta_comphy_init()
3708 return phy_power_on(pp->comphy); in mvneta_comphy_init()
3716 if (pp->comphy) { in mvneta_config_interface()
3744 pp->phy_interface = interface; in mvneta_config_interface()
3753 WARN_ON(mvneta_config_interface(pp, pp->phy_interface)); in mvneta_start_dev()
3755 mvneta_max_rx_size_set(pp, pp->pkt_size); in mvneta_start_dev()
3756 mvneta_txq_max_tx_size_set(pp, pp->pkt_size); in mvneta_start_dev()
3758 /* start the Rx/Tx activity */ in mvneta_start_dev()
3761 if (!pp->neta_armada3700) { in mvneta_start_dev()
3765 per_cpu_ptr(pp->ports, cpu); in mvneta_start_dev()
3767 napi_enable(&port->napi); in mvneta_start_dev()
3770 napi_enable(&pp->napi); in mvneta_start_dev()
3780 phylink_start(pp->phylink); in mvneta_start_dev()
3783 phylink_speed_up(pp->phylink); in mvneta_start_dev()
3785 netif_tx_start_all_queues(pp->dev); in mvneta_start_dev()
3787 clear_bit(__MVNETA_DOWN, &pp->state); in mvneta_start_dev()
3794 set_bit(__MVNETA_DOWN, &pp->state); in mvneta_stop_dev()
3796 if (device_may_wakeup(&pp->dev->dev)) in mvneta_stop_dev()
3797 phylink_speed_down(pp->phylink, false); in mvneta_stop_dev()
3799 phylink_stop(pp->phylink); in mvneta_stop_dev()
3801 if (!pp->neta_armada3700) { in mvneta_stop_dev()
3804 per_cpu_ptr(pp->ports, cpu); in mvneta_stop_dev()
3806 napi_disable(&port->napi); in mvneta_stop_dev()
3809 napi_disable(&pp->napi); in mvneta_stop_dev()
3812 netif_carrier_off(pp->dev); in mvneta_stop_dev()
3815 netif_tx_stop_all_queues(pp->dev); in mvneta_stop_dev()
3829 WARN_ON(phy_power_off(pp->comphy)); in mvneta_stop_dev()
3836 enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE); in mvneta_percpu_enable()
3843 disable_percpu_irq(pp->dev->irq); in mvneta_percpu_disable()
3850 struct bpf_prog *prog = pp->xdp_prog; in mvneta_change_mtu()
3859 if (prog && !prog->aux->xdp_has_frags && in mvneta_change_mtu()
3864 return -EINVAL; in mvneta_change_mtu()
3867 WRITE_ONCE(dev->mtu, mtu); in mvneta_change_mtu()
3870 if (pp->bm_priv) in mvneta_change_mtu()
3886 if (pp->bm_priv) in mvneta_change_mtu()
3889 pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu); in mvneta_change_mtu()
3916 if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) { in mvneta_fix_features()
3920 pp->tx_csum_limit); in mvneta_fix_features()
3952 mvneta_mac_addr_set(pp, dev->dev_addr, -1); in mvneta_set_mac_addr()
3955 mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def); in mvneta_set_mac_addr()
3970 * "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ... in mvneta_pcs_inband_caps()
3971 * When <PortType> = 1 (1000BASE-X) this field must be set to 1." in mvneta_pcs_inband_caps()
3998 state->speed = in mvneta_pcs_get_state()
3999 state->interface == PHY_INTERFACE_MODE_2500BASEX ? in mvneta_pcs_get_state()
4002 state->speed = SPEED_100; in mvneta_pcs_get_state()
4004 state->speed = SPEED_10; in mvneta_pcs_get_state()
4006 state->an_complete = !!(gmac_stat & MVNETA_GMAC_AN_COMPLETE); in mvneta_pcs_get_state()
4007 state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP); in mvneta_pcs_get_state()
4008 state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX); in mvneta_pcs_get_state()
4011 state->pause |= MLO_PAUSE_RX; in mvneta_pcs_get_state()
4013 state->pause |= MLO_PAUSE_TX; in mvneta_pcs_get_state()
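mvneta_pcs_get_state() translates GMAC status bits into the phylink state: link, duplex, autoneg-complete and pause come from individual flags, and speed is picked from the speed bits (reported as 2500 rather than 1000 when the interface is 2500BASE-X). A sketch of the same decode pattern; the bit layout below is invented for the example and is not the GMAC's real register layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define STAT_LINK_UP      (1u << 0)
#define STAT_FULL_DUPLEX  (1u << 1)
#define STAT_SPEED_100    (1u << 2)
#define STAT_AN_DONE      (1u << 3)

struct link_state {
    bool link, duplex, an_complete;
    unsigned int speed;
};

static struct link_state decode_status(uint32_t stat)
{
    struct link_state s = {
        .link        = stat & STAT_LINK_UP,
        .duplex      = stat & STAT_FULL_DUPLEX,
        .an_complete = stat & STAT_AN_DONE,
        .speed       = (stat & STAT_SPEED_100) ? 100 : 10,
    };
    return s;
}

int main(void)
{
    struct link_state s = decode_status(STAT_LINK_UP | STAT_SPEED_100);

    printf("link=%d speed=%u duplex=%d\n", s.link, s.speed, s.duplex);
    return 0;
}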
4058 /* Phy or fixed speed - disable in-band AN modes */ in mvneta_pcs_config()
4093 struct net_device *ndev = to_net_dev(config->dev); in mvneta_mac_select_pcs()
4096 return &pp->phylink_pcs; in mvneta_mac_select_pcs()
4102 struct net_device *ndev = to_net_dev(config->dev); in mvneta_mac_prepare()
4106 if (pp->phy_interface != interface || in mvneta_mac_prepare()
4109 * in-band mode. According to Armada 370 documentation, we in mvneta_mac_prepare()
4110 * can only change the port mode and in-band enable when the in mvneta_mac_prepare()
4119 if (pp->phy_interface != interface) in mvneta_mac_prepare()
4120 WARN_ON(phy_power_off(pp->comphy)); in mvneta_mac_prepare()
4124 unsigned long rate = clk_get_rate(pp->clk); in mvneta_mac_prepare()
4136 struct net_device *ndev = to_net_dev(config->dev); in mvneta_mac_config()
4152 if (state->interface == PHY_INTERFACE_MODE_QSGMII || in mvneta_mac_config()
4153 state->interface == PHY_INTERFACE_MODE_SGMII || in mvneta_mac_config()
4154 phy_interface_mode_is_8023z(state->interface)) in mvneta_mac_config()
4158 /* Phy or fixed speed - nothing to do, leave the in mvneta_mac_config()
4159 * configured speed, duplex and flow control as-is. in mvneta_mac_config()
4161 } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { in mvneta_mac_config()
4165 /* 802.3z negotiation - only 1000base-X */ in mvneta_mac_config()
4172 if (state->interface == PHY_INTERFACE_MODE_2500BASEX) in mvneta_mac_config()
4192 struct net_device *ndev = to_net_dev(config->dev); in mvneta_mac_finish()
4196 /* Disable 1ms clock if not in in-band mode */ in mvneta_mac_finish()
4203 if (pp->phy_interface != interface) in mvneta_mac_finish()
4207 /* Allow the link to come up if in in-band mode, otherwise the in mvneta_mac_finish()
4222 struct net_device *ndev = to_net_dev(config->dev); in mvneta_mac_link_down()
4242 struct net_device *ndev = to_net_dev(config->dev); in mvneta_mac_link_up()
4286 struct mvneta_port *pp = netdev_priv(to_net_dev(config->dev)); in mvneta_mac_disable_tx_lpi()
4299 struct mvneta_port *pp = netdev_priv(to_net_dev(config->dev)); in mvneta_mac_enable_tx_lpi()
4310 /* At 100M speeds, the timer resolutions are 10us, and in mvneta_mac_enable_tx_lpi()
4348 int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0); in mvneta_mdio_probe()
4351 netdev_err(pp->dev, "could not attach PHY: %d\n", err); in mvneta_mdio_probe()
4353 phylink_ethtool_get_wol(pp->phylink, &wol); in mvneta_mdio_probe()
4354 device_set_wakeup_capable(&pp->dev->dev, !!wol.supported); in mvneta_mdio_probe()
4358 device_set_wakeup_enable(&pp->dev->dev, !!wol.wolopts); in mvneta_mdio_probe()
4365 phylink_disconnect_phy(pp->phylink); in mvneta_mdio_remove()
4379 if (pp->rxq_def < nr_cpu_ids && cpu_online(pp->rxq_def)) in mvneta_percpu_elect()
4380 elected_cpu = pp->rxq_def; in mvneta_percpu_elect()
4394 rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def); in mvneta_percpu_elect()
4396 /* We update the TX queue map only if we have one in mvneta_percpu_elect()
4397 * queue. In this case we associate the TX queue to in mvneta_percpu_elect()
4422 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); in mvneta_cpu_online()
4424 /* Armada 3700's per-cpu interrupt for mvneta is broken, all interrupts in mvneta_cpu_online()
4425 * are routed to CPU 0, so we don't need all the cpu-hotplug support in mvneta_cpu_online()
4427 if (pp->neta_armada3700) in mvneta_cpu_online()
4430 netdev_lock(port->napi.dev); in mvneta_cpu_online()
4431 spin_lock(&pp->lock); in mvneta_cpu_online()
4436 if (pp->is_stopped) { in mvneta_cpu_online()
4437 spin_unlock(&pp->lock); in mvneta_cpu_online()
4438 netdev_unlock(port->napi.dev); in mvneta_cpu_online()
4441 netif_tx_stop_all_queues(pp->dev); in mvneta_cpu_online()
4450 per_cpu_ptr(pp->ports, other_cpu); in mvneta_cpu_online()
4452 napi_synchronize(&other_port->napi); in mvneta_cpu_online()
4458 napi_enable_locked(&port->napi); in mvneta_cpu_online()
4461 * Enable per-CPU interrupts on the CPU that is in mvneta_cpu_online()
4467 * Enable per-CPU interrupt on the one CPU we care in mvneta_cpu_online()
4477 netif_tx_start_all_queues(pp->dev); in mvneta_cpu_online()
4478 spin_unlock(&pp->lock); in mvneta_cpu_online()
4479 netdev_unlock(port->napi.dev); in mvneta_cpu_online()
4488 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); in mvneta_cpu_down_prepare()
4494 spin_lock(&pp->lock); in mvneta_cpu_down_prepare()
4497 spin_unlock(&pp->lock); in mvneta_cpu_down_prepare()
4499 napi_synchronize(&port->napi); in mvneta_cpu_down_prepare()
4500 napi_disable(&port->napi); in mvneta_cpu_down_prepare()
4501 /* Disable per-CPU interrupts on the CPU that is brought down. */ in mvneta_cpu_down_prepare()
4512 spin_lock(&pp->lock); in mvneta_cpu_dead()
4514 spin_unlock(&pp->lock); in mvneta_cpu_dead()
4520 netif_tx_start_all_queues(pp->dev); in mvneta_cpu_dead()
4529 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); in mvneta_open()
4540 if (pp->neta_armada3700) in mvneta_open()
4541 ret = request_irq(pp->dev->irq, mvneta_isr, 0, in mvneta_open()
4542 dev->name, pp); in mvneta_open()
4544 ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr, in mvneta_open()
4545 dev->name, pp->ports); in mvneta_open()
4547 netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq); in mvneta_open()
4551 if (!pp->neta_armada3700) { in mvneta_open()
4552 /* Enable per-CPU interrupt on all the CPUs to handle our RX in mvneta_open()
4557 pp->is_stopped = false; in mvneta_open()
4562 &pp->node_online); in mvneta_open()
4567 &pp->node_dead); in mvneta_open()
4583 if (!pp->neta_armada3700) in mvneta_open()
4585 &pp->node_dead); in mvneta_open()
4587 if (!pp->neta_armada3700) in mvneta_open()
4589 &pp->node_online); in mvneta_open()
4591 if (pp->neta_armada3700) { in mvneta_open()
4592 free_irq(pp->dev->irq, pp); in mvneta_open()
4595 free_percpu_irq(pp->dev->irq, pp->ports); in mvneta_open()
4609 if (!pp->neta_armada3700) { in mvneta_stop()
4615 spin_lock(&pp->lock); in mvneta_stop()
4616 pp->is_stopped = true; in mvneta_stop()
4617 spin_unlock(&pp->lock); in mvneta_stop()
4623 &pp->node_online); in mvneta_stop()
4625 &pp->node_dead); in mvneta_stop()
4627 free_percpu_irq(dev->irq, pp->ports); in mvneta_stop()
4631 free_irq(dev->irq, pp); in mvneta_stop()
4644 return phylink_mii_ioctl(pp->phylink, ifr, cmd); in mvneta_ioctl()
4654 if (prog && !prog->aux->xdp_has_frags && in mvneta_xdp_setup()
4655 dev->mtu > MVNETA_MAX_RX_BUF_SIZE) { in mvneta_xdp_setup()
4657 return -EOPNOTSUPP; in mvneta_xdp_setup()
4660 if (pp->bm_priv) { in mvneta_xdp_setup()
4663 return -EOPNOTSUPP; in mvneta_xdp_setup()
4666 need_update = !!pp->xdp_prog != !!prog; in mvneta_xdp_setup()
4670 old_prog = xchg(&pp->xdp_prog, prog); in mvneta_xdp_setup()
4682 switch (xdp->command) { in mvneta_xdp()
4684 return mvneta_xdp_setup(dev, xdp->prog, xdp->extack); in mvneta_xdp()
4686 return -EINVAL; in mvneta_xdp()
4699 return phylink_ethtool_ksettings_set(pp->phylink, cmd); in mvneta_ethtool_set_link_ksettings()
4709 return phylink_ethtool_ksettings_get(pp->phylink, cmd); in mvneta_ethtool_get_link_ksettings()
4716 return phylink_ethtool_nway_reset(pp->phylink); in mvneta_ethtool_nway_reset()
4730 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_ethtool_set_coalesce()
4731 rxq->time_coal = c->rx_coalesce_usecs; in mvneta_ethtool_set_coalesce()
4732 rxq->pkts_coal = c->rx_max_coalesced_frames; in mvneta_ethtool_set_coalesce()
4733 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); in mvneta_ethtool_set_coalesce()
4734 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); in mvneta_ethtool_set_coalesce()
4738 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_ethtool_set_coalesce()
4739 txq->done_pkts_coal = c->tx_max_coalesced_frames; in mvneta_ethtool_set_coalesce()
4740 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); in mvneta_ethtool_set_coalesce()
4755 c->rx_coalesce_usecs = pp->rxqs[0].time_coal; in mvneta_ethtool_get_coalesce()
4756 c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal; in mvneta_ethtool_get_coalesce()
4758 c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal; in mvneta_ethtool_get_coalesce()
4766 strscpy(drvinfo->driver, MVNETA_DRIVER_NAME, in mvneta_ethtool_get_drvinfo()
4767 sizeof(drvinfo->driver)); in mvneta_ethtool_get_drvinfo()
4768 strscpy(drvinfo->version, MVNETA_DRIVER_VERSION, in mvneta_ethtool_get_drvinfo()
4769 sizeof(drvinfo->version)); in mvneta_ethtool_get_drvinfo()
4770 strscpy(drvinfo->bus_info, dev_name(&dev->dev), in mvneta_ethtool_get_drvinfo()
4771 sizeof(drvinfo->bus_info)); in mvneta_ethtool_get_drvinfo()
4783 ring->rx_max_pending = MVNETA_MAX_RXD; in mvneta_ethtool_get_ringparam()
4784 ring->tx_max_pending = MVNETA_MAX_TXD; in mvneta_ethtool_get_ringparam()
4785 ring->rx_pending = pp->rx_ring_size; in mvneta_ethtool_get_ringparam()
4786 ring->tx_pending = pp->tx_ring_size; in mvneta_ethtool_get_ringparam()
4797 if ((ring->rx_pending == 0) || (ring->tx_pending == 0)) in mvneta_ethtool_set_ringparam()
4798 return -EINVAL; in mvneta_ethtool_set_ringparam()
4799 pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ? in mvneta_ethtool_set_ringparam()
4800 ring->rx_pending : MVNETA_MAX_RXD; in mvneta_ethtool_set_ringparam()
4802 pp->tx_ring_size = clamp_t(u16, ring->tx_pending, in mvneta_ethtool_set_ringparam()
4804 if (pp->tx_ring_size != ring->tx_pending) in mvneta_ethtool_set_ringparam()
4805 netdev_warn(dev, "TX queue size set to %u (requested %u)\n", in mvneta_ethtool_set_ringparam()
4806 pp->tx_ring_size, ring->tx_pending); in mvneta_ethtool_set_ringparam()
4813 return -ENOMEM; in mvneta_ethtool_set_ringparam()
4825 phylink_ethtool_get_pauseparam(pp->phylink, pause); in mvneta_ethtool_get_pauseparam()
4833 return phylink_ethtool_set_pauseparam(pp->phylink, pause); in mvneta_ethtool_set_pauseparam()
4846 if (!pp->bm_priv) { in mvneta_ethtool_get_strings()
4871 stats = per_cpu_ptr(pp->stats, cpu); in mvneta_ethtool_update_pcpu_stats()
4873 start = u64_stats_fetch_begin(&stats->syncp); in mvneta_ethtool_update_pcpu_stats()
4874 skb_alloc_error = stats->es.skb_alloc_error; in mvneta_ethtool_update_pcpu_stats()
4875 refill_error = stats->es.refill_error; in mvneta_ethtool_update_pcpu_stats()
4876 xdp_redirect = stats->es.ps.xdp_redirect; in mvneta_ethtool_update_pcpu_stats()
4877 xdp_pass = stats->es.ps.xdp_pass; in mvneta_ethtool_update_pcpu_stats()
4878 xdp_drop = stats->es.ps.xdp_drop; in mvneta_ethtool_update_pcpu_stats()
4879 xdp_xmit = stats->es.ps.xdp_xmit; in mvneta_ethtool_update_pcpu_stats()
4880 xdp_xmit_err = stats->es.ps.xdp_xmit_err; in mvneta_ethtool_update_pcpu_stats()
4881 xdp_tx = stats->es.ps.xdp_tx; in mvneta_ethtool_update_pcpu_stats()
4882 xdp_tx_err = stats->es.ps.xdp_tx_err; in mvneta_ethtool_update_pcpu_stats()
4883 } while (u64_stats_fetch_retry(&stats->syncp, start)); in mvneta_ethtool_update_pcpu_stats()
4885 es->skb_alloc_error += skb_alloc_error; in mvneta_ethtool_update_pcpu_stats()
4886 es->refill_error += refill_error; in mvneta_ethtool_update_pcpu_stats()
4887 es->ps.xdp_redirect += xdp_redirect; in mvneta_ethtool_update_pcpu_stats()
4888 es->ps.xdp_pass += xdp_pass; in mvneta_ethtool_update_pcpu_stats()
4889 es->ps.xdp_drop += xdp_drop; in mvneta_ethtool_update_pcpu_stats()
4890 es->ps.xdp_xmit += xdp_xmit; in mvneta_ethtool_update_pcpu_stats()
4891 es->ps.xdp_xmit_err += xdp_xmit_err; in mvneta_ethtool_update_pcpu_stats()
4892 es->ps.xdp_tx += xdp_tx; in mvneta_ethtool_update_pcpu_stats()
4893 es->ps.xdp_tx_err += xdp_tx_err; in mvneta_ethtool_update_pcpu_stats()
4901 void __iomem *base = pp->base; in mvneta_ethtool_update_stats()
4910 switch (s->type) { in mvneta_ethtool_update_stats()
4912 val = readl_relaxed(base + s->offset); in mvneta_ethtool_update_stats()
4913 pp->ethtool_stats[i] += val; in mvneta_ethtool_update_stats()
4916 /* Docs say to read low 32-bit then high */ in mvneta_ethtool_update_stats()
4917 low = readl_relaxed(base + s->offset); in mvneta_ethtool_update_stats()
4918 high = readl_relaxed(base + s->offset + 4); in mvneta_ethtool_update_stats()
4920 pp->ethtool_stats[i] += val; in mvneta_ethtool_update_stats()
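The two reads above follow the documented low-then-high access order for the 64-bit MIB counters. A minimal sketch of the same pattern as a standalone helper (the helper name is hypothetical; readl_relaxed() is the accessor already used above):

	static u64 mvneta_mib_read64(void __iomem *base, u32 offset)
	{
		/* Latch the counter: read the low 32-bit word first, then the
		 * high word, and combine them into one 64-bit value.
		 */
		u32 low = readl_relaxed(base + offset);
		u32 high = readl_relaxed(base + offset + 4);

		return ((u64)high << 32) | low;
	}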
4923 switch (s->offset) { in mvneta_ethtool_update_stats()
4925 val = phylink_get_eee_err(pp->phylink); in mvneta_ethtool_update_stats()
4926 pp->ethtool_stats[i] += val; in mvneta_ethtool_update_stats()
4929 pp->ethtool_stats[i] = stats.skb_alloc_error; in mvneta_ethtool_update_stats()
4932 pp->ethtool_stats[i] = stats.refill_error; in mvneta_ethtool_update_stats()
4935 pp->ethtool_stats[i] = stats.ps.xdp_redirect; in mvneta_ethtool_update_stats()
4938 pp->ethtool_stats[i] = stats.ps.xdp_pass; in mvneta_ethtool_update_stats()
4941 pp->ethtool_stats[i] = stats.ps.xdp_drop; in mvneta_ethtool_update_stats()
4944 pp->ethtool_stats[i] = stats.ps.xdp_tx; in mvneta_ethtool_update_stats()
4947 pp->ethtool_stats[i] = stats.ps.xdp_tx_err; in mvneta_ethtool_update_stats()
4950 pp->ethtool_stats[i] = stats.ps.xdp_xmit; in mvneta_ethtool_update_stats()
4953 pp->ethtool_stats[i] = stats.ps.xdp_xmit_err; in mvneta_ethtool_update_stats()
4967 if (pp->rxqs[i].page_pool) in mvneta_ethtool_pp_stats()
4968 page_pool_get_stats(pp->rxqs[i].page_pool, &stats); in mvneta_ethtool_pp_stats()
4983 *data++ = pp->ethtool_stats[i]; in mvneta_ethtool_get_stats()
4985 if (!pp->bm_priv) in mvneta_ethtool_get_stats()
4995 if (!pp->bm_priv) in mvneta_ethtool_get_sset_count()
5001 return -EOPNOTSUPP; in mvneta_ethtool_get_sset_count()
5013 switch (info->cmd) { in mvneta_ethtool_get_rxnfc()
5015 info->data = rxq_number; in mvneta_ethtool_get_rxnfc()
5018 return -EOPNOTSUPP; in mvneta_ethtool_get_rxnfc()
5020 return -EOPNOTSUPP; in mvneta_ethtool_get_rxnfc()
5029 netif_tx_stop_all_queues(pp->dev); in mvneta_config_rss()
5033 if (!pp->neta_armada3700) { in mvneta_config_rss()
5037 per_cpu_ptr(pp->ports, cpu); in mvneta_config_rss()
5039 napi_synchronize(&pcpu_port->napi); in mvneta_config_rss()
5040 napi_disable(&pcpu_port->napi); in mvneta_config_rss()
5043 napi_synchronize(&pp->napi); in mvneta_config_rss()
5044 napi_disable(&pp->napi); in mvneta_config_rss()
5047 pp->rxq_def = pp->indir[0]; in mvneta_config_rss()
5050 mvneta_set_rx_mode(pp->dev); in mvneta_config_rss()
5053 val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def); in mvneta_config_rss()
5057 spin_lock(&pp->lock); in mvneta_config_rss()
5059 spin_unlock(&pp->lock); in mvneta_config_rss()
5061 if (!pp->neta_armada3700) { in mvneta_config_rss()
5065 per_cpu_ptr(pp->ports, cpu); in mvneta_config_rss()
5067 napi_enable(&pcpu_port->napi); in mvneta_config_rss()
5070 napi_enable(&pp->napi); in mvneta_config_rss()
5073 netif_tx_start_all_queues(pp->dev); in mvneta_config_rss()
5085 if (pp->neta_armada3700) in mvneta_ethtool_set_rxfh()
5086 return -EOPNOTSUPP; in mvneta_ethtool_set_rxfh()
5091 if (rxfh->key || in mvneta_ethtool_set_rxfh()
5092 (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && in mvneta_ethtool_set_rxfh()
5093 rxfh->hfunc != ETH_RSS_HASH_TOP)) in mvneta_ethtool_set_rxfh()
5094 return -EOPNOTSUPP; in mvneta_ethtool_set_rxfh()
5096 if (!rxfh->indir) in mvneta_ethtool_set_rxfh()
5099 memcpy(pp->indir, rxfh->indir, MVNETA_RSS_LU_TABLE_SIZE); in mvneta_ethtool_set_rxfh()
5110 if (pp->neta_armada3700) in mvneta_ethtool_get_rxfh()
5111 return -EOPNOTSUPP; in mvneta_ethtool_get_rxfh()
5113 rxfh->hfunc = ETH_RSS_HASH_TOP; in mvneta_ethtool_get_rxfh()
5115 if (!rxfh->indir) in mvneta_ethtool_get_rxfh()
5118 memcpy(rxfh->indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE); in mvneta_ethtool_get_rxfh()
5128 phylink_ethtool_get_wol(pp->phylink, wol); in mvneta_ethtool_get_wol()
5137 ret = phylink_ethtool_set_wol(pp->phylink, wol); in mvneta_ethtool_set_wol()
5139 device_set_wakeup_enable(&dev->dev, !!wol->wolopts); in mvneta_ethtool_set_wol()
5145 struct ethtool_keee *eee) in mvneta_ethtool_get_eee() argument
5149 return phylink_ethtool_get_eee(pp->phylink, eee); in mvneta_ethtool_get_eee()
5153 struct ethtool_keee *eee) in mvneta_ethtool_set_eee() argument
5158 * it being an 8-bit register. in mvneta_ethtool_set_eee()
5160 if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255) in mvneta_ethtool_set_eee()
5161 return -EINVAL; in mvneta_ethtool_set_eee()
5163 return phylink_ethtool_set_eee(pp->phylink, eee); in mvneta_ethtool_set_eee()
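Note: with the LPI timer being an 8-bit register, the largest programmable idle time is 255us (the ethtool_keee value is in microseconds, consistent with the 250us default chosen at probe time), so larger requests are rejected with -EINVAL rather than silently truncated.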
5187 core_clk_rate = clk_get_rate(pp->clk); in mvneta_enable_per_queue_rate_limit()
5189 return -EINVAL; in mvneta_enable_per_queue_rate_limit()
5195 return -EINVAL; in mvneta_enable_per_queue_rate_limit()
5226 return -EINVAL; in mvneta_setup_queue_rates()
5233 return -EINVAL; in mvneta_setup_queue_rates()
5251 if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS) in mvneta_setup_mqprio()
5254 num_tc = mqprio->qopt.num_tc; in mvneta_setup_mqprio()
5257 return -EINVAL; in mvneta_setup_mqprio()
5267 netdev_set_num_tc(dev, mqprio->qopt.num_tc); in mvneta_setup_mqprio()
5269 for (tc = 0; tc < mqprio->qopt.num_tc; tc++) { in mvneta_setup_mqprio()
5270 netdev_set_tc_queue(dev, tc, mqprio->qopt.count[tc], in mvneta_setup_mqprio()
5271 mqprio->qopt.offset[tc]); in mvneta_setup_mqprio()
5273 for (rxq = mqprio->qopt.offset[tc]; in mvneta_setup_mqprio()
5274 rxq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc]; in mvneta_setup_mqprio()
5277 return -EINVAL; in mvneta_setup_mqprio()
5283 if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE) { in mvneta_setup_mqprio()
5288 if (mqprio->qopt.num_tc > txq_number) in mvneta_setup_mqprio()
5289 return -EINVAL; in mvneta_setup_mqprio()
5295 for (tc = 0; tc < mqprio->qopt.num_tc; tc++) { in mvneta_setup_mqprio()
5296 for (txq = mqprio->qopt.offset[tc]; in mvneta_setup_mqprio()
5297 txq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc]; in mvneta_setup_mqprio()
5300 return -EINVAL; in mvneta_setup_mqprio()
5303 mqprio->min_rate[tc], in mvneta_setup_mqprio()
5304 mqprio->max_rate[tc]); in mvneta_setup_mqprio()
5320 return -EOPNOTSUPP; in mvneta_setup_tc()
5377 pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL); in mvneta_init()
5378 if (!pp->txqs) in mvneta_init()
5379 return -ENOMEM; in mvneta_init()
5381 /* Initialize TX descriptor rings */ in mvneta_init()
5383 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_init()
5384 txq->id = queue; in mvneta_init()
5385 txq->size = pp->tx_ring_size; in mvneta_init()
5386 txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS; in mvneta_init()
5389 pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL); in mvneta_init()
5390 if (!pp->rxqs) in mvneta_init()
5391 return -ENOMEM; in mvneta_init()
5395 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_init()
5396 rxq->id = queue; in mvneta_init()
5397 rxq->size = pp->rx_ring_size; in mvneta_init()
5398 rxq->pkts_coal = MVNETA_RX_COAL_PKTS; in mvneta_init()
5399 rxq->time_coal = MVNETA_RX_COAL_USEC; in mvneta_init()
5400 rxq->buf_virt_addr in mvneta_init()
5401 = devm_kmalloc_array(pp->dev->dev.parent, in mvneta_init()
5402 rxq->size, in mvneta_init()
5403 sizeof(*rxq->buf_virt_addr), in mvneta_init()
5405 if (!rxq->buf_virt_addr) in mvneta_init()
5406 return -ENOMEM; in mvneta_init()
5432 for (i = 0; i < dram->num_cs; i++) { in mvneta_conf_mbus_windows()
5433 const struct mbus_dram_window *cs = dram->cs + i; in mvneta_conf_mbus_windows()
5436 (cs->base & 0xffff0000) | in mvneta_conf_mbus_windows()
5437 (cs->mbus_attr << 8) | in mvneta_conf_mbus_windows()
5438 dram->mbus_dram_target_id); in mvneta_conf_mbus_windows()
5441 (cs->size - 1) & 0xffff0000); in mvneta_conf_mbus_windows()
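A minimal sketch of how the two register values above are composed for one window, with the field layout read off the expressions themselves rather than a datasheet (the helper is hypothetical):

	/* Base register: bits 31:16 carry the window base, bits 15:8 the mbus
	 * attribute, and the target id sits in the low byte.  The size
	 * register holds (size - 1) with the low 16 bits masked, which
	 * implies 64KiB window granularity.
	 */
	static void mvneta_win_encode(u32 base, u32 size, u8 attr, u8 target,
				      u32 *base_val, u32 *size_val)
	{
		*base_val = (base & 0xffff0000) | (attr << 8) | target;
		*size_val = (size - 1) & 0xffff0000;
	}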
5447 if (pp->neta_ac5) in mvneta_conf_mbus_windows()
5474 return -EINVAL; in mvneta_port_power_up()
5477 mvneta_mac_disable_tx_lpi(&pp->phylink_config); in mvneta_port_power_up()
5485 struct device_node *dn = pdev->dev.of_node; in mvneta_probe()
5498 dev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(struct mvneta_port), in mvneta_probe()
5501 return -ENOMEM; in mvneta_probe()
5503 dev->tx_queue_len = MVNETA_MAX_TXD; in mvneta_probe()
5504 dev->watchdog_timeo = 5 * HZ; in mvneta_probe()
5505 dev->netdev_ops = &mvneta_netdev_ops; in mvneta_probe()
5506 dev->ethtool_ops = &mvneta_eth_tool_ops; in mvneta_probe()
5509 spin_lock_init(&pp->lock); in mvneta_probe()
5510 pp->dn = dn; in mvneta_probe()
5512 pp->rxq_def = rxq_def; in mvneta_probe()
5513 pp->indir[0] = rxq_def; in mvneta_probe()
5517 dev_err(&pdev->dev, "incorrect phy-mode\n"); in mvneta_probe()
5521 pp->phy_interface = phy_mode; in mvneta_probe()
5523 comphy = devm_of_phy_get(&pdev->dev, dn, NULL); in mvneta_probe()
5524 if (comphy == ERR_PTR(-EPROBE_DEFER)) in mvneta_probe()
5525 return -EPROBE_DEFER; in mvneta_probe()
5530 pp->comphy = comphy; in mvneta_probe()
5532 pp->base = devm_platform_ioremap_resource(pdev, 0); in mvneta_probe()
5533 if (IS_ERR(pp->base)) in mvneta_probe()
5534 return PTR_ERR(pp->base); in mvneta_probe()
5537 if (of_device_is_compatible(dn, "marvell,armada-3700-neta")) in mvneta_probe()
5538 pp->neta_armada3700 = true; in mvneta_probe()
5539 if (of_device_is_compatible(dn, "marvell,armada-ac5-neta")) { in mvneta_probe()
5540 pp->neta_armada3700 = true; in mvneta_probe()
5541 pp->neta_ac5 = true; in mvneta_probe()
5544 dev->irq = irq_of_parse_and_map(dn, 0); in mvneta_probe()
5545 if (dev->irq == 0) in mvneta_probe()
5546 return -EINVAL; in mvneta_probe()
5548 pp->clk = devm_clk_get(&pdev->dev, "core"); in mvneta_probe()
5549 if (IS_ERR(pp->clk)) in mvneta_probe()
5550 pp->clk = devm_clk_get(&pdev->dev, NULL); in mvneta_probe()
5551 if (IS_ERR(pp->clk)) { in mvneta_probe()
5552 err = PTR_ERR(pp->clk); in mvneta_probe()
5556 clk_prepare_enable(pp->clk); in mvneta_probe()
5558 pp->clk_bus = devm_clk_get(&pdev->dev, "bus"); in mvneta_probe()
5559 if (!IS_ERR(pp->clk_bus)) in mvneta_probe()
5560 clk_prepare_enable(pp->clk_bus); in mvneta_probe()
5562 pp->phylink_pcs.ops = &mvneta_phylink_pcs_ops; in mvneta_probe()
5564 pp->phylink_config.dev = &dev->dev; in mvneta_probe()
5565 pp->phylink_config.type = PHYLINK_NETDEV; in mvneta_probe()
5566 pp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | in mvneta_probe()
5569 /* Setup EEE. Choose 250us idle. Only supported in SGMII modes. */ in mvneta_probe()
5570 __set_bit(PHY_INTERFACE_MODE_QSGMII, pp->phylink_config.lpi_interfaces); in mvneta_probe()
5571 __set_bit(PHY_INTERFACE_MODE_SGMII, pp->phylink_config.lpi_interfaces); in mvneta_probe()
5572 pp->phylink_config.lpi_capabilities = MAC_100FD | MAC_1000FD; in mvneta_probe()
5573 pp->phylink_config.lpi_timer_default = 250; in mvneta_probe()
5574 pp->phylink_config.eee_enabled_default = true; in mvneta_probe()
5576 phy_interface_set_rgmii(pp->phylink_config.supported_interfaces); in mvneta_probe()
5578 pp->phylink_config.supported_interfaces); in mvneta_probe()
5584 pp->phylink_config.supported_interfaces); in mvneta_probe()
5586 pp->phylink_config.supported_interfaces); in mvneta_probe()
5588 pp->phylink_config.supported_interfaces); in mvneta_probe()
5590 /* No COMPHY, with only 2500BASE-X mode supported */ in mvneta_probe()
5592 pp->phylink_config.supported_interfaces); in mvneta_probe()
5595 /* No COMPHY, we can switch between 1000BASE-X and SGMII */ in mvneta_probe()
5597 pp->phylink_config.supported_interfaces); in mvneta_probe()
5599 pp->phylink_config.supported_interfaces); in mvneta_probe()
5602 phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode, in mvneta_probe()
5609 pp->phylink = phylink; in mvneta_probe()
5611 /* Alloc per-cpu port structure */ in mvneta_probe()
5612 pp->ports = alloc_percpu(struct mvneta_pcpu_port); in mvneta_probe()
5613 if (!pp->ports) { in mvneta_probe()
5614 err = -ENOMEM; in mvneta_probe()
5618 /* Alloc per-cpu stats */ in mvneta_probe()
5619 pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats); in mvneta_probe()
5620 if (!pp->stats) { in mvneta_probe()
5621 err = -ENOMEM; in mvneta_probe()
5639 if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) { in mvneta_probe()
5643 dev_info(&pdev->dev, in mvneta_probe()
5644 "Wrong TX csum limit in DT, set to %dB\n", in mvneta_probe()
5647 } else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) { in mvneta_probe()
5653 pp->tx_csum_limit = tx_csum_limit; in mvneta_probe()
5655 pp->dram_target_info = mv_mbus_dram_info(); in mvneta_probe()
5660 if (pp->dram_target_info || pp->neta_armada3700) in mvneta_probe()
5661 mvneta_conf_mbus_windows(pp, pp->dram_target_info); in mvneta_probe()
5663 pp->tx_ring_size = MVNETA_MAX_TXD; in mvneta_probe()
5664 pp->rx_ring_size = MVNETA_MAX_RXD; in mvneta_probe()
5666 pp->dev = dev; in mvneta_probe()
5667 SET_NETDEV_DEV(dev, &pdev->dev); in mvneta_probe()
5669 pp->id = global_port_id++; in mvneta_probe()
5672 bm_node = of_parse_phandle(dn, "buffer-manager", 0); in mvneta_probe()
5674 pp->bm_priv = mvneta_bm_get(bm_node); in mvneta_probe()
5675 if (pp->bm_priv) { in mvneta_probe()
5678 dev_info(&pdev->dev, in mvneta_probe()
5680 mvneta_bm_put(pp->bm_priv); in mvneta_probe()
5681 pp->bm_priv = NULL; in mvneta_probe()
5685 * NET_SKB_PAD, exceeds 64B. It should be 64B for 64-bit in mvneta_probe()
5686 * platforms and 0B for 32-bit ones. in mvneta_probe()
5688 pp->rx_offset_correction = max(0, in mvneta_probe()
5689 NET_SKB_PAD - in mvneta_probe()
5695 if (!pp->bm_priv) in mvneta_probe()
5696 pp->rx_offset_correction = MVNETA_SKB_HEADROOM; in mvneta_probe()
5698 err = mvneta_init(&pdev->dev, pp); in mvneta_probe()
5702 err = mvneta_port_power_up(pp, pp->phy_interface); in mvneta_probe()
5704 dev_err(&pdev->dev, "can't power up port\n"); in mvneta_probe()
5708 /* Armada3700 network controller does not support per-cpu in mvneta_probe()
5711 if (pp->neta_armada3700) { in mvneta_probe()
5712 netif_napi_add(dev, &pp->napi, mvneta_poll); in mvneta_probe()
5716 per_cpu_ptr(pp->ports, cpu); in mvneta_probe()
5718 netif_napi_add(dev, &port->napi, mvneta_poll); in mvneta_probe()
5719 port->pp = pp; in mvneta_probe()
5723 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | in mvneta_probe()
5725 dev->hw_features |= dev->features; in mvneta_probe()
5726 dev->vlan_features |= dev->features; in mvneta_probe()
5727 if (!pp->bm_priv) in mvneta_probe()
5728 dev->xdp_features = NETDEV_XDP_ACT_BASIC | in mvneta_probe()
5733 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; in mvneta_probe()
5736 /* MTU range: 68 - 9676 */ in mvneta_probe()
5737 dev->min_mtu = ETH_MIN_MTU; in mvneta_probe()
5738 /* 9676 == 9700 - 20 and rounding to 8 */ in mvneta_probe()
5739 dev->max_mtu = 9676; in mvneta_probe()
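One reading of the arithmetic in the comment above, assuming the usual per-frame overhead this driver accounts for (2B Marvell header + 14B Ethernet header + 4B FCS = 20B): the 9700B hardware frame limit minus 20B gives 9680, which is then rounded down so that MTU + 20 stays a multiple of 8, yielding 9676 (9676 + 20 = 9696 = 8 * 1212).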
5743 dev_err(&pdev->dev, "failed to register\n"); in mvneta_probe()
5748 dev->dev_addr); in mvneta_probe()
5750 platform_set_drvdata(pdev, pp->dev); in mvneta_probe()
5755 if (pp->bm_priv) { in mvneta_probe()
5756 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); in mvneta_probe()
5757 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, in mvneta_probe()
5758 1 << pp->id); in mvneta_probe()
5759 mvneta_bm_put(pp->bm_priv); in mvneta_probe()
5761 free_percpu(pp->stats); in mvneta_probe()
5763 free_percpu(pp->ports); in mvneta_probe()
5765 if (pp->phylink) in mvneta_probe()
5766 phylink_destroy(pp->phylink); in mvneta_probe()
5768 clk_disable_unprepare(pp->clk_bus); in mvneta_probe()
5769 clk_disable_unprepare(pp->clk); in mvneta_probe()
5771 irq_dispose_mapping(dev->irq); in mvneta_probe()
5782 clk_disable_unprepare(pp->clk_bus); in mvneta_remove()
5783 clk_disable_unprepare(pp->clk); in mvneta_remove()
5784 free_percpu(pp->ports); in mvneta_remove()
5785 free_percpu(pp->stats); in mvneta_remove()
5786 irq_dispose_mapping(dev->irq); in mvneta_remove()
5787 phylink_destroy(pp->phylink); in mvneta_remove()
5789 if (pp->bm_priv) { in mvneta_remove()
5790 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); in mvneta_remove()
5791 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, in mvneta_remove()
5792 1 << pp->id); in mvneta_remove()
5793 mvneta_bm_put(pp->bm_priv); in mvneta_remove()
5807 if (!pp->neta_armada3700) { in mvneta_suspend()
5808 spin_lock(&pp->lock); in mvneta_suspend()
5809 pp->is_stopped = true; in mvneta_suspend()
5810 spin_unlock(&pp->lock); in mvneta_suspend()
5813 &pp->node_online); in mvneta_suspend()
5815 &pp->node_dead); in mvneta_suspend()
5823 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_suspend()
5829 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_suspend()
5836 clk_disable_unprepare(pp->clk_bus); in mvneta_suspend()
5837 clk_disable_unprepare(pp->clk); in mvneta_suspend()
5849 clk_prepare_enable(pp->clk); in mvneta_resume()
5850 if (!IS_ERR(pp->clk_bus)) in mvneta_resume()
5851 clk_prepare_enable(pp->clk_bus); in mvneta_resume()
5852 if (pp->dram_target_info || pp->neta_armada3700) in mvneta_resume()
5853 mvneta_conf_mbus_windows(pp, pp->dram_target_info); in mvneta_resume()
5854 if (pp->bm_priv) { in mvneta_resume()
5857 dev_info(&pdev->dev, "use SW buffer management\n"); in mvneta_resume()
5858 pp->rx_offset_correction = MVNETA_SKB_HEADROOM; in mvneta_resume()
5859 pp->bm_priv = NULL; in mvneta_resume()
5863 err = mvneta_port_power_up(pp, pp->phy_interface); in mvneta_resume()
5875 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_resume()
5877 rxq->next_desc_to_proc = 0; in mvneta_resume()
5882 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_resume()
5884 txq->next_desc_to_proc = 0; in mvneta_resume()
5888 if (!pp->neta_armada3700) { in mvneta_resume()
5889 spin_lock(&pp->lock); in mvneta_resume()
5890 pp->is_stopped = false; in mvneta_resume()
5891 spin_unlock(&pp->lock); in mvneta_resume()
5893 &pp->node_online); in mvneta_resume()
5895 &pp->node_dead); in mvneta_resume()
5910 { .compatible = "marvell,armada-370-neta" },
5911 { .compatible = "marvell,armada-xp-neta" },
5912 { .compatible = "marvell,armada-3700-neta" },
5913 { .compatible = "marvell,armada-ac5-neta" },
5967 MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
5968 MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.c…