Lines matching full:dp (whole-identifier occurrences of the dp datapath pointer; by the function names shown, the file is the Linux NFP driver's nfp_net_common.c)
66 static dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag) in nfp_net_dma_map_rx() argument
68 return dma_map_single_attrs(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM, in nfp_net_dma_map_rx()
69 dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA, in nfp_net_dma_map_rx()
70 dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC); in nfp_net_dma_map_rx()
74 nfp_net_dma_sync_dev_rx(const struct nfp_net_dp *dp, dma_addr_t dma_addr) in nfp_net_dma_sync_dev_rx() argument
76 dma_sync_single_for_device(dp->dev, dma_addr, in nfp_net_dma_sync_dev_rx()
77 dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA, in nfp_net_dma_sync_dev_rx()
78 dp->rx_dma_dir); in nfp_net_dma_sync_dev_rx()
81 static void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr) in nfp_net_dma_unmap_rx() argument
83 dma_unmap_single_attrs(dp->dev, dma_addr, in nfp_net_dma_unmap_rx()
84 dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA, in nfp_net_dma_unmap_rx()
85 dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC); in nfp_net_dma_unmap_rx()
88 static void nfp_net_dma_sync_cpu_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr, in nfp_net_dma_sync_cpu_rx() argument
91 dma_sync_single_for_cpu(dp->dev, dma_addr - NFP_NET_RX_BUF_HEADROOM, in nfp_net_dma_sync_cpu_rx()
92 len, dp->rx_dma_dir); in nfp_net_dma_sync_cpu_rx()
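
The four helpers above implement an explicit-sync DMA protocol: buffers are mapped once with DMA_ATTR_SKIP_CPU_SYNC, and ownership moves between device and CPU through the two sync helpers. A minimal sketch of the lifecycle they imply (non-XDP case; ring plumbing elided, and the 128-byte read length is an arbitrary stand-in for the packet size):

static int rx_dma_lifecycle_sketch(struct nfp_net_dp *dp)
{
	dma_addr_t dma_addr;
	void *frag;

	frag = netdev_alloc_frag(dp->fl_bufsz);
	if (!frag)
		return -ENOMEM;

	/* Map once with DMA_ATTR_SKIP_CPU_SYNC: syncs stay explicit. */
	dma_addr = nfp_net_dma_map_rx(dp, frag);
	if (dma_mapping_error(dp->dev, dma_addr)) {
		skb_free_frag(frag);
		return -ENOMEM;
	}

	/* Hand the buffer to the device before posting its descriptor. */
	nfp_net_dma_sync_dev_rx(dp, dma_addr);

	/* ... device DMAs a packet into the buffer ... */

	/* Claim back only the bytes the CPU will actually read. */
	nfp_net_dma_sync_cpu_rx(dp, dma_addr, 128);

	/* Tear down the mapping, again without an implicit sync. */
	nfp_net_dma_unmap_rx(dp, dma_addr);
	skb_free_frag(frag);
	return 0;
}
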
437 struct nfp_net_dp *dp = &nn->dp; in nfp_net_irqs_assign() local
440 dp->num_r_vecs = nn->max_r_vecs; in nfp_net_irqs_assign()
444 if (dp->num_rx_rings > dp->num_r_vecs || in nfp_net_irqs_assign()
445 dp->num_tx_rings > dp->num_r_vecs) in nfp_net_irqs_assign()
446 dev_warn(nn->dp.dev, "More rings (%d,%d) than vectors (%d).\n", in nfp_net_irqs_assign()
447 dp->num_rx_rings, dp->num_tx_rings, in nfp_net_irqs_assign()
448 dp->num_r_vecs); in nfp_net_irqs_assign()
450 dp->num_rx_rings = min(dp->num_r_vecs, dp->num_rx_rings); in nfp_net_irqs_assign()
451 dp->num_tx_rings = min(dp->num_r_vecs, dp->num_tx_rings); in nfp_net_irqs_assign()
452 dp->num_stack_tx_rings = dp->num_tx_rings; in nfp_net_irqs_assign()
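
For example, on a host that yields only 8 MSI-X vectors while the firmware advertises 16 RX and 16 TX rings, the warning above fires and both ring counts are clamped to 8; at this point no XDP program is attached, so all 8 TX rings are accounted as stack rings.
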
518 netif_carrier_on(nn->dp.netdev); in nfp_net_read_link_status()
519 netdev_info(nn->dp.netdev, "NIC Link is Up\n"); in nfp_net_read_link_status()
521 netif_carrier_off(nn->dp.netdev); in nfp_net_read_link_status()
522 netdev_info(nn->dp.netdev, "NIC Link is Down\n"); in nfp_net_read_link_status()
765 * @dp: NFP Net data path struct
774 static void nfp_net_tx_csum(struct nfp_net_dp *dp, in nfp_net_tx_csum() argument
783 if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM)) in nfp_net_tx_csum()
802 nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version); in nfp_net_tx_csum()
814 nn_dp_warn(dp, "partial checksum but l4 proto=%x!\n", l4_hdr); in nfp_net_tx_csum()
827 nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec, in nfp_net_tls_tx() argument
836 if (likely(!dp->ktls_tx)) in nfp_net_tls_tx()
866 nn_dp_warn(dp, "tls_encrypt_skb() produced fragmented frame\n"); in nfp_net_tls_tx()
987 struct nfp_net_dp *dp; in nfp_net_tx() local
993 dp = &nn->dp; in nfp_net_tx()
995 tx_ring = &dp->tx_rings[qidx]; in nfp_net_tx()
1001 nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n", in nfp_net_tx()
1003 nd_q = netdev_get_tx_queue(dp->netdev, qidx); in nfp_net_tx()
1012 skb = nfp_net_tls_tx(dp, r_vec, skb, &tls_handle, &nr_frags); in nfp_net_tx()
1023 dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb), in nfp_net_tx()
1025 if (dma_mapping_error(dp->dev, dma_addr)) in nfp_net_tx()
1051 nfp_net_tx_csum(dp, r_vec, txbuf, txd, skb); in nfp_net_tx()
1052 if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) { in nfp_net_tx()
1068 dma_addr = skb_frag_dma_map(dp->dev, frag, 0, in nfp_net_tx()
1070 if (dma_mapping_error(dp->dev, dma_addr)) in nfp_net_tx()
1093 nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx); in nfp_net_tx()
1108 dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr, in nfp_net_tx()
1117 dma_unmap_single(dp->dev, tx_ring->txbufs[wr_idx].dma_addr, in nfp_net_tx()
1123 nn_dp_warn(dp, "Failed to map DMA TX buffer\n"); in nfp_net_tx()
1142 struct nfp_net_dp *dp = &r_vec->nfp_net->dp; in nfp_net_tx_complete() local
1178 dma_unmap_single(dp->dev, tx_buf->dma_addr, in nfp_net_tx_complete()
1186 dma_unmap_page(dp->dev, tx_buf->dma_addr, in nfp_net_tx_complete()
1206 if (!dp->netdev) in nfp_net_tx_complete()
1209 nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx); in nfp_net_tx_complete()
1267 * @dp: NFP Net data path struct
1273 nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring) in nfp_net_tx_ring_reset() argument
1291 dma_unmap_single(dp->dev, tx_buf->dma_addr, in nfp_net_tx_ring_reset()
1296 dma_unmap_page(dp->dev, tx_buf->dma_addr, in nfp_net_tx_ring_reset()
1318 if (tx_ring->is_xdp || !dp->netdev) in nfp_net_tx_ring_reset()
1321 nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx); in nfp_net_tx_ring_reset()
1335 nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp) in nfp_net_calc_fl_bufsz() argument
1340 fl_bufsz += dp->rx_dma_off; in nfp_net_calc_fl_bufsz()
1341 if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) in nfp_net_calc_fl_bufsz()
1344 fl_bufsz += dp->rx_offset; in nfp_net_calc_fl_bufsz()
1345 fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + dp->mtu; in nfp_net_calc_fl_bufsz()
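
Filled in, these fragments reconstruct the whole buffer-size helper. The NFP_NET_RX_BUF_HEADROOM base term, the NFP_NET_MAX_PREPEND branch, and the trailing SKB_DATA_ALIGN() terms are hidden by the match filter and are assumptions from context:

static unsigned int nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp)
{
	unsigned int fl_bufsz;

	fl_bufsz = NFP_NET_RX_BUF_HEADROOM;	/* assumed base term */
	fl_bufsz += dp->rx_dma_off;
	if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
		fl_bufsz += NFP_NET_MAX_PREPEND;	/* assumed */
	else
		fl_bufsz += dp->rx_offset;
	fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + dp->mtu;

	/* Assumed: room for skb_shared_info so build_skb() fits. */
	fl_bufsz = SKB_DATA_ALIGN(fl_bufsz);
	fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	return fl_bufsz;
}
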
1364 * @dp: NFP Net data path struct
1371 static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr) in nfp_net_rx_alloc_one() argument
1375 if (!dp->xdp_prog) { in nfp_net_rx_alloc_one()
1376 frag = netdev_alloc_frag(dp->fl_bufsz); in nfp_net_rx_alloc_one()
1384 nn_dp_warn(dp, "Failed to alloc receive page frag\n"); in nfp_net_rx_alloc_one()
1388 *dma_addr = nfp_net_dma_map_rx(dp, frag); in nfp_net_rx_alloc_one()
1389 if (dma_mapping_error(dp->dev, *dma_addr)) { in nfp_net_rx_alloc_one()
1390 nfp_net_free_frag(frag, dp->xdp_prog); in nfp_net_rx_alloc_one()
1391 nn_dp_warn(dp, "Failed to map DMA RX buffer\n"); in nfp_net_rx_alloc_one()
1398 static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr) in nfp_net_napi_alloc_one() argument
1402 if (!dp->xdp_prog) { in nfp_net_napi_alloc_one()
1403 frag = napi_alloc_frag(dp->fl_bufsz); in nfp_net_napi_alloc_one()
1415 *dma_addr = nfp_net_dma_map_rx(dp, frag); in nfp_net_napi_alloc_one()
1416 if (dma_mapping_error(dp->dev, *dma_addr)) { in nfp_net_napi_alloc_one()
1417 nfp_net_free_frag(frag, dp->xdp_prog); in nfp_net_napi_alloc_one()
1418 nn_dp_warn(dp, "Failed to map DMA RX buffer\n"); in nfp_net_napi_alloc_one()
1427 * @dp: NFP Net data path struct
1432 static void nfp_net_rx_give_one(const struct nfp_net_dp *dp, in nfp_net_rx_give_one() argument
1440 nfp_net_dma_sync_dev_rx(dp, dma_addr); in nfp_net_rx_give_one()
1450 dma_addr + dp->rx_dma_off); in nfp_net_rx_give_one()
1493 * @dp: NFP Net data path struct
1501 nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp, in nfp_net_rx_ring_bufs_free() argument
1514 nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr); in nfp_net_rx_ring_bufs_free()
1515 nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog); in nfp_net_rx_ring_bufs_free()
1523 * @dp: NFP Net data path struct
1527 nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp, in nfp_net_rx_ring_bufs_alloc() argument
1536 rxbufs[i].frag = nfp_net_rx_alloc_one(dp, &rxbufs[i].dma_addr); in nfp_net_rx_ring_bufs_alloc()
1538 nfp_net_rx_ring_bufs_free(dp, rx_ring); in nfp_net_rx_ring_bufs_alloc()
1548 * @dp: NFP Net data path struct
1552 nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp, in nfp_net_rx_ring_fill_freelist() argument
1558 nfp_net_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag, in nfp_net_rx_ring_fill_freelist()
1578 * @dp: NFP Net data path struct
1584 static void nfp_net_rx_csum(struct nfp_net_dp *dp, in nfp_net_rx_csum() argument
1591 if (!(dp->netdev->features & NETIF_F_RXCSUM)) in nfp_net_rx_csum()
1714 nfp_net_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec, in nfp_net_rx_drop() argument
1733 nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr); in nfp_net_rx_drop()
1739 nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring, in nfp_net_tx_xdp_buf() argument
1744 unsigned int dma_map_sz = dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA; in nfp_net_tx_xdp_buf()
1760 nfp_net_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf, in nfp_net_tx_xdp_buf()
1771 nfp_net_rx_give_one(dp, rx_ring, txbuf->frag, txbuf->dma_addr); in nfp_net_tx_xdp_buf()
1779 dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off, in nfp_net_tx_xdp_buf()
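
The nfp_net_tx_xdp_buf() fragments imply a zero-copy recycle on XDP_TX: the RX frag moves onto the XDP TX ring, and the frag previously parked in that TX slot returns to the RX freelist. A sketch, with wr_idx selection, descriptor writes, and the full-ring drop path elided, and the nfp_net_tx_buf/nfp_net_rx_buf field names assumed:

static void xdp_tx_recycle_sketch(struct nfp_net_dp *dp,
				  struct nfp_net_rx_ring *rx_ring,
				  struct nfp_net_tx_ring *tx_ring,
				  struct nfp_net_rx_buf *rxbuf,
				  unsigned int dma_off, unsigned int pkt_len)
{
	struct nfp_net_tx_buf *txbuf = &tx_ring->txbufs[0]; /* wr_idx elided */

	/* Give the frag that used to sit in this TX slot back to RX. */
	nfp_net_rx_give_one(dp, rx_ring, txbuf->frag, txbuf->dma_addr);

	/* Steal the RX buffer for transmit: no copy, no new mapping. */
	txbuf->frag = rxbuf->frag;
	txbuf->dma_addr = rxbuf->dma_addr;

	/* Flush only the packet bytes back to the device. */
	dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off,
				   pkt_len, DMA_BIDIRECTIONAL);
}
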
1812 struct nfp_net_dp *dp = &r_vec->nfp_net->dp; in nfp_net_rx() local
1823 xdp_prog = READ_ONCE(dp->xdp_prog); in nfp_net_rx()
1824 true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz; in nfp_net_rx()
1873 pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off; in nfp_net_rx()
1874 if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) in nfp_net_rx()
1877 pkt_off += dp->rx_offset; in nfp_net_rx()
1887 (dp->rx_offset && meta_len > dp->rx_offset))) { in nfp_net_rx()
1888 nn_dp_warn(dp, "oversized RX packet metadata %u\n", in nfp_net_rx()
1890 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL); in nfp_net_rx()
1894 nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, in nfp_net_rx()
1897 if (!dp->chained_metadata_format) { in nfp_net_rx()
1898 nfp_net_set_hash_desc(dp->netdev, &meta, in nfp_net_rx()
1901 if (unlikely(nfp_net_parse_meta(dp->netdev, &meta, in nfp_net_rx()
1905 nn_dp_warn(dp, "invalid RX packet metadata\n"); in nfp_net_rx()
1906 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, in nfp_net_rx()
1933 if (unlikely(!nfp_net_tx_xdp_buf(dp, rx_ring, in nfp_net_rx()
1938 trace_xdp_exception(dp->netdev, in nfp_net_rx()
1945 trace_xdp_exception(dp->netdev, xdp_prog, act); in nfp_net_rx()
1948 nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, in nfp_net_rx()
1955 netdev = dp->netdev; in nfp_net_rx()
1957 struct nfp_net *nn = netdev_priv(dp->netdev); in nfp_net_rx()
1961 nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, in nfp_net_rx()
1967 nn = netdev_priv(dp->netdev); in nfp_net_rx()
1971 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, in nfp_net_rx()
1982 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL); in nfp_net_rx()
1985 new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr); in nfp_net_rx()
1987 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb); in nfp_net_rx()
1991 nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr); in nfp_net_rx()
1993 nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr); in nfp_net_rx()
2004 nfp_net_rx_csum(dp, r_vec, rxd, &meta, skb); in nfp_net_rx()
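
Ordering is the load-bearing detail in the nfp_net_rx() matches above; reconstructed as an outline, with drop and error branches elided:

/*
 * Per-packet flow in nfp_net_rx(), pieced together from the matches:
 *
 * 1. pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off, plus either
 *    the dynamic prepend or the fixed dp->rx_offset;
 * 2. nfp_net_dma_sync_cpu_rx() over the bytes the CPU will read;
 * 3. parse metadata (hash, csum hints); oversized or invalid metadata
 *    is a drop via nfp_net_rx_drop();
 * 4. if an XDP prog is attached: PASS falls through, TX recycles the
 *    buffer through nfp_net_tx_xdp_buf(), other verdicts give the
 *    frag straight back with nfp_net_rx_give_one();
 * 5. allocate the replacement frag with nfp_net_napi_alloc_one()
 *    before consuming the old one, so an allocation failure degrades
 *    to a drop-and-recycle rather than an emptying freelist;
 * 6. nfp_net_dma_unmap_rx() the old buffer, give the new one to the
 *    ring, build the skb, nfp_net_rx_csum(), and pass it up the stack.
 */
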
2080 struct nfp_net_dp *dp; in nfp_ctrl_tx_one() local
2084 dp = &r_vec->nfp_net->dp; in nfp_ctrl_tx_one()
2088 nn_dp_warn(dp, "Driver's CTRL TX does not implement gather\n"); in nfp_ctrl_tx_one()
2105 nn_dp_warn(dp, "CTRL TX on skb without headroom\n"); in nfp_ctrl_tx_one()
2114 dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb), in nfp_ctrl_tx_one()
2116 if (dma_mapping_error(dp->dev, dma_addr)) in nfp_ctrl_tx_one()
2147 nn_dp_warn(dp, "Failed to DMA map TX CTRL buffer\n"); in nfp_ctrl_tx_one()
2203 nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp, in nfp_ctrl_rx_one() argument
2232 pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off; in nfp_ctrl_rx_one()
2233 if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) in nfp_ctrl_rx_one()
2236 pkt_off += dp->rx_offset; in nfp_ctrl_rx_one()
2245 nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len); in nfp_ctrl_rx_one()
2248 nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n", in nfp_ctrl_rx_one()
2250 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL); in nfp_ctrl_rx_one()
2254 skb = build_skb(rxbuf->frag, dp->fl_bufsz); in nfp_ctrl_rx_one()
2256 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL); in nfp_ctrl_rx_one()
2259 new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr); in nfp_ctrl_rx_one()
2261 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb); in nfp_ctrl_rx_one()
2265 nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr); in nfp_ctrl_rx_one()
2267 nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr); in nfp_ctrl_rx_one()
2281 struct nfp_net_dp *dp = &nn->dp; in nfp_ctrl_rx() local
2284 while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--) in nfp_ctrl_rx()
2303 nn_dp_warn(&r_vec->nfp_net->dp, in nfp_ctrl_poll()
2333 if (nn->dp.netdev) { in nfp_net_vecs_init()
2355 struct nfp_net_dp *dp = &r_vec->nfp_net->dp; in nfp_net_tx_ring_free() local
2360 dma_free_coherent(dp->dev, tx_ring->size, in nfp_net_tx_ring_free()
2372 * @dp: NFP Net data path struct
2378 nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring) in nfp_net_tx_ring_alloc() argument
2382 tx_ring->cnt = dp->txd_cnt; in nfp_net_tx_ring_alloc()
2385 tx_ring->txds = dma_alloc_coherent(dp->dev, tx_ring->size, in nfp_net_tx_ring_alloc()
2389 …netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count:… in nfp_net_tx_ring_alloc()
2399 if (!tx_ring->is_xdp && dp->netdev) in nfp_net_tx_ring_alloc()
2400 netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask, in nfp_net_tx_ring_alloc()
2411 nfp_net_tx_ring_bufs_free(struct nfp_net_dp *dp, in nfp_net_tx_ring_bufs_free() argument
2423 nfp_net_dma_unmap_rx(dp, tx_ring->txbufs[i].dma_addr); in nfp_net_tx_ring_bufs_free()
2429 nfp_net_tx_ring_bufs_alloc(struct nfp_net_dp *dp, in nfp_net_tx_ring_bufs_alloc() argument
2439 txbufs[i].frag = nfp_net_rx_alloc_one(dp, &txbufs[i].dma_addr); in nfp_net_tx_ring_bufs_alloc()
2441 nfp_net_tx_ring_bufs_free(dp, tx_ring); in nfp_net_tx_ring_bufs_alloc()
2449 static int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp) in nfp_net_tx_rings_prepare() argument
2453 dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings), in nfp_net_tx_rings_prepare()
2455 if (!dp->tx_rings) in nfp_net_tx_rings_prepare()
2458 for (r = 0; r < dp->num_tx_rings; r++) { in nfp_net_tx_rings_prepare()
2461 if (r >= dp->num_stack_tx_rings) in nfp_net_tx_rings_prepare()
2462 bias = dp->num_stack_tx_rings; in nfp_net_tx_rings_prepare()
2464 nfp_net_tx_ring_init(&dp->tx_rings[r], &nn->r_vecs[r - bias], in nfp_net_tx_rings_prepare()
2467 if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r])) in nfp_net_tx_rings_prepare()
2470 if (nfp_net_tx_ring_bufs_alloc(dp, &dp->tx_rings[r])) in nfp_net_tx_rings_prepare()
2478 nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]); in nfp_net_tx_rings_prepare()
2480 nfp_net_tx_ring_free(&dp->tx_rings[r]); in nfp_net_tx_rings_prepare()
2482 kfree(dp->tx_rings); in nfp_net_tx_rings_prepare()
2486 static void nfp_net_tx_rings_free(struct nfp_net_dp *dp) in nfp_net_tx_rings_free() argument
2490 for (r = 0; r < dp->num_tx_rings; r++) { in nfp_net_tx_rings_free()
2491 nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]); in nfp_net_tx_rings_free()
2492 nfp_net_tx_ring_free(&dp->tx_rings[r]); in nfp_net_tx_rings_free()
2495 kfree(dp->tx_rings); in nfp_net_tx_rings_free()
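
Note the symmetry: the error unwind in nfp_net_tx_rings_prepare() walks rings r-1..0, freeing buffers and then descriptor memory before kfree()ing the array, which is exactly nfp_net_tx_rings_free() restricted to the rings that were successfully set up. The RX variants below follow the same pattern.
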
2505 struct nfp_net_dp *dp = &r_vec->nfp_net->dp; in nfp_net_rx_ring_free() local
2507 if (dp->netdev) in nfp_net_rx_ring_free()
2512 dma_free_coherent(dp->dev, rx_ring->size, in nfp_net_rx_ring_free()
2524 * @dp: NFP Net data path struct
2530 nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring) in nfp_net_rx_ring_alloc() argument
2534 if (dp->netdev) { in nfp_net_rx_ring_alloc()
2535 err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev, in nfp_net_rx_ring_alloc()
2541 rx_ring->cnt = dp->rxd_cnt; in nfp_net_rx_ring_alloc()
2543 rx_ring->rxds = dma_alloc_coherent(dp->dev, rx_ring->size, in nfp_net_rx_ring_alloc()
2547 …netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count:… in nfp_net_rx_ring_alloc()
2564 static int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp) in nfp_net_rx_rings_prepare() argument
2568 dp->rx_rings = kcalloc(dp->num_rx_rings, sizeof(*dp->rx_rings), in nfp_net_rx_rings_prepare()
2570 if (!dp->rx_rings) in nfp_net_rx_rings_prepare()
2573 for (r = 0; r < dp->num_rx_rings; r++) { in nfp_net_rx_rings_prepare()
2574 nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r); in nfp_net_rx_rings_prepare()
2576 if (nfp_net_rx_ring_alloc(dp, &dp->rx_rings[r])) in nfp_net_rx_rings_prepare()
2579 if (nfp_net_rx_ring_bufs_alloc(dp, &dp->rx_rings[r])) in nfp_net_rx_rings_prepare()
2587 nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]); in nfp_net_rx_rings_prepare()
2589 nfp_net_rx_ring_free(&dp->rx_rings[r]); in nfp_net_rx_rings_prepare()
2591 kfree(dp->rx_rings); in nfp_net_rx_rings_prepare()
2595 static void nfp_net_rx_rings_free(struct nfp_net_dp *dp) in nfp_net_rx_rings_free() argument
2599 for (r = 0; r < dp->num_rx_rings; r++) { in nfp_net_rx_rings_free()
2600 nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]); in nfp_net_rx_rings_free()
2601 nfp_net_rx_ring_free(&dp->rx_rings[r]); in nfp_net_rx_rings_free()
2604 kfree(dp->rx_rings); in nfp_net_rx_rings_free()
2608 nfp_net_vector_assign_rings(struct nfp_net_dp *dp, in nfp_net_vector_assign_rings() argument
2611 r_vec->rx_ring = idx < dp->num_rx_rings ? &dp->rx_rings[idx] : NULL; in nfp_net_vector_assign_rings()
2613 idx < dp->num_stack_tx_rings ? &dp->tx_rings[idx] : NULL; in nfp_net_vector_assign_rings()
2615 r_vec->xdp_ring = idx < dp->num_tx_rings - dp->num_stack_tx_rings ? in nfp_net_vector_assign_rings()
2616 &dp->tx_rings[dp->num_stack_tx_rings + idx] : NULL; in nfp_net_vector_assign_rings()
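
Worked example of the assignment above: with 4 RX rings and 8 TX rings of which 4 are stack rings (an XDP program attached), vector 2 gets rx_rings[2] and tx_rings[2] for the stack, and since 2 < 8 - 4, also tx_rings[4 + 2] = tx_rings[6] as its XDP ring.
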
2626 if (nn->dp.netdev) in nfp_net_prepare_vector()
2627 netif_napi_add(nn->dp.netdev, &r_vec->napi, in nfp_net_prepare_vector()
2637 if (nn->dp.netdev) in nfp_net_prepare_vector()
2659 if (nn->dp.netdev) in nfp_net_cleanup_vector()
2712 for (i = 0; i < nn->dp.num_rx_rings; i++) in nfp_net_coalesce_write_cfg()
2718 for (i = 0; i < nn->dp.num_tx_rings; i++) in nfp_net_coalesce_write_cfg()
2760 new_ctrl = nn->dp.ctrl; in nfp_net_clear_config_and_disable()
2777 for (r = 0; r < nn->dp.num_rx_rings; r++) in nfp_net_clear_config_and_disable()
2778 nfp_net_rx_ring_reset(&nn->dp.rx_rings[r]); in nfp_net_clear_config_and_disable()
2779 for (r = 0; r < nn->dp.num_tx_rings; r++) in nfp_net_clear_config_and_disable()
2780 nfp_net_tx_ring_reset(&nn->dp, &nn->dp.tx_rings[r]); in nfp_net_clear_config_and_disable()
2781 for (r = 0; r < nn->dp.num_r_vecs; r++) in nfp_net_clear_config_and_disable()
2784 nn->dp.ctrl = new_ctrl; in nfp_net_clear_config_and_disable()
2816 new_ctrl = nn->dp.ctrl; in nfp_net_set_config_and_enable()
2818 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_RSS_ANY) { in nfp_net_set_config_and_enable()
2825 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_IRQMOD) { in nfp_net_set_config_and_enable()
2830 for (r = 0; r < nn->dp.num_tx_rings; r++) in nfp_net_set_config_and_enable()
2831 nfp_net_tx_ring_hw_cfg_write(nn, &nn->dp.tx_rings[r], r); in nfp_net_set_config_and_enable()
2832 for (r = 0; r < nn->dp.num_rx_rings; r++) in nfp_net_set_config_and_enable()
2833 nfp_net_rx_ring_hw_cfg_write(nn, &nn->dp.rx_rings[r], r); in nfp_net_set_config_and_enable()
2835 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->dp.num_tx_rings == 64 ? in nfp_net_set_config_and_enable()
2836 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_tx_rings) - 1); in nfp_net_set_config_and_enable()
2838 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->dp.num_rx_rings == 64 ? in nfp_net_set_config_and_enable()
2839 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_rx_rings) - 1); in nfp_net_set_config_and_enable()
2841 if (nn->dp.netdev) in nfp_net_set_config_and_enable()
2842 nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr); in nfp_net_set_config_and_enable()
2844 nn_writel(nn, NFP_NET_CFG_MTU, nn->dp.mtu); in nfp_net_set_config_and_enable()
2846 bufsz = nn->dp.fl_bufsz - nn->dp.rx_dma_off - NFP_NET_RX_BUF_NON_DATA; in nfp_net_set_config_and_enable()
2864 nn->dp.ctrl = new_ctrl; in nfp_net_set_config_and_enable()
2866 for (r = 0; r < nn->dp.num_rx_rings; r++) in nfp_net_set_config_and_enable()
2867 nfp_net_rx_ring_fill_freelist(&nn->dp, &nn->dp.rx_rings[r]); in nfp_net_set_config_and_enable()
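
The two nn_writeq() calls above build dense ring-enable masks: for 4 TX rings, ((u64)1 << 4) - 1 = 0xf. The 64-ring special case exists because a shift by 64 is undefined behaviour in C, so the all-ones mask is spelled out instead.
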
2881 netif_carrier_off(nn->dp.netdev); in nfp_net_close_stack()
2884 for (r = 0; r < nn->dp.num_r_vecs; r++) { in nfp_net_close_stack()
2889 netif_tx_disable(nn->dp.netdev); in nfp_net_close_stack()
2900 nfp_net_tx_rings_free(&nn->dp); in nfp_net_close_free_all()
2901 nfp_net_rx_rings_free(&nn->dp); in nfp_net_close_free_all()
2903 for (r = 0; r < nn->dp.num_r_vecs; r++) in nfp_net_close_free_all()
2941 for (r = 0; r < nn->dp.num_r_vecs; r++) { in nfp_ctrl_close()
2961 for (r = 0; r < nn->dp.num_r_vecs; r++) { in nfp_net_open_stack()
2966 netif_tx_wake_all_queues(nn->dp.netdev); in nfp_net_open_stack()
2988 for (r = 0; r < nn->dp.num_r_vecs; r++) { in nfp_net_open_alloc_all()
2994 err = nfp_net_rx_rings_prepare(nn, &nn->dp); in nfp_net_open_alloc_all()
2998 err = nfp_net_tx_rings_prepare(nn, &nn->dp); in nfp_net_open_alloc_all()
3003 nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r); in nfp_net_open_alloc_all()
3008 nfp_net_rx_rings_free(&nn->dp); in nfp_net_open_alloc_all()
3010 r = nn->dp.num_r_vecs; in nfp_net_open_alloc_all()
3034 err = netif_set_real_num_tx_queues(netdev, nn->dp.num_stack_tx_rings); in nfp_net_netdev_open()
3038 err = netif_set_real_num_rx_queues(netdev, nn->dp.num_rx_rings); in nfp_net_netdev_open()
3090 for (r = 0; r < nn->dp.num_r_vecs; r++) in nfp_ctrl_open()
3109 new_ctrl = nn->dp.ctrl; in nfp_net_set_rx_mode()
3125 if (new_ctrl == nn->dp.ctrl) in nfp_net_set_rx_mode()
3131 nn->dp.ctrl = new_ctrl; in nfp_net_set_rx_mode()
3140 ethtool_rxfh_indir_default(i, nn->dp.num_rx_rings); in nfp_net_rss_init_itbl()
3143 static void nfp_net_dp_swap(struct nfp_net *nn, struct nfp_net_dp *dp) in nfp_net_dp_swap() argument
3145 struct nfp_net_dp new_dp = *dp; in nfp_net_dp_swap()
3147 *dp = nn->dp; in nfp_net_dp_swap()
3148 nn->dp = new_dp; in nfp_net_dp_swap()
3150 nn->dp.netdev->mtu = new_dp.mtu; in nfp_net_dp_swap()
3152 if (!netif_is_rxfh_configured(nn->dp.netdev)) in nfp_net_dp_swap()
3156 static int nfp_net_dp_swap_enable(struct nfp_net *nn, struct nfp_net_dp *dp) in nfp_net_dp_swap_enable() argument
3161 nfp_net_dp_swap(nn, dp); in nfp_net_dp_swap_enable()
3164 nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r); in nfp_net_dp_swap_enable()
3166 err = netif_set_real_num_rx_queues(nn->dp.netdev, nn->dp.num_rx_rings); in nfp_net_dp_swap_enable()
3170 if (nn->dp.netdev->real_num_tx_queues != nn->dp.num_stack_tx_rings) { in nfp_net_dp_swap_enable()
3171 err = netif_set_real_num_tx_queues(nn->dp.netdev, in nfp_net_dp_swap_enable()
3172 nn->dp.num_stack_tx_rings); in nfp_net_dp_swap_enable()
3188 *new = nn->dp; in nfp_net_clone_dp()
3201 nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp, in nfp_net_check_config() argument
3205 if (!dp->xdp_prog) in nfp_net_check_config()
3207 if (dp->fl_bufsz > PAGE_SIZE) { in nfp_net_check_config()
3211 if (dp->num_tx_rings > nn->max_tx_rings) { in nfp_net_check_config()
3219 int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *dp, in nfp_net_ring_reconfig() argument
3224 dp->fl_bufsz = nfp_net_calc_fl_bufsz(dp); in nfp_net_ring_reconfig()
3226 dp->num_stack_tx_rings = dp->num_tx_rings; in nfp_net_ring_reconfig()
3227 if (dp->xdp_prog) in nfp_net_ring_reconfig()
3228 dp->num_stack_tx_rings -= dp->num_rx_rings; in nfp_net_ring_reconfig()
3230 dp->num_r_vecs = max(dp->num_rx_rings, dp->num_stack_tx_rings); in nfp_net_ring_reconfig()
3232 err = nfp_net_check_config(nn, dp, extack); in nfp_net_ring_reconfig()
3236 if (!netif_running(dp->netdev)) { in nfp_net_ring_reconfig()
3237 nfp_net_dp_swap(nn, dp); in nfp_net_ring_reconfig()
3243 for (r = nn->dp.num_r_vecs; r < dp->num_r_vecs; r++) { in nfp_net_ring_reconfig()
3246 dp->num_r_vecs = r; in nfp_net_ring_reconfig()
3251 err = nfp_net_rx_rings_prepare(nn, dp); in nfp_net_ring_reconfig()
3255 err = nfp_net_tx_rings_prepare(nn, dp); in nfp_net_ring_reconfig()
3263 err = nfp_net_dp_swap_enable(nn, dp); in nfp_net_ring_reconfig()
3270 err2 = nfp_net_dp_swap_enable(nn, dp); in nfp_net_ring_reconfig()
3275 for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--) in nfp_net_ring_reconfig()
3278 nfp_net_rx_rings_free(dp); in nfp_net_ring_reconfig()
3279 nfp_net_tx_rings_free(dp); in nfp_net_ring_reconfig()
3283 kfree(dp); in nfp_net_ring_reconfig()
3288 nfp_net_rx_rings_free(dp); in nfp_net_ring_reconfig()
3290 for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--) in nfp_net_ring_reconfig()
3292 kfree(dp); in nfp_net_ring_reconfig()
3299 struct nfp_net_dp *dp; in nfp_net_change_mtu() local
3306 dp = nfp_net_clone_dp(nn); in nfp_net_change_mtu()
3307 if (!dp) in nfp_net_change_mtu()
3310 dp->mtu = new_mtu; in nfp_net_change_mtu()
3312 return nfp_net_ring_reconfig(nn, dp, NULL); in nfp_net_change_mtu()
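
nfp_net_change_mtu() shows the general reconfiguration pattern: clone the datapath, mutate the copy, and let nfp_net_ring_reconfig() allocate new rings, swap them in, and roll back on failure. A sketch of changing a different parameter the same way, using a hypothetical descriptor-count setter:

static int set_ring_size_sketch(struct nfp_net *nn,
				u32 txd_cnt, u32 rxd_cnt)
{
	struct nfp_net_dp *dp;

	dp = nfp_net_clone_dp(nn);	/* kmalloc'd copy of nn->dp */
	if (!dp)
		return -ENOMEM;

	dp->txd_cnt = txd_cnt;
	dp->rxd_cnt = rxd_cnt;

	/* Takes ownership of dp; frees it on both success and error,
	 * as the kfree(dp) calls in nfp_net_ring_reconfig() show. */
	return nfp_net_ring_reconfig(nn, dp, NULL);
}
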
3415 new_ctrl = nn->dp.ctrl; in nfp_net_set_features()
3474 if (new_ctrl == nn->dp.ctrl) in nfp_net_set_features()
3477 nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->dp.ctrl, new_ctrl); in nfp_net_set_features()
3483 nn->dp.ctrl = new_ctrl; in nfp_net_set_features()
3549 if (nn->dp.is_vf || nn->vnic_no_name) in nfp_net_get_phys_port_name()
3562 struct nfp_net_dp *dp; in nfp_net_xdp_setup_drv() local
3565 if (!prog == !nn->dp.xdp_prog) { in nfp_net_xdp_setup_drv()
3566 WRITE_ONCE(nn->dp.xdp_prog, prog); in nfp_net_xdp_setup_drv()
3571 dp = nfp_net_clone_dp(nn); in nfp_net_xdp_setup_drv()
3572 if (!dp) in nfp_net_xdp_setup_drv()
3575 dp->xdp_prog = prog; in nfp_net_xdp_setup_drv()
3576 dp->num_tx_rings += prog ? nn->dp.num_rx_rings : -nn->dp.num_rx_rings; in nfp_net_xdp_setup_drv()
3577 dp->rx_dma_dir = prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; in nfp_net_xdp_setup_drv()
3578 dp->rx_dma_off = prog ? XDP_PACKET_HEADROOM - nn->dp.rx_offset : 0; in nfp_net_xdp_setup_drv()
3581 err = nfp_net_ring_reconfig(nn, dp, bpf->extack); in nfp_net_xdp_setup_drv()
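
Attaching a program reshapes the datapath in three ways visible above: one extra TX ring per RX ring for XDP_TX (and the reverse on detach), bidirectional DMA so those rings can transmit straight out of RX buffers, and extra headroom of XDP_PACKET_HEADROOM - rx_offset bytes, i.e. 256 - 32 = 224 with the fixed default NFP_NET_RX_OFFSET of 32, so the program has room to push on encapsulation headers.
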
3703 nn->dp.is_vf ? "VF " : "", in nfp_net_info()
3704 nn->dp.num_tx_rings, nn->max_tx_rings, in nfp_net_info()
3705 nn->dp.num_rx_rings, nn->max_rx_rings); in nfp_net_info()
3767 nn->dp.netdev = netdev; in nfp_net_alloc()
3774 nn->dp.dev = &pdev->dev; in nfp_net_alloc()
3775 nn->dp.ctrl_bar = ctrl_bar; in nfp_net_alloc()
3781 nn->dp.num_tx_rings = min_t(unsigned int, in nfp_net_alloc()
3783 nn->dp.num_rx_rings = min_t(unsigned int, max_rx_rings, in nfp_net_alloc()
3786 nn->dp.num_r_vecs = max(nn->dp.num_tx_rings, nn->dp.num_rx_rings); in nfp_net_alloc()
3787 nn->dp.num_r_vecs = min_t(unsigned int, in nfp_net_alloc()
3788 nn->dp.num_r_vecs, num_online_cpus()); in nfp_net_alloc()
3790 nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT; in nfp_net_alloc()
3791 nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT; in nfp_net_alloc()
3800 err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar, in nfp_net_alloc()
3812 if (nn->dp.netdev) in nfp_net_alloc()
3813 free_netdev(nn->dp.netdev); in nfp_net_alloc()
3828 if (nn->dp.netdev) in nfp_net_free()
3829 free_netdev(nn->dp.netdev); in nfp_net_free()
3873 dev_warn(nn->dp.dev, in nfp_net_rss_init()
3904 struct net_device *netdev = nn->dp.netdev; in nfp_net_netdev_init()
3906 nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr); in nfp_net_netdev_init()
3908 netdev->mtu = nn->dp.mtu; in nfp_net_netdev_init()
3922 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY; in nfp_net_netdev_init()
3926 nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXCSUM; in nfp_net_netdev_init()
3930 nn->dp.ctrl |= NFP_NET_CFG_CTRL_GATHER; in nfp_net_netdev_init()
3935 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?: in nfp_net_netdev_init()
3944 nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN; in nfp_net_netdev_init()
3949 nn->dp.ctrl |= NFP_NET_CFG_CTRL_NVGRE; in nfp_net_netdev_init()
3958 nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXVLAN; in nfp_net_netdev_init()
3965 nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXVLAN; in nfp_net_netdev_init()
3970 nn->dp.ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER; in nfp_net_netdev_init()
3980 nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY; in nfp_net_netdev_init()
4007 nn->dp.chained_metadata_format = nn->fw_ver.major == 4 || in nfp_net_read_caps()
4008 !nn->dp.netdev || in nfp_net_read_caps()
4014 if (nn->dp.chained_metadata_format && nn->fw_ver.major != 4) in nfp_net_read_caps()
4026 nn->dp.rx_offset = reg; in nfp_net_read_caps()
4028 nn->dp.rx_offset = NFP_NET_RX_OFFSET; in nfp_net_read_caps()
4032 if (!nn->dp.netdev) in nfp_net_read_caps()
4048 nn->dp.rx_dma_dir = DMA_FROM_DEVICE; in nfp_net_init()
4056 nn->dp.mtu = min(nn->app->ctrl_mtu, nn->max_mtu); in nfp_net_init()
4058 nn->dp.mtu = nn->max_mtu; in nfp_net_init()
4060 nn->dp.mtu = NFP_NET_DEFAULT_MTU; in nfp_net_init()
4062 nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp); in nfp_net_init()
4065 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_CMSG_DATA; in nfp_net_init()
4069 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RSS2 ?: in nfp_net_init()
4075 nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2BC; in nfp_net_init()
4080 nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD; in nfp_net_init()
4095 if (nn->dp.netdev) { in nfp_net_init()
4109 if (!nn->dp.netdev) in nfp_net_init()
4111 return register_netdev(nn->dp.netdev); in nfp_net_init()
4124 if (!nn->dp.netdev) in nfp_net_clean()
4127 unregister_netdev(nn->dp.netdev); in nfp_net_clean()