Lines Matching +full:xdp-rx-metadata

1 // SPDX-License-Identifier: GPL-2.0
12 #include <net/xdp.h>
27 * ice_prgm_fdir_fltr - Program a Flow Director filter
47 return -ENOENT; in ice_prgm_fdir_fltr()
48 tx_ring = vsi->tx_rings[0]; in ice_prgm_fdir_fltr()
49 if (!tx_ring || !tx_ring->desc) in ice_prgm_fdir_fltr()
50 return -ENOENT; in ice_prgm_fdir_fltr()
51 dev = tx_ring->dev; in ice_prgm_fdir_fltr()
54 for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) { in ice_prgm_fdir_fltr()
56 return -EAGAIN; in ice_prgm_fdir_fltr()
64 return -EINVAL; in ice_prgm_fdir_fltr()
67 i = tx_ring->next_to_use; in ice_prgm_fdir_fltr()
68 first = &tx_ring->tx_buf[i]; in ice_prgm_fdir_fltr()
73 i = (i < tx_ring->count) ? i : 0; in ice_prgm_fdir_fltr()
75 tx_buf = &tx_ring->tx_buf[i]; in ice_prgm_fdir_fltr()
78 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in ice_prgm_fdir_fltr()
84 tx_desc->buf_addr = cpu_to_le64(dma); in ice_prgm_fdir_fltr()
88 tx_buf->type = ICE_TX_BUF_DUMMY; in ice_prgm_fdir_fltr()
89 tx_buf->raw_buf = raw_packet; in ice_prgm_fdir_fltr()
91 tx_desc->cmd_type_offset_bsz = in ice_prgm_fdir_fltr()
100 first->next_to_watch = tx_desc; in ice_prgm_fdir_fltr()
102 writel(tx_ring->next_to_use, tx_ring->tail); in ice_prgm_fdir_fltr()
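The ICE_DESC_UNUSED() busy-wait above polls until at least two descriptors are free before programming the filter. For reference, the usual Intel-driver ring-occupancy idiom behind such a macro can be open-coded as the sketch below (illustrative only, not the ice macro itself); one slot is always left empty so next_to_use can never catch up with next_to_clean.

static u16 ring_unused_descs(u16 next_to_clean, u16 next_to_use, u16 count)
{
	return ((next_to_clean > next_to_use) ? 0 : count) +
	       next_to_clean - next_to_use - 1;
}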
108 * ice_unmap_and_free_tx_buf - Release a Tx buffer
116 dma_unmap_page(ring->dev, in ice_unmap_and_free_tx_buf()
121 switch (tx_buf->type) { in ice_unmap_and_free_tx_buf()
123 devm_kfree(ring->dev, tx_buf->raw_buf); in ice_unmap_and_free_tx_buf()
126 dev_kfree_skb_any(tx_buf->skb); in ice_unmap_and_free_tx_buf()
129 page_frag_free(tx_buf->raw_buf); in ice_unmap_and_free_tx_buf()
132 xdp_return_frame(tx_buf->xdpf); in ice_unmap_and_free_tx_buf()
136 tx_buf->next_to_watch = NULL; in ice_unmap_and_free_tx_buf()
137 tx_buf->type = ICE_TX_BUF_EMPTY; in ice_unmap_and_free_tx_buf()
144 return netdev_get_tx_queue(ring->netdev, ring->q_index); in txring_txq()
148 * ice_clean_tx_ring - Free any empty Tx buffers
156 if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) { in ice_clean_tx_ring()
162 if (!tx_ring->tx_buf) in ice_clean_tx_ring()
166 for (i = 0; i < tx_ring->count; i++) in ice_clean_tx_ring()
167 ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]); in ice_clean_tx_ring()
170 memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count); in ice_clean_tx_ring()
172 size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc), in ice_clean_tx_ring()
175 memset(tx_ring->desc, 0, size); in ice_clean_tx_ring()
177 tx_ring->next_to_use = 0; in ice_clean_tx_ring()
178 tx_ring->next_to_clean = 0; in ice_clean_tx_ring()
180 if (!tx_ring->netdev) in ice_clean_tx_ring()
188 * ice_free_tx_ring - Free Tx resources per queue
198 devm_kfree(tx_ring->dev, tx_ring->tx_buf); in ice_free_tx_ring()
199 tx_ring->tx_buf = NULL; in ice_free_tx_ring()
201 if (tx_ring->desc) { in ice_free_tx_ring()
202 size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc), in ice_free_tx_ring()
204 dmam_free_coherent(tx_ring->dev, size, in ice_free_tx_ring()
205 tx_ring->desc, tx_ring->dma); in ice_free_tx_ring()
206 tx_ring->desc = NULL; in ice_free_tx_ring()
211 * ice_clean_tx_irq - Reclaim resources after transmit completes
221 struct ice_vsi *vsi = tx_ring->vsi; in ice_clean_tx_irq()
222 s16 i = tx_ring->next_to_clean; in ice_clean_tx_irq()
229 tx_buf = &tx_ring->tx_buf[i]; in ice_clean_tx_irq()
231 i -= tx_ring->count; in ice_clean_tx_irq()
233 prefetch(&vsi->state); in ice_clean_tx_irq()
236 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch; in ice_clean_tx_irq()
243 prefetchw(&tx_buf->skb->users); in ice_clean_tx_irq()
249 if (!(eop_desc->cmd_type_offset_bsz & in ice_clean_tx_irq()
254 tx_buf->next_to_watch = NULL; in ice_clean_tx_irq()
257 total_bytes += tx_buf->bytecount; in ice_clean_tx_irq()
258 total_pkts += tx_buf->gso_segs; in ice_clean_tx_irq()
261 napi_consume_skb(tx_buf->skb, napi_budget); in ice_clean_tx_irq()
264 dma_unmap_single(tx_ring->dev, in ice_clean_tx_irq()
270 tx_buf->type = ICE_TX_BUF_EMPTY; in ice_clean_tx_irq()
280 i -= tx_ring->count; in ice_clean_tx_irq()
281 tx_buf = tx_ring->tx_buf; in ice_clean_tx_irq()
287 dma_unmap_page(tx_ring->dev, in ice_clean_tx_irq()
301 i -= tx_ring->count; in ice_clean_tx_irq()
302 tx_buf = tx_ring->tx_buf; in ice_clean_tx_irq()
309 budget--; in ice_clean_tx_irq()
312 i += tx_ring->count; in ice_clean_tx_irq()
313 tx_ring->next_to_clean = i; in ice_clean_tx_irq()
319 if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) && in ice_clean_tx_irq()
326 !test_bit(ICE_VSI_DOWN, vsi->state)) { in ice_clean_tx_irq()
328 ++tx_ring->ring_stats->tx_stats.restart_q; in ice_clean_tx_irq()
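The cleanup loop above decides whether a buffer is reclaimable by testing the descriptor recorded in next_to_watch. A minimal sketch of that test is below; the helper name is hypothetical, but the DESC_DONE dtype constant comes from the ice headers and is what the loop checks.

static bool tx_eop_desc_done(const struct ice_tx_desc *eop_desc)
{
	/* hardware writes the DESC_DONE dtype back into the EOP descriptor */
	return (eop_desc->cmd_type_offset_bsz &
		cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)) != 0;
}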
336 * ice_setup_tx_ring - Allocate the Tx descriptors
343 struct device *dev = tx_ring->dev; in ice_setup_tx_ring()
347 return -ENOMEM; in ice_setup_tx_ring()
350 WARN_ON(tx_ring->tx_buf); in ice_setup_tx_ring()
351 tx_ring->tx_buf = in ice_setup_tx_ring()
352 devm_kcalloc(dev, sizeof(*tx_ring->tx_buf), tx_ring->count, in ice_setup_tx_ring()
354 if (!tx_ring->tx_buf) in ice_setup_tx_ring()
355 return -ENOMEM; in ice_setup_tx_ring()
358 size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc), in ice_setup_tx_ring()
360 tx_ring->desc = dmam_alloc_coherent(dev, size, &tx_ring->dma, in ice_setup_tx_ring()
362 if (!tx_ring->desc) { in ice_setup_tx_ring()
368 tx_ring->next_to_use = 0; in ice_setup_tx_ring()
369 tx_ring->next_to_clean = 0; in ice_setup_tx_ring()
370 tx_ring->ring_stats->tx_stats.prev_pkt = -1; in ice_setup_tx_ring()
374 devm_kfree(dev, tx_ring->tx_buf); in ice_setup_tx_ring()
375 tx_ring->tx_buf = NULL; in ice_setup_tx_ring()
376 return -ENOMEM; in ice_setup_tx_ring()
380 * ice_clean_rx_ring - Free Rx buffers
385 struct xdp_buff *xdp = &rx_ring->xdp; in ice_clean_rx_ring() local
386 struct device *dev = rx_ring->dev; in ice_clean_rx_ring()
391 if (!rx_ring->rx_buf) in ice_clean_rx_ring()
394 if (rx_ring->xsk_pool) { in ice_clean_rx_ring()
399 if (xdp->data) { in ice_clean_rx_ring()
400 xdp_return_buff(xdp); in ice_clean_rx_ring()
401 xdp->data = NULL; in ice_clean_rx_ring()
404 /* Free all the Rx ring sk_buffs */ in ice_clean_rx_ring()
405 for (i = 0; i < rx_ring->count; i++) { in ice_clean_rx_ring()
406 struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i]; in ice_clean_rx_ring()
408 if (!rx_buf->page) in ice_clean_rx_ring()
414 dma_sync_single_range_for_cpu(dev, rx_buf->dma, in ice_clean_rx_ring()
415 rx_buf->page_offset, in ice_clean_rx_ring()
416 rx_ring->rx_buf_len, in ice_clean_rx_ring()
420 dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring), in ice_clean_rx_ring()
422 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias); in ice_clean_rx_ring()
424 rx_buf->page = NULL; in ice_clean_rx_ring()
425 rx_buf->page_offset = 0; in ice_clean_rx_ring()
429 if (rx_ring->xsk_pool) in ice_clean_rx_ring()
430 memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf))); in ice_clean_rx_ring()
432 memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf))); in ice_clean_rx_ring()
435 size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), in ice_clean_rx_ring()
437 memset(rx_ring->desc, 0, size); in ice_clean_rx_ring()
439 rx_ring->next_to_alloc = 0; in ice_clean_rx_ring()
440 rx_ring->next_to_clean = 0; in ice_clean_rx_ring()
441 rx_ring->first_desc = 0; in ice_clean_rx_ring()
442 rx_ring->next_to_use = 0; in ice_clean_rx_ring()
446 * ice_free_rx_ring - Free Rx resources
456 if (rx_ring->vsi->type == ICE_VSI_PF) in ice_free_rx_ring()
457 if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) in ice_free_rx_ring()
458 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in ice_free_rx_ring()
459 WRITE_ONCE(rx_ring->xdp_prog, NULL); in ice_free_rx_ring()
460 if (rx_ring->xsk_pool) { in ice_free_rx_ring()
461 kfree(rx_ring->xdp_buf); in ice_free_rx_ring()
462 rx_ring->xdp_buf = NULL; in ice_free_rx_ring()
464 kfree(rx_ring->rx_buf); in ice_free_rx_ring()
465 rx_ring->rx_buf = NULL; in ice_free_rx_ring()
468 if (rx_ring->desc) { in ice_free_rx_ring()
469 size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), in ice_free_rx_ring()
471 dmam_free_coherent(rx_ring->dev, size, in ice_free_rx_ring()
472 rx_ring->desc, rx_ring->dma); in ice_free_rx_ring()
473 rx_ring->desc = NULL; in ice_free_rx_ring()
478 * ice_setup_rx_ring - Allocate the Rx descriptors
479 * @rx_ring: the Rx ring to set up
485 struct device *dev = rx_ring->dev; in ice_setup_rx_ring()
489 return -ENOMEM; in ice_setup_rx_ring()
492 WARN_ON(rx_ring->rx_buf); in ice_setup_rx_ring()
493 rx_ring->rx_buf = in ice_setup_rx_ring()
494 kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL); in ice_setup_rx_ring()
495 if (!rx_ring->rx_buf) in ice_setup_rx_ring()
496 return -ENOMEM; in ice_setup_rx_ring()
499 size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), in ice_setup_rx_ring()
501 rx_ring->desc = dmam_alloc_coherent(dev, size, &rx_ring->dma, in ice_setup_rx_ring()
503 if (!rx_ring->desc) { in ice_setup_rx_ring()
504 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n", in ice_setup_rx_ring()
509 rx_ring->next_to_use = 0; in ice_setup_rx_ring()
510 rx_ring->next_to_clean = 0; in ice_setup_rx_ring()
511 rx_ring->first_desc = 0; in ice_setup_rx_ring()
513 if (ice_is_xdp_ena_vsi(rx_ring->vsi)) in ice_setup_rx_ring()
514 WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog); in ice_setup_rx_ring()
519 kfree(rx_ring->rx_buf); in ice_setup_rx_ring()
520 rx_ring->rx_buf = NULL; in ice_setup_rx_ring()
521 return -ENOMEM; in ice_setup_rx_ring()
525 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
526 * @rx_ring: Rx ring
527 * @xdp: xdp_buff used as input to the XDP program
528 * @xdp_prog: XDP program to run
530 * @eop_desc: Last descriptor in packet to read metadata from
535 ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, in ice_run_xdp() argument
545 ice_xdp_meta_set_desc(xdp, eop_desc); in ice_run_xdp()
547 act = bpf_prog_run_xdp(xdp_prog, xdp); in ice_run_xdp()
553 spin_lock(&xdp_ring->tx_lock); in ice_run_xdp()
554 ret = __ice_xmit_xdp_ring(xdp, xdp_ring, false); in ice_run_xdp()
556 spin_unlock(&xdp_ring->tx_lock); in ice_run_xdp()
561 if (xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog)) in ice_run_xdp()
566 bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); in ice_run_xdp()
570 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); in ice_run_xdp()
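The ice_xdp_meta_set_desc() call above stashes the EOP descriptor in the xdp_buff so the driver-side XDP Rx metadata kfuncs (implemented elsewhere in the driver) can later read hash and timestamp fields from it. A minimal BPF-side consumer might look like the sketch below; it assumes a libbpf build with vmlinux.h (for enum xdp_rss_hash_type) and a kernel/driver combination that actually exposes these kfuncs, which return 0 on success.

/* SPDX-License-Identifier: GPL-2.0 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash,
				    enum xdp_rss_hash_type *rss_type) __ksym;
extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
					 __u64 *timestamp) __ksym;

SEC("xdp")
int rx_meta_probe(struct xdp_md *ctx)
{
	enum xdp_rss_hash_type rss_type;
	__u64 ts;
	__u32 hash;

	if (!bpf_xdp_metadata_rx_hash(ctx, &hash, &rss_type))
		bpf_printk("rx hash 0x%x type %d", hash, rss_type);
	if (!bpf_xdp_metadata_rx_timestamp(ctx, &ts))
		bpf_printk("rx hw timestamp %llu", ts);

	return XDP_PASS;
}

char LICENSE[] SEC("license") = "GPL";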
580 * ice_xmit_xdp_ring - submit frame to XDP ring for transmission
581 * @xdpf: XDP frame that will be converted to XDP buff
582 * @xdp_ring: XDP ring for transmission
587 struct xdp_buff xdp; in ice_xmit_xdp_ring() local
589 xdp.data_hard_start = (void *)xdpf; in ice_xmit_xdp_ring()
590 xdp.data = xdpf->data; in ice_xmit_xdp_ring()
591 xdp.data_end = xdp.data + xdpf->len; in ice_xmit_xdp_ring()
592 xdp.frame_sz = xdpf->frame_sz; in ice_xmit_xdp_ring()
593 xdp.flags = xdpf->flags; in ice_xmit_xdp_ring()
595 return __ice_xmit_xdp_ring(&xdp, xdp_ring, true); in ice_xmit_xdp_ring()
599 * ice_xdp_xmit - submit packets to XDP ring for transmission
601 * @n: number of XDP frames to be transmitted
602 * @frames: XDP frames to be transmitted
606 * will be freed by the XDP core.
607 * For error cases, a negative errno code is returned and no frames
616 struct ice_vsi *vsi = np->vsi; in ice_xdp_xmit()
621 if (test_bit(ICE_VSI_DOWN, vsi->state)) in ice_xdp_xmit()
622 return -ENETDOWN; in ice_xdp_xmit()
625 return -ENXIO; in ice_xdp_xmit()
628 return -EINVAL; in ice_xdp_xmit()
631 queue_index %= vsi->num_xdp_txq; in ice_xdp_xmit()
632 xdp_ring = vsi->xdp_rings[queue_index]; in ice_xdp_xmit()
633 spin_lock(&xdp_ring->tx_lock); in ice_xdp_xmit()
636 if (unlikely(queue_index >= vsi->num_xdp_txq)) in ice_xdp_xmit()
637 return -ENXIO; in ice_xdp_xmit()
638 xdp_ring = vsi->xdp_rings[queue_index]; in ice_xdp_xmit()
641 tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use]; in ice_xdp_xmit()
652 tx_buf->rs_idx = ice_set_rs_bit(xdp_ring); in ice_xdp_xmit()
657 spin_unlock(&xdp_ring->tx_lock); in ice_xdp_xmit()
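Per the kernel-doc above, .ndo_xdp_xmit returns the number of frames actually queued and the XDP core frees any remainder. A generic skeleton that satisfies that contract is sketched below; my_xmit_one() and my_ring_doorbell() are hypothetical helpers, not ice functions.

static int my_ndo_xdp_xmit(struct net_device *dev, int n,
			   struct xdp_frame **frames, u32 flags)
{
	int i, nxmit = 0;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	for (i = 0; i < n; i++) {
		if (my_xmit_one(dev, frames[i]))	/* hypothetical helper */
			break;
		nxmit++;
	}

	if (flags & XDP_XMIT_FLUSH)
		my_ring_doorbell(dev);			/* hypothetical helper */

	return nxmit;	/* frames[nxmit..n-1] are freed by the XDP core */
}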
663 * ice_alloc_mapped_page - recycle or make a new page
673 struct page *page = bi->page; in ice_alloc_mapped_page()
683 rx_ring->ring_stats->rx_stats.alloc_page_failed++; in ice_alloc_mapped_page()
688 dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring), in ice_alloc_mapped_page()
694 if (dma_mapping_error(rx_ring->dev, dma)) { in ice_alloc_mapped_page()
696 rx_ring->ring_stats->rx_stats.alloc_page_failed++; in ice_alloc_mapped_page()
700 bi->dma = dma; in ice_alloc_mapped_page()
701 bi->page = page; in ice_alloc_mapped_page()
702 bi->page_offset = rx_ring->rx_offset; in ice_alloc_mapped_page()
703 page_ref_add(page, USHRT_MAX - 1); in ice_alloc_mapped_page()
704 bi->pagecnt_bias = USHRT_MAX; in ice_alloc_mapped_page()
710 * ice_alloc_rx_bufs - Replace used receive buffers
718 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
725 u16 ntu = rx_ring->next_to_use; in ice_alloc_rx_bufs()
729 if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) || in ice_alloc_rx_bufs()
733 /* get the Rx descriptor and buffer based on next_to_use */ in ice_alloc_rx_bufs()
735 bi = &rx_ring->rx_buf[ntu]; in ice_alloc_rx_bufs()
743 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, in ice_alloc_rx_bufs()
744 bi->page_offset, in ice_alloc_rx_bufs()
745 rx_ring->rx_buf_len, in ice_alloc_rx_bufs()
749 * because each write-back erases this info. in ice_alloc_rx_bufs()
751 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); in ice_alloc_rx_bufs()
756 if (unlikely(ntu == rx_ring->count)) { in ice_alloc_rx_bufs()
758 bi = rx_ring->rx_buf; in ice_alloc_rx_bufs()
763 rx_desc->wb.status_error0 = 0; in ice_alloc_rx_bufs()
765 cleaned_count--; in ice_alloc_rx_bufs()
768 if (rx_ring->next_to_use != ntu) in ice_alloc_rx_bufs()
775 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
776 * @rx_buf: Rx buffer to adjust
779 * Update the offset within page so that Rx buf will be ready to be reused.
781 * so the second half of page assigned to Rx buffer will be used, otherwise
789 rx_buf->page_offset ^= size; in ice_rx_buf_adjust_pg_offset()
792 rx_buf->page_offset += size; in ice_rx_buf_adjust_pg_offset()
797 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
808 unsigned int pagecnt_bias = rx_buf->pagecnt_bias; in ice_can_reuse_rx_page()
809 struct page *page = rx_buf->page; in ice_can_reuse_rx_page()
811 /* avoid re-using remote and pfmemalloc pages */ in ice_can_reuse_rx_page()
816 if (unlikely(rx_buf->pgcnt - pagecnt_bias > 1)) in ice_can_reuse_rx_page()
820 (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_3072) in ice_can_reuse_rx_page()
821 if (rx_buf->page_offset > ICE_LAST_OFFSET) in ice_can_reuse_rx_page()
830 page_ref_add(page, USHRT_MAX - 1); in ice_can_reuse_rx_page()
831 rx_buf->pagecnt_bias = USHRT_MAX; in ice_can_reuse_rx_page()
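The reuse test above relies on a biased page refcount: ice_alloc_mapped_page() bumps the refcount by USHRT_MAX - 1 once and the hot path then spends a local pagecnt_bias instead of touching the atomic counter per frame. A simplified, order-0-page sketch of the decision (not the driver's exact code; the large-page offset check is omitted):

static bool rx_page_reusable(struct page *page, unsigned int pgcnt,
			     unsigned int pagecnt_bias)
{
	/* skip remote-node and pfmemalloc pages, as the driver does */
	if (!dev_page_is_reusable(page))
		return false;

	/* reusable only if nobody but the driver still holds a reference */
	return pgcnt - pagecnt_bias <= 1;
}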
838 * ice_add_xdp_frag - Add contents of Rx buffer to xdp buf as a frag
839 * @rx_ring: Rx descriptor ring to transact packets on
840 * @xdp: xdp buff to place the data into
844 * This function will add the data contained in rx_buf->page to the xdp buf.
848 ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, in ice_add_xdp_frag() argument
851 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); in ice_add_xdp_frag()
856 if (!xdp_buff_has_frags(xdp)) { in ice_add_xdp_frag()
857 sinfo->nr_frags = 0; in ice_add_xdp_frag()
858 sinfo->xdp_frags_size = 0; in ice_add_xdp_frag()
859 xdp_buff_set_frags_flag(xdp); in ice_add_xdp_frag()
862 if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) in ice_add_xdp_frag()
863 return -ENOMEM; in ice_add_xdp_frag()
865 __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page, in ice_add_xdp_frag()
866 rx_buf->page_offset, size); in ice_add_xdp_frag()
867 sinfo->xdp_frags_size += size; in ice_add_xdp_frag()
868 /* remember frag count before XDP prog execution; bpf_xdp_adjust_tail() in ice_add_xdp_frag()
871 rx_ring->nr_frags = sinfo->nr_frags; in ice_add_xdp_frag()
873 if (page_is_pfmemalloc(rx_buf->page)) in ice_add_xdp_frag()
874 xdp_buff_set_frag_pfmemalloc(xdp); in ice_add_xdp_frag()
880 * ice_reuse_rx_page - page flip buffer and store it back on the ring
881 * @rx_ring: Rx descriptor ring to store buffers on
889 u16 nta = rx_ring->next_to_alloc; in ice_reuse_rx_page()
892 new_buf = &rx_ring->rx_buf[nta]; in ice_reuse_rx_page()
896 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in ice_reuse_rx_page()
902 new_buf->dma = old_buf->dma; in ice_reuse_rx_page()
903 new_buf->page = old_buf->page; in ice_reuse_rx_page()
904 new_buf->page_offset = old_buf->page_offset; in ice_reuse_rx_page()
905 new_buf->pagecnt_bias = old_buf->pagecnt_bias; in ice_reuse_rx_page()
909 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
910 * @rx_ring: Rx descriptor ring to transact packets on
914 * This function will pull an Rx buffer from the ring and synchronize it
923 rx_buf = &rx_ring->rx_buf[ntc]; in ice_get_rx_buf()
924 prefetchw(rx_buf->page); in ice_get_rx_buf()
929 dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, in ice_get_rx_buf()
930 rx_buf->page_offset, size, in ice_get_rx_buf()
934 rx_buf->pagecnt_bias--; in ice_get_rx_buf()
940 * ice_get_pgcnts - grab page_count() for gathered fragments
941 * @rx_ring: Rx descriptor ring to store the page counts on
943 * This function is intended to be called right before running XDP
946 * way as XDP program can change the refcount of page
950 u32 nr_frags = rx_ring->nr_frags + 1; in ice_get_pgcnts()
951 u32 idx = rx_ring->first_desc; in ice_get_pgcnts()
953 u32 cnt = rx_ring->count; in ice_get_pgcnts()
956 rx_buf = &rx_ring->rx_buf[idx]; in ice_get_pgcnts()
957 rx_buf->pgcnt = page_count(rx_buf->page); in ice_get_pgcnts()
965 * ice_build_skb - Build skb around an existing buffer
966 * @rx_ring: Rx descriptor ring to transact packets on
967 * @xdp: xdp_buff pointing to the data
969 * This function builds an skb around an existing XDP buffer, taking care
974 ice_build_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp) in ice_build_skb() argument
976 u8 metasize = xdp->data - xdp->data_meta; in ice_build_skb()
981 if (unlikely(xdp_buff_has_frags(xdp))) { in ice_build_skb()
982 sinfo = xdp_get_shared_info_from_buff(xdp); in ice_build_skb()
983 nr_frags = sinfo->nr_frags; in ice_build_skb()
986 /* Prefetch first cache line of first page. If xdp->data_meta in ice_build_skb()
987 * is unused, this points to the same place as xdp->data; otherwise we in ice_build_skb()
991 net_prefetch(xdp->data_meta); in ice_build_skb()
993 skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz); in ice_build_skb()
997 /* must record the Rx queue, otherwise OS features such as in ice_build_skb()
1000 skb_record_rx_queue(skb, rx_ring->q_index); in ice_build_skb()
1003 skb_reserve(skb, xdp->data - xdp->data_hard_start); in ice_build_skb()
1004 __skb_put(skb, xdp->data_end - xdp->data); in ice_build_skb()
1008 if (unlikely(xdp_buff_has_frags(xdp))) in ice_build_skb()
1010 sinfo->xdp_frags_size, in ice_build_skb()
1011 nr_frags * xdp->frame_sz, in ice_build_skb()
1012 xdp_buff_is_frag_pfmemalloc(xdp)); in ice_build_skb()
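The metasize computed at the top of ice_build_skb() is how metadata written by an XDP program via bpf_xdp_adjust_meta() survives into the skb. The generic pattern, reduced to its core (a sketch, not the exact ice code), is:

static struct sk_buff *build_skb_with_meta(struct xdp_buff *xdp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	struct sk_buff *skb;

	skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
	if (!skb)
		return NULL;

	skb_reserve(skb, xdp->data - xdp->data_hard_start);	/* headroom incl. meta */
	__skb_put(skb, xdp->data_end - xdp->data);		/* linear data */
	if (metasize)
		skb_metadata_set(skb, metasize);		/* expose meta to the stack */

	return skb;
}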
1018 * ice_construct_skb - Allocate skb and populate it
1019 * @rx_ring: Rx descriptor ring to transact packets on
1020 * @xdp: xdp_buff pointing to the data
1027 ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp) in ice_construct_skb() argument
1029 unsigned int size = xdp->data_end - xdp->data; in ice_construct_skb()
1037 net_prefetch(xdp->data); in ice_construct_skb()
1039 if (unlikely(xdp_buff_has_frags(xdp))) { in ice_construct_skb()
1040 sinfo = xdp_get_shared_info_from_buff(xdp); in ice_construct_skb()
1041 nr_frags = sinfo->nr_frags; in ice_construct_skb()
1045 skb = napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE); in ice_construct_skb()
1049 rx_buf = &rx_ring->rx_buf[rx_ring->first_desc]; in ice_construct_skb()
1050 skb_record_rx_queue(skb, rx_ring->q_index); in ice_construct_skb()
1054 headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE); in ice_construct_skb()
1057 memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, in ice_construct_skb()
1061 size -= headlen; in ice_construct_skb()
1067 if (unlikely(nr_frags >= MAX_SKB_FRAGS - 1)) { in ice_construct_skb()
1071 skb_add_rx_frag(skb, 0, rx_buf->page, in ice_construct_skb()
1072 rx_buf->page_offset + headlen, size, in ice_construct_skb()
1073 xdp->frame_sz); in ice_construct_skb()
1075 /* buffer is unused, restore biased page count in Rx buffer; in ice_construct_skb()
1078 * as-is in ice_construct_skb()
1080 rx_buf->pagecnt_bias++; in ice_construct_skb()
1083 if (unlikely(xdp_buff_has_frags(xdp))) { in ice_construct_skb()
1086 memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0], in ice_construct_skb()
1089 xdp_update_skb_shared_info(skb, skinfo->nr_frags + nr_frags, in ice_construct_skb()
1090 sinfo->xdp_frags_size, in ice_construct_skb()
1091 nr_frags * xdp->frame_sz, in ice_construct_skb()
1092 xdp_buff_is_frag_pfmemalloc(xdp)); in ice_construct_skb()
1099 * ice_put_rx_buf - Clean up used buffer and either recycle or free
1100 * @rx_ring: Rx descriptor ring to transact packets on
1101 * @rx_buf: Rx buffer to pull data from
1117 dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, in ice_put_rx_buf()
1120 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias); in ice_put_rx_buf()
1124 rx_buf->page = NULL; in ice_put_rx_buf()
1128 * ice_put_rx_mbuf - ice_put_rx_buf() caller, for all frame frags
1129 * @rx_ring: Rx ring with all the auxiliary data
1130 * @xdp: XDP buffer carrying linear + frags part
1133 * @verdict: return code from XDP program execution
1137 * returned by XDP program;
1139 static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, in ice_put_rx_mbuf() argument
1142 u32 nr_frags = rx_ring->nr_frags + 1; in ice_put_rx_mbuf()
1143 u32 idx = rx_ring->first_desc; in ice_put_rx_mbuf()
1144 u32 cnt = rx_ring->count; in ice_put_rx_mbuf()
1149 if (unlikely(xdp_buff_has_frags(xdp))) in ice_put_rx_mbuf()
1150 post_xdp_frags += xdp_get_shared_info_from_buff(xdp)->nr_frags; in ice_put_rx_mbuf()
1153 buf = &rx_ring->rx_buf[idx]; in ice_put_rx_mbuf()
1156 ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz); in ice_put_rx_mbuf()
1159 buf->pagecnt_bias++; in ice_put_rx_mbuf()
1161 ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz); in ice_put_rx_mbuf()
1169 /* handle buffers that represented frags released by XDP prog; in ice_put_rx_mbuf()
1170 * for these we keep pagecnt_bias as-is; refcount from struct page in ice_put_rx_mbuf()
1171 * has been decremented within XDP prog and we do not have to increase in ice_put_rx_mbuf()
1175 buf = &rx_ring->rx_buf[idx]; in ice_put_rx_mbuf()
1181 xdp->data = NULL; in ice_put_rx_mbuf()
1182 rx_ring->first_desc = ntc; in ice_put_rx_mbuf()
1183 rx_ring->nr_frags = 0; in ice_put_rx_mbuf()
1187 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
1188 * @rx_ring: Rx descriptor ring to transact packets on
1191 * This function provides a "bounce buffer" approach to Rx interrupt
1201 unsigned int offset = rx_ring->rx_offset; in ice_clean_rx_irq()
1202 struct xdp_buff *xdp = &rx_ring->xdp; in ice_clean_rx_irq() local
1205 u32 ntc = rx_ring->next_to_clean; in ice_clean_rx_irq()
1207 u32 cnt = rx_ring->count; in ice_clean_rx_irq()
1211 xdp_prog = READ_ONCE(rx_ring->xdp_prog); in ice_clean_rx_irq()
1213 xdp_ring = rx_ring->xdp_ring; in ice_clean_rx_irq()
1214 cached_ntu = xdp_ring->next_to_use; in ice_clean_rx_irq()
1217 /* start the loop to process Rx packets bounded by 'budget' */ in ice_clean_rx_irq()
1226 /* get the Rx desc from Rx ring based on 'next_to_clean' */ in ice_clean_rx_irq()
1232 * hardware wrote DD then it will be non-zero in ice_clean_rx_irq()
1235 if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits)) in ice_clean_rx_irq()
1245 if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) { in ice_clean_rx_irq()
1246 struct ice_vsi *ctrl_vsi = rx_ring->vsi; in ice_clean_rx_irq()
1248 if (rx_desc->wb.rxdid == FDIR_DESC_RXDID && in ice_clean_rx_irq()
1249 ctrl_vsi->vf) in ice_clean_rx_irq()
1253 rx_ring->first_desc = ntc; in ice_clean_rx_irq()
1257 size = le16_to_cpu(rx_desc->wb.pkt_len) & in ice_clean_rx_irq()
1263 if (!xdp->data) { in ice_clean_rx_irq()
1266 hard_start = page_address(rx_buf->page) + rx_buf->page_offset - in ice_clean_rx_irq()
1268 xdp_prepare_buff(xdp, hard_start, offset, size, !!offset); in ice_clean_rx_irq()
1269 xdp_buff_clear_frags_flag(xdp); in ice_clean_rx_irq()
1270 } else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) { in ice_clean_rx_irq()
1271 ice_put_rx_mbuf(rx_ring, xdp, NULL, ntc, ICE_XDP_CONSUMED); in ice_clean_rx_irq()
1282 xdp_verdict = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_desc); in ice_clean_rx_irq()
1285 total_rx_bytes += xdp_get_buff_len(xdp); in ice_clean_rx_irq()
1288 ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict); in ice_clean_rx_irq()
1293 skb = ice_build_skb(rx_ring, xdp); in ice_clean_rx_irq()
1295 skb = ice_construct_skb(rx_ring, xdp); in ice_clean_rx_irq()
1298 rx_ring->ring_stats->rx_stats.alloc_page_failed++; in ice_clean_rx_irq()
1301 ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict); in ice_clean_rx_irq()
1307 if (unlikely(ice_test_staterr(rx_desc->wb.status_error0, in ice_clean_rx_irq()
1320 total_rx_bytes += skb->len; in ice_clean_rx_irq()
1333 rx_ring->next_to_clean = ntc; in ice_clean_rx_irq()
1340 if (rx_ring->ring_stats) in ice_clean_rx_irq()
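One contract worth noting for the loop above: xdp_do_redirect() only queues a frame, so redirected traffic is not pushed out until xdp_do_flush() runs once per NAPI poll, after the Rx loop; ice folds this, together with ringing the XDP Tx doorbell, into a finalize helper. A simplified sketch of that step (the real helper also handles the RS-bit and locking details):

static void finalize_xdp_sketch(struct ice_tx_ring *xdp_ring, unsigned int verdicts)
{
	if (verdicts & ICE_XDP_REDIR)
		xdp_do_flush();
	if (verdicts & ICE_XDP_TX)
		writel(xdp_ring->next_to_use, xdp_ring->tail);	/* doorbell, simplified */
}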
1361 ring_stats = tx_ring->ring_stats; in __ice_update_sample()
1364 packets += ring_stats->stats.pkts; in __ice_update_sample()
1365 bytes += ring_stats->stats.bytes; in __ice_update_sample()
1373 ring_stats = rx_ring->ring_stats; in __ice_update_sample()
1376 packets += ring_stats->stats.pkts; in __ice_update_sample()
1377 bytes += ring_stats->stats.bytes; in __ice_update_sample()
1381 dim_update_sample(q_vector->total_events, packets, bytes, sample); in __ice_update_sample()
1382 sample->comp_ctr = 0; in __ice_update_sample()
1389 if (ktime_ms_delta(sample->time, rc->dim.start_sample.time) >= 1000) in __ice_update_sample()
1390 rc->dim.state = DIM_START_MEASURE; in __ice_update_sample()
1394 * ice_net_dim - Update net DIM algorithm
1400 * This function is a no-op if the ring is not configured to dynamic ITR.
1404 struct ice_ring_container *tx = &q_vector->tx; in ice_net_dim()
1405 struct ice_ring_container *rx = &q_vector->rx; in ice_net_dim() local
1411 net_dim(&tx->dim, &dim_sample); in ice_net_dim()
1414 if (ITR_IS_DYNAMIC(rx)) { in ice_net_dim()
1417 __ice_update_sample(q_vector, rx, &dim_sample, false); in ice_net_dim()
1418 net_dim(&rx->dim, &dim_sample); in ice_net_dim()
1423 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
1431 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this in ice_buildreg_itr()
1440 (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)); in ice_buildreg_itr()
1444 * ice_enable_interrupt - re-enable MSI-X interrupt
1447 * If the VSI is down, the interrupt will not be re-enabled. Also,
1453 struct ice_vsi *vsi = q_vector->vsi; in ice_enable_interrupt()
1454 bool wb_en = q_vector->wb_on_itr; in ice_enable_interrupt()
1457 if (test_bit(ICE_DOWN, vsi->state)) in ice_enable_interrupt()
1468 q_vector->wb_on_itr = false; in ice_enable_interrupt()
1481 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val); in ice_enable_interrupt()
1485 * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
1488 * We need to tell hardware to write-back completed descriptors even when
1494 * This sets the write-back frequency to whatever was set previously for the
1500 struct ice_vsi *vsi = q_vector->vsi; in ice_set_wb_on_itr()
1503 if (q_vector->wb_on_itr) in ice_set_wb_on_itr()
1508 * be static in non-adaptive mode (user configured) in ice_set_wb_on_itr()
1510 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), in ice_set_wb_on_itr()
1515 q_vector->wb_on_itr = true; in ice_set_wb_on_itr()
1519 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
1540 ice_for_each_tx_ring(tx_ring, q_vector->tx) { in ice_napi_poll()
1541 struct xsk_buff_pool *xsk_pool = READ_ONCE(tx_ring->xsk_pool); in ice_napi_poll()
1559 /* normally we have 1 Rx ring per q_vector */ in ice_napi_poll()
1560 if (unlikely(q_vector->num_ring_rx > 1)) in ice_napi_poll()
1561 /* We attempt to distribute budget to each Rx queue fairly, but in ice_napi_poll()
1565 budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1); in ice_napi_poll()
1567 /* Max of 1 Rx ring in this q_vector so give it the budget */ in ice_napi_poll()
1570 ice_for_each_rx_ring(rx_ring, q_vector->rx) { in ice_napi_poll()
1571 struct xsk_buff_pool *xsk_pool = READ_ONCE(rx_ring->xsk_pool); in ice_napi_poll()
1574 /* A dedicated path for zero-copy allows making a single in ice_napi_poll()
1578 cleaned = rx_ring->xsk_pool ? in ice_napi_poll()
1590 * cache-lines will still continue even if we're polling. in ice_napi_poll()
1596 /* Exit the polling mode, but don't re-enable interrupts if stack might in ice_napi_poll()
1597 * poll us due to busy-polling in ice_napi_poll()
1606 return min_t(int, work_done, budget - 1); in ice_napi_poll()
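The min_t(int, work_done, budget - 1) return above follows the general NAPI contract: returning budget keeps the instance in polling mode, while a smaller value paired with napi_complete_done() allows interrupts to be re-armed. A generic skeleton of that contract (my_clean_rings() and my_reenable_irq() are hypothetical stand-ins):

static int napi_poll_sketch(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	bool clean_complete;

	clean_complete = my_clean_rings(napi, budget, &work_done);	/* hypothetical */
	if (!clean_complete)
		return budget;			/* keep polling */

	if (napi_complete_done(napi, work_done))
		my_reenable_irq(napi);		/* hypothetical; only when truly done */

	return min(work_done, budget - 1);
}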
1610 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
1614 * Returns -EBUSY if a stop is needed, else 0
1624 return -EBUSY; in __ice_maybe_stop_tx()
1626 /* A reprieve! - use start_queue because it doesn't call schedule */ in __ice_maybe_stop_tx()
1628 ++tx_ring->ring_stats->tx_stats.restart_q; in __ice_maybe_stop_tx()
1633 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
1648 * ice_tx_map - Build the Tx descriptor
1662 u16 i = tx_ring->next_to_use; in ice_tx_map()
1671 td_tag = off->td_l2tag1; in ice_tx_map()
1672 td_cmd = off->td_cmd; in ice_tx_map()
1673 td_offset = off->td_offset; in ice_tx_map()
1674 skb = first->skb; in ice_tx_map()
1676 data_len = skb->data_len; in ice_tx_map()
1681 if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) { in ice_tx_map()
1683 td_tag = first->vid; in ice_tx_map()
1686 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in ice_tx_map()
1690 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { in ice_tx_map()
1693 if (dma_mapping_error(tx_ring->dev, dma)) in ice_tx_map()
1701 max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1); in ice_tx_map()
1702 tx_desc->buf_addr = cpu_to_le64(dma); in ice_tx_map()
1708 tx_desc->cmd_type_offset_bsz = in ice_tx_map()
1715 if (i == tx_ring->count) { in ice_tx_map()
1721 size -= max_data; in ice_tx_map()
1724 tx_desc->buf_addr = cpu_to_le64(dma); in ice_tx_map()
1730 tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset, in ice_tx_map()
1736 if (i == tx_ring->count) { in ice_tx_map()
1742 data_len -= size; in ice_tx_map()
1744 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in ice_tx_map()
1747 tx_buf = &tx_ring->tx_buf[i]; in ice_tx_map()
1748 tx_buf->type = ICE_TX_BUF_FRAG; in ice_tx_map()
1752 skb_tx_timestamp(first->skb); in ice_tx_map()
1755 if (i == tx_ring->count) in ice_tx_map()
1760 tx_desc->cmd_type_offset_bsz = in ice_tx_map()
1772 first->next_to_watch = tx_desc; in ice_tx_map()
1774 tx_ring->next_to_use = i; in ice_tx_map()
1779 kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount, in ice_tx_map()
1783 writel(i, tx_ring->tail); in ice_tx_map()
1790 tx_buf = &tx_ring->tx_buf[i]; in ice_tx_map()
1795 i = tx_ring->count; in ice_tx_map()
1796 i--; in ice_tx_map()
1799 tx_ring->next_to_use = i; in ice_tx_map()
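The max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1) step above sizes the first chunk of an oversized buffer so it ends exactly on a PCIe read-request boundary; every later 12 KiB chunk then starts aligned. A small worked sketch, assuming the 4 KiB / 12 KiB constants from the ice headers:

static unsigned int first_chunk_len(dma_addr_t dma)
{
	unsigned int max_data = 12288;		/* ICE_MAX_DATA_PER_TXD_ALIGNED */

	/* distance from dma up to the next 4 KiB boundary */
	max_data += (unsigned int)(-dma & (4096 - 1));
	return max_data;	/* e.g. dma ending in 0x0a40 -> 12288 + 0x5c0 */
}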
1803 * ice_tx_csum - Enable Tx checksum offloads
1812 const struct ice_tx_ring *tx_ring = off->tx_ring; in ice_tx_csum()
1814 struct sk_buff *skb = first->skb; in ice_tx_csum()
1829 if (skb->ip_summed != CHECKSUM_PARTIAL) in ice_tx_csum()
1843 l2_len = ip.hdr - skb->data; in ice_tx_csum()
1849 if (ip.v4->version == 4) in ice_tx_csum()
1850 first->tx_flags |= ICE_TX_FLAGS_IPV4; in ice_tx_csum()
1851 else if (ip.v6->version == 6) in ice_tx_csum()
1852 first->tx_flags |= ICE_TX_FLAGS_IPV6; in ice_tx_csum()
1854 if (skb->encapsulation) { in ice_tx_csum()
1859 if (first->tx_flags & ICE_TX_FLAGS_IPV4) { in ice_tx_csum()
1860 tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ? in ice_tx_csum()
1863 l4_proto = ip.v4->protocol; in ice_tx_csum()
1864 } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) { in ice_tx_csum()
1869 l4_proto = ip.v6->nexthdr; in ice_tx_csum()
1870 ret = ipv6_skip_exthdr(skb, exthdr - skb->data, in ice_tx_csum()
1873 return -1; in ice_tx_csum()
1880 first->tx_flags |= ICE_TX_FLAGS_TUNNEL; in ice_tx_csum()
1884 first->tx_flags |= ICE_TX_FLAGS_TUNNEL; in ice_tx_csum()
1888 first->tx_flags |= ICE_TX_FLAGS_TUNNEL; in ice_tx_csum()
1892 if (first->tx_flags & ICE_TX_FLAGS_TSO) in ice_tx_csum()
1893 return -1; in ice_tx_csum()
1900 tunnel |= ((l4.hdr - ip.hdr) / 4) << in ice_tx_csum()
1907 tunnel |= ((ip.hdr - l4.hdr) / 2) << in ice_tx_csum()
1910 gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL; in ice_tx_csum()
1912 if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena && in ice_tx_csum()
1913 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) in ice_tx_csum()
1917 off->cd_tunnel_params |= tunnel; in ice_tx_csum()
1922 off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX; in ice_tx_csum()
1929 first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6); in ice_tx_csum()
1930 if (ip.v4->version == 4) in ice_tx_csum()
1931 first->tx_flags |= ICE_TX_FLAGS_IPV4; in ice_tx_csum()
1932 if (ip.v6->version == 6) in ice_tx_csum()
1933 first->tx_flags |= ICE_TX_FLAGS_IPV6; in ice_tx_csum()
1937 if (first->tx_flags & ICE_TX_FLAGS_IPV4) { in ice_tx_csum()
1938 l4_proto = ip.v4->protocol; in ice_tx_csum()
1942 if (first->tx_flags & ICE_TX_FLAGS_TSO) in ice_tx_csum()
1947 } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) { in ice_tx_csum()
1950 l4_proto = ip.v6->nexthdr; in ice_tx_csum()
1952 ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, in ice_tx_csum()
1955 return -1; in ice_tx_csum()
1959 l3_len = l4.hdr - ip.hdr; in ice_tx_csum()
1962 if ((tx_ring->netdev->features & NETIF_F_HW_CSUM) && in ice_tx_csum()
1963 !(first->tx_flags & ICE_TX_FLAGS_TSO) && in ice_tx_csum()
1966 u16 csum_start = (skb->csum_start - skb->mac_header) / 2; in ice_tx_csum()
1967 u16 csum_offset = skb->csum_offset / 2; in ice_tx_csum()
1978 off->cd_qw1 |= ICE_TX_DESC_DTYPE_CTX; in ice_tx_csum()
1979 off->cd_gcs_params = gcs_params; in ice_tx_csum()
1981 off->td_offset |= offset; in ice_tx_csum()
1982 off->td_cmd |= cmd; in ice_tx_csum()
1991 l4_len = l4.tcp->doff; in ice_tx_csum()
2008 if (first->tx_flags & ICE_TX_FLAGS_TSO) in ice_tx_csum()
2009 return -1; in ice_tx_csum()
2014 off->td_cmd |= cmd; in ice_tx_csum()
2015 off->td_offset |= offset; in ice_tx_csum()
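For the generic NETIF_F_HW_CSUM path above, the division by two converts byte offsets into the 16-bit-word units the hardware fields expect. As a worked example with illustrative numbers: for an untagged IPv4/TCP frame, skb->csum_start - skb->mac_header = 14 + 20 = 34 bytes, so csum_start = 17 words, and skb->csum_offset = 16 (the offset of the TCP checksum field), so csum_offset = 8 words.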
2020 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
2030 struct sk_buff *skb = first->skb; in ice_tx_prepare_vlan_flags()
2033 if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol)) in ice_tx_prepare_vlan_flags()
2041 first->vid = skb_vlan_tag_get(skb); in ice_tx_prepare_vlan_flags()
2042 if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2) in ice_tx_prepare_vlan_flags()
2043 first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN; in ice_tx_prepare_vlan_flags()
2045 first->tx_flags |= ICE_TX_FLAGS_HW_VLAN; in ice_tx_prepare_vlan_flags()
2052 * ice_tso - computes mss and TSO length to prepare for TSO
2061 struct sk_buff *skb = first->skb; in ice_tso()
2078 if (skb->ip_summed != CHECKSUM_PARTIAL) in ice_tso()
2097 if (ip.v4->version == 4) { in ice_tso()
2098 ip.v4->tot_len = 0; in ice_tso()
2099 ip.v4->check = 0; in ice_tso()
2101 ip.v6->payload_len = 0; in ice_tso()
2104 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | in ice_tso()
2110 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && in ice_tso()
2111 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { in ice_tso()
2112 l4.udp->len = 0; in ice_tso()
2115 l4_start = (u8)(l4.hdr - skb->data); in ice_tso()
2118 paylen = skb->len - l4_start; in ice_tso()
2119 csum_replace_by_diff(&l4.udp->check, in ice_tso()
2128 if (ip.v4->version == 4) { in ice_tso()
2129 ip.v4->tot_len = 0; in ice_tso()
2130 ip.v4->check = 0; in ice_tso()
2132 ip.v6->payload_len = 0; in ice_tso()
2137 l4_start = (u8)(l4.hdr - skb->data); in ice_tso()
2140 paylen = skb->len - l4_start; in ice_tso()
2142 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { in ice_tso()
2143 csum_replace_by_diff(&l4.udp->check, in ice_tso()
2146 off->header_len = (u8)sizeof(l4.udp) + l4_start; in ice_tso()
2148 csum_replace_by_diff(&l4.tcp->check, in ice_tso()
2151 off->header_len = (u8)((l4.tcp->doff * 4) + l4_start); in ice_tso()
2155 first->gso_segs = skb_shinfo(skb)->gso_segs; in ice_tso()
2156 first->bytecount += (first->gso_segs - 1) * off->header_len; in ice_tso()
2158 cd_tso_len = skb->len - off->header_len; in ice_tso()
2159 cd_mss = skb_shinfo(skb)->gso_size; in ice_tso()
2162 off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | in ice_tso()
2166 first->tx_flags |= ICE_TX_FLAGS_TSO; in ice_tso()
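A worked example of the accounting above (illustrative numbers): for a TCP skb with no options, header_len = 14 + 20 + 20 = 54, gso_size = 1460 and 7300 bytes of payload, skb->len = 7354 and gso_segs = 5, so bytecount becomes 7354 + (5 - 1) * 54 = 7570, matching the 5 * 1514 bytes that actually hit the wire; cd_tso_len is the payload alone, 7354 - 54 = 7300.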
2171 * ice_txd_use_count - estimate the number of descriptors needed for Tx
2176 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
2187 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
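Given the description above, the estimate works out to a form equivalent to ((size * 85) >> 20) + 1, a multiply-and-shift standing in for a division by 12 KiB. For example, size = 65536 gives 65536 * 85 = 5570560; shifted right by 20 that is 5, plus 1 yields 6 descriptors, matching the exact ceil(65536 / 12288) = 6.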
2204 * ice_xmit_desc_count - calculate number of Tx descriptors needed
2211 const skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; in ice_xmit_desc_count()
2212 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; in ice_xmit_desc_count()
2218 if (!nr_frags--) in ice_xmit_desc_count()
2228 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
2246 nr_frags = skb_shinfo(skb)->nr_frags; in __ice_chk_linearize()
2247 if (nr_frags < (ICE_MAX_BUF_TXD - 1)) in __ice_chk_linearize()
2253 nr_frags -= ICE_MAX_BUF_TXD - 2; in __ice_chk_linearize()
2254 frag = &skb_shinfo(skb)->frags[0]; in __ice_chk_linearize()
2262 sum = 1 - skb_shinfo(skb)->gso_size; in __ice_chk_linearize()
2274 for (stale = &skb_shinfo(skb)->frags[0];; stale++) { in __ice_chk_linearize()
2286 int align_pad = -(skb_frag_off(stale)) & in __ice_chk_linearize()
2287 (ICE_MAX_READ_REQ_SIZE - 1); in __ice_chk_linearize()
2289 sum -= align_pad; in __ice_chk_linearize()
2290 stale_size -= align_pad; in __ice_chk_linearize()
2293 sum -= ICE_MAX_DATA_PER_TXD_ALIGNED; in __ice_chk_linearize()
2294 stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED; in __ice_chk_linearize()
2302 if (!nr_frags--) in __ice_chk_linearize()
2305 sum -= stale_size; in __ice_chk_linearize()
2312 * ice_chk_linearize - Check if there are more than 8 fragments per packet
2316 * Note: Our HW can't scatter-gather more than 8 fragments to build
2334 * ice_tstamp - set up context descriptor for hardware timestamp
2347 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) in ice_tstamp()
2351 if (first->tx_flags & ICE_TX_FLAGS_TSO) in ice_tstamp()
2355 idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb); in ice_tstamp()
2357 tx_ring->vsi->back->ptp.tx_hwtstamp_skipped++; in ice_tstamp()
2361 off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | in ice_tstamp()
2364 first->tx_flags |= ICE_TX_FLAGS_TSYN; in ice_tstamp()
2368 * ice_xmit_frame_ring - Sends buffer on Tx ring
2378 struct ice_vsi *vsi = tx_ring->vsi; in ice_xmit_frame_ring()
2393 count = ice_txd_use_count(skb->len); in ice_xmit_frame_ring()
2394 tx_ring->ring_stats->tx_stats.tx_linearize++; in ice_xmit_frame_ring()
2405 tx_ring->ring_stats->tx_stats.tx_busy++; in ice_xmit_frame_ring()
2415 first = &tx_ring->tx_buf[tx_ring->next_to_use]; in ice_xmit_frame_ring()
2416 first->skb = skb; in ice_xmit_frame_ring()
2417 first->type = ICE_TX_BUF_SKB; in ice_xmit_frame_ring()
2418 first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN); in ice_xmit_frame_ring()
2419 first->gso_segs = 1; in ice_xmit_frame_ring()
2420 first->tx_flags = 0; in ice_xmit_frame_ring()
2424 if (first->tx_flags & ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN) { in ice_xmit_frame_ring()
2428 offload.cd_l2tag2 = first->vid; in ice_xmit_frame_ring()
2443 if (unlikely((skb->priority == TC_PRIO_CONTROL || in ice_xmit_frame_ring()
2444 eth->h_proto == htons(ETH_P_LLDP)) && in ice_xmit_frame_ring()
2445 vsi->type == ICE_VSI_PF && in ice_xmit_frame_ring()
2446 vsi->port_info->qos_cfg.is_sw_lldp)) in ice_xmit_frame_ring()
2452 if ((ice_is_switchdev_running(vsi->back) || in ice_xmit_frame_ring()
2453 ice_lag_is_switchdev_running(vsi->back)) && in ice_xmit_frame_ring()
2454 vsi->type != ICE_VSI_SF) in ice_xmit_frame_ring()
2459 u16 i = tx_ring->next_to_use; in ice_xmit_frame_ring()
2464 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in ice_xmit_frame_ring()
2467 cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params); in ice_xmit_frame_ring()
2468 cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2); in ice_xmit_frame_ring()
2469 cdesc->gcs = cpu_to_le16(offload.cd_gcs_params); in ice_xmit_frame_ring()
2470 cdesc->qw1 = cpu_to_le64(offload.cd_qw1); in ice_xmit_frame_ring()
2483 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
2492 struct ice_vsi *vsi = np->vsi; in ice_start_xmit()
2495 tx_ring = vsi->tx_rings[skb->queue_mapping]; in ice_start_xmit()
2507 * ice_get_dscp_up - return the UP/TC value for a SKB
2517 if (skb->protocol == htons(ETH_P_IP)) in ice_get_dscp_up()
2519 else if (skb->protocol == htons(ETH_P_IPV6)) in ice_get_dscp_up()
2522 return dcbcfg->dscp_map[dscp]; in ice_get_dscp_up()
2532 dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; in ice_select_queue()
2533 if (dcbcfg->pfc_mode == ICE_QOS_MODE_DSCP) in ice_select_queue()
2534 skb->priority = ice_get_dscp_up(dcbcfg, skb); in ice_select_queue()
2540 * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
2545 struct ice_vsi *vsi = tx_ring->vsi; in ice_clean_ctrl_tx_irq()
2546 s16 i = tx_ring->next_to_clean; in ice_clean_ctrl_tx_irq()
2551 tx_buf = &tx_ring->tx_buf[i]; in ice_clean_ctrl_tx_irq()
2553 i -= tx_ring->count; in ice_clean_ctrl_tx_irq()
2556 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch; in ice_clean_ctrl_tx_irq()
2566 if (!(eop_desc->cmd_type_offset_bsz & in ice_clean_ctrl_tx_irq()
2571 tx_buf->next_to_watch = NULL; in ice_clean_ctrl_tx_irq()
2572 tx_desc->buf_addr = 0; in ice_clean_ctrl_tx_irq()
2573 tx_desc->cmd_type_offset_bsz = 0; in ice_clean_ctrl_tx_irq()
2580 i -= tx_ring->count; in ice_clean_ctrl_tx_irq()
2581 tx_buf = tx_ring->tx_buf; in ice_clean_ctrl_tx_irq()
2587 dma_unmap_single(tx_ring->dev, in ice_clean_ctrl_tx_irq()
2591 if (tx_buf->type == ICE_TX_BUF_DUMMY) in ice_clean_ctrl_tx_irq()
2592 devm_kfree(tx_ring->dev, tx_buf->raw_buf); in ice_clean_ctrl_tx_irq()
2595 tx_buf->type = ICE_TX_BUF_EMPTY; in ice_clean_ctrl_tx_irq()
2596 tx_buf->tx_flags = 0; in ice_clean_ctrl_tx_irq()
2597 tx_buf->next_to_watch = NULL; in ice_clean_ctrl_tx_irq()
2599 tx_desc->buf_addr = 0; in ice_clean_ctrl_tx_irq()
2600 tx_desc->cmd_type_offset_bsz = 0; in ice_clean_ctrl_tx_irq()
2607 i -= tx_ring->count; in ice_clean_ctrl_tx_irq()
2608 tx_buf = tx_ring->tx_buf; in ice_clean_ctrl_tx_irq()
2612 budget--; in ice_clean_ctrl_tx_irq()
2615 i += tx_ring->count; in ice_clean_ctrl_tx_irq()
2616 tx_ring->next_to_clean = i; in ice_clean_ctrl_tx_irq()
2618 /* re-enable interrupt if needed */ in ice_clean_ctrl_tx_irq()
2619 ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]); in ice_clean_ctrl_tx_irq()