1 // SPDX-License-Identifier: GPL-2.0
12 #include <net/xdp.h>
27 * ice_prgm_fdir_fltr - Program a Flow Director filter
47 return -ENOENT; in ice_prgm_fdir_fltr()
48 tx_ring = vsi->tx_rings[0]; in ice_prgm_fdir_fltr()
49 if (!tx_ring || !tx_ring->desc) in ice_prgm_fdir_fltr()
50 return -ENOENT; in ice_prgm_fdir_fltr()
51 dev = tx_ring->dev; in ice_prgm_fdir_fltr()
54 for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) { in ice_prgm_fdir_fltr()
56 return -EAGAIN; in ice_prgm_fdir_fltr()
64 return -EINVAL; in ice_prgm_fdir_fltr()
67 i = tx_ring->next_to_use; in ice_prgm_fdir_fltr()
68 first = &tx_ring->tx_buf[i]; in ice_prgm_fdir_fltr()
73 i = (i < tx_ring->count) ? i : 0; in ice_prgm_fdir_fltr()
75 tx_buf = &tx_ring->tx_buf[i]; in ice_prgm_fdir_fltr()
78 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in ice_prgm_fdir_fltr()
84 tx_desc->buf_addr = cpu_to_le64(dma); in ice_prgm_fdir_fltr()
88 tx_buf->type = ICE_TX_BUF_DUMMY; in ice_prgm_fdir_fltr()
89 tx_buf->raw_buf = raw_packet; in ice_prgm_fdir_fltr()
91 tx_desc->cmd_type_offset_bsz = in ice_prgm_fdir_fltr()
100 first->next_to_watch = tx_desc; in ice_prgm_fdir_fltr()
102 writel(tx_ring->next_to_use, tx_ring->tail); in ice_prgm_fdir_fltr()
108 * ice_unmap_and_free_tx_buf - Release a Tx buffer
116 dma_unmap_page(ring->dev, in ice_unmap_and_free_tx_buf()
121 switch (tx_buf->type) { in ice_unmap_and_free_tx_buf()
123 devm_kfree(ring->dev, tx_buf->raw_buf); in ice_unmap_and_free_tx_buf()
126 dev_kfree_skb_any(tx_buf->skb); in ice_unmap_and_free_tx_buf()
129 page_frag_free(tx_buf->raw_buf); in ice_unmap_and_free_tx_buf()
132 xdp_return_frame(tx_buf->xdpf); in ice_unmap_and_free_tx_buf()
136 tx_buf->next_to_watch = NULL; in ice_unmap_and_free_tx_buf()
137 tx_buf->type = ICE_TX_BUF_EMPTY; in ice_unmap_and_free_tx_buf()
144 return netdev_get_tx_queue(ring->netdev, ring->q_index); in txring_txq()
148 * ice_clean_tx_ring - Free any empty Tx buffers
156 if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) { in ice_clean_tx_ring()
162 if (!tx_ring->tx_buf) in ice_clean_tx_ring()
166 for (i = 0; i < tx_ring->count; i++) in ice_clean_tx_ring()
167 ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]); in ice_clean_tx_ring()
170 memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count); in ice_clean_tx_ring()
172 size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc), in ice_clean_tx_ring()
175 memset(tx_ring->desc, 0, size); in ice_clean_tx_ring()
177 tx_ring->next_to_use = 0; in ice_clean_tx_ring()
178 tx_ring->next_to_clean = 0; in ice_clean_tx_ring()
180 if (!tx_ring->netdev) in ice_clean_tx_ring()
188 * ice_free_tx_ring - Free Tx resources per queue
198 devm_kfree(tx_ring->dev, tx_ring->tx_buf); in ice_free_tx_ring()
199 tx_ring->tx_buf = NULL; in ice_free_tx_ring()
201 if (tx_ring->desc) { in ice_free_tx_ring()
202 size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc), in ice_free_tx_ring()
204 dmam_free_coherent(tx_ring->dev, size, in ice_free_tx_ring()
205 tx_ring->desc, tx_ring->dma); in ice_free_tx_ring()
206 tx_ring->desc = NULL; in ice_free_tx_ring()
211 * ice_clean_tx_irq - Reclaim resources after transmit completes
221 struct ice_vsi *vsi = tx_ring->vsi; in ice_clean_tx_irq()
222 s16 i = tx_ring->next_to_clean; in ice_clean_tx_irq()
229 tx_buf = &tx_ring->tx_buf[i]; in ice_clean_tx_irq()
231 i -= tx_ring->count; in ice_clean_tx_irq()
233 prefetch(&vsi->state); in ice_clean_tx_irq()
236 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch; in ice_clean_tx_irq()
243 prefetchw(&tx_buf->skb->users); in ice_clean_tx_irq()
249 if (!(eop_desc->cmd_type_offset_bsz & in ice_clean_tx_irq()
254 tx_buf->next_to_watch = NULL; in ice_clean_tx_irq()
257 total_bytes += tx_buf->bytecount; in ice_clean_tx_irq()
258 total_pkts += tx_buf->gso_segs; in ice_clean_tx_irq()
261 napi_consume_skb(tx_buf->skb, napi_budget); in ice_clean_tx_irq()
264 dma_unmap_single(tx_ring->dev, in ice_clean_tx_irq()
270 tx_buf->type = ICE_TX_BUF_EMPTY; in ice_clean_tx_irq()
280 i -= tx_ring->count; in ice_clean_tx_irq()
281 tx_buf = tx_ring->tx_buf; in ice_clean_tx_irq()
287 dma_unmap_page(tx_ring->dev, in ice_clean_tx_irq()
301 i -= tx_ring->count; in ice_clean_tx_irq()
302 tx_buf = tx_ring->tx_buf; in ice_clean_tx_irq()
309 budget--; in ice_clean_tx_irq()
312 i += tx_ring->count; in ice_clean_tx_irq()
313 tx_ring->next_to_clean = i; in ice_clean_tx_irq()
319 if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) && in ice_clean_tx_irq()
326 !test_bit(ICE_VSI_DOWN, vsi->state)) { in ice_clean_tx_irq()
328 ++tx_ring->ring_stats->tx_stats.restart_q; in ice_clean_tx_irq()
336 * ice_setup_tx_ring - Allocate the Tx descriptors
343 struct device *dev = tx_ring->dev; in ice_setup_tx_ring()
347 return -ENOMEM; in ice_setup_tx_ring()
350 WARN_ON(tx_ring->tx_buf); in ice_setup_tx_ring()
351 tx_ring->tx_buf = in ice_setup_tx_ring()
352 devm_kcalloc(dev, sizeof(*tx_ring->tx_buf), tx_ring->count, in ice_setup_tx_ring()
354 if (!tx_ring->tx_buf) in ice_setup_tx_ring()
355 return -ENOMEM; in ice_setup_tx_ring()
358 size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc), in ice_setup_tx_ring()
360 tx_ring->desc = dmam_alloc_coherent(dev, size, &tx_ring->dma, in ice_setup_tx_ring()
362 if (!tx_ring->desc) { in ice_setup_tx_ring()
368 tx_ring->next_to_use = 0; in ice_setup_tx_ring()
369 tx_ring->next_to_clean = 0; in ice_setup_tx_ring()
370 tx_ring->ring_stats->tx_stats.prev_pkt = -1; in ice_setup_tx_ring()
374 devm_kfree(dev, tx_ring->tx_buf); in ice_setup_tx_ring()
375 tx_ring->tx_buf = NULL; in ice_setup_tx_ring()
376 return -ENOMEM; in ice_setup_tx_ring()
380 * ice_clean_rx_ring - Free Rx buffers
385 struct xdp_buff *xdp = &rx_ring->xdp; in ice_clean_rx_ring() local
386 struct device *dev = rx_ring->dev; in ice_clean_rx_ring()
391 if (!rx_ring->rx_buf) in ice_clean_rx_ring()
394 if (rx_ring->xsk_pool) { in ice_clean_rx_ring()
399 if (xdp->data) { in ice_clean_rx_ring()
400 xdp_return_buff(xdp); in ice_clean_rx_ring()
401 xdp->data = NULL; in ice_clean_rx_ring()
404 /* Free all the Rx ring sk_buffs */ in ice_clean_rx_ring()
405 for (i = 0; i < rx_ring->count; i++) { in ice_clean_rx_ring()
406 struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i]; in ice_clean_rx_ring()
408 if (!rx_buf->page) in ice_clean_rx_ring()
414 dma_sync_single_range_for_cpu(dev, rx_buf->dma, in ice_clean_rx_ring()
415 rx_buf->page_offset, in ice_clean_rx_ring()
416 rx_ring->rx_buf_len, in ice_clean_rx_ring()
420 dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring), in ice_clean_rx_ring()
422 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias); in ice_clean_rx_ring()
424 rx_buf->page = NULL; in ice_clean_rx_ring()
425 rx_buf->page_offset = 0; in ice_clean_rx_ring()
429 if (rx_ring->xsk_pool) in ice_clean_rx_ring()
430 memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf))); in ice_clean_rx_ring()
432 memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf))); in ice_clean_rx_ring()
435 size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), in ice_clean_rx_ring()
437 memset(rx_ring->desc, 0, size); in ice_clean_rx_ring()
439 rx_ring->next_to_alloc = 0; in ice_clean_rx_ring()
440 rx_ring->next_to_clean = 0; in ice_clean_rx_ring()
441 rx_ring->first_desc = 0; in ice_clean_rx_ring()
442 rx_ring->next_to_use = 0; in ice_clean_rx_ring()
446 * ice_free_rx_ring - Free Rx resources
456 if (rx_ring->vsi->type == ICE_VSI_PF) in ice_free_rx_ring()
457 if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) in ice_free_rx_ring()
458 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in ice_free_rx_ring()
459 rx_ring->xdp_prog = NULL; in ice_free_rx_ring()
460 if (rx_ring->xsk_pool) { in ice_free_rx_ring()
461 kfree(rx_ring->xdp_buf); in ice_free_rx_ring()
462 rx_ring->xdp_buf = NULL; in ice_free_rx_ring()
464 kfree(rx_ring->rx_buf); in ice_free_rx_ring()
465 rx_ring->rx_buf = NULL; in ice_free_rx_ring()
468 if (rx_ring->desc) { in ice_free_rx_ring()
469 size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), in ice_free_rx_ring()
471 dmam_free_coherent(rx_ring->dev, size, in ice_free_rx_ring()
472 rx_ring->desc, rx_ring->dma); in ice_free_rx_ring()
473 rx_ring->desc = NULL; in ice_free_rx_ring()
478 * ice_setup_rx_ring - Allocate the Rx descriptors
479 * @rx_ring: the Rx ring to set up
485 struct device *dev = rx_ring->dev; in ice_setup_rx_ring()
489 return -ENOMEM; in ice_setup_rx_ring()
492 WARN_ON(rx_ring->rx_buf); in ice_setup_rx_ring()
493 rx_ring->rx_buf = in ice_setup_rx_ring()
494 kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL); in ice_setup_rx_ring()
495 if (!rx_ring->rx_buf) in ice_setup_rx_ring()
496 return -ENOMEM; in ice_setup_rx_ring()
499 size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), in ice_setup_rx_ring()
501 rx_ring->desc = dmam_alloc_coherent(dev, size, &rx_ring->dma, in ice_setup_rx_ring()
503 if (!rx_ring->desc) { in ice_setup_rx_ring()
504 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n", in ice_setup_rx_ring()
509 rx_ring->next_to_use = 0; in ice_setup_rx_ring()
510 rx_ring->next_to_clean = 0; in ice_setup_rx_ring()
511 rx_ring->first_desc = 0; in ice_setup_rx_ring()
513 if (ice_is_xdp_ena_vsi(rx_ring->vsi)) in ice_setup_rx_ring()
514 WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog); in ice_setup_rx_ring()
519 kfree(rx_ring->rx_buf); in ice_setup_rx_ring()
520 rx_ring->rx_buf = NULL; in ice_setup_rx_ring()
521 return -ENOMEM; in ice_setup_rx_ring()
526 * @rx_ring: ptr to Rx ring
538 truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */ in ice_rx_frame_truesize()
540 truesize = rx_ring->rx_offset ? in ice_rx_frame_truesize()
541 SKB_DATA_ALIGN(rx_ring->rx_offset + size) + in ice_rx_frame_truesize()
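/*
 * Illustrative sketch, not part of ice_txrx.c: the two truesize strategies
 * that ice_rx_frame_truesize() above chooses between.  With 4K pages each
 * page is split into two power-of-two halves; with larger pages the
 * legacy-rx path sizes the buffer as headroom + frame, cache-line aligned,
 * plus the skb_shared_info tail.  ALIGN_UP() and DEMO_SHINFO_SIZE are local
 * stand-ins for SKB_DATA_ALIGN() and sizeof(struct skb_shared_info), so the
 * exact numbers are assumptions for demonstration only.
 */
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((unsigned int)((a) - 1)))
#define DEMO_SHINFO_SIZE	320u	/* assumed skb_shared_info footprint */

static unsigned int demo_rx_frame_truesize(unsigned int page_size,
					   unsigned int rx_offset,
					   unsigned int frame_size)
{
	if (page_size <= 4096)
		return page_size / 2;		/* power-of-two split */

	return rx_offset ?
	       ALIGN_UP(rx_offset + frame_size, 64) + DEMO_SHINFO_SIZE :
	       ALIGN_UP(frame_size, 64);
}
/* e.g. demo_rx_frame_truesize(4096, 0, 1500) == 2048 */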
549 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
550 * @rx_ring: Rx ring
551 * @xdp: xdp_buff used as input to the XDP program
552 * @xdp_prog: XDP program to run
554 * @rx_buf: Rx buffer to store the XDP action
555 * @eop_desc: Last descriptor in packet to read metadata from
560 ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, in ice_run_xdp() argument
570 ice_xdp_meta_set_desc(xdp, eop_desc); in ice_run_xdp()
572 act = bpf_prog_run_xdp(xdp_prog, xdp); in ice_run_xdp()
578 spin_lock(&xdp_ring->tx_lock); in ice_run_xdp()
579 ret = __ice_xmit_xdp_ring(xdp, xdp_ring, false); in ice_run_xdp()
581 spin_unlock(&xdp_ring->tx_lock); in ice_run_xdp()
586 if (xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog)) in ice_run_xdp()
591 bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); in ice_run_xdp()
595 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); in ice_run_xdp()
601 ice_set_rx_bufs_act(xdp, rx_ring, ret); in ice_run_xdp()
605 * ice_xmit_xdp_ring - submit frame to XDP ring for transmission
606 * @xdpf: XDP frame that will be converted to XDP buff
607 * @xdp_ring: XDP ring for transmission
612 struct xdp_buff xdp; in ice_xmit_xdp_ring() local
614 xdp.data_hard_start = (void *)xdpf; in ice_xmit_xdp_ring()
615 xdp.data = xdpf->data; in ice_xmit_xdp_ring()
616 xdp.data_end = xdp.data + xdpf->len; in ice_xmit_xdp_ring()
617 xdp.frame_sz = xdpf->frame_sz; in ice_xmit_xdp_ring()
618 xdp.flags = xdpf->flags; in ice_xmit_xdp_ring()
620 return __ice_xmit_xdp_ring(&xdp, xdp_ring, true); in ice_xmit_xdp_ring()
624 * ice_xdp_xmit - submit packets to XDP ring for transmission
626 * @n: number of XDP frames to be transmitted
627 * @frames: XDP frames to be transmitted
631  * will be freed by the XDP core.
632  * For error cases, a negative errno code is returned and no frames
641 struct ice_vsi *vsi = np->vsi; in ice_xdp_xmit()
646 if (test_bit(ICE_VSI_DOWN, vsi->state)) in ice_xdp_xmit()
647 return -ENETDOWN; in ice_xdp_xmit()
650 return -ENXIO; in ice_xdp_xmit()
653 return -EINVAL; in ice_xdp_xmit()
656 queue_index %= vsi->num_xdp_txq; in ice_xdp_xmit()
657 xdp_ring = vsi->xdp_rings[queue_index]; in ice_xdp_xmit()
658 spin_lock(&xdp_ring->tx_lock); in ice_xdp_xmit()
661 if (unlikely(queue_index >= vsi->num_xdp_txq)) in ice_xdp_xmit()
662 return -ENXIO; in ice_xdp_xmit()
663 xdp_ring = vsi->xdp_rings[queue_index]; in ice_xdp_xmit()
666 tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use]; in ice_xdp_xmit()
677 tx_buf->rs_idx = ice_set_rs_bit(xdp_ring); in ice_xdp_xmit()
682 spin_unlock(&xdp_ring->tx_lock); in ice_xdp_xmit()
688 * ice_alloc_mapped_page - recycle or make a new page
698 struct page *page = bi->page; in ice_alloc_mapped_page()
708 rx_ring->ring_stats->rx_stats.alloc_page_failed++; in ice_alloc_mapped_page()
713 dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring), in ice_alloc_mapped_page()
719 if (dma_mapping_error(rx_ring->dev, dma)) { in ice_alloc_mapped_page()
721 rx_ring->ring_stats->rx_stats.alloc_page_failed++; in ice_alloc_mapped_page()
725 bi->dma = dma; in ice_alloc_mapped_page()
726 bi->page = page; in ice_alloc_mapped_page()
727 bi->page_offset = rx_ring->rx_offset; in ice_alloc_mapped_page()
728 page_ref_add(page, USHRT_MAX - 1); in ice_alloc_mapped_page()
729 bi->pagecnt_bias = USHRT_MAX; in ice_alloc_mapped_page()
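/*
 * Illustrative sketch, not part of ice_txrx.c: the pagecnt_bias bookkeeping
 * that ice_alloc_mapped_page() above sets up.  Instead of touching the
 * struct page refcount every time half of the page is handed to the stack,
 * the driver takes USHRT_MAX - 1 references once and keeps a local "bias";
 * the number of references currently owned by the stack is the page
 * refcount minus that bias.  The struct and helpers below are invented for
 * the demo and only model the arithmetic.
 */
#include <limits.h>

struct demo_rx_page {
	unsigned int refcount;		/* stands in for page_count(page) */
	unsigned int pagecnt_bias;	/* references still held by the driver */
};

static void demo_prime_page(struct demo_rx_page *p)
{
	p->refcount = 1;			/* fresh page from the allocator */
	p->refcount += USHRT_MAX - 1;		/* page_ref_add(page, USHRT_MAX - 1) */
	p->pagecnt_bias = USHRT_MAX;		/* driver owns every reference */
}

static unsigned int demo_stack_owned_refs(const struct demo_rx_page *p)
{
	return p->refcount - p->pagecnt_bias;	/* 0 means the page is all ours */
}
/*
 * Handing a buffer to the stack is then just a "pagecnt_bias--" (see
 * ice_get_rx_buf() further down), and the page stays recyclable as long as
 * demo_stack_owned_refs() does not exceed one.
 */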
735 * ice_alloc_rx_bufs - Replace used receive buffers
743 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
750 u16 ntu = rx_ring->next_to_use; in ice_alloc_rx_bufs()
754 if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) || in ice_alloc_rx_bufs()
758 /* get the Rx descriptor and buffer based on next_to_use */ in ice_alloc_rx_bufs()
760 bi = &rx_ring->rx_buf[ntu]; in ice_alloc_rx_bufs()
768 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, in ice_alloc_rx_bufs()
769 bi->page_offset, in ice_alloc_rx_bufs()
770 rx_ring->rx_buf_len, in ice_alloc_rx_bufs()
774 * because each write-back erases this info. in ice_alloc_rx_bufs()
776 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); in ice_alloc_rx_bufs()
781 if (unlikely(ntu == rx_ring->count)) { in ice_alloc_rx_bufs()
783 bi = rx_ring->rx_buf; in ice_alloc_rx_bufs()
788 rx_desc->wb.status_error0 = 0; in ice_alloc_rx_bufs()
790 cleaned_count--; in ice_alloc_rx_bufs()
793 if (rx_ring->next_to_use != ntu) in ice_alloc_rx_bufs()
800 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
801 * @rx_buf: Rx buffer to adjust
804 * Update the offset within page so that Rx buf will be ready to be reused.
806 * so the second half of page assigned to Rx buffer will be used, otherwise
814 rx_buf->page_offset ^= size; in ice_rx_buf_adjust_pg_offset()
817 rx_buf->page_offset += size; in ice_rx_buf_adjust_pg_offset()
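/*
 * Illustrative sketch, not part of ice_txrx.c: the offset update performed
 * by ice_rx_buf_adjust_pg_offset() above.  When a 4K page is split into two
 * 2K buffers, XOR-ing the offset with the truesize toggles between the two
 * halves (0 <-> 2048); on larger pages the offset simply walks forward.
 */
static void demo_adjust_pg_offset(unsigned int *page_offset,
				  unsigned int truesize, int page_is_split)
{
	if (page_is_split)
		*page_offset ^= truesize;	/* flip to the other half */
	else
		*page_offset += truesize;	/* move past the chunk just used */
}
/* demo_adjust_pg_offset(&off, 2048, 1) turns off == 0 into 2048 and back. */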
822 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
833 unsigned int pagecnt_bias = rx_buf->pagecnt_bias; in ice_can_reuse_rx_page()
834 struct page *page = rx_buf->page; in ice_can_reuse_rx_page()
836 /* avoid re-using remote and pfmemalloc pages */ in ice_can_reuse_rx_page()
842 if (unlikely(rx_buf->pgcnt - pagecnt_bias > 1)) in ice_can_reuse_rx_page()
846 (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048) in ice_can_reuse_rx_page()
847 if (rx_buf->page_offset > ICE_LAST_OFFSET) in ice_can_reuse_rx_page()
856 page_ref_add(page, USHRT_MAX - 1); in ice_can_reuse_rx_page()
857 rx_buf->pagecnt_bias = USHRT_MAX; in ice_can_reuse_rx_page()
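/*
 * Illustrative sketch, not part of ice_txrx.c: the recycling test performed
 * by ice_can_reuse_rx_page() above, with plain integers instead of a struct
 * page.  A buffer's page may be flipped and reused only if nobody but the
 * driver still references it (refcount minus pagecnt_bias is at most one),
 * the page passes the kernel's reusability check (not pfmemalloc/remote),
 * and the offset has not walked past the last usable slot.  "last_offset"
 * stands in for the ICE_LAST_OFFSET value shown above.
 */
static int demo_can_reuse_rx_page(unsigned int refcount,
				  unsigned int pagecnt_bias,
				  unsigned int page_offset,
				  unsigned int last_offset,
				  int page_is_reusable)
{
	if (!page_is_reusable)
		return 0;		/* never recycle pfmemalloc/remote pages */

	if (refcount - pagecnt_bias > 1)
		return 0;		/* the stack still holds a reference */

	if (page_offset > last_offset)
		return 0;		/* no room left for another buffer */

	return 1;
}
/*
 * When the test passes but pagecnt_bias has dropped near zero, the driver
 * tops the refcount back up with page_ref_add(page, USHRT_MAX - 1), as the
 * last two lines of ice_can_reuse_rx_page() above show.
 */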
864 * ice_add_xdp_frag - Add contents of Rx buffer to xdp buf as a frag
865 * @rx_ring: Rx descriptor ring to transact packets on
866 * @xdp: xdp buff to place the data into
870 * This function will add the data contained in rx_buf->page to the xdp buf.
874 ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, in ice_add_xdp_frag() argument
877 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); in ice_add_xdp_frag()
882 if (!xdp_buff_has_frags(xdp)) { in ice_add_xdp_frag()
883 sinfo->nr_frags = 0; in ice_add_xdp_frag()
884 sinfo->xdp_frags_size = 0; in ice_add_xdp_frag()
885 xdp_buff_set_frags_flag(xdp); in ice_add_xdp_frag()
888 if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) { in ice_add_xdp_frag()
889 ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED); in ice_add_xdp_frag()
890 return -ENOMEM; in ice_add_xdp_frag()
893 __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page, in ice_add_xdp_frag()
894 rx_buf->page_offset, size); in ice_add_xdp_frag()
895 sinfo->xdp_frags_size += size; in ice_add_xdp_frag()
896 /* remember frag count before XDP prog execution; bpf_xdp_adjust_tail() in ice_add_xdp_frag()
899 rx_ring->nr_frags = sinfo->nr_frags; in ice_add_xdp_frag()
901 if (page_is_pfmemalloc(rx_buf->page)) in ice_add_xdp_frag()
902 xdp_buff_set_frag_pfmemalloc(xdp); in ice_add_xdp_frag()
908 * ice_reuse_rx_page - page flip buffer and store it back on the ring
909 * @rx_ring: Rx descriptor ring to store buffers on
917 u16 nta = rx_ring->next_to_alloc; in ice_reuse_rx_page()
920 new_buf = &rx_ring->rx_buf[nta]; in ice_reuse_rx_page()
924 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in ice_reuse_rx_page()
930 new_buf->dma = old_buf->dma; in ice_reuse_rx_page()
931 new_buf->page = old_buf->page; in ice_reuse_rx_page()
932 new_buf->page_offset = old_buf->page_offset; in ice_reuse_rx_page()
933 new_buf->pagecnt_bias = old_buf->pagecnt_bias; in ice_reuse_rx_page()
937 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
938 * @rx_ring: Rx descriptor ring to transact packets on
942 * This function will pull an Rx buffer from the ring and synchronize it
951 rx_buf = &rx_ring->rx_buf[ntc]; in ice_get_rx_buf()
952 rx_buf->pgcnt = in ice_get_rx_buf()
954 page_count(rx_buf->page); in ice_get_rx_buf()
958 prefetchw(rx_buf->page); in ice_get_rx_buf()
963 dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, in ice_get_rx_buf()
964 rx_buf->page_offset, size, in ice_get_rx_buf()
968 rx_buf->pagecnt_bias--; in ice_get_rx_buf()
974 * ice_build_skb - Build skb around an existing buffer
975 * @rx_ring: Rx descriptor ring to transact packets on
976 * @xdp: xdp_buff pointing to the data
978 * This function builds an skb around an existing XDP buffer, taking care
983 ice_build_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp) in ice_build_skb() argument
985 u8 metasize = xdp->data - xdp->data_meta; in ice_build_skb()
990 if (unlikely(xdp_buff_has_frags(xdp))) { in ice_build_skb()
991 sinfo = xdp_get_shared_info_from_buff(xdp); in ice_build_skb()
992 nr_frags = sinfo->nr_frags; in ice_build_skb()
995 /* Prefetch first cache line of first page. If xdp->data_meta in ice_build_skb()
996  * is unused, this points exactly to xdp->data, otherwise we in ice_build_skb()
1000 net_prefetch(xdp->data_meta); in ice_build_skb()
1002 skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz); in ice_build_skb()
1006  /* must record the Rx queue, otherwise OS features such as in ice_build_skb()
1009 skb_record_rx_queue(skb, rx_ring->q_index); in ice_build_skb()
1012 skb_reserve(skb, xdp->data - xdp->data_hard_start); in ice_build_skb()
1013 __skb_put(skb, xdp->data_end - xdp->data); in ice_build_skb()
1017 if (unlikely(xdp_buff_has_frags(xdp))) in ice_build_skb()
1019 sinfo->xdp_frags_size, in ice_build_skb()
1020 nr_frags * xdp->frame_sz, in ice_build_skb()
1021 xdp_buff_is_frag_pfmemalloc(xdp)); in ice_build_skb()
1027 * ice_construct_skb - Allocate skb and populate it
1028 * @rx_ring: Rx descriptor ring to transact packets on
1029 * @xdp: xdp_buff pointing to the data
1036 ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp) in ice_construct_skb() argument
1038 unsigned int size = xdp->data_end - xdp->data; in ice_construct_skb()
1046 net_prefetch(xdp->data); in ice_construct_skb()
1048 if (unlikely(xdp_buff_has_frags(xdp))) { in ice_construct_skb()
1049 sinfo = xdp_get_shared_info_from_buff(xdp); in ice_construct_skb()
1050 nr_frags = sinfo->nr_frags; in ice_construct_skb()
1054 skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE, in ice_construct_skb()
1059 rx_buf = &rx_ring->rx_buf[rx_ring->first_desc]; in ice_construct_skb()
1060 skb_record_rx_queue(skb, rx_ring->q_index); in ice_construct_skb()
1064 headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE); in ice_construct_skb()
1067 memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, in ice_construct_skb()
1071 size -= headlen; in ice_construct_skb()
1077 if (unlikely(nr_frags >= MAX_SKB_FRAGS - 1)) { in ice_construct_skb()
1081 skb_add_rx_frag(skb, 0, rx_buf->page, in ice_construct_skb()
1082 rx_buf->page_offset + headlen, size, in ice_construct_skb()
1083 xdp->frame_sz); in ice_construct_skb()
1088 * as-is in ice_construct_skb()
1090 rx_buf->act = ICE_SKB_CONSUMED; in ice_construct_skb()
1093 if (unlikely(xdp_buff_has_frags(xdp))) { in ice_construct_skb()
1096 memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0], in ice_construct_skb()
1099 xdp_update_skb_shared_info(skb, skinfo->nr_frags + nr_frags, in ice_construct_skb()
1100 sinfo->xdp_frags_size, in ice_construct_skb()
1101 nr_frags * xdp->frame_sz, in ice_construct_skb()
1102 xdp_buff_is_frag_pfmemalloc(xdp)); in ice_construct_skb()
1109 * ice_put_rx_buf - Clean up used buffer and either recycle or free
1110 * @rx_ring: Rx descriptor ring to transact packets on
1111 * @rx_buf: Rx buffer to pull data from
1127 dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, in ice_put_rx_buf()
1130 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias); in ice_put_rx_buf()
1134 rx_buf->page = NULL; in ice_put_rx_buf()
1138 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
1139 * @rx_ring: Rx descriptor ring to transact packets on
1142 * This function provides a "bounce buffer" approach to Rx interrupt
1152 unsigned int offset = rx_ring->rx_offset; in ice_clean_rx_irq()
1153 struct xdp_buff *xdp = &rx_ring->xdp; in ice_clean_rx_irq() local
1154 u32 cached_ntc = rx_ring->first_desc; in ice_clean_rx_irq()
1157 u32 ntc = rx_ring->next_to_clean; in ice_clean_rx_irq()
1158 u32 cnt = rx_ring->count; in ice_clean_rx_irq()
1166 xdp->frame_sz = ice_rx_frame_truesize(rx_ring, 0); in ice_clean_rx_irq()
1169 xdp_prog = READ_ONCE(rx_ring->xdp_prog); in ice_clean_rx_irq()
1171 xdp_ring = rx_ring->xdp_ring; in ice_clean_rx_irq()
1172 cached_ntu = xdp_ring->next_to_use; in ice_clean_rx_irq()
1175 /* start the loop to process Rx packets bounded by 'budget' */ in ice_clean_rx_irq()
1184 /* get the Rx desc from Rx ring based on 'next_to_clean' */ in ice_clean_rx_irq()
1190 * hardware wrote DD then it will be non-zero in ice_clean_rx_irq()
1193 if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits)) in ice_clean_rx_irq()
1203 if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) { in ice_clean_rx_irq()
1204 struct ice_vsi *ctrl_vsi = rx_ring->vsi; in ice_clean_rx_irq()
1206 if (rx_desc->wb.rxdid == FDIR_DESC_RXDID && in ice_clean_rx_irq()
1207 ctrl_vsi->vf) in ice_clean_rx_irq()
1211 rx_ring->first_desc = ntc; in ice_clean_rx_irq()
1215 size = le16_to_cpu(rx_desc->wb.pkt_len) & in ice_clean_rx_irq()
1221 if (!xdp->data) { in ice_clean_rx_irq()
1224 hard_start = page_address(rx_buf->page) + rx_buf->page_offset - in ice_clean_rx_irq()
1226 xdp_prepare_buff(xdp, hard_start, offset, size, !!offset); in ice_clean_rx_irq()
1229 xdp->frame_sz = ice_rx_frame_truesize(rx_ring, size); in ice_clean_rx_irq()
1231 xdp_buff_clear_frags_flag(xdp); in ice_clean_rx_irq()
1232 } else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) { in ice_clean_rx_irq()
1242 ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf, rx_desc); in ice_clean_rx_irq()
1243 if (rx_buf->act == ICE_XDP_PASS) in ice_clean_rx_irq()
1245 total_rx_bytes += xdp_get_buff_len(xdp); in ice_clean_rx_irq()
1248 xdp->data = NULL; in ice_clean_rx_irq()
1249 rx_ring->first_desc = ntc; in ice_clean_rx_irq()
1250 rx_ring->nr_frags = 0; in ice_clean_rx_irq()
1254 skb = ice_build_skb(rx_ring, xdp); in ice_clean_rx_irq()
1256 skb = ice_construct_skb(rx_ring, xdp); in ice_clean_rx_irq()
1259 rx_ring->ring_stats->rx_stats.alloc_page_failed++; in ice_clean_rx_irq()
1260 rx_buf->act = ICE_XDP_CONSUMED; in ice_clean_rx_irq()
1261 if (unlikely(xdp_buff_has_frags(xdp))) in ice_clean_rx_irq()
1262 ice_set_rx_bufs_act(xdp, rx_ring, in ice_clean_rx_irq()
1264 xdp->data = NULL; in ice_clean_rx_irq()
1265 rx_ring->first_desc = ntc; in ice_clean_rx_irq()
1266 rx_ring->nr_frags = 0; in ice_clean_rx_irq()
1269 xdp->data = NULL; in ice_clean_rx_irq()
1270 rx_ring->first_desc = ntc; in ice_clean_rx_irq()
1271 rx_ring->nr_frags = 0; in ice_clean_rx_irq()
1274 if (unlikely(ice_test_staterr(rx_desc->wb.status_error0, in ice_clean_rx_irq()
1287 total_rx_bytes += skb->len; in ice_clean_rx_irq()
1300 first = rx_ring->first_desc; in ice_clean_rx_irq()
1302 struct ice_rx_buf *buf = &rx_ring->rx_buf[cached_ntc]; in ice_clean_rx_irq()
1304 if (buf->act & (ICE_XDP_TX | ICE_XDP_REDIR)) { in ice_clean_rx_irq()
1305 ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz); in ice_clean_rx_irq()
1306 xdp_xmit |= buf->act; in ice_clean_rx_irq()
1307 } else if (buf->act & ICE_XDP_CONSUMED) { in ice_clean_rx_irq()
1308 buf->pagecnt_bias++; in ice_clean_rx_irq()
1309 } else if (buf->act == ICE_XDP_PASS) { in ice_clean_rx_irq()
1310 ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz); in ice_clean_rx_irq()
1317 rx_ring->next_to_clean = ntc; in ice_clean_rx_irq()
1324 if (rx_ring->ring_stats) in ice_clean_rx_irq()
1345 ring_stats = tx_ring->ring_stats; in __ice_update_sample()
1348 packets += ring_stats->stats.pkts; in __ice_update_sample()
1349 bytes += ring_stats->stats.bytes; in __ice_update_sample()
1357 ring_stats = rx_ring->ring_stats; in __ice_update_sample()
1360 packets += ring_stats->stats.pkts; in __ice_update_sample()
1361 bytes += ring_stats->stats.bytes; in __ice_update_sample()
1365 dim_update_sample(q_vector->total_events, packets, bytes, sample); in __ice_update_sample()
1366 sample->comp_ctr = 0; in __ice_update_sample()
1373 if (ktime_ms_delta(sample->time, rc->dim.start_sample.time) >= 1000) in __ice_update_sample()
1374 rc->dim.state = DIM_START_MEASURE; in __ice_update_sample()
1378 * ice_net_dim - Update net DIM algorithm
1384 * This function is a no-op if the ring is not configured to dynamic ITR.
1388 struct ice_ring_container *tx = &q_vector->tx; in ice_net_dim()
1389 struct ice_ring_container *rx = &q_vector->rx; in ice_net_dim() local
1395 net_dim(&tx->dim, dim_sample); in ice_net_dim()
1398 if (ITR_IS_DYNAMIC(rx)) { in ice_net_dim()
1401 __ice_update_sample(q_vector, rx, &dim_sample, false); in ice_net_dim()
1402 net_dim(&rx->dim, dim_sample); in ice_net_dim()
1407 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
1415 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this in ice_buildreg_itr()
1424 (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)); in ice_buildreg_itr()
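/*
 * Illustrative sketch, not part of ice_txrx.c: how ice_buildreg_itr() packs
 * an ITR index and an interval in microseconds into a single GLINT_DYN_CTL
 * value.  The hardware interval granularity is 2 usec, so shifting the
 * microsecond value by (INTERVAL_S - ITR_GRAN_S) divides it by two and
 * places it in the register field with one shift, exactly as the comment
 * above describes.  The bit positions below are assumed for the demo and
 * are not copied from the register headers.
 */
#define DEMO_INTENA_M		0x1u	/* interrupt enable */
#define DEMO_CLEARPBA_M		0x2u	/* clear pending events */
#define DEMO_ITR_INDX_S		3	/* assumed ITR index field position */
#define DEMO_INTERVAL_S		5	/* assumed interval field position */
#define DEMO_ITR_GRAN_S		1	/* 2 usec granularity */

static unsigned int demo_buildreg_itr(unsigned int itr_idx,
				      unsigned int itr_usecs)
{
	return DEMO_INTENA_M | DEMO_CLEARPBA_M |
	       (itr_idx << DEMO_ITR_INDX_S) |
	       (itr_usecs << (DEMO_INTERVAL_S - DEMO_ITR_GRAN_S));
}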
1428 * ice_enable_interrupt - re-enable MSI-X interrupt
1431 * If the VSI is down, the interrupt will not be re-enabled. Also,
1437 struct ice_vsi *vsi = q_vector->vsi; in ice_enable_interrupt()
1438 bool wb_en = q_vector->wb_on_itr; in ice_enable_interrupt()
1441 if (test_bit(ICE_DOWN, vsi->state)) in ice_enable_interrupt()
1452 q_vector->wb_on_itr = false; in ice_enable_interrupt()
1465 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val); in ice_enable_interrupt()
1469 * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
1472 * We need to tell hardware to write-back completed descriptors even when
1478 * This sets the write-back frequency to whatever was set previously for the
1484 struct ice_vsi *vsi = q_vector->vsi; in ice_set_wb_on_itr()
1487 if (q_vector->wb_on_itr) in ice_set_wb_on_itr()
1492 * be static in non-adaptive mode (user configured) in ice_set_wb_on_itr()
1494 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), in ice_set_wb_on_itr()
1499 q_vector->wb_on_itr = true; in ice_set_wb_on_itr()
1503 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
1524 ice_for_each_tx_ring(tx_ring, q_vector->tx) { in ice_napi_poll()
1527 if (tx_ring->xsk_pool) in ice_napi_poll()
1542 /* normally we have 1 Rx ring per q_vector */ in ice_napi_poll()
1543 if (unlikely(q_vector->num_ring_rx > 1)) in ice_napi_poll()
1544 /* We attempt to distribute budget to each Rx queue fairly, but in ice_napi_poll()
1548 budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1); in ice_napi_poll()
1550 /* Max of 1 Rx ring in this q_vector so give it the budget */ in ice_napi_poll()
1553 ice_for_each_rx_ring(rx_ring, q_vector->rx) { in ice_napi_poll()
1556 /* A dedicated path for zero-copy allows making a single in ice_napi_poll()
1560 cleaned = rx_ring->xsk_pool ? in ice_napi_poll()
1572 * cache-lines will still continue even if we're polling. in ice_napi_poll()
1578 /* Exit the polling mode, but don't re-enable interrupts if stack might in ice_napi_poll()
1579 * poll us due to busy-polling in ice_napi_poll()
1588 return min_t(int, work_done, budget - 1); in ice_napi_poll()
1592 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
1596 * Returns -EBUSY if a stop is needed, else 0
1606 return -EBUSY; in __ice_maybe_stop_tx()
1608 /* A reprieve! - use start_queue because it doesn't call schedule */ in __ice_maybe_stop_tx()
1610 ++tx_ring->ring_stats->tx_stats.restart_q; in __ice_maybe_stop_tx()
1615 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
1630 * ice_tx_map - Build the Tx descriptor
1644 u16 i = tx_ring->next_to_use; in ice_tx_map()
1653 td_tag = off->td_l2tag1; in ice_tx_map()
1654 td_cmd = off->td_cmd; in ice_tx_map()
1655 td_offset = off->td_offset; in ice_tx_map()
1656 skb = first->skb; in ice_tx_map()
1658 data_len = skb->data_len; in ice_tx_map()
1663 if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) { in ice_tx_map()
1665 td_tag = first->vid; in ice_tx_map()
1668 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in ice_tx_map()
1672 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { in ice_tx_map()
1675 if (dma_mapping_error(tx_ring->dev, dma)) in ice_tx_map()
1683 max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1); in ice_tx_map()
1684 tx_desc->buf_addr = cpu_to_le64(dma); in ice_tx_map()
1690 tx_desc->cmd_type_offset_bsz = in ice_tx_map()
1697 if (i == tx_ring->count) { in ice_tx_map()
1703 size -= max_data; in ice_tx_map()
1706 tx_desc->buf_addr = cpu_to_le64(dma); in ice_tx_map()
1712 tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset, in ice_tx_map()
1718 if (i == tx_ring->count) { in ice_tx_map()
1724 data_len -= size; in ice_tx_map()
1726 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in ice_tx_map()
1729 tx_buf = &tx_ring->tx_buf[i]; in ice_tx_map()
1730 tx_buf->type = ICE_TX_BUF_FRAG; in ice_tx_map()
1734 skb_tx_timestamp(first->skb); in ice_tx_map()
1737 if (i == tx_ring->count) in ice_tx_map()
1742 tx_desc->cmd_type_offset_bsz = in ice_tx_map()
1754 first->next_to_watch = tx_desc; in ice_tx_map()
1756 tx_ring->next_to_use = i; in ice_tx_map()
1761 kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount, in ice_tx_map()
1765 writel(i, tx_ring->tail); in ice_tx_map()
1772 tx_buf = &tx_ring->tx_buf[i]; in ice_tx_map()
1777 i = tx_ring->count; in ice_tx_map()
1778 i--; in ice_tx_map()
1781 tx_ring->next_to_use = i; in ice_tx_map()
1785 * ice_tx_csum - Enable Tx checksum offloads
1795 struct sk_buff *skb = first->skb; in ice_tx_csum()
1810 if (skb->ip_summed != CHECKSUM_PARTIAL) in ice_tx_csum()
1824 l2_len = ip.hdr - skb->data; in ice_tx_csum()
1830 if (ip.v4->version == 4) in ice_tx_csum()
1831 first->tx_flags |= ICE_TX_FLAGS_IPV4; in ice_tx_csum()
1832 else if (ip.v6->version == 6) in ice_tx_csum()
1833 first->tx_flags |= ICE_TX_FLAGS_IPV6; in ice_tx_csum()
1835 if (skb->encapsulation) { in ice_tx_csum()
1840 if (first->tx_flags & ICE_TX_FLAGS_IPV4) { in ice_tx_csum()
1841 tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ? in ice_tx_csum()
1844 l4_proto = ip.v4->protocol; in ice_tx_csum()
1845 } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) { in ice_tx_csum()
1850 l4_proto = ip.v6->nexthdr; in ice_tx_csum()
1851 ret = ipv6_skip_exthdr(skb, exthdr - skb->data, in ice_tx_csum()
1854 return -1; in ice_tx_csum()
1861 first->tx_flags |= ICE_TX_FLAGS_TUNNEL; in ice_tx_csum()
1865 first->tx_flags |= ICE_TX_FLAGS_TUNNEL; in ice_tx_csum()
1869 first->tx_flags |= ICE_TX_FLAGS_TUNNEL; in ice_tx_csum()
1873 if (first->tx_flags & ICE_TX_FLAGS_TSO) in ice_tx_csum()
1874 return -1; in ice_tx_csum()
1881 tunnel |= ((l4.hdr - ip.hdr) / 4) << in ice_tx_csum()
1888 tunnel |= ((ip.hdr - l4.hdr) / 2) << in ice_tx_csum()
1891 gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL; in ice_tx_csum()
1893 if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena && in ice_tx_csum()
1894 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) in ice_tx_csum()
1898 off->cd_tunnel_params |= tunnel; in ice_tx_csum()
1903 off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX; in ice_tx_csum()
1910 first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6); in ice_tx_csum()
1911 if (ip.v4->version == 4) in ice_tx_csum()
1912 first->tx_flags |= ICE_TX_FLAGS_IPV4; in ice_tx_csum()
1913 if (ip.v6->version == 6) in ice_tx_csum()
1914 first->tx_flags |= ICE_TX_FLAGS_IPV6; in ice_tx_csum()
1918 if (first->tx_flags & ICE_TX_FLAGS_IPV4) { in ice_tx_csum()
1919 l4_proto = ip.v4->protocol; in ice_tx_csum()
1923 if (first->tx_flags & ICE_TX_FLAGS_TSO) in ice_tx_csum()
1928 } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) { in ice_tx_csum()
1931 l4_proto = ip.v6->nexthdr; in ice_tx_csum()
1933 ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, in ice_tx_csum()
1936 return -1; in ice_tx_csum()
1940 l3_len = l4.hdr - ip.hdr; in ice_tx_csum()
1948 l4_len = l4.tcp->doff; in ice_tx_csum()
1965 if (first->tx_flags & ICE_TX_FLAGS_TSO) in ice_tx_csum()
1966 return -1; in ice_tx_csum()
1971 off->td_cmd |= cmd; in ice_tx_csum()
1972 off->td_offset |= offset; in ice_tx_csum()
1977 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
1987 struct sk_buff *skb = first->skb; in ice_tx_prepare_vlan_flags()
1990 if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol)) in ice_tx_prepare_vlan_flags()
1998 first->vid = skb_vlan_tag_get(skb); in ice_tx_prepare_vlan_flags()
1999 if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2) in ice_tx_prepare_vlan_flags()
2000 first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN; in ice_tx_prepare_vlan_flags()
2002 first->tx_flags |= ICE_TX_FLAGS_HW_VLAN; in ice_tx_prepare_vlan_flags()
2009 * ice_tso - computes mss and TSO length to prepare for TSO
2018 struct sk_buff *skb = first->skb; in ice_tso()
2035 if (skb->ip_summed != CHECKSUM_PARTIAL) in ice_tso()
2054 if (ip.v4->version == 4) { in ice_tso()
2055 ip.v4->tot_len = 0; in ice_tso()
2056 ip.v4->check = 0; in ice_tso()
2058 ip.v6->payload_len = 0; in ice_tso()
2061 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | in ice_tso()
2067 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && in ice_tso()
2068 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { in ice_tso()
2069 l4.udp->len = 0; in ice_tso()
2072 l4_start = (u8)(l4.hdr - skb->data); in ice_tso()
2075 paylen = skb->len - l4_start; in ice_tso()
2076 csum_replace_by_diff(&l4.udp->check, in ice_tso()
2085 if (ip.v4->version == 4) { in ice_tso()
2086 ip.v4->tot_len = 0; in ice_tso()
2087 ip.v4->check = 0; in ice_tso()
2089 ip.v6->payload_len = 0; in ice_tso()
2094 l4_start = (u8)(l4.hdr - skb->data); in ice_tso()
2097 paylen = skb->len - l4_start; in ice_tso()
2099 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { in ice_tso()
2100 csum_replace_by_diff(&l4.udp->check, in ice_tso()
2103 off->header_len = (u8)sizeof(l4.udp) + l4_start; in ice_tso()
2105 csum_replace_by_diff(&l4.tcp->check, in ice_tso()
2108 off->header_len = (u8)((l4.tcp->doff * 4) + l4_start); in ice_tso()
2112 first->gso_segs = skb_shinfo(skb)->gso_segs; in ice_tso()
2113 first->bytecount += (first->gso_segs - 1) * off->header_len; in ice_tso()
2115 cd_tso_len = skb->len - off->header_len; in ice_tso()
2116 cd_mss = skb_shinfo(skb)->gso_size; in ice_tso()
2119 off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | in ice_tso()
2123 first->tx_flags |= ICE_TX_FLAGS_TSO; in ice_tso()
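/*
 * Worked example, not part of ice_txrx.c: the accounting done at the end of
 * ice_tso() above, for an assumed TSO skb with 54 bytes of Ethernet + IPv4 +
 * TCP headers, a 1448-byte MSS and five full segments of payload:
 *
 *	skb->len   = 54 + 5 * 1448         = 7294
 *	gso_segs   = 5
 *	bytecount  = 7294 + (5 - 1) * 54   = 7510 = 5 * (1448 + 54),
 *		     i.e. the bytes that actually hit the wire once the
 *		     headers are replicated per segment
 *	cd_tso_len = skb->len - header_len = 7240 (payload only)
 *	cd_mss     = 1448
 *
 * bytecount is what later feeds BQL and the Tx byte counters in
 * ice_tx_map()/ice_clean_tx_irq(), which is why the header bytes of the
 * extra segments are added here rather than at completion time.
 */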
2128 * ice_txd_use_count - estimate the number of descriptors needed for Tx
2133 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
2144 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
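/*
 * Illustrative sketch, not part of this extract: the multiply-and-shift
 * division that the ice_txd_use_count() comment above walks through.  The
 * driver assumes at most 12K of data per descriptor and replaces the
 * divide-by-12K with "multiply by 85, shift right by 20" (shift by 12 for
 * the /4K, then *85 >> 8 to approximate the /3).  The function body is not
 * shown above, so treat this as a reconstruction of the described
 * arithmetic rather than the driver's literal code.
 */
static unsigned int demo_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;
}
/*
 * demo_txd_use_count(12000) == 1
 * demo_txd_use_count(12289) == 1  (underestimates just past a 12K multiple;
 *                                  the 4K - 1 of wiggle room noted above
 *                                  makes this safe)
 * demo_txd_use_count(65536) == 6
 */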
2161 * ice_xmit_desc_count - calculate number of Tx descriptors needed
2168 const skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; in ice_xmit_desc_count()
2169 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; in ice_xmit_desc_count()
2175 if (!nr_frags--) in ice_xmit_desc_count()
2185 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
2203 nr_frags = skb_shinfo(skb)->nr_frags; in __ice_chk_linearize()
2204 if (nr_frags < (ICE_MAX_BUF_TXD - 1)) in __ice_chk_linearize()
2210 nr_frags -= ICE_MAX_BUF_TXD - 2; in __ice_chk_linearize()
2211 frag = &skb_shinfo(skb)->frags[0]; in __ice_chk_linearize()
2219 sum = 1 - skb_shinfo(skb)->gso_size; in __ice_chk_linearize()
2231 for (stale = &skb_shinfo(skb)->frags[0];; stale++) { in __ice_chk_linearize()
2243 int align_pad = -(skb_frag_off(stale)) & in __ice_chk_linearize()
2244 (ICE_MAX_READ_REQ_SIZE - 1); in __ice_chk_linearize()
2246 sum -= align_pad; in __ice_chk_linearize()
2247 stale_size -= align_pad; in __ice_chk_linearize()
2250 sum -= ICE_MAX_DATA_PER_TXD_ALIGNED; in __ice_chk_linearize()
2251 stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED; in __ice_chk_linearize()
2259 if (!nr_frags--) in __ice_chk_linearize()
2262 sum -= stale_size; in __ice_chk_linearize()
2269 * ice_chk_linearize - Check if there are more than 8 fragments per packet
2273 * Note: Our HW can't scatter-gather more than 8 fragments to build
2291 * ice_tstamp - set up context descriptor for hardware timestamp
2304 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) in ice_tstamp()
2308 if (first->tx_flags & ICE_TX_FLAGS_TSO) in ice_tstamp()
2312 idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb); in ice_tstamp()
2314 tx_ring->vsi->back->ptp.tx_hwtstamp_skipped++; in ice_tstamp()
2318 off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | in ice_tstamp()
2321 first->tx_flags |= ICE_TX_FLAGS_TSYN; in ice_tstamp()
2325 * ice_xmit_frame_ring - Sends buffer on Tx ring
2335 struct ice_vsi *vsi = tx_ring->vsi; in ice_xmit_frame_ring()
2350 count = ice_txd_use_count(skb->len); in ice_xmit_frame_ring()
2351 tx_ring->ring_stats->tx_stats.tx_linearize++; in ice_xmit_frame_ring()
2362 tx_ring->ring_stats->tx_stats.tx_busy++; in ice_xmit_frame_ring()
2372 first = &tx_ring->tx_buf[tx_ring->next_to_use]; in ice_xmit_frame_ring()
2373 first->skb = skb; in ice_xmit_frame_ring()
2374 first->type = ICE_TX_BUF_SKB; in ice_xmit_frame_ring()
2375 first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN); in ice_xmit_frame_ring()
2376 first->gso_segs = 1; in ice_xmit_frame_ring()
2377 first->tx_flags = 0; in ice_xmit_frame_ring()
2381 if (first->tx_flags & ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN) { in ice_xmit_frame_ring()
2385 offload.cd_l2tag2 = first->vid; in ice_xmit_frame_ring()
2400 if (unlikely((skb->priority == TC_PRIO_CONTROL || in ice_xmit_frame_ring()
2401 eth->h_proto == htons(ETH_P_LLDP)) && in ice_xmit_frame_ring()
2402 vsi->type == ICE_VSI_PF && in ice_xmit_frame_ring()
2403 vsi->port_info->qos_cfg.is_sw_lldp)) in ice_xmit_frame_ring()
2409 if (ice_is_switchdev_running(vsi->back)) in ice_xmit_frame_ring()
2414 u16 i = tx_ring->next_to_use; in ice_xmit_frame_ring()
2419 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in ice_xmit_frame_ring()
2422 cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params); in ice_xmit_frame_ring()
2423 cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2); in ice_xmit_frame_ring()
2424 cdesc->rsvd = cpu_to_le16(0); in ice_xmit_frame_ring()
2425 cdesc->qw1 = cpu_to_le64(offload.cd_qw1); in ice_xmit_frame_ring()
2438 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
2447 struct ice_vsi *vsi = np->vsi; in ice_start_xmit()
2450 tx_ring = vsi->tx_rings[skb->queue_mapping]; in ice_start_xmit()
2462 * ice_get_dscp_up - return the UP/TC value for a SKB
2472 if (skb->protocol == htons(ETH_P_IP)) in ice_get_dscp_up()
2474 else if (skb->protocol == htons(ETH_P_IPV6)) in ice_get_dscp_up()
2477 return dcbcfg->dscp_map[dscp]; in ice_get_dscp_up()
2487 dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; in ice_select_queue()
2488 if (dcbcfg->pfc_mode == ICE_QOS_MODE_DSCP) in ice_select_queue()
2489 skb->priority = ice_get_dscp_up(dcbcfg, skb); in ice_select_queue()
2495 * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
2500 struct ice_vsi *vsi = tx_ring->vsi; in ice_clean_ctrl_tx_irq()
2501 s16 i = tx_ring->next_to_clean; in ice_clean_ctrl_tx_irq()
2506 tx_buf = &tx_ring->tx_buf[i]; in ice_clean_ctrl_tx_irq()
2508 i -= tx_ring->count; in ice_clean_ctrl_tx_irq()
2511 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch; in ice_clean_ctrl_tx_irq()
2521 if (!(eop_desc->cmd_type_offset_bsz & in ice_clean_ctrl_tx_irq()
2526 tx_buf->next_to_watch = NULL; in ice_clean_ctrl_tx_irq()
2527 tx_desc->buf_addr = 0; in ice_clean_ctrl_tx_irq()
2528 tx_desc->cmd_type_offset_bsz = 0; in ice_clean_ctrl_tx_irq()
2535 i -= tx_ring->count; in ice_clean_ctrl_tx_irq()
2536 tx_buf = tx_ring->tx_buf; in ice_clean_ctrl_tx_irq()
2542 dma_unmap_single(tx_ring->dev, in ice_clean_ctrl_tx_irq()
2546 if (tx_buf->type == ICE_TX_BUF_DUMMY) in ice_clean_ctrl_tx_irq()
2547 devm_kfree(tx_ring->dev, tx_buf->raw_buf); in ice_clean_ctrl_tx_irq()
2550 tx_buf->type = ICE_TX_BUF_EMPTY; in ice_clean_ctrl_tx_irq()
2551 tx_buf->tx_flags = 0; in ice_clean_ctrl_tx_irq()
2552 tx_buf->next_to_watch = NULL; in ice_clean_ctrl_tx_irq()
2554 tx_desc->buf_addr = 0; in ice_clean_ctrl_tx_irq()
2555 tx_desc->cmd_type_offset_bsz = 0; in ice_clean_ctrl_tx_irq()
2562 i -= tx_ring->count; in ice_clean_ctrl_tx_irq()
2563 tx_buf = tx_ring->tx_buf; in ice_clean_ctrl_tx_irq()
2567 budget--; in ice_clean_ctrl_tx_irq()
2570 i += tx_ring->count; in ice_clean_ctrl_tx_irq()
2571 tx_ring->next_to_clean = i; in ice_clean_ctrl_tx_irq()
2573 /* re-enable interrupt if needed */ in ice_clean_ctrl_tx_irq()
2574 ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]); in ice_clean_ctrl_tx_irq()