1ae06c70bSJeff Kirsher // SPDX-License-Identifier: GPL-2.0
251dce24bSJeff Kirsher /* Copyright(c) 2013 - 2018 Intel Corporation. */
37f12ad74SGreg Rose
43314f209SJesse Brandeburg #include <linux/bitfield.h>
5306ec721SAlexander Lobakin #include <linux/net/intel/libie/rx.h>
67ed3f5f0SPaul Gortmaker #include <linux/prefetch.h>
77ed3f5f0SPaul Gortmaker
85ec8b7d1SJesse Brandeburg #include "iavf.h"
9ad64ed8bSJesse Brandeburg #include "iavf_trace.h"
1066bc8e0fSJesse Brandeburg #include "iavf_prototype.h"
1148ccdcd8SJacob Keller #include "iavf_ptp.h"
127f12ad74SGreg Rose
138447357eSMateusz Polchlopek /**
148447357eSMateusz Polchlopek * iavf_is_descriptor_done - tests DD bit in Rx descriptor
158447357eSMateusz Polchlopek * @qw1: quad word 1 from descriptor to get Descriptor Done field from
168447357eSMateusz Polchlopek * @flex: is the descriptor flex or legacy
178447357eSMateusz Polchlopek *
188447357eSMateusz Polchlopek * This function tests the descriptor done bit in the specified descriptor. Because
198447357eSMateusz Polchlopek * there are two types of descriptors (legacy and flex), the parameter @flex
208447357eSMateusz Polchlopek * is used to distinguish between them.
218447357eSMateusz Polchlopek *
228447357eSMateusz Polchlopek * Return: true or false based on the state of DD bit in Rx descriptor.
238447357eSMateusz Polchlopek */
248447357eSMateusz Polchlopek static bool iavf_is_descriptor_done(u64 qw1, bool flex)
258447357eSMateusz Polchlopek {
268447357eSMateusz Polchlopek if (flex)
278447357eSMateusz Polchlopek return FIELD_GET(IAVF_RXD_FLEX_DD_M, qw1);
288447357eSMateusz Polchlopek else
298447357eSMateusz Polchlopek return FIELD_GET(IAVF_RXD_LEGACY_DD_M, qw1);
308447357eSMateusz Polchlopek }
318447357eSMateusz Polchlopek
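/**
 * build_ctob - build the cmd_type_offset_bsz quad word of a Tx data descriptor
 * @td_cmd: Tx descriptor command bits
 * @td_offset: Tx descriptor header offset fields
 * @size: size of the Tx buffer in bytes
 * @td_tag: L2 tag 1 value (e.g. the VLAN tag) to insert
 *
 * Return: the descriptor quad word with IAVF_TX_DESC_DTYPE_DATA and all
 * fields shifted into place, converted to little-endian.
 */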
3270dc7ab7SJacob Keller static __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
337f12ad74SGreg Rose u32 td_tag)
347f12ad74SGreg Rose {
35f1cad2ceSJesse Brandeburg return cpu_to_le64(IAVF_TX_DESC_DTYPE_DATA |
3656184e01SJesse Brandeburg ((u64)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) |
3756184e01SJesse Brandeburg ((u64)td_offset << IAVF_TXD_QW1_OFFSET_SHIFT) |
3856184e01SJesse Brandeburg ((u64)size << IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
3956184e01SJesse Brandeburg ((u64)td_tag << IAVF_TXD_QW1_L2TAG1_SHIFT));
407f12ad74SGreg Rose }
417f12ad74SGreg Rose
4256184e01SJesse Brandeburg #define IAVF_TXD_CMD (IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_RS)
437f12ad74SGreg Rose
447f12ad74SGreg Rose /**
4556184e01SJesse Brandeburg * iavf_unmap_and_free_tx_resource - Release a Tx buffer
467f12ad74SGreg Rose * @ring: the ring that owns the buffer
477f12ad74SGreg Rose * @tx_buffer: the buffer to free
487f12ad74SGreg Rose **/
4956184e01SJesse Brandeburg static void iavf_unmap_and_free_tx_resource(struct iavf_ring *ring,
5056184e01SJesse Brandeburg struct iavf_tx_buffer *tx_buffer)
517f12ad74SGreg Rose {
527f12ad74SGreg Rose if (tx_buffer->skb) {
5356184e01SJesse Brandeburg if (tx_buffer->tx_flags & IAVF_TX_FLAGS_FD_SB)
5464bfd68eSAlexander Duyck kfree(tx_buffer->raw_buf);
5564bfd68eSAlexander Duyck else
567f12ad74SGreg Rose dev_kfree_skb_any(tx_buffer->skb);
577f12ad74SGreg Rose if (dma_unmap_len(tx_buffer, len))
587f12ad74SGreg Rose dma_unmap_single(ring->dev,
597f12ad74SGreg Rose dma_unmap_addr(tx_buffer, dma),
607f12ad74SGreg Rose dma_unmap_len(tx_buffer, len),
617f12ad74SGreg Rose DMA_TO_DEVICE);
627f12ad74SGreg Rose } else if (dma_unmap_len(tx_buffer, len)) {
637f12ad74SGreg Rose dma_unmap_page(ring->dev,
647f12ad74SGreg Rose dma_unmap_addr(tx_buffer, dma),
657f12ad74SGreg Rose dma_unmap_len(tx_buffer, len),
667f12ad74SGreg Rose DMA_TO_DEVICE);
677f12ad74SGreg Rose }
68a42e7a36SKiran Patil
697f12ad74SGreg Rose tx_buffer->next_to_watch = NULL;
707f12ad74SGreg Rose tx_buffer->skb = NULL;
717f12ad74SGreg Rose dma_unmap_len_set(tx_buffer, len, 0);
727f12ad74SGreg Rose /* tx_buffer must be completely set up in the transmit path */
737f12ad74SGreg Rose }
747f12ad74SGreg Rose
757f12ad74SGreg Rose /**
76129cf89eSJesse Brandeburg * iavf_clean_tx_ring - Free all Tx buffers in a ring
777f12ad74SGreg Rose * @tx_ring: ring to be cleaned
787f12ad74SGreg Rose **/
79a4aadf0fSPrzemek Kitszel static void iavf_clean_tx_ring(struct iavf_ring *tx_ring)
807f12ad74SGreg Rose {
817f12ad74SGreg Rose unsigned long bi_size;
827f12ad74SGreg Rose u16 i;
837f12ad74SGreg Rose
847f12ad74SGreg Rose /* ring already cleared, nothing to do */
857f12ad74SGreg Rose if (!tx_ring->tx_bi)
867f12ad74SGreg Rose return;
877f12ad74SGreg Rose
887f12ad74SGreg Rose /* Free all the Tx ring sk_buffs */
897f12ad74SGreg Rose for (i = 0; i < tx_ring->count; i++)
9056184e01SJesse Brandeburg iavf_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
917f12ad74SGreg Rose
9256184e01SJesse Brandeburg bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
937f12ad74SGreg Rose memset(tx_ring->tx_bi, 0, bi_size);
947f12ad74SGreg Rose
957f12ad74SGreg Rose /* Zero out the descriptor ring */
967f12ad74SGreg Rose memset(tx_ring->desc, 0, tx_ring->size);
977f12ad74SGreg Rose
987f12ad74SGreg Rose tx_ring->next_to_use = 0;
997f12ad74SGreg Rose tx_ring->next_to_clean = 0;
1007f12ad74SGreg Rose
1017f12ad74SGreg Rose if (!tx_ring->netdev)
1027f12ad74SGreg Rose return;
1037f12ad74SGreg Rose
1047f12ad74SGreg Rose /* cleanup Tx queue statistics */
105e486bdfdSAlexander Duyck netdev_tx_reset_queue(txring_txq(tx_ring));
1067f12ad74SGreg Rose }
1077f12ad74SGreg Rose
1087f12ad74SGreg Rose /**
109129cf89eSJesse Brandeburg * iavf_free_tx_resources - Free Tx resources per queue
1107f12ad74SGreg Rose * @tx_ring: Tx descriptor ring for a specific queue
1117f12ad74SGreg Rose *
1127f12ad74SGreg Rose * Free all transmit software resources
1137f12ad74SGreg Rose **/
11456184e01SJesse Brandeburg void iavf_free_tx_resources(struct iavf_ring *tx_ring)
1157f12ad74SGreg Rose {
116129cf89eSJesse Brandeburg iavf_clean_tx_ring(tx_ring);
1177f12ad74SGreg Rose kfree(tx_ring->tx_bi);
1187f12ad74SGreg Rose tx_ring->tx_bi = NULL;
1197f12ad74SGreg Rose
1207f12ad74SGreg Rose if (tx_ring->desc) {
1217f12ad74SGreg Rose dma_free_coherent(tx_ring->dev, tx_ring->size,
1227f12ad74SGreg Rose tx_ring->desc, tx_ring->dma);
1237f12ad74SGreg Rose tx_ring->desc = NULL;
1247f12ad74SGreg Rose }
1257f12ad74SGreg Rose }
1267f12ad74SGreg Rose
1277f12ad74SGreg Rose /**
128129cf89eSJesse Brandeburg * iavf_get_tx_pending - how many Tx descriptors not processed
129f5254429SJacob Keller * @ring: the ring of descriptors
130dd353109SAnjali Singhai Jain * @in_sw: is tx_pending being checked in SW or HW
131a68de58dSJesse Brandeburg *
1329c6c1259SKiran Patil * Since there is no access to the ring head register
1339c6c1259SKiran Patil * in XL710, we need to use our local copies
134a68de58dSJesse Brandeburg **/
135a4aadf0fSPrzemek Kitszel static u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
136a68de58dSJesse Brandeburg {
1379c6c1259SKiran Patil u32 head, tail;
138a68de58dSJesse Brandeburg
139809f23c0SBrett Creeley /* underlying hardware might not allow access and/or always return
140809f23c0SBrett Creeley * 0 for the head/tail registers so just use the cached values
141809f23c0SBrett Creeley */
142dd353109SAnjali Singhai Jain head = ring->next_to_clean;
143809f23c0SBrett Creeley tail = ring->next_to_use;
1449c6c1259SKiran Patil
1459c6c1259SKiran Patil if (head != tail)
1469c6c1259SKiran Patil return (head < tail) ?
1479c6c1259SKiran Patil tail - head : (tail + ring->count - head);
1489c6c1259SKiran Patil
1499c6c1259SKiran Patil return 0;
150a68de58dSJesse Brandeburg }
151a68de58dSJesse Brandeburg
15207d44190SSudheer Mogilappagari /**
153a4aadf0fSPrzemek Kitszel * iavf_force_wb - Issue SW Interrupt so HW does a wb
154a4aadf0fSPrzemek Kitszel * @vsi: the VSI we care about
155a4aadf0fSPrzemek Kitszel * @q_vector: the vector on which to force writeback
156a4aadf0fSPrzemek Kitszel **/
157a4aadf0fSPrzemek Kitszel static void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector)
158a4aadf0fSPrzemek Kitszel {
159a4aadf0fSPrzemek Kitszel u32 val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
160a4aadf0fSPrzemek Kitszel IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
161a4aadf0fSPrzemek Kitszel IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
162a4aadf0fSPrzemek Kitszel IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK
163a4aadf0fSPrzemek Kitszel /* allow 00 to be written to the index */;
164a4aadf0fSPrzemek Kitszel
165a4aadf0fSPrzemek Kitszel wr32(&vsi->back->hw,
166a4aadf0fSPrzemek Kitszel IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx),
167a4aadf0fSPrzemek Kitszel val);
168a4aadf0fSPrzemek Kitszel }
169a4aadf0fSPrzemek Kitszel
170a4aadf0fSPrzemek Kitszel /**
171129cf89eSJesse Brandeburg * iavf_detect_recover_hung - detect hung Tx queues and trigger recovery
17207d44190SSudheer Mogilappagari * @vsi: pointer to vsi struct with tx queues
17307d44190SSudheer Mogilappagari *
17407d44190SSudheer Mogilappagari * VSI has netdev and netdev has TX queues. This function checks each of
17507d44190SSudheer Mogilappagari * those TX queues and, if a queue appears hung, triggers recovery by issuing a SW interrupt.
17607d44190SSudheer Mogilappagari **/
17756184e01SJesse Brandeburg void iavf_detect_recover_hung(struct iavf_vsi *vsi)
17807d44190SSudheer Mogilappagari {
17956184e01SJesse Brandeburg struct iavf_ring *tx_ring = NULL;
18007d44190SSudheer Mogilappagari struct net_device *netdev;
18107d44190SSudheer Mogilappagari unsigned int i;
18207d44190SSudheer Mogilappagari int packets;
18307d44190SSudheer Mogilappagari
18407d44190SSudheer Mogilappagari if (!vsi)
18507d44190SSudheer Mogilappagari return;
18607d44190SSudheer Mogilappagari
18756184e01SJesse Brandeburg if (test_bit(__IAVF_VSI_DOWN, vsi->state))
18807d44190SSudheer Mogilappagari return;
18907d44190SSudheer Mogilappagari
19007d44190SSudheer Mogilappagari netdev = vsi->netdev;
19107d44190SSudheer Mogilappagari if (!netdev)
19207d44190SSudheer Mogilappagari return;
19307d44190SSudheer Mogilappagari
19407d44190SSudheer Mogilappagari if (!netif_carrier_ok(netdev))
19507d44190SSudheer Mogilappagari return;
19607d44190SSudheer Mogilappagari
19707d44190SSudheer Mogilappagari for (i = 0; i < vsi->back->num_active_queues; i++) {
19807d44190SSudheer Mogilappagari tx_ring = &vsi->back->tx_rings[i];
19907d44190SSudheer Mogilappagari if (tx_ring && tx_ring->desc) {
20007d44190SSudheer Mogilappagari /* If packet counter has not changed the queue is
20107d44190SSudheer Mogilappagari * likely stalled, so force an interrupt for this
20207d44190SSudheer Mogilappagari * queue.
20307d44190SSudheer Mogilappagari *
20407d44190SSudheer Mogilappagari * prev_pkt_ctr would be negative if there was no
20507d44190SSudheer Mogilappagari * pending work.
20607d44190SSudheer Mogilappagari */
20707d44190SSudheer Mogilappagari packets = tx_ring->stats.packets & INT_MAX;
20897cadd3dSAlexander Lobakin if (tx_ring->prev_pkt_ctr == packets) {
209129cf89eSJesse Brandeburg iavf_force_wb(vsi, tx_ring->q_vector);
21007d44190SSudheer Mogilappagari continue;
21107d44190SSudheer Mogilappagari }
21207d44190SSudheer Mogilappagari
21307d44190SSudheer Mogilappagari /* Memory barrier between read of packet count and call
214129cf89eSJesse Brandeburg * to iavf_get_tx_pending()
21507d44190SSudheer Mogilappagari */
21607d44190SSudheer Mogilappagari smp_rmb();
21797cadd3dSAlexander Lobakin tx_ring->prev_pkt_ctr =
218129cf89eSJesse Brandeburg iavf_get_tx_pending(tx_ring, true) ? packets : -1;
21907d44190SSudheer Mogilappagari }
22007d44190SSudheer Mogilappagari }
22107d44190SSudheer Mogilappagari }
22207d44190SSudheer Mogilappagari
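/* Force a descriptor writeback if fewer than WB_STRIDE descriptors are still
 * pending at the end of a clean cycle while in WB_ON_ITR mode.
 */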
2231dc8b538SAlexander Duyck #define WB_STRIDE 4
224c29af37fSAnjali Singhai Jain
2251943d8baSJesse Brandeburg /**
22656184e01SJesse Brandeburg * iavf_clean_tx_irq - Reclaim resources after transmit completes
227a619afe8SAlexander Duyck * @vsi: the VSI we care about
228a619afe8SAlexander Duyck * @tx_ring: Tx ring to clean
229a619afe8SAlexander Duyck * @napi_budget: Used to determine if we are in netpoll
2307f12ad74SGreg Rose *
2317f12ad74SGreg Rose * Returns true if there's any budget left (i.e. the clean is finished)
2327f12ad74SGreg Rose **/
23356184e01SJesse Brandeburg static bool iavf_clean_tx_irq(struct iavf_vsi *vsi,
23456184e01SJesse Brandeburg struct iavf_ring *tx_ring, int napi_budget)
2357f12ad74SGreg Rose {
236168d91cfSMitch Williams int i = tx_ring->next_to_clean;
23756184e01SJesse Brandeburg struct iavf_tx_buffer *tx_buf;
23856184e01SJesse Brandeburg struct iavf_tx_desc *tx_desc;
239a619afe8SAlexander Duyck unsigned int total_bytes = 0, total_packets = 0;
2404635fd3aSPrzemyslaw Patynowski unsigned int budget = IAVF_DEFAULT_IRQ_WORK;
2417f12ad74SGreg Rose
2427f12ad74SGreg Rose tx_buf = &tx_ring->tx_bi[i];
243f1cad2ceSJesse Brandeburg tx_desc = IAVF_TX_DESC(tx_ring, i);
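	/* track the position as a negative offset from the end of the ring
	 * so that the wrap-around checks below reduce to a simple !i test
	 */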
2447f12ad74SGreg Rose i -= tx_ring->count;
2457f12ad74SGreg Rose
2467f12ad74SGreg Rose do {
24756184e01SJesse Brandeburg struct iavf_tx_desc *eop_desc = tx_buf->next_to_watch;
2487f12ad74SGreg Rose
2497f12ad74SGreg Rose /* if next_to_watch is not set then there is no work pending */
2507f12ad74SGreg Rose if (!eop_desc)
2517f12ad74SGreg Rose break;
2527f12ad74SGreg Rose
2537f12ad74SGreg Rose /* prevent any other reads prior to eop_desc */
254f72271e2SBrian King smp_rmb();
2557f12ad74SGreg Rose
256ad64ed8bSJesse Brandeburg iavf_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
257b1cb07dbSPreethi Banala /* if the descriptor isn't done, no work yet to do */
258b1cb07dbSPreethi Banala if (!(eop_desc->cmd_type_offset_bsz &
259f1cad2ceSJesse Brandeburg cpu_to_le64(IAVF_TX_DESC_DTYPE_DESC_DONE)))
2607f12ad74SGreg Rose break;
2617f12ad74SGreg Rose
2627f12ad74SGreg Rose /* clear next_to_watch to prevent false hangs */
2637f12ad74SGreg Rose tx_buf->next_to_watch = NULL;
2647f12ad74SGreg Rose
2657f12ad74SGreg Rose /* update the statistics for this packet */
2667f12ad74SGreg Rose total_bytes += tx_buf->bytecount;
2677f12ad74SGreg Rose total_packets += tx_buf->gso_segs;
2687f12ad74SGreg Rose
2697f12ad74SGreg Rose /* free the skb */
270a619afe8SAlexander Duyck napi_consume_skb(tx_buf->skb, napi_budget);
2717f12ad74SGreg Rose
2727f12ad74SGreg Rose /* unmap skb header data */
2737f12ad74SGreg Rose dma_unmap_single(tx_ring->dev,
2747f12ad74SGreg Rose dma_unmap_addr(tx_buf, dma),
2757f12ad74SGreg Rose dma_unmap_len(tx_buf, len),
2767f12ad74SGreg Rose DMA_TO_DEVICE);
2777f12ad74SGreg Rose
2787f12ad74SGreg Rose /* clear tx_buffer data */
2797f12ad74SGreg Rose tx_buf->skb = NULL;
2807f12ad74SGreg Rose dma_unmap_len_set(tx_buf, len, 0);
2817f12ad74SGreg Rose
2827f12ad74SGreg Rose /* unmap remaining buffers */
2837f12ad74SGreg Rose while (tx_desc != eop_desc) {
284ad64ed8bSJesse Brandeburg iavf_trace(clean_tx_irq_unmap,
285ed0980c4SScott Peterson tx_ring, tx_desc, tx_buf);
2867f12ad74SGreg Rose
2877f12ad74SGreg Rose tx_buf++;
2887f12ad74SGreg Rose tx_desc++;
2897f12ad74SGreg Rose i++;
2907f12ad74SGreg Rose if (unlikely(!i)) {
2917f12ad74SGreg Rose i -= tx_ring->count;
2927f12ad74SGreg Rose tx_buf = tx_ring->tx_bi;
293f1cad2ceSJesse Brandeburg tx_desc = IAVF_TX_DESC(tx_ring, 0);
2947f12ad74SGreg Rose }
2957f12ad74SGreg Rose
2967f12ad74SGreg Rose /* unmap any remaining paged data */
2977f12ad74SGreg Rose if (dma_unmap_len(tx_buf, len)) {
2987f12ad74SGreg Rose dma_unmap_page(tx_ring->dev,
2997f12ad74SGreg Rose dma_unmap_addr(tx_buf, dma),
3007f12ad74SGreg Rose dma_unmap_len(tx_buf, len),
3017f12ad74SGreg Rose DMA_TO_DEVICE);
3027f12ad74SGreg Rose dma_unmap_len_set(tx_buf, len, 0);
3037f12ad74SGreg Rose }
3047f12ad74SGreg Rose }
3057f12ad74SGreg Rose
3067f12ad74SGreg Rose /* move us one more past the eop_desc for start of next pkt */
3077f12ad74SGreg Rose tx_buf++;
3087f12ad74SGreg Rose tx_desc++;
3097f12ad74SGreg Rose i++;
3107f12ad74SGreg Rose if (unlikely(!i)) {
3117f12ad74SGreg Rose i -= tx_ring->count;
3127f12ad74SGreg Rose tx_buf = tx_ring->tx_bi;
313f1cad2ceSJesse Brandeburg tx_desc = IAVF_TX_DESC(tx_ring, 0);
3147f12ad74SGreg Rose }
3157f12ad74SGreg Rose
316016890b9SJesse Brandeburg prefetch(tx_desc);
317016890b9SJesse Brandeburg
3187f12ad74SGreg Rose /* update budget accounting */
3197f12ad74SGreg Rose budget--;
3207f12ad74SGreg Rose } while (likely(budget));
3217f12ad74SGreg Rose
3227f12ad74SGreg Rose i += tx_ring->count;
3237f12ad74SGreg Rose tx_ring->next_to_clean = i;
3247f12ad74SGreg Rose u64_stats_update_begin(&tx_ring->syncp);
3257f12ad74SGreg Rose tx_ring->stats.bytes += total_bytes;
3267f12ad74SGreg Rose tx_ring->stats.packets += total_packets;
3277f12ad74SGreg Rose u64_stats_update_end(&tx_ring->syncp);
3287f12ad74SGreg Rose tx_ring->q_vector->tx.total_bytes += total_bytes;
3297f12ad74SGreg Rose tx_ring->q_vector->tx.total_packets += total_packets;
3307f12ad74SGreg Rose
33156184e01SJesse Brandeburg if (tx_ring->flags & IAVF_TXR_FLAGS_WB_ON_ITR) {
332f6d83d13SAnjali Singhai Jain /* check to see if there are < 4 descriptors
333f6d83d13SAnjali Singhai Jain * waiting to be written back, then kick the hardware to force
334f6d83d13SAnjali Singhai Jain * them to be written back in case we stay in NAPI.
335f6d83d13SAnjali Singhai Jain * In this mode on X722 we do not enable Interrupt.
336f6d83d13SAnjali Singhai Jain */
337129cf89eSJesse Brandeburg unsigned int j = iavf_get_tx_pending(tx_ring, false);
338f6d83d13SAnjali Singhai Jain
339f6d83d13SAnjali Singhai Jain if (budget &&
3401dc8b538SAlexander Duyck ((j / WB_STRIDE) == 0) && (j > 0) &&
34156184e01SJesse Brandeburg !test_bit(__IAVF_VSI_DOWN, vsi->state) &&
34256184e01SJesse Brandeburg (IAVF_DESC_UNUSED(tx_ring) != tx_ring->count))
34397cadd3dSAlexander Lobakin tx_ring->flags |= IAVF_TXR_FLAGS_ARM_WB;
344f6d83d13SAnjali Singhai Jain }
345f6d83d13SAnjali Singhai Jain
346e486bdfdSAlexander Duyck /* notify netdev of completed buffers */
347e486bdfdSAlexander Duyck netdev_tx_completed_queue(txring_txq(tx_ring),
3487f12ad74SGreg Rose total_packets, total_bytes);
3497f12ad74SGreg Rose
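/* only wake the queue once roughly two worst-case packets' worth of
 * descriptors (DESC_NEEDED * 2) have been reclaimed
 */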
350b85c94b6SJesse Brandeburg #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
3517f12ad74SGreg Rose if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
35256184e01SJesse Brandeburg (IAVF_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
3537f12ad74SGreg Rose /* Make sure that anybody stopping the queue after this
3547f12ad74SGreg Rose * sees the new next_to_clean.
3557f12ad74SGreg Rose */
3567f12ad74SGreg Rose smp_mb();
3577f12ad74SGreg Rose if (__netif_subqueue_stopped(tx_ring->netdev,
3587f12ad74SGreg Rose tx_ring->queue_index) &&
35956184e01SJesse Brandeburg !test_bit(__IAVF_VSI_DOWN, vsi->state)) {
3607f12ad74SGreg Rose netif_wake_subqueue(tx_ring->netdev,
3617f12ad74SGreg Rose tx_ring->queue_index);
3627f12ad74SGreg Rose ++tx_ring->tx_stats.restart_queue;
3637f12ad74SGreg Rose }
3647f12ad74SGreg Rose }
3657f12ad74SGreg Rose
366b03a8c1fSKiran Patil return !!budget;
3677f12ad74SGreg Rose }
3687f12ad74SGreg Rose
3697f12ad74SGreg Rose /**
370129cf89eSJesse Brandeburg * iavf_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
371c29af37fSAnjali Singhai Jain * @vsi: the VSI we care about
372ecc6a239SAnjali Singhai Jain * @q_vector: the vector on which to enable writeback
373c29af37fSAnjali Singhai Jain *
374c29af37fSAnjali Singhai Jain **/
37556184e01SJesse Brandeburg static void iavf_enable_wb_on_itr(struct iavf_vsi *vsi,
37656184e01SJesse Brandeburg struct iavf_q_vector *q_vector)
377c29af37fSAnjali Singhai Jain {
3788e0764b4SAnjali Singhai Jain u16 flags = q_vector->tx.ring[0].flags;
3798e0764b4SAnjali Singhai Jain u32 val;
3808e0764b4SAnjali Singhai Jain
38156184e01SJesse Brandeburg if (!(flags & IAVF_TXR_FLAGS_WB_ON_ITR))
382ecc6a239SAnjali Singhai Jain return;
383ecc6a239SAnjali Singhai Jain
3848e0764b4SAnjali Singhai Jain if (q_vector->arm_wb_state)
3858e0764b4SAnjali Singhai Jain return;
3868e0764b4SAnjali Singhai Jain
387f1cad2ceSJesse Brandeburg val = IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
388f1cad2ceSJesse Brandeburg IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */
3898e0764b4SAnjali Singhai Jain
3908e0764b4SAnjali Singhai Jain wr32(&vsi->back->hw,
391f1cad2ceSJesse Brandeburg IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx), val);
3928e0764b4SAnjali Singhai Jain q_vector->arm_wb_state = true;
393ecc6a239SAnjali Singhai Jain }
394ecc6a239SAnjali Singhai Jain
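/**
 * iavf_container_is_rx - check whether a ring container is the Rx container
 * @q_vector: q_vector the container belongs to
 * @rc: ring container to test
 *
 * Return: true if @rc is the Rx container of @q_vector, false otherwise.
 */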
39570dc7ab7SJacob Keller static bool iavf_container_is_rx(struct iavf_q_vector *q_vector,
39656184e01SJesse Brandeburg struct iavf_ring_container *rc)
397a0073a4bSAlexander Duyck {
398a0073a4bSAlexander Duyck return &q_vector->rx == rc;
399a0073a4bSAlexander Duyck }
400a0073a4bSAlexander Duyck
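/* Per-link-speed multipliers used by iavf_itr_divisor() when converting the
 * average wire size into an adaptive ITR value; faster links get a larger
 * divisor and thus a smaller ITR increment for a given wire size.
 */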
401d73dd127SMateusz Palczewski #define IAVF_AIM_MULTIPLIER_100G 2560
402d73dd127SMateusz Palczewski #define IAVF_AIM_MULTIPLIER_50G 1280
403d73dd127SMateusz Palczewski #define IAVF_AIM_MULTIPLIER_40G 1024
404d73dd127SMateusz Palczewski #define IAVF_AIM_MULTIPLIER_20G 512
405d73dd127SMateusz Palczewski #define IAVF_AIM_MULTIPLIER_10G 256
406d73dd127SMateusz Palczewski #define IAVF_AIM_MULTIPLIER_1G 32
407a0073a4bSAlexander Duyck
408d73dd127SMateusz Palczewski static unsigned int iavf_mbps_itr_multiplier(u32 speed_mbps)
409d73dd127SMateusz Palczewski {
410d73dd127SMateusz Palczewski switch (speed_mbps) {
411d73dd127SMateusz Palczewski case SPEED_100000:
412d73dd127SMateusz Palczewski return IAVF_AIM_MULTIPLIER_100G;
413d73dd127SMateusz Palczewski case SPEED_50000:
414d73dd127SMateusz Palczewski return IAVF_AIM_MULTIPLIER_50G;
415d73dd127SMateusz Palczewski case SPEED_40000:
416d73dd127SMateusz Palczewski return IAVF_AIM_MULTIPLIER_40G;
417d73dd127SMateusz Palczewski case SPEED_25000:
418d73dd127SMateusz Palczewski case SPEED_20000:
419d73dd127SMateusz Palczewski return IAVF_AIM_MULTIPLIER_20G;
420d73dd127SMateusz Palczewski case SPEED_10000:
421a0073a4bSAlexander Duyck default:
422d73dd127SMateusz Palczewski return IAVF_AIM_MULTIPLIER_10G;
423d73dd127SMateusz Palczewski case SPEED_1000:
424d73dd127SMateusz Palczewski case SPEED_100:
425d73dd127SMateusz Palczewski return IAVF_AIM_MULTIPLIER_1G;
426d73dd127SMateusz Palczewski }
427a0073a4bSAlexander Duyck }
428a0073a4bSAlexander Duyck
429d73dd127SMateusz Palczewski static unsigned int
430d73dd127SMateusz Palczewski iavf_virtchnl_itr_multiplier(enum virtchnl_link_speed speed_virtchnl)
431d73dd127SMateusz Palczewski {
432d73dd127SMateusz Palczewski switch (speed_virtchnl) {
433d73dd127SMateusz Palczewski case VIRTCHNL_LINK_SPEED_40GB:
434d73dd127SMateusz Palczewski return IAVF_AIM_MULTIPLIER_40G;
435d73dd127SMateusz Palczewski case VIRTCHNL_LINK_SPEED_25GB:
436d73dd127SMateusz Palczewski case VIRTCHNL_LINK_SPEED_20GB:
437d73dd127SMateusz Palczewski return IAVF_AIM_MULTIPLIER_20G;
438d73dd127SMateusz Palczewski case VIRTCHNL_LINK_SPEED_10GB:
439d73dd127SMateusz Palczewski default:
440d73dd127SMateusz Palczewski return IAVF_AIM_MULTIPLIER_10G;
441d73dd127SMateusz Palczewski case VIRTCHNL_LINK_SPEED_1GB:
442d73dd127SMateusz Palczewski case VIRTCHNL_LINK_SPEED_100MB:
443d73dd127SMateusz Palczewski return IAVF_AIM_MULTIPLIER_1G;
444d73dd127SMateusz Palczewski }
445d73dd127SMateusz Palczewski }
446d73dd127SMateusz Palczewski
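/**
 * iavf_itr_divisor - get the divisor used to scale the adaptive ITR value
 * @adapter: board private structure
 *
 * Return: IAVF_ITR_ADAPTIVE_MIN_INC scaled by a per-link-speed multiplier,
 * derived from the link speed in Mbps when ADV_LINK_SUPPORT is available and
 * from the virtchnl link speed enum otherwise.
 */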
447d73dd127SMateusz Palczewski static unsigned int iavf_itr_divisor(struct iavf_adapter *adapter)
448d73dd127SMateusz Palczewski {
449d73dd127SMateusz Palczewski if (ADV_LINK_SUPPORT(adapter))
450d73dd127SMateusz Palczewski return IAVF_ITR_ADAPTIVE_MIN_INC *
451d73dd127SMateusz Palczewski iavf_mbps_itr_multiplier(adapter->link_speed_mbps);
452d73dd127SMateusz Palczewski else
453d73dd127SMateusz Palczewski return IAVF_ITR_ADAPTIVE_MIN_INC *
454d73dd127SMateusz Palczewski iavf_virtchnl_itr_multiplier(adapter->link_speed);
455a0073a4bSAlexander Duyck }
456a0073a4bSAlexander Duyck
457c29af37fSAnjali Singhai Jain /**
45856184e01SJesse Brandeburg * iavf_update_itr - update the dynamic ITR value based on statistics
459a0073a4bSAlexander Duyck * @q_vector: structure containing interrupt and ring information
4607f12ad74SGreg Rose * @rc: structure containing ring performance data
4617f12ad74SGreg Rose *
462a0073a4bSAlexander Duyck * Stores a new ITR value based on packets and byte
463a0073a4bSAlexander Duyck * counts during the last interrupt. The advantage of per interrupt
464a0073a4bSAlexander Duyck * computation is faster updates and more accurate ITR for the current
465a0073a4bSAlexander Duyck * traffic pattern. Constants in this function were computed
466a0073a4bSAlexander Duyck * based on theoretical maximum wire speed and thresholds were set based
467a0073a4bSAlexander Duyck * on testing data as well as attempting to minimize response time
4687f12ad74SGreg Rose * while increasing bulk throughput.
4697f12ad74SGreg Rose **/
47056184e01SJesse Brandeburg static void iavf_update_itr(struct iavf_q_vector *q_vector,
47156184e01SJesse Brandeburg struct iavf_ring_container *rc)
4727f12ad74SGreg Rose {
473a0073a4bSAlexander Duyck unsigned int avg_wire_size, packets, bytes, itr;
474a0073a4bSAlexander Duyck unsigned long next_update = jiffies;
4757f12ad74SGreg Rose
476a0073a4bSAlexander Duyck /* If we don't have any rings just leave ourselves set for maximum
477a0073a4bSAlexander Duyck * possible latency so we take ourselves out of the equation.
478a0073a4bSAlexander Duyck */
47971dc3719SAlexander Duyck if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
480a0073a4bSAlexander Duyck return;
48171dc3719SAlexander Duyck
482a0073a4bSAlexander Duyck /* For Rx we want to push the delay up and default to low latency.
483a0073a4bSAlexander Duyck * For Tx we want to pull the delay down and default to high latency.
484742c9875SJacob Keller */
48556184e01SJesse Brandeburg itr = iavf_container_is_rx(q_vector, rc) ?
48656184e01SJesse Brandeburg IAVF_ITR_ADAPTIVE_MIN_USECS | IAVF_ITR_ADAPTIVE_LATENCY :
48756184e01SJesse Brandeburg IAVF_ITR_ADAPTIVE_MAX_USECS | IAVF_ITR_ADAPTIVE_LATENCY;
488a0073a4bSAlexander Duyck
489a0073a4bSAlexander Duyck /* If we didn't update within up to 1 - 2 jiffies we can assume
490a0073a4bSAlexander Duyck * that either packets are coming in so slowly that there hasn't been
491a0073a4bSAlexander Duyck * any work, or that there is so much work that NAPI is dealing
492a0073a4bSAlexander Duyck * with interrupt moderation and we don't need to do anything.
493a0073a4bSAlexander Duyck */
494a0073a4bSAlexander Duyck if (time_after(next_update, rc->next_update))
495a0073a4bSAlexander Duyck goto clear_counts;
496a0073a4bSAlexander Duyck
497a0073a4bSAlexander Duyck /* If itr_countdown is set it means we programmed an ITR within
498a0073a4bSAlexander Duyck * the last 4 interrupt cycles. This has a side effect of us
499a0073a4bSAlexander Duyck * potentially firing an early interrupt. In order to work around
500a0073a4bSAlexander Duyck * this we need to throw out any data received for a few
501a0073a4bSAlexander Duyck * interrupts following the update.
502a0073a4bSAlexander Duyck */
503a0073a4bSAlexander Duyck if (q_vector->itr_countdown) {
504a0073a4bSAlexander Duyck itr = rc->target_itr;
505a0073a4bSAlexander Duyck goto clear_counts;
506742c9875SJacob Keller }
507742c9875SJacob Keller
508a0073a4bSAlexander Duyck packets = rc->total_packets;
509a0073a4bSAlexander Duyck bytes = rc->total_bytes;
510a0073a4bSAlexander Duyck
51156184e01SJesse Brandeburg if (iavf_container_is_rx(q_vector, rc)) {
512a0073a4bSAlexander Duyck * For Rx, if there are 1 to 4 packets and fewer than 9000
513a0073a4bSAlexander Duyck * bytes, assume there is insufficient data to use the bulk
514a0073a4bSAlexander Duyck * rate limiting approach unless Tx is already in bulk rate
515a0073a4bSAlexander Duyck * limiting. We are likely latency driven.
516a0073a4bSAlexander Duyck */
517a0073a4bSAlexander Duyck if (packets && packets < 4 && bytes < 9000 &&
51856184e01SJesse Brandeburg (q_vector->tx.target_itr & IAVF_ITR_ADAPTIVE_LATENCY)) {
51956184e01SJesse Brandeburg itr = IAVF_ITR_ADAPTIVE_LATENCY;
520a0073a4bSAlexander Duyck goto adjust_by_size;
521a0073a4bSAlexander Duyck }
522a0073a4bSAlexander Duyck } else if (packets < 4) {
523a0073a4bSAlexander Duyck /* If we have Tx and Rx ITR maxed and Tx ITR is running in
524a0073a4bSAlexander Duyck * bulk mode and we are receiving 4 or fewer packets just
525a0073a4bSAlexander Duyck * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
526a0073a4bSAlexander Duyck * that the Rx can relax.
527a0073a4bSAlexander Duyck */
52856184e01SJesse Brandeburg if (rc->target_itr == IAVF_ITR_ADAPTIVE_MAX_USECS &&
52956184e01SJesse Brandeburg (q_vector->rx.target_itr & IAVF_ITR_MASK) ==
53056184e01SJesse Brandeburg IAVF_ITR_ADAPTIVE_MAX_USECS)
531a0073a4bSAlexander Duyck goto clear_counts;
532a0073a4bSAlexander Duyck } else if (packets > 32) {
533a0073a4bSAlexander Duyck /* If we have processed over 32 packets in a single interrupt
534a0073a4bSAlexander Duyck * for Tx assume we need to switch over to "bulk" mode.
535a0073a4bSAlexander Duyck */
53656184e01SJesse Brandeburg rc->target_itr &= ~IAVF_ITR_ADAPTIVE_LATENCY;
537a0073a4bSAlexander Duyck }
538a0073a4bSAlexander Duyck
539a0073a4bSAlexander Duyck /* We have no packets to actually measure against. This means
540a0073a4bSAlexander Duyck * either one of the other queues on this vector is active or
541a0073a4bSAlexander Duyck * we are a Tx queue doing TSO with too high of an interrupt rate.
54251cc6d9fSJesse Brandeburg *
543a0073a4bSAlexander Duyck * Between 4 and 56 we can assume that our current interrupt delay
544a0073a4bSAlexander Duyck * is only slightly too low. As such we should increase it by a small
545a0073a4bSAlexander Duyck * fixed amount.
5467f12ad74SGreg Rose */
547a0073a4bSAlexander Duyck if (packets < 56) {
54856184e01SJesse Brandeburg itr = rc->target_itr + IAVF_ITR_ADAPTIVE_MIN_INC;
54956184e01SJesse Brandeburg if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {
55056184e01SJesse Brandeburg itr &= IAVF_ITR_ADAPTIVE_LATENCY;
55156184e01SJesse Brandeburg itr += IAVF_ITR_ADAPTIVE_MAX_USECS;
552a0073a4bSAlexander Duyck }
553a0073a4bSAlexander Duyck goto clear_counts;
5547f12ad74SGreg Rose }
555c56625d5SJesse Brandeburg
556a0073a4bSAlexander Duyck if (packets <= 256) {
557a0073a4bSAlexander Duyck itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
55856184e01SJesse Brandeburg itr &= IAVF_ITR_MASK;
5597f12ad74SGreg Rose
560a0073a4bSAlexander Duyck /* Between 56 and 112 is our "goldilocks" zone where we are
561a0073a4bSAlexander Duyck * working out "just right". Just report that our current
562a0073a4bSAlexander Duyck * ITR is good for us.
563a0073a4bSAlexander Duyck */
564a0073a4bSAlexander Duyck if (packets <= 112)
565a0073a4bSAlexander Duyck goto clear_counts;
566a0073a4bSAlexander Duyck
567a0073a4bSAlexander Duyck /* If packet count is 128 or greater we are likely looking
568a0073a4bSAlexander Duyck * at a slight overrun of the delay we want. Try halving
569a0073a4bSAlexander Duyck * our delay to see if that will cut the number of packets
570a0073a4bSAlexander Duyck * in half per interrupt.
571a0073a4bSAlexander Duyck */
572a0073a4bSAlexander Duyck itr /= 2;
57356184e01SJesse Brandeburg itr &= IAVF_ITR_MASK;
57456184e01SJesse Brandeburg if (itr < IAVF_ITR_ADAPTIVE_MIN_USECS)
57556184e01SJesse Brandeburg itr = IAVF_ITR_ADAPTIVE_MIN_USECS;
576a0073a4bSAlexander Duyck
577a0073a4bSAlexander Duyck goto clear_counts;
5787f12ad74SGreg Rose }
5797f12ad74SGreg Rose
580a0073a4bSAlexander Duyck /* The paths below assume we are dealing with a bulk ITR since
581a0073a4bSAlexander Duyck * number of packets is greater than 256. We are just going to have
582a0073a4bSAlexander Duyck * to compute a value and try to bring the count under control,
583a0073a4bSAlexander Duyck * though for smaller packet sizes there isn't much we can do as
584a0073a4bSAlexander Duyck * NAPI polling will likely be kicking in sooner rather than later.
585a0073a4bSAlexander Duyck */
58656184e01SJesse Brandeburg itr = IAVF_ITR_ADAPTIVE_BULK;
587a0073a4bSAlexander Duyck
588a0073a4bSAlexander Duyck adjust_by_size:
589a0073a4bSAlexander Duyck /* If packet counts are 256 or greater we can assume we have a gross
590a0073a4bSAlexander Duyck * overestimation of what the rate should be. Instead of trying to fine
591a0073a4bSAlexander Duyck * tune it just use the formula below to try and dial in an exact value
592a0073a4bSAlexander Duyck * give the current packet size of the frame.
593a0073a4bSAlexander Duyck * given the current packet size of the frame.
594a0073a4bSAlexander Duyck avg_wire_size = bytes / packets;
595a0073a4bSAlexander Duyck
596a0073a4bSAlexander Duyck /* The following is a crude approximation of:
597a0073a4bSAlexander Duyck * wmem_default / (size + overhead) = desired_pkts_per_int
598a0073a4bSAlexander Duyck * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
599a0073a4bSAlexander Duyck * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
600a0073a4bSAlexander Duyck *
601a0073a4bSAlexander Duyck * Assuming wmem_default is 212992 and overhead is 640 bytes per
602a0073a4bSAlexander Duyck * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
603a0073a4bSAlexander Duyck * formula down to
604a0073a4bSAlexander Duyck *
605a0073a4bSAlexander Duyck * (170 * (size + 24)) / (size + 640) = ITR
606a0073a4bSAlexander Duyck *
607a0073a4bSAlexander Duyck * We first do some math on the packet size and then finally bitshift
608a0073a4bSAlexander Duyck * by 8 after rounding up. We also have to account for PCIe link speed
609a0073a4bSAlexander Duyck * difference as ITR scales based on this.
610a0073a4bSAlexander Duyck */
611a0073a4bSAlexander Duyck if (avg_wire_size <= 60) {
612a0073a4bSAlexander Duyck /* Start at 250k ints/sec */
613a0073a4bSAlexander Duyck avg_wire_size = 4096;
614a0073a4bSAlexander Duyck } else if (avg_wire_size <= 380) {
615a0073a4bSAlexander Duyck /* 250K ints/sec to 60K ints/sec */
616a0073a4bSAlexander Duyck avg_wire_size *= 40;
617a0073a4bSAlexander Duyck avg_wire_size += 1696;
618a0073a4bSAlexander Duyck } else if (avg_wire_size <= 1084) {
619a0073a4bSAlexander Duyck /* 60K ints/sec to 36K ints/sec */
620a0073a4bSAlexander Duyck avg_wire_size *= 15;
621a0073a4bSAlexander Duyck avg_wire_size += 11452;
622a0073a4bSAlexander Duyck } else if (avg_wire_size <= 1980) {
623a0073a4bSAlexander Duyck /* 36K ints/sec to 30K ints/sec */
624a0073a4bSAlexander Duyck avg_wire_size *= 5;
625a0073a4bSAlexander Duyck avg_wire_size += 22420;
626a0073a4bSAlexander Duyck } else {
627a0073a4bSAlexander Duyck /* plateau at a limit of 30K ints/sec */
628a0073a4bSAlexander Duyck avg_wire_size = 32256;
629a0073a4bSAlexander Duyck }
630a0073a4bSAlexander Duyck
631a0073a4bSAlexander Duyck /* If we are in low latency mode halve our delay which doubles the
632a0073a4bSAlexander Duyck * rate to somewhere between 100K to 16K ints/sec
633a0073a4bSAlexander Duyck */
63456184e01SJesse Brandeburg if (itr & IAVF_ITR_ADAPTIVE_LATENCY)
635a0073a4bSAlexander Duyck avg_wire_size /= 2;
636a0073a4bSAlexander Duyck
637a0073a4bSAlexander Duyck /* Resultant value is 256 times larger than it needs to be. This
638a0073a4bSAlexander Duyck * gives us room to adjust the value as needed to either increase
639a0073a4bSAlexander Duyck * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
640a0073a4bSAlexander Duyck *
641a0073a4bSAlexander Duyck * Use addition as we have already recorded the new latency flag
642a0073a4bSAlexander Duyck * for the ITR value.
643a0073a4bSAlexander Duyck */
644d73dd127SMateusz Palczewski itr += DIV_ROUND_UP(avg_wire_size,
645d73dd127SMateusz Palczewski iavf_itr_divisor(q_vector->adapter)) *
64656184e01SJesse Brandeburg IAVF_ITR_ADAPTIVE_MIN_INC;
647a0073a4bSAlexander Duyck
64856184e01SJesse Brandeburg if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {
64956184e01SJesse Brandeburg itr &= IAVF_ITR_ADAPTIVE_LATENCY;
65056184e01SJesse Brandeburg itr += IAVF_ITR_ADAPTIVE_MAX_USECS;
651a0073a4bSAlexander Duyck }
652a0073a4bSAlexander Duyck
653a0073a4bSAlexander Duyck clear_counts:
654a0073a4bSAlexander Duyck /* write back value */
655a0073a4bSAlexander Duyck rc->target_itr = itr;
656a0073a4bSAlexander Duyck
657a0073a4bSAlexander Duyck /* next update should occur within next jiffy */
658a0073a4bSAlexander Duyck rc->next_update = next_update + 1;
659a0073a4bSAlexander Duyck
6607f12ad74SGreg Rose rc->total_bytes = 0;
6617f12ad74SGreg Rose rc->total_packets = 0;
6627f12ad74SGreg Rose }
6637f12ad74SGreg Rose
6644eeb1fffSJesse Brandeburg /**
665129cf89eSJesse Brandeburg * iavf_setup_tx_descriptors - Allocate the Tx descriptors
6667f12ad74SGreg Rose * @tx_ring: the tx ring to set up
6677f12ad74SGreg Rose *
6687f12ad74SGreg Rose * Return 0 on success, negative on error
6697f12ad74SGreg Rose **/
67056184e01SJesse Brandeburg int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring)
6717f12ad74SGreg Rose {
6727f12ad74SGreg Rose struct device *dev = tx_ring->dev;
6737f12ad74SGreg Rose int bi_size;
6747f12ad74SGreg Rose
6757f12ad74SGreg Rose if (!dev)
6767f12ad74SGreg Rose return -ENOMEM;
6777f12ad74SGreg Rose
67867c818a1SMitch Williams /* warn if we are about to overwrite the pointer */
67967c818a1SMitch Williams WARN_ON(tx_ring->tx_bi);
68056184e01SJesse Brandeburg bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
6817f12ad74SGreg Rose tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
6827f12ad74SGreg Rose if (!tx_ring->tx_bi)
6837f12ad74SGreg Rose goto err;
6847f12ad74SGreg Rose
6857f12ad74SGreg Rose /* round up to nearest 4K */
68656184e01SJesse Brandeburg tx_ring->size = tx_ring->count * sizeof(struct iavf_tx_desc);
6877f12ad74SGreg Rose tx_ring->size = ALIGN(tx_ring->size, 4096);
6887f12ad74SGreg Rose tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
6897f12ad74SGreg Rose &tx_ring->dma, GFP_KERNEL);
6907f12ad74SGreg Rose if (!tx_ring->desc) {
6917f12ad74SGreg Rose dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
6927f12ad74SGreg Rose tx_ring->size);
6937f12ad74SGreg Rose goto err;
6947f12ad74SGreg Rose }
6957f12ad74SGreg Rose
6967f12ad74SGreg Rose tx_ring->next_to_use = 0;
6977f12ad74SGreg Rose tx_ring->next_to_clean = 0;
69897cadd3dSAlexander Lobakin tx_ring->prev_pkt_ctr = -1;
6997f12ad74SGreg Rose return 0;
7007f12ad74SGreg Rose
7017f12ad74SGreg Rose err:
7027f12ad74SGreg Rose kfree(tx_ring->tx_bi);
7037f12ad74SGreg Rose tx_ring->tx_bi = NULL;
7047f12ad74SGreg Rose return -ENOMEM;
7057f12ad74SGreg Rose }
7067f12ad74SGreg Rose
7077f12ad74SGreg Rose /**
708129cf89eSJesse Brandeburg * iavf_clean_rx_ring - Free Rx buffers
7097f12ad74SGreg Rose * @rx_ring: ring to be cleaned
7107f12ad74SGreg Rose **/
711a4aadf0fSPrzemek Kitszel static void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
7127f12ad74SGreg Rose {
7137f12ad74SGreg Rose /* ring already cleared, nothing to do */
7145fa4caffSAlexander Lobakin if (!rx_ring->rx_fqes)
7157f12ad74SGreg Rose return;
7167f12ad74SGreg Rose
717e72e5659SScott Peterson if (rx_ring->skb) {
718e72e5659SScott Peterson dev_kfree_skb(rx_ring->skb);
719e72e5659SScott Peterson rx_ring->skb = NULL;
720e72e5659SScott Peterson }
721e72e5659SScott Peterson
7225fa4caffSAlexander Lobakin /* Free all the Rx ring buffers */
7235fa4caffSAlexander Lobakin for (u32 i = rx_ring->next_to_clean; i != rx_ring->next_to_use; ) {
7245fa4caffSAlexander Lobakin const struct libeth_fqe *rx_fqes = &rx_ring->rx_fqes[i];
725ab9ad98eSJesse Brandeburg
7266ad5ff6eSAlexander Lobakin libeth_rx_recycle_slow(rx_fqes->netmem);
727ab9ad98eSJesse Brandeburg
7285fa4caffSAlexander Lobakin if (unlikely(++i == rx_ring->count))
7295fa4caffSAlexander Lobakin i = 0;
7307f12ad74SGreg Rose }
7317f12ad74SGreg Rose
7327f12ad74SGreg Rose rx_ring->next_to_clean = 0;
7337f12ad74SGreg Rose rx_ring->next_to_use = 0;
7347f12ad74SGreg Rose }
7357f12ad74SGreg Rose
7367f12ad74SGreg Rose /**
737129cf89eSJesse Brandeburg * iavf_free_rx_resources - Free Rx resources
7387f12ad74SGreg Rose * @rx_ring: ring to clean the resources from
7397f12ad74SGreg Rose *
7407f12ad74SGreg Rose * Free all receive software resources
7417f12ad74SGreg Rose **/
74256184e01SJesse Brandeburg void iavf_free_rx_resources(struct iavf_ring *rx_ring)
7437f12ad74SGreg Rose {
7445fa4caffSAlexander Lobakin struct libeth_fq fq = {
7455fa4caffSAlexander Lobakin .fqes = rx_ring->rx_fqes,
7465fa4caffSAlexander Lobakin .pp = rx_ring->pp,
7475fa4caffSAlexander Lobakin };
7485fa4caffSAlexander Lobakin
749129cf89eSJesse Brandeburg iavf_clean_rx_ring(rx_ring);
7507f12ad74SGreg Rose
7517f12ad74SGreg Rose if (rx_ring->desc) {
7525fa4caffSAlexander Lobakin dma_free_coherent(rx_ring->pp->p.dev, rx_ring->size,
7537f12ad74SGreg Rose rx_ring->desc, rx_ring->dma);
7547f12ad74SGreg Rose rx_ring->desc = NULL;
7557f12ad74SGreg Rose }
7565fa4caffSAlexander Lobakin
7575fa4caffSAlexander Lobakin libeth_rx_fq_destroy(&fq);
7585fa4caffSAlexander Lobakin rx_ring->rx_fqes = NULL;
7595fa4caffSAlexander Lobakin rx_ring->pp = NULL;
7607f12ad74SGreg Rose }
7617f12ad74SGreg Rose
7627f12ad74SGreg Rose /**
763129cf89eSJesse Brandeburg * iavf_setup_rx_descriptors - Allocate Rx descriptors
7647f12ad74SGreg Rose * @rx_ring: Rx descriptor ring (for a specific queue) to setup
7657f12ad74SGreg Rose *
7667f12ad74SGreg Rose * Returns 0 on success, negative on failure
7677f12ad74SGreg Rose **/
76856184e01SJesse Brandeburg int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
7697f12ad74SGreg Rose {
7705fa4caffSAlexander Lobakin struct libeth_fq fq = {
7715fa4caffSAlexander Lobakin .count = rx_ring->count,
7725fa4caffSAlexander Lobakin .buf_len = LIBIE_MAX_RX_BUF_LEN,
7735fa4caffSAlexander Lobakin .nid = NUMA_NO_NODE,
7745fa4caffSAlexander Lobakin };
7755fa4caffSAlexander Lobakin int ret;
7767f12ad74SGreg Rose
7775fa4caffSAlexander Lobakin ret = libeth_rx_fq_create(&fq, &rx_ring->q_vector->napi);
7785fa4caffSAlexander Lobakin if (ret)
7795fa4caffSAlexander Lobakin return ret;
7805fa4caffSAlexander Lobakin
7815fa4caffSAlexander Lobakin rx_ring->pp = fq.pp;
7825fa4caffSAlexander Lobakin rx_ring->rx_fqes = fq.fqes;
7835fa4caffSAlexander Lobakin rx_ring->truesize = fq.truesize;
7845fa4caffSAlexander Lobakin rx_ring->rx_buf_len = fq.buf_len;
7857f12ad74SGreg Rose
786f217d6caSCarolyn Wyborny u64_stats_init(&rx_ring->syncp);
787638702bdSCarolyn Wyborny
7887f12ad74SGreg Rose /* Round up to nearest 4K */
789e9f476d7SMateusz Polchlopek rx_ring->size = rx_ring->count * sizeof(struct iavf_rx_desc);
7907f12ad74SGreg Rose rx_ring->size = ALIGN(rx_ring->size, 4096);
7915fa4caffSAlexander Lobakin rx_ring->desc = dma_alloc_coherent(fq.pp->p.dev, rx_ring->size,
7927f12ad74SGreg Rose &rx_ring->dma, GFP_KERNEL);
7937f12ad74SGreg Rose
7947f12ad74SGreg Rose if (!rx_ring->desc) {
7955fa4caffSAlexander Lobakin dev_info(fq.pp->p.dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
7967f12ad74SGreg Rose rx_ring->size);
7977f12ad74SGreg Rose goto err;
7987f12ad74SGreg Rose }
7997f12ad74SGreg Rose
8007f12ad74SGreg Rose rx_ring->next_to_clean = 0;
8017f12ad74SGreg Rose rx_ring->next_to_use = 0;
8027f12ad74SGreg Rose
8037f12ad74SGreg Rose return 0;
8045fa4caffSAlexander Lobakin
8057f12ad74SGreg Rose err:
8065fa4caffSAlexander Lobakin libeth_rx_fq_destroy(&fq);
8075fa4caffSAlexander Lobakin rx_ring->rx_fqes = NULL;
8085fa4caffSAlexander Lobakin rx_ring->pp = NULL;
8095fa4caffSAlexander Lobakin
8107f12ad74SGreg Rose return -ENOMEM;
8117f12ad74SGreg Rose }
8127f12ad74SGreg Rose
8137f12ad74SGreg Rose /**
81456184e01SJesse Brandeburg * iavf_release_rx_desc - store the new next_to_use value and bump the hardware tail
8157f12ad74SGreg Rose * @rx_ring: ring to bump
8167f12ad74SGreg Rose * @val: new next_to_use index to write to the hardware tail register
8177f12ad74SGreg Rose **/
81870dc7ab7SJacob Keller static void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val)
8197f12ad74SGreg Rose {
8207f12ad74SGreg Rose rx_ring->next_to_use = val;
821ab9ad98eSJesse Brandeburg
8227f12ad74SGreg Rose /* Force memory writes to complete before letting h/w
8237f12ad74SGreg Rose * know there are new descriptors to fetch. (Only
8247f12ad74SGreg Rose * applicable for weak-ordered memory model archs,
8257f12ad74SGreg Rose * such as IA-64).
8267f12ad74SGreg Rose */
8277f12ad74SGreg Rose wmb();
8287f12ad74SGreg Rose writel(val, rx_ring->tail);
8297f12ad74SGreg Rose }
8307f12ad74SGreg Rose
8317f12ad74SGreg Rose /**
83256184e01SJesse Brandeburg * iavf_receive_skb - Send a completed packet up the stack
8337f12ad74SGreg Rose * @rx_ring: rx ring in play
8347f12ad74SGreg Rose * @skb: packet to send up
8357f12ad74SGreg Rose * @vlan_tag: vlan tag for packet
8367f12ad74SGreg Rose **/
83756184e01SJesse Brandeburg static void iavf_receive_skb(struct iavf_ring *rx_ring,
8387f12ad74SGreg Rose struct sk_buff *skb, u16 vlan_tag)
8397f12ad74SGreg Rose {
84056184e01SJesse Brandeburg struct iavf_q_vector *q_vector = rx_ring->q_vector;
8417f12ad74SGreg Rose
842a149f2c3SJesse Brandeburg if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
843a149f2c3SJesse Brandeburg (vlan_tag & VLAN_VID_MASK))
8447f12ad74SGreg Rose __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
845ccd219d2SBrett Creeley else if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_STAG_RX) &&
846ccd219d2SBrett Creeley vlan_tag & VLAN_VID_MASK)
847ccd219d2SBrett Creeley __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag);
8487f12ad74SGreg Rose
8497f12ad74SGreg Rose napi_gro_receive(&q_vector->napi, skb);
8507f12ad74SGreg Rose }
8517f12ad74SGreg Rose
8527f12ad74SGreg Rose /**
853129cf89eSJesse Brandeburg * iavf_alloc_rx_buffers - Replace used receive buffers
854ab9ad98eSJesse Brandeburg * @rx_ring: ring to place buffers on
855ab9ad98eSJesse Brandeburg * @cleaned_count: number of buffers to replace
856ab9ad98eSJesse Brandeburg *
857ab9ad98eSJesse Brandeburg * Returns false if all allocations were successful, true if any fail
858ab9ad98eSJesse Brandeburg **/
85956184e01SJesse Brandeburg bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
860ab9ad98eSJesse Brandeburg {
8615fa4caffSAlexander Lobakin const struct libeth_fq_fp fq = {
8625fa4caffSAlexander Lobakin .pp = rx_ring->pp,
8635fa4caffSAlexander Lobakin .fqes = rx_ring->rx_fqes,
8645fa4caffSAlexander Lobakin .truesize = rx_ring->truesize,
8655fa4caffSAlexander Lobakin .count = rx_ring->count,
8665fa4caffSAlexander Lobakin };
867ab9ad98eSJesse Brandeburg u16 ntu = rx_ring->next_to_use;
868e9f476d7SMateusz Polchlopek struct iavf_rx_desc *rx_desc;
869ab9ad98eSJesse Brandeburg
870ab9ad98eSJesse Brandeburg /* do nothing if no valid netdev defined */
871ab9ad98eSJesse Brandeburg if (!rx_ring->netdev || !cleaned_count)
872ab9ad98eSJesse Brandeburg return false;
873ab9ad98eSJesse Brandeburg
874f1cad2ceSJesse Brandeburg rx_desc = IAVF_RX_DESC(rx_ring, ntu);
875ab9ad98eSJesse Brandeburg
876ab9ad98eSJesse Brandeburg do {
8775fa4caffSAlexander Lobakin dma_addr_t addr;
878ab9ad98eSJesse Brandeburg
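		/* libeth_rx_alloc() hands back the DMA address of a newly
		 * filled Rx buffer, or DMA_MAPPING_ERROR if allocation or
		 * mapping failed
		 */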
8795fa4caffSAlexander Lobakin addr = libeth_rx_alloc(&fq, ntu);
8805fa4caffSAlexander Lobakin if (addr == DMA_MAPPING_ERROR)
8815fa4caffSAlexander Lobakin goto no_buffers;
88259605bc0SAlexander Duyck
883ab9ad98eSJesse Brandeburg /* Refresh the desc even if buffer_addrs didn't change
884ab9ad98eSJesse Brandeburg * because each write-back erases this info.
885ab9ad98eSJesse Brandeburg */
886e9f476d7SMateusz Polchlopek rx_desc->qw0 = cpu_to_le64(addr);
887ab9ad98eSJesse Brandeburg
888ab9ad98eSJesse Brandeburg rx_desc++;
889ab9ad98eSJesse Brandeburg ntu++;
890ab9ad98eSJesse Brandeburg if (unlikely(ntu == rx_ring->count)) {
891f1cad2ceSJesse Brandeburg rx_desc = IAVF_RX_DESC(rx_ring, 0);
892ab9ad98eSJesse Brandeburg ntu = 0;
893ab9ad98eSJesse Brandeburg }
894ab9ad98eSJesse Brandeburg
895ab9ad98eSJesse Brandeburg /* clear the status bits for the next_to_use descriptor */
896e9f476d7SMateusz Polchlopek rx_desc->qw1 = 0;
897ab9ad98eSJesse Brandeburg
898ab9ad98eSJesse Brandeburg cleaned_count--;
899ab9ad98eSJesse Brandeburg } while (cleaned_count);
900ab9ad98eSJesse Brandeburg
901ab9ad98eSJesse Brandeburg if (rx_ring->next_to_use != ntu)
90256184e01SJesse Brandeburg iavf_release_rx_desc(rx_ring, ntu);
903ab9ad98eSJesse Brandeburg
904ab9ad98eSJesse Brandeburg return false;
905ab9ad98eSJesse Brandeburg
906ab9ad98eSJesse Brandeburg no_buffers:
907ab9ad98eSJesse Brandeburg if (rx_ring->next_to_use != ntu)
90856184e01SJesse Brandeburg iavf_release_rx_desc(rx_ring, ntu);
909ab9ad98eSJesse Brandeburg
9105fa4caffSAlexander Lobakin rx_ring->rx_stats.alloc_page_failed++;
9115fa4caffSAlexander Lobakin
912ab9ad98eSJesse Brandeburg /* make sure to come back via polling to try again after
913ab9ad98eSJesse Brandeburg * allocation failure
914ab9ad98eSJesse Brandeburg */
915ab9ad98eSJesse Brandeburg return true;
916ab9ad98eSJesse Brandeburg }
917ab9ad98eSJesse Brandeburg
918ab9ad98eSJesse Brandeburg /**
9192dc8e7c3SJacob Keller * iavf_rx_csum - Indicate in skb if hw indicated a good checksum
9207f12ad74SGreg Rose * @vsi: the VSI we care about
9217f12ad74SGreg Rose * @skb: skb currently being received and modified
9222dc8e7c3SJacob Keller * @decoded_pt: decoded ptype information
9232dc8e7c3SJacob Keller * @csum_bits: decoded Rx descriptor information
9247f12ad74SGreg Rose **/
9252dc8e7c3SJacob Keller static void iavf_rx_csum(const struct iavf_vsi *vsi, struct sk_buff *skb,
9262dc8e7c3SJacob Keller struct libeth_rx_pt decoded_pt,
9272dc8e7c3SJacob Keller struct libeth_rx_csum csum_bits)
9287f12ad74SGreg Rose {
929858296c8SAlexander Duyck bool ipv4, ipv6;
930ab9ad98eSJesse Brandeburg
9317f12ad74SGreg Rose skb->ip_summed = CHECKSUM_NONE;
9327f12ad74SGreg Rose
9338a3c91ccSJesse Brandeburg /* did the hardware decode the packet and checksum? */
9342dc8e7c3SJacob Keller if (unlikely(!csum_bits.l3l4p))
9358a3c91ccSJesse Brandeburg return;
9368a3c91ccSJesse Brandeburg
9372dc8e7c3SJacob Keller ipv4 = libeth_rx_pt_get_ip_ver(decoded_pt) == LIBETH_RX_PT_OUTER_IPV4;
9382dc8e7c3SJacob Keller ipv6 = libeth_rx_pt_get_ip_ver(decoded_pt) == LIBETH_RX_PT_OUTER_IPV6;
9398a3c91ccSJesse Brandeburg
9402dc8e7c3SJacob Keller if (unlikely(ipv4 && (csum_bits.ipe || csum_bits.eipe)))
9418a3c91ccSJesse Brandeburg goto checksum_fail;
9428a3c91ccSJesse Brandeburg
943ddf1d0d7SJesse Brandeburg /* likely incorrect csum if alternate IP extension headers found */
9442dc8e7c3SJacob Keller if (unlikely(ipv6 && csum_bits.ipv6exadd))
9457f12ad74SGreg Rose return;
9467f12ad74SGreg Rose
9478a3c91ccSJesse Brandeburg /* there was some L4 error, count error and punt packet to the stack */
9482dc8e7c3SJacob Keller if (unlikely(csum_bits.l4e))
9498a3c91ccSJesse Brandeburg goto checksum_fail;
9507f12ad74SGreg Rose
9518a3c91ccSJesse Brandeburg /* handle packets that were not able to be checksummed due
9528a3c91ccSJesse Brandeburg * to arrival speed, in this case the stack can compute
9538a3c91ccSJesse Brandeburg * the csum.
9548a3c91ccSJesse Brandeburg */
9552dc8e7c3SJacob Keller if (unlikely(csum_bits.pprs))
9568a3c91ccSJesse Brandeburg return;
9578a3c91ccSJesse Brandeburg
9587f12ad74SGreg Rose skb->ip_summed = CHECKSUM_UNNECESSARY;
9598a3c91ccSJesse Brandeburg return;
9608a3c91ccSJesse Brandeburg
9618a3c91ccSJesse Brandeburg checksum_fail:
9628a3c91ccSJesse Brandeburg vsi->back->hw_csum_rx_error++;
9637f12ad74SGreg Rose }
9647f12ad74SGreg Rose
9657f12ad74SGreg Rose /**
9662dc8e7c3SJacob Keller * iavf_legacy_rx_csum - decode checksum bits from a legacy Rx descriptor
9672dc8e7c3SJacob Keller * @vsi: the VSI we care about
9682dc8e7c3SJacob Keller * @qw1: quad word 1
9692dc8e7c3SJacob Keller * @decoded_pt: decoded packet type
9702dc8e7c3SJacob Keller *
9712dc8e7c3SJacob Keller * This function only operates on the VIRTCHNL_RXDID_1_32B_BASE legacy 32byte
9722dc8e7c3SJacob Keller * descriptor writeback format.
9732dc8e7c3SJacob Keller *
9742dc8e7c3SJacob Keller * Return: decoded checksum bits.
975857942fdSAnjali Singhai Jain **/
9762dc8e7c3SJacob Keller static struct libeth_rx_csum
9772dc8e7c3SJacob Keller iavf_legacy_rx_csum(const struct iavf_vsi *vsi, u64 qw1,
9782dc8e7c3SJacob Keller const struct libeth_rx_pt decoded_pt)
979857942fdSAnjali Singhai Jain {
9802dc8e7c3SJacob Keller struct libeth_rx_csum csum_bits = {};
981857942fdSAnjali Singhai Jain
9822dc8e7c3SJacob Keller if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded_pt))
9832dc8e7c3SJacob Keller return csum_bits;
9842dc8e7c3SJacob Keller
9852dc8e7c3SJacob Keller csum_bits.ipe = FIELD_GET(IAVF_RXD_LEGACY_IPE_M, qw1);
9862dc8e7c3SJacob Keller csum_bits.eipe = FIELD_GET(IAVF_RXD_LEGACY_EIPE_M, qw1);
9872dc8e7c3SJacob Keller csum_bits.l4e = FIELD_GET(IAVF_RXD_LEGACY_L4E_M, qw1);
9882dc8e7c3SJacob Keller csum_bits.pprs = FIELD_GET(IAVF_RXD_LEGACY_PPRS_M, qw1);
9892dc8e7c3SJacob Keller csum_bits.l3l4p = FIELD_GET(IAVF_RXD_LEGACY_L3L4P_M, qw1);
9902dc8e7c3SJacob Keller csum_bits.ipv6exadd = FIELD_GET(IAVF_RXD_LEGACY_IPV6EXADD_M, qw1);
9912dc8e7c3SJacob Keller
9922dc8e7c3SJacob Keller return csum_bits;
9932dc8e7c3SJacob Keller }
9942dc8e7c3SJacob Keller
9952dc8e7c3SJacob Keller /**
9962dc8e7c3SJacob Keller * iavf_flex_rx_csum - decode checksum bits from a flexible Rx descriptor
9972dc8e7c3SJacob Keller * @vsi: the VSI we care about
9982dc8e7c3SJacob Keller * @qw1: quad word 1
9992dc8e7c3SJacob Keller * @decoded_pt: decoded packet type
10002dc8e7c3SJacob Keller *
10012dc8e7c3SJacob Keller * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible
10022dc8e7c3SJacob Keller * descriptor writeback format.
10032dc8e7c3SJacob Keller *
10042dc8e7c3SJacob Keller * Return: decoded checksum bits.
10052dc8e7c3SJacob Keller **/
10062dc8e7c3SJacob Keller static struct libeth_rx_csum
10072dc8e7c3SJacob Keller iavf_flex_rx_csum(const struct iavf_vsi *vsi, u64 qw1,
10082dc8e7c3SJacob Keller const struct libeth_rx_pt decoded_pt)
10092dc8e7c3SJacob Keller {
10102dc8e7c3SJacob Keller struct libeth_rx_csum csum_bits = {};
10112dc8e7c3SJacob Keller
10122dc8e7c3SJacob Keller if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded_pt))
10132dc8e7c3SJacob Keller return csum_bits;
10142dc8e7c3SJacob Keller
10152dc8e7c3SJacob Keller csum_bits.ipe = FIELD_GET(IAVF_RXD_FLEX_XSUM_IPE_M, qw1);
10162dc8e7c3SJacob Keller csum_bits.eipe = FIELD_GET(IAVF_RXD_FLEX_XSUM_EIPE_M, qw1);
10172dc8e7c3SJacob Keller csum_bits.l4e = FIELD_GET(IAVF_RXD_FLEX_XSUM_L4E_M, qw1);
10182dc8e7c3SJacob Keller csum_bits.eudpe = FIELD_GET(IAVF_RXD_FLEX_XSUM_EUDPE_M, qw1);
10192dc8e7c3SJacob Keller csum_bits.l3l4p = FIELD_GET(IAVF_RXD_FLEX_L3L4P_M, qw1);
10202dc8e7c3SJacob Keller csum_bits.ipv6exadd = FIELD_GET(IAVF_RXD_FLEX_IPV6EXADD_M, qw1);
10212dc8e7c3SJacob Keller csum_bits.nat = FIELD_GET(IAVF_RXD_FLEX_NAT_M, qw1);
10222dc8e7c3SJacob Keller
10232dc8e7c3SJacob Keller return csum_bits;
10242dc8e7c3SJacob Keller }
10252dc8e7c3SJacob Keller
10262dc8e7c3SJacob Keller /**
10272dc8e7c3SJacob Keller * iavf_legacy_rx_hash - set the hash value in the skb
10282dc8e7c3SJacob Keller * @ring: descriptor ring
10292dc8e7c3SJacob Keller * @qw0: quad word 0
10302dc8e7c3SJacob Keller * @qw1: quad word 1
10312dc8e7c3SJacob Keller * @skb: skb currently being received and modified
10322dc8e7c3SJacob Keller * @decoded_pt: decoded packet type
10332dc8e7c3SJacob Keller *
10342dc8e7c3SJacob Keller * This function only operates on the VIRTCHNL_RXDID_1_32B_BASE legacy 32byte
10352dc8e7c3SJacob Keller * descriptor writeback format.
10362dc8e7c3SJacob Keller **/
10372dc8e7c3SJacob Keller static void iavf_legacy_rx_hash(const struct iavf_ring *ring, __le64 qw0,
10382dc8e7c3SJacob Keller __le64 qw1, struct sk_buff *skb,
10392dc8e7c3SJacob Keller const struct libeth_rx_pt decoded_pt)
10402dc8e7c3SJacob Keller {
10412dc8e7c3SJacob Keller const __le64 rss_mask = cpu_to_le64(IAVF_RXD_LEGACY_FLTSTAT_M);
10422dc8e7c3SJacob Keller u32 hash;
10432dc8e7c3SJacob Keller
10442dc8e7c3SJacob Keller if (!libeth_rx_pt_has_hash(ring->netdev, decoded_pt))
1045857942fdSAnjali Singhai Jain return;
1046857942fdSAnjali Singhai Jain
10472dc8e7c3SJacob Keller if ((qw1 & rss_mask) == rss_mask) {
10482dc8e7c3SJacob Keller hash = le64_get_bits(qw0, IAVF_RXD_LEGACY_RSS_M);
10492dc8e7c3SJacob Keller libeth_rx_pt_set_hash(skb, hash, decoded_pt);
10502dc8e7c3SJacob Keller }
10512dc8e7c3SJacob Keller }
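
/* Note on the FLTSTAT test above (informational, not driver code): FLTSTAT
 * is a two-bit field and only the value 3 (both bits set) means the 32-bit
 * RSS hash is reported in the descriptor, which is why the code compares
 * the whole field against the mask instead of testing a single flag bit.
 */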
10522dc8e7c3SJacob Keller
10532dc8e7c3SJacob Keller /**
10542dc8e7c3SJacob Keller * iavf_flex_rx_hash - set the hash value in the skb
10552dc8e7c3SJacob Keller * @ring: descriptor ring
10562dc8e7c3SJacob Keller * @qw1: quad word 1
10572dc8e7c3SJacob Keller * @skb: skb currently being received and modified
10582dc8e7c3SJacob Keller * @decoded_pt: decoded packet type
10592dc8e7c3SJacob Keller *
10602dc8e7c3SJacob Keller * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible
10612dc8e7c3SJacob Keller * descriptor writeback format.
10622dc8e7c3SJacob Keller **/
iavf_flex_rx_hash(const struct iavf_ring * ring,__le64 qw1,struct sk_buff * skb,const struct libeth_rx_pt decoded_pt)10632dc8e7c3SJacob Keller static void iavf_flex_rx_hash(const struct iavf_ring *ring, __le64 qw1,
10642dc8e7c3SJacob Keller struct sk_buff *skb,
10652dc8e7c3SJacob Keller const struct libeth_rx_pt decoded_pt)
10662dc8e7c3SJacob Keller {
10672dc8e7c3SJacob Keller bool rss_valid;
10682dc8e7c3SJacob Keller u32 hash;
10692dc8e7c3SJacob Keller
10702dc8e7c3SJacob Keller if (!libeth_rx_pt_has_hash(ring->netdev, decoded_pt))
10712dc8e7c3SJacob Keller return;
10722dc8e7c3SJacob Keller
10732dc8e7c3SJacob Keller rss_valid = le64_get_bits(qw1, IAVF_RXD_FLEX_RSS_VALID_M);
10742dc8e7c3SJacob Keller if (rss_valid) {
10752dc8e7c3SJacob Keller hash = le64_get_bits(qw1, IAVF_RXD_FLEX_RSS_HASH_M);
10762dc8e7c3SJacob Keller libeth_rx_pt_set_hash(skb, hash, decoded_pt);
1077857942fdSAnjali Singhai Jain }
1078857942fdSAnjali Singhai Jain }
1079857942fdSAnjali Singhai Jain
1080857942fdSAnjali Singhai Jain /**
108148ccdcd8SJacob Keller * iavf_flex_rx_tstamp - Capture Rx timestamp from the descriptor
108248ccdcd8SJacob Keller * @rx_ring: descriptor ring
108348ccdcd8SJacob Keller * @qw2: quad word 2 of descriptor
108448ccdcd8SJacob Keller * @qw3: quad word 3 of descriptor
108548ccdcd8SJacob Keller * @skb: skb currently being received
108648ccdcd8SJacob Keller *
108748ccdcd8SJacob Keller * Read the Rx timestamp value from the descriptor and pass it to the stack.
108848ccdcd8SJacob Keller *
108948ccdcd8SJacob Keller * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible
109048ccdcd8SJacob Keller * descriptor writeback format.
109148ccdcd8SJacob Keller */
iavf_flex_rx_tstamp(const struct iavf_ring * rx_ring,__le64 qw2,__le64 qw3,struct sk_buff * skb)109248ccdcd8SJacob Keller static void iavf_flex_rx_tstamp(const struct iavf_ring *rx_ring, __le64 qw2,
109348ccdcd8SJacob Keller __le64 qw3, struct sk_buff *skb)
109448ccdcd8SJacob Keller {
109548ccdcd8SJacob Keller u32 tstamp;
109648ccdcd8SJacob Keller u64 ns;
109748ccdcd8SJacob Keller
109848ccdcd8SJacob Keller /* Skip processing if timestamps aren't enabled */
109948ccdcd8SJacob Keller if (!(rx_ring->flags & IAVF_TXRX_FLAGS_HW_TSTAMP))
110048ccdcd8SJacob Keller return;
110148ccdcd8SJacob Keller
110248ccdcd8SJacob Keller /* Check if this Rx descriptor has a valid timestamp */
110348ccdcd8SJacob Keller if (!le64_get_bits(qw2, IAVF_PTP_40B_TSTAMP_VALID))
110448ccdcd8SJacob Keller return;
110548ccdcd8SJacob Keller
110648ccdcd8SJacob Keller /* the ts_low field only contains the valid bit and sub-nanosecond
110748ccdcd8SJacob Keller * precision, so we don't need to extract it.
110848ccdcd8SJacob Keller */
110948ccdcd8SJacob Keller tstamp = le64_get_bits(qw3, IAVF_RXD_FLEX_QW3_TSTAMP_HIGH_M);
111048ccdcd8SJacob Keller
111148ccdcd8SJacob Keller ns = iavf_ptp_extend_32b_timestamp(rx_ring->ptp->cached_phc_time,
111248ccdcd8SJacob Keller tstamp);
111348ccdcd8SJacob Keller
111448ccdcd8SJacob Keller *skb_hwtstamps(skb) = (struct skb_shared_hwtstamps) {
111548ccdcd8SJacob Keller .hwtstamp = ns_to_ktime(ns),
111648ccdcd8SJacob Keller };
111748ccdcd8SJacob Keller }
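
/* For illustration only (not driver code): iavf_ptp_extend_32b_timestamp()
 * above is assumed to reconstruct a full 64-bit nanosecond value from the
 * 32 timestamp bits in the descriptor by anchoring them to a cached PHC
 * read. A minimal sketch of that scheme, assuming the cached PHC time is
 * refreshed well before the 32-bit counter wraps (~4 seconds), could look
 * like this; the helper name is hypothetical.
 */
static u64 __maybe_unused iavf_extend_32b_tstamp_sketch(u64 cached_phc_time,
							u32 tstamp)
{
	u32 phc_lo = lower_32_bits(cached_phc_time);
	u32 delta = tstamp - phc_lo;

	/* A huge "forward" delta means the capture actually happened just
	 * before the cached read; walk backwards instead.
	 */
	if (delta > U32_MAX / 2)
		return cached_phc_time - (phc_lo - tstamp);

	return cached_phc_time + delta;
}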
111848ccdcd8SJacob Keller
111948ccdcd8SJacob Keller /**
1120129cf89eSJesse Brandeburg * iavf_process_skb_fields - Populate skb header fields from Rx descriptor
1121ab9ad98eSJesse Brandeburg * @rx_ring: rx descriptor ring packet is being transacted on
1122ab9ad98eSJesse Brandeburg * @rx_desc: pointer to the EOP Rx descriptor
1123ab9ad98eSJesse Brandeburg * @skb: pointer to current skb being populated
11242dc8e7c3SJacob Keller * @ptype: the packet type decoded by hardware
11258447357eSMateusz Polchlopek * @flex: is the descriptor flex or legacy
11267f12ad74SGreg Rose *
1127ab9ad98eSJesse Brandeburg * This function checks the ring, descriptor, and packet information in
1128ab9ad98eSJesse Brandeburg * order to populate the hash, checksum, VLAN, protocol, and
1129ab9ad98eSJesse Brandeburg * other fields within the skb.
11307f12ad74SGreg Rose **/
iavf_process_skb_fields(const struct iavf_ring * rx_ring,const struct iavf_rx_desc * rx_desc,struct sk_buff * skb,u32 ptype,bool flex)11312dc8e7c3SJacob Keller static void iavf_process_skb_fields(const struct iavf_ring *rx_ring,
11322dc8e7c3SJacob Keller const struct iavf_rx_desc *rx_desc,
11338447357eSMateusz Polchlopek struct sk_buff *skb, u32 ptype,
11348447357eSMateusz Polchlopek bool flex)
11357f12ad74SGreg Rose {
11362dc8e7c3SJacob Keller struct libeth_rx_csum csum_bits;
11372dc8e7c3SJacob Keller struct libeth_rx_pt decoded_pt;
11382dc8e7c3SJacob Keller __le64 qw0 = rx_desc->qw0;
11392dc8e7c3SJacob Keller __le64 qw1 = rx_desc->qw1;
114048ccdcd8SJacob Keller __le64 qw2 = rx_desc->qw2;
114148ccdcd8SJacob Keller __le64 qw3 = rx_desc->qw3;
1142857942fdSAnjali Singhai Jain
11432dc8e7c3SJacob Keller decoded_pt = libie_rx_pt_parse(ptype);
11442dc8e7c3SJacob Keller
11458447357eSMateusz Polchlopek if (flex) {
11462dc8e7c3SJacob Keller iavf_flex_rx_hash(rx_ring, qw1, skb, decoded_pt);
114748ccdcd8SJacob Keller iavf_flex_rx_tstamp(rx_ring, qw2, qw3, skb);
11482dc8e7c3SJacob Keller csum_bits = iavf_flex_rx_csum(rx_ring->vsi, le64_to_cpu(qw1),
11492dc8e7c3SJacob Keller decoded_pt);
11508447357eSMateusz Polchlopek } else {
11518447357eSMateusz Polchlopek iavf_legacy_rx_hash(rx_ring, qw0, qw1, skb, decoded_pt);
11528447357eSMateusz Polchlopek csum_bits = iavf_legacy_rx_csum(rx_ring->vsi, le64_to_cpu(qw1),
11538447357eSMateusz Polchlopek decoded_pt);
11542dc8e7c3SJacob Keller }
11552dc8e7c3SJacob Keller iavf_rx_csum(rx_ring->vsi, skb, decoded_pt, csum_bits);
1156a132af24SMitch Williams
1157ab9ad98eSJesse Brandeburg skb_record_rx_queue(skb, rx_ring->queue_index);
1158a5b268e4SAlexander Duyck
1159a5b268e4SAlexander Duyck /* modifies the skb - consumes the enet header */
1160a5b268e4SAlexander Duyck skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1161a132af24SMitch Williams }
1162a132af24SMitch Williams
1163a132af24SMitch Williams /**
116456184e01SJesse Brandeburg * iavf_cleanup_headers - Correct empty headers
1165ab9ad98eSJesse Brandeburg * @rx_ring: rx descriptor ring packet is being transacted on
1166ab9ad98eSJesse Brandeburg * @skb: pointer to current skb being fixed
1167ab9ad98eSJesse Brandeburg *
1168ab9ad98eSJesse Brandeburg * Also address the case where we are pulling data in on pages only
1169ab9ad98eSJesse Brandeburg * and as such no data is present in the skb header.
1170ab9ad98eSJesse Brandeburg *
1171ab9ad98eSJesse Brandeburg * In addition if skb is not at least 60 bytes we need to pad it so that
1172ab9ad98eSJesse Brandeburg * it is large enough to qualify as a valid Ethernet frame.
1173ab9ad98eSJesse Brandeburg *
1174ab9ad98eSJesse Brandeburg * Returns true if an error was encountered and skb was freed.
1175a132af24SMitch Williams **/
iavf_cleanup_headers(struct iavf_ring * rx_ring,struct sk_buff * skb)117656184e01SJesse Brandeburg static bool iavf_cleanup_headers(struct iavf_ring *rx_ring, struct sk_buff *skb)
1177ab9ad98eSJesse Brandeburg {
1178ab9ad98eSJesse Brandeburg /* if eth_skb_pad returns an error the skb was freed */
1179ab9ad98eSJesse Brandeburg if (eth_skb_pad(skb))
1180ab9ad98eSJesse Brandeburg return true;
1181ab9ad98eSJesse Brandeburg
1182ab9ad98eSJesse Brandeburg return false;
1183ab9ad98eSJesse Brandeburg }
1184ab9ad98eSJesse Brandeburg
1185ab9ad98eSJesse Brandeburg /**
118656184e01SJesse Brandeburg * iavf_add_rx_frag - Add contents of Rx buffer to sk_buff
1187ab9ad98eSJesse Brandeburg * @skb: sk_buff to place the data into
11885fa4caffSAlexander Lobakin * @rx_buffer: buffer containing page to add
1189a0cfc313SAlexander Duyck * @size: packet length from rx_desc
1190ab9ad98eSJesse Brandeburg *
1191ab9ad98eSJesse Brandeburg * This function will add the data contained in rx_buffer->netmem to the skb.
1192fa2343e9SAlexander Duyck * It will just attach the page as a frag to the skb.
1193ab9ad98eSJesse Brandeburg *
1194fa2343e9SAlexander Duyck * The function will then update the page offset.
1195ab9ad98eSJesse Brandeburg **/
iavf_add_rx_frag(struct sk_buff * skb,const struct libeth_fqe * rx_buffer,unsigned int size)11965fa4caffSAlexander Lobakin static void iavf_add_rx_frag(struct sk_buff *skb,
11975fa4caffSAlexander Lobakin const struct libeth_fqe *rx_buffer,
1198a0cfc313SAlexander Duyck unsigned int size)
1199ab9ad98eSJesse Brandeburg {
12006ad5ff6eSAlexander Lobakin u32 hr = netmem_get_pp(rx_buffer->netmem)->p.offset;
1201efa14c39SMitch Williams
12026ad5ff6eSAlexander Lobakin skb_add_rx_frag_netmem(skb, skb_shinfo(skb)->nr_frags,
12036ad5ff6eSAlexander Lobakin rx_buffer->netmem, rx_buffer->offset + hr,
12046ad5ff6eSAlexander Lobakin size, rx_buffer->truesize);
12059a064128SAlexander Duyck }
12069a064128SAlexander Duyck
12079a064128SAlexander Duyck /**
120856184e01SJesse Brandeburg * iavf_build_skb - Build skb around an existing buffer
1209f8b45b74SAlexander Duyck * @rx_buffer: Rx buffer to pull data from
1210f8b45b74SAlexander Duyck * @size: size of buffer to add to skb
1211f8b45b74SAlexander Duyck *
1212f8b45b74SAlexander Duyck * This function builds an skb around an existing Rx buffer, taking care
1213f8b45b74SAlexander Duyck * to set up the skb correctly and avoid any memcpy overhead.
1214f8b45b74SAlexander Duyck */
iavf_build_skb(const struct libeth_fqe * rx_buffer,unsigned int size)12155fa4caffSAlexander Lobakin static struct sk_buff *iavf_build_skb(const struct libeth_fqe *rx_buffer,
1216f8b45b74SAlexander Duyck unsigned int size)
1217f8b45b74SAlexander Duyck {
12186ad5ff6eSAlexander Lobakin struct page *buf_page = __netmem_to_page(rx_buffer->netmem);
1219*c8d6830eSByungchul Park u32 hr = pp_page_to_nmdesc(buf_page)->pp->p.offset;
1220f8b45b74SAlexander Duyck struct sk_buff *skb;
12215fa4caffSAlexander Lobakin void *va;
1222f8b45b74SAlexander Duyck
1223f8b45b74SAlexander Duyck /* prefetch first cache line of first page */
12246ad5ff6eSAlexander Lobakin va = page_address(buf_page) + rx_buffer->offset;
12255fa4caffSAlexander Lobakin net_prefetch(va + hr);
1226f468f21bSTariq Toukan
1227f8b45b74SAlexander Duyck /* build an skb around the page buffer */
12285fa4caffSAlexander Lobakin skb = napi_build_skb(va, rx_buffer->truesize);
1229f8b45b74SAlexander Duyck if (unlikely(!skb))
1230f8b45b74SAlexander Duyck return NULL;
1231f8b45b74SAlexander Duyck
12325fa4caffSAlexander Lobakin skb_mark_for_recycle(skb);
12335fa4caffSAlexander Lobakin
1234f8b45b74SAlexander Duyck /* update pointers within the skb to store the data */
12355fa4caffSAlexander Lobakin skb_reserve(skb, hr);
1236f8b45b74SAlexander Duyck __skb_put(skb, size);
1237f8b45b74SAlexander Duyck
1238f8b45b74SAlexander Duyck return skb;
1239f8b45b74SAlexander Duyck }
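
/* Buffer layout assumed by iavf_build_skb() above (sketch only): va points
 * at the start of the page-pool slice, the frame data begins after the
 * headroom configured in the page pool, and napi_build_skb() places
 * struct skb_shared_info at the end of truesize.
 *
 *   va                va + hr              va + hr + size
 *   |<-- headroom --->|<--- frame data --->|<- tailroom + shared_info ->|
 *   |<---------------------------- truesize ---------------------------->|
 */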
1240f8b45b74SAlexander Duyck
1241f8b45b74SAlexander Duyck /**
124256184e01SJesse Brandeburg * iavf_is_non_eop - process handling of non-EOP buffers
1243ab9ad98eSJesse Brandeburg * @rx_ring: Rx ring being processed
12442dc8e7c3SJacob Keller * @fields: Rx descriptor extracted fields
1245ab9ad98eSJesse Brandeburg *
1246ab9ad98eSJesse Brandeburg * This function updates next to clean. If the buffer is an EOP buffer
1247ab9ad98eSJesse Brandeburg * this function exits returning false, otherwise it returns true to
1248ab9ad98eSJesse Brandeburg * indicate that this is in fact a non-EOP buffer and the current skb
1249ab9ad98eSJesse Brandeburg * will be completed by a subsequent descriptor.
1250ab9ad98eSJesse Brandeburg **/
iavf_is_non_eop(struct iavf_ring * rx_ring,struct libeth_rqe_info fields)125156184e01SJesse Brandeburg static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
12522dc8e7c3SJacob Keller struct libeth_rqe_info fields)
1253ab9ad98eSJesse Brandeburg {
1254ab9ad98eSJesse Brandeburg u32 ntc = rx_ring->next_to_clean + 1;
1255ab9ad98eSJesse Brandeburg
1256ab9ad98eSJesse Brandeburg /* fetch, update, and store next to clean */
1257ab9ad98eSJesse Brandeburg ntc = (ntc < rx_ring->count) ? ntc : 0;
1258ab9ad98eSJesse Brandeburg rx_ring->next_to_clean = ntc;
1259ab9ad98eSJesse Brandeburg
1260f1cad2ceSJesse Brandeburg prefetch(IAVF_RX_DESC(rx_ring, ntc));
1261ab9ad98eSJesse Brandeburg
1262ab9ad98eSJesse Brandeburg /* if we are the last buffer then there is nothing else to do */
12632dc8e7c3SJacob Keller if (likely(fields.eop))
1264ab9ad98eSJesse Brandeburg return false;
1265ab9ad98eSJesse Brandeburg
1266ab9ad98eSJesse Brandeburg rx_ring->rx_stats.non_eop_descs++;
1267ab9ad98eSJesse Brandeburg
1268ab9ad98eSJesse Brandeburg return true;
1269ab9ad98eSJesse Brandeburg }
1270ab9ad98eSJesse Brandeburg
1271ab9ad98eSJesse Brandeburg /**
12722dc8e7c3SJacob Keller * iavf_extract_legacy_rx_fields - Extract fields from the Rx descriptor
12732dc8e7c3SJacob Keller * @rx_ring: rx descriptor ring
12742dc8e7c3SJacob Keller * @rx_desc: the descriptor to process
12752dc8e7c3SJacob Keller *
12762dc8e7c3SJacob Keller * Decode the Rx descriptor and extract relevant information including the
12772dc8e7c3SJacob Keller * size, VLAN tag, Rx packet type, end of packet field and RXE field value.
12782dc8e7c3SJacob Keller *
12792dc8e7c3SJacob Keller * This function only operates on the VIRTCHNL_RXDID_1_32B_BASE legacy 32-byte
12802dc8e7c3SJacob Keller * descriptor writeback format.
12812dc8e7c3SJacob Keller *
12822dc8e7c3SJacob Keller * Return: fields extracted from the Rx descriptor.
12832dc8e7c3SJacob Keller */
12842dc8e7c3SJacob Keller static struct libeth_rqe_info
iavf_extract_legacy_rx_fields(const struct iavf_ring * rx_ring,const struct iavf_rx_desc * rx_desc)12852dc8e7c3SJacob Keller iavf_extract_legacy_rx_fields(const struct iavf_ring *rx_ring,
12862dc8e7c3SJacob Keller const struct iavf_rx_desc *rx_desc)
12872dc8e7c3SJacob Keller {
12882dc8e7c3SJacob Keller u64 qw0 = le64_to_cpu(rx_desc->qw0);
12892dc8e7c3SJacob Keller u64 qw1 = le64_to_cpu(rx_desc->qw1);
12902dc8e7c3SJacob Keller u64 qw2 = le64_to_cpu(rx_desc->qw2);
12912dc8e7c3SJacob Keller struct libeth_rqe_info fields;
12922dc8e7c3SJacob Keller bool l2tag1p, l2tag2p;
12932dc8e7c3SJacob Keller
12942dc8e7c3SJacob Keller fields.eop = FIELD_GET(IAVF_RXD_LEGACY_EOP_M, qw1);
12952dc8e7c3SJacob Keller fields.len = FIELD_GET(IAVF_RXD_LEGACY_LENGTH_M, qw1);
12962dc8e7c3SJacob Keller
12972dc8e7c3SJacob Keller if (!fields.eop)
12982dc8e7c3SJacob Keller return fields;
12992dc8e7c3SJacob Keller
13002dc8e7c3SJacob Keller fields.rxe = FIELD_GET(IAVF_RXD_LEGACY_RXE_M, qw1);
13012dc8e7c3SJacob Keller fields.ptype = FIELD_GET(IAVF_RXD_LEGACY_PTYPE_M, qw1);
13022dc8e7c3SJacob Keller fields.vlan = 0;
13032dc8e7c3SJacob Keller
13042dc8e7c3SJacob Keller if (rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
13052dc8e7c3SJacob Keller l2tag1p = FIELD_GET(IAVF_RXD_LEGACY_L2TAG1P_M, qw1);
13062dc8e7c3SJacob Keller if (l2tag1p)
13072dc8e7c3SJacob Keller fields.vlan = FIELD_GET(IAVF_RXD_LEGACY_L2TAG1_M, qw0);
13082dc8e7c3SJacob Keller } else if (rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
13092dc8e7c3SJacob Keller l2tag2p = FIELD_GET(IAVF_RXD_LEGACY_L2TAG2P_M, qw2);
13102dc8e7c3SJacob Keller if (l2tag2p)
13112dc8e7c3SJacob Keller fields.vlan = FIELD_GET(IAVF_RXD_LEGACY_L2TAG2_M, qw2);
13122dc8e7c3SJacob Keller }
13132dc8e7c3SJacob Keller
13142dc8e7c3SJacob Keller return fields;
13152dc8e7c3SJacob Keller }
13162dc8e7c3SJacob Keller
13172dc8e7c3SJacob Keller /**
13182dc8e7c3SJacob Keller * iavf_extract_flex_rx_fields - Extract fields from the Rx descriptor
13192dc8e7c3SJacob Keller * @rx_ring: rx descriptor ring
13202dc8e7c3SJacob Keller * @rx_desc: the descriptor to process
13212dc8e7c3SJacob Keller *
13222dc8e7c3SJacob Keller * Decode the Rx descriptor and extract relevant information including the
13232dc8e7c3SJacob Keller * size, VLAN tag, Rx packet type, end of packet field and RXE field value.
13242dc8e7c3SJacob Keller *
13252dc8e7c3SJacob Keller * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible
13262dc8e7c3SJacob Keller * descriptor writeback format.
13272dc8e7c3SJacob Keller *
13282dc8e7c3SJacob Keller * Return: fields extracted from the Rx descriptor.
13292dc8e7c3SJacob Keller */
13302dc8e7c3SJacob Keller static struct libeth_rqe_info
iavf_extract_flex_rx_fields(const struct iavf_ring * rx_ring,const struct iavf_rx_desc * rx_desc)13312dc8e7c3SJacob Keller iavf_extract_flex_rx_fields(const struct iavf_ring *rx_ring,
13322dc8e7c3SJacob Keller const struct iavf_rx_desc *rx_desc)
13332dc8e7c3SJacob Keller {
13342dc8e7c3SJacob Keller struct libeth_rqe_info fields = {};
13352dc8e7c3SJacob Keller u64 qw0 = le64_to_cpu(rx_desc->qw0);
13362dc8e7c3SJacob Keller u64 qw1 = le64_to_cpu(rx_desc->qw1);
13372dc8e7c3SJacob Keller u64 qw2 = le64_to_cpu(rx_desc->qw2);
13382dc8e7c3SJacob Keller bool l2tag1p, l2tag2p;
13392dc8e7c3SJacob Keller
13402dc8e7c3SJacob Keller fields.eop = FIELD_GET(IAVF_RXD_FLEX_EOP_M, qw1);
13412dc8e7c3SJacob Keller fields.len = FIELD_GET(IAVF_RXD_FLEX_PKT_LEN_M, qw0);
13422dc8e7c3SJacob Keller
13432dc8e7c3SJacob Keller if (!fields.eop)
13442dc8e7c3SJacob Keller return fields;
13452dc8e7c3SJacob Keller
13462dc8e7c3SJacob Keller fields.rxe = FIELD_GET(IAVF_RXD_FLEX_RXE_M, qw1);
13472dc8e7c3SJacob Keller fields.ptype = FIELD_GET(IAVF_RXD_FLEX_PTYPE_M, qw0);
13482dc8e7c3SJacob Keller fields.vlan = 0;
13492dc8e7c3SJacob Keller
13502dc8e7c3SJacob Keller if (rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
13512dc8e7c3SJacob Keller l2tag1p = FIELD_GET(IAVF_RXD_FLEX_L2TAG1P_M, qw1);
13522dc8e7c3SJacob Keller if (l2tag1p)
13532dc8e7c3SJacob Keller fields.vlan = FIELD_GET(IAVF_RXD_FLEX_L2TAG1_M, qw1);
13542dc8e7c3SJacob Keller } else if (rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
13552dc8e7c3SJacob Keller l2tag2p = FIELD_GET(IAVF_RXD_FLEX_L2TAG2P_M, qw2);
13562dc8e7c3SJacob Keller if (l2tag2p)
13572dc8e7c3SJacob Keller fields.vlan = FIELD_GET(IAVF_RXD_FLEX_L2TAG2_2_M, qw2);
13582dc8e7c3SJacob Keller }
13592dc8e7c3SJacob Keller
13602dc8e7c3SJacob Keller return fields;
13612dc8e7c3SJacob Keller }
13622dc8e7c3SJacob Keller
13632dc8e7c3SJacob Keller static struct libeth_rqe_info
iavf_extract_rx_fields(const struct iavf_ring * rx_ring,const struct iavf_rx_desc * rx_desc,bool flex)13642dc8e7c3SJacob Keller iavf_extract_rx_fields(const struct iavf_ring *rx_ring,
13658447357eSMateusz Polchlopek const struct iavf_rx_desc *rx_desc,
13668447357eSMateusz Polchlopek bool flex)
13672dc8e7c3SJacob Keller {
13688447357eSMateusz Polchlopek if (flex)
13692dc8e7c3SJacob Keller return iavf_extract_flex_rx_fields(rx_ring, rx_desc);
13708447357eSMateusz Polchlopek else
13718447357eSMateusz Polchlopek return iavf_extract_legacy_rx_fields(rx_ring, rx_desc);
13722dc8e7c3SJacob Keller }
13732dc8e7c3SJacob Keller
13742dc8e7c3SJacob Keller /**
137556184e01SJesse Brandeburg * iavf_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
1376ab9ad98eSJesse Brandeburg * @rx_ring: rx descriptor ring to transact packets on
1377ab9ad98eSJesse Brandeburg * @budget: Total limit on number of packets to process
1378ab9ad98eSJesse Brandeburg *
1379ab9ad98eSJesse Brandeburg * This function provides a "bounce buffer" approach to Rx interrupt
1380ab9ad98eSJesse Brandeburg * processing. The advantage to this is that on systems that have
1381ab9ad98eSJesse Brandeburg * expensive overhead for IOMMU access this provides a means of avoiding
1382ab9ad98eSJesse Brandeburg * it by maintaining the mapping of the page to the system.
1383ab9ad98eSJesse Brandeburg *
1384ab9ad98eSJesse Brandeburg * Returns amount of work completed
1385ab9ad98eSJesse Brandeburg **/
iavf_clean_rx_irq(struct iavf_ring * rx_ring,int budget)138656184e01SJesse Brandeburg static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
1387a132af24SMitch Williams {
13888447357eSMateusz Polchlopek bool flex = rx_ring->rxdid == VIRTCHNL_RXDID_2_FLEX_SQ_NIC;
1389a132af24SMitch Williams unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1390e72e5659SScott Peterson struct sk_buff *skb = rx_ring->skb;
139156184e01SJesse Brandeburg u16 cleaned_count = IAVF_DESC_UNUSED(rx_ring);
1392c2e245abSJesse Brandeburg bool failure = false;
1393ab9ad98eSJesse Brandeburg
1394b85c94b6SJesse Brandeburg while (likely(total_rx_packets < (unsigned int)budget)) {
13952dc8e7c3SJacob Keller struct libeth_rqe_info fields;
13965fa4caffSAlexander Lobakin struct libeth_fqe *rx_buffer;
1397e9f476d7SMateusz Polchlopek struct iavf_rx_desc *rx_desc;
13988447357eSMateusz Polchlopek u64 qw1;
1399a132af24SMitch Williams
1400a132af24SMitch Williams /* return some buffers to hardware, one at a time is too slow */
140156184e01SJesse Brandeburg if (cleaned_count >= IAVF_RX_BUFFER_WRITE) {
1402c2e245abSJesse Brandeburg failure = failure ||
1403129cf89eSJesse Brandeburg iavf_alloc_rx_buffers(rx_ring, cleaned_count);
1404a132af24SMitch Williams cleaned_count = 0;
1405a132af24SMitch Williams }
1406a132af24SMitch Williams
1407f1cad2ceSJesse Brandeburg rx_desc = IAVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
1408ab9ad98eSJesse Brandeburg
1409a132af24SMitch Williams /* This memory barrier is needed to keep us from reading
1410d57c0e08SAlexander Duyck * any other fields out of the rx_desc until we have
1411d57c0e08SAlexander Duyck * verified the descriptor has been written back.
1412a132af24SMitch Williams */
141367317166SAlexander Duyck dma_rmb();
14142dc8e7c3SJacob Keller
14158447357eSMateusz Polchlopek qw1 = le64_to_cpu(rx_desc->qw1);
14168447357eSMateusz Polchlopek /* If DD field (descriptor done) is unset then other fields are
14178447357eSMateusz Polchlopek * not valid
14188447357eSMateusz Polchlopek */
14198447357eSMateusz Polchlopek if (!iavf_is_descriptor_done(qw1, flex))
1420efa14c39SMitch Williams break;
1421a132af24SMitch Williams
14228447357eSMateusz Polchlopek fields = iavf_extract_rx_fields(rx_ring, rx_desc, flex);
14230e626ff7SAlexander Duyck
1424ad64ed8bSJesse Brandeburg iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);
14255fa4caffSAlexander Lobakin
14265fa4caffSAlexander Lobakin rx_buffer = &rx_ring->rx_fqes[rx_ring->next_to_clean];
14272dc8e7c3SJacob Keller if (!libeth_rx_sync_for_cpu(rx_buffer, fields.len))
14285fa4caffSAlexander Lobakin goto skip_data;
14299a064128SAlexander Duyck
1430fa2343e9SAlexander Duyck /* retrieve a buffer from the ring */
1431fa2343e9SAlexander Duyck if (skb)
14322dc8e7c3SJacob Keller iavf_add_rx_frag(skb, rx_buffer, fields.len);
1433fa2343e9SAlexander Duyck else
14342dc8e7c3SJacob Keller skb = iavf_build_skb(rx_buffer, fields.len);
1435fa2343e9SAlexander Duyck
1436fa2343e9SAlexander Duyck /* exit if we failed to retrieve a buffer */
1437fa2343e9SAlexander Duyck if (!skb) {
1438fa2343e9SAlexander Duyck rx_ring->rx_stats.alloc_buff_failed++;
1439ab9ad98eSJesse Brandeburg break;
1440fa2343e9SAlexander Duyck }
1441a132af24SMitch Williams
14425fa4caffSAlexander Lobakin skip_data:
1443a132af24SMitch Williams cleaned_count++;
1444a132af24SMitch Williams
14452dc8e7c3SJacob Keller if (iavf_is_non_eop(rx_ring, fields) || unlikely(!skb))
1446a132af24SMitch Williams continue;
1447a132af24SMitch Williams
14482dc8e7c3SJacob Keller /* The RXE field in the descriptor indicates a MAC error
14492dc8e7c3SJacob Keller * (such as CRC, alignment or oversize). If it is set, drop
14502dc8e7c3SJacob Keller * the packet and move on.
1451ab9ad98eSJesse Brandeburg */
14522dc8e7c3SJacob Keller if (unlikely(fields.rxe)) {
1453a132af24SMitch Williams dev_kfree_skb_any(skb);
1454741b8b83SAlexander Duyck skb = NULL;
1455a132af24SMitch Williams continue;
14567f12ad74SGreg Rose }
14577f12ad74SGreg Rose
145856184e01SJesse Brandeburg if (iavf_cleanup_headers(rx_ring, skb)) {
1459e72e5659SScott Peterson skb = NULL;
1460ab9ad98eSJesse Brandeburg continue;
1461e72e5659SScott Peterson }
1462ab9ad98eSJesse Brandeburg
14637f12ad74SGreg Rose /* probably a little skewed due to removing CRC */
14647f12ad74SGreg Rose total_rx_bytes += skb->len;
14657f12ad74SGreg Rose
1466ab9ad98eSJesse Brandeburg /* populate checksum, VLAN, and protocol */
14678447357eSMateusz Polchlopek iavf_process_skb_fields(rx_ring, rx_desc, skb, fields.ptype, flex);
1468ab9ad98eSJesse Brandeburg
1469ad64ed8bSJesse Brandeburg iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
14702dc8e7c3SJacob Keller iavf_receive_skb(rx_ring, skb, fields.vlan);
1471e72e5659SScott Peterson skb = NULL;
14727f12ad74SGreg Rose
1473ab9ad98eSJesse Brandeburg /* update budget accounting */
1474ab9ad98eSJesse Brandeburg total_rx_packets++;
1475ab9ad98eSJesse Brandeburg }
14767f12ad74SGreg Rose
1477e72e5659SScott Peterson rx_ring->skb = skb;
1478e72e5659SScott Peterson
14797f12ad74SGreg Rose u64_stats_update_begin(&rx_ring->syncp);
14807f12ad74SGreg Rose rx_ring->stats.packets += total_rx_packets;
14817f12ad74SGreg Rose rx_ring->stats.bytes += total_rx_bytes;
14827f12ad74SGreg Rose u64_stats_update_end(&rx_ring->syncp);
14837f12ad74SGreg Rose rx_ring->q_vector->rx.total_packets += total_rx_packets;
14847f12ad74SGreg Rose rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
14857f12ad74SGreg Rose
1486ab9ad98eSJesse Brandeburg /* guarantee a trip back through this routine if there was a failure */
1487b85c94b6SJesse Brandeburg return failure ? budget : (int)total_rx_packets;
14887f12ad74SGreg Rose }
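
/* Illustrative note (not driver code): buffer replenishment in the loop
 * above is batched so the Rx tail register is not bumped for every frame.
 * A sketch of the batching condition, with a hypothetical helper name:
 */
static bool __maybe_unused iavf_should_refill_sketch(u16 cleaned_count)
{
	/* amortize the MMIO tail write across IAVF_RX_BUFFER_WRITE buffers */
	return cleaned_count >= IAVF_RX_BUFFER_WRITE;
}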
14897f12ad74SGreg Rose
iavf_buildreg_itr(const int type,u16 itr)149056184e01SJesse Brandeburg static inline u32 iavf_buildreg_itr(const int type, u16 itr)
14918f5e39ceSJesse Brandeburg {
14928f5e39ceSJesse Brandeburg u32 val;
14938f5e39ceSJesse Brandeburg
14944ff17929SAlexander Duyck /* We don't bother with setting the CLEARPBA bit as the data sheet
14954ff17929SAlexander Duyck * points out doing so is "meaningless since it was already
14964ff17929SAlexander Duyck * auto-cleared". The auto-clearing happens when the interrupt is
14974ff17929SAlexander Duyck * asserted.
14984ff17929SAlexander Duyck *
14994ff17929SAlexander Duyck * Hardware errata 28 also indicates that writing to a
15004ff17929SAlexander Duyck * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear
15014ff17929SAlexander Duyck * an event in the PBA anyway so we need to rely on the automask
15024ff17929SAlexander Duyck * to hold pending events for us until the interrupt is re-enabled
150392418fb1SAlexander Duyck *
150492418fb1SAlexander Duyck * The itr value is reported in microseconds, and the register
150592418fb1SAlexander Duyck * value is recorded in 2 microsecond units. For this reason we
150692418fb1SAlexander Duyck * only need to shift by the interval shift - 1 instead of the
150792418fb1SAlexander Duyck * full value.
15084ff17929SAlexander Duyck */
150956184e01SJesse Brandeburg itr &= IAVF_ITR_MASK;
151092418fb1SAlexander Duyck
1511f1cad2ceSJesse Brandeburg val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
1512f1cad2ceSJesse Brandeburg (type << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
1513f1cad2ceSJesse Brandeburg (itr << (IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT - 1));
15148f5e39ceSJesse Brandeburg
15158f5e39ceSJesse Brandeburg return val;
15168f5e39ceSJesse Brandeburg }
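
/* Worked example (sketch, not driver code): because the register interval
 * field counts 2 usec units and IAVF_ITR_MASK already clears bit 0 of the
 * microsecond value, shifting by (INTERVAL_SHIFT - 1) above is the same as
 * dividing by two and then shifting by INTERVAL_SHIFT. The hypothetical
 * helper below spells that out; e.g. a 50 usec ITR programs 25 units.
 */
static u32 __maybe_unused iavf_itr_to_interval_sketch(u16 itr_usecs)
{
	u16 two_usec_units = (itr_usecs & IAVF_ITR_MASK) / 2;

	return two_usec_units << IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT;
}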
15178f5e39ceSJesse Brandeburg
15188f5e39ceSJesse Brandeburg /* a small macro to shorten up some long lines */
1519f1cad2ceSJesse Brandeburg #define INTREG IAVF_VFINT_DYN_CTLN1
15208f5e39ceSJesse Brandeburg
1521a0073a4bSAlexander Duyck /* The act of updating the ITR will cause it to immediately trigger. In order
1522a0073a4bSAlexander Duyck * to prevent this from throwing off adaptive update statistics we defer the
1523a0073a4bSAlexander Duyck * update so that it can only happen so often. So after either Tx or Rx are
1524a0073a4bSAlexander Duyck * updated we make the adaptive scheme wait until either the ITR completely
1525a0073a4bSAlexander Duyck * expires via the next_update expiration or we have been through at least
1526a0073a4bSAlexander Duyck * 3 interrupts.
1527a0073a4bSAlexander Duyck */
1528a0073a4bSAlexander Duyck #define ITR_COUNTDOWN_START 3
1529a0073a4bSAlexander Duyck
15307f12ad74SGreg Rose /**
153156184e01SJesse Brandeburg * iavf_update_enable_itr - Update itr and re-enable MSIX interrupt
1532de32e3efSCarolyn Wyborny * @vsi: the VSI we care about
1533de32e3efSCarolyn Wyborny * @q_vector: q_vector for which itr is being updated and interrupt enabled
1534de32e3efSCarolyn Wyborny *
1535de32e3efSCarolyn Wyborny **/
iavf_update_enable_itr(struct iavf_vsi * vsi,struct iavf_q_vector * q_vector)153670dc7ab7SJacob Keller static void iavf_update_enable_itr(struct iavf_vsi *vsi,
153756184e01SJesse Brandeburg struct iavf_q_vector *q_vector)
1538de32e3efSCarolyn Wyborny {
1539f349daa5SJesse Brandeburg struct iavf_hw *hw = &vsi->back->hw;
1540556fdfd6SAlexander Duyck u32 intval;
15418f5e39ceSJesse Brandeburg
1542a0073a4bSAlexander Duyck /* These will do nothing if dynamic updates are not enabled */
154356184e01SJesse Brandeburg iavf_update_itr(q_vector, &q_vector->tx);
154456184e01SJesse Brandeburg iavf_update_itr(q_vector, &q_vector->rx);
1545ee2319cfSJesse Brandeburg
1546a0073a4bSAlexander Duyck /* This block of logic allows us to get away with only updating
1547a0073a4bSAlexander Duyck * one ITR value with each interrupt. The idea is to perform a
1548a0073a4bSAlexander Duyck * pseudo-lazy update with the following criteria.
1549a0073a4bSAlexander Duyck *
1550a0073a4bSAlexander Duyck * 1. Rx is given higher priority than Tx if both are in same state
1551a0073a4bSAlexander Duyck * 2. If we must reduce an ITR, that reduction is given highest priority.
1552a0073a4bSAlexander Duyck * 3. We then give priority to the ITR needing the larger increase.
15538f5e39ceSJesse Brandeburg */
1554a0073a4bSAlexander Duyck if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
1555a0073a4bSAlexander Duyck /* Rx ITR needs to be reduced, this is highest priority */
155656184e01SJesse Brandeburg intval = iavf_buildreg_itr(IAVF_RX_ITR,
1557556fdfd6SAlexander Duyck q_vector->rx.target_itr);
1558556fdfd6SAlexander Duyck q_vector->rx.current_itr = q_vector->rx.target_itr;
1559a0073a4bSAlexander Duyck q_vector->itr_countdown = ITR_COUNTDOWN_START;
1560a0073a4bSAlexander Duyck } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
1561a0073a4bSAlexander Duyck ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
1562a0073a4bSAlexander Duyck (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
1563a0073a4bSAlexander Duyck /* Tx ITR needs to be reduced, this is second priority
1564a0073a4bSAlexander Duyck * Tx ITR needs to be increased more than Rx, fourth priority
1565556fdfd6SAlexander Duyck */
156656184e01SJesse Brandeburg intval = iavf_buildreg_itr(IAVF_TX_ITR,
1567556fdfd6SAlexander Duyck q_vector->tx.target_itr);
1568556fdfd6SAlexander Duyck q_vector->tx.current_itr = q_vector->tx.target_itr;
1569a0073a4bSAlexander Duyck q_vector->itr_countdown = ITR_COUNTDOWN_START;
1570a0073a4bSAlexander Duyck } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
1571a0073a4bSAlexander Duyck /* Rx ITR needs to be increased, third priority */
157256184e01SJesse Brandeburg intval = iavf_buildreg_itr(IAVF_RX_ITR,
1573a0073a4bSAlexander Duyck q_vector->rx.target_itr);
1574a0073a4bSAlexander Duyck q_vector->rx.current_itr = q_vector->rx.target_itr;
1575a0073a4bSAlexander Duyck q_vector->itr_countdown = ITR_COUNTDOWN_START;
1576556fdfd6SAlexander Duyck } else {
1577a0073a4bSAlexander Duyck /* No ITR update, lowest priority */
157856184e01SJesse Brandeburg intval = iavf_buildreg_itr(IAVF_ITR_NONE, 0);
1579a0073a4bSAlexander Duyck if (q_vector->itr_countdown)
1580a0073a4bSAlexander Duyck q_vector->itr_countdown--;
1581556fdfd6SAlexander Duyck }
1582556fdfd6SAlexander Duyck
158356184e01SJesse Brandeburg if (!test_bit(__IAVF_VSI_DOWN, vsi->state))
1584556fdfd6SAlexander Duyck wr32(hw, INTREG(q_vector->reg_idx), intval);
1585de32e3efSCarolyn Wyborny }
1586de32e3efSCarolyn Wyborny
1587de32e3efSCarolyn Wyborny /**
1588129cf89eSJesse Brandeburg * iavf_napi_poll - NAPI polling Rx/Tx cleanup routine
15897f12ad74SGreg Rose * @napi: napi struct with our devices info in it
15907f12ad74SGreg Rose * @budget: amount of work driver is allowed to do this pass, in packets
15917f12ad74SGreg Rose *
15927f12ad74SGreg Rose * This function will clean all queues associated with a q_vector.
15937f12ad74SGreg Rose *
15947f12ad74SGreg Rose * Returns the amount of work done
15957f12ad74SGreg Rose **/
iavf_napi_poll(struct napi_struct * napi,int budget)1596129cf89eSJesse Brandeburg int iavf_napi_poll(struct napi_struct *napi, int budget)
15977f12ad74SGreg Rose {
159856184e01SJesse Brandeburg struct iavf_q_vector *q_vector =
159956184e01SJesse Brandeburg container_of(napi, struct iavf_q_vector, napi);
160056184e01SJesse Brandeburg struct iavf_vsi *vsi = q_vector->vsi;
160156184e01SJesse Brandeburg struct iavf_ring *ring;
16027f12ad74SGreg Rose bool clean_complete = true;
1603c29af37fSAnjali Singhai Jain bool arm_wb = false;
16047f12ad74SGreg Rose int budget_per_ring;
160532b3e08fSJesse Brandeburg int work_done = 0;
16067f12ad74SGreg Rose
160756184e01SJesse Brandeburg if (test_bit(__IAVF_VSI_DOWN, vsi->state)) {
16087f12ad74SGreg Rose napi_complete(napi);
16097f12ad74SGreg Rose return 0;
16107f12ad74SGreg Rose }
16117f12ad74SGreg Rose
16127f12ad74SGreg Rose /* Since the actual Tx work is minimal, we can give the Tx a larger
16137f12ad74SGreg Rose * budget and be more aggressive about cleaning up the Tx descriptors.
16147f12ad74SGreg Rose */
161556184e01SJesse Brandeburg iavf_for_each_ring(ring, q_vector->tx) {
161656184e01SJesse Brandeburg if (!iavf_clean_tx_irq(vsi, ring, budget)) {
1617f2edaaaaSAlexander Duyck clean_complete = false;
1618f2edaaaaSAlexander Duyck continue;
1619f2edaaaaSAlexander Duyck }
162097cadd3dSAlexander Lobakin arm_wb |= !!(ring->flags & IAVF_TXR_FLAGS_ARM_WB);
162197cadd3dSAlexander Lobakin ring->flags &= ~IAVF_TXR_FLAGS_ARM_WB;
1622c29af37fSAnjali Singhai Jain }
16237f12ad74SGreg Rose
1624c67cacebSAlexander Duyck /* Handle case where we are called by netpoll with a budget of 0 */
1625c67cacebSAlexander Duyck if (budget <= 0)
1626c67cacebSAlexander Duyck goto tx_only;
1627c67cacebSAlexander Duyck
16287f12ad74SGreg Rose /* We attempt to distribute budget to each Rx queue fairly, but don't
16297f12ad74SGreg Rose * allow the budget to go below 1 because that would exit polling early.
16307f12ad74SGreg Rose */
16317f12ad74SGreg Rose budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
16327f12ad74SGreg Rose
163356184e01SJesse Brandeburg iavf_for_each_ring(ring, q_vector->rx) {
163456184e01SJesse Brandeburg int cleaned = iavf_clean_rx_irq(ring, budget_per_ring);
163532b3e08fSJesse Brandeburg
163632b3e08fSJesse Brandeburg work_done += cleaned;
1637f2edaaaaSAlexander Duyck /* if we clean as many as budgeted, we must not be done */
1638f2edaaaaSAlexander Duyck if (cleaned >= budget_per_ring)
1639f2edaaaaSAlexander Duyck clean_complete = false;
1640a132af24SMitch Williams }
16417f12ad74SGreg Rose
16427f12ad74SGreg Rose /* If work not completed, return budget and polling will return */
1643c29af37fSAnjali Singhai Jain if (!clean_complete) {
164496db776aSAlan Brady int cpu_id = smp_processor_id();
164596db776aSAlan Brady
164696db776aSAlan Brady /* It is possible that the interrupt affinity has changed but,
164796db776aSAlan Brady * if the cpu is pegged at 100%, polling will never exit while
164896db776aSAlan Brady * traffic continues and the interrupt will be stuck on this
164996db776aSAlan Brady * cpu. We check to make sure affinity is correct before we
165096db776aSAlan Brady * continue to poll, otherwise we must stop polling so the
165196db776aSAlan Brady * interrupt can move to the correct cpu.
165296db776aSAlan Brady */
1653b0ca7dc0SAhmed Zaki if (!cpumask_test_cpu(cpu_id,
1654b0ca7dc0SAhmed Zaki &q_vector->napi.config->affinity_mask)) {
16556d977729SJacob Keller /* Tell napi that we are done polling */
16566d977729SJacob Keller napi_complete_done(napi, work_done);
16576d977729SJacob Keller
16586d977729SJacob Keller /* Force an interrupt */
1659129cf89eSJesse Brandeburg iavf_force_wb(vsi, q_vector);
16606d977729SJacob Keller
16616d977729SJacob Keller /* Return budget-1 so that polling stops */
16626d977729SJacob Keller return budget - 1;
16636d977729SJacob Keller }
1664c67cacebSAlexander Duyck tx_only:
1665164c9f54SAnjali Singhai Jain if (arm_wb) {
1666164c9f54SAnjali Singhai Jain q_vector->tx.ring[0].tx_stats.tx_force_wb++;
166756184e01SJesse Brandeburg iavf_enable_wb_on_itr(vsi, q_vector);
1668164c9f54SAnjali Singhai Jain }
16697f12ad74SGreg Rose return budget;
1670c29af37fSAnjali Singhai Jain }
16717f12ad74SGreg Rose
167256184e01SJesse Brandeburg if (vsi->back->flags & IAVF_TXR_FLAGS_WB_ON_ITR)
16738e0764b4SAnjali Singhai Jain q_vector->arm_wb_state = false;
16748e0764b4SAnjali Singhai Jain
16750bcd952fSJesse Brandeburg /* Exit the polling mode, but don't re-enable interrupts if stack might
16760bcd952fSJesse Brandeburg * poll us due to busy-polling
16770bcd952fSJesse Brandeburg */
16780bcd952fSJesse Brandeburg if (likely(napi_complete_done(napi, work_done)))
167956184e01SJesse Brandeburg iavf_update_enable_itr(vsi, q_vector);
168096db776aSAlan Brady
1681349181b7SKaren Sornek return min_t(int, work_done, budget - 1);
16827f12ad74SGreg Rose }
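
/* Worked example (informational, not driver code): with e.g. the default
 * NAPI budget of 64 and four ring pairs on a q_vector, each Rx ring gets
 * max(64 / 4, 1) = 16 descriptors per poll; the max() keeps a heavily
 * over-subscribed vector from dropping below one descriptor per ring,
 * which would end the poll prematurely.
 */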
16837f12ad74SGreg Rose
16847f12ad74SGreg Rose /**
1685129cf89eSJesse Brandeburg * iavf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
16867f12ad74SGreg Rose * @skb: send buffer
16877f12ad74SGreg Rose * @tx_ring: ring to send buffer on
16887f12ad74SGreg Rose * @flags: the tx flags to be set
16897f12ad74SGreg Rose *
16907f12ad74SGreg Rose * Checks the skb and sets up the corresponding generic transmit flags
16917f12ad74SGreg Rose * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
16957f12ad74SGreg Rose **/
iavf_tx_prepare_vlan_flags(struct sk_buff * skb,struct iavf_ring * tx_ring,u32 * flags)1696ccd219d2SBrett Creeley static void iavf_tx_prepare_vlan_flags(struct sk_buff *skb,
1697ccd219d2SBrett Creeley struct iavf_ring *tx_ring, u32 *flags)
16987f12ad74SGreg Rose {
16997f12ad74SGreg Rose u32 tx_flags = 0;
17007f12ad74SGreg Rose
1702ccd219d2SBrett Creeley /* stack will only request hardware VLAN insertion offload for protocols
1703ccd219d2SBrett Creeley * that the driver supports and has enabled
170431eaaccfSGreg Rose */
1705ccd219d2SBrett Creeley if (!skb_vlan_tag_present(skb))
1706ccd219d2SBrett Creeley return;
170731eaaccfSGreg Rose
170856184e01SJesse Brandeburg tx_flags |= skb_vlan_tag_get(skb) << IAVF_TX_FLAGS_VLAN_SHIFT;
1709ccd219d2SBrett Creeley if (tx_ring->flags & IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2) {
1710ccd219d2SBrett Creeley tx_flags |= IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
1711ccd219d2SBrett Creeley } else if (tx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
171256184e01SJesse Brandeburg tx_flags |= IAVF_TX_FLAGS_HW_VLAN;
1713ccd219d2SBrett Creeley } else {
1714ccd219d2SBrett Creeley dev_dbg(tx_ring->dev, "Unsupported Tx VLAN tag location requested\n");
1715ccd219d2SBrett Creeley return;
17167f12ad74SGreg Rose }
17177f12ad74SGreg Rose
17187f12ad74SGreg Rose *flags = tx_flags;
17197f12ad74SGreg Rose }
17207f12ad74SGreg Rose
17217f12ad74SGreg Rose /**
172256184e01SJesse Brandeburg * iavf_tso - set up the tso context descriptor
172352ea3e80SAlexander Duyck * @first: pointer to first Tx buffer for xmit
17247f12ad74SGreg Rose * @hdr_len: ptr to the size of the packet header
17259c883bd3SShannon Nelson * @cd_type_cmd_tso_mss: Quad Word 1
17267f12ad74SGreg Rose *
17277f12ad74SGreg Rose * Returns 0 if no TSO can happen, 1 if TSO is going to happen, or a negative error code
17287f12ad74SGreg Rose **/
iavf_tso(struct iavf_tx_buffer * first,u8 * hdr_len,u64 * cd_type_cmd_tso_mss)172956184e01SJesse Brandeburg static int iavf_tso(struct iavf_tx_buffer *first, u8 *hdr_len,
173052ea3e80SAlexander Duyck u64 *cd_type_cmd_tso_mss)
17317f12ad74SGreg Rose {
173252ea3e80SAlexander Duyck struct sk_buff *skb = first->skb;
173303f9d6a5SAlexander Duyck u64 cd_cmd, cd_tso_len, cd_mss;
1734c777019aSAlexander Duyck union {
1735c777019aSAlexander Duyck struct iphdr *v4;
1736c777019aSAlexander Duyck struct ipv6hdr *v6;
1737c777019aSAlexander Duyck unsigned char *hdr;
1738c777019aSAlexander Duyck } ip;
1739c49a7bc3SAlexander Duyck union {
1740c49a7bc3SAlexander Duyck struct tcphdr *tcp;
17415453205cSAlexander Duyck struct udphdr *udp;
1742c49a7bc3SAlexander Duyck unsigned char *hdr;
1743c49a7bc3SAlexander Duyck } l4;
1744c49a7bc3SAlexander Duyck u32 paylen, l4_offset;
174552ea3e80SAlexander Duyck u16 gso_segs, gso_size;
17467f12ad74SGreg Rose int err;
17477f12ad74SGreg Rose
1748e9f6563dSShannon Nelson if (skb->ip_summed != CHECKSUM_PARTIAL)
1749e9f6563dSShannon Nelson return 0;
1750e9f6563dSShannon Nelson
17517f12ad74SGreg Rose if (!skb_is_gso(skb))
17527f12ad74SGreg Rose return 0;
17537f12ad74SGreg Rose
1754fe6d4aa4SFrancois Romieu err = skb_cow_head(skb, 0);
1755fe6d4aa4SFrancois Romieu if (err < 0)
17567f12ad74SGreg Rose return err;
17577f12ad74SGreg Rose
1758c777019aSAlexander Duyck ip.hdr = skb_network_header(skb);
1759c777019aSAlexander Duyck l4.hdr = skb_transport_header(skb);
176085e76d03SAnjali Singhai
1761c777019aSAlexander Duyck /* initialize outer IP header fields */
1762c777019aSAlexander Duyck if (ip.v4->version == 4) {
1763c777019aSAlexander Duyck ip.v4->tot_len = 0;
1764c777019aSAlexander Duyck ip.v4->check = 0;
1765c49a7bc3SAlexander Duyck } else {
1766c777019aSAlexander Duyck ip.v6->payload_len = 0;
1767c777019aSAlexander Duyck }
1768c777019aSAlexander Duyck
1769577389a5SAlexander Duyck if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
17701c7b4a23SAlexander Duyck SKB_GSO_GRE_CSUM |
17717e13318dSTom Herbert SKB_GSO_IPXIP4 |
1772bf2d1df3SAlexander Duyck SKB_GSO_IPXIP6 |
1773577389a5SAlexander Duyck SKB_GSO_UDP_TUNNEL |
17745453205cSAlexander Duyck SKB_GSO_UDP_TUNNEL_CSUM)) {
17751c7b4a23SAlexander Duyck if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
17761c7b4a23SAlexander Duyck (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
17771c7b4a23SAlexander Duyck l4.udp->len = 0;
17781c7b4a23SAlexander Duyck
17795453205cSAlexander Duyck /* determine offset of outer transport header */
17805453205cSAlexander Duyck l4_offset = l4.hdr - skb->data;
17815453205cSAlexander Duyck
17825453205cSAlexander Duyck /* remove payload length from outer checksum */
178324d41e5eSAlexander Duyck paylen = skb->len - l4_offset;
1784b9c015d4SJacob Keller csum_replace_by_diff(&l4.udp->check,
1785b9c015d4SJacob Keller (__force __wsum)htonl(paylen));
17865453205cSAlexander Duyck }
17875453205cSAlexander Duyck
1788c777019aSAlexander Duyck /* reset pointers to inner headers */
1789c777019aSAlexander Duyck ip.hdr = skb_inner_network_header(skb);
1790c777019aSAlexander Duyck l4.hdr = skb_inner_transport_header(skb);
1791c777019aSAlexander Duyck
1792c777019aSAlexander Duyck /* initialize inner IP header fields */
1793c777019aSAlexander Duyck if (ip.v4->version == 4) {
1794c777019aSAlexander Duyck ip.v4->tot_len = 0;
1795c777019aSAlexander Duyck ip.v4->check = 0;
1796c777019aSAlexander Duyck } else {
1797c777019aSAlexander Duyck ip.v6->payload_len = 0;
1798c777019aSAlexander Duyck }
17997f12ad74SGreg Rose }
18007f12ad74SGreg Rose
1801c49a7bc3SAlexander Duyck /* determine offset of inner transport header */
1802c49a7bc3SAlexander Duyck l4_offset = l4.hdr - skb->data;
1803c49a7bc3SAlexander Duyck /* remove payload length from inner checksum */
180424d41e5eSAlexander Duyck paylen = skb->len - l4_offset;
1805c49a7bc3SAlexander Duyck
1806c91a4f9fSBrett Creeley if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
1807c91a4f9fSBrett Creeley csum_replace_by_diff(&l4.udp->check,
1808c91a4f9fSBrett Creeley (__force __wsum)htonl(paylen));
1809c91a4f9fSBrett Creeley /* compute length of UDP segmentation header */
1810c91a4f9fSBrett Creeley *hdr_len = (u8)sizeof(*l4.udp) + l4_offset;
1811c91a4f9fSBrett Creeley } else {
1812c91a4f9fSBrett Creeley csum_replace_by_diff(&l4.tcp->check,
1813c91a4f9fSBrett Creeley (__force __wsum)htonl(paylen));
1814c91a4f9fSBrett Creeley /* compute length of TCP segmentation header */
1815c91a4f9fSBrett Creeley *hdr_len = (u8)((l4.tcp->doff * 4) + l4_offset);
1816c91a4f9fSBrett Creeley }
18177f12ad74SGreg Rose
181852ea3e80SAlexander Duyck /* pull values out of skb_shinfo */
181952ea3e80SAlexander Duyck gso_size = skb_shinfo(skb)->gso_size;
182052ea3e80SAlexander Duyck gso_segs = skb_shinfo(skb)->gso_segs;
182152ea3e80SAlexander Duyck
182252ea3e80SAlexander Duyck /* update GSO size and bytecount with header size */
182352ea3e80SAlexander Duyck first->gso_segs = gso_segs;
182452ea3e80SAlexander Duyck first->bytecount += (first->gso_segs - 1) * *hdr_len;
182552ea3e80SAlexander Duyck
18267f12ad74SGreg Rose /* find the field values */
182756184e01SJesse Brandeburg cd_cmd = IAVF_TX_CTX_DESC_TSO;
18287f12ad74SGreg Rose cd_tso_len = skb->len - *hdr_len;
182952ea3e80SAlexander Duyck cd_mss = gso_size;
183056184e01SJesse Brandeburg *cd_type_cmd_tso_mss |= (cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
183156184e01SJesse Brandeburg (cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
183256184e01SJesse Brandeburg (cd_mss << IAVF_TXD_CTX_QW1_MSS_SHIFT);
18337f12ad74SGreg Rose return 1;
18347f12ad74SGreg Rose }
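
/* Worked example (hypothetical numbers, not driver code): the bytecount
 * adjustment in iavf_tso() accounts for the header being replicated in
 * every segment on the wire. For a TSO skb of 7306 bytes with a 66-byte
 * header and gso_size 1448, gso_segs is 5 and the wire byte count becomes
 * 7306 + (5 - 1) * 66 = 7570, i.e. 5 segments of 1514 bytes each.
 */
static u32 __maybe_unused iavf_tso_wire_bytes_sketch(u32 skb_len, u16 gso_segs,
						     u8 hdr_len)
{
	/* every segment after the first repeats the header on the wire */
	return skb_len + (gso_segs - 1) * hdr_len;
}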
18357f12ad74SGreg Rose
18367f12ad74SGreg Rose /**
183756184e01SJesse Brandeburg * iavf_tx_enable_csum - Enable Tx checksum offloads
18387f12ad74SGreg Rose * @skb: send buffer
183989232c3bSAnjali Singhai Jain * @tx_flags: pointer to Tx flags currently set
18407f12ad74SGreg Rose * @td_cmd: Tx descriptor command bits to set
18417f12ad74SGreg Rose * @td_offset: Tx descriptor header offsets to set
1842529f1f65SAlexander Duyck * @tx_ring: Tx descriptor ring
18437f12ad74SGreg Rose * @cd_tunneling: ptr to context desc bits
18447f12ad74SGreg Rose **/
iavf_tx_enable_csum(struct sk_buff * skb,u32 * tx_flags,u32 * td_cmd,u32 * td_offset,struct iavf_ring * tx_ring,u32 * cd_tunneling)184556184e01SJesse Brandeburg static int iavf_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
18467f12ad74SGreg Rose u32 *td_cmd, u32 *td_offset,
184756184e01SJesse Brandeburg struct iavf_ring *tx_ring,
18487f12ad74SGreg Rose u32 *cd_tunneling)
18497f12ad74SGreg Rose {
1850b96b78f2SAlexander Duyck union {
1851b96b78f2SAlexander Duyck struct iphdr *v4;
1852b96b78f2SAlexander Duyck struct ipv6hdr *v6;
1853b96b78f2SAlexander Duyck unsigned char *hdr;
1854b96b78f2SAlexander Duyck } ip;
1855b96b78f2SAlexander Duyck union {
1856b96b78f2SAlexander Duyck struct tcphdr *tcp;
1857b96b78f2SAlexander Duyck struct udphdr *udp;
1858b96b78f2SAlexander Duyck unsigned char *hdr;
1859b96b78f2SAlexander Duyck } l4;
1860a3fd9d88SAlexander Duyck unsigned char *exthdr;
1861d1bd743bSJesse Brandeburg u32 offset, cmd = 0;
1862a3fd9d88SAlexander Duyck __be16 frag_off;
1863b96b78f2SAlexander Duyck u8 l4_proto = 0;
1864b96b78f2SAlexander Duyck
1865529f1f65SAlexander Duyck if (skb->ip_summed != CHECKSUM_PARTIAL)
1866529f1f65SAlexander Duyck return 0;
1867529f1f65SAlexander Duyck
1868b96b78f2SAlexander Duyck ip.hdr = skb_network_header(skb);
1869b96b78f2SAlexander Duyck l4.hdr = skb_transport_header(skb);
18707f12ad74SGreg Rose
1871475b4205SAlexander Duyck /* compute outer L2 header size */
1872f1cad2ceSJesse Brandeburg offset = ((ip.hdr - skb->data) / 2) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
1873475b4205SAlexander Duyck
18747f12ad74SGreg Rose if (skb->encapsulation) {
1875d1bd743bSJesse Brandeburg u32 tunnel = 0;
1876a0064728SAlexander Duyck /* define outer network header type */
187756184e01SJesse Brandeburg if (*tx_flags & IAVF_TX_FLAGS_IPV4) {
187856184e01SJesse Brandeburg tunnel |= (*tx_flags & IAVF_TX_FLAGS_TSO) ?
187956184e01SJesse Brandeburg IAVF_TX_CTX_EXT_IP_IPV4 :
188056184e01SJesse Brandeburg IAVF_TX_CTX_EXT_IP_IPV4_NO_CSUM;
1881475b4205SAlexander Duyck
1882a0064728SAlexander Duyck l4_proto = ip.v4->protocol;
188356184e01SJesse Brandeburg } else if (*tx_flags & IAVF_TX_FLAGS_IPV6) {
188456184e01SJesse Brandeburg tunnel |= IAVF_TX_CTX_EXT_IP_IPV6;
1885a3fd9d88SAlexander Duyck
1886a3fd9d88SAlexander Duyck exthdr = ip.hdr + sizeof(*ip.v6);
1887a0064728SAlexander Duyck l4_proto = ip.v6->nexthdr;
1888a3fd9d88SAlexander Duyck if (l4.hdr != exthdr)
1889a3fd9d88SAlexander Duyck ipv6_skip_exthdr(skb, exthdr - skb->data,
1890a3fd9d88SAlexander Duyck &l4_proto, &frag_off);
1891a0064728SAlexander Duyck }
1892a0064728SAlexander Duyck
1893a0064728SAlexander Duyck /* define outer transport */
1894a0064728SAlexander Duyck switch (l4_proto) {
189545991204SAnjali Singhai Jain case IPPROTO_UDP:
189656184e01SJesse Brandeburg tunnel |= IAVF_TXD_CTX_UDP_TUNNELING;
189756184e01SJesse Brandeburg *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
189845991204SAnjali Singhai Jain break;
1899a0064728SAlexander Duyck case IPPROTO_GRE:
190056184e01SJesse Brandeburg tunnel |= IAVF_TXD_CTX_GRE_TUNNELING;
190156184e01SJesse Brandeburg *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
1902a0064728SAlexander Duyck break;
1903577389a5SAlexander Duyck case IPPROTO_IPIP:
1904577389a5SAlexander Duyck case IPPROTO_IPV6:
190556184e01SJesse Brandeburg *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
1906577389a5SAlexander Duyck l4.hdr = skb_inner_network_header(skb);
1907577389a5SAlexander Duyck break;
190845991204SAnjali Singhai Jain default:
190956184e01SJesse Brandeburg if (*tx_flags & IAVF_TX_FLAGS_TSO)
1910529f1f65SAlexander Duyck return -1;
1911529f1f65SAlexander Duyck
1912529f1f65SAlexander Duyck skb_checksum_help(skb);
1913529f1f65SAlexander Duyck return 0;
191445991204SAnjali Singhai Jain }
1915b96b78f2SAlexander Duyck
1916577389a5SAlexander Duyck /* compute outer L3 header size */
1917577389a5SAlexander Duyck tunnel |= ((l4.hdr - ip.hdr) / 4) <<
191856184e01SJesse Brandeburg IAVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
1919577389a5SAlexander Duyck
1920577389a5SAlexander Duyck /* switch IP header pointer from outer to inner header */
1921577389a5SAlexander Duyck ip.hdr = skb_inner_network_header(skb);
1922577389a5SAlexander Duyck
1923475b4205SAlexander Duyck /* compute tunnel header size */
1924475b4205SAlexander Duyck tunnel |= ((ip.hdr - l4.hdr) / 2) <<
192556184e01SJesse Brandeburg IAVF_TXD_CTX_QW0_NATLEN_SHIFT;
1926475b4205SAlexander Duyck
19275453205cSAlexander Duyck /* indicate if we need to offload outer UDP header */
192856184e01SJesse Brandeburg if ((*tx_flags & IAVF_TX_FLAGS_TSO) &&
19291c7b4a23SAlexander Duyck !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
19305453205cSAlexander Duyck (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
193156184e01SJesse Brandeburg tunnel |= IAVF_TXD_CTX_QW0_L4T_CS_MASK;
19325453205cSAlexander Duyck
1933475b4205SAlexander Duyck /* record tunnel offload values */
1934475b4205SAlexander Duyck *cd_tunneling |= tunnel;
1935475b4205SAlexander Duyck
1936b96b78f2SAlexander Duyck /* switch L4 header pointer from outer to inner */
1937b96b78f2SAlexander Duyck l4.hdr = skb_inner_transport_header(skb);
1938a0064728SAlexander Duyck l4_proto = 0;
19397f12ad74SGreg Rose
1940a0064728SAlexander Duyck /* reset type as we transition from outer to inner headers */
194156184e01SJesse Brandeburg *tx_flags &= ~(IAVF_TX_FLAGS_IPV4 | IAVF_TX_FLAGS_IPV6);
1942a0064728SAlexander Duyck if (ip.v4->version == 4)
194356184e01SJesse Brandeburg *tx_flags |= IAVF_TX_FLAGS_IPV4;
1944a0064728SAlexander Duyck if (ip.v6->version == 6)
194556184e01SJesse Brandeburg *tx_flags |= IAVF_TX_FLAGS_IPV6;
194685e76d03SAnjali Singhai }
19477f12ad74SGreg Rose
19487f12ad74SGreg Rose /* Enable IP checksum offloads */
194956184e01SJesse Brandeburg if (*tx_flags & IAVF_TX_FLAGS_IPV4) {
1950b96b78f2SAlexander Duyck l4_proto = ip.v4->protocol;
19517f12ad74SGreg Rose /* the stack computes the IP header already, the only time we
19527f12ad74SGreg Rose * need the hardware to recompute it is in the case of TSO.
19537f12ad74SGreg Rose */
195456184e01SJesse Brandeburg cmd |= (*tx_flags & IAVF_TX_FLAGS_TSO) ?
1955f1cad2ceSJesse Brandeburg IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM :
1956f1cad2ceSJesse Brandeburg IAVF_TX_DESC_CMD_IIPT_IPV4;
195756184e01SJesse Brandeburg } else if (*tx_flags & IAVF_TX_FLAGS_IPV6) {
1958f1cad2ceSJesse Brandeburg cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
1959a3fd9d88SAlexander Duyck
1960a3fd9d88SAlexander Duyck exthdr = ip.hdr + sizeof(*ip.v6);
1961a3fd9d88SAlexander Duyck l4_proto = ip.v6->nexthdr;
1962a3fd9d88SAlexander Duyck if (l4.hdr != exthdr)
1963a3fd9d88SAlexander Duyck ipv6_skip_exthdr(skb, exthdr - skb->data,
1964a3fd9d88SAlexander Duyck &l4_proto, &frag_off);
19657f12ad74SGreg Rose }
1966b96b78f2SAlexander Duyck
1967475b4205SAlexander Duyck /* compute inner L3 header size */
1968f1cad2ceSJesse Brandeburg offset |= ((l4.hdr - ip.hdr) / 4) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
19697f12ad74SGreg Rose
19707f12ad74SGreg Rose /* Enable L4 checksum offloads */
1971b96b78f2SAlexander Duyck switch (l4_proto) {
19727f12ad74SGreg Rose case IPPROTO_TCP:
19737f12ad74SGreg Rose /* enable checksum offloads */
1974f1cad2ceSJesse Brandeburg cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
1975f1cad2ceSJesse Brandeburg offset |= l4.tcp->doff << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
19767f12ad74SGreg Rose break;
19777f12ad74SGreg Rose case IPPROTO_SCTP:
19787f12ad74SGreg Rose /* enable SCTP checksum offload */
1979f1cad2ceSJesse Brandeburg cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
1980475b4205SAlexander Duyck offset |= (sizeof(struct sctphdr) >> 2) <<
1981f1cad2ceSJesse Brandeburg IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
19827f12ad74SGreg Rose break;
19837f12ad74SGreg Rose case IPPROTO_UDP:
19847f12ad74SGreg Rose /* enable UDP checksum offload */
1985f1cad2ceSJesse Brandeburg cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
1986475b4205SAlexander Duyck offset |= (sizeof(struct udphdr) >> 2) <<
1987f1cad2ceSJesse Brandeburg IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
19887f12ad74SGreg Rose break;
19897f12ad74SGreg Rose default:
199056184e01SJesse Brandeburg if (*tx_flags & IAVF_TX_FLAGS_TSO)
1991529f1f65SAlexander Duyck return -1;
1992529f1f65SAlexander Duyck skb_checksum_help(skb);
1993529f1f65SAlexander Duyck return 0;
19947f12ad74SGreg Rose }
1995475b4205SAlexander Duyck
1996475b4205SAlexander Duyck *td_cmd |= cmd;
1997475b4205SAlexander Duyck *td_offset |= offset;
1998529f1f65SAlexander Duyck
1999529f1f65SAlexander Duyck return 1;
20007f12ad74SGreg Rose }
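
/* Worked example (sketch, not driver code): td_offset packs the header
 * lengths in hardware units - MACLEN in 2-byte words, IPLEN and L4LEN in
 * 4-byte words. A plain untagged IPv4/TCP frame (14/20/20 byte headers)
 * therefore encodes as 7/5/5; the helper name below is hypothetical.
 */
static u32 __maybe_unused iavf_td_offset_ipv4_tcp_sketch(void)
{
	return (7 << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT) |
	       (5 << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT) |
	       (5 << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT);
}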
20017f12ad74SGreg Rose
20027f12ad74SGreg Rose /**
2003262de08fSJesse Brandeburg * iavf_create_tx_ctx - Build the Tx context descriptor
20047f12ad74SGreg Rose * @tx_ring: ring to create the descriptor on
20057f12ad74SGreg Rose * @cd_type_cmd_tso_mss: Quad Word 1
20067f12ad74SGreg Rose * @cd_tunneling: Quad Word 0 - bits 0-31
20077f12ad74SGreg Rose * @cd_l2tag2: Quad Word 0 - bits 32-63
20087f12ad74SGreg Rose **/
iavf_create_tx_ctx(struct iavf_ring * tx_ring,const u64 cd_type_cmd_tso_mss,const u32 cd_tunneling,const u32 cd_l2tag2)200956184e01SJesse Brandeburg static void iavf_create_tx_ctx(struct iavf_ring *tx_ring,
20107f12ad74SGreg Rose const u64 cd_type_cmd_tso_mss,
20117f12ad74SGreg Rose const u32 cd_tunneling, const u32 cd_l2tag2)
20127f12ad74SGreg Rose {
201356184e01SJesse Brandeburg struct iavf_tx_context_desc *context_desc;
20147f12ad74SGreg Rose int i = tx_ring->next_to_use;
20157f12ad74SGreg Rose
2016f1cad2ceSJesse Brandeburg if ((cd_type_cmd_tso_mss == IAVF_TX_DESC_DTYPE_CONTEXT) &&
2017ff40dd5dSJesse Brandeburg !cd_tunneling && !cd_l2tag2)
20187f12ad74SGreg Rose return;
20197f12ad74SGreg Rose
20207f12ad74SGreg Rose /* grab the next descriptor */
2021f1cad2ceSJesse Brandeburg context_desc = IAVF_TX_CTXTDESC(tx_ring, i);
20227f12ad74SGreg Rose
20237f12ad74SGreg Rose i++;
20247f12ad74SGreg Rose tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
20257f12ad74SGreg Rose
20267f12ad74SGreg Rose /* cpu_to_le32 and assign to struct fields */
20277f12ad74SGreg Rose context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
20287f12ad74SGreg Rose context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
20293efbbb20SJesse Brandeburg context_desc->rsvd = cpu_to_le16(0);
20307f12ad74SGreg Rose context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
20317f12ad74SGreg Rose }
20327f12ad74SGreg Rose
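/* Minimal sketch of how a caller is expected to compose the quad word handed
 * to iavf_create_tx_ctx() for a TSO frame.  The TSO command and shift macro
 * names below are assumptions for illustration (they mirror the CMD/IL2TAG2
 * macros used later in this file), not a copy of iavf_tso().
 */
static inline u64 iavf_example_tso_ctx_qw1(u32 tso_len, u16 mss)
{
	u64 qw1 = IAVF_TX_DESC_DTYPE_CONTEXT;

	/* request TSO and encode the payload length and MSS */
	qw1 |= (u64)IAVF_TX_CTX_DESC_TSO << IAVF_TXD_CTX_QW1_CMD_SHIFT;
	qw1 |= (u64)tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT;
	qw1 |= (u64)mss << IAVF_TXD_CTX_QW1_MSS_SHIFT;

	return qw1;
}
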
20337f12ad74SGreg Rose /**
2034129cf89eSJesse Brandeburg * __iavf_chk_linearize - Check if there are more than 8 buffers per packet
203571da6197SAnjali Singhai * @skb: send buffer
203671da6197SAnjali Singhai *
20373f3f7cb8SAlexander Duyck * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
20383f3f7cb8SAlexander Duyck * and so we need to figure out the cases where we need to linearize the skb.
20393f3f7cb8SAlexander Duyck *
20403f3f7cb8SAlexander Duyck * For TSO we need to count the TSO header and segment payload separately.
20413f3f7cb8SAlexander Duyck * As such we need to check cases where we have 7 fragments or more as we
20423f3f7cb8SAlexander Duyck * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
20433f3f7cb8SAlexander Duyck * the segment payload in the first descriptor, and another 7 for the
20443f3f7cb8SAlexander Duyck * fragments.
204571da6197SAnjali Singhai **/
__iavf_chk_linearize(struct sk_buff * skb)2046129cf89eSJesse Brandeburg bool __iavf_chk_linearize(struct sk_buff *skb)
204771da6197SAnjali Singhai {
2048d7840976SMatthew Wilcox (Oracle) const skb_frag_t *frag, *stale;
20493f3f7cb8SAlexander Duyck int nr_frags, sum;
205071da6197SAnjali Singhai
20513f3f7cb8SAlexander Duyck /* no need to check if number of frags is less than 7 */
20522d37490bSAlexander Duyck nr_frags = skb_shinfo(skb)->nr_frags;
205356184e01SJesse Brandeburg if (nr_frags < (IAVF_MAX_BUFFER_TXD - 1))
20542d37490bSAlexander Duyck return false;
205571da6197SAnjali Singhai
20562d37490bSAlexander Duyck /* We need to walk through the list and validate that each group
2057841493a3SAlexander Duyck * of 6 fragments totals at least gso_size.
20582d37490bSAlexander Duyck */
205956184e01SJesse Brandeburg nr_frags -= IAVF_MAX_BUFFER_TXD - 2;
206071da6197SAnjali Singhai frag = &skb_shinfo(skb)->frags[0];
20612d37490bSAlexander Duyck
20622d37490bSAlexander Duyck 	/* Initialize sum to 1 - gso_size, the negative of (gso_size - 1). We
20632d37490bSAlexander Duyck 	 * use this as the worst case scenario in which the frag ahead
20642d37490bSAlexander Duyck * of us only provides one byte which is why we are limited to 6
20652d37490bSAlexander Duyck * descriptors for a single transmit as the header and previous
20662d37490bSAlexander Duyck * fragment are already consuming 2 descriptors.
20672d37490bSAlexander Duyck */
20683f3f7cb8SAlexander Duyck sum = 1 - skb_shinfo(skb)->gso_size;
20692d37490bSAlexander Duyck
20703f3f7cb8SAlexander Duyck /* Add size of frags 0 through 4 to create our initial sum */
20713f3f7cb8SAlexander Duyck sum += skb_frag_size(frag++);
20723f3f7cb8SAlexander Duyck sum += skb_frag_size(frag++);
20733f3f7cb8SAlexander Duyck sum += skb_frag_size(frag++);
20743f3f7cb8SAlexander Duyck sum += skb_frag_size(frag++);
20753f3f7cb8SAlexander Duyck sum += skb_frag_size(frag++);
20762d37490bSAlexander Duyck
20772d37490bSAlexander Duyck /* Walk through fragments adding latest fragment, testing it, and
20782d37490bSAlexander Duyck * then removing stale fragments from the sum.
20792d37490bSAlexander Duyck */
2080248de22eSAlexander Duyck for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
2081248de22eSAlexander Duyck int stale_size = skb_frag_size(stale);
2082248de22eSAlexander Duyck
20833f3f7cb8SAlexander Duyck sum += skb_frag_size(frag++);
20842d37490bSAlexander Duyck
2085248de22eSAlexander Duyck /* The stale fragment may present us with a smaller
2086248de22eSAlexander Duyck * descriptor than the actual fragment size. To account
2087248de22eSAlexander Duyck * for that we need to remove all the data on the front and
2088248de22eSAlexander Duyck * figure out what the remainder would be in the last
2089248de22eSAlexander Duyck * descriptor associated with the fragment.
2090248de22eSAlexander Duyck */
209156184e01SJesse Brandeburg if (stale_size > IAVF_MAX_DATA_PER_TXD) {
2092b54c9d5bSJonathan Lemon int align_pad = -(skb_frag_off(stale)) &
209356184e01SJesse Brandeburg (IAVF_MAX_READ_REQ_SIZE - 1);
2094248de22eSAlexander Duyck
2095248de22eSAlexander Duyck sum -= align_pad;
2096248de22eSAlexander Duyck stale_size -= align_pad;
2097248de22eSAlexander Duyck
2098248de22eSAlexander Duyck do {
209956184e01SJesse Brandeburg sum -= IAVF_MAX_DATA_PER_TXD_ALIGNED;
210056184e01SJesse Brandeburg stale_size -= IAVF_MAX_DATA_PER_TXD_ALIGNED;
210156184e01SJesse Brandeburg } while (stale_size > IAVF_MAX_DATA_PER_TXD);
2102248de22eSAlexander Duyck }
2103248de22eSAlexander Duyck
21042d37490bSAlexander Duyck /* if sum is negative we failed to make sufficient progress */
21052d37490bSAlexander Duyck if (sum < 0)
21062d37490bSAlexander Duyck return true;
21072d37490bSAlexander Duyck
2108841493a3SAlexander Duyck if (!nr_frags--)
210971da6197SAnjali Singhai break;
21102d37490bSAlexander Duyck
2111248de22eSAlexander Duyck sum -= stale_size;
211271da6197SAnjali Singhai }
211371da6197SAnjali Singhai
21142d37490bSAlexander Duyck return false;
211571da6197SAnjali Singhai }
211671da6197SAnjali Singhai
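/* Worked example for the sliding-window check above, assuming
 * IAVF_MAX_BUFFER_TXD is 8 as the kernel-doc implies: with gso_size = 4000
 * and seven 500-byte fragments, sum starts at 1 - 4000 = -3999, fragments
 * 0-4 bring it to -1499, and adding fragment 5 only reaches -999, so the
 * window of header + 6 fragments cannot cover one segment and the skb is
 * linearized.  With seven 1000-byte fragments the same window reaches
 * +2001, every later window stays positive, and no linearization is needed.
 */
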
211771da6197SAnjali Singhai /**
2118129cf89eSJesse Brandeburg * __iavf_maybe_stop_tx - 2nd level check for tx stop conditions
21198f6a2b05SJesse Brandeburg * @tx_ring: the ring to be checked
21208f6a2b05SJesse Brandeburg * @size: the size buffer we want to assure is available
21218f6a2b05SJesse Brandeburg *
21228f6a2b05SJesse Brandeburg * Returns -EBUSY if a stop is needed, else 0
21238f6a2b05SJesse Brandeburg **/
__iavf_maybe_stop_tx(struct iavf_ring * tx_ring,int size)212456184e01SJesse Brandeburg int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
21258f6a2b05SJesse Brandeburg {
21268f6a2b05SJesse Brandeburg netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
21278f6a2b05SJesse Brandeburg /* Memory barrier before checking head and tail */
21288f6a2b05SJesse Brandeburg smp_mb();
21298f6a2b05SJesse Brandeburg
21308f6a2b05SJesse Brandeburg 	/* Check again in case another CPU has just made room available. */
213156184e01SJesse Brandeburg if (likely(IAVF_DESC_UNUSED(tx_ring) < size))
21328f6a2b05SJesse Brandeburg return -EBUSY;
21338f6a2b05SJesse Brandeburg
21348f6a2b05SJesse Brandeburg /* A reprieve! - use start_queue because it doesn't call schedule */
21358f6a2b05SJesse Brandeburg netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
21368f6a2b05SJesse Brandeburg ++tx_ring->tx_stats.restart_queue;
21378f6a2b05SJesse Brandeburg return 0;
21388f6a2b05SJesse Brandeburg }
21398f6a2b05SJesse Brandeburg
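/* Sketch of the first-level check that pairs with __iavf_maybe_stop_tx()
 * above (the real inline wrapper lives in the ring header; the name here is
 * hypothetical): the queue is only stopped, with the memory barrier and
 * re-check above, once the free-descriptor count looks insufficient.
 */
static inline int iavf_example_maybe_stop_tx(struct iavf_ring *tx_ring,
					     int size)
{
	if (likely(IAVF_DESC_UNUSED(tx_ring) >= size))
		return 0;

	return __iavf_maybe_stop_tx(tx_ring, size);
}
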
21408f6a2b05SJesse Brandeburg /**
2141129cf89eSJesse Brandeburg * iavf_tx_map - Build the Tx descriptor
21427f12ad74SGreg Rose * @tx_ring: ring to send buffer on
21437f12ad74SGreg Rose * @skb: send buffer
21447f12ad74SGreg Rose  * @first: first buffer info entry to use for this packet
21457f12ad74SGreg Rose * @tx_flags: collected send information
21467f12ad74SGreg Rose * @hdr_len: size of the packet header
21477f12ad74SGreg Rose * @td_cmd: the command field in the descriptor
21487f12ad74SGreg Rose * @td_offset: offset for checksum or crc
21497f12ad74SGreg Rose **/
iavf_tx_map(struct iavf_ring * tx_ring,struct sk_buff * skb,struct iavf_tx_buffer * first,u32 tx_flags,const u8 hdr_len,u32 td_cmd,u32 td_offset)215070dc7ab7SJacob Keller static void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
215156184e01SJesse Brandeburg struct iavf_tx_buffer *first, u32 tx_flags,
21527f12ad74SGreg Rose const u8 hdr_len, u32 td_cmd, u32 td_offset)
21537f12ad74SGreg Rose {
21547f12ad74SGreg Rose unsigned int data_len = skb->data_len;
21557f12ad74SGreg Rose unsigned int size = skb_headlen(skb);
2156d7840976SMatthew Wilcox (Oracle) skb_frag_t *frag;
215756184e01SJesse Brandeburg struct iavf_tx_buffer *tx_bi;
215856184e01SJesse Brandeburg struct iavf_tx_desc *tx_desc;
21597f12ad74SGreg Rose u16 i = tx_ring->next_to_use;
21607f12ad74SGreg Rose u32 td_tag = 0;
21617f12ad74SGreg Rose dma_addr_t dma;
21627f12ad74SGreg Rose
216356184e01SJesse Brandeburg if (tx_flags & IAVF_TX_FLAGS_HW_VLAN) {
2164f1cad2ceSJesse Brandeburg td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
216565db56d5SJesse Brandeburg td_tag = FIELD_GET(IAVF_TX_FLAGS_VLAN_MASK, tx_flags);
21667f12ad74SGreg Rose }
21677f12ad74SGreg Rose
21687f12ad74SGreg Rose first->tx_flags = tx_flags;
21697f12ad74SGreg Rose
21707f12ad74SGreg Rose dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
21717f12ad74SGreg Rose
2172f1cad2ceSJesse Brandeburg tx_desc = IAVF_TX_DESC(tx_ring, i);
21737f12ad74SGreg Rose tx_bi = first;
21747f12ad74SGreg Rose
21757f12ad74SGreg Rose for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
217656184e01SJesse Brandeburg unsigned int max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED;
21775c4654daSAlexander Duyck
21787f12ad74SGreg Rose if (dma_mapping_error(tx_ring->dev, dma))
21797f12ad74SGreg Rose goto dma_error;
21807f12ad74SGreg Rose
21817f12ad74SGreg Rose /* record length, and DMA address */
21827f12ad74SGreg Rose dma_unmap_len_set(tx_bi, len, size);
21837f12ad74SGreg Rose dma_unmap_addr_set(tx_bi, dma, dma);
21847f12ad74SGreg Rose
21855c4654daSAlexander Duyck /* align size to end of page */
218656184e01SJesse Brandeburg max_data += -dma & (IAVF_MAX_READ_REQ_SIZE - 1);
21877f12ad74SGreg Rose tx_desc->buffer_addr = cpu_to_le64(dma);
21887f12ad74SGreg Rose
218956184e01SJesse Brandeburg while (unlikely(size > IAVF_MAX_DATA_PER_TXD)) {
21907f12ad74SGreg Rose tx_desc->cmd_type_offset_bsz =
21917f12ad74SGreg Rose build_ctob(td_cmd, td_offset,
21925c4654daSAlexander Duyck max_data, td_tag);
21937f12ad74SGreg Rose
21947f12ad74SGreg Rose tx_desc++;
21957f12ad74SGreg Rose i++;
21966a7fded7SAnjali Singhai Jain
21977f12ad74SGreg Rose if (i == tx_ring->count) {
2198f1cad2ceSJesse Brandeburg tx_desc = IAVF_TX_DESC(tx_ring, 0);
21997f12ad74SGreg Rose i = 0;
22007f12ad74SGreg Rose }
22017f12ad74SGreg Rose
22025c4654daSAlexander Duyck dma += max_data;
22035c4654daSAlexander Duyck size -= max_data;
22047f12ad74SGreg Rose
220556184e01SJesse Brandeburg max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED;
22067f12ad74SGreg Rose tx_desc->buffer_addr = cpu_to_le64(dma);
22077f12ad74SGreg Rose }
22087f12ad74SGreg Rose
22097f12ad74SGreg Rose if (likely(!data_len))
22107f12ad74SGreg Rose break;
22117f12ad74SGreg Rose
22127f12ad74SGreg Rose tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
22137f12ad74SGreg Rose size, td_tag);
22147f12ad74SGreg Rose
22157f12ad74SGreg Rose tx_desc++;
22167f12ad74SGreg Rose i++;
22176a7fded7SAnjali Singhai Jain
22187f12ad74SGreg Rose if (i == tx_ring->count) {
2219f1cad2ceSJesse Brandeburg tx_desc = IAVF_TX_DESC(tx_ring, 0);
22207f12ad74SGreg Rose i = 0;
22217f12ad74SGreg Rose }
22227f12ad74SGreg Rose
22237f12ad74SGreg Rose size = skb_frag_size(frag);
22247f12ad74SGreg Rose data_len -= size;
22257f12ad74SGreg Rose
22267f12ad74SGreg Rose dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
22277f12ad74SGreg Rose DMA_TO_DEVICE);
22287f12ad74SGreg Rose
22297f12ad74SGreg Rose tx_bi = &tx_ring->tx_bi[i];
22307f12ad74SGreg Rose }
22317f12ad74SGreg Rose
22321dc8b538SAlexander Duyck netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
22337f12ad74SGreg Rose
22347f12ad74SGreg Rose i++;
22357f12ad74SGreg Rose if (i == tx_ring->count)
22367f12ad74SGreg Rose i = 0;
22377f12ad74SGreg Rose
22387f12ad74SGreg Rose tx_ring->next_to_use = i;
22397f12ad74SGreg Rose
224056184e01SJesse Brandeburg iavf_maybe_stop_tx(tx_ring, DESC_NEEDED);
22416a7fded7SAnjali Singhai Jain
2242b1cb07dbSPreethi Banala /* write last descriptor with RS and EOP bits */
224356184e01SJesse Brandeburg td_cmd |= IAVF_TXD_CMD;
22446a7fded7SAnjali Singhai Jain tx_desc->cmd_type_offset_bsz =
22451dc8b538SAlexander Duyck build_ctob(td_cmd, td_offset, size, td_tag);
22466a7fded7SAnjali Singhai Jain
2247a9e51058SJacob Keller skb_tx_timestamp(skb);
2248a9e51058SJacob Keller
22491dc8b538SAlexander Duyck /* Force memory writes to complete before letting h/w know there
22501dc8b538SAlexander Duyck * are new descriptors to fetch.
22511dc8b538SAlexander Duyck *
22521dc8b538SAlexander Duyck * We also use this memory barrier to make certain all of the
22531dc8b538SAlexander Duyck * status bits have been updated before next_to_watch is written.
22546a7fded7SAnjali Singhai Jain */
22556a7fded7SAnjali Singhai Jain wmb();
22561dc8b538SAlexander Duyck
22571dc8b538SAlexander Duyck /* set next_to_watch value indicating a packet is present */
22581dc8b538SAlexander Duyck first->next_to_watch = tx_desc;
22591dc8b538SAlexander Duyck
22601dc8b538SAlexander Duyck /* notify HW of packet */
22616b16f9eeSFlorian Westphal if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
22626a7fded7SAnjali Singhai Jain writel(i, tx_ring->tail);
22636a7fded7SAnjali Singhai Jain }
22641dc8b538SAlexander Duyck
22657f12ad74SGreg Rose return;
22667f12ad74SGreg Rose
22677f12ad74SGreg Rose dma_error:
22687f12ad74SGreg Rose dev_info(tx_ring->dev, "TX DMA map failed\n");
22697f12ad74SGreg Rose
22707f12ad74SGreg Rose /* clear dma mappings for failed tx_bi map */
22717f12ad74SGreg Rose for (;;) {
22727f12ad74SGreg Rose tx_bi = &tx_ring->tx_bi[i];
227356184e01SJesse Brandeburg iavf_unmap_and_free_tx_resource(tx_ring, tx_bi);
22747f12ad74SGreg Rose if (tx_bi == first)
22757f12ad74SGreg Rose break;
22767f12ad74SGreg Rose if (i == 0)
22777f12ad74SGreg Rose i = tx_ring->count;
22787f12ad74SGreg Rose i--;
22797f12ad74SGreg Rose }
22807f12ad74SGreg Rose
22817f12ad74SGreg Rose tx_ring->next_to_use = i;
22827f12ad74SGreg Rose }
22837f12ad74SGreg Rose
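/* Worked example for the descriptor-splitting loop in iavf_tx_map() above,
 * assuming IAVF_MAX_READ_REQ_SIZE is 4096 (a typical PCIe read-request size,
 * stated here as an assumption): if a mapping starts at a DMA address with
 * offset 0x800 into a 4 KiB block, -dma & 0xfff evaluates to 0x800, so the
 * first slice is enlarged by 0x800 bytes and ends exactly on a read-request
 * boundary; every following slice of IAVF_MAX_DATA_PER_TXD_ALIGNED bytes
 * then starts aligned.
 */
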
22847f12ad74SGreg Rose /**
228556184e01SJesse Brandeburg * iavf_xmit_frame_ring - Sends buffer on Tx ring
22867f12ad74SGreg Rose * @skb: send buffer
22877f12ad74SGreg Rose * @tx_ring: ring to send buffer on
22887f12ad74SGreg Rose *
22897f12ad74SGreg Rose * Returns NETDEV_TX_OK if sent, else an error code
22907f12ad74SGreg Rose **/
iavf_xmit_frame_ring(struct sk_buff * skb,struct iavf_ring * tx_ring)229156184e01SJesse Brandeburg static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb,
229256184e01SJesse Brandeburg struct iavf_ring *tx_ring)
22937f12ad74SGreg Rose {
2294f1cad2ceSJesse Brandeburg u64 cd_type_cmd_tso_mss = IAVF_TX_DESC_DTYPE_CONTEXT;
22957f12ad74SGreg Rose u32 cd_tunneling = 0, cd_l2tag2 = 0;
229656184e01SJesse Brandeburg struct iavf_tx_buffer *first;
22977f12ad74SGreg Rose u32 td_offset = 0;
22987f12ad74SGreg Rose u32 tx_flags = 0;
22997f12ad74SGreg Rose __be16 protocol;
23007f12ad74SGreg Rose u32 td_cmd = 0;
23017f12ad74SGreg Rose u8 hdr_len = 0;
23024ec441dfSAlexander Duyck int tso, count;
23036995b36cSJesse Brandeburg
2304b74118f0SJesse Brandeburg /* prefetch the data, we'll need it later */
2305b74118f0SJesse Brandeburg prefetch(skb->data);
2306b74118f0SJesse Brandeburg
2307ad64ed8bSJesse Brandeburg iavf_trace(xmit_frame_ring, skb, tx_ring);
2308ed0980c4SScott Peterson
230956184e01SJesse Brandeburg count = iavf_xmit_descriptor_count(skb);
231056184e01SJesse Brandeburg if (iavf_chk_linearize(skb, count)) {
231152ea3e80SAlexander Duyck if (__skb_linearize(skb)) {
231252ea3e80SAlexander Duyck dev_kfree_skb_any(skb);
231352ea3e80SAlexander Duyck return NETDEV_TX_OK;
231452ea3e80SAlexander Duyck }
231556184e01SJesse Brandeburg count = iavf_txd_use_count(skb->len);
23162d37490bSAlexander Duyck tx_ring->tx_stats.tx_linearize++;
23172d37490bSAlexander Duyck }
23184ec441dfSAlexander Duyck
231956184e01SJesse Brandeburg /* need: 1 descriptor per page * PAGE_SIZE/IAVF_MAX_DATA_PER_TXD,
232056184e01SJesse Brandeburg * + 1 desc for skb_head_len/IAVF_MAX_DATA_PER_TXD,
23214ec441dfSAlexander Duyck * + 4 desc gap to avoid the cache line where head is,
23224ec441dfSAlexander Duyck * + 1 desc for context descriptor,
23234ec441dfSAlexander Duyck * otherwise try next time
23244ec441dfSAlexander Duyck */
232556184e01SJesse Brandeburg if (iavf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
23264ec441dfSAlexander Duyck tx_ring->tx_stats.tx_busy++;
23277f12ad74SGreg Rose return NETDEV_TX_BUSY;
23284ec441dfSAlexander Duyck }
23297f12ad74SGreg Rose
233052ea3e80SAlexander Duyck /* record the location of the first descriptor for this packet */
233152ea3e80SAlexander Duyck first = &tx_ring->tx_bi[tx_ring->next_to_use];
233252ea3e80SAlexander Duyck first->skb = skb;
233352ea3e80SAlexander Duyck first->bytecount = skb->len;
233452ea3e80SAlexander Duyck first->gso_segs = 1;
233552ea3e80SAlexander Duyck
23367f12ad74SGreg Rose /* prepare the xmit flags */
2337ccd219d2SBrett Creeley iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags);
2338ccd219d2SBrett Creeley if (tx_flags & IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN) {
2339ccd219d2SBrett Creeley cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2 <<
2340ccd219d2SBrett Creeley IAVF_TXD_CTX_QW1_CMD_SHIFT;
234165db56d5SJesse Brandeburg cd_l2tag2 = FIELD_GET(IAVF_TX_FLAGS_VLAN_MASK, tx_flags);
2342ccd219d2SBrett Creeley }
23437f12ad74SGreg Rose
23447f12ad74SGreg Rose /* obtain protocol of skb */
2345a12c4158SVlad Yasevich protocol = vlan_get_protocol(skb);
23467f12ad74SGreg Rose
23477f12ad74SGreg Rose /* setup IPv4/IPv6 offloads */
23487f12ad74SGreg Rose if (protocol == htons(ETH_P_IP))
234956184e01SJesse Brandeburg tx_flags |= IAVF_TX_FLAGS_IPV4;
23507f12ad74SGreg Rose else if (protocol == htons(ETH_P_IPV6))
235156184e01SJesse Brandeburg tx_flags |= IAVF_TX_FLAGS_IPV6;
23527f12ad74SGreg Rose
235356184e01SJesse Brandeburg tso = iavf_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
23547f12ad74SGreg Rose
23557f12ad74SGreg Rose if (tso < 0)
23567f12ad74SGreg Rose goto out_drop;
23577f12ad74SGreg Rose else if (tso)
235856184e01SJesse Brandeburg tx_flags |= IAVF_TX_FLAGS_TSO;
23597f12ad74SGreg Rose
23607f12ad74SGreg Rose /* Always offload the checksum, since it's in the data descriptor */
236156184e01SJesse Brandeburg tso = iavf_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
23627f12ad74SGreg Rose tx_ring, &cd_tunneling);
2363529f1f65SAlexander Duyck if (tso < 0)
2364529f1f65SAlexander Duyck goto out_drop;
23657f12ad74SGreg Rose
23663bc67973SAlexander Duyck /* always enable CRC insertion offload */
2367f1cad2ceSJesse Brandeburg td_cmd |= IAVF_TX_DESC_CMD_ICRC;
23683bc67973SAlexander Duyck
236956184e01SJesse Brandeburg iavf_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
23707f12ad74SGreg Rose cd_tunneling, cd_l2tag2);
23717f12ad74SGreg Rose
2372129cf89eSJesse Brandeburg iavf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
23737f12ad74SGreg Rose td_cmd, td_offset);
23747f12ad74SGreg Rose
23757f12ad74SGreg Rose return NETDEV_TX_OK;
23767f12ad74SGreg Rose
23777f12ad74SGreg Rose out_drop:
2378ad64ed8bSJesse Brandeburg iavf_trace(xmit_frame_ring_drop, first->skb, tx_ring);
237952ea3e80SAlexander Duyck dev_kfree_skb_any(first->skb);
238052ea3e80SAlexander Duyck first->skb = NULL;
23817f12ad74SGreg Rose return NETDEV_TX_OK;
23827f12ad74SGreg Rose }
23837f12ad74SGreg Rose
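/* Worked example of the descriptor budget checked in iavf_xmit_frame_ring()
 * above: for a small linear skb that iavf_txd_use_count() maps to a single
 * data descriptor (an assumption for illustration), the ring must still have
 * 1 + 4 + 1 = 6 unused descriptors - one for the data, four of gap to keep
 * the tail away from the cache line holding head, and one for a possible
 * context descriptor - before the frame is accepted; otherwise tx_busy is
 * counted and NETDEV_TX_BUSY is returned.
 */
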
23847f12ad74SGreg Rose /**
2385129cf89eSJesse Brandeburg * iavf_xmit_frame - Selects the correct VSI and Tx queue to send buffer
23867f12ad74SGreg Rose * @skb: send buffer
23877f12ad74SGreg Rose * @netdev: network interface device structure
23887f12ad74SGreg Rose *
23897f12ad74SGreg Rose * Returns NETDEV_TX_OK if sent, else an error code
23907f12ad74SGreg Rose **/
iavf_xmit_frame(struct sk_buff * skb,struct net_device * netdev)2391129cf89eSJesse Brandeburg netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
23927f12ad74SGreg Rose {
2393129cf89eSJesse Brandeburg struct iavf_adapter *adapter = netdev_priv(netdev);
239456184e01SJesse Brandeburg struct iavf_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping];
23957f12ad74SGreg Rose
23967f12ad74SGreg Rose /* hardware can't handle really short frames, hardware padding works
23977f12ad74SGreg Rose * beyond this point
23987f12ad74SGreg Rose */
239956184e01SJesse Brandeburg if (unlikely(skb->len < IAVF_MIN_TX_LEN)) {
240056184e01SJesse Brandeburg if (skb_pad(skb, IAVF_MIN_TX_LEN - skb->len))
24017f12ad74SGreg Rose return NETDEV_TX_OK;
240256184e01SJesse Brandeburg skb->len = IAVF_MIN_TX_LEN;
240356184e01SJesse Brandeburg skb_set_tail_pointer(skb, IAVF_MIN_TX_LEN);
24047f12ad74SGreg Rose }
24057f12ad74SGreg Rose
240656184e01SJesse Brandeburg return iavf_xmit_frame_ring(skb, tx_ring);
24077f12ad74SGreg Rose }
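/* Note on the short-frame guard in iavf_xmit_frame() above: skb_pad()
 * zero-fills the tail and frees the skb itself on allocation failure, which
 * is why the error path simply returns NETDEV_TX_OK without another free;
 * on success skb->len and the tail pointer are raised to IAVF_MIN_TX_LEN so
 * the ring never sees a frame below the hardware minimum.
 */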
2408