Lines matching "quad" and "precision" in drivers/net/ethernet/intel/iavf/iavf_txrx.c

1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
14 * iavf_is_descriptor_done - tests DD bit in Rx descriptor
15 * @qw1: quad word 1 from descriptor to get Descriptor Done field from
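
A standalone sketch, not taken from the driver, of what the descriptor-done test on quad word 1 typically amounts to; the bit position is an assumption based on the legacy 32-byte descriptor layout (DD flag in bit 0 of the qw1 status field), and qw1 is assumed to be already converted to CPU byte order:

#include <stdbool.h>
#include <stdint.h>

#define RX_DESC_STATUS_DD 0x1ULL /* assumed: DD flag lives in bit 0 of qw1 */

/* True once hardware has written the descriptor back to memory. */
static bool rx_descriptor_done_sketch(uint64_t qw1)
{
	return qw1 & RX_DESC_STATUS_DD;
}
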
45 * iavf_unmap_and_free_tx_resource - Release a Tx buffer
52 if (tx_buffer->skb) { in iavf_unmap_and_free_tx_resource()
53 if (tx_buffer->tx_flags & IAVF_TX_FLAGS_FD_SB) in iavf_unmap_and_free_tx_resource()
54 kfree(tx_buffer->raw_buf); in iavf_unmap_and_free_tx_resource()
56 dev_kfree_skb_any(tx_buffer->skb); in iavf_unmap_and_free_tx_resource()
58 dma_unmap_single(ring->dev, in iavf_unmap_and_free_tx_resource()
63 dma_unmap_page(ring->dev, in iavf_unmap_and_free_tx_resource()
69 tx_buffer->next_to_watch = NULL; in iavf_unmap_and_free_tx_resource()
70 tx_buffer->skb = NULL; in iavf_unmap_and_free_tx_resource()
76 * iavf_clean_tx_ring - Free all Tx buffers in a ring
85 if (!tx_ring->tx_bi) in iavf_clean_tx_ring()
89 for (i = 0; i < tx_ring->count; i++) in iavf_clean_tx_ring()
90 iavf_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]); in iavf_clean_tx_ring()
92 bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count; in iavf_clean_tx_ring()
93 memset(tx_ring->tx_bi, 0, bi_size); in iavf_clean_tx_ring()
96 memset(tx_ring->desc, 0, tx_ring->size); in iavf_clean_tx_ring()
98 tx_ring->next_to_use = 0; in iavf_clean_tx_ring()
99 tx_ring->next_to_clean = 0; in iavf_clean_tx_ring()
101 if (!tx_ring->netdev) in iavf_clean_tx_ring()
109 * iavf_free_tx_resources - Free Tx resources per queue
117 kfree(tx_ring->tx_bi); in iavf_free_tx_resources()
118 tx_ring->tx_bi = NULL; in iavf_free_tx_resources()
120 if (tx_ring->desc) { in iavf_free_tx_resources()
121 dma_free_coherent(tx_ring->dev, tx_ring->size, in iavf_free_tx_resources()
122 tx_ring->desc, tx_ring->dma); in iavf_free_tx_resources()
123 tx_ring->desc = NULL; in iavf_free_tx_resources()
128 * iavf_get_tx_pending - how many Tx descriptors not processed
142 head = ring->next_to_clean; in iavf_get_tx_pending()
143 tail = ring->next_to_use; in iavf_get_tx_pending()
147 tail - head : (tail + ring->count - head); in iavf_get_tx_pending()
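
The head/tail arithmetic above is the usual circular-ring pending count; a standalone sketch with a worked example (names are illustrative, not the driver's):

#include <stdint.h>

/* Descriptors still outstanding in a ring of `count` entries, given the
 * software head (next_to_clean) and tail (next_to_use).
 */
static uint16_t ring_pending_sketch(uint16_t head, uint16_t tail, uint16_t count)
{
	if (head == tail)
		return 0;
	return head <= tail ? tail - head : tail + count - head;
}

/* e.g. count = 512, head = 500, tail = 10:  10 + 512 - 500 = 22 pending */
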
153 * iavf_force_wb - Issue a SW interrupt so HW does a write-back
165 wr32(&vsi->back->hw, in iavf_force_wb()
166 IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx), in iavf_force_wb()
171 * iavf_detect_recover_hung - detect and recover hung Tx queues
187 if (test_bit(__IAVF_VSI_DOWN, vsi->state)) in iavf_detect_recover_hung()
190 netdev = vsi->netdev; in iavf_detect_recover_hung()
197 for (i = 0; i < vsi->back->num_active_queues; i++) { in iavf_detect_recover_hung()
198 tx_ring = &vsi->back->tx_rings[i]; in iavf_detect_recover_hung()
199 if (tx_ring && tx_ring->desc) { in iavf_detect_recover_hung()
207 packets = tx_ring->stats.packets & INT_MAX; in iavf_detect_recover_hung()
208 if (tx_ring->prev_pkt_ctr == packets) { in iavf_detect_recover_hung()
209 iavf_force_wb(vsi, tx_ring->q_vector); in iavf_detect_recover_hung()
217 tx_ring->prev_pkt_ctr = in iavf_detect_recover_hung()
218 iavf_get_tx_pending(tx_ring, true) ? packets : -1; in iavf_detect_recover_hung()
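
The recovery logic above reduces to a per-ring comparison between watchdog runs: if descriptors were pending last time and the completion counter has not advanced since, force a write-back. A minimal standalone sketch of that idea (struct and function names here are hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <limits.h>

struct txq_watch {
	int prev_pkt_ctr;	/* counter latched on the previous run, -1 = idle */
};

static bool txq_check_hung_sketch(struct txq_watch *w, uint64_t packets_completed,
				  bool descs_pending)
{
	int packets = (int)(packets_completed & INT_MAX);

	/* Hung: the counter latched while work was pending has not moved. */
	if (w->prev_pkt_ctr == packets)
		return true;

	/* Latch the counter only while descriptors are outstanding; park the
	 * marker at -1 otherwise so an idle ring can never trigger.
	 */
	w->prev_pkt_ctr = descs_pending ? packets : -1;
	return false;
}
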
226 * iavf_clean_tx_irq - Reclaim resources after transmit completes
236 int i = tx_ring->next_to_clean; in iavf_clean_tx_irq()
242 tx_buf = &tx_ring->tx_bi[i]; in iavf_clean_tx_irq()
244 i -= tx_ring->count; in iavf_clean_tx_irq()
247 struct iavf_tx_desc *eop_desc = tx_buf->next_to_watch; in iavf_clean_tx_irq()
258 if (!(eop_desc->cmd_type_offset_bsz & in iavf_clean_tx_irq()
263 tx_buf->next_to_watch = NULL; in iavf_clean_tx_irq()
266 total_bytes += tx_buf->bytecount; in iavf_clean_tx_irq()
267 total_packets += tx_buf->gso_segs; in iavf_clean_tx_irq()
270 napi_consume_skb(tx_buf->skb, napi_budget); in iavf_clean_tx_irq()
273 dma_unmap_single(tx_ring->dev, in iavf_clean_tx_irq()
279 tx_buf->skb = NULL; in iavf_clean_tx_irq()
291 i -= tx_ring->count; in iavf_clean_tx_irq()
292 tx_buf = tx_ring->tx_bi; in iavf_clean_tx_irq()
298 dma_unmap_page(tx_ring->dev, in iavf_clean_tx_irq()
311 i -= tx_ring->count; in iavf_clean_tx_irq()
312 tx_buf = tx_ring->tx_bi; in iavf_clean_tx_irq()
319 budget--; in iavf_clean_tx_irq()
322 i += tx_ring->count; in iavf_clean_tx_irq()
323 tx_ring->next_to_clean = i; in iavf_clean_tx_irq()
324 u64_stats_update_begin(&tx_ring->syncp); in iavf_clean_tx_irq()
325 tx_ring->stats.bytes += total_bytes; in iavf_clean_tx_irq()
326 tx_ring->stats.packets += total_packets; in iavf_clean_tx_irq()
327 u64_stats_update_end(&tx_ring->syncp); in iavf_clean_tx_irq()
328 tx_ring->q_vector->tx.total_bytes += total_bytes; in iavf_clean_tx_irq()
329 tx_ring->q_vector->tx.total_packets += total_packets; in iavf_clean_tx_irq()
331 if (tx_ring->flags & IAVF_TXR_FLAGS_WB_ON_ITR) { in iavf_clean_tx_irq()
341 !test_bit(__IAVF_VSI_DOWN, vsi->state) && in iavf_clean_tx_irq()
342 (IAVF_DESC_UNUSED(tx_ring) != tx_ring->count)) in iavf_clean_tx_irq()
343 tx_ring->flags |= IAVF_TXR_FLAGS_ARM_WB; in iavf_clean_tx_irq()
351 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && in iavf_clean_tx_irq()
357 if (__netif_subqueue_stopped(tx_ring->netdev, in iavf_clean_tx_irq()
358 tx_ring->queue_index) && in iavf_clean_tx_irq()
359 !test_bit(__IAVF_VSI_DOWN, vsi->state)) { in iavf_clean_tx_irq()
360 netif_wake_subqueue(tx_ring->netdev, in iavf_clean_tx_irq()
361 tx_ring->queue_index); in iavf_clean_tx_irq()
362 ++tx_ring->tx_stats.restart_queue; in iavf_clean_tx_irq()
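
The break condition near the top of the loop above keys off the descriptor write-back. A standalone sketch of that test, assuming the encoding this hardware family is commonly documented with (data descriptors are posted with a DTYPE of 0 in quad word 1 and the field is rewritten to 0xF on completion):

#include <stdbool.h>
#include <stdint.h>

#define TX_DESC_DTYPE_DESC_DONE 0xFULL	/* assumed write-back DTYPE value */

static bool tx_eop_done_sketch(uint64_t cmd_type_offset_bsz)
{
	/* Descriptors go out with DTYPE = 0, so any of the low four bits
	 * being set means hardware has completed and written back.
	 */
	return cmd_type_offset_bsz & TX_DESC_DTYPE_DESC_DONE;
}
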
370 * iavf_enable_wb_on_itr - Arm hardware to do a write-back; interrupts are not enabled
378 u16 flags = q_vector->tx.ring[0].flags; in iavf_enable_wb_on_itr()
384 if (q_vector->arm_wb_state) in iavf_enable_wb_on_itr()
390 wr32(&vsi->back->hw, in iavf_enable_wb_on_itr()
391 IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx), val); in iavf_enable_wb_on_itr()
392 q_vector->arm_wb_state = true; in iavf_enable_wb_on_itr()
398 return &q_vector->rx == rc; in iavf_container_is_rx()
451 iavf_mbps_itr_multiplier(adapter->link_speed_mbps); in iavf_itr_divisor()
454 iavf_virtchnl_itr_multiplier(adapter->link_speed); in iavf_itr_divisor()
458 * iavf_update_itr - update the dynamic ITR value based on statistics
479 if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting)) in iavf_update_itr()
489 /* If we didn't update within up to 1 - 2 jiffies we can assume in iavf_update_itr()
494 if (time_after(next_update, rc->next_update)) in iavf_update_itr()
503 if (q_vector->itr_countdown) { in iavf_update_itr()
504 itr = rc->target_itr; in iavf_update_itr()
508 packets = rc->total_packets; in iavf_update_itr()
509 bytes = rc->total_bytes; in iavf_update_itr()
518 (q_vector->tx.target_itr & IAVF_ITR_ADAPTIVE_LATENCY)) { in iavf_update_itr()
528 if (rc->target_itr == IAVF_ITR_ADAPTIVE_MAX_USECS && in iavf_update_itr()
529 (q_vector->rx.target_itr & IAVF_ITR_MASK) == in iavf_update_itr()
536 rc->target_itr &= ~IAVF_ITR_ADAPTIVE_LATENCY; in iavf_update_itr()
548 itr = rc->target_itr + IAVF_ITR_ADAPTIVE_MIN_INC; in iavf_update_itr()
557 itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr); in iavf_update_itr()
645 iavf_itr_divisor(q_vector->adapter)) * in iavf_update_itr()
655 rc->target_itr = itr; in iavf_update_itr()
658 rc->next_update = next_update + 1; in iavf_update_itr()
660 rc->total_bytes = 0; in iavf_update_itr()
661 rc->total_packets = 0; in iavf_update_itr()
665 * iavf_setup_tx_descriptors - Allocate the Tx descriptors
672 struct device *dev = tx_ring->dev; in iavf_setup_tx_descriptors()
676 return -ENOMEM; in iavf_setup_tx_descriptors()
679 WARN_ON(tx_ring->tx_bi); in iavf_setup_tx_descriptors()
680 bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count; in iavf_setup_tx_descriptors()
681 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL); in iavf_setup_tx_descriptors()
682 if (!tx_ring->tx_bi) in iavf_setup_tx_descriptors()
686 tx_ring->size = tx_ring->count * sizeof(struct iavf_tx_desc); in iavf_setup_tx_descriptors()
687 tx_ring->size = ALIGN(tx_ring->size, 4096); in iavf_setup_tx_descriptors()
688 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, in iavf_setup_tx_descriptors()
689 &tx_ring->dma, GFP_KERNEL); in iavf_setup_tx_descriptors()
690 if (!tx_ring->desc) { in iavf_setup_tx_descriptors()
692 tx_ring->size); in iavf_setup_tx_descriptors()
696 tx_ring->next_to_use = 0; in iavf_setup_tx_descriptors()
697 tx_ring->next_to_clean = 0; in iavf_setup_tx_descriptors()
698 tx_ring->prev_pkt_ctr = -1; in iavf_setup_tx_descriptors()
702 kfree(tx_ring->tx_bi); in iavf_setup_tx_descriptors()
703 tx_ring->tx_bi = NULL; in iavf_setup_tx_descriptors()
704 return -ENOMEM; in iavf_setup_tx_descriptors()
708 * iavf_clean_rx_ring - Free Rx buffers
714 if (!rx_ring->rx_fqes) in iavf_clean_rx_ring()
717 if (rx_ring->skb) { in iavf_clean_rx_ring()
718 dev_kfree_skb(rx_ring->skb); in iavf_clean_rx_ring()
719 rx_ring->skb = NULL; in iavf_clean_rx_ring()
723 for (u32 i = rx_ring->next_to_clean; i != rx_ring->next_to_use; ) { in iavf_clean_rx_ring()
724 const struct libeth_fqe *rx_fqes = &rx_ring->rx_fqes[i]; in iavf_clean_rx_ring()
726 page_pool_put_full_page(rx_ring->pp, rx_fqes->page, false); in iavf_clean_rx_ring()
728 if (unlikely(++i == rx_ring->count)) in iavf_clean_rx_ring()
732 rx_ring->next_to_clean = 0; in iavf_clean_rx_ring()
733 rx_ring->next_to_use = 0; in iavf_clean_rx_ring()
737 * iavf_free_rx_resources - Free Rx resources
745 .fqes = rx_ring->rx_fqes, in iavf_free_rx_resources()
746 .pp = rx_ring->pp, in iavf_free_rx_resources()
751 if (rx_ring->desc) { in iavf_free_rx_resources()
752 dma_free_coherent(rx_ring->pp->p.dev, rx_ring->size, in iavf_free_rx_resources()
753 rx_ring->desc, rx_ring->dma); in iavf_free_rx_resources()
754 rx_ring->desc = NULL; in iavf_free_rx_resources()
758 rx_ring->rx_fqes = NULL; in iavf_free_rx_resources()
759 rx_ring->pp = NULL; in iavf_free_rx_resources()
763 * iavf_setup_rx_descriptors - Allocate Rx descriptors
771 .count = rx_ring->count, in iavf_setup_rx_descriptors()
777 ret = libeth_rx_fq_create(&fq, &rx_ring->q_vector->napi); in iavf_setup_rx_descriptors()
781 rx_ring->pp = fq.pp; in iavf_setup_rx_descriptors()
782 rx_ring->rx_fqes = fq.fqes; in iavf_setup_rx_descriptors()
783 rx_ring->truesize = fq.truesize; in iavf_setup_rx_descriptors()
784 rx_ring->rx_buf_len = fq.buf_len; in iavf_setup_rx_descriptors()
786 u64_stats_init(&rx_ring->syncp); in iavf_setup_rx_descriptors()
789 rx_ring->size = rx_ring->count * sizeof(struct iavf_rx_desc); in iavf_setup_rx_descriptors()
790 rx_ring->size = ALIGN(rx_ring->size, 4096); in iavf_setup_rx_descriptors()
791 rx_ring->desc = dma_alloc_coherent(fq.pp->p.dev, rx_ring->size, in iavf_setup_rx_descriptors()
792 &rx_ring->dma, GFP_KERNEL); in iavf_setup_rx_descriptors()
794 if (!rx_ring->desc) { in iavf_setup_rx_descriptors()
795 dev_info(fq.pp->p.dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n", in iavf_setup_rx_descriptors()
796 rx_ring->size); in iavf_setup_rx_descriptors()
800 rx_ring->next_to_clean = 0; in iavf_setup_rx_descriptors()
801 rx_ring->next_to_use = 0; in iavf_setup_rx_descriptors()
807 rx_ring->rx_fqes = NULL; in iavf_setup_rx_descriptors()
808 rx_ring->pp = NULL; in iavf_setup_rx_descriptors()
810 return -ENOMEM; in iavf_setup_rx_descriptors()
814 * iavf_release_rx_desc - Store the new tail and head values
820 rx_ring->next_to_use = val; in iavf_release_rx_desc()
824 * applicable for weak-ordered memory model archs, in iavf_release_rx_desc()
825 * such as IA-64). in iavf_release_rx_desc()
828 writel(val, rx_ring->tail); in iavf_release_rx_desc()
832 * iavf_receive_skb - Send a completed packet up the stack
840 struct iavf_q_vector *q_vector = rx_ring->q_vector; in iavf_receive_skb()
842 if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && in iavf_receive_skb()
845 else if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_STAG_RX) && in iavf_receive_skb()
849 napi_gro_receive(&q_vector->napi, skb); in iavf_receive_skb()
853 * iavf_alloc_rx_buffers - Replace used receive buffers
862 .pp = rx_ring->pp, in iavf_alloc_rx_buffers()
863 .fqes = rx_ring->rx_fqes, in iavf_alloc_rx_buffers()
864 .truesize = rx_ring->truesize, in iavf_alloc_rx_buffers()
865 .count = rx_ring->count, in iavf_alloc_rx_buffers()
867 u16 ntu = rx_ring->next_to_use; in iavf_alloc_rx_buffers()
871 if (!rx_ring->netdev || !cleaned_count) in iavf_alloc_rx_buffers()
884 * because each write-back erases this info. in iavf_alloc_rx_buffers()
886 rx_desc->qw0 = cpu_to_le64(addr); in iavf_alloc_rx_buffers()
890 if (unlikely(ntu == rx_ring->count)) { in iavf_alloc_rx_buffers()
896 rx_desc->qw1 = 0; in iavf_alloc_rx_buffers()
898 cleaned_count--; in iavf_alloc_rx_buffers()
901 if (rx_ring->next_to_use != ntu) in iavf_alloc_rx_buffers()
907 if (rx_ring->next_to_use != ntu) in iavf_alloc_rx_buffers()
910 rx_ring->rx_stats.alloc_page_failed++; in iavf_alloc_rx_buffers()
919 * iavf_rx_csum - Indicate in skb if hw indicated a good checksum
931 skb->ip_summed = CHECKSUM_NONE; in iavf_rx_csum()
958 skb->ip_summed = CHECKSUM_UNNECESSARY; in iavf_rx_csum()
962 vsi->back->hw_csum_rx_error++; in iavf_rx_csum()
966 * iavf_legacy_rx_csum - Indicate in skb if hw indicated a good checksum
968 * @qw1: quad word 1
982 if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded_pt)) in iavf_legacy_rx_csum()
996 * iavf_flex_rx_csum - Indicate in skb if hw indicated a good checksum
998 * @qw1: quad word 1
1012 if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded_pt)) in iavf_flex_rx_csum()
1027 * iavf_legacy_rx_hash - set the hash value in the skb
1029 * @qw0: quad word 0
1030 * @qw1: quad word 1
1044 if (!libeth_rx_pt_has_hash(ring->netdev, decoded_pt)) in iavf_legacy_rx_hash()
1054 * iavf_flex_rx_hash - set the hash value in the skb
1056 * @qw1: quad word 1
1070 if (!libeth_rx_pt_has_hash(ring->netdev, decoded_pt)) in iavf_flex_rx_hash()
1081 * iavf_flex_rx_tstamp - Capture Rx timestamp from the descriptor
1083 * @qw2: quad word 2 of descriptor
1084 * @qw3: quad word 3 of descriptor
1099 if (!(rx_ring->flags & IAVF_TXRX_FLAGS_HW_TSTAMP)) in iavf_flex_rx_tstamp()
1106 /* the ts_low field only contains the valid bit and sub-nanosecond in iavf_flex_rx_tstamp()
1107 * precision, so we don't need to extract it. in iavf_flex_rx_tstamp()
1111 ns = iavf_ptp_extend_32b_timestamp(rx_ring->ptp->cached_phc_time, in iavf_flex_rx_tstamp()
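
iavf_ptp_extend_32b_timestamp() itself is not part of this excerpt; helpers of this kind usually widen the captured 32-bit value against a cached PHC time. A standalone sketch of that technique, assuming the cached time is refreshed often enough that the capture lies within half the 32-bit range of it:

#include <stdint.h>

/* Replace the low 32 bits of the cached PHC nanosecond counter with the
 * captured value; the signed difference keeps wrap-around near the 32-bit
 * boundary correct in both directions.
 */
static uint64_t extend_32b_timestamp_sketch(uint64_t cached_phc_ns, uint32_t tstamp)
{
	int32_t delta = (int32_t)(tstamp - (uint32_t)cached_phc_ns);

	return cached_phc_ns + delta;
}
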
1120 * iavf_process_skb_fields - Populate skb header fields from Rx descriptor
1138 __le64 qw0 = rx_desc->qw0; in iavf_process_skb_fields()
1139 __le64 qw1 = rx_desc->qw1; in iavf_process_skb_fields()
1140 __le64 qw2 = rx_desc->qw2; in iavf_process_skb_fields()
1141 __le64 qw3 = rx_desc->qw3; in iavf_process_skb_fields()
1148 csum_bits = iavf_flex_rx_csum(rx_ring->vsi, le64_to_cpu(qw1), in iavf_process_skb_fields()
1152 csum_bits = iavf_legacy_rx_csum(rx_ring->vsi, le64_to_cpu(qw1), in iavf_process_skb_fields()
1155 iavf_rx_csum(rx_ring->vsi, skb, decoded_pt, csum_bits); in iavf_process_skb_fields()
1157 skb_record_rx_queue(skb, rx_ring->queue_index); in iavf_process_skb_fields()
1159 /* modifies the skb - consumes the enet header */ in iavf_process_skb_fields()
1160 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in iavf_process_skb_fields()
1164 * iavf_cleanup_headers - Correct empty headers
1186 * iavf_add_rx_frag - Add contents of Rx buffer to sk_buff
1191 * This function will add the data contained in rx_buffer->page to the skb.
1200 u32 hr = rx_buffer->page->pp->p.offset; in iavf_add_rx_frag()
1202 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, in iavf_add_rx_frag()
1203 rx_buffer->offset + hr, size, rx_buffer->truesize); in iavf_add_rx_frag()
1207 * iavf_build_skb - Build skb around an existing buffer
1217 u32 hr = rx_buffer->page->pp->p.offset; in iavf_build_skb()
1222 va = page_address(rx_buffer->page) + rx_buffer->offset; in iavf_build_skb()
1226 skb = napi_build_skb(va, rx_buffer->truesize); in iavf_build_skb()
1240 * iavf_is_non_eop - process handling of non-EOP buffers
1247 * that this is in fact a non-EOP buffer.
1252 u32 ntc = rx_ring->next_to_clean + 1; in iavf_is_non_eop()
1255 ntc = (ntc < rx_ring->count) ? ntc : 0; in iavf_is_non_eop()
1256 rx_ring->next_to_clean = ntc; in iavf_is_non_eop()
1264 rx_ring->rx_stats.non_eop_descs++; in iavf_is_non_eop()
1270 * iavf_extract_legacy_rx_fields - Extract fields from the Rx descriptor
1286 u64 qw0 = le64_to_cpu(rx_desc->qw0); in iavf_extract_legacy_rx_fields()
1287 u64 qw1 = le64_to_cpu(rx_desc->qw1); in iavf_extract_legacy_rx_fields()
1288 u64 qw2 = le64_to_cpu(rx_desc->qw2); in iavf_extract_legacy_rx_fields()
1302 if (rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) { in iavf_extract_legacy_rx_fields()
1306 } else if (rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2) { in iavf_extract_legacy_rx_fields()
1316 * iavf_extract_flex_rx_fields - Extract fields from the Rx descriptor
1333 u64 qw0 = le64_to_cpu(rx_desc->qw0); in iavf_extract_flex_rx_fields()
1334 u64 qw1 = le64_to_cpu(rx_desc->qw1); in iavf_extract_flex_rx_fields()
1335 u64 qw2 = le64_to_cpu(rx_desc->qw2); in iavf_extract_flex_rx_fields()
1348 if (rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) { in iavf_extract_flex_rx_fields()
1352 } else if (rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2) { in iavf_extract_flex_rx_fields()
1373 * iavf_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
1386 bool flex = rx_ring->rxdid == VIRTCHNL_RXDID_2_FLEX_SQ_NIC; in iavf_clean_rx_irq()
1388 struct sk_buff *skb = rx_ring->skb; in iavf_clean_rx_irq()
1405 rx_desc = IAVF_RX_DESC(rx_ring, rx_ring->next_to_clean); in iavf_clean_rx_irq()
1413 qw1 = le64_to_cpu(rx_desc->qw1); in iavf_clean_rx_irq()
1424 rx_buffer = &rx_ring->rx_fqes[rx_ring->next_to_clean]; in iavf_clean_rx_irq()
1436 rx_ring->rx_stats.alloc_buff_failed++; in iavf_clean_rx_irq()
1462 total_rx_bytes += skb->len; in iavf_clean_rx_irq()
1475 rx_ring->skb = skb; in iavf_clean_rx_irq()
1477 u64_stats_update_begin(&rx_ring->syncp); in iavf_clean_rx_irq()
1478 rx_ring->stats.packets += total_rx_packets; in iavf_clean_rx_irq()
1479 rx_ring->stats.bytes += total_rx_bytes; in iavf_clean_rx_irq()
1480 u64_stats_update_end(&rx_ring->syncp); in iavf_clean_rx_irq()
1481 rx_ring->q_vector->rx.total_packets += total_rx_packets; in iavf_clean_rx_irq()
1482 rx_ring->q_vector->rx.total_bytes += total_rx_bytes; in iavf_clean_rx_irq()
1494 * auto-cleared". The auto-clearing happens when the interrupt is in iavf_buildreg_itr()
1500 * to hold pending events for us until the interrupt is re-enabled in iavf_buildreg_itr()
1504 * only need to shift by the interval shift - 1 instead of the in iavf_buildreg_itr()
1511 (itr << (IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT - 1)); in iavf_buildreg_itr()
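
The "interval shift - 1" above works because the driver tracks ITR in microseconds (always even) while the register's interval field counts 2-microsecond units, so shifting by one bit less folds in the divide by two. A worked sketch; the field position (bit 5) is an assumption about the usual DYN_CTLN layout, and the identity holds for any shift:

#include <assert.h>
#include <stdint.h>

#define DYN_CTLN_INTERVAL_SHIFT 5	/* assumed field position */

int main(void)
{
	uint32_t itr_usecs = 50;	/* ITR values are kept in 2 us granularity */

	uint32_t a = (itr_usecs / 2) << DYN_CTLN_INTERVAL_SHIFT;	/* "proper" encoding */
	uint32_t b = itr_usecs << (DYN_CTLN_INTERVAL_SHIFT - 1);	/* driver shortcut */

	assert(a == b);		/* 25 << 5 == 50 << 4 == 800 */
	return 0;
}
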
1529 * iavf_update_enable_itr - Update itr and re-enable MSIX interrupt
1537 struct iavf_hw *hw = &vsi->back->hw; in iavf_update_enable_itr()
1541 iavf_update_itr(q_vector, &q_vector->tx); in iavf_update_enable_itr()
1542 iavf_update_itr(q_vector, &q_vector->rx); in iavf_update_enable_itr()
1546 * pseudo-lazy update with the following criteria. in iavf_update_enable_itr()
1552 if (q_vector->rx.target_itr < q_vector->rx.current_itr) { in iavf_update_enable_itr()
1555 q_vector->rx.target_itr); in iavf_update_enable_itr()
1556 q_vector->rx.current_itr = q_vector->rx.target_itr; in iavf_update_enable_itr()
1557 q_vector->itr_countdown = ITR_COUNTDOWN_START; in iavf_update_enable_itr()
1558 } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) || in iavf_update_enable_itr()
1559 ((q_vector->rx.target_itr - q_vector->rx.current_itr) < in iavf_update_enable_itr()
1560 (q_vector->tx.target_itr - q_vector->tx.current_itr))) { in iavf_update_enable_itr()
1565 q_vector->tx.target_itr); in iavf_update_enable_itr()
1566 q_vector->tx.current_itr = q_vector->tx.target_itr; in iavf_update_enable_itr()
1567 q_vector->itr_countdown = ITR_COUNTDOWN_START; in iavf_update_enable_itr()
1568 } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) { in iavf_update_enable_itr()
1571 q_vector->rx.target_itr); in iavf_update_enable_itr()
1572 q_vector->rx.current_itr = q_vector->rx.target_itr; in iavf_update_enable_itr()
1573 q_vector->itr_countdown = ITR_COUNTDOWN_START; in iavf_update_enable_itr()
1577 if (q_vector->itr_countdown) in iavf_update_enable_itr()
1578 q_vector->itr_countdown--; in iavf_update_enable_itr()
1581 if (!test_bit(__IAVF_VSI_DOWN, vsi->state)) in iavf_update_enable_itr()
1582 wr32(hw, INTREG(q_vector->reg_idx), intval); in iavf_update_enable_itr()
1586 * iavf_napi_poll - NAPI polling Rx/Tx cleanup routine
1598 struct iavf_vsi *vsi = q_vector->vsi; in iavf_napi_poll()
1605 if (test_bit(__IAVF_VSI_DOWN, vsi->state)) { in iavf_napi_poll()
1613 iavf_for_each_ring(ring, q_vector->tx) { in iavf_napi_poll()
1618 arm_wb |= !!(ring->flags & IAVF_TXR_FLAGS_ARM_WB); in iavf_napi_poll()
1619 ring->flags &= ~IAVF_TXR_FLAGS_ARM_WB; in iavf_napi_poll()
1629 budget_per_ring = max(budget/q_vector->num_ringpairs, 1); in iavf_napi_poll()
1631 iavf_for_each_ring(ring, q_vector->rx) { in iavf_napi_poll()
1651 if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) { in iavf_napi_poll()
1658 /* Return budget-1 so that polling stops */ in iavf_napi_poll()
1659 return budget - 1; in iavf_napi_poll()
1663 q_vector->tx.ring[0].tx_stats.tx_force_wb++; in iavf_napi_poll()
1669 if (vsi->back->flags & IAVF_TXR_FLAGS_WB_ON_ITR) in iavf_napi_poll()
1670 q_vector->arm_wb_state = false; in iavf_napi_poll()
1672 /* Exit the polling mode, but don't re-enable interrupts if stack might in iavf_napi_poll()
1673 * poll us due to busy-polling in iavf_napi_poll()
1678 return min_t(int, work_done, budget - 1); in iavf_napi_poll()
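
Returning less than the budget tells the NAPI core this vector is done for the round, while returning the full budget keeps it on the poll list; clamping to budget - 1, as above, therefore guarantees polling stops even when a ring reported a full budget of work. A tiny standalone sketch of that convention (names are illustrative):

/* keep_polling reflects "clean_complete was false": stay on the poll list. */
static int napi_poll_return_sketch(int work_done, int budget, int keep_polling)
{
	if (keep_polling)
		return budget;
	return work_done < budget ? work_done : budget - 1;
}
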
1682 * iavf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
1706 if (tx_ring->flags & IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2) { in iavf_tx_prepare_vlan_flags()
1708 } else if (tx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) { in iavf_tx_prepare_vlan_flags()
1711 dev_dbg(tx_ring->dev, "Unsupported Tx VLAN tag location requested\n"); in iavf_tx_prepare_vlan_flags()
1719 * iavf_tso - set up the tso context descriptor
1722 * @cd_type_cmd_tso_mss: Quad Word 1
1729 struct sk_buff *skb = first->skb; in iavf_tso()
1745 if (skb->ip_summed != CHECKSUM_PARTIAL) in iavf_tso()
1759 if (ip.v4->version == 4) { in iavf_tso()
1760 ip.v4->tot_len = 0; in iavf_tso()
1761 ip.v4->check = 0; in iavf_tso()
1763 ip.v6->payload_len = 0; in iavf_tso()
1766 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | in iavf_tso()
1772 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && in iavf_tso()
1773 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { in iavf_tso()
1774 l4.udp->len = 0; in iavf_tso()
1777 l4_offset = l4.hdr - skb->data; in iavf_tso()
1780 paylen = skb->len - l4_offset; in iavf_tso()
1781 csum_replace_by_diff(&l4.udp->check, in iavf_tso()
1790 if (ip.v4->version == 4) { in iavf_tso()
1791 ip.v4->tot_len = 0; in iavf_tso()
1792 ip.v4->check = 0; in iavf_tso()
1794 ip.v6->payload_len = 0; in iavf_tso()
1799 l4_offset = l4.hdr - skb->data; in iavf_tso()
1801 paylen = skb->len - l4_offset; in iavf_tso()
1803 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { in iavf_tso()
1804 csum_replace_by_diff(&l4.udp->check, in iavf_tso()
1809 csum_replace_by_diff(&l4.tcp->check, in iavf_tso()
1812 *hdr_len = (u8)((l4.tcp->doff * 4) + l4_offset); in iavf_tso()
1816 gso_size = skb_shinfo(skb)->gso_size; in iavf_tso()
1817 gso_segs = skb_shinfo(skb)->gso_segs; in iavf_tso()
1820 first->gso_segs = gso_segs; in iavf_tso()
1821 first->bytecount += (first->gso_segs - 1) * *hdr_len; in iavf_tso()
1825 cd_tso_len = skb->len - *hdr_len; in iavf_tso()
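
The bytecount adjustment above accounts for the headers hardware will replicate in front of every segment after the first. A small worked sketch (pure arithmetic, illustrative numbers):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t hdr_len  = 66;			/* e.g. Ethernet + IPv4 + TCP w/ options */
	uint32_t payload  = 9000;
	uint32_t skb_len  = hdr_len + payload;	/* one header copy in the original skb */
	uint32_t gso_size = 1500;
	uint32_t gso_segs = (payload + gso_size - 1) / gso_size;	/* 6 segments */

	/* Wire bytes: the original skb plus one extra header per later segment. */
	uint32_t bytecount = skb_len + (gso_segs - 1) * hdr_len;

	assert(gso_segs == 6 && bytecount == 9066 + 5 * 66);	/* 9396 bytes on the wire */
	return 0;
}
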
1834 * iavf_tx_enable_csum - Enable Tx checksum offloads
1862 if (skb->ip_summed != CHECKSUM_PARTIAL) in iavf_tx_enable_csum()
1869 offset = ((ip.hdr - skb->data) / 2) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT; in iavf_tx_enable_csum()
1871 if (skb->encapsulation) { in iavf_tx_enable_csum()
1879 l4_proto = ip.v4->protocol; in iavf_tx_enable_csum()
1884 l4_proto = ip.v6->nexthdr; in iavf_tx_enable_csum()
1886 ipv6_skip_exthdr(skb, exthdr - skb->data, in iavf_tx_enable_csum()
1907 return -1; in iavf_tx_enable_csum()
1914 tunnel |= ((l4.hdr - ip.hdr) / 4) << in iavf_tx_enable_csum()
1921 tunnel |= ((ip.hdr - l4.hdr) / 2) << in iavf_tx_enable_csum()
1926 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && in iavf_tx_enable_csum()
1927 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) in iavf_tx_enable_csum()
1939 if (ip.v4->version == 4) in iavf_tx_enable_csum()
1941 if (ip.v6->version == 6) in iavf_tx_enable_csum()
1947 l4_proto = ip.v4->protocol; in iavf_tx_enable_csum()
1958 l4_proto = ip.v6->nexthdr; in iavf_tx_enable_csum()
1960 ipv6_skip_exthdr(skb, exthdr - skb->data, in iavf_tx_enable_csum()
1965 offset |= ((l4.hdr - ip.hdr) / 4) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT; in iavf_tx_enable_csum()
1972 offset |= l4.tcp->doff << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; in iavf_tx_enable_csum()
1988 return -1; in iavf_tx_enable_csum()
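
The offset word assembled above packs header lengths in hardware units: MACLEN in 2-byte words, IPLEN and L4LEN in 4-byte words. A worked sketch; the bit positions (0, 7 and 14) are assumptions about the usual descriptor layout, not taken from this excerpt:

#include <assert.h>
#include <stdint.h>

#define TXD_LEN_MACLEN_SHIFT	0	/* assumed field positions */
#define TXD_LEN_IPLEN_SHIFT	7
#define TXD_LEN_L4LEN_SHIFT	14

int main(void)
{
	uint32_t maclen = 14 / 2;	/* Ethernet header, in 2-byte words */
	uint32_t iplen  = 20 / 4;	/* IPv4 header, no options, in 4-byte words */
	uint32_t l4len  = 20 / 4;	/* TCP header, no options (doff = 5) */

	uint32_t offset = (maclen << TXD_LEN_MACLEN_SHIFT) |
			  (iplen  << TXD_LEN_IPLEN_SHIFT)  |
			  (l4len  << TXD_LEN_L4LEN_SHIFT);

	assert(offset == (7 | (5 << 7) | (5 << 14)));
	return 0;
}
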
2000 * iavf_create_tx_ctx - Build the Tx context descriptor
2002 * @cd_type_cmd_tso_mss: Quad Word 1
2003 * @cd_tunneling: Quad Word 0 - bits 0-31
2004 * @cd_l2tag2: Quad Word 0 - bits 32-63
2011 int i = tx_ring->next_to_use; in iavf_create_tx_ctx()
2021 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in iavf_create_tx_ctx()
2024 context_desc->tunneling_params = cpu_to_le32(cd_tunneling); in iavf_create_tx_ctx()
2025 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2); in iavf_create_tx_ctx()
2026 context_desc->rsvd = cpu_to_le16(0); in iavf_create_tx_ctx()
2027 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss); in iavf_create_tx_ctx()
2031 * __iavf_chk_linearize - Check if there are more than 8 buffers per packet
2049 nr_frags = skb_shinfo(skb)->nr_frags; in __iavf_chk_linearize()
2050 if (nr_frags < (IAVF_MAX_BUFFER_TXD - 1)) in __iavf_chk_linearize()
2056 nr_frags -= IAVF_MAX_BUFFER_TXD - 2; in __iavf_chk_linearize()
2057 frag = &skb_shinfo(skb)->frags[0]; in __iavf_chk_linearize()
2065 sum = 1 - skb_shinfo(skb)->gso_size; in __iavf_chk_linearize()
2077 for (stale = &skb_shinfo(skb)->frags[0];; stale++) { in __iavf_chk_linearize()
2089 int align_pad = -(skb_frag_off(stale)) & in __iavf_chk_linearize()
2090 (IAVF_MAX_READ_REQ_SIZE - 1); in __iavf_chk_linearize()
2092 sum -= align_pad; in __iavf_chk_linearize()
2093 stale_size -= align_pad; in __iavf_chk_linearize()
2096 sum -= IAVF_MAX_DATA_PER_TXD_ALIGNED; in __iavf_chk_linearize()
2097 stale_size -= IAVF_MAX_DATA_PER_TXD_ALIGNED; in __iavf_chk_linearize()
2105 if (!nr_frags--) in __iavf_chk_linearize()
2108 sum -= stale_size; in __iavf_chk_linearize()
2115 * __iavf_maybe_stop_tx - 2nd level check for tx stop conditions
2119 * Returns -EBUSY if a stop is needed, else 0
2123 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); in __iavf_maybe_stop_tx()
2129 return -EBUSY; in __iavf_maybe_stop_tx()
2131 /* A reprieve! - use start_queue because it doesn't call schedule */ in __iavf_maybe_stop_tx()
2132 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); in __iavf_maybe_stop_tx()
2133 ++tx_ring->tx_stats.restart_queue; in __iavf_maybe_stop_tx()
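
The stop-then-recheck above closes a race with the cleanup path: the queue is stopped first, the stop is made globally visible (the kernel pattern puts a full memory barrier between the two steps), and only then is the free count re-read, so a concurrent clean cannot slip between the check and the stop and leave the queue dead. The same pattern in self-contained form, using C11 atomics in place of the netif helpers (everything here is illustrative, not the driver code):

#include <stdatomic.h>
#include <stdbool.h>

struct txq_state {
	atomic_bool stopped;		/* consulted by the transmit path */
	atomic_int  unused_descs;	/* replenished by the cleanup path */
};

/* Returns false and leaves the queue stopped only if the ring is still full
 * after the stop is visible; otherwise space appeared and the queue reopens.
 */
static bool maybe_stop_tx_sketch(struct txq_state *q, int needed)
{
	atomic_store(&q->stopped, true);
	atomic_thread_fence(memory_order_seq_cst);	/* publish stop, then re-read */
	if (atomic_load(&q->unused_descs) < needed)
		return false;
	atomic_store(&q->stopped, false);	/* a reprieve: restart the queue */
	return true;
}
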
2138 * iavf_tx_map - Build the Tx descriptor
2151 unsigned int data_len = skb->data_len; in iavf_tx_map()
2156 u16 i = tx_ring->next_to_use; in iavf_tx_map()
2165 first->tx_flags = tx_flags; in iavf_tx_map()
2167 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in iavf_tx_map()
2172 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { in iavf_tx_map()
2175 if (dma_mapping_error(tx_ring->dev, dma)) in iavf_tx_map()
2183 max_data += -dma & (IAVF_MAX_READ_REQ_SIZE - 1); in iavf_tx_map()
2184 tx_desc->buffer_addr = cpu_to_le64(dma); in iavf_tx_map()
2187 tx_desc->cmd_type_offset_bsz = in iavf_tx_map()
2194 if (i == tx_ring->count) { in iavf_tx_map()
2200 size -= max_data; in iavf_tx_map()
2203 tx_desc->buffer_addr = cpu_to_le64(dma); in iavf_tx_map()
2209 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, in iavf_tx_map()
2215 if (i == tx_ring->count) { in iavf_tx_map()
2221 data_len -= size; in iavf_tx_map()
2223 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in iavf_tx_map()
2226 tx_bi = &tx_ring->tx_bi[i]; in iavf_tx_map()
2229 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); in iavf_tx_map()
2232 if (i == tx_ring->count) in iavf_tx_map()
2235 tx_ring->next_to_use = i; in iavf_tx_map()
2241 tx_desc->cmd_type_offset_bsz = in iavf_tx_map()
2255 first->next_to_watch = tx_desc; in iavf_tx_map()
2259 writel(i, tx_ring->tail); in iavf_tx_map()
2265 dev_info(tx_ring->dev, "TX DMA map failed\n"); in iavf_tx_map()
2269 tx_bi = &tx_ring->tx_bi[i]; in iavf_tx_map()
2274 i = tx_ring->count; in iavf_tx_map()
2275 i--; in iavf_tx_map()
2278 tx_ring->next_to_use = i; in iavf_tx_map()
2282 * iavf_xmit_frame_ring - Sends buffer on Tx ring
2302 prefetch(skb->data); in iavf_xmit_frame_ring()
2312 count = iavf_txd_use_count(skb->len); in iavf_xmit_frame_ring()
2313 tx_ring->tx_stats.tx_linearize++; in iavf_xmit_frame_ring()
2323 tx_ring->tx_stats.tx_busy++; in iavf_xmit_frame_ring()
2328 first = &tx_ring->tx_bi[tx_ring->next_to_use]; in iavf_xmit_frame_ring()
2329 first->skb = skb; in iavf_xmit_frame_ring()
2330 first->bytecount = skb->len; in iavf_xmit_frame_ring()
2331 first->gso_segs = 1; in iavf_xmit_frame_ring()
2375 iavf_trace(xmit_frame_ring_drop, first->skb, tx_ring); in iavf_xmit_frame_ring()
2376 dev_kfree_skb_any(first->skb); in iavf_xmit_frame_ring()
2377 first->skb = NULL; in iavf_xmit_frame_ring()
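
iavf_txd_use_count() is not shown in this excerpt; drivers in this family conservatively assume at most 12K of usable data per descriptor (each can hold up to 16K - 1 bytes, less alignment waste) and typically replace the divide-by-12K with a multiply and shift. A sketch of that idea, offered as an assumption about the helper rather than its actual body:

#include <assert.h>

/* Exact: descriptors needed when each one is trusted with 12 KiB of data. */
static unsigned int txd_use_count_exact(unsigned int size)
{
	return (size + 12287) / 12288;
}

/* Cheap approximation: /4096 is >>12, /3 is roughly (*85)>>8, and +1 rounds
 * up, giving ((size * 85) >> 20) + 1.
 */
static unsigned int txd_use_count_approx(unsigned int size)
{
	return ((size * 85) >> 20) + 1;
}

int main(void)
{
	assert(txd_use_count_exact(1500)  == 1 && txd_use_count_approx(1500)  == 1);
	assert(txd_use_count_exact(12288) == 1 && txd_use_count_approx(12288) == 1);
	assert(txd_use_count_exact(30000) == 3 && txd_use_count_approx(30000) == 3);
	/* The approximation can come up one short just past a 12K boundary
	 * (e.g. 12289 bytes); that is tolerable only because a descriptor can
	 * really carry up to 16K - 1 bytes.
	 */
	return 0;
}
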
2382 * iavf_xmit_frame - Selects the correct VSI and Tx queue to send buffer
2391 struct iavf_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping]; in iavf_xmit_frame()
2396 if (unlikely(skb->len < IAVF_MIN_TX_LEN)) { in iavf_xmit_frame()
2397 if (skb_pad(skb, IAVF_MIN_TX_LEN - skb->len)) in iavf_xmit_frame()
2399 skb->len = IAVF_MIN_TX_LEN; in iavf_xmit_frame()