Lines Matching +full:disable +full:- +full:eop
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
15 * i40e_fdir - Generate a Flow Director descriptor based on fdata
25 struct i40e_pf *pf = tx_ring->vsi->back; in i40e_fdir()
30 i = tx_ring->next_to_use; in i40e_fdir()
34 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in i40e_fdir()
36 flex_ptype = FIELD_PREP(I40E_TXD_FLTR_QW0_QINDEX_MASK, fdata->q_index); in i40e_fdir()
39 fdata->flex_off); in i40e_fdir()
41 flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_PCTYPE_MASK, fdata->pctype); in i40e_fdir()
45 fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id); in i40e_fdir()
55 dtype_cmd |= FIELD_PREP(I40E_TXD_FLTR_QW1_DEST_MASK, fdata->dest_ctl); in i40e_fdir()
58 fdata->fd_status); in i40e_fdir()
60 if (fdata->cnt_index) { in i40e_fdir()
63 fdata->cnt_index); in i40e_fdir()
66 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); in i40e_fdir()
67 fdir_desc->rsvd = cpu_to_le32(0); in i40e_fdir()
68 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd); in i40e_fdir()
69 fdir_desc->fd_id = cpu_to_le32(fdata->fd_id); in i40e_fdir()
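The descriptor fields above are packed with FIELD_PREP(), which shifts a value into the bit range described by a mask. A minimal user-space sketch of that packing, using illustrative masks rather than the real I40E_TXD_FLTR_QW0_* definitions:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's FIELD_PREP(): shift the value up to the lowest
 * set bit of the mask and keep only the bits the mask covers.
 */
#define FIELD_PREP_SKETCH(mask, val) \
	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

/* Illustrative field layouts only; the real masks live in the driver headers. */
#define QINDEX_MASK	0x000007FFu	/* Rx queue index, bits 0-10 */
#define FLEXOFF_MASK	0x0000F800u	/* flexible payload offset, bits 11-15 */
#define PCTYPE_MASK	0x007E0000u	/* packet classification type, bits 17-22 */

int main(void)
{
	uint32_t flex_ptype = 0;

	flex_ptype |= FIELD_PREP_SKETCH(QINDEX_MASK, 5);
	flex_ptype |= FIELD_PREP_SKETCH(FLEXOFF_MASK, 2);
	flex_ptype |= FIELD_PREP_SKETCH(PCTYPE_MASK, 33);

	printf("qindex_flex_ptype_vsi = 0x%08" PRIx32 "\n", flex_ptype);
	return 0;
}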
74 * i40e_program_fdir_filter - Program a Flow Director filter
76 * @raw_packet: the pre-allocated packet buffer for FDir
96 return -ENOENT; in i40e_program_fdir_filter()
98 tx_ring = vsi->tx_rings[0]; in i40e_program_fdir_filter()
99 dev = tx_ring->dev; in i40e_program_fdir_filter()
102 for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) { in i40e_program_fdir_filter()
104 return -EAGAIN; in i40e_program_fdir_filter()
114 i = tx_ring->next_to_use; in i40e_program_fdir_filter()
115 first = &tx_ring->tx_bi[i]; in i40e_program_fdir_filter()
119 i = tx_ring->next_to_use; in i40e_program_fdir_filter()
121 tx_buf = &tx_ring->tx_bi[i]; in i40e_program_fdir_filter()
123 tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0; in i40e_program_fdir_filter()
131 tx_desc->buffer_addr = cpu_to_le64(dma); in i40e_program_fdir_filter()
134 tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB; in i40e_program_fdir_filter()
135 tx_buf->raw_buf = (void *)raw_packet; in i40e_program_fdir_filter()
137 tx_desc->cmd_type_offset_bsz = in i40e_program_fdir_filter()
146 first->next_to_watch = tx_desc; in i40e_program_fdir_filter()
148 writel(tx_ring->next_to_use, tx_ring->tail); in i40e_program_fdir_filter()
152 return -1; in i40e_program_fdir_filter()
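i40e_program_fdir_filter() waits above until at least two descriptors are free: one for the programming descriptor and one for the raw packet. A small sketch of the free-slot arithmetic behind a check like I40E_DESC_UNUSED(), assuming the ring-index fields shown throughout this file:

#include <stdint.h>

/* Simplified ring bookkeeping; only the fields used by the calculation. */
struct ring_sketch {
	uint16_t count;		/* number of descriptors in the ring */
	uint16_t next_to_use;	/* next slot software will fill */
	uint16_t next_to_clean;	/* next slot software will reclaim */
};

/* How many descriptors can still be filled before next_to_use would run
 * into next_to_clean. One slot is always kept empty so that a full ring
 * can be told apart from an empty one.
 */
static uint16_t desc_unused_sketch(const struct ring_sketch *r)
{
	uint16_t ntc = r->next_to_clean, ntu = r->next_to_use;

	return ((ntc > ntu) ? 0 : r->count) + ntc - ntu - 1;
}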
156 * i40e_create_dummy_packet - Constructs dummy packet for HW
167 bool is_vlan = !!data->vlan_tag; in i40e_create_dummy_packet()
180 ip.daddr = data->dst_ip; in i40e_create_dummy_packet()
181 ip.saddr = data->src_ip; in i40e_create_dummy_packet()
187 memcpy(&ipv6.saddr.in6_u.u6_addr32, data->src_ip6, in i40e_create_dummy_packet()
189 memcpy(&ipv6.daddr.in6_u.u6_addr32, data->dst_ip6, in i40e_create_dummy_packet()
194 vlan.h_vlan_TCI = data->vlan_tag; in i40e_create_dummy_packet()
196 eth.h_proto = data->vlan_etype; in i40e_create_dummy_packet()
220 * i40e_create_dummy_udp_packet - helper function to create UDP packet
236 udp->dest = data->dst_port; in i40e_create_dummy_udp_packet()
237 udp->source = data->src_port; in i40e_create_dummy_udp_packet()
241 * i40e_create_dummy_tcp_packet - helper function to create TCP packet
262 tcp->dest = data->dst_port; in i40e_create_dummy_tcp_packet()
263 tcp->source = data->src_port; in i40e_create_dummy_tcp_packet()
267 * i40e_create_dummy_sctp_packet - helper function to create SCTP packet
285 sctp->dest = data->dst_port; in i40e_create_dummy_sctp_packet()
286 sctp->source = data->src_port; in i40e_create_dummy_sctp_packet()
290 * i40e_prepare_fdir_filter - Prepare and program fdir filter
308 if (fd_data->flex_filter) { in i40e_prepare_fdir_filter()
310 __be16 pattern = fd_data->flex_word; in i40e_prepare_fdir_filter()
311 u16 off = fd_data->flex_offset; in i40e_prepare_fdir_filter()
316 if (!!fd_data->vlan_tag) in i40e_prepare_fdir_filter()
322 fd_data->pctype = pctype; in i40e_prepare_fdir_filter()
325 dev_info(&pf->pdev->dev, in i40e_prepare_fdir_filter()
327 fd_data->pctype, fd_data->fd_id, ret); in i40e_prepare_fdir_filter()
329 return -EOPNOTSUPP; in i40e_prepare_fdir_filter()
330 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) { in i40e_prepare_fdir_filter()
332 dev_info(&pf->pdev->dev, in i40e_prepare_fdir_filter()
334 fd_data->pctype, fd_data->fd_id); in i40e_prepare_fdir_filter()
336 dev_info(&pf->pdev->dev, in i40e_prepare_fdir_filter()
338 fd_data->pctype, fd_data->fd_id); in i40e_prepare_fdir_filter()
345  * i40e_change_filter_num - Increment or decrement the IPv4/IPv6 filter counters in i40e_change_filter_num()
363 (*ipv4_filter_num)--; in i40e_change_filter_num()
365 (*ipv6_filter_num)--; in i40e_change_filter_num()
372 * i40e_add_del_fdir_udp - Add/Remove UDP filters
385 struct i40e_pf *pf = vsi->back; in i40e_add_del_fdir_udp()
391 return -ENOMEM; in i40e_add_del_fdir_udp()
411 i40e_change_filter_num(ipv4, add, &pf->fd_udp4_filter_cnt, in i40e_add_del_fdir_udp()
412 &pf->fd_udp6_filter_cnt); in i40e_add_del_fdir_udp()
420  * i40e_add_del_fdir_tcp - Add/Remove TCP filters in i40e_add_del_fdir_tcp()
433 struct i40e_pf *pf = vsi->back; in i40e_add_del_fdir_tcp()
439 return -ENOMEM; in i40e_add_del_fdir_tcp()
458 i40e_change_filter_num(ipv4, add, &pf->fd_tcp4_filter_cnt, in i40e_add_del_fdir_tcp()
459 &pf->fd_tcp6_filter_cnt); in i40e_add_del_fdir_tcp()
462 if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) && in i40e_add_del_fdir_tcp()
463 I40E_DEBUG_FD & pf->hw.debug_mask) in i40e_add_del_fdir_tcp()
464 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n"); in i40e_add_del_fdir_tcp()
465 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); in i40e_add_del_fdir_tcp()
473 * i40e_add_del_fdir_sctp - Add/Remove SCTPv4 Flow Director filters for
487 struct i40e_pf *pf = vsi->back; in i40e_add_del_fdir_sctp()
493 return -ENOMEM; in i40e_add_del_fdir_sctp()
513 i40e_change_filter_num(ipv4, add, &pf->fd_sctp4_filter_cnt, in i40e_add_del_fdir_sctp()
514 &pf->fd_sctp6_filter_cnt); in i40e_add_del_fdir_sctp()
522 * i40e_add_del_fdir_ip - Add/Remove IPv4 Flow Director filters for
536 struct i40e_pf *pf = vsi->back; in i40e_add_del_fdir_ip()
555 return -ENOMEM; in i40e_add_del_fdir_ip()
570 i40e_change_filter_num(ipv4, add, &pf->fd_ip4_filter_cnt, in i40e_add_del_fdir_ip()
571 &pf->fd_ip6_filter_cnt); in i40e_add_del_fdir_ip()
580 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
590 struct i40e_pf *pf = vsi->back; in i40e_add_del_fdir()
593 switch (input->flow_type & ~FLOW_EXT) { in i40e_add_del_fdir()
613 switch (input->ipl4_proto) { in i40e_add_del_fdir()
628 dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n", in i40e_add_del_fdir()
629 input->ipl4_proto); in i40e_add_del_fdir()
630 return -EINVAL; in i40e_add_del_fdir()
634 switch (input->ipl4_proto) { in i40e_add_del_fdir()
649 dev_info(&pf->pdev->dev, "Unsupported IPv6 protocol 0x%02x\n", in i40e_add_del_fdir()
650 input->ipl4_proto); in i40e_add_del_fdir()
651 return -EINVAL; in i40e_add_del_fdir()
655 dev_info(&pf->pdev->dev, "Unsupported flow type 0x%02x\n", in i40e_add_del_fdir()
656 input->flow_type); in i40e_add_del_fdir()
657 return -EINVAL; in i40e_add_del_fdir()
670 * i40e_fd_handle_status - check the Programming Status for FD
682 struct i40e_pf *pf = rx_ring->vsi->back; in i40e_fd_handle_status()
683 struct pci_dev *pdev = pf->pdev; in i40e_fd_handle_status()
692 pf->fd_inv = le32_to_cpu(qw0->hi_dword.fd_id); in i40e_fd_handle_status()
693 if (qw0->hi_dword.fd_id != 0 || in i40e_fd_handle_status()
694 (I40E_DEBUG_FD & pf->hw.debug_mask)) in i40e_fd_handle_status()
695 dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n", in i40e_fd_handle_status()
696 pf->fd_inv); in i40e_fd_handle_status()
699 * If so, auto disable ATR and set a state for in i40e_fd_handle_status()
704 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) in i40e_fd_handle_status()
707 pf->fd_add_err++; in i40e_fd_handle_status()
709 pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf); in i40e_fd_handle_status()
711 if (qw0->hi_dword.fd_id == 0 && in i40e_fd_handle_status()
712 test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) { in i40e_fd_handle_status()
715 * disable ATR and queue a flush right after SB in i40e_fd_handle_status()
716 * support is re-enabled. That shouldn't cause an in i40e_fd_handle_status()
719 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); in i40e_fd_handle_status()
720 set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state); in i40e_fd_handle_status()
725 fcnt_avail = pf->fdir_pf_filter_count; in i40e_fd_handle_status()
727 * if we are very close to full, it makes sense to disable in i40e_fd_handle_status()
728 * FD ATR/SB and then re-enable it when there is room. in i40e_fd_handle_status()
730 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) { in i40e_fd_handle_status()
731 if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) && in i40e_fd_handle_status()
733 pf->state)) in i40e_fd_handle_status()
734 if (I40E_DEBUG_FD & pf->hw.debug_mask) in i40e_fd_handle_status()
735 dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n"); in i40e_fd_handle_status()
738 if (I40E_DEBUG_FD & pf->hw.debug_mask) in i40e_fd_handle_status()
739 dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n", in i40e_fd_handle_status()
740 qw0->hi_dword.fd_id); in i40e_fd_handle_status()
745 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
752 if (tx_buffer->skb) { in i40e_unmap_and_free_tx_resource()
753 if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB) in i40e_unmap_and_free_tx_resource()
754 kfree(tx_buffer->raw_buf); in i40e_unmap_and_free_tx_resource()
756 xdp_return_frame(tx_buffer->xdpf); in i40e_unmap_and_free_tx_resource()
758 dev_kfree_skb_any(tx_buffer->skb); in i40e_unmap_and_free_tx_resource()
760 dma_unmap_single(ring->dev, in i40e_unmap_and_free_tx_resource()
765 dma_unmap_page(ring->dev, in i40e_unmap_and_free_tx_resource()
771 tx_buffer->next_to_watch = NULL; in i40e_unmap_and_free_tx_resource()
772 tx_buffer->skb = NULL; in i40e_unmap_and_free_tx_resource()
778  * i40e_clean_tx_ring - Free all Tx buffers in a ring in i40e_clean_tx_ring()
786 if (ring_is_xdp(tx_ring) && tx_ring->xsk_pool) { in i40e_clean_tx_ring()
790 if (!tx_ring->tx_bi) in i40e_clean_tx_ring()
794 for (i = 0; i < tx_ring->count; i++) in i40e_clean_tx_ring()
796 &tx_ring->tx_bi[i]); in i40e_clean_tx_ring()
799 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; in i40e_clean_tx_ring()
800 memset(tx_ring->tx_bi, 0, bi_size); in i40e_clean_tx_ring()
803 memset(tx_ring->desc, 0, tx_ring->size); in i40e_clean_tx_ring()
805 tx_ring->next_to_use = 0; in i40e_clean_tx_ring()
806 tx_ring->next_to_clean = 0; in i40e_clean_tx_ring()
808 if (!tx_ring->netdev) in i40e_clean_tx_ring()
816 * i40e_free_tx_resources - Free Tx resources per queue
824 kfree(tx_ring->tx_bi); in i40e_free_tx_resources()
825 tx_ring->tx_bi = NULL; in i40e_free_tx_resources()
827 if (tx_ring->desc) { in i40e_free_tx_resources()
828 dma_free_coherent(tx_ring->dev, tx_ring->size, in i40e_free_tx_resources()
829 tx_ring->desc, tx_ring->dma); in i40e_free_tx_resources()
830 tx_ring->desc = NULL; in i40e_free_tx_resources()
835 * i40e_get_tx_pending - how many tx descriptors not processed
848 tail = readl(ring->tail); in i40e_get_tx_pending()
850 head = ring->next_to_clean; in i40e_get_tx_pending()
851 tail = ring->next_to_use; in i40e_get_tx_pending()
856 tail - head : (tail + ring->count - head); in i40e_get_tx_pending()
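The last line of i40e_get_tx_pending() above is the wraparound form of "how far is tail ahead of head". A user-space sketch of the same arithmetic, with plain integers in place of the ring and register reads:

#include <stdint.h>

/* Descriptors the hardware has not yet processed: the distance from head to
 * tail, going forward around a ring of 'count' slots.
 */
static uint32_t tx_pending_sketch(uint32_t head, uint32_t tail, uint32_t count)
{
	if (head == tail)
		return 0;

	return (head <= tail) ? tail - head : tail + count - head;
}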
862  * i40e_detect_recover_hung - detect and recover hung queues in i40e_detect_recover_hung()
878 if (test_bit(__I40E_VSI_DOWN, vsi->state)) in i40e_detect_recover_hung()
881 netdev = vsi->netdev; in i40e_detect_recover_hung()
888 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_detect_recover_hung()
889 tx_ring = vsi->tx_rings[i]; in i40e_detect_recover_hung()
890 if (tx_ring && tx_ring->desc) { in i40e_detect_recover_hung()
898 packets = tx_ring->stats.packets & INT_MAX; in i40e_detect_recover_hung()
899 if (tx_ring->tx_stats.prev_pkt_ctr == packets) { in i40e_detect_recover_hung()
900 i40e_force_wb(vsi, tx_ring->q_vector); in i40e_detect_recover_hung()
908 tx_ring->tx_stats.prev_pkt_ctr = in i40e_detect_recover_hung()
909 i40e_get_tx_pending(tx_ring, true) ? packets : -1; in i40e_detect_recover_hung()
915 * i40e_clean_tx_irq - Reclaim resources after transmit completes
927 int i = tx_ring->next_to_clean; in i40e_clean_tx_irq()
932 unsigned int budget = vsi->work_limit; in i40e_clean_tx_irq()
934 tx_buf = &tx_ring->tx_bi[i]; in i40e_clean_tx_irq()
936 i -= tx_ring->count; in i40e_clean_tx_irq()
941 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; in i40e_clean_tx_irq()
956 tx_buf->next_to_watch = NULL; in i40e_clean_tx_irq()
959 total_bytes += tx_buf->bytecount; in i40e_clean_tx_irq()
960 total_packets += tx_buf->gso_segs; in i40e_clean_tx_irq()
964 xdp_return_frame(tx_buf->xdpf); in i40e_clean_tx_irq()
966 napi_consume_skb(tx_buf->skb, napi_budget); in i40e_clean_tx_irq()
969 dma_unmap_single(tx_ring->dev, in i40e_clean_tx_irq()
975 tx_buf->skb = NULL; in i40e_clean_tx_irq()
987 i -= tx_ring->count; in i40e_clean_tx_irq()
988 tx_buf = tx_ring->tx_bi; in i40e_clean_tx_irq()
994 dma_unmap_page(tx_ring->dev, in i40e_clean_tx_irq()
1007 i -= tx_ring->count; in i40e_clean_tx_irq()
1008 tx_buf = tx_ring->tx_bi; in i40e_clean_tx_irq()
1015 budget--; in i40e_clean_tx_irq()
1018 i += tx_ring->count; in i40e_clean_tx_irq()
1019 tx_ring->next_to_clean = i; in i40e_clean_tx_irq()
1031 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && in i40e_clean_tx_irq()
1037 if (__netif_subqueue_stopped(tx_ring->netdev, in i40e_clean_tx_irq()
1038 tx_ring->queue_index) && in i40e_clean_tx_irq()
1039 !test_bit(__I40E_VSI_DOWN, vsi->state)) { in i40e_clean_tx_irq()
1040 netif_wake_subqueue(tx_ring->netdev, in i40e_clean_tx_irq()
1041 tx_ring->queue_index); in i40e_clean_tx_irq()
1042 ++tx_ring->tx_stats.restart_queue; in i40e_clean_tx_irq()
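The repeated "i -= tx_ring->count" lines above come from an idiom used in i40e_clean_tx_irq(): the running index is kept biased negative so the wrap test is a compare against zero, and "i += tx_ring->count" at the end restores the real index. A self-contained sketch, with the wrap condition written out explicitly since the excerpt omits it:

#include <stdio.h>

int main(void)
{
	const int count = 8;	/* ring size (illustrative) */
	int i = 3;		/* next_to_clean */
	int to_clean = 7;	/* descriptors to reclaim (illustrative) */

	i -= count;		/* bias the index negative */
	while (to_clean--) {
		printf("cleaning slot %d\n", i + count);
		i++;
		if (!i)		/* walked off the end of the ring */
			i -= count;
	}
	i += count;		/* undo the bias */
	printf("next_to_clean = %d\n", i);
	return 0;
}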
1051 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
1059 u16 flags = q_vector->tx.ring[0].flags; in i40e_enable_wb_on_itr()
1065 if (q_vector->arm_wb_state) in i40e_enable_wb_on_itr()
1068 if (test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) { in i40e_enable_wb_on_itr()
1072 wr32(&vsi->back->hw, in i40e_enable_wb_on_itr()
1073 I40E_PFINT_DYN_CTLN(q_vector->reg_idx), in i40e_enable_wb_on_itr()
1079 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val); in i40e_enable_wb_on_itr()
1081 q_vector->arm_wb_state = true; in i40e_enable_wb_on_itr()
1085 * i40e_force_wb - Issue SW Interrupt so HW does a wb
1092 if (test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) { in i40e_force_wb()
1099 wr32(&vsi->back->hw, in i40e_force_wb()
1100 I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val); in i40e_force_wb()
1108 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val); in i40e_force_wb()
1115 return &q_vector->rx == rc; in i40e_container_is_rx()
1122 switch (q_vector->vsi->back->hw.phy.link_info.link_speed) { in i40e_itr_divisor()
1144 * i40e_update_itr - update the dynamic ITR value based on statistics
1165 if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting)) in i40e_update_itr()
1175 /* If we didn't update within up to 1 - 2 jiffies we can assume in i40e_update_itr()
1180 if (time_after(next_update, rc->next_update)) in i40e_update_itr()
1189 if (q_vector->itr_countdown) { in i40e_update_itr()
1190 itr = rc->target_itr; in i40e_update_itr()
1194 packets = rc->total_packets; in i40e_update_itr()
1195 bytes = rc->total_bytes; in i40e_update_itr()
1204 (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) { in i40e_update_itr()
1214 if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS && in i40e_update_itr()
1215 (q_vector->rx.target_itr & I40E_ITR_MASK) == in i40e_update_itr()
1222 rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY; in i40e_update_itr()
1234 itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC; in i40e_update_itr()
1243 itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr); in i40e_update_itr()
1340 rc->target_itr = itr; in i40e_update_itr()
1343 rc->next_update = next_update + 1; in i40e_update_itr()
1345 rc->total_bytes = 0; in i40e_update_itr()
1346 rc->total_packets = 0; in i40e_update_itr()
1351 return &rx_ring->rx_bi[idx]; in i40e_rx_bi()
1355 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
1365 u16 nta = rx_ring->next_to_alloc; in i40e_reuse_rx_page()
1371 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in i40e_reuse_rx_page()
1374 new_buff->dma = old_buff->dma; in i40e_reuse_rx_page()
1375 new_buff->page = old_buff->page; in i40e_reuse_rx_page()
1376 new_buff->page_offset = old_buff->page_offset; in i40e_reuse_rx_page()
1377 new_buff->pagecnt_bias = old_buff->pagecnt_bias; in i40e_reuse_rx_page()
1380 old_buff->page = NULL; in i40e_reuse_rx_page()
1384 * i40e_clean_programming_status - clean the programming status descriptor
1407 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
1414 struct device *dev = tx_ring->dev; in i40e_setup_tx_descriptors()
1418 return -ENOMEM; in i40e_setup_tx_descriptors()
1421 WARN_ON(tx_ring->tx_bi); in i40e_setup_tx_descriptors()
1422 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; in i40e_setup_tx_descriptors()
1423 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL); in i40e_setup_tx_descriptors()
1424 if (!tx_ring->tx_bi) in i40e_setup_tx_descriptors()
1427 u64_stats_init(&tx_ring->syncp); in i40e_setup_tx_descriptors()
1430 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); in i40e_setup_tx_descriptors()
1434 tx_ring->size += sizeof(u32); in i40e_setup_tx_descriptors()
1435 tx_ring->size = ALIGN(tx_ring->size, 4096); in i40e_setup_tx_descriptors()
1436 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, in i40e_setup_tx_descriptors()
1437 &tx_ring->dma, GFP_KERNEL); in i40e_setup_tx_descriptors()
1438 if (!tx_ring->desc) { in i40e_setup_tx_descriptors()
1440 tx_ring->size); in i40e_setup_tx_descriptors()
1444 tx_ring->next_to_use = 0; in i40e_setup_tx_descriptors()
1445 tx_ring->next_to_clean = 0; in i40e_setup_tx_descriptors()
1446 tx_ring->tx_stats.prev_pkt_ctr = -1; in i40e_setup_tx_descriptors()
1450 kfree(tx_ring->tx_bi); in i40e_setup_tx_descriptors()
1451 tx_ring->tx_bi = NULL; in i40e_setup_tx_descriptors()
1452 return -ENOMEM; in i40e_setup_tx_descriptors()
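i40e_setup_tx_descriptors() sizes the DMA region as count descriptors plus room for a u32 (used for head write-back), rounded up to 4 KiB before dma_alloc_coherent(). A sketch of that arithmetic, assuming the 16-byte Tx descriptor (two 64-bit words) seen elsewhere in this file:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t count = 512;			/* descriptors in the ring */
	uint64_t size  = count * 16;		/* 16-byte Tx descriptors (assumed) */

	size += sizeof(uint32_t);		/* head write-back word */
	size  = ALIGN_UP(size, 4096);		/* round up for the DMA allocation */

	printf("ring allocation = %" PRIu64 " bytes\n", size);
	return 0;
}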
1457 memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count); in i40e_clear_rx_bi()
1461 * i40e_clean_rx_ring - Free Rx buffers
1469 if (!rx_ring->rx_bi) in i40e_clean_rx_ring()
1472 if (rx_ring->xsk_pool) { in i40e_clean_rx_ring()
1478 for (i = 0; i < rx_ring->count; i++) { in i40e_clean_rx_ring()
1481 if (!rx_bi->page) in i40e_clean_rx_ring()
1487 dma_sync_single_range_for_cpu(rx_ring->dev, in i40e_clean_rx_ring()
1488 rx_bi->dma, in i40e_clean_rx_ring()
1489 rx_bi->page_offset, in i40e_clean_rx_ring()
1490 rx_ring->rx_buf_len, in i40e_clean_rx_ring()
1494 dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma, in i40e_clean_rx_ring()
1499 __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias); in i40e_clean_rx_ring()
1501 rx_bi->page = NULL; in i40e_clean_rx_ring()
1502 rx_bi->page_offset = 0; in i40e_clean_rx_ring()
1506 if (rx_ring->xsk_pool) in i40e_clean_rx_ring()
1512 memset(rx_ring->desc, 0, rx_ring->size); in i40e_clean_rx_ring()
1514 rx_ring->next_to_alloc = 0; in i40e_clean_rx_ring()
1515 rx_ring->next_to_clean = 0; in i40e_clean_rx_ring()
1516 rx_ring->next_to_process = 0; in i40e_clean_rx_ring()
1517 rx_ring->next_to_use = 0; in i40e_clean_rx_ring()
1521 * i40e_free_rx_resources - Free Rx resources
1529 if (rx_ring->vsi->type == I40E_VSI_MAIN) in i40e_free_rx_resources()
1530 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in i40e_free_rx_resources()
1531 rx_ring->xdp_prog = NULL; in i40e_free_rx_resources()
1532 kfree(rx_ring->rx_bi); in i40e_free_rx_resources()
1533 rx_ring->rx_bi = NULL; in i40e_free_rx_resources()
1535 if (rx_ring->desc) { in i40e_free_rx_resources()
1536 dma_free_coherent(rx_ring->dev, rx_ring->size, in i40e_free_rx_resources()
1537 rx_ring->desc, rx_ring->dma); in i40e_free_rx_resources()
1538 rx_ring->desc = NULL; in i40e_free_rx_resources()
1543 * i40e_setup_rx_descriptors - Allocate Rx descriptors
1550 struct device *dev = rx_ring->dev; in i40e_setup_rx_descriptors()
1552 u64_stats_init(&rx_ring->syncp); in i40e_setup_rx_descriptors()
1555 rx_ring->size = rx_ring->count * sizeof(union i40e_rx_desc); in i40e_setup_rx_descriptors()
1556 rx_ring->size = ALIGN(rx_ring->size, 4096); in i40e_setup_rx_descriptors()
1557 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, in i40e_setup_rx_descriptors()
1558 &rx_ring->dma, GFP_KERNEL); in i40e_setup_rx_descriptors()
1560 if (!rx_ring->desc) { in i40e_setup_rx_descriptors()
1562 rx_ring->size); in i40e_setup_rx_descriptors()
1563 return -ENOMEM; in i40e_setup_rx_descriptors()
1566 rx_ring->next_to_alloc = 0; in i40e_setup_rx_descriptors()
1567 rx_ring->next_to_clean = 0; in i40e_setup_rx_descriptors()
1568 rx_ring->next_to_process = 0; in i40e_setup_rx_descriptors()
1569 rx_ring->next_to_use = 0; in i40e_setup_rx_descriptors()
1571 rx_ring->xdp_prog = rx_ring->vsi->xdp_prog; in i40e_setup_rx_descriptors()
1573 rx_ring->rx_bi = in i40e_setup_rx_descriptors()
1574 kcalloc(rx_ring->count, sizeof(*rx_ring->rx_bi), GFP_KERNEL); in i40e_setup_rx_descriptors()
1575 if (!rx_ring->rx_bi) in i40e_setup_rx_descriptors()
1576 return -ENOMEM; in i40e_setup_rx_descriptors()
1582 * i40e_release_rx_desc - Store the new tail and head values
1588 rx_ring->next_to_use = val; in i40e_release_rx_desc()
1591 rx_ring->next_to_alloc = val; in i40e_release_rx_desc()
1595 * applicable for weak-ordered memory model archs, in i40e_release_rx_desc()
1596 * such as IA-64). in i40e_release_rx_desc()
1599 writel(val, rx_ring->tail); in i40e_release_rx_desc()
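The comment above i40e_release_rx_desc() is about ordering, not the value: descriptor writes must be globally visible before the tail write that hands the buffers to hardware, which is why the driver pairs a write barrier with the writel() shown above. A conceptual user-space sketch using a C11 release fence in that role; the MMIO pointer is a placeholder:

#include <stdatomic.h>
#include <stdint.h>

/* Publish new Rx descriptors, then ring the doorbell. The release fence keeps
 * the descriptor stores from being reordered after the tail write on weakly
 * ordered CPUs.
 */
static void release_rx_desc_sketch(volatile uint32_t *tail_reg, uint32_t val)
{
	atomic_thread_fence(memory_order_release);	/* write barrier in the driver */
	*tail_reg = val;				/* writel(val, rx_ring->tail) */
}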
1608 truesize = rx_ring->rx_offset ? in i40e_rx_frame_truesize()
1609 SKB_DATA_ALIGN(size + rx_ring->rx_offset) + in i40e_rx_frame_truesize()
1617 * i40e_alloc_mapped_page - recycle or make a new page
1627 struct page *page = bi->page; in i40e_alloc_mapped_page()
1632 rx_ring->rx_stats.page_reuse_count++; in i40e_alloc_mapped_page()
1639 rx_ring->rx_stats.alloc_page_failed++; in i40e_alloc_mapped_page()
1643 rx_ring->rx_stats.page_alloc_count++; in i40e_alloc_mapped_page()
1646 dma = dma_map_page_attrs(rx_ring->dev, page, 0, in i40e_alloc_mapped_page()
1654 if (dma_mapping_error(rx_ring->dev, dma)) { in i40e_alloc_mapped_page()
1656 rx_ring->rx_stats.alloc_page_failed++; in i40e_alloc_mapped_page()
1660 bi->dma = dma; in i40e_alloc_mapped_page()
1661 bi->page = page; in i40e_alloc_mapped_page()
1662 bi->page_offset = rx_ring->rx_offset; in i40e_alloc_mapped_page()
1663 page_ref_add(page, USHRT_MAX - 1); in i40e_alloc_mapped_page()
1664 bi->pagecnt_bias = USHRT_MAX; in i40e_alloc_mapped_page()
1670 * i40e_alloc_rx_buffers - Replace used receive buffers
1678 u16 ntu = rx_ring->next_to_use; in i40e_alloc_rx_buffers()
1683 if (!rx_ring->netdev || !cleaned_count) in i40e_alloc_rx_buffers()
1694 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, in i40e_alloc_rx_buffers()
1695 bi->page_offset, in i40e_alloc_rx_buffers()
1696 rx_ring->rx_buf_len, in i40e_alloc_rx_buffers()
1700 * because each write-back erases this info. in i40e_alloc_rx_buffers()
1702 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); in i40e_alloc_rx_buffers()
1707 if (unlikely(ntu == rx_ring->count)) { in i40e_alloc_rx_buffers()
1714 rx_desc->wb.qword1.status_error_len = 0; in i40e_alloc_rx_buffers()
1716 cleaned_count--; in i40e_alloc_rx_buffers()
1719 if (rx_ring->next_to_use != ntu) in i40e_alloc_rx_buffers()
1725 if (rx_ring->next_to_use != ntu) in i40e_alloc_rx_buffers()
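The refill path above posts buffers starting at next_to_use, stops early if an allocation fails, and only bumps the tail when something was actually posted. A sketch of that loop shape; alloc_buffer() and write_tail() are stand-ins, not driver functions:

#include <stdbool.h>
#include <stdint.h>

struct rx_ring_sketch {
	uint16_t count;
	uint16_t next_to_use;
};

static bool alloc_buffer(uint16_t idx) { (void)idx; return true; }
static void write_tail(uint16_t val)   { (void)val; }

/* Replace 'cleaned_count' used buffers, wrapping at the end of the ring.
 * Returns true if an allocation failed so the caller comes back and retries,
 * mirroring the driver's return convention.
 */
static bool alloc_rx_buffers_sketch(struct rx_ring_sketch *r, uint16_t cleaned_count)
{
	uint16_t ntu = r->next_to_use;
	bool failed = false;

	while (cleaned_count--) {
		if (!alloc_buffer(ntu)) {
			failed = true;		/* leave this slot for next time */
			break;
		}
		if (++ntu == r->count)
			ntu = 0;		/* wrap to the start of the ring */
	}

	if (r->next_to_use != ntu) {
		r->next_to_use = ntu;
		write_tail(ntu);		/* i40e_release_rx_desc() in the driver */
	}

	return failed;
}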
1735 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1750 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); in i40e_rx_checksum()
1756 skb->ip_summed = CHECKSUM_NONE; in i40e_rx_checksum()
1761 if (!(vsi->netdev->features & NETIF_F_RXCSUM)) in i40e_rx_checksum()
1785 /* don't increment checksum err here, non-fatal err */ in i40e_rx_checksum()
1804 skb->csum_level = 1; in i40e_rx_checksum()
1811 skb->ip_summed = CHECKSUM_UNNECESSARY; in i40e_rx_checksum()
1820 vsi->back->hw_csum_rx_error++; in i40e_rx_checksum()
1824 * i40e_ptype_to_htype - get a hash type
1847 * i40e_rx_hash - set the hash value in the skb
1863 if (!(ring->netdev->features & NETIF_F_RXHASH)) in i40e_rx_hash()
1866 if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) { in i40e_rx_hash()
1867 hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); in i40e_rx_hash()
1873 * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
1875 * @rx_desc: pointer to the EOP Rx descriptor
1885 u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); in i40e_process_skb_fields()
1892 i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn); in i40e_process_skb_fields()
1896 i40e_rx_checksum(rx_ring->vsi, skb, rx_desc); in i40e_process_skb_fields()
1898 skb_record_rx_queue(skb, rx_ring->queue_index); in i40e_process_skb_fields()
1901 __le16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1; in i40e_process_skb_fields()
1907 /* modifies the skb - consumes the enet header */ in i40e_process_skb_fields()
1908 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in i40e_process_skb_fields()
1912 * i40e_cleanup_headers - Correct empty headers
1915 * @rx_desc: pointer to the EOP Rx descriptor
1926 /* ERR_MASK will only have valid bits if EOP set, and in i40e_cleanup_headers()
1945 * i40e_can_reuse_rx_page - Determine if page can be reused for another Rx
1960 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; in i40e_can_reuse_rx_page()
1961 struct page *page = rx_buffer->page; in i40e_can_reuse_rx_page()
1965 rx_stats->page_waive_count++; in i40e_can_reuse_rx_page()
1971 if (unlikely((rx_buffer->page_count - pagecnt_bias) > 1)) { in i40e_can_reuse_rx_page()
1972 rx_stats->page_busy_count++; in i40e_can_reuse_rx_page()
1977 (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048) in i40e_can_reuse_rx_page()
1978 if (rx_buffer->page_offset > I40E_LAST_OFFSET) { in i40e_can_reuse_rx_page()
1979 rx_stats->page_busy_count++; in i40e_can_reuse_rx_page()
1989 page_ref_add(page, USHRT_MAX - 1); in i40e_can_reuse_rx_page()
1990 rx_buffer->pagecnt_bias = USHRT_MAX; in i40e_can_reuse_rx_page()
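The USHRT_MAX lines above are the page-reuse accounting: the driver takes a large batch of page references once and spends them one pagecnt_bias decrement per received buffer, topping the batch back up when the bias runs low. A simplified sketch of that bookkeeping, with a plain integer standing in for the struct page refcount:

#include <stdbool.h>
#include <stdint.h>

struct rx_buf_sketch {
	unsigned int page_refs;		/* stand-in for page_count(page) */
	uint16_t pagecnt_bias;		/* references the driver still owns */
};

static bool can_reuse_sketch(struct rx_buf_sketch *b)
{
	/* Someone other than the driver still holds part of the page. */
	if (b->page_refs - b->pagecnt_bias > 1)
		return false;

	/* Nearly out of prepaid references: take another big batch so the
	 * real refcount is touched rarely instead of once per packet.
	 */
	if (b->pagecnt_bias == 1) {
		b->page_refs += UINT16_MAX - 1;		/* page_ref_add() in the driver */
		b->pagecnt_bias = UINT16_MAX;
	}

	return true;
}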
1997 * i40e_rx_buffer_flip - adjusted rx_buffer to point to an unused region
2005 rx_buffer->page_offset ^= truesize; in i40e_rx_buffer_flip()
2007 rx_buffer->page_offset += truesize; in i40e_rx_buffer_flip()
2012 * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
2024 rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_process); in i40e_get_rx_buffer()
2025 rx_buffer->page_count = in i40e_get_rx_buffer()
2027 page_count(rx_buffer->page); in i40e_get_rx_buffer()
2031 prefetch_page_address(rx_buffer->page); in i40e_get_rx_buffer()
2034 dma_sync_single_range_for_cpu(rx_ring->dev, in i40e_get_rx_buffer()
2035 rx_buffer->dma, in i40e_get_rx_buffer()
2036 rx_buffer->page_offset, in i40e_get_rx_buffer()
2041 rx_buffer->pagecnt_bias--; in i40e_get_rx_buffer()
2047 * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
2057 if (i40e_can_reuse_rx_page(rx_buffer, &rx_ring->rx_stats)) { in i40e_put_rx_buffer()
2062 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, in i40e_put_rx_buffer()
2065 __page_frag_cache_drain(rx_buffer->page, in i40e_put_rx_buffer()
2066 rx_buffer->pagecnt_bias); in i40e_put_rx_buffer()
2068 rx_buffer->page = NULL; in i40e_put_rx_buffer()
2073  * i40e_process_rx_buffs - Processing of buffers post XDP prog or on error
2081 u32 nr_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags; in i40e_process_rx_buffs()
2082 u32 next = rx_ring->next_to_clean, i = 0; in i40e_process_rx_buffs()
2085 xdp->flags = 0; in i40e_process_rx_buffs()
2089 if (++next == rx_ring->count) in i40e_process_rx_buffs()
2092 if (!rx_buffer->page) in i40e_process_rx_buffs()
2096 i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz); in i40e_process_rx_buffs()
2098 rx_buffer->pagecnt_bias++; in i40e_process_rx_buffs()
2100 /* EOP buffer will be put in i40e_clean_rx_irq() */ in i40e_process_rx_buffs()
2101 if (next == rx_ring->next_to_process) in i40e_process_rx_buffs()
2109 * i40e_construct_skb - Allocate skb and populate it
2120 unsigned int size = xdp->data_end - xdp->data; in i40e_construct_skb()
2128 net_prefetch(xdp->data); in i40e_construct_skb()
2130 /* Note, we get here by enabling legacy-rx via: in i40e_construct_skb()
2132 * ethtool --set-priv-flags <dev> legacy-rx on in i40e_construct_skb()
2135 * opposed to having legacy-rx off, where we process XDP in i40e_construct_skb()
2140 * xdp->data_meta will always point to xdp->data, since in i40e_construct_skb()
2142 * change in future for legacy-rx mode on, then lets also in i40e_construct_skb()
2143 * add xdp->data_meta handling here. in i40e_construct_skb()
2147 skb = __napi_alloc_skb(&rx_ring->q_vector->napi, in i40e_construct_skb()
2156 headlen = eth_get_headlen(skb->dev, xdp->data, in i40e_construct_skb()
2160 memcpy(__skb_put(skb, headlen), xdp->data, in i40e_construct_skb()
2165 nr_frags = sinfo->nr_frags; in i40e_construct_skb()
2167 rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean); in i40e_construct_skb()
2169 size -= headlen; in i40e_construct_skb()
2175 skb_add_rx_frag(skb, 0, rx_buffer->page, in i40e_construct_skb()
2176 rx_buffer->page_offset + headlen, in i40e_construct_skb()
2177 size, xdp->frame_sz); in i40e_construct_skb()
2179 i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz); in i40e_construct_skb()
2182 rx_buffer->pagecnt_bias++; in i40e_construct_skb()
2188 memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0], in i40e_construct_skb()
2191 xdp_update_skb_shared_info(skb, skinfo->nr_frags + nr_frags, in i40e_construct_skb()
2192 sinfo->xdp_frags_size, in i40e_construct_skb()
2193 nr_frags * xdp->frame_sz, in i40e_construct_skb()
2197 if (++rx_ring->next_to_clean == rx_ring->count) in i40e_construct_skb()
2198 rx_ring->next_to_clean = 0; in i40e_construct_skb()
2207 * i40e_build_skb - Build skb around an existing buffer
2217 unsigned int metasize = xdp->data - xdp->data_meta; in i40e_build_skb()
2222 /* Prefetch first cache line of first page. If xdp->data_meta in i40e_build_skb()
2223 * is unused, this points exactly as xdp->data, otherwise we in i40e_build_skb()
2227 net_prefetch(xdp->data_meta); in i40e_build_skb()
2231 nr_frags = sinfo->nr_frags; in i40e_build_skb()
2235 skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz); in i40e_build_skb()
2240 skb_reserve(skb, xdp->data - xdp->data_hard_start); in i40e_build_skb()
2241 __skb_put(skb, xdp->data_end - xdp->data); in i40e_build_skb()
2247 sinfo->xdp_frags_size, in i40e_build_skb()
2248 nr_frags * xdp->frame_sz, in i40e_build_skb()
2255 rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean); in i40e_build_skb()
2257 i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz); in i40e_build_skb()
2264 * i40e_is_non_eop - process handling of non-EOP buffers
2268 * If the buffer is an EOP buffer, this function exits returning false,
2269 * otherwise return true indicating that this is in fact a non-EOP buffer.
2279 rx_ring->rx_stats.non_eop_descs++; in i40e_is_non_eop()
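i40e_is_non_eop() is where the EOP part of the search query comes in: a frame can span several Rx descriptors, and only the last one carries the end-of-frame status bit. A sketch of the check, with a placeholder bit position rather than the hardware's status layout:

#include <stdbool.h>
#include <stdint.h>

/* Placeholder for the end-of-frame status bit; not the hardware definition. */
#define STATUS_EOF_BIT_SKETCH	(1ULL << 1)

static bool is_non_eop_sketch(uint64_t status_qword)
{
	/* EOP set: this descriptor finishes the frame, nothing more to chain. */
	if (status_qword & STATUS_EOF_BIT_SKETCH)
		return false;

	/* Otherwise the frame continues in the next descriptor. */
	return true;
}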
2298 * i40e_run_xdp - run an XDP program
2312 prefetchw(xdp->data_hard_start); /* xdp_frame write */ in i40e_run_xdp()
2319 xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; in i40e_run_xdp()
2325 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); in i40e_run_xdp()
2331 bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); in i40e_run_xdp()
2335 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); in i40e_run_xdp()
2346 * i40e_xdp_ring_update_tail - Updates the XDP Tx ring tail register
2357 writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail); in i40e_xdp_ring_update_tail()
2361 * i40e_update_rx_stats - Update Rx ring statistics
2372 u64_stats_update_begin(&rx_ring->syncp); in i40e_update_rx_stats()
2373 rx_ring->stats.packets += total_rx_packets; in i40e_update_rx_stats()
2374 rx_ring->stats.bytes += total_rx_bytes; in i40e_update_rx_stats()
2375 u64_stats_update_end(&rx_ring->syncp); in i40e_update_rx_stats()
2376 rx_ring->q_vector->rx.total_packets += total_rx_packets; in i40e_update_rx_stats()
2377 rx_ring->q_vector->rx.total_bytes += total_rx_bytes; in i40e_update_rx_stats()
2381 * i40e_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
2396 rx_ring->vsi->xdp_rings[rx_ring->queue_index]; in i40e_finalize_xdp_rx()
2408 u32 ntp = rx_ring->next_to_process + 1; in i40e_inc_ntp()
2410 ntp = (ntp < rx_ring->count) ? ntp : 0; in i40e_inc_ntp()
2411 rx_ring->next_to_process = ntp; in i40e_inc_ntp()
2428 sinfo->nr_frags = 0; in i40e_add_xdp_frag()
2429 sinfo->xdp_frags_size = 0; in i40e_add_xdp_frag()
2431 } else if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS)) { in i40e_add_xdp_frag()
2433 return -ENOMEM; in i40e_add_xdp_frag()
2436 __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buffer->page, in i40e_add_xdp_frag()
2437 rx_buffer->page_offset, size); in i40e_add_xdp_frag()
2439 sinfo->xdp_frags_size += size; in i40e_add_xdp_frag()
2441 if (page_is_pfmemalloc(rx_buffer->page)) in i40e_add_xdp_frag()
2443 *nr_frags = sinfo->nr_frags; in i40e_add_xdp_frag()
2449 * i40e_consume_xdp_buff - Consume all the buffers of the packet and update ntc
2452 * @rx_buffer: rx_buffer of eop desc
2460 rx_ring->next_to_clean = rx_ring->next_to_process; in i40e_consume_xdp_buff()
2461 xdp->data = NULL; in i40e_consume_xdp_buff()
2465 * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
2482 u16 clean_threshold = rx_ring->count / 2; in i40e_clean_rx_irq()
2483 unsigned int offset = rx_ring->rx_offset; in i40e_clean_rx_irq()
2484 struct xdp_buff *xdp = &rx_ring->xdp; in i40e_clean_rx_irq()
2490 xdp_prog = READ_ONCE(rx_ring->xdp_prog); in i40e_clean_rx_irq()
2493 u16 ntp = rx_ring->next_to_process; in i40e_clean_rx_irq()
2514 * hardware wrote DD then the length will be non-zero in i40e_clean_rx_irq()
2516 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); in i40e_clean_rx_irq()
2526 rx_desc->raw.qword[0], in i40e_clean_rx_irq()
2534 if (rx_ring->next_to_clean == ntp) { in i40e_clean_rx_irq()
2535 rx_ring->next_to_clean = in i40e_clean_rx_irq()
2536 rx_ring->next_to_process; in i40e_clean_rx_irq()
2553 if (!xdp->data) { in i40e_clean_rx_irq()
2556 hard_start = page_address(rx_buffer->page) + in i40e_clean_rx_irq()
2557 rx_buffer->page_offset - offset; in i40e_clean_rx_irq()
2561 xdp->frame_sz = i40e_rx_frame_truesize(rx_ring, size); in i40e_clean_rx_irq()
2565 /* Overflowing packet: Drop all frags on EOP */ in i40e_clean_rx_irq()
2582 i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz); in i40e_clean_rx_irq()
2584 rx_buffer->pagecnt_bias++; in i40e_clean_rx_irq()
2595 rx_ring->rx_stats.alloc_buff_failed++; in i40e_clean_rx_irq()
2604 total_rx_bytes += skb->len; in i40e_clean_rx_irq()
2610 napi_gro_receive(&rx_ring->q_vector->napi, skb); in i40e_clean_rx_irq()
2618 rx_ring->next_to_clean = rx_ring->next_to_process; in i40e_clean_rx_irq()
2620 xdp->data = NULL; in i40e_clean_rx_irq()
2639 * auto-cleared". The auto-clearing happens when the interrupt is in i40e_buildreg_itr()
2645 * to hold pending events for us until the interrupt is re-enabled in i40e_buildreg_itr()
2649 * only need to shift by the interval shift - 1 instead of the in i40e_buildreg_itr()
2656 (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1)); in i40e_buildreg_itr()
2674 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
2682 struct i40e_hw *hw = &vsi->back->hw; in i40e_update_enable_itr()
2685 /* If we don't have MSIX, then we only need to re-enable icr0 */ in i40e_update_enable_itr()
2686 if (!test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) { in i40e_update_enable_itr()
2687 i40e_irq_dynamic_enable_icr0(vsi->back); in i40e_update_enable_itr()
2692 i40e_update_itr(q_vector, &q_vector->tx); in i40e_update_enable_itr()
2693 i40e_update_itr(q_vector, &q_vector->rx); in i40e_update_enable_itr()
2697 * pseudo-lazy update with the following criteria. in i40e_update_enable_itr()
2703 if (q_vector->rx.target_itr < q_vector->rx.current_itr) { in i40e_update_enable_itr()
2706 q_vector->rx.target_itr); in i40e_update_enable_itr()
2707 q_vector->rx.current_itr = q_vector->rx.target_itr; in i40e_update_enable_itr()
2708 q_vector->itr_countdown = ITR_COUNTDOWN_START; in i40e_update_enable_itr()
2709 } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) || in i40e_update_enable_itr()
2710 ((q_vector->rx.target_itr - q_vector->rx.current_itr) < in i40e_update_enable_itr()
2711 (q_vector->tx.target_itr - q_vector->tx.current_itr))) { in i40e_update_enable_itr()
2716 q_vector->tx.target_itr); in i40e_update_enable_itr()
2717 q_vector->tx.current_itr = q_vector->tx.target_itr; in i40e_update_enable_itr()
2718 q_vector->itr_countdown = ITR_COUNTDOWN_START; in i40e_update_enable_itr()
2719 } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) { in i40e_update_enable_itr()
2722 q_vector->rx.target_itr); in i40e_update_enable_itr()
2723 q_vector->rx.current_itr = q_vector->rx.target_itr; in i40e_update_enable_itr()
2724 q_vector->itr_countdown = ITR_COUNTDOWN_START; in i40e_update_enable_itr()
2728 if (q_vector->itr_countdown) in i40e_update_enable_itr()
2729 q_vector->itr_countdown--; in i40e_update_enable_itr()
2732 if (!test_bit(__I40E_VSI_DOWN, vsi->state)) in i40e_update_enable_itr()
2733 wr32(hw, INTREG(q_vector->reg_idx), intval); in i40e_update_enable_itr()
2737 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
2749 struct i40e_vsi *vsi = q_vector->vsi; in i40e_napi_poll()
2760 if (test_bit(__I40E_VSI_DOWN, vsi->state)) { in i40e_napi_poll()
2768 i40e_for_each_ring(ring, q_vector->tx) { in i40e_napi_poll()
2769 bool wd = ring->xsk_pool ? in i40e_napi_poll()
2777 arm_wb |= ring->arm_wb; in i40e_napi_poll()
2778 ring->arm_wb = false; in i40e_napi_poll()
2786 if (unlikely(q_vector->num_ringpairs > 1)) in i40e_napi_poll()
2791 budget_per_ring = max_t(int, budget / q_vector->num_ringpairs, 1); in i40e_napi_poll()
2796 i40e_for_each_ring(ring, q_vector->rx) { in i40e_napi_poll()
2797 int cleaned = ring->xsk_pool ? in i40e_napi_poll()
2822 if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) { in i40e_napi_poll()
2829 /* Return budget-1 so that polling stops */ in i40e_napi_poll()
2830 return budget - 1; in i40e_napi_poll()
2834 q_vector->tx.ring[0].tx_stats.tx_force_wb++; in i40e_napi_poll()
2840 if (q_vector->tx.ring[0].flags & I40E_TXR_FLAGS_WB_ON_ITR) in i40e_napi_poll()
2841 q_vector->arm_wb_state = false; in i40e_napi_poll()
2843 /* Exit the polling mode, but don't re-enable interrupts if stack might in i40e_napi_poll()
2844 * poll us due to busy-polling in i40e_napi_poll()
2849 return min(work_done, budget - 1); in i40e_napi_poll()
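When one interrupt vector serves several ring pairs, i40e_napi_poll() divides the NAPI budget between the Rx rings but never hands a ring less than one unit, as the max_t() line above shows. A trivial sketch:

#include <stdio.h>

/* Split the NAPI budget across rings, guaranteeing each ring at least 1. */
static int budget_per_ring(int budget, int num_ringpairs)
{
	int per_ring = budget / num_ringpairs;

	return per_ring > 1 ? per_ring : 1;
}

int main(void)
{
	printf("%d\n", budget_per_ring(64, 4));		/* 16 */
	printf("%d\n", budget_per_ring(64, 128));	/* clamped to 1 */
	return 0;
}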
2853 * i40e_atr - Add a Flow Director ATR filter
2862 struct i40e_pf *pf = tx_ring->vsi->back; in i40e_atr()
2875 if (!test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags)) in i40e_atr()
2878 if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) in i40e_atr()
2882 if (!tx_ring->atr_sample_rate) in i40e_atr()
2899 l4_proto = hdr.ipv4->protocol; in i40e_atr()
2902 unsigned int inner_hlen = hdr.network - skb->data; in i40e_atr()
2909 hlen = h_offset - inner_hlen; in i40e_atr()
2918 if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) in i40e_atr()
2920 if (test_bit(I40E_FLAG_HW_ATR_EVICT_ENA, pf->flags)) { in i40e_atr()
2924 if (th->fin || th->rst) in i40e_atr()
2928 tx_ring->atr_count++; in i40e_atr()
2931 if (!th->fin && in i40e_atr()
2932 !th->syn && in i40e_atr()
2933 !th->rst && in i40e_atr()
2934 (tx_ring->atr_count < tx_ring->atr_sample_rate)) in i40e_atr()
2937 tx_ring->atr_count = 0; in i40e_atr()
2940 i = tx_ring->next_to_use; in i40e_atr()
2944 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in i40e_atr()
2947 tx_ring->queue_index); in i40e_atr()
2954 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT; in i40e_atr()
2958 dtype_cmd |= (th->fin || th->rst) ? in i40e_atr()
2974 I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)); in i40e_atr()
2978 I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)); in i40e_atr()
2980 if (test_bit(I40E_FLAG_HW_ATR_EVICT_ENA, pf->flags)) in i40e_atr()
2983 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); in i40e_atr()
2984 fdir_desc->rsvd = cpu_to_le32(0); in i40e_atr()
2985 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd); in i40e_atr()
2986 fdir_desc->fd_id = cpu_to_le32(0); in i40e_atr()
2990 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
3005 __be16 protocol = skb->protocol; in i40e_tx_prepare_vlan_flags()
3009 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { in i40e_tx_prepare_vlan_flags()
3017 skb->protocol = vlan_get_protocol(skb); in i40e_tx_prepare_vlan_flags()
3031 return -EINVAL; in i40e_tx_prepare_vlan_flags()
3033 protocol = vhdr->h_vlan_encapsulated_proto; in i40e_tx_prepare_vlan_flags()
3034 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT; in i40e_tx_prepare_vlan_flags()
3038 if (!test_bit(I40E_FLAG_DCB_ENA, tx_ring->vsi->back->flags)) in i40e_tx_prepare_vlan_flags()
3043 (skb->priority != TC_PRIO_CONTROL)) { in i40e_tx_prepare_vlan_flags()
3045 tx_flags |= (skb->priority & 0x7) << in i40e_tx_prepare_vlan_flags()
3055 vhdr->h_vlan_TCI = htons(tx_flags >> in i40e_tx_prepare_vlan_flags()
3068 * i40e_tso - set up the tso context descriptor
3078 struct sk_buff *skb = first->skb; in i40e_tso()
3095 if (skb->ip_summed != CHECKSUM_PARTIAL) in i40e_tso()
3114 if (ip.v4->version == 4) { in i40e_tso()
3115 ip.v4->tot_len = 0; in i40e_tso()
3116 ip.v4->check = 0; in i40e_tso()
3118 first->tx_flags |= I40E_TX_FLAGS_TSO; in i40e_tso()
3120 ip.v6->payload_len = 0; in i40e_tso()
3121 first->tx_flags |= I40E_TX_FLAGS_TSO; in i40e_tso()
3124 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | in i40e_tso()
3130 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && in i40e_tso()
3131 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { in i40e_tso()
3132 l4.udp->len = 0; in i40e_tso()
3135 l4_offset = l4.hdr - skb->data; in i40e_tso()
3138 paylen = skb->len - l4_offset; in i40e_tso()
3139 csum_replace_by_diff(&l4.udp->check, in i40e_tso()
3148 if (ip.v4->version == 4) { in i40e_tso()
3149 ip.v4->tot_len = 0; in i40e_tso()
3150 ip.v4->check = 0; in i40e_tso()
3152 ip.v6->payload_len = 0; in i40e_tso()
3157 l4_offset = l4.hdr - skb->data; in i40e_tso()
3160 paylen = skb->len - l4_offset; in i40e_tso()
3162 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { in i40e_tso()
3163 csum_replace_by_diff(&l4.udp->check, (__force __wsum)htonl(paylen)); in i40e_tso()
3167 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); in i40e_tso()
3169 *hdr_len = (l4.tcp->doff * 4) + l4_offset; in i40e_tso()
3173 gso_size = skb_shinfo(skb)->gso_size; in i40e_tso()
3176 first->gso_segs = skb_shinfo(skb)->gso_segs; in i40e_tso()
3177 first->bytecount += (first->gso_segs - 1) * *hdr_len; in i40e_tso()
3181 cd_tso_len = skb->len - *hdr_len; in i40e_tso()
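The gso_segs/bytecount lines above account for TSO replaying the headers: the skb carries the header once, but every additional segment adds another copy on the wire. A worked sketch of that arithmetic with illustrative sizes:

#include <stdio.h>

int main(void)
{
	unsigned int skb_len   = 65226;	/* illustrative TSO skb length */
	unsigned int hdr_len   = 66;	/* Ethernet + IPv4 + TCP headers */
	unsigned int gso_size  = 1448;	/* MSS */
	unsigned int payload   = skb_len - hdr_len;
	unsigned int gso_segs  = (payload + gso_size - 1) / gso_size;
	unsigned int bytecount = skb_len + (gso_segs - 1) * hdr_len;

	printf("segments=%u bytes_on_wire=%u\n", gso_segs, bytecount);
	return 0;
}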
3190 * i40e_tsyn - set up the tsyn context descriptor
3203 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) in i40e_tsyn()
3213 pf = i40e_netdev_to_pf(tx_ring->netdev); in i40e_tsyn()
3214 if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags)) in i40e_tsyn()
3217 if (pf->ptp_tx && in i40e_tsyn()
3218 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) { in i40e_tsyn()
3219 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in i40e_tsyn()
3220 pf->ptp_tx_start = jiffies; in i40e_tsyn()
3221 pf->ptp_tx_skb = skb_get(skb); in i40e_tsyn()
3223 pf->tx_hwtstamp_skipped++; in i40e_tsyn()
3234 * i40e_tx_enable_csum - Enable Tx checksum offloads
3263 if (skb->ip_summed != CHECKSUM_PARTIAL) in i40e_tx_enable_csum()
3279 if (ip.v4->version == 4) in i40e_tx_enable_csum()
3285 offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; in i40e_tx_enable_csum()
3287 if (skb->encapsulation) { in i40e_tx_enable_csum()
3295 l4_proto = ip.v4->protocol; in i40e_tx_enable_csum()
3302 l4_proto = ip.v6->nexthdr; in i40e_tx_enable_csum()
3303 ret = ipv6_skip_exthdr(skb, exthdr - skb->data, in i40e_tx_enable_csum()
3306 return -1; in i40e_tx_enable_csum()
3326 return -1; in i40e_tx_enable_csum()
3333 tunnel |= ((l4.hdr - ip.hdr) / 4) << in i40e_tx_enable_csum()
3340 tunnel |= ((ip.hdr - l4.hdr) / 2) << in i40e_tx_enable_csum()
3345 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && in i40e_tx_enable_csum()
3346 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) in i40e_tx_enable_csum()
3358 if (ip.v4->version == 4) in i40e_tx_enable_csum()
3360 if (ip.v6->version == 6) in i40e_tx_enable_csum()
3366 l4_proto = ip.v4->protocol; in i40e_tx_enable_csum()
3377 l4_proto = ip.v6->nexthdr; in i40e_tx_enable_csum()
3379 ipv6_skip_exthdr(skb, exthdr - skb->data, in i40e_tx_enable_csum()
3384 offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT; in i40e_tx_enable_csum()
3391 offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; in i40e_tx_enable_csum()
3407 return -1; in i40e_tx_enable_csum()
3419 * i40e_create_tx_ctx - Build the Tx context descriptor
3422 * @cd_tunneling: Quad Word 0 - bits 0-31
3423 * @cd_l2tag2: Quad Word 0 - bits 32-63
3430 int i = tx_ring->next_to_use; in i40e_create_tx_ctx()
3440 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in i40e_create_tx_ctx()
3443 context_desc->tunneling_params = cpu_to_le32(cd_tunneling); in i40e_create_tx_ctx()
3444 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2); in i40e_create_tx_ctx()
3445 context_desc->rsvd = cpu_to_le16(0); in i40e_create_tx_ctx()
3446 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss); in i40e_create_tx_ctx()
3450 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
3454 * Returns -EBUSY if a stop is needed, else 0
3458 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); in __i40e_maybe_stop_tx()
3462 ++tx_ring->tx_stats.tx_stopped; in __i40e_maybe_stop_tx()
3466 return -EBUSY; in __i40e_maybe_stop_tx()
3468 /* A reprieve! - use start_queue because it doesn't call schedule */ in __i40e_maybe_stop_tx()
3469 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); in __i40e_maybe_stop_tx()
3470 ++tx_ring->tx_stats.restart_queue; in __i40e_maybe_stop_tx()
3475 * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
3493 nr_frags = skb_shinfo(skb)->nr_frags; in __i40e_chk_linearize()
3494 if (nr_frags < (I40E_MAX_BUFFER_TXD - 1)) in __i40e_chk_linearize()
3500 nr_frags -= I40E_MAX_BUFFER_TXD - 2; in __i40e_chk_linearize()
3501 frag = &skb_shinfo(skb)->frags[0]; in __i40e_chk_linearize()
3509 sum = 1 - skb_shinfo(skb)->gso_size; in __i40e_chk_linearize()
3521 for (stale = &skb_shinfo(skb)->frags[0];; stale++) { in __i40e_chk_linearize()
3533 int align_pad = -(skb_frag_off(stale)) & in __i40e_chk_linearize()
3534 (I40E_MAX_READ_REQ_SIZE - 1); in __i40e_chk_linearize()
3536 sum -= align_pad; in __i40e_chk_linearize()
3537 stale_size -= align_pad; in __i40e_chk_linearize()
3540 sum -= I40E_MAX_DATA_PER_TXD_ALIGNED; in __i40e_chk_linearize()
3541 stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED; in __i40e_chk_linearize()
3549 if (!nr_frags--) in __i40e_chk_linearize()
3552 sum -= stale_size; in __i40e_chk_linearize()
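The check above guards the hardware limit of 8 buffers per packet; large fragments make that limit easier to hit because each Tx data descriptor can carry only a bounded chunk. A sketch of the ceiling division that bounds how many descriptors one buffer needs; the 12 KiB aligned chunk size is an assumption for illustration, not read from the driver headers:

#include <stdio.h>

static unsigned int txd_use_count_sketch(unsigned int size)
{
	const unsigned int max_data_aligned = 12 * 1024;	/* assumed per-descriptor limit */

	return (size + max_data_aligned - 1) / max_data_aligned;
}

int main(void)
{
	printf("%u descriptor(s) for 4 KiB\n",  txd_use_count_sketch(4096));
	printf("%u descriptor(s) for 32 KiB\n", txd_use_count_sketch(32768));
	return 0;
}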
3559 * i40e_tx_map - Build the Tx descriptor
3568 * Returns 0 on success, -1 on failure to DMA
3574 unsigned int data_len = skb->data_len; in i40e_tx_map()
3579 u16 i = tx_ring->next_to_use; in i40e_tx_map()
3589 first->tx_flags = tx_flags; in i40e_tx_map()
3591 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in i40e_tx_map()
3596 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { in i40e_tx_map()
3599 if (dma_mapping_error(tx_ring->dev, dma)) in i40e_tx_map()
3607 max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1); in i40e_tx_map()
3608 tx_desc->buffer_addr = cpu_to_le64(dma); in i40e_tx_map()
3611 tx_desc->cmd_type_offset_bsz = in i40e_tx_map()
3619 if (i == tx_ring->count) { in i40e_tx_map()
3625 size -= max_data; in i40e_tx_map()
3628 tx_desc->buffer_addr = cpu_to_le64(dma); in i40e_tx_map()
3634 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, in i40e_tx_map()
3641 if (i == tx_ring->count) { in i40e_tx_map()
3647 data_len -= size; in i40e_tx_map()
3649 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in i40e_tx_map()
3652 tx_bi = &tx_ring->tx_bi[i]; in i40e_tx_map()
3655 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); in i40e_tx_map()
3658 if (i == tx_ring->count) in i40e_tx_map()
3661 tx_ring->next_to_use = i; in i40e_tx_map()
3665 /* write last descriptor with EOP bit */ in i40e_tx_map()
3669 * below. This is safe since we don't re-use desc_count afterwards. in i40e_tx_map()
3671 desc_count |= ++tx_ring->packet_stride; in i40e_tx_map()
3676 tx_ring->packet_stride = 0; in i40e_tx_map()
3679 tx_desc->cmd_type_offset_bsz = in i40e_tx_map()
3693 first->next_to_watch = tx_desc; in i40e_tx_map()
3697 writel(i, tx_ring->tail); in i40e_tx_map()
3703 dev_info(tx_ring->dev, "TX DMA map failed\n"); in i40e_tx_map()
3707 tx_bi = &tx_ring->tx_bi[i]; in i40e_tx_map()
3712 i = tx_ring->count; in i40e_tx_map()
3713 i--; in i40e_tx_map()
3716 tx_ring->next_to_use = i; in i40e_tx_map()
3718 return -1; in i40e_tx_map()
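The cmd_type_offset_bsz writes above pack the descriptor type, command flags, header offsets, buffer size and L2 tag into one 64-bit quad word, in the spirit of the driver's build_ctob() helper. A hedged sketch; the shift positions are assumptions for illustration, not the authoritative I40E_TXD_QW1_* values:

#include <stdint.h>
#include <stdio.h>

static uint64_t build_ctob_sketch(uint32_t cmd, uint32_t offset,
				  uint32_t size, uint32_t tag)
{
	const uint64_t dtype_data = 0x0;	/* data descriptor type (assumed) */

	return dtype_data |
	       ((uint64_t)cmd    << 4)  |	/* command flags, e.g. EOP/RS */
	       ((uint64_t)offset << 16) |	/* MAC/IP/L4 header lengths */
	       ((uint64_t)size   << 34) |	/* buffer size in bytes */
	       ((uint64_t)tag    << 48);	/* L2 tag */
}

int main(void)
{
	/* e.g. a 1514-byte buffer with one command bit set (values illustrative) */
	printf("qw1 = 0x%016llx\n",
	       (unsigned long long)build_ctob_sketch(0x1, 0x0, 1514, 0));
	return 0;
}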
3728 if (skb->sk && skb->sk->sk_hash) in i40e_swdcb_skb_tx_hash()
3729 hash = skb->sk->sk_hash; in i40e_swdcb_skb_tx_hash()
3731 hash = (__force u16)skb->protocol ^ skb->hash; in i40e_swdcb_skb_tx_hash()
3743 struct i40e_vsi *vsi = np->vsi; in i40e_lan_select_queue()
3752 if (vsi->tc_config.numtc == 1 || in i40e_lan_select_queue()
3753 i40e_is_tc_mqprio_enabled(vsi->back)) in i40e_lan_select_queue()
3756 prio = skb->priority; in i40e_lan_select_queue()
3757 hw = &vsi->back->hw; in i40e_lan_select_queue()
3758 tclass = hw->local_dcbx_config.etscfg.prioritytable[prio]; in i40e_lan_select_queue()
3760 if (unlikely(!(vsi->tc_config.enabled_tc & BIT(tclass)))) in i40e_lan_select_queue()
3764 qcount = vsi->tc_config.tc_info[tclass].qcount; in i40e_lan_select_queue()
3767 qoffset = vsi->tc_config.tc_info[tclass].qoffset; in i40e_lan_select_queue()
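i40e_lan_select_queue() maps a flow hash into the block of queues owned by the skb's traffic class: pick a queue within qcount and add qoffset. The exact scaling is not shown in this excerpt; the sketch below uses a common multiply-and-shift form as an assumed stand-in:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t hash    = 0xBEEF;	/* from the socket or skb hash */
	uint16_t qcount  = 8;		/* queues owned by the selected TC */
	uint16_t qoffset = 16;		/* first queue index of that TC */
	uint16_t queue   = (uint16_t)(((uint32_t)hash * qcount) >> 16) + qoffset;

	printf("tx queue = %u\n", queue);
	return 0;
}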
3772 * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
3780 u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0; in i40e_xmit_xdp_ring()
3781 u16 i = 0, index = xdp_ring->next_to_use; in i40e_xmit_xdp_ring()
3782 struct i40e_tx_buffer *tx_head = &xdp_ring->tx_bi[index]; in i40e_xmit_xdp_ring()
3785 void *data = xdpf->data; in i40e_xmit_xdp_ring()
3786 u32 size = xdpf->len; in i40e_xmit_xdp_ring()
3789 xdp_ring->tx_stats.tx_busy++; in i40e_xmit_xdp_ring()
3793 tx_head->bytecount = xdp_get_frame_len(xdpf); in i40e_xmit_xdp_ring()
3794 tx_head->gso_segs = 1; in i40e_xmit_xdp_ring()
3795 tx_head->xdpf = xdpf; in i40e_xmit_xdp_ring()
3800 dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE); in i40e_xmit_xdp_ring()
3801 if (dma_mapping_error(xdp_ring->dev, dma)) in i40e_xmit_xdp_ring()
3808 tx_desc->buffer_addr = cpu_to_le64(dma); in i40e_xmit_xdp_ring()
3809 tx_desc->cmd_type_offset_bsz = in i40e_xmit_xdp_ring()
3812 if (++index == xdp_ring->count) in i40e_xmit_xdp_ring()
3818 tx_bi = &xdp_ring->tx_bi[index]; in i40e_xmit_xdp_ring()
3821 data = skb_frag_address(&sinfo->frags[i]); in i40e_xmit_xdp_ring()
3822 size = skb_frag_size(&sinfo->frags[i]); in i40e_xmit_xdp_ring()
3826 tx_desc->cmd_type_offset_bsz |= in i40e_xmit_xdp_ring()
3834 xdp_ring->xdp_tx_active++; in i40e_xmit_xdp_ring()
3836 tx_head->next_to_watch = tx_desc; in i40e_xmit_xdp_ring()
3837 xdp_ring->next_to_use = index; in i40e_xmit_xdp_ring()
3843 tx_bi = &xdp_ring->tx_bi[index]; in i40e_xmit_xdp_ring()
3845 dma_unmap_page(xdp_ring->dev, in i40e_xmit_xdp_ring()
3854 index += xdp_ring->count; in i40e_xmit_xdp_ring()
3855 index--; in i40e_xmit_xdp_ring()
3862 * i40e_xmit_frame_ring - Sends buffer on Tx ring
3882 prefetch(skb->data); in i40e_xmit_frame_ring()
3892 count = i40e_txd_use_count(skb->len); in i40e_xmit_frame_ring()
3893 tx_ring->tx_stats.tx_linearize++; in i40e_xmit_frame_ring()
3903 tx_ring->tx_stats.tx_busy++; in i40e_xmit_frame_ring()
3908 first = &tx_ring->tx_bi[tx_ring->next_to_use]; in i40e_xmit_frame_ring()
3909 first->skb = skb; in i40e_xmit_frame_ring()
3910 first->bytecount = skb->len; in i40e_xmit_frame_ring()
3911 first->gso_segs = 1; in i40e_xmit_frame_ring()
3954 i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring); in i40e_xmit_frame_ring()
3955 dev_kfree_skb_any(first->skb); in i40e_xmit_frame_ring()
3956 first->skb = NULL; in i40e_xmit_frame_ring()
3959 struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev); in i40e_xmit_frame_ring()
3961 dev_kfree_skb_any(pf->ptp_tx_skb); in i40e_xmit_frame_ring()
3962 pf->ptp_tx_skb = NULL; in i40e_xmit_frame_ring()
3963 clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state); in i40e_xmit_frame_ring()
3970 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
3979 struct i40e_vsi *vsi = np->vsi; in i40e_lan_xmit_frame()
3980 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping]; in i40e_lan_xmit_frame()
3992 * i40e_xdp_xmit - Implements ndo_xdp_xmit
4001 * For error cases, a negative errno code is returned and no-frames
4009 struct i40e_vsi *vsi = np->vsi; in i40e_xdp_xmit()
4010 struct i40e_pf *pf = vsi->back; in i40e_xdp_xmit()
4015 if (test_bit(__I40E_VSI_DOWN, vsi->state)) in i40e_xdp_xmit()
4016 return -ENETDOWN; in i40e_xdp_xmit()
4018 if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs || in i40e_xdp_xmit()
4019 test_bit(__I40E_CONFIG_BUSY, pf->state)) in i40e_xdp_xmit()
4020 return -ENXIO; in i40e_xdp_xmit()
4023 return -EINVAL; in i40e_xdp_xmit()
4025 xdp_ring = vsi->xdp_rings[queue_index]; in i40e_xdp_xmit()