Lines Matching +full:supports +full:- +full:cqe
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
3 * Copyright (c) 2015-2017 QLogic Corporation
4 * Copyright (c) 2019-2020 Marvell International Ltd.
33 /* In case lazy-allocation is allowed, postpone allocation until the in qede_alloc_rx_buffer()
37 if (allow_lazy && likely(rxq->filled_buffers > 12)) { in qede_alloc_rx_buffer()
38 rxq->filled_buffers--; in qede_alloc_rx_buffer()
44 return -ENOMEM; in qede_alloc_rx_buffer()
49 mapping = dma_map_page(rxq->dev, data, 0, in qede_alloc_rx_buffer()
50 PAGE_SIZE, rxq->data_direction); in qede_alloc_rx_buffer()
51 if (unlikely(dma_mapping_error(rxq->dev, mapping))) { in qede_alloc_rx_buffer()
53 return -ENOMEM; in qede_alloc_rx_buffer()
56 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; in qede_alloc_rx_buffer()
57 sw_rx_data->page_offset = 0; in qede_alloc_rx_buffer()
58 sw_rx_data->data = data; in qede_alloc_rx_buffer()
59 sw_rx_data->mapping = mapping; in qede_alloc_rx_buffer()
62 rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring); in qede_alloc_rx_buffer()
64 rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping)); in qede_alloc_rx_buffer()
65 rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping) + in qede_alloc_rx_buffer()
66 rxq->rx_headroom); in qede_alloc_rx_buffer()
68 rxq->sw_rx_prod++; in qede_alloc_rx_buffer()
69 rxq->filled_buffers++; in qede_alloc_rx_buffer()
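
The allocator lines above lean on two conventions used throughout this file: the software producer index runs freely and is only masked (with NUM_RX_BDS_MAX) when indexing sw_rx_ring, and filled_buffers lets the fast path skip allocation while enough buffers are already posted. A minimal userspace sketch of that pattern, assuming illustrative sizes and helper names (rx_ring_model, post_rx_buffer(), RING_SIZE and the 4096-byte malloc are stand-ins, not driver code):

#include <errno.h>
#include <stdlib.h>

#define RING_SIZE 8192                 /* power of two, illustrative only */
#define RING_MASK (RING_SIZE - 1)      /* plays the role of NUM_RX_BDS_MAX */

struct rx_ring_model {
	void *buf[RING_SIZE];
	unsigned short prod;           /* free-running producer index */
	unsigned short filled;         /* buffers currently posted to HW */
};

/* Post one receive buffer, or skip the allocation entirely while the ring
 * still holds comfortable headroom -- the "lazy" branch guarded by
 * filled_buffers > 12 in qede_alloc_rx_buffer() above.
 */
static int post_rx_buffer(struct rx_ring_model *r, int allow_lazy)
{
	void *page;

	if (allow_lazy && r->filled > 12) {
		r->filled--;           /* spend headroom instead of allocating */
		return 0;
	}

	page = malloc(4096);           /* stand-in for alloc_pages() + dma_map_page() */
	if (!page)
		return -ENOMEM;

	r->buf[r->prod & RING_MASK] = page;  /* index masked only on use */
	r->prod++;                           /* wraps naturally */
	r->filled++;
	return 0;
}
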
77 u16 idx = txq->sw_tx_cons; in qede_free_tx_pkt()
78 struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; in qede_free_tx_pkt()
83 bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD; in qede_free_tx_pkt()
88 "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n", in qede_free_tx_pkt()
89 idx, txq->sw_tx_cons, txq->sw_tx_prod); in qede_free_tx_pkt()
90 return -1; in qede_free_tx_pkt()
93 *len = skb->len; in qede_free_tx_pkt()
95 first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt()
99 nbds = first_bd->data.nbds; in qede_free_tx_pkt()
103 qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt()
107 dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), in qede_free_tx_pkt()
111 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) { in qede_free_tx_pkt()
113 qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt()
114 dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd), in qede_free_tx_pkt()
119 qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt()
123 txq->sw_tx_ring.skbs[idx].skb = NULL; in qede_free_tx_pkt()
124 txq->sw_tx_ring.skbs[idx].flags = 0; in qede_free_tx_pkt()
134 u16 idx = txq->sw_tx_prod; in qede_free_failed_tx_pkt()
135 struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; in qede_free_failed_tx_pkt()
140 qed_chain_set_prod(&txq->tx_pbl, in qede_free_failed_tx_pkt()
141 le16_to_cpu(txq->tx_db.data.bd_prod), first_bd); in qede_free_failed_tx_pkt()
143 first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl); in qede_free_failed_tx_pkt()
147 qed_chain_produce(&txq->tx_pbl); in qede_free_failed_tx_pkt()
149 nbd--; in qede_free_failed_tx_pkt()
152 dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd), in qede_free_failed_tx_pkt()
158 qed_chain_produce(&txq->tx_pbl); in qede_free_failed_tx_pkt()
159 if (tx_data_bd->nbytes) in qede_free_failed_tx_pkt()
160 dma_unmap_page(txq->dev, in qede_free_failed_tx_pkt()
166 qed_chain_set_prod(&txq->tx_pbl, in qede_free_failed_tx_pkt()
167 le16_to_cpu(txq->tx_db.data.bd_prod), first_bd); in qede_free_failed_tx_pkt()
171 txq->sw_tx_ring.skbs[idx].skb = NULL; in qede_free_failed_tx_pkt()
172 txq->sw_tx_ring.skbs[idx].flags = 0; in qede_free_failed_tx_pkt()
180 if (skb->ip_summed != CHECKSUM_PARTIAL) in qede_xmit_type()
185 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6)) in qede_xmit_type()
188 if (skb->encapsulation) { in qede_xmit_type()
191 unsigned short gso_type = skb_shinfo(skb)->gso_type; in qede_xmit_type()
217 bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) & in qede_set_params_for_ipv6_ext()
225 l4_proto = ipv6_hdr(skb)->nexthdr; in qede_set_params_for_ipv6_ext()
227 l4_proto = ip_hdr(skb)->protocol; in qede_set_params_for_ipv6_ext()
233 third_bd->data.bitfields |= in qede_set_params_for_ipv6_ext()
238 second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1); in qede_set_params_for_ipv6_ext()
239 second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2); in qede_set_params_for_ipv6_ext()
247 /* Map skb non-linear frag data for DMA */ in map_frag_to_bd()
248 mapping = skb_frag_dma_map(txq->dev, frag, 0, in map_frag_to_bd()
250 if (unlikely(dma_mapping_error(txq->dev, mapping))) in map_frag_to_bd()
251 return -ENOMEM; in map_frag_to_bd()
263 inner_tcp_hdrlen(skb) - skb->data); in qede_get_skb_hlen()
266 tcp_hdrlen(skb) - skb->data); in qede_get_skb_hlen()
273 int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1; in qede_pkt_req_lin()
282 allowed_frags--; in qede_pkt_req_lin()
285 return (skb_shinfo(skb)->nr_frags > allowed_frags); in qede_pkt_req_lin()
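
qede_pkt_req_lin() above decides whether an skb must be linearized before transmission: each page fragment consumes one BD, the headers consume another, and an LSO packet whose header is split off needs one more. A hedged sketch of that budget check, with a made-up MAX_BDS_PER_PKT standing in for the firmware constant ETH_TX_MAX_BDS_PER_NON_LSO_PACKET:

#include <stdbool.h>

/* Illustrative BD budget; the real limit comes from the firmware headers. */
#define MAX_BDS_PER_PKT 18

static bool pkt_needs_linearize(int nr_frags, bool is_lso, bool split_header)
{
	int allowed_frags = MAX_BDS_PER_PKT - 1;   /* one BD for the headers */

	if (is_lso && split_header)
		allowed_frags--;                   /* extra BD for the split header */

	return nr_frags > allowed_frags;
}
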
296 writel(txq->tx_db.raw, txq->doorbell_addr); in qede_update_tx_producer()
312 if (unlikely(qed_chain_get_elem_used(&txq->tx_pbl) >= in qede_xdp_xmit()
313 txq->num_tx_buffers)) { in qede_xdp_xmit()
314 txq->stopped_cnt++; in qede_xdp_xmit()
315 return -ENOMEM; in qede_xdp_xmit()
318 bd = qed_chain_produce(&txq->tx_pbl); in qede_xdp_xmit()
319 bd->data.nbds = 1; in qede_xdp_xmit()
320 bd->data.bd_flags.bitfields = BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT); in qede_xdp_xmit()
325 bd->data.bitfields = cpu_to_le16(val); in qede_xdp_xmit()
330 xdp = txq->sw_tx_ring.xdp + txq->sw_tx_prod; in qede_xdp_xmit()
331 xdp->mapping = dma; in qede_xdp_xmit()
332 xdp->page = page; in qede_xdp_xmit()
333 xdp->xdpf = xdpf; in qede_xdp_xmit()
335 txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers; in qede_xdp_xmit()
344 struct device *dmadev = &edev->pdev->dev; in qede_xdp_transmit()
352 return -EINVAL; in qede_xdp_transmit()
355 return -ENETDOWN; in qede_xdp_transmit()
357 i = smp_processor_id() % edev->total_xdp_queues; in qede_xdp_transmit()
358 xdp_tx = edev->fp_array[i].xdp_tx; in qede_xdp_transmit()
360 spin_lock(&xdp_tx->xdp_tx_lock); in qede_xdp_transmit()
365 mapping = dma_map_single(dmadev, xdpf->data, xdpf->len, in qede_xdp_transmit()
374 if (unlikely(qede_xdp_xmit(xdp_tx, mapping, 0, xdpf->len, in qede_xdp_transmit()
382 xdp_prod = qed_chain_get_prod_idx(&xdp_tx->tx_pbl); in qede_xdp_transmit()
384 xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod); in qede_xdp_transmit()
388 spin_unlock(&xdp_tx->xdp_tx_lock); in qede_xdp_transmit()
390 return n_frames - drops; in qede_xdp_transmit()
399 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); in qede_txq_has_work()
400 if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1) in qede_txq_has_work()
403 return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl); in qede_txq_has_work()
408 struct sw_tx_xdp *xdp_info, *xdp_arr = txq->sw_tx_ring.xdp; in qede_xdp_tx_int()
409 struct device *dev = &edev->pdev->dev; in qede_xdp_tx_int()
413 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); in qede_xdp_tx_int()
416 while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) { in qede_xdp_tx_int()
417 xdp_info = xdp_arr + txq->sw_tx_cons; in qede_xdp_tx_int()
418 xdpf = xdp_info->xdpf; in qede_xdp_tx_int()
421 dma_unmap_single(dev, xdp_info->mapping, xdpf->len, in qede_xdp_tx_int()
425 xdp_info->xdpf = NULL; in qede_xdp_tx_int()
427 dma_unmap_page(dev, xdp_info->mapping, PAGE_SIZE, in qede_xdp_tx_int()
429 __free_page(xdp_info->page); in qede_xdp_tx_int()
432 qed_chain_consume(&txq->tx_pbl); in qede_xdp_tx_int()
433 txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers; in qede_xdp_tx_int()
434 txq->xmit_pkts++; in qede_xdp_tx_int()
445 netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id); in qede_tx_int()
447 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); in qede_tx_int()
450 while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) { in qede_tx_int()
457 qed_chain_get_cons_idx(&txq->tx_pbl)); in qede_tx_int()
463 txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers; in qede_tx_int()
464 txq->xmit_pkts++; in qede_tx_int()
486 * stops the queue->sees fresh tx_bd_cons->releases the queue-> in qede_tx_int()
487 * sends some packets consuming the whole queue again-> in qede_tx_int()
494 (edev->state == QEDE_STATE_OPEN) && in qede_tx_int()
495 (qed_chain_get_elem_left(&txq->tx_pbl) in qede_tx_int()
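
The fragment around lines 486-495 is the driver's explanation of the transmit stop/wake race: the completion path only wakes the queue after re-checking, under the TX queue lock, that the device is open and enough BDs are free. A userspace model of that locked re-check follows; txq_model, WAKE_THRESHOLD and the pthread mutex are stand-ins for the netdev queue, the driver's threshold and __netif_tx_lock()/netif_tx_wake_queue():

#include <pthread.h>
#include <stdbool.h>

#define WAKE_THRESHOLD 16              /* illustrative, not the driver's value */

struct txq_model {
	pthread_mutex_t lock;          /* stands in for __netif_tx_lock() */
	int free_bds;
	bool stopped;
};

/* Completion side: reclaim descriptors, then re-check the stop condition
 * under the queue lock.  Without the locked re-check, a concurrent sender
 * could refill the ring and stop the queue again between an unlocked peek
 * and the wake -- the interleaving the comment above describes.
 */
static void tx_complete(struct txq_model *q, int reclaimed)
{
	pthread_mutex_lock(&q->lock);
	q->free_bds += reclaimed;
	if (q->stopped && q->free_bds >= WAKE_THRESHOLD)
		q->stopped = false;    /* netif_tx_wake_queue() in the driver */
	pthread_mutex_unlock(&q->lock);
}
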
515 hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); in qede_has_rx_work()
516 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); in qede_has_rx_work()
523 qed_chain_consume(&rxq->rx_bd_ring); in qede_rx_bd_ring_consume()
524 rxq->sw_rx_cons++; in qede_rx_bd_ring_consume()
533 struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring); in qede_reuse_page()
537 curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; in qede_reuse_page()
540 new_mapping = curr_prod->mapping + curr_prod->page_offset; in qede_reuse_page()
542 rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping)); in qede_reuse_page()
543 rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping) + in qede_reuse_page()
544 rxq->rx_headroom); in qede_reuse_page()
546 rxq->sw_rx_prod++; in qede_reuse_page()
547 curr_cons->data = NULL; in qede_reuse_page()
557 for (; count > 0; count--) { in qede_recycle_rx_bd_ring()
558 curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; in qede_recycle_rx_bd_ring()
568 curr_cons->page_offset += rxq->rx_buf_seg_size; in qede_realloc_rx_buffer()
570 if (curr_cons->page_offset == PAGE_SIZE) { in qede_realloc_rx_buffer()
575 curr_cons->page_offset -= rxq->rx_buf_seg_size; in qede_realloc_rx_buffer()
577 return -ENOMEM; in qede_realloc_rx_buffer()
580 dma_unmap_page(rxq->dev, curr_cons->mapping, in qede_realloc_rx_buffer()
581 PAGE_SIZE, rxq->data_direction); in qede_realloc_rx_buffer()
587 page_ref_inc(curr_cons->data); in qede_realloc_rx_buffer()
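
qede_realloc_rx_buffer() above implements the half-page reuse scheme: each page is carved into rx_buf_seg_size segments, the offset advances one segment per received frame, a replacement page is allocated (and the old one DMA-unmapped) only once the whole page has been consumed, and otherwise the page refcount is bumped so the halves can be released independently. A simplified sketch under those assumptions; PAGE_SZ, SEG_SZ and rx_buf_model are illustrative:

#include <errno.h>
#include <stdlib.h>

#define PAGE_SZ 4096
#define SEG_SZ  2048                   /* stand-in for rx_buf_seg_size */

struct rx_buf_model {
	void *page;
	unsigned int offset;
	int refcount;
};

static int realloc_rx_buffer(struct rx_buf_model *b)
{
	b->offset += SEG_SZ;

	if (b->offset == PAGE_SZ) {
		/* Whole page consumed: replace it; the old page stays owned
		 * by whoever received the data.  On failure, roll the offset
		 * back and keep reusing the current page.
		 */
		void *fresh = malloc(PAGE_SZ);

		if (!fresh) {
			b->offset -= SEG_SZ;
			return -ENOMEM;
		}
		b->page = fresh;
		b->offset = 0;
		b->refcount = 1;
	} else {
		b->refcount++;         /* second half still in flight */
	}
	return 0;
}
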
596 u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring); in qede_update_rx_prod()
597 u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring); in qede_update_rx_prod()
610 internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods), in qede_update_rx_prod()
635 skb->ip_summed = CHECKSUM_UNNECESSARY; in qede_set_skb_csum()
638 skb->csum_level = 1; in qede_set_skb_csum()
639 skb->encapsulation = 1; in qede_set_skb_csum()
651 napi_gro_receive(&fp->napi, skb); in qede_skb_receive()
656 struct eth_fast_path_rx_tpa_start_cqe *cqe) in qede_set_gro_params() argument
658 u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags); in qede_set_gro_params()
662 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; in qede_set_gro_params()
664 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; in qede_set_gro_params()
666 skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) - in qede_set_gro_params()
667 cqe->header_len; in qede_set_gro_params()
674 struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons & in qede_fill_frag_skb()
676 struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index]; in qede_fill_frag_skb()
677 struct sk_buff *skb = tpa_info->skb; in qede_fill_frag_skb()
679 if (unlikely(tpa_info->state != QEDE_AGG_STATE_START)) in qede_fill_frag_skb()
683 skb_fill_page_desc(skb, tpa_info->frag_id++, in qede_fill_frag_skb()
684 current_bd->data, in qede_fill_frag_skb()
685 current_bd->page_offset + rxq->rx_headroom, in qede_fill_frag_skb()
692 page_ref_inc(current_bd->data); in qede_fill_frag_skb()
698 skb->data_len += len_on_bd; in qede_fill_frag_skb()
699 skb->truesize += rxq->rx_buf_seg_size; in qede_fill_frag_skb()
700 skb->len += len_on_bd; in qede_fill_frag_skb()
705 tpa_info->state = QEDE_AGG_STATE_ERROR; in qede_fill_frag_skb()
708 return -ENOMEM; in qede_fill_frag_skb()
752 buf = page_address(bd->data) + bd->page_offset; in qede_build_skb()
753 skb = build_skb(buf, rxq->rx_buf_seg_size); in qede_build_skb()
770 bd->page_offset += rxq->rx_buf_seg_size; in qede_tpa_rx_build_skb()
772 if (bd->page_offset == PAGE_SIZE) { in qede_tpa_rx_build_skb()
776 bd->page_offset -= rxq->rx_buf_seg_size; in qede_tpa_rx_build_skb()
777 page_ref_inc(bd->data); in qede_tpa_rx_build_skb()
782 page_ref_inc(bd->data); in qede_tpa_rx_build_skb()
801 * un-mapping it. in qede_rx_build_skb()
803 if ((len + pad <= edev->rx_copybreak)) { in qede_rx_build_skb()
804 unsigned int offset = bd->page_offset + pad; in qede_rx_build_skb()
806 skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE); in qede_rx_build_skb()
811 skb_put_data(skb, page_address(bd->data) + offset, len); in qede_rx_build_skb()
823 page_ref_inc(bd->data); in qede_rx_build_skb()
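
qede_rx_build_skb() applies a copybreak policy: frames no longer than rx_copybreak are copied into a small freshly allocated skb so the RX page never leaves the ring, while larger frames are wrapped around the page itself with an extra page reference. A rough userspace sketch of that decision; COPYBREAK and receive_frame() are hypothetical:

#include <stdlib.h>
#include <string.h>

/* Illustrative threshold; the driver's is the tunable edev->rx_copybreak. */
#define COPYBREAK 256

static void *receive_frame(char *page, unsigned int offset,
			   unsigned int pad, unsigned int len)
{
	if (len + pad <= COPYBREAK) {
		void *copy = malloc(len);

		if (!copy)
			return NULL;
		memcpy(copy, page + offset + pad, len);
		return copy;           /* the page stays posted for reuse */
	}

	/* Large frame: hand the page itself to the stack (build_skb() plus
	 * page_ref_inc() in the driver).
	 */
	return page + offset;
}
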
836 struct eth_fast_path_rx_tpa_start_cqe *cqe) in qede_tpa_start() argument
838 struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; in qede_tpa_start()
842 sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; in qede_tpa_start()
843 pad = cqe->placement_offset + rxq->rx_headroom; in qede_tpa_start()
845 tpa_info->skb = qede_tpa_rx_build_skb(edev, rxq, sw_rx_data_cons, in qede_tpa_start()
846 le16_to_cpu(cqe->len_on_first_bd), in qede_tpa_start()
848 tpa_info->buffer.page_offset = sw_rx_data_cons->page_offset; in qede_tpa_start()
849 tpa_info->buffer.mapping = sw_rx_data_cons->mapping; in qede_tpa_start()
851 if (unlikely(!tpa_info->skb)) { in qede_tpa_start()
855 * this might be used by FW still, it will be re-used in qede_tpa_start()
858 tpa_info->tpa_start_fail = true; in qede_tpa_start()
860 tpa_info->state = QEDE_AGG_STATE_ERROR; in qede_tpa_start()
864 tpa_info->frag_id = 0; in qede_tpa_start()
865 tpa_info->state = QEDE_AGG_STATE_START; in qede_tpa_start()
867 if ((le16_to_cpu(cqe->pars_flags.flags) >> in qede_tpa_start()
870 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag); in qede_tpa_start()
872 tpa_info->vlan_tag = 0; in qede_tpa_start()
874 qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash); in qede_tpa_start()
877 qede_set_gro_params(edev, tpa_info->skb, cqe); in qede_tpa_start()
880 if (likely(cqe->bw_ext_bd_len_list[0])) in qede_tpa_start()
881 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, in qede_tpa_start()
882 le16_to_cpu(cqe->bw_ext_bd_len_list[0])); in qede_tpa_start()
884 if (unlikely(cqe->bw_ext_bd_len_list[1])) { in qede_tpa_start()
886 …"Unlikely - got a TPA aggregation with more than one bw_ext_bd_len_list entry in the TPA start\n"); in qede_tpa_start()
887 tpa_info->state = QEDE_AGG_STATE_ERROR; in qede_tpa_start()
900 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb), in qede_gro_ip_csum()
901 iph->saddr, iph->daddr, 0); in qede_gro_ip_csum()
914 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb), in qede_gro_ipv6_csum()
915 &iph->saddr, &iph->daddr, 0); in qede_gro_ipv6_csum()
930 if (unlikely(!skb->data_len)) { in qede_gro_receive()
931 skb_shinfo(skb)->gso_type = 0; in qede_gro_receive()
932 skb_shinfo(skb)->gso_size = 0; in qede_gro_receive()
937 if (skb_shinfo(skb)->gso_size) { in qede_gro_receive()
940 switch (skb->protocol) { in qede_gro_receive()
949 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n", in qede_gro_receive()
950 ntohs(skb->protocol)); in qede_gro_receive()
956 skb_record_rx_queue(skb, fp->rxq->rxq_id); in qede_gro_receive()
957 qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag); in qede_gro_receive()
962 struct eth_fast_path_rx_tpa_cont_cqe *cqe) in qede_tpa_cont() argument
966 for (i = 0; cqe->len_list[i]; i++) in qede_tpa_cont()
967 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, in qede_tpa_cont()
968 le16_to_cpu(cqe->len_list[i])); in qede_tpa_cont()
972 "Strange - TPA cont with more than a single len_list entry\n"); in qede_tpa_cont()
977 struct eth_fast_path_rx_tpa_end_cqe *cqe) in qede_tpa_end() argument
979 struct qede_rx_queue *rxq = fp->rxq; in qede_tpa_end()
984 tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; in qede_tpa_end()
985 skb = tpa_info->skb; in qede_tpa_end()
987 if (tpa_info->buffer.page_offset == PAGE_SIZE) in qede_tpa_end()
988 dma_unmap_page(rxq->dev, tpa_info->buffer.mapping, in qede_tpa_end()
989 PAGE_SIZE, rxq->data_direction); in qede_tpa_end()
991 for (i = 0; cqe->len_list[i]; i++) in qede_tpa_end()
992 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, in qede_tpa_end()
993 le16_to_cpu(cqe->len_list[i])); in qede_tpa_end()
996 "Strange - TPA end with more than a single len_list entry\n"); in qede_tpa_end()

998 if (unlikely(tpa_info->state != QEDE_AGG_STATE_START)) in qede_tpa_end()
1002 if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1)) in qede_tpa_end()
1004 "Strange - TPA had %02x BDs, but SKB has only %d frags\n", in qede_tpa_end()
1005 cqe->num_of_bds, tpa_info->frag_id); in qede_tpa_end()
1006 if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len))) in qede_tpa_end()
1008 "Strange - total packet len [cqe] is %4x but SKB has len %04x\n", in qede_tpa_end()
1009 le16_to_cpu(cqe->total_packet_len), skb->len); in qede_tpa_end()
1012 skb->protocol = eth_type_trans(skb, edev->ndev); in qede_tpa_end()
1013 skb->ip_summed = CHECKSUM_UNNECESSARY; in qede_tpa_end()
1015 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count in qede_tpa_end()
1016 * to skb_shinfo(skb)->gso_segs in qede_tpa_end()
1018 NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs); in qede_tpa_end()
1020 qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag); in qede_tpa_end()
1022 tpa_info->state = QEDE_AGG_STATE_NONE; in qede_tpa_end()
1026 tpa_info->state = QEDE_AGG_STATE_NONE; in qede_tpa_end()
1028 if (tpa_info->tpa_start_fail) { in qede_tpa_end()
1029 qede_reuse_page(rxq, &tpa_info->buffer); in qede_tpa_end()
1030 tpa_info->tpa_start_fail = false; in qede_tpa_end()
1033 dev_kfree_skb_any(tpa_info->skb); in qede_tpa_end()
1034 tpa_info->skb = NULL; in qede_tpa_end()
1067 static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe, in qede_pkt_is_ip_fragmented() argument
1070 u8 tun_pars_flg = cqe->tunnel_pars_flags.flags; in qede_pkt_is_ip_fragmented()
1087 struct eth_fast_path_rx_reg_cqe *cqe, in qede_rx_xdp() argument
1093 xdp.data_hard_start = page_address(bd->data); in qede_rx_xdp()
1097 xdp.rxq = &rxq->xdp_rxq; in qede_rx_xdp()
1098 xdp.frame_sz = rxq->rx_buf_seg_size; /* PAGE_SIZE when XDP enabled */ in qede_rx_xdp()
1109 *data_offset = xdp.data - xdp.data_hard_start; in qede_rx_xdp()
1110 *len = xdp.data_end - xdp.data; in qede_rx_xdp()
1116 rxq->xdp_no_pass++; in qede_rx_xdp()
1124 trace_xdp_exception(edev->ndev, prog, act); in qede_rx_xdp()
1131 if (unlikely(qede_xdp_xmit(fp->xdp_tx, bd->mapping, in qede_rx_xdp()
1132 *data_offset, *len, bd->data, in qede_rx_xdp()
1134 dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE, in qede_rx_xdp()
1135 rxq->data_direction); in qede_rx_xdp()
1136 __free_page(bd->data); in qede_rx_xdp()
1138 trace_xdp_exception(edev->ndev, prog, act); in qede_rx_xdp()
1140 dma_sync_single_for_device(rxq->dev, in qede_rx_xdp()
1141 bd->mapping + *data_offset, in qede_rx_xdp()
1142 *len, rxq->data_direction); in qede_rx_xdp()
1143 fp->xdp_xmit |= QEDE_XDP_TX; in qede_rx_xdp()
1154 trace_xdp_exception(edev->ndev, prog, act); in qede_rx_xdp()
1158 dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE, in qede_rx_xdp()
1159 rxq->data_direction); in qede_rx_xdp()
1161 if (unlikely(xdp_do_redirect(edev->ndev, &xdp, prog))) in qede_rx_xdp()
1164 fp->xdp_xmit |= QEDE_XDP_REDIRECT; in qede_rx_xdp()
1172 trace_xdp_exception(edev->ndev, prog, act); in qede_rx_xdp()
1175 qede_recycle_rx_bd_ring(rxq, cqe->bd_num); in qede_rx_xdp()
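
The verdict handling in qede_rx_xdp() above follows the usual XDP shape: XDP_PASS falls through to normal skb construction, XDP_TX posts the page on the driver's XDP transmit ring, XDP_REDIRECT hands it to another device via xdp_do_redirect(), and anything else is dropped with the descriptor recycled. A compact sketch of that dispatch, using a local enum rather than the kernel's XDP action values:

#include <stdbool.h>

/* Local stand-ins for the kernel's XDP actions. */
enum xdp_verdict { VERDICT_ABORTED, VERDICT_DROP, VERDICT_PASS,
		   VERDICT_TX, VERDICT_REDIRECT };

/* Returns true when the frame should continue down the normal receive
 * path (an skb gets built), false when the XDP program consumed it.
 */
static bool handle_xdp_verdict(enum xdp_verdict act)
{
	switch (act) {
	case VERDICT_PASS:
		return true;
	case VERDICT_TX:
		/* driver: post the page on the XDP TX ring and set
		 * fp->xdp_xmit so one doorbell is rung at poll end
		 */
		return false;
	case VERDICT_REDIRECT:
		/* driver: unmap the page, call xdp_do_redirect(), and
		 * flush the redirects once per poll
		 */
		return false;
	default:
		/* aborted/unknown: drop and recycle the RX descriptor */
		return false;
	}
}
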
1184 struct eth_fast_path_rx_reg_cqe *cqe, in qede_rx_build_jumbo() argument
1187 u16 pkt_len = le16_to_cpu(cqe->pkt_len); in qede_rx_build_jumbo()
1192 pkt_len -= first_bd_len; in qede_rx_build_jumbo()
1195 for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) { in qede_rx_build_jumbo()
1196 u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size : in qede_rx_build_jumbo()
1213 bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX; in qede_rx_build_jumbo()
1214 bd = &rxq->sw_rx_ring[bd_cons_idx]; in qede_rx_build_jumbo()
1217 dma_unmap_page(rxq->dev, bd->mapping, in qede_rx_build_jumbo()
1220 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++, in qede_rx_build_jumbo()
1221 bd->data, rxq->rx_headroom, cur_size); in qede_rx_build_jumbo()
1223 skb->truesize += PAGE_SIZE; in qede_rx_build_jumbo()
1224 skb->data_len += cur_size; in qede_rx_build_jumbo()
1225 skb->len += cur_size; in qede_rx_build_jumbo()
1226 pkt_len -= cur_size; in qede_rx_build_jumbo()
1241 union eth_rx_cqe *cqe, in qede_rx_process_tpa_cqe() argument
1246 qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start); in qede_rx_process_tpa_cqe()
1249 qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont); in qede_rx_process_tpa_cqe()
1252 return qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end); in qede_rx_process_tpa_cqe()
1262 struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog); in qede_rx_process_cqe()
1266 union eth_rx_cqe *cqe; in qede_rx_process_cqe() local
1272 /* Get the CQE from the completion ring */ in qede_rx_process_cqe()
1273 cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring); in qede_rx_process_cqe()
1274 cqe_type = cqe->fast_path_regular.type; in qede_rx_process_cqe()
1280 sp_cqe = (struct eth_slow_path_rx_cqe *)cqe; in qede_rx_process_cqe()
1281 edev->ops->eth_cqe_completion(edev->cdev, fp->id, sp_cqe); in qede_rx_process_cqe()
1287 return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type); in qede_rx_process_cqe()
1292 bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX; in qede_rx_process_cqe()
1293 bd = &rxq->sw_rx_ring[bd_cons_idx]; in qede_rx_process_cqe()
1295 fp_cqe = &cqe->fast_path_regular; in qede_rx_process_cqe()
1296 len = le16_to_cpu(fp_cqe->len_on_first_bd); in qede_rx_process_cqe()
1297 pad = fp_cqe->placement_offset + rxq->rx_headroom; in qede_rx_process_cqe()
1306 flags = cqe->fast_path_regular.pars_flags.flags; in qede_rx_process_cqe()
1312 rxq->rx_ip_frags++; in qede_rx_process_cqe()
1314 rxq->rx_hw_errors++; in qede_rx_process_cqe()
1322 rxq->rx_alloc_errors++; in qede_rx_process_cqe()
1323 qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num); in qede_rx_process_cqe()
1328 * by a single cqe. in qede_rx_process_cqe()
1330 if (fp_cqe->bd_num > 1) { in qede_rx_process_cqe()
1341 /* The SKB contains all the data. Now prepare meta-magic */ in qede_rx_process_cqe()
1342 skb->protocol = eth_type_trans(skb, edev->ndev); in qede_rx_process_cqe()
1343 qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash); in qede_rx_process_cqe()
1345 skb_record_rx_queue(skb, rxq->rxq_id); in qede_rx_process_cqe()
1346 qede_ptp_record_rx_ts(edev, cqe, skb); in qede_rx_process_cqe()
1348 /* SKB is prepared - pass it to stack */ in qede_rx_process_cqe()
1349 qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag)); in qede_rx_process_cqe()
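
qede_rx_process_cqe() first branches on the completion type: slow-path CQEs are returned to the core qed module via eth_cqe_completion(), TPA start/continue/end CQEs drive the aggregation state machine, and only regular CQEs take the single-frame skb path shown above. A small model of that dispatch; cqe_kind and process_one_cqe() are illustrative names, not driver types:

enum cqe_kind { CQE_SLOWPATH, CQE_TPA_START, CQE_TPA_CONT,
		CQE_TPA_END, CQE_REGULAR };

static int process_one_cqe(enum cqe_kind kind)
{
	switch (kind) {
	case CQE_SLOWPATH:
		return 0;              /* no frame delivered to the stack */
	case CQE_TPA_START:
	case CQE_TPA_CONT:
		return 0;              /* frame still being aggregated */
	case CQE_TPA_END:
		return 1;              /* aggregated skb goes up via GRO */
	case CQE_REGULAR:
	default:
		return 1;              /* single-frame skb delivered */
	}
}
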
1356 struct qede_rx_queue *rxq = fp->rxq; in qede_rx_int()
1357 struct qede_dev *edev = fp->edev; in qede_rx_int()
1361 hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); in qede_rx_int()
1362 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); in qede_rx_int()
1364 /* Memory barrier to prevent the CPU from doing speculative reads of CQE in qede_rx_int()
1365 * / BD in the while-loop before reading hw_comp_cons. If the CQE is in qede_rx_int()
1366 * read before it is written by FW, then FW writes CQE and SB, and then in qede_rx_int()
1367 * the CPU reads the hw_comp_cons, it will use an old CQE. in qede_rx_int()
1374 qed_chain_recycle_consumed(&rxq->rx_comp_ring); in qede_rx_int()
1375 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); in qede_rx_int()
1379 rxq->rcv_pkts += rcv_pkts; in qede_rx_int()
1382 while (rxq->num_rx_buffers - rxq->filled_buffers) in qede_rx_int()
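
qede_rx_int() processes completions under a NAPI budget and then refills the buffer ring back up to num_rx_buffers. The sketch below mirrors that loop-then-refill shape with a toy rxq_model; in the driver the refill path is qede_alloc_rx_buffer() shown earlier:

struct rxq_model {
	unsigned int sw_cons;          /* software completion consumer */
	unsigned int filled;           /* RX buffers currently posted */
	unsigned int num_buffers;      /* nominal fill level of the ring */
};

static int rx_poll(struct rxq_model *q, unsigned int hw_cons, int budget)
{
	int done = 0;

	while (q->sw_cons != hw_cons && done < budget) {
		q->sw_cons++;          /* one CQE processed */
		done++;
	}

	/* Top the ring back up, as qede_rx_int() does after its loop. */
	while (q->num_buffers - q->filled)
		q->filled++;           /* stand-in for qede_alloc_rx_buffer() */

	return done;
}
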
1394 qed_sb_update_sb_idx(fp->sb_info); in qede_poll_is_more_work()
1408 if (likely(fp->type & QEDE_FASTPATH_RX)) in qede_poll_is_more_work()
1409 if (qede_has_rx_work(fp->rxq)) in qede_poll_is_more_work()
1412 if (fp->type & QEDE_FASTPATH_XDP) in qede_poll_is_more_work()
1413 if (qede_txq_has_work(fp->xdp_tx)) in qede_poll_is_more_work()
1416 if (likely(fp->type & QEDE_FASTPATH_TX)) { in qede_poll_is_more_work()
1419 for_each_cos_in_txq(fp->edev, cos) { in qede_poll_is_more_work()
1420 if (qede_txq_has_work(&fp->txq[cos])) in qede_poll_is_more_work()
1435 struct qede_dev *edev = fp->edev; in qede_poll()
1439 fp->xdp_xmit = 0; in qede_poll()
1441 if (likely(fp->type & QEDE_FASTPATH_TX)) { in qede_poll()
1444 for_each_cos_in_txq(fp->edev, cos) { in qede_poll()
1445 if (qede_txq_has_work(&fp->txq[cos])) in qede_poll()
1446 qede_tx_int(edev, &fp->txq[cos]); in qede_poll()
1450 if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx)) in qede_poll()
1451 qede_xdp_tx_int(edev, fp->xdp_tx); in qede_poll()
1453 rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) && in qede_poll()
1454 qede_has_rx_work(fp->rxq)) ? in qede_poll()
1461 qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1); in qede_poll()
1467 if (fp->xdp_xmit & QEDE_XDP_TX) { in qede_poll()
1468 xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl); in qede_poll()
1470 fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod); in qede_poll()
1471 qede_update_tx_producer(fp->xdp_tx); in qede_poll()
1474 if (fp->xdp_xmit & QEDE_XDP_REDIRECT) in qede_poll()
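
The tail of qede_poll() batches the expensive operations: XDP_TX frames posted during the poll only advanced the software producer, and the doorbell write plus the redirect flush are each issued once per poll. A hedged sketch of that batching; the flag values and callbacks are stand-ins for QEDE_XDP_TX/QEDE_XDP_REDIRECT, qede_update_tx_producer() and the driver's redirect-flush call:

#define XMIT_TX        0x1             /* mirrors QEDE_XDP_TX */
#define XMIT_REDIRECT  0x2             /* mirrors QEDE_XDP_REDIRECT */

static void finish_poll(unsigned int xdp_xmit,
			void (*ring_doorbell)(void),
			void (*flush_redirects)(void))
{
	if (xdp_xmit & XMIT_TX)
		ring_doorbell();       /* one MMIO write for the whole batch */

	if (xdp_xmit & XMIT_REDIRECT)
		flush_redirects();     /* the driver's redirect-flush call */
}
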
1484 qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/); in qede_msix_fp_int()
1486 napi_schedule_irqoff(&fp->napi); in qede_msix_fp_int()
1509 /* Get tx-queue context and netdev index */ in qede_start_xmit()
1511 WARN_ON(txq_index >= QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc); in qede_start_xmit()
1515 WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1)); in qede_start_xmit()
1522 txq->tx_mem_alloc_err++; in qede_start_xmit()
1531 idx = txq->sw_tx_prod; in qede_start_xmit()
1532 txq->sw_tx_ring.skbs[idx].skb = skb; in qede_start_xmit()
1534 qed_chain_produce(&txq->tx_pbl); in qede_start_xmit()
1536 first_bd->data.bd_flags.bitfields = in qede_start_xmit()
1539 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) in qede_start_xmit()
1543 mapping = dma_map_single(txq->dev, skb->data, in qede_start_xmit()
1545 if (unlikely(dma_mapping_error(txq->dev, mapping))) { in qede_start_xmit()
1559 qed_chain_produce(&txq->tx_pbl); in qede_start_xmit()
1564 qed_chain_produce(&txq->tx_pbl); in qede_start_xmit()
1573 first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb)); in qede_start_xmit()
1574 first_bd->data.bd_flags.bitfields |= in qede_start_xmit()
1580 /* We don't re-calculate IP checksum as it is already done by in qede_start_xmit()
1583 first_bd->data.bd_flags.bitfields |= in qede_start_xmit()
1587 first_bd->data.bd_flags.bitfields |= in qede_start_xmit()
1593 /* Legacy FW had flipped behavior in regard to this bit - in qede_start_xmit()
1597 if (unlikely(txq->is_legacy)) in qede_start_xmit()
1609 first_bd->data.bd_flags.bitfields |= in qede_start_xmit()
1611 third_bd->data.lso_mss = in qede_start_xmit()
1612 cpu_to_le16(skb_shinfo(skb)->gso_size); in qede_start_xmit()
1615 first_bd->data.bd_flags.bitfields |= in qede_start_xmit()
1621 first_bd->data.bd_flags.bitfields |= 1 << tmp; in qede_start_xmit()
1625 first_bd->data.bd_flags.bitfields |= in qede_start_xmit()
1630 /* @@@TBD - if will not be removed need to check */ in qede_start_xmit()
1631 third_bd->data.bitfields |= in qede_start_xmit()
1640 first_bd->nbytes, first_bd->addr.hi, in qede_start_xmit()
1641 first_bd->addr.lo); in qede_start_xmit()
1643 mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi), in qede_start_xmit()
1644 le32_to_cpu(first_bd->addr.lo)) + in qede_start_xmit()
1648 le16_to_cpu(first_bd->nbytes) - in qede_start_xmit()
1654 txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD; in qede_start_xmit()
1656 first_bd->nbytes = cpu_to_le16(hlen); in qede_start_xmit()
1662 val |= ((skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) << in qede_start_xmit()
1666 first_bd->data.bitfields = cpu_to_le16(val); in qede_start_xmit()
1670 while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) { in qede_start_xmit()
1672 &skb_shinfo(skb)->frags[frag_idx], in qede_start_xmit()
1689 for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) { in qede_start_xmit()
1691 qed_chain_produce(&txq->tx_pbl); in qede_start_xmit()
1696 &skb_shinfo(skb)->frags[frag_idx], in qede_start_xmit()
1706 first_bd->data.nbds = nbd; in qede_start_xmit()
1708 netdev_tx_sent_queue(netdev_txq, skb->len); in qede_start_xmit()
1715 txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers; in qede_start_xmit()
1718 txq->tx_db.data.bd_prod = in qede_start_xmit()
1719 cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl)); in qede_start_xmit()
1724 if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl) in qede_start_xmit()
1730 txq->stopped_cnt++; in qede_start_xmit()
1735 * fp->bd_tx_cons in qede_start_xmit()
1739 if ((qed_chain_get_elem_left(&txq->tx_pbl) >= in qede_start_xmit()
1741 (edev->state == QEDE_STATE_OPEN)) { in qede_start_xmit()
1757 total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc; in qede_select_queue()
1770 if (skb->encapsulation) { in qede_features_check()
1775 l4_proto = ip_hdr(skb)->protocol; in qede_features_check()
1778 l4_proto = ipv6_hdr(skb)->nexthdr; in qede_features_check()
1793 vxln_port = edev->vxlan_dst_port; in qede_features_check()
1794 gnv_port = edev->geneve_dst_port; in qede_features_check()
1796 if ((skb_inner_mac_header(skb) - in qede_features_check()
1798 (ntohs(udp_hdr(skb)->dest) != vxln_port && in qede_features_check()
1799 ntohs(udp_hdr(skb)->dest) != gnv_port)) in qede_features_check()