Lines Matching (full-word search): queue, pkt, tx
1 // SPDX-License-Identifier: GPL-2.0
28 #define FBNIC_XMIT_CB(__skb) ((struct fbnic_xmit_cb *)((__skb)->cb))
32 unsigned long csr_base = (unsigned long)ring->doorbell; in fbnic_ring_csr_base()
34 csr_base &= ~(FBNIC_QUEUE_STRIDE * sizeof(u32) - 1); in fbnic_ring_csr_base()
54 * fbnic_ts40_to_ns() - convert descriptor timestamp to PHC time
72 s = u64_stats_fetch_begin(&fbn->time_seq); in fbnic_ts40_to_ns()
73 offset = READ_ONCE(fbn->time_offset); in fbnic_ts40_to_ns()
74 } while (u64_stats_fetch_retry(&fbn->time_seq, s)); in fbnic_ts40_to_ns()
76 high = READ_ONCE(fbn->time_high); in fbnic_ts40_to_ns()
81 /* Compare bits 32-39 between periodic reads and ts40, in fbnic_ts40_to_ns()
87 if (ts_top < (u8)high && (u8)high - ts_top > U8_MAX / 2) in fbnic_ts40_to_ns()
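
The comparison above is the wrap check fbnic_ts40_to_ns() applies when it extends a 40-bit hardware timestamp with a periodically sampled 64-bit PHC value. Below is a minimal, self-contained sketch of that technique, not the driver function itself: names are illustrative, ts40 is assumed to already be masked to 40 bits, and the periodic snapshot is assumed to lag the stamp by less than half of the bits-32..39 range.

#include <stdint.h>
#include <stdio.h>

/* Sketch only: "high" stands in for bits 32..63 of the last periodic PHC
 * read, ts40 is the 40-bit stamp from the descriptor, offset is the
 * separately tracked PHC offset.
 */
static uint64_t ts40_to_ns_sketch(uint32_t high, uint64_t ts40, int64_t offset)
{
	uint8_t ts_top = ts40 >> 32;	/* bits 32..39 of the stamp */
	uint64_t ns = ((uint64_t)(high >> 8) << 40) | ts40;

	/* If the stamp's bits 32..39 look "older" than the snapshot's by more
	 * than half their range, the clock wrapped past bit 40 after the
	 * snapshot was taken, so advance the upper bits by one step.
	 */
	if (ts_top < (uint8_t)high && (uint8_t)high - ts_top > UINT8_MAX / 2)
		ns += 1ULL << 40;

	return ns + offset;
}

int main(void)
{
	uint32_t high = 0x000012ffU;		/* snapshot just before a wrap */
	uint64_t ts40 = 0x0000012345ULL;	/* stamp taken just after it */

	printf("%#llx\n", (unsigned long long)ts40_to_ns_sketch(high, ts40, 0));
	return 0;
}
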
95 return (ring->head - ring->tail - 1) & ring->size_mask; in fbnic_desc_unused()
100 return (ring->tail - ring->head) & ring->size_mask; in fbnic_desc_used()
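
A standalone sketch of the head/tail arithmetic used by fbnic_desc_unused() and fbnic_desc_used(), assuming a power-of-two ring so size_mask doubles as the wrap mask; one slot stays reserved so an empty ring is distinguishable from a full one.

#include <assert.h>
#include <stdint.h>

struct ring_sketch {
	uint32_t head;
	uint32_t tail;
	uint32_t size_mask;	/* ring entries - 1, entries is a power of two */
};

static uint32_t desc_unused(const struct ring_sketch *r)
{
	return (r->head - r->tail - 1) & r->size_mask;
}

static uint32_t desc_used(const struct ring_sketch *r)
{
	return (r->tail - r->head) & r->size_mask;
}

int main(void)
{
	struct ring_sketch r = { .head = 5, .tail = 12, .size_mask = 15 };

	assert(desc_used(&r) == 7);
	assert(desc_unused(&r) == 8);	/* 16 slots: 7 used, 8 free, 1 reserved */
	return 0;
}
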
106 return netdev_get_tx_queue(dev, ring->q_idx); in txring_txq()
119 u64_stats_update_begin(&ring->stats.syncp); in fbnic_maybe_stop_tx()
120 ring->stats.twq.stop++; in fbnic_maybe_stop_tx()
121 u64_stats_update_end(&ring->stats.syncp); in fbnic_maybe_stop_tx()
129 struct netdev_queue *dev_queue = txring_txq(skb->dev, ring); in fbnic_tx_sent_queue()
130 unsigned int bytecount = FBNIC_XMIT_CB(skb)->bytecount; in fbnic_tx_sent_queue()
169 if (!unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) in fbnic_tx_tstamp()
172 fbn = netdev_priv(skb->dev); in fbnic_tx_tstamp()
173 if (fbn->hwtstamp_config.tx_type == HWTSTAMP_TX_OFF) in fbnic_tx_tstamp()
176 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in fbnic_tx_tstamp()
177 FBNIC_XMIT_CB(skb)->flags |= FBNIC_XMIT_CB_TS; in fbnic_tx_tstamp()
178 FBNIC_XMIT_CB(skb)->hw_head = -1; in fbnic_tx_tstamp()
195 if (shinfo->gso_type & SKB_GSO_PARTIAL) { in fbnic_tx_lso()
197 } else if (!skb->encapsulation) { in fbnic_tx_lso()
198 if (ip_hdr(skb)->version == 4) in fbnic_tx_lso()
205 o3len = skb_inner_network_header(skb) - skb_network_header(skb); in fbnic_tx_lso()
206 *i3len -= o3len; in fbnic_tx_lso()
213 payload_len = cpu_to_be16(skb->len - (l4hdr - skb->data)); in fbnic_tx_lso()
215 if (shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { in fbnic_tx_lso()
220 csum_replace_by_diff(&tcph->check, (__force __wsum)payload_len); in fbnic_tx_lso()
226 csum_replace_by_diff(&udph->check, (__force __wsum)payload_len); in fbnic_tx_lso()
229 hdrlen = (l4hdr - skb->data) + l4len; in fbnic_tx_lso()
233 FIELD_PREP(FBNIC_TWD_MSS_MASK, shinfo->gso_size) | in fbnic_tx_lso()
236 FBNIC_XMIT_CB(skb)->bytecount += (shinfo->gso_segs - 1) * hdrlen; in fbnic_tx_lso()
237 FBNIC_XMIT_CB(skb)->gso_segs = shinfo->gso_segs; in fbnic_tx_lso()
239 u64_stats_update_begin(&ring->stats.syncp); in fbnic_tx_lso()
240 ring->stats.twq.lso += shinfo->gso_segs; in fbnic_tx_lso()
241 u64_stats_update_end(&ring->stats.syncp); in fbnic_tx_lso()
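
The bytecount adjustment in fbnic_tx_lso() accounts for the protocol headers the hardware replicates once per LSO segment: the BQL byte count is skb->len plus one extra header copy for every segment after the first. A small arithmetic sketch with made-up sizes (hdrlen and MSS here are illustrative, not taken from the driver):

#include <stdio.h>

int main(void)
{
	unsigned int skb_len   = 64 * 1024;	/* GSO super-packet length */
	unsigned int hdrlen    = 54;		/* e.g. Ethernet + IPv4 + TCP */
	unsigned int gso_size  = 1448;		/* MSS */
	unsigned int payload   = skb_len - hdrlen;
	unsigned int gso_segs  = (payload + gso_size - 1) / gso_size;
	unsigned int bytecount = skb_len + (gso_segs - 1) * hdrlen;

	printf("segs=%u, bytes on the wire=%u\n", gso_segs, bytecount);
	return 0;
}
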
255 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) in fbnic_tx_offloads()
259 i3len = skb_checksum_start(skb) - skb_network_header(skb); in fbnic_tx_offloads()
262 skb->csum_offset / 2)); in fbnic_tx_offloads()
264 if (shinfo->gso_size) { in fbnic_tx_offloads()
269 u64_stats_update_begin(&ring->stats.syncp); in fbnic_tx_offloads()
270 ring->stats.twq.csum_partial++; in fbnic_tx_offloads()
271 u64_stats_update_end(&ring->stats.syncp); in fbnic_tx_offloads()
285 if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) { in fbnic_rx_csum()
291 skb->ip_summed = CHECKSUM_UNNECESSARY; in fbnic_rx_csum()
295 skb->ip_summed = CHECKSUM_COMPLETE; in fbnic_rx_csum()
296 skb->csum = (__force __wsum)csum; in fbnic_rx_csum()
304 struct device *dev = skb->dev->dev.parent; in fbnic_tx_map()
305 unsigned int tail = ring->tail, first; in fbnic_tx_map()
311 ring->tx_buf[tail] = skb; in fbnic_tx_map()
314 tail &= ring->size_mask; in fbnic_tx_map()
318 data_len = skb->data_len; in fbnic_tx_map()
323 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); in fbnic_tx_map()
325 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { in fbnic_tx_map()
326 twd = &ring->desc[tail]; in fbnic_tx_map()
337 tail &= ring->size_mask; in fbnic_tx_map()
343 data_len -= size; in fbnic_tx_map()
353 FBNIC_XMIT_CB(skb)->desc_count = ((twd - meta) + 1) & ring->size_mask; in fbnic_tx_map()
355 ring->tail = tail; in fbnic_tx_map()
361 fbnic_maybe_stop_tx(skb->dev, ring, FBNIC_MAX_SKB_DESC); in fbnic_tx_map()
369 writel(tail, ring->doorbell); in fbnic_tx_map()
375 netdev_err(skb->dev, "TX DMA map failed\n"); in fbnic_tx_map()
378 tail--; in fbnic_tx_map()
379 tail &= ring->size_mask; in fbnic_tx_map()
380 twd = &ring->desc[tail]; in fbnic_tx_map()
395 __le64 *meta = &ring->desc[ring->tail]; in fbnic_xmit_frame_ring()
407 desc_needed = skb_shinfo(skb)->nr_frags + 10; in fbnic_xmit_frame_ring()
408 if (fbnic_maybe_stop_tx(skb->dev, ring, desc_needed)) in fbnic_xmit_frame_ring()
414 FBNIC_XMIT_CB(skb)->bytecount = skb->len; in fbnic_xmit_frame_ring()
415 FBNIC_XMIT_CB(skb)->gso_segs = 1; in fbnic_xmit_frame_ring()
416 FBNIC_XMIT_CB(skb)->desc_count = 0; in fbnic_xmit_frame_ring()
417 FBNIC_XMIT_CB(skb)->flags = 0; in fbnic_xmit_frame_ring()
430 u64_stats_update_begin(&ring->stats.syncp); in fbnic_xmit_frame_ring()
431 ring->stats.dropped++; in fbnic_xmit_frame_ring()
432 u64_stats_update_end(&ring->stats.syncp); in fbnic_xmit_frame_ring()
439 unsigned int q_map = skb->queue_mapping; in fbnic_xmit_frame()
441 return fbnic_xmit_frame_ring(skb, fbn->tx[q_map]); in fbnic_xmit_frame()
462 skb_gso_features = skb_shinfo(skb)->gso_type; in fbnic_features_check_encap_gso()
471 /* We can only do IPv6-in-IPv6, not v4-in-v6. It'd be nice in fbnic_features_check_encap_gso()
479 if ((skb_inner_network_header(skb) - skb_network_header(skb)) % 2) in fbnic_features_check_encap_gso()
482 /* Encapsulated GSO packet, make 100% sure it's IPv6-in-IPv6. */ in fbnic_features_check_encap_gso()
484 if (ip6_hdr->version != 6) in fbnic_features_check_encap_gso()
487 l4_hdr = ip6_hdr->nexthdr; in fbnic_features_check_encap_gso()
488 start = (unsigned char *)ip6_hdr - skb->data + sizeof(struct ipv6hdr); in fbnic_features_check_encap_gso()
491 skb->data + start != skb_inner_network_header(skb)) in fbnic_features_check_encap_gso()
503 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) in fbnic_features_check()
507 l3len = skb_checksum_start(skb) - skb_network_header(skb); in fbnic_features_check()
513 if ((l2len | l3len | skb->csum_offset) % 2 || in fbnic_features_check()
516 !FIELD_FIT(FBNIC_TWD_CSUM_OFFSET_MASK, skb->csum_offset / 2)) in fbnic_features_check()
519 if (likely(!skb->encapsulation) || !skb_is_gso(skb)) in fbnic_features_check()
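
fbnic_features_check() only keeps checksum offload when the L2/L3 lengths and csum_offset are even and fit the descriptor fields, since the TWD stores them in 2-byte units. A hedged sketch of that validation; the field widths below are placeholders, not the device's real TWD layout.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define L2_WORDS_MAX	0x1fU	/* illustrative field widths */
#define L3_WORDS_MAX	0x7fU
#define CSUM_WORDS_MAX	0xffU

static bool csum_offload_ok(uint32_t l2len, uint32_t l3len, uint32_t csum_offset)
{
	/* Descriptor fields count 2-byte words, so everything must be even */
	if ((l2len | l3len | csum_offset) % 2)
		return false;

	return l2len / 2 <= L2_WORDS_MAX &&
	       l3len / 2 <= L3_WORDS_MAX &&
	       csum_offset / 2 <= CSUM_WORDS_MAX;
}

int main(void)
{
	assert(csum_offload_ok(14, 20, 16));	/* plain TCP over IPv4 */
	assert(!csum_offload_ok(14, 21, 16));	/* odd L3 length: fall back */
	return 0;
}
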
530 unsigned int head = ring->head; in fbnic_clean_twq0()
534 clean_desc = (hw_head - head) & ring->size_mask; in fbnic_clean_twq0()
537 struct sk_buff *skb = ring->tx_buf[head]; in fbnic_clean_twq0()
540 desc_cnt = FBNIC_XMIT_CB(skb)->desc_count; in fbnic_clean_twq0()
544 if (unlikely(FBNIC_XMIT_CB(skb)->flags & FBNIC_XMIT_CB_TS)) { in fbnic_clean_twq0()
545 FBNIC_XMIT_CB(skb)->hw_head = hw_head; in fbnic_clean_twq0()
551 ring->tx_buf[head] = NULL; in fbnic_clean_twq0()
553 clean_desc -= desc_cnt; in fbnic_clean_twq0()
555 while (!(ring->desc[head] & FBNIC_TWD_TYPE(AL))) { in fbnic_clean_twq0()
557 head &= ring->size_mask; in fbnic_clean_twq0()
558 desc_cnt--; in fbnic_clean_twq0()
561 fbnic_unmap_single_twd(nv->dev, &ring->desc[head]); in fbnic_clean_twq0()
563 head &= ring->size_mask; in fbnic_clean_twq0()
564 desc_cnt--; in fbnic_clean_twq0()
566 while (desc_cnt--) { in fbnic_clean_twq0()
567 fbnic_unmap_page_twd(nv->dev, &ring->desc[head]); in fbnic_clean_twq0()
569 head &= ring->size_mask; in fbnic_clean_twq0()
572 total_bytes += FBNIC_XMIT_CB(skb)->bytecount; in fbnic_clean_twq0()
573 total_packets += FBNIC_XMIT_CB(skb)->gso_segs; in fbnic_clean_twq0()
581 ring->head = head; in fbnic_clean_twq0()
583 txq = txring_txq(nv->napi.dev, ring); in fbnic_clean_twq0()
586 u64_stats_update_begin(&ring->stats.syncp); in fbnic_clean_twq0()
587 ring->stats.dropped += total_packets; in fbnic_clean_twq0()
588 ring->stats.twq.ts_lost += ts_lost; in fbnic_clean_twq0()
589 u64_stats_update_end(&ring->stats.syncp); in fbnic_clean_twq0()
595 u64_stats_update_begin(&ring->stats.syncp); in fbnic_clean_twq0()
596 ring->stats.bytes += total_bytes; in fbnic_clean_twq0()
597 ring->stats.packets += total_packets; in fbnic_clean_twq0()
598 u64_stats_update_end(&ring->stats.syncp); in fbnic_clean_twq0()
603 u64_stats_update_begin(&ring->stats.syncp); in fbnic_clean_twq0()
604 ring->stats.twq.wake++; in fbnic_clean_twq0()
605 u64_stats_update_end(&ring->stats.syncp); in fbnic_clean_twq0()
619 head = (*ts_head < 0) ? ring->head : *ts_head; in fbnic_clean_tsq()
624 if (head == ring->tail) { in fbnic_clean_tsq()
626 netdev_err(nv->napi.dev, in fbnic_clean_tsq()
627 "Tx timestamp without matching packet\n"); in fbnic_clean_tsq()
631 skb = ring->tx_buf[head]; in fbnic_clean_tsq()
632 desc_cnt = FBNIC_XMIT_CB(skb)->desc_count; in fbnic_clean_tsq()
635 head &= ring->size_mask; in fbnic_clean_tsq()
636 } while (!(FBNIC_XMIT_CB(skb)->flags & FBNIC_XMIT_CB_TS)); in fbnic_clean_tsq()
638 fbn = netdev_priv(nv->napi.dev); in fbnic_clean_tsq()
646 FBNIC_XMIT_CB(skb)->flags &= ~FBNIC_XMIT_CB_TS; in fbnic_clean_tsq()
648 head = FBNIC_XMIT_CB(skb)->hw_head; in fbnic_clean_tsq()
654 u64_stats_update_begin(&ring->stats.syncp); in fbnic_clean_tsq()
655 ring->stats.twq.ts_packets++; in fbnic_clean_tsq()
656 u64_stats_update_end(&ring->stats.syncp); in fbnic_clean_tsq()
662 struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx]; in fbnic_page_pool_init()
665 rx_buf->pagecnt_bias = PAGECNT_BIAS_MAX; in fbnic_page_pool_init()
666 rx_buf->page = page; in fbnic_page_pool_init()
672 struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx]; in fbnic_page_pool_get()
674 rx_buf->pagecnt_bias--; in fbnic_page_pool_get()
676 return rx_buf->page; in fbnic_page_pool_get()
682 struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx]; in fbnic_page_pool_drain()
683 struct page *page = rx_buf->page; in fbnic_page_pool_drain()
685 if (!page_pool_unref_page(page, rx_buf->pagecnt_bias)) in fbnic_page_pool_drain()
686 page_pool_put_unrefed_page(nv->page_pool, page, -1, !!budget); in fbnic_page_pool_drain()
688 rx_buf->page = NULL; in fbnic_page_pool_drain()
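
The rx_buf helpers above amortise page refcounting with a bias counter: each page starts with a large pre-paid bias, every use consumes one unit, and at drain time only the unused remainder is released in a single operation. A toy sketch of that bookkeeping, with plain integers standing in for the page_pool reference API:

#include <assert.h>

#define PAGECNT_BIAS_MAX	0x7fffffffL	/* illustrative value */

struct buf_sketch {
	long page_refs;		/* stands in for the page's reference count */
	long pagecnt_bias;	/* uses still "pre-paid" by the driver */
};

static void buf_init(struct buf_sketch *b)
{
	b->pagecnt_bias = PAGECNT_BIAS_MAX;
	b->page_refs = PAGECNT_BIAS_MAX;	/* taken up front, once */
}

static void buf_get(struct buf_sketch *b)
{
	b->pagecnt_bias--;			/* consume one pre-paid reference */
}

static void buf_drain(struct buf_sketch *b)
{
	b->page_refs -= b->pagecnt_bias;	/* drop the unused remainder */
}

int main(void)
{
	struct buf_sketch b;

	buf_init(&b);
	buf_get(&b);
	buf_get(&b);
	buf_drain(&b);
	assert(b.page_refs == 2);	/* only the two consumed uses remain */
	return 0;
}
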
695 fbnic_clean_twq0(nv, napi_budget, &qt->sub0, false, head0); in fbnic_clean_twq()
697 fbnic_clean_twq0(nv, napi_budget, &qt->sub0, false, ts_head); in fbnic_clean_twq()
704 struct fbnic_ring *cmpl = &qt->cmpl; in fbnic_clean_tcq()
705 s32 head0 = -1, ts_head = -1; in fbnic_clean_tcq()
707 u32 head = cmpl->head; in fbnic_clean_tcq()
709 done = (head & (cmpl->size_mask + 1)) ? 0 : cpu_to_le64(FBNIC_TCD_DONE); in fbnic_clean_tcq()
710 raw_tcd = &cmpl->desc[head & cmpl->size_mask]; in fbnic_clean_tcq()
712 /* Walk the completion queue collecting the heads reported by NIC */ in fbnic_clean_tcq()
734 fbnic_clean_tsq(nv, &qt->sub0, tcd, &ts_head, &head0); in fbnic_clean_tcq()
742 if (!(head & cmpl->size_mask)) { in fbnic_clean_tcq()
744 raw_tcd = &cmpl->desc[0]; in fbnic_clean_tcq()
748 /* Record the current head/tail of the queue */ in fbnic_clean_tcq()
749 if (cmpl->head != head) { in fbnic_clean_tcq()
750 cmpl->head = head; in fbnic_clean_tcq()
751 writel(head & cmpl->size_mask, cmpl->doorbell); in fbnic_clean_tcq()
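
The completion queues above use a generation ("done") bit instead of zeroing descriptors between laps: the head counter runs over twice the ring size, and the expected DONE value flips each time it crosses the ring boundary, so stale completions from the previous lap are ignored. A standalone sketch of that test (the bit position and sizes are illustrative):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define RING_SIZE	8U			/* power of two */
#define SIZE_MASK	(RING_SIZE - 1)
#define DESC_DONE	(1ULL << 63)		/* illustrative bit */

/* The bit just above SIZE_MASK in the head counter tracks the current
 * generation; the ring memory is never cleared between laps.
 */
static bool completion_valid(uint64_t desc, uint32_t head)
{
	uint64_t expect = (head & (SIZE_MASK + 1)) ? 0 : DESC_DONE;

	return (desc & DESC_DONE) == expect;
}

int main(void)
{
	/* First lap: hardware sets DONE; second lap: it clears it */
	assert(completion_valid(DESC_DONE, 0));
	assert(!completion_valid(DESC_DONE, RING_SIZE));
	return 0;
}
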
761 unsigned int head = ring->head; in fbnic_clean_bdq()
770 head &= ring->size_mask; in fbnic_clean_bdq()
773 ring->head = head; in fbnic_clean_bdq()
778 __le64 *bdq_desc = &bdq->desc[id * FBNIC_BD_FRAG_COUNT]; in fbnic_bd_prep()
794 } while (--i); in fbnic_bd_prep()
800 unsigned int i = bdq->tail; in fbnic_fill_bdq()
808 page = page_pool_dev_alloc_pages(nv->page_pool); in fbnic_fill_bdq()
810 u64_stats_update_begin(&bdq->stats.syncp); in fbnic_fill_bdq()
811 bdq->stats.rx.alloc_failed++; in fbnic_fill_bdq()
812 u64_stats_update_end(&bdq->stats.syncp); in fbnic_fill_bdq()
821 i &= bdq->size_mask; in fbnic_fill_bdq()
823 count--; in fbnic_fill_bdq()
826 if (bdq->tail != i) { in fbnic_fill_bdq()
827 bdq->tail = i; in fbnic_fill_bdq()
832 writel(i, bdq->doorbell); in fbnic_fill_bdq()
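
fbnic_fill_bdq() posts buffers in a batch and rings the doorbell once, and only when the tail actually moved. A userspace-flavoured sketch of that producer pattern; the MMIO write is modelled as a plain volatile store, whereas the driver relies on writel() to order the descriptor writes ahead of the doorbell.

#include <assert.h>
#include <stdint.h>

struct bdq_sketch {
	uint32_t tail;			/* software producer index */
	volatile uint32_t *doorbell;	/* device producer register */
};

/* Publish a batch of newly filled descriptors: update the cached tail and
 * notify the device only if something actually changed.
 */
static void bdq_publish(struct bdq_sketch *bdq, uint32_t new_tail)
{
	if (bdq->tail == new_tail)
		return;

	bdq->tail = new_tail;
	*bdq->doorbell = new_tail;	/* single doorbell per batch */
}

int main(void)
{
	uint32_t fake_doorbell = 0;
	struct bdq_sketch q = { .tail = 0, .doorbell = &fake_doorbell };

	bdq_publish(&q, 5);
	assert(fake_doorbell == 5 && q.tail == 5);
	return 0;
}
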
846 return pg_off - FBNIC_RX_HROOM; in fbnic_hdr_pg_start()
856 return ALIGN(pg_off, 128) - FBNIC_RX_HROOM; in fbnic_hdr_pg_end()
860 struct fbnic_pkt_buff *pkt, in fbnic_pkt_prepare() argument
865 struct page *page = fbnic_page_pool_get(&qt->sub0, hdr_pg_idx); in fbnic_pkt_prepare()
871 WARN_ON_ONCE(pkt->buff.data_hard_start); in fbnic_pkt_prepare()
873 /* Short-cut the end calculation if we know page is fully consumed */ in fbnic_pkt_prepare()
878 headroom = hdr_pg_off - hdr_pg_start + FBNIC_RX_PAD; in fbnic_pkt_prepare()
879 frame_sz = hdr_pg_end - hdr_pg_start; in fbnic_pkt_prepare()
880 xdp_init_buff(&pkt->buff, frame_sz, NULL); in fbnic_pkt_prepare()
885 dma_sync_single_range_for_cpu(nv->dev, page_pool_get_dma_addr(page), in fbnic_pkt_prepare()
892 xdp_prepare_buff(&pkt->buff, hdr_start, headroom, in fbnic_pkt_prepare()
893 len - FBNIC_RX_PAD, true); in fbnic_pkt_prepare()
895 pkt->data_truesize = 0; in fbnic_pkt_prepare()
896 pkt->data_len = 0; in fbnic_pkt_prepare()
897 pkt->nr_frags = 0; in fbnic_pkt_prepare()
901 struct fbnic_pkt_buff *pkt, in fbnic_add_rx_frag() argument
907 struct page *page = fbnic_page_pool_get(&qt->sub1, pg_idx); in fbnic_add_rx_frag()
912 FBNIC_BD_FRAG_SIZE - pg_off : ALIGN(len, 128); in fbnic_add_rx_frag()
918 dma_sync_single_range_for_cpu(nv->dev, page_pool_get_dma_addr(page), in fbnic_add_rx_frag()
922 shinfo = xdp_get_shared_info_from_buff(&pkt->buff); in fbnic_add_rx_frag()
925 pkt->data_truesize += truesize; in fbnic_add_rx_frag()
927 __skb_fill_page_desc_noacc(shinfo, pkt->nr_frags++, page, pg_off, len); in fbnic_add_rx_frag()
930 pkt->data_len += len; in fbnic_add_rx_frag()
934 struct fbnic_pkt_buff *pkt, int budget) in fbnic_put_pkt_buff() argument
940 if (!pkt->buff.data_hard_start) in fbnic_put_pkt_buff()
943 shinfo = xdp_get_shared_info_from_buff(&pkt->buff); in fbnic_put_pkt_buff()
944 nr_frags = pkt->nr_frags; in fbnic_put_pkt_buff()
946 while (nr_frags--) { in fbnic_put_pkt_buff()
947 page = skb_frag_page(&shinfo->frags[nr_frags]); in fbnic_put_pkt_buff()
948 page_pool_put_full_page(nv->page_pool, page, !!budget); in fbnic_put_pkt_buff()
951 page = virt_to_page(pkt->buff.data_hard_start); in fbnic_put_pkt_buff()
952 page_pool_put_full_page(nv->page_pool, page, !!budget); in fbnic_put_pkt_buff()
956 struct fbnic_pkt_buff *pkt) in fbnic_build_skb() argument
958 unsigned int nr_frags = pkt->nr_frags; in fbnic_build_skb()
963 truesize = xdp_data_hard_end(&pkt->buff) + FBNIC_RX_TROOM - in fbnic_build_skb()
964 pkt->buff.data_hard_start; in fbnic_build_skb()
967 skb = napi_build_skb(pkt->buff.data_hard_start, truesize); in fbnic_build_skb()
972 skb_reserve(skb, pkt->buff.data - pkt->buff.data_hard_start); in fbnic_build_skb()
973 __skb_put(skb, pkt->buff.data_end - pkt->buff.data); in fbnic_build_skb()
976 skb_metadata_set(skb, pkt->buff.data - pkt->buff.data_meta); in fbnic_build_skb()
981 shinfo = xdp_get_shared_info_from_buff(&pkt->buff); in fbnic_build_skb()
984 skb->truesize += pkt->data_truesize; in fbnic_build_skb()
985 skb->data_len += pkt->data_len; in fbnic_build_skb()
986 shinfo->nr_frags = nr_frags; in fbnic_build_skb()
987 skb->len += pkt->data_len; in fbnic_build_skb()
993 skb->protocol = eth_type_trans(skb, nv->napi.dev); in fbnic_build_skb()
996 if (pkt->hwtstamp) in fbnic_build_skb()
997 skb_hwtstamps(skb)->hwtstamp = pkt->hwtstamp; in fbnic_build_skb()
1010 struct fbnic_pkt_buff *pkt) in fbnic_rx_tstamp() argument
1018 fbn = netdev_priv(nv->napi.dev); in fbnic_rx_tstamp()
1023 pkt->hwtstamp = ns_to_ktime(ns); in fbnic_rx_tstamp()
1031 struct net_device *netdev = nv->napi.dev; in fbnic_populate_skb_fields()
1032 struct fbnic_ring *rcq = &qt->cmpl; in fbnic_populate_skb_fields()
1036 if (netdev->features & NETIF_F_RXHASH) in fbnic_populate_skb_fields()
1041 skb_record_rx_queue(skb, rcq->q_idx); in fbnic_populate_skb_fields()
1054 struct fbnic_ring *rcq = &qt->cmpl; in fbnic_clean_rcq()
1055 struct fbnic_pkt_buff *pkt; in fbnic_clean_rcq() local
1056 s32 head0 = -1, head1 = -1; in fbnic_clean_rcq()
1058 u32 head = rcq->head; in fbnic_clean_rcq()
1060 done = (head & (rcq->size_mask + 1)) ? cpu_to_le64(FBNIC_RCD_DONE) : 0; in fbnic_clean_rcq()
1061 raw_rcd = &rcq->desc[head & rcq->size_mask]; in fbnic_clean_rcq()
1062 pkt = rcq->pkt; in fbnic_clean_rcq()
1064 /* Walk the completion queue collecting the heads reported by NIC */ in fbnic_clean_rcq()
1066 struct sk_buff *skb = ERR_PTR(-EINVAL); in fbnic_clean_rcq()
1079 fbnic_pkt_prepare(nv, rcd, pkt, qt); in fbnic_clean_rcq()
1084 fbnic_add_rx_frag(nv, rcd, pkt, qt); in fbnic_clean_rcq()
1092 fbnic_rx_tstamp(nv, rcd, pkt); in fbnic_clean_rcq()
1098 skb = fbnic_build_skb(nv, pkt); in fbnic_clean_rcq()
1107 bytes += skb->len; in fbnic_clean_rcq()
1109 napi_gro_receive(&nv->napi, skb); in fbnic_clean_rcq()
1118 fbnic_put_pkt_buff(nv, pkt, 1); in fbnic_clean_rcq()
1121 pkt->buff.data_hard_start = NULL; in fbnic_clean_rcq()
1128 if (!(head & rcq->size_mask)) { in fbnic_clean_rcq()
1130 raw_rcd = &rcq->desc[0]; in fbnic_clean_rcq()
1134 u64_stats_update_begin(&rcq->stats.syncp); in fbnic_clean_rcq()
1135 rcq->stats.packets += packets; in fbnic_clean_rcq()
1136 rcq->stats.bytes += bytes; in fbnic_clean_rcq()
1137 /* Re-add ethernet header length (removed in fbnic_build_skb) */ in fbnic_clean_rcq()
1138 rcq->stats.bytes += ETH_HLEN * packets; in fbnic_clean_rcq()
1139 rcq->stats.dropped += dropped; in fbnic_clean_rcq()
1140 rcq->stats.rx.alloc_failed += alloc_failed; in fbnic_clean_rcq()
1141 rcq->stats.rx.csum_complete += csum_complete; in fbnic_clean_rcq()
1142 rcq->stats.rx.csum_none += csum_none; in fbnic_clean_rcq()
1143 u64_stats_update_end(&rcq->stats.syncp); in fbnic_clean_rcq()
1147 fbnic_clean_bdq(nv, budget, &qt->sub0, head0); in fbnic_clean_rcq()
1148 fbnic_fill_bdq(nv, &qt->sub0); in fbnic_clean_rcq()
1151 fbnic_clean_bdq(nv, budget, &qt->sub1, head1); in fbnic_clean_rcq()
1152 fbnic_fill_bdq(nv, &qt->sub1); in fbnic_clean_rcq()
1154 /* Record the current head/tail of the queue */ in fbnic_clean_rcq()
1155 if (rcq->head != head) { in fbnic_clean_rcq()
1156 rcq->head = head; in fbnic_clean_rcq()
1157 writel(head & rcq->size_mask, rcq->doorbell); in fbnic_clean_rcq()
1165 struct fbnic_dev *fbd = nv->fbd; in fbnic_nv_irq_disable()
1166 u32 v_idx = nv->v_idx; in fbnic_nv_irq_disable()
1173 struct fbnic_dev *fbd = nv->fbd; in fbnic_nv_irq_rearm()
1174 u32 v_idx = nv->v_idx; in fbnic_nv_irq_rearm()
1187 for (i = 0; i < nv->txt_count; i++) in fbnic_poll()
1188 fbnic_clean_tcq(nv, &nv->qt[i], budget); in fbnic_poll()
1190 for (j = 0; j < nv->rxt_count; j++, i++) in fbnic_poll()
1191 work_done += fbnic_clean_rcq(nv, &nv->qt[i], budget); in fbnic_poll()
1206 napi_schedule_irqoff(&nv->napi); in fbnic_msix_clean_rings()
1214 struct fbnic_queue_stats *stats = &rxr->stats; in fbnic_aggregate_ring_rx_counters()
1217 fbn->rx_stats.bytes += stats->bytes; in fbnic_aggregate_ring_rx_counters()
1218 fbn->rx_stats.packets += stats->packets; in fbnic_aggregate_ring_rx_counters()
1219 fbn->rx_stats.dropped += stats->dropped; in fbnic_aggregate_ring_rx_counters()
1220 fbn->rx_stats.rx.alloc_failed += stats->rx.alloc_failed; in fbnic_aggregate_ring_rx_counters()
1221 fbn->rx_stats.rx.csum_complete += stats->rx.csum_complete; in fbnic_aggregate_ring_rx_counters()
1222 fbn->rx_stats.rx.csum_none += stats->rx.csum_none; in fbnic_aggregate_ring_rx_counters()
1224 BUILD_BUG_ON(sizeof(fbn->rx_stats.rx) / 8 != 3); in fbnic_aggregate_ring_rx_counters()
1230 struct fbnic_queue_stats *stats = &txr->stats; in fbnic_aggregate_ring_tx_counters()
1233 fbn->tx_stats.bytes += stats->bytes; in fbnic_aggregate_ring_tx_counters()
1234 fbn->tx_stats.packets += stats->packets; in fbnic_aggregate_ring_tx_counters()
1235 fbn->tx_stats.dropped += stats->dropped; in fbnic_aggregate_ring_tx_counters()
1236 fbn->tx_stats.twq.csum_partial += stats->twq.csum_partial; in fbnic_aggregate_ring_tx_counters()
1237 fbn->tx_stats.twq.lso += stats->twq.lso; in fbnic_aggregate_ring_tx_counters()
1238 fbn->tx_stats.twq.ts_lost += stats->twq.ts_lost; in fbnic_aggregate_ring_tx_counters()
1239 fbn->tx_stats.twq.ts_packets += stats->twq.ts_packets; in fbnic_aggregate_ring_tx_counters()
1240 fbn->tx_stats.twq.stop += stats->twq.stop; in fbnic_aggregate_ring_tx_counters()
1241 fbn->tx_stats.twq.wake += stats->twq.wake; in fbnic_aggregate_ring_tx_counters()
1243 BUILD_BUG_ON(sizeof(fbn->tx_stats.twq) / 8 != 6); in fbnic_aggregate_ring_tx_counters()
1249 if (!(txr->flags & FBNIC_RING_F_STATS)) in fbnic_remove_tx_ring()
1254 /* Remove pointer to the Tx ring */ in fbnic_remove_tx_ring()
1255 WARN_ON(fbn->tx[txr->q_idx] && fbn->tx[txr->q_idx] != txr); in fbnic_remove_tx_ring()
1256 fbn->tx[txr->q_idx] = NULL; in fbnic_remove_tx_ring()
1262 if (!(rxr->flags & FBNIC_RING_F_STATS)) in fbnic_remove_rx_ring()
1268 WARN_ON(fbn->rx[rxr->q_idx] && fbn->rx[rxr->q_idx] != rxr); in fbnic_remove_rx_ring()
1269 fbn->rx[rxr->q_idx] = NULL; in fbnic_remove_rx_ring()
1275 struct fbnic_dev *fbd = nv->fbd; in fbnic_free_napi_vector()
1278 for (i = 0; i < nv->txt_count; i++) { in fbnic_free_napi_vector()
1279 fbnic_remove_tx_ring(fbn, &nv->qt[i].sub0); in fbnic_free_napi_vector()
1280 fbnic_remove_tx_ring(fbn, &nv->qt[i].cmpl); in fbnic_free_napi_vector()
1283 for (j = 0; j < nv->rxt_count; j++, i++) { in fbnic_free_napi_vector()
1284 fbnic_remove_rx_ring(fbn, &nv->qt[i].sub0); in fbnic_free_napi_vector()
1285 fbnic_remove_rx_ring(fbn, &nv->qt[i].sub1); in fbnic_free_napi_vector()
1286 fbnic_remove_rx_ring(fbn, &nv->qt[i].cmpl); in fbnic_free_napi_vector()
1290 page_pool_destroy(nv->page_pool); in fbnic_free_napi_vector()
1291 netif_napi_del(&nv->napi); in fbnic_free_napi_vector()
1292 fbn->napi[fbnic_napi_idx(nv)] = NULL; in fbnic_free_napi_vector()
1300 for (i = 0; i < fbn->num_napi; i++) in fbnic_free_napi_vectors()
1301 if (fbn->napi[i]) in fbnic_free_napi_vectors()
1302 fbnic_free_napi_vector(fbn, fbn->napi[i]); in fbnic_free_napi_vectors()
1314 .pool_size = (fbn->hpq_size + fbn->ppq_size) * nv->rxt_count, in fbnic_alloc_nv_page_pool()
1316 .dev = nv->dev, in fbnic_alloc_nv_page_pool()
1320 .napi = &nv->napi, in fbnic_alloc_nv_page_pool()
1321 .netdev = fbn->netdev, in fbnic_alloc_nv_page_pool()
1341 nv->page_pool = pp; in fbnic_alloc_nv_page_pool()
1349 u64_stats_init(&ring->stats.syncp); in fbnic_ring_init()
1350 ring->doorbell = doorbell; in fbnic_ring_init()
1351 ring->q_idx = q_idx; in fbnic_ring_init()
1352 ring->flags = flags; in fbnic_ring_init()
1361 u32 __iomem *uc_addr = fbd->uc_addr0; in fbnic_alloc_napi_vector()
1369 return -EINVAL; in fbnic_alloc_napi_vector()
1373 return -EIO; in fbnic_alloc_napi_vector()
1375 /* Allocate NAPI vector and queue triads */ in fbnic_alloc_napi_vector()
1378 return -ENOMEM; in fbnic_alloc_napi_vector()
1380 /* Record queue triad counts */ in fbnic_alloc_napi_vector()
1381 nv->txt_count = txt_count; in fbnic_alloc_napi_vector()
1382 nv->rxt_count = rxt_count; in fbnic_alloc_napi_vector()
1384 /* Provide pointer back to fbnic and MSI-X vectors */ in fbnic_alloc_napi_vector()
1385 nv->fbd = fbd; in fbnic_alloc_napi_vector()
1386 nv->v_idx = v_idx; in fbnic_alloc_napi_vector()
1389 fbn->napi[fbnic_napi_idx(nv)] = nv; in fbnic_alloc_napi_vector()
1390 netif_napi_add(fbn->netdev, &nv->napi, fbnic_poll); in fbnic_alloc_napi_vector()
1393 netif_napi_set_irq(&nv->napi, in fbnic_alloc_napi_vector()
1394 pci_irq_vector(to_pci_dev(fbd->dev), nv->v_idx)); in fbnic_alloc_napi_vector()
1397 nv->dev = fbd->dev; in fbnic_alloc_napi_vector()
1411 /* Initialize queue triads */ in fbnic_alloc_napi_vector()
1412 qt = nv->qt; in fbnic_alloc_napi_vector()
1415 /* Configure Tx queue */ in fbnic_alloc_napi_vector()
1418 /* Assign Tx queue to netdev if applicable */ in fbnic_alloc_napi_vector()
1422 fbnic_ring_init(&qt->sub0, db, txq_idx, flags); in fbnic_alloc_napi_vector()
1423 fbn->tx[txq_idx] = &qt->sub0; in fbnic_alloc_napi_vector()
1424 txq_count--; in fbnic_alloc_napi_vector()
1426 fbnic_ring_init(&qt->sub0, db, 0, in fbnic_alloc_napi_vector()
1430 /* Configure Tx completion queue */ in fbnic_alloc_napi_vector()
1432 fbnic_ring_init(&qt->cmpl, db, 0, 0); in fbnic_alloc_napi_vector()
1434 /* Update Tx queue index */ in fbnic_alloc_napi_vector()
1435 txt_count--; in fbnic_alloc_napi_vector()
1438 /* Move to next queue triad */ in fbnic_alloc_napi_vector()
1443 /* Configure header queue */ in fbnic_alloc_napi_vector()
1445 fbnic_ring_init(&qt->sub0, db, 0, FBNIC_RING_F_CTX); in fbnic_alloc_napi_vector()
1447 /* Configure payload queue */ in fbnic_alloc_napi_vector()
1449 fbnic_ring_init(&qt->sub1, db, 0, FBNIC_RING_F_CTX); in fbnic_alloc_napi_vector()
1451 /* Configure Rx completion queue */ in fbnic_alloc_napi_vector()
1453 fbnic_ring_init(&qt->cmpl, db, rxq_idx, FBNIC_RING_F_STATS); in fbnic_alloc_napi_vector()
1454 fbn->rx[rxq_idx] = &qt->cmpl; in fbnic_alloc_napi_vector()
1456 /* Update Rx queue index */ in fbnic_alloc_napi_vector()
1457 rxt_count--; in fbnic_alloc_napi_vector()
1460 /* Move to next queue triad */ in fbnic_alloc_napi_vector()
1467 page_pool_destroy(nv->page_pool); in fbnic_alloc_napi_vector()
1469 netif_napi_del(&nv->napi); in fbnic_alloc_napi_vector()
1470 fbn->napi[fbnic_napi_idx(nv)] = NULL; in fbnic_alloc_napi_vector()
1478 unsigned int num_tx = fbn->num_tx_queues; in fbnic_alloc_napi_vectors()
1479 unsigned int num_rx = fbn->num_rx_queues; in fbnic_alloc_napi_vectors()
1480 unsigned int num_napi = fbn->num_napi; in fbnic_alloc_napi_vectors()
1481 struct fbnic_dev *fbd = fbn->fbd; in fbnic_alloc_napi_vectors()
1484 /* Allocate 1 Tx queue per napi vector */ in fbnic_alloc_napi_vectors()
1494 num_tx--; in fbnic_alloc_napi_vectors()
1501 /* Allocate Tx/Rx queue pairs per vector, or allocate remaining Rx */ in fbnic_alloc_napi_vectors()
1503 int tqpv = DIV_ROUND_UP(num_tx, num_napi - txq_idx); in fbnic_alloc_napi_vectors()
1504 int rqpv = DIV_ROUND_UP(num_rx, num_napi - rxq_idx); in fbnic_alloc_napi_vectors()
1512 num_tx -= tqpv; in fbnic_alloc_napi_vectors()
1515 num_rx -= rqpv; in fbnic_alloc_napi_vectors()
1526 return -ENOMEM; in fbnic_alloc_napi_vectors()
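
The DIV_ROUND_UP() calls above spread the remaining Tx/Rx queues evenly over the vectors that have not been assigned yet, so earlier vectors receive at most one extra queue. A small sketch of that distribution with made-up counts:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int remaining_rx = 7, vectors = 4, v;

	for (v = 0; v < vectors; v++) {
		unsigned int share = DIV_ROUND_UP(remaining_rx, vectors - v);

		printf("vector %u: %u rx queues\n", v, share);
		remaining_rx -= share;
	}
	return 0;	/* prints 2, 2, 2, 1 */
}
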
1532 kvfree(ring->buffer); in fbnic_free_ring_resources()
1533 ring->buffer = NULL; in fbnic_free_ring_resources()
1536 if (!ring->size) in fbnic_free_ring_resources()
1539 dma_free_coherent(dev, ring->size, ring->desc, ring->dma); in fbnic_free_ring_resources()
1540 ring->size_mask = 0; in fbnic_free_ring_resources()
1541 ring->size = 0; in fbnic_free_ring_resources()
1547 struct device *dev = fbn->netdev->dev.parent; in fbnic_alloc_tx_ring_desc()
1551 size = ALIGN(array_size(sizeof(*txr->desc), fbn->txq_size), 4096); in fbnic_alloc_tx_ring_desc()
1553 txr->desc = dma_alloc_coherent(dev, size, &txr->dma, in fbnic_alloc_tx_ring_desc()
1555 if (!txr->desc) in fbnic_alloc_tx_ring_desc()
1556 return -ENOMEM; in fbnic_alloc_tx_ring_desc()
1558 /* txq_size should be a power of 2, so mask is just that -1 */ in fbnic_alloc_tx_ring_desc()
1559 txr->size_mask = fbn->txq_size - 1; in fbnic_alloc_tx_ring_desc()
1560 txr->size = size; in fbnic_alloc_tx_ring_desc()
1567 size_t size = array_size(sizeof(*txr->tx_buf), txr->size_mask + 1); in fbnic_alloc_tx_ring_buffer()
1569 txr->tx_buf = kvzalloc(size, GFP_KERNEL | __GFP_NOWARN); in fbnic_alloc_tx_ring_buffer()
1571 return txr->tx_buf ? 0 : -ENOMEM; in fbnic_alloc_tx_ring_buffer()
1577 struct device *dev = fbn->netdev->dev.parent; in fbnic_alloc_tx_ring_resources()
1580 if (txr->flags & FBNIC_RING_F_DISABLED) in fbnic_alloc_tx_ring_resources()
1587 if (!(txr->flags & FBNIC_RING_F_CTX)) in fbnic_alloc_tx_ring_resources()
1604 struct device *dev = fbn->netdev->dev.parent; in fbnic_alloc_rx_ring_desc()
1605 size_t desc_size = sizeof(*rxr->desc); in fbnic_alloc_rx_ring_desc()
1609 switch (rxr->doorbell - fbnic_ring_csr_base(rxr)) { in fbnic_alloc_rx_ring_desc()
1611 rxq_size = fbn->hpq_size / FBNIC_BD_FRAG_COUNT; in fbnic_alloc_rx_ring_desc()
1615 rxq_size = fbn->ppq_size / FBNIC_BD_FRAG_COUNT; in fbnic_alloc_rx_ring_desc()
1619 rxq_size = fbn->rcq_size; in fbnic_alloc_rx_ring_desc()
1622 return -EINVAL; in fbnic_alloc_rx_ring_desc()
1628 rxr->desc = dma_alloc_coherent(dev, size, &rxr->dma, in fbnic_alloc_rx_ring_desc()
1630 if (!rxr->desc) in fbnic_alloc_rx_ring_desc()
1631 return -ENOMEM; in fbnic_alloc_rx_ring_desc()
1633 /* rxq_size should be a power of 2, so mask is just that -1 */ in fbnic_alloc_rx_ring_desc()
1634 rxr->size_mask = rxq_size - 1; in fbnic_alloc_rx_ring_desc()
1635 rxr->size = size; in fbnic_alloc_rx_ring_desc()
1642 size_t size = array_size(sizeof(*rxr->rx_buf), rxr->size_mask + 1); in fbnic_alloc_rx_ring_buffer()
1644 if (rxr->flags & FBNIC_RING_F_CTX) in fbnic_alloc_rx_ring_buffer()
1645 size = sizeof(*rxr->rx_buf) * (rxr->size_mask + 1); in fbnic_alloc_rx_ring_buffer()
1647 size = sizeof(*rxr->pkt); in fbnic_alloc_rx_ring_buffer()
1649 rxr->rx_buf = kvzalloc(size, GFP_KERNEL | __GFP_NOWARN); in fbnic_alloc_rx_ring_buffer()
1651 return rxr->rx_buf ? 0 : -ENOMEM; in fbnic_alloc_rx_ring_buffer()
1657 struct device *dev = fbn->netdev->dev.parent; in fbnic_alloc_rx_ring_resources()
1678 struct device *dev = fbn->netdev->dev.parent; in fbnic_free_qt_resources()
1680 fbnic_free_ring_resources(dev, &qt->cmpl); in fbnic_free_qt_resources()
1681 fbnic_free_ring_resources(dev, &qt->sub1); in fbnic_free_qt_resources()
1682 fbnic_free_ring_resources(dev, &qt->sub0); in fbnic_free_qt_resources()
1688 struct device *dev = fbn->netdev->dev.parent; in fbnic_alloc_tx_qt_resources()
1691 err = fbnic_alloc_tx_ring_resources(fbn, &qt->sub0); in fbnic_alloc_tx_qt_resources()
1695 err = fbnic_alloc_tx_ring_resources(fbn, &qt->cmpl); in fbnic_alloc_tx_qt_resources()
1702 fbnic_free_ring_resources(dev, &qt->sub0); in fbnic_alloc_tx_qt_resources()
1709 struct device *dev = fbn->netdev->dev.parent; in fbnic_alloc_rx_qt_resources()
1712 err = fbnic_alloc_rx_ring_resources(fbn, &qt->sub0); in fbnic_alloc_rx_qt_resources()
1716 err = fbnic_alloc_rx_ring_resources(fbn, &qt->sub1); in fbnic_alloc_rx_qt_resources()
1720 err = fbnic_alloc_rx_ring_resources(fbn, &qt->cmpl); in fbnic_alloc_rx_qt_resources()
1727 fbnic_free_ring_resources(dev, &qt->sub1); in fbnic_alloc_rx_qt_resources()
1729 fbnic_free_ring_resources(dev, &qt->sub0); in fbnic_alloc_rx_qt_resources()
1738 /* Free Tx Resources */ in fbnic_free_nv_resources()
1739 for (i = 0; i < nv->txt_count; i++) in fbnic_free_nv_resources()
1740 fbnic_free_qt_resources(fbn, &nv->qt[i]); in fbnic_free_nv_resources()
1742 for (j = 0; j < nv->rxt_count; j++, i++) in fbnic_free_nv_resources()
1743 fbnic_free_qt_resources(fbn, &nv->qt[i]); in fbnic_free_nv_resources()
1751 /* Allocate Tx Resources */ in fbnic_alloc_nv_resources()
1752 for (i = 0; i < nv->txt_count; i++) { in fbnic_alloc_nv_resources()
1753 err = fbnic_alloc_tx_qt_resources(fbn, &nv->qt[i]); in fbnic_alloc_nv_resources()
1759 for (j = 0; j < nv->rxt_count; j++, i++) { in fbnic_alloc_nv_resources()
1760 err = fbnic_alloc_rx_qt_resources(fbn, &nv->qt[i]); in fbnic_alloc_nv_resources()
1768 while (i--) in fbnic_alloc_nv_resources()
1769 fbnic_free_qt_resources(fbn, &nv->qt[i]); in fbnic_alloc_nv_resources()
1777 for (i = 0; i < fbn->num_napi; i++) in fbnic_free_resources()
1778 fbnic_free_nv_resources(fbn, fbn->napi[i]); in fbnic_free_resources()
1783 int i, err = -ENODEV; in fbnic_alloc_resources()
1785 for (i = 0; i < fbn->num_napi; i++) { in fbnic_alloc_resources()
1786 err = fbnic_alloc_nv_resources(fbn, fbn->napi[i]); in fbnic_alloc_resources()
1794 while (i--) in fbnic_alloc_resources()
1795 fbnic_free_nv_resources(fbn, fbn->napi[i]); in fbnic_alloc_resources()
1804 /* Associate Tx queue with NAPI */ in fbnic_set_netif_napi()
1805 for (i = 0; i < nv->txt_count; i++) { in fbnic_set_netif_napi()
1806 struct fbnic_q_triad *qt = &nv->qt[i]; in fbnic_set_netif_napi()
1808 netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx, in fbnic_set_netif_napi()
1809 NETDEV_QUEUE_TYPE_TX, &nv->napi); in fbnic_set_netif_napi()
1812 /* Associate Rx queue with NAPI */ in fbnic_set_netif_napi()
1813 for (j = 0; j < nv->rxt_count; j++, i++) { in fbnic_set_netif_napi()
1814 struct fbnic_q_triad *qt = &nv->qt[i]; in fbnic_set_netif_napi()
1816 netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx, in fbnic_set_netif_napi()
1817 NETDEV_QUEUE_TYPE_RX, &nv->napi); in fbnic_set_netif_napi()
1825 /* Disassociate Tx queue from NAPI */ in fbnic_reset_netif_napi()
1826 for (i = 0; i < nv->txt_count; i++) { in fbnic_reset_netif_napi()
1827 struct fbnic_q_triad *qt = &nv->qt[i]; in fbnic_reset_netif_napi()
1829 netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx, in fbnic_reset_netif_napi()
1833 /* Disassociate Rx queue from NAPI */ in fbnic_reset_netif_napi()
1834 for (j = 0; j < nv->rxt_count; j++, i++) { in fbnic_reset_netif_napi()
1835 struct fbnic_q_triad *qt = &nv->qt[i]; in fbnic_reset_netif_napi()
1837 netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx, in fbnic_reset_netif_napi()
1846 err = netif_set_real_num_queues(fbn->netdev, fbn->num_tx_queues, in fbnic_set_netif_queues()
1847 fbn->num_rx_queues); in fbnic_set_netif_queues()
1851 for (i = 0; i < fbn->num_napi; i++) in fbnic_set_netif_queues()
1852 fbnic_set_netif_napi(fbn->napi[i]); in fbnic_set_netif_queues()
1861 for (i = 0; i < fbn->num_napi; i++) in fbnic_reset_netif_queues()
1862 fbnic_reset_netif_napi(fbn->napi[i]); in fbnic_reset_netif_queues()
1899 for (i = 0; i < fbn->num_napi; i++) { in fbnic_napi_disable()
1900 napi_disable(&fbn->napi[i]->napi); in fbnic_napi_disable()
1902 fbnic_nv_irq_disable(fbn->napi[i]); in fbnic_napi_disable()
1908 struct fbnic_dev *fbd = fbn->fbd; in fbnic_disable()
1911 for (i = 0; i < fbn->num_napi; i++) { in fbnic_disable()
1912 struct fbnic_napi_vector *nv = fbn->napi[i]; in fbnic_disable()
1914 /* Disable Tx queue triads */ in fbnic_disable()
1915 for (t = 0; t < nv->txt_count; t++) { in fbnic_disable()
1916 struct fbnic_q_triad *qt = &nv->qt[t]; in fbnic_disable()
1918 fbnic_disable_twq0(&qt->sub0); in fbnic_disable()
1919 fbnic_disable_tcq(&qt->cmpl); in fbnic_disable()
1922 /* Disable Rx queue triads */ in fbnic_disable()
1923 for (j = 0; j < nv->rxt_count; j++, t++) { in fbnic_disable()
1924 struct fbnic_q_triad *qt = &nv->qt[t]; in fbnic_disable()
1926 fbnic_disable_bdq(&qt->sub0, &qt->sub1); in fbnic_disable()
1927 fbnic_disable_rcq(&qt->cmpl); in fbnic_disable()
1936 netdev_warn(fbd->netdev, "triggering Tx flush\n"); in fbnic_tx_flush()
1973 netdev_err(fbd->netdev, "error waiting for %s idle %d\n", dir, err); in fbnic_idle_dump()
1976 netdev_err(fbd->netdev, "0x%04x: %08x\n", in fbnic_idle_dump()
1983 static const struct fbnic_idle_regs tx[] = { in fbnic_wait_all_queues_idle() local
1997 false, fbd, tx, ARRAY_SIZE(tx)); in fbnic_wait_all_queues_idle()
1998 if (err == -ETIMEDOUT) { in fbnic_wait_all_queues_idle()
2002 fbd, tx, ARRAY_SIZE(tx)); in fbnic_wait_all_queues_idle()
2006 fbnic_idle_dump(fbd, tx, ARRAY_SIZE(tx), "Tx", err); in fbnic_wait_all_queues_idle()
2022 for (i = 0; i < fbn->num_napi; i++) { in fbnic_flush()
2023 struct fbnic_napi_vector *nv = fbn->napi[i]; in fbnic_flush()
2026 /* Flush any processed Tx Queue Triads and drop the rest */ in fbnic_flush()
2027 for (t = 0; t < nv->txt_count; t++) { in fbnic_flush()
2028 struct fbnic_q_triad *qt = &nv->qt[t]; in fbnic_flush()
2032 fbnic_clean_twq0(nv, 0, &qt->sub0, true, qt->sub0.tail); in fbnic_flush()
2034 /* Reset completion queue descriptor ring */ in fbnic_flush()
2035 memset(qt->cmpl.desc, 0, qt->cmpl.size); in fbnic_flush()
2037 /* Nothing else to do if Tx queue is disabled */ in fbnic_flush()
2038 if (qt->sub0.flags & FBNIC_RING_F_DISABLED) in fbnic_flush()
2041 /* Reset BQL associated with Tx queue */ in fbnic_flush()
2042 tx_queue = netdev_get_tx_queue(nv->napi.dev, in fbnic_flush()
2043 qt->sub0.q_idx); in fbnic_flush()
2047 /* Flush any processed Rx Queue Triads and drop the rest */ in fbnic_flush()
2048 for (j = 0; j < nv->rxt_count; j++, t++) { in fbnic_flush()
2049 struct fbnic_q_triad *qt = &nv->qt[t]; in fbnic_flush()
2052 fbnic_clean_bdq(nv, 0, &qt->sub0, qt->sub0.tail); in fbnic_flush()
2053 fbnic_clean_bdq(nv, 0, &qt->sub1, qt->sub1.tail); in fbnic_flush()
2055 /* Reset completion queue descriptor ring */ in fbnic_flush()
2056 memset(qt->cmpl.desc, 0, qt->cmpl.size); in fbnic_flush()
2058 fbnic_put_pkt_buff(nv, qt->cmpl.pkt, 0); in fbnic_flush()
2059 qt->cmpl.pkt->buff.data_hard_start = NULL; in fbnic_flush()
2068 for (i = 0; i < fbn->num_napi; i++) { in fbnic_fill()
2069 struct fbnic_napi_vector *nv = fbn->napi[i]; in fbnic_fill()
2075 for (j = 0, t = nv->txt_count; j < nv->rxt_count; j++, t++) { in fbnic_fill()
2076 struct fbnic_q_triad *qt = &nv->qt[t]; in fbnic_fill()
2079 fbnic_fill_bdq(nv, &qt->sub0); in fbnic_fill()
2080 fbnic_fill_bdq(nv, &qt->sub1); in fbnic_fill()
2087 u32 log_size = fls(twq->size_mask); in fbnic_enable_twq0()
2089 if (!twq->size_mask) in fbnic_enable_twq0()
2094 twq->tail = 0; in fbnic_enable_twq0()
2095 twq->head = 0; in fbnic_enable_twq0()
2098 fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_BAL, lower_32_bits(twq->dma)); in fbnic_enable_twq0()
2099 fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_BAH, upper_32_bits(twq->dma)); in fbnic_enable_twq0()
2110 u32 log_size = fls(tcq->size_mask); in fbnic_enable_tcq()
2112 if (!tcq->size_mask) in fbnic_enable_tcq()
2117 tcq->tail = 0; in fbnic_enable_tcq()
2118 tcq->head = 0; in fbnic_enable_tcq()
2121 fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_BAL, lower_32_bits(tcq->dma)); in fbnic_enable_tcq()
2122 fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_BAH, upper_32_bits(tcq->dma)); in fbnic_enable_tcq()
2127 /* Store interrupt information for the completion queue */ in fbnic_enable_tcq()
2128 fbnic_ring_wr32(tcq, FBNIC_QUEUE_TIM_CTL, nv->v_idx); in fbnic_enable_tcq()
2129 fbnic_ring_wr32(tcq, FBNIC_QUEUE_TIM_THRESHOLD, tcq->size_mask / 2); in fbnic_enable_tcq()
2132 /* Enable queue */ in fbnic_enable_tcq()
2143 ppq->tail = 0; in fbnic_enable_bdq()
2144 ppq->head = 0; in fbnic_enable_bdq()
2145 hpq->tail = 0; in fbnic_enable_bdq()
2146 hpq->head = 0; in fbnic_enable_bdq()
2148 log_size = fls(hpq->size_mask); in fbnic_enable_bdq()
2151 fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_HPQ_BAL, lower_32_bits(hpq->dma)); in fbnic_enable_bdq()
2152 fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_HPQ_BAH, upper_32_bits(hpq->dma)); in fbnic_enable_bdq()
2157 if (!ppq->size_mask) in fbnic_enable_bdq()
2160 log_size = fls(ppq->size_mask); in fbnic_enable_bdq()
2166 fbnic_ring_wr32(ppq, FBNIC_QUEUE_BDQ_PPQ_BAL, lower_32_bits(ppq->dma)); in fbnic_enable_bdq()
2167 fbnic_ring_wr32(ppq, FBNIC_QUEUE_BDQ_PPQ_BAH, upper_32_bits(ppq->dma)); in fbnic_enable_bdq()
2196 threshold = rx_desc ? : rcq->size_mask / 2; in fbnic_config_rim_threshold()
2204 struct fbnic_net *fbn = netdev_priv(nv->napi.dev); in fbnic_config_txrx_usecs()
2205 struct fbnic_dev *fbd = nv->fbd; in fbnic_config_txrx_usecs()
2208 val |= FIELD_PREP(FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT, fbn->rx_usecs) | in fbnic_config_txrx_usecs()
2210 val |= FIELD_PREP(FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT, fbn->tx_usecs) | in fbnic_config_txrx_usecs()
2213 fbnic_wr32(fbd, FBNIC_INTR_CQ_REARM(nv->v_idx), val); in fbnic_config_txrx_usecs()
2218 struct fbnic_net *fbn = netdev_priv(nv->napi.dev); in fbnic_config_rx_frames()
2221 for (i = nv->txt_count; i < nv->rxt_count + nv->txt_count; i++) { in fbnic_config_rx_frames()
2222 struct fbnic_q_triad *qt = &nv->qt[i]; in fbnic_config_rx_frames()
2224 fbnic_config_rim_threshold(&qt->cmpl, nv->v_idx, in fbnic_config_rx_frames()
2225 fbn->rx_max_frames * in fbnic_config_rx_frames()
2233 struct fbnic_net *fbn = netdev_priv(nv->napi.dev); in fbnic_enable_rcq()
2234 u32 log_size = fls(rcq->size_mask); in fbnic_enable_rcq()
2250 rcq->head = 0; in fbnic_enable_rcq()
2251 rcq->tail = 0; in fbnic_enable_rcq()
2254 fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_BAL, lower_32_bits(rcq->dma)); in fbnic_enable_rcq()
2255 fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_BAH, upper_32_bits(rcq->dma)); in fbnic_enable_rcq()
2260 /* Store interrupt information for the completion queue */ in fbnic_enable_rcq()
2261 fbnic_config_rim_threshold(rcq, nv->v_idx, fbn->rx_max_frames * in fbnic_enable_rcq()
2265 /* Enable queue */ in fbnic_enable_rcq()
2271 struct fbnic_dev *fbd = fbn->fbd; in fbnic_enable()
2274 for (i = 0; i < fbn->num_napi; i++) { in fbnic_enable()
2275 struct fbnic_napi_vector *nv = fbn->napi[i]; in fbnic_enable()
2278 /* Setup Tx Queue Triads */ in fbnic_enable()
2279 for (t = 0; t < nv->txt_count; t++) { in fbnic_enable()
2280 struct fbnic_q_triad *qt = &nv->qt[t]; in fbnic_enable()
2282 fbnic_enable_twq0(&qt->sub0); in fbnic_enable()
2283 fbnic_enable_tcq(nv, &qt->cmpl); in fbnic_enable()
2286 /* Setup Rx Queue Triads */ in fbnic_enable()
2287 for (j = 0; j < nv->rxt_count; j++, t++) { in fbnic_enable()
2288 struct fbnic_q_triad *qt = &nv->qt[t]; in fbnic_enable()
2290 fbnic_enable_bdq(&qt->sub0, &qt->sub1); in fbnic_enable()
2291 fbnic_config_drop_mode_rcq(nv, &qt->cmpl); in fbnic_enable()
2292 fbnic_enable_rcq(nv, &qt->cmpl); in fbnic_enable()
2307 struct fbnic_dev *fbd = fbn->fbd; in fbnic_napi_enable()
2310 for (i = 0; i < fbn->num_napi; i++) { in fbnic_napi_enable()
2311 struct fbnic_napi_vector *nv = fbn->napi[i]; in fbnic_napi_enable()
2313 napi_enable(&nv->napi); in fbnic_napi_enable()
2320 irqs[nv->v_idx / 32] |= BIT(nv->v_idx % 32); in fbnic_napi_enable()
2340 struct fbnic_dev *fbd = fbn->fbd; in fbnic_napi_depletion_check()
2343 for (i = 0; i < fbn->num_napi; i++) { in fbnic_napi_depletion_check()
2344 struct fbnic_napi_vector *nv = fbn->napi[i]; in fbnic_napi_depletion_check()
2347 for (t = nv->txt_count, j = 0; j < nv->rxt_count; j++, t++) { in fbnic_napi_depletion_check()
2351 if (fbnic_desc_used(&nv->qt[t].sub0) < 4 || in fbnic_napi_depletion_check()
2352 fbnic_desc_used(&nv->qt[t].sub1) < 4) in fbnic_napi_depletion_check()
2353 irqs[nv->v_idx / 32] |= BIT(nv->v_idx % 32); in fbnic_napi_depletion_check()