Lines Matching "eee-broken-100tx"

1 /* Broadcom NetXtreme-C/E network driver.
3 * Copyright (c) 2014-2016 Broadcom Corporation
4 * Copyright (c) 2016-2019 Broadcom Limited
25 #include <linux/dma-mapping.h>
91 [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
92 [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
93 [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
94 [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
96 [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
97 [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
98 [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
99 [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
100 [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
101 [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
102 [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
103 [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
104 [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
105 [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
106 [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
107 [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
108 [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
109 [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
110 [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
111 [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
112 [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
113 [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
114 [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
115 [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
116 [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
117 [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
118 [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
119 [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
120 [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
121 [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
122 [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
123 [BCM57608] = { "Broadcom BCM57608 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
124 [BCM57604] = { "Broadcom BCM57604 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
125 [BCM57602] = { "Broadcom BCM57602 NetXtreme-E 10Gb/25Gb/50Gb/100Gb Ethernet" },
126 [BCM57601] = { "Broadcom BCM57601 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
127 [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
128 [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
129 [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
130 [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
131 [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
132 [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
133 [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
134 [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
135 [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
136 [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
137 [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
138 [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
139 [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
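
The name table above is indexed by a board enum; typically in PCI drivers the ID table's driver_data carries that index so the probe path can report the matched board. A minimal userspace model of the lookup (the driver_data plumbing is an assumption about the probe path, and the table is abbreviated to two entries from the listing):

#include <stdio.h>

enum board_idx { BCM57301, BCM57302 };

/* same shape as the table above: board enum -> human-readable name */
static const struct {
	const char *name;
} board_info[] = {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
};

int main(void)
{
	unsigned long driver_data = BCM57302;	/* assumed: from the PCI ID match */

	printf("%s\n", board_info[driver_data].name);
	return 0;
}
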
265 writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
268 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\
269 (db)->doorbell)
272 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_MASK | \
273 DB_RING_IDX(db, idx), (db)->doorbell)
276 writel(DB_CP_REARM_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
279 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | \
280 DB_RING_IDX(db, idx), (db)->doorbell)
284 if (bp->flags & BNXT_FLAG_CHIP_P7) in bnxt_db_nq()
286 else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_db_nq()
294 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_db_nq_arm()
302 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_db_cq()
303 bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL | in bnxt_db_cq()
304 DB_RING_IDX(db, idx), db->doorbell); in bnxt_db_cq()
311 if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))) in bnxt_queue_fw_reset_work()
315 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay); in bnxt_queue_fw_reset_work()
317 schedule_delayed_work(&bp->fw_reset_task, delay); in bnxt_queue_fw_reset_work()
323 queue_work(bnxt_pf_wq, &bp->sp_task); in __bnxt_queue_sp_work()
325 schedule_work(&bp->sp_task); in __bnxt_queue_sp_work()
330 set_bit(event, &bp->sp_event); in bnxt_queue_sp_work()
336 if (!rxr->bnapi->in_reset) { in bnxt_sched_reset_rxr()
337 rxr->bnapi->in_reset = true; in bnxt_sched_reset_rxr()
338 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_sched_reset_rxr()
339 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); in bnxt_sched_reset_rxr()
341 set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event); in bnxt_sched_reset_rxr()
344 rxr->rx_next_cons = 0xffff; in bnxt_sched_reset_rxr()
350 struct bnxt_napi *bnapi = txr->bnapi; in bnxt_sched_reset_txr()
352 if (bnapi->tx_fault) in bnxt_sched_reset_txr()
355 netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)", in bnxt_sched_reset_txr()
356 txr->txq_index, txr->tx_hw_cons, in bnxt_sched_reset_txr()
357 txr->tx_cons, txr->tx_prod, curr); in bnxt_sched_reset_txr()
359 bnapi->tx_fault = 1; in bnxt_sched_reset_txr()
389 if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX) in bnxt_xmit_get_cfa_action()
392 return md_dst->u.port_info.port_id; in bnxt_xmit_get_cfa_action()
400 bnxt_db_write(bp, &txr->tx_db, prod); in bnxt_txr_db_kick()
401 txr->kick_pending = 0; in bnxt_txr_db_kick()
415 struct pci_dev *pdev = bp->pdev; in bnxt_start_xmit()
421 if (unlikely(i >= bp->tx_nr_rings)) { in bnxt_start_xmit()
428 txr = &bp->tx_ring[bp->tx_ring_map[i]]; in bnxt_start_xmit()
429 prod = txr->tx_prod; in bnxt_start_xmit()
432 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) { in bnxt_start_xmit()
434 if (net_ratelimit() && txr->kick_pending) in bnxt_start_xmit()
438 bp->tx_wake_thresh)) in bnxt_start_xmit()
445 length = skb->len; in bnxt_start_xmit()
447 last_frag = skb_shinfo(skb)->nr_frags; in bnxt_start_xmit()
449 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; in bnxt_start_xmit()
451 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; in bnxt_start_xmit()
452 tx_buf->skb = skb; in bnxt_start_xmit()
453 tx_buf->nr_frags = last_frag; in bnxt_start_xmit()
463 if (skb->vlan_proto == htons(ETH_P_8021Q)) in bnxt_start_xmit()
467 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { in bnxt_start_xmit()
468 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; in bnxt_start_xmit()
470 if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) && in bnxt_start_xmit()
471 atomic_dec_if_positive(&ptp->tx_avail) >= 0) { in bnxt_start_xmit()
472 if (!bnxt_ptp_parse(skb, &ptp->tx_seqid, in bnxt_start_xmit()
473 &ptp->tx_hdr_off)) { in bnxt_start_xmit()
475 ptp->tx_hdr_off += VLAN_HLEN; in bnxt_start_xmit()
477 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in bnxt_start_xmit()
479 atomic_inc(&bp->ptp_cfg->tx_avail); in bnxt_start_xmit()
484 if (unlikely(skb->no_fcs)) in bnxt_start_xmit()
487 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh && in bnxt_start_xmit()
489 struct tx_push_buffer *tx_push_buf = txr->tx_push; in bnxt_start_xmit()
490 struct tx_push_bd *tx_push = &tx_push_buf->push_bd; in bnxt_start_xmit()
491 struct tx_bd_ext *tx_push1 = &tx_push->txbd2; in bnxt_start_xmit()
492 void __iomem *db = txr->tx_db.doorbell; in bnxt_start_xmit()
493 void *pdata = tx_push_buf->data; in bnxt_start_xmit()
498 tx_push->tx_bd_len_flags_type = in bnxt_start_xmit()
506 if (skb->ip_summed == CHECKSUM_PARTIAL) in bnxt_start_xmit()
507 tx_push1->tx_bd_hsize_lflags = in bnxt_start_xmit()
510 tx_push1->tx_bd_hsize_lflags = 0; in bnxt_start_xmit()
512 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); in bnxt_start_xmit()
513 tx_push1->tx_bd_cfa_action = in bnxt_start_xmit()
517 end = PTR_ALIGN(end, 8) - 1; in bnxt_start_xmit()
523 skb_frag_t *frag = &skb_shinfo(skb)->frags[j]; in bnxt_start_xmit()
534 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type; in bnxt_start_xmit()
535 txbd->tx_bd_haddr = txr->data_mapping; in bnxt_start_xmit()
536 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2); in bnxt_start_xmit()
538 tx_push->tx_bd_opaque = txbd->tx_bd_opaque; in bnxt_start_xmit()
539 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; in bnxt_start_xmit()
542 tx_push->doorbell = in bnxt_start_xmit()
544 DB_RING_IDX(&txr->tx_db, prod)); in bnxt_start_xmit()
545 WRITE_ONCE(txr->tx_prod, prod); in bnxt_start_xmit()
547 tx_buf->is_push = 1; in bnxt_start_xmit()
548 netdev_tx_sent_queue(txq, skb->len); in bnxt_start_xmit()
555 (push_len - 16) << 1); in bnxt_start_xmit()
565 pad = BNXT_MIN_PKT_SIZE - length; in bnxt_start_xmit()
572 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE); in bnxt_start_xmit()
574 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) in bnxt_start_xmit()
581 txbd->tx_bd_haddr = cpu_to_le64(mapping); in bnxt_start_xmit()
582 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag); in bnxt_start_xmit()
586 &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; in bnxt_start_xmit()
588 txbd1->tx_bd_hsize_lflags = lflags; in bnxt_start_xmit()
590 bool udp_gso = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4); in bnxt_start_xmit()
593 if (skb->encapsulation) { in bnxt_start_xmit()
606 txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO | in bnxt_start_xmit()
608 (hdr_len << (TX_BD_HSIZE_SHIFT - 1))); in bnxt_start_xmit()
609 length = skb_shinfo(skb)->gso_size; in bnxt_start_xmit()
610 txbd1->tx_bd_mss = cpu_to_le32(length); in bnxt_start_xmit()
612 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { in bnxt_start_xmit()
613 txbd1->tx_bd_hsize_lflags |= in bnxt_start_xmit()
615 txbd1->tx_bd_mss = 0; in bnxt_start_xmit()
620 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n", in bnxt_start_xmit()
621 skb->len); in bnxt_start_xmit()
626 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); in bnxt_start_xmit()
628 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); in bnxt_start_xmit()
629 txbd1->tx_bd_cfa_action = in bnxt_start_xmit()
633 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in bnxt_start_xmit()
636 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; in bnxt_start_xmit()
639 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len, in bnxt_start_xmit()
642 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) in bnxt_start_xmit()
645 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; in bnxt_start_xmit()
648 txbd->tx_bd_haddr = cpu_to_le64(mapping); in bnxt_start_xmit()
651 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); in bnxt_start_xmit()
655 txbd->tx_bd_len_flags_type = in bnxt_start_xmit()
659 netdev_tx_sent_queue(txq, skb->len); in bnxt_start_xmit()
664 WRITE_ONCE(txr->tx_prod, prod); in bnxt_start_xmit()
669 if (free_size >= bp->tx_wake_thresh) in bnxt_start_xmit()
670 txbd0->tx_bd_len_flags_type |= in bnxt_start_xmit()
672 txr->kick_pending = 1; in bnxt_start_xmit()
678 if (netdev_xmit_more() && !tx_buf->is_push) { in bnxt_start_xmit()
679 txbd0->tx_bd_len_flags_type &= in bnxt_start_xmit()
685 bp->tx_wake_thresh); in bnxt_start_xmit()
691 atomic_inc(&bp->ptp_cfg->tx_avail); in bnxt_start_xmit()
696 prod = txr->tx_prod; in bnxt_start_xmit()
697 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; in bnxt_start_xmit()
698 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), in bnxt_start_xmit()
705 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; in bnxt_start_xmit()
706 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping), in bnxt_start_xmit()
707 skb_frag_size(&skb_shinfo(skb)->frags[i]), in bnxt_start_xmit()
714 if (txr->kick_pending) in bnxt_start_xmit()
715 bnxt_txr_db_kick(bp, txr, txr->tx_prod); in bnxt_start_xmit()
716 txr->tx_buf_ring[txr->tx_prod].skb = NULL; in bnxt_start_xmit()
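
The kick_pending/netdev_xmit_more() logic in bnxt_start_xmit() above is doorbell batching: the producer index advances for every packet, but the MMIO doorbell write is deferred while the stack signals that more packets are coming, then flushed once at the end (or before unwinding on the DMA-error path). A compacted sketch of the pattern, with a counter standing in for the BAR write:

#include <stdbool.h>
#include <stdio.h>

struct txring {
	unsigned int prod;	/* software producer index */
	bool kick_pending;	/* doorbell write still owed to the device */
	unsigned int db_writes;	/* stands in for MMIO doorbell writes */
};

static void db_kick(struct txring *txr)
{
	txr->db_writes++;	/* real code: bnxt_db_write(bp, &txr->tx_db, prod) */
	txr->kick_pending = false;
}

/* queue one packet; 'more' models netdev_xmit_more() */
static void xmit(struct txring *txr, bool more)
{
	txr->prod++;
	if (more)
		txr->kick_pending = true;	/* defer the doorbell */
	else
		db_kick(txr);			/* last packet: flush */
}

int main(void)
{
	struct txring txr = { 0 };

	xmit(&txr, true);
	xmit(&txr, true);
	xmit(&txr, false);	/* one doorbell covers three packets */
	printf("packets=%u doorbells=%u\n", txr.prod, txr.db_writes);
	return 0;
}
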
724 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index); in __bnxt_tx_int()
725 struct pci_dev *pdev = bp->pdev; in __bnxt_tx_int()
726 u16 hw_cons = txr->tx_hw_cons; in __bnxt_tx_int()
728 u16 cons = txr->tx_cons; in __bnxt_tx_int()
736 tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)]; in __bnxt_tx_int()
738 skb = tx_buf->skb; in __bnxt_tx_int()
739 tx_buf->skb = NULL; in __bnxt_tx_int()
747 tx_bytes += skb->len; in __bnxt_tx_int()
749 if (tx_buf->is_push) { in __bnxt_tx_int()
750 tx_buf->is_push = 0; in __bnxt_tx_int()
754 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), in __bnxt_tx_int()
756 last = tx_buf->nr_frags; in __bnxt_tx_int()
760 tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)]; in __bnxt_tx_int()
762 &pdev->dev, in __bnxt_tx_int()
764 skb_frag_size(&skb_shinfo(skb)->frags[j]), in __bnxt_tx_int()
767 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { in __bnxt_tx_int()
773 atomic_inc(&bp->ptp_cfg->tx_avail); in __bnxt_tx_int()
783 WRITE_ONCE(txr->tx_cons, cons); in __bnxt_tx_int()
786 bnxt_tx_avail(bp, txr), bp->tx_wake_thresh, in __bnxt_tx_int()
787 READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING); in __bnxt_tx_int()
796 if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons)) in bnxt_tx_int()
799 bnapi->events &= ~BNXT_TX_CMP_EVENT; in bnxt_tx_int()
810 page = page_pool_dev_alloc_frag(rxr->page_pool, offset, in __bnxt_alloc_rx_page()
813 page = page_pool_dev_alloc_pages(rxr->page_pool); in __bnxt_alloc_rx_page()
827 struct pci_dev *pdev = bp->pdev; in __bnxt_alloc_rx_frag()
830 data = napi_alloc_frag(bp->rx_buf_size); in __bnxt_alloc_rx_frag()
832 data = netdev_alloc_frag(bp->rx_buf_size); in __bnxt_alloc_rx_frag()
836 *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset, in __bnxt_alloc_rx_frag()
837 bp->rx_buf_use_size, bp->rx_dir, in __bnxt_alloc_rx_frag()
840 if (dma_mapping_error(&pdev->dev, *mapping)) { in __bnxt_alloc_rx_frag()
850 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)]; in bnxt_alloc_rx_data()
851 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)]; in bnxt_alloc_rx_data()
860 return -ENOMEM; in bnxt_alloc_rx_data()
862 mapping += bp->rx_dma_offset; in bnxt_alloc_rx_data()
863 rx_buf->data = page; in bnxt_alloc_rx_data()
864 rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset; in bnxt_alloc_rx_data()
869 return -ENOMEM; in bnxt_alloc_rx_data()
871 rx_buf->data = data; in bnxt_alloc_rx_data()
872 rx_buf->data_ptr = data + bp->rx_offset; in bnxt_alloc_rx_data()
874 rx_buf->mapping = mapping; in bnxt_alloc_rx_data()
876 rxbd->rx_bd_haddr = cpu_to_le64(mapping); in bnxt_alloc_rx_data()
882 u16 prod = rxr->rx_prod; in bnxt_reuse_rx_data()
884 struct bnxt *bp = rxr->bnapi->bp; in bnxt_reuse_rx_data()
887 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)]; in bnxt_reuse_rx_data()
888 cons_rx_buf = &rxr->rx_buf_ring[cons]; in bnxt_reuse_rx_data()
890 prod_rx_buf->data = data; in bnxt_reuse_rx_data()
891 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr; in bnxt_reuse_rx_data()
893 prod_rx_buf->mapping = cons_rx_buf->mapping; in bnxt_reuse_rx_data()
895 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)]; in bnxt_reuse_rx_data()
896 cons_bd = &rxr->rx_desc_ring[RX_RING(bp, cons)][RX_IDX(cons)]; in bnxt_reuse_rx_data()
898 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr; in bnxt_reuse_rx_data()
903 u16 next, max = rxr->rx_agg_bmap_size; in bnxt_find_next_agg_idx()
905 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx); in bnxt_find_next_agg_idx()
907 next = find_first_zero_bit(rxr->rx_agg_bmap, max); in bnxt_find_next_agg_idx()
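
bnxt_find_next_agg_idx() above is a wrap-around free-slot search: scan the aggregation bitmap from idx, and if every bit up to max is taken, restart the scan from bit 0. A sketch with a plain byte array standing in for the kernel bitmap helpers:

#include <stdio.h>

static unsigned int find_next_zero(const unsigned char *bmap,
				   unsigned int max, unsigned int idx)
{
	unsigned int i;

	for (i = idx; i < max; i++)	/* forward scan from idx */
		if (!bmap[i])
			return i;
	for (i = 0; i < idx; i++)	/* wrap: restart from bit 0 */
		if (!bmap[i])
			return i;
	return max;			/* bitmap full */
}

int main(void)
{
	unsigned char bmap[8] = { 1, 1, 0, 1, 1, 1, 1, 1 };

	printf("free slot: %u\n", find_next_zero(bmap, 8, 4)); /* wraps to 2 */
	return 0;
}
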
916 &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)]; in bnxt_alloc_rx_page()
920 u16 sw_prod = rxr->rx_sw_agg_prod; in bnxt_alloc_rx_page()
926 return -ENOMEM; in bnxt_alloc_rx_page()
928 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) in bnxt_alloc_rx_page()
931 __set_bit(sw_prod, rxr->rx_agg_bmap); in bnxt_alloc_rx_page()
932 rx_agg_buf = &rxr->rx_agg_ring[sw_prod]; in bnxt_alloc_rx_page()
933 rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod)); in bnxt_alloc_rx_page()
935 rx_agg_buf->page = page; in bnxt_alloc_rx_page()
936 rx_agg_buf->offset = offset; in bnxt_alloc_rx_page()
937 rx_agg_buf->mapping = mapping; in bnxt_alloc_rx_page()
938 rxbd->rx_bd_haddr = cpu_to_le64(mapping); in bnxt_alloc_rx_page()
939 rxbd->rx_bd_opaque = sw_prod; in bnxt_alloc_rx_page()
951 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; in bnxt_get_agg()
959 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id]; in bnxt_get_tpa_agg_p5()
961 return &tpa_info->agg_arr[curr]; in bnxt_get_tpa_agg_p5()
967 struct bnxt_napi *bnapi = cpr->bnapi; in bnxt_reuse_rx_agg_bufs()
968 struct bnxt *bp = bnapi->bp; in bnxt_reuse_rx_agg_bufs()
969 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; in bnxt_reuse_rx_agg_bufs()
970 u16 prod = rxr->rx_agg_prod; in bnxt_reuse_rx_agg_bufs()
971 u16 sw_prod = rxr->rx_sw_agg_prod; in bnxt_reuse_rx_agg_bufs()
975 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa) in bnxt_reuse_rx_agg_bufs()
989 cons = agg->rx_agg_cmp_opaque; in bnxt_reuse_rx_agg_bufs()
990 __clear_bit(cons, rxr->rx_agg_bmap); in bnxt_reuse_rx_agg_bufs()
992 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) in bnxt_reuse_rx_agg_bufs()
995 __set_bit(sw_prod, rxr->rx_agg_bmap); in bnxt_reuse_rx_agg_bufs()
996 prod_rx_buf = &rxr->rx_agg_ring[sw_prod]; in bnxt_reuse_rx_agg_bufs()
997 cons_rx_buf = &rxr->rx_agg_ring[cons]; in bnxt_reuse_rx_agg_bufs()
1000 * set cons_rx_buf->page to NULL first. in bnxt_reuse_rx_agg_bufs()
1002 page = cons_rx_buf->page; in bnxt_reuse_rx_agg_bufs()
1003 cons_rx_buf->page = NULL; in bnxt_reuse_rx_agg_bufs()
1004 prod_rx_buf->page = page; in bnxt_reuse_rx_agg_bufs()
1005 prod_rx_buf->offset = cons_rx_buf->offset; in bnxt_reuse_rx_agg_bufs()
1007 prod_rx_buf->mapping = cons_rx_buf->mapping; in bnxt_reuse_rx_agg_bufs()
1009 prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)]; in bnxt_reuse_rx_agg_bufs()
1011 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping); in bnxt_reuse_rx_agg_bufs()
1012 prod_bd->rx_bd_opaque = sw_prod; in bnxt_reuse_rx_agg_bufs()
1017 rxr->rx_agg_prod = prod; in bnxt_reuse_rx_agg_bufs()
1018 rxr->rx_sw_agg_prod = sw_prod; in bnxt_reuse_rx_agg_bufs()
1029 u16 prod = rxr->rx_prod; in bnxt_rx_multi_page_skb()
1038 dma_addr -= bp->rx_dma_offset; in bnxt_rx_multi_page_skb()
1039 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE, in bnxt_rx_multi_page_skb()
1040 bp->rx_dir); in bnxt_rx_multi_page_skb()
1041 skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE); in bnxt_rx_multi_page_skb()
1043 page_pool_recycle_direct(rxr->page_pool, page); in bnxt_rx_multi_page_skb()
1047 skb_reserve(skb, bp->rx_offset); in bnxt_rx_multi_page_skb()
1063 u16 prod = rxr->rx_prod; in bnxt_rx_page_skb()
1072 dma_addr -= bp->rx_dma_offset; in bnxt_rx_page_skb()
1073 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE, in bnxt_rx_page_skb()
1074 bp->rx_dir); in bnxt_rx_page_skb()
1077 payload = eth_get_headlen(bp->dev, data_ptr, len); in bnxt_rx_page_skb()
1079 skb = napi_alloc_skb(&rxr->bnapi->napi, payload); in bnxt_rx_page_skb()
1081 page_pool_recycle_direct(rxr->page_pool, page); in bnxt_rx_page_skb()
1086 off = (void *)data_ptr - page_address(page); in bnxt_rx_page_skb()
1088 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN, in bnxt_rx_page_skb()
1091 frag = &skb_shinfo(skb)->frags[0]; in bnxt_rx_page_skb()
1094 skb->data_len -= payload; in bnxt_rx_page_skb()
1095 skb->tail += payload; in bnxt_rx_page_skb()
1106 u16 prod = rxr->rx_prod; in bnxt_rx_skb()
1116 skb = napi_build_skb(data, bp->rx_buf_size); in bnxt_rx_skb()
1117 dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, in bnxt_rx_skb()
1118 bp->rx_dir, DMA_ATTR_WEAK_ORDERING); in bnxt_rx_skb()
1124 skb_reserve(skb, bp->rx_offset); in bnxt_rx_skb()
1135 struct bnxt_napi *bnapi = cpr->bnapi; in __bnxt_rx_agg_pages()
1136 struct pci_dev *pdev = bp->pdev; in __bnxt_rx_agg_pages()
1137 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; in __bnxt_rx_agg_pages()
1138 u16 prod = rxr->rx_agg_prod; in __bnxt_rx_agg_pages()
1142 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa) in __bnxt_rx_agg_pages()
1146 skb_frag_t *frag = &shinfo->frags[i]; in __bnxt_rx_agg_pages()
1157 cons = agg->rx_agg_cmp_opaque; in __bnxt_rx_agg_pages()
1158 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) & in __bnxt_rx_agg_pages()
1161 cons_rx_buf = &rxr->rx_agg_ring[cons]; in __bnxt_rx_agg_pages()
1162 skb_frag_fill_page_desc(frag, cons_rx_buf->page, in __bnxt_rx_agg_pages()
1163 cons_rx_buf->offset, frag_len); in __bnxt_rx_agg_pages()
1164 shinfo->nr_frags = i + 1; in __bnxt_rx_agg_pages()
1165 __clear_bit(cons, rxr->rx_agg_bmap); in __bnxt_rx_agg_pages()
1171 mapping = cons_rx_buf->mapping; in __bnxt_rx_agg_pages()
1172 page = cons_rx_buf->page; in __bnxt_rx_agg_pages()
1173 cons_rx_buf->page = NULL; in __bnxt_rx_agg_pages()
1179 --shinfo->nr_frags; in __bnxt_rx_agg_pages()
1180 cons_rx_buf->page = page; in __bnxt_rx_agg_pages()
1185 rxr->rx_agg_prod = prod; in __bnxt_rx_agg_pages()
1186 bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa); in __bnxt_rx_agg_pages()
1190 dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE, in __bnxt_rx_agg_pages()
1191 bp->rx_dir); in __bnxt_rx_agg_pages()
1196 rxr->rx_agg_prod = prod; in __bnxt_rx_agg_pages()
1216 skb->data_len += total_frag_len; in bnxt_rx_agg_pages_skb()
1217 skb->len += total_frag_len; in bnxt_rx_agg_pages_skb()
1218 skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs; in bnxt_rx_agg_pages_skb()
1231 shinfo->nr_frags = 0; in bnxt_rx_agg_pages_xdp()
1237 shinfo->nr_frags = agg_bufs; in bnxt_rx_agg_pages_xdp()
1238 shinfo->xdp_frags_size = total_frag_len; in bnxt_rx_agg_pages_xdp()
1252 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)]; in bnxt_agg_bufs_valid()
1260 struct bnxt *bp = bnapi->bp; in bnxt_copy_skb()
1261 struct pci_dev *pdev = bp->pdev; in bnxt_copy_skb()
1264 skb = napi_alloc_skb(&bnapi->napi, len); in bnxt_copy_skb()
1268 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh, in bnxt_copy_skb()
1269 bp->rx_dir); in bnxt_copy_skb()
1271 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN, in bnxt_copy_skb()
1274 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh, in bnxt_copy_skb()
1275 bp->rx_dir); in bnxt_copy_skb()
1291 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & in bnxt_discard_rx()
1297 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_discard_rx()
1305 return -EBUSY; in bnxt_discard_rx()
1313 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; in bnxt_alloc_agg_idx()
1316 if (test_bit(idx, map->agg_idx_bmap)) in bnxt_alloc_agg_idx()
1317 idx = find_first_zero_bit(map->agg_idx_bmap, in bnxt_alloc_agg_idx()
1319 __set_bit(idx, map->agg_idx_bmap); in bnxt_alloc_agg_idx()
1320 map->agg_id_tbl[agg_id] = idx; in bnxt_alloc_agg_idx()
1326 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; in bnxt_free_agg_idx()
1328 __clear_bit(idx, map->agg_idx_bmap); in bnxt_free_agg_idx()
1333 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; in bnxt_lookup_agg_idx()
1335 return map->agg_id_tbl[agg_id]; in bnxt_lookup_agg_idx()
1342 tpa_info->cfa_code_valid = 1; in bnxt_tpa_metadata()
1343 tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1); in bnxt_tpa_metadata()
1344 tpa_info->vlan_valid = 0; in bnxt_tpa_metadata()
1345 if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) { in bnxt_tpa_metadata()
1346 tpa_info->vlan_valid = 1; in bnxt_tpa_metadata()
1347 tpa_info->metadata = in bnxt_tpa_metadata()
1348 le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata); in bnxt_tpa_metadata()
1356 tpa_info->vlan_valid = 0; in bnxt_tpa_metadata_v2()
1361 tpa_info->vlan_valid = 1; in bnxt_tpa_metadata_v2()
1364 tpa_info->metadata = vlan_proto << 16 | in bnxt_tpa_metadata_v2()
1379 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in bnxt_tpa_start()
1385 cons = tpa_start->rx_tpa_start_cmp_opaque; in bnxt_tpa_start()
1386 prod = rxr->rx_prod; in bnxt_tpa_start()
1387 cons_rx_buf = &rxr->rx_buf_ring[cons]; in bnxt_tpa_start()
1388 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)]; in bnxt_tpa_start()
1389 tpa_info = &rxr->rx_tpa[agg_id]; in bnxt_tpa_start()
1391 if (unlikely(cons != rxr->rx_next_cons || in bnxt_tpa_start()
1393 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n", in bnxt_tpa_start()
1394 cons, rxr->rx_next_cons, in bnxt_tpa_start()
1399 prod_rx_buf->data = tpa_info->data; in bnxt_tpa_start()
1400 prod_rx_buf->data_ptr = tpa_info->data_ptr; in bnxt_tpa_start()
1402 mapping = tpa_info->mapping; in bnxt_tpa_start()
1403 prod_rx_buf->mapping = mapping; in bnxt_tpa_start()
1405 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)]; in bnxt_tpa_start()
1407 prod_bd->rx_bd_haddr = cpu_to_le64(mapping); in bnxt_tpa_start()
1409 tpa_info->data = cons_rx_buf->data; in bnxt_tpa_start()
1410 tpa_info->data_ptr = cons_rx_buf->data_ptr; in bnxt_tpa_start()
1411 cons_rx_buf->data = NULL; in bnxt_tpa_start()
1412 tpa_info->mapping = cons_rx_buf->mapping; in bnxt_tpa_start()
1414 tpa_info->len = in bnxt_tpa_start()
1415 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >> in bnxt_tpa_start()
1418 tpa_info->hash_type = PKT_HASH_TYPE_L4; in bnxt_tpa_start()
1419 tpa_info->gso_type = SKB_GSO_TCPV4; in bnxt_tpa_start()
1421 tpa_info->gso_type = SKB_GSO_TCPV6; in bnxt_tpa_start()
1422 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */ in bnxt_tpa_start()
1425 tpa_info->gso_type = SKB_GSO_TCPV6; in bnxt_tpa_start()
1426 tpa_info->rss_hash = in bnxt_tpa_start()
1427 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash); in bnxt_tpa_start()
1429 tpa_info->hash_type = PKT_HASH_TYPE_NONE; in bnxt_tpa_start()
1430 tpa_info->gso_type = 0; in bnxt_tpa_start()
1431 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n"); in bnxt_tpa_start()
1433 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2); in bnxt_tpa_start()
1434 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info); in bnxt_tpa_start()
1439 tpa_info->agg_count = 0; in bnxt_tpa_start()
1441 rxr->rx_prod = NEXT_RX(prod); in bnxt_tpa_start()
1443 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons)); in bnxt_tpa_start()
1444 cons_rx_buf = &rxr->rx_buf_ring[cons]; in bnxt_tpa_start()
1446 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data); in bnxt_tpa_start()
1447 rxr->rx_prod = NEXT_RX(rxr->rx_prod); in bnxt_tpa_start()
1448 cons_rx_buf->data = NULL; in bnxt_tpa_start()
1463 struct iphdr *iph = (struct iphdr *)skb->data; in bnxt_gro_tunnel()
1465 if (iph->protocol == IPPROTO_UDP) in bnxt_gro_tunnel()
1468 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; in bnxt_gro_tunnel()
1470 if (iph->nexthdr == IPPROTO_UDP) in bnxt_gro_tunnel()
1474 if (uh->check) in bnxt_gro_tunnel()
1475 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM; in bnxt_gro_tunnel()
1477 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; in bnxt_gro_tunnel()
1490 u32 hdr_info = tpa_info->hdr_info; in bnxt_gro_func_5731x()
1503 __be16 proto = *((__be16 *)(skb->data + inner_ip_off - in bnxt_gro_func_5731x()
1504 ETH_HLEN - 2)); in bnxt_gro_func_5731x()
1515 inner_ip_off -= 4; in bnxt_gro_func_5731x()
1516 inner_mac_off -= 4; in bnxt_gro_func_5731x()
1517 outer_ip_off -= 4; in bnxt_gro_func_5731x()
1520 nw_off = inner_ip_off - ETH_HLEN; in bnxt_gro_func_5731x()
1522 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) { in bnxt_gro_func_5731x()
1526 len = skb->len - skb_transport_offset(skb); in bnxt_gro_func_5731x()
1528 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0); in bnxt_gro_func_5731x()
1533 len = skb->len - skb_transport_offset(skb); in bnxt_gro_func_5731x()
1535 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0); in bnxt_gro_func_5731x()
1539 __be16 proto = *((__be16 *)(skb->data + outer_ip_off - in bnxt_gro_func_5731x()
1540 ETH_HLEN - 2)); in bnxt_gro_func_5731x()
1554 u32 hdr_info = tpa_info->hdr_info; in bnxt_gro_func_5750x()
1561 nw_off = inner_ip_off - ETH_HLEN; in bnxt_gro_func_5750x()
1563 iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ? in bnxt_gro_func_5750x()
1568 __be16 proto = *((__be16 *)(skb->data + outer_ip_off - in bnxt_gro_func_5750x()
1569 ETH_HLEN - 2)); in bnxt_gro_func_5750x()
1591 if (tpa_info->gso_type == SKB_GSO_TCPV4) { in bnxt_gro_func_5730x()
1594 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len - in bnxt_gro_func_5730x()
1599 len = skb->len - skb_transport_offset(skb); in bnxt_gro_func_5730x()
1601 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0); in bnxt_gro_func_5730x()
1602 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) { in bnxt_gro_func_5730x()
1605 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len - in bnxt_gro_func_5730x()
1610 len = skb->len - skb_transport_offset(skb); in bnxt_gro_func_5730x()
1612 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0); in bnxt_gro_func_5730x()
1619 bnxt_gro_tunnel(skb, skb->protocol); in bnxt_gro_func_5730x()
1638 NAPI_GRO_CB(skb)->count = segs; in bnxt_gro_skb()
1639 skb_shinfo(skb)->gso_size = in bnxt_gro_skb()
1640 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len); in bnxt_gro_skb()
1641 skb_shinfo(skb)->gso_type = tpa_info->gso_type; in bnxt_gro_skb()
1642 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_gro_skb()
1646 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb); in bnxt_gro_skb()
1654 * netdev (vf-rep or PF) the packet is destined to.
1660 /* if vf-rep dev is NULL, the packet must belong to the PF */ in bnxt_get_pkt_dev()
1661 return dev ? dev : bp->dev; in bnxt_get_pkt_dev()
1671 struct bnxt_napi *bnapi = cpr->bnapi; in bnxt_tpa_end()
1672 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; in bnxt_tpa_end()
1673 struct net_device *dev = bp->dev; in bnxt_tpa_end()
1683 if (unlikely(bnapi->in_reset)) { in bnxt_tpa_end()
1687 return ERR_PTR(-EBUSY); in bnxt_tpa_end()
1691 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in bnxt_tpa_end()
1695 tpa_info = &rxr->rx_tpa[agg_id]; in bnxt_tpa_end()
1696 if (unlikely(agg_bufs != tpa_info->agg_count)) { in bnxt_tpa_end()
1697 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n", in bnxt_tpa_end()
1698 agg_bufs, tpa_info->agg_count); in bnxt_tpa_end()
1699 agg_bufs = tpa_info->agg_count; in bnxt_tpa_end()
1701 tpa_info->agg_count = 0; in bnxt_tpa_end()
1705 gro = !!(bp->flags & BNXT_FLAG_GRO); in bnxt_tpa_end()
1709 tpa_info = &rxr->rx_tpa[agg_id]; in bnxt_tpa_end()
1713 return ERR_PTR(-EBUSY); in bnxt_tpa_end()
1720 data = tpa_info->data; in bnxt_tpa_end()
1721 data_ptr = tpa_info->data_ptr; in bnxt_tpa_end()
1723 len = tpa_info->len; in bnxt_tpa_end()
1724 mapping = tpa_info->mapping; in bnxt_tpa_end()
1729 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n", in bnxt_tpa_end()
1734 if (len <= bp->rx_copy_thresh) { in bnxt_tpa_end()
1738 cpr->sw_stats.rx.rx_oom_discards += 1; in bnxt_tpa_end()
1748 cpr->sw_stats.rx.rx_oom_discards += 1; in bnxt_tpa_end()
1752 tpa_info->data = new_data; in bnxt_tpa_end()
1753 tpa_info->data_ptr = new_data + bp->rx_offset; in bnxt_tpa_end()
1754 tpa_info->mapping = new_mapping; in bnxt_tpa_end()
1756 skb = napi_build_skb(data, bp->rx_buf_size); in bnxt_tpa_end()
1757 dma_unmap_single_attrs(&bp->pdev->dev, mapping, in bnxt_tpa_end()
1758 bp->rx_buf_use_size, bp->rx_dir, in bnxt_tpa_end()
1764 cpr->sw_stats.rx.rx_oom_discards += 1; in bnxt_tpa_end()
1767 skb_reserve(skb, bp->rx_offset); in bnxt_tpa_end()
1775 cpr->sw_stats.rx.rx_oom_discards += 1; in bnxt_tpa_end()
1780 if (tpa_info->cfa_code_valid) in bnxt_tpa_end()
1781 dev = bnxt_get_pkt_dev(bp, tpa_info->cfa_code); in bnxt_tpa_end()
1782 skb->protocol = eth_type_trans(skb, dev); in bnxt_tpa_end()
1784 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE) in bnxt_tpa_end()
1785 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type); in bnxt_tpa_end()
1787 if (tpa_info->vlan_valid && in bnxt_tpa_end()
1788 (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) { in bnxt_tpa_end()
1789 __be16 vlan_proto = htons(tpa_info->metadata >> in bnxt_tpa_end()
1791 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK; in bnxt_tpa_end()
1802 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) { in bnxt_tpa_end()
1803 skb->ip_summed = CHECKSUM_UNNECESSARY; in bnxt_tpa_end()
1804 skb->csum_level = in bnxt_tpa_end()
1805 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3; in bnxt_tpa_end()
1821 tpa_info = &rxr->rx_tpa[agg_id]; in bnxt_tpa_agg()
1822 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS); in bnxt_tpa_agg()
1823 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg; in bnxt_tpa_agg()
1831 if (skb->dev != bp->dev) { in bnxt_deliver_skb()
1832 /* this packet belongs to a vf-rep */ in bnxt_deliver_skb()
1836 skb_record_rx_queue(skb, bnapi->index); in bnxt_deliver_skb()
1837 napi_gro_receive(&bnapi->napi, skb); in bnxt_deliver_skb()
1843 u32 ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp); in bnxt_rx_ts_valid()
1847 if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags)) in bnxt_rx_ts_valid()
1863 __le32 flags2 = rxcmp1->rx_cmp_flags2; in bnxt_rx_vlan()
1869 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); in bnxt_rx_vlan()
1914 * 1 - 1 packet successfully received
1915 * 0 - successful TPA_START, packet not completed yet
1916 * -EBUSY - completion ring does not have all the agg buffers yet
1917 * -ENOMEM - packet aborted due to out of memory
1918 * -EIO - packet aborted due to hw error indicated in BD
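
The polling code later in this listing consumes exactly this contract; a condensed model of the caller-side branching (rx_pkt() is a stand-in that replays the documented codes, and counting -ENOMEM toward the budget follows the comment shown in __bnxt_poll_work()):

#include <errno.h>
#include <stdio.h>

/* stand-in for bnxt_rx_pkt(); returns one of the documented codes */
static int rx_pkt(int i)
{
	static const int script[] = { 1, 0, -ENOMEM, -EBUSY };
	return script[i];
}

int main(void)
{
	int i, rx_pkts = 0;

	for (i = 0; i < 4; i++) {
		int rc = rx_pkt(i);

		if (rc > 0)
			rx_pkts++;	/* packet delivered */
		else if (rc == -ENOMEM)
			rx_pkts++;	/* count toward the NAPI budget anyway */
		else if (rc == -EBUSY)
			break;		/* partial completion: retry next poll */
		/* rc == 0: TPA started, nothing to count yet */
	}
	printf("rx_pkts=%d\n", rx_pkts);
	return 0;
}
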
1923 struct bnxt_napi *bnapi = cpr->bnapi; in bnxt_rx_pkt()
1924 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; in bnxt_rx_pkt()
1925 struct net_device *dev = bp->dev; in bnxt_rx_pkt()
1943 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; in bnxt_rx_pkt()
1955 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; in bnxt_rx_pkt()
1958 return -EBUSY; in bnxt_rx_pkt()
1964 prod = rxr->rx_prod; in bnxt_rx_pkt()
1981 return -EBUSY; in bnxt_rx_pkt()
1983 rc = -ENOMEM; in bnxt_rx_pkt()
1992 cons = rxcmp->rx_cmp_opaque; in bnxt_rx_pkt()
1993 if (unlikely(cons != rxr->rx_next_cons)) { in bnxt_rx_pkt()
1997 if (rxr->rx_next_cons != 0xffff) in bnxt_rx_pkt()
1998 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n", in bnxt_rx_pkt()
1999 cons, rxr->rx_next_cons); in bnxt_rx_pkt()
2005 rx_buf = &rxr->rx_buf_ring[cons]; in bnxt_rx_pkt()
2006 data = rx_buf->data; in bnxt_rx_pkt()
2007 data_ptr = rx_buf->data_ptr; in bnxt_rx_pkt()
2010 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1); in bnxt_rx_pkt()
2015 return -EBUSY; in bnxt_rx_pkt()
2022 rx_buf->data = NULL; in bnxt_rx_pkt()
2023 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) { in bnxt_rx_pkt()
2024 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2); in bnxt_rx_pkt()
2031 rc = -EIO; in bnxt_rx_pkt()
2033 bnapi->cp_ring.sw_stats.rx.rx_buf_errors++; in bnxt_rx_pkt()
2034 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && in bnxt_rx_pkt()
2035 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) { in bnxt_rx_pkt()
2036 netdev_warn_once(bp->dev, "RX buffer error %x\n", in bnxt_rx_pkt()
2044 flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type); in bnxt_rx_pkt()
2046 dma_addr = rx_buf->mapping; in bnxt_rx_pkt()
2055 cpr->sw_stats.rx.rx_oom_discards += 1; in bnxt_rx_pkt()
2056 rc = -ENOMEM; in bnxt_rx_pkt()
2070 if (len <= bp->rx_copy_thresh) { in bnxt_rx_pkt()
2081 cpr->sw_stats.rx.rx_oom_discards += 1; in bnxt_rx_pkt()
2082 rc = -ENOMEM; in bnxt_rx_pkt()
2088 if (rx_buf->data_ptr == data_ptr) in bnxt_rx_pkt()
2092 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr, in bnxt_rx_pkt()
2095 cpr->sw_stats.rx.rx_oom_discards += 1; in bnxt_rx_pkt()
2096 rc = -ENOMEM; in bnxt_rx_pkt()
2105 cpr->sw_stats.rx.rx_oom_discards += 1; in bnxt_rx_pkt()
2106 rc = -ENOMEM; in bnxt_rx_pkt()
2110 skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1); in bnxt_rx_pkt()
2114 cpr->sw_stats.rx.rx_oom_discards += 1; in bnxt_rx_pkt()
2115 rc = -ENOMEM; in bnxt_rx_pkt()
2130 * 4-tuple in bnxt_rx_pkt()
2137 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type); in bnxt_rx_pkt()
2142 skb->protocol = eth_type_trans(skb, dev); in bnxt_rx_pkt()
2144 if (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) { in bnxt_rx_pkt()
2152 if (dev->features & NETIF_F_RXCSUM) { in bnxt_rx_pkt()
2153 skb->ip_summed = CHECKSUM_UNNECESSARY; in bnxt_rx_pkt()
2154 skb->csum_level = RX_CMP_ENCAP(rxcmp1); in bnxt_rx_pkt()
2157 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) { in bnxt_rx_pkt()
2158 if (dev->features & NETIF_F_RXCSUM) in bnxt_rx_pkt()
2159 bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++; in bnxt_rx_pkt()
2164 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in bnxt_rx_pkt()
2168 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; in bnxt_rx_pkt()
2170 spin_lock_bh(&ptp->ptp_lock); in bnxt_rx_pkt()
2171 ns = timecounter_cyc2time(&ptp->tc, ts); in bnxt_rx_pkt()
2172 spin_unlock_bh(&ptp->ptp_lock); in bnxt_rx_pkt()
2175 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns); in bnxt_rx_pkt()
2183 cpr->rx_packets += 1; in bnxt_rx_pkt()
2184 cpr->rx_bytes += len; in bnxt_rx_pkt()
2187 rxr->rx_prod = NEXT_RX(prod); in bnxt_rx_pkt()
2188 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons)); in bnxt_rx_pkt()
2212 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; in bnxt_force_rx_discard()
2217 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; in bnxt_force_rx_discard()
2220 return -EBUSY; in bnxt_force_rx_discard()
2229 rxcmp1->rx_cmp_cfa_code_errors_v2 |= in bnxt_force_rx_discard()
2235 tpa_end1->rx_tpa_end_cmp_errors_v2 |= in bnxt_force_rx_discard()
2239 if (rc && rc != -EBUSY) in bnxt_force_rx_discard()
2240 cpr->sw_stats.rx.rx_netpoll_discards += 1; in bnxt_force_rx_discard()
2246 struct bnxt_fw_health *fw_health = bp->fw_health; in bnxt_fw_health_readl()
2247 u32 reg = fw_health->regs[reg_idx]; in bnxt_fw_health_readl()
2254 pci_read_config_dword(bp->pdev, reg_off, &val); in bnxt_fw_health_readl()
2257 reg_off = fw_health->mapped_regs[reg_idx]; in bnxt_fw_health_readl()
2260 val = readl(bp->bar0 + reg_off); in bnxt_fw_health_readl()
2263 val = readl(bp->bar1 + reg_off); in bnxt_fw_health_readl()
2267 val &= fw_health->fw_reset_inprog_reg_mask; in bnxt_fw_health_readl()
2275 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_agg_ring_id_to_grp_idx()
2276 u16 grp_idx = bp->rx_ring[i].bnapi->index; in bnxt_agg_ring_id_to_grp_idx()
2279 grp_info = &bp->grp_info[grp_idx]; in bnxt_agg_ring_id_to_grp_idx()
2280 if (grp_info->agg_fw_ring_id == ring_id) in bnxt_agg_ring_id_to_grp_idx()
2290 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) in bnxt_get_force_speed()
2291 return link_info->force_link_speed2; in bnxt_get_force_speed()
2292 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4) in bnxt_get_force_speed()
2293 return link_info->force_pam4_link_speed; in bnxt_get_force_speed()
2294 return link_info->force_link_speed; in bnxt_get_force_speed()
2301 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { in bnxt_set_force_speed()
2302 link_info->req_link_speed = link_info->force_link_speed2; in bnxt_set_force_speed()
2303 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; in bnxt_set_force_speed()
2304 switch (link_info->req_link_speed) { in bnxt_set_force_speed()
2309 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4; in bnxt_set_force_speed()
2314 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4_112; in bnxt_set_force_speed()
2317 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; in bnxt_set_force_speed()
2321 link_info->req_link_speed = link_info->force_link_speed; in bnxt_set_force_speed()
2322 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; in bnxt_set_force_speed()
2323 if (link_info->force_pam4_link_speed) { in bnxt_set_force_speed()
2324 link_info->req_link_speed = link_info->force_pam4_link_speed; in bnxt_set_force_speed()
2325 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4; in bnxt_set_force_speed()
2333 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { in bnxt_set_auto_speed()
2334 link_info->advertising = link_info->auto_link_speeds2; in bnxt_set_auto_speed()
2337 link_info->advertising = link_info->auto_link_speeds; in bnxt_set_auto_speed()
2338 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds; in bnxt_set_auto_speed()
2345 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { in bnxt_force_speed_updated()
2346 if (link_info->req_link_speed != link_info->force_link_speed2) in bnxt_force_speed_updated()
2350 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ && in bnxt_force_speed_updated()
2351 link_info->req_link_speed != link_info->force_link_speed) in bnxt_force_speed_updated()
2353 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 && in bnxt_force_speed_updated()
2354 link_info->req_link_speed != link_info->force_pam4_link_speed) in bnxt_force_speed_updated()
2363 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { in bnxt_auto_speed_updated()
2364 if (link_info->advertising != link_info->auto_link_speeds2) in bnxt_auto_speed_updated()
2368 if (link_info->advertising != link_info->auto_link_speeds || in bnxt_auto_speed_updated()
2369 link_info->advertising_pam4 != link_info->auto_pam4_link_speeds) in bnxt_auto_speed_updated()
2399 …netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix … in bnxt_event_error_report()
2403 netdev_warn(bp->dev, "Pause Storm detected!\n"); in bnxt_event_error_report()
2406 netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n"); in bnxt_event_error_report()
2428 netdev_err(bp->dev, "Unknown Thermal threshold type event\n"); in bnxt_event_error_report()
2437 netdev_warn(bp->dev, "Chip temperature has gone %s the %s thermal threshold!\n", in bnxt_event_error_report()
2439 netdev_warn(bp->dev, "Temperature (In Celsius), Current: %lu, threshold: %lu\n", in bnxt_event_error_report()
2443 bp->thermal_threshold_type = type; in bnxt_event_error_report()
2444 set_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event); in bnxt_event_error_report()
2450 netdev_err(bp->dev, "FW reported unknown error type %u\n", in bnxt_event_error_report()
2482 u16 event_id = le16_to_cpu(cmpl->event_id); in bnxt_async_event_process()
2483 u32 data1 = le32_to_cpu(cmpl->event_data1); in bnxt_async_event_process()
2484 u32 data2 = le32_to_cpu(cmpl->event_data2); in bnxt_async_event_process()
2486 netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n", in bnxt_async_event_process()
2492 struct bnxt_link_info *link_info = &bp->link_info; in bnxt_async_event_process()
2498 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) && in bnxt_async_event_process()
2504 netdev_warn(bp->dev, "Link speed %d no longer supported\n", in bnxt_async_event_process()
2507 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); in bnxt_async_event_process()
2512 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event); in bnxt_async_event_process()
2515 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); in bnxt_async_event_process()
2518 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event); in bnxt_async_event_process()
2526 if (bp->pf.port_id != port_id) in bnxt_async_event_process()
2529 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event); in bnxt_async_event_process()
2535 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event); in bnxt_async_event_process()
2540 if (!bp->fw_health) in bnxt_async_event_process()
2543 bp->fw_reset_timestamp = jiffies; in bnxt_async_event_process()
2544 bp->fw_reset_min_dsecs = cmpl->timestamp_lo; in bnxt_async_event_process()
2545 if (!bp->fw_reset_min_dsecs) in bnxt_async_event_process()
2546 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS; in bnxt_async_event_process()
2547 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi); in bnxt_async_event_process()
2548 if (!bp->fw_reset_max_dsecs) in bnxt_async_event_process()
2549 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS; in bnxt_async_event_process()
2551 set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state); in bnxt_async_event_process()
2554 bp->fw_health->fatalities++; in bnxt_async_event_process()
2555 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); in bnxt_async_event_process()
2558 type_str = "Non-fatal"; in bnxt_async_event_process()
2559 bp->fw_health->survivals++; in bnxt_async_event_process()
2560 set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state); in bnxt_async_event_process()
2562 netif_warn(bp, hw, bp->dev, in bnxt_async_event_process()
2565 bp->fw_reset_min_dsecs * 100, in bnxt_async_event_process()
2566 bp->fw_reset_max_dsecs * 100); in bnxt_async_event_process()
2567 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event); in bnxt_async_event_process()
2571 struct bnxt_fw_health *fw_health = bp->fw_health; in bnxt_async_event_process()
2579 fw_health->enabled = false; in bnxt_async_event_process()
2580 netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n"); in bnxt_async_event_process()
2583 fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1); in bnxt_async_event_process()
2584 fw_health->tmr_multiplier = in bnxt_async_event_process()
2585 DIV_ROUND_UP(fw_health->polling_dsecs * HZ, in bnxt_async_event_process()
2586 bp->current_interval * 10); in bnxt_async_event_process()
2587 fw_health->tmr_counter = fw_health->tmr_multiplier; in bnxt_async_event_process()
2588 if (!fw_health->enabled) in bnxt_async_event_process()
2589 fw_health->last_fw_heartbeat = in bnxt_async_event_process()
2591 fw_health->last_fw_reset_cnt = in bnxt_async_event_process()
2596 netif_info(bp, drv, bp->dev, in bnxt_async_event_process()
2598 fw_health->primary ? "primary" : "backup", status, in bnxt_async_event_process()
2599 status_desc, fw_health->last_fw_reset_cnt); in bnxt_async_event_process()
2600 if (!fw_health->enabled) { in bnxt_async_event_process()
2605 fw_health->enabled = true; in bnxt_async_event_process()
2610 netif_notice(bp, hw, bp->dev, in bnxt_async_event_process()
2618 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_async_event_process()
2621 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n", in bnxt_async_event_process()
2628 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n", in bnxt_async_event_process()
2632 rxr = bp->bnapi[grp_idx]->rx_ring; in bnxt_async_event_process()
2637 struct bnxt_fw_health *fw_health = bp->fw_health; in bnxt_async_event_process()
2639 netif_notice(bp, hw, bp->dev, in bnxt_async_event_process()
2643 fw_health->echo_req_data1 = data1; in bnxt_async_event_process()
2644 fw_health->echo_req_data2 = data2; in bnxt_async_event_process()
2645 set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event); in bnxt_async_event_process()
2663 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; in bnxt_async_event_process()
2669 spin_lock_bh(&ptp->ptp_lock); in bnxt_async_event_process()
2672 BNXT_PHC_BITS) | ptp->current_time); in bnxt_async_event_process()
2674 spin_unlock_bh(&ptp->ptp_lock); in bnxt_async_event_process()
2681 u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff; in bnxt_async_event_process()
2703 seq_id = le16_to_cpu(h_cmpl->sequence_id); in bnxt_hwrm_handler()
2708 vf_id = le16_to_cpu(fwd_req_cmpl->source_id); in bnxt_hwrm_handler()
2710 if ((vf_id < bp->pf.first_vf_id) || in bnxt_hwrm_handler()
2711 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) { in bnxt_hwrm_handler()
2712 netdev_err(bp->dev, "Msg contains invalid VF id %x\n", in bnxt_hwrm_handler()
2714 return -EINVAL; in bnxt_hwrm_handler()
2717 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap); in bnxt_hwrm_handler()
2736 struct bnxt *bp = bnapi->bp; in bnxt_msix()
2737 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; in bnxt_msix()
2738 u32 cons = RING_CMP(cpr->cp_raw_cons); in bnxt_msix()
2740 cpr->event_ctr++; in bnxt_msix()
2741 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); in bnxt_msix()
2742 napi_schedule(&bnapi->napi); in bnxt_msix()
2748 u32 raw_cons = cpr->cp_raw_cons; in bnxt_has_work()
2752 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; in bnxt_has_work()
2760 struct bnxt *bp = bnapi->bp; in bnxt_inta()
2761 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; in bnxt_inta()
2762 u32 cons = RING_CMP(cpr->cp_raw_cons); in bnxt_inta()
2765 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); in bnxt_inta()
2768 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS); in bnxt_inta()
2770 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id))) in bnxt_inta()
2775 BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell); in bnxt_inta()
2778 if (unlikely(atomic_read(&bp->intr_sem) != 0)) in bnxt_inta()
2781 napi_schedule(&bnapi->napi); in bnxt_inta()
2788 struct bnxt_napi *bnapi = cpr->bnapi; in __bnxt_poll_work()
2789 u32 raw_cons = cpr->cp_raw_cons; in __bnxt_poll_work()
2795 cpr->has_more_work = 0; in __bnxt_poll_work()
2796 cpr->had_work_done = 1; in __bnxt_poll_work()
2802 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; in __bnxt_poll_work()
2814 u32 opaque = txcmp->tx_cmp_opaque; in __bnxt_poll_work()
2818 txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)]; in __bnxt_poll_work()
2821 txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp); in __bnxt_poll_work()
2823 txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque); in __bnxt_poll_work()
2824 tx_freed = (txr->tx_hw_cons - txr->tx_cons) & in __bnxt_poll_work()
2825 bp->tx_ring_mask; in __bnxt_poll_work()
2827 if (unlikely(tx_freed >= bp->tx_wake_thresh)) { in __bnxt_poll_work()
2831 cpr->has_more_work = 1; in __bnxt_poll_work()
2843 /* Increment rx_pkts when rc is -ENOMEM to count towards in __bnxt_poll_work()
2848 else if (rc == -ENOMEM && budget) in __bnxt_poll_work()
2850 else if (rc == -EBUSY) /* partial completion */ in __bnxt_poll_work()
2860 cpr->has_more_work = 1; in __bnxt_poll_work()
2869 struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0]; in __bnxt_poll_work()
2870 u16 prod = txr->tx_prod; in __bnxt_poll_work()
2875 bnxt_db_write_relaxed(bp, &txr->tx_db, prod); in __bnxt_poll_work()
2878 cpr->cp_raw_cons = raw_cons; in __bnxt_poll_work()
2879 bnapi->events |= event; in __bnxt_poll_work()
2886 if ((bnapi->events & BNXT_TX_CMP_EVENT) && !bnapi->tx_fault) in __bnxt_poll_work_done()
2887 bnapi->tx_int(bp, bnapi, budget); in __bnxt_poll_work_done()
2889 if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) { in __bnxt_poll_work_done()
2890 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; in __bnxt_poll_work_done()
2892 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); in __bnxt_poll_work_done()
2894 if (bnapi->events & BNXT_AGG_EVENT) { in __bnxt_poll_work_done()
2895 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; in __bnxt_poll_work_done()
2897 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); in __bnxt_poll_work_done()
2899 bnapi->events &= BNXT_TX_CMP_EVENT; in __bnxt_poll_work_done()
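
__bnxt_poll_work() above only accumulates event bits per completion (bnapi->events |= event); __bnxt_poll_work_done() then writes each ring doorbell at most once per poll pass and keeps only the TX bit. A stripped-down model of that accumulate-then-flush split (the flag values here are arbitrary, not the driver's):

#include <stdio.h>

#define BNXT_RX_EVENT		0x1
#define BNXT_AGG_EVENT		0x2
#define BNXT_TX_CMP_EVENT	0x4

int main(void)
{
	unsigned int events = 0;
	int rx_db_writes = 0, agg_db_writes = 0;

	/* poll loop: completions only set bits */
	events |= BNXT_RX_EVENT;
	events |= BNXT_RX_EVENT | BNXT_AGG_EVENT;

	/* work_done: flush each doorbell once, then keep only the TX bit */
	if (events & BNXT_RX_EVENT)
		rx_db_writes++;
	if (events & BNXT_AGG_EVENT)
		agg_db_writes++;
	events &= BNXT_TX_CMP_EVENT;

	printf("rx=%d agg=%d events=0x%x\n", rx_db_writes, agg_db_writes, events);
	return 0;
}
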
2905 struct bnxt_napi *bnapi = cpr->bnapi; in bnxt_poll_work()
2910 /* ACK completion ring before freeing tx ring and producing new in bnxt_poll_work()
2914 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons); in bnxt_poll_work()
2923 struct bnxt *bp = bnapi->bp; in bnxt_poll_nitroa0()
2924 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; in bnxt_poll_nitroa0()
2925 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; in bnxt_poll_nitroa0()
2929 u32 raw_cons = cpr->cp_raw_cons; in bnxt_poll_nitroa0()
2938 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; in bnxt_poll_nitroa0()
2951 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; in bnxt_poll_nitroa0()
2957 rxcmp1->rx_cmp_cfa_code_errors_v2 |= in bnxt_poll_nitroa0()
2961 if (likely(rc == -EIO) && budget) in bnxt_poll_nitroa0()
2963 else if (rc == -EBUSY) /* partial completion */ in bnxt_poll_nitroa0()
2971 netdev_err(bp->dev, in bnxt_poll_nitroa0()
2980 cpr->cp_raw_cons = raw_cons; in bnxt_poll_nitroa0()
2981 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons); in bnxt_poll_nitroa0()
2982 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); in bnxt_poll_nitroa0()
2985 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); in bnxt_poll_nitroa0()
2991 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); in bnxt_poll_nitroa0()
2999 struct bnxt *bp = bnapi->bp; in bnxt_poll()
3000 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; in bnxt_poll()
3003 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) { in bnxt_poll()
3008 work_done += bnxt_poll_work(bp, cpr, budget - work_done); in bnxt_poll()
3012 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); in bnxt_poll()
3018 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); in bnxt_poll()
3022 if (bp->flags & BNXT_FLAG_DIM) { in bnxt_poll()
3025 dim_update_sample(cpr->event_ctr, in bnxt_poll()
3026 cpr->rx_packets, in bnxt_poll()
3027 cpr->rx_bytes, in bnxt_poll()
3029 net_dim(&cpr->dim, dim_sample); in bnxt_poll()
3036 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; in __bnxt_poll_cqs()
3039 for (i = 0; i < cpr->cp_ring_count; i++) { in __bnxt_poll_cqs()
3040 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i]; in __bnxt_poll_cqs()
3042 if (cpr2->had_nqe_notify) { in __bnxt_poll_cqs()
3044 budget - work_done); in __bnxt_poll_cqs()
3045 cpr->has_more_work |= cpr2->has_more_work; in __bnxt_poll_cqs()
3054 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; in __bnxt_poll_cqs_done()
3057 for (i = 0; i < cpr->cp_ring_count; i++) { in __bnxt_poll_cqs_done()
3058 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i]; in __bnxt_poll_cqs_done()
3061 if (cpr2->had_work_done) { in __bnxt_poll_cqs_done()
3065 cpr2->had_nqe_notify = 0; in __bnxt_poll_cqs_done()
3066 tgl = cpr2->toggle; in __bnxt_poll_cqs_done()
3068 db = &cpr2->cp_db; in __bnxt_poll_cqs_done()
3070 db->db_key64 | dbr_type | DB_TOGGLE(tgl) | in __bnxt_poll_cqs_done()
3071 DB_RING_IDX(db, cpr2->cp_raw_cons), in __bnxt_poll_cqs_done()
3072 db->doorbell); in __bnxt_poll_cqs_done()
3073 cpr2->had_work_done = 0; in __bnxt_poll_cqs_done()
3082 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; in bnxt_poll_p5()
3084 u32 raw_cons = cpr->cp_raw_cons; in bnxt_poll_p5()
3085 struct bnxt *bp = bnapi->bp; in bnxt_poll_p5()
3090 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) { in bnxt_poll_p5()
3094 if (cpr->has_more_work) { in bnxt_poll_p5()
3095 cpr->has_more_work = 0; in bnxt_poll_p5()
3102 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)]; in bnxt_poll_p5()
3105 if (cpr->has_more_work) in bnxt_poll_p5()
3110 cpr->cp_raw_cons = raw_cons; in bnxt_poll_p5()
3112 BNXT_DB_NQ_ARM_P5(&cpr->cp_db, in bnxt_poll_p5()
3113 cpr->cp_raw_cons); in bnxt_poll_p5()
3122 type = le16_to_cpu(nqcmp->type); in bnxt_poll_p5()
3124 u32 idx = le32_to_cpu(nqcmp->cq_handle_low); in bnxt_poll_p5()
3134 cpr2 = &cpr->cp_ring_arr[idx]; in bnxt_poll_p5()
3135 cpr2->had_nqe_notify = 1; in bnxt_poll_p5()
3136 cpr2->toggle = NQE_CN_TOGGLE(type); in bnxt_poll_p5()
3138 budget - work_done); in bnxt_poll_p5()
3139 cpr->has_more_work |= cpr2->has_more_work; in bnxt_poll_p5()
3146 if (raw_cons != cpr->cp_raw_cons) { in bnxt_poll_p5()
3147 cpr->cp_raw_cons = raw_cons; in bnxt_poll_p5()
3148 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons); in bnxt_poll_p5()
3151 cpr_rx = &cpr->cp_ring_arr[0]; in bnxt_poll_p5()
3152 if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX && in bnxt_poll_p5()
3153 (bp->flags & BNXT_FLAG_DIM)) { in bnxt_poll_p5()
3156 dim_update_sample(cpr->event_ctr, in bnxt_poll_p5()
3157 cpr_rx->rx_packets, in bnxt_poll_p5()
3158 cpr_rx->rx_bytes, in bnxt_poll_p5()
3160 net_dim(&cpr->dim, dim_sample); in bnxt_poll_p5()
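
bnxt_poll_p5() above services a two-level scheme: NAPI polls a single notification queue, and each NQE's cq_handle selects which completion ring in cp_ring_arr to drain. A structural model of that fan-out (types, fields, and indices simplified; 'work' stands in for actual completion processing):

#include <stdio.h>

#define NQ_DEPTH 4

struct cq {
	int had_nqe_notify;
	int work;		/* pending completions, modeled as a count */
};

struct nq_entry {
	unsigned int cq_idx;	/* models cq_handle_low -> cp_ring_arr index */
};

int main(void)
{
	struct cq rings[2] = { { 0, 3 }, { 0, 1 } };
	struct nq_entry nq[NQ_DEPTH] = { {0}, {1}, {0}, {1} };
	int done = 0;
	unsigned int i;

	for (i = 0; i < NQ_DEPTH; i++) {
		struct cq *cq = &rings[nq[i].cq_idx];

		cq->had_nqe_notify = 1;	/* mark before servicing, as in bnxt_poll_p5() */
		done += cq->work;	/* models __bnxt_poll_work() on that ring */
		cq->work = 0;
	}
	printf("completions serviced: %d\n", done);
	return 0;
}
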
/* bnxt_free_tx_skbs() */
	struct pci_dev *pdev = bp->pdev;

	if (!bp->tx_ring)
		return;

	max_idx = bp->tx_nr_pages * TX_DESC_CNT;
	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];

		if (!txr->tx_buf_ring)
			continue;

			struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];

			if (i < bp->tx_nr_rings_xdp &&
			    tx_buf->action == XDP_REDIRECT) {
				dma_unmap_single(&pdev->dev,
				xdp_return_frame(tx_buf->xdpf);
				tx_buf->action = 0;
				tx_buf->xdpf = NULL;

			skb = tx_buf->skb;

			tx_buf->skb = NULL;

			if (tx_buf->is_push) {

			dma_unmap_single(&pdev->dev,

			last = tx_buf->nr_frags;

				int ring_idx = j & bp->tx_ring_mask;
				skb_frag_t *frag = &skb_shinfo(skb)->frags[k];

				tx_buf = &txr->tx_buf_ring[ring_idx];
				dma_unmap_page(
					&pdev->dev,

		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
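/*
 * Teardown order in bnxt_free_tx_skbs(): XDP_REDIRECT slots unmap the
 * frame and hand it back via xdp_return_frame(); "push" skbs whose data
 * went out through the doorbell BAR need no head unmap; ordinary skbs
 * have their head unmapped with dma_unmap_single() and then each of the
 * nr_frags page fragments unmapped from the following ring slots.
 * netdev_tx_reset_queue() finally clears the queue's BQL accounting.
 */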
/* bnxt_free_one_rx_ring_skbs() */
	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
	struct pci_dev *pdev = bp->pdev;

	max_idx = bp->rx_nr_pages * RX_DESC_CNT;
	max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
	if (!rxr->rx_tpa)

	for (i = 0; i < bp->max_tpa; i++) {
		struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
		u8 *data = tpa_info->data;

		dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
				       bp->rx_buf_use_size, bp->rx_dir,
				       DMA_ATTR_WEAK_ORDERING);
		tpa_info->data = NULL;

	if (!rxr->rx_buf_ring)

		struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
		dma_addr_t mapping = rx_buf->mapping;
		void *data = rx_buf->data;

		rx_buf->data = NULL;

			page_pool_recycle_direct(rxr->page_pool, data);

			dma_unmap_single_attrs(&pdev->dev, mapping,
					       bp->rx_buf_use_size, bp->rx_dir,
					       DMA_ATTR_WEAK_ORDERING);

	if (!rxr->rx_agg_ring)

		struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
		struct page *page = rx_agg_buf->page;

		rx_agg_buf->page = NULL;
		__clear_bit(i, rxr->rx_agg_bmap);

		page_pool_recycle_direct(rxr->page_pool, page);

	map = rxr->rx_tpa_idx_map;
	if (map)
		memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));

/* bnxt_free_rx_skbs() */
	if (!bp->rx_ring)
		return;

	for (i = 0; i < bp->rx_nr_rings; i++)
		bnxt_free_one_rx_ring_skbs(bp, i);
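/*
 * RX teardown order: TPA (LRO/GRO_HW) staging buffers first, then the
 * normal RX ring (page-pool pages are recycled, DMA-mapped data buffers
 * are unmapped), then the aggregation ring together with its rx_agg_bmap
 * bits, and finally the TPA aggregation-index bitmap is cleared.
 */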
/* bnxt_init_ctx_mem() */
	u8 init_val = ctxm->init_value;
	u16 offset = ctxm->init_offset;

	for (i = 0; i < len; i += ctxm->entry_size)

/* bnxt_free_ring() */
	struct pci_dev *pdev = bp->pdev;

	if (!rmem->pg_arr)

	for (i = 0; i < rmem->nr_pages; i++) {
		if (!rmem->pg_arr[i])
			continue;

		dma_free_coherent(&pdev->dev, rmem->page_size,
				  rmem->pg_arr[i], rmem->dma_arr[i]);

		rmem->pg_arr[i] = NULL;
	}

	if (rmem->pg_tbl) {
		size_t pg_tbl_size = rmem->nr_pages * 8;

		if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
			pg_tbl_size = rmem->page_size;
		dma_free_coherent(&pdev->dev, pg_tbl_size,
				  rmem->pg_tbl, rmem->pg_tbl_map);
		rmem->pg_tbl = NULL;
	}

	if (rmem->vmem_size && *rmem->vmem) {
		vfree(*rmem->vmem);
		*rmem->vmem = NULL;
	}
/* bnxt_alloc_ring() */
	struct pci_dev *pdev = bp->pdev;
	u64 valid_bit = 0;

	if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
		valid_bit = PTU_PTE_VALID;

	if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
		size_t pg_tbl_size = rmem->nr_pages * 8;

		if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
			pg_tbl_size = rmem->page_size;
		rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
						  &rmem->pg_tbl_map,
						  GFP_KERNEL);
		if (!rmem->pg_tbl)
			return -ENOMEM;
	}

	for (i = 0; i < rmem->nr_pages; i++) {
		u64 extra_bits = valid_bit;

		rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
						     rmem->page_size,
						     &rmem->dma_arr[i],
						     GFP_KERNEL);
		if (!rmem->pg_arr[i])
			return -ENOMEM;

		if (rmem->ctx_mem)
			bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i],
					  rmem->page_size);
		if (rmem->nr_pages > 1 || rmem->depth > 0) {
			if (i == rmem->nr_pages - 2 &&
			    (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
				extra_bits |= PTU_PTE_NEXT_TO_LAST;
			else if (i == rmem->nr_pages - 1 &&
				 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
				extra_bits |= PTU_PTE_LAST;
			rmem->pg_tbl[i] =
				cpu_to_le64(rmem->dma_arr[i] | extra_bits);
		}
	}

	if (rmem->vmem_size) {
		*rmem->vmem = vzalloc(rmem->vmem_size);
		if (!(*rmem->vmem))
			return -ENOMEM;
	}
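/*
 * bnxt_alloc_ring() builds a one- or two-level ring: data pages come
 * from dma_alloc_coherent(), and for multi-page (or deeper context
 * memory) rings each page's DMA address is written into pg_tbl as a
 * little-endian entry.  The PTU_PTE_VALID/NEXT_TO_LAST/LAST marker bits
 * let the hardware validate entries and detect the ring wrap point
 * while following the page table.
 */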
/* bnxt_free_tpa_info() */
	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];

		kfree(rxr->rx_tpa_idx_map);
		rxr->rx_tpa_idx_map = NULL;
		if (rxr->rx_tpa) {
			for (j = 0; j < bp->max_tpa; j++) {
				kfree(rxr->rx_tpa[j].agg_arr);
				rxr->rx_tpa[j].agg_arr = NULL;
			}
		}
		kfree(rxr->rx_tpa);
		rxr->rx_tpa = NULL;
	}

/* bnxt_alloc_tpa_info() */
	bp->max_tpa = MAX_TPA;
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
		if (!bp->max_tpa_v2)
			return 0;
		bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
	}

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];

		rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
				      GFP_KERNEL);
		if (!rxr->rx_tpa)
			return -ENOMEM;

		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
			continue;
		for (j = 0; j < bp->max_tpa; j++) {
			if (!agg)
				return -ENOMEM;
			rxr->rx_tpa[j].agg_arr = agg;
		}
		rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
					      GFP_KERNEL);
		if (!rxr->rx_tpa_idx_map)
			return -ENOMEM;
	}
/* bnxt_free_rx_rings() */
	if (!bp->rx_ring)
		return;

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];

		if (rxr->xdp_prog)
			bpf_prog_put(rxr->xdp_prog);

		if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
			xdp_rxq_info_unreg(&rxr->xdp_rxq);

		page_pool_destroy(rxr->page_pool);
		rxr->page_pool = NULL;

		kfree(rxr->rx_agg_bmap);
		rxr->rx_agg_bmap = NULL;

		ring = &rxr->rx_ring_struct;
		bnxt_free_ring(bp, &ring->ring_mem);

		ring = &rxr->rx_agg_ring_struct;
		bnxt_free_ring(bp, &ring->ring_mem);
	}

/* bnxt_alloc_rx_page_pool() */
	pp.pool_size = bp->rx_agg_ring_size;
	if (BNXT_RX_PAGE_MODE(bp))
		pp.pool_size += bp->rx_ring_size;
	pp.nid = dev_to_node(&bp->pdev->dev);
	pp.napi = &rxr->bnapi->napi;
	pp.netdev = bp->dev;
	pp.dev = &bp->pdev->dev;
	pp.dma_dir = bp->rx_dir;

	rxr->page_pool = page_pool_create(&pp);
	if (IS_ERR(rxr->page_pool)) {
		int err = PTR_ERR(rxr->page_pool);

		rxr->page_pool = NULL;
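/*
 * The page pool is sized for the aggregation ring, plus the normal RX
 * ring when the driver runs in page mode and data buffers also come
 * from the pool.  Allocating on the PCI device's NUMA node and reusing
 * bp->rx_dir keeps pool pages mapped in the same DMA direction the RX
 * path expects.
 */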
/* bnxt_alloc_rx_rings() */
	if (!bp->rx_ring)
		return -ENOMEM;

	if (bp->flags & BNXT_FLAG_AGG_RINGS)

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];

		ring = &rxr->rx_ring_struct;

		rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);

		rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
						MEM_TYPE_PAGE_POOL,
						rxr->page_pool);
		if (rc) {
			xdp_rxq_info_unreg(&rxr->xdp_rxq);
			return rc;
		}

		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
		if (rc)
			return rc;

		ring->grp_idx = i;

			ring = &rxr->rx_agg_ring_struct;
			rc = bnxt_alloc_ring(bp, &ring->ring_mem);
			if (rc)
				return rc;

			ring->grp_idx = i;
			rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
			mem_size = rxr->rx_agg_bmap_size / 8;
			rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
			if (!rxr->rx_agg_bmap)
				return -ENOMEM;
	}

	if (bp->flags & BNXT_FLAG_TPA)
		rc = bnxt_alloc_tpa_info(bp);
/* bnxt_free_tx_rings() */
	struct pci_dev *pdev = bp->pdev;

	if (!bp->tx_ring)
		return;

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];

		if (txr->tx_push) {
			dma_free_coherent(&pdev->dev, bp->tx_push_size,
					  txr->tx_push, txr->tx_push_mapping);
			txr->tx_push = NULL;
		}

		ring = &txr->tx_ring_struct;

		bnxt_free_ring(bp, &ring->ring_mem);
	}
#define BNXT_TC_TO_RING_BASE(bp, tc)		\
	((tc) * (bp)->tx_nr_rings_per_tc)

#define BNXT_RING_TO_TC_OFF(bp, tx)		\
	((tx) % (bp)->tx_nr_rings_per_tc)

#define BNXT_RING_TO_TC(bp, tx)			\
	((tx) / (bp)->tx_nr_rings_per_tc)
/* bnxt_alloc_tx_rings() */
	struct pci_dev *pdev = bp->pdev;

	bp->tx_push_size = 0;
	if (bp->tx_push_thresh) {
		int push_size;

		push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
					    bp->tx_push_thresh);

		if (push_size > 256) {
			push_size = 0;
			bp->tx_push_thresh = 0;
		}

		bp->tx_push_size = push_size;
	}

	for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];

		ring = &txr->tx_ring_struct;

		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
		if (rc)
			return rc;

		ring->grp_idx = txr->bnapi->index;
		if (bp->tx_push_size) {
			/* One pre-allocated DMA buffer to backup
			 * TX push operation
			 */
			txr->tx_push = dma_alloc_coherent(&pdev->dev,
							  bp->tx_push_size,
							  &txr->tx_push_mapping,
							  GFP_KERNEL);
			if (!txr->tx_push)
				return -ENOMEM;

			mapping = txr->tx_push_mapping +
				  sizeof(struct tx_push_bd);
			txr->data_mapping = cpu_to_le64(mapping);
		}
		qidx = bp->tc_to_qidx[j];
		ring->queue_id = bp->q_info[qidx].queue_id;
		spin_lock_init(&txr->xdp_tx_lock);
		if (i < bp->tx_nr_rings_xdp)
			continue;
		if (BNXT_RING_TO_TC_OFF(bp, i) == (bp->tx_nr_rings_per_tc - 1))
			j++;
	}
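/*
 * "TX push" lets small packets be written directly through the doorbell
 * BAR instead of being DMA'd from host memory.  tx_push is the coherent
 * bounce buffer backing that write (sized from bp->tx_push_thresh), and
 * data_mapping points at the payload area that follows the push BD
 * header, so the same buffer can serve as a fallback DMA source.
 */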
/* bnxt_free_cp_arrays() */
	struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;

	kfree(cpr->cp_desc_ring);
	cpr->cp_desc_ring = NULL;
	ring->ring_mem.pg_arr = NULL;
	kfree(cpr->cp_desc_mapping);
	cpr->cp_desc_mapping = NULL;
	ring->ring_mem.dma_arr = NULL;

/* bnxt_alloc_cp_arrays() */
	cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL);
	if (!cpr->cp_desc_ring)
		return -ENOMEM;
	cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping),
				       GFP_KERNEL);
	if (!cpr->cp_desc_mapping)
		return -ENOMEM;

/* bnxt_free_all_cp_arrays() */
	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];

		if (!bnapi)
			continue;
		bnxt_free_cp_arrays(&bnapi->cp_ring);
	}

/* bnxt_alloc_all_cp_arrays() */
	int i, n = bp->cp_nr_pages;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];

		if (!bnapi)
			continue;
		rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
/* bnxt_free_cp_rings() */
	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];

		cpr = &bnapi->cp_ring;
		ring = &cpr->cp_ring_struct;

		bnxt_free_ring(bp, &ring->ring_mem);

		if (!cpr->cp_ring_arr)
			continue;

		for (j = 0; j < cpr->cp_ring_count; j++) {
			struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];

			ring = &cpr2->cp_ring_struct;
			bnxt_free_ring(bp, &ring->ring_mem);
		}
		kfree(cpr->cp_ring_arr);
		cpr->cp_ring_arr = NULL;
		cpr->cp_ring_count = 0;
	}

/* bnxt_alloc_cp_sub_ring() */
	rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
	if (rc)
		return -ENOMEM;

	ring = &cpr->cp_ring_struct;
	rmem = &ring->ring_mem;
	rmem->nr_pages = bp->cp_nr_pages;
	rmem->page_size = HW_CMPD_RING_SIZE;
	rmem->pg_arr = (void **)cpr->cp_desc_ring;
	rmem->dma_arr = cpr->cp_desc_mapping;
	rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
/* bnxt_alloc_cp_rings() */
	bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
	int tcs = bp->num_tc;

	for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		int rx = 0, tx = 0;

		cpr = &bnapi->cp_ring;
		cpr->bnapi = bnapi;
		ring = &cpr->cp_ring_struct;

		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
		if (rc)
			return rc;

			ring->map_idx = i + ulp_msix;
		else
			ring->map_idx = i;

		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
			continue;

		if (i < bp->rx_nr_rings) {
			cp_count++;
			rx = 1;
		}
		if (i < bp->tx_nr_rings_xdp) {
			cp_count++;
			tx = 1;
		} else if ((sh && i < bp->tx_nr_rings) ||
			   (!sh && i >= bp->rx_nr_rings)) {
			cp_count++;
			tx = 1;
		}

		cpr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr),
					   GFP_KERNEL);
		if (!cpr->cp_ring_arr)
			return -ENOMEM;
		cpr->cp_ring_count = cp_count;

		for (k = 0; k < cp_count; k++) {
			cpr2 = &cpr->cp_ring_arr[k];

			cpr2->bnapi = bnapi;
			cpr2->cp_idx = k;
			if (!k && rx) {
				bp->rx_ring[i].rx_cpr = cpr2;
				cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_RX;
			} else {
				int n, tc = k - rx;

				n = BNXT_TC_TO_RING_BASE(bp, tc) + j;
				bp->tx_ring[n].tx_cpr = cpr2;
				cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_TX;
			}
		}
		if (tx)
			j++;
	}
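/*
 * On P5+ chips each NAPI context owns a notification queue, and the
 * kcalloc'd cp_ring_arr[] above holds its completion sub-rings: entry 0
 * is the RX completion ring when the NQ services an RX ring, followed by
 * one TX completion ring per traffic class.  This is where the rx_cpr
 * and tx_cpr back-pointers used by the datapath are wired up.
 */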
/* bnxt_init_ring_struct() */
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];

		cpr = &bnapi->cp_ring;
		ring = &cpr->cp_ring_struct;
		rmem = &ring->ring_mem;
		rmem->nr_pages = bp->cp_nr_pages;
		rmem->page_size = HW_CMPD_RING_SIZE;
		rmem->pg_arr = (void **)cpr->cp_desc_ring;
		rmem->dma_arr = cpr->cp_desc_mapping;
		rmem->vmem_size = 0;

		rxr = bnapi->rx_ring;

		ring = &rxr->rx_ring_struct;
		rmem = &ring->ring_mem;
		rmem->nr_pages = bp->rx_nr_pages;
		rmem->page_size = HW_RXBD_RING_SIZE;
		rmem->pg_arr = (void **)rxr->rx_desc_ring;
		rmem->dma_arr = rxr->rx_desc_mapping;
		rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
		rmem->vmem = (void **)&rxr->rx_buf_ring;

		ring = &rxr->rx_agg_ring_struct;
		rmem = &ring->ring_mem;
		rmem->nr_pages = bp->rx_agg_nr_pages;
		rmem->page_size = HW_RXBD_RING_SIZE;
		rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
		rmem->dma_arr = rxr->rx_agg_desc_mapping;
		rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
		rmem->vmem = (void **)&rxr->rx_agg_ring;

		ring = &txr->tx_ring_struct;
		rmem = &ring->ring_mem;
		rmem->nr_pages = bp->tx_nr_pages;
		rmem->page_size = HW_TXBD_RING_SIZE;
		rmem->pg_arr = (void **)txr->tx_desc_ring;
		rmem->dma_arr = txr->tx_desc_mapping;
		rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
		rmem->vmem = (void **)&txr->tx_buf_ring;
	}
/* bnxt_init_rxbd_pages() */
	rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
	for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {

			rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
			rxbd->rx_bd_opaque = prod;

/* bnxt_alloc_one_rx_ring() */
	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
	struct net_device *dev = bp->dev;

	prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
				    ring_nr, i, bp->rx_ring_size);

	rxr->rx_prod = prod;

	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
		return 0;

	prod = rxr->rx_agg_prod;
	for (i = 0; i < bp->rx_agg_ring_size; i++) {
				    ring_nr, i, bp->rx_ring_size);

	rxr->rx_agg_prod = prod;

	if (rxr->rx_tpa) {
		dma_addr_t mapping;
		u8 *data;

		for (i = 0; i < bp->max_tpa; i++) {
			if (!data)
				return -ENOMEM;

			rxr->rx_tpa[i].data = data;
			rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
			rxr->rx_tpa[i].mapping = mapping;
		}
	}
/* bnxt_init_one_rx_ring() */
	type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
	       RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;

	rxr = &bp->rx_ring[ring_nr];
	ring = &rxr->rx_ring_struct;

	netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
			     &rxr->bnapi->napi);

	if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
		bpf_prog_add(bp->xdp_prog, 1);
		rxr->xdp_prog = bp->xdp_prog;
	}
	ring->fw_ring_id = INVALID_HW_RING_ID;

	ring = &rxr->rx_agg_ring_struct;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {

/* bnxt_init_cp_rings() */
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;

		ring->fw_ring_id = INVALID_HW_RING_ID;
		cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
		cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
		if (!cpr->cp_ring_arr)
			continue;
		for (j = 0; j < cpr->cp_ring_count; j++) {
			struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];

			ring = &cpr2->cp_ring_struct;
			ring->fw_ring_id = INVALID_HW_RING_ID;
			cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
			cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
		}
	}
/* bnxt_init_rx_rings() */
	if (BNXT_RX_PAGE_MODE(bp)) {
		bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
		bp->rx_dma_offset = XDP_PACKET_HEADROOM;
	} else {
		bp->rx_offset = BNXT_RX_OFFSET;
		bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
	}

	for (i = 0; i < bp->rx_nr_rings; i++) {
		rc = bnxt_init_one_rx_ring(bp, i);
		if (rc)
			return rc;
	}

/* bnxt_init_tx_rings() */
	bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
				   BNXT_MIN_TX_DESC_CNT);

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;

		ring->fw_ring_id = INVALID_HW_RING_ID;

		if (i >= bp->tx_nr_rings_xdp)
			netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp,
					     NETDEV_QUEUE_TYPE_TX,
					     &txr->bnapi->napi);
	}

/* bnxt_free_ring_grps() */
	kfree(bp->grp_info);
	bp->grp_info = NULL;

/* bnxt_init_ring_grps() */
	bp->grp_info = kcalloc(bp->cp_nr_rings,
			       sizeof(struct bnxt_ring_grp_info),
			       GFP_KERNEL);
	if (!bp->grp_info)
		return -ENOMEM;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
		bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
		bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
		bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
	}
/* bnxt_free_vnics() */
	kfree(bp->vnic_info);
	bp->vnic_info = NULL;
	bp->nr_vnics = 0;

/* bnxt_alloc_vnics() */
	if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5_PLUS)) == BNXT_FLAG_RFS)
		num_vnics += bp->rx_nr_rings;

	bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
				GFP_KERNEL);
	if (!bp->vnic_info)
		return -ENOMEM;

	bp->nr_vnics = num_vnics;

/* bnxt_init_vnics() */
	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		vnic->fw_vnic_id = INVALID_HW_RING_ID;

			vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;

		vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;

		if (bp->vnic_info[i].rss_hash_key) {
				u8 *key = (void *)vnic->rss_hash_key;
				int k;

				bp->toeplitz_prefix = 0;
				get_random_bytes(vnic->rss_hash_key,
						 HW_HASH_KEY_SIZE);
				for (k = 0; k < 8; k++) {
					bp->toeplitz_prefix <<= 8;
					bp->toeplitz_prefix |= key[k];
				}
			} else {
				memcpy(vnic->rss_hash_key,
				       bp->vnic_info[0].rss_hash_key,
				       HW_HASH_KEY_SIZE);
			}
/* bnxt_calc_nr_ring_pages() */
	while (pages & (pages - 1))
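/*
 * Illustrative sketch (not driver code, assumptions noted): what the
 * rounding loop above achieves.  The page count is forced up to a power
 * of two so the "idx & ring_mask" arithmetic used throughout the driver
 * works.  For example, on 4 KiB pages with 16-byte RX BDs (256
 * descriptors per page), a 1024-entry ring needs 4 pages and the mask
 * is 4 * 256 - 1 = 1023.
 */
static unsigned int calc_nr_ring_pages(unsigned int ring_size,
				       unsigned int desc_per_pg)
{
	unsigned int pages = (ring_size + desc_per_pg - 1) / desc_per_pg;

	while (pages & (pages - 1))	/* not yet a power of two */
		pages++;
	return pages;
}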
/* bnxt_set_tpa_flags() */
	bp->flags &= ~BNXT_FLAG_TPA;
	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
		return;
	if (bp->dev->features & NETIF_F_LRO)
		bp->flags |= BNXT_FLAG_LRO;
	else if (bp->dev->features & NETIF_F_GRO_HW)
		bp->flags |= BNXT_FLAG_GRO;
/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
 * be set before calling this function.
 */

/* bnxt_set_ring_params() */
	rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);

	bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
	ring_size = bp->rx_ring_size;
	bp->rx_agg_ring_size = 0;
	bp->rx_agg_nr_pages = 0;

	if (bp->flags & BNXT_FLAG_TPA)

	bp->flags &= ~BNXT_FLAG_JUMBO;
	if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {

		bp->flags |= BNXT_FLAG_JUMBO;
		jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

			netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
				    bp->rx_ring_size, ring_size);
			bp->rx_ring_size = ring_size;

		bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
							      RX_DESC_CNT);
		if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {

			bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
			agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
			netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
		}
		bp->rx_agg_ring_size = agg_ring_size;
		bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;

			rx_size = PAGE_SIZE -
				  ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -

	bp->rx_buf_use_size = rx_size;
	bp->rx_buf_size = rx_space;

	bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
	bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;

	ring_size = bp->tx_ring_size;
	bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
	bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;

	max_rx_cmpl = bp->rx_ring_size;

	if (bp->flags & BNXT_FLAG_TPA)
		max_rx_cmpl += bp->max_tpa;
	/* RX and TPA completions are 32-byte, all others are 16-byte */
	ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
	bp->cp_ring_size = ring_size;

	bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
	if (bp->cp_nr_pages > MAX_CP_PAGES) {
		bp->cp_nr_pages = MAX_CP_PAGES;
		bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
		netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
			    ring_size, bp->cp_ring_size);
	}
	bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
	bp->cp_ring_mask = bp->cp_bit - 1;
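/*
 * Sizing note: RX and TPA completions occupy two 16-byte slots in the
 * completion ring while TX and aggregation completions take one, hence
 * the "max_rx_cmpl * 2" term above.  cp_ring_size is therefore a
 * worst-case count of 16-byte entries, later capped at MAX_CP_PAGES
 * worth of descriptors.
 */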
/* bnxt_set_rx_skb_mode() */
	struct net_device *dev = bp->dev;

	if (page_mode) {
		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
		bp->flags |= BNXT_FLAG_RX_PAGE_MODE;

		if (bp->xdp_prog->aux->xdp_has_frags)
			dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
		else
			dev->max_mtu =
				min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
		if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
			bp->flags |= BNXT_FLAG_JUMBO;
			bp->rx_skb_func = bnxt_rx_multi_page_skb;
		} else {
			bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
			bp->rx_skb_func = bnxt_rx_page_skb;
		}
		bp->rx_dir = DMA_BIDIRECTIONAL;
	} else {
		dev->max_mtu = bp->max_mtu;
		bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
		bp->rx_dir = DMA_FROM_DEVICE;
		bp->rx_skb_func = bnxt_rx_skb;
	}
/* bnxt_free_vnic_attributes() */
	struct pci_dev *pdev = bp->pdev;

	if (!bp->vnic_info)
		return;

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];

		kfree(vnic->fw_grp_ids);
		vnic->fw_grp_ids = NULL;

		kfree(vnic->uc_list);
		vnic->uc_list = NULL;

		if (vnic->mc_list) {
			dma_free_coherent(&pdev->dev, vnic->mc_list_size,
					  vnic->mc_list, vnic->mc_list_mapping);
			vnic->mc_list = NULL;
		}

		if (vnic->rss_table) {
			dma_free_coherent(&pdev->dev, vnic->rss_table_size,
					  vnic->rss_table,
					  vnic->rss_table_dma_addr);
			vnic->rss_table = NULL;
		}

		vnic->rss_hash_key = NULL;
		vnic->flags = 0;
	}

/* bnxt_alloc_vnic_attributes() */
	struct pci_dev *pdev = bp->pdev;

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];

		if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
			int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;

			vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
			if (!vnic->uc_list) {
				rc = -ENOMEM;
				goto out;
			}
		}

		if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
			vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
			vnic->mc_list =
				dma_alloc_coherent(&pdev->dev,
						   vnic->mc_list_size,
						   &vnic->mc_list_mapping,
						   GFP_KERNEL);
			if (!vnic->mc_list) {
				rc = -ENOMEM;
				goto out;
			}
		}

		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)

		if (vnic->flags & BNXT_VNIC_RSS_FLAG)
			max_rings = bp->rx_nr_rings;
		else
			max_rings = 1;

		vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
		if (!vnic->fw_grp_ids) {
			rc = -ENOMEM;
			goto out;
		}

		if ((bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) &&
		    !(vnic->flags & BNXT_VNIC_RSS_FLAG))
			continue;

		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)

		vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
		vnic->rss_table = dma_alloc_coherent(&pdev->dev,
						     vnic->rss_table_size,
						     &vnic->rss_table_dma_addr,
						     GFP_KERNEL);
		if (!vnic->rss_table) {
			rc = -ENOMEM;
			goto out;
		}

		vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
		vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
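/*
 * The RSS indirection table and the Toeplitz hash key share a single
 * coherent allocation: rss_table_size covers the table itself plus
 * HW_HASH_KEY_SIZE bytes, so rss_hash_key and rss_hash_key_dma_addr are
 * simply the table's virtual/DMA addresses offset by the table size.
 */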
/* bnxt_free_hwrm_resources() */
	dma_pool_destroy(bp->hwrm_dma_pool);
	bp->hwrm_dma_pool = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
		WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
	rcu_read_unlock();

/* bnxt_alloc_hwrm_resources() */
	bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,

	if (!bp->hwrm_dma_pool)
		return -ENOMEM;

	INIT_HLIST_HEAD(&bp->hwrm_pending_list);

/* bnxt_free_stats_mem() */
	kfree(stats->hw_masks);
	stats->hw_masks = NULL;
	kfree(stats->sw_stats);
	stats->sw_stats = NULL;
	if (stats->hw_stats) {
		dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
				  stats->hw_stats_map);
		stats->hw_stats = NULL;
	}

/* bnxt_alloc_stats_mem() */
	stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
					     &stats->hw_stats_map, GFP_KERNEL);
	if (!stats->hw_stats)
		return -ENOMEM;

	stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
	if (!stats->sw_stats)
		goto stats_mem_err;

		stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
		if (!stats->hw_masks)
			goto stats_mem_err;

stats_mem_err:
	bnxt_free_stats_mem(bp, stats);
	return -ENOMEM;
/* bnxt_hwrm_func_qstat_ext() */
	if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
	    !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
		return -EOPNOTSUPP;

	req->fid = cpu_to_le16(0xffff);
	req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;

	hw_masks = &resp->rx_ucast_pkts;
	bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);

/* bnxt_init_stats() */
	struct bnxt_napi *bnapi = bp->bnapi[0];

	cpr = &bnapi->cp_ring;
	stats = &cpr->stats;

	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		mask = (1ULL << 48) - 1;
	else
		mask = -1ULL;
	bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);

	if (bp->flags & BNXT_FLAG_PORT_STATS) {
		stats = &bp->port_stats;
		rx_stats = stats->hw_stats;
		rx_masks = stats->hw_masks;

		mask = (1ULL << 40) - 1;

	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
		stats = &bp->rx_port_stats_ext;
		rx_stats = stats->hw_stats;
		rx_masks = stats->hw_masks;

		stats = &bp->tx_port_stats_ext;
		tx_stats = stats->hw_stats;
		tx_masks = stats->hw_masks;

		mask = (1ULL << 40) - 1;
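/*
 * The masks built here track counter widths so rollover can be handled
 * in software: per-ring hardware counters are 48 bits wide on P5+ chips
 * (full 64 bits otherwise), while several port counters are only 40
 * bits wide, as reflected by the (1ULL << 40) - 1 masks above.
 */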
/* bnxt_free_port_stats() */
	bp->flags &= ~BNXT_FLAG_PORT_STATS;
	bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;

	bnxt_free_stats_mem(bp, &bp->port_stats);
	bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
	bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);

/* bnxt_free_ring_stats() */
	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		bnxt_free_stats_mem(bp, &cpr->stats);
	}

/* bnxt_alloc_stats() */
	size = bp->hw_ring_stats_size;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		cpr->stats.len = size;
		rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
		if (rc)
			return rc;

		cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
	}

	if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
		return 0;

	if (bp->port_stats.hw_stats)
		goto alloc_ext_stats;

	bp->port_stats.len = BNXT_PORT_STATS_SIZE;
	rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
	if (rc)
		return rc;

	bp->flags |= BNXT_FLAG_PORT_STATS;

alloc_ext_stats:
	if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
		if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
			return 0;

	if (bp->rx_port_stats_ext.hw_stats)
		goto alloc_tx_ext_stats;

	bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
	rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
	if (rc)
		return rc;

alloc_tx_ext_stats:
	if (bp->tx_port_stats_ext.hw_stats)
		return 0;

	if (bp->hwrm_spec_code >= 0x10902 ||
	    (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
		bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
		rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
		if (rc)
			return rc;
	}
	bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
/* bnxt_clear_ring_indices() */
	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];

		cpr = &bnapi->cp_ring;
		cpr->cp_raw_cons = 0;

			txr->tx_prod = 0;
			txr->tx_cons = 0;
			txr->tx_hw_cons = 0;

		rxr = bnapi->rx_ring;

			rxr->rx_prod = 0;
			rxr->rx_agg_prod = 0;
			rxr->rx_sw_agg_prod = 0;
			rxr->rx_next_cons = 0;

		bnapi->events = 0;
	}
/* bnxt_free_ntp_fltrs() */
		head = &bp->ntp_fltr_hash_tbl[i];

			bnxt_del_l2_filter(bp, fltr->l2_fltr);
			if (!all && (fltr->base.flags & BNXT_ACT_FUNC_DST))
				continue;
			hlist_del(&fltr->base.hash);
			clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
			bp->ntp_fltr_count--;

	if (!all)
		return;

	bitmap_free(bp->ntp_fltr_bmap);
	bp->ntp_fltr_bmap = NULL;
	bp->ntp_fltr_count = 0;

/* bnxt_alloc_ntp_fltrs() */
	if (!(bp->flags & BNXT_FLAG_RFS) || bp->ntp_fltr_bmap)
		return 0;

		INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);

	bp->ntp_fltr_count = 0;
	bp->ntp_fltr_bmap = bitmap_zalloc(BNXT_MAX_FLTR, GFP_KERNEL);

	if (!bp->ntp_fltr_bmap)
		rc = -ENOMEM;

/* bnxt_free_l2_filters() */
		head = &bp->l2_fltr_hash_tbl[i];

			if (!all && (fltr->base.flags & BNXT_ACT_FUNC_DST))
				continue;
			hlist_del(&fltr->base.hash);
			if (fltr->base.flags) {
				clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
				bp->ntp_fltr_count--;
			}

/* bnxt_init_l2_fltr_tbl() */
		INIT_HLIST_HEAD(&bp->l2_fltr_hash_tbl[i]);
	get_random_bytes(&bp->hash_seed, sizeof(bp->hash_seed));
/* bnxt_free_mem() */
	if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
	    test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
		bnxt_free_port_stats(bp);

	kfree(bp->tx_ring_map);
	bp->tx_ring_map = NULL;
	kfree(bp->tx_ring);
	bp->tx_ring = NULL;
	kfree(bp->rx_ring);
	bp->rx_ring = NULL;
	kfree(bp->bnapi);
	bp->bnapi = NULL;

/* bnxt_alloc_mem() */
	arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
				  bp->cp_nr_rings);

	bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
	if (!bnapi)
		return -ENOMEM;

	bp->bnapi = bnapi;
	bnapi += arr_size;
	for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
		bp->bnapi[i] = bnapi;
		bp->bnapi[i]->index = i;
		bp->bnapi[i]->bp = bp;
		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
			struct bnxt_cp_ring_info *cpr =
				&bp->bnapi[i]->cp_ring;

			cpr->cp_ring_struct.ring_mem.flags =
				BNXT_RMEM_RING_PTE_FLAG;
		}
	}

	bp->rx_ring = kcalloc(bp->rx_nr_rings,
			      sizeof(struct bnxt_rx_ring_info),
			      GFP_KERNEL);
	if (!bp->rx_ring)
		return -ENOMEM;

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];

		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
			rxr->rx_ring_struct.ring_mem.flags =
				BNXT_RMEM_RING_PTE_FLAG;
			rxr->rx_agg_ring_struct.ring_mem.flags =
				BNXT_RMEM_RING_PTE_FLAG;
		} else {
			rxr->rx_cpr = &bp->bnapi[i]->cp_ring;
		}
		rxr->bnapi = bp->bnapi[i];
		bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
	}

	bp->tx_ring = kcalloc(bp->tx_nr_rings,
			      sizeof(struct bnxt_tx_ring_info),
			      GFP_KERNEL);
	if (!bp->tx_ring)
		return -ENOMEM;

	bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
				  GFP_KERNEL);
	if (!bp->tx_ring_map)
		return -ENOMEM;

	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
		j = 0;
	else
		j = bp->rx_nr_rings;

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_napi *bnapi2;

		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
			txr->tx_ring_struct.ring_mem.flags =
				BNXT_RMEM_RING_PTE_FLAG;
		bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
		if (i >= bp->tx_nr_rings_xdp) {
			int k = j + BNXT_RING_TO_TC_OFF(bp, i);

			bnapi2 = bp->bnapi[k];
			txr->txq_index = i - bp->tx_nr_rings_xdp;
			txr->tx_napi_idx =
				BNXT_RING_TO_TC(bp, txr->txq_index);
			bnapi2->tx_ring[txr->tx_napi_idx] = txr;
			bnapi2->tx_int = bnxt_tx_int;
		} else {
			bnapi2 = bp->bnapi[j];
			bnapi2->flags |= BNXT_NAPI_FLAG_XDP;
			bnapi2->tx_ring[0] = txr;
			bnapi2->tx_int = bnxt_tx_int_xdp;
			j++;
		}
		txr->bnapi = bnapi2;
		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
			txr->tx_cpr = &bnapi2->cp_ring;
	}

	bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
				  BNXT_VNIC_UCAST_FLAG;
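/*
 * bnxt_alloc_mem() wiring summary: one bnxt_napi per completion ring,
 * RX ring i paired with bnapi[i], and TX rings split into XDP rings
 * (which share an RX ring's bnapi and use bnxt_tx_int_xdp) and stack
 * rings (indexed by txq_index/tx_napi_idx and serviced by bnxt_tx_int).
 * vnic_info[0] is the default VNIC that does RSS plus mcast/ucast
 * filtering.
 */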
/* bnxt_disable_int() */
	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;

		if (ring->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
	}

/* bnxt_cp_num_to_irq_num() */
	struct bnxt_napi *bnapi = bp->bnapi[n];

	cpr = &bnapi->cp_ring;
	return cpr->cp_ring_struct.map_idx;

/* bnxt_disable_int_sync() */
	if (!bp->irq_tbl)
		return;

	atomic_inc(&bp->intr_sem);

	for (i = 0; i < bp->cp_nr_rings; i++) {
		int map_idx = bnxt_cp_num_to_irq_num(bp, i);

		synchronize_irq(bp->irq_tbl[map_idx].vector);
	}

/* bnxt_enable_int() */
	atomic_set(&bp->intr_sem, 0);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
	}
/* bnxt_hwrm_func_drv_rgtr() */
	req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
				   FUNC_DRV_RGTR_REQ_ENABLES_VER |
				   FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);

	req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);

	if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)

	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)

	req->flags = cpu_to_le32(flags);
	req->ver_maj_8b = DRV_VER_MAJ;
	req->ver_min_8b = DRV_VER_MIN;
	req->ver_upd_8b = DRV_VER_UPD;
	req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
	req->ver_min = cpu_to_le16(DRV_VER_MIN);
	req->ver_upd = cpu_to_le16(DRV_VER_UPD);

			req->vf_req_fwd[i] = cpu_to_le32(data[i]);

		req->enables |=
			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);

	if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
		req->flags |= cpu_to_le32(

			    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
				continue;
			    !bp->ptp_cfg)
				continue;

			req->async_event_fwd[i] |= cpu_to_le32(events[i]);

		req->enables =
			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);

	if (!rc) {
		set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
		if (resp->flags &
		    cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
			bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
	}
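/*
 * FUNC_DRV_RGTR registers the driver with firmware: it advertises the
 * OS type and driver version, forwards a bitmap of VF request opcodes
 * the PF is willing to proxy, and subscribes to async events (error
 * recovery and PHC updates are only requested when the matching
 * capability or PTP support is present).  The response tells the driver
 * whether the firmware supports IF_CHANGE notifications.
 */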
/* bnxt_hwrm_func_drv_unrgtr() */
	if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
		return 0;

/* bnxt_hwrm_tunnel_dst_port_free() */
	    bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
		return 0;
	    bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
		return 0;

	req->tunnel_type = tunnel_type;

	switch (tunnel_type) {
	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
		req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
		bp->vxlan_port = 0;
		bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
		break;
	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
		req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
		bp->nge_port = 0;
		bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
		break;
	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE:
		req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_gpe_fw_dst_port_id);
		bp->vxlan_gpe_port = 0;
		bp->vxlan_gpe_fw_dst_port_id = INVALID_HW_RING_ID;
		break;
	}

	if (rc)
		netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
			   rc);
	if (bp->flags & BNXT_FLAG_TPA)

/* bnxt_hwrm_tunnel_dst_port_alloc() */
	req->tunnel_type = tunnel_type;
	req->tunnel_dst_port_val = port;

	if (rc) {
		netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
			   rc);

	switch (tunnel_type) {
	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
		bp->vxlan_port = port;
		bp->vxlan_fw_dst_port_id =
			le16_to_cpu(resp->tunnel_dst_port_id);
		break;
	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
		bp->nge_port = port;
		bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
		break;
	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE:
		bp->vxlan_gpe_port = port;
		bp->vxlan_gpe_fw_dst_port_id =
			le16_to_cpu(resp->tunnel_dst_port_id);
		break;
	}

	if (bp->flags & BNXT_FLAG_TPA)
/* bnxt_hwrm_cfa_l2_set_rx_mask() */
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];

	req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
	if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
		req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
		req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
	}
	req->mask = cpu_to_le32(vnic->rx_mask);

/* bnxt_del_l2_filter() */
	if (!atomic_dec_and_test(&fltr->refcnt))
		return;
	spin_lock_bh(&bp->ntp_fltr_lock);
	if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
		spin_unlock_bh(&bp->ntp_fltr_lock);
		return;
	}
	hlist_del_rcu(&fltr->base.hash);
	if (fltr->base.flags) {
		clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
		bp->ntp_fltr_count--;
	}
	spin_unlock_bh(&bp->ntp_fltr_lock);

/* __bnxt_lookup_l2_filter() */
	struct hlist_head *head = &bp->l2_fltr_hash_tbl[idx];

		struct bnxt_l2_key *l2_key = &fltr->l2_key;

		if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) &&
		    l2_key->vlan == key->vlan)
			return fltr;

/* bnxt_lookup_l2_filter() */
		atomic_inc(&fltr->refcnt);
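/*
 * L2 filter lookup: filters are hashed with jhash2() over the packed
 * {MAC, VLAN} key into l2_fltr_hash_tbl[] and walked under RCU; a hit
 * takes a reference (atomic_inc(&fltr->refcnt)) which
 * bnxt_del_l2_filter() later drops, only tearing the filter down on the
 * final put.
 */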
#define BNXT_IPV4_4TUPLE(bp, fkeys)					\
	(((fkeys)->basic.ip_proto == IPPROTO_TCP &&			\
	  (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) ||	\
	 ((fkeys)->basic.ip_proto == IPPROTO_UDP &&			\
	  (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4))

#define BNXT_IPV6_4TUPLE(bp, fkeys)					\
	(((fkeys)->basic.ip_proto == IPPROTO_TCP &&			\
	  (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) ||	\
	 ((fkeys)->basic.ip_proto == IPPROTO_UDP &&			\
	  (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6))

/* bnxt_get_rss_flow_tuple_len() */
	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
		if (BNXT_IPV4_4TUPLE(bp, fkeys))
			return sizeof(fkeys->addrs.v4addrs) +
			       sizeof(fkeys->ports);

		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
			return sizeof(fkeys->addrs.v4addrs);
	}

	if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
		if (BNXT_IPV6_4TUPLE(bp, fkeys))
			return sizeof(fkeys->addrs.v6addrs) +
			       sizeof(fkeys->ports);

		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
			return sizeof(fkeys->addrs.v6addrs);
	}
/* bnxt_toeplitz() */
	u64 prefix = bp->toeplitz_prefix, hash = 0;

	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
		tuple4.v4addrs = fkeys->addrs.v4addrs;
		tuple4.ports = fkeys->ports;
	} else {
		tuple6.v6addrs = fkeys->addrs.v6addrs;
		tuple6.ports = fkeys->ports;
	}
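/*
 * Reference sketch (an assumption, not the driver's exact code) of the
 * textbook Toeplitz RSS hash that bnxt_toeplitz() computes: the driver
 * slides a precomputed 64-bit key prefix (bp->toeplitz_prefix), while
 * this 32-bit-window form is the classic equivalent.  "key" must be at
 * least len + 4 bytes, e.g. the 40-byte RSS key for a 36-byte IPv6
 * 4-tuple.
 */
static unsigned int toeplitz_hash(const unsigned char *key,
				  const unsigned char *data, int len)
{
	/* 32-bit window over the key, preloaded with key bits 0..31 */
	unsigned int window = (key[0] << 24) | (key[1] << 16) |
			      (key[2] << 8) | key[3];
	unsigned int hash = 0;
	int i, b;

	for (i = 0; i < len; i++) {
		for (b = 7; b >= 0; b--) {
			if (data[i] & (1 << b))
				hash ^= window;
			/* slide one bit, pulling in the next key bit */
			window = (window << 1) | ((key[i + 4] >> b) & 1);
		}
	}
	return hash;
}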
/* bnxt_lookup_l2_filter_from_key() */
	idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
	      BNXT_L2_FLTR_HASH_MASK;

/* bnxt_init_l2_filter() */
	ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr);
	fltr->l2_key.vlan = key->vlan;
	fltr->base.type = BNXT_FLTR_TYPE_L2;
	if (fltr->base.flags) {
		int bit_id;

		bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
						 BNXT_MAX_FLTR, 0);
		if (bit_id < 0)
			return -ENOMEM;
		fltr->base.sw_id = (u16)bit_id;
	}
	head = &bp->l2_fltr_hash_tbl[idx];
	hlist_add_head_rcu(&fltr->base.hash, head);
	set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
	atomic_set(&fltr->refcnt, 1);

/* bnxt_alloc_l2_filter() */
	idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
	      BNXT_L2_FLTR_HASH_MASK;

	if (!fltr)
		return ERR_PTR(-ENOMEM);
	spin_lock_bh(&bp->ntp_fltr_lock);
	rc = bnxt_init_l2_filter(bp, fltr, key, idx);
	spin_unlock_bh(&bp->ntp_fltr_lock);

/* bnxt_vf_target_id() */
	struct bnxt_vf_info *vf = &pf->vf[vf_idx];

	return vf->fw_fid;

/* bnxt_hwrm_l2_filter_free() */
	if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
		struct bnxt_pf_info *pf = &bp->pf;

		if (fltr->base.vf_idx >= pf->active_vfs)
			return -EINVAL;

		target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
		if (target_id == INVALID_HW_RING_ID)
			return -EINVAL;
	}

	req->target_id = cpu_to_le16(target_id);
	req->l2_filter_id = fltr->base.filter_id;
/* bnxt_hwrm_l2_filter_alloc() */
	if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
		struct bnxt_pf_info *pf = &bp->pf;

		if (fltr->base.vf_idx >= pf->active_vfs)
			return -EINVAL;

		target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
	}

	req->target_id = cpu_to_le16(target_id);
	req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);

		req->flags |=
			cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
	req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id);
	req->enables =

	ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr);
	eth_broadcast_addr(req->l2_addr_mask);

	if (fltr->l2_key.vlan) {
		req->enables |=

		req->num_vlans = 1;
		req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan);
		req->l2_ivlan_mask = cpu_to_le16(0xfff);
	}

	if (!rc) {
		fltr->base.filter_id = resp->l2_filter_id;
		set_bit(BNXT_FLTR_VALID, &fltr->base.state);
	}

/* bnxt_hwrm_cfa_ntuple_filter_free() */
	set_bit(BNXT_FLTR_FW_DELETED, &fltr->base.state);

	req->ntuple_filter_id = fltr->base.filter_id;
5658 struct flow_keys *keys = &fltr->fkeys; in bnxt_hwrm_cfa_ntuple_filter_alloc()
5668 l2_fltr = fltr->l2_fltr; in bnxt_hwrm_cfa_ntuple_filter_alloc()
5669 req->l2_filter_id = l2_fltr->base.filter_id; in bnxt_hwrm_cfa_ntuple_filter_alloc()
5672 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) { in bnxt_hwrm_cfa_ntuple_filter_alloc()
5674 req->dst_id = cpu_to_le16(fltr->base.rxq); in bnxt_hwrm_cfa_ntuple_filter_alloc()
5676 vnic = &bp->vnic_info[fltr->base.rxq + 1]; in bnxt_hwrm_cfa_ntuple_filter_alloc()
5677 req->dst_id = cpu_to_le16(vnic->fw_vnic_id); in bnxt_hwrm_cfa_ntuple_filter_alloc()
5679 req->flags = cpu_to_le32(flags); in bnxt_hwrm_cfa_ntuple_filter_alloc()
5680 req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS); in bnxt_hwrm_cfa_ntuple_filter_alloc()
5682 req->ethertype = htons(ETH_P_IP); in bnxt_hwrm_cfa_ntuple_filter_alloc()
5683 req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; in bnxt_hwrm_cfa_ntuple_filter_alloc()
5684 req->ip_protocol = keys->basic.ip_proto; in bnxt_hwrm_cfa_ntuple_filter_alloc()
5686 if (keys->basic.n_proto == htons(ETH_P_IPV6)) { in bnxt_hwrm_cfa_ntuple_filter_alloc()
5687 req->ethertype = htons(ETH_P_IPV6); in bnxt_hwrm_cfa_ntuple_filter_alloc()
5688 req->ip_addr_type = in bnxt_hwrm_cfa_ntuple_filter_alloc()
5690 if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) { in bnxt_hwrm_cfa_ntuple_filter_alloc()
5691 *(struct in6_addr *)&req->src_ipaddr[0] = in bnxt_hwrm_cfa_ntuple_filter_alloc()
5692 keys->addrs.v6addrs.src; in bnxt_hwrm_cfa_ntuple_filter_alloc()
5693 bnxt_fill_ipv6_mask(req->src_ipaddr_mask); in bnxt_hwrm_cfa_ntuple_filter_alloc()
5695 if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) { in bnxt_hwrm_cfa_ntuple_filter_alloc()
5696 *(struct in6_addr *)&req->dst_ipaddr[0] = in bnxt_hwrm_cfa_ntuple_filter_alloc()
5697 keys->addrs.v6addrs.dst; in bnxt_hwrm_cfa_ntuple_filter_alloc()
5698 bnxt_fill_ipv6_mask(req->dst_ipaddr_mask); in bnxt_hwrm_cfa_ntuple_filter_alloc()
5701 if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) { in bnxt_hwrm_cfa_ntuple_filter_alloc()
5702 req->src_ipaddr[0] = keys->addrs.v4addrs.src; in bnxt_hwrm_cfa_ntuple_filter_alloc()
5703 req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff); in bnxt_hwrm_cfa_ntuple_filter_alloc()
5705 if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) { in bnxt_hwrm_cfa_ntuple_filter_alloc()
5706 req->dst_ipaddr[0] = keys->addrs.v4addrs.dst; in bnxt_hwrm_cfa_ntuple_filter_alloc()
5707 req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff); in bnxt_hwrm_cfa_ntuple_filter_alloc()
5710 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) { in bnxt_hwrm_cfa_ntuple_filter_alloc()
5711 req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG); in bnxt_hwrm_cfa_ntuple_filter_alloc()
5712 req->tunnel_type = in bnxt_hwrm_cfa_ntuple_filter_alloc()
5716 if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) { in bnxt_hwrm_cfa_ntuple_filter_alloc()
5717 req->src_port = keys->ports.src; in bnxt_hwrm_cfa_ntuple_filter_alloc()
5718 req->src_port_mask = cpu_to_be16(0xffff); in bnxt_hwrm_cfa_ntuple_filter_alloc()
5720 if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) { in bnxt_hwrm_cfa_ntuple_filter_alloc()
5721 req->dst_port = keys->ports.dst; in bnxt_hwrm_cfa_ntuple_filter_alloc()
5722 req->dst_port_mask = cpu_to_be16(0xffff); in bnxt_hwrm_cfa_ntuple_filter_alloc()
5728 fltr->base.filter_id = resp->ntuple_filter_id; in bnxt_hwrm_cfa_ntuple_filter_alloc()
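In bnxt_hwrm_cfa_ntuple_filter_alloc(), each BNXT_NTUPLE_MATCH_* flag arms one field of the request with an all-ones mask; a field left unset keeps a zero mask and matches any value. A small sketch of that flags-to-masks pattern, with made-up structure and flag names:

#include <stdint.h>
#include <stdio.h>

enum { MATCH_SRC_IP = 1, MATCH_DST_IP = 2, MATCH_SRC_PORT = 4, MATCH_DST_PORT = 8 };

struct rule {
        uint32_t src_ip, src_ip_mask;
        uint32_t dst_ip, dst_ip_mask;
        uint16_t src_port, src_port_mask;
        uint16_t dst_port, dst_port_mask;
};

static void fill_rule(struct rule *r, unsigned int flags, uint32_t sip,
                      uint32_t dip, uint16_t sport, uint16_t dport)
{
        if (flags & MATCH_SRC_IP)   { r->src_ip = sip;     r->src_ip_mask = 0xffffffff; }
        if (flags & MATCH_DST_IP)   { r->dst_ip = dip;     r->dst_ip_mask = 0xffffffff; }
        if (flags & MATCH_SRC_PORT) { r->src_port = sport; r->src_port_mask = 0xffff; }
        if (flags & MATCH_DST_PORT) { r->dst_port = dport; r->dst_port_mask = 0xffff; }
        /* a zero mask is a wildcard: the hardware ignores that field */
}

int main(void)
{
        struct rule r = { 0 };

        fill_rule(&r, MATCH_DST_IP | MATCH_DST_PORT, 0, 0x0a000001, 0, 443);
        printf("dst 0x%08x/%08x port %u/0x%04x\n",
               r.dst_ip, r.dst_ip_mask, r.dst_port, r.dst_port_mask);
        return 0;
}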
5746 fltr->base.fw_vnic_id = bp->vnic_info[vnic_id].fw_vnic_id; in bnxt_hwrm_set_vnic_filter()
5751 bp->vnic_info[vnic_id].l2_filters[idx] = fltr; in bnxt_hwrm_set_vnic_filter()
5761 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; in bnxt_hwrm_clear_vnic_filter()
5763 for (j = 0; j < vnic->uc_filter_count; j++) { in bnxt_hwrm_clear_vnic_filter()
5764 struct bnxt_l2_filter *fltr = vnic->l2_filters[j]; in bnxt_hwrm_clear_vnic_filter()
5769 vnic->uc_filter_count = 0; in bnxt_hwrm_clear_vnic_filter()
5783 if (!(bp->fw_cap & BNXT_FW_CAP_VNIC_TUNNEL_TPA)) in bnxt_hwrm_vnic_update_tunl_tpa()
5786 if (bp->vxlan_port) in bnxt_hwrm_vnic_update_tunl_tpa()
5788 if (bp->vxlan_gpe_port) in bnxt_hwrm_vnic_update_tunl_tpa()
5790 if (bp->nge_port) in bnxt_hwrm_vnic_update_tunl_tpa()
5793 req->enables |= cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN); in bnxt_hwrm_vnic_update_tunl_tpa()
5794 req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap); in bnxt_hwrm_vnic_update_tunl_tpa()
5799 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; in bnxt_hwrm_vnic_set_tpa()
5804 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) in bnxt_hwrm_vnic_set_tpa()
5812 u16 mss = bp->dev->mtu - 40; in bnxt_hwrm_vnic_set_tpa()
5823 req->flags = cpu_to_le32(flags); in bnxt_hwrm_vnic_set_tpa()
5825 req->enables = in bnxt_hwrm_vnic_set_tpa()
5835 nsegs = (MAX_SKB_FRAGS - 1) * n; in bnxt_hwrm_vnic_set_tpa()
5838 if (mss & (BNXT_RX_PAGE_SIZE - 1)) in bnxt_hwrm_vnic_set_tpa()
5840 nsegs = (MAX_SKB_FRAGS - n) / n; in bnxt_hwrm_vnic_set_tpa()
5843 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in bnxt_hwrm_vnic_set_tpa()
5845 max_aggs = bp->max_tpa; in bnxt_hwrm_vnic_set_tpa()
5849 req->max_agg_segs = cpu_to_le16(segs); in bnxt_hwrm_vnic_set_tpa()
5850 req->max_aggs = cpu_to_le16(max_aggs); in bnxt_hwrm_vnic_set_tpa()
5852 req->min_agg_len = cpu_to_le32(512); in bnxt_hwrm_vnic_set_tpa()
5855 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); in bnxt_hwrm_vnic_set_tpa()
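The segment math in bnxt_hwrm_vnic_set_tpa() estimates the MSS as MTU minus 40 bytes (IPv4 + TCP headers) and derives the aggregation budget from how MSS-sized payloads pack into RX pages; the driver then converts nsegs to a log2 value for the firmware. A worked sketch that mirrors the branch structure visible above, assuming MAX_SKB_FRAGS of 17 and a 4 KiB RX page (both kernel-config dependent):

#include <stdio.h>

#define MAX_SKB_FRAGS   17      /* assumed; kernel-config dependent */
#define RX_PAGE_SIZE    4096    /* assumed BNXT_RX_PAGE_SIZE */

static int tpa_nsegs(int mtu)
{
        int mss = mtu - 40, n, nsegs;

        if (mss <= RX_PAGE_SIZE) {
                n = RX_PAGE_SIZE / mss;         /* payloads per page */
                nsegs = (MAX_SKB_FRAGS - 1) * n;
        } else {
                n = mss / RX_PAGE_SIZE;         /* pages per payload */
                if (mss & (RX_PAGE_SIZE - 1))
                        n++;                    /* round partial page up */
                nsegs = (MAX_SKB_FRAGS - n) / n;
        }
        return nsegs;
}

int main(void)
{
        printf("mtu 1500 -> nsegs %d\n", tpa_nsegs(1500)); /* 2 per page -> 32 */
        printf("mtu 9000 -> nsegs %d\n", tpa_nsegs(9000)); /* 3 pages   -> 4  */
        return 0;
}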
5864 grp_info = &bp->grp_info[ring->grp_idx]; in bnxt_cp_ring_from_grp()
5865 return grp_info->cp_fw_ring_id; in bnxt_cp_ring_from_grp()
5870 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_cp_ring_for_rx()
5871 return rxr->rx_cpr->cp_ring_struct.fw_ring_id; in bnxt_cp_ring_for_rx()
5873 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct); in bnxt_cp_ring_for_rx()
5878 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_cp_ring_for_tx()
5879 return txr->tx_cpr->cp_ring_struct.fw_ring_id; in bnxt_cp_ring_for_tx()
5881 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct); in bnxt_cp_ring_for_tx()
5888 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_alloc_rss_indir_tbl()
5893 bp->rss_indir_tbl_entries = entries; in bnxt_alloc_rss_indir_tbl()
5894 bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), in bnxt_alloc_rss_indir_tbl()
5896 if (!bp->rss_indir_tbl) in bnxt_alloc_rss_indir_tbl()
5897 return -ENOMEM; in bnxt_alloc_rss_indir_tbl()
5905 if (!bp->rx_nr_rings) in bnxt_set_dflt_rss_indir_tbl()
5909 max_rings = bp->rx_nr_rings - 1; in bnxt_set_dflt_rss_indir_tbl()
5911 max_rings = bp->rx_nr_rings; in bnxt_set_dflt_rss_indir_tbl()
5913 max_entries = bnxt_get_rxfh_indir_size(bp->dev); in bnxt_set_dflt_rss_indir_tbl()
5916 bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings); in bnxt_set_dflt_rss_indir_tbl()
5918 pad = bp->rss_indir_tbl_entries - max_entries; in bnxt_set_dflt_rss_indir_tbl()
5920 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16)); in bnxt_set_dflt_rss_indir_tbl()
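bnxt_set_dflt_rss_indir_tbl() spreads table entries round-robin across the RX rings with ethtool_rxfh_indir_default(), which is simply index modulo the ring count, and then zero-pads any unused tail. A quick sketch with hypothetical table and ring counts:

#include <stdio.h>
#include <string.h>

static unsigned int rxfh_indir_default(unsigned int index, unsigned int n_rx_rings)
{
        return index % n_rx_rings;      /* matches ethtool_rxfh_indir_default() */
}

int main(void)
{
        unsigned short tbl[16];
        unsigned int max_entries = 12, n_rings = 5;     /* hypothetical sizes */

        for (unsigned int i = 0; i < max_entries; i++)
                tbl[i] = rxfh_indir_default(i, n_rings);
        /* pad the unused tail, as the driver does */
        memset(&tbl[max_entries], 0, (16 - max_entries) * sizeof(tbl[0]));

        for (unsigned int i = 0; i < 16; i++)
                printf("%u ", (unsigned int)tbl[i]); /* 0 1 2 3 4 0 1 2 3 4 0 1 0 0 0 0 */
        printf("\n");
        return 0;
}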
5927 if (!bp->rss_indir_tbl) in bnxt_get_max_rss_ring()
5930 tbl_size = bnxt_get_rxfh_indir_size(bp->dev); in bnxt_get_max_rss_ring()
5932 max_ring = max(max_ring, bp->rss_indir_tbl[i]); in bnxt_get_max_rss_ring()
5938 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in bnxt_get_nr_rss_ctxs()
5941 return bnxt_calc_nr_ring_pages(rx_rings - 1, in bnxt_get_nr_rss_ctxs()
5951 bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG); in bnxt_fill_hw_rss_tbl()
5957 j = bp->rss_indir_tbl[i]; in bnxt_fill_hw_rss_tbl()
5958 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]); in bnxt_fill_hw_rss_tbl()
5965 __le16 *ring_tbl = vnic->rss_table; in bnxt_fill_hw_rss_tbl_p5()
5969 tbl_size = bnxt_get_rxfh_indir_size(bp->dev); in bnxt_fill_hw_rss_tbl_p5()
5974 j = bp->rss_indir_tbl[i]; in bnxt_fill_hw_rss_tbl_p5()
5975 rxr = &bp->rx_ring[j]; in bnxt_fill_hw_rss_tbl_p5()
5977 ring_id = rxr->rx_ring_struct.fw_ring_id; in bnxt_fill_hw_rss_tbl_p5()
5988 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in __bnxt_hwrm_vnic_set_rss()
5993 if (bp->rss_hash_delta) { in __bnxt_hwrm_vnic_set_rss()
5994 req->hash_type = cpu_to_le32(bp->rss_hash_delta); in __bnxt_hwrm_vnic_set_rss()
5995 if (bp->rss_hash_cfg & bp->rss_hash_delta) in __bnxt_hwrm_vnic_set_rss()
5996 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE; in __bnxt_hwrm_vnic_set_rss()
5998 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE; in __bnxt_hwrm_vnic_set_rss()
6000 req->hash_type = cpu_to_le32(bp->rss_hash_cfg); in __bnxt_hwrm_vnic_set_rss()
6002 req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; in __bnxt_hwrm_vnic_set_rss()
6003 req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); in __bnxt_hwrm_vnic_set_rss()
6004 req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); in __bnxt_hwrm_vnic_set_rss()
6009 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; in bnxt_hwrm_vnic_set_rss()
6013 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) || in bnxt_hwrm_vnic_set_rss()
6014 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID) in bnxt_hwrm_vnic_set_rss()
6023 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); in bnxt_hwrm_vnic_set_rss()
6029 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; in bnxt_hwrm_vnic_set_rss_p5()
6039 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); in bnxt_hwrm_vnic_set_rss_p5()
6044 ring_tbl_map = vnic->rss_table_dma_addr; in bnxt_hwrm_vnic_set_rss_p5()
6045 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); in bnxt_hwrm_vnic_set_rss_p5()
6049 req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map); in bnxt_hwrm_vnic_set_rss_p5()
6050 req->ring_table_pair_index = i; in bnxt_hwrm_vnic_set_rss_p5()
6051 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]); in bnxt_hwrm_vnic_set_rss_p5()
6064 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; in bnxt_hwrm_update_rss_hash_cfg()
6071 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); in bnxt_hwrm_update_rss_hash_cfg()
6073 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); in bnxt_hwrm_update_rss_hash_cfg()
6076 bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg; in bnxt_hwrm_update_rss_hash_cfg()
6077 bp->rss_hash_delta = 0; in bnxt_hwrm_update_rss_hash_cfg()
6084 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; in bnxt_hwrm_vnic_set_hds()
6092 req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT); in bnxt_hwrm_vnic_set_hds()
6093 req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID); in bnxt_hwrm_vnic_set_hds()
6096 req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size); in bnxt_hwrm_vnic_set_hds()
6098 req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | in bnxt_hwrm_vnic_set_hds()
6100 req->enables |= in bnxt_hwrm_vnic_set_hds()
6102 req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh); in bnxt_hwrm_vnic_set_hds()
6103 req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh); in bnxt_hwrm_vnic_set_hds()
6105 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); in bnxt_hwrm_vnic_set_hds()
6117 req->rss_cos_lb_ctx_id = in bnxt_hwrm_vnic_ctx_free_one()
6118 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]); in bnxt_hwrm_vnic_ctx_free_one()
6121 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID; in bnxt_hwrm_vnic_ctx_free_one()
6128 for (i = 0; i < bp->nr_vnics; i++) { in bnxt_hwrm_vnic_ctx_free()
6129 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; in bnxt_hwrm_vnic_ctx_free()
6132 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID) in bnxt_hwrm_vnic_ctx_free()
6136 bp->rsscos_nr_ctxs = 0; in bnxt_hwrm_vnic_ctx_free()
6152 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = in bnxt_hwrm_vnic_ctx_alloc()
6153 le16_to_cpu(resp->rss_cos_lb_ctx_id); in bnxt_hwrm_vnic_ctx_alloc()
6161 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP) in bnxt_get_roce_vnic_mode()
6168 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; in bnxt_hwrm_vnic_cfg()
6178 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in bnxt_hwrm_vnic_cfg()
6179 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; in bnxt_hwrm_vnic_cfg()
6181 req->default_rx_ring_id = in bnxt_hwrm_vnic_cfg()
6182 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id); in bnxt_hwrm_vnic_cfg()
6183 req->default_cmpl_ring_id = in bnxt_hwrm_vnic_cfg()
6185 req->enables = in bnxt_hwrm_vnic_cfg()
6190 req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP); in bnxt_hwrm_vnic_cfg()
6192 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) { in bnxt_hwrm_vnic_cfg()
6193 req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); in bnxt_hwrm_vnic_cfg()
6194 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | in bnxt_hwrm_vnic_cfg()
6196 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) { in bnxt_hwrm_vnic_cfg()
6197 req->rss_rule = in bnxt_hwrm_vnic_cfg()
6198 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]); in bnxt_hwrm_vnic_cfg()
6199 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | in bnxt_hwrm_vnic_cfg()
6201 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE); in bnxt_hwrm_vnic_cfg()
6203 req->rss_rule = cpu_to_le16(0xffff); in bnxt_hwrm_vnic_cfg()
6207 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) { in bnxt_hwrm_vnic_cfg()
6208 req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]); in bnxt_hwrm_vnic_cfg()
6209 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE); in bnxt_hwrm_vnic_cfg()
6211 req->cos_rule = cpu_to_le16(0xffff); in bnxt_hwrm_vnic_cfg()
6214 if (vnic->flags & BNXT_VNIC_RSS_FLAG) in bnxt_hwrm_vnic_cfg()
6216 else if (vnic->flags & BNXT_VNIC_RFS_FLAG) in bnxt_hwrm_vnic_cfg()
6217 ring = vnic_id - 1; in bnxt_hwrm_vnic_cfg()
6219 ring = bp->rx_nr_rings - 1; in bnxt_hwrm_vnic_cfg()
6221 grp_idx = bp->rx_ring[ring].bnapi->index; in bnxt_hwrm_vnic_cfg()
6222 req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); in bnxt_hwrm_vnic_cfg()
6223 req->lb_rule = cpu_to_le16(0xffff); in bnxt_hwrm_vnic_cfg()
6225 req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN); in bnxt_hwrm_vnic_cfg()
6227 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); in bnxt_hwrm_vnic_cfg()
6230 def_vlan = bp->vf.vlan; in bnxt_hwrm_vnic_cfg()
6232 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan) in bnxt_hwrm_vnic_cfg()
6233 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); in bnxt_hwrm_vnic_cfg()
6234 if (!vnic_id && bnxt_ulp_registered(bp->edev)) in bnxt_hwrm_vnic_cfg()
6235 req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp)); in bnxt_hwrm_vnic_cfg()
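The MRU programmed by bnxt_hwrm_vnic_cfg() is the MTU plus the Ethernet header and one VLAN tag, so the VNIC still accepts tagged frames at full MTU. A one-line worked example:

#include <stdio.h>

#define ETH_HLEN  14    /* dst MAC + src MAC + ethertype */
#define VLAN_HLEN  4    /* one 802.1Q tag */

int main(void)
{
        int mtu = 1500;

        printf("mru = %d\n", mtu + ETH_HLEN + VLAN_HLEN);       /* 1518 */
        return 0;
}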
6242 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) { in bnxt_hwrm_vnic_free_one()
6248 req->vnic_id = in bnxt_hwrm_vnic_free_one()
6249 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id); in bnxt_hwrm_vnic_free_one()
6252 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID; in bnxt_hwrm_vnic_free_one()
6260 for (i = 0; i < bp->nr_vnics; i++) in bnxt_hwrm_vnic_free()
6269 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; in bnxt_hwrm_vnic_alloc()
6278 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_hwrm_vnic_alloc()
6283 grp_idx = bp->rx_ring[i].bnapi->index; in bnxt_hwrm_vnic_alloc()
6284 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) { in bnxt_hwrm_vnic_alloc()
6285 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n", in bnxt_hwrm_vnic_alloc()
6289 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id; in bnxt_hwrm_vnic_alloc()
6294 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID; in bnxt_hwrm_vnic_alloc()
6296 req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); in bnxt_hwrm_vnic_alloc()
6301 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id); in bnxt_hwrm_vnic_alloc()
6312 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats); in bnxt_hwrm_vnic_qcaps()
6313 bp->flags &= ~BNXT_FLAG_ROCE_MIRROR_CAP; in bnxt_hwrm_vnic_qcaps()
6314 bp->rss_cap &= ~BNXT_RSS_CAP_NEW_RSS_CAP; in bnxt_hwrm_vnic_qcaps()
6315 if (bp->hwrm_spec_code < 0x10600) in bnxt_hwrm_vnic_qcaps()
6325 u32 flags = le32_to_cpu(resp->flags); in bnxt_hwrm_vnic_qcaps()
6327 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && in bnxt_hwrm_vnic_qcaps()
6329 bp->rss_cap |= BNXT_RSS_CAP_NEW_RSS_CAP; in bnxt_hwrm_vnic_qcaps()
6332 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP; in bnxt_hwrm_vnic_qcaps()
6339 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))) in bnxt_hwrm_vnic_qcaps()
6340 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP; in bnxt_hwrm_vnic_qcaps()
6342 bp->rss_cap |= BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA; in bnxt_hwrm_vnic_qcaps()
6344 bp->rss_cap |= BNXT_RSS_CAP_RSS_TCAM; in bnxt_hwrm_vnic_qcaps()
6345 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported); in bnxt_hwrm_vnic_qcaps()
6346 if (bp->max_tpa_v2) { in bnxt_hwrm_vnic_qcaps()
6348 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5; in bnxt_hwrm_vnic_qcaps()
6350 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P7; in bnxt_hwrm_vnic_qcaps()
6353 bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA; in bnxt_hwrm_vnic_qcaps()
6366 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_hwrm_ring_grp_alloc()
6374 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_hwrm_ring_grp_alloc()
6375 unsigned int grp_idx = bp->rx_ring[i].bnapi->index; in bnxt_hwrm_ring_grp_alloc()
6377 req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); in bnxt_hwrm_ring_grp_alloc()
6378 req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id); in bnxt_hwrm_ring_grp_alloc()
6379 req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id); in bnxt_hwrm_ring_grp_alloc()
6380 req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx); in bnxt_hwrm_ring_grp_alloc()
6387 bp->grp_info[grp_idx].fw_grp_id = in bnxt_hwrm_ring_grp_alloc()
6388 le32_to_cpu(resp->ring_group_id); in bnxt_hwrm_ring_grp_alloc()
6399 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) in bnxt_hwrm_ring_grp_free()
6406 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_hwrm_ring_grp_free()
6407 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) in bnxt_hwrm_ring_grp_free()
6409 req->ring_group_id = in bnxt_hwrm_ring_grp_free()
6410 cpu_to_le32(bp->grp_info[i].fw_grp_id); in bnxt_hwrm_ring_grp_free()
6413 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; in bnxt_hwrm_ring_grp_free()
6424 struct bnxt_ring_mem_info *rmem = &ring->ring_mem; in hwrm_ring_alloc_send_msg()
6433 req->enables = 0; in hwrm_ring_alloc_send_msg()
6434 if (rmem->nr_pages > 1) { in hwrm_ring_alloc_send_msg()
6435 req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map); in hwrm_ring_alloc_send_msg()
6437 req->page_size = BNXT_PAGE_SHIFT; in hwrm_ring_alloc_send_msg()
6438 req->page_tbl_depth = 1; in hwrm_ring_alloc_send_msg()
6440 req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]); in hwrm_ring_alloc_send_msg()
6442 req->fbo = 0; in hwrm_ring_alloc_send_msg()
6444 req->logical_id = cpu_to_le16(map_index); in hwrm_ring_alloc_send_msg()
6452 req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX; in hwrm_ring_alloc_send_msg()
6454 grp_info = &bp->grp_info[ring->grp_idx]; in hwrm_ring_alloc_send_msg()
6455 req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr)); in hwrm_ring_alloc_send_msg()
6456 req->length = cpu_to_le32(bp->tx_ring_mask + 1); in hwrm_ring_alloc_send_msg()
6457 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); in hwrm_ring_alloc_send_msg()
6458 req->queue_id = cpu_to_le16(ring->queue_id); in hwrm_ring_alloc_send_msg()
6459 if (bp->flags & BNXT_FLAG_TX_COAL_CMPL) in hwrm_ring_alloc_send_msg()
6460 req->cmpl_coal_cnt = in hwrm_ring_alloc_send_msg()
6465 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX; in hwrm_ring_alloc_send_msg()
6466 req->length = cpu_to_le32(bp->rx_ring_mask + 1); in hwrm_ring_alloc_send_msg()
6467 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in hwrm_ring_alloc_send_msg()
6471 grp_info = &bp->grp_info[ring->grp_idx]; in hwrm_ring_alloc_send_msg()
6472 req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size); in hwrm_ring_alloc_send_msg()
6473 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); in hwrm_ring_alloc_send_msg()
6474 req->enables |= cpu_to_le32( in hwrm_ring_alloc_send_msg()
6478 req->flags = cpu_to_le16(flags); in hwrm_ring_alloc_send_msg()
6482 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in hwrm_ring_alloc_send_msg()
6483 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG; in hwrm_ring_alloc_send_msg()
6485 grp_info = &bp->grp_info[ring->grp_idx]; in hwrm_ring_alloc_send_msg()
6486 req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id); in hwrm_ring_alloc_send_msg()
6487 req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE); in hwrm_ring_alloc_send_msg()
6488 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); in hwrm_ring_alloc_send_msg()
6489 req->enables |= cpu_to_le32( in hwrm_ring_alloc_send_msg()
6493 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX; in hwrm_ring_alloc_send_msg()
6495 req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1); in hwrm_ring_alloc_send_msg()
6498 req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; in hwrm_ring_alloc_send_msg()
6499 req->length = cpu_to_le32(bp->cp_ring_mask + 1); in hwrm_ring_alloc_send_msg()
6500 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in hwrm_ring_alloc_send_msg()
6502 grp_info = &bp->grp_info[map_index]; in hwrm_ring_alloc_send_msg()
6503 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); in hwrm_ring_alloc_send_msg()
6504 req->cq_handle = cpu_to_le64(ring->handle); in hwrm_ring_alloc_send_msg()
6505 req->enables |= cpu_to_le32( in hwrm_ring_alloc_send_msg()
6507 } else if (bp->flags & BNXT_FLAG_USING_MSIX) { in hwrm_ring_alloc_send_msg()
6508 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; in hwrm_ring_alloc_send_msg()
6512 req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ; in hwrm_ring_alloc_send_msg()
6513 req->length = cpu_to_le32(bp->cp_ring_mask + 1); in hwrm_ring_alloc_send_msg()
6514 if (bp->flags & BNXT_FLAG_USING_MSIX) in hwrm_ring_alloc_send_msg()
6515 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; in hwrm_ring_alloc_send_msg()
6518 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n", in hwrm_ring_alloc_send_msg()
6520 return -1; in hwrm_ring_alloc_send_msg()
6525 err = le16_to_cpu(resp->error_code); in hwrm_ring_alloc_send_msg()
6526 ring_id = le16_to_cpu(resp->ring_id); in hwrm_ring_alloc_send_msg()
6531 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n", in hwrm_ring_alloc_send_msg()
6533 return -EIO; in hwrm_ring_alloc_send_msg()
6535 ring->fw_ring_id = ring_id; in hwrm_ring_alloc_send_msg()
6550 req->fid = cpu_to_le16(0xffff); in bnxt_hwrm_set_async_event_cr()
6551 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR); in bnxt_hwrm_set_async_event_cr()
6552 req->async_event_cr = cpu_to_le16(idx); in bnxt_hwrm_set_async_event_cr()
6561 req->enables = in bnxt_hwrm_set_async_event_cr()
6563 req->async_event_cr = cpu_to_le16(idx); in bnxt_hwrm_set_async_event_cr()
6573 db->db_ring_mask = bp->tx_ring_mask; in bnxt_set_db_mask()
6576 db->db_ring_mask = bp->rx_ring_mask; in bnxt_set_db_mask()
6579 db->db_ring_mask = bp->rx_agg_ring_mask; in bnxt_set_db_mask()
6583 db->db_ring_mask = bp->cp_ring_mask; in bnxt_set_db_mask()
6586 if (bp->flags & BNXT_FLAG_CHIP_P7) { in bnxt_set_db_mask()
6587 db->db_epoch_mask = db->db_ring_mask + 1; in bnxt_set_db_mask()
6588 db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask); in bnxt_set_db_mask()
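In bnxt_set_db_mask(), the epoch mask is the first bit above the ring index, and the shift relocates that bit to the doorbell's epoch position, so the epoch flips each time the producer index wraps. A sketch assuming a 2048-entry ring and an epoch field at bit 24 (the value I take DBR_EPOCH_SFT to be in the driver headers); __builtin_ctz() stands in for ilog2() on a power of two:

#include <stdint.h>
#include <stdio.h>

#define DBR_EPOCH_SFT 24        /* assumed epoch bit position */

int main(void)
{
        uint32_t ring_mask = 2047;              /* 2048-entry ring */
        uint32_t epoch_mask = ring_mask + 1;    /* bit 11 */
        int epoch_shift = DBR_EPOCH_SFT - __builtin_ctz(epoch_mask);

        /* the epoch bit flips every time the producer index wraps */
        for (uint32_t idx = 2046; idx < 2050; idx++)
                printf("idx %u -> epoch %u\n", idx,
                       (idx & epoch_mask) << epoch_shift >> DBR_EPOCH_SFT);
        return 0;
}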
6595 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in bnxt_set_db()
6598 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ; in bnxt_set_db()
6602 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ; in bnxt_set_db()
6605 db->db_key64 = DBR_PATH_L2; in bnxt_set_db()
6608 db->db_key64 = DBR_PATH_L2; in bnxt_set_db()
6611 db->db_key64 |= (u64)xid << DBR_XID_SFT; in bnxt_set_db()
6613 if (bp->flags & BNXT_FLAG_CHIP_P7) in bnxt_set_db()
6614 db->db_key64 |= DBR_VALID; in bnxt_set_db()
6616 db->doorbell = bp->bar1 + bp->db_offset; in bnxt_set_db()
6618 db->doorbell = bp->bar1 + map_idx * 0x80; in bnxt_set_db()
6621 db->db_key32 = DB_KEY_TX; in bnxt_set_db()
6625 db->db_key32 = DB_KEY_RX; in bnxt_set_db()
6628 db->db_key32 = DB_KEY_CP; in bnxt_set_db()
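bnxt_set_db() precomputes a 64-bit doorbell key (path, queue type, XID, and on P7 a valid bit) so the hot path only has to OR in the producer index. A sketch in which the field positions are assumptions about the DBR_* layout, for illustration only:

#include <stdint.h>
#include <stdio.h>

/* assumed field positions, for illustration only */
#define DB_XID_SFT      32
#define DB_PATH_L2      (0x1ULL << 56)
#define DB_VALID        (0x1ULL << 58)
#define DB_TYPE_SQ      (0x0ULL << 60)

int main(void)
{
        uint64_t key = DB_PATH_L2 | DB_TYPE_SQ |
                       ((uint64_t)0x123 << DB_XID_SFT) |
                       DB_VALID;        /* P7-style: valid bit set */
        uint32_t prod = 57;             /* current producer index */

        printf("doorbell write: 0x%016llx\n",
               (unsigned long long)(key | prod));
        return 0;
}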
6637 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS); in bnxt_hwrm_ring_alloc()
6641 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_hwrm_ring_alloc()
6645 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_hwrm_ring_alloc()
6646 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_hwrm_ring_alloc()
6647 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; in bnxt_hwrm_ring_alloc()
6648 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; in bnxt_hwrm_ring_alloc()
6649 u32 map_idx = ring->map_idx; in bnxt_hwrm_ring_alloc()
6652 vector = bp->irq_tbl[map_idx].vector; in bnxt_hwrm_ring_alloc()
6659 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id); in bnxt_hwrm_ring_alloc()
6660 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); in bnxt_hwrm_ring_alloc()
6662 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; in bnxt_hwrm_ring_alloc()
6665 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id); in bnxt_hwrm_ring_alloc()
6667 netdev_warn(bp->dev, "Failed to set async event completion ring.\n"); in bnxt_hwrm_ring_alloc()
6672 for (i = 0; i < bp->tx_nr_rings; i++) { in bnxt_hwrm_ring_alloc()
6673 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; in bnxt_hwrm_ring_alloc()
6677 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in bnxt_hwrm_ring_alloc()
6678 struct bnxt_cp_ring_info *cpr2 = txr->tx_cpr; in bnxt_hwrm_ring_alloc()
6679 struct bnxt_napi *bnapi = txr->bnapi; in bnxt_hwrm_ring_alloc()
6682 ring = &cpr2->cp_ring_struct; in bnxt_hwrm_ring_alloc()
6683 ring->handle = BNXT_SET_NQ_HDL(cpr2); in bnxt_hwrm_ring_alloc()
6684 map_idx = bnapi->index; in bnxt_hwrm_ring_alloc()
6688 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx, in bnxt_hwrm_ring_alloc()
6689 ring->fw_ring_id); in bnxt_hwrm_ring_alloc()
6690 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons); in bnxt_hwrm_ring_alloc()
6692 ring = &txr->tx_ring_struct; in bnxt_hwrm_ring_alloc()
6697 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id); in bnxt_hwrm_ring_alloc()
6701 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_hwrm_ring_alloc()
6702 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; in bnxt_hwrm_ring_alloc()
6703 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; in bnxt_hwrm_ring_alloc()
6704 struct bnxt_napi *bnapi = rxr->bnapi; in bnxt_hwrm_ring_alloc()
6705 u32 map_idx = bnapi->index; in bnxt_hwrm_ring_alloc()
6710 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id); in bnxt_hwrm_ring_alloc()
6713 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); in bnxt_hwrm_ring_alloc()
6714 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; in bnxt_hwrm_ring_alloc()
6715 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in bnxt_hwrm_ring_alloc()
6716 struct bnxt_cp_ring_info *cpr2 = rxr->rx_cpr; in bnxt_hwrm_ring_alloc()
6719 ring = &cpr2->cp_ring_struct; in bnxt_hwrm_ring_alloc()
6720 ring->handle = BNXT_SET_NQ_HDL(cpr2); in bnxt_hwrm_ring_alloc()
6724 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx, in bnxt_hwrm_ring_alloc()
6725 ring->fw_ring_id); in bnxt_hwrm_ring_alloc()
6726 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons); in bnxt_hwrm_ring_alloc()
6732 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_hwrm_ring_alloc()
6733 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; in bnxt_hwrm_ring_alloc()
6735 &rxr->rx_agg_ring_struct; in bnxt_hwrm_ring_alloc()
6736 u32 grp_idx = ring->grp_idx; in bnxt_hwrm_ring_alloc()
6737 u32 map_idx = grp_idx + bp->rx_nr_rings; in bnxt_hwrm_ring_alloc()
6743 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx, in bnxt_hwrm_ring_alloc()
6744 ring->fw_ring_id); in bnxt_hwrm_ring_alloc()
6745 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); in bnxt_hwrm_ring_alloc()
6746 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); in bnxt_hwrm_ring_alloc()
6747 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; in bnxt_hwrm_ring_alloc()
6770 req->cmpl_ring = cpu_to_le16(cmpl_ring_id); in hwrm_ring_free_send_msg()
6771 req->ring_type = ring_type; in hwrm_ring_free_send_msg()
6772 req->ring_id = cpu_to_le16(ring->fw_ring_id); in hwrm_ring_free_send_msg()
6776 error_code = le16_to_cpu(resp->error_code); in hwrm_ring_free_send_msg()
6780 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n", in hwrm_ring_free_send_msg()
6782 return -EIO; in hwrm_ring_free_send_msg()
6792 if (!bp->bnapi) in bnxt_hwrm_ring_free()
6795 for (i = 0; i < bp->tx_nr_rings; i++) { in bnxt_hwrm_ring_free()
6796 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; in bnxt_hwrm_ring_free()
6797 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; in bnxt_hwrm_ring_free()
6799 if (ring->fw_ring_id != INVALID_HW_RING_ID) { in bnxt_hwrm_ring_free()
6806 ring->fw_ring_id = INVALID_HW_RING_ID; in bnxt_hwrm_ring_free()
6810 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_hwrm_ring_free()
6811 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; in bnxt_hwrm_ring_free()
6812 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; in bnxt_hwrm_ring_free()
6813 u32 grp_idx = rxr->bnapi->index; in bnxt_hwrm_ring_free()
6815 if (ring->fw_ring_id != INVALID_HW_RING_ID) { in bnxt_hwrm_ring_free()
6822 ring->fw_ring_id = INVALID_HW_RING_ID; in bnxt_hwrm_ring_free()
6823 bp->grp_info[grp_idx].rx_fw_ring_id = in bnxt_hwrm_ring_free()
6828 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_hwrm_ring_free()
6832 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_hwrm_ring_free()
6833 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; in bnxt_hwrm_ring_free()
6834 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; in bnxt_hwrm_ring_free()
6835 u32 grp_idx = rxr->bnapi->index; in bnxt_hwrm_ring_free()
6837 if (ring->fw_ring_id != INVALID_HW_RING_ID) { in bnxt_hwrm_ring_free()
6843 ring->fw_ring_id = INVALID_HW_RING_ID; in bnxt_hwrm_ring_free()
6844 bp->grp_info[grp_idx].agg_fw_ring_id = in bnxt_hwrm_ring_free()
6855 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_hwrm_ring_free()
6859 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_hwrm_ring_free()
6860 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_hwrm_ring_free()
6861 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; in bnxt_hwrm_ring_free()
6865 for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++) { in bnxt_hwrm_ring_free()
6866 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j]; in bnxt_hwrm_ring_free()
6868 ring = &cpr2->cp_ring_struct; in bnxt_hwrm_ring_free()
6869 if (ring->fw_ring_id == INVALID_HW_RING_ID) in bnxt_hwrm_ring_free()
6874 ring->fw_ring_id = INVALID_HW_RING_ID; in bnxt_hwrm_ring_free()
6876 ring = &cpr->cp_ring_struct; in bnxt_hwrm_ring_free()
6877 if (ring->fw_ring_id != INVALID_HW_RING_ID) { in bnxt_hwrm_ring_free()
6880 ring->fw_ring_id = INVALID_HW_RING_ID; in bnxt_hwrm_ring_free()
6881 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; in bnxt_hwrm_ring_free()
6886 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
6888 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
6893 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; in bnxt_hwrm_get_rings()
6898 if (bp->hwrm_spec_code < 0x10601) in bnxt_hwrm_get_rings()
6905 req->fid = cpu_to_le16(0xffff); in bnxt_hwrm_get_rings()
6913 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings); in bnxt_hwrm_get_rings()
6917 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings); in bnxt_hwrm_get_rings()
6918 hw_resc->resv_hw_ring_grps = in bnxt_hwrm_get_rings()
6919 le32_to_cpu(resp->alloc_hw_ring_grps); in bnxt_hwrm_get_rings()
6920 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics); in bnxt_hwrm_get_rings()
6921 cp = le16_to_cpu(resp->alloc_cmpl_rings); in bnxt_hwrm_get_rings()
6922 stats = le16_to_cpu(resp->alloc_stat_ctx); in bnxt_hwrm_get_rings()
6923 hw_resc->resv_irqs = cp; in bnxt_hwrm_get_rings()
6924 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in bnxt_hwrm_get_rings()
6925 int rx = hw_resc->resv_rx_rings; in bnxt_hwrm_get_rings()
6926 int tx = hw_resc->resv_tx_rings; in bnxt_hwrm_get_rings()
6928 if (bp->flags & BNXT_FLAG_AGG_RINGS) in bnxt_hwrm_get_rings()
6930 if (cp < (rx + tx)) { in bnxt_hwrm_get_rings()
6931 rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false); in bnxt_hwrm_get_rings()
6934 if (bp->flags & BNXT_FLAG_AGG_RINGS) in bnxt_hwrm_get_rings()
6936 hw_resc->resv_rx_rings = rx; in bnxt_hwrm_get_rings()
6937 hw_resc->resv_tx_rings = tx; in bnxt_hwrm_get_rings()
6939 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix); in bnxt_hwrm_get_rings()
6940 hw_resc->resv_hw_ring_grps = rx; in bnxt_hwrm_get_rings()
6942 hw_resc->resv_cp_rings = cp; in bnxt_hwrm_get_rings()
6943 hw_resc->resv_stat_ctxs = stats; in bnxt_hwrm_get_rings()
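When the reserved completion rings cannot cover rx + tx, bnxt_hwrm_get_rings() calls __bnxt_trim_rings() to shrink both counts until they fit. One plausible policy, sketched with hypothetical logic (the driver's own trim routine may differ in detail): shave whichever side is larger until the sum fits the cap.

#include <stdio.h>

/* hypothetical trim: not the driver's exact algorithm */
static int trim_rings(int *rx, int *tx, int max)
{
        if (max < 2)
                return -1;      /* need at least one ring of each */
        while (*rx + *tx > max) {
                if (*rx > *tx && *rx > 1)
                        (*rx)--;
                else if (*tx > 1)
                        (*tx)--;
                else
                        return -1;
        }
        return 0;
}

int main(void)
{
        int rx = 8, tx = 8, cp = 12;

        trim_rings(&rx, &tx, cp);
        printf("rx %d tx %d (cap %d)\n", rx, tx, cp);   /* rx 6 tx 6 */
        return 0;
}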
6956 if (bp->hwrm_spec_code < 0x10601) in __bnxt_hwrm_get_tx_rings()
6963 req->fid = cpu_to_le16(fid); in __bnxt_hwrm_get_tx_rings()
6967 *tx_rings = le16_to_cpu(resp->alloc_tx_rings); in __bnxt_hwrm_get_tx_rings()
6985 req->fid = cpu_to_le16(0xffff); in __bnxt_hwrm_reserve_pf_rings()
6987 req->num_tx_rings = cpu_to_le16(tx_rings); in __bnxt_hwrm_reserve_pf_rings()
6991 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in __bnxt_hwrm_reserve_pf_rings()
7006 req->num_rx_rings = cpu_to_le16(rx_rings); in __bnxt_hwrm_reserve_pf_rings()
7007 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in __bnxt_hwrm_reserve_pf_rings()
7010 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); in __bnxt_hwrm_reserve_pf_rings()
7011 req->num_msix = cpu_to_le16(cp_rings); in __bnxt_hwrm_reserve_pf_rings()
7012 req->num_rsscos_ctxs = cpu_to_le16(rss_ctx); in __bnxt_hwrm_reserve_pf_rings()
7014 req->num_cmpl_rings = cpu_to_le16(cp_rings); in __bnxt_hwrm_reserve_pf_rings()
7015 req->num_hw_ring_grps = cpu_to_le16(ring_grps); in __bnxt_hwrm_reserve_pf_rings()
7016 req->num_rsscos_ctxs = cpu_to_le16(1); in __bnxt_hwrm_reserve_pf_rings()
7017 if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && in __bnxt_hwrm_reserve_pf_rings()
7019 req->num_rsscos_ctxs = in __bnxt_hwrm_reserve_pf_rings()
7022 req->num_stat_ctxs = cpu_to_le16(stats); in __bnxt_hwrm_reserve_pf_rings()
7023 req->num_vnics = cpu_to_le16(vnics); in __bnxt_hwrm_reserve_pf_rings()
7025 req->enables = cpu_to_le32(enables); in __bnxt_hwrm_reserve_pf_rings()
7043 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in __bnxt_hwrm_reserve_vf_rings()
7055 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); in __bnxt_hwrm_reserve_vf_rings()
7056 req->num_tx_rings = cpu_to_le16(tx_rings); in __bnxt_hwrm_reserve_vf_rings()
7057 req->num_rx_rings = cpu_to_le16(rx_rings); in __bnxt_hwrm_reserve_vf_rings()
7058 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in __bnxt_hwrm_reserve_vf_rings()
7061 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); in __bnxt_hwrm_reserve_vf_rings()
7062 req->num_rsscos_ctxs = cpu_to_le16(rss_ctx); in __bnxt_hwrm_reserve_vf_rings()
7064 req->num_cmpl_rings = cpu_to_le16(cp_rings); in __bnxt_hwrm_reserve_vf_rings()
7065 req->num_hw_ring_grps = cpu_to_le16(ring_grps); in __bnxt_hwrm_reserve_vf_rings()
7066 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX); in __bnxt_hwrm_reserve_vf_rings()
7068 req->num_stat_ctxs = cpu_to_le16(stats); in __bnxt_hwrm_reserve_vf_rings()
7069 req->num_vnics = cpu_to_le16(vnics); in __bnxt_hwrm_reserve_vf_rings()
7071 req->enables = cpu_to_le32(enables); in __bnxt_hwrm_reserve_vf_rings()
7085 return -ENOMEM; in bnxt_hwrm_reserve_pf_rings()
7087 if (!req->enables) { in bnxt_hwrm_reserve_pf_rings()
7096 if (bp->hwrm_spec_code < 0x10601) in bnxt_hwrm_reserve_pf_rings()
7097 bp->hw_resc.resv_tx_rings = tx_rings; in bnxt_hwrm_reserve_pf_rings()
7110 bp->hw_resc.resv_tx_rings = tx_rings; in bnxt_hwrm_reserve_vf_rings()
7117 return -ENOMEM; in bnxt_hwrm_reserve_vf_rings()
7126 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp, in bnxt_hwrm_reserve_rings()
7130 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat, in bnxt_hwrm_reserve_rings()
7133 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat, in bnxt_hwrm_reserve_rings()
7139 int cp = bp->cp_nr_rings; in bnxt_nq_rings_in_use()
7156 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) in bnxt_cp_rings_in_use()
7159 cp = bp->tx_nr_rings + bp->rx_nr_rings; in bnxt_cp_rings_in_use()
7166 int cp = bp->cp_nr_rings; in bnxt_get_func_stat_ctxs()
7182 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; in bnxt_check_rss_tbl_no_rmgr()
7185 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) { in bnxt_check_rss_tbl_no_rmgr()
7186 hw_resc->resv_rx_rings = bp->rx_nr_rings; in bnxt_check_rss_tbl_no_rmgr()
7187 if (!netif_is_rxfh_configured(bp->dev)) in bnxt_check_rss_tbl_no_rmgr()
7194 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; in bnxt_need_reserve_rings()
7197 int rx = bp->rx_nr_rings, stat; in bnxt_need_reserve_rings()
7200 if (hw_resc->resv_tx_rings != bp->tx_nr_rings && in bnxt_need_reserve_rings()
7201 bp->hwrm_spec_code >= 0x10601) in bnxt_need_reserve_rings()
7213 if ((bp->flags & BNXT_FLAG_RFS) && in bnxt_need_reserve_rings()
7214 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) in bnxt_need_reserve_rings()
7216 if (bp->flags & BNXT_FLAG_AGG_RINGS) in bnxt_need_reserve_rings()
7219 if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp || in bnxt_need_reserve_rings()
7220 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat || in bnxt_need_reserve_rings()
7221 (hw_resc->resv_hw_ring_grps != grp && in bnxt_need_reserve_rings()
7222 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))) in bnxt_need_reserve_rings()
7224 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) && in bnxt_need_reserve_rings()
7225 hw_resc->resv_irqs != nq) in bnxt_need_reserve_rings()
7232 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; in __bnxt_reserve_rings()
7234 int tx = bp->tx_nr_rings; in __bnxt_reserve_rings()
7235 int rx = bp->rx_nr_rings; in __bnxt_reserve_rings()
7244 if (bp->flags & BNXT_FLAG_SHARED_RINGS) in __bnxt_reserve_rings()
7246 if ((bp->flags & BNXT_FLAG_RFS) && in __bnxt_reserve_rings()
7247 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) in __bnxt_reserve_rings()
7249 if (bp->flags & BNXT_FLAG_AGG_RINGS) in __bnxt_reserve_rings()
7251 grp = bp->rx_nr_rings; in __bnxt_reserve_rings()
7254 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic); in __bnxt_reserve_rings()
7258 tx = hw_resc->resv_tx_rings; in __bnxt_reserve_rings()
7260 rx = hw_resc->resv_rx_rings; in __bnxt_reserve_rings()
7261 cp = hw_resc->resv_irqs; in __bnxt_reserve_rings()
7262 grp = hw_resc->resv_hw_ring_grps; in __bnxt_reserve_rings()
7263 vnic = hw_resc->resv_vnics; in __bnxt_reserve_rings()
7264 stat = hw_resc->resv_stat_ctxs; in __bnxt_reserve_rings()
7268 if (bp->flags & BNXT_FLAG_AGG_RINGS) { in __bnxt_reserve_rings()
7272 if (netif_running(bp->dev)) in __bnxt_reserve_rings()
7273 return -ENOMEM; in __bnxt_reserve_rings()
7275 bp->flags &= ~BNXT_FLAG_AGG_RINGS; in __bnxt_reserve_rings()
7276 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; in __bnxt_reserve_rings()
7277 bp->dev->hw_features &= ~NETIF_F_LRO; in __bnxt_reserve_rings()
7278 bp->dev->features &= ~NETIF_F_LRO; in __bnxt_reserve_rings()
7283 cp = min_t(int, cp, bp->cp_nr_rings); in __bnxt_reserve_rings()
7285 stat -= bnxt_get_ulp_stat_ctxs(bp); in __bnxt_reserve_rings()
7287 rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh); in __bnxt_reserve_rings()
7288 if (bp->flags & BNXT_FLAG_AGG_RINGS) in __bnxt_reserve_rings()
7290 tx_cp = bnxt_num_tx_to_cp(bp, tx); in __bnxt_reserve_rings()
7292 bp->tx_nr_rings = tx; in __bnxt_reserve_rings()
7297 if (rx_rings != bp->rx_nr_rings) { in __bnxt_reserve_rings()
7298 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n", in __bnxt_reserve_rings()
7299 rx_rings, bp->rx_nr_rings); in __bnxt_reserve_rings()
7300 if (netif_is_rxfh_configured(bp->dev) && in __bnxt_reserve_rings()
7301 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) != in __bnxt_reserve_rings()
7304 netdev_warn(bp->dev, "RSS table entries reverting to default\n"); in __bnxt_reserve_rings()
7305 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED; in __bnxt_reserve_rings()
7308 bp->rx_nr_rings = rx_rings; in __bnxt_reserve_rings()
7309 bp->cp_nr_rings = cp; in __bnxt_reserve_rings()
7311 if (!tx || !rx || !cp || !grp || !vnic || !stat) in __bnxt_reserve_rings()
7312 return -ENOMEM; in __bnxt_reserve_rings()
7314 if (!netif_is_rxfh_configured(bp->dev)) in __bnxt_reserve_rings()
7338 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) in bnxt_hwrm_check_vf_rings()
7341 req->flags = cpu_to_le32(flags); in bnxt_hwrm_check_vf_rings()
7360 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_hwrm_check_pf_rings()
7367 req->flags = cpu_to_le32(flags); in bnxt_hwrm_check_pf_rings()
7375 if (bp->hwrm_spec_code < 0x10801) in bnxt_hwrm_check_rings()
7389 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; in bnxt_hwrm_coal_params_qcaps()
7394 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS; in bnxt_hwrm_coal_params_qcaps()
7395 coal_cap->num_cmpl_dma_aggr_max = 63; in bnxt_hwrm_coal_params_qcaps()
7396 coal_cap->num_cmpl_dma_aggr_during_int_max = 63; in bnxt_hwrm_coal_params_qcaps()
7397 coal_cap->cmpl_aggr_dma_tmr_max = 65535; in bnxt_hwrm_coal_params_qcaps()
7398 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535; in bnxt_hwrm_coal_params_qcaps()
7399 coal_cap->int_lat_tmr_min_max = 65535; in bnxt_hwrm_coal_params_qcaps()
7400 coal_cap->int_lat_tmr_max_max = 65535; in bnxt_hwrm_coal_params_qcaps()
7401 coal_cap->num_cmpl_aggr_int_max = 65535; in bnxt_hwrm_coal_params_qcaps()
7402 coal_cap->timer_units = 80; in bnxt_hwrm_coal_params_qcaps()
7404 if (bp->hwrm_spec_code < 0x10902) in bnxt_hwrm_coal_params_qcaps()
7413 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params); in bnxt_hwrm_coal_params_qcaps()
7414 coal_cap->nq_params = le32_to_cpu(resp->nq_params); in bnxt_hwrm_coal_params_qcaps()
7415 coal_cap->num_cmpl_dma_aggr_max = in bnxt_hwrm_coal_params_qcaps()
7416 le16_to_cpu(resp->num_cmpl_dma_aggr_max); in bnxt_hwrm_coal_params_qcaps()
7417 coal_cap->num_cmpl_dma_aggr_during_int_max = in bnxt_hwrm_coal_params_qcaps()
7418 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max); in bnxt_hwrm_coal_params_qcaps()
7419 coal_cap->cmpl_aggr_dma_tmr_max = in bnxt_hwrm_coal_params_qcaps()
7420 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max); in bnxt_hwrm_coal_params_qcaps()
7421 coal_cap->cmpl_aggr_dma_tmr_during_int_max = in bnxt_hwrm_coal_params_qcaps()
7422 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max); in bnxt_hwrm_coal_params_qcaps()
7423 coal_cap->int_lat_tmr_min_max = in bnxt_hwrm_coal_params_qcaps()
7424 le16_to_cpu(resp->int_lat_tmr_min_max); in bnxt_hwrm_coal_params_qcaps()
7425 coal_cap->int_lat_tmr_max_max = in bnxt_hwrm_coal_params_qcaps()
7426 le16_to_cpu(resp->int_lat_tmr_max_max); in bnxt_hwrm_coal_params_qcaps()
7427 coal_cap->num_cmpl_aggr_int_max = in bnxt_hwrm_coal_params_qcaps()
7428 le16_to_cpu(resp->num_cmpl_aggr_int_max); in bnxt_hwrm_coal_params_qcaps()
7429 coal_cap->timer_units = le16_to_cpu(resp->timer_units); in bnxt_hwrm_coal_params_qcaps()
7436 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; in bnxt_usec_to_coal_tmr()
7438 return usec * 1000 / coal_cap->timer_units; in bnxt_usec_to_coal_tmr()
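bnxt_usec_to_coal_tmr() converts microseconds to hardware timer ticks: multiply by 1000 to get nanoseconds, then divide by the per-tick duration (80 ns by default, per the legacy defaults set above). So a 6 µs latency target becomes 6000 / 80 = 75 ticks:

#include <stdio.h>

static unsigned int usec_to_coal_tmr(unsigned int usec, unsigned int timer_units_ns)
{
        return usec * 1000 / timer_units_ns;
}

int main(void)
{
        printf("%u ticks\n", usec_to_coal_tmr(6, 80));  /* 75 */
        return 0;
}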
7445 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; in bnxt_hwrm_set_coal_params()
7446 u16 val, tmr, max, flags = hw_coal->flags; in bnxt_hwrm_set_coal_params()
7447 u32 cmpl_params = coal_cap->cmpl_params; in bnxt_hwrm_set_coal_params()
7449 max = hw_coal->bufs_per_record * 128; in bnxt_hwrm_set_coal_params()
7450 if (hw_coal->budget) in bnxt_hwrm_set_coal_params()
7451 max = hw_coal->bufs_per_record * hw_coal->budget; in bnxt_hwrm_set_coal_params()
7452 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max); in bnxt_hwrm_set_coal_params()
7454 val = clamp_t(u16, hw_coal->coal_bufs, 1, max); in bnxt_hwrm_set_coal_params()
7455 req->num_cmpl_aggr_int = cpu_to_le16(val); in bnxt_hwrm_set_coal_params()
7457 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max); in bnxt_hwrm_set_coal_params()
7458 req->num_cmpl_dma_aggr = cpu_to_le16(val); in bnxt_hwrm_set_coal_params()
7460 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1, in bnxt_hwrm_set_coal_params()
7461 coal_cap->num_cmpl_dma_aggr_during_int_max); in bnxt_hwrm_set_coal_params()
7462 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val); in bnxt_hwrm_set_coal_params()
7464 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks); in bnxt_hwrm_set_coal_params()
7465 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max); in bnxt_hwrm_set_coal_params()
7466 req->int_lat_tmr_max = cpu_to_le16(tmr); in bnxt_hwrm_set_coal_params()
7471 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max); in bnxt_hwrm_set_coal_params()
7472 req->int_lat_tmr_min = cpu_to_le16(val); in bnxt_hwrm_set_coal_params()
7473 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); in bnxt_hwrm_set_coal_params()
7477 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max); in bnxt_hwrm_set_coal_params()
7478 req->cmpl_aggr_dma_tmr = cpu_to_le16(val); in bnxt_hwrm_set_coal_params()
7482 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq); in bnxt_hwrm_set_coal_params()
7484 coal_cap->cmpl_aggr_dma_tmr_during_int_max); in bnxt_hwrm_set_coal_params()
7485 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val); in bnxt_hwrm_set_coal_params()
7486 req->enables |= in bnxt_hwrm_set_coal_params()
7491 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh) in bnxt_hwrm_set_coal_params()
7493 req->flags = cpu_to_le16(flags); in bnxt_hwrm_set_coal_params()
7494 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES); in bnxt_hwrm_set_coal_params()
7501 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; in __bnxt_hwrm_set_coal_nq()
7502 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; in __bnxt_hwrm_set_coal_nq()
7503 u32 nq_params = coal_cap->nq_params; in __bnxt_hwrm_set_coal_nq()
7514 req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id); in __bnxt_hwrm_set_coal_nq()
7515 req->flags = in __bnxt_hwrm_set_coal_nq()
7518 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2; in __bnxt_hwrm_set_coal_nq()
7519 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max); in __bnxt_hwrm_set_coal_nq()
7520 req->int_lat_tmr_min = cpu_to_le16(tmr); in __bnxt_hwrm_set_coal_nq()
7521 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); in __bnxt_hwrm_set_coal_nq()
7528 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; in bnxt_hwrm_set_ring_coal()
7535 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal)); in bnxt_hwrm_set_ring_coal()
7537 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks; in bnxt_hwrm_set_ring_coal()
7538 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs; in bnxt_hwrm_set_ring_coal()
7540 if (!bnapi->rx_ring) in bnxt_hwrm_set_ring_coal()
7541 return -ENODEV; in bnxt_hwrm_set_ring_coal()
7549 req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring)); in bnxt_hwrm_set_ring_coal()
7558 u16 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring); in bnxt_hwrm_set_rx_coal()
7560 req->ring_id = cpu_to_le16(ring_id); in bnxt_hwrm_set_rx_coal()
7575 req->ring_id = cpu_to_le16(ring_id); in bnxt_hwrm_set_tx_coal()
7579 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) in bnxt_hwrm_set_tx_coal()
7600 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx); in bnxt_hwrm_set_coal()
7601 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx); in bnxt_hwrm_set_coal()
7605 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_hwrm_set_coal()
7606 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_hwrm_set_coal()
7609 if (!bnapi->rx_ring) in bnxt_hwrm_set_coal()
7616 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) in bnxt_hwrm_set_coal()
7619 if (bnapi->rx_ring && bnapi->tx_ring[0]) { in bnxt_hwrm_set_coal()
7624 if (bnapi->rx_ring) in bnxt_hwrm_set_coal()
7625 hw_coal = &bp->rx_coal; in bnxt_hwrm_set_coal()
7627 hw_coal = &bp->tx_coal; in bnxt_hwrm_set_coal()
7641 if (!bp->bnapi) in bnxt_hwrm_stat_ctx_free()
7657 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_hwrm_stat_ctx_free()
7658 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_hwrm_stat_ctx_free()
7659 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; in bnxt_hwrm_stat_ctx_free()
7661 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) { in bnxt_hwrm_stat_ctx_free()
7662 req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id); in bnxt_hwrm_stat_ctx_free()
7664 req0->stat_ctx_id = req->stat_ctx_id; in bnxt_hwrm_stat_ctx_free()
7669 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; in bnxt_hwrm_stat_ctx_free()
7690 req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size); in bnxt_hwrm_stat_ctx_alloc()
7691 req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000); in bnxt_hwrm_stat_ctx_alloc()
7694 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_hwrm_stat_ctx_alloc()
7695 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_hwrm_stat_ctx_alloc()
7696 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; in bnxt_hwrm_stat_ctx_alloc()
7698 req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map); in bnxt_hwrm_stat_ctx_alloc()
7704 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id); in bnxt_hwrm_stat_ctx_alloc()
7706 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id; in bnxt_hwrm_stat_ctx_alloc()
7723 req->fid = cpu_to_le16(0xffff); in bnxt_hwrm_func_qcfg()
7731 struct bnxt_vf_info *vf = &bp->vf; in bnxt_hwrm_func_qcfg()
7733 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK; in bnxt_hwrm_func_qcfg()
7735 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs); in bnxt_hwrm_func_qcfg()
7738 flags = le16_to_cpu(resp->flags); in bnxt_hwrm_func_qcfg()
7741 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT; in bnxt_hwrm_func_qcfg()
7743 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT; in bnxt_hwrm_func_qcfg()
7746 bp->flags |= BNXT_FLAG_MULTI_HOST; in bnxt_hwrm_func_qcfg()
7749 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR; in bnxt_hwrm_func_qcfg()
7751 switch (resp->port_partition_type) { in bnxt_hwrm_func_qcfg()
7755 bp->port_partition_type = resp->port_partition_type; in bnxt_hwrm_func_qcfg()
7758 if (bp->hwrm_spec_code < 0x10707 || in bnxt_hwrm_func_qcfg()
7759 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB) in bnxt_hwrm_func_qcfg()
7760 bp->br_mode = BRIDGE_MODE_VEB; in bnxt_hwrm_func_qcfg()
7761 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA) in bnxt_hwrm_func_qcfg()
7762 bp->br_mode = BRIDGE_MODE_VEPA; in bnxt_hwrm_func_qcfg()
7764 bp->br_mode = BRIDGE_MODE_UNDEF; in bnxt_hwrm_func_qcfg()
7766 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured); in bnxt_hwrm_func_qcfg()
7767 if (!bp->max_mtu) in bnxt_hwrm_func_qcfg()
7768 bp->max_mtu = BNXT_MAX_MTU; in bnxt_hwrm_func_qcfg()
7770 if (bp->db_size) in bnxt_hwrm_func_qcfg()
7773 bp->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024; in bnxt_hwrm_func_qcfg()
7776 bp->db_offset = DB_PF_OFFSET_P5; in bnxt_hwrm_func_qcfg()
7778 bp->db_offset = DB_VF_OFFSET_P5; in bnxt_hwrm_func_qcfg()
7780 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) * in bnxt_hwrm_func_qcfg()
7782 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) || in bnxt_hwrm_func_qcfg()
7783 bp->db_size <= bp->db_offset) in bnxt_hwrm_func_qcfg()
7784 bp->db_size = pci_resource_len(bp->pdev, 2); in bnxt_hwrm_func_qcfg()
7795 ctxm->init_value = init_val; in bnxt_init_ctx_initializer()
7796 ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET; in bnxt_init_ctx_initializer()
7798 ctxm->init_offset = init_offset * 4; in bnxt_init_ctx_initializer()
7800 ctxm->init_value = 0; in bnxt_init_ctx_initializer()
7805 struct bnxt_ctx_mem_info *ctx = bp->ctx; in bnxt_alloc_all_ctx_pg_info()
7809 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type]; in bnxt_alloc_all_ctx_pg_info()
7812 if (!ctxm->max_entries) in bnxt_alloc_all_ctx_pg_info()
7815 if (ctxm->instance_bmap) in bnxt_alloc_all_ctx_pg_info()
7816 n = hweight32(ctxm->instance_bmap); in bnxt_alloc_all_ctx_pg_info()
7817 ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL); in bnxt_alloc_all_ctx_pg_info()
7818 if (!ctxm->pg_info) in bnxt_alloc_all_ctx_pg_info()
7819 return -ENOMEM; in bnxt_alloc_all_ctx_pg_info()
7842 return -ENOMEM; in bnxt_hwrm_func_backing_store_qcaps_v2()
7843 bp->ctx = ctx; in bnxt_hwrm_func_backing_store_qcaps_v2()
7848 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type]; in bnxt_hwrm_func_backing_store_qcaps_v2()
7853 req->type = cpu_to_le16(type); in bnxt_hwrm_func_backing_store_qcaps_v2()
7857 flags = le32_to_cpu(resp->flags); in bnxt_hwrm_func_backing_store_qcaps_v2()
7858 type = le16_to_cpu(resp->next_valid_type); in bnxt_hwrm_func_backing_store_qcaps_v2()
7862 ctxm->type = le16_to_cpu(resp->type); in bnxt_hwrm_func_backing_store_qcaps_v2()
7863 ctxm->entry_size = le16_to_cpu(resp->entry_size); in bnxt_hwrm_func_backing_store_qcaps_v2()
7864 ctxm->flags = flags; in bnxt_hwrm_func_backing_store_qcaps_v2()
7865 ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map); in bnxt_hwrm_func_backing_store_qcaps_v2()
7866 ctxm->entry_multiple = resp->entry_multiple; in bnxt_hwrm_func_backing_store_qcaps_v2()
7867 ctxm->max_entries = le32_to_cpu(resp->max_num_entries); in bnxt_hwrm_func_backing_store_qcaps_v2()
7868 ctxm->min_entries = le32_to_cpu(resp->min_num_entries); in bnxt_hwrm_func_backing_store_qcaps_v2()
7869 init_val = resp->ctx_init_value; in bnxt_hwrm_func_backing_store_qcaps_v2()
7870 init_off = resp->ctx_init_offset; in bnxt_hwrm_func_backing_store_qcaps_v2()
7873 ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt, in bnxt_hwrm_func_backing_store_qcaps_v2()
7875 for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt; in bnxt_hwrm_func_backing_store_qcaps_v2()
7877 ctxm->split[i] = le32_to_cpu(*p); in bnxt_hwrm_func_backing_store_qcaps_v2()
7892 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx) in bnxt_hwrm_func_backing_store_qcaps()
7895 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2) in bnxt_hwrm_func_backing_store_qcaps()
7910 ctx = bp->ctx; in bnxt_hwrm_func_backing_store_qcaps()
7914 rc = -ENOMEM; in bnxt_hwrm_func_backing_store_qcaps()
7917 bp->ctx = ctx; in bnxt_hwrm_func_backing_store_qcaps()
7919 init_val = resp->ctx_kind_initializer; in bnxt_hwrm_func_backing_store_qcaps()
7920 init_mask = le16_to_cpu(resp->ctx_init_mask); in bnxt_hwrm_func_backing_store_qcaps()
7922 ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; in bnxt_hwrm_func_backing_store_qcaps()
7923 ctxm->max_entries = le32_to_cpu(resp->qp_max_entries); in bnxt_hwrm_func_backing_store_qcaps()
7924 ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries); in bnxt_hwrm_func_backing_store_qcaps()
7925 ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries); in bnxt_hwrm_func_backing_store_qcaps()
7926 ctxm->qp_fast_qpmd_entries = le16_to_cpu(resp->fast_qpmd_qp_num_entries); in bnxt_hwrm_func_backing_store_qcaps()
7927 ctxm->entry_size = le16_to_cpu(resp->qp_entry_size); in bnxt_hwrm_func_backing_store_qcaps()
7928 bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset, in bnxt_hwrm_func_backing_store_qcaps()
7931 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; in bnxt_hwrm_func_backing_store_qcaps()
7932 ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries); in bnxt_hwrm_func_backing_store_qcaps()
7933 ctxm->max_entries = le32_to_cpu(resp->srq_max_entries); in bnxt_hwrm_func_backing_store_qcaps()
7934 ctxm->entry_size = le16_to_cpu(resp->srq_entry_size); in bnxt_hwrm_func_backing_store_qcaps()
7935 bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset, in bnxt_hwrm_func_backing_store_qcaps()
7938 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ]; in bnxt_hwrm_func_backing_store_qcaps()
7939 ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries); in bnxt_hwrm_func_backing_store_qcaps()
7940 ctxm->max_entries = le32_to_cpu(resp->cq_max_entries); in bnxt_hwrm_func_backing_store_qcaps()
7941 ctxm->entry_size = le16_to_cpu(resp->cq_entry_size); in bnxt_hwrm_func_backing_store_qcaps()
7942 bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset, in bnxt_hwrm_func_backing_store_qcaps()
7945 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC]; in bnxt_hwrm_func_backing_store_qcaps()
7946 ctxm->vnic_entries = le16_to_cpu(resp->vnic_max_vnic_entries); in bnxt_hwrm_func_backing_store_qcaps()
7947 ctxm->max_entries = ctxm->vnic_entries + in bnxt_hwrm_func_backing_store_qcaps()
7948 le16_to_cpu(resp->vnic_max_ring_table_entries); in bnxt_hwrm_func_backing_store_qcaps()
7949 ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size); in bnxt_hwrm_func_backing_store_qcaps()
7951 resp->vnic_init_offset, in bnxt_hwrm_func_backing_store_qcaps()
7954 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT]; in bnxt_hwrm_func_backing_store_qcaps()
7955 ctxm->max_entries = le32_to_cpu(resp->stat_max_entries); in bnxt_hwrm_func_backing_store_qcaps()
7956 ctxm->entry_size = le16_to_cpu(resp->stat_entry_size); in bnxt_hwrm_func_backing_store_qcaps()
7958 resp->stat_init_offset, in bnxt_hwrm_func_backing_store_qcaps()
7961 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM]; in bnxt_hwrm_func_backing_store_qcaps()
7962 ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size); in bnxt_hwrm_func_backing_store_qcaps()
7963 ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring); in bnxt_hwrm_func_backing_store_qcaps()
7964 ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring); in bnxt_hwrm_func_backing_store_qcaps()
7965 ctxm->entry_multiple = resp->tqm_entries_multiple; in bnxt_hwrm_func_backing_store_qcaps()
7966 if (!ctxm->entry_multiple) in bnxt_hwrm_func_backing_store_qcaps()
7967 ctxm->entry_multiple = 1; in bnxt_hwrm_func_backing_store_qcaps()
7969 memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm)); in bnxt_hwrm_func_backing_store_qcaps()
7971 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV]; in bnxt_hwrm_func_backing_store_qcaps()
7972 ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries); in bnxt_hwrm_func_backing_store_qcaps()
7973 ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size); in bnxt_hwrm_func_backing_store_qcaps()
7974 ctxm->mrav_num_entries_units = in bnxt_hwrm_func_backing_store_qcaps()
7975 le16_to_cpu(resp->mrav_num_entries_units); in bnxt_hwrm_func_backing_store_qcaps()
7977 resp->mrav_init_offset, in bnxt_hwrm_func_backing_store_qcaps()
7980 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM]; in bnxt_hwrm_func_backing_store_qcaps()
7981 ctxm->entry_size = le16_to_cpu(resp->tim_entry_size); in bnxt_hwrm_func_backing_store_qcaps()
7982 ctxm->max_entries = le32_to_cpu(resp->tim_max_entries); in bnxt_hwrm_func_backing_store_qcaps()
7984 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count; in bnxt_hwrm_func_backing_store_qcaps()
7985 if (!ctx->tqm_fp_rings_count) in bnxt_hwrm_func_backing_store_qcaps()
7986 ctx->tqm_fp_rings_count = bp->max_q; in bnxt_hwrm_func_backing_store_qcaps()
7987 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS) in bnxt_hwrm_func_backing_store_qcaps()
7988 ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS; in bnxt_hwrm_func_backing_store_qcaps()
7990 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM]; in bnxt_hwrm_func_backing_store_qcaps()
7991 memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm)); in bnxt_hwrm_func_backing_store_qcaps()
7992 ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1; in bnxt_hwrm_func_backing_store_qcaps()
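/* Note: as the excerpt above shows, bnxt_hwrm_func_backing_store_qcaps()
 * caches the firmware-advertised backing store requirements -- entry counts,
 * entry sizes and context initializer values for the QP, SRQ, CQ, VNIC,
 * STAT, TQM, MRAV and TIM types -- in bp->ctx->ctx_arr[].  The fast-path TQM
 * slot (BNXT_CTX_FTQM) is cloned from the slow-path one (BNXT_CTX_STQM) and
 * given an instance bitmap with one bit per TQM ring, with the ring count
 * defaulting to bp->max_q and clamped to BNXT_MAX_TQM_FP_RINGS.
 */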
8006 if (!rmem->nr_pages) in bnxt_hwrm_set_pg_attr()
8010 if (rmem->depth >= 1) { in bnxt_hwrm_set_pg_attr()
8011 if (rmem->depth == 2) in bnxt_hwrm_set_pg_attr()
8015 *pg_dir = cpu_to_le64(rmem->pg_tbl_map); in bnxt_hwrm_set_pg_attr()
8017 *pg_dir = cpu_to_le64(rmem->dma_arr[0]); in bnxt_hwrm_set_pg_attr()
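/* Note: bnxt_hwrm_set_pg_attr() packs one ring_mem descriptor into a HWRM
 * request: *pg_attr encodes the page size and PBL indirection level, and
 * *pg_dir receives the DMA address of the page table when depth >= 1, or of
 * the single data page (dma_arr[0]) for a flat, depth-0 region.
 */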
8031 struct bnxt_ctx_mem_info *ctx = bp->ctx; in bnxt_hwrm_func_backing_store_cfg()
8047 if (req_len > bp->hwrm_max_ext_req_len) in bnxt_hwrm_func_backing_store_cfg()
8053 req->enables = cpu_to_le32(enables); in bnxt_hwrm_func_backing_store_cfg()
8055 ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; in bnxt_hwrm_func_backing_store_cfg()
8056 ctx_pg = ctxm->pg_info; in bnxt_hwrm_func_backing_store_cfg()
8057 req->qp_num_entries = cpu_to_le32(ctx_pg->entries); in bnxt_hwrm_func_backing_store_cfg()
8058 req->qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries); in bnxt_hwrm_func_backing_store_cfg()
8059 req->qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries); in bnxt_hwrm_func_backing_store_cfg()
8060 req->qp_entry_size = cpu_to_le16(ctxm->entry_size); in bnxt_hwrm_func_backing_store_cfg()
8061 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, in bnxt_hwrm_func_backing_store_cfg()
8062 &req->qpc_pg_size_qpc_lvl, in bnxt_hwrm_func_backing_store_cfg()
8063 &req->qpc_page_dir); in bnxt_hwrm_func_backing_store_cfg()
8066 req->qp_num_fast_qpmd_entries = cpu_to_le16(ctxm->qp_fast_qpmd_entries); in bnxt_hwrm_func_backing_store_cfg()
8069 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; in bnxt_hwrm_func_backing_store_cfg()
8070 ctx_pg = ctxm->pg_info; in bnxt_hwrm_func_backing_store_cfg()
8071 req->srq_num_entries = cpu_to_le32(ctx_pg->entries); in bnxt_hwrm_func_backing_store_cfg()
8072 req->srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries); in bnxt_hwrm_func_backing_store_cfg()
8073 req->srq_entry_size = cpu_to_le16(ctxm->entry_size); in bnxt_hwrm_func_backing_store_cfg()
8074 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, in bnxt_hwrm_func_backing_store_cfg()
8075 &req->srq_pg_size_srq_lvl, in bnxt_hwrm_func_backing_store_cfg()
8076 &req->srq_page_dir); in bnxt_hwrm_func_backing_store_cfg()
8079 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ]; in bnxt_hwrm_func_backing_store_cfg()
8080 ctx_pg = ctxm->pg_info; in bnxt_hwrm_func_backing_store_cfg()
8081 req->cq_num_entries = cpu_to_le32(ctx_pg->entries); in bnxt_hwrm_func_backing_store_cfg()
8082 req->cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries); in bnxt_hwrm_func_backing_store_cfg()
8083 req->cq_entry_size = cpu_to_le16(ctxm->entry_size); in bnxt_hwrm_func_backing_store_cfg()
8084 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, in bnxt_hwrm_func_backing_store_cfg()
8085 &req->cq_pg_size_cq_lvl, in bnxt_hwrm_func_backing_store_cfg()
8086 &req->cq_page_dir); in bnxt_hwrm_func_backing_store_cfg()
8089 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC]; in bnxt_hwrm_func_backing_store_cfg()
8090 ctx_pg = ctxm->pg_info; in bnxt_hwrm_func_backing_store_cfg()
8091 req->vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries); in bnxt_hwrm_func_backing_store_cfg()
8092 req->vnic_num_ring_table_entries = in bnxt_hwrm_func_backing_store_cfg()
8093 cpu_to_le16(ctxm->max_entries - ctxm->vnic_entries); in bnxt_hwrm_func_backing_store_cfg()
8094 req->vnic_entry_size = cpu_to_le16(ctxm->entry_size); in bnxt_hwrm_func_backing_store_cfg()
8095 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, in bnxt_hwrm_func_backing_store_cfg()
8096 &req->vnic_pg_size_vnic_lvl, in bnxt_hwrm_func_backing_store_cfg()
8097 &req->vnic_page_dir); in bnxt_hwrm_func_backing_store_cfg()
8100 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT]; in bnxt_hwrm_func_backing_store_cfg()
8101 ctx_pg = ctxm->pg_info; in bnxt_hwrm_func_backing_store_cfg()
8102 req->stat_num_entries = cpu_to_le32(ctxm->max_entries); in bnxt_hwrm_func_backing_store_cfg()
8103 req->stat_entry_size = cpu_to_le16(ctxm->entry_size); in bnxt_hwrm_func_backing_store_cfg()
8104 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, in bnxt_hwrm_func_backing_store_cfg()
8105 &req->stat_pg_size_stat_lvl, in bnxt_hwrm_func_backing_store_cfg()
8106 &req->stat_page_dir); in bnxt_hwrm_func_backing_store_cfg()
8111 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV]; in bnxt_hwrm_func_backing_store_cfg()
8112 ctx_pg = ctxm->pg_info; in bnxt_hwrm_func_backing_store_cfg()
8113 req->mrav_num_entries = cpu_to_le32(ctx_pg->entries); in bnxt_hwrm_func_backing_store_cfg()
8114 units = ctxm->mrav_num_entries_units; in bnxt_hwrm_func_backing_store_cfg()
8116 u32 num_mr, num_ah = ctxm->mrav_av_entries; in bnxt_hwrm_func_backing_store_cfg()
8119 num_mr = ctx_pg->entries - num_ah; in bnxt_hwrm_func_backing_store_cfg()
8121 req->mrav_num_entries = cpu_to_le32(entries); in bnxt_hwrm_func_backing_store_cfg()
8124 req->mrav_entry_size = cpu_to_le16(ctxm->entry_size); in bnxt_hwrm_func_backing_store_cfg()
8125 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, in bnxt_hwrm_func_backing_store_cfg()
8126 &req->mrav_pg_size_mrav_lvl, in bnxt_hwrm_func_backing_store_cfg()
8127 &req->mrav_page_dir); in bnxt_hwrm_func_backing_store_cfg()
8130 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM]; in bnxt_hwrm_func_backing_store_cfg()
8131 ctx_pg = ctxm->pg_info; in bnxt_hwrm_func_backing_store_cfg()
8132 req->tim_num_entries = cpu_to_le32(ctx_pg->entries); in bnxt_hwrm_func_backing_store_cfg()
8133 req->tim_entry_size = cpu_to_le16(ctxm->entry_size); in bnxt_hwrm_func_backing_store_cfg()
8134 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, in bnxt_hwrm_func_backing_store_cfg()
8135 &req->tim_pg_size_tim_lvl, in bnxt_hwrm_func_backing_store_cfg()
8136 &req->tim_page_dir); in bnxt_hwrm_func_backing_store_cfg()
8138 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM]; in bnxt_hwrm_func_backing_store_cfg()
8139 for (i = 0, num_entries = &req->tqm_sp_num_entries, in bnxt_hwrm_func_backing_store_cfg()
8140 pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl, in bnxt_hwrm_func_backing_store_cfg()
8141 pg_dir = &req->tqm_sp_page_dir, in bnxt_hwrm_func_backing_store_cfg()
8143 ctx_pg = ctxm->pg_info; in bnxt_hwrm_func_backing_store_cfg()
8145 ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i], in bnxt_hwrm_func_backing_store_cfg()
8150 req->tqm_entry_size = cpu_to_le16(ctxm->entry_size); in bnxt_hwrm_func_backing_store_cfg()
8151 *num_entries = cpu_to_le32(ctx_pg->entries); in bnxt_hwrm_func_backing_store_cfg()
8152 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir); in bnxt_hwrm_func_backing_store_cfg()
8154 req->flags = cpu_to_le32(flags); in bnxt_hwrm_func_backing_store_cfg()
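/* Note: bnxt_hwrm_func_backing_store_cfg() is the legacy single-shot
 * configuration path: every enabled context type is copied into one large
 * FUNC_BACKING_STORE_CFG request, with bnxt_hwrm_set_pg_attr() filling in
 * the per-region page attributes and directories.  The closing loop walks
 * the slow-path TQM ring first and then each fast-path TQM ring, advancing
 * the (num_entries, pg_attr, pg_dir) pointer triplet through the request
 * fields as it goes.
 */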
8161 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; in bnxt_alloc_ctx_mem_blk()
8163 rmem->page_size = BNXT_PAGE_SIZE; in bnxt_alloc_ctx_mem_blk()
8164 rmem->pg_arr = ctx_pg->ctx_pg_arr; in bnxt_alloc_ctx_mem_blk()
8165 rmem->dma_arr = ctx_pg->ctx_dma_arr; in bnxt_alloc_ctx_mem_blk()
8166 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; in bnxt_alloc_ctx_mem_blk()
8167 if (rmem->depth >= 1) in bnxt_alloc_ctx_mem_blk()
8168 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG; in bnxt_alloc_ctx_mem_blk()
8176 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; in bnxt_alloc_ctx_pg_tbls()
8180 return -EINVAL; in bnxt_alloc_ctx_pg_tbls()
8182 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); in bnxt_alloc_ctx_pg_tbls()
8183 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) { in bnxt_alloc_ctx_pg_tbls()
8184 ctx_pg->nr_pages = 0; in bnxt_alloc_ctx_pg_tbls()
8185 return -EINVAL; in bnxt_alloc_ctx_pg_tbls()
8187 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) { in bnxt_alloc_ctx_pg_tbls()
8190 rmem->depth = 2; in bnxt_alloc_ctx_pg_tbls()
8191 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg), in bnxt_alloc_ctx_pg_tbls()
8193 if (!ctx_pg->ctx_pg_tbl) in bnxt_alloc_ctx_pg_tbls()
8194 return -ENOMEM; in bnxt_alloc_ctx_pg_tbls()
8195 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES); in bnxt_alloc_ctx_pg_tbls()
8196 rmem->nr_pages = nr_tbls; in bnxt_alloc_ctx_pg_tbls()
8205 return -ENOMEM; in bnxt_alloc_ctx_pg_tbls()
8206 ctx_pg->ctx_pg_tbl[i] = pg_tbl; in bnxt_alloc_ctx_pg_tbls()
8207 rmem = &pg_tbl->ring_mem; in bnxt_alloc_ctx_pg_tbls()
8208 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i]; in bnxt_alloc_ctx_pg_tbls()
8209 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i]; in bnxt_alloc_ctx_pg_tbls()
8210 rmem->depth = 1; in bnxt_alloc_ctx_pg_tbls()
8211 rmem->nr_pages = MAX_CTX_PAGES; in bnxt_alloc_ctx_pg_tbls()
8212 rmem->ctx_mem = ctxm; in bnxt_alloc_ctx_pg_tbls()
8213 if (i == (nr_tbls - 1)) { in bnxt_alloc_ctx_pg_tbls()
8214 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES; in bnxt_alloc_ctx_pg_tbls()
8217 rmem->nr_pages = rem; in bnxt_alloc_ctx_pg_tbls()
8224 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); in bnxt_alloc_ctx_pg_tbls()
8225 if (rmem->nr_pages > 1 || depth) in bnxt_alloc_ctx_pg_tbls()
8226 rmem->depth = 1; in bnxt_alloc_ctx_pg_tbls()
8227 rmem->ctx_mem = ctxm; in bnxt_alloc_ctx_pg_tbls()
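/* Note: bnxt_alloc_ctx_pg_tbls() sizes a region in BNXT_PAGE_SIZE units and
 * picks the indirection depth: a flat layout (depth 0 or 1) when the region
 * fits in MAX_CTX_PAGES, otherwise a two-level layout (depth 2) in which
 * ctx_pg_tbl[] holds child page tables of MAX_CTX_PAGES pages each and only
 * the last child is trimmed to the remainder.  Regions that would exceed
 * MAX_CTX_TOTAL_PAGES are rejected with -EINVAL up front.
 */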
8236 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; in bnxt_free_ctx_pg_tbls()
8238 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES || in bnxt_free_ctx_pg_tbls()
8239 ctx_pg->ctx_pg_tbl) { in bnxt_free_ctx_pg_tbls()
8240 int i, nr_tbls = rmem->nr_pages; in bnxt_free_ctx_pg_tbls()
8246 pg_tbl = ctx_pg->ctx_pg_tbl[i]; in bnxt_free_ctx_pg_tbls()
8249 rmem2 = &pg_tbl->ring_mem; in bnxt_free_ctx_pg_tbls()
8251 ctx_pg->ctx_pg_arr[i] = NULL; in bnxt_free_ctx_pg_tbls()
8253 ctx_pg->ctx_pg_tbl[i] = NULL; in bnxt_free_ctx_pg_tbls()
8255 kfree(ctx_pg->ctx_pg_tbl); in bnxt_free_ctx_pg_tbls()
8256 ctx_pg->ctx_pg_tbl = NULL; in bnxt_free_ctx_pg_tbls()
8259 ctx_pg->nr_pages = 0; in bnxt_free_ctx_pg_tbls()
8266 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info; in bnxt_setup_ctxm_pg_tbls()
8270 if (!ctxm->entry_size || !ctx_pg) in bnxt_setup_ctxm_pg_tbls()
8271 return -EINVAL; in bnxt_setup_ctxm_pg_tbls()
8272 if (ctxm->instance_bmap) in bnxt_setup_ctxm_pg_tbls()
8273 n = hweight32(ctxm->instance_bmap); in bnxt_setup_ctxm_pg_tbls()
8274 if (ctxm->entry_multiple) in bnxt_setup_ctxm_pg_tbls()
8275 entries = roundup(entries, ctxm->entry_multiple); in bnxt_setup_ctxm_pg_tbls()
8276 entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries); in bnxt_setup_ctxm_pg_tbls()
8277 mem_size = entries * ctxm->entry_size; in bnxt_setup_ctxm_pg_tbls()
8281 ctxm->init_value ? ctxm : NULL); in bnxt_setup_ctxm_pg_tbls()
8291 u32 instance_bmap = ctxm->instance_bmap; in bnxt_hwrm_func_backing_store_cfg_v2()
8295 if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info) in bnxt_hwrm_func_backing_store_cfg_v2()
8299 n = hweight32(ctxm->instance_bmap); in bnxt_hwrm_func_backing_store_cfg_v2()
8307 req->type = cpu_to_le16(ctxm->type); in bnxt_hwrm_func_backing_store_cfg_v2()
8308 req->entry_size = cpu_to_le16(ctxm->entry_size); in bnxt_hwrm_func_backing_store_cfg_v2()
8309 req->subtype_valid_cnt = ctxm->split_entry_cnt; in bnxt_hwrm_func_backing_store_cfg_v2()
8310 for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++) in bnxt_hwrm_func_backing_store_cfg_v2()
8311 p[i] = cpu_to_le32(ctxm->split[i]); in bnxt_hwrm_func_backing_store_cfg_v2()
8317 req->instance = cpu_to_le16(i); in bnxt_hwrm_func_backing_store_cfg_v2()
8318 ctx_pg = &ctxm->pg_info[j++]; in bnxt_hwrm_func_backing_store_cfg_v2()
8319 if (!ctx_pg->entries) in bnxt_hwrm_func_backing_store_cfg_v2()
8321 req->num_entries = cpu_to_le32(ctx_pg->entries); in bnxt_hwrm_func_backing_store_cfg_v2()
8322 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, in bnxt_hwrm_func_backing_store_cfg_v2()
8323 &req->page_size_pbl_level, in bnxt_hwrm_func_backing_store_cfg_v2()
8324 &req->page_dir); in bnxt_hwrm_func_backing_store_cfg_v2()
8326 req->flags = in bnxt_hwrm_func_backing_store_cfg_v2()
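/* Note: bnxt_hwrm_func_backing_store_cfg_v2() is the per-type replacement
 * for the monolithic v1 call: one FUNC_BACKING_STORE_CFG_V2 request per
 * instance in the instance bitmap, each carrying the type, entry size,
 * split-entry array and page attributes.  The "last" argument appears to be
 * folded into req->flags on the final instance so firmware can tell when
 * the backing store programming sequence is complete.
 */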
8336 struct bnxt_ctx_mem_info *ctx = bp->ctx; in bnxt_backing_store_cfg_v2()
8345 last_type = BNXT_CTX_MAX - 1; in bnxt_backing_store_cfg_v2()
8347 last_type = BNXT_CTX_L2_MAX - 1; in bnxt_backing_store_cfg_v2()
8348 ctx->ctx_arr[last_type].last = 1; in bnxt_backing_store_cfg_v2()
8351 ctxm = &ctx->ctx_arr[type]; in bnxt_backing_store_cfg_v2()
8353 rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last); in bnxt_backing_store_cfg_v2()
8362 struct bnxt_ctx_mem_info *ctx = bp->ctx; in bnxt_free_ctx_mem()
8369 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type]; in bnxt_free_ctx_mem()
8370 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info; in bnxt_free_ctx_mem()
8375 if (ctxm->instance_bmap) in bnxt_free_ctx_mem()
8376 n = hweight32(ctxm->instance_bmap); in bnxt_free_ctx_mem()
8381 ctxm->pg_info = NULL; in bnxt_free_ctx_mem()
8384 ctx->flags &= ~BNXT_CTX_FLAG_INITED; in bnxt_free_ctx_mem()
8386 bp->ctx = NULL; in bnxt_free_ctx_mem()
8405 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n", in bnxt_alloc_ctx_mem()
8409 ctx = bp->ctx; in bnxt_alloc_ctx_mem()
8410 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) in bnxt_alloc_ctx_mem()
8413 ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; in bnxt_alloc_ctx_mem()
8414 l2_qps = ctxm->qp_l2_entries; in bnxt_alloc_ctx_mem()
8415 qp1_qps = ctxm->qp_qp1_entries; in bnxt_alloc_ctx_mem()
8416 fast_qpmd_qps = ctxm->qp_fast_qpmd_entries; in bnxt_alloc_ctx_mem()
8417 max_qps = ctxm->max_entries; in bnxt_alloc_ctx_mem()
8418 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; in bnxt_alloc_ctx_mem()
8419 srqs = ctxm->srq_l2_entries; in bnxt_alloc_ctx_mem()
8420 max_srqs = ctxm->max_entries; in bnxt_alloc_ctx_mem()
8422 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) { in bnxt_alloc_ctx_mem()
8424 extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps); in bnxt_alloc_ctx_mem()
8427 extra_srqs = min_t(u32, 8192, max_srqs - srqs); in bnxt_alloc_ctx_mem()
8432 ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; in bnxt_alloc_ctx_mem()
8438 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; in bnxt_alloc_ctx_mem()
8443 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ]; in bnxt_alloc_ctx_mem()
8444 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->cq_l2_entries + in bnxt_alloc_ctx_mem()
8449 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC]; in bnxt_alloc_ctx_mem()
8450 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1); in bnxt_alloc_ctx_mem()
8454 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT]; in bnxt_alloc_ctx_mem()
8455 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1); in bnxt_alloc_ctx_mem()
8459 if (!(bp->flags & BNXT_FLAG_ROCE_CAP)) in bnxt_alloc_ctx_mem()
8462 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV]; in bnxt_alloc_ctx_mem()
8466 num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256); in bnxt_alloc_ctx_mem()
8468 ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1; in bnxt_alloc_ctx_mem()
8469 if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah) in bnxt_alloc_ctx_mem()
8470 ctxm->mrav_av_entries = num_ah; in bnxt_alloc_ctx_mem()
8477 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM]; in bnxt_alloc_ctx_mem()
8484 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM]; in bnxt_alloc_ctx_mem()
8485 min = ctxm->min_entries; in bnxt_alloc_ctx_mem()
8486 entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps + in bnxt_alloc_ctx_mem()
8492 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM]; in bnxt_alloc_ctx_mem()
8497 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) in bnxt_alloc_ctx_mem()
8501 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2) in bnxt_alloc_ctx_mem()
8506 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n", in bnxt_alloc_ctx_mem()
8510 ctx->flags |= BNXT_CTX_FLAG_INITED; in bnxt_alloc_ctx_mem()
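/* Note: bnxt_alloc_ctx_mem() strings the pieces together: query the
 * capabilities, size each context type (adding extra QP/SRQ/MRAV/TIM
 * entries only when BNXT_FLAG_ROCE_CAP is set and this is not a kdump
 * kernel), back every type with pages via bnxt_setup_ctxm_pg_tbls(), and
 * push the layout to firmware through the v2 or v1 call depending on
 * BNXT_FW_CAP_BACKING_STORE_V2.  BNXT_CTX_FLAG_INITED then marks the
 * backing store as programmed so reruns become no-ops.
 */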
8518 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; in bnxt_hwrm_func_resc_qcaps()
8525 req->fid = cpu_to_le16(0xffff); in bnxt_hwrm_func_resc_qcaps()
8531 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs); in bnxt_hwrm_func_resc_qcaps()
8535 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx); in bnxt_hwrm_func_resc_qcaps()
8536 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); in bnxt_hwrm_func_resc_qcaps()
8537 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings); in bnxt_hwrm_func_resc_qcaps()
8538 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); in bnxt_hwrm_func_resc_qcaps()
8539 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings); in bnxt_hwrm_func_resc_qcaps()
8540 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); in bnxt_hwrm_func_resc_qcaps()
8541 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings); in bnxt_hwrm_func_resc_qcaps()
8542 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); in bnxt_hwrm_func_resc_qcaps()
8543 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps); in bnxt_hwrm_func_resc_qcaps()
8544 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps); in bnxt_hwrm_func_resc_qcaps()
8545 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs); in bnxt_hwrm_func_resc_qcaps()
8546 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); in bnxt_hwrm_func_resc_qcaps()
8547 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics); in bnxt_hwrm_func_resc_qcaps()
8548 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); in bnxt_hwrm_func_resc_qcaps()
8549 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx); in bnxt_hwrm_func_resc_qcaps()
8550 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); in bnxt_hwrm_func_resc_qcaps()
8552 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in bnxt_hwrm_func_resc_qcaps()
8553 u16 max_msix = le16_to_cpu(resp->max_msix); in bnxt_hwrm_func_resc_qcaps()
8555 hw_resc->max_nqs = max_msix; in bnxt_hwrm_func_resc_qcaps()
8556 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings; in bnxt_hwrm_func_resc_qcaps()
8560 struct bnxt_pf_info *pf = &bp->pf; in bnxt_hwrm_func_resc_qcaps()
8562 pf->vf_resv_strategy = in bnxt_hwrm_func_resc_qcaps()
8563 le16_to_cpu(resp->vf_reservation_strategy); in bnxt_hwrm_func_resc_qcaps()
8564 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) in bnxt_hwrm_func_resc_qcaps()
8565 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL; in bnxt_hwrm_func_resc_qcaps()
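/* Note: bnxt_hwrm_func_resc_qcaps() snapshots the min/max resource ranges
 * (RSS contexts, completion/TX/RX rings, ring groups, L2 contexts, VNICs,
 * stat contexts) into bp->hw_resc.  On P5-plus chips the MSI-X ceiling also
 * serves as the NQ limit and ring groups are capped by the RX ring count;
 * on the PF the firmware's VF reservation strategy is recorded as well.
 */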
8576 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; in __bnxt_hwrm_ptp_qcfg()
8581 if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5(bp)) { in __bnxt_hwrm_ptp_qcfg()
8582 rc = -ENODEV; in __bnxt_hwrm_ptp_qcfg()
8590 req->port_id = cpu_to_le16(bp->pf.port_id); in __bnxt_hwrm_ptp_qcfg()
8596 flags = resp->flags; in __bnxt_hwrm_ptp_qcfg()
8598 rc = -ENODEV; in __bnxt_hwrm_ptp_qcfg()
8604 rc = -ENOMEM; in __bnxt_hwrm_ptp_qcfg()
8607 ptp->bp = bp; in __bnxt_hwrm_ptp_qcfg()
8608 bp->ptp_cfg = ptp; in __bnxt_hwrm_ptp_qcfg()
8611 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower); in __bnxt_hwrm_ptp_qcfg()
8612 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper); in __bnxt_hwrm_ptp_qcfg()
8613 } else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in __bnxt_hwrm_ptp_qcfg()
8614 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER; in __bnxt_hwrm_ptp_qcfg()
8615 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER; in __bnxt_hwrm_ptp_qcfg()
8617 rc = -ENODEV; in __bnxt_hwrm_ptp_qcfg()
8623 netdev_warn(bp->dev, "PTP initialization failed.\n"); in __bnxt_hwrm_ptp_qcfg()
8632 bp->ptp_cfg = NULL; in __bnxt_hwrm_ptp_qcfg()
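/* Note: __bnxt_hwrm_ptp_qcfg() probes the port's PTP configuration and, on
 * success, allocates bp->ptp_cfg.  Judging by the branches above, the PHC
 * registers come either from the response (ts_ref_clock_reg_lower/upper)
 * or, on P5-plus parts, from the fixed BNXT_TS_REG_TIMESYNC_TS0_{LOWER,
 * UPPER} addresses; every failure path frees the partially set up ptp_cfg
 * and clears the pointer.
 */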
8640 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; in __bnxt_hwrm_func_qcaps()
8648 req->fid = cpu_to_le16(0xffff); in __bnxt_hwrm_func_qcaps()
8654 flags = le32_to_cpu(resp->flags); in __bnxt_hwrm_func_qcaps()
8656 bp->flags |= BNXT_FLAG_ROCEV1_CAP; in __bnxt_hwrm_func_qcaps()
8658 bp->flags |= BNXT_FLAG_ROCEV2_CAP; in __bnxt_hwrm_func_qcaps()
8660 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED; in __bnxt_hwrm_func_qcaps()
8662 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET; in __bnxt_hwrm_func_qcaps()
8664 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED; in __bnxt_hwrm_func_qcaps()
8666 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY; in __bnxt_hwrm_func_qcaps()
8668 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD; in __bnxt_hwrm_func_qcaps()
8670 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT; in __bnxt_hwrm_func_qcaps()
8672 bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS; in __bnxt_hwrm_func_qcaps()
8674 flags_ext = le32_to_cpu(resp->flags_ext); in __bnxt_hwrm_func_qcaps()
8676 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED; in __bnxt_hwrm_func_qcaps()
8678 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS; in __bnxt_hwrm_func_qcaps()
8680 bp->fw_cap |= BNXT_FW_CAP_PTP_RTC; in __bnxt_hwrm_func_qcaps()
8682 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF; in __bnxt_hwrm_func_qcaps()
8684 bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH; in __bnxt_hwrm_func_qcaps()
8686 bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2; in __bnxt_hwrm_func_qcaps()
8688 bp->flags |= BNXT_FLAG_TX_COAL_CMPL; in __bnxt_hwrm_func_qcaps()
8690 flags_ext2 = le32_to_cpu(resp->flags_ext2); in __bnxt_hwrm_func_qcaps()
8692 bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS; in __bnxt_hwrm_func_qcaps()
8694 bp->flags |= BNXT_FLAG_UDP_GSO_CAP; in __bnxt_hwrm_func_qcaps()
8696 bp->tx_push_thresh = 0; in __bnxt_hwrm_func_qcaps()
8699 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH; in __bnxt_hwrm_func_qcaps()
8701 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); in __bnxt_hwrm_func_qcaps()
8702 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); in __bnxt_hwrm_func_qcaps()
8703 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); in __bnxt_hwrm_func_qcaps()
8704 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); in __bnxt_hwrm_func_qcaps()
8705 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps); in __bnxt_hwrm_func_qcaps()
8706 if (!hw_resc->max_hw_ring_grps) in __bnxt_hwrm_func_qcaps()
8707 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings; in __bnxt_hwrm_func_qcaps()
8708 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); in __bnxt_hwrm_func_qcaps()
8709 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); in __bnxt_hwrm_func_qcaps()
8710 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); in __bnxt_hwrm_func_qcaps()
8713 struct bnxt_pf_info *pf = &bp->pf; in __bnxt_hwrm_func_qcaps()
8715 pf->fw_fid = le16_to_cpu(resp->fid); in __bnxt_hwrm_func_qcaps()
8716 pf->port_id = le16_to_cpu(resp->port_id); in __bnxt_hwrm_func_qcaps()
8717 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); in __bnxt_hwrm_func_qcaps()
8718 pf->first_vf_id = le16_to_cpu(resp->first_vf_id); in __bnxt_hwrm_func_qcaps()
8719 pf->max_vfs = le16_to_cpu(resp->max_vfs); in __bnxt_hwrm_func_qcaps()
8720 pf->max_encap_records = le32_to_cpu(resp->max_encap_records); in __bnxt_hwrm_func_qcaps()
8721 pf->max_decap_records = le32_to_cpu(resp->max_decap_records); in __bnxt_hwrm_func_qcaps()
8722 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows); in __bnxt_hwrm_func_qcaps()
8723 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows); in __bnxt_hwrm_func_qcaps()
8724 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows); in __bnxt_hwrm_func_qcaps()
8725 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows); in __bnxt_hwrm_func_qcaps()
8726 bp->flags &= ~BNXT_FLAG_WOL_CAP; in __bnxt_hwrm_func_qcaps()
8728 bp->flags |= BNXT_FLAG_WOL_CAP; in __bnxt_hwrm_func_qcaps()
8730 bp->fw_cap |= BNXT_FW_CAP_PTP; in __bnxt_hwrm_func_qcaps()
8733 kfree(bp->ptp_cfg); in __bnxt_hwrm_func_qcaps()
8734 bp->ptp_cfg = NULL; in __bnxt_hwrm_func_qcaps()
8738 struct bnxt_vf_info *vf = &bp->vf; in __bnxt_hwrm_func_qcaps()
8740 vf->fw_fid = le16_to_cpu(resp->fid); in __bnxt_hwrm_func_qcaps()
8741 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN); in __bnxt_hwrm_func_qcaps()
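/* Note: __bnxt_hwrm_func_qcaps() translates the FUNC_QCAPS flags, flags_ext
 * and flags_ext2 words into bp->flags/bp->fw_cap feature bits (RoCE v1/v2,
 * hot reset, error recovery, PTP PPS/RTC, livepatch, backing store v2, UDP
 * GSO, ...) and copies the resource maxima into bp->hw_resc.  The PF branch
 * additionally records fid, port id, MAC address, the VF id range and the
 * encap/decap and EM/WM flow limits, plus WoL and PTP capability; a VF
 * only keeps its fid and MAC address.
 */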
8756 bp->fw_dbg_cap = 0; in bnxt_hwrm_dbg_qcaps()
8757 if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS)) in bnxt_hwrm_dbg_qcaps()
8764 req->fid = cpu_to_le16(0xffff); in bnxt_hwrm_dbg_qcaps()
8770 bp->fw_dbg_cap = le32_to_cpu(resp->flags); in bnxt_hwrm_dbg_qcaps()
8790 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc); in bnxt_hwrm_func_qcaps()
8793 if (bp->hwrm_spec_code >= 0x10803) { in bnxt_hwrm_func_qcaps()
8799 bp->fw_cap |= BNXT_FW_CAP_NEW_RM; in bnxt_hwrm_func_qcaps()
8811 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW)) in bnxt_hwrm_cfa_adv_flow_mgnt_qcaps()
8823 flags = le32_to_cpu(resp->flags); in bnxt_hwrm_cfa_adv_flow_mgnt_qcaps()
8826 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2; in bnxt_hwrm_cfa_adv_flow_mgnt_qcaps()
8835 if (bp->fw_health) in __bnxt_alloc_fw_health()
8838 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL); in __bnxt_alloc_fw_health()
8839 if (!bp->fw_health) in __bnxt_alloc_fw_health()
8840 return -ENOMEM; in __bnxt_alloc_fw_health()
8842 mutex_init(&bp->fw_health->lock); in __bnxt_alloc_fw_health()
8850 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) && in bnxt_alloc_fw_health()
8851 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) in bnxt_alloc_fw_health()
8856 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET; in bnxt_alloc_fw_health()
8857 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; in bnxt_alloc_fw_health()
8866 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 + in __bnxt_map_fw_health_reg()
8873 struct bnxt_fw_health *fw_health = bp->fw_health; in bnxt_inv_fw_health_reg()
8879 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]); in bnxt_inv_fw_health_reg()
8881 fw_health->status_reliable = false; in bnxt_inv_fw_health_reg()
8883 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]); in bnxt_inv_fw_health_reg()
8885 fw_health->resets_reliable = false; in bnxt_inv_fw_health_reg()
8895 if (bp->fw_health) in bnxt_try_map_fw_health_reg()
8896 bp->fw_health->status_reliable = false; in bnxt_try_map_fw_health_reg()
8899 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC); in bnxt_try_map_fw_health_reg()
8903 if (!bp->chip_num) { in bnxt_try_map_fw_health_reg()
8905 bp->chip_num = readl(bp->bar0 + in bnxt_try_map_fw_health_reg()
8920 netdev_warn(bp->dev, "no memory for firmware status checks\n"); in bnxt_try_map_fw_health_reg()
8924 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc; in bnxt_try_map_fw_health_reg()
8928 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] = in bnxt_try_map_fw_health_reg()
8932 bp->fw_health->status_reliable = true; in bnxt_try_map_fw_health_reg()
8937 struct bnxt_fw_health *fw_health = bp->fw_health; in bnxt_map_fw_health_regs()
8941 bp->fw_health->status_reliable = false; in bnxt_map_fw_health_regs()
8942 bp->fw_health->resets_reliable = false; in bnxt_map_fw_health_regs()
8943 /* Only pre-map the monitoring GRC registers using window 3 */ in bnxt_map_fw_health_regs()
8945 u32 reg = fw_health->regs[i]; in bnxt_map_fw_health_regs()
8952 return -ERANGE; in bnxt_map_fw_health_regs()
8953 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg); in bnxt_map_fw_health_regs()
8955 bp->fw_health->status_reliable = true; in bnxt_map_fw_health_regs()
8956 bp->fw_health->resets_reliable = true; in bnxt_map_fw_health_regs()
8966 if (!bp->fw_health) in bnxt_remap_fw_health_regs()
8969 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) { in bnxt_remap_fw_health_regs()
8970 bp->fw_health->status_reliable = true; in bnxt_remap_fw_health_regs()
8971 bp->fw_health->resets_reliable = true; in bnxt_remap_fw_health_regs()
8979 struct bnxt_fw_health *fw_health = bp->fw_health; in bnxt_hwrm_error_recovery_qcfg()
8984 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) in bnxt_hwrm_error_recovery_qcfg()
8995 fw_health->flags = le32_to_cpu(resp->flags); in bnxt_hwrm_error_recovery_qcfg()
8996 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) && in bnxt_hwrm_error_recovery_qcfg()
8997 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) { in bnxt_hwrm_error_recovery_qcfg()
8998 rc = -EINVAL; in bnxt_hwrm_error_recovery_qcfg()
9001 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq); in bnxt_hwrm_error_recovery_qcfg()
9002 fw_health->master_func_wait_dsecs = in bnxt_hwrm_error_recovery_qcfg()
9003 le32_to_cpu(resp->master_func_wait_period); in bnxt_hwrm_error_recovery_qcfg()
9004 fw_health->normal_func_wait_dsecs = in bnxt_hwrm_error_recovery_qcfg()
9005 le32_to_cpu(resp->normal_func_wait_period); in bnxt_hwrm_error_recovery_qcfg()
9006 fw_health->post_reset_wait_dsecs = in bnxt_hwrm_error_recovery_qcfg()
9007 le32_to_cpu(resp->master_func_wait_period_after_reset); in bnxt_hwrm_error_recovery_qcfg()
9008 fw_health->post_reset_max_wait_dsecs = in bnxt_hwrm_error_recovery_qcfg()
9009 le32_to_cpu(resp->max_bailout_time_after_reset); in bnxt_hwrm_error_recovery_qcfg()
9010 fw_health->regs[BNXT_FW_HEALTH_REG] = in bnxt_hwrm_error_recovery_qcfg()
9011 le32_to_cpu(resp->fw_health_status_reg); in bnxt_hwrm_error_recovery_qcfg()
9012 fw_health->regs[BNXT_FW_HEARTBEAT_REG] = in bnxt_hwrm_error_recovery_qcfg()
9013 le32_to_cpu(resp->fw_heartbeat_reg); in bnxt_hwrm_error_recovery_qcfg()
9014 fw_health->regs[BNXT_FW_RESET_CNT_REG] = in bnxt_hwrm_error_recovery_qcfg()
9015 le32_to_cpu(resp->fw_reset_cnt_reg); in bnxt_hwrm_error_recovery_qcfg()
9016 fw_health->regs[BNXT_FW_RESET_INPROG_REG] = in bnxt_hwrm_error_recovery_qcfg()
9017 le32_to_cpu(resp->reset_inprogress_reg); in bnxt_hwrm_error_recovery_qcfg()
9018 fw_health->fw_reset_inprog_reg_mask = in bnxt_hwrm_error_recovery_qcfg()
9019 le32_to_cpu(resp->reset_inprogress_reg_mask); in bnxt_hwrm_error_recovery_qcfg()
9020 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt; in bnxt_hwrm_error_recovery_qcfg()
9021 if (fw_health->fw_reset_seq_cnt >= 16) { in bnxt_hwrm_error_recovery_qcfg()
9022 rc = -EINVAL; in bnxt_hwrm_error_recovery_qcfg()
9025 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) { in bnxt_hwrm_error_recovery_qcfg()
9026 fw_health->fw_reset_seq_regs[i] = in bnxt_hwrm_error_recovery_qcfg()
9027 le32_to_cpu(resp->reset_reg[i]); in bnxt_hwrm_error_recovery_qcfg()
9028 fw_health->fw_reset_seq_vals[i] = in bnxt_hwrm_error_recovery_qcfg()
9029 le32_to_cpu(resp->reset_reg_val[i]); in bnxt_hwrm_error_recovery_qcfg()
9030 fw_health->fw_reset_seq_delay_msec[i] = in bnxt_hwrm_error_recovery_qcfg()
9031 resp->delay_after_reset[i]; in bnxt_hwrm_error_recovery_qcfg()
9038 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; in bnxt_hwrm_error_recovery_qcfg()
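/* Note: bnxt_hwrm_error_recovery_qcfg() pulls the firmware health contract:
 * wait periods in deciseconds (hence the _dsecs suffixes), the GRC
 * addresses of the health, heartbeat, reset-count and reset-in-progress
 * registers, and the reset sequence as (register, value, post-write delay)
 * triplets.  A reg_array_cnt of 16 or more is rejected with -EINVAL -- it
 * would overflow the fixed fw_reset_seq_* arrays -- and any failure clears
 * BNXT_FW_CAP_ERROR_RECOVERY.
 */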
9051 req->enables = 0; in bnxt_hwrm_func_reset()
9061 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d", in bnxt_nvm_cfg_ver_get()
9083 if (!resp->max_configurable_queues) { in bnxt_hwrm_queue_qportcfg()
9084 rc = -EINVAL; in bnxt_hwrm_queue_qportcfg()
9087 bp->max_tc = resp->max_configurable_queues; in bnxt_hwrm_queue_qportcfg()
9088 bp->max_lltc = resp->max_configurable_lossless_queues; in bnxt_hwrm_queue_qportcfg()
9089 if (bp->max_tc > BNXT_MAX_QUEUE) in bnxt_hwrm_queue_qportcfg()
9090 bp->max_tc = BNXT_MAX_QUEUE; in bnxt_hwrm_queue_qportcfg()
9092 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP); in bnxt_hwrm_queue_qportcfg()
9093 qptr = &resp->queue_id0; in bnxt_hwrm_queue_qportcfg()
9094 for (i = 0, j = 0; i < bp->max_tc; i++) { in bnxt_hwrm_queue_qportcfg()
9095 bp->q_info[j].queue_id = *qptr; in bnxt_hwrm_queue_qportcfg()
9096 bp->q_ids[i] = *qptr++; in bnxt_hwrm_queue_qportcfg()
9097 bp->q_info[j].queue_profile = *qptr++; in bnxt_hwrm_queue_qportcfg()
9098 bp->tc_to_qidx[j] = j; in bnxt_hwrm_queue_qportcfg()
9099 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) || in bnxt_hwrm_queue_qportcfg()
9103 bp->max_q = bp->max_tc; in bnxt_hwrm_queue_qportcfg()
9104 bp->max_tc = max_t(u8, j, 1); in bnxt_hwrm_queue_qportcfg()
9106 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG) in bnxt_hwrm_queue_qportcfg()
9107 bp->max_tc = 1; in bnxt_hwrm_queue_qportcfg()
9109 if (bp->max_lltc > bp->max_tc) in bnxt_hwrm_queue_qportcfg()
9110 bp->max_lltc = bp->max_tc; in bnxt_hwrm_queue_qportcfg()
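/* Note: bnxt_hwrm_queue_qportcfg() builds the queue id/profile tables from
 * &resp->queue_id0 onward.  The loop keeps a queue unless it is a CNP
 * (congestion notification) queue while RoCE is active, so bp->max_q
 * retains the raw hardware queue count while bp->max_tc shrinks to the
 * usable traffic classes -- further forced to 1 when firmware reports an
 * asymmetric queue configuration -- and bp->max_lltc is capped to match.
 */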
9126 req->hwrm_intf_maj = HWRM_VERSION_MAJOR; in bnxt_hwrm_poll()
9127 req->hwrm_intf_min = HWRM_VERSION_MINOR; in bnxt_hwrm_poll()
9128 req->hwrm_intf_upd = HWRM_VERSION_UPDATE; in bnxt_hwrm_poll()
9148 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; in bnxt_hwrm_ver_get()
9149 req->hwrm_intf_maj = HWRM_VERSION_MAJOR; in bnxt_hwrm_ver_get()
9150 req->hwrm_intf_min = HWRM_VERSION_MINOR; in bnxt_hwrm_ver_get()
9151 req->hwrm_intf_upd = HWRM_VERSION_UPDATE; in bnxt_hwrm_ver_get()
9158 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output)); in bnxt_hwrm_ver_get()
9160 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 | in bnxt_hwrm_ver_get()
9161 resp->hwrm_intf_min_8b << 8 | in bnxt_hwrm_ver_get()
9162 resp->hwrm_intf_upd_8b; in bnxt_hwrm_ver_get()
9163 if (resp->hwrm_intf_maj_8b < 1) { in bnxt_hwrm_ver_get()
9164 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n", in bnxt_hwrm_ver_get()
9165 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, in bnxt_hwrm_ver_get()
9166 resp->hwrm_intf_upd_8b); in bnxt_hwrm_ver_get()
9167 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n"); in bnxt_hwrm_ver_get()
9173 if (bp->hwrm_spec_code > hwrm_ver) in bnxt_hwrm_ver_get()
9174 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d", in bnxt_hwrm_ver_get()
9178 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d", in bnxt_hwrm_ver_get()
9179 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, in bnxt_hwrm_ver_get()
9180 resp->hwrm_intf_upd_8b); in bnxt_hwrm_ver_get()
9182 fw_maj = le16_to_cpu(resp->hwrm_fw_major); in bnxt_hwrm_ver_get()
9183 if (bp->hwrm_spec_code > 0x10803 && fw_maj) { in bnxt_hwrm_ver_get()
9184 fw_min = le16_to_cpu(resp->hwrm_fw_minor); in bnxt_hwrm_ver_get()
9185 fw_bld = le16_to_cpu(resp->hwrm_fw_build); in bnxt_hwrm_ver_get()
9186 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch); in bnxt_hwrm_ver_get()
9189 fw_maj = resp->hwrm_fw_maj_8b; in bnxt_hwrm_ver_get()
9190 fw_min = resp->hwrm_fw_min_8b; in bnxt_hwrm_ver_get()
9191 fw_bld = resp->hwrm_fw_bld_8b; in bnxt_hwrm_ver_get()
9192 fw_rsv = resp->hwrm_fw_rsvd_8b; in bnxt_hwrm_ver_get()
9195 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv); in bnxt_hwrm_ver_get()
9196 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld, in bnxt_hwrm_ver_get()
9199 if (strlen(resp->active_pkg_name)) { in bnxt_hwrm_ver_get()
9200 int fw_ver_len = strlen(bp->fw_ver_str); in bnxt_hwrm_ver_get()
9202 snprintf(bp->fw_ver_str + fw_ver_len, in bnxt_hwrm_ver_get()
9203 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s", in bnxt_hwrm_ver_get()
9204 resp->active_pkg_name); in bnxt_hwrm_ver_get()
9205 bp->fw_cap |= BNXT_FW_CAP_PKG_VER; in bnxt_hwrm_ver_get()
9208 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout); in bnxt_hwrm_ver_get()
9209 if (!bp->hwrm_cmd_timeout) in bnxt_hwrm_ver_get()
9210 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; in bnxt_hwrm_ver_get()
9211 bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000; in bnxt_hwrm_ver_get()
9212 if (!bp->hwrm_cmd_max_timeout) in bnxt_hwrm_ver_get()
9213 bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT; in bnxt_hwrm_ver_get()
9214 else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT) in bnxt_hwrm_ver_get()
9215 netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n", in bnxt_hwrm_ver_get()
9216 bp->hwrm_cmd_max_timeout / 1000); in bnxt_hwrm_ver_get()
9218 if (resp->hwrm_intf_maj_8b >= 1) { in bnxt_hwrm_ver_get()
9219 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len); in bnxt_hwrm_ver_get()
9220 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len); in bnxt_hwrm_ver_get()
9222 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN) in bnxt_hwrm_ver_get()
9223 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN; in bnxt_hwrm_ver_get()
9225 bp->chip_num = le16_to_cpu(resp->chip_num); in bnxt_hwrm_ver_get()
9226 bp->chip_rev = resp->chip_rev; in bnxt_hwrm_ver_get()
9227 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev && in bnxt_hwrm_ver_get()
9228 !resp->chip_metal) in bnxt_hwrm_ver_get()
9229 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0; in bnxt_hwrm_ver_get()
9231 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg); in bnxt_hwrm_ver_get()
9234 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD; in bnxt_hwrm_ver_get()
9237 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL; in bnxt_hwrm_ver_get()
9241 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE; in bnxt_hwrm_ver_get()
9245 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF; in bnxt_hwrm_ver_get()
9249 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW; in bnxt_hwrm_ver_get()
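/* Note: bnxt_hwrm_ver_get() negotiates the HWRM interface: it caches the
 * whole VER_GET response, derives bp->hwrm_spec_code from the three
 * interface version bytes, prefers the 16-bit firmware version fields when
 * the spec is newer than 0x10803, appends the active package name to
 * fw_ver_str, and adopts the device-suggested default/max command timeouts.
 * The dev_caps_cfg word gates short commands, the KONG mailbox channel,
 * 64-bit OVS flow handles, trusted-VF support and advanced CFA flow
 * management.
 */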
9263 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) || in bnxt_hwrm_fw_set_time()
9264 bp->hwrm_spec_code < 0x10400) in bnxt_hwrm_fw_set_time()
9265 return -EOPNOTSUPP; in bnxt_hwrm_fw_set_time()
9272 req->year = cpu_to_le16(1900 + tm.tm_year); in bnxt_hwrm_fw_set_time()
9273 req->month = 1 + tm.tm_mon; in bnxt_hwrm_fw_set_time()
9274 req->day = tm.tm_mday; in bnxt_hwrm_fw_set_time()
9275 req->hour = tm.tm_hour; in bnxt_hwrm_fw_set_time()
9276 req->minute = tm.tm_min; in bnxt_hwrm_fw_set_time()
9277 req->second = tm.tm_sec; in bnxt_hwrm_fw_set_time()
9303 if (masks[i] == -1ULL) in __bnxt_accumulate_stats()
9312 if (!stats->hw_stats) in bnxt_accumulate_stats()
9315 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats, in bnxt_accumulate_stats()
9316 stats->hw_masks, stats->len / 8, false); in bnxt_accumulate_stats()
9326 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_accumulate_all_stats()
9329 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_accumulate_all_stats()
9330 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_accumulate_all_stats()
9334 cpr = &bnapi->cp_ring; in bnxt_accumulate_all_stats()
9335 stats = &cpr->stats; in bnxt_accumulate_all_stats()
9338 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats, in bnxt_accumulate_all_stats()
9339 ring0_stats->hw_masks, in bnxt_accumulate_all_stats()
9340 ring0_stats->len / 8, ignore_zero); in bnxt_accumulate_all_stats()
9342 if (bp->flags & BNXT_FLAG_PORT_STATS) { in bnxt_accumulate_all_stats()
9343 struct bnxt_stats_mem *stats = &bp->port_stats; in bnxt_accumulate_all_stats()
9344 __le64 *hw_stats = stats->hw_stats; in bnxt_accumulate_all_stats()
9345 u64 *sw_stats = stats->sw_stats; in bnxt_accumulate_all_stats()
9346 u64 *masks = stats->hw_masks; in bnxt_accumulate_all_stats()
9358 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { in bnxt_accumulate_all_stats()
9359 bnxt_accumulate_stats(&bp->rx_port_stats_ext); in bnxt_accumulate_all_stats()
9360 bnxt_accumulate_stats(&bp->tx_port_stats_ext); in bnxt_accumulate_all_stats()
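/* Note: the accumulation helpers above treat hw_masks[] as the native width
 * of each hardware counter: a mask of -1ULL marks a full 64-bit counter
 * that can be copied straight through, while narrower counters -- which
 * wrap in hardware -- are presumably folded into the 64-bit sw_stats mirror
 * using the mask (the masking arithmetic itself is not part of this
 * excerpt).  Ring 0's masks are reused for every completion ring, and the
 * stats length in bytes is divided by 8 to get the counter count.
 */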
9367 struct bnxt_pf_info *pf = &bp->pf; in bnxt_hwrm_port_qstats()
9370 if (!(bp->flags & BNXT_FLAG_PORT_STATS)) in bnxt_hwrm_port_qstats()
9373 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) in bnxt_hwrm_port_qstats()
9374 return -EOPNOTSUPP; in bnxt_hwrm_port_qstats()
9380 req->flags = flags; in bnxt_hwrm_port_qstats()
9381 req->port_id = cpu_to_le16(pf->port_id); in bnxt_hwrm_port_qstats()
9382 req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map + in bnxt_hwrm_port_qstats()
9384 req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map); in bnxt_hwrm_port_qstats()
9394 struct bnxt_pf_info *pf = &bp->pf; in bnxt_hwrm_port_qstats_ext()
9398 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) in bnxt_hwrm_port_qstats_ext()
9401 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) in bnxt_hwrm_port_qstats_ext()
9402 return -EOPNOTSUPP; in bnxt_hwrm_port_qstats_ext()
9408 req_qs->flags = flags; in bnxt_hwrm_port_qstats_ext()
9409 req_qs->port_id = cpu_to_le16(pf->port_id); in bnxt_hwrm_port_qstats_ext()
9410 req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); in bnxt_hwrm_port_qstats_ext()
9411 req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map); in bnxt_hwrm_port_qstats_ext()
9412 tx_stat_size = bp->tx_port_stats_ext.hw_stats ? in bnxt_hwrm_port_qstats_ext()
9414 req_qs->tx_stat_size = cpu_to_le16(tx_stat_size); in bnxt_hwrm_port_qstats_ext()
9415 req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map); in bnxt_hwrm_port_qstats_ext()
9419 bp->fw_rx_stats_ext_size = in bnxt_hwrm_port_qstats_ext()
9420 le16_to_cpu(resp_qs->rx_stat_size) / 8; in bnxt_hwrm_port_qstats_ext()
9422 bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY) in bnxt_hwrm_port_qstats_ext()
9423 bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY; in bnxt_hwrm_port_qstats_ext()
9425 bp->fw_tx_stats_ext_size = tx_stat_size ? in bnxt_hwrm_port_qstats_ext()
9426 le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0; in bnxt_hwrm_port_qstats_ext()
9428 bp->fw_rx_stats_ext_size = 0; in bnxt_hwrm_port_qstats_ext()
9429 bp->fw_tx_stats_ext_size = 0; in bnxt_hwrm_port_qstats_ext()
9436 if (bp->fw_tx_stats_ext_size <= in bnxt_hwrm_port_qstats_ext()
9438 bp->pri2cos_valid = 0; in bnxt_hwrm_port_qstats_ext()
9446 req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); in bnxt_hwrm_port_qstats_ext()
9454 pri2cos = &resp_qc->pri0_cos_queue_id; in bnxt_hwrm_port_qstats_ext()
9462 bp->pri2cos_valid = false; in bnxt_hwrm_port_qstats_ext()
9466 for (j = 0; j < bp->max_q; j++) { in bnxt_hwrm_port_qstats_ext()
9467 if (bp->q_ids[j] == queue_id) in bnxt_hwrm_port_qstats_ext()
9468 bp->pri2cos_idx[i] = queue_idx; in bnxt_hwrm_port_qstats_ext()
9471 bp->pri2cos_valid = true; in bnxt_hwrm_port_qstats_ext()
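/* Note: on a successful extended-stats query the code above sizes the RX/TX
 * extended stat arrays from the response (in 8-byte counters, clamped to
 * BNXT_RX_STATS_EXT_NUM_LEGACY for older firmware).  When the TX block is
 * large enough to include per-CoS counters, a QUEUE_PRI2COS_QCFG request
 * maps each priority to a queue index in bp->pri2cos_idx[]; an out-of-range
 * queue id leaves bp->pri2cos_valid false.
 */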
9492 tpa_flags = bp->flags & BNXT_FLAG_TPA; in bnxt_set_tpa()
9495 for (i = 0; i < bp->nr_vnics; i++) { in bnxt_set_tpa()
9498 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n", in bnxt_set_tpa()
9510 for (i = 0; i < bp->nr_vnics; i++) in bnxt_hwrm_clear_vnic_rss()
9516 if (!bp->vnic_info) in bnxt_clear_vnic()
9520 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) { in bnxt_clear_vnic()
9526 if (bp->flags & BNXT_FLAG_TPA) in bnxt_clear_vnic()
9529 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_clear_vnic()
9556 return -EINVAL; in bnxt_hwrm_set_br_mode()
9562 req->fid = cpu_to_le16(0xffff); in bnxt_hwrm_set_br_mode()
9563 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE); in bnxt_hwrm_set_br_mode()
9564 req->evb_mode = evb_mode; in bnxt_hwrm_set_br_mode()
9573 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803) in bnxt_hwrm_set_cache_line_size()
9580 req->fid = cpu_to_le16(0xffff); in bnxt_hwrm_set_cache_line_size()
9581 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE); in bnxt_hwrm_set_cache_line_size()
9582 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64; in bnxt_hwrm_set_cache_line_size()
9584 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128; in bnxt_hwrm_set_cache_line_size()
9591 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; in __bnxt_setup_vnic()
9594 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) in __bnxt_setup_vnic()
9600 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", in __bnxt_setup_vnic()
9604 bp->rsscos_nr_ctxs++; in __bnxt_setup_vnic()
9609 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n", in __bnxt_setup_vnic()
9613 bp->rsscos_nr_ctxs++; in __bnxt_setup_vnic()
9620 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", in __bnxt_setup_vnic()
9628 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n", in __bnxt_setup_vnic()
9633 if (bp->flags & BNXT_FLAG_AGG_RINGS) { in __bnxt_setup_vnic()
9636 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", in __bnxt_setup_vnic()
9649 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); in __bnxt_setup_vnic_p5()
9653 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n", in __bnxt_setup_vnic_p5()
9657 bp->rsscos_nr_ctxs++; in __bnxt_setup_vnic_p5()
9660 return -ENOMEM; in __bnxt_setup_vnic_p5()
9664 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n", in __bnxt_setup_vnic_p5()
9670 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", in __bnxt_setup_vnic_p5()
9674 if (bp->flags & BNXT_FLAG_AGG_RINGS) { in __bnxt_setup_vnic_p5()
9677 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", in __bnxt_setup_vnic_p5()
9686 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_setup_vnic()
9696 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_alloc_rfs_vnics()
9699 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_alloc_rfs_vnics()
9704 if (vnic_id >= bp->nr_vnics) in bnxt_alloc_rfs_vnics()
9707 vnic = &bp->vnic_info[vnic_id]; in bnxt_alloc_rfs_vnics()
9708 vnic->flags |= BNXT_VNIC_RFS_FLAG; in bnxt_alloc_rfs_vnics()
9709 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) in bnxt_alloc_rfs_vnics()
9710 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG; in bnxt_alloc_rfs_vnics()
9713 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", in bnxt_alloc_rfs_vnics()
9728 if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf)) in bnxt_promisc_ok()
9738 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1); in bnxt_setup_nitroa0_vnic()
9740 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", in bnxt_setup_nitroa0_vnic()
9747 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", in bnxt_setup_nitroa0_vnic()
9759 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; in bnxt_init_chip()
9761 unsigned int rx_nr_rings = bp->rx_nr_rings; in bnxt_init_chip()
9766 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n", in bnxt_init_chip()
9774 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc); in bnxt_init_chip()
9780 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc); in bnxt_init_chip()
9785 rx_nr_rings--; in bnxt_init_chip()
9790 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc); in bnxt_init_chip()
9800 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA) in bnxt_init_chip()
9803 if (bp->flags & BNXT_FLAG_RFS) { in bnxt_init_chip()
9809 if (bp->flags & BNXT_FLAG_TPA) { in bnxt_init_chip()
9819 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr); in bnxt_init_chip()
9821 if (BNXT_VF(bp) && rc == -ENODEV) in bnxt_init_chip()
9822 netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n"); in bnxt_init_chip()
9824 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); in bnxt_init_chip()
9827 vnic->uc_filter_count = 1; in bnxt_init_chip()
9829 vnic->rx_mask = 0; in bnxt_init_chip()
9830 if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state)) in bnxt_init_chip()
9833 if (bp->dev->flags & IFF_BROADCAST) in bnxt_init_chip()
9834 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; in bnxt_init_chip()
9836 if (bp->dev->flags & IFF_PROMISC) in bnxt_init_chip()
9837 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; in bnxt_init_chip()
9839 if (bp->dev->flags & IFF_ALLMULTI) { in bnxt_init_chip()
9840 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; in bnxt_init_chip()
9841 vnic->mc_list_count = 0; in bnxt_init_chip()
9842 } else if (bp->dev->flags & IFF_MULTICAST) { in bnxt_init_chip()
9846 vnic->rx_mask |= mask; in bnxt_init_chip()
9856 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n", in bnxt_init_chip()
9862 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n", in bnxt_init_chip()
9868 netdev_update_features(bp->dev); in bnxt_init_chip()
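/* Note: bnxt_init_chip() is the open-time bring-up sequence: stat contexts,
 * rings, ring groups, the default VNIC, then RSS/RFS/TPA, the unicast
 * filter for dev_addr, the rx_mask (broadcast, promiscuous, multicast) and
 * finally coalescing, with a special VNIC carved out for Nitro A0 parts.
 * Each step logs its own netdev_err() and appears to bail to a common error
 * path so a partially initialized chip is torn down consistently.
 */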
9899 struct net_device *dev = bp->dev; in bnxt_set_real_num_queues()
9901 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings - in bnxt_set_real_num_queues()
9902 bp->tx_nr_rings_xdp); in bnxt_set_real_num_queues()
9906 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings); in bnxt_set_real_num_queues()
9911 if (bp->flags & BNXT_FLAG_RFS) in bnxt_set_real_num_queues()
9912 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings); in bnxt_set_real_num_queues()
9918 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, in __bnxt_trim_rings() argument
9921 int _rx = *rx, _tx = *tx; in __bnxt_trim_rings()
9925 *tx = min_t(int, _tx, max); in __bnxt_trim_rings()
9928 return -ENOMEM; in __bnxt_trim_rings()
9932 _rx--; in __bnxt_trim_rings()
9934 _tx--; in __bnxt_trim_rings()
9937 *tx = _tx; in __bnxt_trim_rings()
9942 static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp) in __bnxt_num_tx_to_cp() argument
9944 return (tx - tx_xdp) / tx_sets + tx_xdp; in __bnxt_num_tx_to_cp()
9947 int bnxt_num_tx_to_cp(struct bnxt *bp, int tx) in bnxt_num_tx_to_cp() argument
9949 int tcs = bp->num_tc; in bnxt_num_tx_to_cp()
9953 return __bnxt_num_tx_to_cp(bp, tx, tcs, bp->tx_nr_rings_xdp); in bnxt_num_tx_to_cp()
9958 int tcs = bp->num_tc; in bnxt_num_cp_to_tx()
9960 return (tx_cp - bp->tx_nr_rings_xdp) * tcs + in bnxt_num_cp_to_tx()
9961 bp->tx_nr_rings_xdp; in bnxt_num_cp_to_tx()
9964 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, in bnxt_trim_rings() argument
9967 int tx_cp = bnxt_num_tx_to_cp(bp, *tx); in bnxt_trim_rings()
9969 if (tx_cp != *tx) { in bnxt_trim_rings()
9976 *tx = bnxt_num_cp_to_tx(bp, tx_cp); in bnxt_trim_rings()
9979 return __bnxt_trim_rings(bp, rx, tx, max, sh); in bnxt_trim_rings()
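/* Worked example for the ring arithmetic above (illustrative numbers, not
 * from the listing): with tx = 8 TX rings, tx_sets = 2 traffic classes and
 * tx_xdp = 0, __bnxt_num_tx_to_cp() yields (8 - 0) / 2 + 0 = 4 completion
 * rings -- the TX rings of the TCs in one set share a completion ring --
 * and bnxt_num_cp_to_tx() inverts it: (4 - 0) * 2 + 0 = 8.
 * bnxt_trim_rings() therefore trims in completion-ring units and converts
 * back before __bnxt_trim_rings() applies the shared/non-shared maximum.
 */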
9984 const int len = sizeof(bp->irq_tbl[0].name); in bnxt_setup_msix()
9985 struct net_device *dev = bp->dev; in bnxt_setup_msix()
9988 tcs = bp->num_tc; in bnxt_setup_msix()
9993 count = bp->tx_nr_rings_per_tc; in bnxt_setup_msix()
9999 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_setup_msix()
10003 if (bp->flags & BNXT_FLAG_SHARED_RINGS) in bnxt_setup_msix()
10005 else if (i < bp->rx_nr_rings) in bnxt_setup_msix()
10008 attr = "tx"; in bnxt_setup_msix()
10010 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name, in bnxt_setup_msix()
10012 bp->irq_tbl[map_idx].handler = bnxt_msix; in bnxt_setup_msix()
10018 const int len = sizeof(bp->irq_tbl[0].name); in bnxt_setup_inta()
10020 if (bp->num_tc) { in bnxt_setup_inta()
10021 netdev_reset_tc(bp->dev); in bnxt_setup_inta()
10022 bp->num_tc = 0; in bnxt_setup_inta()
10025 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx", in bnxt_setup_inta()
10027 bp->irq_tbl[0].handler = bnxt_inta; in bnxt_setup_inta()
10036 if (!bp->irq_tbl) { in bnxt_setup_int_mode()
10038 if (rc || !bp->irq_tbl) in bnxt_setup_int_mode()
10039 return rc ?: -ENODEV; in bnxt_setup_int_mode()
10042 if (bp->flags & BNXT_FLAG_USING_MSIX) in bnxt_setup_int_mode()
10053 return bp->hw_resc.max_rsscos_ctxs; in bnxt_get_max_func_rss_ctxs()
10058 return bp->hw_resc.max_vnics; in bnxt_get_max_func_vnics()
10063 return bp->hw_resc.max_stat_ctxs; in bnxt_get_max_func_stat_ctxs()
10068 return bp->hw_resc.max_cp_rings; in bnxt_get_max_func_cp_rings()
10073 unsigned int cp = bp->hw_resc.max_cp_rings; in bnxt_get_max_func_cp_rings_for_en()
10075 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) in bnxt_get_max_func_cp_rings_for_en()
10076 cp -= bnxt_get_ulp_msix_num(bp); in bnxt_get_max_func_cp_rings_for_en()
10083 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; in bnxt_get_max_func_irqs()
10085 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_get_max_func_irqs()
10086 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs); in bnxt_get_max_func_irqs()
10088 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings); in bnxt_get_max_func_irqs()
10093 bp->hw_resc.max_irqs = max_irqs; in bnxt_set_max_func_irqs()
10101 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_get_avail_cp_rings_for_en()
10102 return cp - bp->rx_nr_rings - bp->tx_nr_rings; in bnxt_get_avail_cp_rings_for_en()
10104 return cp - bp->cp_nr_rings; in bnxt_get_avail_cp_rings_for_en()
10109 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp); in bnxt_get_avail_stat_ctxs_for_en()
10116 int total_req = bp->cp_nr_rings + num; in bnxt_get_avail_msix()
10119 max_idx = bp->total_irqs; in bnxt_get_avail_msix()
10120 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) in bnxt_get_avail_msix()
10121 max_idx = min_t(int, bp->total_irqs, max_cp); in bnxt_get_avail_msix()
10122 avail_msix = max_idx - bp->cp_nr_rings; in bnxt_get_avail_msix()
10127 num = max_irq - bp->cp_nr_rings; in bnxt_get_avail_msix()
10157 return -ENOMEM; in bnxt_init_msix()
10164 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) in bnxt_init_msix()
10167 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs); in bnxt_init_msix()
10170 rc = -ENODEV; in bnxt_init_msix()
10174 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL); in bnxt_init_msix()
10175 if (bp->irq_tbl) { in bnxt_init_msix()
10177 bp->irq_tbl[i].vector = msix_ent[i].vector; in bnxt_init_msix()
10179 bp->total_irqs = total_vecs; in bnxt_init_msix()
10181 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings, in bnxt_init_msix()
10182 total_vecs - ulp_msix, min == 1); in bnxt_init_msix()
10186 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings); in bnxt_init_msix()
10187 bp->cp_nr_rings = (min == 1) ? in bnxt_init_msix()
10188 max_t(int, tx_cp, bp->rx_nr_rings) : in bnxt_init_msix()
10189 tx_cp + bp->rx_nr_rings; in bnxt_init_msix()
10192 rc = -ENOMEM; in bnxt_init_msix()
10195 bp->flags |= BNXT_FLAG_USING_MSIX; in bnxt_init_msix()
10200 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc); in bnxt_init_msix()
10201 kfree(bp->irq_tbl); in bnxt_init_msix()
10202 bp->irq_tbl = NULL; in bnxt_init_msix()
10203 pci_disable_msix(bp->pdev); in bnxt_init_msix()
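/* Note: bnxt_init_msix() asks for one vector per completion ring plus the
 * ULP (RDMA) share, using pci_enable_msix_range() so a smaller grant than
 * requested can still succeed down to the stated minimum.  After a partial
 * grant it re-trims the RX/TX ring counts to the vectors actually obtained
 * and recomputes cp_nr_rings -- max(tx_cp, rx) when rings are shared
 * (min == 1), tx_cp + rx otherwise -- before setting BNXT_FLAG_USING_MSIX.
 */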
10210 bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL); in bnxt_init_inta()
10211 if (!bp->irq_tbl) in bnxt_init_inta()
10212 return -ENOMEM; in bnxt_init_inta()
10214 bp->total_irqs = 1; in bnxt_init_inta()
10215 bp->rx_nr_rings = 1; in bnxt_init_inta()
10216 bp->tx_nr_rings = 1; in bnxt_init_inta()
10217 bp->cp_nr_rings = 1; in bnxt_init_inta()
10218 bp->flags |= BNXT_FLAG_SHARED_RINGS; in bnxt_init_inta()
10219 bp->irq_tbl[0].vector = bp->pdev->irq; in bnxt_init_inta()
10225 int rc = -ENODEV; in bnxt_init_int_mode()
10227 if (bp->flags & BNXT_FLAG_MSIX_CAP) in bnxt_init_int_mode()
10230 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) { in bnxt_init_int_mode()
10239 if (bp->flags & BNXT_FLAG_USING_MSIX) in bnxt_clear_int_mode()
10240 pci_disable_msix(bp->pdev); in bnxt_clear_int_mode()
10242 kfree(bp->irq_tbl); in bnxt_clear_int_mode()
10243 bp->irq_tbl = NULL; in bnxt_clear_int_mode()
10244 bp->flags &= ~BNXT_FLAG_USING_MSIX; in bnxt_clear_int_mode()
10250 int tcs = bp->num_tc; in bnxt_reserve_rings()
10257 bnxt_get_num_msix(bp) != bp->total_irqs) { in bnxt_reserve_rings()
10269 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc); in bnxt_reserve_rings()
10272 if (tcs && (bp->tx_nr_rings_per_tc * tcs != in bnxt_reserve_rings()
10273 bp->tx_nr_rings - bp->tx_nr_rings_xdp)) { in bnxt_reserve_rings()
10274 netdev_err(bp->dev, "tx ring reservation failure\n"); in bnxt_reserve_rings()
10275 netdev_reset_tc(bp->dev); in bnxt_reserve_rings()
10276 bp->num_tc = 0; in bnxt_reserve_rings()
10277 if (bp->tx_nr_rings_xdp) in bnxt_reserve_rings()
10278 bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp; in bnxt_reserve_rings()
10280 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; in bnxt_reserve_rings()
10281 return -ENOMEM; in bnxt_reserve_rings()
10292 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap); in bnxt_free_irq()
10293 bp->dev->rx_cpu_rmap = NULL; in bnxt_free_irq()
10295 if (!bp->irq_tbl || !bp->bnapi) in bnxt_free_irq()
10298 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_free_irq()
10301 irq = &bp->irq_tbl[map_idx]; in bnxt_free_irq()
10302 if (irq->requested) { in bnxt_free_irq()
10303 if (irq->have_cpumask) { in bnxt_free_irq()
10304 irq_set_affinity_hint(irq->vector, NULL); in bnxt_free_irq()
10305 free_cpumask_var(irq->cpu_mask); in bnxt_free_irq()
10306 irq->have_cpumask = 0; in bnxt_free_irq()
10308 free_irq(irq->vector, bp->bnapi[i]); in bnxt_free_irq()
10311 irq->requested = 0; in bnxt_free_irq()
10325 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n", in bnxt_request_irq()
10330 rmap = bp->dev->rx_cpu_rmap; in bnxt_request_irq()
10332 if (!(bp->flags & BNXT_FLAG_USING_MSIX)) in bnxt_request_irq()
10335 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { in bnxt_request_irq()
10337 struct bnxt_irq *irq = &bp->irq_tbl[map_idx]; in bnxt_request_irq()
10340 if (rmap && bp->bnapi[i]->rx_ring) { in bnxt_request_irq()
10341 rc = irq_cpu_rmap_add(rmap, irq->vector); in bnxt_request_irq()
10343 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n", in bnxt_request_irq()
10348 rc = request_irq(irq->vector, irq->handler, flags, irq->name, in bnxt_request_irq()
10349 bp->bnapi[i]); in bnxt_request_irq()
10353 netif_napi_set_irq(&bp->bnapi[i]->napi, irq->vector); in bnxt_request_irq()
10354 irq->requested = 1; in bnxt_request_irq()
10356 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) { in bnxt_request_irq()
10357 int numa_node = dev_to_node(&bp->pdev->dev); in bnxt_request_irq()
10359 irq->have_cpumask = 1; in bnxt_request_irq()
10361 irq->cpu_mask); in bnxt_request_irq()
10362 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask); in bnxt_request_irq()
10364 netdev_warn(bp->dev, in bnxt_request_irq()
10366 irq->vector); in bnxt_request_irq()
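/*
 * Editor's sketch: the NUMA-aware affinity-hint pattern used above.  The
 * mask must stay allocated for as long as the hint is installed, which is
 * why the driver parks it in irq->cpu_mask until IRQ free time.  The
 * function name is hypothetical; the cpumask/IRQ calls are real API.
 */
#include <linux/cpumask.h>
#include <linux/interrupt.h>

static int example_hint_affinity(unsigned int vector, int idx, int node,
				 cpumask_var_t *mask)
{
	int rc;

	if (!zalloc_cpumask_var(mask, GFP_KERNEL))
		return -ENOMEM;
	/* Pick the idx-th CPU, preferring CPUs local to @node. */
	cpumask_set_cpu(cpumask_local_spread(idx, node), *mask);
	rc = irq_set_affinity_hint(vector, *mask);
	if (rc)
		free_cpumask_var(*mask);
	return rc;
}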
10378 if (!bp->bnapi) in bnxt_del_napi()
10381 for (i = 0; i < bp->rx_nr_rings; i++) in bnxt_del_napi()
10382 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL); in bnxt_del_napi()
10383 for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++) in bnxt_del_napi()
10384 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL); in bnxt_del_napi()
10386 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_del_napi()
10387 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_del_napi()
10389 __netif_napi_del(&bnapi->napi); in bnxt_del_napi()
10400 unsigned int cp_nr_rings = bp->cp_nr_rings; in bnxt_init_napi()
10403 if (bp->flags & BNXT_FLAG_USING_MSIX) { in bnxt_init_napi()
10406 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_init_napi()
10409 cp_nr_rings--; in bnxt_init_napi()
10411 bnapi = bp->bnapi[i]; in bnxt_init_napi()
10412 netif_napi_add(bp->dev, &bnapi->napi, poll_fn); in bnxt_init_napi()
10415 bnapi = bp->bnapi[cp_nr_rings]; in bnxt_init_napi()
10416 netif_napi_add(bp->dev, &bnapi->napi, in bnxt_init_napi()
10420 bnapi = bp->bnapi[0]; in bnxt_init_napi()
10421 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll); in bnxt_init_napi()
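/*
 * Editor's sketch: the minimal poll-callback shape behind the
 * netif_napi_add() calls above.  example_poll() is hypothetical; a real
 * handler reaps completions and only re-arms the IRQ once it polls below
 * budget, which is what napi_complete_done() signals.
 */
#include <linux/netdevice.h>

static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* ... reap up to @budget completions here ... */
	if (work_done < budget)
		napi_complete_done(napi, work_done);
	return work_done;
}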
10429 if (!bp->bnapi || in bnxt_disable_napi()
10430 test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state)) in bnxt_disable_napi()
10433 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_disable_napi()
10434 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_disable_napi()
10437 cpr = &bnapi->cp_ring; in bnxt_disable_napi()
10438 if (bnapi->tx_fault) in bnxt_disable_napi()
10439 cpr->sw_stats.tx.tx_resets++; in bnxt_disable_napi()
10440 if (bnapi->in_reset) in bnxt_disable_napi()
10441 cpr->sw_stats.rx.rx_resets++; in bnxt_disable_napi()
10442 napi_disable(&bnapi->napi); in bnxt_disable_napi()
10443 if (bnapi->rx_ring) in bnxt_disable_napi()
10444 cancel_work_sync(&cpr->dim.work); in bnxt_disable_napi()
10452 clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state); in bnxt_enable_napi()
10453 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_enable_napi()
10454 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_enable_napi()
10457 bnapi->tx_fault = 0; in bnxt_enable_napi()
10459 cpr = &bnapi->cp_ring; in bnxt_enable_napi()
10460 bnapi->in_reset = false; in bnxt_enable_napi()
10462 if (bnapi->rx_ring) { in bnxt_enable_napi()
10463 INIT_WORK(&cpr->dim.work, bnxt_dim_work); in bnxt_enable_napi()
10464 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; in bnxt_enable_napi()
10466 napi_enable(&bnapi->napi); in bnxt_enable_napi()
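/*
 * Editor's sketch: once DIM is armed as above, the poll loop feeds it a
 * sample per interrupt so the algorithm can rewrite coalescing via the
 * dim.work handler.  Counter names are hypothetical; dim_update_sample()
 * and net_dim() are the <linux/dim.h> API of this kernel era.
 */
#include <linux/dim.h>

static void example_dim_sample(struct dim *dim, u16 events,
			       u64 packets, u64 bytes)
{
	struct dim_sample sample;

	dim_update_sample(events, packets, bytes, &sample);
	net_dim(dim, sample);	/* may schedule dim->work to re-coalesce */
}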
10475 if (bp->tx_ring) { in bnxt_tx_disable()
10476 for (i = 0; i < bp->tx_nr_rings; i++) { in bnxt_tx_disable()
10477 txr = &bp->tx_ring[i]; in bnxt_tx_disable()
10478 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING); in bnxt_tx_disable()
10483 /* Drop carrier first to prevent TX timeout */ in bnxt_tx_disable()
10484 netif_carrier_off(bp->dev); in bnxt_tx_disable()
10485 /* Stop all TX queues */ in bnxt_tx_disable()
10486 netif_tx_disable(bp->dev); in bnxt_tx_disable()
10494 for (i = 0; i < bp->tx_nr_rings; i++) { in bnxt_tx_enable()
10495 txr = &bp->tx_ring[i]; in bnxt_tx_enable()
10496 WRITE_ONCE(txr->dev_state, 0); in bnxt_tx_enable()
10500 netif_tx_wake_all_queues(bp->dev); in bnxt_tx_enable()
10502 netif_carrier_on(bp->dev); in bnxt_tx_enable()
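/*
 * Editor's sketch: the xmit-side READ_ONCE() that pairs with the
 * WRITE_ONCE(txr->dev_state, ...) transitions above.  The type and the
 * CLOSING constant below are stand-ins for the driver's own.
 */
struct example_tx_ring { unsigned int dev_state; };	/* stand-in */
#define EXAMPLE_DEV_STATE_CLOSING	1		/* stand-in */

static bool example_tx_closing(const struct example_tx_ring *txr)
{
	/* Seen as CLOSING -> the ring is quiescing; drop or requeue. */
	return READ_ONCE(txr->dev_state) == EXAMPLE_DEV_STATE_CLOSING;
}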
10507 u8 active_fec = link_info->active_fec_sig_mode & in bnxt_report_fec()
10538 netif_carrier_on(bp->dev); in bnxt_report_link()
10539 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); in bnxt_report_link()
10541 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n"); in bnxt_report_link()
10544 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL) in bnxt_report_link()
10548 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH) in bnxt_report_link()
10549 flow_ctrl = "ON - receive & transmit"; in bnxt_report_link()
10550 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX) in bnxt_report_link()
10551 flow_ctrl = "ON - transmit"; in bnxt_report_link()
10552 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX) in bnxt_report_link()
10553 flow_ctrl = "ON - receive"; in bnxt_report_link()
10556 if (bp->link_info.phy_qcfg_resp.option_flags & in bnxt_report_link()
10558 u8 sig_mode = bp->link_info.active_fec_sig_mode & in bnxt_report_link()
10574 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n", in bnxt_report_link()
10576 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) in bnxt_report_link()
10577 netdev_info(bp->dev, "EEE is %s\n", in bnxt_report_link()
10578 bp->eee.eee_active ? "active" : in bnxt_report_link()
10580 fec = bp->link_info.fec_cfg; in bnxt_report_link()
10582 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n", in bnxt_report_link()
10584 bnxt_report_fec(&bp->link_info)); in bnxt_report_link()
10586 netif_carrier_off(bp->dev); in bnxt_report_link()
10587 netdev_err(bp->dev, "NIC Link is Down\n"); in bnxt_report_link()
10593 if (!resp->supported_speeds_auto_mode && in bnxt_phy_qcaps_no_speed()
10594 !resp->supported_speeds_force_mode && in bnxt_phy_qcaps_no_speed()
10595 !resp->supported_pam4_speeds_auto_mode && in bnxt_phy_qcaps_no_speed()
10596 !resp->supported_pam4_speeds_force_mode && in bnxt_phy_qcaps_no_speed()
10597 !resp->supported_speeds2_auto_mode && in bnxt_phy_qcaps_no_speed()
10598 !resp->supported_speeds2_force_mode) in bnxt_phy_qcaps_no_speed()
10605 struct bnxt_link_info *link_info = &bp->link_info; in bnxt_hwrm_phy_qcaps()
10610 if (bp->hwrm_spec_code < 0x10201) in bnxt_hwrm_phy_qcaps()
10622 bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8); in bnxt_hwrm_phy_qcaps()
10623 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) { in bnxt_hwrm_phy_qcaps()
10624 struct ethtool_eee *eee = &bp->eee; in bnxt_hwrm_phy_qcaps() local
10625 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode); in bnxt_hwrm_phy_qcaps()
10627 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); in bnxt_hwrm_phy_qcaps()
10628 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) & in bnxt_hwrm_phy_qcaps()
10630 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) & in bnxt_hwrm_phy_qcaps()
10634 if (bp->hwrm_spec_code >= 0x10a01) { in bnxt_hwrm_phy_qcaps()
10636 link_info->phy_state = BNXT_PHY_STATE_DISABLED; in bnxt_hwrm_phy_qcaps()
10637 netdev_warn(bp->dev, "Ethernet link disabled\n"); in bnxt_hwrm_phy_qcaps()
10638 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) { in bnxt_hwrm_phy_qcaps()
10639 link_info->phy_state = BNXT_PHY_STATE_ENABLED; in bnxt_hwrm_phy_qcaps()
10640 netdev_info(bp->dev, "Ethernet link enabled\n"); in bnxt_hwrm_phy_qcaps()
10641 /* Phy re-enabled, reprobe the speeds */ in bnxt_hwrm_phy_qcaps()
10642 link_info->support_auto_speeds = 0; in bnxt_hwrm_phy_qcaps()
10643 link_info->support_pam4_auto_speeds = 0; in bnxt_hwrm_phy_qcaps()
10644 link_info->support_auto_speeds2 = 0; in bnxt_hwrm_phy_qcaps()
10647 if (resp->supported_speeds_auto_mode) in bnxt_hwrm_phy_qcaps()
10648 link_info->support_auto_speeds = in bnxt_hwrm_phy_qcaps()
10649 le16_to_cpu(resp->supported_speeds_auto_mode); in bnxt_hwrm_phy_qcaps()
10650 if (resp->supported_pam4_speeds_auto_mode) in bnxt_hwrm_phy_qcaps()
10651 link_info->support_pam4_auto_speeds = in bnxt_hwrm_phy_qcaps()
10652 le16_to_cpu(resp->supported_pam4_speeds_auto_mode); in bnxt_hwrm_phy_qcaps()
10653 if (resp->supported_speeds2_auto_mode) in bnxt_hwrm_phy_qcaps()
10654 link_info->support_auto_speeds2 = in bnxt_hwrm_phy_qcaps()
10655 le16_to_cpu(resp->supported_speeds2_auto_mode); in bnxt_hwrm_phy_qcaps()
10657 bp->port_count = resp->port_cnt; in bnxt_hwrm_phy_qcaps()
10678 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { in bnxt_support_speed_dropped()
10679 if (bnxt_support_dropped(link_info->advertising, in bnxt_support_speed_dropped()
10680 link_info->support_auto_speeds2)) { in bnxt_support_speed_dropped()
10681 link_info->advertising = link_info->support_auto_speeds2; in bnxt_support_speed_dropped()
10686 if (bnxt_support_dropped(link_info->advertising, in bnxt_support_speed_dropped()
10687 link_info->support_auto_speeds)) { in bnxt_support_speed_dropped()
10688 link_info->advertising = link_info->support_auto_speeds; in bnxt_support_speed_dropped()
10691 if (bnxt_support_dropped(link_info->advertising_pam4, in bnxt_support_speed_dropped()
10692 link_info->support_pam4_auto_speeds)) { in bnxt_support_speed_dropped()
10693 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds; in bnxt_support_speed_dropped()
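/*
 * Editor's sketch of the predicate this block leans on, with semantics
 * assumed from its callers: true when @adv still advertises bits that
 * the firmware no longer reports in @supp, in which case the advertised
 * mask is clamped back to the supported one.
 */
static bool example_support_dropped(u16 adv, u16 supp)
{
	return (adv & supp) != adv;
}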
10701 struct bnxt_link_info *link_info = &bp->link_info; in bnxt_update_link()
10704 u8 link_state = link_info->link_state; in bnxt_update_link()
10716 if (BNXT_VF(bp) && rc == -ENODEV) { in bnxt_update_link()
10717 netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n"); in bnxt_update_link()
10723 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp)); in bnxt_update_link()
10724 link_info->phy_link_status = resp->link; in bnxt_update_link()
10725 link_info->duplex = resp->duplex_cfg; in bnxt_update_link()
10726 if (bp->hwrm_spec_code >= 0x10800) in bnxt_update_link()
10727 link_info->duplex = resp->duplex_state; in bnxt_update_link()
10728 link_info->pause = resp->pause; in bnxt_update_link()
10729 link_info->auto_mode = resp->auto_mode; in bnxt_update_link()
10730 link_info->auto_pause_setting = resp->auto_pause; in bnxt_update_link()
10731 link_info->lp_pause = resp->link_partner_adv_pause; in bnxt_update_link()
10732 link_info->force_pause_setting = resp->force_pause; in bnxt_update_link()
10733 link_info->duplex_setting = resp->duplex_cfg; in bnxt_update_link()
10734 if (link_info->phy_link_status == BNXT_LINK_LINK) { in bnxt_update_link()
10735 link_info->link_speed = le16_to_cpu(resp->link_speed); in bnxt_update_link()
10736 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) in bnxt_update_link()
10737 link_info->active_lanes = resp->active_lanes; in bnxt_update_link()
10739 link_info->link_speed = 0; in bnxt_update_link()
10740 link_info->active_lanes = 0; in bnxt_update_link()
10742 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed); in bnxt_update_link()
10743 link_info->force_pam4_link_speed = in bnxt_update_link()
10744 le16_to_cpu(resp->force_pam4_link_speed); in bnxt_update_link()
10745 link_info->force_link_speed2 = le16_to_cpu(resp->force_link_speeds2); in bnxt_update_link()
10746 link_info->support_speeds = le16_to_cpu(resp->support_speeds); in bnxt_update_link()
10747 link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds); in bnxt_update_link()
10748 link_info->support_speeds2 = le16_to_cpu(resp->support_speeds2); in bnxt_update_link()
10749 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask); in bnxt_update_link()
10750 link_info->auto_pam4_link_speeds = in bnxt_update_link()
10751 le16_to_cpu(resp->auto_pam4_link_speed_mask); in bnxt_update_link()
10752 link_info->auto_link_speeds2 = le16_to_cpu(resp->auto_link_speeds2); in bnxt_update_link()
10753 link_info->lp_auto_link_speeds = in bnxt_update_link()
10754 le16_to_cpu(resp->link_partner_adv_speeds); in bnxt_update_link()
10755 link_info->lp_auto_pam4_link_speeds = in bnxt_update_link()
10756 resp->link_partner_pam4_adv_speeds; in bnxt_update_link()
10757 link_info->preemphasis = le32_to_cpu(resp->preemphasis); in bnxt_update_link()
10758 link_info->phy_ver[0] = resp->phy_maj; in bnxt_update_link()
10759 link_info->phy_ver[1] = resp->phy_min; in bnxt_update_link()
10760 link_info->phy_ver[2] = resp->phy_bld; in bnxt_update_link()
10761 link_info->media_type = resp->media_type; in bnxt_update_link()
10762 link_info->phy_type = resp->phy_type; in bnxt_update_link()
10763 link_info->transceiver = resp->xcvr_pkg_type; in bnxt_update_link()
10764 link_info->phy_addr = resp->eee_config_phy_addr & in bnxt_update_link()
10766 link_info->module_status = resp->module_status; in bnxt_update_link()
10768 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) { in bnxt_update_link()
10769 struct ethtool_eee *eee = &bp->eee; in bnxt_update_link() local
10772 eee->eee_active = 0; in bnxt_update_link()
10773 if (resp->eee_config_phy_addr & in bnxt_update_link()
10775 eee->eee_active = 1; in bnxt_update_link()
10777 resp->link_partner_adv_eee_link_speed_mask); in bnxt_update_link()
10778 eee->lp_advertised = in bnxt_update_link()
10782 /* Pull initial EEE config */ in bnxt_update_link()
10784 if (resp->eee_config_phy_addr & in bnxt_update_link()
10786 eee->eee_enabled = 1; in bnxt_update_link()
10788 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask); in bnxt_update_link()
10789 eee->advertised = in bnxt_update_link()
10792 if (resp->eee_config_phy_addr & in bnxt_update_link()
10796 eee->tx_lpi_enabled = 1; in bnxt_update_link()
10797 tmr = resp->xcvr_identifier_type_tx_lpi_timer; in bnxt_update_link()
10798 eee->tx_lpi_timer = le32_to_cpu(tmr) & in bnxt_update_link()
10804 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED; in bnxt_update_link()
10805 if (bp->hwrm_spec_code >= 0x10504) { in bnxt_update_link()
10806 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg); in bnxt_update_link()
10807 link_info->active_fec_sig_mode = resp->active_fec_signal_mode; in bnxt_update_link()
10811 if (link_info->phy_link_status == BNXT_LINK_LINK) in bnxt_update_link()
10812 link_info->link_state = BNXT_LINK_STATE_UP; in bnxt_update_link()
10814 link_info->link_state = BNXT_LINK_STATE_DOWN; in bnxt_update_link()
10815 if (link_state != link_info->link_state) in bnxt_update_link()
10819 link_info->link_state = BNXT_LINK_STATE_DOWN; in bnxt_update_link()
10827 if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED)) in bnxt_update_link()
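/*
 * Editor's sketch: every multi-byte HWRM response field parsed above is
 * little-endian on the wire, hence the le16_to_cpu()/le32_to_cpu() on
 * each load.  The two-field struct below is a hypothetical example of
 * the pattern, not the real response layout.
 */
#include <linux/types.h>

struct example_le_resp {
	__le16 link_speed;
	__le32 preemphasis;
};

static void example_parse_resp(const struct example_le_resp *resp,
			       u16 *speed, u32 *preemphasis)
{
	*speed = le16_to_cpu(resp->link_speed);
	*preemphasis = le32_to_cpu(resp->preemphasis);
}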
10834 struct bnxt_link_info *link_info = &bp->link_info; in bnxt_get_port_module_status()
10835 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp; in bnxt_get_port_module_status()
10841 module_status = link_info->module_status; in bnxt_get_port_module_status()
10846 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n", in bnxt_get_port_module_status()
10847 bp->pf.port_id); in bnxt_get_port_module_status()
10848 if (bp->hwrm_spec_code >= 0x10201) { in bnxt_get_port_module_status()
10849 netdev_warn(bp->dev, "Module part number %s\n", in bnxt_get_port_module_status()
10850 resp->phy_vendor_partnumber); in bnxt_get_port_module_status()
10853 netdev_warn(bp->dev, "TX is disabled\n"); in bnxt_get_port_module_status()
10855 netdev_warn(bp->dev, "SFP+ module is shutdown\n"); in bnxt_get_port_module_status()
10862 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) { in bnxt_hwrm_set_pause_common()
10863 if (bp->hwrm_spec_code >= 0x10201) in bnxt_hwrm_set_pause_common()
10864 req->auto_pause = in bnxt_hwrm_set_pause_common()
10866 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) in bnxt_hwrm_set_pause_common()
10867 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; in bnxt_hwrm_set_pause_common()
10868 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) in bnxt_hwrm_set_pause_common()
10869 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX; in bnxt_hwrm_set_pause_common()
10870 req->enables |= in bnxt_hwrm_set_pause_common()
10873 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) in bnxt_hwrm_set_pause_common()
10874 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX; in bnxt_hwrm_set_pause_common()
10875 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) in bnxt_hwrm_set_pause_common()
10876 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX; in bnxt_hwrm_set_pause_common()
10877 req->enables |= in bnxt_hwrm_set_pause_common()
10879 if (bp->hwrm_spec_code >= 0x10201) { in bnxt_hwrm_set_pause_common()
10880 req->auto_pause = req->force_pause; in bnxt_hwrm_set_pause_common()
10881 req->enables |= cpu_to_le32( in bnxt_hwrm_set_pause_common()
10889 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) { in bnxt_hwrm_set_link_common()
10890 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK; in bnxt_hwrm_set_link_common()
10891 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { in bnxt_hwrm_set_link_common()
10892 req->enables |= in bnxt_hwrm_set_link_common()
10894 req->auto_link_speeds2_mask = cpu_to_le16(bp->link_info.advertising); in bnxt_hwrm_set_link_common()
10895 } else if (bp->link_info.advertising) { in bnxt_hwrm_set_link_common()
10896 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK); in bnxt_hwrm_set_link_common()
10897 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising); in bnxt_hwrm_set_link_common()
10899 if (bp->link_info.advertising_pam4) { in bnxt_hwrm_set_link_common()
10900 req->enables |= in bnxt_hwrm_set_link_common()
10902 req->auto_link_pam4_speed_mask = in bnxt_hwrm_set_link_common()
10903 cpu_to_le16(bp->link_info.advertising_pam4); in bnxt_hwrm_set_link_common()
10905 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE); in bnxt_hwrm_set_link_common()
10906 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG); in bnxt_hwrm_set_link_common()
10908 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE); in bnxt_hwrm_set_link_common()
10909 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { in bnxt_hwrm_set_link_common()
10910 req->force_link_speeds2 = cpu_to_le16(bp->link_info.req_link_speed); in bnxt_hwrm_set_link_common()
10911 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2); in bnxt_hwrm_set_link_common()
10912 netif_info(bp, link, bp->dev, "Forcing FW speed2: %d\n", in bnxt_hwrm_set_link_common()
10913 (u32)bp->link_info.req_link_speed); in bnxt_hwrm_set_link_common()
10914 } else if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) { in bnxt_hwrm_set_link_common()
10915 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed); in bnxt_hwrm_set_link_common()
10916 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED); in bnxt_hwrm_set_link_common()
10918 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed); in bnxt_hwrm_set_link_common()
10923 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); in bnxt_hwrm_set_link_common()
10937 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) || in bnxt_hwrm_set_pause()
10938 bp->link_info.force_link_chng) in bnxt_hwrm_set_pause()
10942 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) { in bnxt_hwrm_set_pause()
10947 bp->link_info.pause = in bnxt_hwrm_set_pause()
10948 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl; in bnxt_hwrm_set_pause()
10949 bp->link_info.auto_pause_setting = 0; in bnxt_hwrm_set_pause()
10950 if (!bp->link_info.force_link_chng) in bnxt_hwrm_set_pause()
10953 bp->link_info.force_link_chng = false; in bnxt_hwrm_set_pause()
10960 struct ethtool_eee *eee = &bp->eee; in bnxt_hwrm_set_eee() local
10962 if (eee->eee_enabled) { in bnxt_hwrm_set_eee()
10966 if (eee->tx_lpi_enabled) in bnxt_hwrm_set_eee()
10971 req->flags |= cpu_to_le32(flags); in bnxt_hwrm_set_eee()
10972 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised); in bnxt_hwrm_set_eee()
10973 req->eee_link_speed_mask = cpu_to_le16(eee_speeds); in bnxt_hwrm_set_eee()
10974 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer); in bnxt_hwrm_set_eee()
10976 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE); in bnxt_hwrm_set_eee()
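/*
 * Editor's sketch: the ethtool-facing read side of the cached EEE state
 * configured above, assuming a priv->eee cache like bp->eee.  The
 * example_priv type is hypothetical; the op shape matches the
 * struct ethtool_eee era this listing uses.
 */
#include <linux/ethtool.h>
#include <linux/netdevice.h>

struct example_priv {
	struct ethtool_eee eee;
};

static int example_get_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct example_priv *priv = netdev_priv(dev);

	*edata = priv->eee;	/* supported/advertised/tx_lpi_timer/... */
	return 0;
}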
11007 if (pci_num_vf(bp->pdev) && in bnxt_hwrm_shutdown_link()
11008 !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN)) in bnxt_hwrm_shutdown_link()
11015 req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN); in bnxt_hwrm_shutdown_link()
11018 mutex_lock(&bp->link_lock); in bnxt_hwrm_shutdown_link()
11024 bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN; in bnxt_hwrm_shutdown_link()
11025 mutex_unlock(&bp->link_lock); in bnxt_hwrm_shutdown_link()
11036 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc); in bnxt_fw_reset_via_optee()
11040 netdev_err(bp->dev, "OP-TEE not supported\n"); in bnxt_fw_reset_via_optee()
11041 return -ENODEV; in bnxt_fw_reset_via_optee()
11047 if (bp->fw_health && bp->fw_health->status_reliable) { in bnxt_try_recover_fw()
11058 } while (rc == -EBUSY && retry < BNXT_FW_RETRY); in bnxt_try_recover_fw()
11061 netdev_err(bp->dev, in bnxt_try_recover_fw()
11064 rc = -ENODEV; in bnxt_try_recover_fw()
11067 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n"); in bnxt_try_recover_fw()
11073 return -ENODEV; in bnxt_try_recover_fw()
11078 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; in bnxt_clear_reservations()
11083 hw_resc->resv_cp_rings = 0; in bnxt_clear_reservations()
11084 hw_resc->resv_stat_ctxs = 0; in bnxt_clear_reservations()
11085 hw_resc->resv_irqs = 0; in bnxt_clear_reservations()
11086 hw_resc->resv_tx_rings = 0; in bnxt_clear_reservations()
11087 hw_resc->resv_rx_rings = 0; in bnxt_clear_reservations()
11088 hw_resc->resv_hw_ring_grps = 0; in bnxt_clear_reservations()
11089 hw_resc->resv_vnics = 0; in bnxt_clear_reservations()
11091 bp->tx_nr_rings = 0; in bnxt_clear_reservations()
11092 bp->rx_nr_rings = 0; in bnxt_clear_reservations()
11105 netdev_err(bp->dev, "resc_qcaps failed\n"); in bnxt_cancel_reservations()
11116 bool fw_reset = !bp->irq_tbl; in bnxt_hwrm_if_change()
11121 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE)) in bnxt_hwrm_if_change()
11129 req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP); in bnxt_hwrm_if_change()
11135 if (rc != -EAGAIN) in bnxt_hwrm_if_change()
11142 if (rc == -EAGAIN) { in bnxt_hwrm_if_change()
11146 flags = le32_to_cpu(resp->flags); in bnxt_hwrm_if_change()
11163 test_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) in bnxt_hwrm_if_change()
11168 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) { in bnxt_hwrm_if_change()
11169 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n"); in bnxt_hwrm_if_change()
11170 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); in bnxt_hwrm_if_change()
11171 return -ENODEV; in bnxt_hwrm_if_change()
11175 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); in bnxt_hwrm_if_change()
11176 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) in bnxt_hwrm_if_change()
11182 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state); in bnxt_hwrm_if_change()
11183 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); in bnxt_hwrm_if_change()
11189 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state); in bnxt_hwrm_if_change()
11190 netdev_err(bp->dev, "init int mode failed\n"); in bnxt_hwrm_if_change()
11203 struct bnxt_pf_info *pf = &bp->pf; in bnxt_hwrm_port_led_qcaps()
11206 bp->num_leds = 0; in bnxt_hwrm_port_led_qcaps()
11207 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) in bnxt_hwrm_port_led_qcaps()
11214 req->port_id = cpu_to_le16(pf->port_id); in bnxt_hwrm_port_led_qcaps()
11221 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) { in bnxt_hwrm_port_led_qcaps()
11224 bp->num_leds = resp->num_leds; in bnxt_hwrm_port_led_qcaps()
11225 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) * in bnxt_hwrm_port_led_qcaps()
11226 bp->num_leds); in bnxt_hwrm_port_led_qcaps()
11227 for (i = 0; i < bp->num_leds; i++) { in bnxt_hwrm_port_led_qcaps()
11228 struct bnxt_led_info *led = &bp->leds[i]; in bnxt_hwrm_port_led_qcaps()
11229 __le16 caps = led->led_state_caps; in bnxt_hwrm_port_led_qcaps()
11231 if (!led->led_group_id || in bnxt_hwrm_port_led_qcaps()
11233 bp->num_leds = 0; in bnxt_hwrm_port_led_qcaps()
11252 req->port_id = cpu_to_le16(bp->pf.port_id); in bnxt_hwrm_alloc_wol_fltr()
11253 req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT; in bnxt_hwrm_alloc_wol_fltr()
11254 req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS); in bnxt_hwrm_alloc_wol_fltr()
11255 memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN); in bnxt_hwrm_alloc_wol_fltr()
11260 bp->wol_filter_id = resp->wol_filter_id; in bnxt_hwrm_alloc_wol_fltr()
11274 req->port_id = cpu_to_le16(bp->pf.port_id); in bnxt_hwrm_free_wol_fltr()
11275 req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID); in bnxt_hwrm_free_wol_fltr()
11276 req->wol_filter_id = bp->wol_filter_id; in bnxt_hwrm_free_wol_fltr()
11292 req->port_id = cpu_to_le16(bp->pf.port_id); in bnxt_hwrm_get_wol_fltrs()
11293 req->handle = cpu_to_le16(handle); in bnxt_hwrm_get_wol_fltrs()
11297 next_handle = le16_to_cpu(resp->next_handle); in bnxt_hwrm_get_wol_fltrs()
11299 if (resp->wol_type == in bnxt_hwrm_get_wol_fltrs()
11301 bp->wol = 1; in bnxt_hwrm_get_wol_fltrs()
11302 bp->wol_filter_id = resp->wol_filter_id; in bnxt_hwrm_get_wol_fltrs()
11314 bp->wol = 0; in bnxt_get_wol_settings()
11315 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP)) in bnxt_get_wol_settings()
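/*
 * Editor's sketch: the handle-chasing loop bnxt_get_wol_settings() runs
 * over bnxt_hwrm_get_wol_fltrs(), with the 0xffff terminator assumed
 * from the surrounding code.  Both names below are stand-ins.
 */
struct example_bp;					/* stand-in */

static u16 example_get_wol_fltrs(struct example_bp *bp, u16 handle)
{
	return 0xffff;		/* stand-in: real code queries firmware */
}

static void example_walk_wol_fltrs(struct example_bp *bp)
{
	u16 handle = 0;

	do {
		handle = example_get_wol_fltrs(bp, handle);
	} while (handle && handle != 0xffff);
}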
11325 struct ethtool_eee *eee = &bp->eee; in bnxt_eee_config_ok() local
11326 struct bnxt_link_info *link_info = &bp->link_info; in bnxt_eee_config_ok()
11328 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP)) in bnxt_eee_config_ok()
11331 if (eee->eee_enabled) { in bnxt_eee_config_ok()
11333 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); in bnxt_eee_config_ok()
11335 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { in bnxt_eee_config_ok()
11336 eee->eee_enabled = 0; in bnxt_eee_config_ok()
11339 if (eee->advertised & ~advertising) { in bnxt_eee_config_ok()
11340 eee->advertised = advertising & eee->supported; in bnxt_eee_config_ok()
11353 struct bnxt_link_info *link_info = &bp->link_info; in bnxt_update_phy_setting()
11357 netdev_err(bp->dev, "failed to update link (rc: %x)\n", in bnxt_update_phy_setting()
11364 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && in bnxt_update_phy_setting()
11365 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) != in bnxt_update_phy_setting()
11366 link_info->req_flow_ctrl) in bnxt_update_phy_setting()
11368 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && in bnxt_update_phy_setting()
11369 link_info->force_pause_setting != link_info->req_flow_ctrl) in bnxt_update_phy_setting()
11371 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { in bnxt_update_phy_setting()
11372 if (BNXT_AUTO_MODE(link_info->auto_mode)) in bnxt_update_phy_setting()
11376 if (link_info->req_duplex != link_info->duplex_setting) in bnxt_update_phy_setting()
11379 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) in bnxt_update_phy_setting()
11399 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n", in bnxt_update_phy_setting()
11407 /* Common routine to pre-map certain register block to different GRC window.
11417 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12); in bnxt_preset_reg_win()
11427 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) in bnxt_reinit_after_abort()
11428 return -EBUSY; in bnxt_reinit_after_abort()
11430 if (bp->dev->reg_state == NETREG_UNREGISTERED) in bnxt_reinit_after_abort()
11431 return -ENODEV; in bnxt_reinit_after_abort()
11438 clear_bit(BNXT_STATE_ABORT_ERR, &bp->state); in bnxt_reinit_after_abort()
11439 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); in bnxt_reinit_after_abort()
11450 netif_carrier_off(bp->dev); in __bnxt_open_nic()
11455 netdev_err(bp->dev, "Failed to reserve default rings at open\n"); in __bnxt_open_nic()
11462 if ((bp->flags & BNXT_FLAG_RFS) && in __bnxt_open_nic()
11463 !(bp->flags & BNXT_FLAG_USING_MSIX)) { in __bnxt_open_nic()
11465 bp->dev->hw_features &= ~NETIF_F_NTUPLE; in __bnxt_open_nic()
11466 bp->flags &= ~BNXT_FLAG_RFS; in __bnxt_open_nic()
11471 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); in __bnxt_open_nic()
11479 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc); in __bnxt_open_nic()
11486 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); in __bnxt_open_nic()
11494 mutex_lock(&bp->link_lock); in __bnxt_open_nic()
11496 mutex_unlock(&bp->link_lock); in __bnxt_open_nic()
11498 netdev_warn(bp->dev, "failed to update phy settings\n"); in __bnxt_open_nic()
11500 bp->link_info.phy_retry = true; in __bnxt_open_nic()
11501 bp->link_info.phy_retry_expires = in __bnxt_open_nic()
11508 udp_tunnel_nic_reset_ntf(bp->dev); in __bnxt_open_nic()
11510 if (bp->tx_nr_rings_xdp < num_possible_cpus()) { in __bnxt_open_nic()
11516 set_bit(BNXT_STATE_OPEN, &bp->state); in __bnxt_open_nic()
11518 /* Enable TX queues */ in __bnxt_open_nic()
11520 mod_timer(&bp->timer, jiffies + bp->current_interval); in __bnxt_open_nic()
11522 mutex_lock(&bp->link_lock); in __bnxt_open_nic()
11524 mutex_unlock(&bp->link_lock); in __bnxt_open_nic()
11526 /* VF-reps may need to be re-opened after the PF is re-opened */ in __bnxt_open_nic()
11548 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) in bnxt_open_nic()
11549 rc = -EIO; in bnxt_open_nic()
11553 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc); in bnxt_open_nic()
11554 dev_close(bp->dev); in bnxt_open_nic()
11560 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
11567 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { in bnxt_half_open_nic()
11568 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n"); in bnxt_half_open_nic()
11569 rc = -ENODEV; in bnxt_half_open_nic()
11575 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); in bnxt_half_open_nic()
11579 set_bit(BNXT_STATE_HALF_OPEN, &bp->state); in bnxt_half_open_nic()
11582 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); in bnxt_half_open_nic()
11584 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); in bnxt_half_open_nic()
11592 dev_close(bp->dev); in bnxt_half_open_nic()
11605 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); in bnxt_half_close_nic()
11611 struct bnxt_pf_info *pf = &bp->pf; in bnxt_reenable_sriov()
11612 int n = pf->active_vfs; in bnxt_reenable_sriov()
11624 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { in bnxt_open()
11627 if (rc == -EBUSY) in bnxt_open()
11628 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n"); in bnxt_open()
11630 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n"); in bnxt_open()
11631 return -ENODEV; in bnxt_open()
11643 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) { in bnxt_open()
11644 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { in bnxt_open()
11656 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) || in bnxt_drv_busy()
11657 test_bit(BNXT_STATE_READ_STATS, &bp->state)); in bnxt_drv_busy()
11666 /* Close the VF-reps before closing PF */ in __bnxt_close_nic()
11670 /* Change device state to avoid TX queue wake up's */ in __bnxt_close_nic()
11673 clear_bit(BNXT_STATE_OPEN, &bp->state); in __bnxt_close_nic()
11685 del_timer_sync(&bp->timer); in __bnxt_close_nic()
11689 if (bp->bnapi && irq_re_init) { in __bnxt_close_nic()
11690 bnxt_get_ring_stats(bp, &bp->net_stats_prev); in __bnxt_close_nic()
11691 bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev); in __bnxt_close_nic()
11702 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { in bnxt_close_nic()
11710 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n"); in bnxt_close_nic()
11711 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); in bnxt_close_nic()
11715 if (bp->sriov_cfg) { in bnxt_close_nic()
11718 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait, in bnxt_close_nic()
11719 !bp->sriov_cfg, in bnxt_close_nic()
11722 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n"); in bnxt_close_nic()
11724 netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n"); in bnxt_close_nic()
11747 if (bp->hwrm_spec_code < 0x10a00) in bnxt_hwrm_port_phy_read()
11748 return -EOPNOTSUPP; in bnxt_hwrm_port_phy_read()
11754 req->port_id = cpu_to_le16(bp->pf.port_id); in bnxt_hwrm_port_phy_read()
11755 req->phy_addr = phy_addr; in bnxt_hwrm_port_phy_read()
11756 req->reg_addr = cpu_to_le16(reg & 0x1f); in bnxt_hwrm_port_phy_read()
11758 req->cl45_mdio = 1; in bnxt_hwrm_port_phy_read()
11759 req->phy_addr = mdio_phy_id_prtad(phy_addr); in bnxt_hwrm_port_phy_read()
11760 req->dev_addr = mdio_phy_id_devad(phy_addr); in bnxt_hwrm_port_phy_read()
11761 req->reg_addr = cpu_to_le16(reg); in bnxt_hwrm_port_phy_read()
11767 *val = le16_to_cpu(resp->reg_data); in bnxt_hwrm_port_phy_read()
11778 if (bp->hwrm_spec_code < 0x10a00) in bnxt_hwrm_port_phy_write()
11779 return -EOPNOTSUPP; in bnxt_hwrm_port_phy_write()
11785 req->port_id = cpu_to_le16(bp->pf.port_id); in bnxt_hwrm_port_phy_write()
11786 req->phy_addr = phy_addr; in bnxt_hwrm_port_phy_write()
11787 req->reg_addr = cpu_to_le16(reg & 0x1f); in bnxt_hwrm_port_phy_write()
11789 req->cl45_mdio = 1; in bnxt_hwrm_port_phy_write()
11790 req->phy_addr = mdio_phy_id_prtad(phy_addr); in bnxt_hwrm_port_phy_write()
11791 req->dev_addr = mdio_phy_id_devad(phy_addr); in bnxt_hwrm_port_phy_write()
11792 req->reg_addr = cpu_to_le16(reg); in bnxt_hwrm_port_phy_write()
11794 req->reg_data = cpu_to_le16(val); in bnxt_hwrm_port_phy_write()
11808 mdio->phy_id = bp->link_info.phy_addr; in bnxt_ioctl()
11815 return -EAGAIN; in bnxt_ioctl()
11817 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num, in bnxt_ioctl()
11819 mdio->val_out = mii_regval; in bnxt_ioctl()
11825 return -EAGAIN; in bnxt_ioctl()
11827 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num, in bnxt_ioctl()
11828 mdio->val_in); in bnxt_ioctl()
11840 return -EOPNOTSUPP; in bnxt_ioctl()
11848 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_get_ring_stats()
11849 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_get_ring_stats()
11850 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; in bnxt_get_ring_stats()
11851 u64 *sw = cpr->stats.sw_stats; in bnxt_get_ring_stats()
11853 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts); in bnxt_get_ring_stats()
11854 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); in bnxt_get_ring_stats()
11855 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts); in bnxt_get_ring_stats()
11857 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts); in bnxt_get_ring_stats()
11858 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts); in bnxt_get_ring_stats()
11859 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts); in bnxt_get_ring_stats()
11861 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes); in bnxt_get_ring_stats()
11862 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes); in bnxt_get_ring_stats()
11863 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes); in bnxt_get_ring_stats()
11865 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes); in bnxt_get_ring_stats()
11866 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes); in bnxt_get_ring_stats()
11867 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes); in bnxt_get_ring_stats()
11869 stats->rx_missed_errors += in bnxt_get_ring_stats()
11872 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); in bnxt_get_ring_stats()
11874 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts); in bnxt_get_ring_stats()
11876 stats->rx_dropped += in bnxt_get_ring_stats()
11877 cpr->sw_stats.rx.rx_netpoll_discards + in bnxt_get_ring_stats()
11878 cpr->sw_stats.rx.rx_oom_discards; in bnxt_get_ring_stats()
11885 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev; in bnxt_add_prev_stats()
11887 stats->rx_packets += prev_stats->rx_packets; in bnxt_add_prev_stats()
11888 stats->tx_packets += prev_stats->tx_packets; in bnxt_add_prev_stats()
11889 stats->rx_bytes += prev_stats->rx_bytes; in bnxt_add_prev_stats()
11890 stats->tx_bytes += prev_stats->tx_bytes; in bnxt_add_prev_stats()
11891 stats->rx_missed_errors += prev_stats->rx_missed_errors; in bnxt_add_prev_stats()
11892 stats->multicast += prev_stats->multicast; in bnxt_add_prev_stats()
11893 stats->rx_dropped += prev_stats->rx_dropped; in bnxt_add_prev_stats()
11894 stats->tx_dropped += prev_stats->tx_dropped; in bnxt_add_prev_stats()
11902 set_bit(BNXT_STATE_READ_STATS, &bp->state); in bnxt_get_stats64()
11907 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { in bnxt_get_stats64()
11908 clear_bit(BNXT_STATE_READ_STATS, &bp->state); in bnxt_get_stats64()
11909 *stats = bp->net_stats_prev; in bnxt_get_stats64()
11916 if (bp->flags & BNXT_FLAG_PORT_STATS) { in bnxt_get_stats64()
11917 u64 *rx = bp->port_stats.sw_stats; in bnxt_get_stats64()
11918 u64 *tx = bp->port_stats.sw_stats + in bnxt_get_stats64() local
11921 stats->rx_crc_errors = in bnxt_get_stats64()
11923 stats->rx_frame_errors = in bnxt_get_stats64()
11925 stats->rx_length_errors = in bnxt_get_stats64()
11929 stats->rx_errors = in bnxt_get_stats64()
11932 stats->collisions = in bnxt_get_stats64()
11933 BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions); in bnxt_get_stats64()
11934 stats->tx_fifo_errors = in bnxt_get_stats64()
11935 BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns); in bnxt_get_stats64()
11936 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err); in bnxt_get_stats64()
11938 clear_bit(BNXT_STATE_READ_STATS, &bp->state); in bnxt_get_stats64()
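/*
 * Editor's sketch: the reader/close handshake implied above -- stats
 * readers set a READ_STATS bit, and close clears OPEN then spins until
 * no reader (or service task) is in flight before tearing rings down.
 * Types, bit numbers, and the 20 ms pacing are stand-ins.
 */
#include <linux/delay.h>

struct example_bp2 { unsigned long state; };	/* stand-in */
#define EXAMPLE_STATE_OPEN		0	/* stand-in bit */
#define EXAMPLE_STATE_READ_STATS	1	/* stand-in bit */

static bool example_drv_busy(struct example_bp2 *bp)
{
	return test_bit(EXAMPLE_STATE_READ_STATS, &bp->state);
}

static void example_wait_for_readers(struct example_bp2 *bp)
{
	clear_bit(EXAMPLE_STATE_OPEN, &bp->state);
	smp_mb__after_atomic();		/* order vs. set_bit() in readers */
	while (example_drv_busy(bp))
		msleep(20);
}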
11945 struct bnxt_sw_stats *sw_stats = &cpr->sw_stats; in bnxt_get_one_ring_err_stats()
11946 u64 *hw_stats = cpr->stats.sw_stats; in bnxt_get_one_ring_err_stats()
11948 stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors; in bnxt_get_one_ring_err_stats()
11949 stats->rx_total_resets += sw_stats->rx.rx_resets; in bnxt_get_one_ring_err_stats()
11950 stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors; in bnxt_get_one_ring_err_stats()
11951 stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards; in bnxt_get_one_ring_err_stats()
11952 stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards; in bnxt_get_one_ring_err_stats()
11953 stats->rx_total_ring_discards += in bnxt_get_one_ring_err_stats()
11955 stats->tx_total_resets += sw_stats->tx.tx_resets; in bnxt_get_one_ring_err_stats()
11956 stats->tx_total_ring_discards += in bnxt_get_one_ring_err_stats()
11958 stats->total_missed_irqs += sw_stats->cmn.missed_irqs; in bnxt_get_one_ring_err_stats()
11966 for (i = 0; i < bp->cp_nr_rings; i++) in bnxt_get_ring_err_stats()
11967 bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring); in bnxt_get_ring_err_stats()
11972 struct net_device *dev = bp->dev; in bnxt_mc_list_updated()
11973 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; in bnxt_mc_list_updated()
11983 vnic->mc_list_count = 0; in bnxt_mc_list_updated()
11986 haddr = ha->addr; in bnxt_mc_list_updated()
11987 if (!ether_addr_equal(haddr, vnic->mc_list + off)) { in bnxt_mc_list_updated()
11988 memcpy(vnic->mc_list + off, haddr, ETH_ALEN); in bnxt_mc_list_updated()
11997 if (mc_count != vnic->mc_list_count) { in bnxt_mc_list_updated()
11998 vnic->mc_list_count = mc_count; in bnxt_mc_list_updated()
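/*
 * Editor's sketch: flattening the device multicast list into a driver
 * buffer, the pattern bnxt_mc_list_updated() implements above.  The
 * buffer size limit is hypothetical; the iterator macro is real API.
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static int example_copy_mc_list(struct net_device *dev, u8 *buf, int max)
{
	struct netdev_hw_addr *ha;
	int n = 0;

	netdev_for_each_mc_addr(ha, dev) {
		if (n == max)
			break;		/* overflow -> caller goes ALL_MCAST */
		memcpy(buf + n * ETH_ALEN, ha->addr, ETH_ALEN);
		n++;
	}
	return n;
}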
12006 struct net_device *dev = bp->dev; in bnxt_uc_list_updated()
12007 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; in bnxt_uc_list_updated()
12011 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1)) in bnxt_uc_list_updated()
12015 if (!ether_addr_equal(ha->addr, vnic->uc_list + off)) in bnxt_uc_list_updated()
12031 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) in bnxt_set_rx_mode()
12034 vnic = &bp->vnic_info[0]; in bnxt_set_rx_mode()
12035 mask = vnic->rx_mask; in bnxt_set_rx_mode()
12041 if (dev->flags & IFF_PROMISC) in bnxt_set_rx_mode()
12046 if (dev->flags & IFF_BROADCAST) in bnxt_set_rx_mode()
12048 if (dev->flags & IFF_ALLMULTI) { in bnxt_set_rx_mode()
12050 vnic->mc_list_count = 0; in bnxt_set_rx_mode()
12051 } else if (dev->flags & IFF_MULTICAST) { in bnxt_set_rx_mode()
12055 if (mask != vnic->rx_mask || uc_update || mc_update) { in bnxt_set_rx_mode()
12056 vnic->rx_mask = mask; in bnxt_set_rx_mode()
12064 struct net_device *dev = bp->dev; in bnxt_cfg_rx_mode()
12065 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; in bnxt_cfg_rx_mode()
12077 for (i = 1; i < vnic->uc_filter_count; i++) { in bnxt_cfg_rx_mode()
12078 struct bnxt_l2_filter *fltr = vnic->l2_filters[i]; in bnxt_cfg_rx_mode()
12084 vnic->uc_filter_count = 1; in bnxt_cfg_rx_mode()
12087 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) { in bnxt_cfg_rx_mode()
12088 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; in bnxt_cfg_rx_mode()
12091 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN); in bnxt_cfg_rx_mode()
12093 vnic->uc_filter_count++; in bnxt_cfg_rx_mode()
12098 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) { in bnxt_cfg_rx_mode()
12099 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off); in bnxt_cfg_rx_mode()
12101 if (BNXT_VF(bp) && rc == -ENODEV) { in bnxt_cfg_rx_mode()
12102 if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) in bnxt_cfg_rx_mode()
12103 netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n"); in bnxt_cfg_rx_mode()
12105 netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n"); in bnxt_cfg_rx_mode()
12108 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); in bnxt_cfg_rx_mode()
12110 vnic->uc_filter_count = i; in bnxt_cfg_rx_mode()
12114 if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) in bnxt_cfg_rx_mode()
12115 netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n"); in bnxt_cfg_rx_mode()
12118 if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) && in bnxt_cfg_rx_mode()
12120 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; in bnxt_cfg_rx_mode()
12122 if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) { in bnxt_cfg_rx_mode()
12123 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n", in bnxt_cfg_rx_mode()
12125 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; in bnxt_cfg_rx_mode()
12126 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; in bnxt_cfg_rx_mode()
12127 vnic->mc_list_count = 0; in bnxt_cfg_rx_mode()
12131 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n", in bnxt_cfg_rx_mode()
12141 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; in bnxt_can_reserve_rings()
12146 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings) in bnxt_can_reserve_rings()
12149 if (!netif_running(bp->dev)) in bnxt_can_reserve_rings()
12159 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in bnxt_rfs_supported()
12160 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) in bnxt_rfs_supported()
12164 /* 212 firmware is broken for aRFS */ in bnxt_rfs_supported()
12169 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) in bnxt_rfs_supported()
12179 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) in bnxt_rfs_capable()
12181 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings) in bnxt_rfs_capable()
12184 vnics = 1 + bp->rx_nr_rings; in bnxt_rfs_capable()
12189 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) in bnxt_rfs_capable()
12192 if (bp->rx_nr_rings > 1) in bnxt_rfs_capable()
12193 netdev_warn(bp->dev, in bnxt_rfs_capable()
12195 min(max_rss_ctxs - 1, max_vnics - 1)); in bnxt_rfs_capable()
12202 if (vnics == bp->hw_resc.resv_vnics) in bnxt_rfs_capable()
12206 if (vnics <= bp->hw_resc.resv_vnics) in bnxt_rfs_capable()
12209 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n"); in bnxt_rfs_capable()
12223 if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog) in bnxt_fix_features()
12237 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) in bnxt_fix_features()
12243 if (BNXT_VF(bp) && bp->vf.vlan) in bnxt_fix_features()
12252 u32 flags = bp->flags; in bnxt_set_features()
12264 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) in bnxt_set_features()
12273 changes = flags ^ bp->flags; in bnxt_set_features()
12276 if ((bp->flags & BNXT_FLAG_TPA) == 0 || in bnxt_set_features()
12278 (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) in bnxt_set_features()
12285 if (flags != bp->flags) { in bnxt_set_features()
12286 u32 old_flags = bp->flags; in bnxt_set_features()
12288 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { in bnxt_set_features()
12289 bp->flags = flags; in bnxt_set_features()
12297 bp->flags = flags; in bnxt_set_features()
12304 bp->flags = flags; in bnxt_set_features()
12309 bp->flags = old_flags; in bnxt_set_features()
12318 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off); in bnxt_exthdr_check()
12328 nexthdr = &ip6h->nexthdr; in bnxt_exthdr_check()
12336 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data, in bnxt_exthdr_check()
12348 /* The ext header may be a hop-by-hop header inserted for in bnxt_exthdr_check()
12353 if (likely(skb->len <= GRO_LEGACY_MAX_SIZE)) in bnxt_exthdr_check()
12357 if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 || in bnxt_exthdr_check()
12358 jhdr->nexthdr != IPPROTO_TCP) in bnxt_exthdr_check()
12366 nexthdr = &hp->nexthdr; in bnxt_exthdr_check()
12371 if (skb->encapsulation) { in bnxt_exthdr_check()
12377 /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */ in bnxt_exthdr_check()
12385 __be16 udp_port = uh->dest; in bnxt_udp_tunl_check()
12387 if (udp_port != bp->vxlan_port && udp_port != bp->nge_port && in bnxt_udp_tunl_check()
12388 udp_port != bp->vxlan_gpe_port) in bnxt_udp_tunl_check()
12390 if (skb->inner_protocol == htons(ETH_P_TEB)) { in bnxt_udp_tunl_check()
12393 switch (eh->h_proto) { in bnxt_udp_tunl_check()
12401 } else if (skb->inner_protocol == htons(ETH_P_IP)) { in bnxt_udp_tunl_check()
12403 } else if (skb->inner_protocol == htons(ETH_P_IPV6)) { in bnxt_udp_tunl_check()
12418 switch (skb->inner_protocol) { in bnxt_tunl_check()
12445 if (!skb->encapsulation) in bnxt_features_check()
12447 l4_proto = &ip_hdr(skb)->protocol; in bnxt_features_check()
12478 rc = -ENOMEM; in bnxt_dbg_hwrm_rd_reg()
12482 req->host_dest_addr = cpu_to_le64(mapping); in bnxt_dbg_hwrm_rd_reg()
12485 req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR); in bnxt_dbg_hwrm_rd_reg()
12486 req->read_len32 = cpu_to_le32(num_words); in bnxt_dbg_hwrm_rd_reg()
12489 if (rc || resp->error_code) { in bnxt_dbg_hwrm_rd_reg()
12490 rc = -EIO; in bnxt_dbg_hwrm_rd_reg()
12512 req->ring_type = ring_type; in bnxt_dbg_hwrm_ring_info_get()
12513 req->fw_ring_id = cpu_to_le32(ring_id); in bnxt_dbg_hwrm_ring_info_get()
12517 *prod = le32_to_cpu(resp->producer_index); in bnxt_dbg_hwrm_ring_info_get()
12518 *cons = le32_to_cpu(resp->consumer_index); in bnxt_dbg_hwrm_ring_info_get()
12527 int i = bnapi->index, j; in bnxt_dump_tx_sw_state()
12530 netdev_info(bnapi->bp->dev, "[%d.%d]: tx{fw_ring: %d prod: %x cons: %x}\n", in bnxt_dump_tx_sw_state()
12531 i, j, txr->tx_ring_struct.fw_ring_id, txr->tx_prod, in bnxt_dump_tx_sw_state()
12532 txr->tx_cons); in bnxt_dump_tx_sw_state()
12537 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; in bnxt_dump_rx_sw_state()
12538 int i = bnapi->index; in bnxt_dump_rx_sw_state()
12543 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n", in bnxt_dump_rx_sw_state()
12544 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod, in bnxt_dump_rx_sw_state()
12545 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod, in bnxt_dump_rx_sw_state()
12546 rxr->rx_sw_agg_prod); in bnxt_dump_rx_sw_state()
12551 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; in bnxt_dump_cp_sw_state()
12552 int i = bnapi->index; in bnxt_dump_cp_sw_state()
12554 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n", in bnxt_dump_cp_sw_state()
12555 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons); in bnxt_dump_cp_sw_state()
12563 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_dbg_dump_states()
12564 bnapi = bp->bnapi[i]; in bnxt_dbg_dump_states()
12575 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; in bnxt_hwrm_rx_ring_reset()
12577 struct bnxt_napi *bnapi = rxr->bnapi; in bnxt_hwrm_rx_ring_reset()
12586 cpr = &bnapi->cp_ring; in bnxt_hwrm_rx_ring_reset()
12587 cp_ring_id = cpr->cp_ring_struct.fw_ring_id; in bnxt_hwrm_rx_ring_reset()
12588 req->cmpl_ring = cpu_to_le16(cp_ring_id); in bnxt_hwrm_rx_ring_reset()
12589 req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP; in bnxt_hwrm_rx_ring_reset()
12590 req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id); in bnxt_hwrm_rx_ring_reset()
12598 if (netif_running(bp->dev)) { in bnxt_reset_task()
12617 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); in bnxt_tx_timeout()
12623 struct bnxt_fw_health *fw_health = bp->fw_health; in bnxt_fw_health_check()
12624 struct pci_dev *pdev = bp->pdev; in bnxt_fw_health_check()
12627 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) in bnxt_fw_health_check()
12632 if (fw_health->tmr_counter) { in bnxt_fw_health_check()
12633 fw_health->tmr_counter--; in bnxt_fw_health_check()
12638 if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) { in bnxt_fw_health_check()
12639 fw_health->arrests++; in bnxt_fw_health_check()
12643 fw_health->last_fw_heartbeat = val; in bnxt_fw_health_check()
12646 if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) { in bnxt_fw_health_check()
12647 fw_health->discoveries++; in bnxt_fw_health_check()
12651 fw_health->tmr_counter = fw_health->tmr_multiplier; in bnxt_fw_health_check()
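/*
 * Editor's sketch: the heartbeat test above reduced to its core -- a
 * firmware counter that fails to advance between two timer polls marks
 * the firmware as stalled and triggers the reset path.  Names below are
 * stand-ins for the fw_health bookkeeping.
 */
struct example_health { u32 last_hb; };		/* stand-in */

static bool example_fw_alive(struct example_health *h, u32 hb_now)
{
	bool alive = (hb_now != h->last_hb);

	h->last_hb = hb_now;
	return alive;
}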
12661 struct net_device *dev = bp->dev; in bnxt_timer()
12663 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state)) in bnxt_timer()
12666 if (atomic_read(&bp->intr_sem) != 0) in bnxt_timer()
12669 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) in bnxt_timer()
12672 if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks) in bnxt_timer()
12679 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) in bnxt_timer()
12683 if (bp->link_info.phy_retry) { in bnxt_timer()
12684 if (time_after(jiffies, bp->link_info.phy_retry_expires)) { in bnxt_timer()
12685 bp->link_info.phy_retry = false; in bnxt_timer()
12686 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n"); in bnxt_timer()
12692 if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) in bnxt_timer()
12695 if ((BNXT_CHIP_P5(bp)) && !bp->chip_rev && netif_carrier_ok(dev)) in bnxt_timer()
12699 mod_timer(&bp->timer, jiffies + bp->current_interval); in bnxt_timer()
12709 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); in bnxt_rtnl_lock_sp()
12715 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); in bnxt_rtnl_unlock_sp()
12723 if (test_bit(BNXT_STATE_OPEN, &bp->state)) in bnxt_reset()
12734 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { in bnxt_rx_ring_reset()
12739 if (bp->flags & BNXT_FLAG_TPA) in bnxt_rx_ring_reset()
12741 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_rx_ring_reset()
12742 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; in bnxt_rx_ring_reset()
12746 if (!rxr->bnapi->in_reset) in bnxt_rx_ring_reset()
12751 if (rc == -EINVAL || rc == -EOPNOTSUPP) in bnxt_rx_ring_reset()
12752 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n"); in bnxt_rx_ring_reset()
12754 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n", in bnxt_rx_ring_reset()
12760 rxr->rx_prod = 0; in bnxt_rx_ring_reset()
12761 rxr->rx_agg_prod = 0; in bnxt_rx_ring_reset()
12762 rxr->rx_sw_agg_prod = 0; in bnxt_rx_ring_reset()
12763 rxr->rx_next_cons = 0; in bnxt_rx_ring_reset()
12764 rxr->bnapi->in_reset = false; in bnxt_rx_ring_reset()
12766 cpr = &rxr->bnapi->cp_ring; in bnxt_rx_ring_reset()
12767 cpr->sw_stats.rx.rx_resets++; in bnxt_rx_ring_reset()
12768 if (bp->flags & BNXT_FLAG_AGG_RINGS) in bnxt_rx_ring_reset()
12769 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); in bnxt_rx_ring_reset()
12770 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); in bnxt_rx_ring_reset()
12772 if (bp->flags & BNXT_FLAG_TPA) in bnxt_rx_ring_reset()
12784 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) { in bnxt_fw_reset_close()
12787 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val); in bnxt_fw_reset_close()
12789 bp->fw_reset_min_dsecs = 0; in bnxt_fw_reset_close()
12795 pci_disable_device(bp->pdev); in bnxt_fw_reset_close()
12801 if (pci_is_enabled(bp->pdev)) in bnxt_fw_reset_close()
12802 pci_disable_device(bp->pdev); in bnxt_fw_reset_close()
12808 struct bnxt_fw_health *fw_health = bp->fw_health; in is_bnxt_fw_ok()
12813 if (val == fw_health->last_fw_heartbeat) in is_bnxt_fw_ok()
12817 if (val != fw_health->last_fw_reset_cnt) in is_bnxt_fw_ok()
12829 struct bnxt_fw_health *fw_health = bp->fw_health; in bnxt_force_fw_reset()
12830 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; in bnxt_force_fw_reset()
12833 if (!test_bit(BNXT_STATE_OPEN, &bp->state) || in bnxt_force_fw_reset()
12834 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) in bnxt_force_fw_reset()
12838 spin_lock_bh(&ptp->ptp_lock); in bnxt_force_fw_reset()
12839 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); in bnxt_force_fw_reset()
12840 spin_unlock_bh(&ptp->ptp_lock); in bnxt_force_fw_reset()
12842 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); in bnxt_force_fw_reset()
12845 wait_dsecs = fw_health->master_func_wait_dsecs; in bnxt_force_fw_reset()
12846 if (fw_health->primary) { in bnxt_force_fw_reset()
12847 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) in bnxt_force_fw_reset()
12849 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; in bnxt_force_fw_reset()
12851 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10; in bnxt_force_fw_reset()
12852 wait_dsecs = fw_health->normal_func_wait_dsecs; in bnxt_force_fw_reset()
12853 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; in bnxt_force_fw_reset()
12856 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs; in bnxt_force_fw_reset()
12857 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs; in bnxt_force_fw_reset()
12863 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n"); in bnxt_fw_exception()
12864 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); in bnxt_fw_exception()
12883 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc); in bnxt_get_registered_vfs()
12886 if (bp->pf.registered_vfs) in bnxt_get_registered_vfs()
12887 return bp->pf.registered_vfs; in bnxt_get_registered_vfs()
12888 if (bp->sriov_cfg) in bnxt_get_registered_vfs()
12897 if (test_bit(BNXT_STATE_OPEN, &bp->state) && in bnxt_fw_reset()
12898 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { in bnxt_fw_reset()
12899 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; in bnxt_fw_reset()
12903 spin_lock_bh(&ptp->ptp_lock); in bnxt_fw_reset()
12904 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); in bnxt_fw_reset()
12905 spin_unlock_bh(&ptp->ptp_lock); in bnxt_fw_reset()
12907 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); in bnxt_fw_reset()
12909 if (bp->pf.active_vfs && in bnxt_fw_reset()
12910 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) in bnxt_fw_reset()
12913 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n", in bnxt_fw_reset()
12915 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); in bnxt_fw_reset()
12916 dev_close(bp->dev); in bnxt_fw_reset()
12921 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs) in bnxt_fw_reset()
12922 bp->fw_reset_max_dsecs = vf_tmo_dsecs; in bnxt_fw_reset()
12923 bp->fw_reset_state = in bnxt_fw_reset()
12929 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { in bnxt_fw_reset()
12930 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; in bnxt_fw_reset()
12933 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; in bnxt_fw_reset()
12934 tmo = bp->fw_reset_min_dsecs * HZ / 10; in bnxt_fw_reset()
12946 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) in bnxt_chk_missed_irq()
12949 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_chk_missed_irq()
12950 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_chk_missed_irq()
12958 cpr = &bnapi->cp_ring; in bnxt_chk_missed_irq()
12959 for (j = 0; j < cpr->cp_ring_count; j++) { in bnxt_chk_missed_irq()
12960 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j]; in bnxt_chk_missed_irq()
12963 if (cpr2->has_more_work || !bnxt_has_work(bp, cpr2)) in bnxt_chk_missed_irq()
12966 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) { in bnxt_chk_missed_irq()
12967 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons; in bnxt_chk_missed_irq()
12970 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id; in bnxt_chk_missed_irq()
12974 cpr->sw_stats.cmn.missed_irqs++; in bnxt_chk_missed_irq()
12983 struct bnxt_link_info *link_info = &bp->link_info; in bnxt_init_ethtool_link_settings()
12985 if (BNXT_AUTO_MODE(link_info->auto_mode)) { in bnxt_init_ethtool_link_settings()
12986 link_info->autoneg = BNXT_AUTONEG_SPEED; in bnxt_init_ethtool_link_settings()
12987 if (bp->hwrm_spec_code >= 0x10201) { in bnxt_init_ethtool_link_settings()
12988 if (link_info->auto_pause_setting & in bnxt_init_ethtool_link_settings()
12990 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; in bnxt_init_ethtool_link_settings()
12992 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; in bnxt_init_ethtool_link_settings()
12997 link_info->req_duplex = link_info->duplex_setting; in bnxt_init_ethtool_link_settings()
12999 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) in bnxt_init_ethtool_link_settings()
13000 link_info->req_flow_ctrl = in bnxt_init_ethtool_link_settings()
13001 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH; in bnxt_init_ethtool_link_settings()
13003 link_info->req_flow_ctrl = link_info->force_pause_setting; in bnxt_init_ethtool_link_settings()
13008 struct bnxt_fw_health *fw_health = bp->fw_health; in bnxt_fw_echo_reply()
13015 req->event_data1 = cpu_to_le32(fw_health->echo_req_data1); in bnxt_fw_echo_reply()
13016 req->event_data2 = cpu_to_le32(fw_health->echo_req_data2); in bnxt_fw_echo_reply()
13024 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); in bnxt_sp_task()
13026 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { in bnxt_sp_task()
13027 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); in bnxt_sp_task()
13031 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) in bnxt_sp_task()
13034 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) in bnxt_sp_task()
13036 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) in bnxt_sp_task()
13038 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event)) in bnxt_sp_task()
13039 netdev_info(bp->dev, "Receive PF driver unload event!\n"); in bnxt_sp_task()
13040 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) { in bnxt_sp_task()
13046 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { in bnxt_sp_task()
13049 mutex_lock(&bp->link_lock); in bnxt_sp_task()
13051 &bp->sp_event)) in bnxt_sp_task()
13056 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", in bnxt_sp_task()
13060 &bp->sp_event)) in bnxt_sp_task()
13062 mutex_unlock(&bp->link_lock); in bnxt_sp_task()
13064 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) { in bnxt_sp_task()
13067 mutex_lock(&bp->link_lock); in bnxt_sp_task()
13069 mutex_unlock(&bp->link_lock); in bnxt_sp_task()
13071 netdev_warn(bp->dev, "update phy settings retry failed\n"); in bnxt_sp_task()
13073 bp->link_info.phy_retry = false; in bnxt_sp_task()
13074 netdev_info(bp->dev, "update phy settings retry succeeded\n"); in bnxt_sp_task()
13077 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) { in bnxt_sp_task()
13078 mutex_lock(&bp->link_lock); in bnxt_sp_task()
13080 mutex_unlock(&bp->link_lock); in bnxt_sp_task()
13083 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event)) in bnxt_sp_task()
13086 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event)) in bnxt_sp_task()
13089 if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event)) in bnxt_sp_task()
13092 if (test_and_clear_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event)) in bnxt_sp_task()
13098 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) in bnxt_sp_task()
13101 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) in bnxt_sp_task()
13104 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event)) in bnxt_sp_task()
13107 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) { in bnxt_sp_task()
13108 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) || in bnxt_sp_task()
13109 test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state)) in bnxt_sp_task()
13115 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) { in bnxt_sp_task()
13121 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); in bnxt_sp_task()
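bnxt_sp_task() is a workqueue handler draining a bitmask of deferred events: producers set bits in bp->sp_event, and the handler consumes them one by one with test_and_clear_bit() while BNXT_STATE_IN_SP_TASK brackets the pass. A compact single-threaded sketch of the same dispatch shape (the kernel bitops are atomic; this plain-C stand-in is illustration only):

    #include <stdio.h>

    enum { EV_LINK_CHNG, EV_PERIODIC_STATS, EV_RESET_TASK };

    static unsigned long sp_event;	/* one bit per pending deferred event */

    /* Plain stand-in for the kernel's atomic test_and_clear_bit(). */
    static int test_and_clear(unsigned long *mask, int bit)
    {
    	int was_set = (*mask >> bit) & 1;

    	*mask &= ~(1UL << bit);
    	return was_set;
    }

    static void sp_task(void)
    {
    	if (test_and_clear(&sp_event, EV_LINK_CHNG))
    		printf("handle link change\n");
    	if (test_and_clear(&sp_event, EV_PERIODIC_STATS))
    		printf("update periodic stats\n");
    	if (test_and_clear(&sp_event, EV_RESET_TASK))
    		printf("run reset task\n");
    }

    int main(void)
    {
    	sp_event = (1UL << EV_LINK_CHNG) | (1UL << EV_RESET_TASK);
    	sp_task();
    	return 0;
    }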
13128 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, in bnxt_check_rings() argument
13142 return -ENOMEM; in bnxt_check_rings()
13144 if (bp->flags & BNXT_FLAG_AGG_RINGS) in bnxt_check_rings()
13147 tx_rings_needed = tx * tx_sets + tx_xdp; in bnxt_check_rings()
13149 return -ENOMEM; in bnxt_check_rings()
13152 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5_PLUS)) == in bnxt_check_rings()
13159 return -ENOMEM; in bnxt_check_rings()
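bnxt_check_rings() vets a proposed ring layout before it is committed: TX rings scale with the number of TX sets (traffic classes) plus any XDP rings, and the RX requirement doubles when aggregation rings are enabled. A simplified standalone version of that budget arithmetic (parameter names assumed):

    #include <stdio.h>

    /* Simplified ring budget check: TX scales with TC sets plus XDP rings,
     * RX doubles when aggregation rings are in use.
     */
    static int check_rings(int tx, int rx, int tcs, int tx_xdp,
    		       int max_tx, int max_rx, int agg_rings)
    {
    	int tx_sets = tcs ? tcs : 1;
    	int tx_needed = tx * tx_sets + tx_xdp;
    	int rx_needed = agg_rings ? rx * 2 : rx;

    	if (tx_needed > max_tx || rx_needed > max_rx)
    		return -1;	/* the driver returns -ENOMEM */
    	return 0;
    }

    int main(void)
    {
    	/* 8 TX rings x 2 TCs + 0 XDP against a max of 16 -> fits */
    	printf("%d\n", check_rings(8, 8, 2, 0, 16, 16, 0));
    	return 0;
    }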
13171 if (bp->bar2) { in bnxt_unmap_bars()
13172 pci_iounmap(pdev, bp->bar2); in bnxt_unmap_bars()
13173 bp->bar2 = NULL; in bnxt_unmap_bars()
13176 if (bp->bar1) { in bnxt_unmap_bars()
13177 pci_iounmap(pdev, bp->bar1); in bnxt_unmap_bars()
13178 bp->bar1 = NULL; in bnxt_unmap_bars()
13181 if (bp->bar0) { in bnxt_unmap_bars()
13182 pci_iounmap(pdev, bp->bar0); in bnxt_unmap_bars()
13183 bp->bar0 = NULL; in bnxt_unmap_bars()
13189 bnxt_unmap_bars(bp, bp->pdev); in bnxt_cleanup_pci()
13190 pci_release_regions(bp->pdev); in bnxt_cleanup_pci()
13191 if (pci_is_enabled(bp->pdev)) in bnxt_cleanup_pci()
13192 pci_disable_device(bp->pdev); in bnxt_cleanup_pci()
13197 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; in bnxt_init_dflt_coal()
13201 if (coal_cap->cmpl_params & in bnxt_init_dflt_coal()
13208 coal = &bp->rx_coal; in bnxt_init_dflt_coal()
13209 coal->coal_ticks = 10; in bnxt_init_dflt_coal()
13210 coal->coal_bufs = 30; in bnxt_init_dflt_coal()
13211 coal->coal_ticks_irq = 1; in bnxt_init_dflt_coal()
13212 coal->coal_bufs_irq = 2; in bnxt_init_dflt_coal()
13213 coal->idle_thresh = 50; in bnxt_init_dflt_coal()
13214 coal->bufs_per_record = 2; in bnxt_init_dflt_coal()
13215 coal->budget = 64; /* NAPI budget */ in bnxt_init_dflt_coal()
13216 coal->flags = flags; in bnxt_init_dflt_coal()
13218 coal = &bp->tx_coal; in bnxt_init_dflt_coal()
13219 coal->coal_ticks = 28; in bnxt_init_dflt_coal()
13220 coal->coal_bufs = 30; in bnxt_init_dflt_coal()
13221 coal->coal_ticks_irq = 2; in bnxt_init_dflt_coal()
13222 coal->coal_bufs_irq = 2; in bnxt_init_dflt_coal()
13223 coal->bufs_per_record = 1; in bnxt_init_dflt_coal()
13224 coal->flags = flags; in bnxt_init_dflt_coal()
13226 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; in bnxt_init_dflt_coal()
13229 /* FW that pre-reserves 1 VNIC per function */
13234 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && in bnxt_fw_pre_resv_vnics()
13237 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && in bnxt_fw_pre_resv_vnics()
13247 bp->fw_cap = 0; in bnxt_fw_init_one_p1()
13249 /* FW may be unresponsive after FLR. FLR must complete within 100 msec in bnxt_fw_init_one_p1()
13253 msleep(100); in bnxt_fw_init_one_p1()
13268 return -ENODEV; in bnxt_fw_init_one_p1()
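Phase 1 waits out the 100 ms FLR completion window before the first firmware probe, failing with -ENODEV if the firmware never responds. A hedged userspace sketch of the wait-then-probe pattern (fw_responds() is a hypothetical stand-in for the HWRM readiness check; nanosleep() replaces msleep()):

    #include <errno.h>
    #include <stdbool.h>
    #include <time.h>

    static bool fw_responds(void)		/* hypothetical readiness probe */
    {
    	return true;
    }

    /* Give a pending function-level reset its full 100 ms, then probe FW. */
    static int fw_init_phase1(void)
    {
    	struct timespec ts = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };

    	nanosleep(&ts, NULL);
    	return fw_responds() ? 0 : -ENODEV;
    }

    int main(void)
    {
    	return fw_init_phase1();
    }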
13281 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n", in bnxt_fw_init_one_p2()
13283 return -ENODEV; in bnxt_fw_init_one_p2()
13288 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n", in bnxt_fw_init_one_p2()
13292 netdev_warn(bp->dev, "no memory for firmware error recovery\n"); in bnxt_fw_init_one_p2()
13296 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n", in bnxt_fw_init_one_p2()
13302 return -ENODEV; in bnxt_fw_init_one_p2()
13305 bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS; in bnxt_fw_init_one_p2()
13311 if (bp->fw_cap & BNXT_FW_CAP_PTP) in bnxt_fw_init_one_p2()
13320 bp->rss_cap &= ~BNXT_RSS_CAP_UDP_RSS_CAP; in bnxt_set_dflt_rss_hash_type()
13321 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 | in bnxt_set_dflt_rss_hash_type()
13325 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA) in bnxt_set_dflt_rss_hash_type()
13326 bp->rss_hash_delta = bp->rss_hash_cfg; in bnxt_set_dflt_rss_hash_type()
13327 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) { in bnxt_set_dflt_rss_hash_type()
13328 bp->rss_cap |= BNXT_RSS_CAP_UDP_RSS_CAP; in bnxt_set_dflt_rss_hash_type()
13329 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 | in bnxt_set_dflt_rss_hash_type()
13336 struct net_device *dev = bp->dev; in bnxt_set_dflt_rfs()
13338 dev->hw_features &= ~NETIF_F_NTUPLE; in bnxt_set_dflt_rfs()
13339 dev->features &= ~NETIF_F_NTUPLE; in bnxt_set_dflt_rfs()
13340 bp->flags &= ~BNXT_FLAG_RFS; in bnxt_set_dflt_rfs()
13342 dev->hw_features |= NETIF_F_NTUPLE; in bnxt_set_dflt_rfs()
13344 bp->flags |= BNXT_FLAG_RFS; in bnxt_set_dflt_rfs()
13345 dev->features |= NETIF_F_NTUPLE; in bnxt_set_dflt_rfs()
13352 struct pci_dev *pdev = bp->pdev; in bnxt_fw_init_one_p3()
13358 if (bp->flags & BNXT_FLAG_WOL_CAP) in bnxt_fw_init_one_p3()
13359 device_set_wakeup_enable(&pdev->dev, bp->wol); in bnxt_fw_init_one_p3()
13361 device_set_wakeup_capable(&pdev->dev, false); in bnxt_fw_init_one_p3()
13375 netdev_err(bp->dev, "Firmware init phase 1 failed\n"); in bnxt_fw_init_one()
13380 netdev_err(bp->dev, "Firmware init phase 2 failed\n"); in bnxt_fw_init_one()
13386 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false); in bnxt_fw_init_one()
13396 struct bnxt_fw_health *fw_health = bp->fw_health; in bnxt_fw_reset_writel()
13397 u32 reg = fw_health->fw_reset_seq_regs[reg_idx]; in bnxt_fw_reset_writel()
13398 u32 val = fw_health->fw_reset_seq_vals[reg_idx]; in bnxt_fw_reset_writel()
13401 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx]; in bnxt_fw_reset_writel()
13406 pci_write_config_dword(bp->pdev, reg_off, val); in bnxt_fw_reset_writel()
13410 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); in bnxt_fw_reset_writel()
13414 writel(val, bp->bar0 + reg_off); in bnxt_fw_reset_writel()
13417 writel(val, bp->bar1 + reg_off); in bnxt_fw_reset_writel()
13421 pci_read_config_dword(bp->pdev, 0, &val); in bnxt_fw_reset_writel()
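bnxt_fw_reset_writel() routes each reset-sequence write to the right address space (PCI config space, a GRC window remapped through BAR0, or a direct BAR offset) and, when a delay follows, first flushes posted writes with a dummy config read. A schematic, self-contained sketch of that dispatch (I/O helpers stubbed; the real GRC window remap is omitted):

    #include <stdint.h>
    #include <stdio.h>

    enum reg_space { REG_PCI_CFG, REG_BAR0, REG_BAR1 };

    struct reset_step {
    	enum reg_space space;
    	uint32_t off, val, delay_ms;
    };

    /* Stubs standing in for pci_write_config_dword()/writel()/msleep(). */
    static void cfg_write32(uint32_t off, uint32_t val)
    {
    	printf("cfg[0x%x] <- 0x%x\n", (unsigned)off, (unsigned)val);
    }

    static uint32_t cfg_read32(uint32_t off) { (void)off; return 0; }

    static void mmio_write32(int bar, uint32_t off, uint32_t val)
    {
    	printf("bar%d[0x%x] <- 0x%x\n", bar, (unsigned)off, (unsigned)val);
    }

    static void sleep_ms(uint32_t ms) { (void)ms; }

    static void apply_step(const struct reset_step *s)
    {
    	if (s->space == REG_PCI_CFG)
    		cfg_write32(s->off, s->val);
    	else
    		mmio_write32(s->space == REG_BAR0 ? 0 : 1, s->off, s->val);

    	if (s->delay_ms) {
    		(void)cfg_read32(0);	/* flush posted writes before waiting */
    		sleep_ms(s->delay_ms);
    	}
    }

    int main(void)
    {
    	struct reset_step step = { REG_BAR0, 0x1000, 0xdeadbeef, 10 };

    	apply_step(&step);
    	return 0;
    }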
13432 if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF) in bnxt_hwrm_reset_permitted()
13438 req->fid = cpu_to_le16(0xffff); in bnxt_hwrm_reset_permitted()
13441 result = !!(le16_to_cpu(resp->flags) & in bnxt_hwrm_reset_permitted()
13449 struct bnxt_fw_health *fw_health = bp->fw_health; in bnxt_reset_all()
13452 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { in bnxt_reset_all()
13454 bp->fw_reset_timestamp = jiffies; in bnxt_reset_all()
13458 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) { in bnxt_reset_all()
13459 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) in bnxt_reset_all()
13461 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) { in bnxt_reset_all()
13466 req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG); in bnxt_reset_all()
13467 req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP; in bnxt_reset_all()
13468 req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; in bnxt_reset_all()
13469 req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; in bnxt_reset_all()
13472 if (rc != -ENODEV) in bnxt_reset_all()
13473 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc); in bnxt_reset_all()
13475 bp->fw_reset_timestamp = jiffies; in bnxt_reset_all()
13480 return time_after(jiffies, bp->fw_reset_timestamp + in bnxt_fw_reset_timeout()
13481 (bp->fw_reset_max_dsecs * HZ / 10)); in bnxt_fw_reset_timeout()
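bnxt_fw_reset_timeout() leans on time_after(), which stays correct across jiffies wraparound by comparing via signed subtraction. A standalone equivalent:

    #include <stdbool.h>
    #include <stdio.h>

    /* Wraparound-safe "a is after b", equivalent to the kernel's time_after(). */
    static bool time_after_ul(unsigned long a, unsigned long b)
    {
    	return (long)(b - a) < 0;
    }

    int main(void)
    {
    	unsigned long start = (unsigned long)-5;	/* jiffies near wrap */

    	printf("%d\n", time_after_ul(start + 10, start));	/* prints 1 */
    	return 0;
    }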
13486 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); in bnxt_fw_reset_abort()
13487 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) { in bnxt_fw_reset_abort()
13491 bp->fw_reset_state = 0; in bnxt_fw_reset_abort()
13492 dev_close(bp->dev); in bnxt_fw_reset_abort()
13500 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { in bnxt_fw_reset_task()
13501 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n"); in bnxt_fw_reset_task()
13505 switch (bp->fw_reset_state) { in bnxt_fw_reset_task()
13511 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n", in bnxt_fw_reset_task()
13512 n, jiffies_to_msecs(jiffies - in bnxt_fw_reset_task()
13513 bp->fw_reset_timestamp)); in bnxt_fw_reset_task()
13517 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); in bnxt_fw_reset_task()
13518 bp->fw_reset_state = 0; in bnxt_fw_reset_task()
13519 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n", in bnxt_fw_reset_task()
13526 bp->fw_reset_timestamp = jiffies; in bnxt_fw_reset_task()
13528 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { in bnxt_fw_reset_task()
13534 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { in bnxt_fw_reset_task()
13535 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; in bnxt_fw_reset_task()
13538 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; in bnxt_fw_reset_task()
13539 tmo = bp->fw_reset_min_dsecs * HZ / 10; in bnxt_fw_reset_task()
13555 if (!bp->fw_health->primary) { in bnxt_fw_reset_task()
13556 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs; in bnxt_fw_reset_task()
13558 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; in bnxt_fw_reset_task()
13562 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; in bnxt_fw_reset_task()
13567 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; in bnxt_fw_reset_task()
13568 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10); in bnxt_fw_reset_task()
13572 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) && in bnxt_fw_reset_task()
13573 !bp->fw_reset_min_dsecs) { in bnxt_fw_reset_task()
13576 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val); in bnxt_fw_reset_task()
13579 netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n"); in bnxt_fw_reset_task()
13580 rc = -ETIMEDOUT; in bnxt_fw_reset_task()
13587 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); in bnxt_fw_reset_task()
13588 clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state); in bnxt_fw_reset_task()
13589 if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) && in bnxt_fw_reset_task()
13590 !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state)) in bnxt_fw_reset_task()
13592 if (pci_enable_device(bp->pdev)) { in bnxt_fw_reset_task()
13593 netdev_err(bp->dev, "Cannot re-enable PCI device\n"); in bnxt_fw_reset_task()
13594 rc = -ENODEV; in bnxt_fw_reset_task()
13597 pci_set_master(bp->pdev); in bnxt_fw_reset_task()
13598 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW; in bnxt_fw_reset_task()
13601 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT; in bnxt_fw_reset_task()
13605 netdev_err(bp->dev, "Firmware reset aborted\n"); in bnxt_fw_reset_task()
13611 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; in bnxt_fw_reset_task()
13612 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING; in bnxt_fw_reset_task()
13619 rc = bnxt_open(bp->dev); in bnxt_fw_reset_task()
13621 netdev_err(bp->dev, "bnxt_open() failed during FW reset\n"); in bnxt_fw_reset_task()
13627 if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && in bnxt_fw_reset_task()
13628 bp->fw_health->enabled) { in bnxt_fw_reset_task()
13629 bp->fw_health->last_fw_reset_cnt = in bnxt_fw_reset_task()
13632 bp->fw_reset_state = 0; in bnxt_fw_reset_task()
13635 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); in bnxt_fw_reset_task()
13641 clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state); in bnxt_fw_reset_task()
13642 if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) { in bnxt_fw_reset_task()
13652 if (bp->fw_health->status_reliable || in bnxt_fw_reset_task()
13653 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) { in bnxt_fw_reset_task()
13656 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts); in bnxt_fw_reset_task()
13669 SET_NETDEV_DEV(dev, &pdev->dev); in bnxt_init_board()
13671 /* enable device (incl. PCI PM wakeup), and bus-mastering */ in bnxt_init_board()
13674 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); in bnxt_init_board()
13679 dev_err(&pdev->dev, in bnxt_init_board()
13681 rc = -ENODEV; in bnxt_init_board()
13687 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); in bnxt_init_board()
13691 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 && in bnxt_init_board()
13692 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) { in bnxt_init_board()
13693 dev_err(&pdev->dev, "System does not support DMA, aborting\n"); in bnxt_init_board()
13694 rc = -EIO; in bnxt_init_board()
13700 bp->dev = dev; in bnxt_init_board()
13701 bp->pdev = pdev; in bnxt_init_board()
13703 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2() in bnxt_init_board()
13706 bp->bar0 = pci_ioremap_bar(pdev, 0); in bnxt_init_board()
13707 if (!bp->bar0) { in bnxt_init_board()
13708 dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); in bnxt_init_board()
13709 rc = -ENOMEM; in bnxt_init_board()
13713 bp->bar2 = pci_ioremap_bar(pdev, 4); in bnxt_init_board()
13714 if (!bp->bar2) { in bnxt_init_board()
13715 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n"); in bnxt_init_board()
13716 rc = -ENOMEM; in bnxt_init_board()
13720 INIT_WORK(&bp->sp_task, bnxt_sp_task); in bnxt_init_board()
13721 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task); in bnxt_init_board()
13723 spin_lock_init(&bp->ntp_fltr_lock); in bnxt_init_board()
13725 spin_lock_init(&bp->db_lock); in bnxt_init_board()
13728 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE; in bnxt_init_board()
13729 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE; in bnxt_init_board()
13731 timer_setup(&bp->timer, bnxt_timer, 0); in bnxt_init_board()
13732 bp->current_interval = BNXT_TIMER_INTERVAL; in bnxt_init_board()
13734 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; in bnxt_init_board()
13735 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; in bnxt_init_board()
13737 clear_bit(BNXT_STATE_OPEN, &bp->state); in bnxt_init_board()
13758 if (!is_valid_ether_addr(addr->sa_data)) in bnxt_change_mac_addr()
13759 return -EADDRNOTAVAIL; in bnxt_change_mac_addr()
13761 if (ether_addr_equal(addr->sa_data, dev->dev_addr)) in bnxt_change_mac_addr()
13764 rc = bnxt_approve_mac(bp, addr->sa_data, true); in bnxt_change_mac_addr()
13768 eth_hw_addr_set(dev, addr->sa_data); in bnxt_change_mac_addr()
13785 dev->mtu = new_mtu; in bnxt_change_mtu()
13800 if (tc > bp->max_tc) { in bnxt_setup_mq_tc()
13802 tc, bp->max_tc); in bnxt_setup_mq_tc()
13803 return -EINVAL; in bnxt_setup_mq_tc()
13806 if (bp->num_tc == tc) in bnxt_setup_mq_tc()
13809 if (bp->flags & BNXT_FLAG_SHARED_RINGS) in bnxt_setup_mq_tc()
13812 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, in bnxt_setup_mq_tc()
13813 sh, tc, bp->tx_nr_rings_xdp); in bnxt_setup_mq_tc()
13817 /* Needs to close the device and do hw resource re-allocations */ in bnxt_setup_mq_tc()
13818 if (netif_running(bp->dev)) in bnxt_setup_mq_tc()
13822 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc; in bnxt_setup_mq_tc()
13824 bp->num_tc = tc; in bnxt_setup_mq_tc()
13826 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; in bnxt_setup_mq_tc()
13828 bp->num_tc = 0; in bnxt_setup_mq_tc()
13830 bp->tx_nr_rings += bp->tx_nr_rings_xdp; in bnxt_setup_mq_tc()
13831 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings); in bnxt_setup_mq_tc()
13832 bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) : in bnxt_setup_mq_tc()
13833 tx_cp + bp->rx_nr_rings; in bnxt_setup_mq_tc()
13835 if (netif_running(bp->dev)) in bnxt_setup_mq_tc()
13847 !tc_cls_can_offload_and_chain0(bp->dev, type_data)) in bnxt_setup_tc_block_cb()
13848 return -EOPNOTSUPP; in bnxt_setup_tc_block_cb()
13852 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data); in bnxt_setup_tc_block_cb()
13854 return -EOPNOTSUPP; in bnxt_setup_tc_block_cb()
13874 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; in bnxt_setup_tc()
13876 return bnxt_setup_mq_tc(dev, mqprio->num_tc); in bnxt_setup_tc()
13879 return -EOPNOTSUPP; in bnxt_setup_tc()
13891 vnic = &bp->vnic_info[0]; in bnxt_get_ntp_filter_idx()
13892 return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key); in bnxt_get_ntp_filter_idx()
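bnxt_get_ntp_filter_idx() hashes the flow tuple with the Toeplitz function seeded by the VNIC's RSS hash key, so software filter placement matches the NIC's RSS spreading. A generic, self-contained Toeplitz implementation for reference (this is the standard algorithm, not the driver's bnxt_toeplitz() body; the key must hold at least input length + 4 bytes):

    #include <stdint.h>
    #include <stdio.h>

    /* Generic Toeplitz hash over `len` input bytes. For every input bit set
     * (MSB first) the current 32-bit key window is XORed in, then the window
     * slides left by one key bit.
     */
    static uint32_t toeplitz(const uint8_t *key, const uint8_t *in, int len)
    {
    	uint32_t window = ((uint32_t)key[0] << 24) | ((uint32_t)key[1] << 16) |
    			  ((uint32_t)key[2] << 8) | key[3];
    	uint32_t result = 0;

    	for (int t = 0; t < len * 8; t++) {
    		int kb = 32 + t;	/* next key bit to slide in */

    		if (in[t / 8] & (0x80 >> (t % 8)))
    			result ^= window;
    		window = (window << 1) | ((key[kb / 8] >> (7 - kb % 8)) & 1);
    	}
    	return result;
    }

    int main(void)
    {
    	uint8_t key[44] = { 0x6d, 0x5a, 0x56, 0xda };	/* rest zero */
    	uint8_t tuple[12] = { 192, 168, 0, 1, 10, 0, 0, 2,
    			      0x04, 0xd2, 0x00, 0x50 };	/* saddr daddr sp dp */

    	printf("0x%08x\n", (unsigned)toeplitz(key, tuple, 12));
    	return 0;
    }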
13901 spin_lock_bh(&bp->ntp_fltr_lock); in bnxt_insert_ntp_filter()
13902 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, BNXT_MAX_FLTR, 0); in bnxt_insert_ntp_filter()
13904 spin_unlock_bh(&bp->ntp_fltr_lock); in bnxt_insert_ntp_filter()
13905 return -ENOMEM; in bnxt_insert_ntp_filter()
13908 fltr->base.sw_id = (u16)bit_id; in bnxt_insert_ntp_filter()
13909 fltr->base.type = BNXT_FLTR_TYPE_NTUPLE; in bnxt_insert_ntp_filter()
13910 fltr->base.flags |= BNXT_ACT_RING_DST; in bnxt_insert_ntp_filter()
13911 head = &bp->ntp_fltr_hash_tbl[idx]; in bnxt_insert_ntp_filter()
13912 hlist_add_head_rcu(&fltr->base.hash, head); in bnxt_insert_ntp_filter()
13913 set_bit(BNXT_FLTR_INSERTED, &fltr->base.state); in bnxt_insert_ntp_filter()
13914 bp->ntp_fltr_count++; in bnxt_insert_ntp_filter()
13915 spin_unlock_bh(&bp->ntp_fltr_lock); in bnxt_insert_ntp_filter()
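bnxt_insert_ntp_filter() allocates the filter's sw_id from a bitmap and links the node into an RCU hash list, all under ntp_fltr_lock with BH disabled. A reduced sketch of the find-first-zero allocator half (no locking or RCU here; sizes are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_FLTR 64

    static uint64_t fltr_bmap;	/* one bit per allocated filter ID */

    /* Find-first-zero allocator, like bitmap_find_free_region(..., order 0). */
    static int alloc_fltr_id(void)
    {
    	for (int i = 0; i < MAX_FLTR; i++) {
    		if (!(fltr_bmap & (1ULL << i))) {
    			fltr_bmap |= 1ULL << i;
    			return i;
    		}
    	}
    	return -1;	/* table full; the driver returns -ENOMEM */
    }

    static void free_fltr_id(int id)
    {
    	fltr_bmap &= ~(1ULL << id);
    }

    int main(void)
    {
    	printf("%d %d\n", alloc_fltr_id(), alloc_fltr_id());	/* 0 1 */
    	free_fltr_id(0);
    	printf("%d\n", alloc_fltr_id());			/* 0 again */
    	return 0;
    }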
13922 struct flow_keys *keys1 = &f1->fkeys; in bnxt_fltr_match()
13923 struct flow_keys *keys2 = &f2->fkeys; in bnxt_fltr_match()
13925 if (f1->ntuple_flags != f2->ntuple_flags) in bnxt_fltr_match()
13928 if (keys1->basic.n_proto != keys2->basic.n_proto || in bnxt_fltr_match()
13929 keys1->basic.ip_proto != keys2->basic.ip_proto) in bnxt_fltr_match()
13932 if (keys1->basic.n_proto == htons(ETH_P_IP)) { in bnxt_fltr_match()
13933 if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) && in bnxt_fltr_match()
13934 keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src) || in bnxt_fltr_match()
13935 ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) && in bnxt_fltr_match()
13936 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)) in bnxt_fltr_match()
13939 if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) && in bnxt_fltr_match()
13940 memcmp(&keys1->addrs.v6addrs.src, in bnxt_fltr_match()
13941 &keys2->addrs.v6addrs.src, in bnxt_fltr_match()
13942 sizeof(keys1->addrs.v6addrs.src))) || in bnxt_fltr_match()
13943 ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) && in bnxt_fltr_match()
13944 memcmp(&keys1->addrs.v6addrs.dst, in bnxt_fltr_match()
13945 &keys2->addrs.v6addrs.dst, in bnxt_fltr_match()
13946 sizeof(keys1->addrs.v6addrs.dst)))) in bnxt_fltr_match()
13950 if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) && in bnxt_fltr_match()
13951 keys1->ports.src != keys2->ports.src) || in bnxt_fltr_match()
13952 ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) && in bnxt_fltr_match()
13953 keys1->ports.dst != keys2->ports.dst)) in bnxt_fltr_match()
13956 if (keys1->control.flags == keys2->control.flags && in bnxt_fltr_match()
13957 f1->l2_fltr == f2->l2_fltr) in bnxt_fltr_match()
13970 head = &bp->ntp_fltr_hash_tbl[idx]; in bnxt_lookup_ntp_filter_from_idx()
13990 if (ether_addr_equal(dev->dev_addr, eth->h_dest)) { in bnxt_rx_flow_steer()
13991 l2_fltr = bp->vnic_info[0].l2_filters[0]; in bnxt_rx_flow_steer()
13992 atomic_inc(&l2_fltr->refcnt); in bnxt_rx_flow_steer()
13996 ether_addr_copy(key.dst_mac_addr, eth->h_dest); in bnxt_rx_flow_steer()
14000 return -EINVAL; in bnxt_rx_flow_steer()
14001 if (l2_fltr->base.flags & BNXT_ACT_FUNC_DST) { in bnxt_rx_flow_steer()
14003 return -EINVAL; in bnxt_rx_flow_steer()
14009 return -ENOMEM; in bnxt_rx_flow_steer()
14012 fkeys = &new_fltr->fkeys; in bnxt_rx_flow_steer()
14014 rc = -EPROTONOSUPPORT; in bnxt_rx_flow_steer()
14018 if ((fkeys->basic.n_proto != htons(ETH_P_IP) && in bnxt_rx_flow_steer()
14019 fkeys->basic.n_proto != htons(ETH_P_IPV6)) || in bnxt_rx_flow_steer()
14020 ((fkeys->basic.ip_proto != IPPROTO_TCP) && in bnxt_rx_flow_steer()
14021 (fkeys->basic.ip_proto != IPPROTO_UDP))) { in bnxt_rx_flow_steer()
14022 rc = -EPROTONOSUPPORT; in bnxt_rx_flow_steer()
14025 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) && in bnxt_rx_flow_steer()
14026 bp->hwrm_spec_code < 0x10601) { in bnxt_rx_flow_steer()
14027 rc = -EPROTONOSUPPORT; in bnxt_rx_flow_steer()
14030 flags = fkeys->control.flags; in bnxt_rx_flow_steer()
14032 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) { in bnxt_rx_flow_steer()
14033 rc = -EPROTONOSUPPORT; in bnxt_rx_flow_steer()
14037 new_fltr->l2_fltr = l2_fltr; in bnxt_rx_flow_steer()
14038 new_fltr->ntuple_flags = BNXT_NTUPLE_MATCH_ALL; in bnxt_rx_flow_steer()
14044 rc = fltr->base.sw_id; in bnxt_rx_flow_steer()
14050 new_fltr->flow_id = flow_id; in bnxt_rx_flow_steer()
14051 new_fltr->base.rxq = rxq_index; in bnxt_rx_flow_steer()
14055 return new_fltr->base.sw_id; in bnxt_rx_flow_steer()
14067 spin_lock_bh(&bp->ntp_fltr_lock); in bnxt_del_ntp_filter()
14068 if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) { in bnxt_del_ntp_filter()
14069 spin_unlock_bh(&bp->ntp_fltr_lock); in bnxt_del_ntp_filter()
14072 hlist_del_rcu(&fltr->base.hash); in bnxt_del_ntp_filter()
14073 bp->ntp_fltr_count--; in bnxt_del_ntp_filter()
14074 spin_unlock_bh(&bp->ntp_fltr_lock); in bnxt_del_ntp_filter()
14075 bnxt_del_l2_filter(bp, fltr->l2_fltr); in bnxt_del_ntp_filter()
14076 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap); in bnxt_del_ntp_filter()
14091 head = &bp->ntp_fltr_hash_tbl[i]; in bnxt_cfg_ntp_filters()
14095 if (test_bit(BNXT_FLTR_VALID, &fltr->base.state)) { in bnxt_cfg_ntp_filters()
14096 if (fltr->base.flags & BNXT_ACT_NO_AGING) in bnxt_cfg_ntp_filters()
14098 if (rps_may_expire_flow(bp->dev, fltr->base.rxq, in bnxt_cfg_ntp_filters()
14099 fltr->flow_id, in bnxt_cfg_ntp_filters()
14100 fltr->base.sw_id)) { in bnxt_cfg_ntp_filters()
14111 set_bit(BNXT_FLTR_VALID, &fltr->base.state); in bnxt_cfg_ntp_filters()
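bnxt_cfg_ntp_filters() also ages entries: once a filter is marked valid (programmed into hardware), it is torn down when rps_may_expire_flow() reports that the stack no longer steers that flow to the recorded ring. A condensed sketch of the aging pass (types and the expiry predicate are stand-ins):

    #include <stdbool.h>
    #include <stdio.h>

    struct ntp_filter {
    	bool valid;		/* already programmed into hardware */
    	bool no_aging;
    	int rxq, flow_id, sw_id;
    	struct ntp_filter *next;
    };

    /* Stub for rps_may_expire_flow(): stack no longer steers this flow here. */
    static bool flow_expired(const struct ntp_filter *f)
    {
    	return f->sw_id % 2;	/* arbitrary for the demo */
    }

    static void age_filters(struct ntp_filter *head)
    {
    	for (struct ntp_filter *f = head; f; f = f->next) {
    		if (!f->valid || f->no_aging)
    			continue;
    		if (flow_expired(f))
    			printf("expire filter %d\n", f->sw_id);
    	}
    }

    int main(void)
    {
    	struct ntp_filter b = { .valid = true, .sw_id = 1 };
    	struct ntp_filter a = { .valid = true, .sw_id = 0, .next = &b };

    	age_filters(&a);
    	return 0;
    }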
14127 if (ti->type == UDP_TUNNEL_TYPE_VXLAN) in bnxt_udp_tunnel_set_port()
14129 else if (ti->type == UDP_TUNNEL_TYPE_GENEVE) in bnxt_udp_tunnel_set_port()
14134 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd); in bnxt_udp_tunnel_set_port()
14143 if (ti->type == UDP_TUNNEL_TYPE_VXLAN) in bnxt_udp_tunnel_unset_port()
14145 else if (ti->type == UDP_TUNNEL_TYPE_GENEVE) in bnxt_udp_tunnel_unset_port()
14180 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0, in bnxt_bridge_getlink()
14191 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp)) in bnxt_bridge_setlink()
14192 return -EOPNOTSUPP; in bnxt_bridge_setlink()
14196 return -EINVAL; in bnxt_bridge_setlink()
14205 if (mode == bp->br_mode) in bnxt_bridge_setlink()
14210 bp->br_mode = mode; in bnxt_bridge_setlink()
14221 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) in bnxt_get_port_parent_id()
14222 return -EOPNOTSUPP; in bnxt_get_port_parent_id()
14224 /* The PF and its VF-reps only support the switchdev framework */ in bnxt_get_port_parent_id()
14225 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID)) in bnxt_get_port_parent_id()
14226 return -EOPNOTSUPP; in bnxt_get_port_parent_id()
14228 ppid->id_len = sizeof(bp->dsn); in bnxt_get_port_parent_id()
14229 memcpy(ppid->id, bp->dsn, ppid->id_len); in bnxt_get_port_parent_id()
14281 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); in bnxt_remove_one()
14283 cancel_work_sync(&bp->sp_task); in bnxt_remove_one()
14284 cancel_delayed_work_sync(&bp->fw_reset_task); in bnxt_remove_one()
14285 bp->sp_event = 0; in bnxt_remove_one()
14297 kfree(bp->ptp_cfg); in bnxt_remove_one()
14298 bp->ptp_cfg = NULL; in bnxt_remove_one()
14299 kfree(bp->fw_health); in bnxt_remove_one()
14300 bp->fw_health = NULL; in bnxt_remove_one()
14303 kfree(bp->rss_indir_tbl); in bnxt_remove_one()
14304 bp->rss_indir_tbl = NULL; in bnxt_remove_one()
14312 struct bnxt_link_info *link_info = &bp->link_info; in bnxt_probe_phy()
14314 bp->phy_flags = 0; in bnxt_probe_phy()
14317 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n", in bnxt_probe_phy()
14321 if (bp->phy_flags & BNXT_PHY_FL_NO_FCS) in bnxt_probe_phy()
14322 bp->dev->priv_flags |= IFF_SUPP_NOFCS; in bnxt_probe_phy()
14324 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS; in bnxt_probe_phy()
14328 mutex_lock(&bp->link_lock); in bnxt_probe_phy()
14331 mutex_unlock(&bp->link_lock); in bnxt_probe_phy()
14332 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n", in bnxt_probe_phy()
14340 if (link_info->auto_link_speeds && !link_info->support_auto_speeds) in bnxt_probe_phy()
14341 link_info->support_auto_speeds = link_info->support_speeds; in bnxt_probe_phy()
14344 mutex_unlock(&bp->link_lock); in bnxt_probe_phy()
14352 if (!pdev->msix_cap) in bnxt_get_max_irq()
14355 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl); in bnxt_get_max_irq()
14362 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; in _bnxt_get_max_rings()
14365 *max_tx = hw_resc->max_tx_rings; in _bnxt_get_max_rings()
14366 *max_rx = hw_resc->max_rx_rings; in _bnxt_get_max_rings()
14368 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) - in _bnxt_get_max_rings()
14370 hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp)); in _bnxt_get_max_rings()
14371 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) in _bnxt_get_max_rings()
14373 max_ring_grps = hw_resc->max_hw_ring_grps; in _bnxt_get_max_rings()
14375 *max_cp -= 1; in _bnxt_get_max_rings()
14376 *max_rx -= 2; in _bnxt_get_max_rings()
14378 if (bp->flags & BNXT_FLAG_AGG_RINGS) in _bnxt_get_max_rings()
14380 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { in _bnxt_get_max_rings()
14396 int rx, tx, cp; in bnxt_get_max_rings() local
14398 _bnxt_get_max_rings(bp, &rx, &tx, &cp); in bnxt_get_max_rings()
14400 *max_tx = tx; in bnxt_get_max_rings()
14401 if (!rx || !tx || !cp) in bnxt_get_max_rings()
14402 return -ENOMEM; in bnxt_get_max_rings()
14413 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) { in bnxt_get_dflt_rings()
14415 bp->flags &= ~BNXT_FLAG_AGG_RINGS; in bnxt_get_dflt_rings()
14419 bp->flags |= BNXT_FLAG_AGG_RINGS; in bnxt_get_dflt_rings()
14422 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; in bnxt_get_dflt_rings()
14423 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); in bnxt_get_dflt_rings()
14424 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); in bnxt_get_dflt_rings()
14428 if (bp->flags & BNXT_FLAG_ROCE_CAP) { in bnxt_get_dflt_rings()
14440 max_cp -= BNXT_MIN_ROCE_CP_RINGS; in bnxt_get_dflt_rings()
14441 max_irq -= BNXT_MIN_ROCE_CP_RINGS; in bnxt_get_dflt_rings()
14442 max_stat -= BNXT_MIN_ROCE_STAT_CTXS; in bnxt_get_dflt_rings()
14453 * RX/TX ring pair.
14457 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings); in bnxt_trim_dflt_sh_rings()
14458 bp->rx_nr_rings = bp->cp_nr_rings; in bnxt_trim_dflt_sh_rings()
14459 bp->tx_nr_rings_per_tc = bp->cp_nr_rings; in bnxt_trim_dflt_sh_rings()
14460 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; in bnxt_trim_dflt_sh_rings()
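With shared completion rings, one CP ring serves an RX/TX pair, so bnxt_trim_dflt_sh_rings() collapses the default counts to min(tx, rx). A tiny standalone illustration:

    #include <stdio.h>

    /* One CP ring per RX/TX pair: trim both sides to the smaller count. */
    static int trim_shared(int *tx, int *rx)
    {
    	int cp = *tx < *rx ? *tx : *rx;

    	*tx = cp;
    	*rx = cp;
    	return cp;
    }

    int main(void)
    {
    	int tx = 8, rx = 4;

    	printf("cp=%d tx=%d rx=%d\n", trim_shared(&tx, &rx), tx, rx);
    	return 0;
    }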
14471 bp->flags |= BNXT_FLAG_SHARED_RINGS; in bnxt_set_dflt_rings()
14473 /* Reduce default rings on multi-port cards so that total default in bnxt_set_dflt_rings()
14476 if (bp->port_count > 1) { in bnxt_set_dflt_rings()
14478 max_t(int, num_online_cpus() / bp->port_count, 1); in bnxt_set_dflt_rings()
14485 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings); in bnxt_set_dflt_rings()
14486 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings); in bnxt_set_dflt_rings()
14490 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings; in bnxt_set_dflt_rings()
14491 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; in bnxt_set_dflt_rings()
14494 if (rc && rc != -ENODEV) in bnxt_set_dflt_rings()
14495 netdev_warn(bp->dev, "Unable to reserve tx rings\n"); in bnxt_set_dflt_rings()
14496 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; in bnxt_set_dflt_rings()
14500 /* Rings may have been trimmed, re-reserve the trimmed rings. */ in bnxt_set_dflt_rings()
14503 if (rc && rc != -ENODEV) in bnxt_set_dflt_rings()
14504 netdev_warn(bp->dev, "2nd rings reservation failed.\n"); in bnxt_set_dflt_rings()
14505 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; in bnxt_set_dflt_rings()
14508 bp->rx_nr_rings++; in bnxt_set_dflt_rings()
14509 bp->cp_nr_rings++; in bnxt_set_dflt_rings()
14512 bp->tx_nr_rings = 0; in bnxt_set_dflt_rings()
14513 bp->rx_nr_rings = 0; in bnxt_set_dflt_rings()
14522 if (bp->tx_nr_rings) in bnxt_init_dflt_ring_mode()
14529 if (BNXT_VF(bp) && rc == -ENODEV) in bnxt_init_dflt_ring_mode()
14530 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n"); in bnxt_init_dflt_ring_mode()
14532 netdev_err(bp->dev, "Not enough rings available.\n"); in bnxt_init_dflt_ring_mode()
14539 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; in bnxt_init_dflt_ring_mode()
14555 if (netif_running(bp->dev)) in bnxt_restore_pf_fw_resources()
14563 if (netif_running(bp->dev)) { in bnxt_restore_pf_fw_resources()
14565 dev_close(bp->dev); in bnxt_restore_pf_fw_resources()
14578 eth_hw_addr_set(bp->dev, bp->pf.mac_addr); in bnxt_init_mac_addr()
14581 struct bnxt_vf_info *vf = &bp->vf; in bnxt_init_mac_addr()
14584 if (is_valid_ether_addr(vf->mac_addr)) { in bnxt_init_mac_addr()
14586 eth_hw_addr_set(bp->dev, vf->mac_addr); in bnxt_init_mac_addr()
14592 eth_hw_addr_random(bp->dev); in bnxt_init_mac_addr()
14594 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval); in bnxt_init_mac_addr()
14602 struct pci_dev *pdev = bp->pdev; in bnxt_vpd_read_info()
14618 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1); in bnxt_vpd_read_info()
14619 memcpy(bp->board_partno, &vpd_data[pos], size); in bnxt_vpd_read_info()
14628 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1); in bnxt_vpd_read_info()
14629 memcpy(bp->board_serialno, &vpd_data[pos], size); in bnxt_vpd_read_info()
14636 struct pci_dev *pdev = bp->pdev; in bnxt_pcie_dsn_get()
14641 netdev_info(bp->dev, "Unable to read adapter's DSN\n"); in bnxt_pcie_dsn_get()
14642 return -EOPNOTSUPP; in bnxt_pcie_dsn_get()
14647 bp->flags |= BNXT_FLAG_DSN_VALID; in bnxt_pcie_dsn_get()
14653 if (!bp->db_size) in bnxt_map_db_bar()
14654 return -ENODEV; in bnxt_map_db_bar()
14655 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size); in bnxt_map_db_bar()
14656 if (!bp->bar1) in bnxt_map_db_bar()
14657 return -ENOMEM; in bnxt_map_db_bar()
14663 netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n", in bnxt_print_device_info()
14664 board_info[bp->board_idx].name, in bnxt_print_device_info()
14665 (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr); in bnxt_print_device_info()
14667 pcie_print_link_status(bp->pdev); in bnxt_print_device_info()
14677 return -ENODEV; in bnxt_init_one()
14691 return -ENOMEM; in bnxt_init_one()
14694 bp->board_idx = ent->driver_data; in bnxt_init_one()
14695 bp->msg_enable = BNXT_DEF_MSG_ENABLE; in bnxt_init_one()
14698 if (bnxt_vf_pciid(bp->board_idx)) in bnxt_init_one()
14699 bp->flags |= BNXT_FLAG_VF; in bnxt_init_one()
14703 SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port); in bnxt_init_one()
14705 if (pdev->msix_cap) in bnxt_init_one()
14706 bp->flags |= BNXT_FLAG_MSIX_CAP; in bnxt_init_one()
14712 dev->netdev_ops = &bnxt_netdev_ops; in bnxt_init_one()
14713 dev->watchdog_timeo = BNXT_TX_TIMEOUT; in bnxt_init_one()
14714 dev->ethtool_ops = &bnxt_ethtool_ops; in bnxt_init_one()
14721 mutex_init(&bp->hwrm_cmd_lock); in bnxt_init_one()
14722 mutex_init(&bp->link_lock); in bnxt_init_one()
14732 bp->flags |= BNXT_FLAG_CHIP_P5_PLUS; in bnxt_init_one()
14734 bp->flags |= BNXT_FLAG_CHIP_P7; in bnxt_init_one()
14747 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n", in bnxt_init_one()
14752 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | in bnxt_init_one()
14759 if (bp->flags & BNXT_FLAG_UDP_GSO_CAP) in bnxt_init_one()
14760 dev->hw_features |= NETIF_F_GSO_UDP_L4; in bnxt_init_one()
14763 dev->hw_features |= NETIF_F_LRO; in bnxt_init_one()
14765 dev->hw_enc_features = in bnxt_init_one()
14771 if (bp->flags & BNXT_FLAG_UDP_GSO_CAP) in bnxt_init_one()
14772 dev->hw_enc_features |= NETIF_F_GSO_UDP_L4; in bnxt_init_one()
14773 if (bp->flags & BNXT_FLAG_CHIP_P7) in bnxt_init_one()
14774 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels_p7; in bnxt_init_one()
14776 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels; in bnxt_init_one()
14778 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM | in bnxt_init_one()
14780 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA; in bnxt_init_one()
14781 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP) in bnxt_init_one()
14782 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX; in bnxt_init_one()
14783 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT) in bnxt_init_one()
14784 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX; in bnxt_init_one()
14786 dev->hw_features |= NETIF_F_GRO_HW; in bnxt_init_one()
14787 dev->features |= dev->hw_features | NETIF_F_HIGHDMA; in bnxt_init_one()
14788 if (dev->features & NETIF_F_GRO_HW) in bnxt_init_one()
14789 dev->features &= ~NETIF_F_LRO; in bnxt_init_one()
14790 dev->priv_flags |= IFF_UNICAST_FLT; in bnxt_init_one()
14794 dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | in bnxt_init_one()
14798 init_waitqueue_head(&bp->sriov_cfg_wait); in bnxt_init_one()
14801 bp->gro_func = bnxt_gro_func_5730x; in bnxt_init_one()
14803 bp->gro_func = bnxt_gro_func_5731x; in bnxt_init_one()
14805 bp->gro_func = bnxt_gro_func_5750x; in bnxt_init_one()
14808 bp->flags |= BNXT_FLAG_DOUBLE_DB; in bnxt_init_one()
14812 dev_err(&pdev->dev, "Unable to initialize mac address.\n"); in bnxt_init_one()
14813 rc = -EADDRNOTAVAIL; in bnxt_init_one()
14819 rc = bnxt_pcie_dsn_get(bp, bp->dsn); in bnxt_init_one()
14822 /* MTU range: 60 - FW defined max */ in bnxt_init_one()
14823 dev->min_mtu = ETH_ZLEN; in bnxt_init_one()
14824 dev->max_mtu = bp->max_mtu; in bnxt_init_one()
14836 if (BNXT_VF(bp) && rc == -ENODEV) { in bnxt_init_one()
14837 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n"); in bnxt_init_one()
14839 netdev_err(bp->dev, "Not enough rings available.\n"); in bnxt_init_one()
14840 rc = -ENOMEM; in bnxt_init_one()
14849 if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX) in bnxt_init_one()
14850 bp->flags |= BNXT_FLAG_STRIP_VLAN; in bnxt_init_one()
14857 * limited MSIX, so we re-initialize the TX rings per TC. in bnxt_init_one()
14859 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; in bnxt_init_one()
14866 dev_err(&pdev->dev, "Unable to create workqueue.\n"); in bnxt_init_one()
14867 rc = -ENOMEM; in bnxt_init_one()
14907 kfree(bp->ptp_cfg); in bnxt_init_one()
14908 bp->ptp_cfg = NULL; in bnxt_init_one()
14909 kfree(bp->fw_health); in bnxt_init_one()
14910 bp->fw_health = NULL; in bnxt_init_one()
14913 kfree(bp->rss_indir_tbl); in bnxt_init_one()
14914 bp->rss_indir_tbl = NULL; in bnxt_init_one()
14941 pci_wake_from_d3(pdev, bp->wol); in bnxt_shutdown()
14963 pci_disable_device(bp->pdev); in bnxt_suspend()
14976 rc = pci_enable_device(bp->pdev); in bnxt_resume()
14978 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n", in bnxt_resume()
14982 pci_set_master(bp->pdev); in bnxt_resume()
14984 rc = -ENODEV; in bnxt_resume()
14989 rc = -EBUSY; in bnxt_resume()
15000 rc = -ENODEV; in bnxt_resume()
15029 * bnxt_io_error_detected - called when PCI error is detected
15055 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state); in bnxt_io_error_detected()
15070 * bnxt_io_slot_reset - called after the pci bus has been reset.
15073 * Restart the card from scratch, as if from a cold-boot.
15087 netdev_info(bp->dev, "PCI Slot Reset\n"); in bnxt_io_slot_reset()
15092 dev_err(&pdev->dev, in bnxt_io_slot_reset()
15093 "Cannot re-enable PCI device after reset.\n"); in bnxt_io_slot_reset()
15100 * As pci_restore_state() does not re-write the BARs if the in bnxt_io_slot_reset()
15105 &bp->state)) { in bnxt_io_slot_reset()
15108 pci_write_config_dword(bp->pdev, off, 0); in bnxt_io_slot_reset()
15127 dev_err(&pdev->dev, "Firmware not ready\n"); in bnxt_io_slot_reset()
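The slot-reset path zeroes the BARs before pci_restore_state() because restore skips registers whose current value already matches the saved one; writing zero forces every BAR to be rewritten after a fatal error. A minimal illustration of that zero-the-BARs loop (config write stubbed):

    #include <stdint.h>
    #include <stdio.h>

    #define PCI_BASE_ADDRESS_0 0x10
    #define PCI_BASE_ADDRESS_5 0x24

    static void cfg_write32(uint32_t off, uint32_t val)	/* stub */
    {
    	printf("cfg[0x%02x] <- 0x%08x\n", (unsigned)off, (unsigned)val);
    }

    /* Zero all six 32-bit BARs so a later pci_restore_state() rewrites them. */
    static void clear_bars(void)
    {
    	for (uint32_t off = PCI_BASE_ADDRESS_0;
    	     off <= PCI_BASE_ADDRESS_5; off += 4)
    		cfg_write32(off, 0);
    }

    int main(void)
    {
    	clear_bars();
    	return 0;
    }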
15149 * bnxt_io_resume - called when traffic can start flowing again.
15161 netdev_info(bp->dev, "PCI Slot Resume\n"); in bnxt_io_resume()