/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2025 Broadcom */

#ifndef _BNGE_TXRX_H_
#define _BNGE_TXRX_H_

#include <linux/bnge/hsi.h>
#include "bnge_netdev.h"
9
bnge_tx_avail(struct bnge_net * bn,const struct bnge_tx_ring_info * txr)10 static inline u32 bnge_tx_avail(struct bnge_net *bn,
11 const struct bnge_tx_ring_info *txr)
12 {
13 u32 used = READ_ONCE(txr->tx_prod) - READ_ONCE(txr->tx_cons);
14
15 return bn->tx_ring_size - (used & bn->tx_ring_mask);
16 }
17
/* 64-bit MMIO write with relaxed ordering (no explicit barrier).
 *
 * On 32-bit builds a 64-bit store is not a single atomic access, so the
 * low/high word pair is written under bd->db_lock to keep concurrent
 * doorbell writes from interleaving.  On 64-bit builds writeq_relaxed()
 * is used directly, with no lock needed.
 */
static inline void bnge_writeq_relaxed(struct bnge_dev *bd, u64 val,
				       void __iomem *addr)
{
#if BITS_PER_LONG == 32
	spin_lock(&bd->db_lock);
	lo_hi_writeq_relaxed(val, addr);
	spin_unlock(&bd->db_lock);
#else
	writeq_relaxed(val, addr);
#endif
}

30 /* For TX and RX ring doorbells with no ordering guarantee*/
bnge_db_write_relaxed(struct bnge_net * bn,struct bnge_db_info * db,u32 idx)31 static inline void bnge_db_write_relaxed(struct bnge_net *bn,
32 struct bnge_db_info *db, u32 idx)
33 {
34 bnge_writeq_relaxed(bn->bd, db->db_key64 | DB_RING_IDX(db, idx),
35 db->doorbell);
36 }
37
/* Layout of the 32-bit "opaque" cookie built by SET_TX_OPAQUE() and
 * decoded from TX completions:
 *   bits 31-24: NAPI/ring index     (TX_OPAQUE_RING)
 *   bits 23-16: BD count of packet  (TX_OPAQUE_BDS)
 *   bits 15-0:  producer index      (TX_OPAQUE_IDX)
 */
#define TX_OPAQUE_IDX_MASK	0x0000ffff
#define TX_OPAQUE_BDS_MASK	0x00ff0000
#define TX_OPAQUE_BDS_SHIFT	16
#define TX_OPAQUE_RING_MASK	0xff000000
#define TX_OPAQUE_RING_SHIFT	24

#define SET_TX_OPAQUE(bn, txr, idx, bds)				\
	(((txr)->tx_napi_idx << TX_OPAQUE_RING_SHIFT) |			\
	 ((bds) << TX_OPAQUE_BDS_SHIFT) | ((idx) & (bn)->tx_ring_mask))

#define TX_OPAQUE_IDX(opq)	((opq) & TX_OPAQUE_IDX_MASK)
#define TX_OPAQUE_RING(opq)	(((opq) & TX_OPAQUE_RING_MASK) >>	\
				 TX_OPAQUE_RING_SHIFT)
#define TX_OPAQUE_BDS(opq)	(((opq) & TX_OPAQUE_BDS_MASK) >>	\
				 TX_OPAQUE_BDS_SHIFT)
/* Ring position just past the packet's BDs: start index + BD count,
 * wrapped into the TX ring.
 */
#define TX_OPAQUE_PROD(bn, opq)	((TX_OPAQUE_IDX(opq) + TX_OPAQUE_BDS(opq)) &\
				 (bn)->tx_ring_mask)
/* Encode a BD count into the BD_CNT field of a TX BD flags/type word. */
#define TX_BD_CNT(n)	(((n) << TX_BD_FLAGS_BD_CNT_SHIFT) & TX_BD_FLAGS_BD_CNT)

/* Hardware limit on BDs per packet; frags get 2 fewer (long first BD
 * plus head data BD).
 */
#define TX_MAX_BD_CNT	32

#define TX_MAX_FRAGS	(TX_MAX_BD_CNT - 2)

/* Minimum TX BDs for a TX packet with MAX_SKB_FRAGS + 1. We need one extra
 * BD because the first TX BD is always a long BD.
 */
#define BNGE_MIN_TX_DESC_CNT	(MAX_SKB_FRAGS + 2)

/* Map a raw ring index to its descriptor page: mask into the ring, then
 * divide by entries-per-page.  The (BNGE_PAGE_SHIFT - 4) shift implies
 * 16-byte descriptors — NOTE(review): confirm against the BD layout in
 * the HSI definitions.
 */
#define RX_RING(bn, x)	(((x) & (bn)->rx_ring_mask) >> (BNGE_PAGE_SHIFT - 4))
#define RX_AGG_RING(bn, x)	(((x) & (bn)->rx_agg_ring_mask) >>	\
				 (BNGE_PAGE_SHIFT - 4))
/* Offset of the descriptor within its page. */
#define RX_IDX(x)	((x) & (RX_DESC_CNT - 1))

#define TX_RING(bn, x)	(((x) & (bn)->tx_ring_mask) >> (BNGE_PAGE_SHIFT - 4))
#define TX_IDX(x)	((x) & (TX_DESC_CNT - 1))

#define CP_RING(x)	(((x) & ~(CP_DESC_CNT - 1)) >> (BNGE_PAGE_SHIFT - 4))
#define CP_IDX(x)	((x) & (CP_DESC_CNT - 1))

/* A completion entry is valid when its V bit matches the phase expected
 * from the raw consumer index: cp_bit flips on every pass around the
 * completion ring, so stale entries from the previous pass fail the
 * comparison.
 */
#define TX_CMP_VALID(bn, txcmp, raw_cons)				\
	(!!((txcmp)->tx_cmp_errors_v & cpu_to_le32(TX_CMP_V)) ==	\
	 !((raw_cons) & (bn)->cp_bit))

#define RX_CMP_VALID(bn, rxcmp1, raw_cons)				\
	(!!((rxcmp1)->rx_cmp_cfa_code_errors_v2 & cpu_to_le32(RX_CMP_V)) ==\
	 !((raw_cons) & (bn)->cp_bit))

#define RX_AGG_CMP_VALID(bn, agg, raw_cons)				\
	(!!((agg)->rx_agg_cmp_v & cpu_to_le32(RX_AGG_CMP_V)) ==		\
	 !((raw_cons) & (bn)->cp_bit))

#define NQ_CMP_VALID(bn, nqcmp, raw_cons)				\
	(!!((nqcmp)->v & cpu_to_le32(NQ_CN_V)) == !((raw_cons) & (bn)->cp_bit))

/* Extract the completion type field from a TX/RX completion record. */
#define TX_CMP_TYPE(txcmp)					\
	(le32_to_cpu((txcmp)->tx_cmp_flags_type) & CMP_TYPE)

#define RX_CMP_TYPE(rxcmp)					\
	(le32_to_cpu((rxcmp)->rx_cmp_len_flags_type) & RX_CMP_CMP_TYPE)

/* NEXT_* advance a free-running index; RING_* / SW_* mask it back into
 * the ring bounds.
 */
#define RING_RX(bn, idx)	((idx) & (bn)->rx_ring_mask)
#define NEXT_RX(idx)		((idx) + 1)

#define RING_RX_AGG(bn, idx)	((idx) & (bn)->rx_agg_ring_mask)
#define NEXT_RX_AGG(idx)	((idx) + 1)

#define SW_TX_RING(bn, idx)	((idx) & (bn)->tx_ring_mask)
#define NEXT_TX(idx)		((idx) + 1)

#define ADV_RAW_CMP(idx, n)	((idx) + (n))
#define NEXT_RAW_CMP(idx)	ADV_RAW_CMP(idx, 1)
#define RING_CMP(bn, idx)	((idx) & (bn)->cp_ring_mask)
#define NEXT_CMP(bn, idx)	RING_CMP(bn, ADV_RAW_CMP(idx, 1))

/* Packet ITYPE bits from the RX completion flags/type word. */
#define RX_CMP_ITYPES(rxcmp)					\
	(le32_to_cpu((rxcmp)->rx_cmp_len_flags_type) & RX_CMP_FLAGS_ITYPES_MASK)

/* CFA code field carried in the second RX completion record. */
#define RX_CMP_CFA_CODE(rxcmpl1)					\
	((le32_to_cpu((rxcmpl1)->rx_cmp_cfa_code_errors_v2) &		\
	  RX_CMPL_CFA_CODE_MASK) >> RX_CMPL_CFA_CODE_SFT)

/* TX/RX datapath entry points: MSI-X interrupt handler, transmit path,
 * RX buffer recycling, NAPI poll loop, and offload feature validation.
 */
irqreturn_t bnge_msix(int irq, void *dev_instance);
netdev_tx_t bnge_start_xmit(struct sk_buff *skb, struct net_device *dev);
void bnge_reuse_rx_data(struct bnge_rx_ring_info *rxr, u16 cons, void *data);
int bnge_napi_poll(struct napi_struct *napi, int budget);
netdev_features_t bnge_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features);
#endif /* _BNGE_TXRX_H_ */