xref: /linux/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h (revision 37a93dd5c49b5fda807fd204edf2547c3493319c)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
3 
4 #ifndef _HINIC3_TX_H_
5 #define _HINIC3_TX_H_
6 
7 #include <linux/bitops.h>
8 #include <linux/ip.h>
9 #include <linux/ipv6.h>
10 #include <linux/netdevice.h>
11 #include <net/checksum.h>
12 
/* UDP destination port identifying VXLAN-encapsulated frames (IANA 4789).
 * Built with cpu_to_be16() so it can be compared directly against the
 * big-endian udp_hdr(skb)->dest field.
 * NOTE(review): the _LE suffix looks inconsistent with cpu_to_be16() --
 * confirm the intended naming against the users in hinic3_tx.c.
 */
#define VXLAN_OFFLOAD_PORT_LE            cpu_to_be16(4789)
/* TCP "data offset" (doff) counts 32-bit words; shifting by 2 converts
 * between those 4-byte units and bytes.
 */
#define TCP_HDR_DATA_OFF_UNIT_SHIFT      2
/* Byte offset of the L4 header from the start of skb linear data. */
#define TRANSPORT_OFFSET(l4_hdr, skb)    ((l4_hdr) - (skb)->data)

/* Largest skb that may use a compact WQE (2^14 - 1; presumably the widest
 * length the compact descriptor can encode -- TODO confirm against HW spec).
 */
#define HINIC3_COMPACT_WQEE_SKB_MAX_LEN  16383
/* Maximum number of TX completions handled per poll round. */
#define HINIC3_TX_POLL_WEIGHT		 64
/* Default free-WQEBB thresholds for stopping/restarting the TX queue;
 * start > stop gives hysteresis so the queue does not flap.
 */
#define HINIC3_DEFAULT_STOP_THRS	 6
#define HINIC3_DEFAULT_START_THRS	 24
21 
/* Data format of an SQ WQE; only the normal format is used here. */
enum sq_wqe_data_format {
	SQ_NORMAL_WQE = 0,
};
25 
/* WQE encoding type, programmed via SQ_CTRL_EXTENDED_MASK: compact WQE
 * or extended WQE (the extended form carries a task section).
 */
enum sq_wqe_ec_type {
	SQ_WQE_COMPACT_TYPE  = 0,
	SQ_WQE_EXTENDED_TYPE = 1,
};
30 
/* Size of the task section in an extended WQE, programmed via
 * SQ_CTRL_TASKSECT_LEN_MASK: 46 bits packed inline vs. a separate
 * 16-byte section (struct hinic3_sq_task).
 * NOTE(review): exact 46-bit layout is hardware-defined -- confirm.
 */
enum sq_wqe_tasksect_len_type {
	SQ_WQE_TASKSECT_46BITS  = 0,
	SQ_WQE_TASKSECT_16BYTES = 1,
};
35 
/* Bitmask of TX offloads requested for a packet. */
enum hinic3_tx_offload_type {
	HINIC3_TX_OFFLOAD_TSO     = BIT(0), /* TCP segmentation offload */
	HINIC3_TX_OFFLOAD_CSUM    = BIT(1), /* checksum offload */
	HINIC3_TX_OFFLOAD_VLAN    = BIT(2), /* VLAN tag insertion */
	HINIC3_TX_OFFLOAD_INVALID = BIT(3), /* offload cannot be performed */
	HINIC3_TX_OFFLOAD_ESP     = BIT(4), /* IPsec ESP */
};
43 
/* Field layout of hinic3_sq_wqe_desc.ctrl_len. */
#define SQ_CTRL_BUFDESC_NUM_MASK   GENMASK(26, 19) /* # of buffer descriptors */
#define SQ_CTRL_TASKSECT_LEN_MASK  BIT(27)         /* sq_wqe_tasksect_len_type */
#define SQ_CTRL_DATA_FORMAT_MASK   BIT(28)         /* sq_wqe_data_format */
#define SQ_CTRL_EXTENDED_MASK      BIT(30)         /* sq_wqe_ec_type */
#define SQ_CTRL_OWNER_MASK         BIT(31)         /* ownership toggle bit */
/* Place @val into the ctrl_len field named @member (host byte order;
 * presumably converted with cpu_to_le32() at the use site -- confirm).
 */
#define SQ_CTRL_SET(val, member) \
	FIELD_PREP(SQ_CTRL_##member##_MASK, val)

/* Field layout of hinic3_sq_wqe_desc.queue_info. */
#define SQ_CTRL_QUEUE_INFO_PLDOFF_MASK  GENMASK(9, 2)   /* payload offset */
#define SQ_CTRL_QUEUE_INFO_UFO_MASK     BIT(10)         /* UDP fragmentation offload */
#define SQ_CTRL_QUEUE_INFO_TSO_MASK     BIT(11)         /* TCP segmentation offload */
#define SQ_CTRL_QUEUE_INFO_MSS_MASK     GENMASK(26, 13) /* MSS for segmentation */
#define SQ_CTRL_QUEUE_INFO_UC_MASK      BIT(28)         /* NOTE(review): semantics unclear from header -- confirm */

#define SQ_CTRL_QUEUE_INFO_SET(val, member) \
	FIELD_PREP(SQ_CTRL_QUEUE_INFO_##member##_MASK, val)
/* GET additionally converts the little-endian descriptor word to host order. */
#define SQ_CTRL_QUEUE_INFO_GET(val, member) \
	FIELD_GET(SQ_CTRL_QUEUE_INFO_##member##_MASK, le32_to_cpu(val))

/* Maximum value accepted by the PLDOFF field (hardware limit --
 * TODO confirm against HW spec).
 */
#define SQ_CTRL_MAX_PLDOFF  221
64 
/* Field layout of hinic3_sq_task.pkt_info0: per-packet offload enables
 * for inner/outer L3 and L4 checksum plus tunnel indication.
 */
#define SQ_TASK_INFO0_TUNNEL_FLAG_MASK  BIT(19) /* packet is tunnel-encapsulated */
#define SQ_TASK_INFO0_INNER_L4_EN_MASK  BIT(24) /* inner L4 checksum enable */
#define SQ_TASK_INFO0_INNER_L3_EN_MASK  BIT(25) /* inner L3 checksum enable */
#define SQ_TASK_INFO0_OUT_L4_EN_MASK    BIT(27) /* outer L4 checksum enable */
#define SQ_TASK_INFO0_OUT_L3_EN_MASK    BIT(28) /* outer L3 checksum enable */
#define SQ_TASK_INFO0_SET(val, member) \
	FIELD_PREP(SQ_TASK_INFO0_##member##_MASK, val)

/* Field layout of hinic3_sq_task.vlan_offload (VLAN insertion). */
#define SQ_TASK_INFO3_VLAN_TAG_MASK        GENMASK(15, 0)  /* VLAN TCI to insert */
#define SQ_TASK_INFO3_VLAN_TPID_MASK       GENMASK(18, 16) /* TPID selector */
#define SQ_TASK_INFO3_VLAN_TAG_VALID_MASK  BIT(19)         /* tag fields are valid */
#define SQ_TASK_INFO3_SET(val, member) \
	FIELD_PREP(SQ_TASK_INFO3_##member##_MASK, val)
78 
/* WQE control descriptor, the first section of every SQ WQE. All fields
 * are little-endian as consumed by hardware.
 */
struct hinic3_sq_wqe_desc {
	__le32 ctrl_len;   /* SQ_CTRL_* control bits; low bits presumably carry
			    * the first buffer length -- TODO confirm
			    */
	__le32 queue_info; /* SQ_CTRL_QUEUE_INFO_* fields (TSO/MSS/etc.) */
	__le32 hi_addr;    /* high 32 bits of first buffer DMA address */
	__le32 lo_addr;    /* low 32 bits of first buffer DMA address */
};
85 
/* 16-byte task section of an extended SQ WQE, carrying per-packet
 * offload metadata. Little-endian, hardware-consumed.
 */
struct hinic3_sq_task {
	__le32 pkt_info0;    /* SQ_TASK_INFO0_* checksum/tunnel enables */
	__le32 ip_identify;  /* IP identification -- presumably for TSO;
			      * TODO confirm at the use site
			      */
	__le32 rsvd;         /* reserved */
	__le32 vlan_offload; /* SQ_TASK_INFO3_* VLAN insertion fields */
};
92 
/* Software-side view of one WQE under construction: pointers to its
 * sections inside the SQ ring buffer plus the chosen encoding.
 */
struct hinic3_sq_wqe_combo {
	struct hinic3_sq_wqe_desc *ctrl_bd0;      /* control descriptor */
	struct hinic3_sq_task     *task;          /* task section (extended WQEs) */
	struct hinic3_sq_bufdesc  *bds_head;      /* first run of buffer descriptors */
	struct hinic3_sq_bufdesc  *bds_sec2;      /* second run -- presumably the
						   * continuation after a ring
						   * wrap; TODO confirm
						   */
	u16                       first_bds_num;  /* # of descriptors in first run */
	u32                       wqe_type;       /* sq_wqe_ec_type */
	u32                       task_type;      /* sq_wqe_tasksect_len_type */
};
102 
/* Per-TXQ statistics, protected by @syncp for consistent 64-bit reads
 * on 32-bit architectures.
 */
struct hinic3_txq_stats {
	u64                   packets;             /* packets transmitted */
	u64                   bytes;               /* bytes transmitted */
	u64                   busy;                /* queue-full stop events */
	u64                   dropped;             /* packets dropped */
	u64                   skb_pad_err;         /* skb padding failures */
	u64                   frag_len_overflow;   /* fragment length too large */
	u64                   offload_cow_skb_err; /* copy-on-write failures
						    * during offload prep
						    */
	u64                   map_frag_err;        /* DMA mapping failures */
	u64                   unknown_tunnel_pkt;  /* unrecognized tunnel type */
	u64                   frag_size_err;       /* fragment size errors */
	struct u64_stats_sync syncp;               /* stats read/write sync */
};
116 
/* One DMA mapping: bus address and mapped length, kept so the mapping
 * can be unmapped on completion.
 */
struct hinic3_dma_info {
	dma_addr_t dma; /* DMA bus address */
	u32        len; /* mapped length in bytes */
};
121 
/* Per-WQE bookkeeping needed to complete a transmitted skb. */
struct hinic3_tx_info {
	struct sk_buff         *skb;       /* skb awaiting TX completion */
	u16                    wqebb_cnt;  /* WQE basic blocks consumed */
	struct hinic3_dma_info *dma_info;  /* DMA mappings to unmap on completion */
};
127 
/* Software state for one TX queue; cacheline-aligned since it is
 * accessed on the hot transmit path.
 */
struct hinic3_txq {
	struct net_device       *netdev;        /* owning net device */
	struct device           *dev;           /* device used for DMA mapping */

	u16                     q_id;           /* queue index */
	u16                     tx_stop_thrs;   /* stop queue below this many free WQEBBs */
	u16                     tx_start_thrs;  /* restart queue above this many free WQEBBs */
	u32                     q_mask;         /* q_depth - 1, for index wrapping */
	u32                     q_depth;        /* number of entries in the ring */

	struct hinic3_tx_info   *tx_info;       /* per-entry completion bookkeeping */
	struct hinic3_io_queue  *sq;            /* underlying hardware send queue */

	struct hinic3_txq_stats txq_stats;      /* per-queue statistics */
} ____cacheline_aligned;
143 
/* Per-queue resources allocated by hinic3_alloc_txqs_res() and later
 * handed to hinic3_configure_txqs().
 */
struct hinic3_dyna_txq_res {
	struct hinic3_tx_info  *tx_info; /* array of per-entry TX bookkeeping */
	struct hinic3_dma_info *bds;     /* DMA-mapping array backing tx_info */
};
148 
/* Allocate / free the per-device array of TX queues. */
int hinic3_alloc_txqs(struct net_device *netdev);
void hinic3_free_txqs(struct net_device *netdev);

/* Allocate, free, or install per-queue TX resources (tx_info/bds arrays)
 * for @num_sq queues of @sq_depth entries each.
 */
int hinic3_alloc_txqs_res(struct net_device *netdev, u16 num_sq,
			  u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res);
void hinic3_free_txqs_res(struct net_device *netdev, u16 num_sq,
			  u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res);
int hinic3_configure_txqs(struct net_device *netdev, u16 num_sq,
			  u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res);

/* Transmit entry point (.ndo_start_xmit). */
netdev_tx_t hinic3_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
/* Process up to @budget TX completions; returns whether work remains. */
bool hinic3_tx_poll(struct hinic3_txq *txq, int budget);
/* Wait for in-flight TX work to drain on all queues. */
void hinic3_flush_txqs(struct net_device *netdev);
162 
163 #endif
164