/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */

#ifndef MLX5HWS_SEND_H_
#define MLX5HWS_SEND_H_

/* A single operation requires at least two WQEBBs, so at most 16 such
 * operations fit per rule.
 */
#define MAX_WQES_PER_RULE 32

enum mlx5hws_wqe_opcode {
        MLX5HWS_WQE_OPCODE_TBL_ACCESS = 0x2c,
};

enum mlx5hws_wqe_opmod {
        MLX5HWS_WQE_OPMOD_GTA_STE = 0,
        MLX5HWS_WQE_OPMOD_GTA_MOD_ARG = 1,
};

enum mlx5hws_wqe_gta_opcode {
        MLX5HWS_WQE_GTA_OP_ACTIVATE = 0,
        MLX5HWS_WQE_GTA_OP_DEACTIVATE = 1,
};

enum mlx5hws_wqe_gta_opmod {
        MLX5HWS_WQE_GTA_OPMOD_STE = 0,
        MLX5HWS_WQE_GTA_OPMOD_MOD_ARG = 1,
};

enum mlx5hws_wqe_gta_sz {
        MLX5HWS_WQE_SZ_GTA_CTRL = 48,
        MLX5HWS_WQE_SZ_GTA_DATA = 64,
};
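
/* The GTA sizes above correspond to sizeof() of the GTA control segment and
 * the STE data segment defined below (48 and 64 bytes respectively).
 */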

/* WQE Control segment. */
struct mlx5hws_wqe_ctrl_seg {
        __be32 opmod_idx_opcode;
        __be32 qpn_ds;
        __be32 flags;
        __be32 imm;
};

struct mlx5hws_wqe_gta_ctrl_seg {
        __be32 op_dirix;
        __be32 stc_ix[5];
        __be32 rsvd0[6];
};

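/* GTA data segment carrying an STE: either the regular layout of three action
 * dwords plus an 8-dword match tag, or a single jumbo tag that overlays both
 * (3 + 8 = 11 dwords).
 */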
struct mlx5hws_wqe_gta_data_seg_ste {
        __be32 rsvd0_ctr_id;
        __be32 rsvd1_definer;
        __be32 rsvd2[3];
        union {
                struct {
                        __be32 action[3];
                        __be32 tag[8];
                };
                __be32 jumbo[11];
        };
};

struct mlx5hws_wqe_gta_data_seg_arg {
        __be32 action_args[8];
};

struct mlx5hws_wqe_gta {
        struct mlx5hws_wqe_gta_ctrl_seg gta_ctrl;
        union {
                struct mlx5hws_wqe_gta_data_seg_ste seg_ste;
                struct mlx5hws_wqe_gta_data_seg_arg seg_arg;
        };
};

struct mlx5hws_send_ring_cq {
        struct mlx5_core_dev *mdev;
        struct mlx5_cqwq wq;
        struct mlx5_wq_ctrl wq_ctrl;
        struct mlx5_core_cq mcq;
        u16 poll_wqe;
};

struct mlx5hws_send_ring_priv {
        struct mlx5hws_rule *rule;
        void *user_data;
        u32 num_wqebbs;
        u32 id;
        u32 retry_id;
        u32 *used_id;
};

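/* A dependent WQE is built up front but kept aside until its dependencies are
 * resolved; mlx5hws_send_all_dep_wqe() then posts the pending entries using
 * the stored RTC (and retry RTC) IDs and the direct index. This description is
 * inferred from the fields and the helpers declared below.
 */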
struct mlx5hws_send_ring_dep_wqe {
        struct mlx5hws_wqe_gta_ctrl_seg wqe_ctrl;
        struct mlx5hws_wqe_gta_data_seg_ste wqe_data;
        struct mlx5hws_rule *rule;
        u32 rtc_0;
        u32 rtc_1;
        u32 retry_rtc_0;
        u32 retry_rtc_1;
        u32 direct_index;
        void *user_data;
};

struct mlx5hws_send_ring_sq {
        struct mlx5_core_dev *mdev;
        u16 cur_post;
        u16 buf_mask;
        struct mlx5hws_send_ring_priv *wr_priv;
        unsigned int last_idx;
        struct mlx5hws_send_ring_dep_wqe *dep_wqe;
        unsigned int head_dep_idx;
        unsigned int tail_dep_idx;
        u32 sqn;
        struct mlx5_wq_cyc wq;
        struct mlx5_wq_ctrl wq_ctrl;
        void __iomem *uar_map;
};

struct mlx5hws_send_ring {
        struct mlx5hws_send_ring_cq send_cq;
        struct mlx5hws_send_ring_sq send_sq;
};

struct mlx5hws_completed_poll_entry {
        void *user_data;
        enum mlx5hws_flow_op_status status;
};

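/* Software completion ring: pi/ci are the producer/consumer indices and both
 * wrap through mask, so the entries array is expected to be sized as a power
 * of two.
 */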
struct mlx5hws_completed_poll {
        struct mlx5hws_completed_poll_entry *entries;
        u16 ci;
        u16 pi;
        u16 mask;
};

struct mlx5hws_send_engine {
        struct mlx5hws_send_ring send_ring;
        struct mlx5_uars_page *uar; /* Uar is shared between rings of a queue */
        struct mlx5hws_completed_poll completed;
        u16 used_entries;
        u16 num_entries;
        bool err;
        bool error_cqe_printed;
        struct mutex lock; /* Protects the send engine */
};

struct mlx5hws_send_engine_post_ctrl {
        struct mlx5hws_send_engine *queue;
        struct mlx5hws_send_ring *send_ring;
        size_t num_wqebbs;
};

struct mlx5hws_send_engine_post_attr {
        u8 opcode;
        u8 opmod;
        u8 notify_hw;
        u8 fence;
        u8 match_definer_id;
        u8 range_definer_id;
        size_t len;
        struct mlx5hws_rule *rule;
        u32 id;
        u32 retry_id;
        u32 *used_id;
        void *user_data;
};

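/* Everything needed to post the STE WQE(s) for a rule: target and retry RTC
 * IDs, the prebuilt GTA control/data segments, the match tag(s), and the
 * generic post attributes. Judging by the names and the separate range
 * definer/data segment, the range_* fields only come into play for range
 * matching.
 */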
struct mlx5hws_send_ste_attr {
        u32 rtc_0;
        u32 rtc_1;
        u32 retry_rtc_0;
        u32 retry_rtc_1;
        u32 *used_id_rtc_0;
        u32 *used_id_rtc_1;
        bool wqe_tag_is_jumbo;
        u8 gta_opcode;
        u32 direct_index;
        struct mlx5hws_send_engine_post_attr send_attr;
        struct mlx5hws_rule_match_tag *wqe_tag;
        struct mlx5hws_rule_match_tag *range_wqe_tag;
        struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
        struct mlx5hws_wqe_gta_data_seg_ste *wqe_data;
        struct mlx5hws_wqe_gta_data_seg_ste *range_wqe_data;
};

struct mlx5hws_send_ring_dep_wqe *
mlx5hws_send_add_new_dep_wqe(struct mlx5hws_send_engine *queue);

void mlx5hws_send_abort_new_dep_wqe(struct mlx5hws_send_engine *queue);

void mlx5hws_send_all_dep_wqe(struct mlx5hws_send_engine *queue);

void mlx5hws_send_queues_close(struct mlx5hws_context *ctx);

int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
                             u16 queues,
                             u16 queue_size);

int mlx5hws_send_queue_action(struct mlx5hws_context *ctx,
                              u16 queue_id,
                              u32 actions);

int mlx5hws_send_test(struct mlx5hws_context *ctx,
                      u16 queues,
                      u16 queue_size);

struct mlx5hws_send_engine_post_ctrl
mlx5hws_send_engine_post_start(struct mlx5hws_send_engine *queue);

void mlx5hws_send_engine_post_req_wqe(struct mlx5hws_send_engine_post_ctrl *ctrl,
                                      char **buf, size_t *len);

void mlx5hws_send_engine_post_end(struct mlx5hws_send_engine_post_ctrl *ctrl,
                                  struct mlx5hws_send_engine_post_attr *attr);
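
/* A rough sketch of how a caller might chain the three post helpers above,
 * based only on the declarations in this header; my_gta_ctrl, my_gta_data and
 * post_attr are hypothetical caller-side variables, not part of this API:
 *
 *	struct mlx5hws_send_engine_post_ctrl ctrl;
 *	char *wqe_ctrl, *wqe_data;
 *	size_t len;
 *
 *	ctrl = mlx5hws_send_engine_post_start(queue);
 *	mlx5hws_send_engine_post_req_wqe(&ctrl, &wqe_ctrl, &len);
 *	memcpy(wqe_ctrl, &my_gta_ctrl, len);	// fill the GTA control segment
 *	mlx5hws_send_engine_post_req_wqe(&ctrl, &wqe_data, &len);
 *	memcpy(wqe_data, &my_gta_data, len);	// fill the GTA data segment
 *	mlx5hws_send_engine_post_end(&ctrl, &post_attr);
 */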

void mlx5hws_send_ste(struct mlx5hws_send_engine *queue,
                      struct mlx5hws_send_ste_attr *ste_attr);

void mlx5hws_send_stes_fw(struct mlx5hws_context *ctx,
                          struct mlx5hws_send_engine *queue,
                          struct mlx5hws_send_ste_attr *ste_attr);

void mlx5hws_send_engine_flush_queue(struct mlx5hws_send_engine *queue);

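/* The queue is empty once the CQ poll position has caught up with the
 * (masked) SQ producer index.
 */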
static inline bool mlx5hws_send_engine_empty(struct mlx5hws_send_engine *queue)
{
        struct mlx5hws_send_ring_sq *send_sq = &queue->send_ring.send_sq;
        struct mlx5hws_send_ring_cq *send_cq = &queue->send_ring.send_cq;

        return ((send_sq->cur_post & send_sq->buf_mask) == send_cq->poll_wqe);
}

static inline bool mlx5hws_send_engine_full(struct mlx5hws_send_engine *queue)
{
        return queue->used_entries >= queue->num_entries;
}

static inline void mlx5hws_send_engine_inc_rule(struct mlx5hws_send_engine *queue)
{
        queue->used_entries++;
}

static inline void mlx5hws_send_engine_dec_rule(struct mlx5hws_send_engine *queue)
{
        queue->used_entries--;
}

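/* Queue a software-generated completion on the completed-poll ring; the
 * producer index wraps through the ring mask.
 */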
static inline void mlx5hws_send_engine_gen_comp(struct mlx5hws_send_engine *queue,
                                                void *user_data,
                                                int comp_status)
{
        struct mlx5hws_completed_poll *comp = &queue->completed;

        comp->entries[comp->pi].status = comp_status;
        comp->entries[comp->pi].user_data = user_data;

        comp->pi = (comp->pi + 1) & comp->mask;
}

static inline bool mlx5hws_send_engine_err(struct mlx5hws_send_engine *queue)
{
        return queue->err;
}

#endif /* MLX5HWS_SEND_H_ */