// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */

#include "internal.h"

/* Pattern tunnel layer bits. */
#define MLX5_FLOW_LAYER_VXLAN BIT(12)
#define MLX5_FLOW_LAYER_VXLAN_GPE BIT(13)
#define MLX5_FLOW_LAYER_GRE BIT(14)
#define MLX5_FLOW_LAYER_MPLS BIT(15)

/* Pattern tunnel layer bits (continued). */
#define MLX5_FLOW_LAYER_IPIP BIT(23)
#define MLX5_FLOW_LAYER_IPV6_ENCAP BIT(24)
#define MLX5_FLOW_LAYER_NVGRE BIT(25)
#define MLX5_FLOW_LAYER_GENEVE BIT(26)

#define MLX5_FLOW_ITEM_FLEX_TUNNEL BIT_ULL(39)

/* Tunnel masks. */
#define MLX5_FLOW_LAYER_TUNNEL \
	(MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
	 MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_NVGRE | MLX5_FLOW_LAYER_MPLS | \
	 MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP | \
	 MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP | \
	 MLX5_FLOW_ITEM_FLEX_TUNNEL)

#define GTP_PDU_SC	0x85
#define BAD_PORT	0xBAD
#define ETH_TYPE_IPV4_VXLAN	0x0800
#define ETH_TYPE_IPV6_VXLAN	0x86DD
#define UDP_GTPU_PORT	2152
#define UDP_PORT_MPLS	6635
#define UDP_GENEVE_PORT	6081
#define UDP_ROCEV2_PORT	4791
#define HWS_FLOW_LAYER_TUNNEL_NO_MPLS (MLX5_FLOW_LAYER_TUNNEL & ~MLX5_FLOW_LAYER_MPLS)

#define STE_NO_VLAN	0x0
#define STE_SVLAN	0x1
#define STE_CVLAN	0x2
#define STE_NO_L3	0x0
#define STE_IPV4	0x1
#define STE_IPV6	0x2
#define STE_NO_L4	0x0
#define STE_TCP		0x1
#define STE_UDP		0x2
#define STE_ICMP	0x3
#define STE_ESP		0x3

#define IPV4 0x4
#define IPV6 0x6

/* Setter function based on bit offset and mask, for a 32bit DW */
#define _HWS_SET32(p, v, byte_off, bit_off, mask) \
	do { \
		u32 _v = (v); \
		*((__be32 *)(p) + ((byte_off) / 4)) = \
		cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + \
			    ((byte_off) / 4))) & \
			    (~((mask) << (bit_off)))) | \
			   (((_v) & (mask)) << \
			    (bit_off))); \
	} while (0)

/* Setter function based on bit offset and mask, for an unaligned 32bit DW */
#define HWS_SET32(p, v, byte_off, bit_off, mask) \
	do { \
		if (unlikely((bit_off) < 0)) { \
			u32 _bit_off = -1 * (bit_off); \
			u32 second_dw_mask = (mask) & ((1 << _bit_off) - 1); \
			_HWS_SET32(p, (v) >> _bit_off, byte_off, 0, (mask) >> _bit_off); \
			_HWS_SET32(p, (v) & second_dw_mask, (byte_off) + DW_SIZE, \
				   ((bit_off) + BITS_IN_DW) % BITS_IN_DW, second_dw_mask); \
		} else { \
			_HWS_SET32(p, v, byte_off, (bit_off), (mask)); \
		} \
	} while (0)
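
/*
 * Editorial example (not from the original source): with bit_off == -2
 * the field straddles a DW boundary, so HWS_SET32() performs two masked
 * writes. The upper bits ((v) >> 2, under (mask) >> 2) land in the first
 * DW at bit 0, and the low two bits land in the next DW at bit offset
 * ((-2 + 32) % 32) == 30. A non-negative bit_off is a single masked write.
 */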

/* Getter for a field within an aligned 32bit DW */
#define HWS_GET32(p, byte_off, bit_off, mask) \
	((be32_to_cpu(*((__be32 *)(p) + ((byte_off) / 4))) >> (bit_off)) & (mask))

#define HWS_CALC_FNAME(field, inner) \
	((inner) ? MLX5HWS_DEFINER_FNAME_##field##_I : \
		   MLX5HWS_DEFINER_FNAME_##field##_O)

#define HWS_GET_MATCH_PARAM(match_param, hdr) \
	MLX5_GET(fte_match_param, match_param, hdr)

#define HWS_IS_FLD_SET(match_param, hdr) \
	(!!(HWS_GET_MATCH_PARAM(match_param, hdr)))

#define HWS_IS_FLD_SET_DW_ARR(match_param, hdr, sz_in_bits) ({ \
		BUILD_BUG_ON((sz_in_bits) % 32); \
		u32 sz = (sz_in_bits); \
		u32 res = 0; \
		u32 dw_off = __mlx5_dw_off(fte_match_param, hdr); \
		while (!res && sz >= 32) { \
			res = *((match_param) + (dw_off++)); \
			sz -= 32; \
		} \
		res; \
	})
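
/*
 * Editorial note: the statement expression above scans the field one DW
 * at a time and stops at the first non-zero DW, so it answers "is any
 * bit of this multi-DW field set" (e.g. for 128-bit IPv6 addresses).
 */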

#define HWS_IS_FLD_SET_SZ(match_param, hdr, sz_in_bits) \
	(((sz_in_bits) > 32) ? HWS_IS_FLD_SET_DW_ARR(match_param, hdr, sz_in_bits) : \
			       !!(HWS_GET_MATCH_PARAM(match_param, hdr)))

#define HWS_GET64_MATCH_PARAM(match_param, hdr) \
	MLX5_GET64(fte_match_param, match_param, hdr)

#define HWS_IS_FLD64_SET(match_param, hdr) \
	(!!(HWS_GET64_MATCH_PARAM(match_param, hdr)))

#define HWS_CALC_HDR_SRC(fc, s_hdr) \
	do { \
		(fc)->s_bit_mask = __mlx5_mask(fte_match_param, s_hdr); \
		(fc)->s_bit_off = __mlx5_dw_bit_off(fte_match_param, s_hdr); \
		(fc)->s_byte_off = MLX5_BYTE_OFF(fte_match_param, s_hdr); \
	} while (0)

#define HWS_CALC_HDR_DST(fc, d_hdr) \
	do { \
		(fc)->bit_mask = __mlx5_mask(definer_hl, d_hdr); \
		(fc)->bit_off = __mlx5_dw_bit_off(definer_hl, d_hdr); \
		(fc)->byte_off = MLX5_BYTE_OFF(definer_hl, d_hdr); \
	} while (0)

#define HWS_CALC_HDR(fc, s_hdr, d_hdr) \
	do { \
		HWS_CALC_HDR_SRC(fc, s_hdr); \
		HWS_CALC_HDR_DST(fc, d_hdr); \
		(fc)->tag_set = &hws_definer_generic_set; \
	} while (0)

#define HWS_SET_HDR(fc_arr, match_param, fname, s_hdr, d_hdr) \
	do { \
		if (HWS_IS_FLD_SET(match_param, s_hdr)) \
			HWS_CALC_HDR(&(fc_arr)[MLX5HWS_DEFINER_FNAME_##fname], s_hdr, d_hdr); \
	} while (0)
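
/*
 * Editorial example (not from the original source): a call such as
 * HWS_SET_HDR(fc, match_param, ETH_TYPE_O, outer_headers.ethertype,
 * eth_l2_outer.l3_ethertype) populates only
 * fc[MLX5HWS_DEFINER_FNAME_ETH_TYPE_O], and only when the ethertype
 * bits are actually set in match_param.
 */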

struct mlx5hws_definer_sel_ctrl {
	u8 allowed_full_dw; /* Full DW selectors cover all offsets */
	u8 allowed_lim_dw; /* Limited DW selectors cover offset < 64 */
	u8 allowed_bytes; /* Bytes selectors, up to offset 255 */
	u8 used_full_dw;
	u8 used_lim_dw;
	u8 used_bytes;
	u8 full_dw_selector[DW_SELECTORS];
	u8 lim_dw_selector[DW_SELECTORS_LIMITED];
	u8 byte_selector[BYTE_SELECTORS];
};

struct mlx5hws_definer_conv_data {
	struct mlx5hws_context *ctx;
	struct mlx5hws_definer_fc *fc;
	/* enum mlx5hws_definer_match_flag */
	u32 match_flags;
};

static void
hws_definer_ones_set(struct mlx5hws_definer_fc *fc,
		     void *match_param,
		     u8 *tag)
{
	HWS_SET32(tag, -1, fc->byte_off, fc->bit_off, fc->bit_mask);
}

static void
hws_definer_generic_set(struct mlx5hws_definer_fc *fc,
			void *match_param,
			u8 *tag)
{
	/* Can be optimized */
	u32 val = HWS_GET32(match_param, fc->s_byte_off, fc->s_bit_off, fc->s_bit_mask);

	HWS_SET32(tag, val, fc->byte_off, fc->bit_off, fc->bit_mask);
}

static void
hws_definer_outer_vlan_type_set(struct mlx5hws_definer_fc *fc,
				void *match_param,
				u8 *tag)
{
	if (HWS_GET_MATCH_PARAM(match_param, outer_headers.cvlan_tag))
		HWS_SET32(tag, STE_CVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
	else if (HWS_GET_MATCH_PARAM(match_param, outer_headers.svlan_tag))
		HWS_SET32(tag, STE_SVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
	else
		HWS_SET32(tag, STE_NO_VLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
}

static void
hws_definer_inner_vlan_type_set(struct mlx5hws_definer_fc *fc,
				void *match_param,
				u8 *tag)
{
	if (HWS_GET_MATCH_PARAM(match_param, inner_headers.cvlan_tag))
		HWS_SET32(tag, STE_CVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
	else if (HWS_GET_MATCH_PARAM(match_param, inner_headers.svlan_tag))
		HWS_SET32(tag, STE_SVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
	else
		HWS_SET32(tag, STE_NO_VLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
}

static void
hws_definer_second_vlan_type_set(struct mlx5hws_definer_fc *fc,
				 void *match_param,
				 u8 *tag,
				 bool inner)
{
	u32 second_cvlan_tag = inner ?
		HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_cvlan_tag) :
		HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_cvlan_tag);
	u32 second_svlan_tag = inner ?
		HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_svlan_tag) :
		HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_svlan_tag);

	if (second_cvlan_tag)
		HWS_SET32(tag, STE_CVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
	else if (second_svlan_tag)
		HWS_SET32(tag, STE_SVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
	else
		HWS_SET32(tag, STE_NO_VLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
}

static void
hws_definer_inner_second_vlan_type_set(struct mlx5hws_definer_fc *fc,
				       void *match_param,
				       u8 *tag)
{
	hws_definer_second_vlan_type_set(fc, match_param, tag, true);
}

static void
hws_definer_outer_second_vlan_type_set(struct mlx5hws_definer_fc *fc,
				       void *match_param,
				       u8 *tag)
{
	hws_definer_second_vlan_type_set(fc, match_param, tag, false);
}

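/*
 * ICMP type and code are packed into a single match DW, laid out
 * according to header_icmp, before being written to the tag.
 */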
static void hws_definer_icmp_dw1_set(struct mlx5hws_definer_fc *fc,
				     void *match_param,
				     u8 *tag)
{
	u32 code = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmp_code);
	u32 type = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmp_type);
	u32 dw = (type << __mlx5_dw_bit_off(header_icmp, type)) |
		 (code << __mlx5_dw_bit_off(header_icmp, code));

	HWS_SET32(tag, dw, fc->byte_off, fc->bit_off, fc->bit_mask);
}

static void
hws_definer_icmpv6_dw1_set(struct mlx5hws_definer_fc *fc,
			   void *match_param,
			   u8 *tag)
{
	u32 code = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmpv6_code);
	u32 type = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmpv6_type);
	u32 dw = (type << __mlx5_dw_bit_off(header_icmp, type)) |
		 (code << __mlx5_dw_bit_off(header_icmp, code));

	HWS_SET32(tag, dw, fc->byte_off, fc->bit_off, fc->bit_mask);
}

static void
hws_definer_l3_type_set(struct mlx5hws_definer_fc *fc,
			void *match_param,
			u8 *tag)
{
	u32 val = HWS_GET32(match_param, fc->s_byte_off, fc->s_bit_off, fc->s_bit_mask);

	if (val == IPV4)
		HWS_SET32(tag, STE_IPV4, fc->byte_off, fc->bit_off, fc->bit_mask);
	else if (val == IPV6)
		HWS_SET32(tag, STE_IPV6, fc->byte_off, fc->bit_off, fc->bit_mask);
	else
		HWS_SET32(tag, STE_NO_L3, fc->byte_off, fc->bit_off, fc->bit_mask);
}

static void
hws_definer_set_source_port_gvmi(struct mlx5hws_definer_fc *fc,
				 void *match_param,
				 u8 *tag,
				 struct mlx5hws_context *peer_ctx)
{
	u16 source_port = HWS_GET_MATCH_PARAM(match_param, misc_parameters.source_port);
	u16 vport_gvmi = 0;
	int ret;

	ret = mlx5hws_vport_get_gvmi(peer_ctx, source_port, &vport_gvmi);
	if (ret) {
		HWS_SET32(tag, BAD_PORT, fc->byte_off, fc->bit_off, fc->bit_mask);
		mlx5hws_err(fc->ctx, "Vport 0x%x is disabled or invalid\n", source_port);
		return;
	}

	if (vport_gvmi)
		HWS_SET32(tag, vport_gvmi, fc->byte_off, fc->bit_off, fc->bit_mask);
}

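/*
 * Resolve the peer context by the eswitch owner vhca_id: use the local
 * context when the id matches, otherwise look the peer up in the
 * peer_ctx_xa xarray (hence, presumably, the __must_hold() annotation).
 */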
static void
hws_definer_set_source_gvmi_vhca_id(struct mlx5hws_definer_fc *fc,
				    void *match_param,
				    u8 *tag)
__must_hold(&fc->ctx->ctrl_lock)
{
	int id = HWS_GET_MATCH_PARAM(match_param, misc_parameters.source_eswitch_owner_vhca_id);
	struct mlx5hws_context *peer_ctx;

	if (id == fc->ctx->caps->vhca_id)
		peer_ctx = fc->ctx;
	else
		peer_ctx = xa_load(&fc->ctx->peer_ctx_xa, id);

	if (!peer_ctx) {
		HWS_SET32(tag, BAD_PORT, fc->byte_off, fc->bit_off, fc->bit_mask);
		mlx5hws_err(fc->ctx, "Invalid vhca_id provided 0x%x\n", id);
		return;
	}

	hws_definer_set_source_port_gvmi(fc, match_param, tag, peer_ctx);
}

static void
hws_definer_set_source_gvmi(struct mlx5hws_definer_fc *fc,
			    void *match_param,
			    u8 *tag)
{
	hws_definer_set_source_port_gvmi(fc, match_param, tag, fc->ctx);
}

static struct mlx5hws_definer_fc *
hws_definer_flex_parser_steering_ok_bits_handler(struct mlx5hws_definer_conv_data *cd,
						 u8 parser_id)
{
	struct mlx5hws_definer_fc *fc;

	switch (parser_id) {
	case 0:
		fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER0_OK];
		HWS_CALC_HDR_DST(fc, oks1.flex_parser0_steering_ok);
		fc->tag_set = &hws_definer_generic_set;
		break;
	case 1:
		fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER1_OK];
		HWS_CALC_HDR_DST(fc, oks1.flex_parser1_steering_ok);
		fc->tag_set = &hws_definer_generic_set;
		break;
	case 2:
		fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER2_OK];
		HWS_CALC_HDR_DST(fc, oks1.flex_parser2_steering_ok);
		fc->tag_set = &hws_definer_generic_set;
		break;
	case 3:
		fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER3_OK];
		HWS_CALC_HDR_DST(fc, oks1.flex_parser3_steering_ok);
		fc->tag_set = &hws_definer_generic_set;
		break;
	case 4:
		fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER4_OK];
		HWS_CALC_HDR_DST(fc, oks1.flex_parser4_steering_ok);
		fc->tag_set = &hws_definer_generic_set;
		break;
	case 5:
		fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER5_OK];
		HWS_CALC_HDR_DST(fc, oks1.flex_parser5_steering_ok);
		fc->tag_set = &hws_definer_generic_set;
		break;
	case 6:
		fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER6_OK];
		HWS_CALC_HDR_DST(fc, oks1.flex_parser6_steering_ok);
		fc->tag_set = &hws_definer_generic_set;
		break;
	case 7:
		fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER7_OK];
		HWS_CALC_HDR_DST(fc, oks1.flex_parser7_steering_ok);
		fc->tag_set = &hws_definer_generic_set;
		break;
	default:
		mlx5hws_err(cd->ctx, "Unsupported flex parser steering ok index %u\n", parser_id);
		return NULL;
	}

	return fc;
}

static struct mlx5hws_definer_fc *
hws_definer_flex_parser_handler(struct mlx5hws_definer_conv_data *cd,
				u8 parser_id)
{
	struct mlx5hws_definer_fc *fc;

	switch (parser_id) {
	case 0:
		fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0];
		HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_0);
		fc->tag_set = &hws_definer_generic_set;
		break;
	case 1:
		fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1];
		HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_1);
		fc->tag_set = &hws_definer_generic_set;
		break;
	case 2:
		fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2];
		HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_2);
		fc->tag_set = &hws_definer_generic_set;
		break;
	case 3:
		fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3];
		HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_3);
		fc->tag_set = &hws_definer_generic_set;
		break;
	case 4:
		fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4];
		HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_4);
		fc->tag_set = &hws_definer_generic_set;
		break;
	case 5:
		fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5];
		HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_5);
		fc->tag_set = &hws_definer_generic_set;
		break;
	case 6:
		fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6];
		HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_6);
		fc->tag_set = &hws_definer_generic_set;
		break;
	case 7:
		fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7];
		HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_7);
		fc->tag_set = &hws_definer_generic_set;
		break;
	default:
		mlx5hws_err(cd->ctx, "Unsupported flex parser %u\n", parser_id);
		return NULL;
	}

	return fc;
}

static struct mlx5hws_definer_fc *
hws_definer_misc4_fields_handler(struct mlx5hws_definer_conv_data *cd,
				 bool *parser_is_used,
				 u32 id,
				 u32 value)
{
	if (id || value) {
		if (id >= HWS_NUM_OF_FLEX_PARSERS) {
			mlx5hws_err(cd->ctx, "Unsupported parser id\n");
			return NULL;
		}

		if (parser_is_used[id]) {
			mlx5hws_err(cd->ctx, "Parser id has already been used\n");
			return NULL;
		}
	}

	parser_is_used[id] = true;

	return hws_definer_flex_parser_handler(cd, id);
}

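/*
 * Each "flags & (flags - 1)" test below is non-zero only when more than
 * one bit of the masked group survives, i.e. it rejects combinations of
 * mutually exclusive tunnel/ICMP/TCP match fields.
 */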
static int
hws_definer_check_match_flags(struct mlx5hws_definer_conv_data *cd)
{
	u32 flags;

	flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE |
				   MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE |
				   MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU |
				   MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE |
				   MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN |
				   MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1);
	if (flags & (flags - 1))
		goto err_conflict;

	flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE_OPT_KEY |
				   MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_2);
	if (flags & (flags - 1))
		goto err_conflict;

	flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_GRE |
				   MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_UDP);
	if (flags & (flags - 1))
		goto err_conflict;

	flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_ICMPV4 |
				   MLX5HWS_DEFINER_MATCH_FLAG_ICMPV6 |
				   MLX5HWS_DEFINER_MATCH_FLAG_TCP_O |
				   MLX5HWS_DEFINER_MATCH_FLAG_TCP_I);
	if (flags & (flags - 1))
		goto err_conflict;

	return 0;

err_conflict:
	mlx5hws_err(cd->ctx, "Invalid definer fields combination: match_flags = 0x%08x\n",
		    cd->match_flags);
	return -EINVAL;
}

static int
hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
		       u32 *match_param)
{
	bool is_s_ipv6, is_d_ipv6, smac_set, dmac_set;
	struct mlx5hws_definer_fc *fc = cd->fc;
	struct mlx5hws_definer_fc *curr_fc;
	u32 *s_ipv6, *d_ipv6;

	if (HWS_IS_FLD_SET_SZ(match_param, outer_headers.l4_type, 0x2) ||
	    HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_c2, 0xe) ||
	    HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_c4, 0x4)) {
		mlx5hws_err(cd->ctx, "Unsupported outer parameters set\n");
		return -EINVAL;
	}

	/* L2 Check ethertype */
	HWS_SET_HDR(fc, match_param, ETH_TYPE_O,
		    outer_headers.ethertype,
		    eth_l2_outer.l3_ethertype);
	/* L2 Check SMAC 47_16 */
	HWS_SET_HDR(fc, match_param, ETH_SMAC_47_16_O,
		    outer_headers.smac_47_16, eth_l2_src_outer.smac_47_16);
	/* L2 Check SMAC 15_0 */
	HWS_SET_HDR(fc, match_param, ETH_SMAC_15_0_O,
		    outer_headers.smac_15_0, eth_l2_src_outer.smac_15_0);
	/* L2 Check DMAC 47_16 */
	HWS_SET_HDR(fc, match_param, ETH_DMAC_47_16_O,
		    outer_headers.dmac_47_16, eth_l2_outer.dmac_47_16);
	/* L2 Check DMAC 15_0 */
	HWS_SET_HDR(fc, match_param, ETH_DMAC_15_0_O,
		    outer_headers.dmac_15_0, eth_l2_outer.dmac_15_0);

	/* L2 VLAN */
	HWS_SET_HDR(fc, match_param, VLAN_FIRST_PRIO_O,
		    outer_headers.first_prio, eth_l2_outer.first_priority);
	HWS_SET_HDR(fc, match_param, VLAN_CFI_O,
		    outer_headers.first_cfi, eth_l2_outer.first_cfi);
	HWS_SET_HDR(fc, match_param, VLAN_ID_O,
		    outer_headers.first_vid, eth_l2_outer.first_vlan_id);

	/* L2 CVLAN and SVLAN */
	if (HWS_GET_MATCH_PARAM(match_param, outer_headers.cvlan_tag) ||
	    HWS_GET_MATCH_PARAM(match_param, outer_headers.svlan_tag)) {
		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_TYPE_O];
		HWS_CALC_HDR_DST(curr_fc, eth_l2_outer.first_vlan_qualifier);
		curr_fc->tag_set = &hws_definer_outer_vlan_type_set;
		curr_fc->tag_mask_set = &hws_definer_ones_set;
	}

	/* L3 Check IP header */
	HWS_SET_HDR(fc, match_param, IP_PROTOCOL_O,
		    outer_headers.ip_protocol,
		    eth_l3_outer.protocol_next_header);
	HWS_SET_HDR(fc, match_param, IP_TTL_O,
		    outer_headers.ttl_hoplimit,
		    eth_l3_outer.time_to_live_hop_limit);

	/* L3 Check IPv4/IPv6 addresses */
	s_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
			      outer_headers.src_ipv4_src_ipv6.ipv6_layout);
	d_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
			      outer_headers.dst_ipv4_dst_ipv6.ipv6_layout);

	/* Assume IPv6 is used if ipv6 bits are set */
	is_s_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2];
	is_d_ipv6 = d_ipv6[0] || d_ipv6[1] || d_ipv6[2];

	if (is_s_ipv6) {
		/* Handle IPv6 source address */
		HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_O,
			    outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96,
			    ipv6_src_outer.ipv6_address_127_96);
		HWS_SET_HDR(fc, match_param, IPV6_SRC_95_64_O,
			    outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64,
			    ipv6_src_outer.ipv6_address_95_64);
		HWS_SET_HDR(fc, match_param, IPV6_SRC_63_32_O,
			    outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32,
			    ipv6_src_outer.ipv6_address_63_32);
		HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_O,
			    outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
			    ipv6_src_outer.ipv6_address_31_0);
	} else {
		/* Handle IPv4 source address */
		HWS_SET_HDR(fc, match_param, IPV4_SRC_O,
			    outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
			    ipv4_src_dest_outer.source_address);
	}
	if (is_d_ipv6) {
		/* Handle IPv6 destination address */
		HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_O,
			    outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96,
			    ipv6_dst_outer.ipv6_address_127_96);
		HWS_SET_HDR(fc, match_param, IPV6_DST_95_64_O,
			    outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64,
			    ipv6_dst_outer.ipv6_address_95_64);
		HWS_SET_HDR(fc, match_param, IPV6_DST_63_32_O,
			    outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32,
			    ipv6_dst_outer.ipv6_address_63_32);
		HWS_SET_HDR(fc, match_param, IPV6_DST_31_0_O,
			    outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
			    ipv6_dst_outer.ipv6_address_31_0);
	} else {
		/* Handle IPv4 destination address */
		HWS_SET_HDR(fc, match_param, IPV4_DST_O,
			    outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
			    ipv4_src_dest_outer.destination_address);
	}

	/* L4 Handle TCP/UDP */
	HWS_SET_HDR(fc, match_param, L4_SPORT_O,
		    outer_headers.tcp_sport, eth_l4_outer.source_port);
	HWS_SET_HDR(fc, match_param, L4_DPORT_O,
		    outer_headers.tcp_dport, eth_l4_outer.destination_port);
	HWS_SET_HDR(fc, match_param, L4_SPORT_O,
		    outer_headers.udp_sport, eth_l4_outer.source_port);
	HWS_SET_HDR(fc, match_param, L4_DPORT_O,
		    outer_headers.udp_dport, eth_l4_outer.destination_port);
	HWS_SET_HDR(fc, match_param, TCP_FLAGS_O,
		    outer_headers.tcp_flags, eth_l4_outer.tcp_flags);

	/* L3 Handle DSCP, ECN and IHL */
	HWS_SET_HDR(fc, match_param, IP_DSCP_O,
		    outer_headers.ip_dscp, eth_l3_outer.dscp);
	HWS_SET_HDR(fc, match_param, IP_ECN_O,
		    outer_headers.ip_ecn, eth_l3_outer.ecn);
	HWS_SET_HDR(fc, match_param, IPV4_IHL_O,
		    outer_headers.ipv4_ihl, eth_l3_outer.ihl);

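	/*
	 * Editorial note: the ip_fragmented bit is available in more than
	 * one definer section; which copy is used below appears to depend
	 * on whether SMAC and DMAC are matched symmetrically (a definer
	 * layout detail, as we read the code).
	 */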
	/* Set IP fragmented bit */
	if (HWS_IS_FLD_SET(match_param, outer_headers.frag)) {
		smac_set = HWS_IS_FLD_SET(match_param, outer_headers.smac_15_0) ||
			   HWS_IS_FLD_SET(match_param, outer_headers.smac_47_16);
		dmac_set = HWS_IS_FLD_SET(match_param, outer_headers.dmac_15_0) ||
			   HWS_IS_FLD_SET(match_param, outer_headers.dmac_47_16);
		if (smac_set == dmac_set) {
			HWS_SET_HDR(fc, match_param, IP_FRAG_O,
				    outer_headers.frag, eth_l4_outer.ip_fragmented);
		} else {
			HWS_SET_HDR(fc, match_param, IP_FRAG_O,
				    outer_headers.frag, eth_l2_src_outer.ip_fragmented);
		}
	}

	/* L3_type set */
	if (HWS_IS_FLD_SET(match_param, outer_headers.ip_version)) {
		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_O];
		HWS_CALC_HDR_DST(curr_fc, eth_l2_outer.l3_type);
		curr_fc->tag_set = &hws_definer_l3_type_set;
		curr_fc->tag_mask_set = &hws_definer_ones_set;
		HWS_CALC_HDR_SRC(curr_fc, outer_headers.ip_version);
	}

	return 0;
}

static int
hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd,
		       u32 *match_param)
{
	bool is_s_ipv6, is_d_ipv6, smac_set, dmac_set;
	struct mlx5hws_definer_fc *fc = cd->fc;
	struct mlx5hws_definer_fc *curr_fc;
	u32 *s_ipv6, *d_ipv6;

	if (HWS_IS_FLD_SET_SZ(match_param, inner_headers.l4_type, 0x2) ||
	    HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_c2, 0xe) ||
	    HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_c4, 0x4)) {
		mlx5hws_err(cd->ctx, "Unsupported inner parameters set\n");
		return -EINVAL;
	}

	/* L2 Check ethertype */
	HWS_SET_HDR(fc, match_param, ETH_TYPE_I,
		    inner_headers.ethertype,
		    eth_l2_inner.l3_ethertype);
	/* L2 Check SMAC 47_16 */
	HWS_SET_HDR(fc, match_param, ETH_SMAC_47_16_I,
		    inner_headers.smac_47_16, eth_l2_src_inner.smac_47_16);
	/* L2 Check SMAC 15_0 */
	HWS_SET_HDR(fc, match_param, ETH_SMAC_15_0_I,
		    inner_headers.smac_15_0, eth_l2_src_inner.smac_15_0);
	/* L2 Check DMAC 47_16 */
	HWS_SET_HDR(fc, match_param, ETH_DMAC_47_16_I,
		    inner_headers.dmac_47_16, eth_l2_inner.dmac_47_16);
	/* L2 Check DMAC 15_0 */
	HWS_SET_HDR(fc, match_param, ETH_DMAC_15_0_I,
		    inner_headers.dmac_15_0, eth_l2_inner.dmac_15_0);

	/* L2 VLAN */
	HWS_SET_HDR(fc, match_param, VLAN_FIRST_PRIO_I,
		    inner_headers.first_prio, eth_l2_inner.first_priority);
	HWS_SET_HDR(fc, match_param, VLAN_CFI_I,
		    inner_headers.first_cfi, eth_l2_inner.first_cfi);
	HWS_SET_HDR(fc, match_param, VLAN_ID_I,
		    inner_headers.first_vid, eth_l2_inner.first_vlan_id);

	/* L2 CVLAN and SVLAN */
	if (HWS_GET_MATCH_PARAM(match_param, inner_headers.cvlan_tag) ||
	    HWS_GET_MATCH_PARAM(match_param, inner_headers.svlan_tag)) {
		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_TYPE_I];
		HWS_CALC_HDR_DST(curr_fc, eth_l2_inner.first_vlan_qualifier);
		curr_fc->tag_set = &hws_definer_inner_vlan_type_set;
		curr_fc->tag_mask_set = &hws_definer_ones_set;
	}

	/* L3 Check IP header */
	HWS_SET_HDR(fc, match_param, IP_PROTOCOL_I,
		    inner_headers.ip_protocol,
		    eth_l3_inner.protocol_next_header);
	HWS_SET_HDR(fc, match_param, IP_VERSION_I,
		    inner_headers.ip_version,
		    eth_l3_inner.ip_version);
	HWS_SET_HDR(fc, match_param, IP_TTL_I,
		    inner_headers.ttl_hoplimit,
		    eth_l3_inner.time_to_live_hop_limit);

	/* L3 Check IPv4/IPv6 addresses */
	s_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
			      inner_headers.src_ipv4_src_ipv6.ipv6_layout);
	d_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
			      inner_headers.dst_ipv4_dst_ipv6.ipv6_layout);

	/* Assume IPv6 is used if ipv6 bits are set */
	is_s_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2];
	is_d_ipv6 = d_ipv6[0] || d_ipv6[1] || d_ipv6[2];

	if (is_s_ipv6) {
		/* Handle IPv6 source address */
		HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_I,
			    inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96,
			    ipv6_src_inner.ipv6_address_127_96);
		HWS_SET_HDR(fc, match_param, IPV6_SRC_95_64_I,
			    inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64,
			    ipv6_src_inner.ipv6_address_95_64);
		HWS_SET_HDR(fc, match_param, IPV6_SRC_63_32_I,
			    inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32,
			    ipv6_src_inner.ipv6_address_63_32);
		HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_I,
			    inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
			    ipv6_src_inner.ipv6_address_31_0);
	} else {
		/* Handle IPv4 source address */
		HWS_SET_HDR(fc, match_param, IPV4_SRC_I,
			    inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
			    ipv4_src_dest_inner.source_address);
	}
	if (is_d_ipv6) {
		/* Handle IPv6 destination address */
		HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_I,
			    inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96,
			    ipv6_dst_inner.ipv6_address_127_96);
		HWS_SET_HDR(fc, match_param, IPV6_DST_95_64_I,
			    inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64,
			    ipv6_dst_inner.ipv6_address_95_64);
		HWS_SET_HDR(fc, match_param, IPV6_DST_63_32_I,
			    inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32,
			    ipv6_dst_inner.ipv6_address_63_32);
		HWS_SET_HDR(fc, match_param, IPV6_DST_31_0_I,
			    inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
			    ipv6_dst_inner.ipv6_address_31_0);
	} else {
		/* Handle IPv4 destination address */
		HWS_SET_HDR(fc, match_param, IPV4_DST_I,
			    inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
			    ipv4_src_dest_inner.destination_address);
	}

	/* L4 Handle TCP/UDP */
	HWS_SET_HDR(fc, match_param, L4_SPORT_I,
		    inner_headers.tcp_sport, eth_l4_inner.source_port);
	HWS_SET_HDR(fc, match_param, L4_DPORT_I,
		    inner_headers.tcp_dport, eth_l4_inner.destination_port);
	HWS_SET_HDR(fc, match_param, L4_SPORT_I,
		    inner_headers.udp_sport, eth_l4_inner.source_port);
	HWS_SET_HDR(fc, match_param, L4_DPORT_I,
		    inner_headers.udp_dport, eth_l4_inner.destination_port);
	HWS_SET_HDR(fc, match_param, TCP_FLAGS_I,
		    inner_headers.tcp_flags, eth_l4_inner.tcp_flags);

	/* L3 Handle DSCP, ECN and IHL */
	HWS_SET_HDR(fc, match_param, IP_DSCP_I,
		    inner_headers.ip_dscp, eth_l3_inner.dscp);
	HWS_SET_HDR(fc, match_param, IP_ECN_I,
		    inner_headers.ip_ecn, eth_l3_inner.ecn);
	HWS_SET_HDR(fc, match_param, IPV4_IHL_I,
		    inner_headers.ipv4_ihl, eth_l3_inner.ihl);

	/* Set IP fragmented bit */
	if (HWS_IS_FLD_SET(match_param, inner_headers.frag)) {
		if (HWS_IS_FLD_SET(match_param, misc_parameters.vxlan_vni)) {
			HWS_SET_HDR(fc, match_param, IP_FRAG_I,
				    inner_headers.frag, eth_l2_inner.ip_fragmented);
		} else {
			smac_set = HWS_IS_FLD_SET(match_param, inner_headers.smac_15_0) ||
				   HWS_IS_FLD_SET(match_param, inner_headers.smac_47_16);
			dmac_set = HWS_IS_FLD_SET(match_param, inner_headers.dmac_15_0) ||
				   HWS_IS_FLD_SET(match_param, inner_headers.dmac_47_16);
			if (smac_set == dmac_set) {
				HWS_SET_HDR(fc, match_param, IP_FRAG_I,
					    inner_headers.frag, eth_l4_inner.ip_fragmented);
			} else {
				HWS_SET_HDR(fc, match_param, IP_FRAG_I,
					    inner_headers.frag, eth_l2_src_inner.ip_fragmented);
			}
		}
	}

	/* L3_type set */
	if (HWS_IS_FLD_SET(match_param, inner_headers.ip_version)) {
		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_I];
		HWS_CALC_HDR_DST(curr_fc, eth_l2_inner.l3_type);
		curr_fc->tag_set = &hws_definer_l3_type_set;
		curr_fc->tag_mask_set = &hws_definer_ones_set;
		HWS_CALC_HDR_SRC(curr_fc, inner_headers.ip_version);
	}

	return 0;
}

static int
hws_definer_conv_misc(struct mlx5hws_definer_conv_data *cd,
		      u32 *match_param)
{
	struct mlx5hws_cmd_query_caps *caps = cd->ctx->caps;
	struct mlx5hws_definer_fc *fc = cd->fc;
	struct mlx5hws_definer_fc *curr_fc;

	if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_1, 0x1) ||
	    HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_64, 0xc) ||
	    HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_d8, 0x6) ||
	    HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_e0, 0xc) ||
	    HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_100, 0xc) ||
	    HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_120, 0xa) ||
	    HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_140, 0x8) ||
	    HWS_IS_FLD_SET(match_param, misc_parameters.bth_dst_qp) ||
	    HWS_IS_FLD_SET(match_param, misc_parameters.bth_opcode) ||
	    HWS_IS_FLD_SET(match_param, misc_parameters.inner_esp_spi) ||
	    HWS_IS_FLD_SET(match_param, misc_parameters.outer_esp_spi) ||
	    HWS_IS_FLD_SET(match_param, misc_parameters.source_vhca_port) ||
	    HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_1a0, 0x60)) {
		mlx5hws_err(cd->ctx, "Unsupported misc parameters set\n");
		return -EINVAL;
	}

	/* Check GRE related fields */
	if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_c_present)) {
		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_C];
		HWS_CALC_HDR(curr_fc,
			     misc_parameters.gre_c_present,
			     tunnel_header.tunnel_header_0);
		curr_fc->bit_mask = __mlx5_mask(header_gre, gre_c_present);
		curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_c_present);
	}

	if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_k_present)) {
		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_K];
		HWS_CALC_HDR(curr_fc,
			     misc_parameters.gre_k_present,
			     tunnel_header.tunnel_header_0);
		curr_fc->bit_mask = __mlx5_mask(header_gre, gre_k_present);
		curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_k_present);
	}

	if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_s_present)) {
		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_S];
		HWS_CALC_HDR(curr_fc,
			     misc_parameters.gre_s_present,
			     tunnel_header.tunnel_header_0);
		curr_fc->bit_mask = __mlx5_mask(header_gre, gre_s_present);
		curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_s_present);
	}

	if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_protocol)) {
		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL];
		HWS_CALC_HDR(curr_fc,
			     misc_parameters.gre_protocol,
			     tunnel_header.tunnel_header_0);
		curr_fc->bit_mask = __mlx5_mask(header_gre, gre_protocol);
		curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_protocol);
	}

	if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_key.key)) {
		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE |
				   MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE_OPT_KEY;
		HWS_SET_HDR(fc, match_param, GRE_OPT_KEY,
			    misc_parameters.gre_key.key, tunnel_header.tunnel_header_2);
	}

	/* Check GENEVE related fields */
	if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_vni)) {
		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_VNI];
		HWS_CALC_HDR(curr_fc,
			     misc_parameters.geneve_vni,
			     tunnel_header.tunnel_header_1);
		curr_fc->bit_mask = __mlx5_mask(header_geneve, vni);
		curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, vni);
	}

	if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_opt_len)) {
		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_OPT_LEN];
		HWS_CALC_HDR(curr_fc,
			     misc_parameters.geneve_opt_len,
			     tunnel_header.tunnel_header_0);
		curr_fc->bit_mask = __mlx5_mask(header_geneve, opt_len);
		curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, opt_len);
	}

	if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_protocol_type)) {
		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_PROTO];
		HWS_CALC_HDR(curr_fc,
			     misc_parameters.geneve_protocol_type,
			     tunnel_header.tunnel_header_0);
		curr_fc->bit_mask = __mlx5_mask(header_geneve, protocol_type);
		curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, protocol_type);
	}

	if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_oam)) {
		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_OAM];
		HWS_CALC_HDR(curr_fc,
			     misc_parameters.geneve_oam,
			     tunnel_header.tunnel_header_0);
		curr_fc->bit_mask = __mlx5_mask(header_geneve, o_flag);
		curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, o_flag);
	}

	HWS_SET_HDR(fc, match_param, SOURCE_QP,
		    misc_parameters.source_sqn, source_qp_gvmi.source_qp);
	HWS_SET_HDR(fc, match_param, IPV6_FLOW_LABEL_O,
		    misc_parameters.outer_ipv6_flow_label, eth_l3_outer.flow_label);
	HWS_SET_HDR(fc, match_param, IPV6_FLOW_LABEL_I,
		    misc_parameters.inner_ipv6_flow_label, eth_l3_inner.flow_label);

	/* L2 Second VLAN */
	HWS_SET_HDR(fc, match_param, VLAN_SECOND_PRIO_O,
		    misc_parameters.outer_second_prio, eth_l2_outer.second_priority);
	HWS_SET_HDR(fc, match_param, VLAN_SECOND_PRIO_I,
		    misc_parameters.inner_second_prio, eth_l2_inner.second_priority);
	HWS_SET_HDR(fc, match_param, VLAN_SECOND_CFI_O,
		    misc_parameters.outer_second_cfi, eth_l2_outer.second_cfi);
	HWS_SET_HDR(fc, match_param, VLAN_SECOND_CFI_I,
		    misc_parameters.inner_second_cfi, eth_l2_inner.second_cfi);
	HWS_SET_HDR(fc, match_param, VLAN_SECOND_ID_O,
		    misc_parameters.outer_second_vid, eth_l2_outer.second_vlan_id);
	HWS_SET_HDR(fc, match_param, VLAN_SECOND_ID_I,
		    misc_parameters.inner_second_vid, eth_l2_inner.second_vlan_id);

	/* L2 Second CVLAN and SVLAN */
	if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_cvlan_tag) ||
	    HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_svlan_tag)) {
		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_O];
		HWS_CALC_HDR_DST(curr_fc, eth_l2_outer.second_vlan_qualifier);
		curr_fc->tag_set = &hws_definer_outer_second_vlan_type_set;
		curr_fc->tag_mask_set = &hws_definer_ones_set;
	}

	if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_cvlan_tag) ||
	    HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_svlan_tag)) {
		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_I];
		HWS_CALC_HDR_DST(curr_fc, eth_l2_inner.second_vlan_qualifier);
		curr_fc->tag_set = &hws_definer_inner_second_vlan_type_set;
		curr_fc->tag_mask_set = &hws_definer_ones_set;
	}

	/* VXLAN VNI */
	if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.vxlan_vni)) {
		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN;
		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_VNI];
		HWS_CALC_HDR(curr_fc, misc_parameters.vxlan_vni, tunnel_header.tunnel_header_1);
		curr_fc->bit_mask = __mlx5_mask(header_vxlan, vni);
		curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan, vni);
	}

	/* Flex protocol steering ok bits */
	if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.geneve_tlv_option_0_exist)) {
		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;

		if (!caps->flex_parser_ok_bits_supp) {
			mlx5hws_err(cd->ctx, "Unsupported flex_parser_ok_bits_supp capability\n");
			return -EOPNOTSUPP;
		}

		curr_fc = hws_definer_flex_parser_steering_ok_bits_handler(
				cd, caps->flex_parser_id_geneve_tlv_option_0);
		if (!curr_fc)
			return -EINVAL;

		/* Fixed: set the source offsets on the field copy returned by
		 * the handler, not on the base of the fc array.
		 */
		HWS_CALC_HDR_SRC(curr_fc, misc_parameters.geneve_tlv_option_0_exist);
	}

	if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.source_port)) {
		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_SOURCE_GVMI];
		HWS_CALC_HDR_DST(curr_fc, source_qp_gvmi.source_gvmi);
		curr_fc->tag_mask_set = &hws_definer_ones_set;
		curr_fc->tag_set = HWS_IS_FLD_SET(match_param,
						  misc_parameters.source_eswitch_owner_vhca_id) ?
				   &hws_definer_set_source_gvmi_vhca_id :
				   &hws_definer_set_source_gvmi;
	} else {
		if (HWS_IS_FLD_SET(match_param, misc_parameters.source_eswitch_owner_vhca_id)) {
			mlx5hws_err(cd->ctx,
				    "Unsupported source_eswitch_owner_vhca_id field usage\n");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

static int
hws_definer_conv_misc2(struct mlx5hws_definer_conv_data *cd,
		       u32 *match_param)
{
	struct mlx5hws_cmd_query_caps *caps = cd->ctx->caps;
	struct mlx5hws_definer_fc *fc = cd->fc;
	struct mlx5hws_definer_fc *curr_fc;

	if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1a0, 0x8) ||
	    HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1b8, 0x8) ||
	    HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1c0, 0x40) ||
	    HWS_IS_FLD_SET(match_param, misc_parameters_2.macsec_syndrome) ||
	    HWS_IS_FLD_SET(match_param, misc_parameters_2.ipsec_syndrome)) {
		mlx5hws_err(cd->ctx, "Unsupported misc2 parameters set\n");
		return -EINVAL;
	}

	HWS_SET_HDR(fc, match_param, MPLS0_O,
		    misc_parameters_2.outer_first_mpls, mpls_outer.mpls0_label);
	HWS_SET_HDR(fc, match_param, MPLS0_I,
		    misc_parameters_2.inner_first_mpls, mpls_inner.mpls0_label);
	HWS_SET_HDR(fc, match_param, REG_0,
		    misc_parameters_2.metadata_reg_c_0, registers.register_c_0);
	HWS_SET_HDR(fc, match_param, REG_1,
		    misc_parameters_2.metadata_reg_c_1, registers.register_c_1);
	HWS_SET_HDR(fc, match_param, REG_2,
		    misc_parameters_2.metadata_reg_c_2, registers.register_c_2);
	HWS_SET_HDR(fc, match_param, REG_3,
		    misc_parameters_2.metadata_reg_c_3, registers.register_c_3);
	HWS_SET_HDR(fc, match_param, REG_4,
		    misc_parameters_2.metadata_reg_c_4, registers.register_c_4);
	HWS_SET_HDR(fc, match_param, REG_5,
		    misc_parameters_2.metadata_reg_c_5, registers.register_c_5);
	HWS_SET_HDR(fc, match_param, REG_6,
		    misc_parameters_2.metadata_reg_c_6, registers.register_c_6);
	HWS_SET_HDR(fc, match_param, REG_7,
		    misc_parameters_2.metadata_reg_c_7, registers.register_c_7);
	HWS_SET_HDR(fc, match_param, REG_A,
		    misc_parameters_2.metadata_reg_a, metadata.general_purpose);

	if (HWS_IS_FLD_SET(match_param, misc_parameters_2.outer_first_mpls_over_gre)) {
		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_GRE;

		if (!(caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED)) {
			mlx5hws_err(cd->ctx, "Unsupported misc2 first mpls over gre parameters set\n");
			return -EOPNOTSUPP;
		}

		curr_fc = hws_definer_flex_parser_handler(cd, caps->flex_parser_id_mpls_over_gre);
		if (!curr_fc)
			return -EINVAL;

		/* Fixed: apply the source offsets to the handler's field copy */
		HWS_CALC_HDR_SRC(curr_fc, misc_parameters_2.outer_first_mpls_over_gre);
	}

	if (HWS_IS_FLD_SET(match_param, misc_parameters_2.outer_first_mpls_over_udp)) {
		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_UDP;

		if (!(caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)) {
			mlx5hws_err(cd->ctx, "Unsupported misc2 first mpls over udp parameters set\n");
			return -EOPNOTSUPP;
		}

		curr_fc = hws_definer_flex_parser_handler(cd, caps->flex_parser_id_mpls_over_udp);
		if (!curr_fc)
			return -EINVAL;

		/* Fixed: apply the source offsets to the handler's field copy */
		HWS_CALC_HDR_SRC(curr_fc, misc_parameters_2.outer_first_mpls_over_udp);
	}

	return 0;
}

static int
hws_definer_conv_misc3(struct mlx5hws_definer_conv_data *cd, u32 *match_param)
{
	struct mlx5hws_cmd_query_caps *caps = cd->ctx->caps;
	struct mlx5hws_definer_fc *fc = cd->fc;
	struct mlx5hws_definer_fc *curr_fc;
	bool vxlan_gpe_flex_parser_enabled;

	/* Check reserved and unsupported fields */
	if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_80, 0x8) ||
	    HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_b0, 0x10) ||
	    HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_170, 0x10) ||
	    HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_1e0, 0x20)) {
		mlx5hws_err(cd->ctx, "Unsupported misc3 parameters set\n");
		return -EINVAL;
	}

	if (HWS_IS_FLD_SET(match_param, misc_parameters_3.inner_tcp_seq_num) ||
	    HWS_IS_FLD_SET(match_param, misc_parameters_3.inner_tcp_ack_num)) {
		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TCP_I;
		HWS_SET_HDR(fc, match_param, TCP_SEQ_NUM,
			    misc_parameters_3.inner_tcp_seq_num, tcp_icmp.tcp_seq);
		HWS_SET_HDR(fc, match_param, TCP_ACK_NUM,
			    misc_parameters_3.inner_tcp_ack_num, tcp_icmp.tcp_ack);
	}

	if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_tcp_seq_num) ||
	    HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_tcp_ack_num)) {
		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TCP_O;
		HWS_SET_HDR(fc, match_param, TCP_SEQ_NUM,
			    misc_parameters_3.outer_tcp_seq_num, tcp_icmp.tcp_seq);
		HWS_SET_HDR(fc, match_param, TCP_ACK_NUM,
			    misc_parameters_3.outer_tcp_ack_num, tcp_icmp.tcp_ack);
	}

	vxlan_gpe_flex_parser_enabled = caps->flex_protocols & MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED;

	if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_vxlan_gpe_vni)) {
		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE;

		if (!vxlan_gpe_flex_parser_enabled) {
			mlx5hws_err(cd->ctx, "Unsupported VXLAN GPE flex parser\n");
			return -EOPNOTSUPP;
		}

		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_GPE_VNI];
		HWS_CALC_HDR(curr_fc, misc_parameters_3.outer_vxlan_gpe_vni,
			     tunnel_header.tunnel_header_1);
		curr_fc->bit_mask = __mlx5_mask(header_vxlan_gpe, vni);
		curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, vni);
	}

	if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_vxlan_gpe_next_protocol)) {
		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE;

		if (!vxlan_gpe_flex_parser_enabled) {
			mlx5hws_err(cd->ctx, "Unsupported VXLAN GPE flex parser\n");
			return -EOPNOTSUPP;
		}

		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_GPE_PROTO];
		HWS_CALC_HDR(curr_fc, misc_parameters_3.outer_vxlan_gpe_next_protocol,
			     tunnel_header.tunnel_header_0);
		curr_fc->byte_off += MLX5_BYTE_OFF(header_vxlan_gpe, protocol);
		curr_fc->bit_mask = __mlx5_mask(header_vxlan_gpe, protocol);
		curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, protocol);
	}

	if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_vxlan_gpe_flags)) {
		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE;

		if (!vxlan_gpe_flex_parser_enabled) {
			mlx5hws_err(cd->ctx, "Unsupported VXLAN GPE flex parser\n");
			return -EOPNOTSUPP;
		}

		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_GPE_FLAGS];
		HWS_CALC_HDR(curr_fc, misc_parameters_3.outer_vxlan_gpe_flags,
			     tunnel_header.tunnel_header_0);
		curr_fc->bit_mask = __mlx5_mask(header_vxlan_gpe, flags);
		curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, flags);
	}

	if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_header_data) ||
	    HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_type) ||
	    HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_code)) {
		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_ICMPV4;

		if (!(caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED)) {
			mlx5hws_err(cd->ctx, "Unsupported ICMPv4 flex parser\n");
			return -EOPNOTSUPP;
		}

		HWS_SET_HDR(fc, match_param, ICMP_DW3,
			    misc_parameters_3.icmp_header_data, tcp_icmp.icmp_dw3);

		if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_type) ||
		    HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_code)) {
			curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ICMP_DW1];
			HWS_CALC_HDR_DST(curr_fc, tcp_icmp.icmp_dw1);
			curr_fc->tag_set = &hws_definer_icmp_dw1_set;
		}
	}

	if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_header_data) ||
	    HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_type) ||
	    HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_code)) {
		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_ICMPV6;

		if (!(caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED)) {
			mlx5hws_err(cd->ctx, "Unsupported ICMPv6 flex parser\n");
			return -EOPNOTSUPP;
		}

		HWS_SET_HDR(fc, match_param, ICMP_DW3,
			    misc_parameters_3.icmpv6_header_data, tcp_icmp.icmp_dw3);

		if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_type) ||
		    HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_code)) {
			curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ICMP_DW1];
			HWS_CALC_HDR_DST(curr_fc, tcp_icmp.icmp_dw1);
			curr_fc->tag_set = &hws_definer_icmpv6_dw1_set;
		}
	}

	if (HWS_IS_FLD_SET(match_param, misc_parameters_3.geneve_tlv_option_0_data)) {
		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;

		curr_fc =
			hws_definer_flex_parser_handler(cd,
							caps->flex_parser_id_geneve_tlv_option_0);
		if (!curr_fc)
			return -EINVAL;

		/* Fixed: apply the source offsets to the handler's field copy */
		HWS_CALC_HDR_SRC(curr_fc, misc_parameters_3.geneve_tlv_option_0_data);
	}

	/* Fixed below: use curr_fc for the GTPU field copies instead of
	 * reassigning fc, which is the base of the field-copy array and is
	 * still indexed by the later GTPU DW blocks.
	 */
	if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_teid)) {
		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;

		if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_TEID_ENABLED)) {
			mlx5hws_err(cd->ctx, "Unsupported GTPU TEID flex parser\n");
			return -EOPNOTSUPP;
		}

		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTP_TEID];
		curr_fc->tag_set = &hws_definer_generic_set;
		curr_fc->bit_mask = __mlx5_mask(header_gtp, teid);
		curr_fc->byte_off = caps->format_select_gtpu_dw_1 * DW_SIZE;
		HWS_CALC_HDR_SRC(curr_fc, misc_parameters_3.gtpu_teid);
	}

	if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_msg_type)) {
		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;

		if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_ENABLED)) {
			mlx5hws_err(cd->ctx, "Unsupported GTPU flex parser\n");
			return -EOPNOTSUPP;
		}

		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE];
		curr_fc->tag_set = &hws_definer_generic_set;
		curr_fc->bit_mask = __mlx5_mask(header_gtp, msg_type);
		curr_fc->bit_off = __mlx5_dw_bit_off(header_gtp, msg_type);
		curr_fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
		HWS_CALC_HDR_SRC(curr_fc, misc_parameters_3.gtpu_msg_type);
	}

	if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_msg_flags)) {
		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;

		if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_ENABLED)) {
			mlx5hws_err(cd->ctx, "Unsupported GTPU flex parser\n");
			return -EOPNOTSUPP;
		}

		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE];
		curr_fc->tag_set = &hws_definer_generic_set;
		curr_fc->bit_mask = __mlx5_mask(header_gtp, msg_flags);
		curr_fc->bit_off = __mlx5_dw_bit_off(header_gtp, msg_flags);
		curr_fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
		HWS_CALC_HDR_SRC(curr_fc, misc_parameters_3.gtpu_msg_flags);
	}

	if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_dw_2)) {
		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;

		if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED)) {
			mlx5hws_err(cd->ctx, "Unsupported GTPU DW2 flex parser\n");
			return -EOPNOTSUPP;
		}

		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTPU_DW2];
		curr_fc->tag_set = &hws_definer_generic_set;
		curr_fc->bit_mask = -1;
		curr_fc->byte_off = caps->format_select_gtpu_dw_2 * DW_SIZE;
		HWS_CALC_HDR_SRC(curr_fc, misc_parameters_3.gtpu_dw_2);
	}

	if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_first_ext_dw_0)) {
		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;

		if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED)) {
			mlx5hws_err(cd->ctx, "Unsupported GTPU first EXT DW0 flex parser\n");
			return -EOPNOTSUPP;
		}

		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTPU_FIRST_EXT_DW0];
		curr_fc->tag_set = &hws_definer_generic_set;
		curr_fc->bit_mask = -1;
		curr_fc->byte_off = caps->format_select_gtpu_ext_dw_0 * DW_SIZE;
		HWS_CALC_HDR_SRC(curr_fc, misc_parameters_3.gtpu_first_ext_dw_0);
	}

	if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_dw_0)) {
		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;

		if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED)) {
			mlx5hws_err(cd->ctx, "Unsupported GTPU DW0 flex parser\n");
			return -EOPNOTSUPP;
		}

		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTPU_DW0];
		curr_fc->tag_set = &hws_definer_generic_set;
		curr_fc->bit_mask = -1;
		curr_fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
		HWS_CALC_HDR_SRC(curr_fc, misc_parameters_3.gtpu_dw_0);
	}

	return 0;
}

static int
hws_definer_conv_misc4(struct mlx5hws_definer_conv_data *cd,
		       u32 *match_param)
{
	bool parser_is_used[HWS_NUM_OF_FLEX_PARSERS] = {};
	struct mlx5hws_definer_fc *fc;
	u32 id, value;

	if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_4.reserved_at_100, 0x100)) {
		mlx5hws_err(cd->ctx, "Unsupported misc4 parameters set\n");
		return -EINVAL;
	}

	id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_0);
	value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_0);
	fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
	if (!fc)
		return -EINVAL;

	HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_0);

	id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_1);
	value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_1);
	fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
	if (!fc)
		return -EINVAL;

	HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_1);

	id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_2);
	value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_2);
	fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
	if (!fc)
		return -EINVAL;

	HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_2);

	id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_3);
	value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_3);
	fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
	if (!fc)
		return -EINVAL;

	HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_3);

	return 0;
}

static int
hws_definer_conv_misc5(struct mlx5hws_definer_conv_data *cd,
		       u32 *match_param)
{
	struct mlx5hws_definer_fc *fc = cd->fc;

	if (HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_0) ||
	    HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_1) ||
	    HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_2) ||
	    HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_3) ||
	    HWS_IS_FLD_SET_SZ(match_param, misc_parameters_5.reserved_at_100, 0x100)) {
		mlx5hws_err(cd->ctx, "Unsupported misc5 parameters set\n");
		return -EINVAL;
	}

	if (HWS_IS_FLD_SET(match_param, misc_parameters_5.tunnel_header_0)) {
		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1;
		HWS_SET_HDR(fc, match_param, TNL_HDR_0,
			    misc_parameters_5.tunnel_header_0, tunnel_header.tunnel_header_0);
	}

	if (HWS_IS_FLD_SET(match_param, misc_parameters_5.tunnel_header_1)) {
		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1;
		HWS_SET_HDR(fc, match_param, TNL_HDR_1,
			    misc_parameters_5.tunnel_header_1, tunnel_header.tunnel_header_1);
	}

	if (HWS_IS_FLD_SET(match_param, misc_parameters_5.tunnel_header_2)) {
		cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_2;
		HWS_SET_HDR(fc, match_param, TNL_HDR_2,
			    misc_parameters_5.tunnel_header_2, tunnel_header.tunnel_header_2);
	}

	HWS_SET_HDR(fc, match_param, TNL_HDR_3,
		    misc_parameters_5.tunnel_header_3, tunnel_header.tunnel_header_3);

	return 0;
}

static int hws_definer_get_fc_size(struct mlx5hws_definer_fc *fc)
{
	u32 fc_sz = 0;
	int i;

	/* For empty matcher, ZERO_SIZE_PTR is returned */
	if (fc == ZERO_SIZE_PTR)
		return 0;

	for (i = 0; i < MLX5HWS_DEFINER_FNAME_MAX; i++)
		if (fc[i].tag_set)
			fc_sz++;
	return fc_sz;
}

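/*
 * Pack only the fields that have a tag_set callback into a dense array;
 * the match template keeps this compressed copy instead of the sparse
 * MLX5HWS_DEFINER_FNAME_MAX-sized working array.
 */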
static struct mlx5hws_definer_fc *
hws_definer_alloc_compressed_fc(struct mlx5hws_definer_fc *fc)
{
	struct mlx5hws_definer_fc *compressed_fc = NULL;
	u32 definer_size = hws_definer_get_fc_size(fc);
	u32 fc_sz = 0;
	int i;

	compressed_fc = kcalloc(definer_size, sizeof(*compressed_fc), GFP_KERNEL);
	if (!compressed_fc)
		return NULL;

	/* For empty matcher, ZERO_SIZE_PTR is returned */
	if (!definer_size)
		return compressed_fc;

	for (i = 0, fc_sz = 0; i < MLX5HWS_DEFINER_FNAME_MAX; i++) {
		if (!fc[i].tag_set)
			continue;

		fc[i].fname = i;
		memcpy(&compressed_fc[fc_sz++], &fc[i], sizeof(*compressed_fc));
	}

	return compressed_fc;
}

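/*
 * Mark every matched field in the header layout by writing an all-ones
 * pattern (-1) through the field's byte/bit offset and mask.  The
 * resulting bitmap of used bits is what the selector search below fits
 * into a definer.
 */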
static void
hws_definer_set_hl(u8 *hl, struct mlx5hws_definer_fc *fc)
{
	int i;

	/* Nothing to do for empty matcher */
	if (fc == ZERO_SIZE_PTR)
		return;

	for (i = 0; i < MLX5HWS_DEFINER_FNAME_MAX; i++) {
		if (!fc[i].tag_set)
			continue;

		HWS_SET32(hl, -1, fc[i].byte_off, fc[i].bit_off, fc[i].bit_mask);
	}
}

static struct mlx5hws_definer_fc *
hws_definer_alloc_fc(struct mlx5hws_context *ctx,
		     size_t len)
{
	struct mlx5hws_definer_fc *fc;
	int i;

	fc = kcalloc(len, sizeof(*fc), GFP_KERNEL);
	if (!fc)
		return NULL;

	for (i = 0; i < len; i++)
		fc[i].ctx = ctx;

	return fc;
}

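/*
 * Convert a match template into the union header layout: walk every
 * enabled match criteria group, let the per-group conversion handlers
 * populate the fc array, verify that no conflicting fields were set,
 * then store a compressed copy of the fc array on the template and
 * mark the used bits in hl.
 */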
static int
hws_definer_conv_match_params_to_hl(struct mlx5hws_context *ctx,
				    struct mlx5hws_match_template *mt,
				    u8 *hl)
{
	struct mlx5hws_definer_conv_data cd = {0};
	struct mlx5hws_definer_fc *fc;
	int ret;

	fc = hws_definer_alloc_fc(ctx, MLX5HWS_DEFINER_FNAME_MAX);
	if (!fc)
		return -ENOMEM;

	cd.fc = fc;
	cd.ctx = ctx;

	if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC6) {
		mlx5hws_err(ctx, "Unsupported match_criteria_enable provided\n");
		ret = -EOPNOTSUPP;
		goto err_free_fc;
	}

	if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_OUTER) {
		ret = hws_definer_conv_outer(&cd, mt->match_param);
		if (ret)
			goto err_free_fc;
	}

	if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_INNER) {
		ret = hws_definer_conv_inner(&cd, mt->match_param);
		if (ret)
			goto err_free_fc;
	}

	if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC) {
		ret = hws_definer_conv_misc(&cd, mt->match_param);
		if (ret)
			goto err_free_fc;
	}

	if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2) {
		ret = hws_definer_conv_misc2(&cd, mt->match_param);
		if (ret)
			goto err_free_fc;
	}

	if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC3) {
		ret = hws_definer_conv_misc3(&cd, mt->match_param);
		if (ret)
			goto err_free_fc;
	}

	if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC4) {
		ret = hws_definer_conv_misc4(&cd, mt->match_param);
		if (ret)
			goto err_free_fc;
	}

	if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC5) {
		ret = hws_definer_conv_misc5(&cd, mt->match_param);
		if (ret)
			goto err_free_fc;
	}

	/* Check that no conflicting fields are set together */
	ret = hws_definer_check_match_flags(&cd);
	if (ret)
		goto err_free_fc;

	/* Allocate the compressed fc array on the match template */
	mt->fc = hws_definer_alloc_compressed_fc(fc);
	if (!mt->fc) {
		mlx5hws_err(ctx,
			    "Convert match params: failed to set field copy to match template\n");
		ret = -ENOMEM;
		goto err_free_fc;
	}
	mt->fc_sz = hws_definer_get_fc_size(fc);

	/* Fill in headers layout */
	hws_definer_set_hl(hl, fc);

	kfree(fc);
	return 0;

err_free_fc:
	kfree(fc);
	return ret;
}

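/*
 * Exported variant of the conversion above: build a compressed fc array
 * directly from raw match parameters instead of a match template, and
 * report the number of entries via fc_sz.  Unlike the template path,
 * the result is returned to the caller rather than stored, and the
 * sparse fc array is freed on both success and failure.
 */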
struct mlx5hws_definer_fc *
mlx5hws_definer_conv_match_params_to_compressed_fc(struct mlx5hws_context *ctx,
						   u8 match_criteria_enable,
						   u32 *match_param,
						   int *fc_sz)
{
	struct mlx5hws_definer_fc *compressed_fc = NULL;
	struct mlx5hws_definer_conv_data cd = {0};
	struct mlx5hws_definer_fc *fc;
	int ret;

	fc = hws_definer_alloc_fc(ctx, MLX5HWS_DEFINER_FNAME_MAX);
	if (!fc)
		return NULL;

	cd.fc = fc;
	cd.ctx = ctx;

	if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_OUTER) {
		ret = hws_definer_conv_outer(&cd, match_param);
		if (ret)
			goto err_free_fc;
	}

	if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_INNER) {
		ret = hws_definer_conv_inner(&cd, match_param);
		if (ret)
			goto err_free_fc;
	}

	if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC) {
		ret = hws_definer_conv_misc(&cd, match_param);
		if (ret)
			goto err_free_fc;
	}

	if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2) {
		ret = hws_definer_conv_misc2(&cd, match_param);
		if (ret)
			goto err_free_fc;
	}

	if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC3) {
		ret = hws_definer_conv_misc3(&cd, match_param);
		if (ret)
			goto err_free_fc;
	}

	if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC4) {
		ret = hws_definer_conv_misc4(&cd, match_param);
		if (ret)
			goto err_free_fc;
	}

	if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC5) {
		ret = hws_definer_conv_misc5(&cd, match_param);
		if (ret)
			goto err_free_fc;
	}

	/* Allocate the compressed fc array */
	compressed_fc = hws_definer_alloc_compressed_fc(fc);
	if (!compressed_fc) {
		mlx5hws_err(ctx,
			    "Convert to compressed fc: failed to set field copy to match template\n");
		goto err_free_fc;
	}
	*fc_sz = hws_definer_get_fc_size(fc);

err_free_fc:
	kfree(fc);
	return compressed_fc;
}

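/*
 * Map a byte offset in the header layout to the byte offset of the same
 * data inside the match tag.  DW selectors occupy the start of the tag
 * in reverse selector order, followed by the byte selectors, also in
 * reverse order.
 *
 * Illustrative example (hypothetical values): with DW_SIZE == 4 and
 * hl_byte_off == 18, the data sits in hl DW 4 (18 / 4) at byte 2 within
 * that DW (18 % 4).  If dw_selector[1] == 4, the resulting tag byte
 * offset is 2 + 4 * (DW_SELECTORS - 1 - 1).
 */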
static int
hws_definer_find_byte_in_tag(struct mlx5hws_definer *definer,
			     u32 hl_byte_off,
			     u32 *tag_byte_off)
{
	int i, dw_to_scan;
	u8 byte_offset;

	/* Avoid accessing unused DW selectors */
	dw_to_scan = mlx5hws_definer_is_jumbo(definer) ?
		DW_SELECTORS : DW_SELECTORS_MATCH;

	/* Add offset since each DW covers multiple bytes */
	byte_offset = hl_byte_off % DW_SIZE;
	for (i = 0; i < dw_to_scan; i++) {
		if (definer->dw_selector[i] == hl_byte_off / DW_SIZE) {
			*tag_byte_off = byte_offset + DW_SIZE * (DW_SELECTORS - i - 1);
			return 0;
		}
	}

	/* Add offset to skip DWs in definer */
	byte_offset = DW_SIZE * DW_SELECTORS;
	/* Iterate in reverse since the code uses bytes from 7 -> 0 */
	for (i = BYTE_SELECTORS; i-- > 0;) {
		if (definer->byte_selector[i] == hl_byte_off) {
			*tag_byte_off = byte_offset + (BYTE_SELECTORS - i - 1);
			return 0;
		}
	}

	return -EINVAL;
}

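/*
 * Bind the field copy entries to a concrete definer: translate each
 * entry's header-layout byte offset into its tag offset, and shift the
 * bit offset to account for the byte position changing within the DW.
 */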
static int
hws_definer_fc_bind(struct mlx5hws_definer *definer,
		    struct mlx5hws_definer_fc *fc,
		    u32 fc_sz)
{
	u32 tag_offset = 0;
	int ret, byte_diff;
	u32 i;

	for (i = 0; i < fc_sz; i++) {
		/* Map header layout byte offset to byte offset in tag */
		ret = hws_definer_find_byte_in_tag(definer, fc->byte_off, &tag_offset);
		if (ret)
			return ret;

		/* Move setter based on the location in the definer */
		byte_diff = fc->byte_off % DW_SIZE - tag_offset % DW_SIZE;
		fc->bit_off = fc->bit_off + byte_diff * BITS_IN_BYTE;

		/* Update offset in headers layout to offset in tag */
		fc->byte_off = tag_offset;
		fc++;
	}

	return 0;
}

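/*
 * Recursive backtracking search that assigns selectors to every DW of
 * the header layout that has bits set.  For each such DW it tries, in
 * order: a limited DW selector (only valid for the first 64 DWs, hence
 * the cur_dw < 64 check), a full DW selector, and finally per-byte
 * selectors for the individual bytes that are set.  Each choice
 * recurses into the next DW and is undone if the remainder cannot be
 * fitted, so the first complete assignment found is kept in ctrl.
 */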
static bool
hws_definer_best_hl_fit_recu(struct mlx5hws_definer_sel_ctrl *ctrl,
			     u32 cur_dw,
			     u32 *data)
{
	u8 bytes_set;
	int byte_idx;
	bool ret;
	int i;

	/* Reached the end, nothing left to do */
	if (cur_dw == MLX5_ST_SZ_DW(definer_hl))
		return true;

	/* No data set, skip to the next DW */
	while (!*data) {
		cur_dw++;
		data++;

		/* Reached the end, nothing left to do */
		if (cur_dw == MLX5_ST_SZ_DW(definer_hl))
			return true;
	}

	/* Used all DW selectors and byte selectors, no possible solution */
	if (ctrl->allowed_full_dw == ctrl->used_full_dw &&
	    ctrl->allowed_lim_dw == ctrl->used_lim_dw &&
	    ctrl->allowed_bytes == ctrl->used_bytes)
		return false;

	/* Try to use limited DW selectors */
	if (ctrl->allowed_lim_dw > ctrl->used_lim_dw && cur_dw < 64) {
		ctrl->lim_dw_selector[ctrl->used_lim_dw++] = cur_dw;

		ret = hws_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
		if (ret)
			return ret;

		ctrl->lim_dw_selector[--ctrl->used_lim_dw] = 0;
	}

	/* Try to use full DW selectors */
	if (ctrl->allowed_full_dw > ctrl->used_full_dw) {
		ctrl->full_dw_selector[ctrl->used_full_dw++] = cur_dw;

		ret = hws_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
		if (ret)
			return ret;

		ctrl->full_dw_selector[--ctrl->used_full_dw] = 0;
	}

	/* No byte selector for offsets bigger than 255 */
	if (cur_dw * DW_SIZE > 255)
		return false;

	bytes_set = !!(0x000000ff & *data) +
		    !!(0x0000ff00 & *data) +
		    !!(0x00ff0000 & *data) +
		    !!(0xff000000 & *data);

	/* Check if there are enough byte selectors left */
	if (bytes_set + ctrl->used_bytes > ctrl->allowed_bytes)
		return false;

	/* Try to use byte selectors */
	for (i = 0; i < DW_SIZE; i++)
		if ((0xff000000 >> (i * BITS_IN_BYTE)) & be32_to_cpu((__force __be32)*data)) {
			/* Use byte selectors high to low */
			byte_idx = ctrl->allowed_bytes - ctrl->used_bytes - 1;
			ctrl->byte_selector[byte_idx] = cur_dw * DW_SIZE + i;
			ctrl->used_bytes++;
		}

	ret = hws_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
	if (ret)
		return ret;

	/* Undo the byte selector assignments before backtracking */
	for (i = 0; i < DW_SIZE; i++)
		if ((0xff << (i * BITS_IN_BYTE)) & be32_to_cpu((__force __be32)*data)) {
			ctrl->used_bytes--;
			byte_idx = ctrl->allowed_bytes - ctrl->used_bytes - 1;
			ctrl->byte_selector[byte_idx] = 0;
		}

	return false;
}

static void
hws_definer_copy_sel_ctrl(struct mlx5hws_definer_sel_ctrl *ctrl,
			  struct mlx5hws_definer *definer)
{
	memcpy(definer->byte_selector, ctrl->byte_selector, ctrl->allowed_bytes);
	memcpy(definer->dw_selector, ctrl->full_dw_selector, ctrl->allowed_full_dw);
	memcpy(definer->dw_selector + ctrl->allowed_full_dw,
	       ctrl->lim_dw_selector, ctrl->allowed_lim_dw);
}

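/*
 * Fit the header layout into a definer in two attempts: first as a
 * regular match definer (no limited DW selectors), and if that fails,
 * as a jumbo definer, using either all full DW selectors or a mix of
 * full and limited ones depending on device capabilities.  -E2BIG means
 * the matched fields do not fit in any supported definer.
 */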
static int
hws_definer_find_best_match_fit(struct mlx5hws_context *ctx,
				struct mlx5hws_definer *definer,
				u8 *hl)
{
	struct mlx5hws_definer_sel_ctrl ctrl = {0};
	bool found;

	/* Try to create a match definer */
	ctrl.allowed_full_dw = DW_SELECTORS_MATCH;
	ctrl.allowed_lim_dw = 0;
	ctrl.allowed_bytes = BYTE_SELECTORS;

	found = hws_definer_best_hl_fit_recu(&ctrl, 0, (u32 *)hl);
	if (found) {
		hws_definer_copy_sel_ctrl(&ctrl, definer);
		definer->type = MLX5HWS_DEFINER_TYPE_MATCH;
		return 0;
	}

	/* Try to create a full/limited jumbo definer */
	ctrl.allowed_full_dw = ctx->caps->full_dw_jumbo_support ? DW_SELECTORS :
								  DW_SELECTORS_MATCH;
	ctrl.allowed_lim_dw = ctx->caps->full_dw_jumbo_support ? 0 :
								 DW_SELECTORS_LIMITED;
	ctrl.allowed_bytes = BYTE_SELECTORS;

	found = hws_definer_best_hl_fit_recu(&ctrl, 0, (u32 *)hl);
	if (found) {
		hws_definer_copy_sel_ctrl(&ctrl, definer);
		definer->type = MLX5HWS_DEFINER_TYPE_JUMBO;
		return 0;
	}

	return -E2BIG;
}

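/*
 * Build the mask tag used when creating the definer object: entries
 * that provide a dedicated tag_mask_set callback use it, the rest fall
 * back to their regular tag_set callback.
 */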
static void
hws_definer_create_tag_mask(u32 *match_param,
			    struct mlx5hws_definer_fc *fc,
			    u32 fc_sz,
			    u8 *tag)
{
	u32 i;

	for (i = 0; i < fc_sz; i++) {
		if (fc->tag_mask_set)
			fc->tag_mask_set(fc, match_param, tag);
		else
			fc->tag_set(fc, match_param, tag);
		fc++;
	}
}

void mlx5hws_definer_create_tag(u32 *match_param,
				struct mlx5hws_definer_fc *fc,
				u32 fc_sz,
				u8 *tag)
{
	u32 i;

	for (i = 0; i < fc_sz; i++) {
		fc->tag_set(fc, match_param, tag);
		fc++;
	}
}

int mlx5hws_definer_get_id(struct mlx5hws_definer *definer)
{
	return definer->obj_id;
}

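/*
 * Compare two definers by their selectors and mask tag.  Returns 0 if
 * they are interchangeable, non-zero otherwise; this is what allows the
 * definer cache below to reuse an existing object.
 */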
int mlx5hws_definer_compare(struct mlx5hws_definer *definer_a,
			    struct mlx5hws_definer *definer_b)
{
	int i;

	/* Future: Optimize by comparing selectors with valid mask only */
	for (i = 0; i < BYTE_SELECTORS; i++)
		if (definer_a->byte_selector[i] != definer_b->byte_selector[i])
			return 1;

	for (i = 0; i < DW_SELECTORS; i++)
		if (definer_a->dw_selector[i] != definer_b->dw_selector[i])
			return 1;

	for (i = 0; i < MLX5HWS_JUMBO_TAG_SZ; i++)
		if (definer_a->mask.jumbo[i] != definer_b->mask.jumbo[i])
			return 1;

	return 0;
}

int
mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx,
			    struct mlx5hws_match_template *mt,
			    struct mlx5hws_definer *match_definer)
{
	u8 *match_hl;
	int ret;

	/* Union header-layout (hl) is used for creating a single definer
	 * field layout used with different bitmasks for hash and match.
	 */
	match_hl = kzalloc(MLX5_ST_SZ_BYTES(definer_hl), GFP_KERNEL);
	if (!match_hl)
		return -ENOMEM;

	/* Convert all mt items to header layout (hl)
	 * and allocate the match and range field copy array (fc & fcr).
	 */
	ret = hws_definer_conv_match_params_to_hl(ctx, mt, match_hl);
	if (ret) {
		mlx5hws_err(ctx, "Failed to convert items to header layout\n");
		goto free_match_hl;
	}

	/* Find the match definer layout for the header layout match union */
	ret = hws_definer_find_best_match_fit(ctx, match_definer, match_hl);
	if (ret) {
		if (ret == -E2BIG)
			mlx5hws_dbg(ctx,
				    "Failed to create match definer from header layout - E2BIG\n");
		else
			mlx5hws_err(ctx,
				    "Failed to create match definer from header layout (%d)\n",
				    ret);
		goto free_fc;
	}

	kfree(match_hl);
	return 0;

free_fc:
	kfree(mt->fc);
free_match_hl:
	kfree(match_hl);
	return ret;
}

int mlx5hws_definer_init_cache(struct mlx5hws_definer_cache **cache)
{
	struct mlx5hws_definer_cache *new_cache;

	new_cache = kzalloc(sizeof(*new_cache), GFP_KERNEL);
	if (!new_cache)
		return -ENOMEM;

	INIT_LIST_HEAD(&new_cache->list_head);
	*cache = new_cache;

	return 0;
}

void mlx5hws_definer_uninit_cache(struct mlx5hws_definer_cache *cache)
{
	kfree(cache);
}

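/*
 * Get a definer object ID, reusing a cached definer when an equal one
 * exists.  The cache is kept in LRU order: a hit moves the entry to the
 * head of the list and takes a reference; a miss creates a new firmware
 * object and inserts it with refcount 1.  Returns the object ID, or -1
 * on failure.
 */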
int mlx5hws_definer_get_obj(struct mlx5hws_context *ctx,
			    struct mlx5hws_definer *definer)
{
	struct mlx5hws_definer_cache *cache = ctx->definer_cache;
	struct mlx5hws_cmd_definer_create_attr def_attr = {0};
	struct mlx5hws_definer_cache_item *cached_definer;
	u32 obj_id;
	int ret;

	/* Search definer cache for requested definer */
	list_for_each_entry(cached_definer, &cache->list_head, list_node) {
		if (mlx5hws_definer_compare(&cached_definer->definer, definer))
			continue;

		/* Reuse definer and set LRU (move to be first in the list) */
		list_move(&cached_definer->list_node, &cache->list_head);
		cached_definer->refcount++;
		return cached_definer->definer.obj_id;
	}

	/* Allocate and create definer based on the bitmask tag */
	def_attr.match_mask = definer->mask.jumbo;
	def_attr.dw_selector = definer->dw_selector;
	def_attr.byte_selector = definer->byte_selector;

	ret = mlx5hws_cmd_definer_create(ctx->mdev, &def_attr, &obj_id);
	if (ret)
		return -1;

	cached_definer = kzalloc(sizeof(*cached_definer), GFP_KERNEL);
	if (!cached_definer)
		goto free_definer_obj;

	memcpy(&cached_definer->definer, definer, sizeof(*definer));
	cached_definer->definer.obj_id = obj_id;
	cached_definer->refcount = 1;
	list_add(&cached_definer->list_node, &cache->list_head);

	return obj_id;

free_definer_obj:
	mlx5hws_cmd_definer_destroy(ctx->mdev, obj_id);
	return -1;
}

static void
hws_definer_put_obj(struct mlx5hws_context *ctx, u32 obj_id)
{
	struct mlx5hws_definer_cache_item *cached_definer;

	list_for_each_entry(cached_definer, &ctx->definer_cache->list_head, list_node) {
		if (cached_definer->definer.obj_id != obj_id)
			continue;

		/* Object found */
		if (--cached_definer->refcount)
			return;

		list_del_init(&cached_definer->list_node);
		mlx5hws_cmd_definer_destroy(ctx->mdev, cached_definer->definer.obj_id);
		kfree(cached_definer);
		return;
	}

	/* Programming error, object must be part of cache */
	pr_warn("HWS: failed putting definer object\n");
}

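/*
 * Instantiate a definer from a previously computed layout: duplicate
 * the layout, optionally rebind the field copy offsets to the tag
 * positions dictated by that layout, build the mask tag, and obtain a
 * matching object ID through the definer cache.
 */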
static struct mlx5hws_definer *
hws_definer_alloc(struct mlx5hws_context *ctx,
		  struct mlx5hws_definer_fc *fc,
		  int fc_sz,
		  u32 *match_param,
		  struct mlx5hws_definer *layout,
		  bool bind_fc)
{
	struct mlx5hws_definer *definer;
	int ret;

	definer = kmemdup(layout, sizeof(*definer), GFP_KERNEL);
	if (!definer)
		return NULL;

	/* Align field copy array based on given layout */
	if (bind_fc) {
		ret = hws_definer_fc_bind(definer, fc, fc_sz);
		if (ret) {
			mlx5hws_err(ctx, "Failed to bind field copy to definer\n");
			goto free_definer;
		}
	}

	/* Create the tag mask used for definer creation */
	hws_definer_create_tag_mask(match_param, fc, fc_sz, definer->mask.jumbo);

	ret = mlx5hws_definer_get_obj(ctx, definer);
	if (ret < 0)
		goto free_definer;

	definer->obj_id = ret;
	return definer;

free_definer:
	kfree(definer);
	return NULL;
}

void mlx5hws_definer_free(struct mlx5hws_context *ctx,
			  struct mlx5hws_definer *definer)
{
	hws_definer_put_obj(ctx, definer->obj_id);
	kfree(definer);
}

static int
hws_definer_mt_match_init(struct mlx5hws_context *ctx,
			  struct mlx5hws_match_template *mt,
			  struct mlx5hws_definer *match_layout)
{
	/* Create mandatory match definer */
	mt->definer = hws_definer_alloc(ctx,
					mt->fc,
					mt->fc_sz,
					mt->match_param,
					match_layout,
					true);
	if (!mt->definer) {
		mlx5hws_err(ctx, "Failed to create match definer\n");
		return -EINVAL;
	}

	return 0;
}

static void
hws_definer_mt_match_uninit(struct mlx5hws_context *ctx,
			    struct mlx5hws_match_template *mt)
{
	mlx5hws_definer_free(ctx, mt->definer);
}

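/*
 * Initialize the definers of a match template: compute the best-fit
 * layout for the template's match parameters, then create the match
 * definer from it.  On failure, the field copy array allocated during
 * layout calculation is released.
 */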
int mlx5hws_definer_mt_init(struct mlx5hws_context *ctx,
			    struct mlx5hws_match_template *mt)
{
	struct mlx5hws_definer match_layout = {0};
	int ret;

	ret = mlx5hws_definer_calc_layout(ctx, mt, &match_layout);
	if (ret) {
		mlx5hws_err(ctx, "Failed to calculate matcher definer layout\n");
		return ret;
	}

	/* Calculate definers needed for exact match */
	ret = hws_definer_mt_match_init(ctx, mt, &match_layout);
	if (ret) {
		mlx5hws_err(ctx, "Failed to init match definers\n");
		goto free_fc;
	}

	return 0;

free_fc:
	kfree(mt->fc);
	return ret;
}

void mlx5hws_definer_mt_uninit(struct mlx5hws_context *ctx,
			       struct mlx5hws_match_template *mt)
{
	hws_definer_mt_match_uninit(ctx, mt);
	kfree(mt->fc);
}