1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies. */
3
4 #include <linux/types.h>
5 #include <linux/crc32.h>
6 #include "dr_types.h"
7
/* Reflected CRC-32 polynomial (same one used by lib/crc32) — kept for reference */
#define DR_STE_CRC_POLY 0xEDB88320L

/* HW encodings for the STE L3/L4 protocol selector fields */
#define STE_IPV4 0x1
#define STE_IPV6 0x2
#define STE_TCP 0x1
#define STE_UDP 0x2
#define STE_SPI 0x3

/* IP version numbers as carried in the packet header */
#define IP_VERSION_IPV4 0x4
#define IP_VERSION_IPV6 0x6

/* HW encodings for the first VLAN qualifier field */
#define STE_SVLAN 0x1
#define STE_CVLAN 0x2

/* Flow tag shares the qp_list_pointer field; bit 31 enables it */
#define DR_STE_ENABLE_FLOW_TAG BIT(31)
20
/* Write @value into STE @tag field @t_fname, but only when the matcher spec
 * field @s_fname is non-zero; the spec field is then zeroed to mark it as
 * consumed by this STE builder.
 */
#define DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, value) do { \
	if ((spec)->s_fname) { \
		MLX5_SET(ste_##lookup_type, tag, t_fname, value); \
		(spec)->s_fname = 0; \
	} \
} while (0)

/* Copy spec->s_fname into tag->t_fname (match-value side) */
#define DR_STE_SET_TAG(lookup_type, tag, t_fname, spec, s_fname) \
	DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, spec->s_fname)

/* Set bit_mask->bm_fname to all-ones (-1) and mark spec->s_fname as used */
#define DR_STE_SET_MASK(lookup_type, bit_mask, bm_fname, spec, s_fname) \
	DR_STE_SET_VAL(lookup_type, bit_mask, bm_fname, spec, s_fname, -1)

/* Copy spec->s_fname into bit_mask->bm_fname and mark spec->s_fname as used */
#define DR_STE_SET_MASK_V(lookup_type, bit_mask, bm_fname, spec, s_fname) \
	DR_STE_SET_VAL(lookup_type, bit_mask, bm_fname, spec, s_fname, (spec)->s_fname)

/* Scatter the tcp_flags spec field into the per-flag STE bits; bit order
 * follows the TCP header flag layout (NS down to FIN).
 */
#define DR_STE_SET_TCP_FLAGS(lookup_type, tag, spec) do { \
	MLX5_SET(ste_##lookup_type, tag, tcp_ns, !!((spec)->tcp_flags & (1 << 8))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_cwr, !!((spec)->tcp_flags & (1 << 7))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_ece, !!((spec)->tcp_flags & (1 << 6))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_urg, !!((spec)->tcp_flags & (1 << 5))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_ack, !!((spec)->tcp_flags & (1 << 4))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_psh, !!((spec)->tcp_flags & (1 << 3))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_rst, !!((spec)->tcp_flags & (1 << 2))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_syn, !!((spec)->tcp_flags & (1 << 1))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_fin, !!((spec)->tcp_flags & (1 << 0))); \
} while (0)
52
/* Copy the four first-MPLS-label fields (label/s_bos/exp/ttl) of the inner
 * or outer header (selected by the in_out token) into the STE bit mask and
 * mark them consumed in the spec.
 */
#define DR_STE_SET_MPLS_MASK(lookup_type, mask, in_out, bit_mask) do { \
	DR_STE_SET_MASK_V(lookup_type, mask, mpls0_label, mask, \
			  in_out##_first_mpls_label);\
	DR_STE_SET_MASK_V(lookup_type, mask, mpls0_s_bos, mask, \
			  in_out##_first_mpls_s_bos); \
	DR_STE_SET_MASK_V(lookup_type, mask, mpls0_exp, mask, \
			  in_out##_first_mpls_exp); \
	DR_STE_SET_MASK_V(lookup_type, mask, mpls0_ttl, mask, \
			  in_out##_first_mpls_ttl); \
} while (0)

/* Same as DR_STE_SET_MPLS_MASK but writes the tag (match value) side */
#define DR_STE_SET_MPLS_TAG(lookup_type, mask, in_out, tag) do { \
	DR_STE_SET_TAG(lookup_type, tag, mpls0_label, mask, \
		       in_out##_first_mpls_label);\
	DR_STE_SET_TAG(lookup_type, tag, mpls0_s_bos, mask, \
		       in_out##_first_mpls_s_bos); \
	DR_STE_SET_TAG(lookup_type, tag, mpls0_exp, mask, \
		       in_out##_first_mpls_exp); \
	DR_STE_SET_TAG(lookup_type, tag, mpls0_ttl, mask, \
		       in_out##_first_mpls_ttl); \
} while (0)

/* True when any outer MPLS-over-GRE field is present in the misc2 mask */
#define DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\
	(_misc)->outer_first_mpls_over_gre_label || \
	(_misc)->outer_first_mpls_over_gre_exp || \
	(_misc)->outer_first_mpls_over_gre_s_bos || \
	(_misc)->outer_first_mpls_over_gre_ttl)

/* True when any outer MPLS-over-UDP field is present in the misc2 mask */
#define DR_STE_IS_OUTER_MPLS_OVER_UDP_SET(_misc) (\
	(_misc)->outer_first_mpls_over_udp_label || \
	(_misc)->outer_first_mpls_over_udp_exp || \
	(_misc)->outer_first_mpls_over_udp_s_bos || \
	(_misc)->outer_first_mpls_over_udp_ttl)

/* Select the lookup-type variant: _I when matching the inner header,
 * otherwise _D on the RX side and _O on the TX side.
 */
#define DR_STE_CALC_LU_TYPE(lookup_type, rx, inner) \
	((inner) ? MLX5DR_STE_LU_TYPE_##lookup_type##_I : \
		   (rx) ? MLX5DR_STE_LU_TYPE_##lookup_type##_D : \
			  MLX5DR_STE_LU_TYPE_##lookup_type##_O)
90
/* HW encodings for the STE tunneling_action field */
enum dr_ste_tunl_action {
	DR_STE_TUNL_ACTION_NONE		= 0,
	DR_STE_TUNL_ACTION_ENABLE	= 1,
	DR_STE_TUNL_ACTION_DECAP	= 2,
	DR_STE_TUNL_ACTION_L3_DECAP	= 3,
	DR_STE_TUNL_ACTION_POP_VLAN	= 4,
};

/* HW encodings for the sx_transmit action_type field */
enum dr_ste_action_type {
	DR_STE_ACTION_TYPE_PUSH_VLAN	= 1,
	DR_STE_ACTION_TYPE_ENCAP_L3	= 3,
	DR_STE_ACTION_TYPE_ENCAP	= 4,
};
104
/* In-memory layout of a full STE: control area, then the match tag,
 * then the per-table bit mask.
 */
struct dr_hw_ste_format {
	u8 ctrl[DR_STE_SIZE_CTRL];	/* header/action control area */
	u8 tag[DR_STE_SIZE_TAG];	/* match value */
	u8 mask[DR_STE_SIZE_MASK];	/* which tag bits participate in matching */
};
110
dr_ste_crc32_calc(const void * input_data,size_t length)111 static u32 dr_ste_crc32_calc(const void *input_data, size_t length)
112 {
113 u32 crc = crc32(0, input_data, length);
114
115 return (__force u32)htonl(crc);
116 }
117
mlx5dr_ste_calc_hash_index(u8 * hw_ste_p,struct mlx5dr_ste_htbl * htbl)118 u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
119 {
120 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
121 u8 masked[DR_STE_SIZE_TAG] = {};
122 u32 crc32, index;
123 u16 bit;
124 int i;
125
126 /* Don't calculate CRC if the result is predicted */
127 if (htbl->chunk->num_of_entries == 1 || htbl->byte_mask == 0)
128 return 0;
129
130 /* Mask tag using byte mask, bit per byte */
131 bit = 1 << (DR_STE_SIZE_TAG - 1);
132 for (i = 0; i < DR_STE_SIZE_TAG; i++) {
133 if (htbl->byte_mask & bit)
134 masked[i] = hw_ste->tag[i];
135
136 bit = bit >> 1;
137 }
138
139 crc32 = dr_ste_crc32_calc(masked, DR_STE_SIZE_TAG);
140 index = crc32 & (htbl->chunk->num_of_entries - 1);
141
142 return index;
143 }
144
dr_ste_conv_bit_to_byte_mask(u8 * bit_mask)145 static u16 dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
146 {
147 u16 byte_mask = 0;
148 int i;
149
150 for (i = 0; i < DR_STE_SIZE_MASK; i++) {
151 byte_mask = byte_mask << 1;
152 if (bit_mask[i] == 0xff)
153 byte_mask |= 1;
154 }
155 return byte_mask;
156 }
157
/* Return a pointer to the tag area inside a raw STE buffer */
static u8 *mlx5dr_ste_get_tag(u8 *hw_ste_p)
{
	return ((struct dr_hw_ste_format *)hw_ste_p)->tag;
}
164
/* Copy the builder's bit mask into the mask area of a raw STE buffer */
void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask)
{
	struct dr_hw_ste_format *hw_ste;

	hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	memcpy(hw_ste->mask, bit_mask, DR_STE_SIZE_MASK);
}
171
/* Program an RX flow-tag action; the tag value shares the qp_list_pointer
 * field and is enabled by DR_STE_ENABLE_FLOW_TAG (bit 31).
 */
void mlx5dr_ste_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag)
{
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, qp_list_pointer,
		 DR_STE_ENABLE_FLOW_TAG | flow_tag);
}
177
/* Attach a flow counter to the STE; the 24-bit counter id is split across
 * two trigger fields.
 */
void mlx5dr_ste_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
{
	/* This can be used for both rx_steering_mult and for sx_transmit */
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_15_0, ctr_id);
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_23_16, ctr_id >> 16);
}
184
/* Set the TX STE "go back" bit (see mlx5dr_ste_set_tx_push_vlan for why
 * this is needed together with reformat actions).
 */
void mlx5dr_ste_set_go_back_bit(u8 *hw_ste_p)
{
	MLX5_SET(ste_sx_transmit, hw_ste_p, go_back, 1);
}
189
/* Program a TX push-VLAN action; @vlan_hdr is written into the shared
 * encap-pointer/vlan-data field.
 */
void mlx5dr_ste_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_hdr,
				 bool go_back)
{
	MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
		 DR_STE_ACTION_TYPE_PUSH_VLAN);
	MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, vlan_hdr);
	/* Due to HW limitation we need to set this bit, otherwise reformat +
	 * push vlan will not work.
	 */
	if (go_back)
		mlx5dr_ste_set_go_back_bit(hw_ste_p);
}
202
/* Program a TX encap (reformat) action pointing at @reformat_id;
 * @size is the reformat data length in bytes.
 */
void mlx5dr_ste_set_tx_encap(void *hw_ste_p, u32 reformat_id, int size, bool encap_l3)
{
	MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
		 encap_l3 ? DR_STE_ACTION_TYPE_ENCAP_L3 : DR_STE_ACTION_TYPE_ENCAP);
	/* The hardware expects here size in words (2 byte) */
	MLX5_SET(ste_sx_transmit, hw_ste_p, action_description, size / 2);
	MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, reformat_id);
}
211
/* Program an RX L2 tunnel decap action */
void mlx5dr_ste_set_rx_decap(u8 *hw_ste_p)
{
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
		 DR_STE_TUNL_ACTION_DECAP);
}
217
/* Program an RX pop-VLAN action (encoded via the tunneling_action field) */
void mlx5dr_ste_set_rx_pop_vlan(u8 *hw_ste_p)
{
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
		 DR_STE_TUNL_ACTION_POP_VLAN);
}
223
/* Program an RX L3 tunnel decap action; @vlan tells HW whether the inner
 * packet carries a VLAN (written into action_description).
 */
void mlx5dr_ste_set_rx_decap_l3(u8 *hw_ste_p, bool vlan)
{
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
		 DR_STE_TUNL_ACTION_L3_DECAP);
	MLX5_SET(ste_modify_packet, hw_ste_p, action_description, vlan ? 1 : 0);
}
230
/* Set the STE entry type (RX/TX/modify, per the ste_general layout) */
void mlx5dr_ste_set_entry_type(u8 *hw_ste_p, u8 entry_type)
{
	MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
}
235
/* Read back the STE entry type */
u8 mlx5dr_ste_get_entry_type(u8 *hw_ste_p)
{
	return MLX5_GET(ste_general, hw_ste_p, entry_type);
}
240
/* Program a header-rewrite (modify-header) action: number of rewrite
 * actions and the index of the action list in ICM.
 */
void mlx5dr_ste_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
				    u32 re_write_index)
{
	MLX5_SET(ste_modify_packet, hw_ste_p, number_of_re_write_actions,
		 num_of_actions);
	MLX5_SET(ste_modify_packet, hw_ste_p, header_re_write_actions_pointer,
		 re_write_index);
}
249
/* Encode the hit-destination GVMI in bits 63..48 of the next-table base */
void mlx5dr_ste_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
{
	MLX5_SET(ste_general, hw_ste_p, next_table_base_63_48, gvmi);
}
254
/* Initialize a raw STE: entry/lookup type, a don't-care next lookup, and
 * the owning GVMI stamped on all address fields.
 */
void mlx5dr_ste_init(u8 *hw_ste_p, u8 lu_type, u8 entry_type,
		     u16 gvmi)
{
	MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
	MLX5_SET(ste_general, hw_ste_p, entry_sub_type, lu_type);
	MLX5_SET(ste_general, hw_ste_p, next_lu_type, MLX5DR_STE_LU_TYPE_DONT_CARE);

	/* Set GVMI once, this is the same for RX/TX
	 * bits 63_48 of next table base / miss address encode the next GVMI
	 */
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, gvmi, gvmi);
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, next_table_base_63_48, gvmi);
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_63_48, gvmi);
}
269
dr_ste_set_always_hit(struct dr_hw_ste_format * hw_ste)270 static void dr_ste_set_always_hit(struct dr_hw_ste_format *hw_ste)
271 {
272 memset(&hw_ste->tag, 0, sizeof(hw_ste->tag));
273 memset(&hw_ste->mask, 0, sizeof(hw_ste->mask));
274 }
275
/* Mark the STE so lookups never match and always take the miss path.
 * NOTE(review): 0xdc appears to be a magic always-miss marker understood
 * together with the cleared mask byte — confirm against HW STE spec.
 */
static void dr_ste_set_always_miss(struct dr_hw_ste_format *hw_ste)
{
	hw_ste->tag[0] = 0xdc;
	hw_ste->mask[0] = 0;
}
281
mlx5dr_ste_get_miss_addr(u8 * hw_ste)282 u64 mlx5dr_ste_get_miss_addr(u8 *hw_ste)
283 {
284 u64 index =
285 (MLX5_GET(ste_rx_steering_mult, hw_ste, miss_address_31_6) |
286 MLX5_GET(ste_rx_steering_mult, hw_ste, miss_address_39_32) << 26);
287
288 return index << 6;
289 }
290
/* Program the hit (next table) address and size. The low 5 bits of
 * @icm_addr are discarded; @ht_size is OR'ed into the low bits of the
 * combined base+size index, per the ste_general field layout.
 */
void mlx5dr_ste_set_hit_addr(u8 *hw_ste, u64 icm_addr, u32 ht_size)
{
	u64 index = (icm_addr >> 5) | ht_size;

	MLX5_SET(ste_general, hw_ste, next_table_base_39_32_size, index >> 27);
	MLX5_SET(ste_general, hw_ste, next_table_base_31_5_size, index);
}
298
mlx5dr_ste_get_icm_addr(struct mlx5dr_ste * ste)299 u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste)
300 {
301 u32 index = ste - ste->htbl->ste_arr;
302
303 return ste->htbl->chunk->icm_addr + DR_STE_SIZE * index;
304 }
305
mlx5dr_ste_get_mr_addr(struct mlx5dr_ste * ste)306 u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste)
307 {
308 u32 index = ste - ste->htbl->ste_arr;
309
310 return ste->htbl->chunk->mr_addr + DR_STE_SIZE * index;
311 }
312
mlx5dr_ste_get_miss_list(struct mlx5dr_ste * ste)313 struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste)
314 {
315 u32 index = ste - ste->htbl->ste_arr;
316
317 return &ste->htbl->miss_list[index];
318 }
319
/* Make @ste unconditionally jump to @next_htbl: program the next table's
 * byte mask, lookup type and hit address, then clear tag+mask so every
 * packet matches.
 */
static void dr_ste_always_hit_htbl(struct mlx5dr_ste *ste,
				   struct mlx5dr_ste_htbl *next_htbl)
{
	struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
	u8 *hw_ste = ste->hw_ste;

	MLX5_SET(ste_general, hw_ste, byte_mask, next_htbl->byte_mask);
	MLX5_SET(ste_general, hw_ste, next_lu_type, next_htbl->lu_type);
	mlx5dr_ste_set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);

	dr_ste_set_always_hit((struct dr_hw_ste_format *)ste->hw_ste);
}
332
/* True when @ste_location (1-based chain position) is the matcher's last
 * builder, i.e. the final STE of the rule chain.
 */
bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
				u8 ste_location)
{
	return ste_location == nic_matcher->num_of_builders;
}
338
/* Replace relevant fields, except of:
 * htbl - keep the origin htbl
 * miss_list + list - already took the src from the list.
 * icm_addr/mr_addr - depends on the hosting table.
 *
 * Before:
 * | a | -> | b | -> | c | ->
 *
 * After:
 * | a | -> | c | ->
 * While the data that was in b copied to a.
 */
static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
{
	/* Copy only the reduced part of the STE that is written to HW */
	memcpy(dst->hw_ste, src->hw_ste, DR_STE_SIZE_REDUCED);
	dst->next_htbl = src->next_htbl;
	if (dst->next_htbl)
		dst->next_htbl->pointing_ste = dst;

	dst->refcount = src->refcount;

	/* Move src's rule list over to dst (src's list ends up empty) */
	INIT_LIST_HEAD(&dst->rule_list);
	list_splice_tail_init(&src->rule_list, &dst->rule_list);
}
363
/* Free ste which is the head and the only one in miss_list.
 *
 * The STE is turned into an "always miss" pointing at the matcher's end
 * anchor, queued for a full-size HW write, and unlinked from its miss list.
 */
static void
dr_ste_remove_head_ste(struct mlx5dr_ste *ste,
		       struct mlx5dr_matcher_rx_tx *nic_matcher,
		       struct mlx5dr_ste_send_info *ste_info_head,
		       struct list_head *send_ste_list,
		       struct mlx5dr_ste_htbl *stats_tbl)
{
	u8 tmp_data_ste[DR_STE_SIZE] = {};
	struct mlx5dr_ste tmp_ste = {};
	u64 miss_addr;

	tmp_ste.hw_ste = tmp_data_ste;

	/* Use temp ste because dr_ste_always_miss_addr
	 * touches bit_mask area which doesn't exist at ste->hw_ste.
	 */
	memcpy(tmp_ste.hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
	miss_addr = nic_matcher->e_anchor->chunk->icm_addr;
	mlx5dr_ste_always_miss_addr(&tmp_ste, miss_addr);
	/* Copy the reduced part back so the SW copy stays consistent */
	memcpy(ste->hw_ste, tmp_ste.hw_ste, DR_STE_SIZE_REDUCED);

	list_del_init(&ste->miss_list_node);

	/* Write full STE size in order to have "always_miss" */
	mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE,
						  0, tmp_data_ste,
						  ste_info_head,
						  send_ste_list,
						  true /* Copy data */);

	stats_tbl->ctrl.num_of_valid_entries--;
}
397
/* Free ste which is the head but NOT the only one in miss_list:
 * |_ste_| --> |_next_ste_| -->|__| -->|__| -->/0
 *
 * next_ste's content is copied into the head slot (which lives in the hash
 * bucket), next_ste's own table is released, and the updated head is queued
 * for a reduced-size HW write.
 */
static void
dr_ste_replace_head_ste(struct mlx5dr_ste *ste, struct mlx5dr_ste *next_ste,
			struct mlx5dr_ste_send_info *ste_info_head,
			struct list_head *send_ste_list,
			struct mlx5dr_ste_htbl *stats_tbl)

{
	struct mlx5dr_ste_htbl *next_miss_htbl;

	next_miss_htbl = next_ste->htbl;

	/* Remove from the miss_list the next_ste before copy */
	list_del_init(&next_ste->miss_list_node);

	/* All rule-members that use next_ste should know about that */
	mlx5dr_rule_update_rule_member(next_ste, ste);

	/* Move data from next into ste */
	dr_ste_replace(ste, next_ste);

	/* Del the htbl that contains the next_ste.
	 * The origin htbl stay with the same number of entries.
	 */
	mlx5dr_htbl_put(next_miss_htbl);

	mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE_REDUCED,
						  0, ste->hw_ste,
						  ste_info_head,
						  send_ste_list,
						  true /* Copy data */);

	stats_tbl->ctrl.num_of_collisions--;
	stats_tbl->ctrl.num_of_valid_entries--;
}
435
/* Free ste that is located in the middle of the miss list:
 * |__| -->|_prev_ste_|->|_ste_|-->|_next_ste_|
 *
 * The previous STE's miss address is redirected past @ste, the updated
 * previous STE is queued for a HW write, and @ste is unlinked.
 */
static void dr_ste_remove_middle_ste(struct mlx5dr_ste *ste,
				     struct mlx5dr_ste_send_info *ste_info,
				     struct list_head *send_ste_list,
				     struct mlx5dr_ste_htbl *stats_tbl)
{
	struct mlx5dr_ste *prev_ste;
	u64 miss_addr;

	/* Caller guarantees @ste is not the list head, so a predecessor
	 * must exist; the WARN_ON is a defensive check only.
	 */
	prev_ste = list_prev_entry(ste, miss_list_node);
	if (WARN_ON(!prev_ste))
		return;

	/* Bridge prev's miss pointer over the removed entry */
	miss_addr = mlx5dr_ste_get_miss_addr(ste->hw_ste);
	mlx5dr_ste_set_miss_addr(prev_ste->hw_ste, miss_addr);

	mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_REDUCED, 0,
						  prev_ste->hw_ste, ste_info,
						  send_ste_list, true /* Copy data*/);

	list_del_init(&ste->miss_list_node);

	stats_tbl->ctrl.num_of_valid_entries--;
	stats_tbl->ctrl.num_of_collisions--;
}
463
/* Free @ste from its miss list and push the required fixups to HW.
 *
 * Dispatches to one of three removal strategies depending on @ste's
 * position in the miss list (sole head / head with followers / middle),
 * then flushes all queued STE writes and drops the hosting table
 * reference unless the head was replaced in place.
 */
void mlx5dr_ste_free(struct mlx5dr_ste *ste,
		     struct mlx5dr_matcher *matcher,
		     struct mlx5dr_matcher_rx_tx *nic_matcher)
{
	struct mlx5dr_ste_send_info *cur_ste_info, *tmp_ste_info;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_send_info ste_info_head;	/* filled by the helper below */
	struct mlx5dr_ste *next_ste, *first_ste;
	bool put_on_origin_table = true;
	struct mlx5dr_ste_htbl *stats_tbl;
	LIST_HEAD(send_ste_list);

	/* Stats (valid entries / collisions) are kept on the head's table */
	first_ste = list_first_entry(mlx5dr_ste_get_miss_list(ste),
				     struct mlx5dr_ste, miss_list_node);
	stats_tbl = first_ste->htbl;

	/* Two options:
	 * 1. ste is head:
	 *	a. head ste is the only ste in the miss list
	 *	b. head ste is not the only ste in the miss-list
	 * 2. ste is not head
	 */
	if (first_ste == ste) { /* Ste is the head */
		struct mlx5dr_ste *last_ste;

		last_ste = list_last_entry(mlx5dr_ste_get_miss_list(ste),
					   struct mlx5dr_ste, miss_list_node);
		if (last_ste == first_ste)
			next_ste = NULL;
		else
			next_ste = list_next_entry(ste, miss_list_node);

		if (!next_ste) {
			/* One and only entry in the list */
			dr_ste_remove_head_ste(ste, nic_matcher,
					       &ste_info_head,
					       &send_ste_list,
					       stats_tbl);
		} else {
			/* First but not only entry in the list */
			dr_ste_replace_head_ste(ste, next_ste, &ste_info_head,
						&send_ste_list, stats_tbl);
			/* Head slot stays occupied, keep the table ref */
			put_on_origin_table = false;
		}
	} else { /* Ste in the middle of the list */
		dr_ste_remove_middle_ste(ste, &ste_info_head, &send_ste_list, stats_tbl);
	}

	/* Update HW */
	list_for_each_entry_safe(cur_ste_info, tmp_ste_info,
				 &send_ste_list, send_list) {
		list_del(&cur_ste_info->send_list);
		mlx5dr_send_postsend_ste(dmn, cur_ste_info->ste,
					 cur_ste_info->data, cur_ste_info->size,
					 cur_ste_info->offset);
	}

	if (put_on_origin_table)
		mlx5dr_htbl_put(ste->htbl);
}
524
mlx5dr_ste_equal_tag(void * src,void * dst)525 bool mlx5dr_ste_equal_tag(void *src, void *dst)
526 {
527 struct dr_hw_ste_format *s_hw_ste = (struct dr_hw_ste_format *)src;
528 struct dr_hw_ste_format *d_hw_ste = (struct dr_hw_ste_format *)dst;
529
530 return !memcmp(s_hw_ste->tag, d_hw_ste->tag, DR_STE_SIZE_TAG);
531 }
532
/* Point @hw_ste's hit address at @next_htbl's ICM chunk */
void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
					  struct mlx5dr_ste_htbl *next_htbl)
{
	struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;

	mlx5dr_ste_set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
}
540
/* Program the miss address; stored as a 64B-aligned index split across
 * two fields.
 */
void mlx5dr_ste_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
{
	u64 index = miss_addr >> 6;

	/* Miss address for TX and RX STEs located in the same offsets */
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32, index >> 26);
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6, index);
}
549
/* Turn @ste into an unconditional miss jumping to @miss_addr: don't-care
 * next lookup, programmed miss address, and a tag/mask combo that can
 * never match.
 */
void mlx5dr_ste_always_miss_addr(struct mlx5dr_ste *ste, u64 miss_addr)
{
	u8 *hw_ste = ste->hw_ste;

	MLX5_SET(ste_rx_steering_mult, hw_ste, next_lu_type, MLX5DR_STE_LU_TYPE_DONT_CARE);
	mlx5dr_ste_set_miss_addr(hw_ste, miss_addr);
	dr_ste_set_always_miss((struct dr_hw_ste_format *)ste->hw_ste);
}
558
/* Init one ste as a pattern for ste data array.
 *
 * The pattern is written into @formatted_ste and configured either as an
 * always-hit to connect_info->hit_next_htbl or as an always-miss to
 * connect_info->miss_icm_addr, depending on connect_info->type.
 */
void mlx5dr_ste_set_formatted_ste(u16 gvmi,
				  struct mlx5dr_domain_rx_tx *nic_dmn,
				  struct mlx5dr_ste_htbl *htbl,
				  u8 *formatted_ste,
				  struct mlx5dr_htbl_connect_info *connect_info)
{
	struct mlx5dr_ste ste = {};

	mlx5dr_ste_init(formatted_ste, htbl->lu_type, nic_dmn->ste_type, gvmi);
	ste.hw_ste = formatted_ste;

	if (connect_info->type == CONNECT_HIT)
		dr_ste_always_hit_htbl(&ste, connect_info->hit_next_htbl);
	else
		mlx5dr_ste_always_miss_addr(&ste, connect_info->miss_icm_addr);
}
576
/* Build the pattern STE for @htbl and post the whole formatted table to HW.
 * Returns 0 on success or the postsend error.
 */
int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
				      struct mlx5dr_domain_rx_tx *nic_dmn,
				      struct mlx5dr_ste_htbl *htbl,
				      struct mlx5dr_htbl_connect_info *connect_info,
				      bool update_hw_ste)
{
	u8 formatted_ste[DR_STE_SIZE] = {};

	mlx5dr_ste_set_formatted_ste(dmn->info.caps.gvmi,
				     nic_dmn,
				     htbl,
				     formatted_ste,
				     connect_info);

	return mlx5dr_send_postsend_formatted_htbl(dmn, htbl, formatted_ste, update_hw_ste);
}
593
/* Allocate and connect the next hash table for @ste, unless @ste is the
 * last in the rule chain (then nothing is done and 0 is returned).
 *
 * The new table inherits the next lookup type and byte mask already
 * programmed in @cur_hw_ste, is written to HW with its miss path pointing
 * at the matcher's end anchor, and is linked to @ste.
 */
int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
				struct mlx5dr_matcher_rx_tx *nic_matcher,
				struct mlx5dr_ste *ste,
				u8 *cur_hw_ste,
				enum mlx5dr_icm_chunk_size log_table_size)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)cur_hw_ste;
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_htbl_connect_info info;
	struct mlx5dr_ste_htbl *next_htbl;

	if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) {
		u8 next_lu_type;
		u16 byte_mask;

		next_lu_type = MLX5_GET(ste_general, hw_ste, next_lu_type);
		byte_mask = MLX5_GET(ste_general, hw_ste, byte_mask);

		next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
						  log_table_size,
						  next_lu_type,
						  byte_mask);
		if (!next_htbl) {
			mlx5dr_dbg(dmn, "Failed allocating table\n");
			return -ENOMEM;
		}

		/* Write new table to HW */
		info.type = CONNECT_MISS;
		info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
		if (mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, next_htbl,
						      &info, false)) {
			mlx5dr_info(dmn, "Failed writing table to HW\n");
			goto free_table;
		}

		mlx5dr_ste_set_hit_addr_by_next_htbl(cur_hw_ste, next_htbl);
		ste->next_htbl = next_htbl;
		next_htbl->pointing_ste = ste;
	}

	return 0;

free_table:
	mlx5dr_ste_htbl_free(next_htbl);
	/* NOTE(review): -ENOENT for a HW-write failure looks odd (vs -EIO or
	 * propagating the postsend error) — confirm callers only test !=0.
	 */
	return -ENOENT;
}
642
dr_ste_set_ctrl(struct mlx5dr_ste_htbl * htbl)643 static void dr_ste_set_ctrl(struct mlx5dr_ste_htbl *htbl)
644 {
645 struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl;
646 int num_of_entries;
647
648 htbl->ctrl.may_grow = true;
649
650 if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask)
651 htbl->ctrl.may_grow = false;
652
653 /* Threshold is 50%, one is added to table of size 1 */
654 num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(htbl->chunk_size);
655 ctrl->increase_threshold = (num_of_entries + 1) / 2;
656 }
657
/* Allocate an STE hash table backed by an ICM chunk from @pool.
 *
 * Every entry's SW shadow is wired to its reduced HW copy and its miss
 * list is initialized. Returns NULL on allocation failure.
 */
struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
					      enum mlx5dr_icm_chunk_size chunk_size,
					      u8 lu_type, u16 byte_mask)
{
	struct mlx5dr_icm_chunk *chunk;
	struct mlx5dr_ste_htbl *htbl;
	int i;

	htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
	if (!htbl)
		return NULL;

	chunk = mlx5dr_icm_alloc_chunk(pool, chunk_size);
	if (!chunk)
		goto out_free_htbl;

	htbl->chunk = chunk;
	htbl->lu_type = lu_type;
	htbl->byte_mask = byte_mask;
	htbl->ste_arr = chunk->ste_arr;
	htbl->hw_ste_arr = chunk->hw_ste_arr;
	htbl->miss_list = chunk->miss_list;
	htbl->refcount = 0;

	for (i = 0; i < chunk->num_of_entries; i++) {
		struct mlx5dr_ste *ste = &htbl->ste_arr[i];

		/* Each SW STE shadows a reduced-size HW STE in the chunk */
		ste->hw_ste = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
		ste->htbl = htbl;
		ste->refcount = 0;
		INIT_LIST_HEAD(&ste->miss_list_node);
		INIT_LIST_HEAD(&htbl->miss_list[i]);
		INIT_LIST_HEAD(&ste->rule_list);
	}

	htbl->chunk_size = chunk_size;
	dr_ste_set_ctrl(htbl);
	return htbl;

out_free_htbl:
	kfree(htbl);
	return NULL;
}
701
/* Free an STE hash table and its ICM chunk.
 * Returns -EBUSY (and frees nothing) while the table is still referenced.
 */
int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl)
{
	if (htbl->refcount)
		return -EBUSY;

	mlx5dr_icm_free_chunk(htbl->chunk);
	kfree(htbl);
	return 0;
}
711
/* Validate a matcher mask before building STEs.
 *
 * Only the mask pass (@value == NULL) with misc criteria needs checking:
 * source_port and source_eswitch_owner_vhca_id support only exact-match
 * (all-ones) masks. Returns 0 when valid, -EINVAL otherwise.
 */
int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
			       u8 match_criteria,
			       struct mlx5dr_match_param *mask,
			       struct mlx5dr_match_param *value)
{
	if (value || !(match_criteria & DR_MATCHER_CRITERIA_MISC))
		return 0;

	if (mask->misc.source_port && mask->misc.source_port != 0xffff) {
		mlx5dr_err(dmn,
			   "Partial mask source_port is not supported\n");
		return -EINVAL;
	}

	if (mask->misc.source_eswitch_owner_vhca_id &&
	    mask->misc.source_eswitch_owner_vhca_id != 0xffff) {
		mlx5dr_err(dmn,
			   "Partial mask source_eswitch_owner_vhca_id is not supported\n");
		return -EINVAL;
	}

	return 0;
}
733
/* Build the rule's full STE chain into @ste_arr.
 *
 * Each matcher builder produces one STE (init, bit mask, tag); adjacent
 * STEs are chained by programming the next builder's lookup type and byte
 * mask into the current STE. Returns 0 or a builder/pre-check error.
 */
int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
			     struct mlx5dr_matcher_rx_tx *nic_matcher,
			     struct mlx5dr_match_param *value,
			     u8 *ste_arr)
{
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_build *sb;
	int ret, i;

	ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria,
					 &matcher->mask, value);
	if (ret)
		return ret;

	sb = nic_matcher->ste_builder;
	for (i = 0; i < nic_matcher->num_of_builders; i++) {
		mlx5dr_ste_init(ste_arr,
				sb->lu_type,
				nic_dmn->ste_type,
				dmn->info.caps.gvmi);

		mlx5dr_ste_set_bit_mask(ste_arr, sb->bit_mask);

		ret = sb->ste_build_tag_func(value, sb, mlx5dr_ste_get_tag(ste_arr));
		if (ret)
			return ret;

		/* Connect the STEs */
		if (i < (nic_matcher->num_of_builders - 1)) {
			/* Need the next builder for these fields,
			 * not relevant for the last ste in the chain.
			 */
			sb++;
			MLX5_SET(ste_general, ste_arr, next_lu_type, sb->lu_type);
			MLX5_SET(ste_general, ste_arr, byte_mask, sb->byte_mask);
		}
		ste_arr += DR_STE_SIZE;
	}
	return 0;
}
775
/* Build the eth_l2_src_dst bit mask from the inner or outer L2 spec.
 * Consumed spec fields are zeroed so later builders skip them.
 */
static void dr_ste_build_eth_l2_src_des_bit_mask(struct mlx5dr_match_param *value,
						 bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, dmac_15_0, mask, dmac_15_0);

	/* smac is split 47_32/31_0 in the STE but 47_16/15_0 in the spec */
	if (mask->smac_47_16 || mask->smac_15_0) {
		MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_47_32,
			 mask->smac_47_16 >> 16);
		MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_31_0,
			 mask->smac_47_16 << 16 | mask->smac_15_0);
		mask->smac_47_16 = 0;
		mask->smac_15_0 = 0;
	}

	DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_priority, mask, first_prio);
	DR_STE_SET_MASK(eth_l2_src_dst, bit_mask, l3_type, mask, ip_version);

	/* Only one VLAN qualifier can be matched; cvlan takes precedence */
	if (mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
	} else if (mask->svlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
		mask->svlan_tag = 0;
	}
}
806
/* Unpack an fte_match_set_misc buffer (@mask) into the SW misc spec.
 * Purely mechanical field-by-field extraction via MLX5_GET.
 */
static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec)
{
	spec->gre_c_present = MLX5_GET(fte_match_set_misc, mask, gre_c_present);
	spec->gre_k_present = MLX5_GET(fte_match_set_misc, mask, gre_k_present);
	spec->gre_s_present = MLX5_GET(fte_match_set_misc, mask, gre_s_present);
	spec->source_vhca_port = MLX5_GET(fte_match_set_misc, mask, source_vhca_port);
	spec->source_sqn = MLX5_GET(fte_match_set_misc, mask, source_sqn);

	spec->source_port = MLX5_GET(fte_match_set_misc, mask, source_port);
	spec->source_eswitch_owner_vhca_id = MLX5_GET(fte_match_set_misc, mask,
						      source_eswitch_owner_vhca_id);

	spec->outer_second_prio = MLX5_GET(fte_match_set_misc, mask, outer_second_prio);
	spec->outer_second_cfi = MLX5_GET(fte_match_set_misc, mask, outer_second_cfi);
	spec->outer_second_vid = MLX5_GET(fte_match_set_misc, mask, outer_second_vid);
	spec->inner_second_prio = MLX5_GET(fte_match_set_misc, mask, inner_second_prio);
	spec->inner_second_cfi = MLX5_GET(fte_match_set_misc, mask, inner_second_cfi);
	spec->inner_second_vid = MLX5_GET(fte_match_set_misc, mask, inner_second_vid);

	spec->outer_second_cvlan_tag =
		MLX5_GET(fte_match_set_misc, mask, outer_second_cvlan_tag);
	spec->inner_second_cvlan_tag =
		MLX5_GET(fte_match_set_misc, mask, inner_second_cvlan_tag);
	spec->outer_second_svlan_tag =
		MLX5_GET(fte_match_set_misc, mask, outer_second_svlan_tag);
	spec->inner_second_svlan_tag =
		MLX5_GET(fte_match_set_misc, mask, inner_second_svlan_tag);

	spec->gre_protocol = MLX5_GET(fte_match_set_misc, mask, gre_protocol);

	spec->gre_key_h = MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.hi);
	spec->gre_key_l = MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.lo);

	spec->vxlan_vni = MLX5_GET(fte_match_set_misc, mask, vxlan_vni);

	spec->geneve_vni = MLX5_GET(fte_match_set_misc, mask, geneve_vni);
	spec->geneve_oam = MLX5_GET(fte_match_set_misc, mask, geneve_oam);

	spec->outer_ipv6_flow_label =
		MLX5_GET(fte_match_set_misc, mask, outer_ipv6_flow_label);

	spec->inner_ipv6_flow_label =
		MLX5_GET(fte_match_set_misc, mask, inner_ipv6_flow_label);

	spec->geneve_opt_len = MLX5_GET(fte_match_set_misc, mask, geneve_opt_len);
	spec->geneve_protocol_type =
		MLX5_GET(fte_match_set_misc, mask, geneve_protocol_type);

	spec->bth_dst_qp = MLX5_GET(fte_match_set_misc, mask, bth_dst_qp);
}
857
/* Parse an fte_match_set_lyr_2_4 mask (HW layout, accessed with
 * MLX5_GET) into the SW-steering L2-L4 match spec. The 128-bit
 * src/dst address fields are converted per 32-bit word from the
 * big-endian FTE layout to host order.
 */
static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec)
{
	__be32 raw_ip[4];

	spec->smac_47_16 = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_47_16);

	spec->smac_15_0 = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_15_0);
	spec->ethertype = MLX5_GET(fte_match_set_lyr_2_4, mask, ethertype);

	spec->dmac_47_16 = MLX5_GET(fte_match_set_lyr_2_4, mask, dmac_47_16);

	spec->dmac_15_0 = MLX5_GET(fte_match_set_lyr_2_4, mask, dmac_15_0);
	spec->first_prio = MLX5_GET(fte_match_set_lyr_2_4, mask, first_prio);
	spec->first_cfi = MLX5_GET(fte_match_set_lyr_2_4, mask, first_cfi);
	spec->first_vid = MLX5_GET(fte_match_set_lyr_2_4, mask, first_vid);

	spec->ip_protocol = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_protocol);
	spec->ip_dscp = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_dscp);
	spec->ip_ecn = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_ecn);
	spec->cvlan_tag = MLX5_GET(fte_match_set_lyr_2_4, mask, cvlan_tag);
	spec->svlan_tag = MLX5_GET(fte_match_set_lyr_2_4, mask, svlan_tag);
	spec->frag = MLX5_GET(fte_match_set_lyr_2_4, mask, frag);
	spec->ip_version = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_version);
	spec->tcp_flags = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_flags);
	spec->tcp_sport = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_sport);
	spec->tcp_dport = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_dport);

	spec->ttl_hoplimit = MLX5_GET(fte_match_set_lyr_2_4, mask, ttl_hoplimit);

	spec->udp_sport = MLX5_GET(fte_match_set_lyr_2_4, mask, udp_sport);
	spec->udp_dport = MLX5_GET(fte_match_set_lyr_2_4, mask, udp_dport);

	/* Source address: four big-endian 32-bit words, most significant
	 * word first (raw_ip[0] holds bits 127..96).
	 */
	memcpy(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
	       sizeof(raw_ip));

	spec->src_ip_127_96 = be32_to_cpu(raw_ip[0]);
	spec->src_ip_95_64 = be32_to_cpu(raw_ip[1]);
	spec->src_ip_63_32 = be32_to_cpu(raw_ip[2]);
	spec->src_ip_31_0 = be32_to_cpu(raw_ip[3]);

	/* Destination address, same word layout as the source. */
	memcpy(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
	       sizeof(raw_ip));

	spec->dst_ip_127_96 = be32_to_cpu(raw_ip[0]);
	spec->dst_ip_95_64 = be32_to_cpu(raw_ip[1]);
	spec->dst_ip_63_32 = be32_to_cpu(raw_ip[2]);
	spec->dst_ip_31_0 = be32_to_cpu(raw_ip[3]);
}
908
/* Parse an fte_match_set_misc2 mask into the SW-steering misc2 match:
 * outer/inner MPLS headers (plain, over-GRE and over-UDP variants) and
 * the metadata registers (reg_c_0..reg_c_7, reg_a).
 */
static void dr_ste_copy_mask_misc2(char *mask, struct mlx5dr_match_misc2 *spec)
{
	spec->outer_first_mpls_label =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_label);
	spec->outer_first_mpls_exp =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_exp);
	spec->outer_first_mpls_s_bos =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_s_bos);
	spec->outer_first_mpls_ttl =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_ttl);
	spec->inner_first_mpls_label =
		MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_label);
	spec->inner_first_mpls_exp =
		MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_exp);
	spec->inner_first_mpls_s_bos =
		MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_s_bos);
	spec->inner_first_mpls_ttl =
		MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_ttl);
	spec->outer_first_mpls_over_gre_label =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_label);
	spec->outer_first_mpls_over_gre_exp =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_exp);
	spec->outer_first_mpls_over_gre_s_bos =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_s_bos);
	spec->outer_first_mpls_over_gre_ttl =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_ttl);
	spec->outer_first_mpls_over_udp_label =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_label);
	spec->outer_first_mpls_over_udp_exp =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_exp);
	spec->outer_first_mpls_over_udp_s_bos =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_s_bos);
	spec->outer_first_mpls_over_udp_ttl =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_ttl);
	spec->metadata_reg_c_7 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_7);
	spec->metadata_reg_c_6 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_6);
	spec->metadata_reg_c_5 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_5);
	spec->metadata_reg_c_4 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_4);
	spec->metadata_reg_c_3 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_3);
	spec->metadata_reg_c_2 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_2);
	spec->metadata_reg_c_1 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_1);
	spec->metadata_reg_c_0 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_0);
	spec->metadata_reg_a = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_a);
}
953
/* Parse an fte_match_set_misc3 mask into the SW-steering misc3 match:
 * TCP seq/ack numbers, VXLAN-GPE header fields and ICMP/ICMPv6 fields.
 * Note the FTE naming: the ICMPv4 fields are the bare "icmp_*" ones.
 */
static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec)
{
	spec->inner_tcp_seq_num = MLX5_GET(fte_match_set_misc3, mask, inner_tcp_seq_num);
	spec->outer_tcp_seq_num = MLX5_GET(fte_match_set_misc3, mask, outer_tcp_seq_num);
	spec->inner_tcp_ack_num = MLX5_GET(fte_match_set_misc3, mask, inner_tcp_ack_num);
	spec->outer_tcp_ack_num = MLX5_GET(fte_match_set_misc3, mask, outer_tcp_ack_num);
	spec->outer_vxlan_gpe_vni =
		MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_vni);
	spec->outer_vxlan_gpe_next_protocol =
		MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_next_protocol);
	spec->outer_vxlan_gpe_flags =
		MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_flags);
	spec->icmpv4_header_data = MLX5_GET(fte_match_set_misc3, mask, icmp_header_data);
	spec->icmpv6_header_data =
		MLX5_GET(fte_match_set_misc3, mask, icmpv6_header_data);
	spec->icmpv4_type = MLX5_GET(fte_match_set_misc3, mask, icmp_type);
	spec->icmpv4_code = MLX5_GET(fte_match_set_misc3, mask, icmp_code);
	spec->icmpv6_type = MLX5_GET(fte_match_set_misc3, mask, icmpv6_type);
	spec->icmpv6_code = MLX5_GET(fte_match_set_misc3, mask, icmpv6_code);
}
974
mlx5dr_ste_copy_param(u8 match_criteria,struct mlx5dr_match_param * set_param,struct mlx5dr_match_parameters * mask)975 void mlx5dr_ste_copy_param(u8 match_criteria,
976 struct mlx5dr_match_param *set_param,
977 struct mlx5dr_match_parameters *mask)
978 {
979 u8 tail_param[MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)] = {};
980 u8 *data = (u8 *)mask->match_buf;
981 size_t param_location;
982 void *buff;
983
984 if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
985 if (mask->match_sz < sizeof(struct mlx5dr_match_spec)) {
986 memcpy(tail_param, data, mask->match_sz);
987 buff = tail_param;
988 } else {
989 buff = mask->match_buf;
990 }
991 dr_ste_copy_mask_spec(buff, &set_param->outer);
992 }
993 param_location = sizeof(struct mlx5dr_match_spec);
994
995 if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
996 if (mask->match_sz < param_location +
997 sizeof(struct mlx5dr_match_misc)) {
998 memcpy(tail_param, data + param_location,
999 mask->match_sz - param_location);
1000 buff = tail_param;
1001 } else {
1002 buff = data + param_location;
1003 }
1004 dr_ste_copy_mask_misc(buff, &set_param->misc);
1005 }
1006 param_location += sizeof(struct mlx5dr_match_misc);
1007
1008 if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
1009 if (mask->match_sz < param_location +
1010 sizeof(struct mlx5dr_match_spec)) {
1011 memcpy(tail_param, data + param_location,
1012 mask->match_sz - param_location);
1013 buff = tail_param;
1014 } else {
1015 buff = data + param_location;
1016 }
1017 dr_ste_copy_mask_spec(buff, &set_param->inner);
1018 }
1019 param_location += sizeof(struct mlx5dr_match_spec);
1020
1021 if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
1022 if (mask->match_sz < param_location +
1023 sizeof(struct mlx5dr_match_misc2)) {
1024 memcpy(tail_param, data + param_location,
1025 mask->match_sz - param_location);
1026 buff = tail_param;
1027 } else {
1028 buff = data + param_location;
1029 }
1030 dr_ste_copy_mask_misc2(buff, &set_param->misc2);
1031 }
1032
1033 param_location += sizeof(struct mlx5dr_match_misc2);
1034
1035 if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
1036 if (mask->match_sz < param_location +
1037 sizeof(struct mlx5dr_match_misc3)) {
1038 memcpy(tail_param, data + param_location,
1039 mask->match_sz - param_location);
1040 buff = tail_param;
1041 } else {
1042 buff = data + param_location;
1043 }
1044 dr_ste_copy_mask_misc3(buff, &set_param->misc3);
1045 }
1046 }
1047
/* Build the ETHL2_SRC_DST STE tag: DMAC, SMAC, first VLAN and L3 type.
 * Each DR_STE_SET_TAG() writes a field into the tag only when the spec
 * value is non-zero, then clears it in the spec to mark it consumed.
 */
static int dr_ste_build_eth_l2_src_des_tag(struct mlx5dr_match_param *value,
					   struct mlx5dr_ste_build *sb,
					   u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_47_16, spec, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_15_0, spec, dmac_15_0);

	/* The tag splits the SMAC as 47_32/31_0, unlike the spec's
	 * 47_16/15_0 split, so the 48 bits are repacked by hand.
	 */
	if (spec->smac_47_16 || spec->smac_15_0) {
		MLX5_SET(ste_eth_l2_src_dst, tag, smac_47_32,
			 spec->smac_47_16 >> 16);
		MLX5_SET(ste_eth_l2_src_dst, tag, smac_31_0,
			 spec->smac_47_16 << 16 | spec->smac_15_0);
		spec->smac_47_16 = 0;
		spec->smac_15_0 = 0;
	}

	/* Translate the IP header version to the STE l3_type encoding. */
	if (spec->ip_version) {
		if (spec->ip_version == IP_VERSION_IPV4) {
			MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV4);
			spec->ip_version = 0;
		} else if (spec->ip_version == IP_VERSION_IPV6) {
			MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV6);
			spec->ip_version = 0;
		} else {
			pr_info("Unsupported ip_version value\n");
			return -EINVAL;
		}
	}

	DR_STE_SET_TAG(eth_l2_src_dst, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_src_dst, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_src_dst, tag, first_priority, spec, first_prio);

	/* cvlan wins when both VLAN tag bits are set in the spec. */
	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}
	return 0;
}
1092
/* Initialize @sb to build an ETHL2_SRC_DST STE: lookup type, the
 * tag-building callback and the bit/byte masks derived from @mask.
 */
void mlx5dr_ste_build_eth_l2_src_des(struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask,
				     bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC_DST, rx, inner);
	sb->ste_build_tag_func = dr_ste_build_eth_l2_src_des_tag;

	/* The byte mask is derived from the bit mask, so the bit mask
	 * must be filled in first.
	 */
	dr_ste_build_eth_l2_src_des_bit_mask(mask, inner, sb->bit_mask);
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
}
1105
/* Enable the ETHL3_IPV6_DST mask bits for each IPv6 destination-address
 * word requested in @value, consuming the corresponding mask fields.
 */
static void dr_ste_build_eth_l3_ipv6_dst_bit_mask(struct mlx5dr_match_param *value,
						  bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_127_96, mask, dst_ip_127_96);
	DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_95_64, mask, dst_ip_95_64);
	DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_63_32, mask, dst_ip_63_32);
	DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_31_0, mask, dst_ip_31_0);
}
1116
/* Build the ETHL3_IPV6_DST STE tag: the four 32-bit words of the IPv6
 * destination address, consuming the matching spec fields.
 */
static int dr_ste_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
					    struct mlx5dr_ste_build *sb,
					    u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0);

	return 0;
}
1130
/* Initialize @sb to build an ETHL3_IPV6_DST STE (IPv6 destination
 * address lookup) from the given match @mask.
 */
void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask,
				      bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_DST, rx, inner);
	sb->ste_build_tag_func = dr_ste_build_eth_l3_ipv6_dst_tag;

	/* Fill the bit mask before compressing it into the byte mask. */
	dr_ste_build_eth_l3_ipv6_dst_bit_mask(mask, inner, sb->bit_mask);
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
}
1143
/* Enable the ETHL3_IPV6_SRC mask bits for each IPv6 source-address
 * word requested in @value, consuming the corresponding mask fields.
 */
static void dr_ste_build_eth_l3_ipv6_src_bit_mask(struct mlx5dr_match_param *value,
						  bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_127_96, mask, src_ip_127_96);
	DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_95_64, mask, src_ip_95_64);
	DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_63_32, mask, src_ip_63_32);
	DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_31_0, mask, src_ip_31_0);
}
1154
/* Build the ETHL3_IPV6_SRC STE tag: the four 32-bit words of the IPv6
 * source address, consuming the matching spec fields.
 */
static int dr_ste_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
					    struct mlx5dr_ste_build *sb,
					    u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0);

	return 0;
}
1168
/* Initialize @sb to build an ETHL3_IPV6_SRC STE (IPv6 source address
 * lookup) from the given match @mask.
 */
void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask,
				      bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_SRC, rx, inner);
	sb->ste_build_tag_func = dr_ste_build_eth_l3_ipv6_src_tag;

	/* Fill the bit mask before compressing it into the byte mask. */
	dr_ste_build_eth_l3_ipv6_src_bit_mask(mask, inner, sb->bit_mask);
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
}
1181
/* Enable the ETHL3_IPV4_5_TUPLE mask bits: addresses, L4 ports,
 * protocol, fragment/DSCP/ECN and TCP flags.  TCP and UDP ports share
 * the same destination_port/source_port STE fields; the macro only
 * acts on non-zero mask fields, so whichever is set gets applied.
 */
static void dr_ste_build_eth_l3_ipv4_5_tuple_bit_mask(struct mlx5dr_match_param *value,
						      bool inner,
						      u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
			  destination_address, mask, dst_ip_31_0);
	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
			  source_address, mask, src_ip_31_0);
	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
			  destination_port, mask, tcp_dport);
	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
			  destination_port, mask, udp_dport);
	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
			  source_port, mask, tcp_sport);
	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
			  source_port, mask, udp_sport);
	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
			  protocol, mask, ip_protocol);
	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
			  fragmented, mask, frag);
	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
			  dscp, mask, ip_dscp);
	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
			  ecn, mask, ip_ecn);

	/* TCP flags are expanded bit-by-bit into individual STE fields. */
	if (mask->tcp_flags) {
		DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, bit_mask, mask);
		mask->tcp_flags = 0;
	}
}
1214
/* Build the ETHL3_IPV4_5_TUPLE STE tag (addresses, ports, protocol,
 * frag/dscp/ecn, TCP flags), consuming the matching spec fields.
 * TCP and UDP ports map onto the same tag fields; only non-zero spec
 * values are written.
 */
static int dr_ste_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
						struct mlx5dr_ste_build *sb,
						u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_address, spec, dst_ip_31_0);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_address, spec, src_ip_31_0);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, tcp_dport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, udp_dport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, tcp_sport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, udp_sport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, protocol, spec, ip_protocol);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, dscp, spec, ip_dscp);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, ecn, spec, ip_ecn);

	if (spec->tcp_flags) {
		DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, tag, spec);
		spec->tcp_flags = 0;
	}

	return 0;
}
1239
/* Initialize @sb to build an ETHL3_IPV4_5_TUPLE STE (IPv4 5-tuple
 * lookup) from the given match @mask.
 */
void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_build *sb,
					  struct mlx5dr_match_param *mask,
					  bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_5_TUPLE, rx, inner);
	sb->ste_build_tag_func = dr_ste_build_eth_l3_ipv4_5_tuple_tag;

	/* Fill the bit mask before compressing it into the byte mask. */
	dr_ste_build_eth_l3_ipv4_5_tuple_bit_mask(mask, inner, sb->bit_mask);
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
}
1252
/* Common mask builder shared by the ETHL2_SRC and ETHL2_DST lookups:
 * first VLAN, fragment bit, ethertype, L3 type, and the second VLAN
 * (taken from the misc mask, inner or outer side per @inner).
 */
static void
dr_ste_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
					bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc_mask = &value->misc;

	DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_priority, mask, first_prio);
	DR_STE_SET_MASK_V(eth_l2_src, bit_mask, ip_fragmented, mask, frag);
	DR_STE_SET_MASK_V(eth_l2_src, bit_mask, l3_ethertype, mask, ethertype);
	/* ip_version is matched via l3_type, so fully mask that field. */
	DR_STE_SET_MASK(eth_l2_src, bit_mask, l3_type, mask, ip_version);

	/* Either VLAN tag bit enables matching on the vlan qualifier. */
	if (mask->svlan_tag || mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
		mask->svlan_tag = 0;
	}

	if (inner) {
		if (misc_mask->inner_second_cvlan_tag ||
		    misc_mask->inner_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
			misc_mask->inner_second_cvlan_tag = 0;
			misc_mask->inner_second_svlan_tag = 0;
		}

		DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
				  second_vlan_id, misc_mask, inner_second_vid);
		DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
				  second_cfi, misc_mask, inner_second_cfi);
		DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
				  second_priority, misc_mask, inner_second_prio);
	} else {
		if (misc_mask->outer_second_cvlan_tag ||
		    misc_mask->outer_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
			misc_mask->outer_second_cvlan_tag = 0;
			misc_mask->outer_second_svlan_tag = 0;
		}

		DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
				  second_vlan_id, misc_mask, outer_second_vid);
		DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
				  second_cfi, misc_mask, outer_second_cfi);
		DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
				  second_priority, misc_mask, outer_second_prio);
	}
}
1303
/* Common tag builder shared by the ETHL2_SRC and ETHL2_DST lookups:
 * first VLAN, fragment bit, ethertype, L3 type and the second VLAN
 * (taken from the misc spec, inner or outer side per @inner).
 * Returns -EINVAL on an ip_version other than 4 or 6.
 */
static int dr_ste_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
					      bool inner, u8 *tag)
{
	struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc_spec = &value->misc;

	DR_STE_SET_TAG(eth_l2_src, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_src, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_src, tag, first_priority, spec, first_prio);
	DR_STE_SET_TAG(eth_l2_src, tag, ip_fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l2_src, tag, l3_ethertype, spec, ethertype);

	/* Translate the IP header version to the STE l3_type encoding. */
	if (spec->ip_version) {
		if (spec->ip_version == IP_VERSION_IPV4) {
			MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV4);
			spec->ip_version = 0;
		} else if (spec->ip_version == IP_VERSION_IPV6) {
			MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV6);
			spec->ip_version = 0;
		} else {
			pr_info("Unsupported ip_version value\n");
			return -EINVAL;
		}
	}

	/* cvlan wins when both VLAN tag bits are set in the spec. */
	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}

	if (inner) {
		if (misc_spec->inner_second_cvlan_tag) {
			MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
			misc_spec->inner_second_cvlan_tag = 0;
		} else if (misc_spec->inner_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
			misc_spec->inner_second_svlan_tag = 0;
		}

		DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, inner_second_vid);
		DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, inner_second_cfi);
		DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, inner_second_prio);
	} else {
		if (misc_spec->outer_second_cvlan_tag) {
			MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
			misc_spec->outer_second_cvlan_tag = 0;
		} else if (misc_spec->outer_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
			misc_spec->outer_second_svlan_tag = 0;
		}
		DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, outer_second_vid);
		DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, outer_second_cfi);
		DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, outer_second_prio);
	}

	return 0;
}
1364
/* Enable the ETHL2_SRC mask bits: SMAC, then the src/dst-shared L2
 * fields via the common helper.
 */
static void dr_ste_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
					     bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_MASK_V(eth_l2_src, bit_mask, smac_47_16, mask, smac_47_16);
	DR_STE_SET_MASK_V(eth_l2_src, bit_mask, smac_15_0, mask, smac_15_0);

	dr_ste_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
}
1375
/* Build the ETHL2_SRC STE tag: SMAC plus the shared L2 fields via the
 * common src-or-dst helper (which supplies the return status).
 */
static int dr_ste_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
				       struct mlx5dr_ste_build *sb,
				       u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_src, tag, smac_47_16, spec, smac_47_16);
	DR_STE_SET_TAG(eth_l2_src, tag, smac_15_0, spec, smac_15_0);

	return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
}
1387
/* Initialize @sb to build an ETHL2_SRC STE (source MAC plus shared L2
 * fields) from the given match @mask.
 */
void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC, rx, inner);
	sb->ste_build_tag_func = dr_ste_build_eth_l2_src_tag;

	/* Fill the bit mask before compressing it into the byte mask. */
	dr_ste_build_eth_l2_src_bit_mask(mask, inner, sb->bit_mask);
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
}
1399
/* Enable the ETHL2_DST mask bits: DMAC, then the src/dst-shared L2
 * fields via the common helper.
 */
static void dr_ste_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
					     bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_MASK_V(eth_l2_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_MASK_V(eth_l2_dst, bit_mask, dmac_15_0, mask, dmac_15_0);

	dr_ste_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
}
1410
/* Build the ETHL2_DST STE tag: DMAC plus the shared L2 fields via the
 * common src-or-dst helper (which supplies the return status).
 */
static int dr_ste_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
				       struct mlx5dr_ste_build *sb,
				       u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_dst, tag, dmac_47_16, spec, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_dst, tag, dmac_15_0, spec, dmac_15_0);

	return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
}
1422
/* Initialize @sb to build an ETHL2_DST STE (destination MAC plus
 * shared L2 fields) from the given match @mask.
 */
void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_DST, rx, inner);
	sb->ste_build_tag_func = dr_ste_build_eth_l2_dst_tag;

	/* Fill the bit mask before compressing it into the byte mask. */
	dr_ste_build_eth_l2_dst_bit_mask(mask, inner, sb->bit_mask);
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
}
1435
/* Enable the L2 tunneling mask bits: DMAC, first VLAN, fragment bit,
 * ethertype, L3 type and the tunnel network id (VXLAN VNI from the
 * misc mask).
 */
static void dr_ste_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
					     bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, dmac_15_0, mask, dmac_15_0);
	DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_priority, mask, first_prio);
	DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, ip_fragmented, mask, frag);
	DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, l3_ethertype, mask, ethertype);
	DR_STE_SET_MASK(eth_l2_tnl, bit_mask, l3_type, mask, ip_version);

	/* The VNI is shifted into its position within the
	 * l2_tunneling_network_id field (same shift as the tag builder).
	 */
	if (misc->vxlan_vni) {
		MLX5_SET(ste_eth_l2_tnl, bit_mask,
			 l2_tunneling_network_id, (misc->vxlan_vni << 8));
		misc->vxlan_vni = 0;
	}

	/* Either VLAN tag bit enables matching on the vlan qualifier. */
	if (mask->svlan_tag || mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_tnl, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
		mask->svlan_tag = 0;
	}
}
1463
/* Build the L2 tunneling STE tag: DMAC, first VLAN, ethertype, tunnel
 * network id (VXLAN VNI) and L3 type.  Returns -EINVAL on an
 * ip_version other than 4 or 6 (silently, unlike the other builders).
 */
static int dr_ste_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
				       struct mlx5dr_ste_build *sb,
				       u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_47_16, spec, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_15_0, spec, dmac_15_0);
	DR_STE_SET_TAG(eth_l2_tnl, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_tnl, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_tnl, tag, ip_fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l2_tnl, tag, first_priority, spec, first_prio);
	DR_STE_SET_TAG(eth_l2_tnl, tag, l3_ethertype, spec, ethertype);

	/* Same VNI shift as the corresponding bit-mask builder. */
	if (misc->vxlan_vni) {
		MLX5_SET(ste_eth_l2_tnl, tag, l2_tunneling_network_id,
			 (misc->vxlan_vni << 8));
		misc->vxlan_vni = 0;
	}

	/* cvlan wins when both VLAN tag bits are set in the spec. */
	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}

	/* Translate the IP header version to the STE l3_type encoding. */
	if (spec->ip_version) {
		if (spec->ip_version == IP_VERSION_IPV4) {
			MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV4);
			spec->ip_version = 0;
		} else if (spec->ip_version == IP_VERSION_IPV6) {
			MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV6);
			spec->ip_version = 0;
		} else {
			return -EINVAL;
		}
	}

	return 0;
}
1507
/* Initialize @sb for the L2 tunneling lookup.  Note: unlike the other
 * builders here, the lookup type is a fixed constant and is NOT
 * derived from @rx/@inner via DR_STE_CALC_LU_TYPE().
 */
void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask, bool inner, bool rx)
{
	dr_ste_build_eth_l2_tnl_bit_mask(mask, inner, sb->bit_mask);

	sb->rx = rx;
	sb->inner = inner;
	sb->lu_type = MLX5DR_STE_LU_TYPE_ETHL2_TUNNELING_I;
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_build_eth_l2_tnl_tag;
}
1519
/* Enable the ETHL3_IPV4_MISC mask bits: only the IPv4 TTL field. */
static void dr_ste_build_eth_l3_ipv4_misc_bit_mask(struct mlx5dr_match_param *value,
						   bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_MASK_V(eth_l3_ipv4_misc, bit_mask, time_to_live, mask, ttl_hoplimit);
}
1527
/* Build the ETHL3_IPV4_MISC STE tag: only the IPv4 TTL field,
 * consumed from the spec.
 */
static int dr_ste_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
					     struct mlx5dr_ste_build *sb,
					     u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, time_to_live, spec, ttl_hoplimit);

	return 0;
}
1538
/* Initialize @sb to build an ETHL3_IPV4_MISC STE (IPv4 TTL lookup)
 * from the given match @mask.
 */
void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_build *sb,
				       struct mlx5dr_match_param *mask,
				       bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_MISC, rx, inner);
	sb->ste_build_tag_func = dr_ste_build_eth_l3_ipv4_misc_tag;

	/* Fill the bit mask before compressing it into the byte mask. */
	dr_ste_build_eth_l3_ipv4_misc_bit_mask(mask, inner, sb->bit_mask);
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
}
1551
/* Enable the ETHL4 mask bits: L4 ports (TCP and UDP share the same
 * dst_port/src_port fields), protocol, frag/dscp/ecn, hop limit and
 * TCP flags.
 */
static void dr_ste_build_ipv6_l3_l4_bit_mask(struct mlx5dr_match_param *value,
					     bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_MASK_V(eth_l4, bit_mask, dst_port, mask, tcp_dport);
	DR_STE_SET_MASK_V(eth_l4, bit_mask, src_port, mask, tcp_sport);
	DR_STE_SET_MASK_V(eth_l4, bit_mask, dst_port, mask, udp_dport);
	DR_STE_SET_MASK_V(eth_l4, bit_mask, src_port, mask, udp_sport);
	DR_STE_SET_MASK_V(eth_l4, bit_mask, protocol, mask, ip_protocol);
	DR_STE_SET_MASK_V(eth_l4, bit_mask, fragmented, mask, frag);
	DR_STE_SET_MASK_V(eth_l4, bit_mask, dscp, mask, ip_dscp);
	DR_STE_SET_MASK_V(eth_l4, bit_mask, ecn, mask, ip_ecn);
	DR_STE_SET_MASK_V(eth_l4, bit_mask, ipv6_hop_limit, mask, ttl_hoplimit);

	/* TCP flags are expanded bit-by-bit into individual STE fields. */
	if (mask->tcp_flags) {
		DR_STE_SET_TCP_FLAGS(eth_l4, bit_mask, mask);
		mask->tcp_flags = 0;
	}
}
1572
/* Build the eth_l4 STE tag from the inner or outer match spec.
 * Mirrors dr_ste_build_ipv6_l3_l4_bit_mask(): each DR_STE_SET_TAG copies
 * a spec value into the tag and clears the spec field as consumed.
 */
static int dr_ste_build_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
				       struct mlx5dr_ste_build *sb,
				       u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport);
	DR_STE_SET_TAG(eth_l4, tag, src_port, spec, tcp_sport);
	DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, udp_dport);
	DR_STE_SET_TAG(eth_l4, tag, src_port, spec, udp_sport);
	DR_STE_SET_TAG(eth_l4, tag, protocol, spec, ip_protocol);
	DR_STE_SET_TAG(eth_l4, tag, fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l4, tag, dscp, spec, ip_dscp);
	DR_STE_SET_TAG(eth_l4, tag, ecn, spec, ip_ecn);
	DR_STE_SET_TAG(eth_l4, tag, ipv6_hop_limit, spec, ttl_hoplimit);

	/* TCP flags are expanded bit-by-bit into individual STE fields */
	if (spec->tcp_flags) {
		DR_STE_SET_TCP_FLAGS(eth_l4, tag, spec);
		spec->tcp_flags = 0;
	}

	return 0;
}
1596
/* Initialize @sb as an ETHL4 (IPv6 L3/L4) matcher: derive the bit mask
 * from @mask and fill in the lookup type, byte mask and tag builder.
 */
void mlx5dr_ste_build_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	dr_ste_build_ipv6_l3_l4_bit_mask(mask, inner, sb->bit_mask);

	sb->inner = inner;
	sb->rx = rx;
	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4, rx, inner);
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = dr_ste_build_ipv6_l3_l4_tag;
}
1609
/* Tag builder for the "always hit" STE: nothing is matched, so the tag
 * is intentionally left untouched.
 */
static int dr_ste_build_empty_always_hit_tag(struct mlx5dr_match_param *value,
					     struct mlx5dr_ste_build *sb,
					     u8 *tag)
{
	return 0;
}
1616
/* Initialize @sb as an STE that matches everything: don't-care lookup
 * type, zero byte mask, and a tag builder that writes nothing.
 */
void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx)
{
	sb->lu_type = MLX5DR_STE_LU_TYPE_DONT_CARE;
	sb->rx = rx;
	sb->byte_mask = 0;
	sb->ste_build_tag_func = dr_ste_build_empty_always_hit_tag;
}
1624
/* Build the MPLS STE bit mask from misc2, using the inner or outer
 * first-MPLS-label fields depending on @inner.
 */
static void dr_ste_build_mpls_bit_mask(struct mlx5dr_match_param *value,
				       bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_misc2 *misc2_mask = &value->misc2;

	if (inner)
		DR_STE_SET_MPLS_MASK(mpls, misc2_mask, inner, bit_mask);
	else
		DR_STE_SET_MPLS_MASK(mpls, misc2_mask, outer, bit_mask);
}
1635
/* Build the MPLS STE tag from misc2, using the inner or outer
 * first-MPLS-label fields depending on sb->inner.
 */
static int dr_ste_build_mpls_tag(struct mlx5dr_match_param *value,
				 struct mlx5dr_ste_build *sb,
				 u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2_mask = &value->misc2;

	if (sb->inner)
		DR_STE_SET_MPLS_TAG(mpls, misc2_mask, inner, tag);
	else
		DR_STE_SET_MPLS_TAG(mpls, misc2_mask, outer, tag);

	return 0;
}
1649
/* Initialize @sb as an MPLS_FIRST matcher: derive the bit mask from
 * @mask and fill in the lookup type, byte mask and tag builder.
 */
void mlx5dr_ste_build_mpls(struct mlx5dr_ste_build *sb,
			   struct mlx5dr_match_param *mask,
			   bool inner, bool rx)
{
	dr_ste_build_mpls_bit_mask(mask, inner, sb->bit_mask);

	sb->inner = inner;
	sb->rx = rx;
	sb->lu_type = DR_STE_CALC_LU_TYPE(MPLS_FIRST, rx, inner);
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = dr_ste_build_mpls_tag;
}
1662
/* Build the GRE STE bit mask from the misc match params: protocol,
 * K/C/S flag bits and both halves of the GRE key. Each DR_STE_SET_MASK_V
 * clears the consumed misc field.
 */
static void dr_ste_build_gre_bit_mask(struct mlx5dr_match_param *value,
				      bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_misc *misc_mask = &value->misc;

	DR_STE_SET_MASK_V(gre, bit_mask, gre_protocol, misc_mask, gre_protocol);
	DR_STE_SET_MASK_V(gre, bit_mask, gre_k_present, misc_mask, gre_k_present);
	DR_STE_SET_MASK_V(gre, bit_mask, gre_key_h, misc_mask, gre_key_h);
	DR_STE_SET_MASK_V(gre, bit_mask, gre_key_l, misc_mask, gre_key_l);

	DR_STE_SET_MASK_V(gre, bit_mask, gre_c_present, misc_mask, gre_c_present);
	DR_STE_SET_MASK_V(gre, bit_mask, gre_s_present, misc_mask, gre_s_present);
}
1676
/* Build the GRE STE tag; mirrors dr_ste_build_gre_bit_mask(). Each
 * DR_STE_SET_TAG copies the misc value into the tag and clears the
 * consumed misc field.
 */
static int dr_ste_build_gre_tag(struct mlx5dr_match_param *value,
				struct mlx5dr_ste_build *sb,
				u8 *tag)
{
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(gre, tag, gre_protocol, misc, gre_protocol);

	DR_STE_SET_TAG(gre, tag, gre_k_present, misc, gre_k_present);
	DR_STE_SET_TAG(gre, tag, gre_key_h, misc, gre_key_h);
	DR_STE_SET_TAG(gre, tag, gre_key_l, misc, gre_key_l);

	DR_STE_SET_TAG(gre, tag, gre_c_present, misc, gre_c_present);

	DR_STE_SET_TAG(gre, tag, gre_s_present, misc, gre_s_present);

	return 0;
}
1695
/* Initialize @sb as a GRE matcher: derive the bit mask from @mask and
 * fill in the lookup type, byte mask and tag builder.
 */
void mlx5dr_ste_build_gre(struct mlx5dr_ste_build *sb,
			  struct mlx5dr_match_param *mask, bool inner, bool rx)
{
	dr_ste_build_gre_bit_mask(mask, inner, sb->bit_mask);

	sb->inner = inner;
	sb->rx = rx;
	sb->lu_type = MLX5DR_STE_LU_TYPE_GRE;
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = dr_ste_build_gre_tag;
}
1707
/* Build the flex_parser_0 STE bit mask for the outer first MPLS header.
 * The MPLS header may be carried over GRE or over UDP; which set of misc2
 * fields is masked (per DR_STE_IS_OUTER_MPLS_OVER_GRE_SET) selects the
 * source fields — both variants map onto the same parser_3_* STE fields.
 */
static void dr_ste_build_flex_parser_0_bit_mask(struct mlx5dr_match_param *value,
						bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;

	if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2_mask)) {
		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_label,
				  misc_2_mask, outer_first_mpls_over_gre_label);

		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_exp,
				  misc_2_mask, outer_first_mpls_over_gre_exp);

		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_s_bos,
				  misc_2_mask, outer_first_mpls_over_gre_s_bos);

		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_ttl,
				  misc_2_mask, outer_first_mpls_over_gre_ttl);
	} else {
		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_label,
				  misc_2_mask, outer_first_mpls_over_udp_label);

		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_exp,
				  misc_2_mask, outer_first_mpls_over_udp_exp);

		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_s_bos,
				  misc_2_mask, outer_first_mpls_over_udp_s_bos);

		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_ttl,
				  misc_2_mask, outer_first_mpls_over_udp_ttl);
	}
}
1739
/* Build the flex_parser_0 STE tag for the outer first MPLS header;
 * mirrors dr_ste_build_flex_parser_0_bit_mask(): MPLS-over-GRE vs
 * MPLS-over-UDP fields are selected by which misc2 fields are set.
 */
static int dr_ste_build_flex_parser_0_tag(struct mlx5dr_match_param *value,
					  struct mlx5dr_ste_build *sb,
					  u8 *tag)
{
	struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;

	if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2_mask)) {
		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
			       misc_2_mask, outer_first_mpls_over_gre_label);

		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
			       misc_2_mask, outer_first_mpls_over_gre_exp);

		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
			       misc_2_mask, outer_first_mpls_over_gre_s_bos);

		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
			       misc_2_mask, outer_first_mpls_over_gre_ttl);
	} else {
		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
			       misc_2_mask, outer_first_mpls_over_udp_label);

		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
			       misc_2_mask, outer_first_mpls_over_udp_exp);

		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
			       misc_2_mask, outer_first_mpls_over_udp_s_bos);

		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
			       misc_2_mask, outer_first_mpls_over_udp_ttl);
	}
	return 0;
}
1773
/* Initialize @sb as a FLEX_PARSER_0 matcher (outer first MPLS over
 * GRE/UDP): derive the bit mask from @mask and fill in the lookup type,
 * byte mask and tag builder.
 */
void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_build *sb,
				    struct mlx5dr_match_param *mask,
				    bool inner, bool rx)
{
	dr_ste_build_flex_parser_0_bit_mask(mask, inner, sb->bit_mask);

	sb->inner = inner;
	sb->rx = rx;
	sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_0;
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = dr_ste_build_flex_parser_0_tag;
}
1786
1787 #define ICMP_TYPE_OFFSET_FIRST_DW 24
1788 #define ICMP_CODE_OFFSET_FIRST_DW 16
1789 #define ICMP_HEADER_DATA_OFFSET_SECOND_DW 0
1790
/* Build the flex_parser_1 STE bit mask for ICMPv4/ICMPv6 matching.
 *
 * ICMP type and code are packed into the first flex-parser dword
 * (type at bits 31:24, code at bits 23:16) and the ICMP header data
 * occupies the second dword. Only the HW configuration where the first
 * dword is flex parser 4 and the second is flex parser 5 (per
 * caps->flex_parser_id_*) is supported; anything else returns -EINVAL.
 * Consumed misc3 mask fields are zeroed to mark them as used.
 *
 * Returns 0 on success, -EINVAL on an unsupported flex-parser layout.
 */
static int dr_ste_build_flex_parser_1_bit_mask(struct mlx5dr_match_param *mask,
					       struct mlx5dr_cmd_caps *caps,
					       u8 *bit_mask)
{
	struct mlx5dr_match_misc3 *misc_3_mask = &mask->misc3;
	bool is_ipv4_mask = DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(misc_3_mask);
	u32 icmp_header_data_mask;
	u32 icmp_type_mask;
	u32 icmp_code_mask;
	int dw0_location;
	int dw1_location;

	/* Pick the v4 or v6 mask fields and the matching parser dword ids */
	if (is_ipv4_mask) {
		icmp_header_data_mask	= misc_3_mask->icmpv4_header_data;
		icmp_type_mask		= misc_3_mask->icmpv4_type;
		icmp_code_mask		= misc_3_mask->icmpv4_code;
		dw0_location		= caps->flex_parser_id_icmp_dw0;
		dw1_location		= caps->flex_parser_id_icmp_dw1;
	} else {
		icmp_header_data_mask	= misc_3_mask->icmpv6_header_data;
		icmp_type_mask		= misc_3_mask->icmpv6_type;
		icmp_code_mask		= misc_3_mask->icmpv6_code;
		dw0_location		= caps->flex_parser_id_icmpv6_dw0;
		dw1_location		= caps->flex_parser_id_icmpv6_dw1;
	}

	switch (dw0_location) {
	case 4:
		if (icmp_type_mask) {
			MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_4,
				 (icmp_type_mask << ICMP_TYPE_OFFSET_FIRST_DW));
			if (is_ipv4_mask)
				misc_3_mask->icmpv4_type = 0;
			else
				misc_3_mask->icmpv6_type = 0;
		}
		if (icmp_code_mask) {
			/* OR the code into the dword without losing the type */
			u32 cur_val = MLX5_GET(ste_flex_parser_1, bit_mask,
					       flex_parser_4);
			MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_4,
				 cur_val | (icmp_code_mask << ICMP_CODE_OFFSET_FIRST_DW));
			if (is_ipv4_mask)
				misc_3_mask->icmpv4_code = 0;
			else
				misc_3_mask->icmpv6_code = 0;
		}
		break;
	default:
		return -EINVAL;
	}

	switch (dw1_location) {
	case 5:
		if (icmp_header_data_mask) {
			MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_5,
				 (icmp_header_data_mask << ICMP_HEADER_DATA_OFFSET_SECOND_DW));
			if (is_ipv4_mask)
				misc_3_mask->icmpv4_header_data = 0;
			else
				misc_3_mask->icmpv6_header_data = 0;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
1859
/* Build the flex_parser_1 STE tag for ICMPv4/ICMPv6; mirrors
 * dr_ste_build_flex_parser_1_bit_mask(): type/code packed into
 * flex_parser_4 (bits 31:24 / 23:16), header data into flex_parser_5.
 * Only dw0 at flex parser 4 and dw1 at flex parser 5 are supported.
 * Consumed misc3 fields are zeroed.
 *
 * Returns 0 on success, -EINVAL on an unsupported flex-parser layout.
 */
static int dr_ste_build_flex_parser_1_tag(struct mlx5dr_match_param *value,
					  struct mlx5dr_ste_build *sb,
					  u8 *tag)
{
	struct mlx5dr_match_misc3 *misc_3 = &value->misc3;
	u32 icmp_header_data;
	int dw0_location;
	int dw1_location;
	u32 icmp_type;
	u32 icmp_code;
	bool is_ipv4;

	/* Pick the v4 or v6 value fields and the matching parser dword ids */
	is_ipv4 = DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(misc_3);
	if (is_ipv4) {
		icmp_header_data	= misc_3->icmpv4_header_data;
		icmp_type		= misc_3->icmpv4_type;
		icmp_code		= misc_3->icmpv4_code;
		dw0_location		= sb->caps->flex_parser_id_icmp_dw0;
		dw1_location		= sb->caps->flex_parser_id_icmp_dw1;
	} else {
		icmp_header_data	= misc_3->icmpv6_header_data;
		icmp_type		= misc_3->icmpv6_type;
		icmp_code		= misc_3->icmpv6_code;
		dw0_location		= sb->caps->flex_parser_id_icmpv6_dw0;
		dw1_location		= sb->caps->flex_parser_id_icmpv6_dw1;
	}

	switch (dw0_location) {
	case 4:
		if (icmp_type) {
			MLX5_SET(ste_flex_parser_1, tag, flex_parser_4,
				 (icmp_type << ICMP_TYPE_OFFSET_FIRST_DW));
			if (is_ipv4)
				misc_3->icmpv4_type = 0;
			else
				misc_3->icmpv6_type = 0;
		}

		if (icmp_code) {
			/* OR the code into the dword without losing the type */
			u32 cur_val = MLX5_GET(ste_flex_parser_1, tag,
					       flex_parser_4);
			MLX5_SET(ste_flex_parser_1, tag, flex_parser_4,
				 cur_val | (icmp_code << ICMP_CODE_OFFSET_FIRST_DW));
			if (is_ipv4)
				misc_3->icmpv4_code = 0;
			else
				misc_3->icmpv6_code = 0;
		}
		break;
	default:
		return -EINVAL;
	}

	switch (dw1_location) {
	case 5:
		if (icmp_header_data) {
			MLX5_SET(ste_flex_parser_1, tag, flex_parser_5,
				 (icmp_header_data << ICMP_HEADER_DATA_OFFSET_SECOND_DW));
			if (is_ipv4)
				misc_3->icmpv4_header_data = 0;
			else
				misc_3->icmpv6_header_data = 0;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
1930
/* Initialize @sb as a FLEX_PARSER_1 (ICMP) matcher. Unlike the other
 * builders this can fail, since the bit-mask build validates the HW
 * flex-parser dword layout against @caps.
 *
 * Returns 0 on success or a negative errno from the bit-mask build.
 */
int mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_build *sb,
				   struct mlx5dr_match_param *mask,
				   struct mlx5dr_cmd_caps *caps,
				   bool inner, bool rx)
{
	int ret = dr_ste_build_flex_parser_1_bit_mask(mask, caps, sb->bit_mask);

	if (ret)
		return ret;

	sb->inner = inner;
	sb->rx = rx;
	sb->caps = caps;
	sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_1;
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = dr_ste_build_flex_parser_1_tag;

	return 0;
}
1951
/* Build the general_purpose STE bit mask from misc2 metadata_reg_a,
 * which maps onto the general purpose lookup field.
 */
static void dr_ste_build_general_purpose_bit_mask(struct mlx5dr_match_param *value,
						  bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;

	DR_STE_SET_MASK_V(general_purpose, bit_mask,
			  general_purpose_lookup_field, misc_2_mask,
			  metadata_reg_a);
}
1961
/* Build the general_purpose STE tag from misc2 metadata_reg_a;
 * mirrors dr_ste_build_general_purpose_bit_mask().
 */
static int dr_ste_build_general_purpose_tag(struct mlx5dr_match_param *value,
					    struct mlx5dr_ste_build *sb,
					    u8 *tag)
{
	struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;

	DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
		       misc_2_mask, metadata_reg_a);

	return 0;
}
1973
/* Initialize @sb as a GENERAL_PURPOSE (metadata_reg_a) matcher: derive
 * the bit mask from @mask and fill in the lookup type, byte mask and
 * tag builder.
 */
void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask,
				      bool inner, bool rx)
{
	dr_ste_build_general_purpose_bit_mask(mask, inner, sb->bit_mask);

	sb->inner = inner;
	sb->rx = rx;
	sb->lu_type = MLX5DR_STE_LU_TYPE_GENERAL_PURPOSE;
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = dr_ste_build_general_purpose_tag;
}
1986
/* Build the eth_l4_misc STE bit mask (TCP sequence/ack numbers) from
 * misc3, using the inner or outer fields per @inner.
 */
static void dr_ste_build_eth_l4_misc_bit_mask(struct mlx5dr_match_param *value,
					      bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_misc3 *misc_3_mask = &value->misc3;

	if (inner) {
		DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, seq_num, misc_3_mask,
				  inner_tcp_seq_num);
		DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, ack_num, misc_3_mask,
				  inner_tcp_ack_num);
	} else {
		DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, seq_num, misc_3_mask,
				  outer_tcp_seq_num);
		DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, ack_num, misc_3_mask,
				  outer_tcp_ack_num);
	}
}
2004
/* Build the eth_l4_misc STE tag (TCP sequence/ack numbers) from misc3;
 * mirrors dr_ste_build_eth_l4_misc_bit_mask().
 */
static int dr_ste_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
					struct mlx5dr_ste_build *sb,
					u8 *tag)
{
	struct mlx5dr_match_misc3 *misc3 = &value->misc3;

	if (sb->inner) {
		DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, inner_tcp_seq_num);
		DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, inner_tcp_ack_num);
	} else {
		DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, outer_tcp_seq_num);
		DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, outer_tcp_ack_num);
	}

	return 0;
}
2021
/* Initialize @sb as an ETHL4_MISC (TCP seq/ack) matcher: derive the bit
 * mask from @mask and fill in the lookup type, byte mask and tag builder.
 */
void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb,
				  struct mlx5dr_match_param *mask,
				  bool inner, bool rx)
{
	dr_ste_build_eth_l4_misc_bit_mask(mask, inner, sb->bit_mask);

	sb->inner = inner;
	sb->rx = rx;
	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4_MISC, rx, inner);
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = dr_ste_build_eth_l4_misc_tag;
}
2034
/* Build the VXLAN-GPE tunnel STE bit mask from misc3: flags, next
 * protocol and VNI of the outer VXLAN-GPE header.
 */
static void
dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(struct mlx5dr_match_param *value,
						bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_misc3 *misc_3_mask = &value->misc3;

	DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
			  outer_vxlan_gpe_flags,
			  misc_3_mask, outer_vxlan_gpe_flags);
	DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
			  outer_vxlan_gpe_next_protocol,
			  misc_3_mask, outer_vxlan_gpe_next_protocol);
	DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
			  outer_vxlan_gpe_vni,
			  misc_3_mask, outer_vxlan_gpe_vni);
}
2051
/* Build the VXLAN-GPE tunnel STE tag from misc3; mirrors
 * dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask().
 */
static int
dr_ste_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
					   struct mlx5dr_ste_build *sb,
					   u8 *tag)
{
	struct mlx5dr_match_misc3 *misc3 = &value->misc3;

	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
		       outer_vxlan_gpe_flags, misc3,
		       outer_vxlan_gpe_flags);
	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
		       outer_vxlan_gpe_next_protocol, misc3,
		       outer_vxlan_gpe_next_protocol);
	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
		       outer_vxlan_gpe_vni, misc3,
		       outer_vxlan_gpe_vni);

	return 0;
}
2071
/* Initialize @sb as a VXLAN-GPE tunnel-header matcher: derive the bit
 * mask from @mask and fill in the lookup type, byte mask and tag builder.
 */
void mlx5dr_ste_build_flex_parser_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb,
						struct mlx5dr_match_param *mask,
						bool inner, bool rx)
{
	dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(mask, inner,
							sb->bit_mask);

	sb->inner = inner;
	sb->rx = rx;
	sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER;
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = dr_ste_build_flex_parser_tnl_vxlan_gpe_tag;
}
2085
/* Build the GENEVE tunnel STE bit mask from the misc match params:
 * protocol type, OAM bit, options length and VNI.
 */
static void
dr_ste_build_flex_parser_tnl_geneve_bit_mask(struct mlx5dr_match_param *value,
					     u8 *bit_mask)
{
	struct mlx5dr_match_misc *misc_mask = &value->misc;

	DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
			  geneve_protocol_type,
			  misc_mask, geneve_protocol_type);
	DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
			  geneve_oam,
			  misc_mask, geneve_oam);
	DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
			  geneve_opt_len,
			  misc_mask, geneve_opt_len);
	DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
			  geneve_vni,
			  misc_mask, geneve_vni);
}
2105
/* Build the GENEVE tunnel STE tag from the misc match params; mirrors
 * dr_ste_build_flex_parser_tnl_geneve_bit_mask().
 */
static int
dr_ste_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
					struct mlx5dr_ste_build *sb,
					u8 *tag)
{
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
		       geneve_protocol_type, misc, geneve_protocol_type);
	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
		       geneve_oam, misc, geneve_oam);
	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
		       geneve_opt_len, misc, geneve_opt_len);
	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
		       geneve_vni, misc, geneve_vni);

	return 0;
}
2124
/* Initialize @sb as a GENEVE tunnel-header matcher: derive the bit mask
 * from @mask and fill in the lookup type, byte mask and tag builder.
 */
void mlx5dr_ste_build_flex_parser_tnl_geneve(struct mlx5dr_ste_build *sb,
					     struct mlx5dr_match_param *mask,
					     bool inner, bool rx)
{
	dr_ste_build_flex_parser_tnl_geneve_bit_mask(mask, sb->bit_mask);

	sb->inner = inner;
	sb->rx = rx;
	sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER;
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = dr_ste_build_flex_parser_tnl_geneve_tag;
}
2136
/* Build the register_0 STE bit mask from misc2 metadata registers
 * c_0..c_3, which map onto the high/low halves of registers 0 and 1.
 */
static void dr_ste_build_register_0_bit_mask(struct mlx5dr_match_param *value,
					     u8 *bit_mask)
{
	struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;

	DR_STE_SET_MASK_V(register_0, bit_mask, register_0_h,
			  misc_2_mask, metadata_reg_c_0);
	DR_STE_SET_MASK_V(register_0, bit_mask, register_0_l,
			  misc_2_mask, metadata_reg_c_1);
	DR_STE_SET_MASK_V(register_0, bit_mask, register_1_h,
			  misc_2_mask, metadata_reg_c_2);
	DR_STE_SET_MASK_V(register_0, bit_mask, register_1_l,
			  misc_2_mask, metadata_reg_c_3);
}
2151
/* Build the register_0 STE tag from misc2 metadata registers c_0..c_3;
 * mirrors dr_ste_build_register_0_bit_mask().
 */
static int dr_ste_build_register_0_tag(struct mlx5dr_match_param *value,
				       struct mlx5dr_ste_build *sb,
				       u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;

	DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
	DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
	DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2);
	DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3);

	return 0;
}
2165
/* Initialize @sb as a STEERING_REGISTERS_0 matcher (metadata reg_c_0..3):
 * derive the bit mask from @mask and fill in the lookup type, byte mask
 * and tag builder.
 */
void mlx5dr_ste_build_register_0(struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	dr_ste_build_register_0_bit_mask(mask, sb->bit_mask);

	sb->inner = inner;
	sb->rx = rx;
	sb->lu_type = MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_0;
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = dr_ste_build_register_0_tag;
}
2178
/* Build the register_1 STE bit mask from misc2 metadata registers
 * c_4..c_7, which map onto the high/low halves of registers 2 and 3.
 */
static void dr_ste_build_register_1_bit_mask(struct mlx5dr_match_param *value,
					     u8 *bit_mask)
{
	struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;

	DR_STE_SET_MASK_V(register_1, bit_mask, register_2_h,
			  misc_2_mask, metadata_reg_c_4);
	DR_STE_SET_MASK_V(register_1, bit_mask, register_2_l,
			  misc_2_mask, metadata_reg_c_5);
	DR_STE_SET_MASK_V(register_1, bit_mask, register_3_h,
			  misc_2_mask, metadata_reg_c_6);
	DR_STE_SET_MASK_V(register_1, bit_mask, register_3_l,
			  misc_2_mask, metadata_reg_c_7);
}
2193
/* Build the register_1 STE tag from misc2 metadata registers c_4..c_7;
 * mirrors dr_ste_build_register_1_bit_mask().
 */
static int dr_ste_build_register_1_tag(struct mlx5dr_match_param *value,
				       struct mlx5dr_ste_build *sb,
				       u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;

	DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
	DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
	DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6);
	DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7);

	return 0;
}
2207
/* Initialize @sb as a STEERING_REGISTERS_1 matcher (metadata reg_c_4..7):
 * derive the bit mask from @mask and fill in the lookup type, byte mask
 * and tag builder.
 */
void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	dr_ste_build_register_1_bit_mask(mask, sb->bit_mask);

	sb->inner = inner;
	sb->rx = rx;
	sb->lu_type = MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_1;
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = dr_ste_build_register_1_tag;
}
2220
/* Build the src_gvmi_qp STE bit mask: source_port maps to source_gvmi
 * and source_sqn to source_qp (DR_STE_SET_MASK writes all-ones and
 * clears the misc field). source_eswitch_owner_vhca_id is consumed by
 * the tag builder's caps lookup, so just mark it as used here.
 */
static void dr_ste_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
					       u8 *bit_mask)
{
	struct mlx5dr_match_misc *misc_mask = &value->misc;

	DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_gvmi, misc_mask, source_port);
	DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_qp, misc_mask, source_sqn);
	misc_mask->source_eswitch_owner_vhca_id = 0;
}
2230
/* Build the src_gvmi_qp STE tag. source_sqn is copied directly; the
 * source GVMI is resolved indirectly: pick the capability set (local
 * domain or peer domain) whose GVMI matches the masked
 * source_eswitch_owner_vhca_id, then translate source_port into that
 * port's vport GVMI.
 *
 * Returns 0 on success, -EINVAL if the vhca id matches neither domain
 * or the source port has no vport capabilities.
 */
static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
					 struct mlx5dr_ste_build *sb,
					 u8 *tag)
{
	struct mlx5dr_match_misc *misc = &value->misc;
	struct mlx5dr_cmd_vport_cap *vport_cap;
	struct mlx5dr_domain *dmn = sb->dmn;
	struct mlx5dr_cmd_caps *caps;
	u8 *bit_mask = sb->bit_mask;
	bool source_gvmi_set;

	DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);

	if (sb->vhca_id_valid) {
		/* Find port GVMI based on the eswitch_owner_vhca_id */
		if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
			caps = &dmn->info.caps;
		else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
					   dmn->peer_dmn->info.caps.gvmi))
			caps = &dmn->peer_dmn->info.caps;
		else
			return -EINVAL;
	} else {
		caps = &dmn->info.caps;
	}

	vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port);
	if (!vport_cap)
		return -EINVAL;

	/* Only write the GVMI if the mask actually requested it */
	source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi);
	if (vport_cap->vport_gvmi && source_gvmi_set)
		MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi);

	/* Mark both fields as consumed */
	misc->source_eswitch_owner_vhca_id = 0;
	misc->source_port = 0;

	return 0;
}
2270
/* Initialize @sb as a SRC_GVMI_AND_QP matcher.
 * Ordering is load-bearing: vhca_id_valid must be latched from the mask
 * before dr_ste_build_src_gvmi_qpn_bit_mask() zeroes
 * source_eswitch_owner_vhca_id.
 */
void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
				   struct mlx5dr_match_param *mask,
				   struct mlx5dr_domain *dmn,
				   bool inner, bool rx)
{
	/* Set vhca_id_valid before we reset source_eswitch_owner_vhca_id */
	sb->vhca_id_valid = mask->misc.source_eswitch_owner_vhca_id;

	dr_ste_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);

	sb->rx = rx;
	sb->dmn = dmn;
	sb->inner = inner;
	sb->lu_type = MLX5DR_STE_LU_TYPE_SRC_GVMI_AND_QP;
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_build_src_gvmi_qpn_tag;
}
2288