/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019, Mellanox Technologies */

#ifndef _DR_TYPES_
#define _DR_TYPES_

#include <linux/mlx5/driver.h>
#include <linux/refcount.h>
#include "fs_core.h"
#include "wq.h"
#include "lib/mlx5.h"
#include "mlx5_ifc_dr.h"
#include "mlx5dr.h"

#define DR_RULE_MAX_STES 17
#define DR_ACTION_MAX_STES 5
#define WIRE_PORT 0xFFFF
#define DR_STE_SVLAN 0x1
#define DR_STE_CVLAN 0x2

#define mlx5dr_err(dmn, arg...) mlx5_core_err((dmn)->mdev, ##arg)
#define mlx5dr_info(dmn, arg...) mlx5_core_info((dmn)->mdev, ##arg)
#define mlx5dr_dbg(dmn, arg...) mlx5_core_dbg((dmn)->mdev, ##arg)

enum mlx5dr_icm_chunk_size {
        DR_CHUNK_SIZE_1,
        DR_CHUNK_SIZE_MIN = DR_CHUNK_SIZE_1, /* keep updated when changing */
        DR_CHUNK_SIZE_2,
        DR_CHUNK_SIZE_4,
        DR_CHUNK_SIZE_8,
        DR_CHUNK_SIZE_16,
        DR_CHUNK_SIZE_32,
        DR_CHUNK_SIZE_64,
        DR_CHUNK_SIZE_128,
        DR_CHUNK_SIZE_256,
        DR_CHUNK_SIZE_512,
        DR_CHUNK_SIZE_1K,
        DR_CHUNK_SIZE_2K,
        DR_CHUNK_SIZE_4K,
        DR_CHUNK_SIZE_8K,
        DR_CHUNK_SIZE_16K,
        DR_CHUNK_SIZE_32K,
        DR_CHUNK_SIZE_64K,
        DR_CHUNK_SIZE_128K,
        DR_CHUNK_SIZE_256K,
        DR_CHUNK_SIZE_512K,
        DR_CHUNK_SIZE_1024K,
        DR_CHUNK_SIZE_2048K,
        DR_CHUNK_SIZE_MAX,
};

enum mlx5dr_icm_type {
        DR_ICM_TYPE_STE,
        DR_ICM_TYPE_MODIFY_ACTION,
};

static inline enum mlx5dr_icm_chunk_size
mlx5dr_icm_next_higher_chunk(enum mlx5dr_icm_chunk_size chunk)
{
        chunk += 2;
        if (chunk < DR_CHUNK_SIZE_MAX)
                return chunk;

        return DR_CHUNK_SIZE_MAX;
}
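
/*
 * Worked example (illustrative, derived from the enum above): each enum
 * step doubles the entry count, so skipping one step with "chunk += 2"
 * quadruples it:
 *
 *        mlx5dr_icm_next_higher_chunk(DR_CHUNK_SIZE_4)     returns DR_CHUNK_SIZE_16
 *        mlx5dr_icm_next_higher_chunk(DR_CHUNK_SIZE_1024K) returns DR_CHUNK_SIZE_MAX
 */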

enum {
        DR_STE_SIZE = 64,
        DR_STE_SIZE_CTRL = 32,
        DR_STE_SIZE_TAG = 16,
        DR_STE_SIZE_MASK = 16,
};

enum {
        DR_STE_SIZE_REDUCED = DR_STE_SIZE - DR_STE_SIZE_MASK,
};
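
/*
 * Size note (an inference from the constants above): a full STE is 64
 * bytes, which matches 32 bytes of control plus 16 bytes of tag plus
 * 16 bytes of mask; the reduced STE omits the mask, so
 * DR_STE_SIZE_REDUCED = 64 - 16 = 48 bytes.
 */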

enum {
        DR_MODIFY_ACTION_SIZE = 8,
};

enum mlx5dr_matcher_criteria {
        DR_MATCHER_CRITERIA_EMPTY = 0,
        DR_MATCHER_CRITERIA_OUTER = 1 << 0,
        DR_MATCHER_CRITERIA_MISC = 1 << 1,
        DR_MATCHER_CRITERIA_INNER = 1 << 2,
        DR_MATCHER_CRITERIA_MISC2 = 1 << 3,
        DR_MATCHER_CRITERIA_MISC3 = 1 << 4,
        DR_MATCHER_CRITERIA_MAX = 1 << 5,
};

enum mlx5dr_action_type {
        DR_ACTION_TYP_TNL_L2_TO_L2,
        DR_ACTION_TYP_L2_TO_TNL_L2,
        DR_ACTION_TYP_TNL_L3_TO_L2,
        DR_ACTION_TYP_L2_TO_TNL_L3,
        DR_ACTION_TYP_DROP,
        DR_ACTION_TYP_QP,
        DR_ACTION_TYP_FT,
        DR_ACTION_TYP_CTR,
        DR_ACTION_TYP_TAG,
        DR_ACTION_TYP_MODIFY_HDR,
        DR_ACTION_TYP_VPORT,
        DR_ACTION_TYP_POP_VLAN,
        DR_ACTION_TYP_PUSH_VLAN,
        DR_ACTION_TYP_MAX,
};

enum mlx5dr_ipv {
        DR_RULE_IPV4,
        DR_RULE_IPV6,
        DR_RULE_IPV_MAX,
};

struct mlx5dr_icm_pool;
struct mlx5dr_icm_chunk;
struct mlx5dr_icm_bucket;
struct mlx5dr_ste_htbl;
struct mlx5dr_match_param;
struct mlx5dr_cmd_caps;
struct mlx5dr_matcher_rx_tx;

struct mlx5dr_ste {
        u8 *hw_ste;
        /* refcount: indicates the number of rules that are using this ste */
        u32 refcount;

        /* attached to the miss_list head at each htbl entry */
        struct list_head miss_list_node;

        /* each rule member that uses this ste is attached here */
        struct list_head rule_list;

        /* this ste is a member of an htbl */
        struct mlx5dr_ste_htbl *htbl;

        struct mlx5dr_ste_htbl *next_htbl;

        /* this ste is part of a rule, located in the rule's chain of stes */
        u8 ste_chain_location;
};

struct mlx5dr_ste_htbl_ctrl {
        /* total number of valid entries belonging to this hash table. This
         * includes the non-collision and collision entries
         */
        unsigned int num_of_valid_entries;

        /* total number of collision entries attached to this table */
        unsigned int num_of_collisions;
        unsigned int increase_threshold;
        u8 may_grow:1;
};
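
/*
 * A minimal sketch (an assumption, not the driver's actual rehash
 * policy) of how these counters could drive a growth decision: the
 * table is flagged for growth once its population crosses the
 * threshold. The helper name is hypothetical.
 *
 *        static void example_update_may_grow(struct mlx5dr_ste_htbl_ctrl *ctrl)
 *        {
 *                ctrl->may_grow = ctrl->num_of_valid_entries >
 *                                 ctrl->increase_threshold;
 *        }
 */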

struct mlx5dr_ste_htbl {
        u8 lu_type;
        u16 byte_mask;
        u32 refcount;
        struct mlx5dr_icm_chunk *chunk;
        struct mlx5dr_ste *ste_arr;
        u8 *hw_ste_arr;

        struct list_head *miss_list;

        enum mlx5dr_icm_chunk_size chunk_size;
        struct mlx5dr_ste *pointing_ste;

        struct mlx5dr_ste_htbl_ctrl ctrl;
};

struct mlx5dr_ste_send_info {
        struct mlx5dr_ste *ste;
        struct list_head send_list;
        u16 size;
        u16 offset;
        u8 data_cont[DR_STE_SIZE];
        u8 *data;
};

void mlx5dr_send_fill_and_append_ste_send_info(struct mlx5dr_ste *ste, u16 size,
                u16 offset, u8 *data,
                struct mlx5dr_ste_send_info *ste_info,
                struct list_head *send_list,
                bool copy_data);

struct mlx5dr_ste_build {
        u8 inner:1;
        u8 rx:1;
        u8 vhca_id_valid:1;
        struct mlx5dr_domain *dmn;
        struct mlx5dr_cmd_caps *caps;
        u8 lu_type;
        u16 byte_mask;
        u8 bit_mask[DR_STE_SIZE_MASK];
        int (*ste_build_tag_func)(struct mlx5dr_match_param *spec,
                struct mlx5dr_ste_build *sb,
                u8 *tag);
};

struct mlx5dr_ste_htbl *
mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
                enum mlx5dr_icm_chunk_size chunk_size,
                u8 lu_type, u16 byte_mask);

int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl);

static inline void mlx5dr_htbl_put(struct mlx5dr_ste_htbl *htbl)
{
        htbl->refcount--;
        if (!htbl->refcount)
                mlx5dr_ste_htbl_free(htbl);
}

static inline void mlx5dr_htbl_get(struct mlx5dr_ste_htbl *htbl)
{
        htbl->refcount++;
}
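
/*
 * Usage sketch (hypothetical caller, assuming the allocator hands back
 * a table with refcount 0): every mlx5dr_htbl_get() must be balanced by
 * a mlx5dr_htbl_put(), and the put that drops the last reference frees
 * the table via mlx5dr_ste_htbl_free().
 *
 *        htbl = mlx5dr_ste_htbl_alloc(pool, DR_CHUNK_SIZE_1, lu_type, byte_mask);
 *        if (!htbl)
 *                return -ENOMEM;
 *        mlx5dr_htbl_get(htbl);
 *        ... use the table ...
 *        mlx5dr_htbl_put(htbl);
 */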

/* STE utils */
u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl);
void mlx5dr_ste_init(u8 *hw_ste_p, u8 lu_type, u8 entry_type, u16 gvmi);
void mlx5dr_ste_always_hit_htbl(struct mlx5dr_ste *ste,
                struct mlx5dr_ste_htbl *next_htbl);
void mlx5dr_ste_set_miss_addr(u8 *hw_ste, u64 miss_addr);
u64 mlx5dr_ste_get_miss_addr(u8 *hw_ste);
void mlx5dr_ste_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi);
void mlx5dr_ste_set_hit_addr(u8 *hw_ste, u64 icm_addr, u32 ht_size);
void mlx5dr_ste_always_miss_addr(struct mlx5dr_ste *ste, u64 miss_addr);
void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask);
bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
                u8 ste_location);
void mlx5dr_ste_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag);
void mlx5dr_ste_set_counter_id(u8 *hw_ste_p, u32 ctr_id);
void mlx5dr_ste_set_tx_encap(void *hw_ste_p, u32 reformat_id,
                int size, bool encap_l3);
void mlx5dr_ste_set_rx_decap(u8 *hw_ste_p);
void mlx5dr_ste_set_rx_decap_l3(u8 *hw_ste_p, bool vlan);
void mlx5dr_ste_set_rx_pop_vlan(u8 *hw_ste_p);
void mlx5dr_ste_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_tpid_pcp_dei_vid,
                bool go_back);
void mlx5dr_ste_set_entry_type(u8 *hw_ste_p, u8 entry_type);
u8 mlx5dr_ste_get_entry_type(u8 *hw_ste_p);
void mlx5dr_ste_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
                u32 re_write_index);
void mlx5dr_ste_set_go_back_bit(u8 *hw_ste_p);
u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste);
u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste);
struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste);

void mlx5dr_ste_free(struct mlx5dr_ste *ste,
                struct mlx5dr_matcher *matcher,
                struct mlx5dr_matcher_rx_tx *nic_matcher);
static inline void mlx5dr_ste_put(struct mlx5dr_ste *ste,
                struct mlx5dr_matcher *matcher,
                struct mlx5dr_matcher_rx_tx *nic_matcher)
{
        ste->refcount--;
        if (!ste->refcount)
                mlx5dr_ste_free(ste, matcher, nic_matcher);
}

/* initialized as 0, increased only when the ste appears in a new rule */
static inline void mlx5dr_ste_get(struct mlx5dr_ste *ste)
{
        ste->refcount++;
}

static inline bool mlx5dr_ste_is_not_used(struct mlx5dr_ste *ste)
{
        return !ste->refcount;
}
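
/*
 * Usage sketch (hypothetical, for illustration): a ste starts life with
 * refcount 0; each rule that uses it takes a reference, and the put
 * that drops the last reference frees it.
 *
 *        mlx5dr_ste_get(ste);
 *        ... the ste serves one more rule ...
 *        mlx5dr_ste_put(ste, matcher, nic_matcher);
 *
 * mlx5dr_ste_is_not_used() reports whether the refcount dropped back
 * to 0, i.e. no rule references the entry anymore.
 */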

void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
                struct mlx5dr_ste_htbl *next_htbl);
bool mlx5dr_ste_equal_tag(void *src, void *dst);
int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
                struct mlx5dr_matcher_rx_tx *nic_matcher,
                struct mlx5dr_ste *ste,
                u8 *cur_hw_ste,
                enum mlx5dr_icm_chunk_size log_table_size);

/* STE build functions */
int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
                u8 match_criteria,
                struct mlx5dr_match_param *mask,
                struct mlx5dr_match_param *value);
int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
                struct mlx5dr_matcher_rx_tx *nic_matcher,
                struct mlx5dr_match_param *value,
                u8 *ste_arr);
void mlx5dr_ste_build_eth_l2_src_des(struct mlx5dr_ste_build *builder,
                struct mlx5dr_match_param *mask,
                bool inner, bool rx);
void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_build *sb,
                struct mlx5dr_match_param *mask,
                bool inner, bool rx);
void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_build *sb,
                struct mlx5dr_match_param *mask,
                bool inner, bool rx);
void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_build *sb,
                struct mlx5dr_match_param *mask,
                bool inner, bool rx);
void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_build *sb,
                struct mlx5dr_match_param *mask,
                bool inner, bool rx);
void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_build *sb,
                struct mlx5dr_match_param *mask,
                bool inner, bool rx);
void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_build *sb,
                struct mlx5dr_match_param *mask,
                bool inner, bool rx);
void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_build *sb,
                struct mlx5dr_match_param *mask,
                bool inner, bool rx);
void mlx5dr_ste_build_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
                struct mlx5dr_match_param *mask,
                bool inner, bool rx);
void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb,
                struct mlx5dr_match_param *mask,
                bool inner, bool rx);
void mlx5dr_ste_build_gre(struct mlx5dr_ste_build *sb,
                struct mlx5dr_match_param *mask,
                bool inner, bool rx);
void mlx5dr_ste_build_mpls(struct mlx5dr_ste_build *sb,
                struct mlx5dr_match_param *mask,
                bool inner, bool rx);
void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_build *sb,
                struct mlx5dr_match_param *mask,
                bool inner, bool rx);
int mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_build *sb,
                struct mlx5dr_match_param *mask,
                struct mlx5dr_cmd_caps *caps,
                bool inner, bool rx);
void mlx5dr_ste_build_flex_parser_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb,
                struct mlx5dr_match_param *mask,
                bool inner, bool rx);
void mlx5dr_ste_build_flex_parser_tnl_geneve(struct mlx5dr_ste_build *sb,
                struct mlx5dr_match_param *mask,
                bool inner, bool rx);
void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb,
                struct mlx5dr_match_param *mask,
                bool inner, bool rx);
void mlx5dr_ste_build_register_0(struct mlx5dr_ste_build *sb,
                struct mlx5dr_match_param *mask,
                bool inner, bool rx);
void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb,
                struct mlx5dr_match_param *mask,
                bool inner, bool rx);
void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
                struct mlx5dr_match_param *mask,
                struct mlx5dr_domain *dmn,
                bool inner, bool rx);
void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx);

/* Actions utils */
int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
                struct mlx5dr_matcher_rx_tx *nic_matcher,
                struct mlx5dr_action *actions[],
                u32 num_actions,
                u8 *ste_arr,
                u32 *new_hw_ste_arr_sz);

struct mlx5dr_match_spec {
        u32 smac_47_16; /* Source MAC address of incoming packet */
        /* Incoming packet Ethertype - this is the Ethertype
         * following the last VLAN tag of the packet
         */
        u32 ethertype:16;
        u32 smac_15_0:16; /* Source MAC address of incoming packet */
        u32 dmac_47_16; /* Destination MAC address of incoming packet */
        /* VLAN ID of first VLAN tag in the incoming packet.
         * Valid only when cvlan_tag==1 or svlan_tag==1
         */
        u32 first_vid:12;
        /* CFI bit of first VLAN tag in the incoming packet.
         * Valid only when cvlan_tag==1 or svlan_tag==1
         */
        u32 first_cfi:1;
        /* Priority of first VLAN tag in the incoming packet.
         * Valid only when cvlan_tag==1 or svlan_tag==1
         */
        u32 first_prio:3;
        u32 dmac_15_0:16; /* Destination MAC address of incoming packet */
        /* TCP flags. Bit 0: FIN; Bit 1: SYN; Bit 2: RST; Bit 3: PSH;
         * Bit 4: ACK; Bit 5: URG; Bit 6: ECE; Bit 7: CWR; Bit 8: NS
         */
        u32 tcp_flags:9;
        u32 ip_version:4; /* IP version */
        u32 frag:1; /* Packet is an IP fragment */
        /* The first vlan in the packet is s-vlan (0x88a8).
         * cvlan_tag and svlan_tag cannot be set together
         */
        u32 svlan_tag:1;
        /* The first vlan in the packet is c-vlan (0x8100).
         * cvlan_tag and svlan_tag cannot be set together
         */
        u32 cvlan_tag:1;
        /* Explicit Congestion Notification derived from
         * Traffic Class/TOS field of IPv6/v4
         */
        u32 ip_ecn:2;
        /* Differentiated Services Code Point derived from
         * Traffic Class/TOS field of IPv6/v4
         */
        u32 ip_dscp:6;
        u32 ip_protocol:8; /* IP protocol */
        /* TCP destination port.
         * tcp and udp sport/dport are mutually exclusive
         */
        u32 tcp_dport:16;
        /* TCP source port. tcp and udp sport/dport are mutually exclusive */
        u32 tcp_sport:16;
        u32 ttl_hoplimit:8;
        u32 reserved:24;
        /* UDP destination port. tcp and udp sport/dport are mutually exclusive */
        u32 udp_dport:16;
        /* UDP source port. tcp and udp sport/dport are mutually exclusive */
        u32 udp_sport:16;
        /* IPv6 source address of incoming packets
         * For IPv4 address use bits 31:0 (rest of the bits are reserved)
         * This field should be qualified by an appropriate ethertype
         */
        u32 src_ip_127_96;
        /* IPv6 source address of incoming packets
         * For IPv4 address use bits 31:0 (rest of the bits are reserved)
         * This field should be qualified by an appropriate ethertype
         */
        u32 src_ip_95_64;
        /* IPv6 source address of incoming packets
         * For IPv4 address use bits 31:0 (rest of the bits are reserved)
         * This field should be qualified by an appropriate ethertype
         */
        u32 src_ip_63_32;
        /* IPv6 source address of incoming packets
         * For IPv4 address use bits 31:0 (rest of the bits are reserved)
         * This field should be qualified by an appropriate ethertype
         */
        u32 src_ip_31_0;
        /* IPv6 destination address of incoming packets
         * For IPv4 address use bits 31:0 (rest of the bits are reserved)
         * This field should be qualified by an appropriate ethertype
         */
        u32 dst_ip_127_96;
        /* IPv6 destination address of incoming packets
         * For IPv4 address use bits 31:0 (rest of the bits are reserved)
         * This field should be qualified by an appropriate ethertype
         */
        u32 dst_ip_95_64;
        /* IPv6 destination address of incoming packets
         * For IPv4 address use bits 31:0 (rest of the bits are reserved)
         * This field should be qualified by an appropriate ethertype
         */
        u32 dst_ip_63_32;
        /* IPv6 destination address of incoming packets
         * For IPv4 address use bits 31:0 (rest of the bits are reserved)
         * This field should be qualified by an appropriate ethertype
         */
        u32 dst_ip_31_0;
};
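
/*
 * A minimal sketch (hypothetical helper, shown only to illustrate the
 * field layout) of building an exact-match IPv4/TCP 5-tuple mask: per
 * the field comments above, IPv4 addresses live in bits 31:0 of the
 * IPv6 dword arrays and should be qualified by the ethertype.
 *
 *        static void example_set_ipv4_5_tuple_mask(struct mlx5dr_match_spec *spec)
 *        {
 *                spec->ethertype = 0xffff;
 *                spec->ip_protocol = 0xff;
 *                spec->src_ip_31_0 = 0xffffffff;
 *                spec->dst_ip_31_0 = 0xffffffff;
 *                spec->tcp_sport = 0xffff;
 *                spec->tcp_dport = 0xffff;
 *        }
 */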

struct mlx5dr_match_misc {
        u32 source_sqn:24; /* Source SQN */
        u32 source_vhca_port:4;
        /* used with GRE, sequence number exists when gre_s_present == 1 */
        u32 gre_s_present:1;
        /* used with GRE, key exists when gre_k_present == 1 */
        u32 gre_k_present:1;
        u32 reserved_auto1:1;
        /* used with GRE, checksum exists when gre_c_present == 1 */
        u32 gre_c_present:1;
        /* Source port. 0xffff determines the wire port */
        u32 source_port:16;
        u32 source_eswitch_owner_vhca_id:16;
        /* VLAN ID of first VLAN tag in the inner header of the incoming packet.
         * Valid only when inner_second_cvlan_tag == 1 or inner_second_svlan_tag == 1
         */
        u32 inner_second_vid:12;
        /* CFI bit of first VLAN tag in the inner header of the incoming packet.
         * Valid only when inner_second_cvlan_tag == 1 or inner_second_svlan_tag == 1
         */
        u32 inner_second_cfi:1;
        /* Priority of second VLAN tag in the inner header of the incoming packet.
         * Valid only when inner_second_cvlan_tag == 1 or inner_second_svlan_tag == 1
         */
        u32 inner_second_prio:3;
        /* VLAN ID of first VLAN tag in the outer header of the incoming packet.
         * Valid only when outer_second_cvlan_tag == 1 or outer_second_svlan_tag == 1
         */
        u32 outer_second_vid:12;
        /* CFI bit of first VLAN tag in the outer header of the incoming packet.
         * Valid only when outer_second_cvlan_tag == 1 or outer_second_svlan_tag == 1
         */
        u32 outer_second_cfi:1;
        /* Priority of second VLAN tag in the outer header of the incoming packet.
         * Valid only when outer_second_cvlan_tag == 1 or outer_second_svlan_tag == 1
         */
        u32 outer_second_prio:3;
        u32 gre_protocol:16; /* GRE Protocol (outer) */
        u32 reserved_auto3:12;
        /* The second vlan in the inner header of the packet is s-vlan (0x88a8).
         * inner_second_cvlan_tag and inner_second_svlan_tag cannot be set together
         */
        u32 inner_second_svlan_tag:1;
        /* The second vlan in the outer header of the packet is s-vlan (0x88a8).
         * outer_second_cvlan_tag and outer_second_svlan_tag cannot be set together
         */
        u32 outer_second_svlan_tag:1;
        /* The second vlan in the inner header of the packet is c-vlan (0x8100).
         * inner_second_cvlan_tag and inner_second_svlan_tag cannot be set together
         */
        u32 inner_second_cvlan_tag:1;
        /* The second vlan in the outer header of the packet is c-vlan (0x8100).
         * outer_second_cvlan_tag and outer_second_svlan_tag cannot be set together
         */
        u32 outer_second_cvlan_tag:1;
        u32 gre_key_l:8; /* GRE Key [7:0] (outer) */
        u32 gre_key_h:24; /* GRE Key [31:8] (outer) */
        u32 reserved_auto4:8;
        u32 vxlan_vni:24; /* VXLAN VNI (outer) */
        u32 geneve_oam:1; /* GENEVE OAM field (outer) */
        u32 reserved_auto5:7;
        u32 geneve_vni:24; /* GENEVE VNI field (outer) */
        u32 outer_ipv6_flow_label:20; /* Flow label of incoming IPv6 packet (outer) */
        u32 reserved_auto6:12;
        u32 inner_ipv6_flow_label:20; /* Flow label of incoming IPv6 packet (inner) */
        u32 reserved_auto7:12;
        u32 geneve_protocol_type:16; /* GENEVE protocol type (outer) */
        u32 geneve_opt_len:6; /* GENEVE OptLen (outer) */
        u32 reserved_auto8:10;
        u32 bth_dst_qp:24; /* Destination QP in BTH header */
        u32 reserved_auto9:8;
        u8 reserved_auto10[20];
};

struct mlx5dr_match_misc2 {
        u32 outer_first_mpls_ttl:8; /* First MPLS TTL (outer) */
        u32 outer_first_mpls_s_bos:1; /* First MPLS S_BOS (outer) */
        u32 outer_first_mpls_exp:3; /* First MPLS EXP (outer) */
        u32 outer_first_mpls_label:20; /* First MPLS LABEL (outer) */
        u32 inner_first_mpls_ttl:8; /* First MPLS TTL (inner) */
        u32 inner_first_mpls_s_bos:1; /* First MPLS S_BOS (inner) */
        u32 inner_first_mpls_exp:3; /* First MPLS EXP (inner) */
        u32 inner_first_mpls_label:20; /* First MPLS LABEL (inner) */
        u32 outer_first_mpls_over_gre_ttl:8; /* last MPLS TTL (outer) */
        u32 outer_first_mpls_over_gre_s_bos:1; /* last MPLS S_BOS (outer) */
        u32 outer_first_mpls_over_gre_exp:3; /* last MPLS EXP (outer) */
        u32 outer_first_mpls_over_gre_label:20; /* last MPLS LABEL (outer) */
        u32 outer_first_mpls_over_udp_ttl:8; /* last MPLS TTL (outer) */
        u32 outer_first_mpls_over_udp_s_bos:1; /* last MPLS S_BOS (outer) */
        u32 outer_first_mpls_over_udp_exp:3; /* last MPLS EXP (outer) */
        u32 outer_first_mpls_over_udp_label:20; /* last MPLS LABEL (outer) */
        u32 metadata_reg_c_7;
        u32 metadata_reg_c_6;
        u32 metadata_reg_c_5;
        u32 metadata_reg_c_4;
        u32 metadata_reg_c_3;
        u32 metadata_reg_c_2;
        u32 metadata_reg_c_1;
        u32 metadata_reg_c_0;
        u32 metadata_reg_a;
        u8 reserved_auto2[12];
};

struct mlx5dr_match_misc3 {
        u32 inner_tcp_seq_num;
        u32 outer_tcp_seq_num;
        u32 inner_tcp_ack_num;
        u32 outer_tcp_ack_num;
        u32 outer_vxlan_gpe_vni:24;
        u32 reserved_auto1:8;
        u32 reserved_auto2:16;
        u32 outer_vxlan_gpe_flags:8;
        u32 outer_vxlan_gpe_next_protocol:8;
        u32 icmpv4_header_data;
        u32 icmpv6_header_data;
        u32 icmpv6_code:8;
        u32 icmpv6_type:8;
        u32 icmpv4_code:8;
        u32 icmpv4_type:8;
        u8 reserved_auto3[0x1c];
};

struct mlx5dr_match_param {
        struct mlx5dr_match_spec outer;
        struct mlx5dr_match_misc misc;
        struct mlx5dr_match_spec inner;
        struct mlx5dr_match_misc2 misc2;
        struct mlx5dr_match_misc3 misc3;
};

#define DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(_misc3) ((_misc3)->icmpv4_type || \
                                                   (_misc3)->icmpv4_code || \
                                                   (_misc3)->icmpv4_header_data)

struct mlx5dr_esw_caps {
        u64 drop_icm_address_rx;
        u64 drop_icm_address_tx;
        u64 uplink_icm_address_rx;
        u64 uplink_icm_address_tx;
        bool sw_owner;
};

struct mlx5dr_cmd_vport_cap {
        u16 vport_gvmi;
        u16 vhca_gvmi;
        u64 icm_address_rx;
        u64 icm_address_tx;
        u32 num;
};

struct mlx5dr_cmd_caps {
        u16 gvmi;
        u64 nic_rx_drop_address;
        u64 nic_tx_drop_address;
        u64 nic_tx_allow_address;
        u64 esw_rx_drop_address;
        u64 esw_tx_drop_address;
        u32 log_icm_size;
        u64 hdr_modify_icm_addr;
        u32 flex_protocols;
        u8 flex_parser_id_icmp_dw0;
        u8 flex_parser_id_icmp_dw1;
        u8 flex_parser_id_icmpv6_dw0;
        u8 flex_parser_id_icmpv6_dw1;
        u8 max_ft_level;
        u16 roce_min_src_udp;
        u8 num_esw_ports;
        u8 sw_format_ver;
        bool eswitch_manager;
        bool rx_sw_owner;
        bool tx_sw_owner;
        bool fdb_sw_owner;
        u32 num_vports;
        struct mlx5dr_esw_caps esw_caps;
        struct mlx5dr_cmd_vport_cap *vports_caps;
        bool prio_tag_required;
};

struct mlx5dr_domain_rx_tx {
        u64 drop_icm_addr;
        u64 default_icm_addr;
        enum mlx5dr_ste_entry_type ste_type;
        struct mutex mutex; /* protect rx/tx domain */
};

struct mlx5dr_domain_info {
        bool supp_sw_steering;
        u32 max_inline_size;
        u32 max_send_wr;
        u32 max_log_sw_icm_sz;
        u32 max_log_action_icm_sz;
        struct mlx5dr_domain_rx_tx rx;
        struct mlx5dr_domain_rx_tx tx;
        struct mlx5dr_cmd_caps caps;
};

struct mlx5dr_domain_cache {
        struct mlx5dr_fw_recalc_cs_ft **recalc_cs_ft;
};

struct mlx5dr_domain {
        struct mlx5dr_domain *peer_dmn;
        struct mlx5_core_dev *mdev;
        u32 pdn;
        struct mlx5_uars_page *uar;
        enum mlx5dr_domain_type type;
        refcount_t refcount;
        struct mlx5dr_icm_pool *ste_icm_pool;
        struct mlx5dr_icm_pool *action_icm_pool;
        struct mlx5dr_send_ring *send_ring;
        struct mlx5dr_domain_info info;
        struct mlx5dr_domain_cache cache;
};

struct mlx5dr_table_rx_tx {
        struct mlx5dr_ste_htbl *s_anchor;
        struct mlx5dr_domain_rx_tx *nic_dmn;
        u64 default_icm_addr;
};

struct mlx5dr_table {
        struct mlx5dr_domain *dmn;
        struct mlx5dr_table_rx_tx rx;
        struct mlx5dr_table_rx_tx tx;
        u32 level;
        u32 table_type;
        u32 table_id;
        u32 flags;
        struct list_head matcher_list;
        struct mlx5dr_action *miss_action;
        refcount_t refcount;
};

struct mlx5dr_matcher_rx_tx {
        struct mlx5dr_ste_htbl *s_htbl;
        struct mlx5dr_ste_htbl *e_anchor;
        struct mlx5dr_ste_build *ste_builder;
        struct mlx5dr_ste_build ste_builder_arr[DR_RULE_IPV_MAX]
                                               [DR_RULE_IPV_MAX]
                                               [DR_RULE_MAX_STES];
        u8 num_of_builders;
        u8 num_of_builders_arr[DR_RULE_IPV_MAX][DR_RULE_IPV_MAX];
        u64 default_icm_addr;
        struct mlx5dr_table_rx_tx *nic_tbl;
};

struct mlx5dr_matcher {
        struct mlx5dr_table *tbl;
        struct mlx5dr_matcher_rx_tx rx;
        struct mlx5dr_matcher_rx_tx tx;
        struct list_head matcher_list;
        u32 prio;
        struct mlx5dr_match_param mask;
        u8 match_criteria;
        refcount_t refcount;
        struct mlx5dv_flow_matcher *dv_matcher;
};

struct mlx5dr_rule_member {
        struct mlx5dr_ste *ste;
        /* attached to mlx5dr_rule via this */
        struct list_head list;
        /* attached to mlx5dr_ste via this */
        struct list_head use_ste_list;
};

struct mlx5dr_action {
        enum mlx5dr_action_type action_type;
        refcount_t refcount;
        union {
                struct {
                        struct mlx5dr_domain *dmn;
                        struct mlx5dr_icm_chunk *chunk;
                        u8 *data;
                        u32 data_size;
                        u16 num_of_actions;
                        u32 index;
                        u8 allow_rx:1;
                        u8 allow_tx:1;
                        u8 modify_ttl:1;
                } rewrite;
                struct {
                        struct mlx5dr_domain *dmn;
                        u32 reformat_id;
                        u32 reformat_size;
                } reformat;
                struct {
                        u8 is_fw_tbl:1;
                        union {
                                struct mlx5dr_table *tbl;
                                struct {
                                        struct mlx5dr_domain *dmn;
                                        u32 id;
                                        u32 group_id;
                                        enum fs_flow_table_type type;
                                        u64 rx_icm_addr;
                                        u64 tx_icm_addr;
                                        struct mlx5dr_action **ref_actions;
                                        u32 num_of_ref_actions;
                                } fw_tbl;
                        };
                } dest_tbl;
                struct {
                        u32 ctr_id;
                        u32 offset;
                } ctr;
                struct {
                        struct mlx5dr_domain *dmn;
                        struct mlx5dr_cmd_vport_cap *caps;
                } vport;
                struct {
                        u32 vlan_hdr; /* tpid_pcp_dei_vid */
                } push_vlan;
                u32 flow_tag;
        };
};

enum mlx5dr_connect_type {
        CONNECT_HIT = 1,
        CONNECT_MISS = 2,
};

struct mlx5dr_htbl_connect_info {
        enum mlx5dr_connect_type type;
        union {
                struct mlx5dr_ste_htbl *hit_next_htbl;
                u64 miss_icm_addr;
        };
};

struct mlx5dr_rule_rx_tx {
        struct list_head rule_members_list;
        struct mlx5dr_matcher_rx_tx *nic_matcher;
};

struct mlx5dr_rule {
        struct mlx5dr_matcher *matcher;
        struct mlx5dr_rule_rx_tx rx;
        struct mlx5dr_rule_rx_tx tx;
        struct list_head rule_actions_list;
        u32 flow_source;
};

void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *new_ste,
                struct mlx5dr_ste *ste);

struct mlx5dr_icm_chunk {
        struct mlx5dr_icm_bucket *bucket;
        struct list_head chunk_list;
        u32 rkey;
        u32 num_of_entries;
        u32 byte_size;
        u64 icm_addr;
        u64 mr_addr;

        /* Memory optimisation */
        struct mlx5dr_ste *ste_arr;
        u8 *hw_ste_arr;
        struct list_head *miss_list;
};

static inline void mlx5dr_domain_nic_lock(struct mlx5dr_domain_rx_tx *nic_dmn)
{
        mutex_lock(&nic_dmn->mutex);
}

static inline void mlx5dr_domain_nic_unlock(struct mlx5dr_domain_rx_tx *nic_dmn)
{
        mutex_unlock(&nic_dmn->mutex);
}

static inline void mlx5dr_domain_lock(struct mlx5dr_domain *dmn)
{
        mlx5dr_domain_nic_lock(&dmn->info.rx);
        mlx5dr_domain_nic_lock(&dmn->info.tx);
}

static inline void mlx5dr_domain_unlock(struct mlx5dr_domain *dmn)
{
        mlx5dr_domain_nic_unlock(&dmn->info.tx);
        mlx5dr_domain_nic_unlock(&dmn->info.rx);
}
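
/*
 * Usage sketch (illustration only): the domain-wide helpers take the RX
 * lock before the TX lock and release in reverse order, so any caller
 * that takes both nic locks directly should follow the same RX -> TX
 * ordering to avoid deadlock.
 *
 *        mlx5dr_domain_lock(dmn);        locks info.rx, then info.tx
 *        ... modify both nic domains ...
 *        mlx5dr_domain_unlock(dmn);      unlocks info.tx, then info.rx
 */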

static inline int
mlx5dr_matcher_supp_flex_parser_icmp_v4(struct mlx5dr_cmd_caps *caps)
{
        return caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED;
}

static inline int
mlx5dr_matcher_supp_flex_parser_icmp_v6(struct mlx5dr_cmd_caps *caps)
{
        return caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED;
}
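
/*
 * Usage sketch (hypothetical; the real call flow lives in the matcher
 * code): before building an ICMP match with flex parser 1, a caller
 * can consult the device caps through these helpers.
 *
 *        if (!mlx5dr_matcher_supp_flex_parser_icmp_v4(&dmn->info.caps))
 *                return -EOPNOTSUPP;
 *
 *        ret = mlx5dr_ste_build_flex_parser_1(sb, &matcher->mask,
 *                                             &dmn->info.caps, inner, rx);
 */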

int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
                struct mlx5dr_matcher_rx_tx *nic_matcher,
                enum mlx5dr_ipv outer_ipv,
                enum mlx5dr_ipv inner_ipv);

static inline u32
mlx5dr_icm_pool_chunk_size_to_entries(enum mlx5dr_icm_chunk_size chunk_size)
{
        return 1 << chunk_size;
}

static inline int
mlx5dr_icm_pool_chunk_size_to_byte(enum mlx5dr_icm_chunk_size chunk_size,
                enum mlx5dr_icm_type icm_type)
{
        int num_of_entries;
        int entry_size;

        if (icm_type == DR_ICM_TYPE_STE)
                entry_size = DR_STE_SIZE;
        else
                entry_size = DR_MODIFY_ACTION_SIZE;

        num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(chunk_size);

        return entry_size * num_of_entries;
}
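
/*
 * Worked example (derived from the helpers above): DR_CHUNK_SIZE_128
 * has enum value 7, so it maps to 1 << 7 = 128 entries. For STE memory
 * that is 128 * DR_STE_SIZE = 128 * 64 = 8192 bytes, while for modify
 * actions it is 128 * DR_MODIFY_ACTION_SIZE = 128 * 8 = 1024 bytes.
 */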

static inline struct mlx5dr_cmd_vport_cap *
mlx5dr_get_vport_cap(struct mlx5dr_cmd_caps *caps, u32 vport)
{
        if (!caps->vports_caps ||
            (vport >= caps->num_vports && vport != WIRE_PORT))
                return NULL;

        if (vport == WIRE_PORT)
                vport = caps->num_vports;

        return &caps->vports_caps[vport];
}
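
/*
 * Usage sketch (illustration only): WIRE_PORT (0xFFFF) is remapped to
 * the extra slot at index num_vports, which implies the caps array is
 * expected to hold num_vports + 1 entries.
 *
 *        struct mlx5dr_cmd_vport_cap *vport_cap;
 *
 *        vport_cap = mlx5dr_get_vport_cap(caps, WIRE_PORT);
 *        if (!vport_cap)
 *                return -EINVAL;
 */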

struct mlx5dr_cmd_query_flow_table_details {
        u8 status;
        u8 level;
        u64 sw_owner_icm_root_1;
        u64 sw_owner_icm_root_0;
};

struct mlx5dr_cmd_create_flow_table_attr {
        u32 table_type;
        u64 icm_addr_rx;
        u64 icm_addr_tx;
        u8 level;
        bool sw_owner;
        bool term_tbl;
        bool decap_en;
        bool reformat_en;
};

/* internal API functions */
int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
                struct mlx5dr_cmd_caps *caps);
int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
                bool other_vport, u16 vport_number,
                u64 *icm_address_rx,
                u64 *icm_address_tx);
int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev,
                bool other_vport, u16 vport_number, u16 *gvmi);
int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
                struct mlx5dr_esw_caps *caps);
int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev);
int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
                u32 table_type,
                u32 table_id,
                u32 group_id,
                u32 modify_header_id,
                u32 vport_id);
int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
                u32 table_type,
                u32 table_id);
int mlx5dr_cmd_alloc_modify_header(struct mlx5_core_dev *mdev,
                u32 table_type,
                u8 num_of_actions,
                u64 *actions,
                u32 *modify_header_id);
int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev,
                u32 modify_header_id);
int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
                u32 table_type,
                u32 table_id,
                u32 *group_id);
int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
                u32 table_type,
                u32 table_id,
                u32 group_id);
int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
                struct mlx5dr_cmd_create_flow_table_attr *attr,
                u64 *fdb_rx_icm_addr,
                u32 *table_id);
int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
                u32 table_id,
                u32 table_type);
int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
                enum fs_flow_table_type type,
                u32 table_id,
                struct mlx5dr_cmd_query_flow_table_details *output);
int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
                enum mlx5_reformat_ctx_type rt,
                size_t reformat_size,
                void *reformat_data,
                u32 *reformat_id);
void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
                u32 reformat_id);

struct mlx5dr_cmd_gid_attr {
        u8 gid[16];
        u8 mac[6];
        u32 roce_ver;
};

struct mlx5dr_cmd_qp_create_attr {
        u32 page_id;
        u32 pdn;
        u32 cqn;
        u32 pm_state;
        u32 service_type;
        u32 buff_umem_id;
        u32 db_umem_id;
        u32 sq_wqe_cnt;
        u32 rq_wqe_cnt;
        u32 rq_wqe_shift;
};

int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
                u16 index, struct mlx5dr_cmd_gid_attr *attr);

struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
                enum mlx5dr_icm_type icm_type);
void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool);

struct mlx5dr_icm_chunk *
mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
                enum mlx5dr_icm_chunk_size chunk_size);
void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk);
int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
                struct mlx5dr_domain_rx_tx *nic_dmn,
                struct mlx5dr_ste_htbl *htbl,
                struct mlx5dr_htbl_connect_info *connect_info,
                bool update_hw_ste);
void mlx5dr_ste_set_formatted_ste(u16 gvmi,
                struct mlx5dr_domain_rx_tx *nic_dmn,
                struct mlx5dr_ste_htbl *htbl,
                u8 *formatted_ste,
                struct mlx5dr_htbl_connect_info *connect_info);
void mlx5dr_ste_copy_param(u8 match_criteria,
                struct mlx5dr_match_param *set_param,
                struct mlx5dr_match_parameters *mask);

struct mlx5dr_qp {
        struct mlx5_core_dev *mdev;
        struct mlx5_wq_qp wq;
        struct mlx5_uars_page *uar;
        struct mlx5_wq_ctrl wq_ctrl;
        u32 qpn;
        struct {
                unsigned int pc;
                unsigned int cc;
                unsigned int size;
                unsigned int *wqe_head;
                unsigned int wqe_cnt;
        } sq;
        struct {
                unsigned int pc;
                unsigned int cc;
                unsigned int size;
                unsigned int wqe_cnt;
        } rq;
        int max_inline_data;
};

struct mlx5dr_cq {
        struct mlx5_core_dev *mdev;
        struct mlx5_cqwq wq;
        struct mlx5_wq_ctrl wq_ctrl;
        struct mlx5_core_cq mcq;
        struct mlx5dr_qp *qp;
};

struct mlx5dr_mr {
        struct mlx5_core_dev *mdev;
        struct mlx5_core_mkey mkey;
        dma_addr_t dma_addr;
        void *addr;
        size_t size;
};

#define MAX_SEND_CQE 64
#define MIN_READ_SYNC 64

struct mlx5dr_send_ring {
        struct mlx5dr_cq *cq;
        struct mlx5dr_qp *qp;
        struct mlx5dr_mr *mr;
        /* How many WQEs are waiting for completion */
        u32 pending_wqe;
        /* Request a signaled completion once per this threshold of WQEs */
        u16 signal_th;
        /* Each post_send must not exceed max_post_send_size */
        u32 max_post_send_size;
        /* manage the send queue */
        u32 tx_head;
        void *buf;
        u32 buf_size;
        struct ib_wc wc[MAX_SEND_CQE];
        u8 sync_buff[MIN_READ_SYNC];
        struct mlx5dr_mr *sync_mr;
        spinlock_t lock; /* Protect the data path of the send ring */
};

int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn);
void mlx5dr_send_ring_free(struct mlx5dr_domain *dmn,
                struct mlx5dr_send_ring *send_ring);
int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn);
int mlx5dr_send_postsend_ste(struct mlx5dr_domain *dmn,
                struct mlx5dr_ste *ste,
                u8 *data,
                u16 size,
                u16 offset);
int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
                struct mlx5dr_ste_htbl *htbl,
                u8 *formatted_ste, u8 *mask);
int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
                struct mlx5dr_ste_htbl *htbl,
                u8 *ste_init_data,
                bool update_hw_ste);
int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
                struct mlx5dr_action *action);

struct mlx5dr_cmd_ft_info {
        u32 id;
        u16 vport;
        enum fs_flow_table_type type;
};

struct mlx5dr_cmd_flow_destination_hw_info {
        enum mlx5_flow_destination_type type;
        union {
                u32 tir_num;
                u32 ft_num;
                u32 ft_id;
                u32 counter_id;
                struct {
                        u16 num;
                        u16 vhca_id;
                        u32 reformat_id;
                        u8 flags;
                } vport;
        };
};

struct mlx5dr_cmd_fte_info {
        u32 dests_size;
        u32 index;
        struct mlx5_flow_context flow_context;
        u32 *val;
        struct mlx5_flow_act action;
        struct mlx5dr_cmd_flow_destination_hw_info *dest_arr;
};

int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
                int opmod, int modify_mask,
                struct mlx5dr_cmd_ft_info *ft,
                u32 group_id,
                struct mlx5dr_cmd_fte_info *fte);

struct mlx5dr_fw_recalc_cs_ft {
        u64 rx_icm_addr;
        u32 table_id;
        u32 group_id;
        u32 modify_hdr_id;
};

struct mlx5dr_fw_recalc_cs_ft *
mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num);
void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn,
                struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft);
int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
                u32 vport_num,
                u64 *rx_icm_addr);
int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
                struct mlx5dr_cmd_flow_destination_hw_info *dest,
                int num_dest,
                bool reformat_req,
                u32 *tbl_id,
                u32 *group_id);
void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn, u32 tbl_id,
                u32 group_id);
#endif /* _DR_TYPES_ */