xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_cmd.c (revision 2ace52718376fdb56aca863da2eebe70d7e2ddb1)
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies. */
3 
4 #include "dr_types.h"
5 #include "eswitch.h"
6 
/* Query the eswitch vport context and return the per-vport SW steering
 * ICM root addresses (RX and TX) reported by firmware.
 */
int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
				       bool other_vport,
				       u16 vport_number,
				       u64 *icm_address_rx,
				       u64 *icm_address_tx)
{
	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	int ret;

	MLX5_SET(query_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
	MLX5_SET(query_esw_vport_context_in, in, other_vport, other_vport);
	MLX5_SET(query_esw_vport_context_in, in, vport_number, vport_number);

	ret = mlx5_cmd_exec_inout(mdev, query_esw_vport_context, in, out);
	if (ret)
		return ret;

	/* Extract the SW steering ICM roots from the vport context */
	*icm_address_rx =
		MLX5_GET64(query_esw_vport_context_out, out,
			   esw_vport_context.sw_steering_vport_icm_address_rx);
	*icm_address_tx =
		MLX5_GET64(query_esw_vport_context_out, out,
			   esw_vport_context.sw_steering_vport_icm_address_tx);

	return 0;
}
34 
/* Resolve the GVMI (vhca id) for a vport. The local function's own id
 * comes straight from HCA caps; any other vport requires a vport query.
 */
int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_vport,
			  u16 vport_number, u16 *gvmi)
{
	int ret;

	if (!other_vport) {
		/* self vhca_id */
		*gvmi = MLX5_CAP_GEN(mdev, vhca_id);
		return 0;
	}

	ret = mlx5_vport_get_vhca_id(mdev, vport_number, gvmi);
	if (ret)
		mlx5_core_err(mdev, "Failed to get vport vhca id for vport %d\n",
			      vport_number);

	return ret;
}
55 
mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev * mdev,struct mlx5dr_esw_caps * caps)56 int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
57 			      struct mlx5dr_esw_caps *caps)
58 {
59 	caps->drop_icm_address_rx =
60 		MLX5_CAP64_ESW_FLOWTABLE(mdev,
61 					 sw_steering_fdb_action_drop_icm_address_rx);
62 	caps->drop_icm_address_tx =
63 		MLX5_CAP64_ESW_FLOWTABLE(mdev,
64 					 sw_steering_fdb_action_drop_icm_address_tx);
65 	caps->uplink_icm_address_rx =
66 		MLX5_CAP64_ESW_FLOWTABLE(mdev,
67 					 sw_steering_uplink_icm_address_rx);
68 	caps->uplink_icm_address_tx =
69 		MLX5_CAP64_ESW_FLOWTABLE(mdev,
70 					 sw_steering_uplink_icm_address_tx);
71 	caps->sw_owner_v2 = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner_v2);
72 	if (!caps->sw_owner_v2)
73 		caps->sw_owner = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner);
74 
75 	return 0;
76 }
77 
/* Read the roce_en bit from a vport's NIC vport context.
 * vport 0 means the local vport (other_vport cleared).
 */
static int dr_cmd_query_nic_vport_roce_en(struct mlx5_core_dev *mdev,
					  u16 vport, bool *roce_en)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
	int ret;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(query_nic_vport_context_in, in, other_vport, !!vport);

	ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (ret)
		return ret;

	*roce_en = MLX5_GET(query_nic_vport_context_out, out,
			    nic_vport_context.roce_en);

	return 0;
}
98 
mlx5dr_cmd_query_device(struct mlx5_core_dev * mdev,struct mlx5dr_cmd_caps * caps)99 int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
100 			    struct mlx5dr_cmd_caps *caps)
101 {
102 	bool roce_en;
103 	int err;
104 
105 	caps->prio_tag_required	= MLX5_CAP_GEN(mdev, prio_tag_required);
106 	caps->eswitch_manager	= MLX5_CAP_GEN(mdev, eswitch_manager);
107 	caps->gvmi		= MLX5_CAP_GEN(mdev, vhca_id);
108 	caps->flex_protocols	= MLX5_CAP_GEN(mdev, flex_parser_protocols);
109 	caps->sw_format_ver	= MLX5_CAP_GEN(mdev, steering_format_version);
110 	caps->roce_caps.fl_rc_qp_when_roce_disabled =
111 		MLX5_CAP_GEN(mdev, fl_rc_qp_when_roce_disabled);
112 
113 	if (MLX5_CAP_GEN(mdev, roce)) {
114 		err = dr_cmd_query_nic_vport_roce_en(mdev, 0, &roce_en);
115 		if (err)
116 			return err;
117 
118 		caps->roce_caps.roce_en = roce_en;
119 		caps->roce_caps.fl_rc_qp_when_roce_disabled |=
120 			MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_disabled);
121 		caps->roce_caps.fl_rc_qp_when_roce_enabled =
122 			MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_enabled);
123 	}
124 
125 	caps->isolate_vl_tc = MLX5_CAP_GEN(mdev, isolate_vl_tc_new);
126 
127 	caps->support_modify_argument =
128 		MLX5_CAP_GEN_64(mdev, general_obj_types) &
129 		MLX5_GENERAL_OBJ_TYPES_CAP_HEADER_MODIFY_ARGUMENT;
130 
131 	if (caps->support_modify_argument) {
132 		caps->log_header_modify_argument_granularity =
133 			MLX5_CAP_GEN(mdev, log_header_modify_argument_granularity);
134 		caps->log_header_modify_argument_max_alloc =
135 			MLX5_CAP_GEN(mdev, log_header_modify_argument_max_alloc);
136 	}
137 
138 	/* geneve_tlv_option_0_exist is the indication of
139 	 * STE support for lookup type flex_parser_ok
140 	 */
141 	caps->flex_parser_ok_bits_supp =
142 		MLX5_CAP_FLOWTABLE(mdev,
143 				   flow_table_properties_nic_receive.ft_field_support.geneve_tlv_option_0_exist);
144 
145 	if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED) {
146 		caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0);
147 		caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1);
148 	}
149 
150 	if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED) {
151 		caps->flex_parser_id_icmpv6_dw0 =
152 			MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw0);
153 		caps->flex_parser_id_icmpv6_dw1 =
154 			MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw1);
155 	}
156 
157 	if (caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_TLV_OPTION_0_ENABLED)
158 		caps->flex_parser_id_geneve_tlv_option_0 =
159 			MLX5_CAP_GEN(mdev, flex_parser_id_geneve_tlv_option_0);
160 
161 	if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED)
162 		caps->flex_parser_id_mpls_over_gre =
163 			MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_gre);
164 
165 	if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)
166 		caps->flex_parser_id_mpls_over_udp =
167 			MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_udp_label);
168 
169 	if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED)
170 		caps->flex_parser_id_gtpu_dw_0 =
171 			MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_dw_0);
172 
173 	if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_TEID_ENABLED)
174 		caps->flex_parser_id_gtpu_teid =
175 			MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_teid);
176 
177 	if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED)
178 		caps->flex_parser_id_gtpu_dw_2 =
179 			MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_dw_2);
180 
181 	if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED)
182 		caps->flex_parser_id_gtpu_first_ext_dw_0 =
183 			MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_first_ext_dw_0);
184 
185 	caps->nic_rx_drop_address =
186 		MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_rx_action_drop_icm_address);
187 	caps->nic_tx_drop_address =
188 		MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_drop_icm_address);
189 	caps->nic_tx_allow_address =
190 		MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_allow_icm_address);
191 
192 	caps->rx_sw_owner_v2 = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner_v2);
193 	caps->tx_sw_owner_v2 = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner_v2);
194 
195 	if (!caps->rx_sw_owner_v2)
196 		caps->rx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner);
197 	if (!caps->tx_sw_owner_v2)
198 		caps->tx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner);
199 
200 	caps->max_ft_level = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_ft_level);
201 
202 	caps->log_icm_size = MLX5_CAP_DEV_MEM(mdev, log_steering_sw_icm_size);
203 	caps->hdr_modify_icm_addr =
204 		MLX5_CAP64_DEV_MEM(mdev, header_modify_sw_icm_start_address);
205 
206 	caps->log_modify_pattern_icm_size =
207 		MLX5_CAP_DEV_MEM(mdev, log_header_modify_pattern_sw_icm_size);
208 
209 	caps->hdr_modify_pattern_icm_addr =
210 		MLX5_CAP64_DEV_MEM(mdev, header_modify_pattern_sw_icm_start_address);
211 
212 	caps->roce_min_src_udp = MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port);
213 
214 	caps->is_ecpf = mlx5_core_is_ecpf_esw_manager(mdev);
215 
216 	return 0;
217 }
218 
/* Query a flow table and return its status, level and (for SW-owned
 * tables) the two SW-owner ICM roots.
 */
int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
				enum fs_flow_table_type type,
				u32 table_id,
				struct mlx5dr_cmd_query_flow_table_details *output)
{
	u32 in[MLX5_ST_SZ_DW(query_flow_table_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_flow_table_out)] = {};
	int ret;

	MLX5_SET(query_flow_table_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_TABLE);
	MLX5_SET(query_flow_table_in, in, table_type, type);
	MLX5_SET(query_flow_table_in, in, table_id, table_id);

	ret = mlx5_cmd_exec_inout(dev, query_flow_table, in, out);
	if (ret)
		return ret;

	output->status = MLX5_GET(query_flow_table_out, out, status);
	output->level = MLX5_GET(query_flow_table_out, out,
				 flow_table_context.level);
	output->sw_owner_icm_root_0 =
		MLX5_GET64(query_flow_table_out, out,
			   flow_table_context.sws.sw_owner_icm_root_0);
	output->sw_owner_icm_root_1 =
		MLX5_GET64(query_flow_table_out, out,
			   flow_table_context.sws.sw_owner_icm_root_1);

	return 0;
}
248 
/* Query a flow sampler general object and return its SW steering
 * RX/TX ICM addresses.
 */
int mlx5dr_cmd_query_flow_sampler(struct mlx5_core_dev *dev,
				  u32 sampler_id,
				  u64 *rx_icm_addr,
				  u64 *tx_icm_addr)
{
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
	u32 out[MLX5_ST_SZ_DW(query_sampler_obj_out)] = {};
	void *obj;
	int err;

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_GENERAL_OBJECT_TYPES_SAMPLER);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sampler_id);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	obj = MLX5_ADDR_OF(query_sampler_obj_out, out, sampler_object);
	*rx_icm_addr = MLX5_GET64(sampler_obj, obj,
				  sw_steering_icm_address_rx);
	*tx_icm_addr = MLX5_GET64(sampler_obj, obj,
				  sw_steering_icm_address_tx);

	return 0;
}
278 
mlx5dr_cmd_sync_steering(struct mlx5_core_dev * mdev)279 int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev)
280 {
281 	u32 in[MLX5_ST_SZ_DW(sync_steering_in)] = {};
282 
283 	/* Skip SYNC in case the device is internal error state.
284 	 * Besides a device error, this also happens when we're
285 	 * in fast teardown
286 	 */
287 	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
288 		return 0;
289 
290 	MLX5_SET(sync_steering_in, in, opcode, MLX5_CMD_OP_SYNC_STEERING);
291 
292 	return mlx5_cmd_exec_in(mdev, sync_steering, in);
293 }
294 
/* Program a flow table entry that applies a modify-header action and
 * forwards to a single vport destination.
 */
int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
					u32 table_type,
					u32 table_id,
					u32 group_id,
					u32 modify_header_id,
					u16 vport)
{
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
	unsigned int inlen;
	void *flow_ctx;
	void *dest;
	u32 *in;
	int ret;

	/* Command layout: set_fte header followed by one destination entry */
	inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
		1 * MLX5_ST_SZ_BYTES(dest_format_struct); /* One destination only */

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, table_type, table_type);
	MLX5_SET(set_fte_in, in, table_id, table_id);

	flow_ctx = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, flow_ctx, group_id, group_id);
	MLX5_SET(flow_context, flow_ctx, modify_header_id, modify_header_id);
	MLX5_SET(flow_context, flow_ctx, destination_list_size, 1);
	MLX5_SET(flow_context, flow_ctx, action,
		 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
		 MLX5_FLOW_CONTEXT_ACTION_MOD_HDR);

	dest = MLX5_ADDR_OF(flow_context, flow_ctx, destination);
	MLX5_SET(dest_format_struct, dest, destination_type,
		 MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT);
	MLX5_SET(dest_format_struct, dest, destination_id, vport);

	ret = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
	kvfree(in);

	return ret;
}
338 
/* Delete the flow table entry at flow index 0 of the given table. */
int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
				    u32 table_type,
				    u32 table_id)
{
	u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};

	MLX5_SET(delete_fte_in, in, opcode,
		 MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
	MLX5_SET(delete_fte_in, in, table_type, table_type);
	MLX5_SET(delete_fte_in, in, table_id, table_id);

	return mlx5_cmd_exec_in(mdev, delete_fte, in);
}
351 
/* Allocate a FW modify-header context from an array of packed actions
 * (each action is one u64) and return its id.
 */
int mlx5dr_cmd_alloc_modify_header(struct mlx5_core_dev *mdev,
				   u32 table_type,
				   u8 num_of_actions,
				   u64 *actions,
				   u32 *modify_header_id)
{
	u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)] = {};
	void *actions_ptr;
	u32 inlen;
	u32 *in;
	int ret;

	/* The action list is a variable-length tail of the command */
	inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) +
		 num_of_actions * sizeof(u64);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(alloc_modify_header_context_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
	MLX5_SET(alloc_modify_header_context_in, in, num_of_actions,
		 num_of_actions);

	actions_ptr = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
	memcpy(actions_ptr, actions, num_of_actions * sizeof(u64));

	ret = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
	if (!ret)
		*modify_header_id = MLX5_GET(alloc_modify_header_context_out,
					     out, modify_header_id);

	kvfree(in);
	return ret;
}
387 
/* Release a FW modify-header context previously allocated by
 * mlx5dr_cmd_alloc_modify_header().
 */
int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev,
				     u32 modify_header_id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {};

	MLX5_SET(dealloc_modify_header_context_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
		 modify_header_id);

	return mlx5_cmd_exec_in(mdev, dealloc_modify_header_context, in);
}
400 
/* Create a flow group with an empty match criteria in the given table. */
int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
				       u32 table_type,
				       u32 table_id,
				       u32 *group_id)
{
	u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {};
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	u32 *in;
	int ret;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_flow_group_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_GROUP);
	MLX5_SET(create_flow_group_in, in, table_type, table_type);
	MLX5_SET(create_flow_group_in, in, table_id, table_id);

	ret = mlx5_cmd_exec_inout(mdev, create_flow_group, in, out);
	if (!ret)
		*group_id = MLX5_GET(create_flow_group_out, out, group_id);

	kvfree(in);
	return ret;
}
429 
/* Destroy a flow group created by mlx5dr_cmd_create_empty_flow_group(). */
int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
				  u32 table_type,
				  u32 table_id,
				  u32 group_id)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};

	MLX5_SET(destroy_flow_group_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
	MLX5_SET(destroy_flow_group_in, in, table_type, table_type);
	MLX5_SET(destroy_flow_group_in, in, table_id, table_id);
	MLX5_SET(destroy_flow_group_in, in, group_id, group_id);

	return mlx5_cmd_exec_in(mdev, destroy_flow_group, in);
}
445 
/* Create a flow table per @attr. For SW-owned tables the caller supplies
 * the ICM roots; for a FW-owned FDB table the FDB RX ICM address FW
 * assigned is returned through @fdb_rx_icm_addr.
 */
int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
				 struct mlx5dr_cmd_create_flow_table_attr *attr,
				 u64 *fdb_rx_icm_addr,
				 u32 *table_id)
{
	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {};
	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {};
	void *ft_ctx;
	int ret;

	MLX5_SET(create_flow_table_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_TABLE);
	MLX5_SET(create_flow_table_in, in, table_type, attr->table_type);
	MLX5_SET(create_flow_table_in, in, uid, attr->uid);

	ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
	MLX5_SET(flow_table_context, ft_ctx, termination_table, attr->term_tbl);
	MLX5_SET(flow_table_context, ft_ctx, sw_owner, attr->sw_owner);
	MLX5_SET(flow_table_context, ft_ctx, level, attr->level);

	if (attr->sw_owner) {
		/* icm_addr_0 used for FDB RX / NIC TX / NIC_RX
		 * icm_addr_1 used for FDB TX
		 */
		switch (attr->table_type) {
		case MLX5_FLOW_TABLE_TYPE_NIC_RX:
			MLX5_SET64(flow_table_context, ft_ctx,
				   sws.sw_owner_icm_root_0, attr->icm_addr_rx);
			break;
		case MLX5_FLOW_TABLE_TYPE_NIC_TX:
			MLX5_SET64(flow_table_context, ft_ctx,
				   sws.sw_owner_icm_root_0, attr->icm_addr_tx);
			break;
		case MLX5_FLOW_TABLE_TYPE_FDB:
			MLX5_SET64(flow_table_context, ft_ctx,
				   sws.sw_owner_icm_root_0, attr->icm_addr_rx);
			MLX5_SET64(flow_table_context, ft_ctx,
				   sws.sw_owner_icm_root_1, attr->icm_addr_tx);
			break;
		default:
			break;
		}
	}

	MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
		 attr->decap_en);
	MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
		 attr->reformat_en);

	ret = mlx5_cmd_exec_inout(mdev, create_flow_table, in, out);
	if (ret)
		return ret;

	*table_id = MLX5_GET(create_flow_table_out, out, table_id);

	/* FW-owned FDB tables report their RX ICM address in three chunks */
	if (!attr->sw_owner && attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB &&
	    fdb_rx_icm_addr)
		*fdb_rx_icm_addr =
		(u64)MLX5_GET(create_flow_table_out, out, icm_address_31_0) |
		(u64)MLX5_GET(create_flow_table_out, out, icm_address_39_32) << 32 |
		(u64)MLX5_GET(create_flow_table_out, out, icm_address_63_40) << 40;

	return 0;
}
502 
/* Destroy a flow table created by mlx5dr_cmd_create_flow_table(). */
int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
				  u32 table_id,
				  u32 table_type)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};

	MLX5_SET(destroy_flow_table_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
	MLX5_SET(destroy_flow_table_in, in, table_type, table_type);
	MLX5_SET(destroy_flow_table_in, in, table_id, table_id);

	return mlx5_cmd_exec_in(mdev, destroy_flow_table, in);
}
516 
/* Allocate a packet reformat context of type @rt carrying @reformat_data
 * and return its id.
 */
int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
				   enum mlx5_reformat_ctx_type rt,
				   u8 reformat_param_0,
				   u8 reformat_param_1,
				   size_t reformat_size,
				   void *reformat_data,
				   u32 *reformat_id)
{
	u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {};
	size_t hdr_sz, data_fld_sz, inlen;
	void *prctx;
	void *pdata;
	void *in;
	int ret;

	hdr_sz = MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in);
	data_fld_sz = MLX5_FLD_SZ_BYTES(alloc_packet_reformat_context_in,
					packet_reformat_context.reformat_data);
	/* Replace the fixed-size reformat_data field with the actual payload
	 * length, keeping the command size dword aligned.
	 */
	inlen = ALIGN(hdr_sz + reformat_size - data_fld_sz, 4);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);

	prctx = MLX5_ADDR_OF(alloc_packet_reformat_context_in, in,
			     packet_reformat_context);
	pdata = MLX5_ADDR_OF(packet_reformat_context_in, prctx, reformat_data);

	MLX5_SET(packet_reformat_context_in, prctx, reformat_type, rt);
	MLX5_SET(packet_reformat_context_in, prctx, reformat_param_0,
		 reformat_param_0);
	MLX5_SET(packet_reformat_context_in, prctx, reformat_param_1,
		 reformat_param_1);
	MLX5_SET(packet_reformat_context_in, prctx, reformat_data_size,
		 reformat_size);
	if (reformat_data && reformat_size)
		memcpy(pdata, reformat_data, reformat_size);

	ret = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
	if (!ret)
		*reformat_id = MLX5_GET(alloc_packet_reformat_context_out,
					out, packet_reformat_id);

	kvfree(in);
	return ret;
}
563 
/* Free a packet reformat context. Failures are ignored (best effort). */
void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
				     u32 reformat_id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {};

	MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
	MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
		 reformat_id);

	mlx5_cmd_exec_in(mdev, dealloc_packet_reformat_context, in);
}
576 
/* Program the definer's DW/byte selector fields. Only the SELECT format
 * takes explicit selectors; all other format ids are fully predefined.
 */
static void dr_cmd_set_definer_format(void *ptr, u16 format_id,
				      u8 *dw_selectors,
				      u8 *byte_selectors)
{
	if (format_id != MLX5_IFC_DEFINER_FORMAT_ID_SELECT)
		return;

	/* Eight single-byte selectors */
	MLX5_SET(match_definer, ptr, format_select_byte0, byte_selectors[0]);
	MLX5_SET(match_definer, ptr, format_select_byte1, byte_selectors[1]);
	MLX5_SET(match_definer, ptr, format_select_byte2, byte_selectors[2]);
	MLX5_SET(match_definer, ptr, format_select_byte3, byte_selectors[3]);
	MLX5_SET(match_definer, ptr, format_select_byte4, byte_selectors[4]);
	MLX5_SET(match_definer, ptr, format_select_byte5, byte_selectors[5]);
	MLX5_SET(match_definer, ptr, format_select_byte6, byte_selectors[6]);
	MLX5_SET(match_definer, ptr, format_select_byte7, byte_selectors[7]);

	/* Nine dword selectors */
	MLX5_SET(match_definer, ptr, format_select_dw0, dw_selectors[0]);
	MLX5_SET(match_definer, ptr, format_select_dw1, dw_selectors[1]);
	MLX5_SET(match_definer, ptr, format_select_dw2, dw_selectors[2]);
	MLX5_SET(match_definer, ptr, format_select_dw3, dw_selectors[3]);
	MLX5_SET(match_definer, ptr, format_select_dw4, dw_selectors[4]);
	MLX5_SET(match_definer, ptr, format_select_dw5, dw_selectors[5]);
	MLX5_SET(match_definer, ptr, format_select_dw6, dw_selectors[6]);
	MLX5_SET(match_definer, ptr, format_select_dw7, dw_selectors[7]);
	MLX5_SET(match_definer, ptr, format_select_dw8, dw_selectors[8]);
}
603 
/* Create a match definer general object with the given format, selectors
 * and match mask; return the new object id in @definer_id.
 */
int mlx5dr_cmd_create_definer(struct mlx5_core_dev *mdev,
			      u16 format_id,
			      u8 *dw_selectors,
			      u8 *byte_selectors,
			      u8 *match_mask,
			      u32 *definer_id)
{
	u32 in[MLX5_ST_SZ_DW(create_match_definer_in)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
	void *hdr;
	void *obj;
	void *mask;
	int ret;

	hdr = MLX5_ADDR_OF(create_match_definer_in, in,
			   general_obj_in_cmd_hdr);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
		 MLX5_OBJ_TYPE_MATCH_DEFINER);

	obj = MLX5_ADDR_OF(create_match_definer_in, in, obj_context);
	MLX5_SET(match_definer, obj, format_id, format_id);
	dr_cmd_set_definer_format(obj, format_id,
				  dw_selectors, byte_selectors);

	mask = MLX5_ADDR_OF(match_definer, obj, match_mask);
	memcpy(mask, match_mask, MLX5_FLD_SZ_BYTES(match_definer, match_mask));

	ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (ret)
		return ret;

	*definer_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return 0;
}
640 
641 void
mlx5dr_cmd_destroy_definer(struct mlx5_core_dev * mdev,u32 definer_id)642 mlx5dr_cmd_destroy_definer(struct mlx5_core_dev *mdev, u32 definer_id)
643 {
644 	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
645 	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
646 
647 	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
648 	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_MATCH_DEFINER);
649 	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, definer_id);
650 
651 	mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
652 }
653 
/* Query a RoCE address table entry: GID, source MAC and RoCE version. */
int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
			 u16 index, struct mlx5dr_cmd_gid_attr *attr)
{
	u32 in[MLX5_ST_SZ_DW(query_roce_address_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_roce_address_out)] = {};
	int ret;

	MLX5_SET(query_roce_address_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ROCE_ADDRESS);
	MLX5_SET(query_roce_address_in, in, roce_address_index, index);
	MLX5_SET(query_roce_address_in, in, vhca_port_num, vhca_port_num);

	ret = mlx5_cmd_exec_inout(mdev, query_roce_address, in, out);
	if (ret)
		return ret;

	memcpy(&attr->gid,
	       MLX5_ADDR_OF(query_roce_address_out,
			    out, roce_address.source_l3_address),
	       sizeof(attr->gid));
	memcpy(attr->mac,
	       MLX5_ADDR_OF(query_roce_address_out, out,
			    roce_address.source_mac_47_32),
	       sizeof(attr->mac));

	/* Anything that isn't explicitly v2 is reported as v1 */
	attr->roce_ver =
		MLX5_GET(query_roce_address_out, out,
			 roce_address.roce_version) == MLX5_ROCE_VERSION_2 ?
		MLX5_ROCE_VERSION_2 : MLX5_ROCE_VERSION_1;

	return 0;
}
688 
/* Create a range of 2^log_obj_range modify-header argument objects bound
 * to @pd; the first object id of the range is returned in @obj_id.
 */
int mlx5dr_cmd_create_modify_header_arg(struct mlx5_core_dev *dev,
					u16 log_obj_range, u32 pd,
					u32 *obj_id)
{
	u32 in[MLX5_ST_SZ_DW(create_modify_header_arg_in)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
	void *ptr;
	int err;

	ptr = MLX5_ADDR_OF(create_modify_header_arg_in, in, hdr);
	MLX5_SET(general_obj_in_cmd_hdr, ptr, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, ptr, obj_type,
		 MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT);
	MLX5_SET(general_obj_in_cmd_hdr, ptr,
		 op_param.create.log_obj_range, log_obj_range);

	ptr = MLX5_ADDR_OF(create_modify_header_arg_in, in, arg);
	MLX5_SET(modify_header_arg, ptr, access_pd, pd);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return 0;
}
716 
/* Destroy a modify-header argument object. Best effort - status ignored. */
void mlx5dr_cmd_destroy_modify_header_arg(struct mlx5_core_dev *dev,
					  u32 obj_id)
{
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);

	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
731 
/* Decide whether an FTE needs the extended destination format: required
 * when it forwards to more than one destination and at least one of them
 * carries a per-destination packet reformat. Validates the result against
 * FW encap limits.
 *
 * Returns 0 on success, -EOPNOTSUPP when FW cannot express the request.
 */
static int mlx5dr_cmd_set_extended_dest(struct mlx5_core_dev *dev,
					struct mlx5dr_cmd_fte_info *fte,
					bool *extended_dest)
{
	int fw_log_max_fdb_encap_uplink = MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
	int num_fwd_destinations = 0;
	int num_encap = 0;
	int i;

	*extended_dest = false;
	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return 0;

	for (i = 0; i < fte->dests_size; i++) {
		/* Counters and NONE entries are not forwarding destinations */
		if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER ||
		    fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_NONE)
			continue;
		if ((fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
		     fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
		    fte->dest_arr[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
			num_encap++;
		num_fwd_destinations++;
	}

	if (num_fwd_destinations > 1 && num_encap > 0)
		*extended_dest = true;

	/* printk convention: log messages end with a newline (original
	 * strings lacked it, which lets consecutive log lines merge).
	 */
	if (*extended_dest && !fw_log_max_fdb_encap_uplink) {
		mlx5_core_warn(dev, "FW does not support extended destination\n");
		return -EOPNOTSUPP;
	}
	if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) {
		mlx5_core_warn(dev, "FW does not support more than %d encaps\n",
			       1 << fw_log_max_fdb_encap_uplink);
		return -EOPNOTSUPP;
	}

	return 0;
}
770 
/* Build and execute a SET_FLOW_TABLE_ENTRY firmware command installing
 * (or modifying) a flow table entry.
 *
 * @dev:		mlx5 core device to issue the command on.
 * @opmod:		command op_mod, written verbatim into the command header.
 * @modify_mask:	bitmask written to modify_enable_mask, selecting which
 *			FTE fields the firmware should update.
 * @ft:			target flow table info (type, id, optional other-vport).
 * @group_id:		flow group this entry belongs to.
 * @fte:		entry description: match value, action set, flow context
 *			fields and the destination array.
 *
 * Return: 0 on success; -EOPNOTSUPP if the destination list requires the
 * extended destination format but the FW cannot provide it; -ENOMEM on
 * allocation failure; -EINVAL if the flow counter list exceeds the FW
 * limit; otherwise the error returned by mlx5_cmd_exec().
 */
int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
		       int opmod, int modify_mask,
		       struct mlx5dr_cmd_ft_info *ft,
		       u32 group_id,
		       struct mlx5dr_cmd_fte_info *fte)
{
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
	void *in_flow_context, *vlan;
	bool extended_dest = false;
	void *in_match_value;
	unsigned int inlen;
	int dst_cnt_size;
	void *in_dests;
	u32 *in;
	int err;
	int i;

	/* Decide whether the extended destination format is needed (multiple
	 * forwarding destinations combined with per-destination reformat ids)
	 * and verify the FW supports it; bail out otherwise.
	 */
	if (mlx5dr_cmd_set_extended_dest(dev, fte, &extended_dest))
		return -EOPNOTSUPP;

	/* The per-destination element size depends on the chosen format and
	 * sizes the variable-length tail of the command buffer.
	 */
	if (!extended_dest)
		dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
	else
		dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);

	inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, op_mod, opmod);
	MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
	MLX5_SET(set_fte_in, in, table_type, ft->type);
	MLX5_SET(set_fte_in, in, table_id, ft->id);
	MLX5_SET(set_fte_in, in, flow_index, fte->index);
	MLX5_SET(set_fte_in, in, ignore_flow_level, fte->ignore_flow_level);
	/* Non-zero vport means the table belongs to another vport; the FW
	 * requires other_vport to be set alongside the vport number.
	 */
	if (ft->vport) {
		MLX5_SET(set_fte_in, in, vport_number, ft->vport);
		MLX5_SET(set_fte_in, in, other_vport, 1);
	}

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, in_flow_context, group_id, group_id);

	MLX5_SET(flow_context, in_flow_context, flow_tag,
		 fte->flow_context.flow_tag);
	MLX5_SET(flow_context, in_flow_context, flow_source,
		 fte->flow_context.flow_source);

	MLX5_SET(flow_context, in_flow_context, extended_destination,
		 extended_dest);
	if (extended_dest) {
		u32 action;

		/* In extended destination mode packet reformat is expressed
		 * per destination (below), so the global PACKET_REFORMAT
		 * action bit must not be set in the flow context.
		 */
		action = fte->action.action &
			~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		MLX5_SET(flow_context, in_flow_context, action, action);
	} else {
		MLX5_SET(flow_context, in_flow_context, action,
			 fte->action.action);
		if (fte->action.pkt_reformat)
			MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
				 fte->action.pkt_reformat->id);
	}
	if (fte->action.modify_hdr)
		MLX5_SET(flow_context, in_flow_context, modify_header_id,
			 fte->action.modify_hdr->id);

	/* Two push-vlan slots are always written; when the corresponding
	 * action bit is absent the zeroed values are simply unused.
	 */
	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);

	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);

	in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
				      match_value);
	memcpy(in_match_value, fte->val, sizeof(u32) * MLX5_ST_SZ_DW_MATCH_PARAM);

	/* First pass: emit forwarding destinations. The in_dests cursor is
	 * shared with the counter pass below, so counters end up appended
	 * after the forwarding entries in the same destination array.
	 */
	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int list_size = 0;

		for (i = 0; i < fte->dests_size; i++) {
			enum mlx5_flow_destination_type type = fte->dest_arr[i].type;
			enum mlx5_ifc_flow_destination_type ifc_type;
			unsigned int id;

			/* Counters are handled in the second pass. */
			if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			switch (type) {
			case MLX5_FLOW_DESTINATION_TYPE_NONE:
				continue;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
				id = fte->dest_arr[i].ft_num;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
				id = fte->dest_arr[i].ft_id;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;

				break;
			case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
			case MLX5_FLOW_DESTINATION_TYPE_VPORT:
				if (type == MLX5_FLOW_DESTINATION_TYPE_VPORT) {
					id = fte->dest_arr[i].vport.num;
					MLX5_SET(dest_format_struct, in_dests,
						 destination_eswitch_owner_vhca_id_valid,
						 !!(fte->dest_arr[i].vport.flags &
						    MLX5_FLOW_DEST_VPORT_VHCA_ID));
					ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT;
				} else {
					/* Uplink: destination id is unused and
					 * the owner vhca id is always valid.
					 */
					id = 0;
					ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK;
					MLX5_SET(dest_format_struct, in_dests,
						 destination_eswitch_owner_vhca_id_valid, 1);
				}
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id,
					 fte->dest_arr[i].vport.vhca_id);
				/* Per-destination reformat id is only valid in
				 * the extended destination format.
				 */
				if (extended_dest && (fte->dest_arr[i].vport.flags &
						    MLX5_FLOW_DEST_VPORT_REFORMAT_ID)) {
					MLX5_SET(dest_format_struct, in_dests,
						 packet_reformat,
						 !!(fte->dest_arr[i].vport.flags &
						    MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
					MLX5_SET(extended_dest_format, in_dests,
						 packet_reformat_id,
						 fte->dest_arr[i].vport.reformat_id);
				}
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
				id = fte->dest_arr[i].sampler_id;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
				break;
			default:
				/* Any remaining type is treated as a TIR.
				 * NOTE(review): presumably only TIR-typed
				 * destinations reach here — confirm against
				 * callers.
				 */
				id = fte->dest_arr[i].tir_num;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_TIR;
			}

			MLX5_SET(dest_format_struct, in_dests, destination_type,
				 ifc_type);
			MLX5_SET(dest_format_struct, in_dests, destination_id, id);
			in_dests += dst_cnt_size;
			list_size++;
		}

		MLX5_SET(flow_context, in_flow_context, destination_list_size,
			 list_size);
	}

	/* Second pass: append flow counters after the forwarding
	 * destinations and enforce the FW's per-table counter-list limit.
	 */
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
					log_max_flow_counter,
					ft->type));
		int list_size = 0;

		for (i = 0; i < fte->dests_size; i++) {
			if (fte->dest_arr[i].type !=
			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
				 fte->dest_arr[i].counter_id);
			in_dests += dst_cnt_size;
			list_size++;
		}
		if (list_size > max_list_size) {
			err = -EINVAL;
			goto err_out;
		}

		MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
			 list_size);
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err_out:
	kvfree(in);
	return err;
}
959