1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies. */
3 
4 #include "dr_types.h"
5 
/* Read the SW steering RX/TX ICM base addresses from a vport's
 * e-switch context.
 */
int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
				       bool other_vport,
				       u16 vport_number,
				       u64 *icm_address_rx,
				       u64 *icm_address_tx)
{
	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	int ret;

	MLX5_SET(query_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
	MLX5_SET(query_esw_vport_context_in, in, vport_number, vport_number);
	MLX5_SET(query_esw_vport_context_in, in, other_vport, other_vport);

	ret = mlx5_cmd_exec_inout(mdev, query_esw_vport_context, in, out);
	if (ret)
		return ret;

	*icm_address_rx =
		MLX5_GET64(query_esw_vport_context_out, out,
			   esw_vport_context.sw_steering_vport_icm_address_rx);
	*icm_address_tx =
		MLX5_GET64(query_esw_vport_context_out, out,
			   esw_vport_context.sw_steering_vport_icm_address_tx);
	return 0;
}
33 
/* Query the function's vhca_id (GVMI) via QUERY_HCA_CAP; when
 * @other_vport is set, @vport_number selects the queried function.
 */
int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_vport,
			  u16 vport_number, u16 *gvmi)
{
	u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *out;
	int err;

	/* The HCA cap output is too big for the stack */
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, other_function, other_vport);
	MLX5_SET(query_hca_cap_in, in, function_id, vport_number);
	/* op_mod: general device caps, current (not maximum) values */
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 |
		 HCA_CAP_OPMOD_GET_CUR);

	err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
	if (!err)
		*gvmi = MLX5_GET(query_hca_cap_out, out,
				 capability.cmd_hca_cap.vhca_id);

	kfree(out);
	return err;
}
65 
mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev * mdev,struct mlx5dr_esw_caps * caps)66 int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
67 			      struct mlx5dr_esw_caps *caps)
68 {
69 	caps->drop_icm_address_rx =
70 		MLX5_CAP64_ESW_FLOWTABLE(mdev,
71 					 sw_steering_fdb_action_drop_icm_address_rx);
72 	caps->drop_icm_address_tx =
73 		MLX5_CAP64_ESW_FLOWTABLE(mdev,
74 					 sw_steering_fdb_action_drop_icm_address_tx);
75 	caps->uplink_icm_address_rx =
76 		MLX5_CAP64_ESW_FLOWTABLE(mdev,
77 					 sw_steering_uplink_icm_address_rx);
78 	caps->uplink_icm_address_tx =
79 		MLX5_CAP64_ESW_FLOWTABLE(mdev,
80 					 sw_steering_uplink_icm_address_tx);
81 	caps->sw_owner =
82 		MLX5_CAP_ESW_FLOWTABLE_FDB(mdev,
83 					   sw_owner);
84 
85 	return 0;
86 }
87 
mlx5dr_cmd_query_device(struct mlx5_core_dev * mdev,struct mlx5dr_cmd_caps * caps)88 int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
89 			    struct mlx5dr_cmd_caps *caps)
90 {
91 	caps->prio_tag_required	= MLX5_CAP_GEN(mdev, prio_tag_required);
92 	caps->eswitch_manager	= MLX5_CAP_GEN(mdev, eswitch_manager);
93 	caps->gvmi		= MLX5_CAP_GEN(mdev, vhca_id);
94 	caps->flex_protocols	= MLX5_CAP_GEN(mdev, flex_parser_protocols);
95 	caps->sw_format_ver	= MLX5_CAP_GEN(mdev, steering_format_version);
96 
97 	if (mlx5dr_matcher_supp_flex_parser_icmp_v4(caps)) {
98 		caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0);
99 		caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1);
100 	}
101 
102 	if (mlx5dr_matcher_supp_flex_parser_icmp_v6(caps)) {
103 		caps->flex_parser_id_icmpv6_dw0 =
104 			MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw0);
105 		caps->flex_parser_id_icmpv6_dw1 =
106 			MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw1);
107 	}
108 
109 	caps->nic_rx_drop_address =
110 		MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_rx_action_drop_icm_address);
111 	caps->nic_tx_drop_address =
112 		MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_drop_icm_address);
113 	caps->nic_tx_allow_address =
114 		MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_allow_icm_address);
115 
116 	caps->rx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner);
117 	caps->max_ft_level = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_ft_level);
118 
119 	caps->tx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner);
120 
121 	caps->log_icm_size = MLX5_CAP_DEV_MEM(mdev, log_steering_sw_icm_size);
122 	caps->hdr_modify_icm_addr =
123 		MLX5_CAP64_DEV_MEM(mdev, header_modify_sw_icm_start_address);
124 
125 	caps->roce_min_src_udp = MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port);
126 
127 	return 0;
128 }
129 
/* Query a flow table's level and SW-owner ICM roots into @output. */
int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
				enum fs_flow_table_type type,
				u32 table_id,
				struct mlx5dr_cmd_query_flow_table_details *output)
{
	u32 in[MLX5_ST_SZ_DW(query_flow_table_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_flow_table_out)] = {};
	int ret;

	MLX5_SET(query_flow_table_in, in, opcode, MLX5_CMD_OP_QUERY_FLOW_TABLE);
	MLX5_SET(query_flow_table_in, in, table_type, type);
	MLX5_SET(query_flow_table_in, in, table_id, table_id);

	ret = mlx5_cmd_exec_inout(dev, query_flow_table, in, out);
	if (ret)
		return ret;

	output->status = MLX5_GET(query_flow_table_out, out, status);
	output->level = MLX5_GET(query_flow_table_out, out,
				 flow_table_context.level);
	output->sw_owner_icm_root_0 = MLX5_GET64(query_flow_table_out, out,
						 flow_table_context.sw_owner_icm_root_0);
	output->sw_owner_icm_root_1 = MLX5_GET64(query_flow_table_out, out,
						 flow_table_context.sw_owner_icm_root_1);
	return 0;
}
159 
mlx5dr_cmd_sync_steering(struct mlx5_core_dev * mdev)160 int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev)
161 {
162 	u32 in[MLX5_ST_SZ_DW(sync_steering_in)] = {};
163 
164 	MLX5_SET(sync_steering_in, in, opcode, MLX5_CMD_OP_SYNC_STEERING);
165 
166 	return mlx5_cmd_exec_in(mdev, sync_steering, in);
167 }
168 
/* Insert a single flow table entry that applies a modify-header action
 * and forwards the packet to @vport_id.
 */
int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
					u32 table_type,
					u32 table_id,
					u32 group_id,
					u32 modify_header_id,
					u32 vport_id)
{
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
	unsigned int inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
			     MLX5_ST_SZ_BYTES(dest_format_struct); /* one dest */
	void *flow_ctx;
	void *dest;
	u32 *in;
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, table_type, table_type);
	MLX5_SET(set_fte_in, in, table_id, table_id);

	flow_ctx = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, flow_ctx, group_id, group_id);
	MLX5_SET(flow_context, flow_ctx, modify_header_id, modify_header_id);
	MLX5_SET(flow_context, flow_ctx, destination_list_size, 1);
	MLX5_SET(flow_context, flow_ctx, action,
		 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
		 MLX5_FLOW_CONTEXT_ACTION_MOD_HDR);

	dest = MLX5_ADDR_OF(flow_context, flow_ctx, destination);
	MLX5_SET(dest_format_struct, dest, destination_type,
		 MLX5_FLOW_DESTINATION_TYPE_VPORT);
	MLX5_SET(dest_format_struct, dest, destination_id, vport_id);

	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
	kvfree(in);

	return err;
}
212 
/* Delete flow table entry 0 of the given table (the only index used by
 * the SW-steering FW helpers).
 */
int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
				    u32 table_type,
				    u32 table_id)
{
	u32 cmd_in[MLX5_ST_SZ_DW(delete_fte_in)] = {};

	MLX5_SET(delete_fte_in, cmd_in, table_type, table_type);
	MLX5_SET(delete_fte_in, cmd_in, table_id, table_id);
	MLX5_SET(delete_fte_in, cmd_in, opcode,
		 MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);

	return mlx5_cmd_exec_in(mdev, delete_fte, cmd_in);
}
225 
/* Allocate a FW modify-header context from an array of @num_of_actions
 * 8-byte action entries; returns its id in @modify_header_id.
 */
int mlx5dr_cmd_alloc_modify_header(struct mlx5_core_dev *mdev,
				   u32 table_type,
				   u8 num_of_actions,
				   u64 *actions,
				   u32 *modify_header_id)
{
	u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)] = {};
	size_t actions_sz = num_of_actions * sizeof(u64);
	u32 inlen;
	u32 *in;
	int err;

	/* Variable-length command: header followed by the action array */
	inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) + actions_sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(alloc_modify_header_context_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
	MLX5_SET(alloc_modify_header_context_in, in, num_of_actions,
		 num_of_actions);
	memcpy(MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions),
	       actions, actions_sz);

	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
	if (!err)
		*modify_header_id = MLX5_GET(alloc_modify_header_context_out,
					     out, modify_header_id);

	kvfree(in);
	return err;
}
261 
/* Free a FW modify-header context previously allocated by
 * mlx5dr_cmd_alloc_modify_header().
 */
int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev,
				     u32 modify_header_id)
{
	u32 cmd_in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {};

	MLX5_SET(dealloc_modify_header_context_in, cmd_in, modify_header_id,
		 modify_header_id);
	MLX5_SET(dealloc_modify_header_context_in, cmd_in, opcode,
		 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);

	return mlx5_cmd_exec_in(mdev, dealloc_modify_header_context, cmd_in);
}
274 
/* Create a flow group with no match criteria (matches any entry) and
 * return its id in @group_id.
 */
int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
				       u32 table_type,
				       u32 table_id,
				       u32 *group_id)
{
	u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {};
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	u32 *in;
	int err;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_flow_group_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_GROUP);
	MLX5_SET(create_flow_group_in, in, table_type, table_type);
	MLX5_SET(create_flow_group_in, in, table_id, table_id);

	err = mlx5_cmd_exec_inout(mdev, create_flow_group, in, out);
	if (!err)
		*group_id = MLX5_GET(create_flow_group_out, out, group_id);

	kfree(in);
	return err;
}
303 
/* Destroy a flow group created by mlx5dr_cmd_create_empty_flow_group(). */
int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
				  u32 table_type,
				  u32 table_id,
				  u32 group_id)
{
	u32 cmd_in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};

	MLX5_SET(destroy_flow_group_in, cmd_in, table_type, table_type);
	MLX5_SET(destroy_flow_group_in, cmd_in, table_id, table_id);
	MLX5_SET(destroy_flow_group_in, cmd_in, group_id, group_id);
	MLX5_SET(destroy_flow_group_in, cmd_in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_GROUP);

	return mlx5_cmd_exec_in(mdev, destroy_flow_group, cmd_in);
}
319 
/* Create a flow table per @attr. For SW-owned tables, program the ICM
 * roots; for FW-owned FDB tables, report the RX ICM anchor address via
 * @fdb_rx_icm_addr (when non-NULL). The new table id is returned in
 * @table_id.
 */
int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
				 struct mlx5dr_cmd_create_flow_table_attr *attr,
				 u64 *fdb_rx_icm_addr,
				 u32 *table_id)
{
	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {};
	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {};
	void *ft_ctx;
	int err;

	MLX5_SET(create_flow_table_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_TABLE);
	MLX5_SET(create_flow_table_in, in, table_type, attr->table_type);

	ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
	MLX5_SET(flow_table_context, ft_ctx, termination_table, attr->term_tbl);
	MLX5_SET(flow_table_context, ft_ctx, sw_owner, attr->sw_owner);
	MLX5_SET(flow_table_context, ft_ctx, level, attr->level);

	if (attr->sw_owner) {
		/* icm_addr_0 is the FDB RX / NIC TX / NIC RX root,
		 * icm_addr_1 is the FDB TX root.
		 */
		switch (attr->table_type) {
		case MLX5_FLOW_TABLE_TYPE_NIC_RX:
			MLX5_SET64(flow_table_context, ft_ctx,
				   sw_owner_icm_root_0, attr->icm_addr_rx);
			break;
		case MLX5_FLOW_TABLE_TYPE_NIC_TX:
			MLX5_SET64(flow_table_context, ft_ctx,
				   sw_owner_icm_root_0, attr->icm_addr_tx);
			break;
		case MLX5_FLOW_TABLE_TYPE_FDB:
			MLX5_SET64(flow_table_context, ft_ctx,
				   sw_owner_icm_root_0, attr->icm_addr_rx);
			MLX5_SET64(flow_table_context, ft_ctx,
				   sw_owner_icm_root_1, attr->icm_addr_tx);
			break;
		default:
			break;
		}
	}

	MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
		 attr->decap_en);
	MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
		 attr->reformat_en);

	err = mlx5_cmd_exec_inout(mdev, create_flow_table, in, out);
	if (err)
		return err;

	*table_id = MLX5_GET(create_flow_table_out, out, table_id);

	if (!attr->sw_owner && attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB &&
	    fdb_rx_icm_addr)
		*fdb_rx_icm_addr =
			(u64)MLX5_GET(create_flow_table_out, out, icm_address_31_0) |
			(u64)MLX5_GET(create_flow_table_out, out, icm_address_39_32) << 32 |
			(u64)MLX5_GET(create_flow_table_out, out, icm_address_63_40) << 40;

	return 0;
}
375 
/* Destroy a flow table created by mlx5dr_cmd_create_flow_table(). */
int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
				  u32 table_id,
				  u32 table_type)
{
	u32 cmd_in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};

	MLX5_SET(destroy_flow_table_in, cmd_in, table_type, table_type);
	MLX5_SET(destroy_flow_table_in, cmd_in, table_id, table_id);
	MLX5_SET(destroy_flow_table_in, cmd_in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_TABLE);

	return mlx5_cmd_exec_in(mdev, destroy_flow_table, cmd_in);
}
389 
/* Allocate a FW packet-reformat (encap/decap) context of type @rt with
 * @reformat_size bytes of @reformat_data; returns its id in @reformat_id.
 *
 * Return: 0 on success, negative errno on failure.
 */
int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
				   enum mlx5_reformat_ctx_type rt,
				   size_t reformat_size,
				   void *reformat_data,
				   u32 *reformat_id)
{
	u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {};
	size_t inlen, cmd_data_sz, cmd_total_sz;
	void *prctx;
	void *pdata;
	void *in;
	int err;

	/* Variable-length command: the fixed-size reformat_data field is
	 * replaced by the actual payload, rounded up to dword alignment.
	 */
	cmd_total_sz = MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in);
	cmd_data_sz = MLX5_FLD_SZ_BYTES(alloc_packet_reformat_context_in,
					packet_reformat_context.reformat_data);
	inlen = ALIGN(cmd_total_sz + reformat_size - cmd_data_sz, 4);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);

	prctx = MLX5_ADDR_OF(alloc_packet_reformat_context_in, in,
			     packet_reformat_context);
	pdata = MLX5_ADDR_OF(packet_reformat_context_in, prctx, reformat_data);

	MLX5_SET(packet_reformat_context_in, prctx, reformat_type, rt);
	MLX5_SET(packet_reformat_context_in, prctx, reformat_data_size,
		 reformat_size);
	memcpy(pdata, reformat_data, reformat_size);

	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
	if (err)
		goto err_free_in; /* previously returned here, leaking 'in' */

	*reformat_id = MLX5_GET(alloc_packet_reformat_context_out, out,
				packet_reformat_id);

err_free_in:
	kvfree(in);
	return err;
}
430 
/* Release a FW packet-reformat context. Best effort: the command status
 * is intentionally not propagated to the caller.
 */
void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
				     u32 reformat_id)
{
	u32 cmd_in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {};

	MLX5_SET(dealloc_packet_reformat_context_in, cmd_in,
		 packet_reformat_id, reformat_id);
	MLX5_SET(dealloc_packet_reformat_context_in, cmd_in, opcode,
		 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);

	mlx5_cmd_exec_in(mdev, dealloc_packet_reformat_context, cmd_in);
}
443 
/* Query the RoCE address table at @index on @vhca_port_num and fill
 * @attr with the GID, source MAC and RoCE version.
 */
int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
			 u16 index, struct mlx5dr_cmd_gid_attr *attr)
{
	u32 in[MLX5_ST_SZ_DW(query_roce_address_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_roce_address_out)] = {};
	int err;

	MLX5_SET(query_roce_address_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ROCE_ADDRESS);
	MLX5_SET(query_roce_address_in, in, vhca_port_num, vhca_port_num);
	MLX5_SET(query_roce_address_in, in, roce_address_index, index);

	err = mlx5_cmd_exec_inout(mdev, query_roce_address, in, out);
	if (err)
		return err;

	memcpy(&attr->gid,
	       MLX5_ADDR_OF(query_roce_address_out, out,
			    roce_address.source_l3_address),
	       sizeof(attr->gid));
	memcpy(attr->mac,
	       MLX5_ADDR_OF(query_roce_address_out, out,
			    roce_address.source_mac_47_32),
	       sizeof(attr->mac));

	/* Anything other than RoCEv2 is reported as v1 */
	if (MLX5_GET(query_roce_address_out, out,
		     roce_address.roce_version) == MLX5_ROCE_VERSION_2)
		attr->roce_ver = MLX5_ROCE_VERSION_2;
	else
		attr->roce_ver = MLX5_ROCE_VERSION_1;

	return 0;
}
478 
/* Decide whether an FTE must use the extended destination format:
 * needed when a multi-destination rule carries a per-destination packet
 * reformat. Returns -EOPNOTSUPP when FW cannot support the request.
 */
static int mlx5dr_cmd_set_extended_dest(struct mlx5_core_dev *dev,
					struct mlx5dr_cmd_fte_info *fte,
					bool *extended_dest)
{
	int encap_log_max = MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
	int encap_cnt = 0;
	int fwd_cnt = 0;
	int i;

	*extended_dest = false;
	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return 0;

	/* Count forwarding destinations and those requesting encap;
	 * counters are not forwarding destinations.
	 */
	for (i = 0; i < fte->dests_size; i++) {
		if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
			continue;
		if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
		    fte->dest_arr[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
			encap_cnt++;
		fwd_cnt++;
	}

	*extended_dest = fwd_cnt > 1 && encap_cnt > 0;

	if (*extended_dest && !encap_log_max) {
		mlx5_core_warn(dev, "FW does not support extended destination");
		return -EOPNOTSUPP;
	}
	if (encap_cnt > (1 << encap_log_max)) {
		mlx5_core_warn(dev, "FW does not support more than %d encaps",
			       1 << encap_log_max);
		return -EOPNOTSUPP;
	}

	return 0;
}
515 
/* mlx5dr_cmd_set_fte() - Build and execute a SET_FLOW_TABLE_ENTRY command.
 * @dev:         device to issue the command on
 * @opmod:       command op_mod (create vs. modify semantics)
 * @modify_mask: modify_enable_mask bits for a modify operation
 * @ft:          target table (type/id; vport set means other-vport access)
 * @group_id:    flow group the entry belongs to
 * @fte:         entry description: match value, actions, destinations
 *
 * The command is variable-length: a flow context followed by the
 * destination list and then the counter list, both sharing one buffer
 * walked via the in_dests pointer. The extended destination format is
 * used when multiple forward destinations carry per-destination packet
 * reformat (see mlx5dr_cmd_set_extended_dest()).
 *
 * Return: 0 on success, negative errno on failure.
 */
int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
		       int opmod, int modify_mask,
		       struct mlx5dr_cmd_ft_info *ft,
		       u32 group_id,
		       struct mlx5dr_cmd_fte_info *fte)
{
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
	void *in_flow_context, *vlan;
	bool extended_dest = false;
	void *in_match_value;
	unsigned int inlen;
	int dst_cnt_size;
	void *in_dests;
	u32 *in;
	int err;
	int i;

	if (mlx5dr_cmd_set_extended_dest(dev, fte, &extended_dest))
		return -EOPNOTSUPP;

	/* Per-destination entry size depends on the format in use */
	if (!extended_dest)
		dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
	else
		dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);

	inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, op_mod, opmod);
	MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
	MLX5_SET(set_fte_in, in, table_type, ft->type);
	MLX5_SET(set_fte_in, in, table_id, ft->id);
	MLX5_SET(set_fte_in, in, flow_index, fte->index);
	if (ft->vport) {
		/* Table belongs to another vport - address it explicitly */
		MLX5_SET(set_fte_in, in, vport_number, ft->vport);
		MLX5_SET(set_fte_in, in, other_vport, 1);
	}

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, in_flow_context, group_id, group_id);

	MLX5_SET(flow_context, in_flow_context, flow_tag,
		 fte->flow_context.flow_tag);
	MLX5_SET(flow_context, in_flow_context, flow_source,
		 fte->flow_context.flow_source);

	MLX5_SET(flow_context, in_flow_context, extended_destination,
		 extended_dest);
	if (extended_dest) {
		/* With extended destinations the reformat moves into the
		 * per-destination entries, so strip it from the rule action.
		 */
		u32 action;

		action = fte->action.action &
			~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		MLX5_SET(flow_context, in_flow_context, action, action);
	} else {
		MLX5_SET(flow_context, in_flow_context, action,
			 fte->action.action);
		if (fte->action.pkt_reformat)
			MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
				 fte->action.pkt_reformat->id);
	}
	if (fte->action.modify_hdr)
		MLX5_SET(flow_context, in_flow_context, modify_header_id,
			 fte->action.modify_hdr->id);

	/* Up to two VLAN push actions (outer then inner) */
	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);

	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);

	in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
				      match_value);
	memcpy(in_match_value, fte->val, sizeof(u32) * MLX5_ST_SZ_DW_MATCH_PARAM);

	/* First pass: forwarding destinations (counters are skipped here
	 * and emitted in the second pass below, into the same buffer).
	 */
	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int list_size = 0;

		for (i = 0; i < fte->dests_size; i++) {
			unsigned int id, type = fte->dest_arr[i].type;

			if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			switch (type) {
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
				/* ft_num is passed through as a plain
				 * FLOW_TABLE destination id
				 */
				id = fte->dest_arr[i].ft_num;
				type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
				id = fte->dest_arr[i].ft_id;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_VPORT:
				id = fte->dest_arr[i].vport.num;
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id_valid,
					 !!(fte->dest_arr[i].vport.flags &
					    MLX5_FLOW_DEST_VPORT_VHCA_ID));
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id,
					 fte->dest_arr[i].vport.vhca_id);
				if (extended_dest && (fte->dest_arr[i].vport.flags &
						    MLX5_FLOW_DEST_VPORT_REFORMAT_ID)) {
					MLX5_SET(dest_format_struct, in_dests,
						 packet_reformat,
						 !!(fte->dest_arr[i].vport.flags &
						    MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
					MLX5_SET(extended_dest_format, in_dests,
						 packet_reformat_id,
						 fte->dest_arr[i].vport.reformat_id);
				}
				break;
			default:
				id = fte->dest_arr[i].tir_num;
			}

			MLX5_SET(dest_format_struct, in_dests, destination_type,
				 type);
			MLX5_SET(dest_format_struct, in_dests, destination_id, id);
			in_dests += dst_cnt_size;
			list_size++;
		}

		MLX5_SET(flow_context, in_flow_context, destination_list_size,
			 list_size);
	}

	/* Second pass: flow counters, appended after the destinations */
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
					log_max_flow_counter,
					ft->type));
		int list_size = 0;

		for (i = 0; i < fte->dests_size; i++) {
			if (fte->dest_arr[i].type !=
			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
				 fte->dest_arr[i].counter_id);
			in_dests += dst_cnt_size;
			list_size++;
		}
		if (list_size > max_list_size) {
			err = -EINVAL;
			goto err_out;
		}

		MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
			 list_size);
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err_out:
	kvfree(in);
	return err;
}
683