1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
3 
4 #include <linux/mlx5/vport.h>
5 #include <mlx5_core.h>
6 #include <fs_core.h>
7 #include <fs_cmd.h>
8 #include "fs_hws_pools.h"
9 #include "mlx5hws.h"
10 
11 #define MLX5HWS_CTX_MAX_NUM_OF_QUEUES 16
12 #define MLX5HWS_CTX_QUEUE_SIZE 256
13 
14 static struct mlx5hws_action *
15 mlx5_fs_create_action_remove_header_vlan(struct mlx5hws_context *ctx);
16 static void
17 mlx5_fs_destroy_pr_pool(struct mlx5_fs_pool *pool, struct xarray *pr_pools,
18 			unsigned long index);
19 static void
20 mlx5_fs_destroy_mh_pool(struct mlx5_fs_pool *pool, struct xarray *mh_pools,
21 			unsigned long index);
22 
/* Pre-create the HWS actions shared by all rules in this FDB namespace
 * (tag, pop/push VLAN, drop, L2 decap, remove-header-vlan), initialize the
 * insert-header and L3-tunnel-to-L2 packet-reformat pools, and set up the
 * xarray caches for per-object destination/modify-header actions.
 *
 * Returns 0 on success or a negative errno. On failure, everything created
 * so far is unwound in reverse order of creation.
 */
static int mlx5_fs_init_hws_actions_pool(struct mlx5_core_dev *dev,
					 struct mlx5_fs_hws_context *fs_ctx)
{
	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
	struct mlx5_fs_hws_actions_pool *hws_pool = &fs_ctx->hws_pool;
	struct mlx5hws_action_reformat_header reformat_hdr = {};
	struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
	enum mlx5hws_action_type action_type;
	int err = -ENOSPC;

	hws_pool->tag_action = mlx5hws_action_create_tag(ctx, flags);
	if (!hws_pool->tag_action)
		return err;
	hws_pool->pop_vlan_action = mlx5hws_action_create_pop_vlan(ctx, flags);
	if (!hws_pool->pop_vlan_action)
		goto destroy_tag;
	hws_pool->push_vlan_action = mlx5hws_action_create_push_vlan(ctx, flags);
	if (!hws_pool->push_vlan_action)
		goto destroy_pop_vlan;
	hws_pool->drop_action = mlx5hws_action_create_dest_drop(ctx, flags);
	if (!hws_pool->drop_action)
		goto destroy_push_vlan;
	action_type = MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
	/* Single shared decap action; the empty reformat_hdr is sufficient
	 * for the L2-to-L2 decap type.
	 */
	hws_pool->decapl2_action =
		mlx5hws_action_create_reformat(ctx, action_type, 1,
					       &reformat_hdr, 0, flags);
	if (!hws_pool->decapl2_action)
		goto destroy_drop;
	hws_pool->remove_hdr_vlan_action =
		mlx5_fs_create_action_remove_header_vlan(ctx);
	if (!hws_pool->remove_hdr_vlan_action)
		goto destroy_decapl2;
	err = mlx5_fs_hws_pr_pool_init(&hws_pool->insert_hdr_pool, dev, 0,
				       MLX5HWS_ACTION_TYP_INSERT_HEADER);
	if (err)
		goto destroy_remove_hdr;
	err = mlx5_fs_hws_pr_pool_init(&hws_pool->dl3tnltol2_pool, dev, 0,
				       MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2);
	if (err)
		goto cleanup_insert_hdr;
	/* Lazily-populated caches; entries are created on first use. */
	xa_init(&hws_pool->el2tol3tnl_pools);
	xa_init(&hws_pool->el2tol2tnl_pools);
	xa_init(&hws_pool->mh_pools);
	xa_init(&hws_pool->table_dests);
	xa_init(&hws_pool->vport_dests);
	xa_init(&hws_pool->vport_vhca_dests);
	xa_init(&hws_pool->aso_meters);
	xa_init(&hws_pool->sample_dests);
	return 0;

cleanup_insert_hdr:
	mlx5_fs_hws_pr_pool_cleanup(&hws_pool->insert_hdr_pool);
destroy_remove_hdr:
	mlx5hws_action_destroy(hws_pool->remove_hdr_vlan_action);
destroy_decapl2:
	mlx5hws_action_destroy(hws_pool->decapl2_action);
destroy_drop:
	mlx5hws_action_destroy(hws_pool->drop_action);
destroy_push_vlan:
	mlx5hws_action_destroy(hws_pool->push_vlan_action);
destroy_pop_vlan:
	mlx5hws_action_destroy(hws_pool->pop_vlan_action);
destroy_tag:
	mlx5hws_action_destroy(hws_pool->tag_action);
	return err;
}
89 
/* Tear down everything created by mlx5_fs_init_hws_actions_pool(), plus any
 * entries that accumulated in the lazily-populated caches, in reverse order
 * of creation. Cached entries must be freed/destroyed before xa_destroy()
 * since the xarray does not own its values.
 */
static void mlx5_fs_cleanup_hws_actions_pool(struct mlx5_fs_hws_context *fs_ctx)
{
	struct mlx5_fs_hws_actions_pool *hws_pool = &fs_ctx->hws_pool;
	struct mlx5_fs_hws_data *fs_hws_data;
	struct mlx5hws_action *action;
	struct mlx5_fs_pool *pool;
	unsigned long i;

	xa_for_each(&hws_pool->sample_dests, i, fs_hws_data)
		kfree(fs_hws_data);
	xa_destroy(&hws_pool->sample_dests);
	xa_for_each(&hws_pool->aso_meters, i, fs_hws_data)
		kfree(fs_hws_data);
	xa_destroy(&hws_pool->aso_meters);
	xa_for_each(&hws_pool->vport_vhca_dests, i, action)
		mlx5hws_action_destroy(action);
	xa_destroy(&hws_pool->vport_vhca_dests);
	xa_for_each(&hws_pool->vport_dests, i, action)
		mlx5hws_action_destroy(action);
	xa_destroy(&hws_pool->vport_dests);
	/* table_dests values were destroyed when their tables were removed. */
	xa_destroy(&hws_pool->table_dests);
	xa_for_each(&hws_pool->mh_pools, i, pool)
		mlx5_fs_destroy_mh_pool(pool, &hws_pool->mh_pools, i);
	xa_destroy(&hws_pool->mh_pools);
	xa_for_each(&hws_pool->el2tol2tnl_pools, i, pool)
		mlx5_fs_destroy_pr_pool(pool, &hws_pool->el2tol2tnl_pools, i);
	xa_destroy(&hws_pool->el2tol2tnl_pools);
	xa_for_each(&hws_pool->el2tol3tnl_pools, i, pool)
		mlx5_fs_destroy_pr_pool(pool, &hws_pool->el2tol3tnl_pools, i);
	xa_destroy(&hws_pool->el2tol3tnl_pools);
	mlx5_fs_hws_pr_pool_cleanup(&hws_pool->dl3tnltol2_pool);
	mlx5_fs_hws_pr_pool_cleanup(&hws_pool->insert_hdr_pool);
	mlx5hws_action_destroy(hws_pool->remove_hdr_vlan_action);
	mlx5hws_action_destroy(hws_pool->decapl2_action);
	mlx5hws_action_destroy(hws_pool->drop_action);
	mlx5hws_action_destroy(hws_pool->push_vlan_action);
	mlx5hws_action_destroy(hws_pool->pop_vlan_action);
	mlx5hws_action_destroy(hws_pool->tag_action);
}
129 
mlx5_cmd_hws_create_ns(struct mlx5_flow_root_namespace * ns)130 static int mlx5_cmd_hws_create_ns(struct mlx5_flow_root_namespace *ns)
131 {
132 	struct mlx5hws_context_attr hws_ctx_attr = {};
133 	int err;
134 
135 	hws_ctx_attr.queues = min_t(int, num_online_cpus(),
136 				    MLX5HWS_CTX_MAX_NUM_OF_QUEUES);
137 	hws_ctx_attr.queue_size = MLX5HWS_CTX_QUEUE_SIZE;
138 
139 	ns->fs_hws_context.hws_ctx =
140 		mlx5hws_context_open(ns->dev, &hws_ctx_attr);
141 	if (!ns->fs_hws_context.hws_ctx) {
142 		mlx5_core_err(ns->dev, "Failed to create hws flow namespace\n");
143 		return -EINVAL;
144 	}
145 	err = mlx5_fs_init_hws_actions_pool(ns->dev, &ns->fs_hws_context);
146 	if (err) {
147 		mlx5_core_err(ns->dev, "Failed to init hws actions pool\n");
148 		mlx5hws_context_close(ns->fs_hws_context.hws_ctx);
149 		return err;
150 	}
151 	return 0;
152 }
153 
mlx5_cmd_hws_destroy_ns(struct mlx5_flow_root_namespace * ns)154 static int mlx5_cmd_hws_destroy_ns(struct mlx5_flow_root_namespace *ns)
155 {
156 	mlx5_fs_cleanup_hws_actions_pool(&ns->fs_hws_context);
157 	return mlx5hws_context_close(ns->fs_hws_context.hws_ctx);
158 }
159 
/* Pair (or unpair, when peer_ns is NULL) this namespace's HWS context with a
 * peer device's context, identified by peer_vhca_id.
 */
static int mlx5_cmd_hws_set_peer(struct mlx5_flow_root_namespace *ns,
				 struct mlx5_flow_root_namespace *peer_ns,
				 u16 peer_vhca_id)
{
	struct mlx5hws_context *peer_ctx;

	peer_ctx = peer_ns ? peer_ns->fs_hws_context.hws_ctx : NULL;
	mlx5hws_context_set_peer(ns->fs_hws_context.hws_ctx, peer_ctx,
				 peer_vhca_id);
	return 0;
}
172 
mlx5_fs_set_ft_default_miss(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct mlx5_flow_table * next_ft)173 static int mlx5_fs_set_ft_default_miss(struct mlx5_flow_root_namespace *ns,
174 				       struct mlx5_flow_table *ft,
175 				       struct mlx5_flow_table *next_ft)
176 {
177 	struct mlx5hws_table *next_tbl;
178 	int err;
179 
180 	if (!ns->fs_hws_context.hws_ctx)
181 		return -EINVAL;
182 
183 	/* if no change required, return */
184 	if (!next_ft && !ft->fs_hws_table.miss_ft_set)
185 		return 0;
186 
187 	next_tbl = next_ft ? next_ft->fs_hws_table.hws_table : NULL;
188 	err = mlx5hws_table_set_default_miss(ft->fs_hws_table.hws_table, next_tbl);
189 	if (err) {
190 		mlx5_core_err(ns->dev, "Failed setting FT default miss (%d)\n", err);
191 		return err;
192 	}
193 	ft->fs_hws_table.miss_ft_set = !!next_tbl;
194 	return 0;
195 }
196 
mlx5_fs_add_flow_table_dest_action(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft)197 static int mlx5_fs_add_flow_table_dest_action(struct mlx5_flow_root_namespace *ns,
198 					      struct mlx5_flow_table *ft)
199 {
200 	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
201 	struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
202 	struct mlx5hws_action *dest_ft_action;
203 	struct xarray *dests_xa;
204 	int err;
205 
206 	dest_ft_action = mlx5hws_action_create_dest_table_num(fs_ctx->hws_ctx,
207 							      ft->id, flags);
208 	if (!dest_ft_action) {
209 		mlx5_core_err(ns->dev, "Failed creating dest table action\n");
210 		return -ENOMEM;
211 	}
212 
213 	dests_xa = &fs_ctx->hws_pool.table_dests;
214 	err = xa_insert(dests_xa, ft->id, dest_ft_action, GFP_KERNEL);
215 	if (err)
216 		mlx5hws_action_destroy(dest_ft_action);
217 	return err;
218 }
219 
mlx5_fs_del_flow_table_dest_action(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft)220 static int mlx5_fs_del_flow_table_dest_action(struct mlx5_flow_root_namespace *ns,
221 					      struct mlx5_flow_table *ft)
222 {
223 	struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
224 	struct mlx5hws_action *dest_ft_action;
225 	struct xarray *dests_xa;
226 	int err;
227 
228 	dests_xa = &fs_ctx->hws_pool.table_dests;
229 	dest_ft_action = xa_erase(dests_xa, ft->id);
230 	if (!dest_ft_action) {
231 		mlx5_core_err(ns->dev, "Failed to erase dest ft action\n");
232 		return -ENOENT;
233 	}
234 
235 	err = mlx5hws_action_destroy(dest_ft_action);
236 	if (err)
237 		mlx5_core_err(ns->dev, "Failed to destroy dest ft action\n");
238 	return err;
239 }
240 
/* Create a flow table. FW-terminating tables are delegated to the FW command
 * layer; all other tables are created as HWS tables (FDB only). In both
 * cases a dest-table action is cached so other rules can forward to the new
 * table. Returns 0 on success or a negative errno, with partial state
 * unwound on failure.
 */
static int mlx5_cmd_hws_create_flow_table(struct mlx5_flow_root_namespace *ns,
					  struct mlx5_flow_table *ft,
					  struct mlx5_flow_table_attr *ft_attr,
					  struct mlx5_flow_table *next_ft)
{
	struct mlx5hws_context *ctx = ns->fs_hws_context.hws_ctx;
	struct mlx5hws_table_attr tbl_attr = {};
	struct mlx5hws_table *tbl;
	int err;

	if (mlx5_fs_cmd_is_fw_term_table(ft)) {
		err = mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft, ft_attr,
								   next_ft);
		if (err)
			return err;
		err = mlx5_fs_add_flow_table_dest_action(ns, ft);
		if (err)
			mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);
		return err;
	}

	if (ns->table_type != FS_FT_FDB) {
		mlx5_core_err(ns->dev, "Table type %d not supported for HWS\n",
			      ns->table_type);
		return -EOPNOTSUPP;
	}

	tbl_attr.type = MLX5HWS_TABLE_TYPE_FDB;
	tbl_attr.level = ft_attr->level;
	tbl = mlx5hws_table_create(ctx, &tbl_attr);
	if (!tbl) {
		mlx5_core_err(ns->dev, "Failed creating hws flow_table\n");
		return -EINVAL;
	}

	ft->fs_hws_table.hws_table = tbl;
	ft->id = mlx5hws_table_get_id(tbl);

	if (next_ft) {
		err = mlx5_fs_set_ft_default_miss(ns, ft, next_ft);
		if (err)
			goto destroy_table;
	}

	/* HWS tables are not pre-sized; fs_core sees no fte limit. */
	ft->max_fte = INT_MAX;

	err = mlx5_fs_add_flow_table_dest_action(ns, ft);
	if (err)
		goto clear_ft_miss;
	return 0;

clear_ft_miss:
	/* No-op when no miss FT was set (miss_ft_set is false). */
	mlx5_fs_set_ft_default_miss(ns, ft, NULL);
destroy_table:
	mlx5hws_table_destroy(tbl);
	ft->fs_hws_table.hws_table = NULL;
	return err;
}
299 
mlx5_cmd_hws_destroy_flow_table(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft)300 static int mlx5_cmd_hws_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
301 					   struct mlx5_flow_table *ft)
302 {
303 	int err;
304 
305 	err = mlx5_fs_del_flow_table_dest_action(ns, ft);
306 	if (err)
307 		mlx5_core_err(ns->dev, "Failed to remove dest action (%d)\n", err);
308 
309 	if (mlx5_fs_cmd_is_fw_term_table(ft))
310 		return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);
311 
312 	err = mlx5_fs_set_ft_default_miss(ns, ft, NULL);
313 	if (err)
314 		mlx5_core_err(ns->dev, "Failed to disconnect next table (%d)\n", err);
315 
316 	err = mlx5hws_table_destroy(ft->fs_hws_table.hws_table);
317 	if (err)
318 		mlx5_core_err(ns->dev, "Failed to destroy flow_table (%d)\n", err);
319 
320 	return err;
321 }
322 
mlx5_cmd_hws_modify_flow_table(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct mlx5_flow_table * next_ft)323 static int mlx5_cmd_hws_modify_flow_table(struct mlx5_flow_root_namespace *ns,
324 					  struct mlx5_flow_table *ft,
325 					  struct mlx5_flow_table *next_ft)
326 {
327 	if (mlx5_fs_cmd_is_fw_term_table(ft))
328 		return mlx5_fs_cmd_get_fw_cmds()->modify_flow_table(ns, ft, next_ft);
329 
330 	return mlx5_fs_set_ft_default_miss(ns, ft, next_ft);
331 }
332 
/* Root FT updates are always handled by the FW command layer. */
static int mlx5_cmd_hws_update_root_ft(struct mlx5_flow_root_namespace *ns,
				       struct mlx5_flow_table *ft,
				       u32 underlay_qpn,
				       bool disconnect)
{
	const struct mlx5_flow_cmds *fw_cmds = mlx5_fs_cmd_get_fw_cmds();

	return fw_cmds->update_root_ft(ns, ft, underlay_qpn, disconnect);
}
341 
/* Create a flow group. For HWS tables a group maps to a BWC matcher built
 * from the group's match criteria; its priority is taken from the group's
 * start_flow_index.
 */
static int mlx5_cmd_hws_create_flow_group(struct mlx5_flow_root_namespace *ns,
					  struct mlx5_flow_table *ft, u32 *in,
					  struct mlx5_flow_group *fg)
{
	struct mlx5hws_match_parameters mask;
	struct mlx5hws_bwc_matcher *matcher;
	u8 match_criteria_enable;
	u32 priority;

	if (mlx5_fs_cmd_is_fw_term_table(ft))
		return mlx5_fs_cmd_get_fw_cmds()->create_flow_group(ns, ft, in, fg);

	mask.match_buf = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	mask.match_sz = sizeof(fg->mask.match_criteria);
	match_criteria_enable = MLX5_GET(create_flow_group_in, in,
					 match_criteria_enable);
	/* The group's first flow index doubles as the matcher priority. */
	priority = MLX5_GET(create_flow_group_in, in, start_flow_index);

	matcher = mlx5hws_bwc_matcher_create(ft->fs_hws_table.hws_table,
					     priority, match_criteria_enable,
					     &mask);
	if (!matcher) {
		mlx5_core_err(ns->dev, "Failed creating matcher\n");
		return -EINVAL;
	}

	fg->fs_hws_matcher.matcher = matcher;
	return 0;
}
371 
mlx5_cmd_hws_destroy_flow_group(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct mlx5_flow_group * fg)372 static int mlx5_cmd_hws_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
373 					   struct mlx5_flow_table *ft,
374 					   struct mlx5_flow_group *fg)
375 {
376 	if (mlx5_fs_cmd_is_fw_term_table(ft))
377 		return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_group(ns, ft, fg);
378 
379 	return mlx5hws_bwc_matcher_destroy(fg->fs_hws_matcher.matcher);
380 }
381 
382 static struct mlx5hws_action *
mlx5_fs_get_dest_action_ft(struct mlx5_fs_hws_context * fs_ctx,struct mlx5_flow_rule * dst)383 mlx5_fs_get_dest_action_ft(struct mlx5_fs_hws_context *fs_ctx,
384 			   struct mlx5_flow_rule *dst)
385 {
386 	return xa_load(&fs_ctx->hws_pool.table_dests, dst->dest_attr.ft->id);
387 }
388 
389 static struct mlx5hws_action *
mlx5_fs_get_dest_action_table_num(struct mlx5_fs_hws_context * fs_ctx,struct mlx5_flow_rule * dst)390 mlx5_fs_get_dest_action_table_num(struct mlx5_fs_hws_context *fs_ctx,
391 				  struct mlx5_flow_rule *dst)
392 {
393 	u32 table_num = dst->dest_attr.ft_num;
394 
395 	return xa_load(&fs_ctx->hws_pool.table_dests, table_num);
396 }
397 
398 static struct mlx5hws_action *
mlx5_fs_create_dest_action_table_num(struct mlx5_fs_hws_context * fs_ctx,struct mlx5_flow_rule * dst)399 mlx5_fs_create_dest_action_table_num(struct mlx5_fs_hws_context *fs_ctx,
400 				     struct mlx5_flow_rule *dst)
401 {
402 	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
403 	struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
404 	u32 table_num = dst->dest_attr.ft_num;
405 
406 	return mlx5hws_action_create_dest_table_num(ctx, table_num, flags);
407 }
408 
409 static struct mlx5hws_action *
mlx5_fs_get_dest_action_vport(struct mlx5_fs_hws_context * fs_ctx,struct mlx5_flow_rule * dst,bool is_dest_type_uplink)410 mlx5_fs_get_dest_action_vport(struct mlx5_fs_hws_context *fs_ctx,
411 			      struct mlx5_flow_rule *dst,
412 			      bool is_dest_type_uplink)
413 {
414 	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
415 	struct mlx5_flow_destination *dest_attr = &dst->dest_attr;
416 	struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
417 	struct mlx5hws_action *dest;
418 	struct xarray *dests_xa;
419 	bool vhca_id_valid;
420 	unsigned long idx;
421 	u16 vport_num;
422 	int err;
423 
424 	vhca_id_valid = is_dest_type_uplink ||
425 			(dest_attr->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID);
426 	vport_num = is_dest_type_uplink ? MLX5_VPORT_UPLINK : dest_attr->vport.num;
427 	if (vhca_id_valid) {
428 		dests_xa = &fs_ctx->hws_pool.vport_vhca_dests;
429 		idx = (unsigned long)dest_attr->vport.vhca_id << 16 | vport_num;
430 	} else {
431 		dests_xa = &fs_ctx->hws_pool.vport_dests;
432 		idx = vport_num;
433 	}
434 dest_load:
435 	dest = xa_load(dests_xa, idx);
436 	if (dest)
437 		return dest;
438 
439 	dest = mlx5hws_action_create_dest_vport(ctx, vport_num,	vhca_id_valid,
440 						dest_attr->vport.vhca_id, flags);
441 
442 	err = xa_insert(dests_xa, idx, dest, GFP_KERNEL);
443 	if (err) {
444 		mlx5hws_action_destroy(dest);
445 		dest = NULL;
446 
447 		if (err == -EBUSY)
448 			/* xarray entry was already stored by another thread */
449 			goto dest_load;
450 	}
451 
452 	return dest;
453 }
454 
455 static struct mlx5hws_action *
mlx5_fs_create_dest_action_range(struct mlx5hws_context * ctx,struct mlx5_flow_rule * dst)456 mlx5_fs_create_dest_action_range(struct mlx5hws_context *ctx,
457 				 struct mlx5_flow_rule *dst)
458 {
459 	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
460 	struct mlx5_flow_destination *dest_attr = &dst->dest_attr;
461 
462 	return mlx5hws_action_create_dest_match_range(ctx,
463 						      dest_attr->range.field,
464 						      dest_attr->range.hit_ft,
465 						      dest_attr->range.miss_ft,
466 						      dest_attr->range.min,
467 						      dest_attr->range.max,
468 						      flags);
469 }
470 
471 static struct mlx5_fs_hws_data *
mlx5_fs_get_cached_hws_data(struct xarray * cache_xa,unsigned long index)472 mlx5_fs_get_cached_hws_data(struct xarray *cache_xa, unsigned long index)
473 {
474 	struct mlx5_fs_hws_data *fs_hws_data;
475 	int err;
476 
477 	xa_lock(cache_xa);
478 	fs_hws_data = xa_load(cache_xa, index);
479 	if (!fs_hws_data) {
480 		fs_hws_data = kzalloc(sizeof(*fs_hws_data), GFP_ATOMIC);
481 		if (!fs_hws_data) {
482 			xa_unlock(cache_xa);
483 			return NULL;
484 		}
485 		refcount_set(&fs_hws_data->hws_action_refcount, 0);
486 		mutex_init(&fs_hws_data->lock);
487 		err = __xa_insert(cache_xa, index, fs_hws_data, GFP_ATOMIC);
488 		if (err) {
489 			kfree(fs_hws_data);
490 			xa_unlock(cache_xa);
491 			return NULL;
492 		}
493 	}
494 	xa_unlock(cache_xa);
495 
496 	return fs_hws_data;
497 }
498 
499 static struct mlx5hws_action *
mlx5_fs_get_action_aso_meter(struct mlx5_fs_hws_context * fs_ctx,struct mlx5_exe_aso * exe_aso)500 mlx5_fs_get_action_aso_meter(struct mlx5_fs_hws_context *fs_ctx,
501 			     struct mlx5_exe_aso *exe_aso)
502 {
503 	struct mlx5_fs_hws_create_action_ctx create_ctx;
504 	struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
505 	struct mlx5_fs_hws_data *meter_hws_data;
506 	u32 id = exe_aso->base_id;
507 	struct xarray *meters_xa;
508 
509 	meters_xa = &fs_ctx->hws_pool.aso_meters;
510 	meter_hws_data = mlx5_fs_get_cached_hws_data(meters_xa, id);
511 	if (!meter_hws_data)
512 		return NULL;
513 
514 	create_ctx.hws_ctx = ctx;
515 	create_ctx.actions_type = MLX5HWS_ACTION_TYP_ASO_METER;
516 	create_ctx.id = id;
517 	create_ctx.return_reg_id = exe_aso->return_reg_id;
518 
519 	return mlx5_fs_get_hws_action(meter_hws_data, &create_ctx);
520 }
521 
mlx5_fs_put_action_aso_meter(struct mlx5_fs_hws_context * fs_ctx,struct mlx5_exe_aso * exe_aso)522 static void mlx5_fs_put_action_aso_meter(struct mlx5_fs_hws_context *fs_ctx,
523 					 struct mlx5_exe_aso *exe_aso)
524 {
525 	struct mlx5_fs_hws_data *meter_hws_data;
526 	struct xarray *meters_xa;
527 
528 	meters_xa = &fs_ctx->hws_pool.aso_meters;
529 	meter_hws_data = xa_load(meters_xa, exe_aso->base_id);
530 	if (!meter_hws_data)
531 		return;
532 	return mlx5_fs_put_hws_action(meter_hws_data);
533 }
534 
535 static struct mlx5hws_action *
mlx5_fs_get_dest_action_sampler(struct mlx5_fs_hws_context * fs_ctx,struct mlx5_flow_rule * dst)536 mlx5_fs_get_dest_action_sampler(struct mlx5_fs_hws_context *fs_ctx,
537 				struct mlx5_flow_rule *dst)
538 {
539 	struct mlx5_fs_hws_create_action_ctx create_ctx;
540 	struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
541 	struct mlx5_fs_hws_data *sampler_hws_data;
542 	u32 id = dst->dest_attr.sampler_id;
543 	struct xarray *sampler_xa;
544 
545 	sampler_xa = &fs_ctx->hws_pool.sample_dests;
546 	sampler_hws_data = mlx5_fs_get_cached_hws_data(sampler_xa, id);
547 	if (!sampler_hws_data)
548 		return NULL;
549 
550 	create_ctx.hws_ctx = ctx;
551 	create_ctx.actions_type = MLX5HWS_ACTION_TYP_SAMPLER;
552 	create_ctx.id = id;
553 
554 	return mlx5_fs_get_hws_action(sampler_hws_data, &create_ctx);
555 }
556 
/* Drop one reference on the cached sampler destination action; a missing
 * cache entry means nothing to release.
 */
static void mlx5_fs_put_dest_action_sampler(struct mlx5_fs_hws_context *fs_ctx,
					    u32 sampler_id)
{
	struct mlx5_fs_hws_data *sampler_hws_data =
		xa_load(&fs_ctx->hws_pool.sample_dests, sampler_id);

	if (sampler_hws_data)
		mlx5_fs_put_hws_action(sampler_hws_data);
}
570 
571 static struct mlx5hws_action *
mlx5_fs_create_action_dest_array(struct mlx5hws_context * ctx,struct mlx5hws_action_dest_attr * dests,u32 num_of_dests,bool ignore_flow_level,u32 flow_source)572 mlx5_fs_create_action_dest_array(struct mlx5hws_context *ctx,
573 				 struct mlx5hws_action_dest_attr *dests,
574 				 u32 num_of_dests, bool ignore_flow_level,
575 				 u32 flow_source)
576 {
577 	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
578 
579 	return mlx5hws_action_create_dest_array(ctx, num_of_dests, dests,
580 						ignore_flow_level,
581 						flow_source, flags);
582 }
583 
584 static struct mlx5hws_action *
mlx5_fs_get_action_push_vlan(struct mlx5_fs_hws_context * fs_ctx)585 mlx5_fs_get_action_push_vlan(struct mlx5_fs_hws_context *fs_ctx)
586 {
587 	return fs_ctx->hws_pool.push_vlan_action;
588 }
589 
mlx5_fs_calc_vlan_hdr(struct mlx5_fs_vlan * vlan)590 static u32 mlx5_fs_calc_vlan_hdr(struct mlx5_fs_vlan *vlan)
591 {
592 	u16 n_ethtype = vlan->ethtype;
593 	u8 prio = vlan->prio;
594 	u16 vid = vlan->vid;
595 
596 	return (u32)n_ethtype << 16 | (u32)(prio) << 12 | (u32)vid;
597 }
598 
599 static struct mlx5hws_action *
mlx5_fs_get_action_pop_vlan(struct mlx5_fs_hws_context * fs_ctx)600 mlx5_fs_get_action_pop_vlan(struct mlx5_fs_hws_context *fs_ctx)
601 {
602 	return fs_ctx->hws_pool.pop_vlan_action;
603 }
604 
605 static struct mlx5hws_action *
mlx5_fs_get_action_decap_tnl_l2_to_l2(struct mlx5_fs_hws_context * fs_ctx)606 mlx5_fs_get_action_decap_tnl_l2_to_l2(struct mlx5_fs_hws_context *fs_ctx)
607 {
608 	return fs_ctx->hws_pool.decapl2_action;
609 }
610 
611 static struct mlx5hws_action *
mlx5_fs_get_dest_action_drop(struct mlx5_fs_hws_context * fs_ctx)612 mlx5_fs_get_dest_action_drop(struct mlx5_fs_hws_context *fs_ctx)
613 {
614 	return fs_ctx->hws_pool.drop_action;
615 }
616 
617 static struct mlx5hws_action *
mlx5_fs_get_action_tag(struct mlx5_fs_hws_context * fs_ctx)618 mlx5_fs_get_action_tag(struct mlx5_fs_hws_context *fs_ctx)
619 {
620 	return fs_ctx->hws_pool.tag_action;
621 }
622 
623 static struct mlx5hws_action *
mlx5_fs_create_action_last(struct mlx5hws_context * ctx)624 mlx5_fs_create_action_last(struct mlx5hws_context *ctx)
625 {
626 	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
627 
628 	return mlx5hws_action_create_last(ctx, flags);
629 }
630 
631 static struct mlx5hws_action *
mlx5_fs_create_hws_action(struct mlx5_fs_hws_create_action_ctx * create_ctx)632 mlx5_fs_create_hws_action(struct mlx5_fs_hws_create_action_ctx *create_ctx)
633 {
634 	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
635 
636 	switch (create_ctx->actions_type) {
637 	case MLX5HWS_ACTION_TYP_CTR:
638 		return mlx5hws_action_create_counter(create_ctx->hws_ctx,
639 						     create_ctx->id, flags);
640 	case MLX5HWS_ACTION_TYP_ASO_METER:
641 		return mlx5hws_action_create_aso_meter(create_ctx->hws_ctx,
642 						       create_ctx->id,
643 						       create_ctx->return_reg_id,
644 						       flags);
645 	case MLX5HWS_ACTION_TYP_SAMPLER:
646 		return mlx5hws_action_create_flow_sampler(create_ctx->hws_ctx,
647 							  create_ctx->id, flags);
648 	default:
649 		return NULL;
650 	}
651 }
652 
653 struct mlx5hws_action *
mlx5_fs_get_hws_action(struct mlx5_fs_hws_data * fs_hws_data,struct mlx5_fs_hws_create_action_ctx * create_ctx)654 mlx5_fs_get_hws_action(struct mlx5_fs_hws_data *fs_hws_data,
655 		       struct mlx5_fs_hws_create_action_ctx *create_ctx)
656 {
657 	/* try avoid locking if not necessary */
658 	if (refcount_inc_not_zero(&fs_hws_data->hws_action_refcount))
659 		return fs_hws_data->hws_action;
660 
661 	mutex_lock(&fs_hws_data->lock);
662 	if (refcount_inc_not_zero(&fs_hws_data->hws_action_refcount)) {
663 		mutex_unlock(&fs_hws_data->lock);
664 		return fs_hws_data->hws_action;
665 	}
666 	fs_hws_data->hws_action = mlx5_fs_create_hws_action(create_ctx);
667 	if (!fs_hws_data->hws_action) {
668 		mutex_unlock(&fs_hws_data->lock);
669 		return NULL;
670 	}
671 	refcount_set(&fs_hws_data->hws_action_refcount, 1);
672 	mutex_unlock(&fs_hws_data->lock);
673 
674 	return fs_hws_data->hws_action;
675 }
676 
/* Drop a reference on the cached action, destroying it when the last
 * reference goes away. Mirror of mlx5_fs_get_hws_action(): the lock-free
 * refcount_dec_not_one() fast path handles all but the final put; the final
 * 1 -> 0 transition is done under the mutex so it cannot race with a
 * concurrent creator re-initializing the action.
 */
void mlx5_fs_put_hws_action(struct mlx5_fs_hws_data *fs_hws_data)
{
	if (!fs_hws_data)
		return;

	/* try avoid locking if not necessary */
	if (refcount_dec_not_one(&fs_hws_data->hws_action_refcount))
		return;

	mutex_lock(&fs_hws_data->lock);
	if (!refcount_dec_and_test(&fs_hws_data->hws_action_refcount)) {
		mutex_unlock(&fs_hws_data->lock);
		return;
	}
	mlx5hws_action_destroy(fs_hws_data->hws_action);
	fs_hws_data->hws_action = NULL;
	mutex_unlock(&fs_hws_data->lock);
}
695 
mlx5_fs_destroy_fs_action(struct mlx5_flow_root_namespace * ns,struct mlx5_fs_hws_rule_action * fs_action)696 static void mlx5_fs_destroy_fs_action(struct mlx5_flow_root_namespace *ns,
697 				      struct mlx5_fs_hws_rule_action *fs_action)
698 {
699 	struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
700 
701 	switch (mlx5hws_action_get_type(fs_action->action)) {
702 	case MLX5HWS_ACTION_TYP_CTR:
703 		mlx5_fc_put_hws_action(fs_action->counter);
704 		break;
705 	case MLX5HWS_ACTION_TYP_ASO_METER:
706 		mlx5_fs_put_action_aso_meter(fs_ctx, fs_action->exe_aso);
707 		break;
708 	case MLX5HWS_ACTION_TYP_SAMPLER:
709 		mlx5_fs_put_dest_action_sampler(fs_ctx, fs_action->sampler_id);
710 		break;
711 	default:
712 		mlx5hws_action_destroy(fs_action->action);
713 	}
714 }
715 
716 static void
mlx5_fs_destroy_fs_actions(struct mlx5_flow_root_namespace * ns,struct mlx5_fs_hws_rule_action ** fs_actions,int * num_fs_actions)717 mlx5_fs_destroy_fs_actions(struct mlx5_flow_root_namespace *ns,
718 			   struct mlx5_fs_hws_rule_action **fs_actions,
719 			   int *num_fs_actions)
720 {
721 	int i;
722 
723 	/* Free in reverse order to handle action dependencies */
724 	for (i = *num_fs_actions - 1; i >= 0; i--)
725 		mlx5_fs_destroy_fs_action(ns, *fs_actions + i);
726 	*num_fs_actions = 0;
727 	kfree(*fs_actions);
728 	*fs_actions = NULL;
729 }
730 
731 /* Splits FTE's actions into cached, rule and destination actions.
732  * The cached and destination actions are saved on the fte hws rule.
733  * The rule actions are returned as a parameter, together with their count.
734  * We want to support a rule with 32 destinations, which means we need to
735  * account for 32 destinations plus usually a counter plus one more action
736  * for a multi-destination flow table.
737  * 32 is SW limitation for array size, keep. HWS limitation is 16M STEs per matcher
738  */
739 #define MLX5_FLOW_CONTEXT_ACTION_MAX 34
mlx5_fs_fte_get_hws_actions(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct mlx5_flow_group * group,struct fs_fte * fte,struct mlx5hws_rule_action ** ractions)740 static int mlx5_fs_fte_get_hws_actions(struct mlx5_flow_root_namespace *ns,
741 				       struct mlx5_flow_table *ft,
742 				       struct mlx5_flow_group *group,
743 				       struct fs_fte *fte,
744 				       struct mlx5hws_rule_action **ractions)
745 {
746 	struct mlx5_flow_act *fte_action = &fte->act_dests.action;
747 	struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
748 	struct mlx5hws_action_dest_attr *dest_actions;
749 	struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
750 	struct mlx5_fs_hws_rule_action *fs_actions;
751 	struct mlx5_core_dev *dev = ns->dev;
752 	struct mlx5hws_action *dest_action;
753 	struct mlx5hws_action *tmp_action;
754 	struct mlx5_fs_hws_pr *pr_data;
755 	struct mlx5_fs_hws_mh *mh_data;
756 	bool delay_encap_set = false;
757 	struct mlx5_flow_rule *dst;
758 	int num_dest_actions = 0;
759 	int num_fs_actions = 0;
760 	int num_actions = 0;
761 	int err;
762 
763 	*ractions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(**ractions),
764 			    GFP_KERNEL);
765 	if (!*ractions) {
766 		err = -ENOMEM;
767 		goto out_err;
768 	}
769 
770 	fs_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
771 			     sizeof(*fs_actions), GFP_KERNEL);
772 	if (!fs_actions) {
773 		err = -ENOMEM;
774 		goto free_actions_alloc;
775 	}
776 
777 	dest_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
778 			       sizeof(*dest_actions), GFP_KERNEL);
779 	if (!dest_actions) {
780 		err = -ENOMEM;
781 		goto free_fs_actions_alloc;
782 	}
783 
784 	/* The order of the actions are must to be kept, only the following
785 	 * order is supported by HW steering:
786 	 * HWS: decap -> remove_hdr -> pop_vlan -> modify header -> push_vlan
787 	 *      -> reformat (insert_hdr/encap) -> ctr -> tag -> aso
788 	 *      -> drop -> FWD:tbl/vport/sampler/tbl_num/range -> dest_array -> last
789 	 */
790 	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
791 		tmp_action = mlx5_fs_get_action_decap_tnl_l2_to_l2(fs_ctx);
792 		if (!tmp_action) {
793 			err = -ENOMEM;
794 			goto free_dest_actions_alloc;
795 		}
796 		(*ractions)[num_actions++].action = tmp_action;
797 	}
798 
799 	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
800 		int reformat_type = fte_action->pkt_reformat->reformat_type;
801 
802 		if (fte_action->pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) {
803 			mlx5_core_err(dev, "FW-owned reformat can't be used in HWS rule\n");
804 			err = -EINVAL;
805 			goto free_actions;
806 		}
807 
808 		if (reformat_type == MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2) {
809 			pr_data = fte_action->pkt_reformat->fs_hws_action.pr_data;
810 			(*ractions)[num_actions].reformat.offset = pr_data->offset;
811 			(*ractions)[num_actions].reformat.hdr_idx = pr_data->hdr_idx;
812 			(*ractions)[num_actions].reformat.data = pr_data->data;
813 			(*ractions)[num_actions++].action =
814 				fte_action->pkt_reformat->fs_hws_action.hws_action;
815 		} else if (reformat_type == MLX5_REFORMAT_TYPE_REMOVE_HDR) {
816 			(*ractions)[num_actions++].action =
817 				fte_action->pkt_reformat->fs_hws_action.hws_action;
818 		} else {
819 			delay_encap_set = true;
820 		}
821 	}
822 
823 	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
824 		tmp_action = mlx5_fs_get_action_pop_vlan(fs_ctx);
825 		if (!tmp_action) {
826 			err = -ENOMEM;
827 			goto free_actions;
828 		}
829 		(*ractions)[num_actions++].action = tmp_action;
830 	}
831 
832 	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) {
833 		tmp_action = mlx5_fs_get_action_pop_vlan(fs_ctx);
834 		if (!tmp_action) {
835 			err = -ENOMEM;
836 			goto free_actions;
837 		}
838 		(*ractions)[num_actions++].action = tmp_action;
839 	}
840 
841 	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
842 		mh_data = fte_action->modify_hdr->fs_hws_action.mh_data;
843 		(*ractions)[num_actions].modify_header.offset = mh_data->offset;
844 		(*ractions)[num_actions].modify_header.data = mh_data->data;
845 		(*ractions)[num_actions++].action =
846 			fte_action->modify_hdr->fs_hws_action.hws_action;
847 	}
848 
849 	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
850 		tmp_action = mlx5_fs_get_action_push_vlan(fs_ctx);
851 		if (!tmp_action) {
852 			err = -ENOMEM;
853 			goto free_actions;
854 		}
855 		(*ractions)[num_actions].push_vlan.vlan_hdr =
856 			htonl(mlx5_fs_calc_vlan_hdr(&fte_action->vlan[0]));
857 		(*ractions)[num_actions++].action = tmp_action;
858 	}
859 
860 	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
861 		tmp_action = mlx5_fs_get_action_push_vlan(fs_ctx);
862 		if (!tmp_action) {
863 			err = -ENOMEM;
864 			goto free_actions;
865 		}
866 		(*ractions)[num_actions].push_vlan.vlan_hdr =
867 			htonl(mlx5_fs_calc_vlan_hdr(&fte_action->vlan[1]));
868 		(*ractions)[num_actions++].action = tmp_action;
869 	}
870 
871 	if (delay_encap_set) {
872 		pr_data = fte_action->pkt_reformat->fs_hws_action.pr_data;
873 		(*ractions)[num_actions].reformat.offset = pr_data->offset;
874 		(*ractions)[num_actions].reformat.data = pr_data->data;
875 		(*ractions)[num_actions++].action =
876 			fte_action->pkt_reformat->fs_hws_action.hws_action;
877 	}
878 
879 	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
880 		list_for_each_entry(dst, &fte->node.children, node.list) {
881 			struct mlx5_fc *counter;
882 
883 			if (dst->dest_attr.type !=
884 			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
885 				continue;
886 
887 			if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
888 				err = -EOPNOTSUPP;
889 				goto free_actions;
890 			}
891 
892 			counter = dst->dest_attr.counter;
893 			tmp_action = mlx5_fc_get_hws_action(ctx, counter);
894 			if (!tmp_action) {
895 				err = -EINVAL;
896 				goto free_actions;
897 			}
898 
899 			(*ractions)[num_actions].counter.offset =
900 				mlx5_fc_id(counter) - mlx5_fc_get_base_id(counter);
901 			(*ractions)[num_actions++].action = tmp_action;
902 			fs_actions[num_fs_actions].action = tmp_action;
903 			fs_actions[num_fs_actions++].counter = counter;
904 		}
905 	}
906 
907 	if (fte->act_dests.flow_context.flow_tag) {
908 		if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
909 			err = -EOPNOTSUPP;
910 			goto free_actions;
911 		}
912 		tmp_action = mlx5_fs_get_action_tag(fs_ctx);
913 		if (!tmp_action) {
914 			err = -ENOMEM;
915 			goto free_actions;
916 		}
917 		(*ractions)[num_actions].tag.value = fte->act_dests.flow_context.flow_tag;
918 		(*ractions)[num_actions++].action = tmp_action;
919 	}
920 
921 	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
922 		if (fte_action->exe_aso.type != MLX5_EXE_ASO_FLOW_METER ||
923 		    num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
924 			err = -EOPNOTSUPP;
925 			goto free_actions;
926 		}
927 
928 		tmp_action = mlx5_fs_get_action_aso_meter(fs_ctx,
929 							  &fte_action->exe_aso);
930 		if (!tmp_action) {
931 			err = -ENOMEM;
932 			goto free_actions;
933 		}
934 		(*ractions)[num_actions].aso_meter.offset =
935 			fte_action->exe_aso.flow_meter.meter_idx;
936 		(*ractions)[num_actions].aso_meter.init_color =
937 			fte_action->exe_aso.flow_meter.init_color;
938 		(*ractions)[num_actions++].action = tmp_action;
939 		fs_actions[num_fs_actions].action = tmp_action;
940 		fs_actions[num_fs_actions++].exe_aso = &fte_action->exe_aso;
941 	}
942 
943 	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
944 		dest_action = mlx5_fs_get_dest_action_drop(fs_ctx);
945 		if (!dest_action) {
946 			err = -ENOMEM;
947 			goto free_actions;
948 		}
949 		dest_actions[num_dest_actions++].dest = dest_action;
950 	}
951 
952 	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
953 		list_for_each_entry(dst, &fte->node.children, node.list) {
954 			struct mlx5_flow_destination *attr = &dst->dest_attr;
955 			bool type_uplink =
956 				attr->type == MLX5_FLOW_DESTINATION_TYPE_UPLINK;
957 
958 			if (num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
959 			    num_dest_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
960 				err = -EOPNOTSUPP;
961 				goto free_actions;
962 			}
963 			if (attr->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
964 				continue;
965 
966 			switch (attr->type) {
967 			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
968 				dest_action = mlx5_fs_get_dest_action_ft(fs_ctx, dst);
969 				break;
970 			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
971 				dest_action = mlx5_fs_get_dest_action_table_num(fs_ctx,
972 										dst);
973 				if (dest_action)
974 					break;
975 				dest_action = mlx5_fs_create_dest_action_table_num(fs_ctx,
976 										   dst);
977 				fs_actions[num_fs_actions++].action = dest_action;
978 				break;
979 			case MLX5_FLOW_DESTINATION_TYPE_RANGE:
980 				dest_action = mlx5_fs_create_dest_action_range(ctx, dst);
981 				fs_actions[num_fs_actions++].action = dest_action;
982 				break;
983 			case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
984 			case MLX5_FLOW_DESTINATION_TYPE_VPORT:
985 				dest_action = mlx5_fs_get_dest_action_vport(fs_ctx, dst,
986 									    type_uplink);
987 				break;
988 			case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
989 				dest_action =
990 					mlx5_fs_get_dest_action_sampler(fs_ctx,
991 									dst);
992 				fs_actions[num_fs_actions].action = dest_action;
993 				fs_actions[num_fs_actions++].sampler_id =
994 							dst->dest_attr.sampler_id;
995 				break;
996 			default:
997 				err = -EOPNOTSUPP;
998 				goto free_actions;
999 			}
1000 			if (!dest_action) {
1001 				err = -ENOMEM;
1002 				goto free_actions;
1003 			}
1004 			dest_actions[num_dest_actions++].dest = dest_action;
1005 		}
1006 	}
1007 
1008 	if (num_dest_actions == 1) {
1009 		if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
1010 			err = -EOPNOTSUPP;
1011 			goto free_actions;
1012 		}
1013 		(*ractions)[num_actions++].action = dest_actions->dest;
1014 	} else if (num_dest_actions > 1) {
1015 		u32 flow_source = fte->act_dests.flow_context.flow_source;
1016 		bool ignore_flow_level;
1017 
1018 		if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
1019 		    num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
1020 			err = -EOPNOTSUPP;
1021 			goto free_actions;
1022 		}
1023 		ignore_flow_level =
1024 			!!(fte_action->flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
1025 		tmp_action = mlx5_fs_create_action_dest_array(ctx, dest_actions,
1026 							      num_dest_actions,
1027 							      ignore_flow_level,
1028 							      flow_source);
1029 		if (!tmp_action) {
1030 			err = -EOPNOTSUPP;
1031 			goto free_actions;
1032 		}
1033 		fs_actions[num_fs_actions++].action = tmp_action;
1034 		(*ractions)[num_actions++].action = tmp_action;
1035 	}
1036 
1037 	if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
1038 	    num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
1039 		err = -EOPNOTSUPP;
1040 		goto free_actions;
1041 	}
1042 
1043 	tmp_action = mlx5_fs_create_action_last(ctx);
1044 	if (!tmp_action) {
1045 		err = -ENOMEM;
1046 		goto free_actions;
1047 	}
1048 	fs_actions[num_fs_actions++].action = tmp_action;
1049 	(*ractions)[num_actions++].action = tmp_action;
1050 
1051 	kfree(dest_actions);
1052 
1053 	/* Actions created specifically for this rule will be destroyed
1054 	 * once rule is deleted.
1055 	 */
1056 	fte->fs_hws_rule.num_fs_actions = num_fs_actions;
1057 	fte->fs_hws_rule.hws_fs_actions = fs_actions;
1058 
1059 	return 0;
1060 
1061 free_actions:
1062 	mlx5_fs_destroy_fs_actions(ns, &fs_actions, &num_fs_actions);
1063 free_dest_actions_alloc:
1064 	kfree(dest_actions);
1065 free_fs_actions_alloc:
1066 	kfree(fs_actions);
1067 free_actions_alloc:
1068 	kfree(*ractions);
1069 	*ractions = NULL;
1070 out_err:
1071 	return err;
1072 }
1073 
/* Create a flow table entry (fte) as an HWS rule.
 *
 * FW-terminated tables are delegated to the FW command interface; packet
 * reformat is not supported there yet. Otherwise the fte actions are
 * converted to HWS rule actions and a BWC rule is created from the fte
 * match value.
 *
 * Returns 0 on success or a negative error code.
 */
static int mlx5_cmd_hws_create_fte(struct mlx5_flow_root_namespace *ns,
				   struct mlx5_flow_table *ft,
				   struct mlx5_flow_group *group,
				   struct fs_fte *fte)
{
	struct mlx5hws_match_parameters params;
	struct mlx5hws_rule_action *ractions;
	struct mlx5hws_bwc_rule *rule;
	int err = 0;

	if (mlx5_fs_cmd_is_fw_term_table(ft)) {
		/* Packet reformat on termination table not supported yet */
		if (fte->act_dests.action.action &
		    MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
			return -EOPNOTSUPP;
		return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte);
	}

	err = mlx5_fs_fte_get_hws_actions(ns, ft, group, fte, &ractions);
	if (err)
		goto out_err;

	params.match_sz = sizeof(fte->val);
	params.match_buf = fte->val;

	/* ractions is only needed for rule creation and is freed
	 * unconditionally afterwards.
	 */
	rule = mlx5hws_bwc_rule_create(group->fs_hws_matcher.matcher, &params,
				       fte->act_dests.flow_context.flow_source,
				       ractions);
	kfree(ractions);
	if (!rule) {
		err = -EINVAL;
		goto free_actions;
	}

	fte->fs_hws_rule.bwc_rule = rule;
	return 0;

free_actions:
	/* Destroy the per-rule actions built by mlx5_fs_fte_get_hws_actions() */
	mlx5_fs_destroy_fs_actions(ns, &fte->fs_hws_rule.hws_fs_actions,
				   &fte->fs_hws_rule.num_fs_actions);
out_err:
	mlx5_core_err(ns->dev, "Failed to create hws rule err(%d)\n", err);
	return err;
}
1118 
mlx5_cmd_hws_delete_fte(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct fs_fte * fte)1119 static int mlx5_cmd_hws_delete_fte(struct mlx5_flow_root_namespace *ns,
1120 				   struct mlx5_flow_table *ft,
1121 				   struct fs_fte *fte)
1122 {
1123 	struct mlx5_fs_hws_rule *rule = &fte->fs_hws_rule;
1124 	int err;
1125 
1126 	if (mlx5_fs_cmd_is_fw_term_table(ft))
1127 		return mlx5_fs_cmd_get_fw_cmds()->delete_fte(ns, ft, fte);
1128 
1129 	err = mlx5hws_bwc_rule_destroy(rule->bwc_rule);
1130 	rule->bwc_rule = NULL;
1131 
1132 	mlx5_fs_destroy_fs_actions(ns, &rule->hws_fs_actions,
1133 				   &rule->num_fs_actions);
1134 
1135 	return err;
1136 }
1137 
/* Update an existing fte in place by rebuilding its HWS actions.
 *
 * Only action, destination-list and flow-counter modifications are
 * accepted. The current per-rule actions are saved first so they can be
 * restored if the rule-action update fails; on success the old actions
 * are destroyed instead.
 */
static int mlx5_cmd_hws_update_fte(struct mlx5_flow_root_namespace *ns,
				   struct mlx5_flow_table *ft,
				   struct mlx5_flow_group *group,
				   int modify_mask,
				   struct fs_fte *fte)
{
	int allowed_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
		BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST) |
		BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
	struct mlx5_fs_hws_rule_action *saved_hws_fs_actions;
	struct mlx5hws_rule_action *ractions;
	int saved_num_fs_actions;
	int ret;

	/* FW-terminated tables are managed through the FW command set */
	if (mlx5_fs_cmd_is_fw_term_table(ft))
		return mlx5_fs_cmd_get_fw_cmds()->update_fte(ns, ft, group,
							     modify_mask, fte);

	if ((modify_mask & ~allowed_mask) != 0)
		return -EINVAL;

	/* Save the current per-rule actions: mlx5_fs_fte_get_hws_actions()
	 * overwrites fte->fs_hws_rule with the newly built ones.
	 */
	saved_hws_fs_actions = fte->fs_hws_rule.hws_fs_actions;
	saved_num_fs_actions = fte->fs_hws_rule.num_fs_actions;

	ret = mlx5_fs_fte_get_hws_actions(ns, ft, group, fte, &ractions);
	if (ret)
		return ret;

	ret = mlx5hws_bwc_rule_action_update(fte->fs_hws_rule.bwc_rule, ractions);
	kfree(ractions);
	if (ret)
		goto restore_actions;

	/* Update succeeded - the saved (old) actions are no longer in use */
	mlx5_fs_destroy_fs_actions(ns, &saved_hws_fs_actions,
				   &saved_num_fs_actions);
	return ret;

restore_actions:
	/* Drop the newly built actions and put the saved ones back */
	mlx5_fs_destroy_fs_actions(ns, &fte->fs_hws_rule.hws_fs_actions,
				   &fte->fs_hws_rule.num_fs_actions);
	fte->fs_hws_rule.hws_fs_actions = saved_hws_fs_actions;
	fte->fs_hws_rule.num_fs_actions = saved_num_fs_actions;
	return ret;
}
1182 
1183 static struct mlx5hws_action *
mlx5_fs_create_action_remove_header_vlan(struct mlx5hws_context * ctx)1184 mlx5_fs_create_action_remove_header_vlan(struct mlx5hws_context *ctx)
1185 {
1186 	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
1187 	struct mlx5hws_action_remove_header_attr remove_hdr_vlan = {};
1188 
1189 	/* MAC anchor not supported in HWS reformat, use VLAN anchor */
1190 	remove_hdr_vlan.anchor = MLX5_REFORMAT_CONTEXT_ANCHOR_VLAN_START;
1191 	remove_hdr_vlan.offset = 0;
1192 	remove_hdr_vlan.size = sizeof(struct vlan_hdr);
1193 	return mlx5hws_action_create_remove_header(ctx, &remove_hdr_vlan, flags);
1194 }
1195 
1196 static struct mlx5hws_action *
mlx5_fs_get_action_remove_header_vlan(struct mlx5_fs_hws_context * fs_ctx,struct mlx5_pkt_reformat_params * params)1197 mlx5_fs_get_action_remove_header_vlan(struct mlx5_fs_hws_context *fs_ctx,
1198 				      struct mlx5_pkt_reformat_params *params)
1199 {
1200 	if (!params ||
1201 	    params->param_0 != MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START ||
1202 	    params->param_1 != offsetof(struct vlan_ethhdr, h_vlan_proto) ||
1203 	    params->size != sizeof(struct vlan_hdr))
1204 		return NULL;
1205 
1206 	return fs_ctx->hws_pool.remove_hdr_vlan_action;
1207 }
1208 
1209 static int
mlx5_fs_verify_insert_header_params(struct mlx5_core_dev * mdev,struct mlx5_pkt_reformat_params * params)1210 mlx5_fs_verify_insert_header_params(struct mlx5_core_dev *mdev,
1211 				    struct mlx5_pkt_reformat_params *params)
1212 {
1213 	if ((!params->data && params->size) || (params->data && !params->size) ||
1214 	    MLX5_CAP_GEN_2(mdev, max_reformat_insert_size) < params->size ||
1215 	    MLX5_CAP_GEN_2(mdev, max_reformat_insert_offset) < params->param_1) {
1216 		mlx5_core_err(mdev, "Invalid reformat params for INSERT_HDR\n");
1217 		return -EINVAL;
1218 	}
1219 	if (params->param_0 != MLX5_FS_INSERT_HDR_VLAN_ANCHOR ||
1220 	    params->param_1 != MLX5_FS_INSERT_HDR_VLAN_OFFSET ||
1221 	    params->size != MLX5_FS_INSERT_HDR_VLAN_SIZE) {
1222 		mlx5_core_err(mdev, "Only vlan insert header supported\n");
1223 		return -EOPNOTSUPP;
1224 	}
1225 	return 0;
1226 }
1227 
1228 static int
mlx5_fs_verify_encap_decap_params(struct mlx5_core_dev * dev,struct mlx5_pkt_reformat_params * params)1229 mlx5_fs_verify_encap_decap_params(struct mlx5_core_dev *dev,
1230 				  struct mlx5_pkt_reformat_params *params)
1231 {
1232 	if (params->param_0 || params->param_1) {
1233 		mlx5_core_err(dev, "Invalid reformat params\n");
1234 		return -EINVAL;
1235 	}
1236 	return 0;
1237 }
1238 
1239 static struct mlx5_fs_pool *
mlx5_fs_get_pr_encap_pool(struct mlx5_core_dev * dev,struct xarray * pr_pools,enum mlx5hws_action_type reformat_type,size_t size)1240 mlx5_fs_get_pr_encap_pool(struct mlx5_core_dev *dev, struct xarray *pr_pools,
1241 			  enum mlx5hws_action_type reformat_type, size_t size)
1242 {
1243 	struct mlx5_fs_pool *pr_pool;
1244 	unsigned long index = size;
1245 	int err;
1246 
1247 	pr_pool = xa_load(pr_pools, index);
1248 	if (pr_pool)
1249 		return pr_pool;
1250 
1251 	pr_pool = kzalloc(sizeof(*pr_pool), GFP_KERNEL);
1252 	if (!pr_pool)
1253 		return ERR_PTR(-ENOMEM);
1254 	err = mlx5_fs_hws_pr_pool_init(pr_pool, dev, size, reformat_type);
1255 	if (err)
1256 		goto free_pr_pool;
1257 	err = xa_insert(pr_pools, index, pr_pool, GFP_KERNEL);
1258 	if (err)
1259 		goto cleanup_pr_pool;
1260 	return pr_pool;
1261 
1262 cleanup_pr_pool:
1263 	mlx5_fs_hws_pr_pool_cleanup(pr_pool);
1264 free_pr_pool:
1265 	kfree(pr_pool);
1266 	return ERR_PTR(err);
1267 }
1268 
/* Remove a packet-reformat pool from its xarray and free it. The pool
 * is unpublished first so no new lookups can find it during teardown.
 */
static void
mlx5_fs_destroy_pr_pool(struct mlx5_fs_pool *pool, struct xarray *pr_pools,
			unsigned long index)
{
	xa_erase(pr_pools, index);
	mlx5_fs_hws_pr_pool_cleanup(pool);
	kfree(pool);
}
1277 
/* Allocate a packet-reformat context backed by an HWS action.
 *
 * Encap reformats come from per-size pools, L3-decap and insert-header
 * reformats use their dedicated pools, and the VLAN remove-header
 * variant uses the single shared action. For pooled reformats the
 * reformat data is copied into pr_data so it can be applied per rule.
 */
static int
mlx5_cmd_hws_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
				   struct mlx5_pkt_reformat_params *params,
				   enum mlx5_flow_namespace_type namespace,
				   struct mlx5_pkt_reformat *pkt_reformat)
{
	struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
	struct mlx5_fs_hws_actions_pool *hws_pool;
	struct mlx5hws_action *hws_action = NULL;
	struct mlx5_fs_hws_pr *pr_data = NULL;
	struct mlx5_fs_pool *pr_pool = NULL;
	struct mlx5_core_dev *dev = ns->dev;
	u8 hdr_idx = 0;
	int err;

	if (!params)
		return -EINVAL;

	hws_pool = &fs_ctx->hws_pool;

	switch (params->type) {
	case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
	case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
	case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
		if (mlx5_fs_verify_encap_decap_params(dev, params))
			return -EINVAL;
		/* L2-to-L2 encaps share per-size pools */
		pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol2tnl_pools,
						    MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
						    params->size);
		if (IS_ERR(pr_pool))
			return PTR_ERR(pr_pool);
		break;
	case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
		if (mlx5_fs_verify_encap_decap_params(dev, params))
			return -EINVAL;
		/* L2-to-L3 encaps use their own per-size pools */
		pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol3tnl_pools,
						    MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3,
						    params->size);
		if (IS_ERR(pr_pool))
			return PTR_ERR(pr_pool);
		break;
	case MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
		if (mlx5_fs_verify_encap_decap_params(dev, params))
			return -EINVAL;
		pr_pool = &hws_pool->dl3tnltol2_pool;
		/* header index selects MAC vs MAC+VLAN inner header */
		hdr_idx = params->size == ETH_HLEN ?
			  MLX5_FS_DL3TNLTOL2_MAC_HDR_IDX :
			  MLX5_FS_DL3TNLTOL2_MAC_VLAN_HDR_IDX;
		break;
	case MLX5_REFORMAT_TYPE_INSERT_HDR:
		err = mlx5_fs_verify_insert_header_params(dev, params);
		if (err)
			return err;
		pr_pool = &hws_pool->insert_hdr_pool;
		break;
	case MLX5_REFORMAT_TYPE_REMOVE_HDR:
		/* NOTE(review): when the params don't match the supported
		 * VLAN variant this only logs and falls through, returning
		 * success with a NULL hws_action - confirm this is intended.
		 */
		hws_action = mlx5_fs_get_action_remove_header_vlan(fs_ctx, params);
		if (!hws_action)
			mlx5_core_err(dev, "Only vlan remove header supported\n");
		break;
	default:
		mlx5_core_err(ns->dev, "Packet-reformat not supported(%d)\n",
			      params->type);
		return -EOPNOTSUPP;
	}

	if (pr_pool) {
		pr_data = mlx5_fs_hws_pr_pool_acquire_pr(pr_pool);
		if (IS_ERR_OR_NULL(pr_data))
			return !pr_data ? -EINVAL : PTR_ERR(pr_data);
		hws_action = pr_data->bulk->hws_action;
		if (!hws_action) {
			mlx5_core_err(dev,
				      "Failed allocating packet-reformat action\n");
			err = -EINVAL;
			goto release_pr;
		}
		/* keep a private copy of the reformat data for rule creation */
		pr_data->data = kmemdup(params->data, params->size, GFP_KERNEL);
		if (!pr_data->data) {
			err = -ENOMEM;
			goto release_pr;
		}
		pr_data->hdr_idx = hdr_idx;
		pr_data->data_size = params->size;
		pkt_reformat->fs_hws_action.pr_data = pr_data;
	}

	pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
	pkt_reformat->fs_hws_action.hws_action = hws_action;
	return 0;

release_pr:
	if (pr_pool && pr_data)
		mlx5_fs_hws_pr_pool_release_pr(pr_pool, pr_data);
	return err;
}
1374 
mlx5_cmd_hws_packet_reformat_dealloc(struct mlx5_flow_root_namespace * ns,struct mlx5_pkt_reformat * pkt_reformat)1375 static void mlx5_cmd_hws_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
1376 						 struct mlx5_pkt_reformat *pkt_reformat)
1377 {
1378 	struct mlx5_fs_hws_actions_pool *hws_pool = &ns->fs_hws_context.hws_pool;
1379 	struct mlx5_core_dev *dev = ns->dev;
1380 	struct mlx5_fs_hws_pr *pr_data;
1381 	struct mlx5_fs_pool *pr_pool;
1382 
1383 	if (pkt_reformat->reformat_type == MLX5_REFORMAT_TYPE_REMOVE_HDR)
1384 		return;
1385 
1386 	if (!pkt_reformat->fs_hws_action.pr_data) {
1387 		mlx5_core_err(ns->dev, "Failed release packet-reformat\n");
1388 		return;
1389 	}
1390 	pr_data = pkt_reformat->fs_hws_action.pr_data;
1391 
1392 	switch (pkt_reformat->reformat_type) {
1393 	case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
1394 	case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
1395 	case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
1396 		pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol2tnl_pools,
1397 						    MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
1398 						    pr_data->data_size);
1399 		break;
1400 	case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
1401 		pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol2tnl_pools,
1402 						    MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
1403 						    pr_data->data_size);
1404 		break;
1405 	case MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
1406 		pr_pool = &hws_pool->dl3tnltol2_pool;
1407 		break;
1408 	case MLX5_REFORMAT_TYPE_INSERT_HDR:
1409 		pr_pool = &hws_pool->insert_hdr_pool;
1410 		break;
1411 	default:
1412 		mlx5_core_err(ns->dev, "Unknown packet-reformat type\n");
1413 		return;
1414 	}
1415 	if (!pkt_reformat->fs_hws_action.pr_data || IS_ERR(pr_pool)) {
1416 		mlx5_core_err(ns->dev, "Failed release packet-reformat\n");
1417 		return;
1418 	}
1419 	kfree(pr_data->data);
1420 	mlx5_fs_hws_pr_pool_release_pr(pr_pool, pr_data);
1421 	pkt_reformat->fs_hws_action.pr_data = NULL;
1422 }
1423 
1424 static struct mlx5_fs_pool *
mlx5_fs_create_mh_pool(struct mlx5_core_dev * dev,struct mlx5hws_action_mh_pattern * pattern,struct xarray * mh_pools,unsigned long index)1425 mlx5_fs_create_mh_pool(struct mlx5_core_dev *dev,
1426 		       struct mlx5hws_action_mh_pattern *pattern,
1427 		       struct xarray *mh_pools, unsigned long index)
1428 {
1429 	struct mlx5_fs_pool *pool;
1430 	int err;
1431 
1432 	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
1433 	if (!pool)
1434 		return ERR_PTR(-ENOMEM);
1435 	err = mlx5_fs_hws_mh_pool_init(pool, dev, pattern);
1436 	if (err)
1437 		goto free_pool;
1438 	err = xa_insert(mh_pools, index, pool, GFP_KERNEL);
1439 	if (err)
1440 		goto cleanup_pool;
1441 	return pool;
1442 
1443 cleanup_pool:
1444 	mlx5_fs_hws_mh_pool_cleanup(pool);
1445 free_pool:
1446 	kfree(pool);
1447 	return ERR_PTR(err);
1448 }
1449 
/* Remove a modify-header pool from its xarray and free it. The pool is
 * unpublished first so no new lookups can find it during teardown.
 */
static void
mlx5_fs_destroy_mh_pool(struct mlx5_fs_pool *pool, struct xarray *mh_pools,
			unsigned long index)
{
	xa_erase(mh_pools, index);
	mlx5_fs_hws_mh_pool_cleanup(pool);
	kfree(pool);
}
1458 
/* Allocate a modify-header context backed by a pooled HWS action.
 *
 * Modify-header pools are keyed by the action pattern: an existing pool
 * with a matching pattern is reused, otherwise a new pool is created.
 * The raw modify actions are copied so they can be applied per rule.
 */
static int mlx5_cmd_hws_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
					    u8 namespace, u8 num_actions,
					    void *modify_actions,
					    struct mlx5_modify_hdr *modify_hdr)
{
	struct mlx5_fs_hws_actions_pool *hws_pool = &ns->fs_hws_context.hws_pool;
	struct mlx5hws_action_mh_pattern pattern = {};
	struct mlx5_fs_hws_mh *mh_data = NULL;
	struct mlx5hws_action *hws_action;
	struct mlx5_fs_pool *pool;
	unsigned long i, cnt = 0;
	bool known_pattern;
	int err;

	pattern.sz = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) * num_actions;
	pattern.data = modify_actions;

	/* Look for an existing pool whose pattern matches this request */
	known_pattern = false;
	xa_for_each(&hws_pool->mh_pools, i, pool) {
		if (mlx5_fs_hws_mh_pool_match(pool, &pattern)) {
			known_pattern = true;
			break;
		}
		cnt++;
	}

	if (!known_pattern) {
		/* NOTE(review): cnt counts iterated entries, not the next
		 * free xarray index - if pools were ever erased sparsely
		 * this could collide with an occupied slot and xa_insert()
		 * would fail. Confirm pools are only destroyed wholesale.
		 */
		pool = mlx5_fs_create_mh_pool(ns->dev, &pattern,
					      &hws_pool->mh_pools, cnt);
		if (IS_ERR(pool))
			return PTR_ERR(pool);
	}
	mh_data = mlx5_fs_hws_mh_pool_acquire_mh(pool);
	if (IS_ERR(mh_data)) {
		err = PTR_ERR(mh_data);
		goto destroy_pool;
	}
	hws_action = mh_data->bulk->hws_action;
	/* Keep a private copy of the pattern for per-rule application */
	mh_data->data = kmemdup(pattern.data, pattern.sz, GFP_KERNEL);
	if (!mh_data->data) {
		err = -ENOMEM;
		goto release_mh;
	}
	modify_hdr->fs_hws_action.mh_data = mh_data;
	modify_hdr->fs_hws_action.fs_pool = pool;
	modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
	modify_hdr->fs_hws_action.hws_action = hws_action;

	return 0;

release_mh:
	mlx5_fs_hws_mh_pool_release_mh(pool, mh_data);
destroy_pool:
	/* Only tear down a pool that was created by this call */
	if (!known_pattern)
		mlx5_fs_destroy_mh_pool(pool, &hws_pool->mh_pools, cnt);
	return err;
}
1516 
mlx5_cmd_hws_modify_header_dealloc(struct mlx5_flow_root_namespace * ns,struct mlx5_modify_hdr * modify_hdr)1517 static void mlx5_cmd_hws_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
1518 					       struct mlx5_modify_hdr *modify_hdr)
1519 {
1520 	struct mlx5_fs_hws_mh *mh_data;
1521 	struct mlx5_fs_pool *pool;
1522 
1523 	if (!modify_hdr->fs_hws_action.fs_pool || !modify_hdr->fs_hws_action.mh_data) {
1524 		mlx5_core_err(ns->dev, "Failed release modify-header\n");
1525 		return;
1526 	}
1527 
1528 	mh_data = modify_hdr->fs_hws_action.mh_data;
1529 	kfree(mh_data->data);
1530 	pool = modify_hdr->fs_hws_action.fs_pool;
1531 	mlx5_fs_hws_mh_pool_release_mh(pool, mh_data);
1532 	modify_hdr->fs_hws_action.mh_data = NULL;
1533 }
1534 
/* Match definers are not supported with HW steering */
static int mlx5_cmd_hws_create_match_definer(struct mlx5_flow_root_namespace *ns,
					     u16 format_id, u32 *match_mask)
{
	return -EOPNOTSUPP;
}
1540 
/* Match definers are not supported with HW steering */
static int mlx5_cmd_hws_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
					      int definer_id)
{
	return -EOPNOTSUPP;
}
1546 
mlx5_cmd_hws_get_capabilities(struct mlx5_flow_root_namespace * ns,enum fs_flow_table_type ft_type)1547 static u32 mlx5_cmd_hws_get_capabilities(struct mlx5_flow_root_namespace *ns,
1548 					 enum fs_flow_table_type ft_type)
1549 {
1550 	if (ft_type != FS_FT_FDB)
1551 		return 0;
1552 
1553 	return MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX |
1554 	       MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX |
1555 	       MLX5_FLOW_STEERING_CAP_MATCH_RANGES;
1556 }
1557 
/* Report whether HW steering is supported on this device */
bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev)
{
	return mlx5hws_is_supported(dev);
}
1562 
/* Flow-steering command callbacks implemented on top of HW steering.
 * Match-definer operations are stubs returning -EOPNOTSUPP.
 */
static const struct mlx5_flow_cmds mlx5_flow_cmds_hws = {
	.create_flow_table = mlx5_cmd_hws_create_flow_table,
	.destroy_flow_table = mlx5_cmd_hws_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_hws_modify_flow_table,
	.update_root_ft = mlx5_cmd_hws_update_root_ft,
	.create_flow_group = mlx5_cmd_hws_create_flow_group,
	.destroy_flow_group = mlx5_cmd_hws_destroy_flow_group,
	.create_fte = mlx5_cmd_hws_create_fte,
	.delete_fte = mlx5_cmd_hws_delete_fte,
	.update_fte = mlx5_cmd_hws_update_fte,
	.packet_reformat_alloc = mlx5_cmd_hws_packet_reformat_alloc,
	.packet_reformat_dealloc = mlx5_cmd_hws_packet_reformat_dealloc,
	.modify_header_alloc = mlx5_cmd_hws_modify_header_alloc,
	.modify_header_dealloc = mlx5_cmd_hws_modify_header_dealloc,
	.create_match_definer = mlx5_cmd_hws_create_match_definer,
	.destroy_match_definer = mlx5_cmd_hws_destroy_match_definer,
	.create_ns = mlx5_cmd_hws_create_ns,
	.destroy_ns = mlx5_cmd_hws_destroy_ns,
	.set_peer = mlx5_cmd_hws_set_peer,
	.get_capabilities = mlx5_cmd_hws_get_capabilities,
};
1584 
/* Return the HWS-backed flow command set */
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_hws_cmds(void)
{
	return &mlx5_flow_cmds_hws;
}
1589