// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */

#include "internal.h"

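/* Argument size helpers. An argument object is built from 64-byte
 * chunks (MLX5HWS_ARG_DATA_SIZE); the chunk-size enum is used as the
 * log2 of the chunk count, which is why the size helpers return
 * BIT(log_size). For example, assuming MLX5HWS_ARG_CHUNK_SIZE_1 is 0
 * (as the log-size usage implies), a 100-byte argument rounds up to
 * two chunks (128 bytes), i.e. MLX5HWS_ARG_CHUNK_SIZE_2.
 */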
enum mlx5hws_arg_chunk_size
mlx5hws_arg_data_size_to_arg_log_size(u16 data_size)
{
	/* Return the roundup of log2(data_size) */
	if (data_size <= MLX5HWS_ARG_DATA_SIZE)
		return MLX5HWS_ARG_CHUNK_SIZE_1;
	if (data_size <= MLX5HWS_ARG_DATA_SIZE * 2)
		return MLX5HWS_ARG_CHUNK_SIZE_2;
	if (data_size <= MLX5HWS_ARG_DATA_SIZE * 4)
		return MLX5HWS_ARG_CHUNK_SIZE_3;
	if (data_size <= MLX5HWS_ARG_DATA_SIZE * 8)
		return MLX5HWS_ARG_CHUNK_SIZE_4;

	return MLX5HWS_ARG_CHUNK_SIZE_MAX;
}

u32 mlx5hws_arg_data_size_to_arg_size(u16 data_size)
{
	return BIT(mlx5hws_arg_data_size_to_arg_log_size(data_size));
}

enum mlx5hws_arg_chunk_size
mlx5hws_arg_get_arg_log_size(u16 num_of_actions)
{
	return mlx5hws_arg_data_size_to_arg_log_size(num_of_actions *
						     MLX5HWS_MODIFY_ACTION_SIZE);
}

u32 mlx5hws_arg_get_arg_size(u16 num_of_actions)
{
	return BIT(mlx5hws_arg_get_arg_log_size(num_of_actions));
}

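/* Check whether a modify-header pattern forces the HW to reparse the
 * packet. SET/ADD/COPY/ADD_FIELD actions only need a reparse when they
 * write a field that changes the packet structure (ethertype or IPv6
 * next header); insert, remove and unknown action types always do.
 */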
bool mlx5hws_pat_require_reparse(__be64 *actions, u16 num_of_actions)
{
	u16 i, field;
	u8 action_id;

	for (i = 0; i < num_of_actions; i++) {
		action_id = MLX5_GET(set_action_in, &actions[i], action_type);

		switch (action_id) {
		case MLX5_MODIFICATION_TYPE_NOP:
			field = MLX5_MODI_OUT_NONE;
			break;

		case MLX5_MODIFICATION_TYPE_SET:
		case MLX5_MODIFICATION_TYPE_ADD:
			field = MLX5_GET(set_action_in, &actions[i], field);
			break;

		case MLX5_MODIFICATION_TYPE_COPY:
		case MLX5_MODIFICATION_TYPE_ADD_FIELD:
			field = MLX5_GET(copy_action_in, &actions[i], dst_field);
			break;

		default:
			/* Insert/Remove/Unknown actions require reparse */
			return true;
		}

		/* The fields below can change the packet structure and require a reparse */
		if (field == MLX5_MODI_OUT_ETHERTYPE ||
		    field == MLX5_MODI_OUT_IPV6_NEXT_HDR)
			return true;
	}

	return false;
}

/* Cache and cache element handling */
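/* The pattern cache deduplicates modify-header pattern FW objects:
 * identical patterns share a single FW object tracked by a refcount,
 * and the list is kept in LRU order (most recently used first).
 */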
int mlx5hws_pat_init_pattern_cache(struct mlx5hws_pattern_cache **cache)
{
	struct mlx5hws_pattern_cache *new_cache;

	new_cache = kzalloc(sizeof(*new_cache), GFP_KERNEL);
	if (!new_cache)
		return -ENOMEM;

	INIT_LIST_HEAD(&new_cache->ptrn_list);
	mutex_init(&new_cache->lock);

	*cache = new_cache;

	return 0;
}

void mlx5hws_pat_uninit_pattern_cache(struct mlx5hws_pattern_cache *cache)
{
	mutex_destroy(&cache->lock);
	kfree(cache);
}

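/* Two patterns are equal if they have the same number of actions and
 * each action matches. For COPY and ADD_FIELD the full 8-byte action
 * must be identical, since both halves describe fields; for the other
 * types only the 4-byte control word (action type, field, offset,
 * length) is compared, presumably because the data value is supplied
 * by a separate argument object rather than by the pattern itself.
 */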
static bool mlx5hws_pat_compare_pattern(int cur_num_of_actions,
					__be64 cur_actions[],
					int num_of_actions,
					__be64 actions[])
{
	int i;

	if (cur_num_of_actions != num_of_actions)
		return false;

	for (i = 0; i < num_of_actions; i++) {
		u8 action_id =
			MLX5_GET(set_action_in, &actions[i], action_type);

		if (action_id == MLX5_MODIFICATION_TYPE_COPY ||
		    action_id == MLX5_MODIFICATION_TYPE_ADD_FIELD) {
			if (actions[i] != cur_actions[i])
				return false;
		} else {
			/* Compare just the control, not the values */
			if ((__force __be32)actions[i] !=
			    (__force __be32)cur_actions[i])
				return false;
		}
	}

	return true;
}

static struct mlx5hws_pattern_cache_item *
mlx5hws_pat_find_cached_pattern(struct mlx5hws_pattern_cache *cache,
				u16 num_of_actions,
				__be64 *actions)
{
	struct mlx5hws_pattern_cache_item *cached_pat;

	list_for_each_entry(cached_pat, &cache->ptrn_list, ptrn_list_node) {
		if (mlx5hws_pat_compare_pattern(cached_pat->mh_data.num_of_actions,
						(__be64 *)cached_pat->mh_data.data,
						num_of_actions,
						actions))
			return cached_pat;
	}

	return NULL;
}

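/* Look the pattern up in the cache; on a hit, take a reference and
 * move the entry to the head of the list to keep LRU order.
 */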
static struct mlx5hws_pattern_cache_item *
mlx5hws_pat_get_existing_cached_pattern(struct mlx5hws_pattern_cache *cache,
					u16 num_of_actions,
					__be64 *actions)
{
	struct mlx5hws_pattern_cache_item *cached_pattern;

	cached_pattern = mlx5hws_pat_find_cached_pattern(cache, num_of_actions, actions);
	if (cached_pattern) {
		/* LRU: move it to be first in the list */
		list_move(&cached_pattern->ptrn_list_node, &cache->ptrn_list);
		cached_pattern->refcount++;
	}

	return cached_pattern;
}

static struct mlx5hws_pattern_cache_item *
mlx5hws_pat_add_pattern_to_cache(struct mlx5hws_pattern_cache *cache,
				 u32 pattern_id,
				 u16 num_of_actions,
				 __be64 *actions)
{
	struct mlx5hws_pattern_cache_item *cached_pattern;

	cached_pattern = kzalloc(sizeof(*cached_pattern), GFP_KERNEL);
	if (!cached_pattern)
		return NULL;

	cached_pattern->mh_data.num_of_actions = num_of_actions;
	cached_pattern->mh_data.pattern_id = pattern_id;
	cached_pattern->mh_data.data =
		kmemdup(actions, num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE, GFP_KERNEL);
	if (!cached_pattern->mh_data.data)
		goto free_cached_obj;

	list_add(&cached_pattern->ptrn_list_node, &cache->ptrn_list);
	cached_pattern->refcount = 1;

	return cached_pattern;

free_cached_obj:
	kfree(cached_pattern);
	return NULL;
}

static struct mlx5hws_pattern_cache_item *
mlx5hws_pat_find_cached_pattern_by_id(struct mlx5hws_pattern_cache *cache,
				      u32 ptrn_id)
{
	struct mlx5hws_pattern_cache_item *cached_pattern;

	list_for_each_entry(cached_pattern, &cache->ptrn_list, ptrn_list_node) {
		if (cached_pattern->mh_data.pattern_id == ptrn_id)
			return cached_pattern;
	}

	return NULL;
}

static void
mlx5hws_pat_remove_pattern(struct mlx5hws_pattern_cache_item *cached_pattern)
{
	list_del_init(&cached_pattern->ptrn_list_node);

	kfree(cached_pattern->mh_data.data);
	kfree(cached_pattern);
}

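/* Drop a reference to a cached pattern. When the last reference goes
 * away, the entry is removed from the cache and the FW pattern object
 * is destroyed.
 */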
void mlx5hws_pat_put_pattern(struct mlx5hws_context *ctx, u32 ptrn_id)
{
	struct mlx5hws_pattern_cache *cache = ctx->pattern_cache;
	struct mlx5hws_pattern_cache_item *cached_pattern;

	mutex_lock(&cache->lock);
	cached_pattern = mlx5hws_pat_find_cached_pattern_by_id(cache, ptrn_id);
	if (!cached_pattern) {
		mlx5hws_err(ctx, "Failed to find cached pattern with provided ID\n");
		pr_warn("HWS: pattern ID %u is not found\n", ptrn_id);
		goto out;
	}

	if (--cached_pattern->refcount)
		goto out;

	mlx5hws_pat_remove_pattern(cached_pattern);
	mlx5hws_cmd_header_modify_pattern_destroy(ctx->mdev, ptrn_id);

out:
	mutex_unlock(&cache->lock);
}

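/* Get a pattern ID for the given actions: reuse a cached FW object if
 * an equivalent pattern exists, otherwise create a new FW object and
 * cache it. The cache lock covers both lookup and insertion, so
 * concurrent callers cannot create duplicate FW objects.
 */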
int mlx5hws_pat_get_pattern(struct mlx5hws_context *ctx,
			    __be64 *pattern, size_t pattern_sz,
			    u32 *pattern_id)
{
	u16 num_of_actions = pattern_sz / MLX5HWS_MODIFY_ACTION_SIZE;
	struct mlx5hws_pattern_cache_item *cached_pattern;
	u32 ptrn_id = 0;
	int ret = 0;

	mutex_lock(&ctx->pattern_cache->lock);

	cached_pattern = mlx5hws_pat_get_existing_cached_pattern(ctx->pattern_cache,
								 num_of_actions,
								 pattern);
	if (cached_pattern) {
		*pattern_id = cached_pattern->mh_data.pattern_id;
		goto out_unlock;
	}

	ret = mlx5hws_cmd_header_modify_pattern_create(ctx->mdev,
						       pattern_sz,
						       (u8 *)pattern,
						       &ptrn_id);
	if (ret) {
		mlx5hws_err(ctx, "Failed to create pattern FW object\n");
		goto out_unlock;
	}

	cached_pattern = mlx5hws_pat_add_pattern_to_cache(ctx->pattern_cache,
							  ptrn_id,
							  num_of_actions,
							  pattern);
	if (!cached_pattern) {
		mlx5hws_err(ctx, "Failed to add pattern to cache\n");
		ret = -EINVAL;
		goto clean_pattern;
	}

	mutex_unlock(&ctx->pattern_cache->lock);
	*pattern_id = ptrn_id;

	return ret;

clean_pattern:
	mlx5hws_cmd_header_modify_pattern_destroy(ctx->mdev, ptrn_id);
out_unlock:
	mutex_unlock(&ctx->pattern_cache->lock);
	return ret;
}

static void
mlx5d_arg_init_send_attr(struct mlx5hws_send_engine_post_attr *send_attr,
			 void *comp_data,
			 u32 arg_idx)
{
	send_attr->opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
	send_attr->opmod = MLX5HWS_WQE_GTA_OPMOD_MOD_ARG;
	send_attr->len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
	send_attr->id = arg_idx;
	send_attr->user_data = comp_data;
}

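/* Post a single argument-write WQE for a decap-L3 action. Unlike
 * mlx5hws_arg_write() below, the payload is not copied verbatim:
 * mlx5hws_action_prepare_decap_l3_data() rearranges the header data
 * into the layout the argument object expects.
 */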
void mlx5hws_arg_decapl3_write(struct mlx5hws_send_engine *queue,
			       u32 arg_idx,
			       u8 *arg_data,
			       u16 num_of_actions)
{
	struct mlx5hws_send_engine_post_attr send_attr = {0};
	struct mlx5hws_wqe_gta_data_seg_arg *wqe_arg = NULL;
	struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl = NULL;
	struct mlx5hws_send_engine_post_ctrl ctrl;
	size_t wqe_len;

	mlx5d_arg_init_send_attr(&send_attr, NULL, arg_idx);

	ctrl = mlx5hws_send_engine_post_start(queue);
	mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
	memset(wqe_ctrl, 0, wqe_len);
	mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
	mlx5hws_action_prepare_decap_l3_data(arg_data, (u8 *)wqe_arg,
					     num_of_actions);
	mlx5hws_send_engine_post_end(&ctrl, &send_attr);
}

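/* Write an argument's data, one 64-byte WQE per chunk, with each chunk
 * posted to a consecutive argument index. For example, a 100-byte
 * write to arg_idx N is split into one full 64-byte WQE at N and a
 * 36-byte leftover WQE at N + 1. Completion, if requested, is reported
 * through comp_data.
 */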
void mlx5hws_arg_write(struct mlx5hws_send_engine *queue,
		       void *comp_data,
		       u32 arg_idx,
		       u8 *arg_data,
		       size_t data_size)
{
	struct mlx5hws_send_engine_post_attr send_attr = {0};
	struct mlx5hws_wqe_gta_data_seg_arg *wqe_arg;
	struct mlx5hws_send_engine_post_ctrl ctrl;
	struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
	int i, full_iter, leftover;
	size_t wqe_len;

	mlx5d_arg_init_send_attr(&send_attr, comp_data, arg_idx);

	/* Each WQE can hold 64B of data; larger writes need multiple WQEs */
	full_iter = data_size / MLX5HWS_ARG_DATA_SIZE;
	leftover = data_size & (MLX5HWS_ARG_DATA_SIZE - 1);

	for (i = 0; i < full_iter; i++) {
		ctrl = mlx5hws_send_engine_post_start(queue);
		mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
		memset(wqe_ctrl, 0, wqe_len);
		mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
		memcpy(wqe_arg, arg_data, MLX5HWS_ARG_DATA_SIZE);
		send_attr.id = arg_idx++;
		mlx5hws_send_engine_post_end(&ctrl, &send_attr);

		/* Move to next argument data */
		arg_data += MLX5HWS_ARG_DATA_SIZE;
	}

	if (leftover) {
		ctrl = mlx5hws_send_engine_post_start(queue);
		mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
		memset(wqe_ctrl, 0, wqe_len);
		mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
		memcpy(wqe_arg, arg_data, leftover);
		send_attr.id = arg_idx;
		mlx5hws_send_engine_post_end(&ctrl, &send_attr);
	}
}

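/* Synchronously write argument data through the control queue (the
 * last send queue is reserved for control traffic), then drain the
 * queue so the data has reached HW before returning.
 */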
int mlx5hws_arg_write_inline_arg_data(struct mlx5hws_context *ctx,
				      u32 arg_idx,
				      u8 *arg_data,
				      size_t data_size)
{
	struct mlx5hws_send_engine *queue;
	int ret;

	mutex_lock(&ctx->ctrl_lock);

	/* Get the control queue */
	queue = &ctx->send_queue[ctx->queues - 1];

	mlx5hws_arg_write(queue, arg_data, arg_idx, arg_data, data_size);

	mlx5hws_send_engine_flush_queue(queue);

	/* Poll for completion */
	ret = mlx5hws_send_queue_action(ctx, ctx->queues - 1,
					MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC);
	if (ret)
		mlx5hws_err(ctx, "Failed to drain arg queue\n");

	mutex_unlock(&ctx->ctrl_lock);

	return ret;
}

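/* Note: despite the name, arg_size here is a log2 size, checked
 * against the FW's log granularity and max-allocation capabilities.
 */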
bool mlx5hws_arg_is_valid_arg_request_size(struct mlx5hws_context *ctx,
					   u32 arg_size)
{
	if (arg_size < ctx->caps->log_header_modify_argument_granularity ||
	    arg_size > ctx->caps->log_header_modify_argument_max_alloc)
		return false;

	return true;
}

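/* Allocate a bulk of argument objects: the FW allocation order is the
 * log size of a single argument (derived from data_sz) plus
 * log_bulk_sz. If write_data is set, the initial data is also written
 * to the first argument in the bulk.
 */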
int mlx5hws_arg_create(struct mlx5hws_context *ctx,
		       u8 *data,
		       size_t data_sz,
		       u32 log_bulk_sz,
		       bool write_data,
		       u32 *arg_id)
{
	u16 single_arg_log_sz;
	u16 multi_arg_log_sz;
	int ret;
	u32 id;

	single_arg_log_sz = mlx5hws_arg_data_size_to_arg_log_size(data_sz);
	multi_arg_log_sz = single_arg_log_sz + log_bulk_sz;

	if (single_arg_log_sz >= MLX5HWS_ARG_CHUNK_SIZE_MAX) {
		mlx5hws_err(ctx, "Requested single arg %u not supported\n", single_arg_log_sz);
		return -EOPNOTSUPP;
	}

	if (!mlx5hws_arg_is_valid_arg_request_size(ctx, multi_arg_log_sz)) {
		mlx5hws_err(ctx, "Argument log size %d not supported by FW\n", multi_arg_log_sz);
		return -EOPNOTSUPP;
	}

	/* Alloc bulk of args */
	ret = mlx5hws_cmd_arg_create(ctx->mdev, multi_arg_log_sz, ctx->pd_num, &id);
	if (ret) {
		mlx5hws_err(ctx, "Failed allocating arg in order: %d\n", multi_arg_log_sz);
		return ret;
	}

	if (write_data) {
		ret = mlx5hws_arg_write_inline_arg_data(ctx, id,
							data, data_sz);
		if (ret) {
			mlx5hws_err(ctx, "Failed writing arg data\n");
			mlx5hws_cmd_arg_destroy(ctx->mdev, id);
			return ret;
		}
	}

	*arg_id = id;
	return ret;
}

void mlx5hws_arg_destroy(struct mlx5hws_context *ctx, u32 arg_id)
{
	mlx5hws_cmd_arg_destroy(ctx->mdev, arg_id);
}

int mlx5hws_arg_create_modify_header_arg(struct mlx5hws_context *ctx,
					 __be64 *data,
					 u8 num_of_actions,
					 u32 log_bulk_sz,
					 bool write_data,
					 u32 *arg_id)
{
	size_t data_sz = num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE;
	int ret;

	ret = mlx5hws_arg_create(ctx,
				 (u8 *)data,
				 data_sz,
				 log_bulk_sz,
				 write_data,
				 arg_id);
	if (ret)
		mlx5hws_err(ctx, "Failed creating modify header arg\n");

	return ret;
}

static int
hws_action_modify_check_field_limitation(u8 action_type, __be64 *pattern)
{
	/* Field limitations should be checked here; for now, accept everything */
	return 0;
}

#define INVALID_FIELD 0xffff

static void
hws_action_modify_get_target_fields(u8 action_type, __be64 *pattern,
				    u16 *src_field, u16 *dst_field)
{
	switch (action_type) {
	case MLX5_ACTION_TYPE_SET:
	case MLX5_ACTION_TYPE_ADD:
		*src_field = MLX5_GET(set_action_in, pattern, field);
		*dst_field = INVALID_FIELD;
		break;
	case MLX5_ACTION_TYPE_COPY:
		*src_field = MLX5_GET(copy_action_in, pattern, src_field);
		*dst_field = MLX5_GET(copy_action_in, pattern, dst_field);
		break;
	default:
		pr_warn("HWS: invalid modify header action type %d\n", action_type);
	}
}

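/* Validate every action in the pattern: the action type must be a
 * known modification type, and each action must pass the (currently
 * stubbed) per-field limitation check.
 */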
bool mlx5hws_pat_verify_actions(struct mlx5hws_context *ctx, __be64 pattern[], size_t sz)
{
	size_t i;

	for (i = 0; i < sz / MLX5HWS_MODIFY_ACTION_SIZE; i++) {
		u8 action_type =
			MLX5_GET(set_action_in, &pattern[i], action_type);

		if (action_type >= MLX5_MODIFICATION_TYPE_MAX) {
			mlx5hws_err(ctx, "Unsupported action id %d\n", action_type);
			return false;
		}
		if (hws_action_modify_check_field_limitation(action_type, &pattern[i])) {
			mlx5hws_err(ctx, "Unsupported action number %zu\n", i);
			return false;
		}
	}

	return true;
}

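/* Expand a pattern with NOPs where the HW needs them. The i % 2 check
 * suggests actions are consumed in pairs, and a pair apparently must
 * not touch the same field twice, so a NOP is inserted before an
 * odd-indexed action that reuses the previous action's field: e.g.
 * {SET ttl, SET ttl} becomes {SET ttl, NOP, SET ttl}, with the NOP
 * position recorded in the nope_location bitmask. If the expanded
 * pattern would exceed max_actions, the size and NOP mask are reset so
 * the caller falls back to the original pattern.
 */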
void mlx5hws_pat_calc_nope(__be64 *pattern, size_t num_actions,
			   size_t max_actions, size_t *new_size,
			   u32 *nope_location, __be64 *new_pat)
{
	u16 prev_src_field = 0, prev_dst_field = 0;
	u16 src_field, dst_field;
	u8 action_type;
	size_t i, j;

	*new_size = num_actions;
	*nope_location = 0;

	if (num_actions == 1)
		return;

	for (i = 0, j = 0; i < num_actions; i++, j++) {
		action_type = MLX5_GET(set_action_in, &pattern[i], action_type);

		hws_action_modify_get_target_fields(action_type, &pattern[i],
						    &src_field, &dst_field);
		if (i % 2) {
			if (action_type == MLX5_ACTION_TYPE_COPY &&
			    (prev_src_field == src_field ||
			     prev_dst_field == dst_field)) {
				/* The COPY reuses a field of the previous action, need a NOP */
				*new_size += 1;
				*nope_location |= BIT(i);
				memset(&new_pat[j], 0, MLX5HWS_MODIFY_ACTION_SIZE);
				MLX5_SET(set_action_in, &new_pat[j],
					 action_type,
					 MLX5_MODIFICATION_TYPE_NOP);
				j++;
			} else if (prev_src_field == src_field) {
				/* Same field as the previous action, need a NOP */
				*new_size += 1;
				*nope_location |= BIT(i);
				MLX5_SET(set_action_in, &new_pat[j],
					 action_type,
					 MLX5_MODIFICATION_TYPE_NOP);
				j++;
			}
		}
		memcpy(&new_pat[j], &pattern[i], MLX5HWS_MODIFY_ACTION_SIZE);
		/* Check whether we ran out of space */
		if (j > max_actions) {
			*new_size = num_actions;
			*nope_location = 0;
			return;
		}

		prev_src_field = src_field;
		prev_dst_field = dst_field;
	}
}