// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#include <linux/mlx5/device.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/xarray.h>
#include <linux/if_vlan.h>

#include "en.h"
#include "lib/aso.h"
#include "lib/crypto.h"
#include "en_accel/macsec.h"

#define MLX5_MACSEC_EPN_SCOPE_MID 0x80000000L
#define MLX5E_MACSEC_ASO_CTX_SZ MLX5_ST_SZ_BYTES(macsec_aso)

enum mlx5_macsec_aso_event_arm {
	MLX5E_ASO_EPN_ARM = BIT(0),
};

enum {
	MLX5_MACSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
};

struct mlx5e_macsec_handle {
	struct mlx5e_macsec *macsec;
	u32 obj_id;
	u8 idx;
};

enum {
	MLX5_MACSEC_EPN,
};

struct mlx5e_macsec_aso_out {
	u8 event_arm;
	u32 mode_param;
};

struct mlx5e_macsec_aso_in {
	u8 mode;
	u32 obj_id;
};

struct mlx5e_macsec_epn_state {
	u32 epn_msb;
	u8 epn_enabled;
	u8 overlap;
};

struct mlx5e_macsec_async_work {
	struct mlx5e_macsec *macsec;
	struct mlx5_core_dev *mdev;
	struct work_struct work;
	u32 obj_id;
};

struct mlx5e_macsec_sa {
	bool active;
	u8  assoc_num;
	u32 macsec_obj_id;
	u32 enc_key_id;
	u32 next_pn;
	sci_t sci;
	ssci_t ssci;
	salt_t salt;

	union mlx5_macsec_rule *macsec_rule;
	struct rcu_head rcu_head;
	struct mlx5e_macsec_epn_state epn_state;
};

struct mlx5e_macsec_rx_sc;
struct mlx5e_macsec_rx_sc_xarray_element {
	u32 fs_id;
	struct mlx5e_macsec_rx_sc *rx_sc;
};

struct mlx5e_macsec_rx_sc {
	bool active;
	sci_t sci;
	struct mlx5e_macsec_sa *rx_sa[MACSEC_NUM_AN];
	struct list_head rx_sc_list_element;
	struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
	struct metadata_dst *md_dst;
	struct rcu_head rcu_head;
};

struct mlx5e_macsec_umr {
	u8 __aligned(64) ctx[MLX5_ST_SZ_BYTES(macsec_aso)];
	dma_addr_t dma_addr;
	u32 mkey;
};

struct mlx5e_macsec_aso {
	/* ASO */
	struct mlx5_aso *maso;
	/* Protects macsec ASO */
	struct mutex aso_lock;
	/* UMR */
	struct mlx5e_macsec_umr *umr;

	u32 pdn;
};

struct mlx5e_macsec_device {
	const struct net_device *netdev;
	struct mlx5e_macsec_sa *tx_sa[MACSEC_NUM_AN];
	struct list_head macsec_rx_sc_list_head;
	unsigned char *dev_addr;
	struct list_head macsec_device_list_element;
};

struct mlx5e_macsec {
	struct list_head macsec_device_list_head;
	int num_of_devices;
	struct mutex lock; /* Protects mlx5e_macsec internal contexts */

	/* Rx fs_id -> rx_sc mapping */
	struct xarray sc_xarray;

	struct mlx5_core_dev *mdev;

	/* ASO */
	struct mlx5e_macsec_aso aso;

	struct notifier_block nb;
	struct workqueue_struct *wq;
};

struct mlx5_macsec_obj_attrs {
	u32 aso_pdn;
	u32 next_pn;
	__be64 sci;
	u32 enc_key_id;
	bool encrypt;
	struct mlx5e_macsec_epn_state epn_state;
	salt_t salt;
	__be32 ssci;
	bool replay_protect;
	u32 replay_window;
};

struct mlx5_aso_ctrl_param {
	u8   data_mask_mode;
	u8   condition_0_operand;
	u8   condition_1_operand;
	u8   condition_0_offset;
	u8   condition_1_offset;
	u8   data_offset;
	u8   condition_operand;
	u32  condition_0_data;
	u32  condition_0_mask;
	u32  condition_1_data;
	u32  condition_1_mask;
	u64  bitwise_data;
	u64  data_mask;
};

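/* DMA-map the local macsec_aso scratch buffer and create an mkey on the ASO
 * PD, so that ASO WQEs can write the MACsec object state back to host memory
 * for later inspection (see macsec_aso_query()).
 */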
static int mlx5e_macsec_aso_reg_mr(struct mlx5_core_dev *mdev, struct mlx5e_macsec_aso *aso)
{
	struct mlx5e_macsec_umr *umr;
	struct device *dma_device;
	dma_addr_t dma_addr;
	int err;

	umr = kzalloc(sizeof(*umr), GFP_KERNEL);
	if (!umr) {
		err = -ENOMEM;
		return err;
	}

	dma_device = mlx5_core_dma_dev(mdev);
	dma_addr = dma_map_single(dma_device, umr->ctx, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
	err = dma_mapping_error(dma_device, dma_addr);
	if (err) {
		mlx5_core_err(mdev, "Can't map dma device, err=%d\n", err);
		goto out_dma;
	}

	err = mlx5e_create_mkey(mdev, aso->pdn, &umr->mkey);
	if (err) {
		mlx5_core_err(mdev, "Can't create mkey, err=%d\n", err);
		goto out_mkey;
	}

	umr->dma_addr = dma_addr;

	aso->umr = umr;

	return 0;

out_mkey:
	dma_unmap_single(dma_device, dma_addr, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
out_dma:
	kfree(umr);
	return err;
}

static void mlx5e_macsec_aso_dereg_mr(struct mlx5_core_dev *mdev, struct mlx5e_macsec_aso *aso)
{
	struct mlx5e_macsec_umr *umr = aso->umr;

	mlx5_core_destroy_mkey(mdev, umr->mkey);
	dma_unmap_single(&mdev->pdev->dev, umr->dma_addr, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
	kfree(umr);
}

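/* Translate the requested replay window size into the device encoding and
 * switch the ASO context into replay-protection mode. Only the window sizes
 * the hardware supports (32/64/128/256 bits) are accepted.
 */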
static int macsec_set_replay_protection(struct mlx5_macsec_obj_attrs *attrs, void *aso_ctx)
{
	u8 window_sz;

	if (!attrs->replay_protect)
		return 0;

	switch (attrs->replay_window) {
	case 256:
		window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_256BIT;
		break;
	case 128:
		window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_128BIT;
		break;
	case 64:
		window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_64BIT;
		break;
	case 32:
		window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_32BIT;
		break;
	default:
		return -EINVAL;
	}
	MLX5_SET(macsec_aso, aso_ctx, window_size, window_sz);
	MLX5_SET(macsec_aso, aso_ctx, mode, MLX5_MACSEC_ASO_REPLAY_PROTECTION);

	return 0;
}

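/* Build and execute the CREATE_GENERAL_OBJECT command for a MACsec offload
 * object. Tx objects use the ASO in packet-number increment mode, Rx objects
 * optionally in replay-protection mode; for XPN the EPN MSBs, overlap bit,
 * SSCI and salt are programmed as well.
 */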
static int mlx5e_macsec_create_object(struct mlx5_core_dev *mdev,
				      struct mlx5_macsec_obj_attrs *attrs,
				      bool is_tx,
				      u32 *macsec_obj_id)
{
	u32 in[MLX5_ST_SZ_DW(create_macsec_obj_in)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	void *aso_ctx;
	void *obj;
	int err;

	obj = MLX5_ADDR_OF(create_macsec_obj_in, in, macsec_object);
	aso_ctx = MLX5_ADDR_OF(macsec_offload_obj, obj, macsec_aso);

	MLX5_SET(macsec_offload_obj, obj, confidentiality_en, attrs->encrypt);
	MLX5_SET(macsec_offload_obj, obj, dekn, attrs->enc_key_id);
	MLX5_SET(macsec_offload_obj, obj, aso_return_reg, MLX5_MACSEC_ASO_REG_C_4_5);
	MLX5_SET(macsec_offload_obj, obj, macsec_aso_access_pd, attrs->aso_pdn);
	MLX5_SET(macsec_aso, aso_ctx, mode_parameter, attrs->next_pn);

	/* Epn */
	if (attrs->epn_state.epn_enabled) {
		void *salt_p;
		int i;

		MLX5_SET(macsec_aso, aso_ctx, epn_event_arm, 1);
		MLX5_SET(macsec_offload_obj, obj, epn_en, 1);
		MLX5_SET(macsec_offload_obj, obj, epn_msb, attrs->epn_state.epn_msb);
		MLX5_SET(macsec_offload_obj, obj, epn_overlap, attrs->epn_state.overlap);
		MLX5_SET64(macsec_offload_obj, obj, sci, (__force u64)attrs->ssci);
		salt_p = MLX5_ADDR_OF(macsec_offload_obj, obj, salt);
		for (i = 0; i < 3 ; i++)
			memcpy((u32 *)salt_p + i, &attrs->salt.bytes[4 * (2 - i)], 4);
	} else {
		MLX5_SET64(macsec_offload_obj, obj, sci, (__force u64)(attrs->sci));
	}

	MLX5_SET(macsec_aso, aso_ctx, valid, 0x1);
	if (is_tx) {
		MLX5_SET(macsec_aso, aso_ctx, mode, MLX5_MACSEC_ASO_INC_SN);
	} else {
		err = macsec_set_replay_protection(attrs, aso_ctx);
		if (err)
			return err;
	}

	/* general object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err) {
		mlx5_core_err(mdev,
			      "MACsec offload: Failed to create MACsec object (err = %d)\n",
			      err);
		return err;
	}

	*macsec_obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return err;
}

static void mlx5e_macsec_destroy_object(struct mlx5_core_dev *mdev, u32 macsec_obj_id)
{
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, macsec_obj_id);

	mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}

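/* Tear down the offload state of a single SA: remove its steering rule and
 * destroy the firmware MACsec object. Safe to call when no rule is installed.
 */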
static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
				    struct mlx5e_macsec_sa *sa,
				    bool is_tx, struct net_device *netdev, u32 fs_id)
{
	int action =  (is_tx) ?  MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
				 MLX5_ACCEL_MACSEC_ACTION_DECRYPT;

	if (!sa->macsec_rule)
		return;

	mlx5_macsec_fs_del_rule(macsec->mdev->macsec_fs, sa->macsec_rule, action, netdev,
				fs_id);
	mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id);
	sa->macsec_rule = NULL;
}

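/* Create the firmware MACsec object for an SA and install the matching
 * Tx/Rx steering rule. On Rx, fs_id returns the handle that marks packets
 * of this SC in the CQE flow-table metadata.
 */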
static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
				struct mlx5e_macsec_sa *sa,
				bool encrypt, bool is_tx, u32 *fs_id)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	struct mlx5e_macsec *macsec = priv->macsec;
	struct mlx5_macsec_rule_attrs rule_attrs;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_macsec_obj_attrs obj_attrs;
	union mlx5_macsec_rule *macsec_rule;
	int err;

	obj_attrs.next_pn = sa->next_pn;
	obj_attrs.sci = cpu_to_be64((__force u64)sa->sci);
	obj_attrs.enc_key_id = sa->enc_key_id;
	obj_attrs.encrypt = encrypt;
	obj_attrs.aso_pdn = macsec->aso.pdn;
	obj_attrs.epn_state = sa->epn_state;

	if (sa->epn_state.epn_enabled) {
		obj_attrs.ssci = cpu_to_be32((__force u32)sa->ssci);
		memcpy(&obj_attrs.salt, &sa->salt, sizeof(sa->salt));
	}

	obj_attrs.replay_window = ctx->secy->replay_window;
	obj_attrs.replay_protect = ctx->secy->replay_protect;

	err = mlx5e_macsec_create_object(mdev, &obj_attrs, is_tx, &sa->macsec_obj_id);
	if (err)
		return err;

	rule_attrs.macsec_obj_id = sa->macsec_obj_id;
	rule_attrs.sci = sa->sci;
	rule_attrs.assoc_num = sa->assoc_num;
	rule_attrs.action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
				      MLX5_ACCEL_MACSEC_ACTION_DECRYPT;

	macsec_rule = mlx5_macsec_fs_add_rule(mdev->macsec_fs, ctx, &rule_attrs, fs_id);
	if (!macsec_rule) {
		err = -ENOMEM;
		goto destroy_macsec_object;
	}

	sa->macsec_rule = macsec_rule;

	return 0;

destroy_macsec_object:
	mlx5e_macsec_destroy_object(mdev, sa->macsec_obj_id);

	return err;
}

static struct mlx5e_macsec_rx_sc *
mlx5e_macsec_get_rx_sc_from_sc_list(const struct list_head *list, sci_t sci)
{
	struct mlx5e_macsec_rx_sc *iter;

	list_for_each_entry_rcu(iter, list, rx_sc_list_element) {
		if (iter->sci == sci)
			return iter;
	}

	return NULL;
}

static int macsec_rx_sa_active_update(struct macsec_context *ctx,
				      struct mlx5e_macsec_sa *rx_sa,
				      bool active, u32 *fs_id)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	struct mlx5e_macsec *macsec = priv->macsec;
	int err = 0;

	if (rx_sa->active == active)
		return 0;

	rx_sa->active = active;
	if (!active) {
		mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev, *fs_id);
		return 0;
	}

	err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false, fs_id);
	if (err)
		rx_sa->active = false;

	return err;
}

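/* Offload is only supported for a subset of SecY configurations: strict frame
 * validation, the default ICV length, protect_frames enabled and encryption
 * enabled on the Tx SC.
 */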
static bool mlx5e_macsec_secy_features_validate(struct macsec_context *ctx)
{
	const struct net_device *netdev = ctx->netdev;
	const struct macsec_secy *secy = ctx->secy;

	if (secy->validate_frames != MACSEC_VALIDATE_STRICT) {
		netdev_err(netdev,
			   "MACsec offload is supported only when validate_frame is in strict mode\n");
		return false;
	}

	if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN) {
		netdev_err(netdev, "MACsec offload is supported only when icv_len is %d\n",
			   MACSEC_DEFAULT_ICV_LEN);
		return false;
	}

	if (!secy->protect_frames) {
		netdev_err(netdev,
			   "MACsec offload is supported only when protect_frames is set\n");
		return false;
	}

	if (!ctx->secy->tx_sc.encrypt) {
		netdev_err(netdev, "MACsec offload: encrypt off isn't supported\n");
		return false;
	}

	return true;
}

static struct mlx5e_macsec_device *
mlx5e_macsec_get_macsec_device_context(const struct mlx5e_macsec *macsec,
				       const struct macsec_context *ctx)
{
	struct mlx5e_macsec_device *iter;
	const struct list_head *list;

	list = &macsec->macsec_device_list_head;
	list_for_each_entry_rcu(iter, list, macsec_device_list_element) {
		if (iter->netdev == ctx->secy->netdev)
			return iter;
	}

	return NULL;
}

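/* Record the XPN state for an SA: the SSCI and salt used by the device and
 * the EPN MSBs, plus the overlap bit derived from which half of the 32-bit
 * sequence space the lower PN currently sits in.
 */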
static void update_macsec_epn(struct mlx5e_macsec_sa *sa, const struct macsec_key *key,
			      const pn_t *next_pn_halves, ssci_t ssci)
{
	struct mlx5e_macsec_epn_state *epn_state = &sa->epn_state;

	sa->ssci = ssci;
	sa->salt = key->salt;
	epn_state->epn_enabled = 1;
	epn_state->epn_msb = next_pn_halves->upper;
	epn_state->overlap = next_pn_halves->lower < MLX5_MACSEC_EPN_SCOPE_MID ? 0 : 1;
}

static int mlx5e_macsec_add_txsa(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
	const struct macsec_tx_sa *ctx_tx_sa = ctx->sa.tx_sa;
	const struct macsec_secy *secy = ctx->secy;
	struct mlx5e_macsec_device *macsec_device;
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 assoc_num = ctx->sa.assoc_num;
	struct mlx5e_macsec_sa *tx_sa;
	struct mlx5e_macsec *macsec;
	int err = 0;

	mutex_lock(&priv->macsec->lock);

	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EEXIST;
		goto out;
	}

	if (macsec_device->tx_sa[assoc_num]) {
		netdev_err(ctx->netdev, "MACsec offload tx_sa: %d already exist\n", assoc_num);
		err = -EEXIST;
		goto out;
	}

	tx_sa = kzalloc(sizeof(*tx_sa), GFP_KERNEL);
	if (!tx_sa) {
		err = -ENOMEM;
		goto out;
	}

	tx_sa->active = ctx_tx_sa->active;
	tx_sa->next_pn = ctx_tx_sa->next_pn_halves.lower;
	tx_sa->sci = secy->sci;
	tx_sa->assoc_num = assoc_num;

	if (secy->xpn)
		update_macsec_epn(tx_sa, &ctx_tx_sa->key, &ctx_tx_sa->next_pn_halves,
				  ctx_tx_sa->ssci);

	err = mlx5_create_encryption_key(mdev, ctx->sa.key, secy->key_len,
					 MLX5_ACCEL_OBJ_MACSEC_KEY,
					 &tx_sa->enc_key_id);
	if (err)
		goto destroy_sa;

	macsec_device->tx_sa[assoc_num] = tx_sa;
	if (!secy->operational ||
	    assoc_num != tx_sc->encoding_sa ||
	    !tx_sa->active)
		goto out;

	err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
	if (err)
		goto destroy_encryption_key;

	mutex_unlock(&macsec->lock);

	return 0;

destroy_encryption_key:
	macsec_device->tx_sa[assoc_num] = NULL;
	mlx5_destroy_encryption_key(mdev, tx_sa->enc_key_id);
destroy_sa:
	kfree(tx_sa);
out:
	mutex_unlock(&macsec->lock);

	return err;
}

static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
	const struct macsec_tx_sa *ctx_tx_sa = ctx->sa.tx_sa;
	struct mlx5e_macsec_device *macsec_device;
	u8 assoc_num = ctx->sa.assoc_num;
	struct mlx5e_macsec_sa *tx_sa;
	struct mlx5e_macsec *macsec;
	struct net_device *netdev;
	int err = 0;

	mutex_lock(&priv->macsec->lock);

	macsec = priv->macsec;
	netdev = ctx->netdev;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;
		goto out;
	}

	tx_sa = macsec_device->tx_sa[assoc_num];
	if (!tx_sa) {
		netdev_err(netdev, "MACsec offload: TX sa 0x%x doesn't exist\n", assoc_num);
		err = -EEXIST;
		goto out;
	}

	if (ctx->sa.update_pn) {
		netdev_err(netdev, "MACsec offload: update TX sa %d PN isn't supported\n",
			   assoc_num);
		err = -EINVAL;
		goto out;
	}

	if (tx_sa->active == ctx_tx_sa->active)
		goto out;

	tx_sa->active = ctx_tx_sa->active;
	if (tx_sa->assoc_num != tx_sc->encoding_sa)
		goto out;

	if (ctx_tx_sa->active) {
		err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
		if (err)
			goto out;
	} else {
		if (!tx_sa->macsec_rule) {
			err = -EINVAL;
			goto out;
		}

		mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
	}
out:
	mutex_unlock(&macsec->lock);

	return err;
}

static int mlx5e_macsec_del_txsa(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	struct mlx5e_macsec_device *macsec_device;
	u8 assoc_num = ctx->sa.assoc_num;
	struct mlx5e_macsec_sa *tx_sa;
	struct mlx5e_macsec *macsec;
	int err = 0;

	mutex_lock(&priv->macsec->lock);
	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;
		goto out;
	}

	tx_sa = macsec_device->tx_sa[assoc_num];
	if (!tx_sa) {
		netdev_err(ctx->netdev, "MACsec offload: TX sa 0x%x doesn't exist\n", assoc_num);
		err = -EEXIST;
		goto out;
	}

	mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
	mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id);
	kfree_rcu_mightsleep(tx_sa);
	macsec_device->tx_sa[assoc_num] = NULL;

out:
	mutex_unlock(&macsec->lock);

	return err;
}

static int mlx5e_macsec_add_rxsc(struct macsec_context *ctx)
{
	struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	const struct macsec_rx_sc *ctx_rx_sc = ctx->rx_sc;
	struct mlx5e_macsec_device *macsec_device;
	struct mlx5e_macsec_rx_sc *rx_sc;
	struct list_head *rx_sc_list;
	struct mlx5e_macsec *macsec;
	int err = 0;

	mutex_lock(&priv->macsec->lock);
	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;
		goto out;
	}

	rx_sc_list = &macsec_device->macsec_rx_sc_list_head;
	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(rx_sc_list, ctx_rx_sc->sci);
	if (rx_sc) {
		netdev_err(ctx->netdev, "MACsec offload: rx_sc (sci %lld) already exists\n",
			   ctx_rx_sc->sci);
		err = -EEXIST;
		goto out;
	}

	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
	if (!rx_sc) {
		err = -ENOMEM;
		goto out;
	}

	sc_xarray_element = kzalloc(sizeof(*sc_xarray_element), GFP_KERNEL);
	if (!sc_xarray_element) {
		err = -ENOMEM;
		goto destroy_rx_sc;
	}

	sc_xarray_element->rx_sc = rx_sc;
	err = xa_alloc(&macsec->sc_xarray, &sc_xarray_element->fs_id, sc_xarray_element,
		       XA_LIMIT(1, MLX5_MACEC_RX_FS_ID_MAX), GFP_KERNEL);
	if (err) {
		if (err == -EBUSY)
			netdev_err(ctx->netdev,
				   "MACsec offload: unable to create entry for RX SC (%d Rx SCs already allocated)\n",
				   MLX5_MACEC_RX_FS_ID_MAX);
		goto destroy_sc_xarray_element;
	}

	rx_sc->md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL);
	if (!rx_sc->md_dst) {
		err = -ENOMEM;
		goto erase_xa_alloc;
	}

	rx_sc->sci = ctx_rx_sc->sci;
	rx_sc->active = ctx_rx_sc->active;
	list_add_rcu(&rx_sc->rx_sc_list_element, rx_sc_list);

	rx_sc->sc_xarray_element = sc_xarray_element;
	rx_sc->md_dst->u.macsec_info.sci = rx_sc->sci;
	mutex_unlock(&macsec->lock);

	return 0;

erase_xa_alloc:
	xa_erase(&macsec->sc_xarray, sc_xarray_element->fs_id);
destroy_sc_xarray_element:
	kfree(sc_xarray_element);
destroy_rx_sc:
	kfree(rx_sc);

out:
	mutex_unlock(&macsec->lock);

	return err;
}

static int mlx5e_macsec_upd_rxsc(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	const struct macsec_rx_sc *ctx_rx_sc = ctx->rx_sc;
	struct mlx5e_macsec_device *macsec_device;
	struct mlx5e_macsec_rx_sc *rx_sc;
	struct mlx5e_macsec_sa *rx_sa;
	struct mlx5e_macsec *macsec;
	struct list_head *list;
	int i;
	int err = 0;

	mutex_lock(&priv->macsec->lock);

	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;
		goto out;
	}

	list = &macsec_device->macsec_rx_sc_list_head;
	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, ctx_rx_sc->sci);
	if (!rx_sc) {
		err = -EINVAL;
		goto out;
	}

	if (rx_sc->active == ctx_rx_sc->active)
		goto out;

	rx_sc->active = ctx_rx_sc->active;
	for (i = 0; i < MACSEC_NUM_AN; ++i) {
		rx_sa = rx_sc->rx_sa[i];
		if (!rx_sa)
			continue;

		err = macsec_rx_sa_active_update(ctx, rx_sa, rx_sa->active && ctx_rx_sc->active,
						 &rx_sc->sc_xarray_element->fs_id);
		if (err)
			goto out;
	}

out:
	mutex_unlock(&macsec->lock);

	return err;
}

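/* Release an Rx SC and everything hanging off it: the per-AN SAs and their
 * encryption keys, the fs_id xarray entry, the metadata dst and finally the
 * SC itself (freed after an RCU grace period).
 */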
static void macsec_del_rxsc_ctx(struct mlx5e_macsec *macsec, struct mlx5e_macsec_rx_sc *rx_sc,
				struct net_device *netdev)
{
	struct mlx5e_macsec_sa *rx_sa;
	int i;

	for (i = 0; i < MACSEC_NUM_AN; ++i) {
		rx_sa = rx_sc->rx_sa[i];
		if (!rx_sa)
			continue;

		mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, netdev,
					rx_sc->sc_xarray_element->fs_id);
		mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);

		kfree(rx_sa);
		rx_sc->rx_sa[i] = NULL;
	}

	/* At this point the relevant MACsec offload Rx rules have already been
	 * removed by mlx5e_macsec_cleanup_sa. Wait for the datapath to finish
	 * processing any in-flight Rx traffic by erasing fs_id with xa_erase,
	 * which synchronizes via RCU; once fs_id is erased this rx_sc is
	 * hidden from the datapath.
	 */
	list_del_rcu(&rx_sc->rx_sc_list_element);
	xa_erase(&macsec->sc_xarray, rx_sc->sc_xarray_element->fs_id);
	metadata_dst_free(rx_sc->md_dst);
	kfree(rx_sc->sc_xarray_element);
	kfree_rcu_mightsleep(rx_sc);
}

static int mlx5e_macsec_del_rxsc(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	struct mlx5e_macsec_device *macsec_device;
	struct mlx5e_macsec_rx_sc *rx_sc;
	struct mlx5e_macsec *macsec;
	struct list_head *list;
	int err = 0;

	mutex_lock(&priv->macsec->lock);

	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;
		goto out;
	}

	list = &macsec_device->macsec_rx_sc_list_head;
	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, ctx->rx_sc->sci);
	if (!rx_sc) {
		netdev_err(ctx->netdev,
			   "MACsec offload rx_sc sci %lld doesn't exist\n",
			   ctx->sa.rx_sa->sc->sci);
		err = -EINVAL;
		goto out;
	}

	macsec_del_rxsc_ctx(macsec, rx_sc, ctx->secy->netdev);
out:
	mutex_unlock(&macsec->lock);

	return err;
}

static int mlx5e_macsec_add_rxsa(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	const struct macsec_rx_sa *ctx_rx_sa = ctx->sa.rx_sa;
	struct mlx5e_macsec_device *macsec_device;
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 assoc_num = ctx->sa.assoc_num;
	struct mlx5e_macsec_rx_sc *rx_sc;
	sci_t sci = ctx_rx_sa->sc->sci;
	struct mlx5e_macsec_sa *rx_sa;
	struct mlx5e_macsec *macsec;
	struct list_head *list;
	int err = 0;

	mutex_lock(&priv->macsec->lock);

	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;
		goto out;
	}

	list = &macsec_device->macsec_rx_sc_list_head;
	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
	if (!rx_sc) {
		netdev_err(ctx->netdev,
			   "MACsec offload rx_sc sci %lld doesn't exist\n",
			   ctx->sa.rx_sa->sc->sci);
		err = -EINVAL;
		goto out;
	}

	if (rx_sc->rx_sa[assoc_num]) {
		netdev_err(ctx->netdev,
			   "MACsec offload rx_sc sci %lld rx_sa %d already exist\n",
			   sci, assoc_num);
		err = -EEXIST;
		goto out;
	}

	rx_sa = kzalloc(sizeof(*rx_sa), GFP_KERNEL);
	if (!rx_sa) {
		err = -ENOMEM;
		goto out;
	}

	rx_sa->active = ctx_rx_sa->active;
	rx_sa->next_pn = ctx_rx_sa->next_pn;
	rx_sa->sci = sci;
	rx_sa->assoc_num = assoc_num;

	if (ctx->secy->xpn)
		update_macsec_epn(rx_sa, &ctx_rx_sa->key, &ctx_rx_sa->next_pn_halves,
				  ctx_rx_sa->ssci);

	err = mlx5_create_encryption_key(mdev, ctx->sa.key, ctx->secy->key_len,
					 MLX5_ACCEL_OBJ_MACSEC_KEY,
					 &rx_sa->enc_key_id);
	if (err)
		goto destroy_sa;

	rx_sc->rx_sa[assoc_num] = rx_sa;
	if (!rx_sa->active)
		goto out;

	//TODO - add support for both authentication and encryption flows
	err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false, &rx_sc->sc_xarray_element->fs_id);
	if (err)
		goto destroy_encryption_key;

	goto out;

destroy_encryption_key:
	rx_sc->rx_sa[assoc_num] = NULL;
	mlx5_destroy_encryption_key(mdev, rx_sa->enc_key_id);
destroy_sa:
	kfree(rx_sa);
out:
	mutex_unlock(&macsec->lock);

	return err;
}

static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	const struct macsec_rx_sa *ctx_rx_sa = ctx->sa.rx_sa;
	struct mlx5e_macsec_device *macsec_device;
	u8 assoc_num = ctx->sa.assoc_num;
	struct mlx5e_macsec_rx_sc *rx_sc;
	sci_t sci = ctx_rx_sa->sc->sci;
	struct mlx5e_macsec_sa *rx_sa;
	struct mlx5e_macsec *macsec;
	struct list_head *list;
	int err = 0;

	mutex_lock(&priv->macsec->lock);

	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;
		goto out;
	}

	list = &macsec_device->macsec_rx_sc_list_head;
	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
	if (!rx_sc) {
		netdev_err(ctx->netdev,
			   "MACsec offload rx_sc sci %lld doesn't exist\n",
			   ctx->sa.rx_sa->sc->sci);
		err = -EINVAL;
		goto out;
	}

	rx_sa = rx_sc->rx_sa[assoc_num];
	if (!rx_sa) {
		netdev_err(ctx->netdev,
			   "MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n",
			   sci, assoc_num);
		err = -EINVAL;
		goto out;
	}

	if (ctx->sa.update_pn) {
		netdev_err(ctx->netdev,
			   "MACsec offload update RX sa %d PN isn't supported\n",
			   assoc_num);
		err = -EINVAL;
		goto out;
	}

	err = macsec_rx_sa_active_update(ctx, rx_sa, ctx_rx_sa->active,
					 &rx_sc->sc_xarray_element->fs_id);
out:
	mutex_unlock(&macsec->lock);

	return err;
}

static int mlx5e_macsec_del_rxsa(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	struct mlx5e_macsec_device *macsec_device;
	sci_t sci = ctx->sa.rx_sa->sc->sci;
	struct mlx5e_macsec_rx_sc *rx_sc;
	u8 assoc_num = ctx->sa.assoc_num;
	struct mlx5e_macsec_sa *rx_sa;
	struct mlx5e_macsec *macsec;
	struct list_head *list;
	int err = 0;

	mutex_lock(&priv->macsec->lock);

	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;
		goto out;
	}

	list = &macsec_device->macsec_rx_sc_list_head;
	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
	if (!rx_sc) {
		netdev_err(ctx->netdev,
			   "MACsec offload rx_sc sci %lld doesn't exist\n",
			   ctx->sa.rx_sa->sc->sci);
		err = -EINVAL;
		goto out;
	}

	rx_sa = rx_sc->rx_sa[assoc_num];
	if (!rx_sa) {
		netdev_err(ctx->netdev,
			   "MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n",
			   sci, assoc_num);
		err = -EINVAL;
		goto out;
	}

	mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
				rx_sc->sc_xarray_element->fs_id);
	mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
	kfree(rx_sa);
	rx_sc->rx_sa[assoc_num] = NULL;

out:
	mutex_unlock(&macsec->lock);

	return err;
}

static int mlx5e_macsec_add_secy(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	const struct net_device *dev = ctx->secy->netdev;
	const struct net_device *netdev = ctx->netdev;
	struct mlx5e_macsec_device *macsec_device;
	struct mlx5e_macsec *macsec;
	int err = 0;

	if (!mlx5e_macsec_secy_features_validate(ctx))
		return -EINVAL;

	mutex_lock(&priv->macsec->lock);
	macsec = priv->macsec;
	if (mlx5e_macsec_get_macsec_device_context(macsec, ctx)) {
		netdev_err(netdev, "MACsec offload: MACsec net_device already exist\n");
		goto out;
	}

	if (macsec->num_of_devices >= MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES) {
		netdev_err(netdev, "Currently, only %d MACsec offload devices can be set\n",
			   MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES);
		err = -EBUSY;
		goto out;
	}

	macsec_device = kzalloc(sizeof(*macsec_device), GFP_KERNEL);
	if (!macsec_device) {
		err = -ENOMEM;
		goto out;
	}

	macsec_device->dev_addr = kmemdup(dev->dev_addr, dev->addr_len, GFP_KERNEL);
	if (!macsec_device->dev_addr) {
		kfree(macsec_device);
		err = -ENOMEM;
		goto out;
	}

	macsec_device->netdev = dev;

	INIT_LIST_HEAD_RCU(&macsec_device->macsec_rx_sc_list_head);
	list_add_rcu(&macsec_device->macsec_device_list_element, &macsec->macsec_device_list_head);

	++macsec->num_of_devices;
out:
	mutex_unlock(&macsec->lock);

	return err;
}

static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
				      struct mlx5e_macsec_device *macsec_device)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	const struct net_device *dev = ctx->secy->netdev;
	struct mlx5e_macsec *macsec = priv->macsec;
	struct mlx5e_macsec_rx_sc *rx_sc, *tmp;
	struct mlx5e_macsec_sa *rx_sa;
	struct list_head *list;
	int i, err = 0;

	list = &macsec_device->macsec_rx_sc_list_head;
	list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
		for (i = 0; i < MACSEC_NUM_AN; ++i) {
			rx_sa = rx_sc->rx_sa[i];
			if (!rx_sa || !rx_sa->macsec_rule)
				continue;

			mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
						rx_sc->sc_xarray_element->fs_id);
		}
	}

	list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
		for (i = 0; i < MACSEC_NUM_AN; ++i) {
			rx_sa = rx_sc->rx_sa[i];
			if (!rx_sa)
				continue;

			if (rx_sa->active) {
				err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false,
							   &rx_sc->sc_xarray_element->fs_id);
				if (err)
					goto out;
			}
		}
	}

	memcpy(macsec_device->dev_addr, dev->dev_addr, dev->addr_len);
out:
	return err;
}

/* This function is called from two MACsec ops functions:
 *  macsec_set_mac_address: the MAC address was changed, so the Tx contexts
 *  (MACsec object + steering) need to be destroyed and re-created.
 *  macsec_changelink: the Tx SC or SecY may have changed, so both the Tx and
 *  Rx contexts (MACsec object + steering) need to be destroyed and re-created.
 */
static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
	const struct net_device *dev = ctx->secy->netdev;
	struct mlx5e_macsec_device *macsec_device;
	struct mlx5e_macsec_sa *tx_sa;
	struct mlx5e_macsec *macsec;
	int i, err = 0;

	if (!mlx5e_macsec_secy_features_validate(ctx))
		return -EINVAL;

	mutex_lock(&priv->macsec->lock);

	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;
		goto out;
	}

	/* if the dev_addr hasn't changed, it means the callback is from macsec_changelink */
	if (memcmp(macsec_device->dev_addr, dev->dev_addr, dev->addr_len)) {
		err = macsec_upd_secy_hw_address(ctx, macsec_device);
		if (err)
			goto out;
	}

	for (i = 0; i < MACSEC_NUM_AN; ++i) {
		tx_sa = macsec_device->tx_sa[i];
		if (!tx_sa)
			continue;

		mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
	}

	for (i = 0; i < MACSEC_NUM_AN; ++i) {
		tx_sa = macsec_device->tx_sa[i];
		if (!tx_sa)
			continue;

		if (tx_sa->assoc_num == tx_sc->encoding_sa && tx_sa->active) {
			err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
			if (err)
				goto out;
		}
	}

out:
	mutex_unlock(&macsec->lock);

	return err;
}

static int mlx5e_macsec_del_secy(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	struct mlx5e_macsec_device *macsec_device;
	struct mlx5e_macsec_rx_sc *rx_sc, *tmp;
	struct mlx5e_macsec_sa *tx_sa;
	struct mlx5e_macsec *macsec;
	struct list_head *list;
	int err = 0;
	int i;

	mutex_lock(&priv->macsec->lock);
	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;

		goto out;
	}

	for (i = 0; i < MACSEC_NUM_AN; ++i) {
		tx_sa = macsec_device->tx_sa[i];
		if (!tx_sa)
			continue;

		mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
		mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id);
		kfree(tx_sa);
		macsec_device->tx_sa[i] = NULL;
	}

	list = &macsec_device->macsec_rx_sc_list_head;
	list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element)
		macsec_del_rxsc_ctx(macsec, rx_sc, ctx->secy->netdev);

	kfree(macsec_device->dev_addr);
	macsec_device->dev_addr = NULL;

	list_del_rcu(&macsec_device->macsec_device_list_element);
	--macsec->num_of_devices;
	kfree(macsec_device);

out:
	mutex_unlock(&macsec->lock);

	return err;
}

static void macsec_build_accel_attrs(struct mlx5e_macsec_sa *sa,
				     struct mlx5_macsec_obj_attrs *attrs)
{
	attrs->epn_state.epn_msb = sa->epn_state.epn_msb;
	attrs->epn_state.overlap = sa->epn_state.overlap;
}

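/* Fill the ASO control segment of a WQE: point it at the DMA-mapped umr
 * context so the object state is read back into host memory, and optionally
 * apply the bitwise data/condition parameters.
 */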
static void macsec_aso_build_wqe_ctrl_seg(struct mlx5e_macsec_aso *macsec_aso,
					  struct mlx5_wqe_aso_ctrl_seg *aso_ctrl,
					  struct mlx5_aso_ctrl_param *param)
{
	struct mlx5e_macsec_umr *umr = macsec_aso->umr;

	memset(aso_ctrl, 0, sizeof(*aso_ctrl));
	aso_ctrl->va_l = cpu_to_be32(umr->dma_addr | ASO_CTRL_READ_EN);
	aso_ctrl->va_h = cpu_to_be32((u64)umr->dma_addr >> 32);
	aso_ctrl->l_key = cpu_to_be32(umr->mkey);

	if (!param)
		return;

	aso_ctrl->data_mask_mode = param->data_mask_mode << 6;
	aso_ctrl->condition_1_0_operand = param->condition_1_operand |
						param->condition_0_operand << 4;
	aso_ctrl->condition_1_0_offset = param->condition_1_offset |
						param->condition_0_offset << 4;
	aso_ctrl->data_offset_condition_operand = param->data_offset |
						param->condition_operand << 6;
	aso_ctrl->condition_0_data = cpu_to_be32(param->condition_0_data);
	aso_ctrl->condition_0_mask = cpu_to_be32(param->condition_0_mask);
	aso_ctrl->condition_1_data = cpu_to_be32(param->condition_1_data);
	aso_ctrl->condition_1_mask = cpu_to_be32(param->condition_1_mask);
	aso_ctrl->bitwise_data = cpu_to_be64(param->bitwise_data);
	aso_ctrl->data_mask = cpu_to_be64(param->data_mask);
}

static int mlx5e_macsec_modify_obj(struct mlx5_core_dev *mdev, struct mlx5_macsec_obj_attrs *attrs,
				   u32 macsec_id)
{
	u32 in[MLX5_ST_SZ_DW(modify_macsec_obj_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_macsec_obj_out)];
	u64 modify_field_select = 0;
	void *obj;
	int err;

	/* General object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, macsec_id);
	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err) {
		mlx5_core_err(mdev, "Query MACsec object failed (Object id %d), err = %d\n",
			      macsec_id, err);
		return err;
	}

	obj = MLX5_ADDR_OF(query_macsec_obj_out, out, macsec_object);
	modify_field_select = MLX5_GET64(macsec_offload_obj, obj, modify_field_select);

	/* EPN */
	if (!(modify_field_select & MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP) ||
	    !(modify_field_select & MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB)) {
		mlx5_core_dbg(mdev, "MACsec object field is not modifiable (Object id %d)\n",
			      macsec_id);
		return -EOPNOTSUPP;
	}

	obj = MLX5_ADDR_OF(modify_macsec_obj_in, in, macsec_object);
	MLX5_SET64(macsec_offload_obj, obj, modify_field_select,
		   MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP | MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB);
	MLX5_SET(macsec_offload_obj, obj, epn_msb, attrs->epn_state.epn_msb);
	MLX5_SET(macsec_offload_obj, obj, epn_overlap, attrs->epn_state.overlap);

	/* General object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}

static void macsec_aso_build_ctrl(struct mlx5e_macsec_aso *aso,
				  struct mlx5_wqe_aso_ctrl_seg *aso_ctrl,
				  struct mlx5e_macsec_aso_in *in)
{
	struct mlx5_aso_ctrl_param param = {};

	param.data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT;
	param.condition_0_operand = MLX5_ASO_ALWAYS_TRUE;
	param.condition_1_operand = MLX5_ASO_ALWAYS_TRUE;
	if (in->mode == MLX5_MACSEC_EPN) {
		param.data_offset = MLX5_MACSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
		param.bitwise_data = BIT_ULL(54);
		param.data_mask = param.bitwise_data;
	}
	macsec_aso_build_wqe_ctrl_seg(aso, aso_ctrl, &param);
}

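/* Post an ASO WQE that re-arms the EPN event bit in the MACsec ASO context of
 * the given object, so a later EPN window crossing can raise a new
 * object-change event.
 */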
static int macsec_aso_set_arm_event(struct mlx5_core_dev *mdev, struct mlx5e_macsec *macsec,
				    struct mlx5e_macsec_aso_in *in)
{
	struct mlx5e_macsec_aso *aso;
	struct mlx5_aso_wqe *aso_wqe;
	struct mlx5_aso *maso;
	int err;

	aso = &macsec->aso;
	maso = aso->maso;

	mutex_lock(&aso->aso_lock);
	aso_wqe = mlx5_aso_get_wqe(maso);
	mlx5_aso_build_wqe(maso, MLX5_MACSEC_ASO_DS_CNT, aso_wqe, in->obj_id,
			   MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
	macsec_aso_build_ctrl(aso, &aso_wqe->aso_ctrl, in);
	mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
	err = mlx5_aso_poll_cq(maso, false);
	mutex_unlock(&aso->aso_lock);

	return err;
}

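/* Read the current MACsec ASO context into the umr buffer via an ASO WQE and
 * return the event-arm state and mode parameter (the bottom of the PN/replay
 * window). Polls the CQ for up to roughly 10ms.
 */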
static int macsec_aso_query(struct mlx5_core_dev *mdev, struct mlx5e_macsec *macsec,
			    struct mlx5e_macsec_aso_in *in, struct mlx5e_macsec_aso_out *out)
{
	struct mlx5e_macsec_aso *aso;
	struct mlx5_aso_wqe *aso_wqe;
	struct mlx5_aso *maso;
	unsigned long expires;
	int err;

	aso = &macsec->aso;
	maso = aso->maso;

	mutex_lock(&aso->aso_lock);

	aso_wqe = mlx5_aso_get_wqe(maso);
	mlx5_aso_build_wqe(maso, MLX5_MACSEC_ASO_DS_CNT, aso_wqe, in->obj_id,
			   MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
	macsec_aso_build_wqe_ctrl_seg(aso, &aso_wqe->aso_ctrl, NULL);

	mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
	expires = jiffies + msecs_to_jiffies(10);
	do {
		err = mlx5_aso_poll_cq(maso, false);
		if (err)
			usleep_range(2, 10);
	} while (err && time_is_after_jiffies(expires));

	if (err)
		goto err_out;

	if (MLX5_GET(macsec_aso, aso->umr->ctx, epn_event_arm))
		out->event_arm |= MLX5E_ASO_EPN_ARM;

	out->mode_param = MLX5_GET(macsec_aso, aso->umr->ctx, mode_parameter);

err_out:
	mutex_unlock(&aso->aso_lock);
	return err;
}

static struct mlx5e_macsec_sa *get_macsec_tx_sa_from_obj_id(const struct mlx5e_macsec *macsec,
							    const u32 obj_id)
{
	const struct list_head *device_list;
	struct mlx5e_macsec_sa *macsec_sa;
	struct mlx5e_macsec_device *iter;
	int i;

	device_list = &macsec->macsec_device_list_head;

	list_for_each_entry(iter, device_list, macsec_device_list_element) {
		for (i = 0; i < MACSEC_NUM_AN; ++i) {
			macsec_sa = iter->tx_sa[i];
			if (!macsec_sa || !macsec_sa->active)
				continue;
			if (macsec_sa->macsec_obj_id == obj_id)
				return macsec_sa;
		}
	}

	return NULL;
}

static struct mlx5e_macsec_sa *get_macsec_rx_sa_from_obj_id(const struct mlx5e_macsec *macsec,
							    const u32 obj_id)
{
	const struct list_head *device_list, *sc_list;
	struct mlx5e_macsec_rx_sc *mlx5e_rx_sc;
	struct mlx5e_macsec_sa *macsec_sa;
	struct mlx5e_macsec_device *iter;
	int i;

	device_list = &macsec->macsec_device_list_head;

	list_for_each_entry(iter, device_list, macsec_device_list_element) {
		sc_list = &iter->macsec_rx_sc_list_head;
		list_for_each_entry(mlx5e_rx_sc, sc_list, rx_sc_list_element) {
			for (i = 0; i < MACSEC_NUM_AN; ++i) {
				macsec_sa = mlx5e_rx_sc->rx_sa[i];
				if (!macsec_sa || !macsec_sa->active)
					continue;
				if (macsec_sa->macsec_obj_id == obj_id)
					return macsec_sa;
			}
		}
	}

	return NULL;
}

static void macsec_epn_update(struct mlx5e_macsec *macsec, struct mlx5_core_dev *mdev,
			      struct mlx5e_macsec_sa *sa, u32 obj_id, u32 mode_param)
{
	struct mlx5_macsec_obj_attrs attrs = {};
	struct mlx5e_macsec_aso_in in = {};

	/* When the bottom of the replay protection window (mode_param) crosses 2^31 (half sequence
	 * number wraparound) hence mode_param > MLX5_MACSEC_EPN_SCOPE_MID the SW should update the
	 * esn_overlap to OLD (1).
	 * When the bottom of the replay protection window (mode_param) crosses 2^32 (full sequence
	 * number wraparound) hence mode_param < MLX5_MACSEC_EPN_SCOPE_MID since it did a
	 * wraparound, the SW should update the esn_overlap to NEW (0), and increment the esn_msb.
	 */

	if (mode_param < MLX5_MACSEC_EPN_SCOPE_MID) {
		sa->epn_state.epn_msb++;
		sa->epn_state.overlap = 0;
	} else {
		sa->epn_state.overlap = 1;
	}

	macsec_build_accel_attrs(sa, &attrs);
	mlx5e_macsec_modify_obj(mdev, &attrs, obj_id);

	/* Re-set EPN arm event */
	in.obj_id = obj_id;
	in.mode = MLX5_MACSEC_EPN;
	macsec_aso_set_arm_event(mdev, macsec, &in);
}

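/* Deferred handler for MACsec object-change events: look up the SA by object
 * id, query its ASO context and, if the EPN event fired, advance the EPN
 * state in both software and firmware.
 */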
static void macsec_async_event(struct work_struct *work)
{
	struct mlx5e_macsec_async_work *async_work;
	struct mlx5e_macsec_aso_out out = {};
	struct mlx5e_macsec_aso_in in = {};
	struct mlx5e_macsec_sa *macsec_sa;
	struct mlx5e_macsec *macsec;
	struct mlx5_core_dev *mdev;
	u32 obj_id;

	async_work = container_of(work, struct mlx5e_macsec_async_work, work);
	macsec = async_work->macsec;
	mutex_lock(&macsec->lock);

	mdev = async_work->mdev;
	obj_id = async_work->obj_id;
	macsec_sa = get_macsec_tx_sa_from_obj_id(macsec, obj_id);
	if (!macsec_sa) {
		macsec_sa = get_macsec_rx_sa_from_obj_id(macsec, obj_id);
		if (!macsec_sa) {
			mlx5_core_dbg(mdev, "MACsec SA is not found (SA object id %d)\n", obj_id);
			goto out_async_work;
		}
	}

	/* Query MACsec ASO context */
	in.obj_id = obj_id;
	macsec_aso_query(mdev, macsec, &in, &out);

	/* EPN case */
	if (macsec_sa->epn_state.epn_enabled && !(out.event_arm & MLX5E_ASO_EPN_ARM))
		macsec_epn_update(macsec, mdev, macsec_sa, obj_id, out.mode_param);

out_async_work:
	kfree(async_work);
	mutex_unlock(&macsec->lock);
}

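/* Notifier callback for firmware object-change EQEs. Runs in atomic context,
 * so the actual handling is deferred to the ordered MACsec workqueue.
 */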
static int macsec_obj_change_event(struct notifier_block *nb, unsigned long event, void *data)
{
	struct mlx5e_macsec *macsec = container_of(nb, struct mlx5e_macsec, nb);
	struct mlx5e_macsec_async_work *async_work;
	struct mlx5_eqe_obj_change *obj_change;
	struct mlx5_eqe *eqe = data;
	u16 obj_type;
	u32 obj_id;

	if (event != MLX5_EVENT_TYPE_OBJECT_CHANGE)
		return NOTIFY_DONE;

	obj_change = &eqe->data.obj_change;
	obj_type = be16_to_cpu(obj_change->obj_type);
	obj_id = be32_to_cpu(obj_change->obj_id);

	if (obj_type != MLX5_GENERAL_OBJECT_TYPES_MACSEC)
		return NOTIFY_DONE;

	async_work = kzalloc(sizeof(*async_work), GFP_ATOMIC);
	if (!async_work)
		return NOTIFY_DONE;

	async_work->macsec = macsec;
	async_work->mdev = macsec->mdev;
	async_work->obj_id = obj_id;

	INIT_WORK(&async_work->work, macsec_async_event);

	WARN_ON(!queue_work(macsec->wq, &async_work->work));

	return NOTIFY_OK;
}

static int mlx5e_macsec_aso_init(struct mlx5e_macsec_aso *aso, struct mlx5_core_dev *mdev)
{
	struct mlx5_aso *maso;
	int err;

	err = mlx5_core_alloc_pd(mdev, &aso->pdn);
	if (err) {
		mlx5_core_err(mdev,
			      "MACsec offload: Failed to alloc pd for MACsec ASO, err=%d\n",
			      err);
		return err;
	}

	maso = mlx5_aso_create(mdev, aso->pdn);
	if (IS_ERR(maso)) {
		err = PTR_ERR(maso);
		goto err_aso;
	}

	err = mlx5e_macsec_aso_reg_mr(mdev, aso);
	if (err)
		goto err_aso_reg;

	mutex_init(&aso->aso_lock);

	aso->maso = maso;

	return 0;

err_aso_reg:
	mlx5_aso_destroy(maso);
err_aso:
	mlx5_core_dealloc_pd(mdev, aso->pdn);
	return err;
}

static void mlx5e_macsec_aso_cleanup(struct mlx5e_macsec_aso *aso, struct mlx5_core_dev *mdev)
{
	if (!aso)
		return;

	mlx5e_macsec_aso_dereg_mr(mdev, aso);

	mlx5_aso_destroy(aso->maso);

	mlx5_core_dealloc_pd(mdev, aso->pdn);
}

static const struct macsec_ops macsec_offload_ops = {
	.mdo_add_txsa = mlx5e_macsec_add_txsa,
	.mdo_upd_txsa = mlx5e_macsec_upd_txsa,
	.mdo_del_txsa = mlx5e_macsec_del_txsa,
	.mdo_add_rxsc = mlx5e_macsec_add_rxsc,
	.mdo_upd_rxsc = mlx5e_macsec_upd_rxsc,
	.mdo_del_rxsc = mlx5e_macsec_del_rxsc,
	.mdo_add_rxsa = mlx5e_macsec_add_rxsa,
	.mdo_upd_rxsa = mlx5e_macsec_upd_rxsa,
	.mdo_del_rxsa = mlx5e_macsec_del_rxsa,
	.mdo_add_secy = mlx5e_macsec_add_secy,
	.mdo_upd_secy = mlx5e_macsec_upd_secy,
	.mdo_del_secy = mlx5e_macsec_del_secy,
};

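/* Tx datapath check: the skb is only transmitted if the SCI in its MACsec
 * metadata dst maps to an offloaded Tx fs_id; otherwise it is dropped.
 */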
bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);
	u32 fs_id;

	fs_id = mlx5_macsec_fs_get_fs_id_from_hashtable(macsec->mdev->macsec_fs,
							&md_dst->u.macsec_info.sci);
	if (!fs_id)
		goto err_out;

	return true;

err_out:
	dev_kfree_skb_any(skb);
	return false;
}

void mlx5e_macsec_tx_build_eseg(struct mlx5e_macsec *macsec,
				struct sk_buff *skb,
				struct mlx5_wqe_eth_seg *eseg)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);
	u32 fs_id;

	fs_id = mlx5_macsec_fs_get_fs_id_from_hashtable(macsec->mdev->macsec_fs,
							&md_dst->u.macsec_info.sci);
	if (!fs_id)
		return;

	eseg->flow_table_metadata = cpu_to_be32(MLX5_ETH_WQE_FT_META_MACSEC | fs_id << 2);
}

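/* Rx datapath hook: recover the fs_id from the CQE flow-table metadata, look
 * up the matching Rx SC and attach its metadata dst to the skb so the MACsec
 * core can associate the packet with the correct SecY/SC by SCI.
 */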
void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev,
					struct sk_buff *skb,
					struct mlx5_cqe64 *cqe)
{
	struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
	u32 macsec_meta_data = be32_to_cpu(cqe->ft_metadata);
	struct mlx5e_priv *priv = macsec_netdev_priv(netdev);
	struct mlx5e_macsec_rx_sc *rx_sc;
	struct mlx5e_macsec *macsec;
	u32  fs_id;

	macsec = priv->macsec;
	if (!macsec)
		return;

	fs_id = MLX5_MACSEC_RX_METADAT_HANDLE(macsec_meta_data);

	rcu_read_lock();
	sc_xarray_element = xa_load(&macsec->sc_xarray, fs_id);
	rx_sc = sc_xarray_element->rx_sc;
	if (rx_sc) {
		dst_hold(&rx_sc->md_dst->dst);
		skb_dst_set(skb, &rx_sc->md_dst->dst);
	}

	rcu_read_unlock();
}

void mlx5e_macsec_build_netdev(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;

	if (!mlx5e_is_macsec_device(priv->mdev))
		return;

	/* Enable MACsec */
	mlx5_core_dbg(priv->mdev, "mlx5e: MACsec acceleration enabled\n");
	netdev->macsec_ops = &macsec_offload_ops;
	netdev->features |= NETIF_F_HW_MACSEC;
	netif_keep_dst(netdev);
}

int mlx5e_macsec_init(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_macsec *macsec = NULL;
	struct mlx5_macsec_fs *macsec_fs;
	int err;

	if (!mlx5e_is_macsec_device(priv->mdev)) {
		mlx5_core_dbg(mdev, "Not a MACsec offload device\n");
		return 0;
	}

	macsec = kzalloc(sizeof(*macsec), GFP_KERNEL);
	if (!macsec)
		return -ENOMEM;

	INIT_LIST_HEAD(&macsec->macsec_device_list_head);
	mutex_init(&macsec->lock);

	err = mlx5e_macsec_aso_init(&macsec->aso, priv->mdev);
	if (err) {
		mlx5_core_err(mdev, "MACsec offload: Failed to init aso, err=%d\n", err);
		goto err_aso;
	}

	macsec->wq = alloc_ordered_workqueue("mlx5e_macsec_%s", 0, priv->netdev->name);
	if (!macsec->wq) {
		err = -ENOMEM;
		goto err_wq;
	}

	xa_init_flags(&macsec->sc_xarray, XA_FLAGS_ALLOC1);

	priv->macsec = macsec;

	macsec->mdev = mdev;

	macsec_fs = mlx5_macsec_fs_init(mdev);
	if (!macsec_fs) {
		err = -ENOMEM;
		goto err_out;
	}

	mdev->macsec_fs = macsec_fs;

	macsec->nb.notifier_call = macsec_obj_change_event;
	mlx5_notifier_register(mdev, &macsec->nb);

	mlx5_core_dbg(mdev, "MACsec attached to netdevice\n");

	return 0;

err_out:
	destroy_workqueue(macsec->wq);
err_wq:
	mlx5e_macsec_aso_cleanup(&macsec->aso, priv->mdev);
err_aso:
	kfree(macsec);
	priv->macsec = NULL;
	return err;
}

void mlx5e_macsec_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_macsec *macsec = priv->macsec;
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!macsec)
		return;

	mlx5_notifier_unregister(mdev, &macsec->nb);
	mlx5_macsec_fs_cleanup(mdev->macsec_fs);
	destroy_workqueue(macsec->wq);
	mlx5e_macsec_aso_cleanup(&macsec->aso, mdev);
	mutex_destroy(&macsec->lock);
	kfree(macsec);
}