// SPDX-License-Identifier: GPL-2.0
/* Marvell MACsec hardware offload driver
 *
 * Copyright (C) 2022 Marvell.
 */

#include <crypto/skcipher.h>
#include <linux/rtnetlink.h>
#include <linux/bitfield.h>
#include "otx2_common.h"

#define MCS_TCAM0_MAC_DA_MASK		GENMASK_ULL(47, 0)
#define MCS_TCAM0_MAC_SA_MASK		GENMASK_ULL(63, 48)
#define MCS_TCAM1_MAC_SA_MASK		GENMASK_ULL(31, 0)
#define MCS_TCAM1_ETYPE_MASK		GENMASK_ULL(47, 32)

#define MCS_SA_MAP_MEM_SA_USE		BIT_ULL(9)

#define MCS_RX_SECY_PLCY_RW_MASK	GENMASK_ULL(49, 18)
#define MCS_RX_SECY_PLCY_RP		BIT_ULL(17)
#define MCS_RX_SECY_PLCY_AUTH_ENA	BIT_ULL(16)
#define MCS_RX_SECY_PLCY_CIP		GENMASK_ULL(8, 5)
#define MCS_RX_SECY_PLCY_VAL		GENMASK_ULL(2, 1)
#define MCS_RX_SECY_PLCY_ENA		BIT_ULL(0)

#define MCS_TX_SECY_PLCY_MTU		GENMASK_ULL(43, 28)
#define MCS_TX_SECY_PLCY_ST_TCI		GENMASK_ULL(27, 22)
#define MCS_TX_SECY_PLCY_ST_OFFSET	GENMASK_ULL(21, 15)
#define MCS_TX_SECY_PLCY_INS_MODE	BIT_ULL(14)
#define MCS_TX_SECY_PLCY_AUTH_ENA	BIT_ULL(13)
#define MCS_TX_SECY_PLCY_CIP		GENMASK_ULL(5, 2)
#define MCS_TX_SECY_PLCY_PROTECT	BIT_ULL(1)
#define MCS_TX_SECY_PLCY_ENA		BIT_ULL(0)

#define MCS_GCM_AES_128			0
#define MCS_GCM_AES_256			1
#define MCS_GCM_AES_XPN_128		2
#define MCS_GCM_AES_XPN_256		3

#define MCS_TCI_ES			0x40 /* end station */
#define MCS_TCI_SC			0x20 /* SCI present */
#define MCS_TCI_SCB			0x10 /* EPON */
#define MCS_TCI_E			0x08 /* encryption */
#define MCS_TCI_C			0x04 /* changed text */

#define CN10K_MAX_HASH_LEN		16
#define CN10K_MAX_SAK_LEN		32

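/* Compute the AES-GCM hash key: ECB-AES-encrypt a 16-byte all-zero
 * block with the SAK, which yields the GHASH subkey H that the MCS
 * hardware expects alongside the key itself.
 */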
static int cn10k_ecb_aes_encrypt(struct otx2_nic *pfvf, u8 *sak,
				 u16 sak_len, u8 *hash)
{
	u8 data[CN10K_MAX_HASH_LEN] = { 0 };
	struct skcipher_request *req = NULL;
	struct scatterlist sg_src, sg_dst;
	struct crypto_skcipher *tfm;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
	if (IS_ERR(tfm)) {
		dev_err(pfvf->dev, "failed to allocate transform for ecb-aes\n");
		return PTR_ERR(tfm);
	}

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		dev_err(pfvf->dev, "failed to allocate request for skcipher\n");
		err = -ENOMEM;
		goto free_tfm;
	}

	err = crypto_skcipher_setkey(tfm, sak, sak_len);
	if (err) {
		dev_err(pfvf->dev, "failed to set key for skcipher\n");
		goto free_req;
	}

	/* build sg list */
	sg_init_one(&sg_src, data, CN10K_MAX_HASH_LEN);
	sg_init_one(&sg_dst, hash, CN10K_MAX_HASH_LEN);

	skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg_src, &sg_dst,
				   CN10K_MAX_HASH_LEN, NULL);

	err = crypto_skcipher_encrypt(req);
	err = crypto_wait_req(err, &wait);

free_req:
	skcipher_request_free(req);
free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}

static struct cn10k_mcs_txsc *cn10k_mcs_get_txsc(struct cn10k_mcs_cfg *cfg,
						 struct macsec_secy *secy)
{
	struct cn10k_mcs_txsc *txsc;

	list_for_each_entry(txsc, &cfg->txsc_list, entry) {
		if (txsc->sw_secy == secy)
			return txsc;
	}

	return NULL;
}

static struct cn10k_mcs_rxsc *cn10k_mcs_get_rxsc(struct cn10k_mcs_cfg *cfg,
						 struct macsec_secy *secy,
						 struct macsec_rx_sc *rx_sc)
{
	struct cn10k_mcs_rxsc *rxsc;

	list_for_each_entry(rxsc, &cfg->rxsc_list, entry) {
		if (rxsc->sw_rxsc == rx_sc && rxsc->sw_secy == secy)
			return rxsc;
	}

	return NULL;
}

static const char *rsrc_name(enum mcs_rsrc_type rsrc_type)
{
	switch (rsrc_type) {
	case MCS_RSRC_TYPE_FLOWID:
		return "FLOW";
	case MCS_RSRC_TYPE_SC:
		return "SC";
	case MCS_RSRC_TYPE_SECY:
		return "SECY";
	case MCS_RSRC_TYPE_SA:
		return "SA";
	default:
		return "Unknown";
	}
}

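/* Allocate one resource of the given type (flow ID, SC, SecY or SA)
 * from the Admin Function (AF) via the mailbox and return its
 * hardware index through @rsrc_id.
 */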
static int cn10k_mcs_alloc_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
				enum mcs_rsrc_type type, u16 *rsrc_id)
{
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_alloc_rsrc_req *req;
	struct mcs_alloc_rsrc_rsp *rsp;
	int ret = -ENOMEM;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_alloc_resources(mbox);
	if (!req)
		goto fail;

	req->rsrc_type = type;
	req->rsrc_cnt = 1;
	req->dir = dir;

	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_alloc_rsrc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
							     0, &req->hdr);
	if (IS_ERR(rsp) || req->rsrc_cnt != rsp->rsrc_cnt ||
	    req->rsrc_type != rsp->rsrc_type || req->dir != rsp->dir) {
		ret = -EINVAL;
		goto fail;
	}

	switch (rsp->rsrc_type) {
	case MCS_RSRC_TYPE_FLOWID:
		*rsrc_id = rsp->flow_ids[0];
		break;
	case MCS_RSRC_TYPE_SC:
		*rsrc_id = rsp->sc_ids[0];
		break;
	case MCS_RSRC_TYPE_SECY:
		*rsrc_id = rsp->secy_ids[0];
		break;
	case MCS_RSRC_TYPE_SA:
		*rsrc_id = rsp->sa_ids[0];
		break;
	default:
		ret = -EINVAL;
		goto fail;
	}

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	dev_err(pfvf->dev, "Failed to allocate %s %s resource\n",
		dir == MCS_TX ? "TX" : "RX", rsrc_name(type));
	mutex_unlock(&mbox->lock);
	return ret;
}

static void cn10k_mcs_free_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
				enum mcs_rsrc_type type, u16 hw_rsrc_id,
				bool all)
{
	struct mcs_clear_stats *clear_req;
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_free_rsrc_req *req;

	mutex_lock(&mbox->lock);

	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
	if (!clear_req)
		goto fail;

	clear_req->id = hw_rsrc_id;
	clear_req->type = type;
	clear_req->dir = dir;

	req = otx2_mbox_alloc_msg_mcs_free_resources(mbox);
	if (!req)
		goto fail;

	req->rsrc_id = hw_rsrc_id;
	req->rsrc_type = type;
	req->dir = dir;
	if (all)
		req->all = 1;

	if (otx2_sync_mbox_msg(&pfvf->mbox))
		goto fail;

	mutex_unlock(&mbox->lock);

	return;
fail:
	dev_err(pfvf->dev, "Failed to free %s %s resource\n",
		dir == MCS_TX ? "TX" : "RX", rsrc_name(type));
	mutex_unlock(&mbox->lock);
}

static int cn10k_mcs_alloc_txsa(struct otx2_nic *pfvf, u16 *hw_sa_id)
{
	return cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id);
}

static int cn10k_mcs_alloc_rxsa(struct otx2_nic *pfvf, u16 *hw_sa_id)
{
	return cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id);
}

static void cn10k_mcs_free_txsa(struct otx2_nic *pfvf, u16 hw_sa_id)
{
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id, false);
}

static void cn10k_mcs_free_rxsa(struct otx2_nic *pfvf, u16 hw_sa_id)
{
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id, false);
}

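/* Build the Rx SecY policy word (replay window and protection, cipher
 * suite derived from key length and XPN mode, frame validation level)
 * and install it for @hw_secy_id.
 */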
static int cn10k_mcs_write_rx_secy(struct otx2_nic *pfvf,
				   struct macsec_secy *secy, u8 hw_secy_id)
{
	struct mcs_secy_plcy_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	u64 policy;
	u8 cipher;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	policy = FIELD_PREP(MCS_RX_SECY_PLCY_RW_MASK, secy->replay_window);
	if (secy->replay_protect)
		policy |= MCS_RX_SECY_PLCY_RP;

	policy |= MCS_RX_SECY_PLCY_AUTH_ENA;

	switch (secy->key_len) {
	case 16:
		cipher = secy->xpn ? MCS_GCM_AES_XPN_128 : MCS_GCM_AES_128;
		break;
	case 32:
		cipher = secy->xpn ? MCS_GCM_AES_XPN_256 : MCS_GCM_AES_256;
		break;
	default:
		cipher = MCS_GCM_AES_128;
		dev_warn(pfvf->dev, "Unsupported key length\n");
		break;
	}

	policy |= FIELD_PREP(MCS_RX_SECY_PLCY_CIP, cipher);
	policy |= FIELD_PREP(MCS_RX_SECY_PLCY_VAL, secy->validate_frames);

	policy |= MCS_RX_SECY_PLCY_ENA;

	req->plcy = policy;
	req->secy_id = hw_secy_id;
	req->dir = MCS_RX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_write_rx_flowid(struct otx2_nic *pfvf,
				     struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
{
	struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
	struct macsec_secy *secy = rxsc->sw_secy;
	struct mcs_flowid_entry_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	u64 mac_da;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	mac_da = ether_addr_to_u64(secy->netdev->dev_addr);

	req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_DA_MASK, mac_da);
	req->mask[0] = ~0ULL;
	req->mask[0] &= ~MCS_TCAM0_MAC_DA_MASK;

	req->data[1] = FIELD_PREP(MCS_TCAM1_ETYPE_MASK, ETH_P_MACSEC);
	req->mask[1] = ~0ULL;
	req->mask[1] &= ~MCS_TCAM1_ETYPE_MASK;

	req->mask[2] = ~0ULL;
	req->mask[3] = ~0ULL;

	req->flow_id = rxsc->hw_flow_id;
	req->secy_id = hw_secy_id;
	req->sc_id = rxsc->hw_sc_id;
	req->dir = MCS_RX;

	if (sw_rx_sc->active)
		req->ena = 1;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_write_sc_cam(struct otx2_nic *pfvf,
				  struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
{
	struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
	struct mcs_rx_sc_cam_write_req *sc_req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	sc_req = otx2_mbox_alloc_msg_mcs_rx_sc_cam_write(mbox);
	if (!sc_req) {
		ret = -ENOMEM;
		goto fail;
	}

	sc_req->sci = (__force u64)cpu_to_be64((__force u64)sw_rx_sc->sci);
	sc_req->sc_id = rxsc->hw_sc_id;
	sc_req->secy_id = hw_secy_id;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

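/* The MCS hardware expects the SAK, hash key and salt byte-reversed,
 * with the SSCI in the upper 32 bits of the last policy word; reverse
 * each field before copying it into the SA policy request.
 */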
static int cn10k_mcs_write_keys(struct otx2_nic *pfvf,
				struct macsec_secy *secy,
				struct mcs_sa_plcy_write_req *req,
				u8 *sak, u8 *salt, ssci_t ssci)
{
	u8 hash_rev[CN10K_MAX_HASH_LEN];
	u8 sak_rev[CN10K_MAX_SAK_LEN];
	u8 salt_rev[MACSEC_SALT_LEN];
	u8 hash[CN10K_MAX_HASH_LEN];
	u32 ssci_63_32;
	int err, i;

	err = cn10k_ecb_aes_encrypt(pfvf, sak, secy->key_len, hash);
	if (err) {
		dev_err(pfvf->dev, "Generating hash using ECB(AES) failed\n");
		return err;
	}

	for (i = 0; i < secy->key_len; i++)
		sak_rev[i] = sak[secy->key_len - 1 - i];

	for (i = 0; i < CN10K_MAX_HASH_LEN; i++)
		hash_rev[i] = hash[CN10K_MAX_HASH_LEN - 1 - i];

	for (i = 0; i < MACSEC_SALT_LEN; i++)
		salt_rev[i] = salt[MACSEC_SALT_LEN - 1 - i];

	ssci_63_32 = (__force u32)cpu_to_be32((__force u32)ssci);

	memcpy(&req->plcy[0][0], sak_rev, secy->key_len);
	memcpy(&req->plcy[0][4], hash_rev, CN10K_MAX_HASH_LEN);
	memcpy(&req->plcy[0][6], salt_rev, MACSEC_SALT_LEN);
	req->plcy[0][7] |= (u64)ssci_63_32 << 32;

	return 0;
}

static int cn10k_mcs_write_rx_sa_plcy(struct otx2_nic *pfvf,
				      struct macsec_secy *secy,
				      struct cn10k_mcs_rxsc *rxsc,
				      u8 assoc_num, bool sa_in_use)
{
	struct mcs_sa_plcy_write_req *plcy_req;
	u8 *sak = rxsc->sa_key[assoc_num];
	u8 *salt = rxsc->salt[assoc_num];
	struct mcs_rx_sc_sa_map *map_req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox);
	if (!plcy_req) {
		ret = -ENOMEM;
		goto fail;
	}

	map_req = otx2_mbox_alloc_msg_mcs_rx_sc_sa_map_write(mbox);
	if (!map_req) {
		otx2_mbox_reset(&mbox->mbox, 0);
		ret = -ENOMEM;
		goto fail;
	}

	ret = cn10k_mcs_write_keys(pfvf, secy, plcy_req, sak,
				   salt, rxsc->ssci[assoc_num]);
	if (ret)
		goto fail;

	plcy_req->sa_index[0] = rxsc->hw_sa_id[assoc_num];
	plcy_req->sa_cnt = 1;
	plcy_req->dir = MCS_RX;

	map_req->sa_index = rxsc->hw_sa_id[assoc_num];
	map_req->sa_in_use = sa_in_use;
	map_req->sc_id = rxsc->hw_sc_id;
	map_req->an = assoc_num;

	/* Send two messages together */
	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_write_rx_sa_pn(struct otx2_nic *pfvf,
				    struct cn10k_mcs_rxsc *rxsc,
				    u8 assoc_num, u64 next_pn)
{
	struct mcs_pn_table_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->pn_id = rxsc->hw_sa_id[assoc_num];
	req->next_pn = next_pn;
	req->dir = MCS_RX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

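/* Build the Tx SecY policy word (MTU, SecTag TCI bits, SecTag
 * insertion offset and mode, cipher suite, protect/enable flags)
 * and install it for this SecY.
 */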
static int cn10k_mcs_write_tx_secy(struct otx2_nic *pfvf,
				   struct macsec_secy *secy,
				   struct cn10k_mcs_txsc *txsc)
{
	struct mcs_secy_plcy_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	struct macsec_tx_sc *sw_tx_sc;
	u8 sectag_tci = 0;
	u8 tag_offset;
	u64 policy;
	u8 cipher;
	int ret;

	/* Insert SecTag after 12 bytes (DA + SA) or 16 bytes
	 * if a VLAN tag needs to be sent in clear text.
	 */
	tag_offset = txsc->vlan_dev ? 16 : 12;
	sw_tx_sc = &secy->tx_sc;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	if (sw_tx_sc->send_sci) {
		sectag_tci |= MCS_TCI_SC;
	} else {
		if (sw_tx_sc->end_station)
			sectag_tci |= MCS_TCI_ES;
		if (sw_tx_sc->scb)
			sectag_tci |= MCS_TCI_SCB;
	}

	if (sw_tx_sc->encrypt)
		sectag_tci |= (MCS_TCI_E | MCS_TCI_C);

	policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU,
			    pfvf->netdev->mtu + OTX2_ETH_HLEN);
	/* Write SecTag excluding AN bits (1..0) */
	policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_TCI, sectag_tci >> 2);
	policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_OFFSET, tag_offset);
	policy |= MCS_TX_SECY_PLCY_INS_MODE;
	policy |= MCS_TX_SECY_PLCY_AUTH_ENA;

	switch (secy->key_len) {
	case 16:
		cipher = secy->xpn ? MCS_GCM_AES_XPN_128 : MCS_GCM_AES_128;
		break;
	case 32:
		cipher = secy->xpn ? MCS_GCM_AES_XPN_256 : MCS_GCM_AES_256;
		break;
	default:
		cipher = MCS_GCM_AES_128;
		dev_warn(pfvf->dev, "Unsupported key length\n");
		break;
	}

	policy |= FIELD_PREP(MCS_TX_SECY_PLCY_CIP, cipher);

	if (secy->protect_frames)
		policy |= MCS_TX_SECY_PLCY_PROTECT;

	/* If the encoding SA does not exist or is not active and frame
	 * protection is not enabled, frames can be sent out as-is. Hence
	 * enable the policy irrespective of the SecY being operational
	 * when protect_frames is off.
	 */
	if (!secy->protect_frames || secy->operational)
		policy |= MCS_TX_SECY_PLCY_ENA;

	req->plcy = policy;
	req->secy_id = txsc->hw_secy_id_tx;
	req->dir = MCS_TX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_write_tx_flowid(struct otx2_nic *pfvf,
				     struct macsec_secy *secy,
				     struct cn10k_mcs_txsc *txsc)
{
	struct mcs_flowid_entry_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	u64 mac_sa;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	mac_sa = ether_addr_to_u64(secy->netdev->dev_addr);

	req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_SA_MASK, mac_sa);
	req->data[1] = FIELD_PREP(MCS_TCAM1_MAC_SA_MASK, mac_sa >> 16);

	req->mask[0] = ~0ULL;
	req->mask[0] &= ~MCS_TCAM0_MAC_SA_MASK;

	req->mask[1] = ~0ULL;
	req->mask[1] &= ~MCS_TCAM1_MAC_SA_MASK;

	req->mask[2] = ~0ULL;
	req->mask[3] = ~0ULL;

	req->flow_id = txsc->hw_flow_id;
	req->secy_id = txsc->hw_secy_id_tx;
	req->sc_id = txsc->hw_sc_id;
	req->sci = (__force u64)cpu_to_be64((__force u64)secy->sci);
	req->dir = MCS_TX;
	/* This can be enabled since the stack transmits packets only
	 * when the interface is up.
	 */
	req->ena = 1;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

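/* Map the encoding SA to the Tx SC so the hardware uses that SA for
 * SecTag insertion and encryption; no-op for any other SA.
 */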
static int cn10k_mcs_link_tx_sa2sc(struct otx2_nic *pfvf,
				   struct macsec_secy *secy,
				   struct cn10k_mcs_txsc *txsc,
				   u8 sa_num, bool sa_active)
{
	struct mcs_tx_sc_sa_map *map_req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	/* Of all the SAs, link only the encoding SA to the SC */
	if (txsc->encoding_sa != sa_num)
		return 0;

	mutex_lock(&mbox->lock);

	map_req = otx2_mbox_alloc_msg_mcs_tx_sc_sa_map_write(mbox);
	if (!map_req) {
		otx2_mbox_reset(&mbox->mbox, 0);
		ret = -ENOMEM;
		goto fail;
	}

	map_req->sa_index0 = txsc->hw_sa_id[sa_num];
	map_req->sa_index0_vld = sa_active;
	map_req->sectag_sci = (__force u64)cpu_to_be64((__force u64)secy->sci);
	map_req->sc_id = txsc->hw_sc_id;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_write_tx_sa_plcy(struct otx2_nic *pfvf,
				      struct macsec_secy *secy,
				      struct cn10k_mcs_txsc *txsc,
				      u8 assoc_num)
{
	struct mcs_sa_plcy_write_req *plcy_req;
	u8 *sak = txsc->sa_key[assoc_num];
	u8 *salt = txsc->salt[assoc_num];
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox);
	if (!plcy_req) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = cn10k_mcs_write_keys(pfvf, secy, plcy_req, sak,
				   salt, txsc->ssci[assoc_num]);
	if (ret)
		goto fail;

	plcy_req->plcy[0][8] = assoc_num;
	plcy_req->sa_index[0] = txsc->hw_sa_id[assoc_num];
	plcy_req->sa_cnt = 1;
	plcy_req->dir = MCS_TX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_write_tx_sa_pn(struct otx2_nic *pfvf,
				struct cn10k_mcs_txsc *txsc,
				u8 assoc_num, u64 next_pn)
{
	struct mcs_pn_table_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->pn_id = txsc->hw_sa_id[assoc_num];
	req->next_pn = next_pn;
	req->dir = MCS_TX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

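/* Enable or disable the TCAM flow entry that steers packets into the
 * MCS block for the given direction.
 */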
static int cn10k_mcs_ena_dis_flowid(struct otx2_nic *pfvf, u16 hw_flow_id,
				    bool enable, enum mcs_direction dir)
{
	struct mcs_flowid_ena_dis_entry *req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_flowid_ena_entry(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->flow_id = hw_flow_id;
	req->ena = enable;
	req->dir = dir;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

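/* Read SA statistics from the AF into @rsp_p; when @clear is set, a
 * clear request is sent in the same mailbox batch so the hardware
 * counters are reset as well.
 */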
static int cn10k_mcs_sa_stats(struct otx2_nic *pfvf, u8 hw_sa_id,
			      struct mcs_sa_stats *rsp_p,
			      enum mcs_direction dir, bool clear)
{
	struct mcs_clear_stats *clear_req;
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_stats_req *req;
	struct mcs_sa_stats *rsp;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_get_sa_stats(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->id = hw_sa_id;
	req->dir = dir;

	if (!clear)
		goto send_msg;

	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
	if (!clear_req) {
		ret = -ENOMEM;
		goto fail;
	}
	clear_req->id = hw_sa_id;
	clear_req->dir = dir;
	clear_req->type = MCS_RSRC_TYPE_SA;

send_msg:
	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_sa_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
						       0, &req->hdr);
	if (IS_ERR(rsp)) {
		ret = PTR_ERR(rsp);
		goto fail;
	}

	memcpy(rsp_p, rsp, sizeof(*rsp_p));

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_sc_stats(struct otx2_nic *pfvf, u8 hw_sc_id,
			      struct mcs_sc_stats *rsp_p,
			      enum mcs_direction dir, bool clear)
{
	struct mcs_clear_stats *clear_req;
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_stats_req *req;
	struct mcs_sc_stats *rsp;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_get_sc_stats(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->id = hw_sc_id;
	req->dir = dir;

	if (!clear)
		goto send_msg;

	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
	if (!clear_req) {
		ret = -ENOMEM;
		goto fail;
	}
	clear_req->id = hw_sc_id;
	clear_req->dir = dir;
	clear_req->type = MCS_RSRC_TYPE_SC;

send_msg:
	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_sc_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
						       0, &req->hdr);
	if (IS_ERR(rsp)) {
		ret = PTR_ERR(rsp);
		goto fail;
	}

	memcpy(rsp_p, rsp, sizeof(*rsp_p));

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_secy_stats(struct otx2_nic *pfvf, u8 hw_secy_id,
				struct mcs_secy_stats *rsp_p,
				enum mcs_direction dir, bool clear)
{
	struct mcs_clear_stats *clear_req;
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_secy_stats *rsp;
	struct mcs_stats_req *req;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_get_secy_stats(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->id = hw_secy_id;
	req->dir = dir;

	if (!clear)
		goto send_msg;

	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
	if (!clear_req) {
		ret = -ENOMEM;
		goto fail;
	}
	clear_req->id = hw_secy_id;
	clear_req->dir = dir;
	clear_req->type = MCS_RSRC_TYPE_SECY;

send_msg:
	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_secy_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
							 0, &req->hdr);
	if (IS_ERR(rsp)) {
		ret = PTR_ERR(rsp);
		goto fail;
	}

	memcpy(rsp_p, rsp, sizeof(*rsp_p));

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

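/* Allocate the hardware resources backing a software SecY: a Tx flow
 * ID, paired Tx and Rx SecY policies, and a Tx SC.
 */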
static struct cn10k_mcs_txsc *cn10k_mcs_create_txsc(struct otx2_nic *pfvf)
{
	struct cn10k_mcs_txsc *txsc;
	int ret;

	txsc = kzalloc(sizeof(*txsc), GFP_KERNEL);
	if (!txsc)
		return ERR_PTR(-ENOMEM);

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
				   &txsc->hw_flow_id);
	if (ret)
		goto fail;

	/* For a SecY, one Tx SecY and one Rx SecY HW resource are needed */
	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
				   &txsc->hw_secy_id_tx);
	if (ret)
		goto free_flowid;

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
				   &txsc->hw_secy_id_rx);
	if (ret)
		goto free_tx_secy;

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC,
				   &txsc->hw_sc_id);
	if (ret)
		goto free_rx_secy;

	return txsc;
free_rx_secy:
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_rx, false);
free_tx_secy:
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_tx, false);
free_flowid:
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
			    txsc->hw_flow_id, false);
fail:
	kfree(txsc);
	return ERR_PTR(ret);
}

/* Free the Tx SC and its SA resources (if any) back to the AF */
static void cn10k_mcs_delete_txsc(struct otx2_nic *pfvf,
				  struct cn10k_mcs_txsc *txsc)
{
	u8 sa_bmap = txsc->sa_bmap;
	u8 sa_num = 0;

	while (sa_bmap) {
		if (sa_bmap & 1) {
			cn10k_mcs_write_tx_sa_plcy(pfvf, txsc->sw_secy,
						   txsc, sa_num);
			cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]);
		}
		sa_num++;
		sa_bmap >>= 1;
	}

	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC,
			    txsc->hw_sc_id, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_rx, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_tx, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
			    txsc->hw_flow_id, false);
}

static struct cn10k_mcs_rxsc *cn10k_mcs_create_rxsc(struct otx2_nic *pfvf)
{
	struct cn10k_mcs_rxsc *rxsc;
	int ret;

	rxsc = kzalloc(sizeof(*rxsc), GFP_KERNEL);
	if (!rxsc)
		return ERR_PTR(-ENOMEM);

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
				   &rxsc->hw_flow_id);
	if (ret)
		goto fail;

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC,
				   &rxsc->hw_sc_id);
	if (ret)
		goto free_flowid;

	return rxsc;
free_flowid:
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
			    rxsc->hw_flow_id, false);
fail:
	kfree(rxsc);
	return ERR_PTR(ret);
}

/* Free the Rx SC and its SA resources (if any) back to the AF */
static void cn10k_mcs_delete_rxsc(struct otx2_nic *pfvf,
				  struct cn10k_mcs_rxsc *rxsc)
{
	u8 sa_bmap = rxsc->sa_bmap;
	u8 sa_num = 0;

	while (sa_bmap) {
		if (sa_bmap & 1) {
			cn10k_mcs_write_rx_sa_plcy(pfvf, rxsc->sw_secy, rxsc,
						   sa_num, false);
			cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]);
		}
		sa_num++;
		sa_bmap >>= 1;
	}

	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC,
			    rxsc->hw_sc_id, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
			    rxsc->hw_flow_id, false);
}

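/* Program the Tx side of a SecY: the encoding SA (if given), the Tx
 * SecY policy and flow entry, and the paired Rx SecY policy.
 */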
static int cn10k_mcs_secy_tx_cfg(struct otx2_nic *pfvf, struct macsec_secy *secy,
				 struct cn10k_mcs_txsc *txsc,
				 struct macsec_tx_sa *sw_tx_sa, u8 sa_num)
{
	if (sw_tx_sa) {
		cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num);
		cn10k_write_tx_sa_pn(pfvf, txsc, sa_num, sw_tx_sa->next_pn);
		cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num,
					sw_tx_sa->active);
	}

	cn10k_mcs_write_tx_secy(pfvf, secy, txsc);
	cn10k_mcs_write_tx_flowid(pfvf, secy, txsc);
	/* When updating the SecY, update the Rx SecY policy as well */
	cn10k_mcs_write_rx_secy(pfvf, secy, txsc->hw_secy_id_rx);

	return 0;
}

static int cn10k_mcs_secy_rx_cfg(struct otx2_nic *pfvf,
				 struct macsec_secy *secy, u8 hw_secy_id)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_rxsc *mcs_rx_sc;
	struct macsec_rx_sc *sw_rx_sc;
	struct macsec_rx_sa *sw_rx_sa;
	u8 sa_num;

	for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active;
	     sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) {
		mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
		if (unlikely(!mcs_rx_sc))
			continue;

		for (sa_num = 0; sa_num < CN10K_MCS_SA_PER_SC; sa_num++) {
			sw_rx_sa = rcu_dereference_bh(sw_rx_sc->sa[sa_num]);
			if (!sw_rx_sa)
				continue;

			cn10k_mcs_write_rx_sa_plcy(pfvf, secy, mcs_rx_sc,
						   sa_num, sw_rx_sa->active);
			cn10k_mcs_write_rx_sa_pn(pfvf, mcs_rx_sc, sa_num,
						 sw_rx_sa->next_pn);
		}

		cn10k_mcs_write_rx_flowid(pfvf, mcs_rx_sc, hw_secy_id);
		cn10k_mcs_write_sc_cam(pfvf, mcs_rx_sc, hw_secy_id);
	}

	return 0;
}

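/* Disable the Rx flow entries of all SCs belonging to this SecY and,
 * if @delete is set, tear the SCs down and free their resources.
 */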
static int cn10k_mcs_disable_rxscs(struct otx2_nic *pfvf,
				   struct macsec_secy *secy,
				   bool delete)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_rxsc *mcs_rx_sc;
	struct macsec_rx_sc *sw_rx_sc;
	int ret;

	for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active;
	     sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) {
		mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
		if (unlikely(!mcs_rx_sc))
			continue;

		ret = cn10k_mcs_ena_dis_flowid(pfvf, mcs_rx_sc->hw_flow_id,
					       false, MCS_RX);
		if (ret)
			dev_err(pfvf->dev, "Failed to disable TCAM for SC %d\n",
				mcs_rx_sc->hw_sc_id);
		if (delete) {
			cn10k_mcs_delete_rxsc(pfvf, mcs_rx_sc);
			list_del(&mcs_rx_sc->entry);
			kfree(mcs_rx_sc);
		}
	}

	return 0;
}

static void cn10k_mcs_sync_stats(struct otx2_nic *pfvf, struct macsec_secy *secy,
				 struct cn10k_mcs_txsc *txsc)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_secy_stats rx_rsp = { 0 };
	struct mcs_sc_stats sc_rsp = { 0 };
	struct cn10k_mcs_rxsc *rxsc;

	/* Because some stats share hardware counters, take a snapshot
	 * of the current stats and reset the counters when the SecY
	 * policy is updated. The stats below are the ones affected by
	 * shared counters.
	 */

	/* Check if sync is really needed */
	if (secy->validate_frames == txsc->last_validate_frames &&
	    secy->replay_protect == txsc->last_replay_protect)
		return;

	cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);

	txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt;
	txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt;
	txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt;
	if (txsc->last_validate_frames == MACSEC_VALIDATE_STRICT)
		txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt;
	else
		txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt;

	list_for_each_entry(rxsc, &cfg->rxsc_list, entry) {
		cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &sc_rsp, MCS_RX, true);

		rxsc->stats.InOctetsValidated += sc_rsp.octet_validate_cnt;
		rxsc->stats.InOctetsDecrypted += sc_rsp.octet_decrypt_cnt;

		rxsc->stats.InPktsInvalid += sc_rsp.pkt_invalid_cnt;
		rxsc->stats.InPktsNotValid += sc_rsp.pkt_notvalid_cnt;

		if (txsc->last_replay_protect)
			rxsc->stats.InPktsLate += sc_rsp.pkt_late_cnt;
		else
			rxsc->stats.InPktsDelayed += sc_rsp.pkt_late_cnt;

		if (txsc->last_validate_frames == MACSEC_VALIDATE_DISABLED)
			rxsc->stats.InPktsUnchecked += sc_rsp.pkt_unchecked_cnt;
		else
			rxsc->stats.InPktsOK += sc_rsp.pkt_unchecked_cnt;
	}

	txsc->last_validate_frames = secy->validate_frames;
	txsc->last_replay_protect = secy->replay_protect;
}

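/* The mdo_* handlers below implement struct macsec_ops and are
 * invoked by the core MACsec offload layer.
 */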
static int cn10k_mdo_open(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct macsec_tx_sa *sw_tx_sa;
	struct cn10k_mcs_txsc *txsc;
	u8 sa_num;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	sa_num = txsc->encoding_sa;
	sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);

	err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, sw_tx_sa, sa_num);
	if (err)
		return err;

	return cn10k_mcs_secy_rx_cfg(pfvf, secy, txsc->hw_secy_id_rx);
}

static int cn10k_mdo_stop(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	err = cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX);
	if (err)
		return err;

	return cn10k_mcs_disable_rxscs(pfvf, ctx->secy, false);
}

static int cn10k_mdo_add_secy(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct cn10k_mcs_txsc *txsc;

	if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
		return -EOPNOTSUPP;

	txsc = cn10k_mcs_create_txsc(pfvf);
	if (IS_ERR(txsc))
		return -ENOSPC;

	txsc->sw_secy = secy;
	txsc->encoding_sa = secy->tx_sc.encoding_sa;
	txsc->last_validate_frames = secy->validate_frames;
	txsc->last_replay_protect = secy->replay_protect;
	txsc->vlan_dev = is_vlan_dev(ctx->netdev);

	list_add(&txsc->entry, &cfg->txsc_list);

	if (netif_running(secy->netdev))
		return cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, NULL, 0);

	return 0;
}

static int cn10k_mdo_upd_secy(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct macsec_tx_sa *sw_tx_sa;
	struct cn10k_mcs_txsc *txsc;
	bool active;
	u8 sa_num;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	/* Encoding SA got changed */
	if (txsc->encoding_sa != secy->tx_sc.encoding_sa) {
		txsc->encoding_sa = secy->tx_sc.encoding_sa;
		sa_num = txsc->encoding_sa;
		sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);
		active = sw_tx_sa ? sw_tx_sa->active : false;
		cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num, active);
	}

	if (netif_running(secy->netdev)) {
		cn10k_mcs_sync_stats(pfvf, secy, txsc);

		err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, NULL, 0);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_del_secy(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX);
	cn10k_mcs_disable_rxscs(pfvf, ctx->secy, true);
	cn10k_mcs_delete_txsc(pfvf, txsc);
	list_del(&txsc->entry);
	kfree(txsc);

	return 0;
}

static int cn10k_mdo_add_txsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (cn10k_mcs_alloc_txsa(pfvf, &txsc->hw_sa_id[sa_num]))
		return -ENOSPC;

	memcpy(&txsc->sa_key[sa_num], ctx->sa.key, secy->key_len);
	memcpy(&txsc->salt[sa_num], sw_tx_sa->key.salt.bytes, MACSEC_SALT_LEN);
	txsc->ssci[sa_num] = sw_tx_sa->ssci;

	txsc->sa_bmap |= 1 << sa_num;

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num);
		if (err)
			return err;

		err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
					   sw_tx_sa->next_pn);
		if (err)
			return err;

		err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
					      sa_num, sw_tx_sa->active);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_upd_txsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (netif_running(secy->netdev)) {
		/* Keys cannot be changed after creation */
		if (ctx->sa.update_pn) {
			err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
						   sw_tx_sa->next_pn);
			if (err)
				return err;
		}

		err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
					      sa_num, sw_tx_sa->active);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_del_txsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]);
	txsc->sa_bmap &= ~(1 << sa_num);

	return 0;
}

static int cn10k_mdo_add_rxsc(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct cn10k_mcs_rxsc *rxsc;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	rxsc = cn10k_mcs_create_rxsc(pfvf);
	if (IS_ERR(rxsc))
		return -ENOSPC;

	rxsc->sw_secy = ctx->secy;
	rxsc->sw_rxsc = ctx->rx_sc;
	list_add(&rxsc->entry, &cfg->rxsc_list);

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_rx_flowid(pfvf, rxsc, txsc->hw_secy_id_rx);
		if (err)
			return err;

		err = cn10k_mcs_write_sc_cam(pfvf, rxsc, txsc->hw_secy_id_rx);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_upd_rxsc(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	bool enable = ctx->rx_sc->active;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (netif_running(secy->netdev))
		return cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id,
						enable, MCS_RX);

	return 0;
}

static int cn10k_mdo_del_rxsc(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, ctx->rx_sc);
	if (!rxsc)
		return -ENOENT;

	cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id, false, MCS_RX);
	cn10k_mcs_delete_rxsc(pfvf, rxsc);
	list_del(&rxsc->entry);
	kfree(rxsc);

	return 0;
}

static int cn10k_mdo_add_rxsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
	struct macsec_secy *secy = ctx->secy;
	bool sa_in_use = rx_sa->active;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;
	int err;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (cn10k_mcs_alloc_rxsa(pfvf, &rxsc->hw_sa_id[sa_num]))
		return -ENOSPC;

	memcpy(&rxsc->sa_key[sa_num], ctx->sa.key, ctx->secy->key_len);
	memcpy(&rxsc->salt[sa_num], rx_sa->key.salt.bytes, MACSEC_SALT_LEN);
	rxsc->ssci[sa_num] = rx_sa->ssci;

	rxsc->sa_bmap |= 1 << sa_num;

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc,
						 sa_num, sa_in_use);
		if (err)
			return err;

		err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num,
					       rx_sa->next_pn);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_upd_rxsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
	struct macsec_secy *secy = ctx->secy;
	bool sa_in_use = rx_sa->active;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;
	int err;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc, sa_num, sa_in_use);
		if (err)
			return err;

		if (!ctx->sa.update_pn)
			return 0;

		err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num,
					       rx_sa->next_pn);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_del_rxsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_write_rx_sa_plcy(pfvf, ctx->secy, rxsc, sa_num, false);
	cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]);

	rxsc->sa_bmap &= ~(1 << sa_num);

	return 0;
}

static int cn10k_mdo_get_dev_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct mcs_secy_stats tx_rsp = { 0 }, rx_rsp = { 0 };
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_tx, &tx_rsp, MCS_TX, false);
	ctx->stats.dev_stats->OutPktsUntagged = tx_rsp.pkt_untagged_cnt;
	ctx->stats.dev_stats->OutPktsTooLong = tx_rsp.pkt_toolong_cnt;

	cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);
	txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt;
	txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt;
	txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt;
	if (secy->validate_frames == MACSEC_VALIDATE_STRICT)
		txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt;
	else
		txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt;
	txsc->stats.InPktsOverrun = 0;

	ctx->stats.dev_stats->InPktsNoTag = txsc->stats.InPktsNoTag;
	ctx->stats.dev_stats->InPktsUntagged = txsc->stats.InPktsUntagged;
	ctx->stats.dev_stats->InPktsBadTag = txsc->stats.InPktsBadTag;
	ctx->stats.dev_stats->InPktsUnknownSCI = txsc->stats.InPktsUnknownSCI;
	ctx->stats.dev_stats->InPktsNoSCI = txsc->stats.InPktsNoSCI;
	ctx->stats.dev_stats->InPktsOverrun = txsc->stats.InPktsOverrun;

	return 0;
}

static int cn10k_mdo_get_tx_sc_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_sc_stats rsp = { 0 };
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	cn10k_mcs_sc_stats(pfvf, txsc->hw_sc_id, &rsp, MCS_TX, false);

	ctx->stats.tx_sc_stats->OutPktsProtected = rsp.pkt_protected_cnt;
	ctx->stats.tx_sc_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt;
	ctx->stats.tx_sc_stats->OutOctetsProtected = rsp.octet_protected_cnt;
	ctx->stats.tx_sc_stats->OutOctetsEncrypted = rsp.octet_encrypt_cnt;

	return 0;
}

static int cn10k_mdo_get_tx_sa_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_sa_stats rsp = { 0 };
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_sa_stats(pfvf, txsc->hw_sa_id[sa_num], &rsp, MCS_TX, false);

	ctx->stats.tx_sa_stats->OutPktsProtected = rsp.pkt_protected_cnt;
	ctx->stats.tx_sa_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt;

	return 0;
}

static int cn10k_mdo_get_rx_sc_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct mcs_sc_stats rsp = { 0 };
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc);
	if (!rxsc)
		return -ENOENT;

	cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &rsp, MCS_RX, true);

	rxsc->stats.InOctetsValidated += rsp.octet_validate_cnt;
	rxsc->stats.InOctetsDecrypted += rsp.octet_decrypt_cnt;

	rxsc->stats.InPktsInvalid += rsp.pkt_invalid_cnt;
	rxsc->stats.InPktsNotValid += rsp.pkt_notvalid_cnt;

	if (secy->replay_protect)
		rxsc->stats.InPktsLate += rsp.pkt_late_cnt;
	else
		rxsc->stats.InPktsDelayed += rsp.pkt_late_cnt;

	if (secy->validate_frames == MACSEC_VALIDATE_DISABLED)
		rxsc->stats.InPktsUnchecked += rsp.pkt_unchecked_cnt;
	else
		rxsc->stats.InPktsOK += rsp.pkt_unchecked_cnt;

	ctx->stats.rx_sc_stats->InOctetsValidated = rxsc->stats.InOctetsValidated;
	ctx->stats.rx_sc_stats->InOctetsDecrypted = rxsc->stats.InOctetsDecrypted;
	ctx->stats.rx_sc_stats->InPktsInvalid = rxsc->stats.InPktsInvalid;
	ctx->stats.rx_sc_stats->InPktsNotValid = rxsc->stats.InPktsNotValid;
	ctx->stats.rx_sc_stats->InPktsLate = rxsc->stats.InPktsLate;
	ctx->stats.rx_sc_stats->InPktsDelayed = rxsc->stats.InPktsDelayed;
	ctx->stats.rx_sc_stats->InPktsUnchecked = rxsc->stats.InPktsUnchecked;
	ctx->stats.rx_sc_stats->InPktsOK = rxsc->stats.InPktsOK;

	return 0;
}

static int cn10k_mdo_get_rx_sa_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_sa_stats rsp = { 0 };
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_sa_stats(pfvf, rxsc->hw_sa_id[sa_num], &rsp, MCS_RX, false);

	ctx->stats.rx_sa_stats->InPktsOK = rsp.pkt_ok_cnt;
	ctx->stats.rx_sa_stats->InPktsInvalid = rsp.pkt_invalid_cnt;
	ctx->stats.rx_sa_stats->InPktsNotValid = rsp.pkt_notvalid_cnt;
	ctx->stats.rx_sa_stats->InPktsNotUsingSA = rsp.pkt_nosaerror_cnt;
	ctx->stats.rx_sa_stats->InPktsUnusedSA = rsp.pkt_nosa_cnt;

	return 0;
}

static const struct macsec_ops cn10k_mcs_ops = {
	.mdo_dev_open = cn10k_mdo_open,
	.mdo_dev_stop = cn10k_mdo_stop,
	.mdo_add_secy = cn10k_mdo_add_secy,
	.mdo_upd_secy = cn10k_mdo_upd_secy,
	.mdo_del_secy = cn10k_mdo_del_secy,
	.mdo_add_rxsc = cn10k_mdo_add_rxsc,
	.mdo_upd_rxsc = cn10k_mdo_upd_rxsc,
	.mdo_del_rxsc = cn10k_mdo_del_rxsc,
	.mdo_add_rxsa = cn10k_mdo_add_rxsa,
	.mdo_upd_rxsa = cn10k_mdo_upd_rxsa,
	.mdo_del_rxsa = cn10k_mdo_del_rxsa,
	.mdo_add_txsa = cn10k_mdo_add_txsa,
	.mdo_upd_txsa = cn10k_mdo_upd_txsa,
	.mdo_del_txsa = cn10k_mdo_del_txsa,
	.mdo_get_dev_stats = cn10k_mdo_get_dev_stats,
	.mdo_get_tx_sc_stats = cn10k_mdo_get_tx_sc_stats,
	.mdo_get_tx_sa_stats = cn10k_mdo_get_tx_sa_stats,
	.mdo_get_rx_sc_stats = cn10k_mdo_get_rx_sc_stats,
	.mdo_get_rx_sa_stats = cn10k_mdo_get_rx_sa_stats,
};

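/* Called on an AF interrupt notification when a Tx SA's packet number
 * is about to wrap: find the SecY/SA owning the hardware SA and let
 * the core MACsec layer handle the PN wrap via macsec_pn_wrapped().
 */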
void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_tx_sa *sw_tx_sa = NULL;
	struct macsec_secy *secy = NULL;
	struct cn10k_mcs_txsc *txsc;
	u8 an;

	if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
		return;

	if (!(event->intr_mask & MCS_CPM_TX_PACKET_XPN_EQ0_INT))
		return;

	/* Find the SecY to which the expired hardware SA is mapped */
	list_for_each_entry(txsc, &cfg->txsc_list, entry) {
		for (an = 0; an < CN10K_MCS_SA_PER_SC; an++)
			if (txsc->hw_sa_id[an] == event->sa_id) {
				secy = txsc->sw_secy;
				sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[an]);
			}
	}

	if (secy && sw_tx_sa)
		macsec_pn_wrapped(secy, sw_tx_sa);
}

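/* Advertise MACsec offload capability on the netdev and subscribe to
 * the PN-wrap interrupt from the AF.
 */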
int cn10k_mcs_init(struct otx2_nic *pfvf)
{
	struct mbox *mbox = &pfvf->mbox;
	struct cn10k_mcs_cfg *cfg;
	struct mcs_intr_cfg *req;

	if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
		return 0;

	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;

	INIT_LIST_HEAD(&cfg->txsc_list);
	INIT_LIST_HEAD(&cfg->rxsc_list);
	pfvf->macsec_cfg = cfg;

	pfvf->netdev->features |= NETIF_F_HW_MACSEC;
	pfvf->netdev->macsec_ops = &cn10k_mcs_ops;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_intr_cfg(mbox);
	if (!req)
		goto fail;

	req->intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;

	if (otx2_sync_mbox_msg(mbox))
		goto fail;

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	dev_err(pfvf->dev, "Cannot notify PN wrapped event\n");
	mutex_unlock(&mbox->lock);
	return 0;
}

void cn10k_mcs_free(struct otx2_nic *pfvf)
{
	if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
		return;

	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY, 0, true);
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY, 0, true);
	kfree(pfvf->macsec_cfg);
	pfvf->macsec_cfg = NULL;
}