/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2017 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *      Atul Gupta (atul.gupta@chelsio.com)
 */

#define pr_fmt(fmt) "ch_ipsec: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <net/esp.h>
#include <net/xfrm.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "chcr_ipsec.h"

/*
 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
 * into a WR.
 */
#define MAX_IMM_TX_PKT_LEN 256
#define GCM_ESP_IV_SIZE 8

static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);

static bool ch_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
static int ch_ipsec_uld_state_change(void *handle, enum cxgb4_state new_state);
static int ch_ipsec_xmit(struct sk_buff *skb, struct net_device *dev);
static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop);
static void ch_ipsec_advance_esn_state(struct xfrm_state *x);
static void ch_ipsec_xfrm_free_state(struct xfrm_state *x);
static void ch_ipsec_xfrm_del_state(struct xfrm_state *x);
static int ch_ipsec_xfrm_add_state(struct xfrm_state *x);

static const struct xfrmdev_ops ch_ipsec_xfrmdev_ops = {
        .xdo_dev_state_add = ch_ipsec_xfrm_add_state,
        .xdo_dev_state_delete = ch_ipsec_xfrm_del_state,
        .xdo_dev_state_free = ch_ipsec_xfrm_free_state,
        .xdo_dev_offload_ok = ch_ipsec_offload_ok,
        .xdo_dev_state_advance_esn = ch_ipsec_advance_esn_state,
};

static struct cxgb4_uld_info ch_ipsec_uld_info = {
        .name = CHIPSEC_DRV_MODULE_NAME,
        .nrxq = MAX_ULD_QSETS,
        /* Max ntxq will be derived from fw config file */
        .rxq_size = 1024,
        .add = ch_ipsec_uld_add,
        .state_change = ch_ipsec_uld_state_change,
        .tx_handler = ch_ipsec_xmit,
        .xfrmdev_ops = &ch_ipsec_xfrmdev_ops,
};

static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop)
{
        struct ipsec_uld_ctx *u_ctx;

        pr_info_once("%s - version %s\n", CHIPSEC_DRV_DESC,
                     CHIPSEC_DRV_VERSION);
        u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
        if (!u_ctx) {
                u_ctx = ERR_PTR(-ENOMEM);
                goto out;
        }
        u_ctx->lldi = *infop;
out:
        return u_ctx;
}

static int ch_ipsec_uld_state_change(void *handle, enum cxgb4_state new_state)
{
        struct ipsec_uld_ctx *u_ctx = handle;

        pr_debug("new_state %u\n", new_state);
        switch (new_state) {
        case CXGB4_STATE_UP:
                pr_info("%s: Up\n", pci_name(u_ctx->lldi.pdev));
                mutex_lock(&dev_mutex);
                list_add_tail(&u_ctx->entry, &uld_ctx_list);
                mutex_unlock(&dev_mutex);
                break;
        case CXGB4_STATE_START_RECOVERY:
        case CXGB4_STATE_DOWN:
        case CXGB4_STATE_DETACH:
                pr_info("%s: Down\n", pci_name(u_ctx->lldi.pdev));
                list_del(&u_ctx->entry);
                break;
        default:
                break;
        }

        return 0;
}

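/* Map the negotiated ICV length (in bytes) to the hardware HMAC control
 * encoding used in the SCMD, and record the authsize in the SA entry.
 * Returns the control value, or -EINVAL for an unsupported ICV size.
 */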
static int ch_ipsec_setauthsize(struct xfrm_state *x,
                                struct ipsec_sa_entry *sa_entry)
{
        int hmac_ctrl;
        int authsize = x->aead->alg_icv_len / 8;

        sa_entry->authsize = authsize;

        switch (authsize) {
        case ICV_8:
                hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
                break;
        case ICV_12:
                hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
                break;
        case ICV_16:
                hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
                break;
        default:
                return -EINVAL;
        }
        return hmac_ctrl;
}

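/* Split the RFC 4106 style key material: the last four bytes of the
 * algorithm key are the nonce/salt, the rest is the AES key. The GHASH
 * subkey H = AES_K(0^128) is computed in software and stored right after
 * the cipher key so both can be copied into the hardware key context.
 */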
static int ch_ipsec_setkey(struct xfrm_state *x,
                           struct ipsec_sa_entry *sa_entry)
{
        int keylen = (x->aead->alg_key_len + 7) / 8;
        unsigned char *key = x->aead->alg_key;
        int ck_size, key_ctx_size = 0;
        unsigned char ghash_h[AEAD_H_SIZE];
        struct crypto_aes_ctx aes;
        int ret = 0;

        if (keylen > 3) {
                keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
                memcpy(sa_entry->salt, key + keylen, 4);
        }

        if (keylen == AES_KEYSIZE_128) {
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
        } else if (keylen == AES_KEYSIZE_192) {
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
        } else if (keylen == AES_KEYSIZE_256) {
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
        } else {
                pr_err("GCM: Invalid key length %d\n", keylen);
                ret = -EINVAL;
                goto out;
        }

        memcpy(sa_entry->key, key, keylen);
        sa_entry->enckey_len = keylen;
        key_ctx_size = sizeof(struct _key_ctx) +
                       ((DIV_ROUND_UP(keylen, 16)) << 4) +
                       AEAD_H_SIZE;

        sa_entry->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
                                                 CHCR_KEYCTX_MAC_KEY_SIZE_128,
                                                 0, 0,
                                                 key_ctx_size >> 4);

        /* Calculate the H = CIPH(K, 0 repeated 16 times).
         * It will go in key context
         */
        ret = aes_expandkey(&aes, key, keylen);
        if (ret) {
                sa_entry->enckey_len = 0;
                goto out;
        }
        memset(ghash_h, 0, AEAD_H_SIZE);
        aes_encrypt(&aes, ghash_h, ghash_h);
        memzero_explicit(&aes, sizeof(aes));

        memcpy(sa_entry->key + (DIV_ROUND_UP(sa_entry->enckey_len, 16) *
               16), ghash_h, AEAD_H_SIZE);
        sa_entry->kctx_len = ((DIV_ROUND_UP(sa_entry->enckey_len, 16)) << 4) +
                              AEAD_H_SIZE;
out:
        return ret;
}

/*
 * ch_ipsec_xfrm_add_state
 * returns 0 on success, or a negative error if the xfrm state cannot be
 * offloaded or memory for the SA entry cannot be allocated
 */
static int ch_ipsec_xfrm_add_state(struct xfrm_state *x)
{
        struct ipsec_sa_entry *sa_entry;
        int res = 0;

        if (x->props.aalgo != SADB_AALG_NONE) {
                pr_debug("Cannot offload authenticated xfrm states\n");
                return -EINVAL;
        }
        if (x->props.calgo != SADB_X_CALG_NONE) {
                pr_debug("Cannot offload compressed xfrm states\n");
                return -EINVAL;
        }
        if (x->props.family != AF_INET &&
            x->props.family != AF_INET6) {
                pr_debug("Only IPv4/6 xfrm state offloaded\n");
                return -EINVAL;
        }
        if (x->props.mode != XFRM_MODE_TRANSPORT &&
            x->props.mode != XFRM_MODE_TUNNEL) {
                pr_debug("Only transport and tunnel xfrm offload\n");
                return -EINVAL;
        }
        if (x->id.proto != IPPROTO_ESP) {
                pr_debug("Only ESP xfrm state offloaded\n");
                return -EINVAL;
        }
        if (x->encap) {
                pr_debug("Encapsulated xfrm state not offloaded\n");
                return -EINVAL;
        }
        if (!x->aead) {
                pr_debug("Cannot offload xfrm states without aead\n");
                return -EINVAL;
        }
        if (x->aead->alg_icv_len != 128 &&
            x->aead->alg_icv_len != 96) {
                pr_debug("Cannot offload xfrm states with AEAD ICV length other than 96b & 128b\n");
                return -EINVAL;
        }
        if ((x->aead->alg_key_len != 128 + 32) &&
            (x->aead->alg_key_len != 256 + 32)) {
                pr_debug("cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
                return -EINVAL;
        }
        if (x->tfcpad) {
                pr_debug("Cannot offload xfrm states with tfc padding\n");
                return -EINVAL;
        }
        if (!x->geniv) {
                pr_debug("Cannot offload xfrm states without geniv\n");
                return -EINVAL;
        }
        if (strcmp(x->geniv, "seqiv")) {
                pr_debug("Cannot offload xfrm states with geniv other than seqiv\n");
                return -EINVAL;
        }

        sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
        if (!sa_entry) {
                res = -ENOMEM;
                goto out;
        }

        sa_entry->hmac_ctrl = ch_ipsec_setauthsize(x, sa_entry);
        if (x->props.flags & XFRM_STATE_ESN)
                sa_entry->esn = 1;
        ch_ipsec_setkey(x, sa_entry);
        x->xso.offload_handle = (unsigned long)sa_entry;
        try_module_get(THIS_MODULE);
out:
        return res;
}

static void ch_ipsec_xfrm_del_state(struct xfrm_state *x)
{
        /* do nothing */
        if (!x->xso.offload_handle)
                return;
}

static void ch_ipsec_xfrm_free_state(struct xfrm_state *x)
{
        struct ipsec_sa_entry *sa_entry;

        if (!x->xso.offload_handle)
                return;

        sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
        kfree(sa_entry);
        module_put(THIS_MODULE);
}

static bool ch_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
        if (x->props.family == AF_INET) {
                /* Offload with IP options is not supported yet */
                if (ip_hdr(skb)->ihl > 5)
                        return false;
        } else {
                /* Offload with IPv6 extension headers is not supported yet */
                if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
                        return false;
        }
        return true;
}

static void ch_ipsec_advance_esn_state(struct xfrm_state *x)
{
        /* do nothing */
        if (!x->xso.offload_handle)
                return;
}

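/* Return the total header length (WR + CPLs + key context) if the packet
 * is small enough to be sent as immediate data inside the work request,
 * or 0 if a scatter/gather list is needed instead.
 */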
static int is_eth_imm(const struct sk_buff *skb,
                      struct ipsec_sa_entry *sa_entry)
{
        unsigned int kctx_len;
        int hdrlen;

        kctx_len = sa_entry->kctx_len;
        hdrlen = sizeof(struct fw_ulptx_wr) +
                 sizeof(struct chcr_ipsec_req) + kctx_len;

        hdrlen += sizeof(struct cpl_tx_pkt);
        if (sa_entry->esn)
                hdrlen += (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16)
                           << 4);
        if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
                return hdrlen;
        return 0;
}

static unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
                                      struct ipsec_sa_entry *sa_entry,
                                      bool *immediate)
{
        unsigned int kctx_len;
        unsigned int flits;
        int aadivlen;
        int hdrlen;

        kctx_len = sa_entry->kctx_len;
        hdrlen = is_eth_imm(skb, sa_entry);
        aadivlen = sa_entry->esn ? DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
                                                16) : 0;
        aadivlen <<= 4;

        /* If the skb is small enough, we can pump it out as a work request
         * with only immediate data. In that case we just have to have the
         * TX Packet header plus the skb data in the Work Request.
         */

        if (hdrlen) {
                *immediate = true;
                return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
        }

        flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);

        /* Otherwise, we're going to have to construct a Scatter gather list
         * of the skb body and fragments. We also include the flits necessary
         * for the TX Packet Work Request and CPL. We always have a firmware
         * Write Header (incorporated as part of the cpl_tx_pkt_lso and
         * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
         * message or, if we're doing a Large Send Offload, an LSO CPL message
         * with an embedded TX Packet Write CPL message.
         */
        flits += (sizeof(struct fw_ulptx_wr) +
                  sizeof(struct chcr_ipsec_req) +
                  kctx_len +
                  sizeof(struct cpl_tx_pkt_core) +
                  aadivlen) / sizeof(__be64);
        return flits;
}

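/* Build the per-packet AAD/IV block used for ESN states: the SPI, the full
 * 64-bit ESP sequence number (low 32 bits from the ESP header, high 32 bits
 * from the xfrm offload state) and the 8-byte GCM IV, laid out as
 * struct chcr_ipsec_aadiv ahead of the packet data.
 */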
static void *copy_esn_pktxt(struct sk_buff *skb,
                            struct net_device *dev,
                            void *pos,
                            struct ipsec_sa_entry *sa_entry)
{
        struct chcr_ipsec_aadiv *aadiv;
        struct ulptx_idata *sc_imm;
        struct ip_esp_hdr *esphdr;
        struct xfrm_offload *xo;
        struct sge_eth_txq *q;
        struct adapter *adap;
        struct port_info *pi;
        __be64 seqno;
        u32 qidx;
        u32 seqlo;
        u8 *iv;
        int eoq;
        int len;

        pi = netdev_priv(dev);
        adap = pi->adapter;
        qidx = skb->queue_mapping;
        q = &adap->sge.ethtxq[qidx + pi->first_qset];

        /* end of queue, reset pos to start of queue */
        eoq = (void *)q->q.stat - pos;
        if (!eoq)
                pos = q->q.desc;

        len = DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16) << 4;
        memset(pos, 0, len);
        aadiv = (struct chcr_ipsec_aadiv *)pos;
        esphdr = (struct ip_esp_hdr *)skb_transport_header(skb);
        iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
        xo = xfrm_offload(skb);

        aadiv->spi = (esphdr->spi);
        seqlo = ntohl(esphdr->seq_no);
        seqno = cpu_to_be64(seqlo + ((u64)xo->seq.hi << 32));
        memcpy(aadiv->seq_no, &seqno, 8);
        iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
        memcpy(aadiv->iv, iv, 8);

        if (is_eth_imm(skb, sa_entry) && !skb_is_nonlinear(skb)) {
                sc_imm = (struct ulptx_idata *)(pos +
                          (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
                                        sizeof(__be64)) << 3));
                sc_imm->cmd_more = FILL_CMD_MORE(0);
                sc_imm->len = cpu_to_be32(skb->len);
        }
        pos += len;
        return pos;
}

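/* Write the CPL_TX_PKT_XT command for the packet: checksum offload is
 * disabled (the payload is encrypted), VLAN insertion is requested when a
 * VLAN tag is present, and the ESN AAD/IV block is appended for ESN states.
 */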
static void *copy_cpltx_pktxt(struct sk_buff *skb,
                              struct net_device *dev,
                              void *pos,
                              struct ipsec_sa_entry *sa_entry)
{
        struct cpl_tx_pkt_core *cpl;
        struct sge_eth_txq *q;
        struct adapter *adap;
        struct port_info *pi;
        u32 ctrl0, qidx;
        u64 cntrl = 0;
        int left;

        pi = netdev_priv(dev);
        adap = pi->adapter;
        qidx = skb->queue_mapping;
        q = &adap->sge.ethtxq[qidx + pi->first_qset];

        left = (void *)q->q.stat - pos;
        if (!left)
                pos = q->q.desc;

        cpl = (struct cpl_tx_pkt_core *)pos;

        cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
        ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
                TXPKT_PF_V(adap->pf);
        if (skb_vlan_tag_present(skb)) {
                q->vlan_ins++;
                cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
        }

        cpl->ctrl0 = htonl(ctrl0);
        cpl->pack = htons(0);
        cpl->len = htons(skb->len);
        cpl->ctrl1 = cpu_to_be64(cntrl);

        pos += sizeof(struct cpl_tx_pkt_core);
        /* Copy ESN info for HW */
        if (sa_entry->esn)
                pos = copy_esn_pktxt(skb, dev, pos, sa_entry);
        return pos;
}

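/* Copy the key context header, salt and key material into the descriptor
 * ring, wrapping back to the start of the ring when the key straddles the
 * end of queue, then emit the CPL_TX_PKT_XT that follows it.
 */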
static void *copy_key_cpltx_pktxt(struct sk_buff *skb,
                                  struct net_device *dev,
                                  void *pos,
                                  struct ipsec_sa_entry *sa_entry)
{
        struct _key_ctx *key_ctx;
        int left, eoq, key_len;
        struct sge_eth_txq *q;
        struct adapter *adap;
        struct port_info *pi;
        unsigned int qidx;

        pi = netdev_priv(dev);
        adap = pi->adapter;
        qidx = skb->queue_mapping;
        q = &adap->sge.ethtxq[qidx + pi->first_qset];
        key_len = sa_entry->kctx_len;

        /* end of queue, reset pos to start of queue */
        eoq = (void *)q->q.stat - pos;
        left = eoq;
        if (!eoq) {
                pos = q->q.desc;
                left = 64 * q->q.size;
        }

        /* Copy the Key context header */
        key_ctx = (struct _key_ctx *)pos;
        key_ctx->ctx_hdr = sa_entry->key_ctx_hdr;
        memcpy(key_ctx->salt, sa_entry->salt, MAX_SALT);
        pos += sizeof(struct _key_ctx);
        left -= sizeof(struct _key_ctx);

        if (likely(key_len <= left)) {
                memcpy(key_ctx->key, sa_entry->key, key_len);
                pos += key_len;
        } else {
                memcpy(pos, sa_entry->key, left);
                memcpy(q->q.desc, sa_entry->key + left,
                       key_len - left);
                pos = (u8 *)q->q.desc + (key_len - left);
        }
        /* Copy CPL TX PKT XT */
        pos = copy_cpltx_pktxt(skb, dev, pos, sa_entry);

        return pos;
}

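/* Construct the crypto work request for one ESP packet: a FW_ULPTX_WR
 * header, ULP_TXPKT and immediate-data sub-commands, the CPL_TX_SEC_PDU
 * describing the AAD/IV/cipher offsets for AES-GCM, the key context and
 * the CPL_TX_PKT_XT. Returns the position where the packet data or SGL
 * should be written next.
 */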
static void *ch_ipsec_crypto_wreq(struct sk_buff *skb,
                                  struct net_device *dev,
                                  void *pos,
                                  int credits,
                                  struct ipsec_sa_entry *sa_entry)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        unsigned int ivsize = GCM_ESP_IV_SIZE;
        struct chcr_ipsec_wr *wr;
        bool immediate = false;
        u16 immdatalen = 0;
        unsigned int flits;
        u32 ivinoffset;
        u32 aadstart;
        u32 aadstop;
        u32 ciphstart;
        u16 sc_more = 0;
        u32 ivdrop = 0;
        u32 esnlen = 0;
        u32 wr_mid;
        u16 ndesc;
        int qidx = skb_get_queue_mapping(skb);
        struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
        unsigned int kctx_len = sa_entry->kctx_len;
        int qid = q->q.cntxt_id;

        atomic_inc(&adap->ch_ipsec_stats.ipsec_cnt);

        flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
        ndesc = DIV_ROUND_UP(flits, 2);
        if (sa_entry->esn)
                ivdrop = 1;

        if (immediate)
                immdatalen = skb->len;

        if (sa_entry->esn) {
                esnlen = sizeof(struct chcr_ipsec_aadiv);
                if (!skb_is_nonlinear(skb))
                        sc_more = 1;
        }

        /* WR Header */
        wr = (struct chcr_ipsec_wr *)pos;
        wr->wreq.op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
        wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(ndesc);

        if (unlikely(credits < ETHTXQ_STOP_THRES)) {
                netif_tx_stop_queue(q->txq);
                q->q.stops++;
                if (!q->dbqt)
                        wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
        }
        wr_mid |= FW_ULPTX_WR_DATA_F;
        wr->wreq.flowid_len16 = htonl(wr_mid);

        /* ULPTX */
        wr->req.ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(pi->port_id, qid);
        wr->req.ulptx.len = htonl(ndesc - 1);

        /* Sub-command */
        wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen || sc_more);
        wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
                                         sizeof(wr->req.key_ctx) +
                                         kctx_len +
                                         sizeof(struct cpl_tx_pkt_core) +
                                         esnlen +
                                         (esnlen ? 0 : immdatalen));

        /* CPL_SEC_PDU */
        ivinoffset = sa_entry->esn ? (ESN_IV_INSERT_OFFSET + 1) :
                                     (skb_transport_offset(skb) +
                                      sizeof(struct ip_esp_hdr) + 1);
        wr->req.sec_cpl.op_ivinsrtofst = htonl(
                                CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
                                CPL_TX_SEC_PDU_CPLLEN_V(2) |
                                CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
                                CPL_TX_SEC_PDU_IVINSRTOFST_V(ivinoffset));

        wr->req.sec_cpl.pldlen = htonl(skb->len + esnlen);
        aadstart = sa_entry->esn ? 1 : (skb_transport_offset(skb) + 1);
        aadstop = sa_entry->esn ? ESN_IV_INSERT_OFFSET :
                                  (skb_transport_offset(skb) +
                                   sizeof(struct ip_esp_hdr));
        ciphstart = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr) +
                    GCM_ESP_IV_SIZE + 1;
        ciphstart += sa_entry->esn ? esnlen : 0;

        wr->req.sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
                                                        aadstart,
                                                        aadstop,
                                                        ciphstart, 0);

        wr->req.sec_cpl.cipherstop_lo_authinsert =
                FILL_SEC_CPL_AUTHINSERT(0, ciphstart,
                                        sa_entry->authsize,
                                        sa_entry->authsize);
        wr->req.sec_cpl.seqno_numivs =
                FILL_SEC_CPL_SCMD0_SEQNO(CHCR_ENCRYPT_OP, 1,
                                         CHCR_SCMD_CIPHER_MODE_AES_GCM,
                                         CHCR_SCMD_AUTH_MODE_GHASH,
                                         sa_entry->hmac_ctrl,
                                         ivsize >> 1);
        wr->req.sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
                                                                 0, ivdrop, 0);

        pos += sizeof(struct fw_ulptx_wr) +
               sizeof(struct ulp_txpkt) +
               sizeof(struct ulptx_idata) +
               sizeof(struct cpl_tx_sec_pdu);

        pos = copy_key_cpltx_pktxt(skb, dev, pos, sa_entry);

        return pos;
}

/**
 * flits_to_desc - returns the num of Tx descriptors for the given flits
 * @n: the number of flits
 *
 * Returns the number of Tx descriptors needed for the supplied number
 * of flits.
 */
static unsigned int flits_to_desc(unsigned int n)
{
        WARN_ON(n > SGE_MAX_WR_LEN / 8);
        return DIV_ROUND_UP(n, 8);
}

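/* Number of descriptor slots still available for writing in a Tx queue;
 * one slot is always kept unused.
 */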
static unsigned int txq_avail(const struct sge_txq *q)
{
        return q->size - 1 - q->in_use;
}

static void eth_txq_stop(struct sge_eth_txq *q)
{
        netif_tx_stop_queue(q->txq);
        q->q.stops++;
}

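/* Advance the software producer index and in-use count by @n descriptors,
 * wrapping the index at the end of the ring.
 */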
static void txq_advance(struct sge_txq *q, unsigned int n)
{
        q->in_use += n;
        q->pidx += n;
        if (q->pidx >= q->size)
                q->pidx -= q->size;
}

/*
 * ch_ipsec_xmit called from ULD Tx handler
 */
static int ch_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct xfrm_state *x = xfrm_input_state(skb);
        unsigned int last_desc, ndesc, flits = 0;
        struct ipsec_sa_entry *sa_entry;
        u64 *pos, *end, *before, *sgl;
        struct tx_sw_desc *sgl_sdesc;
        int qidx, left, credits;
        bool immediate = false;
        struct sge_eth_txq *q;
        struct adapter *adap;
        struct port_info *pi;
        struct sec_path *sp;

        if (!x->xso.offload_handle)
                return NETDEV_TX_BUSY;

        sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;

        sp = skb_sec_path(skb);
        if (sp->len != 1) {
out_free:       dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        pi = netdev_priv(dev);
        adap = pi->adapter;
        qidx = skb->queue_mapping;
        q = &adap->sge.ethtxq[qidx + pi->first_qset];

        cxgb4_reclaim_completed_tx(adap, &q->q, true);

        flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
        ndesc = flits_to_desc(flits);
        credits = txq_avail(&q->q) - ndesc;

        if (unlikely(credits < 0)) {
                eth_txq_stop(q);
                dev_err(adap->pdev_dev,
                        "%s: Tx ring %u full while queue awake! cred:%d %d %d flits:%d\n",
                        dev->name, qidx, credits, ndesc, txq_avail(&q->q),
                        flits);
                return NETDEV_TX_BUSY;
        }

        last_desc = q->q.pidx + ndesc - 1;
        if (last_desc >= q->q.size)
                last_desc -= q->q.size;
        sgl_sdesc = &q->q.sdesc[last_desc];

        if (!immediate &&
            unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
                memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
                q->mapping_err++;
                goto out_free;
        }

        pos = (u64 *)&q->q.desc[q->q.pidx];
        before = (u64 *)pos;
        end = (u64 *)pos + flits;
        /* Setup IPSec CPL */
        pos = (void *)ch_ipsec_crypto_wreq(skb, dev, (void *)pos,
                                           credits, sa_entry);
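        /* If the work request writer wrapped past the end of the descriptor
         * ring, rebase the end pointer (and pos, when it landed exactly on
         * the end-of-queue marker) back to the start of the ring.
         */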
        if (before > (u64 *)pos) {
                left = (u8 *)end - (u8 *)q->q.stat;
                end = (void *)q->q.desc + left;
        }
        if (pos == (u64 *)q->q.stat) {
                left = (u8 *)end - (u8 *)q->q.stat;
                end = (void *)q->q.desc + left;
                pos = (void *)q->q.desc;
        }

        sgl = (void *)pos;
        if (immediate) {
                cxgb4_inline_tx_skb(skb, &q->q, sgl);
                dev_consume_skb_any(skb);
        } else {
                cxgb4_write_sgl(skb, &q->q, (void *)sgl, end,
                                0, sgl_sdesc->addr);
                skb_orphan(skb);
                sgl_sdesc->skb = skb;
        }
        txq_advance(&q->q, ndesc);

        cxgb4_ring_tx_db(adap, &q->q, ndesc);
        return NETDEV_TX_OK;
}

static int __init ch_ipsec_init(void)
{
        cxgb4_register_uld(CXGB4_ULD_IPSEC, &ch_ipsec_uld_info);

        return 0;
}

static void __exit ch_ipsec_exit(void)
{
        struct ipsec_uld_ctx *u_ctx, *tmp;
        struct adapter *adap;

        mutex_lock(&dev_mutex);
        list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
                adap = pci_get_drvdata(u_ctx->lldi.pdev);
                atomic_set(&adap->ch_ipsec_stats.ipsec_cnt, 0);
                list_del(&u_ctx->entry);
                kfree(u_ctx);
        }
        mutex_unlock(&dev_mutex);
        cxgb4_unregister_uld(CXGB4_ULD_IPSEC);
}

module_init(ch_ipsec_init);
module_exit(ch_ipsec_exit);

MODULE_DESCRIPTION("Crypto IPSEC for Chelsio Terminator cards.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(CHIPSEC_DRV_VERSION);