/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *	Manoj Malviya (manojmalviya@chelsio.com)
 *	Atul Gupta (atul.gupta@chelsio.com)
 *	Jitendra Lulla (jlulla@chelsio.com)
 *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *	Harsh Jain (harsh@chelsio.com)
 */

#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/gcm.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "t4fw_api.h"
#include "t4_msg.h"
#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

#define IV AES_BLOCK_SIZE

static unsigned int sgl_ent_len[] = {
	0, 0, 16, 24, 40, 48, 64, 72, 88,
	96, 112, 120, 136, 144, 160, 168, 184,
	192, 208, 216, 232, 240, 256, 264, 280,
	288, 304, 312, 328, 336, 352, 360, 376
};

static unsigned int dsgl_ent_len[] = {
	0, 32, 32, 48, 48, 64, 64, 80, 80,
	112, 112, 128, 128, 144, 144, 160, 160,
	192, 192, 208, 208, 224, 224, 240, 240,
	272, 272, 288, 288, 304, 304, 320, 320
};
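
/*
 * Lookup tables used when sizing a work request: indexed by the number of
 * scatter entries, they give (as a sketch, a conservative bound on) how many
 * bytes of ULPTX SGL / PHYS DSGL space those entries occupy, so the packing
 * loops below can test
 *
 *	space > sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg]
 *
 * instead of re-deriving structure sizes per entry (see chcr_sg_ent_in_wr()
 * and chcr_hash_ent_in_wr()).
 */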

static u32 round_constant[11] = {
	0x01000000, 0x02000000, 0x04000000, 0x08000000,
	0x10000000, 0x20000000, 0x40000000, 0x80000000,
	0x1B000000, 0x36000000, 0x6C000000
};
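
/*
 * These are the AES key-schedule round constants (FIPS-197 Rcon), stored in
 * the most significant byte so they XOR against the first byte of the
 * rotated, substituted word in get_aes_decrypt_key() below; e.g. for AES-128
 * the first expanded word is
 *
 *	w[4] = w[0] ^ SubWord(RotWord(w[3])) ^ 0x01000000
 */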

static int chcr_handle_cipher_resp(struct skcipher_request *req,
				   unsigned char *input, int err);

static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
	return &ctx->crypto_ctx->aeadctx;
}

static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
	return &ctx->crypto_ctx->ablkctx;
}

static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
{
	return &ctx->crypto_ctx->hmacctx;
}

static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->gcm;
}

static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->authenc;
}

static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
	return container_of(ctx->dev, struct uld_ctx, dev);
}

static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
{
	memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
}

static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
			 unsigned int entlen,
			 unsigned int skip)
{
	int nents = 0;
	unsigned int less;
	unsigned int skip_len = 0;

	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}

	while (sg && reqlen) {
		less = min(reqlen, sg_dma_len(sg) - skip_len);
		nents += DIV_ROUND_UP(less, entlen);
		reqlen -= less;
		skip_len = 0;
		sg = sg_next(sg);
	}
	return nents;
}
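
/*
 * Worked example, assuming a destination entry limit of 2048 bytes: a single
 * 10 KiB DMA segment with skip == 0 yields
 *
 *	nents = DIV_ROUND_UP(10240, 2048) = 5
 *
 * hardware SG entries, since each entry may cover at most entlen bytes.
 */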

static inline int get_aead_subtype(struct crypto_aead *aead)
{
	struct aead_alg *alg = crypto_aead_alg(aead);
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.aead);
	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
	u8 temp[SHA512_DIGEST_SIZE];
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	struct cpl_fw6_pld *fw6_pld;
	int cmp = 0;

	fw6_pld = (struct cpl_fw6_pld *)input;
	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
				   authsize, req->assoclen +
				   req->cryptlen - authsize);
		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
	}
	if (cmp)
		*err = -EBADMSG;
	else
		*err = 0;
}

static int chcr_inc_wrcount(struct chcr_dev *dev)
{
	if (dev->state == CHCR_DETACH)
		return 1;
	atomic_inc(&dev->inflight);
	return 0;
}

static inline void chcr_dec_wrcount(struct chcr_dev *dev)
{
	atomic_dec(&dev->inflight);
}

static inline int chcr_handle_aead_resp(struct aead_request *req,
					unsigned char *input,
					int err)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_dev *dev = a_ctx(tfm)->dev;

	chcr_aead_common_exit(req);
	if (reqctx->verify == VERIFY_SW) {
		chcr_verify_tag(req, input, &err);
		reqctx->verify = VERIFY_HW;
	}
	chcr_dec_wrcount(dev);
	aead_request_complete(req, err);

	return err;
}

static void get_aes_decrypt_key(unsigned char *dec_key,
				const unsigned char *key,
				unsigned int keylength)
{
	u32 temp;
	u32 w_ring[MAX_NK];
	int i, j, k;
	u8 nr, nk;

	switch (keylength) {
	case AES_KEYLENGTH_128BIT:
		nk = KEYLENGTH_4BYTES;
		nr = NUMBER_OF_ROUNDS_10;
		break;
	case AES_KEYLENGTH_192BIT:
		nk = KEYLENGTH_6BYTES;
		nr = NUMBER_OF_ROUNDS_12;
		break;
	case AES_KEYLENGTH_256BIT:
		nk = KEYLENGTH_8BYTES;
		nr = NUMBER_OF_ROUNDS_14;
		break;
	default:
		return;
	}
	for (i = 0; i < nk; i++)
		w_ring[i] = get_unaligned_be32(&key[i * 4]);

	i = 0;
	temp = w_ring[nk - 1];
	while (i + nk < (nr + 1) * 4) {
		if (!(i % nk)) {
			/* RotWord(temp) */
			temp = (temp << 8) | (temp >> 24);
			temp = aes_ks_subword(temp);
			temp ^= round_constant[i / nk];
		} else if (nk == 8 && (i % 4 == 0)) {
			temp = aes_ks_subword(temp);
		}
		w_ring[i % nk] ^= temp;
		temp = w_ring[i % nk];
		i++;
	}
	i--;
	for (k = 0, j = i % nk; k < nk; k++) {
		put_unaligned_be32(w_ring[j], &dec_key[k * 4]);
		j--;
		if (j < 0)
			j += nk;
	}
}
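
/*
 * The loop above replays the FIPS-197 forward key expansion in a ring of
 * nk words,
 *
 *	temp = SubWord(RotWord(w[i-1])) ^ Rcon[i/nk]	(when i % nk == 0)
 *	w[i] = w[i-nk] ^ temp
 *
 * and the final copy-out emits the last nk expanded words in reverse order,
 * i.e. the tail of the schedule, which generate_copy_rrkey() below places in
 * the key context for decrypt operations.
 */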

static int chcr_prepare_hmac_key(const u8 *raw_key, unsigned int raw_key_len,
				 int digestsize, void *istate, void *ostate)
{
	__be32 *istate32 = istate, *ostate32 = ostate;
	__be64 *istate64 = istate, *ostate64 = ostate;
	union {
		struct hmac_sha1_key sha1;
		struct hmac_sha224_key sha224;
		struct hmac_sha256_key sha256;
		struct hmac_sha384_key sha384;
		struct hmac_sha512_key sha512;
	} k;

	switch (digestsize) {
	case SHA1_DIGEST_SIZE:
		hmac_sha1_preparekey(&k.sha1, raw_key, raw_key_len);
		for (int i = 0; i < ARRAY_SIZE(k.sha1.istate.h); i++) {
			istate32[i] = cpu_to_be32(k.sha1.istate.h[i]);
			ostate32[i] = cpu_to_be32(k.sha1.ostate.h[i]);
		}
		break;
	case SHA224_DIGEST_SIZE:
		hmac_sha224_preparekey(&k.sha224, raw_key, raw_key_len);
		for (int i = 0; i < ARRAY_SIZE(k.sha224.key.istate.h); i++) {
			istate32[i] = cpu_to_be32(k.sha224.key.istate.h[i]);
			ostate32[i] = cpu_to_be32(k.sha224.key.ostate.h[i]);
		}
		break;
	case SHA256_DIGEST_SIZE:
		hmac_sha256_preparekey(&k.sha256, raw_key, raw_key_len);
		for (int i = 0; i < ARRAY_SIZE(k.sha256.key.istate.h); i++) {
			istate32[i] = cpu_to_be32(k.sha256.key.istate.h[i]);
			ostate32[i] = cpu_to_be32(k.sha256.key.ostate.h[i]);
		}
		break;
	case SHA384_DIGEST_SIZE:
		hmac_sha384_preparekey(&k.sha384, raw_key, raw_key_len);
		for (int i = 0; i < ARRAY_SIZE(k.sha384.key.istate.h); i++) {
			istate64[i] = cpu_to_be64(k.sha384.key.istate.h[i]);
			ostate64[i] = cpu_to_be64(k.sha384.key.ostate.h[i]);
		}
		break;
	case SHA512_DIGEST_SIZE:
		hmac_sha512_preparekey(&k.sha512, raw_key, raw_key_len);
		for (int i = 0; i < ARRAY_SIZE(k.sha512.key.istate.h); i++) {
			istate64[i] = cpu_to_be64(k.sha512.key.istate.h[i]);
			ostate64[i] = cpu_to_be64(k.sha512.key.ostate.h[i]);
		}
		break;
	default:
		return -EINVAL;
	}
	memzero_explicit(&k, sizeof(k));
	return 0;
}

static inline int is_hmac(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
			     alg.hash);
	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
		return 1;
	return 0;
}

static inline void dsgl_walk_init(struct dsgl_walk *walk,
				  struct cpl_rx_phys_dsgl *dsgl)
{
	walk->dsgl = dsgl;
	walk->nents = 0;
	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
}

static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
				 int pci_chan_id)
{
	struct cpl_rx_phys_dsgl *phys_cpl;

	phys_cpl = walk->dsgl;

	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
	phys_cpl->pcirlxorder_to_noofsgentr =
		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
	phys_cpl->rss_hdr_int.qid = htons(qid);
	phys_cpl->rss_hdr_int.hash_val = 0;
	phys_cpl->rss_hdr_int.channel = pci_chan_id;
}

static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
				      size_t size,
				      dma_addr_t addr)
{
	int j;

	if (!size)
		return;
	j = walk->nents;
	walk->to->len[j % 8] = htons(size);
	walk->to->addr[j % 8] = cpu_to_be64(addr);
	j++;
	if ((j % 8) == 0)
		walk->to++;
	walk->nents = j;
}

static void dsgl_walk_add_sg(struct dsgl_walk *walk,
			     struct scatterlist *sg,
			     unsigned int slen,
			     unsigned int skip)
{
	int skip_len = 0;
	unsigned int left_size = slen, len = 0;
	unsigned int j = walk->nents;
	int offset, ent_len;

	if (!slen)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}

	while (left_size && sg) {
		len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		offset = 0;
		while (len) {
			ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
			walk->to->len[j % 8] = htons(ent_len);
			walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
							    offset + skip_len);
			offset += ent_len;
			len -= ent_len;
			j++;
			if ((j % 8) == 0)
				walk->to++;
		}
		walk->last_sg = sg;
		walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
					  skip_len) + skip_len;
		left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		skip_len = 0;
		sg = sg_next(sg);
	}
	walk->nents = j;
}

static inline void ulptx_walk_init(struct ulptx_walk *walk,
				   struct ulptx_sgl *ulp)
{
	walk->sgl = ulp;
	walk->nents = 0;
	walk->pair_idx = 0;
	walk->pair = ulp->sge;
	walk->last_sg = NULL;
	walk->last_sg_len = 0;
}

static inline void ulptx_walk_end(struct ulptx_walk *walk)
{
	walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
				    ULPTX_NSGE_V(walk->nents));
}

static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
				       size_t size,
				       dma_addr_t addr)
{
	if (!size)
		return;

	if (walk->nents == 0) {
		walk->sgl->len0 = cpu_to_be32(size);
		walk->sgl->addr0 = cpu_to_be64(addr);
	} else {
		walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
		walk->pair_idx = !walk->pair_idx;
		if (!walk->pair_idx)
			walk->pair++;
	}
	walk->nents++;
}
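
/*
 * ULPTX SGL layout sketch (per struct ulptx_sgl in t4_msg.h): the first
 * fragment lives in len0/addr0; subsequent fragments fill the addr[]/len[]
 * slots of struct ulptx_sge_pair two at a time, which is why pair_idx
 * toggles and pair only advances on the second slot. ulptx_walk_end() then
 * stamps the final fragment count:
 *
 *	cmd_nsge = ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_NSGE_V(nents);
 */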

static void ulptx_walk_add_sg(struct ulptx_walk *walk,
			      struct scatterlist *sg,
			      unsigned int len,
			      unsigned int skip)
{
	int small;
	int skip_len = 0;
	unsigned int sgmin;

	if (!len)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}
	WARN(!sg, "SG should not be null here\n");
	if (sg && (walk->nents == 0)) {
		small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->sgl->len0 = cpu_to_be32(sgmin);
		walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->nents++;
		len -= sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = sgmin + skip_len;
		skip_len += sgmin;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}

	while (sg && len) {
		small = min(sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
		walk->pair->addr[walk->pair_idx] =
			cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->pair_idx = !walk->pair_idx;
		walk->nents++;
		if (!walk->pair_idx)
			walk->pair++;
		len -= sgmin;
		skip_len += sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = skip_len;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}
}

static inline int get_cryptoalg_subtype(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.skcipher);

	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
	struct adapter *adap = netdev2adap(dev);
	struct sge_uld_txq_info *txq_info =
		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
	struct sge_uld_txq *txq;
	int ret = 0;

	local_bh_disable();
	txq = &txq_info->uldtxq[idx];
	spin_lock(&txq->sendq.lock);
	if (txq->full)
		ret = -1;
	spin_unlock(&txq->sendq.lock);
	local_bh_enable();
	return ret;
}

static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
			       struct _key_ctx *key_ctx)
{
	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
	} else {
		memcpy(key_ctx->key,
		       ablkctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->enckey_len >> 1);
		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
	}
	return 0;
}

static int chcr_hash_ent_in_wr(struct scatterlist *src,
			       unsigned int minsg,
			       unsigned int space,
			       unsigned int srcskip)
{
	int srclen = 0;
	int srcsg = minsg;
	int soffset = 0, sless;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}
	while (src && space > (sgl_ent_len[srcsg + 1])) {
		sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
			      CHCR_SRC_SG_SIZE);
		srclen += sless;
		soffset += sless;
		srcsg++;
		if (sg_dma_len(src) == (soffset + srcskip)) {
			src = sg_next(src);
			soffset = 0;
			srcskip = 0;
		}
	}
	return srclen;
}

static int chcr_sg_ent_in_wr(struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int minsg,
			     unsigned int space,
			     unsigned int srcskip,
			     unsigned int dstskip)
{
	int srclen = 0, dstlen = 0;
	int srcsg = minsg, dstsg = minsg;
	int offset = 0, soffset = 0, less, sless = 0;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}
	if (sg_dma_len(dst) == dstskip) {
		dst = sg_next(dst);
		dstskip = 0;
	}

	while (src && dst &&
	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
		sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
			      CHCR_SRC_SG_SIZE);
		srclen += sless;
		srcsg++;
		offset = 0;
		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
			if (srclen <= dstlen)
				break;
			less = min_t(unsigned int, sg_dma_len(dst) - offset -
				     dstskip, CHCR_DST_SG_SIZE);
			dstlen += less;
			offset += less;
			if ((offset + dstskip) == sg_dma_len(dst)) {
				dst = sg_next(dst);
				offset = 0;
			}
			dstsg++;
			dstskip = 0;
		}
		soffset += sless;
		if ((soffset + srcskip) == sg_dma_len(src)) {
			src = sg_next(src);
			srcskip = 0;
			soffset = 0;
		}
	}
	return min(srclen, dstlen);
}
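
/*
 * Returns how many payload bytes can travel in one work request: source
 * entries are counted against sgl_ent_len[] and destination entries against
 * dsgl_ent_len[] until `space` runs out, and the caller takes the smaller of
 * the two coverages. Typical use (from chcr_handle_cipher_resp() below):
 *
 *	bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
 *				  CIP_SPACE_LEFT(ablkctx->enckey_len),
 *				  reqctx->src_ofst, reqctx->dst_ofst);
 */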

static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
				struct skcipher_request *req,
				u8 *iv,
				unsigned short op_type)
{
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	int err;

	skcipher_request_set_tfm(&reqctx->fallback_req, cipher);
	skcipher_request_set_callback(&reqctx->fallback_req, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(&reqctx->fallback_req, req->src, req->dst,
				   req->cryptlen, iv);

	err = op_type ? crypto_skcipher_decrypt(&reqctx->fallback_req) :
			crypto_skcipher_encrypt(&reqctx->fallback_req);

	return err;
}

static inline int get_qidxs(struct crypto_async_request *req,
			    unsigned int *txqidx, unsigned int *rxqidx)
{
	struct crypto_tfm *tfm = req->tfm;
	int ret = 0;

	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
	{
		struct aead_request *aead_req =
			container_of(req, struct aead_request, base);
		struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(aead_req);

		*txqidx = reqctx->txqidx;
		*rxqidx = reqctx->rxqidx;
		break;
	}
	case CRYPTO_ALG_TYPE_SKCIPHER:
	{
		struct skcipher_request *sk_req =
			container_of(req, struct skcipher_request, base);
		struct chcr_skcipher_req_ctx *reqctx =
			skcipher_request_ctx(sk_req);

		*txqidx = reqctx->txqidx;
		*rxqidx = reqctx->rxqidx;
		break;
	}
	case CRYPTO_ALG_TYPE_AHASH:
	{
		struct ahash_request *ahash_req =
			container_of(req, struct ahash_request, base);
		struct chcr_ahash_req_ctx *reqctx =
			ahash_request_ctx(ahash_req);

		*txqidx = reqctx->txqidx;
		*rxqidx = reqctx->rxqidx;
		break;
	}
	default:
		ret = -EINVAL;
		/* should never get here */
		BUG();
		break;
	}
	return ret;
}

static inline void create_wreq(struct chcr_context *ctx,
			       struct chcr_wr *chcr_req,
			       struct crypto_async_request *req,
			       unsigned int imm,
			       int hash_sz,
			       unsigned int len16,
			       unsigned int sc_len,
			       unsigned int lcb)
{
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	unsigned int tx_channel_id, rx_channel_id;
	unsigned int txqidx = 0, rxqidx = 0;
	unsigned int qid, fid, portno;

	get_qidxs(req, &txqidx, &rxqidx);
	qid = u_ctx->lldi.rxq_ids[rxqidx];
	fid = u_ctx->lldi.rxq_ids[0];
	portno = rxqidx / ctx->rxq_perchan;
	tx_channel_id = txqidx / ctx->txq_perchan;
	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[portno]);

	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
	chcr_req->wreq.pld_size_hash_size =
		htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
	chcr_req->wreq.len16_pkd =
		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
	chcr_req->wreq.rx_chid_to_rx_q_id = FILL_WR_RX_Q_ID(rx_channel_id, qid,
							    !!lcb, txqidx);

	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(tx_channel_id, fid);
	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
				     ((sizeof(chcr_req->wreq)) >> 4)));
	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
					   sizeof(chcr_req->key_ctx) + sc_len);
}

/**
 * create_cipher_wr - form the WR for cipher operations
 * @wrparam: Container for create_cipher_wr()'s parameters
 */
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
	struct chcr_context *ctx = c_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	struct chcr_skcipher_req_ctx *reqctx =
		skcipher_request_ctx(wrparam->req);
	unsigned int temp = 0, transhdr_len, dst_size;
	int error;
	int nents;
	unsigned int kctx_len;
	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
			GFP_KERNEL : GFP_ATOMIC;
	struct adapter *adap = padap(ctx->dev);
	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;

	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
	nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
			      reqctx->dst_ofst);
	dst_size = get_space_for_phys_dsgl(nents);
	kctx_len = roundup(ablkctx->enckey_len, 16);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
			      CHCR_SRC_SG_SIZE, reqctx->src_ofst);
	temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
			     (sgl_len(nents) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);
	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}
	chcr_req = __skb_put_zero(skb, transhdr_len);
	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);

	chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
	chcr_req->sec_cpl.aadstart_cipherstop_hi =
		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);

	chcr_req->sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
								  ablkctx->ciph_mode,
								  0, 0, IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
								   0, 1, dst_size);

	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
	if ((reqctx->op == CHCR_DECRYPT_OP) &&
	    (!(get_cryptoalg_subtype(tfm) ==
	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
	    (!(get_cryptoalg_subtype(tfm) ==
	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
	} else {
		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
			memcpy(chcr_req->key_ctx.key, ablkctx->key,
			       ablkctx->enckey_len);
		} else {
			memcpy(chcr_req->key_ctx.key, ablkctx->key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->enckey_len >> 1);
			memcpy(chcr_req->key_ctx.key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->key,
			       ablkctx->enckey_len >> 1);
		}
	}
	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
	chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
	chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);

	atomic_inc(&adap->chcr_stats.cipher_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
		+ (reqctx->imm ? (wrparam->bytes) : 0);
	create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
		    transhdr_len, temp,
		    ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
	reqctx->skb = skb;

	if (reqctx->op && (ablkctx->ciph_mode ==
			   CHCR_SCMD_CIPHER_MODE_AES_CBC))
		sg_pcopy_to_buffer(wrparam->req->src,
				   sg_nents(wrparam->req->src), wrparam->req->iv, 16,
				   reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);

	return skb;
err:
	return ERR_PTR(error);
}

static inline int chcr_keyctx_ck_size(unsigned int keylen)
{
	int ck_size = 0;

	if (keylen == AES_KEYSIZE_128)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	else if (keylen == AES_KEYSIZE_192)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	else if (keylen == AES_KEYSIZE_256)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	else
		ck_size = 0;

	return ck_size;
}

static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
				       const u8 *key,
				       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));

	crypto_skcipher_clear_flags(ablkctx->sw_cipher,
				    CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ablkctx->sw_cipher,
				  cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
}

static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
	return 0;
badkey_err:
	ablkctx->enckey_len = 0;

	return err;
}

static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;
	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	ablkctx->enckey_len = 0;

	return err;
}

static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
				   const u8 *key,
				   unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	if (keylen < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;
	memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
	       CTR_RFC3686_NONCE_SIZE);

	keylen -= CTR_RFC3686_NONCE_SIZE;
	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	ablkctx->enckey_len = 0;

	return err;
}

static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
{
	unsigned int size = AES_BLOCK_SIZE;
	__be32 *b = (__be32 *)(dstiv + size);
	u32 c, prev;

	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
	for (; size >= 4; size -= 4) {
		prev = be32_to_cpu(*--b);
		c = prev + add;
		*b = cpu_to_be32(c);
		if (prev < c)
			break;
		add = 1;
	}
}
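
/*
 * Worked example of the ripple-carry add above: adding 2 to a counter whose
 * low 32-bit word is 0xffffffff,
 *
 *	... 00000000 ffffffff  +  2  ->  ... 00000001 00000001
 *
 * the low word wraps (prev >= c), so the loop carries add = 1 into the next
 * more significant word and stops as soon as no overflow occurs.
 */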

static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
{
	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
	u64 c;
	u32 temp = be32_to_cpu(*--b);

	temp = ~temp;
	c = (u64)temp + 1; /* number of blocks that can be processed without overflow */
	if ((bytes / AES_BLOCK_SIZE) >= c)
		bytes = c * AES_BLOCK_SIZE;
	return bytes;
}
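
/*
 * Example: if the low counter word of the IV is 0xfffffffe, then
 * c = ~0xfffffffe + 1 = 2 blocks remain before that word wraps, so a request
 * for 1024 bytes is clamped to 2 * AES_BLOCK_SIZE = 32 bytes; the remainder
 * is issued in a follow-up WR once the IV has been updated.
 */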

static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
			     u32 isfinal)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct aes_key aes;
	int ret, i;
	u8 *key;
	unsigned int keylen;
	int round = reqctx->last_req_len / AES_BLOCK_SIZE;
	int round8 = round / 8;

	memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);

	keylen = ablkctx->enckey_len / 2;
	key = ablkctx->key + keylen;
	/* For a 192 bit key remove the padded zeroes which were
	 * added in chcr_xts_setkey
	 */
	if (KEY_CONTEXT_CK_SIZE_G(ntohl(ablkctx->key_ctx_hdr))
	    == CHCR_KEYCTX_CIPHER_KEY_SIZE_192)
		ret = aes_preparekey(&aes, key, keylen - 8);
	else
		ret = aes_preparekey(&aes, key, keylen);
	if (ret)
		return ret;
	aes_encrypt(&aes, iv, iv);
	for (i = 0; i < round8; i++)
		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);

	for (i = 0; i < (round % 8); i++)
		gf128mul_x_ble((le128 *)iv, (le128 *)iv);

	if (!isfinal)
		aes_decrypt(&aes, iv, iv);

	memzero_explicit(&aes, sizeof(aes));
	return 0;
}
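
/*
 * XTS tweak arithmetic: the j-th block's tweak is E_K2(iv) * x^j in
 * GF(2^128), so after `round` processed blocks the saved tweak is advanced
 * by round/8 byte-wide shifts (gf128mul_x8_ble) plus round%8 single shifts
 * (gf128mul_x_ble). For intermediate (!isfinal) requests the result is
 * mapped back through aes_decrypt(), apparently so the follow-up WR can hand
 * the hardware a plain IV that it will re-encrypt itself.
 */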

static int chcr_update_cipher_iv(struct skcipher_request *req,
				 struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(tfm);
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->iv, (reqctx->processed /
					 AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			    CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
								AES_BLOCK_SIZE) + 1);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
		ret = chcr_update_tweak(req, iv, 0);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		if (reqctx->op)
			/* Updated before sending last WR */
			memcpy(iv, req->iv, AES_BLOCK_SIZE);
		else
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}

	return ret;
}

/* We need a separate function for the final IV because in RFC3686 the
 * initial counter starts from 1 and the IV buffer is only 8 bytes, which
 * remains constant across subsequent update requests.
 */
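
/*
 * For reference, the 16-byte counter block that process_cipher() assembles
 * for RFC3686 requests is
 *
 *	| nonce (4) | per-request IV (8) | counter (4), initialised to 1 |
 *
 * only the trailing counter word changes between update requests.
 */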

static int chcr_final_cipher_iv(struct skcipher_request *req,
				struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(tfm);
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
						     AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) {
		if (!reqctx->partial_req)
			memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
		else
			ret = chcr_update_tweak(req, iv, 1);
	} else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		/* Already updated for decrypt */
		if (!reqctx->op)
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}
	return ret;
}

static int chcr_handle_cipher_resp(struct skcipher_request *req,
				   unsigned char *input, int err)
{
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	struct chcr_context *ctx = c_ctx(tfm);
	struct adapter *adap = padap(ctx->dev);
	struct cipher_wr_param wrparam;
	struct sk_buff *skb;
	int bytes;

	if (err)
		goto unmap;
	if (req->cryptlen == reqctx->processed) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_final_cipher_iv(req, fw6_pld, req->iv);
		goto complete;
	}

	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  reqctx->src_ofst, reqctx->dst_ofst);
		if ((bytes + reqctx->processed) >= req->cryptlen)
			bytes = req->cryptlen - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		/* CTR mode counter overflow */
		bytes = req->cryptlen - reqctx->processed;
	}
	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
	if (err)
		goto unmap;

	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		memcpy(req->iv, reqctx->init_iv, IV);
		atomic_inc(&adap->chcr_stats.fallback);
		err = chcr_cipher_fallback(ablkctx->sw_cipher, req, req->iv,
					   reqctx->op);
		goto complete;
	}

	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CTR)
		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
	wrparam.qid = u_ctx->lldi.rxq_ids[reqctx->rxqidx];
	wrparam.req = req;
	wrparam.bytes = bytes;
	skb = create_cipher_wr(&wrparam);
	if (IS_ERR(skb)) {
		pr_err("%s : Failed to form WR. No memory\n", __func__);
		err = PTR_ERR(skb);
		goto unmap;
	}
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	reqctx->last_req_len = bytes;
	reqctx->processed += bytes;
	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
	    CRYPTO_TFM_REQ_MAY_SLEEP) {
		complete(&ctx->cbc_aes_aio_done);
	}
	return 0;
unmap:
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
	    CRYPTO_TFM_REQ_MAY_SLEEP) {
		complete(&ctx->cbc_aes_aio_done);
	}
	chcr_dec_wrcount(dev);
	skcipher_request_complete(req, err);
	return err;
}

static int process_cipher(struct skcipher_request *req,
			  unsigned short qid,
			  struct sk_buff **skb,
			  unsigned short op_type)
{
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct adapter *adap = padap(c_ctx(tfm)->dev);
	struct cipher_wr_param wrparam;
	int bytes, err = -EINVAL;
	int subtype;

	reqctx->processed = 0;
	reqctx->partial_req = 0;
	if (!req->iv)
		goto error;
	subtype = get_cryptoalg_subtype(tfm);
	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
	    (req->cryptlen == 0) ||
	    (req->cryptlen % crypto_skcipher_blocksize(tfm))) {
		if (req->cryptlen == 0 && subtype != CRYPTO_ALG_SUB_TYPE_XTS)
			goto fallback;
		else if (req->cryptlen % crypto_skcipher_blocksize(tfm) &&
			 subtype == CRYPTO_ALG_SUB_TYPE_XTS)
			goto fallback;
		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
		       ablkctx->enckey_len, req->cryptlen, ivsize);
		goto error;
	}

	err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
	if (err)
		goto error;
	if (req->cryptlen < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
					       AES_MIN_KEY_SIZE +
					       sizeof(struct cpl_rx_phys_dsgl) +
					       /* Min dsgl size */
					       32))) {
		/* Can be sent as immediate */
		unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;

		dnents = sg_nents_xlen(req->dst, req->cryptlen,
				       CHCR_DST_SG_SIZE, 0);
		phys_dsgl = get_space_for_phys_dsgl(dnents);
		kctx_len = roundup(ablkctx->enckey_len, 16);
		transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
		reqctx->imm = (transhdr_len + IV + req->cryptlen) <=
			SGE_MAX_WR_LEN;
		bytes = IV + req->cryptlen;

	} else {
		reqctx->imm = 0;
	}

	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  0, 0);
		if ((bytes + reqctx->processed) >= req->cryptlen)
			bytes = req->cryptlen - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		bytes = req->cryptlen;
	}
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		bytes = adjust_ctr_overflow(req->iv, bytes);

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
		       CTR_RFC3686_IV_SIZE);

		/* initialize counter portion of counter block */
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			    CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
		memcpy(reqctx->init_iv, reqctx->iv, IV);
	} else {
		memcpy(reqctx->iv, req->iv, IV);
		memcpy(reqctx->init_iv, req->iv, IV);
	}
	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
fallback:
		atomic_inc(&adap->chcr_stats.fallback);
		err = chcr_cipher_fallback(ablkctx->sw_cipher, req,
					   subtype ==
					   CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ?
					   reqctx->iv : req->iv,
					   op_type);
		goto error;
	}
	reqctx->op = op_type;
	reqctx->srcsg = req->src;
	reqctx->dstsg = req->dst;
	reqctx->src_ofst = 0;
	reqctx->dst_ofst = 0;
	wrparam.qid = qid;
	wrparam.req = req;
	wrparam.bytes = bytes;
	*skb = create_cipher_wr(&wrparam);
	if (IS_ERR(*skb)) {
		err = PTR_ERR(*skb);
		goto unmap;
	}
	reqctx->processed = bytes;
	reqctx->last_req_len = bytes;
	reqctx->partial_req = !!(req->cryptlen - reqctx->processed);

	return 0;
unmap:
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
error:
	return err;
}

static int chcr_aes_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	struct sk_buff *skb = NULL;
	int err;
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct chcr_context *ctx = c_ctx(tfm);
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	err = chcr_inc_wrcount(dev);
	if (err)
		return -ENXIO;
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    reqctx->txqidx) &&
		     (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
		err = -ENOSPC;
		goto error;
	}

	err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
			     &skb, CHCR_ENCRYPT_OP);
	if (err || !skb)
		return err;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
	    CRYPTO_TFM_REQ_MAY_SLEEP) {
		reqctx->partial_req = 1;
		wait_for_completion(&ctx->cbc_aes_aio_done);
	}
	return -EINPROGRESS;
error:
	chcr_dec_wrcount(dev);
	return err;
}

static int chcr_aes_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	struct sk_buff *skb = NULL;
	int err;
	struct chcr_context *ctx = c_ctx(tfm);
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	err = chcr_inc_wrcount(dev);
	if (err)
		return -ENXIO;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    reqctx->txqidx) &&
		     (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))))
		return -ENOSPC;
	err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
			     &skb, CHCR_DECRYPT_OP);
	if (err || !skb)
		return err;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}

static int chcr_device_init(struct chcr_context *ctx)
{
	struct uld_ctx *u_ctx = NULL;
	int txq_perchan, ntxq;
	int err = 0, rxq_perchan;

	if (!ctx->dev) {
		u_ctx = assign_chcr_device();
		if (!u_ctx) {
			err = -ENXIO;
			pr_err("chcr device assignment fails\n");
			goto out;
		}
		ctx->dev = &u_ctx->dev;
		ntxq = u_ctx->lldi.ntxq;
		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
		txq_perchan = ntxq / u_ctx->lldi.nchan;
		ctx->ntxq = ntxq;
		ctx->nrxq = u_ctx->lldi.nrxq;
		ctx->rxq_perchan = rxq_perchan;
		ctx->txq_perchan = txq_perchan;
	}
out:
	return err;
}

static int chcr_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	ablkctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0,
						   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}
	init_completion(&ctx->cbc_aes_aio_done);
	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
					 crypto_skcipher_reqsize(ablkctx->sw_cipher));

	return chcr_device_init(ctx);
}

static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	/* RFC3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
	 * cannot be used as the fallback in chcr_handle_cipher_resp
	 */
	ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
						   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}
	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
					 crypto_skcipher_reqsize(ablkctx->sw_cipher));
	return chcr_device_init(ctx);
}

static void chcr_exit_tfm(struct crypto_skcipher *tfm)
{
	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	crypto_free_skcipher(ablkctx->sw_cipher);
}

static int get_alg_config(struct algo_param *params,
			  unsigned int auth_size)
{
	switch (auth_size) {
	case SHA1_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
		params->result_size = SHA1_DIGEST_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	default:
		pr_err("ERROR, unsupported digest size\n");
		return -EINVAL;
	}
	return 0;
}

/**
 * create_hash_wr - Create hash work request
 * @req: Cipher req base
 * @param: Container for create_hash_wr()'s parameters
 */
static struct sk_buff *create_hash_wr(struct ahash_request *req,
				      struct hash_wr_param *param)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = h_ctx(tfm);
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	struct sk_buff *skb = NULL;
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct chcr_wr *chcr_req;
	struct ulptx_sgl *ulptx;
	unsigned int nents = 0, transhdr_len;
	unsigned int temp = 0;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(h_ctx(tfm)->dev);
	int error = 0;
	unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;

	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
				param->sg_len) <= SGE_MAX_WR_LEN;
	nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
			      CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
	nents += param->bfr_len ? 1 : 0;
	transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
			param->sg_len, 16) : (sgl_len(nents) * 8);
	transhdr_len = roundup(transhdr_len, 16);

	skb = alloc_skb(transhdr_len, flags);
	if (!skb)
		return ERR_PTR(-ENOMEM);
	chcr_req = __skb_put_zero(skb, transhdr_len);

	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 0);

	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);

	chcr_req->sec_cpl.aadstart_cipherstop_hi =
		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
	chcr_req->sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
	chcr_req->sec_cpl.seqno_numivs =
		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
					 param->opad_needed, 0);

	chcr_req->sec_cpl.ivgen_hdrlen =
		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);

	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
	       param->alg_prm.result_size);

	if (param->opad_needed)
		memcpy(chcr_req->key_ctx.key +
		       ((param->alg_prm.result_size <= 32) ? 32 :
			CHCR_HASH_MAX_DIGEST_SIZE),
		       hmacctx->opad, param->alg_prm.result_size);

	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
						     param->alg_prm.mk_size, 0,
						     param->opad_needed,
						     ((param->kctx_len +
						       sizeof(chcr_req->key_ctx)) >> 4));
	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
	ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
				     DUMMY_BYTES);
	if (param->bfr_len != 0) {
		req_ctx->hctx_wr.dma_addr =
			dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
				       param->bfr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
				      req_ctx->hctx_wr.dma_addr)) {
			error = -ENOMEM;
			goto err;
		}
		req_ctx->hctx_wr.dma_len = param->bfr_len;
	} else {
		req_ctx->hctx_wr.dma_addr = 0;
	}
	chcr_add_hash_src_ent(req, ulptx, param);
	/* Request up to max WR size */
	temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
			(param->sg_len + param->bfr_len) : 0);
	atomic_inc(&adap->chcr_stats.digest_rqst);
	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
		    param->hash_size, transhdr_len,
		    temp, 0);
	req_ctx->hctx_wr.skb = skb;
	return skb;
err:
	kfree_skb(skb);
	return ERR_PTR(error);
}

static int chcr_ahash_update(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
	struct chcr_context *ctx = h_ctx(rtfm);
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
	struct sk_buff *skb;
	u8 remainder = 0, bs;
	unsigned int nbytes = req->nbytes;
	struct hash_wr_param params;
	int error;
	unsigned int cpu;

	cpu = get_cpu();
	req_ctx->txqidx = cpu % ctx->ntxq;
	req_ctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	if (nbytes + req_ctx->reqlen >= bs) {
		remainder = (nbytes + req_ctx->reqlen) % bs;
		nbytes = nbytes + req_ctx->reqlen - remainder;
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
				   + req_ctx->reqlen, nbytes, 0);
		req_ctx->reqlen += nbytes;
		return 0;
	}
	error = chcr_inc_wrcount(dev);
	if (error)
		return -ENXIO;
	/* Detach state for CHCR means lldi or padap is freed. Increasing
	 * the inflight count for dev guarantees that lldi and padap are valid
	 */
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    req_ctx->txqidx) &&
		     (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
		error = -ENOSPC;
		goto err;
	}

	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
	if (error) {
		error = -ENOMEM;
		goto err;
	}
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
					    HASH_SPACE_LEFT(params.kctx_len), 0);
	if (params.sg_len > req->nbytes)
		params.sg_len = req->nbytes;
	params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
			req_ctx->reqlen;
	params.opad_needed = 0;
	params.more = 1;
	params.last = 0;
	params.bfr_len = req_ctx->reqlen;
	params.scmd1 = 0;
	req_ctx->hctx_wr.srcsg = req->src;

	params.hash_size = params.alg_prm.result_size;
	req_ctx->data_len += params.sg_len + params.bfr_len;
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto unmap;
	}

	req_ctx->hctx_wr.processed += params.sg_len;
	if (remainder) {
		/* Swap buffers */
		swap(req_ctx->reqbfr, req_ctx->skbfr);
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   req_ctx->reqbfr, remainder, req->nbytes -
				   remainder);
	}
	req_ctx->reqlen = remainder;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
unmap:
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
err:
	chcr_dec_wrcount(dev);
	return error;
}
1713
create_last_hash_block(char * bfr_ptr,unsigned int bs,u64 scmd1)1714 static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
1715 {
1716 memset(bfr_ptr, 0, bs);
1717 *bfr_ptr = 0x80;
1718 if (bs == 64)
1719 *(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
1720 else
1721 *(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
1722 }
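
/*
 * Editorial sketch (not driver code): the block built above follows
 * MD-style padding - a 0x80 byte, zeroes, then the big-endian bit
 * count (scmd1 << 3) in the final 8 bytes (offset 56 for a 64 byte
 * block, offset 120 for a 128 byte block).
 */
#if 0
static void example_last_block(void)
{
	char blk[64];

	create_last_hash_block(blk, 64, 3);	/* 3 bytes hashed in total */
	/* blk[0] == 0x80, blk[1..55] == 0, blk[56..63] == be64(24) */
}
#endif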
1723
1724 static int chcr_ahash_final(struct ahash_request *req)
1725 {
1726 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1727 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1728 struct chcr_dev *dev = h_ctx(rtfm)->dev;
1729 struct hash_wr_param params;
1730 struct sk_buff *skb;
1731 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1732 struct chcr_context *ctx = h_ctx(rtfm);
1733 u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1734 int error;
1735 unsigned int cpu;
1736
1737 cpu = get_cpu();
1738 req_ctx->txqidx = cpu % ctx->ntxq;
1739 req_ctx->rxqidx = cpu % ctx->nrxq;
1740 put_cpu();
1741
1742 error = chcr_inc_wrcount(dev);
1743 if (error)
1744 return -ENXIO;
1745
1746 chcr_init_hctx_per_wr(req_ctx);
1747 if (is_hmac(crypto_ahash_tfm(rtfm)))
1748 params.opad_needed = 1;
1749 else
1750 params.opad_needed = 0;
1751 params.sg_len = 0;
1752 req_ctx->hctx_wr.isfinal = 1;
1753 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1754 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1755 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1756 params.opad_needed = 1;
1757 params.kctx_len *= 2;
1758 } else {
1759 params.opad_needed = 0;
1760 }
1761
1762 req_ctx->hctx_wr.result = 1;
1763 params.bfr_len = req_ctx->reqlen;
1764 req_ctx->data_len += params.bfr_len + params.sg_len;
1765 req_ctx->hctx_wr.srcsg = req->src;
1766 if (req_ctx->reqlen == 0) {
1767 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1768 params.last = 0;
1769 params.more = 1;
1770 params.scmd1 = 0;
1771 params.bfr_len = bs;
1772
1773 } else {
1774 params.scmd1 = req_ctx->data_len;
1775 params.last = 1;
1776 params.more = 0;
1777 }
1778 params.hash_size = crypto_ahash_digestsize(rtfm);
1779 skb = create_hash_wr(req, &params);
1780 if (IS_ERR(skb)) {
1781 error = PTR_ERR(skb);
1782 goto err;
1783 }
1784 req_ctx->reqlen = 0;
1785 skb->dev = u_ctx->lldi.ports[0];
1786 set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1787 chcr_send_wr(skb);
1788 return -EINPROGRESS;
1789 err:
1790 chcr_dec_wrcount(dev);
1791 return error;
1792 }
1793
1794 static int chcr_ahash_finup(struct ahash_request *req)
1795 {
1796 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1797 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1798 struct chcr_dev *dev = h_ctx(rtfm)->dev;
1799 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1800 struct chcr_context *ctx = h_ctx(rtfm);
1801 struct sk_buff *skb;
1802 struct hash_wr_param params;
1803 u8 bs;
1804 int error;
1805 unsigned int cpu;
1806
1807 cpu = get_cpu();
1808 req_ctx->txqidx = cpu % ctx->ntxq;
1809 req_ctx->rxqidx = cpu % ctx->nrxq;
1810 put_cpu();
1811
1812 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1813 error = chcr_inc_wrcount(dev);
1814 if (error)
1815 return -ENXIO;
1816
1817 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1818 req_ctx->txqidx) &&
1819 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1820 error = -ENOSPC;
1821 goto err;
1822 }
1823 chcr_init_hctx_per_wr(req_ctx);
1824 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1825 if (error) {
1826 error = -ENOMEM;
1827 goto err;
1828 }
1829
1830 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1831 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1832 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1833 params.kctx_len *= 2;
1834 params.opad_needed = 1;
1835 } else {
1836 params.opad_needed = 0;
1837 }
1838
1839 params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1840 HASH_SPACE_LEFT(params.kctx_len), 0);
1841 if (params.sg_len < req->nbytes) {
1842 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1843 params.kctx_len /= 2;
1844 params.opad_needed = 0;
1845 }
1846 params.last = 0;
1847 params.more = 1;
1848 params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
1849 - req_ctx->reqlen;
1850 params.hash_size = params.alg_prm.result_size;
1851 params.scmd1 = 0;
1852 } else {
1853 params.last = 1;
1854 params.more = 0;
1855 params.sg_len = req->nbytes;
1856 params.hash_size = crypto_ahash_digestsize(rtfm);
1857 params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
1858 params.sg_len;
1859 }
1860 params.bfr_len = req_ctx->reqlen;
1861 req_ctx->data_len += params.bfr_len + params.sg_len;
1862 req_ctx->hctx_wr.result = 1;
1863 req_ctx->hctx_wr.srcsg = req->src;
1864 if ((req_ctx->reqlen + req->nbytes) == 0) {
1865 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1866 params.last = 0;
1867 params.more = 1;
1868 params.scmd1 = 0;
1869 params.bfr_len = bs;
1870 }
1871 skb = create_hash_wr(req, &params);
1872 if (IS_ERR(skb)) {
1873 error = PTR_ERR(skb);
1874 goto unmap;
1875 }
1876 req_ctx->reqlen = 0;
1877 req_ctx->hctx_wr.processed += params.sg_len;
1878 skb->dev = u_ctx->lldi.ports[0];
1879 set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1880 chcr_send_wr(skb);
1881 return -EINPROGRESS;
1882 unmap:
1883 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1884 err:
1885 chcr_dec_wrcount(dev);
1886 return error;
1887 }
1888
1889 static int chcr_hmac_init(struct ahash_request *areq);
1890 static int chcr_sha_init(struct ahash_request *areq);
1891
1892 static int chcr_ahash_digest(struct ahash_request *req)
1893 {
1894 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1895 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1896 struct chcr_dev *dev = h_ctx(rtfm)->dev;
1897 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1898 struct chcr_context *ctx = h_ctx(rtfm);
1899 struct sk_buff *skb;
1900 struct hash_wr_param params;
1901 u8 bs;
1902 int error;
1903 unsigned int cpu;
1904
1905 cpu = get_cpu();
1906 req_ctx->txqidx = cpu % ctx->ntxq;
1907 req_ctx->rxqidx = cpu % ctx->nrxq;
1908 put_cpu();
1909
1910 if (is_hmac(crypto_ahash_tfm(rtfm)))
1911 chcr_hmac_init(req);
1912 else
1913 chcr_sha_init(req);
1914
1915 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1916 error = chcr_inc_wrcount(dev);
1917 if (error)
1918 return -ENXIO;
1919
1920 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1921 req_ctx->txqidx) &&
1922 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1923 error = -ENOSPC;
1924 goto err;
1925 }
1926
1927 chcr_init_hctx_per_wr(req_ctx);
1928 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1929 if (error) {
1930 error = -ENOMEM;
1931 goto err;
1932 }
1933
1934 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1935 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1936 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1937 params.kctx_len *= 2;
1938 params.opad_needed = 1;
1939 } else {
1940 params.opad_needed = 0;
1941 }
1942 params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1943 HASH_SPACE_LEFT(params.kctx_len), 0);
1944 if (params.sg_len < req->nbytes) {
1945 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1946 params.kctx_len /= 2;
1947 params.opad_needed = 0;
1948 }
1949 params.last = 0;
1950 params.more = 1;
1951 params.scmd1 = 0;
1952 params.sg_len = rounddown(params.sg_len, bs);
1953 params.hash_size = params.alg_prm.result_size;
1954 } else {
1955 params.sg_len = req->nbytes;
1956 params.hash_size = crypto_ahash_digestsize(rtfm);
1957 params.last = 1;
1958 params.more = 0;
1959 params.scmd1 = req->nbytes + req_ctx->data_len;
1960
1961 }
1962 params.bfr_len = 0;
1963 req_ctx->hctx_wr.result = 1;
1964 req_ctx->hctx_wr.srcsg = req->src;
1965 req_ctx->data_len += params.bfr_len + params.sg_len;
1966
1967 if (req->nbytes == 0) {
1968 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1969 params.more = 1;
1970 params.bfr_len = bs;
1971 }
1972
1973 skb = create_hash_wr(req, &params);
1974 if (IS_ERR(skb)) {
1975 error = PTR_ERR(skb);
1976 goto unmap;
1977 }
1978 req_ctx->hctx_wr.processed += params.sg_len;
1979 skb->dev = u_ctx->lldi.ports[0];
1980 set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1981 chcr_send_wr(skb);
1982 return -EINPROGRESS;
1983 unmap:
1984 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1985 err:
1986 chcr_dec_wrcount(dev);
1987 return error;
1988 }
1989
1990 static int chcr_ahash_continue(struct ahash_request *req)
1991 {
1992 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
1993 struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
1994 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1995 struct chcr_context *ctx = h_ctx(rtfm);
1996 struct uld_ctx *u_ctx = ULD_CTX(ctx);
1997 struct sk_buff *skb;
1998 struct hash_wr_param params;
1999 u8 bs;
2000 int error;
2001 unsigned int cpu;
2002
2003 cpu = get_cpu();
2004 reqctx->txqidx = cpu % ctx->ntxq;
2005 reqctx->rxqidx = cpu % ctx->nrxq;
2006 put_cpu();
2007
2008 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2009 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
2010 params.kctx_len = roundup(params.alg_prm.result_size, 16);
2011 if (is_hmac(crypto_ahash_tfm(rtfm))) {
2012 params.kctx_len *= 2;
2013 params.opad_needed = 1;
2014 } else {
2015 params.opad_needed = 0;
2016 }
2017 params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
2018 HASH_SPACE_LEFT(params.kctx_len),
2019 hctx_wr->src_ofst);
2020 if ((params.sg_len + hctx_wr->processed) > req->nbytes)
2021 params.sg_len = req->nbytes - hctx_wr->processed;
2022 if (!hctx_wr->result ||
2023 ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
2024 if (is_hmac(crypto_ahash_tfm(rtfm))) {
2025 params.kctx_len /= 2;
2026 params.opad_needed = 0;
2027 }
2028 params.last = 0;
2029 params.more = 1;
2030 params.sg_len = rounddown(params.sg_len, bs);
2031 params.hash_size = params.alg_prm.result_size;
2032 params.scmd1 = 0;
2033 } else {
2034 params.last = 1;
2035 params.more = 0;
2036 params.hash_size = crypto_ahash_digestsize(rtfm);
2037 params.scmd1 = reqctx->data_len + params.sg_len;
2038 }
2039 params.bfr_len = 0;
2040 reqctx->data_len += params.sg_len;
2041 skb = create_hash_wr(req, &params);
2042 if (IS_ERR(skb)) {
2043 error = PTR_ERR(skb);
2044 goto err;
2045 }
2046 hctx_wr->processed += params.sg_len;
2047 skb->dev = u_ctx->lldi.ports[0];
2048 set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
2049 chcr_send_wr(skb);
2050 return 0;
2051 err:
2052 return error;
2053 }
2054
2055 static inline void chcr_handle_ahash_resp(struct ahash_request *req,
2056 unsigned char *input,
2057 int err)
2058 {
2059 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2060 struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2061 int digestsize, updated_digestsize;
2062 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2063 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
2064 struct chcr_dev *dev = h_ctx(tfm)->dev;
2065
2066 if (input == NULL)
2067 goto out;
2068 digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
2069 updated_digestsize = digestsize;
2070 if (digestsize == SHA224_DIGEST_SIZE)
2071 updated_digestsize = SHA256_DIGEST_SIZE;
2072 else if (digestsize == SHA384_DIGEST_SIZE)
2073 updated_digestsize = SHA512_DIGEST_SIZE;
2074
2075 if (hctx_wr->dma_addr) {
2076 dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
2077 hctx_wr->dma_len, DMA_TO_DEVICE);
2078 hctx_wr->dma_addr = 0;
2079 }
2080 if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
2081 req->nbytes)) {
2082 if (hctx_wr->result == 1) {
2083 hctx_wr->result = 0;
2084 memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
2085 digestsize);
2086 } else {
2087 memcpy(reqctx->partial_hash,
2088 input + sizeof(struct cpl_fw6_pld),
2089 updated_digestsize);
2090
2091 }
2092 goto unmap;
2093 }
2094 memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
2095 updated_digestsize);
2096
2097 err = chcr_ahash_continue(req);
2098 if (err)
2099 goto unmap;
2100 return;
2101 unmap:
2102 if (hctx_wr->is_sg_map)
2103 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2104
2105
2106 out:
2107 chcr_dec_wrcount(dev);
2108 ahash_request_complete(req, err);
2109 }
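
/*
 * Editorial sketch (not driver code): truncated digests (SHA-224,
 * SHA-384) carry the full internal state of their parent hash, which
 * is why partial results above are copied with the widened size.
 */
#if 0
static int example_state_size(int digestsize)
{
	if (digestsize == SHA224_DIGEST_SIZE)
		return SHA256_DIGEST_SIZE;
	if (digestsize == SHA384_DIGEST_SIZE)
		return SHA512_DIGEST_SIZE;
	return digestsize;
}
#endif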
2110
2111 /*
2112 * chcr_handle_resp - Unmap the DMA buffers associated with the request
2113 * @req: crypto request
2114 */
2115 int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
2116 int err)
2117 {
2118 struct crypto_tfm *tfm = req->tfm;
2119 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2120 struct adapter *adap = padap(ctx->dev);
2121
2122 switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
2123 case CRYPTO_ALG_TYPE_AEAD:
2124 err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
2125 break;
2126
2127 case CRYPTO_ALG_TYPE_SKCIPHER:
2128 chcr_handle_cipher_resp(skcipher_request_cast(req),
2129 input, err);
2130 break;
2131 case CRYPTO_ALG_TYPE_AHASH:
2132 chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
2133 }
2134 atomic_inc(&adap->chcr_stats.complete);
2135 return err;
2136 }
2137 static int chcr_ahash_export(struct ahash_request *areq, void *out)
2138 {
2139 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2140 struct chcr_ahash_req_ctx *state = out;
2141
2142 state->reqlen = req_ctx->reqlen;
2143 state->data_len = req_ctx->data_len;
2144 memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
2145 memcpy(state->partial_hash, req_ctx->partial_hash,
2146 CHCR_HASH_MAX_DIGEST_SIZE);
2147 chcr_init_hctx_per_wr(state);
2148 return 0;
2149 }
2150
2151 static int chcr_ahash_import(struct ahash_request *areq, const void *in)
2152 {
2153 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2154 struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
2155
2156 req_ctx->reqlen = state->reqlen;
2157 req_ctx->data_len = state->data_len;
2158 req_ctx->reqbfr = req_ctx->bfr1;
2159 req_ctx->skbfr = req_ctx->bfr2;
2160 memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
2161 memcpy(req_ctx->partial_hash, state->partial_hash,
2162 CHCR_HASH_MAX_DIGEST_SIZE);
2163 chcr_init_hctx_per_wr(req_ctx);
2164 return 0;
2165 }
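
/*
 * Editorial sketch (not driver code): a caller reaches the
 * export/import pair above through the generic ahash API, e.g. to
 * suspend and later resume a partially hashed stream. state must be
 * crypto_ahash_statesize() bytes.
 */
#if 0
static int example_save_restore(struct ahash_request *req, void *state)
{
	int err;

	err = crypto_ahash_export(req, state);	/* -> chcr_ahash_export() */
	if (err)
		return err;
	return crypto_ahash_import(req, state);	/* -> chcr_ahash_import() */
}
#endif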
2166
2167 static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2168 unsigned int keylen)
2169 {
2170 struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
2171
2172 /* use the key to calculate the ipad and opad. ipad will be sent with the
2173 * first request's data. opad will be sent with the final hash result.
2174 * They are kept in hmacctx->ipad and hmacctx->opad respectively.
2175 */
2176 return chcr_prepare_hmac_key(key, keylen, crypto_ahash_digestsize(tfm),
2177 hmacctx->ipad, hmacctx->opad);
2178 }
2179
2180 static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
2181 unsigned int key_len)
2182 {
2183 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
2184 unsigned short context_size = 0;
2185 int err;
2186
2187 err = chcr_cipher_fallback_setkey(cipher, key, key_len);
2188 if (err)
2189 goto badkey_err;
2190
2191 memcpy(ablkctx->key, key, key_len);
2192 ablkctx->enckey_len = key_len;
2193 get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
2194 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
2195 /* Both keys for xts must be padded with zeroes to a 16 byte boundary,
2196 * so a 24 byte (AES-192) key gets 8 bytes of zero padding.
2197 */
2198 if (key_len == 48) {
2199 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len
2200 + 16) >> 4;
2201 memmove(ablkctx->key + 32, ablkctx->key + 24, 24);
2202 memset(ablkctx->key + 24, 0, 8);
2203 memset(ablkctx->key + 56, 0, 8);
2204 ablkctx->enckey_len = 64;
2205 ablkctx->key_ctx_hdr =
2206 FILL_KEY_CTX_HDR(CHCR_KEYCTX_CIPHER_KEY_SIZE_192,
2207 CHCR_KEYCTX_NO_KEY, 1,
2208 0, context_size);
2209 } else {
2210 ablkctx->key_ctx_hdr =
2211 FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
2212 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
2213 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
2214 CHCR_KEYCTX_NO_KEY, 1,
2215 0, context_size);
2216 }
2217 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
2218 return 0;
2219 badkey_err:
2220 ablkctx->enckey_len = 0;
2221
2222 return err;
2223 }
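
/*
 * Editorial sketch (not driver code): the key-context sizing done in
 * chcr_aes_xts_setkey() above. A 48 byte XTS key (2 x AES-192) needs
 * 16 extra bytes so each half ends on a 16 byte boundary:
 *
 *   bytes  0..23  first AES-192 key
 *   bytes 24..31  zero padding
 *   bytes 32..55  second AES-192 key
 *   bytes 56..63  zero padding
 */
#if 0
static unsigned short example_xts_ctx_size(unsigned int key_len)
{
	if (key_len == 48)
		return (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len + 16) >> 4;
	return (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
}
#endif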
2224
2225 static int chcr_sha_init(struct ahash_request *areq)
2226 {
2227 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2228 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2229 int digestsize = crypto_ahash_digestsize(tfm);
2230
2231 req_ctx->data_len = 0;
2232 req_ctx->reqlen = 0;
2233 req_ctx->reqbfr = req_ctx->bfr1;
2234 req_ctx->skbfr = req_ctx->bfr2;
2235 copy_hash_init_values(req_ctx->partial_hash, digestsize);
2236
2237 return 0;
2238 }
2239
2240 static int chcr_sha_cra_init(struct crypto_tfm *tfm)
2241 {
2242 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2243 sizeof(struct chcr_ahash_req_ctx));
2244 return chcr_device_init(crypto_tfm_ctx(tfm));
2245 }
2246
2247 static int chcr_hmac_init(struct ahash_request *areq)
2248 {
2249 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2250 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
2251 struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
2252 unsigned int digestsize = crypto_ahash_digestsize(rtfm);
2253 unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2254
2255 chcr_sha_init(areq);
2256 req_ctx->data_len = bs;
2257 if (is_hmac(crypto_ahash_tfm(rtfm))) {
2258 if (digestsize == SHA224_DIGEST_SIZE)
2259 memcpy(req_ctx->partial_hash, hmacctx->ipad,
2260 SHA256_DIGEST_SIZE);
2261 else if (digestsize == SHA384_DIGEST_SIZE)
2262 memcpy(req_ctx->partial_hash, hmacctx->ipad,
2263 SHA512_DIGEST_SIZE);
2264 else
2265 memcpy(req_ctx->partial_hash, hmacctx->ipad,
2266 digestsize);
2267 }
2268 return 0;
2269 }
2270
2271 static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
2272 {
2273 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2274 sizeof(struct chcr_ahash_req_ctx));
2275 return chcr_device_init(crypto_tfm_ctx(tfm));
2276 }
2277
2278 inline void chcr_aead_common_exit(struct aead_request *req)
2279 {
2280 struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2281 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2282 struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
2283
2284 chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
2285 }
2286
2287 static int chcr_aead_common_init(struct aead_request *req)
2288 {
2289 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2290 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2291 struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2292 unsigned int authsize = crypto_aead_authsize(tfm);
2293 int error = -EINVAL;
2294
2295 /* validate key size */
2296 if (aeadctx->enckey_len == 0)
2297 goto err;
2298 if (reqctx->op && req->cryptlen < authsize)
2299 goto err;
2300 if (reqctx->b0_len)
2301 reqctx->scratch_pad = reqctx->iv + IV;
2302 else
2303 reqctx->scratch_pad = NULL;
2304
2305 error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2306 reqctx->op);
2307 if (error) {
2308 error = -ENOMEM;
2309 goto err;
2310 }
2311
2312 return 0;
2313 err:
2314 return error;
2315 }
2316
2317 static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
2318 int aadmax, int wrlen,
2319 unsigned short op_type)
2320 {
2321 unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
2322
2323 if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
2324 dst_nents > MAX_DSGL_ENT ||
2325 (req->assoclen > aadmax) ||
2326 (wrlen > SGE_MAX_WR_LEN))
2327 return 1;
2328 return 0;
2329 }
2330
2331 static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
2332 {
2333 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2334 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2335 struct aead_request *subreq = aead_request_ctx_dma(req);
2336
2337 aead_request_set_tfm(subreq, aeadctx->sw_cipher);
2338 aead_request_set_callback(subreq, req->base.flags,
2339 req->base.complete, req->base.data);
2340 aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
2341 req->iv);
2342 aead_request_set_ad(subreq, req->assoclen);
2343 return op_type ? crypto_aead_decrypt(subreq) :
2344 crypto_aead_encrypt(subreq);
2345 }
2346
2347 static struct sk_buff *create_authenc_wr(struct aead_request *req,
2348 unsigned short qid,
2349 int size)
2350 {
2351 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2352 struct chcr_context *ctx = a_ctx(tfm);
2353 struct uld_ctx *u_ctx = ULD_CTX(ctx);
2354 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2355 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2356 struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2357 struct sk_buff *skb = NULL;
2358 struct chcr_wr *chcr_req;
2359 struct cpl_rx_phys_dsgl *phys_cpl;
2360 struct ulptx_sgl *ulptx;
2361 unsigned int transhdr_len;
2362 unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
2363 unsigned int kctx_len = 0, dnents, snents;
2364 unsigned int authsize = crypto_aead_authsize(tfm);
2365 int error = -EINVAL;
2366 u8 *ivptr;
2367 int null = 0;
2368 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2369 GFP_ATOMIC;
2370 struct adapter *adap = padap(ctx->dev);
2371 unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2372
2373 rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
2374 if (req->cryptlen == 0)
2375 return NULL;
2376
2377 reqctx->b0_len = 0;
2378 error = chcr_aead_common_init(req);
2379 if (error)
2380 return ERR_PTR(error);
2381
2382 if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
2383 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2384 null = 1;
2385 }
2386 dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
2387 (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
2388 dnents += MIN_AUTH_SG; // For IV
2389 snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2390 CHCR_SRC_SG_SIZE, 0);
2391 dst_size = get_space_for_phys_dsgl(dnents);
2392 kctx_len = (KEY_CONTEXT_CTX_LEN_G(ntohl(aeadctx->key_ctx_hdr)) << 4)
2393 - sizeof(chcr_req->key_ctx);
2394 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2395 reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
2396 SGE_MAX_WR_LEN;
2397 temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
2398 : (sgl_len(snents) * 8);
2399 transhdr_len += temp;
2400 transhdr_len = roundup(transhdr_len, 16);
2401
2402 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2403 transhdr_len, reqctx->op)) {
2404 atomic_inc(&adap->chcr_stats.fallback);
2405 chcr_aead_common_exit(req);
2406 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2407 }
2408 skb = alloc_skb(transhdr_len, flags);
2409 if (!skb) {
2410 error = -ENOMEM;
2411 goto err;
2412 }
2413
2414 chcr_req = __skb_put_zero(skb, transhdr_len);
2415
2416 temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
2417
2418 /*
2419 * Input order is AAD, IV and payload, where the IV is included as
2420 * part of the authdata. All other fields are filled according
2421 * to the hardware spec.
2422 */
2423 chcr_req->sec_cpl.op_ivinsrtofst =
2424 FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
2425 chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
2426 chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2427 null ? 0 : 1 + IV,
2428 null ? 0 : IV + req->assoclen,
2429 req->assoclen + IV + 1,
2430 (temp & 0x1F0) >> 4);
2431 chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2432 temp & 0xF,
2433 null ? 0 : req->assoclen + IV + 1,
2434 temp, temp);
2435 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
2436 subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
2437 temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
2438 else
2439 temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
2440 chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
2441 (reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
2442 temp,
2443 actx->auth_mode, aeadctx->hmac_ctrl,
2444 IV >> 1);
2445 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2446 0, 0, dst_size);
2447
2448 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2449 if (reqctx->op == CHCR_ENCRYPT_OP ||
2450 subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2451 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
2452 memcpy(chcr_req->key_ctx.key, aeadctx->key,
2453 aeadctx->enckey_len);
2454 else
2455 memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2456 aeadctx->enckey_len);
2457
2458 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2459 actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
2460 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2461 ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2462 ulptx = (struct ulptx_sgl *)(ivptr + IV);
2463 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2464 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2465 memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
2466 memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
2467 CTR_RFC3686_IV_SIZE);
2468 *(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
2469 CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
2470 } else {
2471 memcpy(ivptr, req->iv, IV);
2472 }
2473 chcr_add_aead_dst_ent(req, phys_cpl, qid);
2474 chcr_add_aead_src_ent(req, ulptx);
2475 atomic_inc(&adap->chcr_stats.cipher_rqst);
2476 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
2477 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
2478 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2479 transhdr_len, temp, 0);
2480 reqctx->skb = skb;
2481
2482 return skb;
2483 err:
2484 chcr_aead_common_exit(req);
2485
2486 return ERR_PTR(error);
2487 }
2488
2489 int chcr_aead_dma_map(struct device *dev,
2490 struct aead_request *req,
2491 unsigned short op_type)
2492 {
2493 int error;
2494 struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2495 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2496 unsigned int authsize = crypto_aead_authsize(tfm);
2497 int src_len, dst_len;
2498
2499 /* calculate and handle the src and dst sg lengths separately for
2500 * in-place and out-of-place operations
2501 */
2502 if (req->src == req->dst) {
2503 src_len = req->assoclen + req->cryptlen + (op_type ?
2504 0 : authsize);
2505 dst_len = src_len;
2506 } else {
2507 src_len = req->assoclen + req->cryptlen;
2508 dst_len = req->assoclen + req->cryptlen + (op_type ?
2509 -authsize : authsize);
2510 }
2511
2512 if (!req->cryptlen || !src_len || !dst_len)
2513 return 0;
2514 reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
2515 DMA_BIDIRECTIONAL);
2516 if (dma_mapping_error(dev, reqctx->iv_dma))
2517 return -ENOMEM;
2518 if (reqctx->b0_len)
2519 reqctx->b0_dma = reqctx->iv_dma + IV;
2520 else
2521 reqctx->b0_dma = 0;
2522 if (req->src == req->dst) {
2523 error = dma_map_sg(dev, req->src,
2524 sg_nents_for_len(req->src, src_len),
2525 DMA_BIDIRECTIONAL);
2526 if (!error)
2527 goto err;
2528 } else {
2529 error = dma_map_sg(dev, req->src,
2530 sg_nents_for_len(req->src, src_len),
2531 DMA_TO_DEVICE);
2532 if (!error)
2533 goto err;
2534 error = dma_map_sg(dev, req->dst,
2535 sg_nents_for_len(req->dst, dst_len),
2536 DMA_FROM_DEVICE);
2537 if (!error) {
2538 dma_unmap_sg(dev, req->src,
2539 sg_nents_for_len(req->src, src_len),
2540 DMA_TO_DEVICE);
2541 goto err;
2542 }
2543 }
2544
2545 return 0;
2546 err:
2547 dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
2548 return -ENOMEM;
2549 }
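
/*
 * Editorial sketch (not driver code): the src/dst length rules shared
 * by the map/unmap pair. An in-place buffer must cover the tag in both
 * directions; an out-of-place destination grows by authsize on encrypt
 * and shrinks by authsize on decrypt.
 */
#if 0
static void example_aead_lens(bool in_place, bool decrypt,
			      unsigned int assoclen, unsigned int cryptlen,
			      unsigned int authsize,
			      int *src_len, int *dst_len)
{
	if (in_place) {
		*src_len = assoclen + cryptlen + (decrypt ? 0 : authsize);
		*dst_len = *src_len;
	} else {
		*src_len = assoclen + cryptlen;
		*dst_len = assoclen + cryptlen +
			   (decrypt ? -(int)authsize : (int)authsize);
	}
}
#endif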
2550
2551 void chcr_aead_dma_unmap(struct device *dev,
2552 struct aead_request *req,
2553 unsigned short op_type)
2554 {
2555 struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2556 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2557 unsigned int authsize = crypto_aead_authsize(tfm);
2558 int src_len, dst_len;
2559
2560 /* calculate and handle the src and dst sg lengths separately for
2561 * in-place and out-of-place operations
2562 */
2563 if (req->src == req->dst) {
2564 src_len = req->assoclen + req->cryptlen + (op_type ?
2565 0 : authsize);
2566 dst_len = src_len;
2567 } else {
2568 src_len = req->assoclen + req->cryptlen;
2569 dst_len = req->assoclen + req->cryptlen + (op_type ?
2570 -authsize : authsize);
2571 }
2572
2573 if (!req->cryptlen || !src_len || !dst_len)
2574 return;
2575
2576 dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
2577 DMA_BIDIRECTIONAL);
2578 if (req->src == req->dst) {
2579 dma_unmap_sg(dev, req->src,
2580 sg_nents_for_len(req->src, src_len),
2581 DMA_BIDIRECTIONAL);
2582 } else {
2583 dma_unmap_sg(dev, req->src,
2584 sg_nents_for_len(req->src, src_len),
2585 DMA_TO_DEVICE);
2586 dma_unmap_sg(dev, req->dst,
2587 sg_nents_for_len(req->dst, dst_len),
2588 DMA_FROM_DEVICE);
2589 }
2590 }
2591
2592 void chcr_add_aead_src_ent(struct aead_request *req,
2593 struct ulptx_sgl *ulptx)
2594 {
2595 struct ulptx_walk ulp_walk;
2596 struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2597
2598 if (reqctx->imm) {
2599 u8 *buf = (u8 *)ulptx;
2600
2601 if (reqctx->b0_len) {
2602 memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
2603 buf += reqctx->b0_len;
2604 }
2605 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2606 buf, req->cryptlen + req->assoclen, 0);
2607 } else {
2608 ulptx_walk_init(&ulp_walk, ulptx);
2609 if (reqctx->b0_len)
2610 ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
2611 reqctx->b0_dma);
2612 ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
2613 req->assoclen, 0);
2614 ulptx_walk_end(&ulp_walk);
2615 }
2616 }
2617
2618 void chcr_add_aead_dst_ent(struct aead_request *req,
2619 struct cpl_rx_phys_dsgl *phys_cpl,
2620 unsigned short qid)
2621 {
2622 struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2623 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2624 struct dsgl_walk dsgl_walk;
2625 unsigned int authsize = crypto_aead_authsize(tfm);
2626 struct chcr_context *ctx = a_ctx(tfm);
2627 struct uld_ctx *u_ctx = ULD_CTX(ctx);
2628 u32 temp;
2629 unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2630
2631 rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
2632 dsgl_walk_init(&dsgl_walk, phys_cpl);
2633 dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
2634 temp = req->assoclen + req->cryptlen +
2635 (reqctx->op ? -authsize : authsize);
2636 dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
2637 dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2638 }
2639
2640 void chcr_add_cipher_src_ent(struct skcipher_request *req,
2641 void *ulptx,
2642 struct cipher_wr_param *wrparam)
2643 {
2644 struct ulptx_walk ulp_walk;
2645 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2646 u8 *buf = ulptx;
2647
2648 memcpy(buf, reqctx->iv, IV);
2649 buf += IV;
2650 if (reqctx->imm) {
2651 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2652 buf, wrparam->bytes, reqctx->processed);
2653 } else {
2654 ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
2655 ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
2656 reqctx->src_ofst);
2657 reqctx->srcsg = ulp_walk.last_sg;
2658 reqctx->src_ofst = ulp_walk.last_sg_len;
2659 ulptx_walk_end(&ulp_walk);
2660 }
2661 }
2662
2663 void chcr_add_cipher_dst_ent(struct skcipher_request *req,
2664 struct cpl_rx_phys_dsgl *phys_cpl,
2665 struct cipher_wr_param *wrparam,
2666 unsigned short qid)
2667 {
2668 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2669 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
2670 struct chcr_context *ctx = c_ctx(tfm);
2671 struct uld_ctx *u_ctx = ULD_CTX(ctx);
2672 struct dsgl_walk dsgl_walk;
2673 unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2674
2675 rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
2676 dsgl_walk_init(&dsgl_walk, phys_cpl);
2677 dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
2678 reqctx->dst_ofst);
2679 reqctx->dstsg = dsgl_walk.last_sg;
2680 reqctx->dst_ofst = dsgl_walk.last_sg_len;
2681 dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2682 }
2683
2684 void chcr_add_hash_src_ent(struct ahash_request *req,
2685 struct ulptx_sgl *ulptx,
2686 struct hash_wr_param *param)
2687 {
2688 struct ulptx_walk ulp_walk;
2689 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2690
2691 if (reqctx->hctx_wr.imm) {
2692 u8 *buf = (u8 *)ulptx;
2693
2694 if (param->bfr_len) {
2695 memcpy(buf, reqctx->reqbfr, param->bfr_len);
2696 buf += param->bfr_len;
2697 }
2698
2699 sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
2700 sg_nents(reqctx->hctx_wr.srcsg), buf,
2701 param->sg_len, 0);
2702 } else {
2703 ulptx_walk_init(&ulp_walk, ulptx);
2704 if (param->bfr_len)
2705 ulptx_walk_add_page(&ulp_walk, param->bfr_len,
2706 reqctx->hctx_wr.dma_addr);
2707 ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
2708 param->sg_len, reqctx->hctx_wr.src_ofst);
2709 reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
2710 reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
2711 ulptx_walk_end(&ulp_walk);
2712 }
2713 }
2714
2715 int chcr_hash_dma_map(struct device *dev,
2716 struct ahash_request *req)
2717 {
2718 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2719 int error = 0;
2720
2721 if (!req->nbytes)
2722 return 0;
2723 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2724 DMA_TO_DEVICE);
2725 if (!error)
2726 return -ENOMEM;
2727 req_ctx->hctx_wr.is_sg_map = 1;
2728 return 0;
2729 }
2730
2731 void chcr_hash_dma_unmap(struct device *dev,
2732 struct ahash_request *req)
2733 {
2734 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2735
2736 if (!req->nbytes)
2737 return;
2738
2739 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2740 DMA_TO_DEVICE);
2741 req_ctx->hctx_wr.is_sg_map = 0;
2742
2743 }
2744
2745 int chcr_cipher_dma_map(struct device *dev,
2746 struct skcipher_request *req)
2747 {
2748 int error;
2749
2750 if (req->src == req->dst) {
2751 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2752 DMA_BIDIRECTIONAL);
2753 if (!error)
2754 goto err;
2755 } else {
2756 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2757 DMA_TO_DEVICE);
2758 if (!error)
2759 goto err;
2760 error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2761 DMA_FROM_DEVICE);
2762 if (!error) {
2763 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2764 DMA_TO_DEVICE);
2765 goto err;
2766 }
2767 }
2768
2769 return 0;
2770 err:
2771 return -ENOMEM;
2772 }
2773
2774 void chcr_cipher_dma_unmap(struct device *dev,
2775 struct skcipher_request *req)
2776 {
2777 if (req->src == req->dst) {
2778 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2779 DMA_BIDIRECTIONAL);
2780 } else {
2781 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2782 DMA_TO_DEVICE);
2783 dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2784 DMA_FROM_DEVICE);
2785 }
2786 }
2787
2788 static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2789 {
2790 __be32 data;
2791
2792 memset(block, 0, csize);
2793 block += csize;
2794
2795 if (csize >= 4)
2796 csize = 4;
2797 else if (msglen > (unsigned int)(1 << (8 * csize)))
2798 return -EOVERFLOW;
2799
2800 data = cpu_to_be32(msglen);
2801 memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2802
2803 return 0;
2804 }
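
/*
 * Editorial sketch (not driver code): set_msg_len() stores the CCM
 * message length big-endian in the last csize bytes of the length
 * field, e.g. for csize == 2:
 */
#if 0
static void example_msg_len(void)
{
	u8 field[2];

	set_msg_len(field, 0x1234, 2);
	/* field[0] == 0x12, field[1] == 0x34 */
}
#endif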
2805
2806 static int generate_b0(struct aead_request *req, u8 *ivptr,
2807 unsigned short op_type)
2808 {
2809 unsigned int l, lp, m;
2810 int rc;
2811 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2812 struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2813 u8 *b0 = reqctx->scratch_pad;
2814
2815 m = crypto_aead_authsize(aead);
2816
2817 memcpy(b0, ivptr, 16);
2818
2819 lp = b0[0];
2820 l = lp + 1;
2821
2822 /* set m, bits 3-5 */
2823 *b0 |= (8 * ((m - 2) / 2));
2824
2825 /* set adata, bit 6, if associated data is used */
2826 if (req->assoclen)
2827 *b0 |= 64;
2828 rc = set_msg_len(b0 + 16 - l,
2829 (op_type == CHCR_DECRYPT_OP) ?
2830 req->cryptlen - m : req->cryptlen, l);
2831
2832 return rc;
2833 }
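
/*
 * Editorial sketch (not driver code): the B0 flags byte assembled
 * above, per RFC 3610 - bit 6 is Adata, bits 3..5 encode
 * (M - 2) / 2, bits 0..2 hold L - 1 (already present in iv[0]).
 */
#if 0
static u8 example_b0_flags(bool have_aad, unsigned int m, u8 lprime)
{
	return (have_aad ? 64 : 0) | (8 * ((m - 2) / 2)) | lprime;
}
#endif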
2834
2835 static inline int crypto_ccm_check_iv(const u8 *iv)
2836 {
2837 /* 2 <= L <= 8, so 1 <= L' <= 7. */
2838 if (iv[0] < 1 || iv[0] > 7)
2839 return -EINVAL;
2840
2841 return 0;
2842 }
2843
2844 static int ccm_format_packet(struct aead_request *req,
2845 u8 *ivptr,
2846 unsigned int sub_type,
2847 unsigned short op_type,
2848 unsigned int assoclen)
2849 {
2850 struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2851 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2852 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2853 int rc = 0;
2854
2855 if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2856 ivptr[0] = 3;
2857 memcpy(ivptr + 1, &aeadctx->salt[0], 3);
2858 memcpy(ivptr + 4, req->iv, 8);
2859 memset(ivptr + 12, 0, 4);
2860 } else {
2861 memcpy(ivptr, req->iv, 16);
2862 }
2863 if (assoclen)
2864 put_unaligned_be16(assoclen, &reqctx->scratch_pad[16]);
2865
2866 rc = generate_b0(req, ivptr, op_type);
2867 /* zero the ctr value */
2868 memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
2869 return rc;
2870 }
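
/*
 * Editorial sketch (not driver code): the 16 byte RFC 4309 counter
 * block laid out above - flags byte 3 (a 4 byte length field), the
 * 3 byte salt from setkey, the 8 byte per-packet IV, and a zeroed
 * counter.
 */
#if 0
static void example_rfc4309_iv(u8 *ivptr, const u8 *salt, const u8 *iv)
{
	ivptr[0] = 3;
	memcpy(ivptr + 1, salt, 3);
	memcpy(ivptr + 4, iv, 8);
	memset(ivptr + 12, 0, 4);	/* counter starts at zero */
}
#endif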
2871
2872 static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2873 unsigned int dst_size,
2874 struct aead_request *req,
2875 unsigned short op_type)
2876 {
2877 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2878 struct chcr_context *ctx = a_ctx(tfm);
2879 struct uld_ctx *u_ctx = ULD_CTX(ctx);
2880 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2881 struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2882 unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2883 unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2884 unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2885 unsigned int ccm_xtra;
2886 unsigned int tag_offset = 0, auth_offset = 0;
2887 unsigned int assoclen;
2888
2889 rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
2890
2891 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2892 assoclen = req->assoclen - 8;
2893 else
2894 assoclen = req->assoclen;
2895 ccm_xtra = CCM_B0_SIZE +
2896 ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2897
2898 auth_offset = req->cryptlen ?
2899 (req->assoclen + IV + 1 + ccm_xtra) : 0;
2900 if (op_type == CHCR_DECRYPT_OP) {
2901 if (crypto_aead_authsize(tfm) != req->cryptlen)
2902 tag_offset = crypto_aead_authsize(tfm);
2903 else
2904 auth_offset = 0;
2905 }
2906
2907 sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
2908 sec_cpl->pldlen =
2909 htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
2910 /* For CCM there will always be a B0 block, so AAD start is always 1 */
2911 sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2912 1 + IV, IV + assoclen + ccm_xtra,
2913 req->assoclen + IV + 1 + ccm_xtra, 0);
2914
2915 sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
2916 auth_offset, tag_offset,
2917 (op_type == CHCR_ENCRYPT_OP) ? 0 :
2918 crypto_aead_authsize(tfm));
2919 sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2920 (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
2921 cipher_mode, mac_mode,
2922 aeadctx->hmac_ctrl, IV >> 1);
2923
2924 sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
2925 0, dst_size);
2926 }
2927
2928 static int aead_ccm_validate_input(unsigned short op_type,
2929 struct aead_request *req,
2930 struct chcr_aead_ctx *aeadctx,
2931 unsigned int sub_type)
2932 {
2933 if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2934 if (crypto_ccm_check_iv(req->iv)) {
2935 pr_err("CCM: IV check fails\n");
2936 return -EINVAL;
2937 }
2938 } else {
2939 if (req->assoclen != 16 && req->assoclen != 20) {
2940 pr_err("RFC4309: Invalid AAD length %d\n",
2941 req->assoclen);
2942 return -EINVAL;
2943 }
2944 }
2945 return 0;
2946 }
2947
2948 static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
2949 unsigned short qid,
2950 int size)
2951 {
2952 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2953 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2954 struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2955 struct sk_buff *skb = NULL;
2956 struct chcr_wr *chcr_req;
2957 struct cpl_rx_phys_dsgl *phys_cpl;
2958 struct ulptx_sgl *ulptx;
2959 unsigned int transhdr_len;
2960 unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
2961 unsigned int sub_type, assoclen = req->assoclen;
2962 unsigned int authsize = crypto_aead_authsize(tfm);
2963 int error = -EINVAL;
2964 u8 *ivptr;
2965 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2966 GFP_ATOMIC;
2967 struct adapter *adap = padap(a_ctx(tfm)->dev);
2968
2969 sub_type = get_aead_subtype(tfm);
2970 if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2971 assoclen -= 8;
2972 reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
2973 error = chcr_aead_common_init(req);
2974 if (error)
2975 return ERR_PTR(error);
2976
2977 error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
2978 if (error)
2979 goto err;
2980 dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
2981 + (reqctx->op ? -authsize : authsize),
2982 CHCR_DST_SG_SIZE, 0);
2983 dnents += MIN_CCM_SG; // For IV and B0
2984 dst_size = get_space_for_phys_dsgl(dnents);
2985 snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2986 CHCR_SRC_SG_SIZE, 0);
2987 snents += MIN_CCM_SG; //For B0
2988 kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
2989 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2990 reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
2991 reqctx->b0_len) <= SGE_MAX_WR_LEN;
2992 temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
2993 reqctx->b0_len, 16) :
2994 (sgl_len(snents) * 8);
2995 transhdr_len += temp;
2996 transhdr_len = roundup(transhdr_len, 16);
2997
2998 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
2999 reqctx->b0_len, transhdr_len, reqctx->op)) {
3000 atomic_inc(&adap->chcr_stats.fallback);
3001 chcr_aead_common_exit(req);
3002 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3003 }
3004 skb = alloc_skb(transhdr_len, flags);
3005
3006 if (!skb) {
3007 error = -ENOMEM;
3008 goto err;
3009 }
3010
3011 chcr_req = __skb_put_zero(skb, transhdr_len);
3012
3013 fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
3014
3015 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3016 memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3017 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3018 aeadctx->key, aeadctx->enckey_len);
3019
3020 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3021 ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3022 ulptx = (struct ulptx_sgl *)(ivptr + IV);
3023 error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
3024 if (error)
3025 goto dstmap_fail;
3026 chcr_add_aead_dst_ent(req, phys_cpl, qid);
3027 chcr_add_aead_src_ent(req, ulptx);
3028
3029 atomic_inc(&adap->chcr_stats.aead_rqst);
3030 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3031 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
3032 reqctx->b0_len) : 0);
3033 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
3034 transhdr_len, temp, 0);
3035 reqctx->skb = skb;
3036
3037 return skb;
3038 dstmap_fail:
3039 kfree_skb(skb);
3040 err:
3041 chcr_aead_common_exit(req);
3042 return ERR_PTR(error);
3043 }
3044
3045 static struct sk_buff *create_gcm_wr(struct aead_request *req,
3046 unsigned short qid,
3047 int size)
3048 {
3049 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3050 struct chcr_context *ctx = a_ctx(tfm);
3051 struct uld_ctx *u_ctx = ULD_CTX(ctx);
3052 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
3053 struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
3054 struct sk_buff *skb = NULL;
3055 struct chcr_wr *chcr_req;
3056 struct cpl_rx_phys_dsgl *phys_cpl;
3057 struct ulptx_sgl *ulptx;
3058 unsigned int transhdr_len, dnents = 0, snents;
3059 unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
3060 unsigned int authsize = crypto_aead_authsize(tfm);
3061 int error = -EINVAL;
3062 u8 *ivptr;
3063 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
3064 GFP_ATOMIC;
3065 struct adapter *adap = padap(ctx->dev);
3066 unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
3067
3068 rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
3069 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
3070 assoclen = req->assoclen - 8;
3071
3072 reqctx->b0_len = 0;
3073 error = chcr_aead_common_init(req);
3074 if (error)
3075 return ERR_PTR(error);
3076 dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
3077 (reqctx->op ? -authsize : authsize),
3078 CHCR_DST_SG_SIZE, 0);
3079 snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3080 CHCR_SRC_SG_SIZE, 0);
3081 dnents += MIN_GCM_SG; // For IV
3082 dst_size = get_space_for_phys_dsgl(dnents);
3083 kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
3084 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3085 reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
3086 SGE_MAX_WR_LEN;
3087 temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
3088 (sgl_len(snents) * 8);
3089 transhdr_len += temp;
3090 transhdr_len = roundup(transhdr_len, 16);
3091 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
3092 transhdr_len, reqctx->op)) {
3093
3094 atomic_inc(&adap->chcr_stats.fallback);
3095 chcr_aead_common_exit(req);
3096 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3097 }
3098 skb = alloc_skb(transhdr_len, flags);
3099 if (!skb) {
3100 error = -ENOMEM;
3101 goto err;
3102 }
3103
3104 chcr_req = __skb_put_zero(skb, transhdr_len);
3105
3106 //Offset of tag from end
3107 temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
3108 chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
3109 rx_channel_id, 2, 1);
3110 chcr_req->sec_cpl.pldlen =
3111 htonl(req->assoclen + IV + req->cryptlen);
3112 chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
3113 assoclen ? 1 + IV : 0,
3114 assoclen ? IV + assoclen : 0,
3115 req->assoclen + IV + 1, 0);
3116 chcr_req->sec_cpl.cipherstop_lo_authinsert =
3117 FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
3118 temp, temp);
3119 chcr_req->sec_cpl.seqno_numivs =
3120 FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
3121 CHCR_ENCRYPT_OP) ? 1 : 0,
3122 CHCR_SCMD_CIPHER_MODE_AES_GCM,
3123 CHCR_SCMD_AUTH_MODE_GHASH,
3124 aeadctx->hmac_ctrl, IV >> 1);
3125 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
3126 0, 0, dst_size);
3127 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3128 memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3129 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3130 GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
3131
3132 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3133 ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3134 /* prepare a 16 byte iv */
3135 /* S A L T | IV | 0x00000001 */
3136 if (get_aead_subtype(tfm) ==
3137 CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
3138 memcpy(ivptr, aeadctx->salt, 4);
3139 memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
3140 } else {
3141 memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
3142 }
3143 put_unaligned_be32(0x01, &ivptr[12]);
3144 ulptx = (struct ulptx_sgl *)(ivptr + 16);
3145
3146 chcr_add_aead_dst_ent(req, phys_cpl, qid);
3147 chcr_add_aead_src_ent(req, ulptx);
3148 atomic_inc(&adap->chcr_stats.aead_rqst);
3149 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3150 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
3151 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
3152 transhdr_len, temp, reqctx->verify);
3153 reqctx->skb = skb;
3154 return skb;
3155
3156 err:
3157 chcr_aead_common_exit(req);
3158 return ERR_PTR(error);
3159 }
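
/*
 * Editorial sketch (not driver code): the 16 byte GCM IV block built
 * in create_gcm_wr() above. RFC 4106 prepends the 4 byte salt from
 * setkey to the 8 byte per-packet IV; plain GCM uses the caller's
 * 12 byte IV directly. The trailing 32 bit counter starts at 1 (J0).
 */
#if 0
static void example_gcm_j0(u8 *ivptr, const u8 *salt, const u8 *iv,
			   bool rfc4106)
{
	if (rfc4106) {
		memcpy(ivptr, salt, 4);
		memcpy(ivptr + 4, iv, GCM_RFC4106_IV_SIZE);	/* 8 bytes */
	} else {
		memcpy(ivptr, iv, GCM_AES_IV_SIZE);		/* 12 bytes */
	}
	put_unaligned_be32(0x01, &ivptr[12]);
}
#endif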
3160
3161
3162
3163 static int chcr_aead_cra_init(struct crypto_aead *tfm)
3164 {
3165 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3166 struct aead_alg *alg = crypto_aead_alg(tfm);
3167
3168 aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
3169 CRYPTO_ALG_NEED_FALLBACK |
3170 CRYPTO_ALG_ASYNC);
3171 if (IS_ERR(aeadctx->sw_cipher))
3172 return PTR_ERR(aeadctx->sw_cipher);
3173 crypto_aead_set_reqsize_dma(
3174 tfm, max(sizeof(struct chcr_aead_reqctx),
3175 sizeof(struct aead_request) +
3176 crypto_aead_reqsize(aeadctx->sw_cipher)));
3177 return chcr_device_init(a_ctx(tfm));
3178 }
3179
3180 static void chcr_aead_cra_exit(struct crypto_aead *tfm)
3181 {
3182 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3183
3184 crypto_free_aead(aeadctx->sw_cipher);
3185 }
3186
3187 static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
3188 unsigned int authsize)
3189 {
3190 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3191
3192 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
3193 aeadctx->mayverify = VERIFY_HW;
3194 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3195 }
3196 static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
3197 unsigned int authsize)
3198 {
3199 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3200 u32 maxauth = crypto_aead_maxauthsize(tfm);
3201
3202 /* SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2 does
3203 * not hold for SHA1, so the authsize == 12 check must come before the
3204 * authsize == (maxauth >> 1) check.
3205 */
3206 if (authsize == ICV_4) {
3207 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3208 aeadctx->mayverify = VERIFY_HW;
3209 } else if (authsize == ICV_6) {
3210 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3211 aeadctx->mayverify = VERIFY_HW;
3212 } else if (authsize == ICV_10) {
3213 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3214 aeadctx->mayverify = VERIFY_HW;
3215 } else if (authsize == ICV_12) {
3216 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3217 aeadctx->mayverify = VERIFY_HW;
3218 } else if (authsize == ICV_14) {
3219 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3220 aeadctx->mayverify = VERIFY_HW;
3221 } else if (authsize == (maxauth >> 1)) {
3222 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3223 aeadctx->mayverify = VERIFY_HW;
3224 } else if (authsize == maxauth) {
3225 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3226 aeadctx->mayverify = VERIFY_HW;
3227 } else {
3228 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3229 aeadctx->mayverify = VERIFY_SW;
3230 }
3231 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3232 }
3233
3234
3235 static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
3236 {
3237 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3238
3239 switch (authsize) {
3240 case ICV_4:
3241 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3242 aeadctx->mayverify = VERIFY_HW;
3243 break;
3244 case ICV_8:
3245 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3246 aeadctx->mayverify = VERIFY_HW;
3247 break;
3248 case ICV_12:
3249 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3250 aeadctx->mayverify = VERIFY_HW;
3251 break;
3252 case ICV_14:
3253 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3254 aeadctx->mayverify = VERIFY_HW;
3255 break;
3256 case ICV_16:
3257 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3258 aeadctx->mayverify = VERIFY_HW;
3259 break;
3260 case ICV_13:
3261 case ICV_15:
3262 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3263 aeadctx->mayverify = VERIFY_SW;
3264 break;
3265 default:
3266 return -EINVAL;
3267 }
3268 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3269 }
3270
3271 static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
3272 unsigned int authsize)
3273 {
3274 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3275
3276 switch (authsize) {
3277 case ICV_8:
3278 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3279 aeadctx->mayverify = VERIFY_HW;
3280 break;
3281 case ICV_12:
3282 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3283 aeadctx->mayverify = VERIFY_HW;
3284 break;
3285 case ICV_16:
3286 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3287 aeadctx->mayverify = VERIFY_HW;
3288 break;
3289 default:
3290 return -EINVAL;
3291 }
3292 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3293 }
3294
3295 static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
3296 unsigned int authsize)
3297 {
3298 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3299
3300 switch (authsize) {
3301 case ICV_4:
3302 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3303 aeadctx->mayverify = VERIFY_HW;
3304 break;
3305 case ICV_6:
3306 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3307 aeadctx->mayverify = VERIFY_HW;
3308 break;
3309 case ICV_8:
3310 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3311 aeadctx->mayverify = VERIFY_HW;
3312 break;
3313 case ICV_10:
3314 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3315 aeadctx->mayverify = VERIFY_HW;
3316 break;
3317 case ICV_12:
3318 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3319 aeadctx->mayverify = VERIFY_HW;
3320 break;
3321 case ICV_14:
3322 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3323 aeadctx->mayverify = VERIFY_HW;
3324 break;
3325 case ICV_16:
3326 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3327 aeadctx->mayverify = VERIFY_HW;
3328 break;
3329 default:
3330 return -EINVAL;
3331 }
3332 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3333 }
3334
3335 static int chcr_ccm_common_setkey(struct crypto_aead *aead,
3336 const u8 *key,
3337 unsigned int keylen)
3338 {
3339 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3340 unsigned char ck_size, mk_size;
3341 int key_ctx_size = 0;
3342
3343 key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
3344 if (keylen == AES_KEYSIZE_128) {
3345 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3346 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
3347 } else if (keylen == AES_KEYSIZE_192) {
3348 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3349 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
3350 } else if (keylen == AES_KEYSIZE_256) {
3351 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3352 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
3353 } else {
3354 aeadctx->enckey_len = 0;
3355 return -EINVAL;
3356 }
3357 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
3358 key_ctx_size >> 4);
3359 memcpy(aeadctx->key, key, keylen);
3360 aeadctx->enckey_len = keylen;
3361
3362 return 0;
3363 }
3364
3365 static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
3366 const u8 *key,
3367 unsigned int keylen)
3368 {
3369 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3370 int error;
3371
3372 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3373 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3374 CRYPTO_TFM_REQ_MASK);
3375 error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3376 if (error)
3377 return error;
3378 return chcr_ccm_common_setkey(aead, key, keylen);
3379 }
3380
3381 static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
3382 unsigned int keylen)
3383 {
3384 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3385 int error;
3386
3387 if (keylen < 3) {
3388 aeadctx->enckey_len = 0;
3389 return -EINVAL;
3390 }
3391 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3392 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3393 CRYPTO_TFM_REQ_MASK);
3394 error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3395 if (error)
3396 return error;
3397 keylen -= 3;
3398 memcpy(aeadctx->salt, key + keylen, 3);
3399 return chcr_ccm_common_setkey(aead, key, keylen);
3400 }
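
/* Illustrative sketch, not part of the driver: as the function above
 * shows, an rfc4309 key blob is the AES key followed by a 3-byte salt,
 * so callers pass keylen = AES key size + 3. The all-zero example key
 * and the salt bytes below are placeholders.
 *
 *	key[0..keylen-4]        AES key -> chcr_ccm_common_setkey()
 *	key[keylen-3..keylen-1] salt    -> aeadctx->salt
 */
#if 0
static int example_rfc4309_setkey(struct crypto_aead *tfm)
{
	u8 blob[AES_KEYSIZE_128 + 3] = {
		/* bytes 0..15: example AES-128 key (all zero) */
		[AES_KEYSIZE_128] = 0xde, 0xad, 0xbe,	/* 3-byte salt */
	};

	return crypto_aead_setkey(tfm, blob, sizeof(blob));
}
#endif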
3401
3402 static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3403 unsigned int keylen)
3404 {
3405 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3406 struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
3407 unsigned int ck_size;
3408 int ret = 0, key_ctx_size = 0;
3409 struct crypto_aes_ctx aes;
3410
3411 aeadctx->enckey_len = 0;
3412 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3413 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
3414 & CRYPTO_TFM_REQ_MASK);
3415 ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3416 if (ret)
3417 goto out;
3418
3419 if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3420 keylen > 3) {
3421 keylen -= 4; /* nonce/salt is present in the last 4 bytes */
3422 memcpy(aeadctx->salt, key + keylen, 4);
3423 }
3424 if (keylen == AES_KEYSIZE_128) {
3425 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3426 } else if (keylen == AES_KEYSIZE_192) {
3427 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3428 } else if (keylen == AES_KEYSIZE_256) {
3429 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3430 } else {
3431 pr_err("GCM: Invalid key length %d\n", keylen);
3432 ret = -EINVAL;
3433 goto out;
3434 }
3435
3436 memcpy(aeadctx->key, key, keylen);
3437 aeadctx->enckey_len = keylen;
3438 key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
3439 AEAD_H_SIZE;
3440 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
3441 CHCR_KEYCTX_MAC_KEY_SIZE_128,
3442 0, 0,
3443 key_ctx_size >> 4);
3444 /* Compute the GHASH subkey H = CIPH_K(0^128), i.e. AES applied to
3445 * 16 zero bytes. It is stored in the key context.
3446 */
3447 ret = aes_expandkey(&aes, key, keylen);
3448 if (ret) {
3449 aeadctx->enckey_len = 0;
3450 goto out;
3451 }
3452 memset(gctx->ghash_h, 0, AEAD_H_SIZE);
3453 aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h);
3454 memzero_explicit(&aes, sizeof(aes));
3455
3456 out:
3457 return ret;
3458 }
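
/* Illustrative sketch, not part of the driver: the GHASH subkey
 * derivation performed at the end of chcr_gcm_setkey(), isolated as a
 * stand-alone helper. It uses the same kernel AES library calls as the
 * code above; the function name is hypothetical.
 */
#if 0
static int example_derive_ghash_subkey(u8 *h, const u8 *key,
				       unsigned int keylen)
{
	struct crypto_aes_ctx aes;
	int ret;

	ret = aes_expandkey(&aes, key, keylen);
	if (ret)
		return ret;
	memset(h, 0, AES_BLOCK_SIZE);	/* H = CIPH_K(0^128) */
	aes_encrypt(&aes, h, h);
	memzero_explicit(&aes, sizeof(aes));	/* scrub the key schedule */
	return 0;
}
#endif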
3459
3460 static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
3461 unsigned int keylen)
3462 {
3463 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3464 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3465 /* keys holds both the authentication and cipher keys */
3466 struct crypto_authenc_keys keys;
3467 unsigned int subtype;
3468 unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
3469 int err = 0, key_ctx_len = 0;
3470 unsigned char ck_size = 0;
3471 struct algo_param param;
3472 int align;
3473
3474 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3475 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3476 & CRYPTO_TFM_REQ_MASK);
3477 err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3478 if (err)
3479 goto out;
3480
3481 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3482 goto out;
3483
3484 if (get_alg_config(&param, max_authsize)) {
3485 pr_err("Unsupported digest size\n");
3486 goto out;
3487 }
3488 subtype = get_aead_subtype(authenc);
3489 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3490 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3491 if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3492 goto out;
3493 memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3494 - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3495 keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3496 }
3497 if (keys.enckeylen == AES_KEYSIZE_128) {
3498 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3499 } else if (keys.enckeylen == AES_KEYSIZE_192) {
3500 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3501 } else if (keys.enckeylen == AES_KEYSIZE_256) {
3502 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3503 } else {
3504 pr_err("Unsupported cipher key\n");
3505 goto out;
3506 }
3507
3508 /* Copy only the encryption key. The auth key is used to generate
3509 * h(ipad) and h(opad), so it is not needed again; authkeylen is the
3510 * size of the hash digest.
3511 */
3512 memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3513 aeadctx->enckey_len = keys.enckeylen;
3514 if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3515 subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3516
3517 get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3518 aeadctx->enckey_len << 3);
3519 }
3520
3521 align = KEYCTX_ALIGN_PAD(max_authsize);
3522 err = chcr_prepare_hmac_key(keys.authkey, keys.authkeylen, max_authsize,
3523 actx->h_iopad,
3524 actx->h_iopad + param.result_size + align);
3525 if (err)
3526 goto out;
3527
3528 key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16) +
3529 (param.result_size + align) * 2;
3530 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size, 0, 1,
3531 key_ctx_len >> 4);
3532 actx->auth_mode = param.auth_mode;
3533
3534 memzero_explicit(&keys, sizeof(keys));
3535 return 0;
3536
3537 out:
3538 aeadctx->enckey_len = 0;
3539 memzero_explicit(&keys, sizeof(keys));
3540 return -EINVAL;
3541 }
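
/* Illustrative sketch, not part of the driver: the key blob parsed by
 * crypto_authenc_extractkeys() above is an rtattr header carrying the
 * cipher-key length, followed by the auth key and then the cipher key
 * (see crypto/authenc.c). The helper name is hypothetical; the rtattr
 * macros and struct crypto_authenc_key_param come from headers already
 * included at the top of this file.
 */
#if 0
static int example_build_authenc_key_blob(u8 *blob, const u8 *authkey,
					  unsigned int authkeylen,
					  const u8 *enckey,
					  unsigned int enckeylen)
{
	struct rtattr *rta = (struct rtattr *)blob;
	struct crypto_authenc_key_param *param;

	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enckeylen);

	memcpy(blob + RTA_SPACE(sizeof(*param)), authkey, authkeylen);
	memcpy(blob + RTA_SPACE(sizeof(*param)) + authkeylen,
	       enckey, enckeylen);
	return RTA_SPACE(sizeof(*param)) + authkeylen + enckeylen;
}
#endif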
3542
3543 static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3544 const u8 *key, unsigned int keylen)
3545 {
3546 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3547 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3548 /* keys holds both the authentication and cipher keys */
3549 struct crypto_authenc_keys keys;
3550 int err;
3551 unsigned int subtype;
3552 int key_ctx_len = 0;
3553 unsigned char ck_size = 0;
3554
3555 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3556 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3557 & CRYPTO_TFM_REQ_MASK);
3558 err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3559 if (err)
3560 goto out;
3561
3562 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3563 goto out;
3564
3565 subtype = get_aead_subtype(authenc);
3566 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3567 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3568 if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3569 goto out;
3570 memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3571 - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3572 keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3573 }
3574 if (keys.enckeylen == AES_KEYSIZE_128) {
3575 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3576 } else if (keys.enckeylen == AES_KEYSIZE_192) {
3577 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3578 } else if (keys.enckeylen == AES_KEYSIZE_256) {
3579 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3580 } else {
3581 pr_err("Unsupported cipher key %d\n", keys.enckeylen);
3582 goto out;
3583 }
3584 memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3585 aeadctx->enckey_len = keys.enckeylen;
3586 if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3587 subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3588 get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3589 aeadctx->enckey_len << 3);
3590 }
3591 key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
3592
3593 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
3594 0, key_ctx_len >> 4);
3595 actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
3596 memzero_explicit(&keys, sizeof(keys));
3597 return 0;
3598 out:
3599 aeadctx->enckey_len = 0;
3600 memzero_explicit(&keys, sizeof(keys));
3601 return -EINVAL;
3602 }
3603
3604 static int chcr_aead_op(struct aead_request *req,
3605 int size,
3606 create_wr_t create_wr_fn)
3607 {
3608 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3609 struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
3610 struct chcr_context *ctx = a_ctx(tfm);
3611 struct uld_ctx *u_ctx = ULD_CTX(ctx);
3612 struct sk_buff *skb;
3613 struct chcr_dev *cdev;
3614
3615 cdev = a_ctx(tfm)->dev;
3616 if (!cdev) {
3617 pr_err("%s : No crypto device.\n", __func__);
3618 return -ENXIO;
3619 }
3620
3621 if (chcr_inc_wrcount(cdev)) {
3622 /* Detach state for CHCR means lldi or padap is being freed;
3623 * we cannot queue new work, so hand the request to the fallback.
3624 */
3625 return chcr_aead_fallback(req, reqctx->op);
3626 }
3627
3628 if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
3629 reqctx->txqidx) &&
3630 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) {
3631 chcr_dec_wrcount(cdev);
3632 return -ENOSPC;
3633 }
3634
3635 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3636 crypto_ipsec_check_assoclen(req->assoclen) != 0) {
3637 pr_err("RFC4106: Invalid value of assoclen %d\n",
3638 req->assoclen);
3639 return -EINVAL;
3640 }
3641
3642 /* Form a WR from req */
3643 skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size);
3644
3645 if (IS_ERR_OR_NULL(skb)) {
3646 chcr_dec_wrcount(cdev);
3647 return PTR_ERR_OR_ZERO(skb);
3648 }
3649
3650 skb->dev = u_ctx->lldi.ports[0];
3651 set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
3652 chcr_send_wr(skb);
3653 return -EINPROGRESS;
3654 }
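
/* Illustrative sketch, not part of the driver: chcr_aead_op() returns
 * -EINPROGRESS on successful submission and -ENOSPC when the queue is
 * full without CRYPTO_TFM_REQ_MAY_BACKLOG. A caller wanting synchronous
 * behaviour typically wraps the submission in crypto_wait_req(), which
 * absorbs -EINPROGRESS/-EBUSY and sleeps until the completion fires.
 */
#if 0
static int example_sync_aead_encrypt(struct aead_request *req)
{
	DECLARE_CRYPTO_WAIT(wait);

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	return crypto_wait_req(crypto_aead_encrypt(req), &wait);
}
#endif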
3655
3656 static int chcr_aead_encrypt(struct aead_request *req)
3657 {
3658 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3659 struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
3660 struct chcr_context *ctx = a_ctx(tfm);
3661 unsigned int cpu;
3662
3663 cpu = get_cpu();
3664 reqctx->txqidx = cpu % ctx->ntxq;
3665 reqctx->rxqidx = cpu % ctx->nrxq;
3666 put_cpu();
3667
3668 reqctx->verify = VERIFY_HW;
3669 reqctx->op = CHCR_ENCRYPT_OP;
3670
3671 switch (get_aead_subtype(tfm)) {
3672 case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3673 case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3674 case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3675 case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3676 return chcr_aead_op(req, 0, create_authenc_wr);
3677 case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3678 case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3679 return chcr_aead_op(req, 0, create_aead_ccm_wr);
3680 default:
3681 return chcr_aead_op(req, 0, create_gcm_wr);
3682 }
3683 }
3684
3685 static int chcr_aead_decrypt(struct aead_request *req)
3686 {
3687 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3688 struct chcr_context *ctx = a_ctx(tfm);
3689 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
3690 struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
3691 int size;
3692 unsigned int cpu;
3693
3694 cpu = get_cpu();
3695 reqctx->txqidx = cpu % ctx->ntxq;
3696 reqctx->rxqidx = cpu % ctx->nrxq;
3697 put_cpu();
3698
3699 if (aeadctx->mayverify == VERIFY_SW) {
3700 size = crypto_aead_maxauthsize(tfm);
3701 reqctx->verify = VERIFY_SW;
3702 } else {
3703 size = 0;
3704 reqctx->verify = VERIFY_HW;
3705 }
3706 reqctx->op = CHCR_DECRYPT_OP;
3707 switch (get_aead_subtype(tfm)) {
3708 case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3709 case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3710 case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3711 case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3712 return chcr_aead_op(req, size, create_authenc_wr);
3713 case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3714 case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3715 return chcr_aead_op(req, size, create_aead_ccm_wr);
3716 default:
3717 return chcr_aead_op(req, size, create_gcm_wr);
3718 }
3719 }
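
/* Illustrative sketch, not part of the driver: with VERIFY_SW the
 * hardware is asked for the full-length tag (size = maxauthsize above)
 * and the driver compares it against the received ICV in software. Such
 * a comparison must be constant-time; crypto_memneq() provides that.
 * The helper name is hypothetical.
 */
#if 0
static int example_verify_tag_sw(const u8 *computed, const u8 *received,
				 unsigned int authsize)
{
	return crypto_memneq(computed, received, authsize) ? -EBADMSG : 0;
}
#endif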
3720
3721 static struct chcr_alg_template driver_algs[] = {
3722 /* AES-CBC */
3723 {
3724 .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
3725 .is_registered = 0,
3726 .alg.skcipher = {
3727 .base.cra_name = "cbc(aes)",
3728 .base.cra_driver_name = "cbc-aes-chcr",
3729 .base.cra_blocksize = AES_BLOCK_SIZE,
3730
3731 .init = chcr_init_tfm,
3732 .exit = chcr_exit_tfm,
3733 .min_keysize = AES_MIN_KEY_SIZE,
3734 .max_keysize = AES_MAX_KEY_SIZE,
3735 .ivsize = AES_BLOCK_SIZE,
3736 .setkey = chcr_aes_cbc_setkey,
3737 .encrypt = chcr_aes_encrypt,
3738 .decrypt = chcr_aes_decrypt,
3739 }
3740 },
3741 {
3742 .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
3743 .is_registered = 0,
3744 .alg.skcipher = {
3745 .base.cra_name = "xts(aes)",
3746 .base.cra_driver_name = "xts-aes-chcr",
3747 .base.cra_blocksize = AES_BLOCK_SIZE,
3748
3749 .init = chcr_init_tfm,
3750 .exit = chcr_exit_tfm,
3751 .min_keysize = 2 * AES_MIN_KEY_SIZE,
3752 .max_keysize = 2 * AES_MAX_KEY_SIZE,
3753 .ivsize = AES_BLOCK_SIZE,
3754 .setkey = chcr_aes_xts_setkey,
3755 .encrypt = chcr_aes_encrypt,
3756 .decrypt = chcr_aes_decrypt,
3757 }
3758 },
3759 {
3760 .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
3761 .is_registered = 0,
3762 .alg.skcipher = {
3763 .base.cra_name = "ctr(aes)",
3764 .base.cra_driver_name = "ctr-aes-chcr",
3765 .base.cra_blocksize = 1,
3766
3767 .init = chcr_init_tfm,
3768 .exit = chcr_exit_tfm,
3769 .min_keysize = AES_MIN_KEY_SIZE,
3770 .max_keysize = AES_MAX_KEY_SIZE,
3771 .ivsize = AES_BLOCK_SIZE,
3772 .setkey = chcr_aes_ctr_setkey,
3773 .encrypt = chcr_aes_encrypt,
3774 .decrypt = chcr_aes_decrypt,
3775 }
3776 },
3777 {
3778 .type = CRYPTO_ALG_TYPE_SKCIPHER |
3779 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
3780 .is_registered = 0,
3781 .alg.skcipher = {
3782 .base.cra_name = "rfc3686(ctr(aes))",
3783 .base.cra_driver_name = "rfc3686-ctr-aes-chcr",
3784 .base.cra_blocksize = 1,
3785
3786 .init = chcr_rfc3686_init,
3787 .exit = chcr_exit_tfm,
3788 .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
3789 .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
3790 .ivsize = CTR_RFC3686_IV_SIZE,
3791 .setkey = chcr_aes_rfc3686_setkey,
3792 .encrypt = chcr_aes_encrypt,
3793 .decrypt = chcr_aes_decrypt,
3794 }
3795 },
3796 /* SHA */
3797 {
3798 .type = CRYPTO_ALG_TYPE_AHASH,
3799 .is_registered = 0,
3800 .alg.hash = {
3801 .halg.digestsize = SHA1_DIGEST_SIZE,
3802 .halg.base = {
3803 .cra_name = "sha1",
3804 .cra_driver_name = "sha1-chcr",
3805 .cra_blocksize = SHA1_BLOCK_SIZE,
3806 }
3807 }
3808 },
3809 {
3810 .type = CRYPTO_ALG_TYPE_AHASH,
3811 .is_registered = 0,
3812 .alg.hash = {
3813 .halg.digestsize = SHA256_DIGEST_SIZE,
3814 .halg.base = {
3815 .cra_name = "sha256",
3816 .cra_driver_name = "sha256-chcr",
3817 .cra_blocksize = SHA256_BLOCK_SIZE,
3818 }
3819 }
3820 },
3821 {
3822 .type = CRYPTO_ALG_TYPE_AHASH,
3823 .is_registered = 0,
3824 .alg.hash = {
3825 .halg.digestsize = SHA224_DIGEST_SIZE,
3826 .halg.base = {
3827 .cra_name = "sha224",
3828 .cra_driver_name = "sha224-chcr",
3829 .cra_blocksize = SHA224_BLOCK_SIZE,
3830 }
3831 }
3832 },
3833 {
3834 .type = CRYPTO_ALG_TYPE_AHASH,
3835 .is_registered = 0,
3836 .alg.hash = {
3837 .halg.digestsize = SHA384_DIGEST_SIZE,
3838 .halg.base = {
3839 .cra_name = "sha384",
3840 .cra_driver_name = "sha384-chcr",
3841 .cra_blocksize = SHA384_BLOCK_SIZE,
3842 }
3843 }
3844 },
3845 {
3846 .type = CRYPTO_ALG_TYPE_AHASH,
3847 .is_registered = 0,
3848 .alg.hash = {
3849 .halg.digestsize = SHA512_DIGEST_SIZE,
3850 .halg.base = {
3851 .cra_name = "sha512",
3852 .cra_driver_name = "sha512-chcr",
3853 .cra_blocksize = SHA512_BLOCK_SIZE,
3854 }
3855 }
3856 },
3857 /* HMAC */
3858 {
3859 .type = CRYPTO_ALG_TYPE_HMAC,
3860 .is_registered = 0,
3861 .alg.hash = {
3862 .halg.digestsize = SHA1_DIGEST_SIZE,
3863 .halg.base = {
3864 .cra_name = "hmac(sha1)",
3865 .cra_driver_name = "hmac-sha1-chcr",
3866 .cra_blocksize = SHA1_BLOCK_SIZE,
3867 }
3868 }
3869 },
3870 {
3871 .type = CRYPTO_ALG_TYPE_HMAC,
3872 .is_registered = 0,
3873 .alg.hash = {
3874 .halg.digestsize = SHA224_DIGEST_SIZE,
3875 .halg.base = {
3876 .cra_name = "hmac(sha224)",
3877 .cra_driver_name = "hmac-sha224-chcr",
3878 .cra_blocksize = SHA224_BLOCK_SIZE,
3879 }
3880 }
3881 },
3882 {
3883 .type = CRYPTO_ALG_TYPE_HMAC,
3884 .is_registered = 0,
3885 .alg.hash = {
3886 .halg.digestsize = SHA256_DIGEST_SIZE,
3887 .halg.base = {
3888 .cra_name = "hmac(sha256)",
3889 .cra_driver_name = "hmac-sha256-chcr",
3890 .cra_blocksize = SHA256_BLOCK_SIZE,
3891 }
3892 }
3893 },
3894 {
3895 .type = CRYPTO_ALG_TYPE_HMAC,
3896 .is_registered = 0,
3897 .alg.hash = {
3898 .halg.digestsize = SHA384_DIGEST_SIZE,
3899 .halg.base = {
3900 .cra_name = "hmac(sha384)",
3901 .cra_driver_name = "hmac-sha384-chcr",
3902 .cra_blocksize = SHA384_BLOCK_SIZE,
3903 }
3904 }
3905 },
3906 {
3907 .type = CRYPTO_ALG_TYPE_HMAC,
3908 .is_registered = 0,
3909 .alg.hash = {
3910 .halg.digestsize = SHA512_DIGEST_SIZE,
3911 .halg.base = {
3912 .cra_name = "hmac(sha512)",
3913 .cra_driver_name = "hmac-sha512-chcr",
3914 .cra_blocksize = SHA512_BLOCK_SIZE,
3915 }
3916 }
3917 },
3918 /* Add AEAD Algorithms */
3919 {
3920 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
3921 .is_registered = 0,
3922 .alg.aead = {
3923 .base = {
3924 .cra_name = "gcm(aes)",
3925 .cra_driver_name = "gcm-aes-chcr",
3926 .cra_blocksize = 1,
3927 .cra_priority = CHCR_AEAD_PRIORITY,
3928 .cra_ctxsize = sizeof(struct chcr_context) +
3929 sizeof(struct chcr_aead_ctx) +
3930 sizeof(struct chcr_gcm_ctx),
3931 },
3932 .ivsize = GCM_AES_IV_SIZE,
3933 .maxauthsize = GHASH_DIGEST_SIZE,
3934 .setkey = chcr_gcm_setkey,
3935 .setauthsize = chcr_gcm_setauthsize,
3936 }
3937 },
3938 {
3939 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
3940 .is_registered = 0,
3941 .alg.aead = {
3942 .base = {
3943 .cra_name = "rfc4106(gcm(aes))",
3944 .cra_driver_name = "rfc4106-gcm-aes-chcr",
3945 .cra_blocksize = 1,
3946 .cra_priority = CHCR_AEAD_PRIORITY + 1,
3947 .cra_ctxsize = sizeof(struct chcr_context) +
3948 sizeof(struct chcr_aead_ctx) +
3949 sizeof(struct chcr_gcm_ctx),
3950
3951 },
3952 .ivsize = GCM_RFC4106_IV_SIZE,
3953 .maxauthsize = GHASH_DIGEST_SIZE,
3954 .setkey = chcr_gcm_setkey,
3955 .setauthsize = chcr_4106_4309_setauthsize,
3956 }
3957 },
3958 {
3959 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
3960 .is_registered = 0,
3961 .alg.aead = {
3962 .base = {
3963 .cra_name = "ccm(aes)",
3964 .cra_driver_name = "ccm-aes-chcr",
3965 .cra_blocksize = 1,
3966 .cra_priority = CHCR_AEAD_PRIORITY,
3967 .cra_ctxsize = sizeof(struct chcr_context) +
3968 sizeof(struct chcr_aead_ctx),
3969
3970 },
3971 .ivsize = AES_BLOCK_SIZE,
3972 .maxauthsize = GHASH_DIGEST_SIZE,
3973 .setkey = chcr_aead_ccm_setkey,
3974 .setauthsize = chcr_ccm_setauthsize,
3975 }
3976 },
3977 {
3978 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
3979 .is_registered = 0,
3980 .alg.aead = {
3981 .base = {
3982 .cra_name = "rfc4309(ccm(aes))",
3983 .cra_driver_name = "rfc4309-ccm-aes-chcr",
3984 .cra_blocksize = 1,
3985 .cra_priority = CHCR_AEAD_PRIORITY + 1,
3986 .cra_ctxsize = sizeof(struct chcr_context) +
3987 sizeof(struct chcr_aead_ctx),
3988
3989 },
3990 .ivsize = 8,
3991 .maxauthsize = GHASH_DIGEST_SIZE,
3992 .setkey = chcr_aead_rfc4309_setkey,
3993 .setauthsize = chcr_4106_4309_setauthsize,
3994 }
3995 },
3996 {
3997 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
3998 .is_registered = 0,
3999 .alg.aead = {
4000 .base = {
4001 .cra_name = "authenc(hmac(sha1),cbc(aes))",
4002 .cra_driver_name =
4003 "authenc-hmac-sha1-cbc-aes-chcr",
4004 .cra_blocksize = AES_BLOCK_SIZE,
4005 .cra_priority = CHCR_AEAD_PRIORITY,
4006 .cra_ctxsize = sizeof(struct chcr_context) +
4007 sizeof(struct chcr_aead_ctx) +
4008 sizeof(struct chcr_authenc_ctx),
4009
4010 },
4011 .ivsize = AES_BLOCK_SIZE,
4012 .maxauthsize = SHA1_DIGEST_SIZE,
4013 .setkey = chcr_authenc_setkey,
4014 .setauthsize = chcr_authenc_setauthsize,
4015 }
4016 },
4017 {
4018 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4019 .is_registered = 0,
4020 .alg.aead = {
4021 .base = {
4022
4023 .cra_name = "authenc(hmac(sha256),cbc(aes))",
4024 .cra_driver_name =
4025 "authenc-hmac-sha256-cbc-aes-chcr",
4026 .cra_blocksize = AES_BLOCK_SIZE,
4027 .cra_priority = CHCR_AEAD_PRIORITY,
4028 .cra_ctxsize = sizeof(struct chcr_context) +
4029 sizeof(struct chcr_aead_ctx) +
4030 sizeof(struct chcr_authenc_ctx),
4031
4032 },
4033 .ivsize = AES_BLOCK_SIZE,
4034 .maxauthsize = SHA256_DIGEST_SIZE,
4035 .setkey = chcr_authenc_setkey,
4036 .setauthsize = chcr_authenc_setauthsize,
4037 }
4038 },
4039 {
4040 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4041 .is_registered = 0,
4042 .alg.aead = {
4043 .base = {
4044 .cra_name = "authenc(hmac(sha224),cbc(aes))",
4045 .cra_driver_name =
4046 "authenc-hmac-sha224-cbc-aes-chcr",
4047 .cra_blocksize = AES_BLOCK_SIZE,
4048 .cra_priority = CHCR_AEAD_PRIORITY,
4049 .cra_ctxsize = sizeof(struct chcr_context) +
4050 sizeof(struct chcr_aead_ctx) +
4051 sizeof(struct chcr_authenc_ctx),
4052 },
4053 .ivsize = AES_BLOCK_SIZE,
4054 .maxauthsize = SHA224_DIGEST_SIZE,
4055 .setkey = chcr_authenc_setkey,
4056 .setauthsize = chcr_authenc_setauthsize,
4057 }
4058 },
4059 {
4060 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4061 .is_registered = 0,
4062 .alg.aead = {
4063 .base = {
4064 .cra_name = "authenc(hmac(sha384),cbc(aes))",
4065 .cra_driver_name =
4066 "authenc-hmac-sha384-cbc-aes-chcr",
4067 .cra_blocksize = AES_BLOCK_SIZE,
4068 .cra_priority = CHCR_AEAD_PRIORITY,
4069 .cra_ctxsize = sizeof(struct chcr_context) +
4070 sizeof(struct chcr_aead_ctx) +
4071 sizeof(struct chcr_authenc_ctx),
4072
4073 },
4074 .ivsize = AES_BLOCK_SIZE,
4075 .maxauthsize = SHA384_DIGEST_SIZE,
4076 .setkey = chcr_authenc_setkey,
4077 .setauthsize = chcr_authenc_setauthsize,
4078 }
4079 },
4080 {
4081 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4082 .is_registered = 0,
4083 .alg.aead = {
4084 .base = {
4085 .cra_name = "authenc(hmac(sha512),cbc(aes))",
4086 .cra_driver_name =
4087 "authenc-hmac-sha512-cbc-aes-chcr",
4088 .cra_blocksize = AES_BLOCK_SIZE,
4089 .cra_priority = CHCR_AEAD_PRIORITY,
4090 .cra_ctxsize = sizeof(struct chcr_context) +
4091 sizeof(struct chcr_aead_ctx) +
4092 sizeof(struct chcr_authenc_ctx),
4093
4094 },
4095 .ivsize = AES_BLOCK_SIZE,
4096 .maxauthsize = SHA512_DIGEST_SIZE,
4097 .setkey = chcr_authenc_setkey,
4098 .setauthsize = chcr_authenc_setauthsize,
4099 }
4100 },
4101 {
4102 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
4103 .is_registered = 0,
4104 .alg.aead = {
4105 .base = {
4106 .cra_name = "authenc(digest_null,cbc(aes))",
4107 .cra_driver_name =
4108 "authenc-digest_null-cbc-aes-chcr",
4109 .cra_blocksize = AES_BLOCK_SIZE,
4110 .cra_priority = CHCR_AEAD_PRIORITY,
4111 .cra_ctxsize = sizeof(struct chcr_context) +
4112 sizeof(struct chcr_aead_ctx) +
4113 sizeof(struct chcr_authenc_ctx),
4114
4115 },
4116 .ivsize = AES_BLOCK_SIZE,
4117 .maxauthsize = 0,
4118 .setkey = chcr_aead_digest_null_setkey,
4119 .setauthsize = chcr_authenc_null_setauthsize,
4120 }
4121 },
4122 {
4123 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4124 .is_registered = 0,
4125 .alg.aead = {
4126 .base = {
4127 .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
4128 .cra_driver_name =
4129 "authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
4130 .cra_blocksize = 1,
4131 .cra_priority = CHCR_AEAD_PRIORITY,
4132 .cra_ctxsize = sizeof(struct chcr_context) +
4133 sizeof(struct chcr_aead_ctx) +
4134 sizeof(struct chcr_authenc_ctx),
4135
4136 },
4137 .ivsize = CTR_RFC3686_IV_SIZE,
4138 .maxauthsize = SHA1_DIGEST_SIZE,
4139 .setkey = chcr_authenc_setkey,
4140 .setauthsize = chcr_authenc_setauthsize,
4141 }
4142 },
4143 {
4144 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4145 .is_registered = 0,
4146 .alg.aead = {
4147 .base = {
4148
4149 .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
4150 .cra_driver_name =
4151 "authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
4152 .cra_blocksize = 1,
4153 .cra_priority = CHCR_AEAD_PRIORITY,
4154 .cra_ctxsize = sizeof(struct chcr_context) +
4155 sizeof(struct chcr_aead_ctx) +
4156 sizeof(struct chcr_authenc_ctx),
4157
4158 },
4159 .ivsize = CTR_RFC3686_IV_SIZE,
4160 .maxauthsize = SHA256_DIGEST_SIZE,
4161 .setkey = chcr_authenc_setkey,
4162 .setauthsize = chcr_authenc_setauthsize,
4163 }
4164 },
4165 {
4166 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4167 .is_registered = 0,
4168 .alg.aead = {
4169 .base = {
4170 .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
4171 .cra_driver_name =
4172 "authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
4173 .cra_blocksize = 1,
4174 .cra_priority = CHCR_AEAD_PRIORITY,
4175 .cra_ctxsize = sizeof(struct chcr_context) +
4176 sizeof(struct chcr_aead_ctx) +
4177 sizeof(struct chcr_authenc_ctx),
4178 },
4179 .ivsize = CTR_RFC3686_IV_SIZE,
4180 .maxauthsize = SHA224_DIGEST_SIZE,
4181 .setkey = chcr_authenc_setkey,
4182 .setauthsize = chcr_authenc_setauthsize,
4183 }
4184 },
4185 {
4186 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4187 .is_registered = 0,
4188 .alg.aead = {
4189 .base = {
4190 .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
4191 .cra_driver_name =
4192 "authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
4193 .cra_blocksize = 1,
4194 .cra_priority = CHCR_AEAD_PRIORITY,
4195 .cra_ctxsize = sizeof(struct chcr_context) +
4196 sizeof(struct chcr_aead_ctx) +
4197 sizeof(struct chcr_authenc_ctx),
4198
4199 },
4200 .ivsize = CTR_RFC3686_IV_SIZE,
4201 .maxauthsize = SHA384_DIGEST_SIZE,
4202 .setkey = chcr_authenc_setkey,
4203 .setauthsize = chcr_authenc_setauthsize,
4204 }
4205 },
4206 {
4207 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4208 .is_registered = 0,
4209 .alg.aead = {
4210 .base = {
4211 .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
4212 .cra_driver_name =
4213 "authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
4214 .cra_blocksize = 1,
4215 .cra_priority = CHCR_AEAD_PRIORITY,
4216 .cra_ctxsize = sizeof(struct chcr_context) +
4217 sizeof(struct chcr_aead_ctx) +
4218 sizeof(struct chcr_authenc_ctx),
4219
4220 },
4221 .ivsize = CTR_RFC3686_IV_SIZE,
4222 .maxauthsize = SHA512_DIGEST_SIZE,
4223 .setkey = chcr_authenc_setkey,
4224 .setauthsize = chcr_authenc_setauthsize,
4225 }
4226 },
4227 {
4228 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
4229 .is_registered = 0,
4230 .alg.aead = {
4231 .base = {
4232 .cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
4233 .cra_driver_name =
4234 "authenc-digest_null-rfc3686-ctr-aes-chcr",
4235 .cra_blocksize = 1,
4236 .cra_priority = CHCR_AEAD_PRIORITY,
4237 .cra_ctxsize = sizeof(struct chcr_context) +
4238 sizeof(struct chcr_aead_ctx) +
4239 sizeof(struct chcr_authenc_ctx),
4240
4241 },
4242 .ivsize = CTR_RFC3686_IV_SIZE,
4243 .maxauthsize = 0,
4244 .setkey = chcr_aead_digest_null_setkey,
4245 .setauthsize = chcr_authenc_null_setauthsize,
4246 }
4247 },
4248 };
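
/* Illustrative sketch, not part of the driver: entries in driver_algs
 * are matched by cra_name when a user asks the crypto API for an
 * algorithm, with cra_priority breaking ties between providers; a
 * caller can also pin this driver via cra_driver_name. The function
 * name is hypothetical.
 */
#if 0
static int example_pick_chcr_cbc(void)
{
	struct crypto_skcipher *tfm;

	/* Generic name: the highest-priority "cbc(aes)" provider wins. */
	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (!IS_ERR(tfm))
		crypto_free_skcipher(tfm);

	/* Driver name: request this driver's implementation only. */
	tfm = crypto_alloc_skcipher("cbc-aes-chcr", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	crypto_free_skcipher(tfm);
	return 0;
}
#endif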
4249
4250 /*
4251 * chcr_unregister_alg - Deregister the crypto algorithms from the
4252 * kernel framework.
4253 */
4254 static int chcr_unregister_alg(void)
4255 {
4256 int i;
4257
4258 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4259 switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4260 case CRYPTO_ALG_TYPE_SKCIPHER:
4261 if (driver_algs[i].is_registered && refcount_read(
4262 &driver_algs[i].alg.skcipher.base.cra_refcnt)
4263 == 1) {
4264 crypto_unregister_skcipher(
4265 &driver_algs[i].alg.skcipher);
4266 driver_algs[i].is_registered = 0;
4267 }
4268 break;
4269 case CRYPTO_ALG_TYPE_AEAD:
4270 if (driver_algs[i].is_registered && refcount_read(
4271 &driver_algs[i].alg.aead.base.cra_refcnt) == 1) {
4272 crypto_unregister_aead(
4273 &driver_algs[i].alg.aead);
4274 driver_algs[i].is_registered = 0;
4275 }
4276 break;
4277 case CRYPTO_ALG_TYPE_AHASH:
4278 if (driver_algs[i].is_registered && refcount_read(
4279 &driver_algs[i].alg.hash.halg.base.cra_refcnt)
4280 == 1) {
4281 crypto_unregister_ahash(
4282 &driver_algs[i].alg.hash);
4283 driver_algs[i].is_registered = 0;
4284 }
4285 break;
4286 }
4287 }
4288 return 0;
4289 }
4290
4291 #define SZ_AHASH_CTX sizeof(struct chcr_context)
4292 #define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
4293 #define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
4294
4295 /*
4296 * chcr_register_alg - Register the crypto algorithms with the kernel framework.
4297 */
4298 static int chcr_register_alg(void)
4299 {
4300 struct crypto_alg ai;
4301 struct ahash_alg *a_hash;
4302 int err = 0, i;
4303 char *name = NULL;
4304
4305 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4306 if (driver_algs[i].is_registered)
4307 continue;
4308 switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4309 case CRYPTO_ALG_TYPE_SKCIPHER:
4310 driver_algs[i].alg.skcipher.base.cra_priority =
4311 CHCR_CRA_PRIORITY;
4312 driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE;
4313 driver_algs[i].alg.skcipher.base.cra_flags =
4314 CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
4315 CRYPTO_ALG_ALLOCATES_MEMORY |
4316 CRYPTO_ALG_NEED_FALLBACK;
4317 driver_algs[i].alg.skcipher.base.cra_ctxsize =
4318 sizeof(struct chcr_context) +
4319 sizeof(struct ablk_ctx);
4320 driver_algs[i].alg.skcipher.base.cra_alignmask = 0;
4321
4322 err = crypto_register_skcipher(&driver_algs[i].alg.skcipher);
4323 name = driver_algs[i].alg.skcipher.base.cra_driver_name;
4324 break;
4325 case CRYPTO_ALG_TYPE_AEAD:
4326 driver_algs[i].alg.aead.base.cra_flags =
4327 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
4328 CRYPTO_ALG_ALLOCATES_MEMORY;
4329 driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
4330 driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
4331 driver_algs[i].alg.aead.init = chcr_aead_cra_init;
4332 driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
4333 driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
4334 err = crypto_register_aead(&driver_algs[i].alg.aead);
4335 name = driver_algs[i].alg.aead.base.cra_driver_name;
4336 break;
4337 case CRYPTO_ALG_TYPE_AHASH:
4338 a_hash = &driver_algs[i].alg.hash;
4339 a_hash->update = chcr_ahash_update;
4340 a_hash->final = chcr_ahash_final;
4341 a_hash->finup = chcr_ahash_finup;
4342 a_hash->digest = chcr_ahash_digest;
4343 a_hash->export = chcr_ahash_export;
4344 a_hash->import = chcr_ahash_import;
4345 a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
4346 a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
4347 a_hash->halg.base.cra_module = THIS_MODULE;
4348 a_hash->halg.base.cra_flags =
4349 CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
4350 a_hash->halg.base.cra_alignmask = 0;
4351 a_hash->halg.base.cra_exit = NULL;
4352
4353 if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
4354 a_hash->halg.base.cra_init = chcr_hmac_cra_init;
4355 a_hash->init = chcr_hmac_init;
4356 a_hash->setkey = chcr_ahash_setkey;
4357 a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
4358 } else {
4359 a_hash->init = chcr_sha_init;
4360 a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
4361 a_hash->halg.base.cra_init = chcr_sha_cra_init;
4362 }
4363 err = crypto_register_ahash(&driver_algs[i].alg.hash);
4364 ai = driver_algs[i].alg.hash.halg.base;
4365 name = ai.cra_driver_name;
4366 break;
4367 }
4368 if (err) {
4369 pr_err("%s : Algorithm registration failed\n", name);
4370 goto register_err;
4371 } else {
4372 driver_algs[i].is_registered = 1;
4373 }
4374 }
4375 return 0;
4376
4377 register_err:
4378 chcr_unregister_alg();
4379 return err;
4380 }
4381
4382 /*
4383 * start_crypto - Register the crypto algorithms.
4384 * This should be called once, when the first device comes up. After this
4385 * the kernel will start calling the driver APIs for crypto operations.
4386 */
4387 int start_crypto(void)
4388 {
4389 return chcr_register_alg();
4390 }
4391
4392 /*
4393 * stop_crypto - Deregister all the crypto algorithms from the kernel.
4394 * This should be called once, when the last device goes down. After this
4395 * the kernel will not call the driver APIs for crypto operations.
4396 */
4397 int stop_crypto(void)
4398 {
4399 chcr_unregister_alg();
4400 return 0;
4401 }
4402