1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * caam - Freescale FSL CAAM support for crypto API
4 *
5 * Copyright 2008-2011 Freescale Semiconductor, Inc.
6 * Copyright 2016-2019, 2023 NXP
7 *
8 * Based on talitos crypto API driver.
9 *
10 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
11 *
12 *   ---------------                      ---------------
13 *   | JobDesc #1  |--------------------->| ShareDesc   |
14 *   | *(packet 1) |                      | (PDB)       |
15 *   ---------------        |------------>| (hashKey)   |
16 *         .                |             | (cipherKey) |
17 *         .                |      |----->| (operation) |
18 *   ---------------        |      |      ---------------
19 *   | JobDesc #2  |--------|      |
20 *   | *(packet 2) |               |
21 *   ---------------               |
22 *         .                       |
23 *         .                       |
24 *   ---------------               |
25 *   | JobDesc #3  |---------------|
26 *   | *(packet 3) |
27 *   ---------------
28 *
29 * The SharedDesc never changes for a connection unless rekeyed, but
30 * each packet will likely be in a different place. So all we need
31 * to know to process the packet is where the input is, where the
32 * output goes, and what context we want to process with. Context is
33 * in the SharedDesc, packet references in the JobDesc.
34 *
35 * So, a job desc looks like:
36 *
37 * ---------------------
38 * | Header |
39 * | ShareDesc Pointer |
40 * | SEQ_OUT_PTR |
41 * | (output buffer) |
42 * | (output length) |
43 * | SEQ_IN_PTR |
44 * | (input buffer) |
45 * | (input length) |
46 * ---------------------
47 */
48
49 #include "compat.h"
50
51 #include "regs.h"
52 #include "intern.h"
53 #include "desc_constr.h"
54 #include "jr.h"
55 #include "error.h"
56 #include "sg_sw_sec4.h"
57 #include "key_gen.h"
58 #include "caamalg_desc.h"
59 #include <asm/unaligned.h>
60 #include <crypto/internal/aead.h>
61 #include <crypto/internal/engine.h>
62 #include <crypto/internal/skcipher.h>
63 #include <crypto/xts.h>
64 #include <linux/dma-mapping.h>
65 #include <linux/device.h>
66 #include <linux/err.h>
67 #include <linux/module.h>
68 #include <linux/kernel.h>
69 #include <linux/slab.h>
70 #include <linux/string.h>
71
72 /*
73 * crypto alg
74 */
75 #define CAAM_CRA_PRIORITY 3000
76 /* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce size, max split key size */
77 #define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
78 CTR_RFC3686_NONCE_SIZE + \
79 SHA512_DIGEST_SIZE * 2)
80
81 #define AEAD_DESC_JOB_IO_LEN (DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
82 #define GCM_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
83 CAAM_CMD_SZ * 4)
84 #define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
85 CAAM_CMD_SZ * 5)
86
87 #define CHACHAPOLY_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + CAAM_CMD_SZ * 6)
88
89 #define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN_MIN)
90 #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
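
/*
 * A rough budget sketch (illustrative; assuming CAAM_CMD_SZ = 4 bytes and
 * the 64-word descriptor buffer referred to throughout this file): the job
 * descriptor I/O commands (DESC_JOB_IO_LEN plus the per-algorithm extras
 * above) are reserved first, and DESC_MAX_USED_BYTES is what remains for
 * the shared descriptor, including any inlined keys.
 */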
91
92 struct caam_alg_entry {
93 int class1_alg_type;
94 int class2_alg_type;
95 bool rfc3686;
96 bool geniv;
97 bool nodkp;
98 };
99
100 struct caam_aead_alg {
101 struct aead_engine_alg aead;
102 struct caam_alg_entry caam;
103 bool registered;
104 };
105
106 struct caam_skcipher_alg {
107 struct skcipher_engine_alg skcipher;
108 struct caam_alg_entry caam;
109 bool registered;
110 };
111
112 /*
113 * per-session context
114 */
115 struct caam_ctx {
116 u32 sh_desc_enc[DESC_MAX_USED_LEN];
117 u32 sh_desc_dec[DESC_MAX_USED_LEN];
118 u8 key[CAAM_MAX_KEY_SIZE];
119 dma_addr_t sh_desc_enc_dma;
120 dma_addr_t sh_desc_dec_dma;
121 dma_addr_t key_dma;
122 enum dma_data_direction dir;
123 struct device *jrdev;
124 struct alginfo adata;
125 struct alginfo cdata;
126 unsigned int authsize;
127 bool xts_key_fallback;
128 struct crypto_skcipher *fallback;
129 };
130
131 struct caam_skcipher_req_ctx {
132 struct skcipher_edesc *edesc;
133 struct skcipher_request fallback_req;
134 };
135
136 struct caam_aead_req_ctx {
137 struct aead_edesc *edesc;
138 };
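
/*
 * Sketch of the ctx->key layout used by the authenc (AEAD) algorithms,
 * as set up in aead_setkey() below:
 *
 *   ctx->key: [ auth (split) key, padded to adata.keylen_pad | enc key ]
 *
 * adata.key_virt/key_dma point at the start of the buffer, while
 * cdata.key_virt/key_dma point at offset adata.keylen_pad.
 */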
139
140 static int aead_null_set_sh_desc(struct crypto_aead *aead)
141 {
142 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
143 struct device *jrdev = ctx->jrdev;
144 struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
145 u32 *desc;
146 int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
147 ctx->adata.keylen_pad;
148
149 /*
150 * Job Descriptor and Shared Descriptors
151 * must all fit into the 64-word Descriptor h/w Buffer
152 */
153 if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) {
154 ctx->adata.key_inline = true;
155 ctx->adata.key_virt = ctx->key;
156 } else {
157 ctx->adata.key_inline = false;
158 ctx->adata.key_dma = ctx->key_dma;
159 }
160
161 /* aead_encrypt shared descriptor */
162 desc = ctx->sh_desc_enc;
163 cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
164 ctrlpriv->era);
165 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
166 desc_bytes(desc), ctx->dir);
167
168 /*
169 * Job Descriptor and Shared Descriptors
170 * must all fit into the 64-word Descriptor h/w Buffer
171 */
172 if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) {
173 ctx->adata.key_inline = true;
174 ctx->adata.key_virt = ctx->key;
175 } else {
176 ctx->adata.key_inline = false;
177 ctx->adata.key_dma = ctx->key_dma;
178 }
179
180 /* aead_decrypt shared descriptor */
181 desc = ctx->sh_desc_dec;
182 cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize,
183 ctrlpriv->era);
184 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
185 desc_bytes(desc), ctx->dir);
186
187 return 0;
188 }
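
/*
 * Worked example of the rem_bytes check above (sizes illustrative,
 * assuming a 256-byte / 64-word descriptor buffer): rem_bytes =
 * CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN - adata.keylen_pad. If the
 * descriptor body (DESC_AEAD_NULL_ENC_LEN or _DEC_LEN) still fits in the
 * remainder, the split key is inlined as immediate data via key_virt;
 * otherwise it is referenced through key_dma.
 */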
189
190 static int aead_set_sh_desc(struct crypto_aead *aead)
191 {
192 struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
193 struct caam_aead_alg,
194 aead.base);
195 unsigned int ivsize = crypto_aead_ivsize(aead);
196 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
197 struct device *jrdev = ctx->jrdev;
198 struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
199 u32 ctx1_iv_off = 0;
200 u32 *desc, *nonce = NULL;
201 u32 inl_mask;
202 unsigned int data_len[2];
203 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
204 OP_ALG_AAI_CTR_MOD128);
205 const bool is_rfc3686 = alg->caam.rfc3686;
206
207 if (!ctx->authsize)
208 return 0;
209
210 /* NULL encryption / decryption */
211 if (!ctx->cdata.keylen)
212 return aead_null_set_sh_desc(aead);
213
214 /*
215 * AES-CTR needs to load IV in CONTEXT1 reg
216 * at an offset of 128 bits (16 bytes)
217 * CONTEXT1[255:128] = IV
218 */
219 if (ctr_mode)
220 ctx1_iv_off = 16;
221
222 /*
223 * RFC3686 specific:
224 * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
225 */
226 if (is_rfc3686) {
227 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
228 nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
229 ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
230 }
231
232 /*
233 * In case |user key| > |derived key|, using DKP<imm,imm>
234 * would result in invalid opcodes (last bytes of user key) in
235 * the resulting descriptor. Use DKP<ptr,imm> instead => both
236 * virtual and dma key addresses are needed.
237 */
238 ctx->adata.key_virt = ctx->key;
239 ctx->adata.key_dma = ctx->key_dma;
240
241 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
242 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
243
244 data_len[0] = ctx->adata.keylen_pad;
245 data_len[1] = ctx->cdata.keylen;
246
247 if (alg->caam.geniv)
248 goto skip_enc;
249
250 /*
251 * Job Descriptor and Shared Descriptors
252 * must all fit into the 64-word Descriptor h/w Buffer
253 */
254 if (desc_inline_query(DESC_AEAD_ENC_LEN +
255 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
256 AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
257 ARRAY_SIZE(data_len)) < 0)
258 return -EINVAL;
259
260 ctx->adata.key_inline = !!(inl_mask & 1);
261 ctx->cdata.key_inline = !!(inl_mask & 2);
262
263 /* aead_encrypt shared descriptor */
264 desc = ctx->sh_desc_enc;
265 cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
266 ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
267 false, ctrlpriv->era);
268 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
269 desc_bytes(desc), ctx->dir);
270
271 skip_enc:
272 /*
273 * Job Descriptor and Shared Descriptors
274 * must all fit into the 64-word Descriptor h/w Buffer
275 */
276 if (desc_inline_query(DESC_AEAD_DEC_LEN +
277 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
278 AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
279 ARRAY_SIZE(data_len)) < 0)
280 return -EINVAL;
281
282 ctx->adata.key_inline = !!(inl_mask & 1);
283 ctx->cdata.key_inline = !!(inl_mask & 2);
284
285 /* aead_decrypt shared descriptor */
286 desc = ctx->sh_desc_dec;
287 cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
288 ctx->authsize, alg->caam.geniv, is_rfc3686,
289 nonce, ctx1_iv_off, false, ctrlpriv->era);
290 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
291 desc_bytes(desc), ctx->dir);
292
293 if (!alg->caam.geniv)
294 goto skip_givenc;
295
296 /*
297 * Job Descriptor and Shared Descriptors
298 * must all fit into the 64-word Descriptor h/w Buffer
299 */
300 if (desc_inline_query(DESC_AEAD_GIVENC_LEN +
301 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
302 AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
303 ARRAY_SIZE(data_len)) < 0)
304 return -EINVAL;
305
306 ctx->adata.key_inline = !!(inl_mask & 1);
307 ctx->cdata.key_inline = !!(inl_mask & 2);
308
309 /* aead_givencrypt shared descriptor */
310 desc = ctx->sh_desc_enc;
311 cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
312 ctx->authsize, is_rfc3686, nonce,
313 ctx1_iv_off, false, ctrlpriv->era);
314 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
315 desc_bytes(desc), ctx->dir);
316
317 skip_givenc:
318 return 0;
319 }
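
/*
 * Note on the desc_inline_query() calls above: data_len[0] is the split
 * auth key and data_len[1] the cipher key, so bit 0 of inl_mask reports
 * whether the auth key may be inlined and bit 1 does the same for the
 * cipher key; a cleared bit means that key is referenced through its DMA
 * address instead.
 */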
320
321 static int aead_setauthsize(struct crypto_aead *authenc,
322 unsigned int authsize)
323 {
324 struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
325
326 ctx->authsize = authsize;
327 aead_set_sh_desc(authenc);
328
329 return 0;
330 }
331
332 static int gcm_set_sh_desc(struct crypto_aead *aead)
333 {
334 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
335 struct device *jrdev = ctx->jrdev;
336 unsigned int ivsize = crypto_aead_ivsize(aead);
337 u32 *desc;
338 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
339 ctx->cdata.keylen;
340
341 if (!ctx->cdata.keylen || !ctx->authsize)
342 return 0;
343
344 /*
345 * AES GCM encrypt shared descriptor
346 * Job Descriptor and Shared Descriptor
347 * must fit into the 64-word Descriptor h/w Buffer
348 */
349 if (rem_bytes >= DESC_GCM_ENC_LEN) {
350 ctx->cdata.key_inline = true;
351 ctx->cdata.key_virt = ctx->key;
352 } else {
353 ctx->cdata.key_inline = false;
354 ctx->cdata.key_dma = ctx->key_dma;
355 }
356
357 desc = ctx->sh_desc_enc;
358 cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
359 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
360 desc_bytes(desc), ctx->dir);
361
362 /*
363 * Job Descriptor and Shared Descriptors
364 * must all fit into the 64-word Descriptor h/w Buffer
365 */
366 if (rem_bytes >= DESC_GCM_DEC_LEN) {
367 ctx->cdata.key_inline = true;
368 ctx->cdata.key_virt = ctx->key;
369 } else {
370 ctx->cdata.key_inline = false;
371 ctx->cdata.key_dma = ctx->key_dma;
372 }
373
374 desc = ctx->sh_desc_dec;
375 cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
376 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
377 desc_bytes(desc), ctx->dir);
378
379 return 0;
380 }
381
382 static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
383 {
384 struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
385 int err;
386
387 err = crypto_gcm_check_authsize(authsize);
388 if (err)
389 return err;
390
391 ctx->authsize = authsize;
392 gcm_set_sh_desc(authenc);
393
394 return 0;
395 }
396
397 static int rfc4106_set_sh_desc(struct crypto_aead *aead)
398 {
399 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
400 struct device *jrdev = ctx->jrdev;
401 unsigned int ivsize = crypto_aead_ivsize(aead);
402 u32 *desc;
403 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
404 ctx->cdata.keylen;
405
406 if (!ctx->cdata.keylen || !ctx->authsize)
407 return 0;
408
409 /*
410 * RFC4106 encrypt shared descriptor
411 * Job Descriptor and Shared Descriptor
412 * must fit into the 64-word Descriptor h/w Buffer
413 */
414 if (rem_bytes >= DESC_RFC4106_ENC_LEN) {
415 ctx->cdata.key_inline = true;
416 ctx->cdata.key_virt = ctx->key;
417 } else {
418 ctx->cdata.key_inline = false;
419 ctx->cdata.key_dma = ctx->key_dma;
420 }
421
422 desc = ctx->sh_desc_enc;
423 cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
424 false);
425 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
426 desc_bytes(desc), ctx->dir);
427
428 /*
429 * Job Descriptor and Shared Descriptors
430 * must all fit into the 64-word Descriptor h/w Buffer
431 */
432 if (rem_bytes >= DESC_RFC4106_DEC_LEN) {
433 ctx->cdata.key_inline = true;
434 ctx->cdata.key_virt = ctx->key;
435 } else {
436 ctx->cdata.key_inline = false;
437 ctx->cdata.key_dma = ctx->key_dma;
438 }
439
440 desc = ctx->sh_desc_dec;
441 cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
442 false);
443 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
444 desc_bytes(desc), ctx->dir);
445
446 return 0;
447 }
448
449 static int rfc4106_setauthsize(struct crypto_aead *authenc,
450 unsigned int authsize)
451 {
452 struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
453 int err;
454
455 err = crypto_rfc4106_check_authsize(authsize);
456 if (err)
457 return err;
458
459 ctx->authsize = authsize;
460 rfc4106_set_sh_desc(authenc);
461
462 return 0;
463 }
464
465 static int rfc4543_set_sh_desc(struct crypto_aead *aead)
466 {
467 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
468 struct device *jrdev = ctx->jrdev;
469 unsigned int ivsize = crypto_aead_ivsize(aead);
470 u32 *desc;
471 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
472 ctx->cdata.keylen;
473
474 if (!ctx->cdata.keylen || !ctx->authsize)
475 return 0;
476
477 /*
478 * RFC4543 encrypt shared descriptor
479 * Job Descriptor and Shared Descriptor
480 * must fit into the 64-word Descriptor h/w Buffer
481 */
482 if (rem_bytes >= DESC_RFC4543_ENC_LEN) {
483 ctx->cdata.key_inline = true;
484 ctx->cdata.key_virt = ctx->key;
485 } else {
486 ctx->cdata.key_inline = false;
487 ctx->cdata.key_dma = ctx->key_dma;
488 }
489
490 desc = ctx->sh_desc_enc;
491 cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
492 false);
493 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
494 desc_bytes(desc), ctx->dir);
495
496 /*
497 * Job Descriptor and Shared Descriptors
498 * must all fit into the 64-word Descriptor h/w Buffer
499 */
500 if (rem_bytes >= DESC_RFC4543_DEC_LEN) {
501 ctx->cdata.key_inline = true;
502 ctx->cdata.key_virt = ctx->key;
503 } else {
504 ctx->cdata.key_inline = false;
505 ctx->cdata.key_dma = ctx->key_dma;
506 }
507
508 desc = ctx->sh_desc_dec;
509 cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
510 false);
511 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
512 desc_bytes(desc), ctx->dir);
513
514 return 0;
515 }
516
517 static int rfc4543_setauthsize(struct crypto_aead *authenc,
518 unsigned int authsize)
519 {
520 struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
521
522 if (authsize != 16)
523 return -EINVAL;
524
525 ctx->authsize = authsize;
526 rfc4543_set_sh_desc(authenc);
527
528 return 0;
529 }
530
531 static int chachapoly_set_sh_desc(struct crypto_aead *aead)
532 {
533 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
534 struct device *jrdev = ctx->jrdev;
535 unsigned int ivsize = crypto_aead_ivsize(aead);
536 u32 *desc;
537
538 if (!ctx->cdata.keylen || !ctx->authsize)
539 return 0;
540
541 desc = ctx->sh_desc_enc;
542 cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
543 ctx->authsize, true, false);
544 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
545 desc_bytes(desc), ctx->dir);
546
547 desc = ctx->sh_desc_dec;
548 cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
549 ctx->authsize, false, false);
550 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
551 desc_bytes(desc), ctx->dir);
552
553 return 0;
554 }
555
556 static int chachapoly_setauthsize(struct crypto_aead *aead,
557 unsigned int authsize)
558 {
559 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
560
561 if (authsize != POLY1305_DIGEST_SIZE)
562 return -EINVAL;
563
564 ctx->authsize = authsize;
565 return chachapoly_set_sh_desc(aead);
566 }
567
568 static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
569 unsigned int keylen)
570 {
571 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
572 unsigned int ivsize = crypto_aead_ivsize(aead);
573 unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
574
575 if (keylen != CHACHA_KEY_SIZE + saltlen)
576 return -EINVAL;
577
578 ctx->cdata.key_virt = key;
579 ctx->cdata.keylen = keylen - saltlen;
580
581 return chachapoly_set_sh_desc(aead);
582 }
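
/*
 * Key length sketch for the two chacha20-poly1305 templates (assuming
 * CHACHAPOLY_IV_SIZE = 12 and CHACHA_KEY_SIZE = 32):
 *   rfc7539(chacha20,poly1305):    ivsize = 12 -> saltlen = 0, keylen = 32
 *   rfc7539esp(chacha20,poly1305): ivsize = 8  -> saltlen = 4, keylen = 36
 *                                  (32-byte key followed by 4-byte salt)
 */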
583
584 static int aead_setkey(struct crypto_aead *aead,
585 const u8 *key, unsigned int keylen)
586 {
587 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
588 struct device *jrdev = ctx->jrdev;
589 struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
590 struct crypto_authenc_keys keys;
591 int ret = 0;
592
593 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
594 goto badkey;
595
596 dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
597 keys.authkeylen + keys.enckeylen, keys.enckeylen,
598 keys.authkeylen);
599 print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
600 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
601
602 /*
603 * If DKP is supported, use it in the shared descriptor to generate
604 * the split key.
605 */
606 if (ctrlpriv->era >= 6) {
607 ctx->adata.keylen = keys.authkeylen;
608 ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
609 OP_ALG_ALGSEL_MASK);
610
611 if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
612 goto badkey;
613
614 memcpy(ctx->key, keys.authkey, keys.authkeylen);
615 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
616 keys.enckeylen);
617 dma_sync_single_for_device(jrdev, ctx->key_dma,
618 ctx->adata.keylen_pad +
619 keys.enckeylen, ctx->dir);
620 goto skip_split_key;
621 }
622
623 ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
624 keys.authkeylen, CAAM_MAX_KEY_SIZE -
625 keys.enckeylen);
626 if (ret)
627 goto badkey;
628 
629
630 /* append encryption key to auth split key */
631 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
632 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
633 keys.enckeylen, ctx->dir);
634
635 print_hex_dump_debug("ctx.key@"__stringify(__LINE__)": ",
636 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
637 ctx->adata.keylen_pad + keys.enckeylen, 1);
638
639 skip_split_key:
640 ctx->cdata.keylen = keys.enckeylen;
641 memzero_explicit(&keys, sizeof(keys));
642 return aead_set_sh_desc(aead);
643 badkey:
644 memzero_explicit(&keys, sizeof(keys));
645 return -EINVAL;
646 }
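
/*
 * The blob parsed by crypto_authenc_extractkeys() above follows the
 * generic authenc key format: an rtattr header carrying enckeylen,
 * followed by the authentication key and then the encryption key;
 * aead_setkey() only ever sees the already-split keys through
 * struct crypto_authenc_keys.
 */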
647
648 static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
649 unsigned int keylen)
650 {
651 struct crypto_authenc_keys keys;
652 int err;
653
654 err = crypto_authenc_extractkeys(&keys, key, keylen);
655 if (unlikely(err))
656 return err;
657
658 err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
659 aead_setkey(aead, key, keylen);
660
661 memzero_explicit(&keys, sizeof(keys));
662 return err;
663 }
664
665 static int gcm_setkey(struct crypto_aead *aead,
666 const u8 *key, unsigned int keylen)
667 {
668 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
669 struct device *jrdev = ctx->jrdev;
670 int err;
671
672 err = aes_check_keylen(keylen);
673 if (err)
674 return err;
675
676 print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
677 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
678
679 memcpy(ctx->key, key, keylen);
680 dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
681 ctx->cdata.keylen = keylen;
682
683 return gcm_set_sh_desc(aead);
684 }
685
686 static int rfc4106_setkey(struct crypto_aead *aead,
687 const u8 *key, unsigned int keylen)
688 {
689 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
690 struct device *jrdev = ctx->jrdev;
691 int err;
692
693 err = aes_check_keylen(keylen - 4);
694 if (err)
695 return err;
696
697 print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
698 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
699
700 memcpy(ctx->key, key, keylen);
701
702 /*
703 * The last four bytes of the key material are used as the salt value
704 * in the nonce. Update the AES key length.
705 */
706 ctx->cdata.keylen = keylen - 4;
707 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
708 ctx->dir);
709 return rfc4106_set_sh_desc(aead);
710 }
711
712 static int rfc4543_setkey(struct crypto_aead *aead,
713 const u8 *key, unsigned int keylen)
714 {
715 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
716 struct device *jrdev = ctx->jrdev;
717 int err;
718
719 err = aes_check_keylen(keylen - 4);
720 if (err)
721 return err;
722
723 print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
724 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
725
726 memcpy(ctx->key, key, keylen);
727
728 /*
729 * The last four bytes of the key material are used as the salt value
730 * in the nonce. Update the AES key length.
731 */
732 ctx->cdata.keylen = keylen - 4;
733 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
734 ctx->dir);
735 return rfc4543_set_sh_desc(aead);
736 }
737
738 static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
739 unsigned int keylen, const u32 ctx1_iv_off)
740 {
741 struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
742 struct caam_skcipher_alg *alg =
743 container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
744 skcipher.base);
745 struct device *jrdev = ctx->jrdev;
746 unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
747 u32 *desc;
748 const bool is_rfc3686 = alg->caam.rfc3686;
749
750 print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
751 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
752
753 ctx->cdata.keylen = keylen;
754 ctx->cdata.key_virt = key;
755 ctx->cdata.key_inline = true;
756
757 /* skcipher_encrypt shared descriptor */
758 desc = ctx->sh_desc_enc;
759 cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
760 ctx1_iv_off);
761 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
762 desc_bytes(desc), ctx->dir);
763
764 /* skcipher_decrypt shared descriptor */
765 desc = ctx->sh_desc_dec;
766 cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
767 ctx1_iv_off);
768 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
769 desc_bytes(desc), ctx->dir);
770
771 return 0;
772 }
773
774 static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
775 const u8 *key, unsigned int keylen)
776 {
777 int err;
778
779 err = aes_check_keylen(keylen);
780 if (err)
781 return err;
782
783 return skcipher_setkey(skcipher, key, keylen, 0);
784 }
785
786 static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
787 const u8 *key, unsigned int keylen)
788 {
789 u32 ctx1_iv_off;
790 int err;
791
792 /*
793 * RFC3686 specific:
794 * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
795 * | *key = {KEY, NONCE}
796 */
797 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
798 keylen -= CTR_RFC3686_NONCE_SIZE;
799
800 err = aes_check_keylen(keylen);
801 if (err)
802 return err;
803
804 return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
805 }
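
/*
 * Example rfc3686 key layout (per the {KEY, NONCE} comment above, with
 * CTR_RFC3686_NONCE_SIZE = 4): a 20-byte user key is a 16-byte AES-128
 * key followed by a 4-byte nonce, so keylen is reduced by 4 before the
 * AES key length check runs.
 */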
806
807 static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
808 const u8 *key, unsigned int keylen)
809 {
810 u32 ctx1_iv_off;
811 int err;
812
813 /*
814 * AES-CTR needs to load IV in CONTEXT1 reg
815 * at an offset of 128 bits (16 bytes)
816 * CONTEXT1[255:128] = IV
817 */
818 ctx1_iv_off = 16;
819
820 err = aes_check_keylen(keylen);
821 if (err)
822 return err;
823
824 return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
825 }
826
827 static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
828 const u8 *key, unsigned int keylen)
829 {
830 return verify_skcipher_des_key(skcipher, key) ?:
831 skcipher_setkey(skcipher, key, keylen, 0);
832 }
833
834 static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
835 const u8 *key, unsigned int keylen)
836 {
837 return verify_skcipher_des3_key(skcipher, key) ?:
838 skcipher_setkey(skcipher, key, keylen, 0);
839 }
840
841 static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
842 unsigned int keylen)
843 {
844 struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
845 struct device *jrdev = ctx->jrdev;
846 struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
847 u32 *desc;
848 int err;
849
850 err = xts_verify_key(skcipher, key, keylen);
851 if (err) {
852 dev_dbg(jrdev, "key size mismatch\n");
853 return err;
854 }
855
856 if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
857 ctx->xts_key_fallback = true;
858
859 if (ctrlpriv->era <= 8 || ctx->xts_key_fallback) {
860 err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
861 if (err)
862 return err;
863 }
864
865 ctx->cdata.keylen = keylen;
866 ctx->cdata.key_virt = key;
867 ctx->cdata.key_inline = true;
868
869 /* xts_skcipher_encrypt shared descriptor */
870 desc = ctx->sh_desc_enc;
871 cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
872 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
873 desc_bytes(desc), ctx->dir);
874
875 /* xts_skcipher_decrypt shared descriptor */
876 desc = ctx->sh_desc_dec;
877 cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
878 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
879 desc_bytes(desc), ctx->dir);
880
881 return 0;
882 }
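
/*
 * Summary of the XTS fallback policy above: the software fallback tfm is
 * keyed whenever the key length differs from 2 * AES_KEYSIZE_128/256
 * (e.g. AES-192-XTS) or on era <= 8 hardware; skcipher_crypt() later
 * routes a request to that fallback when the key condition holds, or,
 * on era <= 8, when the upper half of the IV is non-zero.
 */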
883
884 /*
885 * aead_edesc - s/w-extended aead descriptor
886 * @src_nents: number of segments in input s/w scatterlist
887 * @dst_nents: number of segments in output s/w scatterlist
888 * @mapped_src_nents: number of segments in input h/w link table
889 * @mapped_dst_nents: number of segments in output h/w link table
890 * @sec4_sg_bytes: length of dma mapped sec4_sg space
891 * @bklog: stored to determine if the request needs backlog
892 * @sec4_sg_dma: bus physical mapped address of h/w link table
893 * @sec4_sg: pointer to h/w link table
894 * @hw_desc: the h/w job descriptor followed by any referenced link tables
895 */
896 struct aead_edesc {
897 int src_nents;
898 int dst_nents;
899 int mapped_src_nents;
900 int mapped_dst_nents;
901 int sec4_sg_bytes;
902 bool bklog;
903 dma_addr_t sec4_sg_dma;
904 struct sec4_sg_entry *sec4_sg;
905 u32 hw_desc[];
906 };
907
908 /*
909 * skcipher_edesc - s/w-extended skcipher descriptor
910 * @src_nents: number of segments in input s/w scatterlist
911 * @dst_nents: number of segments in output s/w scatterlist
912 * @mapped_src_nents: number of segments in input h/w link table
913 * @mapped_dst_nents: number of segments in output h/w link table
914 * @iv_dma: dma address of iv for checking continuity and link table
915 * @sec4_sg_bytes: length of dma mapped sec4_sg space
916 * @bklog: stored to determine if the request needs backlog
917 * @sec4_sg_dma: bus physical mapped address of h/w link table
918 * @sec4_sg: pointer to h/w link table
919 * @hw_desc: the h/w job descriptor followed by any referenced link tables
920 * and IV
921 */
922 struct skcipher_edesc {
923 int src_nents;
924 int dst_nents;
925 int mapped_src_nents;
926 int mapped_dst_nents;
927 dma_addr_t iv_dma;
928 int sec4_sg_bytes;
929 bool bklog;
930 dma_addr_t sec4_sg_dma;
931 struct sec4_sg_entry *sec4_sg;
932 u32 hw_desc[];
933 };
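
/*
 * In-memory layout of a skcipher_edesc allocation, as built by
 * skcipher_edesc_alloc() and consumed by skcipher_edesc_iv():
 *
 *   [ struct skcipher_edesc | hw_desc[] | sec4_sg table | pad | IV copy ]
 *
 * The IV copy sits at a cache-line-aligned offset so it can be DMA-mapped
 * independently of the rest of the buffer.
 */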
934
935 static void caam_unmap(struct device *dev, struct scatterlist *src,
936 struct scatterlist *dst, int src_nents,
937 int dst_nents,
938 dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
939 int sec4_sg_bytes)
940 {
941 if (dst != src) {
942 if (src_nents)
943 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
944 if (dst_nents)
945 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
946 } else {
947 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
948 }
949
950 if (iv_dma)
951 dma_unmap_single(dev, iv_dma, ivsize, DMA_BIDIRECTIONAL);
952 if (sec4_sg_bytes)
953 dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
954 DMA_TO_DEVICE);
955 }
956
957 static void aead_unmap(struct device *dev,
958 struct aead_edesc *edesc,
959 struct aead_request *req)
960 {
961 caam_unmap(dev, req->src, req->dst,
962 edesc->src_nents, edesc->dst_nents, 0, 0,
963 edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
964 }
965
966 static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
967 struct skcipher_request *req)
968 {
969 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
970 int ivsize = crypto_skcipher_ivsize(skcipher);
971
972 caam_unmap(dev, req->src, req->dst,
973 edesc->src_nents, edesc->dst_nents,
974 edesc->iv_dma, ivsize,
975 edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
976 }
977
978 static void aead_crypt_done(struct device *jrdev, u32 *desc, u32 err,
979 void *context)
980 {
981 struct aead_request *req = context;
982 struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
983 struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
984 struct aead_edesc *edesc;
985 int ecode = 0;
986 bool has_bklog;
987
988 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
989
990 edesc = rctx->edesc;
991 has_bklog = edesc->bklog;
992
993 if (err)
994 ecode = caam_jr_strstatus(jrdev, err);
995
996 aead_unmap(jrdev, edesc, req);
997
998 kfree(edesc);
999
1000 /*
1001 * If there is no backlog flag, the completion of the request is
1002 * done by CAAM, not by the crypto engine.
1003 */
1004 if (!has_bklog)
1005 aead_request_complete(req, ecode);
1006 else
1007 crypto_finalize_aead_request(jrp->engine, req, ecode);
1008 }
1009
1010 static inline u8 *skcipher_edesc_iv(struct skcipher_edesc *edesc)
1011 {
1012
1013 return PTR_ALIGN((u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
1014 dma_get_cache_alignment());
1015 }
1016
1017 static void skcipher_crypt_done(struct device *jrdev, u32 *desc, u32 err,
1018 void *context)
1019 {
1020 struct skcipher_request *req = context;
1021 struct skcipher_edesc *edesc;
1022 struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
1023 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1024 struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
1025 int ivsize = crypto_skcipher_ivsize(skcipher);
1026 int ecode = 0;
1027 bool has_bklog;
1028
1029 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1030
1031 edesc = rctx->edesc;
1032 has_bklog = edesc->bklog;
1033 if (err)
1034 ecode = caam_jr_strstatus(jrdev, err);
1035
1036 skcipher_unmap(jrdev, edesc, req);
1037
1038 /*
1039 * The crypto API expects us to set the IV (req->iv) to the last
1040 * ciphertext block (CBC mode) or last counter (CTR mode).
1041 * This is used e.g. by the CTS mode.
1042 */
1043 if (ivsize && !ecode) {
1044 memcpy(req->iv, skcipher_edesc_iv(edesc), ivsize);
1045
1046 print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
1047 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1048 ivsize, 1);
1049 }
1050
1051 caam_dump_sg("dst @" __stringify(__LINE__)": ",
1052 DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1053 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1054
1055 kfree(edesc);
1056
1057 /*
1058 * If there is no backlog flag, the completion of the request is
1059 * done by CAAM, not by the crypto engine.
1060 */
1061 if (!has_bklog)
1062 skcipher_request_complete(req, ecode);
1063 else
1064 crypto_finalize_skcipher_request(jrp->engine, req, ecode);
1065 }
1066
1067 /*
1068 * Fill in aead job descriptor
1069 */
1070 static void init_aead_job(struct aead_request *req,
1071 struct aead_edesc *edesc,
1072 bool all_contig, bool encrypt)
1073 {
1074 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1075 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
1076 int authsize = ctx->authsize;
1077 u32 *desc = edesc->hw_desc;
1078 u32 out_options, in_options;
1079 dma_addr_t dst_dma, src_dma;
1080 int len, sec4_sg_index = 0;
1081 dma_addr_t ptr;
1082 u32 *sh_desc;
1083
1084 sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
1085 ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
1086
1087 len = desc_len(sh_desc);
1088 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
1089
1090 if (all_contig) {
1091 src_dma = edesc->mapped_src_nents ? sg_dma_address(req->src) :
1092 0;
1093 in_options = 0;
1094 } else {
1095 src_dma = edesc->sec4_sg_dma;
1096 sec4_sg_index += edesc->mapped_src_nents;
1097 in_options = LDST_SGF;
1098 }
1099
1100 append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
1101 in_options);
1102
1103 dst_dma = src_dma;
1104 out_options = in_options;
1105
1106 if (unlikely(req->src != req->dst)) {
1107 if (!edesc->mapped_dst_nents) {
1108 dst_dma = 0;
1109 out_options = 0;
1110 } else if (edesc->mapped_dst_nents == 1) {
1111 dst_dma = sg_dma_address(req->dst);
1112 out_options = 0;
1113 } else {
1114 dst_dma = edesc->sec4_sg_dma +
1115 sec4_sg_index *
1116 sizeof(struct sec4_sg_entry);
1117 out_options = LDST_SGF;
1118 }
1119 }
1120
1121 if (encrypt)
1122 append_seq_out_ptr(desc, dst_dma,
1123 req->assoclen + req->cryptlen + authsize,
1124 out_options);
1125 else
1126 append_seq_out_ptr(desc, dst_dma,
1127 req->assoclen + req->cryptlen - authsize,
1128 out_options);
1129 }
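
/*
 * Sequence length sketch for init_aead_job(): the input sequence always
 * covers assoclen + cryptlen bytes, while the output sequence adds
 * authsize bytes on encryption (room for the ICV) and subtracts authsize
 * on decryption (the ICV is verified, not copied out).
 */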
1130
1131 static void init_gcm_job(struct aead_request *req,
1132 struct aead_edesc *edesc,
1133 bool all_contig, bool encrypt)
1134 {
1135 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1136 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
1137 unsigned int ivsize = crypto_aead_ivsize(aead);
1138 u32 *desc = edesc->hw_desc;
1139 bool generic_gcm = (ivsize == GCM_AES_IV_SIZE);
1140 unsigned int last;
1141
1142 init_aead_job(req, edesc, all_contig, encrypt);
1143 append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
1144
1145 /* BUG: This should not be specific to generic GCM. */
1146 last = 0;
1147 if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
1148 last = FIFOLD_TYPE_LAST1;
1149
1150 /* Read GCM IV */
1151 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
1152 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | GCM_AES_IV_SIZE | last);
1153 /* Append Salt */
1154 if (!generic_gcm)
1155 append_data(desc, ctx->key + ctx->cdata.keylen, 4);
1156 /* Append IV */
1157 append_data(desc, req->iv, ivsize);
1158 /* End of blank commands */
1159 }
1160
1161 static void init_chachapoly_job(struct aead_request *req,
1162 struct aead_edesc *edesc, bool all_contig,
1163 bool encrypt)
1164 {
1165 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1166 unsigned int ivsize = crypto_aead_ivsize(aead);
1167 unsigned int assoclen = req->assoclen;
1168 u32 *desc = edesc->hw_desc;
1169 u32 ctx_iv_off = 4;
1170
1171 init_aead_job(req, edesc, all_contig, encrypt);
1172
1173 if (ivsize != CHACHAPOLY_IV_SIZE) {
1174 /* IPsec specific: CONTEXT1[223:128] = {NONCE, IV} */
1175 ctx_iv_off += 4;
1176
1177 /*
1178 * The associated data already comes with the IV, but we need
1179 * to skip it when we authenticate or encrypt...
1180 */
1181 assoclen -= ivsize;
1182 }
1183
1184 append_math_add_imm_u32(desc, REG3, ZERO, IMM, assoclen);
1185
1186 /*
1187 * For IPsec, load the IV further into the same register.
1188 * For RFC7539, simply load the 12-byte nonce in a single operation.
1189 */
1190 append_load_as_imm(desc, req->iv, ivsize, LDST_CLASS_1_CCB |
1191 LDST_SRCDST_BYTE_CONTEXT |
1192 ctx_iv_off << LDST_OFFSET_SHIFT);
1193 }
1194
1195 static void init_authenc_job(struct aead_request *req,
1196 struct aead_edesc *edesc,
1197 bool all_contig, bool encrypt)
1198 {
1199 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1200 struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
1201 struct caam_aead_alg,
1202 aead.base);
1203 unsigned int ivsize = crypto_aead_ivsize(aead);
1204 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
1205 struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
1206 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
1207 OP_ALG_AAI_CTR_MOD128);
1208 const bool is_rfc3686 = alg->caam.rfc3686;
1209 u32 *desc = edesc->hw_desc;
1210 u32 ivoffset = 0;
1211
1212 /*
1213 * AES-CTR needs to load IV in CONTEXT1 reg
1214 * at an offset of 128 bits (16 bytes)
1215 * CONTEXT1[255:128] = IV
1216 */
1217 if (ctr_mode)
1218 ivoffset = 16;
1219
1220 /*
1221 * RFC3686 specific:
1222 * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
1223 */
1224 if (is_rfc3686)
1225 ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;
1226
1227 init_aead_job(req, edesc, all_contig, encrypt);
1228
1229 /*
1230 * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports
1231 * having DPOVRD as destination.
1232 */
1233 if (ctrlpriv->era < 3)
1234 append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
1235 else
1236 append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);
1237
1238 if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
1239 append_load_as_imm(desc, req->iv, ivsize,
1240 LDST_CLASS_1_CCB |
1241 LDST_SRCDST_BYTE_CONTEXT |
1242 (ivoffset << LDST_OFFSET_SHIFT));
1243 }
1244
1245 /*
1246 * Fill in skcipher job descriptor
1247 */
1248 static void init_skcipher_job(struct skcipher_request *req,
1249 struct skcipher_edesc *edesc,
1250 const bool encrypt)
1251 {
1252 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1253 struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
1254 struct device *jrdev = ctx->jrdev;
1255 int ivsize = crypto_skcipher_ivsize(skcipher);
1256 u32 *desc = edesc->hw_desc;
1257 u32 *sh_desc;
1258 u32 in_options = 0, out_options = 0;
1259 dma_addr_t src_dma, dst_dma, ptr;
1260 int len, sec4_sg_index = 0;
1261
1262 print_hex_dump_debug("presciv@"__stringify(__LINE__)": ",
1263 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
1264 dev_dbg(jrdev, "asked=%d, cryptlen=%d\n",
1265 (int)edesc->src_nents > 1 ? 100 : req->cryptlen, req->cryptlen);
1266
1267 caam_dump_sg("src @" __stringify(__LINE__)": ",
1268 DUMP_PREFIX_ADDRESS, 16, 4, req->src,
1269 edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
1270
1271 sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
1272 ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
1273
1274 len = desc_len(sh_desc);
1275 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
1276
1277 if (ivsize || edesc->mapped_src_nents > 1) {
1278 src_dma = edesc->sec4_sg_dma;
1279 sec4_sg_index = edesc->mapped_src_nents + !!ivsize;
1280 in_options = LDST_SGF;
1281 } else {
1282 src_dma = sg_dma_address(req->src);
1283 }
1284
1285 append_seq_in_ptr(desc, src_dma, req->cryptlen + ivsize, in_options);
1286
1287 if (likely(req->src == req->dst)) {
1288 dst_dma = src_dma + !!ivsize * sizeof(struct sec4_sg_entry);
1289 out_options = in_options;
1290 } else if (!ivsize && edesc->mapped_dst_nents == 1) {
1291 dst_dma = sg_dma_address(req->dst);
1292 } else {
1293 dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
1294 sizeof(struct sec4_sg_entry);
1295 out_options = LDST_SGF;
1296 }
1297
1298 append_seq_out_ptr(desc, dst_dma, req->cryptlen + ivsize, out_options);
1299 }
1300
1301 /*
1302 * allocate and map the aead extended descriptor
1303 */
1304 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1305 int desc_bytes, bool *all_contig_ptr,
1306 bool encrypt)
1307 {
1308 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1309 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
1310 struct device *jrdev = ctx->jrdev;
1311 struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
1312 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1313 GFP_KERNEL : GFP_ATOMIC;
1314 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
1315 int src_len, dst_len = 0;
1316 struct aead_edesc *edesc;
1317 int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
1318 unsigned int authsize = ctx->authsize;
1319
1320 if (unlikely(req->dst != req->src)) {
1321 src_len = req->assoclen + req->cryptlen;
1322 dst_len = src_len + (encrypt ? authsize : (-authsize));
1323
1324 src_nents = sg_nents_for_len(req->src, src_len);
1325 if (unlikely(src_nents < 0)) {
1326 dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
1327 src_len);
1328 return ERR_PTR(src_nents);
1329 }
1330
1331 dst_nents = sg_nents_for_len(req->dst, dst_len);
1332 if (unlikely(dst_nents < 0)) {
1333 dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
1334 dst_len);
1335 return ERR_PTR(dst_nents);
1336 }
1337 } else {
1338 src_len = req->assoclen + req->cryptlen +
1339 (encrypt ? authsize : 0);
1340
1341 src_nents = sg_nents_for_len(req->src, src_len);
1342 if (unlikely(src_nents < 0)) {
1343 dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
1344 src_len);
1345 return ERR_PTR(src_nents);
1346 }
1347 }
1348
1349 if (likely(req->src == req->dst)) {
1350 mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
1351 DMA_BIDIRECTIONAL);
1352 if (unlikely(!mapped_src_nents)) {
1353 dev_err(jrdev, "unable to map source\n");
1354 return ERR_PTR(-ENOMEM);
1355 }
1356 } else {
1357 /* Cover also the case of null (zero length) input data */
1358 if (src_nents) {
1359 mapped_src_nents = dma_map_sg(jrdev, req->src,
1360 src_nents, DMA_TO_DEVICE);
1361 if (unlikely(!mapped_src_nents)) {
1362 dev_err(jrdev, "unable to map source\n");
1363 return ERR_PTR(-ENOMEM);
1364 }
1365 } else {
1366 mapped_src_nents = 0;
1367 }
1368
1369 /* Cover also the case of null (zero length) output data */
1370 if (dst_nents) {
1371 mapped_dst_nents = dma_map_sg(jrdev, req->dst,
1372 dst_nents,
1373 DMA_FROM_DEVICE);
1374 if (unlikely(!mapped_dst_nents)) {
1375 dev_err(jrdev, "unable to map destination\n");
1376 dma_unmap_sg(jrdev, req->src, src_nents,
1377 DMA_TO_DEVICE);
1378 return ERR_PTR(-ENOMEM);
1379 }
1380 } else {
1381 mapped_dst_nents = 0;
1382 }
1383 }
1384
1385 /*
1386 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
1387 * the end of the table by allocating more S/G entries.
1388 */
1389 sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
1390 if (mapped_dst_nents > 1)
1391 sec4_sg_len += pad_sg_nents(mapped_dst_nents);
1392 else
1393 sec4_sg_len = pad_sg_nents(sec4_sg_len);
1394
1395 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
1396
1397 /* allocate space for base edesc and hw desc commands, link tables */
1398 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, flags);
1399 if (!edesc) {
1400 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
1401 0, 0, 0);
1402 return ERR_PTR(-ENOMEM);
1403 }
1404
1405 edesc->src_nents = src_nents;
1406 edesc->dst_nents = dst_nents;
1407 edesc->mapped_src_nents = mapped_src_nents;
1408 edesc->mapped_dst_nents = mapped_dst_nents;
1409 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
1410 desc_bytes;
1411
1412 rctx->edesc = edesc;
1413
1414 *all_contig_ptr = !(mapped_src_nents > 1);
1415
1416 sec4_sg_index = 0;
1417 if (mapped_src_nents > 1) {
1418 sg_to_sec4_sg_last(req->src, src_len,
1419 edesc->sec4_sg + sec4_sg_index, 0);
1420 sec4_sg_index += mapped_src_nents;
1421 }
1422 if (mapped_dst_nents > 1) {
1423 sg_to_sec4_sg_last(req->dst, dst_len,
1424 edesc->sec4_sg + sec4_sg_index, 0);
1425 }
1426
1427 if (!sec4_sg_bytes)
1428 return edesc;
1429
1430 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1431 sec4_sg_bytes, DMA_TO_DEVICE);
1432 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1433 dev_err(jrdev, "unable to map S/G table\n");
1434 aead_unmap(jrdev, edesc, req);
1435 kfree(edesc);
1436 return ERR_PTR(-ENOMEM);
1437 }
1438
1439 edesc->sec4_sg_bytes = sec4_sg_bytes;
1440
1441 return edesc;
1442 }
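
/*
 * S/G padding sketch: since the engine reads 4 S/G entries at a time,
 * pad_sg_nents() rounds the entry count up to a multiple of four (e.g.
 * 5 mapped dst entries reserve 8 table slots) so the reads never run
 * past the end of the mapped table.
 */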
1443
1444 static int aead_enqueue_req(struct device *jrdev, struct aead_request *req)
1445 {
1446 struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
1447 struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
1448 struct aead_edesc *edesc = rctx->edesc;
1449 u32 *desc = edesc->hw_desc;
1450 int ret;
1451
1452 /*
1453 * Only backlog requests are sent to the crypto engine, since the others
1454 * can be handled by CAAM, if free, especially since the JR has up to 1024
1455 * entries (more than the 10 entries of the crypto engine).
1456 */
1457 if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
1458 ret = crypto_transfer_aead_request_to_engine(jrpriv->engine,
1459 req);
1460 else
1461 ret = caam_jr_enqueue(jrdev, desc, aead_crypt_done, req);
1462
1463 if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
1464 aead_unmap(jrdev, edesc, req);
1465 kfree(rctx->edesc);
1466 }
1467
1468 return ret;
1469 }
1470
1471 static inline int chachapoly_crypt(struct aead_request *req, bool encrypt)
1472 {
1473 struct aead_edesc *edesc;
1474 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1475 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
1476 struct device *jrdev = ctx->jrdev;
1477 bool all_contig;
1478 u32 *desc;
1479
1480 edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
1481 encrypt);
1482 if (IS_ERR(edesc))
1483 return PTR_ERR(edesc);
1484
1485 desc = edesc->hw_desc;
1486
1487 init_chachapoly_job(req, edesc, all_contig, encrypt);
1488 print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
1489 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1490 1);
1491
1492 return aead_enqueue_req(jrdev, req);
1493 }
1494
1495 static int chachapoly_encrypt(struct aead_request *req)
1496 {
1497 return chachapoly_crypt(req, true);
1498 }
1499
1500 static int chachapoly_decrypt(struct aead_request *req)
1501 {
1502 return chachapoly_crypt(req, false);
1503 }
1504
1505 static inline int aead_crypt(struct aead_request *req, bool encrypt)
1506 {
1507 struct aead_edesc *edesc;
1508 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1509 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
1510 struct device *jrdev = ctx->jrdev;
1511 bool all_contig;
1512
1513 /* allocate extended descriptor */
1514 edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
1515 &all_contig, encrypt);
1516 if (IS_ERR(edesc))
1517 return PTR_ERR(edesc);
1518
1519 /* Create and submit job descriptor */
1520 init_authenc_job(req, edesc, all_contig, encrypt);
1521
1522 print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
1523 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1524 desc_bytes(edesc->hw_desc), 1);
1525
1526 return aead_enqueue_req(jrdev, req);
1527 }
1528
1529 static int aead_encrypt(struct aead_request *req)
1530 {
1531 return aead_crypt(req, true);
1532 }
1533
1534 static int aead_decrypt(struct aead_request *req)
1535 {
1536 return aead_crypt(req, false);
1537 }
1538
1539 static int aead_do_one_req(struct crypto_engine *engine, void *areq)
1540 {
1541 struct aead_request *req = aead_request_cast(areq);
1542 struct caam_ctx *ctx = crypto_aead_ctx_dma(crypto_aead_reqtfm(req));
1543 struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
1544 u32 *desc = rctx->edesc->hw_desc;
1545 int ret;
1546
1547 rctx->edesc->bklog = true;
1548
1549 ret = caam_jr_enqueue(ctx->jrdev, desc, aead_crypt_done, req);
1550
1551 if (ret == -ENOSPC && engine->retry_support)
1552 return ret;
1553
1554 if (ret != -EINPROGRESS) {
1555 aead_unmap(ctx->jrdev, rctx->edesc, req);
1556 kfree(rctx->edesc);
1557 } else {
1558 ret = 0;
1559 }
1560
1561 return ret;
1562 }
1563
1564 static inline int gcm_crypt(struct aead_request *req, bool encrypt)
1565 {
1566 struct aead_edesc *edesc;
1567 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1568 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
1569 struct device *jrdev = ctx->jrdev;
1570 bool all_contig;
1571
1572 /* allocate extended descriptor */
1573 edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig,
1574 encrypt);
1575 if (IS_ERR(edesc))
1576 return PTR_ERR(edesc);
1577
1578 /* Create and submit job descriptor */
1579 init_gcm_job(req, edesc, all_contig, encrypt);
1580
1581 print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
1582 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1583 desc_bytes(edesc->hw_desc), 1);
1584
1585 return aead_enqueue_req(jrdev, req);
1586 }
1587
1588 static int gcm_encrypt(struct aead_request *req)
1589 {
1590 return gcm_crypt(req, true);
1591 }
1592
1593 static int gcm_decrypt(struct aead_request *req)
1594 {
1595 return gcm_crypt(req, false);
1596 }
1597
1598 static int ipsec_gcm_encrypt(struct aead_request *req)
1599 {
1600 return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_encrypt(req);
1601 }
1602
1603 static int ipsec_gcm_decrypt(struct aead_request *req)
1604 {
1605 return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_decrypt(req);
1606 }
1607
1608 /*
1609 * allocate and map the skcipher extended descriptor for skcipher
1610 */
1611 static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
1612 int desc_bytes)
1613 {
1614 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1615 struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
1616 struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
1617 struct device *jrdev = ctx->jrdev;
1618 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1619 GFP_KERNEL : GFP_ATOMIC;
1620 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
1621 struct skcipher_edesc *edesc;
1622 dma_addr_t iv_dma = 0;
1623 u8 *iv;
1624 int ivsize = crypto_skcipher_ivsize(skcipher);
1625 int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
1626 unsigned int aligned_size;
1627
1628 src_nents = sg_nents_for_len(req->src, req->cryptlen);
1629 if (unlikely(src_nents < 0)) {
1630 dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
1631 req->cryptlen);
1632 return ERR_PTR(src_nents);
1633 }
1634
1635 if (req->dst != req->src) {
1636 dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
1637 if (unlikely(dst_nents < 0)) {
1638 dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
1639 req->cryptlen);
1640 return ERR_PTR(dst_nents);
1641 }
1642 }
1643
1644 if (likely(req->src == req->dst)) {
1645 mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
1646 DMA_BIDIRECTIONAL);
1647 if (unlikely(!mapped_src_nents)) {
1648 dev_err(jrdev, "unable to map source\n");
1649 return ERR_PTR(-ENOMEM);
1650 }
1651 } else {
1652 mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
1653 DMA_TO_DEVICE);
1654 if (unlikely(!mapped_src_nents)) {
1655 dev_err(jrdev, "unable to map source\n");
1656 return ERR_PTR(-ENOMEM);
1657 }
1658 mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
1659 DMA_FROM_DEVICE);
1660 if (unlikely(!mapped_dst_nents)) {
1661 dev_err(jrdev, "unable to map destination\n");
1662 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1663 return ERR_PTR(-ENOMEM);
1664 }
1665 }
1666
1667 if (!ivsize && mapped_src_nents == 1)
1668 sec4_sg_ents = 0; // no need for an input hw s/g table
1669 else
1670 sec4_sg_ents = mapped_src_nents + !!ivsize;
1671 dst_sg_idx = sec4_sg_ents;
1672
1673 /*
1674 * Input, output HW S/G tables: [IV, src][dst, IV]
1675 * IV entries point to the same buffer
1676 * If src == dst, S/G entries are reused (S/G tables overlap)
1677 *
1678 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
1679 * the end of the table by allocating more S/G entries. Logic:
1680 * if (output S/G)
1681 * pad output S/G, if needed
1682 * else if (input S/G) ...
1683 * pad input S/G, if needed
1684 */
1685 if (ivsize || mapped_dst_nents > 1) {
1686 if (req->src == req->dst)
1687 sec4_sg_ents = !!ivsize + pad_sg_nents(sec4_sg_ents);
1688 else
1689 sec4_sg_ents += pad_sg_nents(mapped_dst_nents +
1690 !!ivsize);
1691 } else {
1692 sec4_sg_ents = pad_sg_nents(sec4_sg_ents);
1693 }
1694
1695 sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
1696
1697 /*
1698 * allocate space for base edesc and hw desc commands, link tables, IV
1699 */
1700 aligned_size = sizeof(*edesc) + desc_bytes + sec4_sg_bytes;
1701 aligned_size = ALIGN(aligned_size, dma_get_cache_alignment());
1702 aligned_size += ~(ARCH_KMALLOC_MINALIGN - 1) &
1703 (dma_get_cache_alignment() - 1);
1704 aligned_size += ALIGN(ivsize, dma_get_cache_alignment());
1705 edesc = kzalloc(aligned_size, flags);
1706 if (!edesc) {
1707 dev_err(jrdev, "could not allocate extended descriptor\n");
1708 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
1709 0, 0, 0);
1710 return ERR_PTR(-ENOMEM);
1711 }
1712
1713 edesc->src_nents = src_nents;
1714 edesc->dst_nents = dst_nents;
1715 edesc->mapped_src_nents = mapped_src_nents;
1716 edesc->mapped_dst_nents = mapped_dst_nents;
1717 edesc->sec4_sg_bytes = sec4_sg_bytes;
1718 edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
1719 desc_bytes);
1720 rctx->edesc = edesc;
1721
1722 /* Make sure IV is located in a DMAable area */
1723 if (ivsize) {
1724 iv = skcipher_edesc_iv(edesc);
1725 memcpy(iv, req->iv, ivsize);
1726
1727 iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_BIDIRECTIONAL);
1728 if (dma_mapping_error(jrdev, iv_dma)) {
1729 dev_err(jrdev, "unable to map IV\n");
1730 caam_unmap(jrdev, req->src, req->dst, src_nents,
1731 dst_nents, 0, 0, 0, 0);
1732 kfree(edesc);
1733 return ERR_PTR(-ENOMEM);
1734 }
1735
1736 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
1737 }
1738 if (dst_sg_idx)
1739 sg_to_sec4_sg(req->src, req->cryptlen, edesc->sec4_sg +
1740 !!ivsize, 0);
1741
1742 if (req->src != req->dst && (ivsize || mapped_dst_nents > 1))
1743 sg_to_sec4_sg(req->dst, req->cryptlen, edesc->sec4_sg +
1744 dst_sg_idx, 0);
1745
1746 if (ivsize)
1747 dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx +
1748 mapped_dst_nents, iv_dma, ivsize, 0);
1749
1750 if (ivsize || mapped_dst_nents > 1)
1751 sg_to_sec4_set_last(edesc->sec4_sg + dst_sg_idx +
1752 mapped_dst_nents - 1 + !!ivsize);
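/*
 * Note the IV appears in both tables and was mapped DMA_BIDIRECTIONAL
 * above: the device reads it ahead of src and writes the next-IV
 * material after dst, which is how the updated IV gets back to the
 * caller so CBC/CTR chaining works across consecutive requests.
 */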
1753
1754 if (sec4_sg_bytes) {
1755 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1756 sec4_sg_bytes,
1757 DMA_TO_DEVICE);
1758 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1759 dev_err(jrdev, "unable to map S/G table\n");
1760 caam_unmap(jrdev, req->src, req->dst, src_nents,
1761 dst_nents, iv_dma, ivsize, 0, 0);
1762 kfree(edesc);
1763 return ERR_PTR(-ENOMEM);
1764 }
1765 }
1766
1767 edesc->iv_dma = iv_dma;
1768
1769 print_hex_dump_debug("skcipher sec4_sg@" __stringify(__LINE__)": ",
1770 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
1771 sec4_sg_bytes, 1);
1772
1773 return edesc;
1774 }
1775
1776 static int skcipher_do_one_req(struct crypto_engine *engine, void *areq)
1777 {
1778 struct skcipher_request *req = skcipher_request_cast(areq);
1779 struct caam_ctx *ctx = crypto_skcipher_ctx_dma(crypto_skcipher_reqtfm(req));
1780 struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
1781 u32 *desc = rctx->edesc->hw_desc;
1782 int ret;
1783
1784 rctx->edesc->bklog = true;
1785
1786 ret = caam_jr_enqueue(ctx->jrdev, desc, skcipher_crypt_done, req);
1787
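/*
 * With retry support, -ENOSPC (job ring full) is passed back so that
 * crypto-engine re-queues the request and retries it later.
 */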
1788 if (ret == -ENOSPC && engine->retry_support)
1789 return ret;
1790
1791 if (ret != -EINPROGRESS) {
1792 skcipher_unmap(ctx->jrdev, rctx->edesc, req);
1793 kfree(rctx->edesc);
1794 } else {
1795 ret = 0;
1796 }
1797
1798 return ret;
1799 }
1800
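/*
 * Returns true when the upper 64 bits of the XTS IV (sector index) are
 * non-zero. CAAM before Era 9 implements only a 64-bit XTS tweak, so
 * skcipher_crypt() below routes such requests to the software fallback.
 */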
1801 static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
1802 {
1803 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1804 unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
1805
1806 return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
1807 }
1808
1809 static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
1810 {
1811 struct skcipher_edesc *edesc;
1812 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1813 struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
1814 struct device *jrdev = ctx->jrdev;
1815 struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
1816 struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
1817 u32 *desc;
1818 int ret = 0;
1819
1820 /*
1821 * XTS is expected to return an error even for input length = 0.
1822 * Note that inputs shorter than the block size are caught during
1823 * HW offloading and likewise return an error.
1824 */
1825 if (!req->cryptlen && !ctx->fallback)
1826 return 0;
1827
1828 if (ctx->fallback && ((ctrlpriv->era <= 8 && xts_skcipher_ivsize(req)) ||
1829 ctx->xts_key_fallback)) {
1830 struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
1831
1832 skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
1833 skcipher_request_set_callback(&rctx->fallback_req,
1834 req->base.flags,
1835 req->base.complete,
1836 req->base.data);
1837 skcipher_request_set_crypt(&rctx->fallback_req, req->src,
1838 req->dst, req->cryptlen, req->iv);
1839
1840 return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
1841 crypto_skcipher_decrypt(&rctx->fallback_req);
1842 }
1843
1844 /* allocate extended descriptor */
1845 edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
1846 if (IS_ERR(edesc))
1847 return PTR_ERR(edesc);
1848
1849 /* Create and submit job descriptor*/
1850 init_skcipher_job(req, edesc, encrypt);
1851
1852 print_hex_dump_debug("skcipher jobdesc@" __stringify(__LINE__)": ",
1853 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1854 desc_bytes(edesc->hw_desc), 1);
1855
1856 desc = edesc->hw_desc;
1857 /*
1858 * Only backlog requests are sent to crypto-engine, since the others
1859 * can be handled directly by CAAM if it is free, especially since the
1860 * JR has up to 1024 entries (more than crypto-engine's 10 entries).
1861 */
1862 if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
1863 ret = crypto_transfer_skcipher_request_to_engine(jrpriv->engine,
1864 req);
1865 else
1866 ret = caam_jr_enqueue(jrdev, desc, skcipher_crypt_done, req);
1867
1868 if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
1869 skcipher_unmap(jrdev, edesc, req);
1870 kfree(edesc);
1871 }
1872
1873 return ret;
1874 }
1875
1876 static int skcipher_encrypt(struct skcipher_request *req)
1877 {
1878 return skcipher_crypt(req, true);
1879 }
1880
1881 static int skcipher_decrypt(struct skcipher_request *req)
1882 {
1883 return skcipher_crypt(req, false);
1884 }
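/*
 * Usage sketch (illustrative only, not part of the driver): a kernel
 * client reaches the wrappers above through the generic skcipher API,
 * e.g. (my_done_cb is a hypothetical completion callback):
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	struct skcipher_request *req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_skcipher_setkey(tfm, key, keylen);
 *	skcipher_request_set_callback(req, 0, my_done_cb, NULL);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, len, iv);
 *	err = crypto_skcipher_encrypt(req);	// often -EINPROGRESS (async)
 *
 * assuming the "cbc-aes-caam" implementation below wins priority
 * selection.
 */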
1885
1886 static struct caam_skcipher_alg driver_algs[] = {
1887 {
1888 .skcipher.base = {
1889 .base = {
1890 .cra_name = "cbc(aes)",
1891 .cra_driver_name = "cbc-aes-caam",
1892 .cra_blocksize = AES_BLOCK_SIZE,
1893 },
1894 .setkey = aes_skcipher_setkey,
1895 .encrypt = skcipher_encrypt,
1896 .decrypt = skcipher_decrypt,
1897 .min_keysize = AES_MIN_KEY_SIZE,
1898 .max_keysize = AES_MAX_KEY_SIZE,
1899 .ivsize = AES_BLOCK_SIZE,
1900 },
1901 .skcipher.op = {
1902 .do_one_request = skcipher_do_one_req,
1903 },
1904 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1905 },
1906 {
1907 .skcipher.base = {
1908 .base = {
1909 .cra_name = "cbc(des3_ede)",
1910 .cra_driver_name = "cbc-3des-caam",
1911 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1912 },
1913 .setkey = des3_skcipher_setkey,
1914 .encrypt = skcipher_encrypt,
1915 .decrypt = skcipher_decrypt,
1916 .min_keysize = DES3_EDE_KEY_SIZE,
1917 .max_keysize = DES3_EDE_KEY_SIZE,
1918 .ivsize = DES3_EDE_BLOCK_SIZE,
1919 },
1920 .skcipher.op = {
1921 .do_one_request = skcipher_do_one_req,
1922 },
1923 .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1924 },
1925 {
1926 .skcipher.base = {
1927 .base = {
1928 .cra_name = "cbc(des)",
1929 .cra_driver_name = "cbc-des-caam",
1930 .cra_blocksize = DES_BLOCK_SIZE,
1931 },
1932 .setkey = des_skcipher_setkey,
1933 .encrypt = skcipher_encrypt,
1934 .decrypt = skcipher_decrypt,
1935 .min_keysize = DES_KEY_SIZE,
1936 .max_keysize = DES_KEY_SIZE,
1937 .ivsize = DES_BLOCK_SIZE,
1938 },
1939 .skcipher.op = {
1940 .do_one_request = skcipher_do_one_req,
1941 },
1942 .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1943 },
1944 {
1945 .skcipher.base = {
1946 .base = {
1947 .cra_name = "ctr(aes)",
1948 .cra_driver_name = "ctr-aes-caam",
1949 .cra_blocksize = 1,
1950 },
1951 .setkey = ctr_skcipher_setkey,
1952 .encrypt = skcipher_encrypt,
1953 .decrypt = skcipher_decrypt,
1954 .min_keysize = AES_MIN_KEY_SIZE,
1955 .max_keysize = AES_MAX_KEY_SIZE,
1956 .ivsize = AES_BLOCK_SIZE,
1957 .chunksize = AES_BLOCK_SIZE,
1958 },
1959 .skcipher.op = {
1960 .do_one_request = skcipher_do_one_req,
1961 },
1962 .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
1963 OP_ALG_AAI_CTR_MOD128,
1964 },
1965 {
1966 .skcipher.base = {
1967 .base = {
1968 .cra_name = "rfc3686(ctr(aes))",
1969 .cra_driver_name = "rfc3686-ctr-aes-caam",
1970 .cra_blocksize = 1,
1971 },
1972 .setkey = rfc3686_skcipher_setkey,
1973 .encrypt = skcipher_encrypt,
1974 .decrypt = skcipher_decrypt,
1975 .min_keysize = AES_MIN_KEY_SIZE +
1976 CTR_RFC3686_NONCE_SIZE,
1977 .max_keysize = AES_MAX_KEY_SIZE +
1978 CTR_RFC3686_NONCE_SIZE,
1979 .ivsize = CTR_RFC3686_IV_SIZE,
1980 .chunksize = AES_BLOCK_SIZE,
1981 },
1982 .skcipher.op = {
1983 .do_one_request = skcipher_do_one_req,
1984 },
1985 .caam = {
1986 .class1_alg_type = OP_ALG_ALGSEL_AES |
1987 OP_ALG_AAI_CTR_MOD128,
1988 .rfc3686 = true,
1989 },
1990 },
1991 {
1992 .skcipher.base = {
1993 .base = {
1994 .cra_name = "xts(aes)",
1995 .cra_driver_name = "xts-aes-caam",
1996 .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
1997 .cra_blocksize = AES_BLOCK_SIZE,
1998 },
1999 .setkey = xts_skcipher_setkey,
2000 .encrypt = skcipher_encrypt,
2001 .decrypt = skcipher_decrypt,
2002 .min_keysize = 2 * AES_MIN_KEY_SIZE,
2003 .max_keysize = 2 * AES_MAX_KEY_SIZE,
2004 .ivsize = AES_BLOCK_SIZE,
2005 },
2006 .skcipher.op = {
2007 .do_one_request = skcipher_do_one_req,
2008 },
2009 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
2010 },
2011 {
2012 .skcipher.base = {
2013 .base = {
2014 .cra_name = "ecb(des)",
2015 .cra_driver_name = "ecb-des-caam",
2016 .cra_blocksize = DES_BLOCK_SIZE,
2017 },
2018 .setkey = des_skcipher_setkey,
2019 .encrypt = skcipher_encrypt,
2020 .decrypt = skcipher_decrypt,
2021 .min_keysize = DES_KEY_SIZE,
2022 .max_keysize = DES_KEY_SIZE,
2023 },
2024 .skcipher.op = {
2025 .do_one_request = skcipher_do_one_req,
2026 },
2027 .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_ECB,
2028 },
2029 {
2030 .skcipher.base = {
2031 .base = {
2032 .cra_name = "ecb(aes)",
2033 .cra_driver_name = "ecb-aes-caam",
2034 .cra_blocksize = AES_BLOCK_SIZE,
2035 },
2036 .setkey = aes_skcipher_setkey,
2037 .encrypt = skcipher_encrypt,
2038 .decrypt = skcipher_decrypt,
2039 .min_keysize = AES_MIN_KEY_SIZE,
2040 .max_keysize = AES_MAX_KEY_SIZE,
2041 },
2042 .skcipher.op = {
2043 .do_one_request = skcipher_do_one_req,
2044 },
2045 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_ECB,
2046 },
2047 {
2048 .skcipher.base = {
2049 .base = {
2050 .cra_name = "ecb(des3_ede)",
2051 .cra_driver_name = "ecb-des3-caam",
2052 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2053 },
2054 .setkey = des3_skcipher_setkey,
2055 .encrypt = skcipher_encrypt,
2056 .decrypt = skcipher_decrypt,
2057 .min_keysize = DES3_EDE_KEY_SIZE,
2058 .max_keysize = DES3_EDE_KEY_SIZE,
2059 },
2060 .skcipher.op = {
2061 .do_one_request = skcipher_do_one_req,
2062 },
2063 .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_ECB,
2064 },
2065 };
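/*
 * Extending this table is mechanical; a hypothetical sketch (not an
 * algorithm this driver registers) would look like:
 *
 *	{
 *		.skcipher.base = {
 *			.base = {
 *				.cra_name = "ofb(aes)",
 *				.cra_driver_name = "ofb-aes-caam",
 *				.cra_blocksize = 1,
 *			},
 *			.setkey = aes_skcipher_setkey,
 *			.encrypt = skcipher_encrypt,
 *			.decrypt = skcipher_decrypt,
 *			.min_keysize = AES_MIN_KEY_SIZE,
 *			.max_keysize = AES_MAX_KEY_SIZE,
 *			.ivsize = AES_BLOCK_SIZE,
 *		},
 *		.skcipher.op = {
 *			.do_one_request = skcipher_do_one_req,
 *		},
 *		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_OFB,
 *	},
 */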
2066
2067 static struct caam_aead_alg driver_aeads[] = {
2068 {
2069 .aead.base = {
2070 .base = {
2071 .cra_name = "rfc4106(gcm(aes))",
2072 .cra_driver_name = "rfc4106-gcm-aes-caam",
2073 .cra_blocksize = 1,
2074 },
2075 .setkey = rfc4106_setkey,
2076 .setauthsize = rfc4106_setauthsize,
2077 .encrypt = ipsec_gcm_encrypt,
2078 .decrypt = ipsec_gcm_decrypt,
2079 .ivsize = GCM_RFC4106_IV_SIZE,
2080 .maxauthsize = AES_BLOCK_SIZE,
2081 },
2082 .aead.op = {
2083 .do_one_request = aead_do_one_req,
2084 },
2085 .caam = {
2086 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
2087 .nodkp = true,
2088 },
2089 },
2090 {
2091 .aead.base = {
2092 .base = {
2093 .cra_name = "rfc4543(gcm(aes))",
2094 .cra_driver_name = "rfc4543-gcm-aes-caam",
2095 .cra_blocksize = 1,
2096 },
2097 .setkey = rfc4543_setkey,
2098 .setauthsize = rfc4543_setauthsize,
2099 .encrypt = ipsec_gcm_encrypt,
2100 .decrypt = ipsec_gcm_decrypt,
2101 .ivsize = GCM_RFC4543_IV_SIZE,
2102 .maxauthsize = AES_BLOCK_SIZE,
2103 },
2104 .aead.op = {
2105 .do_one_request = aead_do_one_req,
2106 },
2107 .caam = {
2108 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
2109 .nodkp = true,
2110 },
2111 },
2112 /* Galois Counter Mode */
2113 {
2114 .aead.base = {
2115 .base = {
2116 .cra_name = "gcm(aes)",
2117 .cra_driver_name = "gcm-aes-caam",
2118 .cra_blocksize = 1,
2119 },
2120 .setkey = gcm_setkey,
2121 .setauthsize = gcm_setauthsize,
2122 .encrypt = gcm_encrypt,
2123 .decrypt = gcm_decrypt,
2124 .ivsize = GCM_AES_IV_SIZE,
2125 .maxauthsize = AES_BLOCK_SIZE,
2126 },
2127 .aead.op = {
2128 .do_one_request = aead_do_one_req,
2129 },
2130 .caam = {
2131 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
2132 .nodkp = true,
2133 },
2134 },
2135 /* single-pass ipsec_esp descriptor */
2136 {
2137 .aead.base = {
2138 .base = {
2139 .cra_name = "authenc(hmac(md5),"
2140 "ecb(cipher_null))",
2141 .cra_driver_name = "authenc-hmac-md5-"
2142 "ecb-cipher_null-caam",
2143 .cra_blocksize = NULL_BLOCK_SIZE,
2144 },
2145 .setkey = aead_setkey,
2146 .setauthsize = aead_setauthsize,
2147 .encrypt = aead_encrypt,
2148 .decrypt = aead_decrypt,
2149 .ivsize = NULL_IV_SIZE,
2150 .maxauthsize = MD5_DIGEST_SIZE,
2151 },
2152 .aead.op = {
2153 .do_one_request = aead_do_one_req,
2154 },
2155 .caam = {
2156 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2157 OP_ALG_AAI_HMAC_PRECOMP,
2158 },
2159 },
2160 {
2161 .aead.base = {
2162 .base = {
2163 .cra_name = "authenc(hmac(sha1),"
2164 "ecb(cipher_null))",
2165 .cra_driver_name = "authenc-hmac-sha1-"
2166 "ecb-cipher_null-caam",
2167 .cra_blocksize = NULL_BLOCK_SIZE,
2168 },
2169 .setkey = aead_setkey,
2170 .setauthsize = aead_setauthsize,
2171 .encrypt = aead_encrypt,
2172 .decrypt = aead_decrypt,
2173 .ivsize = NULL_IV_SIZE,
2174 .maxauthsize = SHA1_DIGEST_SIZE,
2175 },
2176 .aead.op = {
2177 .do_one_request = aead_do_one_req,
2178 },
2179 .caam = {
2180 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2181 OP_ALG_AAI_HMAC_PRECOMP,
2182 },
2183 },
2184 {
2185 .aead.base = {
2186 .base = {
2187 .cra_name = "authenc(hmac(sha224),"
2188 "ecb(cipher_null))",
2189 .cra_driver_name = "authenc-hmac-sha224-"
2190 "ecb-cipher_null-caam",
2191 .cra_blocksize = NULL_BLOCK_SIZE,
2192 },
2193 .setkey = aead_setkey,
2194 .setauthsize = aead_setauthsize,
2195 .encrypt = aead_encrypt,
2196 .decrypt = aead_decrypt,
2197 .ivsize = NULL_IV_SIZE,
2198 .maxauthsize = SHA224_DIGEST_SIZE,
2199 },
2200 .aead.op = {
2201 .do_one_request = aead_do_one_req,
2202 },
2203 .caam = {
2204 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2205 OP_ALG_AAI_HMAC_PRECOMP,
2206 },
2207 },
2208 {
2209 .aead.base = {
2210 .base = {
2211 .cra_name = "authenc(hmac(sha256),"
2212 "ecb(cipher_null))",
2213 .cra_driver_name = "authenc-hmac-sha256-"
2214 "ecb-cipher_null-caam",
2215 .cra_blocksize = NULL_BLOCK_SIZE,
2216 },
2217 .setkey = aead_setkey,
2218 .setauthsize = aead_setauthsize,
2219 .encrypt = aead_encrypt,
2220 .decrypt = aead_decrypt,
2221 .ivsize = NULL_IV_SIZE,
2222 .maxauthsize = SHA256_DIGEST_SIZE,
2223 },
2224 .aead.op = {
2225 .do_one_request = aead_do_one_req,
2226 },
2227 .caam = {
2228 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2229 OP_ALG_AAI_HMAC_PRECOMP,
2230 },
2231 },
2232 {
2233 .aead.base = {
2234 .base = {
2235 .cra_name = "authenc(hmac(sha384),"
2236 "ecb(cipher_null))",
2237 .cra_driver_name = "authenc-hmac-sha384-"
2238 "ecb-cipher_null-caam",
2239 .cra_blocksize = NULL_BLOCK_SIZE,
2240 },
2241 .setkey = aead_setkey,
2242 .setauthsize = aead_setauthsize,
2243 .encrypt = aead_encrypt,
2244 .decrypt = aead_decrypt,
2245 .ivsize = NULL_IV_SIZE,
2246 .maxauthsize = SHA384_DIGEST_SIZE,
2247 },
2248 .aead.op = {
2249 .do_one_request = aead_do_one_req,
2250 },
2251 .caam = {
2252 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2253 OP_ALG_AAI_HMAC_PRECOMP,
2254 },
2255 },
2256 {
2257 .aead.base = {
2258 .base = {
2259 .cra_name = "authenc(hmac(sha512),"
2260 "ecb(cipher_null))",
2261 .cra_driver_name = "authenc-hmac-sha512-"
2262 "ecb-cipher_null-caam",
2263 .cra_blocksize = NULL_BLOCK_SIZE,
2264 },
2265 .setkey = aead_setkey,
2266 .setauthsize = aead_setauthsize,
2267 .encrypt = aead_encrypt,
2268 .decrypt = aead_decrypt,
2269 .ivsize = NULL_IV_SIZE,
2270 .maxauthsize = SHA512_DIGEST_SIZE,
2271 },
2272 .aead.op = {
2273 .do_one_request = aead_do_one_req,
2274 },
2275 .caam = {
2276 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2277 OP_ALG_AAI_HMAC_PRECOMP,
2278 },
2279 },
2280 {
2281 .aead.base = {
2282 .base = {
2283 .cra_name = "authenc(hmac(md5),cbc(aes))",
2284 .cra_driver_name = "authenc-hmac-md5-"
2285 "cbc-aes-caam",
2286 .cra_blocksize = AES_BLOCK_SIZE,
2287 },
2288 .setkey = aead_setkey,
2289 .setauthsize = aead_setauthsize,
2290 .encrypt = aead_encrypt,
2291 .decrypt = aead_decrypt,
2292 .ivsize = AES_BLOCK_SIZE,
2293 .maxauthsize = MD5_DIGEST_SIZE,
2294 },
2295 .aead.op = {
2296 .do_one_request = aead_do_one_req,
2297 },
2298 .caam = {
2299 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2300 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2301 OP_ALG_AAI_HMAC_PRECOMP,
2302 },
2303 },
2304 {
2305 .aead.base = {
2306 .base = {
2307 .cra_name = "echainiv(authenc(hmac(md5),"
2308 "cbc(aes)))",
2309 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2310 "cbc-aes-caam",
2311 .cra_blocksize = AES_BLOCK_SIZE,
2312 },
2313 .setkey = aead_setkey,
2314 .setauthsize = aead_setauthsize,
2315 .encrypt = aead_encrypt,
2316 .decrypt = aead_decrypt,
2317 .ivsize = AES_BLOCK_SIZE,
2318 .maxauthsize = MD5_DIGEST_SIZE,
2319 },
2320 .aead.op = {
2321 .do_one_request = aead_do_one_req,
2322 },
2323 .caam = {
2324 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2325 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2326 OP_ALG_AAI_HMAC_PRECOMP,
2327 .geniv = true,
2328 },
2329 },
2330 {
2331 .aead.base = {
2332 .base = {
2333 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2334 .cra_driver_name = "authenc-hmac-sha1-"
2335 "cbc-aes-caam",
2336 .cra_blocksize = AES_BLOCK_SIZE,
2337 },
2338 .setkey = aead_setkey,
2339 .setauthsize = aead_setauthsize,
2340 .encrypt = aead_encrypt,
2341 .decrypt = aead_decrypt,
2342 .ivsize = AES_BLOCK_SIZE,
2343 .maxauthsize = SHA1_DIGEST_SIZE,
2344 },
2345 .aead.op = {
2346 .do_one_request = aead_do_one_req,
2347 },
2348 .caam = {
2349 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2350 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2351 OP_ALG_AAI_HMAC_PRECOMP,
2352 },
2353 },
2354 {
2355 .aead.base = {
2356 .base = {
2357 .cra_name = "echainiv(authenc(hmac(sha1),"
2358 "cbc(aes)))",
2359 .cra_driver_name = "echainiv-authenc-"
2360 "hmac-sha1-cbc-aes-caam",
2361 .cra_blocksize = AES_BLOCK_SIZE,
2362 },
2363 .setkey = aead_setkey,
2364 .setauthsize = aead_setauthsize,
2365 .encrypt = aead_encrypt,
2366 .decrypt = aead_decrypt,
2367 .ivsize = AES_BLOCK_SIZE,
2368 .maxauthsize = SHA1_DIGEST_SIZE,
2369 },
2370 .aead.op = {
2371 .do_one_request = aead_do_one_req,
2372 },
2373 .caam = {
2374 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2375 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2376 OP_ALG_AAI_HMAC_PRECOMP,
2377 .geniv = true,
2378 },
2379 },
2380 {
2381 .aead.base = {
2382 .base = {
2383 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2384 .cra_driver_name = "authenc-hmac-sha224-"
2385 "cbc-aes-caam",
2386 .cra_blocksize = AES_BLOCK_SIZE,
2387 },
2388 .setkey = aead_setkey,
2389 .setauthsize = aead_setauthsize,
2390 .encrypt = aead_encrypt,
2391 .decrypt = aead_decrypt,
2392 .ivsize = AES_BLOCK_SIZE,
2393 .maxauthsize = SHA224_DIGEST_SIZE,
2394 },
2395 .aead.op = {
2396 .do_one_request = aead_do_one_req,
2397 },
2398 .caam = {
2399 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2400 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2401 OP_ALG_AAI_HMAC_PRECOMP,
2402 },
2403 },
2404 {
2405 .aead.base = {
2406 .base = {
2407 .cra_name = "echainiv(authenc(hmac(sha224),"
2408 "cbc(aes)))",
2409 .cra_driver_name = "echainiv-authenc-"
2410 "hmac-sha224-cbc-aes-caam",
2411 .cra_blocksize = AES_BLOCK_SIZE,
2412 },
2413 .setkey = aead_setkey,
2414 .setauthsize = aead_setauthsize,
2415 .encrypt = aead_encrypt,
2416 .decrypt = aead_decrypt,
2417 .ivsize = AES_BLOCK_SIZE,
2418 .maxauthsize = SHA224_DIGEST_SIZE,
2419 },
2420 .aead.op = {
2421 .do_one_request = aead_do_one_req,
2422 },
2423 .caam = {
2424 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2425 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2426 OP_ALG_AAI_HMAC_PRECOMP,
2427 .geniv = true,
2428 },
2429 },
2430 {
2431 .aead.base = {
2432 .base = {
2433 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2434 .cra_driver_name = "authenc-hmac-sha256-"
2435 "cbc-aes-caam",
2436 .cra_blocksize = AES_BLOCK_SIZE,
2437 },
2438 .setkey = aead_setkey,
2439 .setauthsize = aead_setauthsize,
2440 .encrypt = aead_encrypt,
2441 .decrypt = aead_decrypt,
2442 .ivsize = AES_BLOCK_SIZE,
2443 .maxauthsize = SHA256_DIGEST_SIZE,
2444 },
2445 .aead.op = {
2446 .do_one_request = aead_do_one_req,
2447 },
2448 .caam = {
2449 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2450 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2451 OP_ALG_AAI_HMAC_PRECOMP,
2452 },
2453 },
2454 {
2455 .aead.base = {
2456 .base = {
2457 .cra_name = "echainiv(authenc(hmac(sha256),"
2458 "cbc(aes)))",
2459 .cra_driver_name = "echainiv-authenc-"
2460 "hmac-sha256-cbc-aes-caam",
2461 .cra_blocksize = AES_BLOCK_SIZE,
2462 },
2463 .setkey = aead_setkey,
2464 .setauthsize = aead_setauthsize,
2465 .encrypt = aead_encrypt,
2466 .decrypt = aead_decrypt,
2467 .ivsize = AES_BLOCK_SIZE,
2468 .maxauthsize = SHA256_DIGEST_SIZE,
2469 },
2470 .aead.op = {
2471 .do_one_request = aead_do_one_req,
2472 },
2473 .caam = {
2474 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2475 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2476 OP_ALG_AAI_HMAC_PRECOMP,
2477 .geniv = true,
2478 },
2479 },
2480 {
2481 .aead.base = {
2482 .base = {
2483 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2484 .cra_driver_name = "authenc-hmac-sha384-"
2485 "cbc-aes-caam",
2486 .cra_blocksize = AES_BLOCK_SIZE,
2487 },
2488 .setkey = aead_setkey,
2489 .setauthsize = aead_setauthsize,
2490 .encrypt = aead_encrypt,
2491 .decrypt = aead_decrypt,
2492 .ivsize = AES_BLOCK_SIZE,
2493 .maxauthsize = SHA384_DIGEST_SIZE,
2494 },
2495 .aead.op = {
2496 .do_one_request = aead_do_one_req,
2497 },
2498 .caam = {
2499 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2500 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2501 OP_ALG_AAI_HMAC_PRECOMP,
2502 },
2503 },
2504 {
2505 .aead.base = {
2506 .base = {
2507 .cra_name = "echainiv(authenc(hmac(sha384),"
2508 "cbc(aes)))",
2509 .cra_driver_name = "echainiv-authenc-"
2510 "hmac-sha384-cbc-aes-caam",
2511 .cra_blocksize = AES_BLOCK_SIZE,
2512 },
2513 .setkey = aead_setkey,
2514 .setauthsize = aead_setauthsize,
2515 .encrypt = aead_encrypt,
2516 .decrypt = aead_decrypt,
2517 .ivsize = AES_BLOCK_SIZE,
2518 .maxauthsize = SHA384_DIGEST_SIZE,
2519 },
2520 .aead.op = {
2521 .do_one_request = aead_do_one_req,
2522 },
2523 .caam = {
2524 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2525 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2526 OP_ALG_AAI_HMAC_PRECOMP,
2527 .geniv = true,
2528 },
2529 },
2530 {
2531 .aead.base = {
2532 .base = {
2533 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2534 .cra_driver_name = "authenc-hmac-sha512-"
2535 "cbc-aes-caam",
2536 .cra_blocksize = AES_BLOCK_SIZE,
2537 },
2538 .setkey = aead_setkey,
2539 .setauthsize = aead_setauthsize,
2540 .encrypt = aead_encrypt,
2541 .decrypt = aead_decrypt,
2542 .ivsize = AES_BLOCK_SIZE,
2543 .maxauthsize = SHA512_DIGEST_SIZE,
2544 },
2545 .aead.op = {
2546 .do_one_request = aead_do_one_req,
2547 },
2548 .caam = {
2549 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2550 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2551 OP_ALG_AAI_HMAC_PRECOMP,
2552 },
2553 },
2554 {
2555 .aead.base = {
2556 .base = {
2557 .cra_name = "echainiv(authenc(hmac(sha512),"
2558 "cbc(aes)))",
2559 .cra_driver_name = "echainiv-authenc-"
2560 "hmac-sha512-cbc-aes-caam",
2561 .cra_blocksize = AES_BLOCK_SIZE,
2562 },
2563 .setkey = aead_setkey,
2564 .setauthsize = aead_setauthsize,
2565 .encrypt = aead_encrypt,
2566 .decrypt = aead_decrypt,
2567 .ivsize = AES_BLOCK_SIZE,
2568 .maxauthsize = SHA512_DIGEST_SIZE,
2569 },
2570 .aead.op = {
2571 .do_one_request = aead_do_one_req,
2572 },
2573 .caam = {
2574 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2575 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2576 OP_ALG_AAI_HMAC_PRECOMP,
2577 .geniv = true,
2578 },
2579 },
2580 {
2581 .aead.base = {
2582 .base = {
2583 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2584 .cra_driver_name = "authenc-hmac-md5-"
2585 "cbc-des3_ede-caam",
2586 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2587 },
2588 .setkey = des3_aead_setkey,
2589 .setauthsize = aead_setauthsize,
2590 .encrypt = aead_encrypt,
2591 .decrypt = aead_decrypt,
2592 .ivsize = DES3_EDE_BLOCK_SIZE,
2593 .maxauthsize = MD5_DIGEST_SIZE,
2594 },
2595 .aead.op = {
2596 .do_one_request = aead_do_one_req,
2597 },
2598 .caam = {
2599 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2600 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2601 OP_ALG_AAI_HMAC_PRECOMP,
2602 }
2603 },
2604 {
2605 .aead.base = {
2606 .base = {
2607 .cra_name = "echainiv(authenc(hmac(md5),"
2608 "cbc(des3_ede)))",
2609 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2610 "cbc-des3_ede-caam",
2611 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2612 },
2613 .setkey = des3_aead_setkey,
2614 .setauthsize = aead_setauthsize,
2615 .encrypt = aead_encrypt,
2616 .decrypt = aead_decrypt,
2617 .ivsize = DES3_EDE_BLOCK_SIZE,
2618 .maxauthsize = MD5_DIGEST_SIZE,
2619 },
2620 .aead.op = {
2621 .do_one_request = aead_do_one_req,
2622 },
2623 .caam = {
2624 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2625 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2626 OP_ALG_AAI_HMAC_PRECOMP,
2627 .geniv = true,
2628 }
2629 },
2630 {
2631 .aead.base = {
2632 .base = {
2633 .cra_name = "authenc(hmac(sha1),"
2634 "cbc(des3_ede))",
2635 .cra_driver_name = "authenc-hmac-sha1-"
2636 "cbc-des3_ede-caam",
2637 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2638 },
2639 .setkey = des3_aead_setkey,
2640 .setauthsize = aead_setauthsize,
2641 .encrypt = aead_encrypt,
2642 .decrypt = aead_decrypt,
2643 .ivsize = DES3_EDE_BLOCK_SIZE,
2644 .maxauthsize = SHA1_DIGEST_SIZE,
2645 },
2646 .aead.op = {
2647 .do_one_request = aead_do_one_req,
2648 },
2649 .caam = {
2650 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2651 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2652 OP_ALG_AAI_HMAC_PRECOMP,
2653 },
2654 },
2655 {
2656 .aead.base = {
2657 .base = {
2658 .cra_name = "echainiv(authenc(hmac(sha1),"
2659 "cbc(des3_ede)))",
2660 .cra_driver_name = "echainiv-authenc-"
2661 "hmac-sha1-"
2662 "cbc-des3_ede-caam",
2663 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2664 },
2665 .setkey = des3_aead_setkey,
2666 .setauthsize = aead_setauthsize,
2667 .encrypt = aead_encrypt,
2668 .decrypt = aead_decrypt,
2669 .ivsize = DES3_EDE_BLOCK_SIZE,
2670 .maxauthsize = SHA1_DIGEST_SIZE,
2671 },
2672 .aead.op = {
2673 .do_one_request = aead_do_one_req,
2674 },
2675 .caam = {
2676 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2677 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2678 OP_ALG_AAI_HMAC_PRECOMP,
2679 .geniv = true,
2680 },
2681 },
2682 {
2683 .aead.base = {
2684 .base = {
2685 .cra_name = "authenc(hmac(sha224),"
2686 "cbc(des3_ede))",
2687 .cra_driver_name = "authenc-hmac-sha224-"
2688 "cbc-des3_ede-caam",
2689 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2690 },
2691 .setkey = des3_aead_setkey,
2692 .setauthsize = aead_setauthsize,
2693 .encrypt = aead_encrypt,
2694 .decrypt = aead_decrypt,
2695 .ivsize = DES3_EDE_BLOCK_SIZE,
2696 .maxauthsize = SHA224_DIGEST_SIZE,
2697 },
2698 .aead.op = {
2699 .do_one_request = aead_do_one_req,
2700 },
2701 .caam = {
2702 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2703 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2704 OP_ALG_AAI_HMAC_PRECOMP,
2705 },
2706 },
2707 {
2708 .aead.base = {
2709 .base = {
2710 .cra_name = "echainiv(authenc(hmac(sha224),"
2711 "cbc(des3_ede)))",
2712 .cra_driver_name = "echainiv-authenc-"
2713 "hmac-sha224-"
2714 "cbc-des3_ede-caam",
2715 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2716 },
2717 .setkey = des3_aead_setkey,
2718 .setauthsize = aead_setauthsize,
2719 .encrypt = aead_encrypt,
2720 .decrypt = aead_decrypt,
2721 .ivsize = DES3_EDE_BLOCK_SIZE,
2722 .maxauthsize = SHA224_DIGEST_SIZE,
2723 },
2724 .aead.op = {
2725 .do_one_request = aead_do_one_req,
2726 },
2727 .caam = {
2728 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2729 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2730 OP_ALG_AAI_HMAC_PRECOMP,
2731 .geniv = true,
2732 },
2733 },
2734 {
2735 .aead.base = {
2736 .base = {
2737 .cra_name = "authenc(hmac(sha256),"
2738 "cbc(des3_ede))",
2739 .cra_driver_name = "authenc-hmac-sha256-"
2740 "cbc-des3_ede-caam",
2741 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2742 },
2743 .setkey = des3_aead_setkey,
2744 .setauthsize = aead_setauthsize,
2745 .encrypt = aead_encrypt,
2746 .decrypt = aead_decrypt,
2747 .ivsize = DES3_EDE_BLOCK_SIZE,
2748 .maxauthsize = SHA256_DIGEST_SIZE,
2749 },
2750 .aead.op = {
2751 .do_one_request = aead_do_one_req,
2752 },
2753 .caam = {
2754 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2755 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2756 OP_ALG_AAI_HMAC_PRECOMP,
2757 },
2758 },
2759 {
2760 .aead.base = {
2761 .base = {
2762 .cra_name = "echainiv(authenc(hmac(sha256),"
2763 "cbc(des3_ede)))",
2764 .cra_driver_name = "echainiv-authenc-"
2765 "hmac-sha256-"
2766 "cbc-des3_ede-caam",
2767 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2768 },
2769 .setkey = des3_aead_setkey,
2770 .setauthsize = aead_setauthsize,
2771 .encrypt = aead_encrypt,
2772 .decrypt = aead_decrypt,
2773 .ivsize = DES3_EDE_BLOCK_SIZE,
2774 .maxauthsize = SHA256_DIGEST_SIZE,
2775 },
2776 .aead.op = {
2777 .do_one_request = aead_do_one_req,
2778 },
2779 .caam = {
2780 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2781 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2782 OP_ALG_AAI_HMAC_PRECOMP,
2783 .geniv = true,
2784 },
2785 },
2786 {
2787 .aead.base = {
2788 .base = {
2789 .cra_name = "authenc(hmac(sha384),"
2790 "cbc(des3_ede))",
2791 .cra_driver_name = "authenc-hmac-sha384-"
2792 "cbc-des3_ede-caam",
2793 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2794 },
2795 .setkey = des3_aead_setkey,
2796 .setauthsize = aead_setauthsize,
2797 .encrypt = aead_encrypt,
2798 .decrypt = aead_decrypt,
2799 .ivsize = DES3_EDE_BLOCK_SIZE,
2800 .maxauthsize = SHA384_DIGEST_SIZE,
2801 },
2802 .aead.op = {
2803 .do_one_request = aead_do_one_req,
2804 },
2805 .caam = {
2806 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2807 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2808 OP_ALG_AAI_HMAC_PRECOMP,
2809 },
2810 },
2811 {
2812 .aead.base = {
2813 .base = {
2814 .cra_name = "echainiv(authenc(hmac(sha384),"
2815 "cbc(des3_ede)))",
2816 .cra_driver_name = "echainiv-authenc-"
2817 "hmac-sha384-"
2818 "cbc-des3_ede-caam",
2819 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2820 },
2821 .setkey = des3_aead_setkey,
2822 .setauthsize = aead_setauthsize,
2823 .encrypt = aead_encrypt,
2824 .decrypt = aead_decrypt,
2825 .ivsize = DES3_EDE_BLOCK_SIZE,
2826 .maxauthsize = SHA384_DIGEST_SIZE,
2827 },
2828 .aead.op = {
2829 .do_one_request = aead_do_one_req,
2830 },
2831 .caam = {
2832 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2833 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2834 OP_ALG_AAI_HMAC_PRECOMP,
2835 .geniv = true,
2836 },
2837 },
2838 {
2839 .aead.base = {
2840 .base = {
2841 .cra_name = "authenc(hmac(sha512),"
2842 "cbc(des3_ede))",
2843 .cra_driver_name = "authenc-hmac-sha512-"
2844 "cbc-des3_ede-caam",
2845 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2846 },
2847 .setkey = des3_aead_setkey,
2848 .setauthsize = aead_setauthsize,
2849 .encrypt = aead_encrypt,
2850 .decrypt = aead_decrypt,
2851 .ivsize = DES3_EDE_BLOCK_SIZE,
2852 .maxauthsize = SHA512_DIGEST_SIZE,
2853 },
2854 .aead.op = {
2855 .do_one_request = aead_do_one_req,
2856 },
2857 .caam = {
2858 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2859 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2860 OP_ALG_AAI_HMAC_PRECOMP,
2861 },
2862 },
2863 {
2864 .aead.base = {
2865 .base = {
2866 .cra_name = "echainiv(authenc(hmac(sha512),"
2867 "cbc(des3_ede)))",
2868 .cra_driver_name = "echainiv-authenc-"
2869 "hmac-sha512-"
2870 "cbc-des3_ede-caam",
2871 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2872 },
2873 .setkey = des3_aead_setkey,
2874 .setauthsize = aead_setauthsize,
2875 .encrypt = aead_encrypt,
2876 .decrypt = aead_decrypt,
2877 .ivsize = DES3_EDE_BLOCK_SIZE,
2878 .maxauthsize = SHA512_DIGEST_SIZE,
2879 },
2880 .aead.op = {
2881 .do_one_request = aead_do_one_req,
2882 },
2883 .caam = {
2884 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2885 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2886 OP_ALG_AAI_HMAC_PRECOMP,
2887 .geniv = true,
2888 },
2889 },
2890 {
2891 .aead.base = {
2892 .base = {
2893 .cra_name = "authenc(hmac(md5),cbc(des))",
2894 .cra_driver_name = "authenc-hmac-md5-"
2895 "cbc-des-caam",
2896 .cra_blocksize = DES_BLOCK_SIZE,
2897 },
2898 .setkey = aead_setkey,
2899 .setauthsize = aead_setauthsize,
2900 .encrypt = aead_encrypt,
2901 .decrypt = aead_decrypt,
2902 .ivsize = DES_BLOCK_SIZE,
2903 .maxauthsize = MD5_DIGEST_SIZE,
2904 },
2905 .aead.op = {
2906 .do_one_request = aead_do_one_req,
2907 },
2908 .caam = {
2909 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2910 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2911 OP_ALG_AAI_HMAC_PRECOMP,
2912 },
2913 },
2914 {
2915 .aead.base = {
2916 .base = {
2917 .cra_name = "echainiv(authenc(hmac(md5),"
2918 "cbc(des)))",
2919 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2920 "cbc-des-caam",
2921 .cra_blocksize = DES_BLOCK_SIZE,
2922 },
2923 .setkey = aead_setkey,
2924 .setauthsize = aead_setauthsize,
2925 .encrypt = aead_encrypt,
2926 .decrypt = aead_decrypt,
2927 .ivsize = DES_BLOCK_SIZE,
2928 .maxauthsize = MD5_DIGEST_SIZE,
2929 },
2930 .aead.op = {
2931 .do_one_request = aead_do_one_req,
2932 },
2933 .caam = {
2934 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2935 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2936 OP_ALG_AAI_HMAC_PRECOMP,
2937 .geniv = true,
2938 },
2939 },
2940 {
2941 .aead.base = {
2942 .base = {
2943 .cra_name = "authenc(hmac(sha1),cbc(des))",
2944 .cra_driver_name = "authenc-hmac-sha1-"
2945 "cbc-des-caam",
2946 .cra_blocksize = DES_BLOCK_SIZE,
2947 },
2948 .setkey = aead_setkey,
2949 .setauthsize = aead_setauthsize,
2950 .encrypt = aead_encrypt,
2951 .decrypt = aead_decrypt,
2952 .ivsize = DES_BLOCK_SIZE,
2953 .maxauthsize = SHA1_DIGEST_SIZE,
2954 },
2955 .aead.op = {
2956 .do_one_request = aead_do_one_req,
2957 },
2958 .caam = {
2959 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2960 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2961 OP_ALG_AAI_HMAC_PRECOMP,
2962 },
2963 },
2964 {
2965 .aead.base = {
2966 .base = {
2967 .cra_name = "echainiv(authenc(hmac(sha1),"
2968 "cbc(des)))",
2969 .cra_driver_name = "echainiv-authenc-"
2970 "hmac-sha1-cbc-des-caam",
2971 .cra_blocksize = DES_BLOCK_SIZE,
2972 },
2973 .setkey = aead_setkey,
2974 .setauthsize = aead_setauthsize,
2975 .encrypt = aead_encrypt,
2976 .decrypt = aead_decrypt,
2977 .ivsize = DES_BLOCK_SIZE,
2978 .maxauthsize = SHA1_DIGEST_SIZE,
2979 },
2980 .aead.op = {
2981 .do_one_request = aead_do_one_req,
2982 },
2983 .caam = {
2984 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2985 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2986 OP_ALG_AAI_HMAC_PRECOMP,
2987 .geniv = true,
2988 },
2989 },
2990 {
2991 .aead.base = {
2992 .base = {
2993 .cra_name = "authenc(hmac(sha224),cbc(des))",
2994 .cra_driver_name = "authenc-hmac-sha224-"
2995 "cbc-des-caam",
2996 .cra_blocksize = DES_BLOCK_SIZE,
2997 },
2998 .setkey = aead_setkey,
2999 .setauthsize = aead_setauthsize,
3000 .encrypt = aead_encrypt,
3001 .decrypt = aead_decrypt,
3002 .ivsize = DES_BLOCK_SIZE,
3003 .maxauthsize = SHA224_DIGEST_SIZE,
3004 },
3005 .aead.op = {
3006 .do_one_request = aead_do_one_req,
3007 },
3008 .caam = {
3009 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3010 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3011 OP_ALG_AAI_HMAC_PRECOMP,
3012 },
3013 },
3014 {
3015 .aead.base = {
3016 .base = {
3017 .cra_name = "echainiv(authenc(hmac(sha224),"
3018 "cbc(des)))",
3019 .cra_driver_name = "echainiv-authenc-"
3020 "hmac-sha224-cbc-des-caam",
3021 .cra_blocksize = DES_BLOCK_SIZE,
3022 },
3023 .setkey = aead_setkey,
3024 .setauthsize = aead_setauthsize,
3025 .encrypt = aead_encrypt,
3026 .decrypt = aead_decrypt,
3027 .ivsize = DES_BLOCK_SIZE,
3028 .maxauthsize = SHA224_DIGEST_SIZE,
3029 },
3030 .aead.op = {
3031 .do_one_request = aead_do_one_req,
3032 },
3033 .caam = {
3034 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3035 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3036 OP_ALG_AAI_HMAC_PRECOMP,
3037 .geniv = true,
3038 },
3039 },
3040 {
3041 .aead.base = {
3042 .base = {
3043 .cra_name = "authenc(hmac(sha256),cbc(des))",
3044 .cra_driver_name = "authenc-hmac-sha256-"
3045 "cbc-des-caam",
3046 .cra_blocksize = DES_BLOCK_SIZE,
3047 },
3048 .setkey = aead_setkey,
3049 .setauthsize = aead_setauthsize,
3050 .encrypt = aead_encrypt,
3051 .decrypt = aead_decrypt,
3052 .ivsize = DES_BLOCK_SIZE,
3053 .maxauthsize = SHA256_DIGEST_SIZE,
3054 },
3055 .aead.op = {
3056 .do_one_request = aead_do_one_req,
3057 },
3058 .caam = {
3059 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3060 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3061 OP_ALG_AAI_HMAC_PRECOMP,
3062 },
3063 },
3064 {
3065 .aead.base = {
3066 .base = {
3067 .cra_name = "echainiv(authenc(hmac(sha256),"
3068 "cbc(des)))",
3069 .cra_driver_name = "echainiv-authenc-"
3070 "hmac-sha256-cbc-des-caam",
3071 .cra_blocksize = DES_BLOCK_SIZE,
3072 },
3073 .setkey = aead_setkey,
3074 .setauthsize = aead_setauthsize,
3075 .encrypt = aead_encrypt,
3076 .decrypt = aead_decrypt,
3077 .ivsize = DES_BLOCK_SIZE,
3078 .maxauthsize = SHA256_DIGEST_SIZE,
3079 },
3080 .aead.op = {
3081 .do_one_request = aead_do_one_req,
3082 },
3083 .caam = {
3084 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3085 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3086 OP_ALG_AAI_HMAC_PRECOMP,
3087 .geniv = true,
3088 },
3089 },
3090 {
3091 .aead.base = {
3092 .base = {
3093 .cra_name = "authenc(hmac(sha384),cbc(des))",
3094 .cra_driver_name = "authenc-hmac-sha384-"
3095 "cbc-des-caam",
3096 .cra_blocksize = DES_BLOCK_SIZE,
3097 },
3098 .setkey = aead_setkey,
3099 .setauthsize = aead_setauthsize,
3100 .encrypt = aead_encrypt,
3101 .decrypt = aead_decrypt,
3102 .ivsize = DES_BLOCK_SIZE,
3103 .maxauthsize = SHA384_DIGEST_SIZE,
3104 },
3105 .aead.op = {
3106 .do_one_request = aead_do_one_req,
3107 },
3108 .caam = {
3109 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3110 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3111 OP_ALG_AAI_HMAC_PRECOMP,
3112 },
3113 },
3114 {
3115 .aead.base = {
3116 .base = {
3117 .cra_name = "echainiv(authenc(hmac(sha384),"
3118 "cbc(des)))",
3119 .cra_driver_name = "echainiv-authenc-"
3120 "hmac-sha384-cbc-des-caam",
3121 .cra_blocksize = DES_BLOCK_SIZE,
3122 },
3123 .setkey = aead_setkey,
3124 .setauthsize = aead_setauthsize,
3125 .encrypt = aead_encrypt,
3126 .decrypt = aead_decrypt,
3127 .ivsize = DES_BLOCK_SIZE,
3128 .maxauthsize = SHA384_DIGEST_SIZE,
3129 },
3130 .aead.op = {
3131 .do_one_request = aead_do_one_req,
3132 },
3133 .caam = {
3134 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3135 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3136 OP_ALG_AAI_HMAC_PRECOMP,
3137 .geniv = true,
3138 },
3139 },
3140 {
3141 .aead.base = {
3142 .base = {
3143 .cra_name = "authenc(hmac(sha512),cbc(des))",
3144 .cra_driver_name = "authenc-hmac-sha512-"
3145 "cbc-des-caam",
3146 .cra_blocksize = DES_BLOCK_SIZE,
3147 },
3148 .setkey = aead_setkey,
3149 .setauthsize = aead_setauthsize,
3150 .encrypt = aead_encrypt,
3151 .decrypt = aead_decrypt,
3152 .ivsize = DES_BLOCK_SIZE,
3153 .maxauthsize = SHA512_DIGEST_SIZE,
3154 },
3155 .aead.op = {
3156 .do_one_request = aead_do_one_req,
3157 },
3158 .caam = {
3159 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3160 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3161 OP_ALG_AAI_HMAC_PRECOMP,
3162 },
3163 },
3164 {
3165 .aead.base = {
3166 .base = {
3167 .cra_name = "echainiv(authenc(hmac(sha512),"
3168 "cbc(des)))",
3169 .cra_driver_name = "echainiv-authenc-"
3170 "hmac-sha512-cbc-des-caam",
3171 .cra_blocksize = DES_BLOCK_SIZE,
3172 },
3173 .setkey = aead_setkey,
3174 .setauthsize = aead_setauthsize,
3175 .encrypt = aead_encrypt,
3176 .decrypt = aead_decrypt,
3177 .ivsize = DES_BLOCK_SIZE,
3178 .maxauthsize = SHA512_DIGEST_SIZE,
3179 },
3180 .aead.op = {
3181 .do_one_request = aead_do_one_req,
3182 },
3183 .caam = {
3184 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3185 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3186 OP_ALG_AAI_HMAC_PRECOMP,
3187 .geniv = true,
3188 },
3189 },
3190 {
3191 .aead.base = {
3192 .base = {
3193 .cra_name = "authenc(hmac(md5),"
3194 "rfc3686(ctr(aes)))",
3195 .cra_driver_name = "authenc-hmac-md5-"
3196 "rfc3686-ctr-aes-caam",
3197 .cra_blocksize = 1,
3198 },
3199 .setkey = aead_setkey,
3200 .setauthsize = aead_setauthsize,
3201 .encrypt = aead_encrypt,
3202 .decrypt = aead_decrypt,
3203 .ivsize = CTR_RFC3686_IV_SIZE,
3204 .maxauthsize = MD5_DIGEST_SIZE,
3205 },
3206 .aead.op = {
3207 .do_one_request = aead_do_one_req,
3208 },
3209 .caam = {
3210 .class1_alg_type = OP_ALG_ALGSEL_AES |
3211 OP_ALG_AAI_CTR_MOD128,
3212 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3213 OP_ALG_AAI_HMAC_PRECOMP,
3214 .rfc3686 = true,
3215 },
3216 },
3217 {
3218 .aead.base = {
3219 .base = {
3220 .cra_name = "seqiv(authenc("
3221 "hmac(md5),rfc3686(ctr(aes))))",
3222 .cra_driver_name = "seqiv-authenc-hmac-md5-"
3223 "rfc3686-ctr-aes-caam",
3224 .cra_blocksize = 1,
3225 },
3226 .setkey = aead_setkey,
3227 .setauthsize = aead_setauthsize,
3228 .encrypt = aead_encrypt,
3229 .decrypt = aead_decrypt,
3230 .ivsize = CTR_RFC3686_IV_SIZE,
3231 .maxauthsize = MD5_DIGEST_SIZE,
3232 },
3233 .aead.op = {
3234 .do_one_request = aead_do_one_req,
3235 },
3236 .caam = {
3237 .class1_alg_type = OP_ALG_ALGSEL_AES |
3238 OP_ALG_AAI_CTR_MOD128,
3239 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
3240 OP_ALG_AAI_HMAC_PRECOMP,
3241 .rfc3686 = true,
3242 .geniv = true,
3243 },
3244 },
3245 {
3246 .aead.base = {
3247 .base = {
3248 .cra_name = "authenc(hmac(sha1),"
3249 "rfc3686(ctr(aes)))",
3250 .cra_driver_name = "authenc-hmac-sha1-"
3251 "rfc3686-ctr-aes-caam",
3252 .cra_blocksize = 1,
3253 },
3254 .setkey = aead_setkey,
3255 .setauthsize = aead_setauthsize,
3256 .encrypt = aead_encrypt,
3257 .decrypt = aead_decrypt,
3258 .ivsize = CTR_RFC3686_IV_SIZE,
3259 .maxauthsize = SHA1_DIGEST_SIZE,
3260 },
3261 .aead.op = {
3262 .do_one_request = aead_do_one_req,
3263 },
3264 .caam = {
3265 .class1_alg_type = OP_ALG_ALGSEL_AES |
3266 OP_ALG_AAI_CTR_MOD128,
3267 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3268 OP_ALG_AAI_HMAC_PRECOMP,
3269 .rfc3686 = true,
3270 },
3271 },
3272 {
3273 .aead.base = {
3274 .base = {
3275 .cra_name = "seqiv(authenc("
3276 "hmac(sha1),rfc3686(ctr(aes))))",
3277 .cra_driver_name = "seqiv-authenc-hmac-sha1-"
3278 "rfc3686-ctr-aes-caam",
3279 .cra_blocksize = 1,
3280 },
3281 .setkey = aead_setkey,
3282 .setauthsize = aead_setauthsize,
3283 .encrypt = aead_encrypt,
3284 .decrypt = aead_decrypt,
3285 .ivsize = CTR_RFC3686_IV_SIZE,
3286 .maxauthsize = SHA1_DIGEST_SIZE,
3287 },
3288 .aead.op = {
3289 .do_one_request = aead_do_one_req,
3290 },
3291 .caam = {
3292 .class1_alg_type = OP_ALG_ALGSEL_AES |
3293 OP_ALG_AAI_CTR_MOD128,
3294 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3295 OP_ALG_AAI_HMAC_PRECOMP,
3296 .rfc3686 = true,
3297 .geniv = true,
3298 },
3299 },
3300 {
3301 .aead.base = {
3302 .base = {
3303 .cra_name = "authenc(hmac(sha224),"
3304 "rfc3686(ctr(aes)))",
3305 .cra_driver_name = "authenc-hmac-sha224-"
3306 "rfc3686-ctr-aes-caam",
3307 .cra_blocksize = 1,
3308 },
3309 .setkey = aead_setkey,
3310 .setauthsize = aead_setauthsize,
3311 .encrypt = aead_encrypt,
3312 .decrypt = aead_decrypt,
3313 .ivsize = CTR_RFC3686_IV_SIZE,
3314 .maxauthsize = SHA224_DIGEST_SIZE,
3315 },
3316 .aead.op = {
3317 .do_one_request = aead_do_one_req,
3318 },
3319 .caam = {
3320 .class1_alg_type = OP_ALG_ALGSEL_AES |
3321 OP_ALG_AAI_CTR_MOD128,
3322 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3323 OP_ALG_AAI_HMAC_PRECOMP,
3324 .rfc3686 = true,
3325 },
3326 },
3327 {
3328 .aead.base = {
3329 .base = {
3330 .cra_name = "seqiv(authenc("
3331 "hmac(sha224),rfc3686(ctr(aes))))",
3332 .cra_driver_name = "seqiv-authenc-hmac-sha224-"
3333 "rfc3686-ctr-aes-caam",
3334 .cra_blocksize = 1,
3335 },
3336 .setkey = aead_setkey,
3337 .setauthsize = aead_setauthsize,
3338 .encrypt = aead_encrypt,
3339 .decrypt = aead_decrypt,
3340 .ivsize = CTR_RFC3686_IV_SIZE,
3341 .maxauthsize = SHA224_DIGEST_SIZE,
3342 },
3343 .aead.op = {
3344 .do_one_request = aead_do_one_req,
3345 },
3346 .caam = {
3347 .class1_alg_type = OP_ALG_ALGSEL_AES |
3348 OP_ALG_AAI_CTR_MOD128,
3349 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3350 OP_ALG_AAI_HMAC_PRECOMP,
3351 .rfc3686 = true,
3352 .geniv = true,
3353 },
3354 },
3355 {
3356 .aead.base = {
3357 .base = {
3358 .cra_name = "authenc(hmac(sha256),"
3359 "rfc3686(ctr(aes)))",
3360 .cra_driver_name = "authenc-hmac-sha256-"
3361 "rfc3686-ctr-aes-caam",
3362 .cra_blocksize = 1,
3363 },
3364 .setkey = aead_setkey,
3365 .setauthsize = aead_setauthsize,
3366 .encrypt = aead_encrypt,
3367 .decrypt = aead_decrypt,
3368 .ivsize = CTR_RFC3686_IV_SIZE,
3369 .maxauthsize = SHA256_DIGEST_SIZE,
3370 },
3371 .aead.op = {
3372 .do_one_request = aead_do_one_req,
3373 },
3374 .caam = {
3375 .class1_alg_type = OP_ALG_ALGSEL_AES |
3376 OP_ALG_AAI_CTR_MOD128,
3377 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3378 OP_ALG_AAI_HMAC_PRECOMP,
3379 .rfc3686 = true,
3380 },
3381 },
3382 {
3383 .aead.base = {
3384 .base = {
3385 .cra_name = "seqiv(authenc(hmac(sha256),"
3386 "rfc3686(ctr(aes))))",
3387 .cra_driver_name = "seqiv-authenc-hmac-sha256-"
3388 "rfc3686-ctr-aes-caam",
3389 .cra_blocksize = 1,
3390 },
3391 .setkey = aead_setkey,
3392 .setauthsize = aead_setauthsize,
3393 .encrypt = aead_encrypt,
3394 .decrypt = aead_decrypt,
3395 .ivsize = CTR_RFC3686_IV_SIZE,
3396 .maxauthsize = SHA256_DIGEST_SIZE,
3397 },
3398 .aead.op = {
3399 .do_one_request = aead_do_one_req,
3400 },
3401 .caam = {
3402 .class1_alg_type = OP_ALG_ALGSEL_AES |
3403 OP_ALG_AAI_CTR_MOD128,
3404 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3405 OP_ALG_AAI_HMAC_PRECOMP,
3406 .rfc3686 = true,
3407 .geniv = true,
3408 },
3409 },
3410 {
3411 .aead.base = {
3412 .base = {
3413 .cra_name = "authenc(hmac(sha384),"
3414 "rfc3686(ctr(aes)))",
3415 .cra_driver_name = "authenc-hmac-sha384-"
3416 "rfc3686-ctr-aes-caam",
3417 .cra_blocksize = 1,
3418 },
3419 .setkey = aead_setkey,
3420 .setauthsize = aead_setauthsize,
3421 .encrypt = aead_encrypt,
3422 .decrypt = aead_decrypt,
3423 .ivsize = CTR_RFC3686_IV_SIZE,
3424 .maxauthsize = SHA384_DIGEST_SIZE,
3425 },
3426 .aead.op = {
3427 .do_one_request = aead_do_one_req,
3428 },
3429 .caam = {
3430 .class1_alg_type = OP_ALG_ALGSEL_AES |
3431 OP_ALG_AAI_CTR_MOD128,
3432 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3433 OP_ALG_AAI_HMAC_PRECOMP,
3434 .rfc3686 = true,
3435 },
3436 },
3437 {
3438 .aead.base = {
3439 .base = {
3440 .cra_name = "seqiv(authenc(hmac(sha384),"
3441 "rfc3686(ctr(aes))))",
3442 .cra_driver_name = "seqiv-authenc-hmac-sha384-"
3443 "rfc3686-ctr-aes-caam",
3444 .cra_blocksize = 1,
3445 },
3446 .setkey = aead_setkey,
3447 .setauthsize = aead_setauthsize,
3448 .encrypt = aead_encrypt,
3449 .decrypt = aead_decrypt,
3450 .ivsize = CTR_RFC3686_IV_SIZE,
3451 .maxauthsize = SHA384_DIGEST_SIZE,
3452 },
3453 .aead.op = {
3454 .do_one_request = aead_do_one_req,
3455 },
3456 .caam = {
3457 .class1_alg_type = OP_ALG_ALGSEL_AES |
3458 OP_ALG_AAI_CTR_MOD128,
3459 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3460 OP_ALG_AAI_HMAC_PRECOMP,
3461 .rfc3686 = true,
3462 .geniv = true,
3463 },
3464 },
3465 {
3466 .aead.base = {
3467 .base = {
3468 .cra_name = "authenc(hmac(sha512),"
3469 "rfc3686(ctr(aes)))",
3470 .cra_driver_name = "authenc-hmac-sha512-"
3471 "rfc3686-ctr-aes-caam",
3472 .cra_blocksize = 1,
3473 },
3474 .setkey = aead_setkey,
3475 .setauthsize = aead_setauthsize,
3476 .encrypt = aead_encrypt,
3477 .decrypt = aead_decrypt,
3478 .ivsize = CTR_RFC3686_IV_SIZE,
3479 .maxauthsize = SHA512_DIGEST_SIZE,
3480 },
3481 .aead.op = {
3482 .do_one_request = aead_do_one_req,
3483 },
3484 .caam = {
3485 .class1_alg_type = OP_ALG_ALGSEL_AES |
3486 OP_ALG_AAI_CTR_MOD128,
3487 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3488 OP_ALG_AAI_HMAC_PRECOMP,
3489 .rfc3686 = true,
3490 },
3491 },
3492 {
3493 .aead.base = {
3494 .base = {
3495 .cra_name = "seqiv(authenc(hmac(sha512),"
3496 "rfc3686(ctr(aes))))",
3497 .cra_driver_name = "seqiv-authenc-hmac-sha512-"
3498 "rfc3686-ctr-aes-caam",
3499 .cra_blocksize = 1,
3500 },
3501 .setkey = aead_setkey,
3502 .setauthsize = aead_setauthsize,
3503 .encrypt = aead_encrypt,
3504 .decrypt = aead_decrypt,
3505 .ivsize = CTR_RFC3686_IV_SIZE,
3506 .maxauthsize = SHA512_DIGEST_SIZE,
3507 },
3508 .aead.op = {
3509 .do_one_request = aead_do_one_req,
3510 },
3511 .caam = {
3512 .class1_alg_type = OP_ALG_ALGSEL_AES |
3513 OP_ALG_AAI_CTR_MOD128,
3514 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3515 OP_ALG_AAI_HMAC_PRECOMP,
3516 .rfc3686 = true,
3517 .geniv = true,
3518 },
3519 },
3520 {
3521 .aead.base = {
3522 .base = {
3523 .cra_name = "rfc7539(chacha20,poly1305)",
3524 .cra_driver_name = "rfc7539-chacha20-poly1305-"
3525 "caam",
3526 .cra_blocksize = 1,
3527 },
3528 .setkey = chachapoly_setkey,
3529 .setauthsize = chachapoly_setauthsize,
3530 .encrypt = chachapoly_encrypt,
3531 .decrypt = chachapoly_decrypt,
3532 .ivsize = CHACHAPOLY_IV_SIZE,
3533 .maxauthsize = POLY1305_DIGEST_SIZE,
3534 },
3535 .aead.op = {
3536 .do_one_request = aead_do_one_req,
3537 },
3538 .caam = {
3539 .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
3540 OP_ALG_AAI_AEAD,
3541 .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
3542 OP_ALG_AAI_AEAD,
3543 .nodkp = true,
3544 },
3545 },
3546 {
3547 .aead.base = {
3548 .base = {
3549 .cra_name = "rfc7539esp(chacha20,poly1305)",
3550 .cra_driver_name = "rfc7539esp-chacha20-"
3551 "poly1305-caam",
3552 .cra_blocksize = 1,
3553 },
3554 .setkey = chachapoly_setkey,
3555 .setauthsize = chachapoly_setauthsize,
3556 .encrypt = chachapoly_encrypt,
3557 .decrypt = chachapoly_decrypt,
3558 .ivsize = 8,
3559 .maxauthsize = POLY1305_DIGEST_SIZE,
3560 },
3561 .aead.op = {
3562 .do_one_request = aead_do_one_req,
3563 },
3564 .caam = {
3565 .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
3566 OP_ALG_AAI_AEAD,
3567 .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
3568 OP_ALG_AAI_AEAD,
3569 .nodkp = true,
3570 },
3571 },
3572 };
3573
3574 static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
3575 bool uses_dkp)
3576 {
3577 dma_addr_t dma_addr;
3578 struct caam_drv_private *priv;
3579 const size_t sh_desc_enc_offset = offsetof(struct caam_ctx,
3580 sh_desc_enc);
3581
3582 ctx->jrdev = caam_jr_alloc();
3583 if (IS_ERR(ctx->jrdev)) {
3584 pr_err("Job Ring Device allocation for transform failed\n");
3585 return PTR_ERR(ctx->jrdev);
3586 }
3587
3588 priv = dev_get_drvdata(ctx->jrdev->parent);
3589 if (priv->era >= 6 && uses_dkp)
3590 ctx->dir = DMA_BIDIRECTIONAL;
3591 else
3592 ctx->dir = DMA_TO_DEVICE;
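/*
 * Era >= 6 shared descriptors use the DKP (Derived Key Protocol), which
 * overwrites the key material inside the context in place, hence the
 * bidirectional mapping; otherwise the device only ever reads the
 * context.
 */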
3593
3594 dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
3595 offsetof(struct caam_ctx,
3596 sh_desc_enc_dma) -
3597 sh_desc_enc_offset,
3598 ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
3599 if (dma_mapping_error(ctx->jrdev, dma_addr)) {
3600 dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
3601 caam_jr_free(ctx->jrdev);
3602 return -ENOMEM;
3603 }
3604
3605 ctx->sh_desc_enc_dma = dma_addr;
3606 ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
3607 sh_desc_dec) -
3608 sh_desc_enc_offset;
3609 ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key) -
3610 sh_desc_enc_offset;
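/*
 * All three DMA addresses point into the single mapping created above,
 * which covers the contiguous sh_desc_enc, sh_desc_dec and key members
 * of struct caam_ctx.
 */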
3611
3612 /* copy descriptor header template value */
3613 ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
3614 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
3615
3616 return 0;
3617 }
3618
3619 static int caam_cra_init(struct crypto_skcipher *tfm)
3620 {
3621 struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
3622 struct caam_skcipher_alg *caam_alg =
3623 container_of(alg, typeof(*caam_alg), skcipher.base);
3624 struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
3625 u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
3626 int ret = 0;
3627
3628 if (alg_aai == OP_ALG_AAI_XTS) {
3629 const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
3630 struct crypto_skcipher *fallback;
3631
3632 fallback = crypto_alloc_skcipher(tfm_name, 0,
3633 CRYPTO_ALG_NEED_FALLBACK);
3634 if (IS_ERR(fallback)) {
3635 pr_err("Failed to allocate %s fallback: %ld\n",
3636 tfm_name, PTR_ERR(fallback));
3637 return PTR_ERR(fallback);
3638 }
3639
3640 ctx->fallback = fallback;
3641 crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) +
3642 crypto_skcipher_reqsize(fallback));
3643 } else {
3644 crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx));
3645 }
3646
3647 ret = caam_init_common(ctx, &caam_alg->caam, false);
3648 if (ret && ctx->fallback)
3649 crypto_free_skcipher(ctx->fallback);
3650
3651 return ret;
3652 }
3653
3654 static int caam_aead_init(struct crypto_aead *tfm)
3655 {
3656 struct aead_alg *alg = crypto_aead_alg(tfm);
3657 struct caam_aead_alg *caam_alg =
3658 container_of(alg, struct caam_aead_alg, aead.base);
3659 struct caam_ctx *ctx = crypto_aead_ctx_dma(tfm);
3660
3661 crypto_aead_set_reqsize(tfm, sizeof(struct caam_aead_req_ctx));
3662
3663 return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp);
3664 }
3665
3666 static void caam_exit_common(struct caam_ctx *ctx)
3667 {
3668 dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
3669 offsetof(struct caam_ctx, sh_desc_enc_dma) -
3670 offsetof(struct caam_ctx, sh_desc_enc),
3671 ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
3672 caam_jr_free(ctx->jrdev);
3673 }
3674
3675 static void caam_cra_exit(struct crypto_skcipher *tfm)
3676 {
3677 struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
3678
3679 if (ctx->fallback)
3680 crypto_free_skcipher(ctx->fallback);
3681 caam_exit_common(ctx);
3682 }
3683
3684 static void caam_aead_exit(struct crypto_aead *tfm)
3685 {
3686 caam_exit_common(crypto_aead_ctx_dma(tfm));
3687 }
3688
3689 void caam_algapi_exit(void)
3690 {
3691 int i;
3692
3693 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
3694 struct caam_aead_alg *t_alg = driver_aeads + i;
3695
3696 if (t_alg->registered)
3697 crypto_engine_unregister_aead(&t_alg->aead);
3698 }
3699
3700 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3701 struct caam_skcipher_alg *t_alg = driver_algs + i;
3702
3703 if (t_alg->registered)
3704 crypto_engine_unregister_skcipher(&t_alg->skcipher);
3705 }
3706 }
3707
3708 static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
3709 {
3710 struct skcipher_alg *alg = &t_alg->skcipher.base;
3711
3712 alg->base.cra_module = THIS_MODULE;
3713 alg->base.cra_priority = CAAM_CRA_PRIORITY;
3714 alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
3715 alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
3716 CRYPTO_ALG_KERN_DRIVER_ONLY);
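/*
 * OR-ed rather than assigned: the xts(aes) entry already carries
 * CRYPTO_ALG_NEED_FALLBACK, which must be preserved.
 */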
3717
3718 alg->init = caam_cra_init;
3719 alg->exit = caam_cra_exit;
3720 }
3721
3722 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
3723 {
3724 struct aead_alg *alg = &t_alg->aead.base;
3725
3726 alg->base.cra_module = THIS_MODULE;
3727 alg->base.cra_priority = CAAM_CRA_PRIORITY;
3728 alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
3729 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
3730 CRYPTO_ALG_KERN_DRIVER_ONLY;
3731
3732 alg->init = caam_aead_init;
3733 alg->exit = caam_aead_exit;
3734 }
3735
3736 int caam_algapi_init(struct device *ctrldev)
3737 {
3738 struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
3739 int i = 0, err = 0;
3740 u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
3741 unsigned int md_limit = SHA512_DIGEST_SIZE;
3742 bool registered = false, gcm_support;
3743
3744 /*
3745 * Register crypto algorithms the device supports.
3746 * First, detect presence and attributes of DES, AES, and MD blocks.
3747 */
3748 if (priv->era < 10) {
3749 struct caam_perfmon __iomem *perfmon = &priv->jr[0]->perfmon;
3750 u32 cha_vid, cha_inst, aes_rn;
3751
3752 cha_vid = rd_reg32(&perfmon->cha_id_ls);
3753 aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
3754 md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
3755
3756 cha_inst = rd_reg32(&perfmon->cha_num_ls);
3757 des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
3758 CHA_ID_LS_DES_SHIFT;
3759 aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
3760 md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
3761 ccha_inst = 0;
3762 ptha_inst = 0;
3763
3764 aes_rn = rd_reg32(&perfmon->cha_rev_ls) & CHA_ID_LS_AES_MASK;
3765 gcm_support = !(aes_vid == CHA_VER_VID_AES_LP && aes_rn < 8);
3766 } else {
3767 struct version_regs __iomem *vreg = &priv->jr[0]->vreg;
3768 u32 aesa, mdha;
3769
3770 aesa = rd_reg32(&vreg->aesa);
3771 mdha = rd_reg32(&vreg->mdha);
3772
3773 aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
3774 md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
3775
3776 des_inst = rd_reg32(&vreg->desa) & CHA_VER_NUM_MASK;
3777 aes_inst = aesa & CHA_VER_NUM_MASK;
3778 md_inst = mdha & CHA_VER_NUM_MASK;
3779 ccha_inst = rd_reg32(&vreg->ccha) & CHA_VER_NUM_MASK;
3780 ptha_inst = rd_reg32(&vreg->ptha) & CHA_VER_NUM_MASK;
3781
3782 gcm_support = aesa & CHA_VER_MISC_AES_GCM;
3783 }
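/*
 * Illustrative outcome (an assumption about one possible SoC, not
 * derived from this code alone): an Era 4 part with LP AES and no
 * CCHA/PTHA takes the first branch, leaves ccha_inst = ptha_inst = 0
 * and makes gcm_support depend on the AES revision, so the
 * ChaCha20/Poly1305 entries, and possibly GCM, are skipped below.
 */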
3784
3785 /* If MD is present, limit digest size based on LP256 */
3786 if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
3787 md_limit = SHA256_DIGEST_SIZE;
3788
3789 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3790 struct caam_skcipher_alg *t_alg = driver_algs + i;
3791 u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
3792
3793 /* Skip DES algorithms if not supported by device */
3794 if (!des_inst &&
3795 ((alg_sel == OP_ALG_ALGSEL_3DES) ||
3796 (alg_sel == OP_ALG_ALGSEL_DES)))
3797 continue;
3798
3799 /* Skip AES algorithms if not supported by device */
3800 if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
3801 continue;
3802
3803 /*
3804 * Check support for AES modes not available
3805 * on LP devices.
3806 */
3807 if (aes_vid == CHA_VER_VID_AES_LP &&
3808 (t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK) ==
3809 OP_ALG_AAI_XTS)
3810 continue;
3811
3812 caam_skcipher_alg_init(t_alg);
3813
3814 err = crypto_engine_register_skcipher(&t_alg->skcipher);
3815 if (err) {
3816 pr_warn("%s alg registration failed\n",
3817 t_alg->skcipher.base.base.cra_driver_name);
3818 continue;
3819 }
3820
3821 t_alg->registered = true;
3822 registered = true;
3823 }
3824
3825 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
3826 struct caam_aead_alg *t_alg = driver_aeads + i;
3827 u32 c1_alg_sel = t_alg->caam.class1_alg_type &
3828 OP_ALG_ALGSEL_MASK;
3829 u32 c2_alg_sel = t_alg->caam.class2_alg_type &
3830 OP_ALG_ALGSEL_MASK;
3831 u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
3832
3833 /* Skip DES algorithms if not supported by device */
3834 if (!des_inst &&
3835 ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
3836 (c1_alg_sel == OP_ALG_ALGSEL_DES)))
3837 continue;
3838
3839 /* Skip AES algorithms if not supported by device */
3840 if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
3841 continue;
3842
3843 /* Skip CHACHA20 algorithms if not supported by device */
3844 if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 && !ccha_inst)
3845 continue;
3846
3847 /* Skip POLY1305 algorithms if not supported by device */
3848 if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 && !ptha_inst)
3849 continue;
3850
3851 /* Skip GCM algorithms if not supported by device */
3852 if (c1_alg_sel == OP_ALG_ALGSEL_AES &&
3853 alg_aai == OP_ALG_AAI_GCM && !gcm_support)
3854 continue;
3855
3856 /*
3857 * Skip algorithms requiring message digests
3858 * if MD or MD size is not supported by device.
3859 */
3860 if (is_mdha(c2_alg_sel) &&
3861 (!md_inst || t_alg->aead.base.maxauthsize > md_limit))
3862 continue;
3863
3864 caam_aead_alg_init(t_alg);
3865
3866 err = crypto_engine_register_aead(&t_alg->aead);
3867 if (err) {
3868 pr_warn("%s alg registration failed\n",
3869 t_alg->aead.base.base.cra_driver_name);
3870 continue;
3871 }
3872
3873 t_alg->registered = true;
3874 registered = true;
3875 }
3876
3877 if (registered)
3878 pr_info("caam algorithms registered in /proc/crypto\n");
3879
3880 return err;
3881 }
3882