1 // SPDX-License-Identifier: GPL-2.0-only
2 // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 /*
4 * Crypto driver to handle block cipher algorithms using NVIDIA Security Engine.
5 */
6
7 #include <linux/clk.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/module.h>
10 #include <linux/of_device.h>
11 #include <linux/platform_device.h>
12
13 #include <crypto/aead.h>
14 #include <crypto/aes.h>
15 #include <crypto/engine.h>
16 #include <crypto/gcm.h>
17 #include <crypto/scatterwalk.h>
18 #include <crypto/xts.h>
19 #include <crypto/internal/aead.h>
20 #include <crypto/internal/hash.h>
21 #include <crypto/internal/skcipher.h>
22
23 #include "tegra-se.h"
24
/* Per-transform (tfm) state for AES skcipher modes (CBC/ECB/CTR/XTS) */
struct tegra_aes_ctx {
	struct tegra_se *se;		/* owning SE device instance */
	u32 alg;			/* SE_ALG_* id, resolved from cra_name at init */
	u32 ivsize;			/* transform IV size in bytes */
	u32 key1_id;			/* HW keyslot id for key1; 0 = no slot held */
	u32 key2_id;			/* HW keyslot id for key2 (XTS tweak); 0 = no slot */
	u32 keylen;			/* length of the software key copy below */
	u8 key1[AES_MAX_KEY_SIZE];	/* raw key copy, loaded per-request when key1_id == 0 */
	u8 key2[AES_MAX_KEY_SIZE];	/* raw XTS second-key copy, used when key2_id == 0 */
};
35
/* Per-request state for AES skcipher operations */
struct tegra_aes_reqctx {
	struct tegra_se_datbuf datbuf;	/* DMA-coherent bounce buffer (in-place src/dst) */
	bool encrypt;			/* true = encrypt, false = decrypt */
	u32 config;			/* SE config register value */
	u32 crypto_config;		/* SE crypto config value, incl. keyslot index */
	u32 len;			/* cryptlen, padded up to AES block size (non-XTS) */
	u32 *iv;			/* IV words to program, NULL for ECB */
};
44
/* Per-transform state for AEAD (GCM/CCM) operations */
struct tegra_aead_ctx {
	struct tegra_se *se;		/* owning SE device instance */
	unsigned int authsize;		/* authentication tag length in bytes */
	u32 alg;			/* SE_ALG_* id */
	u32 key_id;			/* HW keyslot id; 0 = no slot held */
	u32 keylen;			/* length of the software key copy below */
	u8 key[AES_MAX_KEY_SIZE];	/* raw key copy (consumer outside this chunk) */
};
53
/* Per-request state for AEAD (GCM/CCM) operations */
struct tegra_aead_reqctx {
	struct tegra_se_datbuf inbuf;	/* DMA input staging buffer */
	struct tegra_se_datbuf outbuf;	/* DMA output/tag buffer */
	struct scatterlist *src_sg;	/* request source scatterlist */
	struct scatterlist *dst_sg;	/* request destination scatterlist */
	unsigned int assoclen;		/* associated data length */
	unsigned int cryptlen;		/* payload length */
	unsigned int authsize;		/* tag length for this request */
	bool encrypt;			/* true = encrypt, false = decrypt */
	u32 crypto_config;		/* SE crypto config value, incl. keyslot index */
	u32 config;			/* SE config register value */
	u32 key_id;			/* keyslot used for this request */
	u32 iv[4];			/* 128-bit IV/counter words */
	u8 authdata[16];		/* computed or expected MAC (CCM path) */
};
69
/* Per-transform state for CMAC (implementation continues outside this chunk) */
struct tegra_cmac_ctx {
	struct tegra_se *se;		/* owning SE device instance */
	unsigned int alg;		/* SE_ALG_* id */
	u32 key_id;			/* HW keyslot id; 0 = no slot held */
	u32 keylen;			/* length of the software key copy below */
	u8 key[AES_MAX_KEY_SIZE];	/* raw key copy */
	struct crypto_shash *fallback_tfm;	/* software fallback; used outside this chunk */
};
78
/* Per-request state for CMAC (implementation continues outside this chunk) */
struct tegra_cmac_reqctx {
	struct scatterlist *src_sg;	/* request source scatterlist */
	struct tegra_se_datbuf datbuf;	/* DMA data buffer */
	struct tegra_se_datbuf residue;	/* partial-block carry-over between updates */
	unsigned int total_len;		/* total bytes hashed so far */
	unsigned int blk_size;		/* hash block size */
	unsigned int task;		/* pending task flags — see CMAC code (not in this chunk) */
	u32 crypto_config;		/* SE crypto config value */
	u32 config;			/* SE config register value */
	u32 key_id;			/* keyslot used for this request */
	u32 *iv;			/* IV/state words to program */
	u32 result[CMAC_RESULT_REG_COUNT];	/* intermediate/final MAC words */
};
92
93 /* increment counter (128-bit int) */
ctr_iv_inc(__u8 * counter,__u8 bits,__u32 nums)94 static void ctr_iv_inc(__u8 *counter, __u8 bits, __u32 nums)
95 {
96 do {
97 --bits;
98 nums += counter[bits];
99 counter[bits] = nums & 0xff;
100 nums >>= 8;
101 } while (bits && nums);
102 }
103
tegra_cbc_iv_copyback(struct skcipher_request * req,struct tegra_aes_ctx * ctx)104 static void tegra_cbc_iv_copyback(struct skcipher_request *req, struct tegra_aes_ctx *ctx)
105 {
106 struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req);
107 unsigned int offset;
108
109 offset = req->cryptlen - ctx->ivsize;
110
111 if (rctx->encrypt)
112 memcpy(req->iv, rctx->datbuf.buf + offset, ctx->ivsize);
113 else
114 scatterwalk_map_and_copy(req->iv, req->src, offset, ctx->ivsize, 0);
115 }
116
tegra_aes_update_iv(struct skcipher_request * req,struct tegra_aes_ctx * ctx)117 static void tegra_aes_update_iv(struct skcipher_request *req, struct tegra_aes_ctx *ctx)
118 {
119 int num;
120
121 if (ctx->alg == SE_ALG_CBC) {
122 tegra_cbc_iv_copyback(req, ctx);
123 } else if (ctx->alg == SE_ALG_CTR) {
124 num = req->cryptlen / ctx->ivsize;
125 if (req->cryptlen % ctx->ivsize)
126 num++;
127
128 ctr_iv_inc(req->iv, ctx->ivsize, num);
129 }
130 }
131
/*
 * Translate an SE_ALG_* id and direction into the SE crypto config
 * register value. MAC/GCM phases need no crypto config here.
 */
static int tegra234_aes_crypto_cfg(u32 alg, bool encrypt)
{
	switch (alg) {
	case SE_ALG_CMAC:
	case SE_ALG_GMAC:
	case SE_ALG_GCM:
	case SE_ALG_GCM_FINAL:
		return 0;
	case SE_ALG_CBC:
		return encrypt ? SE_CRYPTO_CFG_CBC_ENCRYPT : SE_CRYPTO_CFG_CBC_DECRYPT;
	case SE_ALG_ECB:
		return encrypt ? SE_CRYPTO_CFG_ECB_ENCRYPT : SE_CRYPTO_CFG_ECB_DECRYPT;
	case SE_ALG_XTS:
		return encrypt ? SE_CRYPTO_CFG_XTS_ENCRYPT : SE_CRYPTO_CFG_XTS_DECRYPT;
	case SE_ALG_CTR:
		return SE_CRYPTO_CFG_CTR;
	case SE_ALG_CBC_MAC:
		return SE_CRYPTO_CFG_CBC_MAC;
	default:
		return -EINVAL;
	}
}
167
/* Translate an SE_ALG_* id and direction into the SE config register value. */
static int tegra234_aes_cfg(u32 alg, bool encrypt)
{
	switch (alg) {
	case SE_ALG_CBC:
	case SE_ALG_ECB:
	case SE_ALG_XTS:
	case SE_ALG_CTR:
		return encrypt ? SE_CFG_AES_ENCRYPT : SE_CFG_AES_DECRYPT;
	case SE_ALG_GMAC:
		return encrypt ? SE_CFG_GMAC_ENCRYPT : SE_CFG_GMAC_DECRYPT;
	case SE_ALG_GCM:
		return encrypt ? SE_CFG_GCM_ENCRYPT : SE_CFG_GCM_DECRYPT;
	case SE_ALG_GCM_FINAL:
		return encrypt ? SE_CFG_GCM_FINAL_ENCRYPT : SE_CFG_GCM_FINAL_DECRYPT;
	case SE_ALG_CMAC:
		return SE_CFG_CMAC;
	case SE_ALG_CBC_MAC:
		/* CBC-MAC: AES encrypt with the result routed to the hash registers */
		return SE_AES_ENC_ALG_AES_ENC |
		       SE_AES_DST_HASH_REG;
	default:
		return -EINVAL;
	}
}
207
/*
 * Build the host1x command stream for one AES cipher operation.
 * The data is processed in place in rctx->datbuf (source and destination
 * program the same DMA address). Returns the number of 32-bit words
 * written to the command buffer.
 */
static unsigned int tegra_aes_prep_cmd(struct tegra_aes_ctx *ctx,
				       struct tegra_aes_reqctx *rctx)
{
	unsigned int data_count, res_bits, i = 0, j;
	struct tegra_se *se = ctx->se;
	u32 *cpuvaddr = se->cmdbuf->addr;
	dma_addr_t addr = rctx->datbuf.addr;

	data_count = rctx->len / AES_BLOCK_SIZE;
	res_bits = (rctx->len % AES_BLOCK_SIZE) * 8;

	/*
	 * Hardware processes data_count + 1 blocks.
	 * Reduce 1 block if there is no residue
	 */
	if (!res_bits)
		data_count--;

	/* Program the IV / counter registers (skipped for ECB, iv == NULL) */
	if (rctx->iv) {
		cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
		cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
		for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
			cpuvaddr[i++] = rctx->iv[j];
	}

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
	cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
			SE_LAST_BLOCK_RES_BITS(res_bits);

	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
	cpuvaddr[i++] = rctx->config;
	cpuvaddr[i++] = rctx->crypto_config;

	/* Source address setting */
	cpuvaddr[i++] = lower_32_bits(addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(addr)) | SE_ADDR_HI_SZ(rctx->len);

	/* Destination address setting */
	cpuvaddr[i++] = lower_32_bits(addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(addr)) |
			SE_ADDR_HI_SZ(rctx->len);

	/* Start the operation */
	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
	cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_LASTBUF |
			SE_AES_OP_START;

	/* Increment the syncpoint on completion */
	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);

	dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);

	return i;
}
262
tegra_aes_do_one_req(struct crypto_engine * engine,void * areq)263 static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
264 {
265 struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
266 struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
267 struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req);
268 struct tegra_se *se = ctx->se;
269 unsigned int cmdlen, key1_id, key2_id;
270 int ret;
271
272 rctx->iv = (ctx->alg == SE_ALG_ECB) ? NULL : (u32 *)req->iv;
273 rctx->len = req->cryptlen;
274 key1_id = ctx->key1_id;
275 key2_id = ctx->key2_id;
276
277 /* Pad input to AES Block size */
278 if (ctx->alg != SE_ALG_XTS) {
279 if (rctx->len % AES_BLOCK_SIZE)
280 rctx->len += AES_BLOCK_SIZE - (rctx->len % AES_BLOCK_SIZE);
281 }
282
283 rctx->datbuf.size = rctx->len;
284 rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size,
285 &rctx->datbuf.addr, GFP_KERNEL);
286 if (!rctx->datbuf.buf) {
287 ret = -ENOMEM;
288 goto out_finalize;
289 }
290
291 scatterwalk_map_and_copy(rctx->datbuf.buf, req->src, 0, req->cryptlen, 0);
292
293 rctx->config = tegra234_aes_cfg(ctx->alg, rctx->encrypt);
294 rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, rctx->encrypt);
295
296 if (!key1_id) {
297 ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key1,
298 ctx->keylen, ctx->alg, &key1_id);
299 if (ret)
300 goto out;
301 }
302
303 rctx->crypto_config |= SE_AES_KEY_INDEX(key1_id);
304
305 if (ctx->alg == SE_ALG_XTS) {
306 if (!key2_id) {
307 ret = tegra_key_submit_reserved_xts(ctx->se, ctx->key2,
308 ctx->keylen, ctx->alg, &key2_id);
309 if (ret)
310 goto out;
311 }
312
313 rctx->crypto_config |= SE_AES_KEY2_INDEX(key2_id);
314 }
315
316 /* Prepare the command and submit for execution */
317 cmdlen = tegra_aes_prep_cmd(ctx, rctx);
318 ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
319
320 /* Copy the result */
321 tegra_aes_update_iv(req, ctx);
322 scatterwalk_map_and_copy(rctx->datbuf.buf, req->dst, 0, req->cryptlen, 1);
323
324 out:
325 /* Free the buffer */
326 dma_free_coherent(ctx->se->dev, rctx->datbuf.size,
327 rctx->datbuf.buf, rctx->datbuf.addr);
328
329 if (tegra_key_is_reserved(key1_id))
330 tegra_key_invalidate_reserved(ctx->se, key1_id, ctx->alg);
331
332 if (tegra_key_is_reserved(key2_id))
333 tegra_key_invalidate_reserved(ctx->se, key2_id, ctx->alg);
334
335 out_finalize:
336 crypto_finalize_skcipher_request(se->engine, req, ret);
337
338 return 0;
339 }
340
tegra_aes_cra_init(struct crypto_skcipher * tfm)341 static int tegra_aes_cra_init(struct crypto_skcipher *tfm)
342 {
343 struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
344 struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
345 struct tegra_se_alg *se_alg;
346 const char *algname;
347 int ret;
348
349 se_alg = container_of(alg, struct tegra_se_alg, alg.skcipher.base);
350
351 crypto_skcipher_set_reqsize(tfm, sizeof(struct tegra_aes_reqctx));
352
353 ctx->ivsize = crypto_skcipher_ivsize(tfm);
354 ctx->se = se_alg->se_dev;
355 ctx->key1_id = 0;
356 ctx->key2_id = 0;
357 ctx->keylen = 0;
358
359 algname = crypto_tfm_alg_name(&tfm->base);
360 ret = se_algname_to_algid(algname);
361 if (ret < 0) {
362 dev_err(ctx->se->dev, "invalid algorithm\n");
363 return ret;
364 }
365
366 ctx->alg = ret;
367
368 return 0;
369 }
370
tegra_aes_cra_exit(struct crypto_skcipher * tfm)371 static void tegra_aes_cra_exit(struct crypto_skcipher *tfm)
372 {
373 struct tegra_aes_ctx *ctx = crypto_tfm_ctx(&tfm->base);
374
375 if (ctx->key1_id)
376 tegra_key_invalidate(ctx->se, ctx->key1_id, ctx->alg);
377
378 if (ctx->key2_id)
379 tegra_key_invalidate(ctx->se, ctx->key2_id, ctx->alg);
380 }
381
/*
 * Load an AES key for the CBC/ECB/CTR transforms.
 *
 * The key is normally pushed into a hardware keyslot right away. If
 * tegra_key_submit() fails (no free slot), a software copy of the key is
 * kept instead; tegra_aes_do_one_req() then loads it into a reserved
 * keyslot for the duration of each request. That is why a non-zero
 * return from tegra_key_submit() is not treated as fatal here.
 */
static int tegra_aes_setkey(struct crypto_skcipher *tfm,
			    const u8 *key, u32 keylen)
{
	struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	if (aes_check_keylen(keylen)) {
		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
		return -EINVAL;
	}

	ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id);
	if (ret) {
		/* Fall back to a software key copy, consumed per-request */
		ctx->keylen = keylen;
		memcpy(ctx->key1, key, keylen);
	}

	return 0;
}
401
/*
 * Load an XTS key pair (cipher key + tweak key, each keylen/2 bytes).
 *
 * As in tegra_aes_setkey(), a failed tegra_key_submit() is not fatal:
 * the half-key is stashed in the context and loaded into a reserved
 * keyslot per request by tegra_aes_do_one_req().
 */
static int tegra_xts_setkey(struct crypto_skcipher *tfm,
			    const u8 *key, u32 keylen)
{
	struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	u32 len = keylen / 2;
	int ret;

	ret = xts_verify_key(tfm, key, keylen);
	if (ret || aes_check_keylen(len)) {
		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
		return -EINVAL;
	}

	/* First half: the data-encryption key */
	ret = tegra_key_submit(ctx->se, key, len,
			       ctx->alg, &ctx->key1_id);
	if (ret) {
		ctx->keylen = len;
		memcpy(ctx->key1, key, len);
	}

	/* Second half: the tweak key */
	ret = tegra_key_submit(ctx->se, key + len, len,
			       ctx->alg, &ctx->key2_id);
	if (ret) {
		ctx->keylen = len;
		memcpy(ctx->key2, key + len, len);
	}

	return 0;
}
431
/*
 * Compose the KAC (Key Access Control) manifest word for a key: owner,
 * allowed purpose and key size. Returns the manifest, or -EINVAL for an
 * unsupported algorithm or key length.
 */
static int tegra_aes_kac_manifest(u32 user, u32 alg, u32 keylen)
{
	int manifest = SE_KAC_USER_NS;

	switch (alg) {
	case SE_ALG_CBC:
	case SE_ALG_ECB:
	case SE_ALG_CTR:
	case SE_ALG_CBC_MAC:
		manifest |= SE_KAC_ENC;
		break;
	case SE_ALG_XTS:
		manifest |= SE_KAC_XTS;
		break;
	case SE_ALG_GCM:
		manifest |= SE_KAC_GCM;
		break;
	case SE_ALG_CMAC:
		manifest |= SE_KAC_CMAC;
		break;
	default:
		return -EINVAL;
	}

	switch (keylen) {
	case AES_KEYSIZE_128:
		return manifest | SE_KAC_SIZE_128;
	case AES_KEYSIZE_192:
		return manifest | SE_KAC_SIZE_192;
	case AES_KEYSIZE_256:
		return manifest | SE_KAC_SIZE_256;
	default:
		return -EINVAL;
	}
}
476
/* Validate request length and queue the request on the crypto engine. */
static int tegra_aes_crypt(struct skcipher_request *req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req);
	bool bad_len;

	/* XTS needs at least one block; other modes need block alignment */
	if (ctx->alg == SE_ALG_XTS)
		bad_len = req->cryptlen < XTS_BLOCK_SIZE;
	else
		bad_len = !IS_ALIGNED(req->cryptlen, crypto_skcipher_blocksize(tfm));

	if (bad_len) {
		dev_dbg(ctx->se->dev, "invalid length (%d)", req->cryptlen);
		return -EINVAL;
	}

	/* Zero-length requests are a no-op */
	if (!req->cryptlen)
		return 0;

	rctx->encrypt = encrypt;

	return crypto_transfer_skcipher_request_to_engine(ctx->se->engine, req);
}
505
/* skcipher .encrypt entry point */
static int tegra_aes_encrypt(struct skcipher_request *req)
{
	return tegra_aes_crypt(req, true);
}
510
/* skcipher .decrypt entry point */
static int tegra_aes_decrypt(struct skcipher_request *req)
{
	return tegra_aes_crypt(req, false);
}
515
/*
 * AES skcipher algorithm registrations (CBC, ECB, CTR, XTS), all backed
 * by tegra_aes_do_one_req() on the crypto engine.
 */
static struct tegra_se_alg tegra_aes_algs[] = {
	{
		/* cbc(aes) */
		.alg.skcipher.op.do_one_request = tegra_aes_do_one_req,
		.alg.skcipher.base = {
			.init = tegra_aes_cra_init,
			.exit = tegra_aes_cra_exit,
			.setkey = tegra_aes_setkey,
			.encrypt = tegra_aes_encrypt,
			.decrypt = tegra_aes_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.base = {
				.cra_name = "cbc(aes)",
				.cra_driver_name = "cbc-aes-tegra",
				.cra_priority = 500,
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct tegra_aes_ctx),
				.cra_alignmask = 0xf,
				.cra_module = THIS_MODULE,
			},
		}
	}, {
		/* ecb(aes): no IV */
		.alg.skcipher.op.do_one_request = tegra_aes_do_one_req,
		.alg.skcipher.base = {
			.init = tegra_aes_cra_init,
			.exit = tegra_aes_cra_exit,
			.setkey = tegra_aes_setkey,
			.encrypt = tegra_aes_encrypt,
			.decrypt = tegra_aes_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.base = {
				.cra_name = "ecb(aes)",
				.cra_driver_name = "ecb-aes-tegra",
				.cra_priority = 500,
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct tegra_aes_ctx),
				.cra_alignmask = 0xf,
				.cra_module = THIS_MODULE,
			},
		}
	}, {
		/* ctr(aes): stream mode, blocksize 1 */
		.alg.skcipher.op.do_one_request = tegra_aes_do_one_req,
		.alg.skcipher.base = {
			.init = tegra_aes_cra_init,
			.exit = tegra_aes_cra_exit,
			.setkey = tegra_aes_setkey,
			.encrypt = tegra_aes_encrypt,
			.decrypt = tegra_aes_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.base = {
				.cra_name = "ctr(aes)",
				.cra_driver_name = "ctr-aes-tegra",
				.cra_priority = 500,
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = 1,
				.cra_ctxsize = sizeof(struct tegra_aes_ctx),
				.cra_alignmask = 0xf,
				.cra_module = THIS_MODULE,
			},
		}
	}, {
		/* xts(aes): double-length key handled by tegra_xts_setkey() */
		.alg.skcipher.op.do_one_request = tegra_aes_do_one_req,
		.alg.skcipher.base = {
			.init = tegra_aes_cra_init,
			.exit = tegra_aes_cra_exit,
			.setkey = tegra_xts_setkey,
			.encrypt = tegra_aes_encrypt,
			.decrypt = tegra_aes_decrypt,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.base = {
				.cra_name = "xts(aes)",
				.cra_driver_name = "xts-aes-tegra",
				.cra_priority = 500,
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct tegra_aes_ctx),
				.cra_alignmask = (__alignof__(u64) - 1),
				.cra_module = THIS_MODULE,
			},
		}
	},
};
606
/*
 * Build the host1x command stream for the GMAC pass of GCM: hash the
 * associated data staged in rctx->inbuf into the GCM state. The op word
 * carries INIT (first operation of the request) and FINAL. Returns the
 * number of command words written.
 */
static unsigned int tegra_gmac_prep_cmd(struct tegra_aead_ctx *ctx,
					struct tegra_aead_reqctx *rctx)
{
	unsigned int data_count, res_bits, i = 0;
	struct tegra_se *se = ctx->se;
	u32 *cpuvaddr = se->cmdbuf->addr;

	data_count = (rctx->assoclen / AES_BLOCK_SIZE);
	res_bits = (rctx->assoclen % AES_BLOCK_SIZE) * 8;

	/*
	 * Hardware processes data_count + 1 blocks.
	 * Reduce 1 block if there is no residue
	 */
	if (!res_bits)
		data_count--;

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
	cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
			SE_LAST_BLOCK_RES_BITS(res_bits);

	/* Config registers followed by the source (assoc data) address */
	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 4);
	cpuvaddr[i++] = rctx->config;
	cpuvaddr[i++] = rctx->crypto_config;
	cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
			SE_ADDR_HI_SZ(rctx->assoclen);

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
	cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
			SE_AES_OP_INIT | SE_AES_OP_LASTBUF |
			SE_AES_OP_START;

	/* Increment the syncpoint on completion */
	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);

	return i;
}
646
/*
 * Build the host1x command stream for the GCM payload pass: encrypt or
 * decrypt rctx->cryptlen bytes from rctx->inbuf into rctx->outbuf.
 * When there was no associated data this pass also carries the INIT
 * flag. Returns the number of command words written.
 */
static unsigned int tegra_gcm_crypt_prep_cmd(struct tegra_aead_ctx *ctx,
					     struct tegra_aead_reqctx *rctx)
{
	unsigned int data_count, res_bits, i = 0, j;
	struct tegra_se *se = ctx->se;
	u32 *cpuvaddr = se->cmdbuf->addr, op;

	data_count = (rctx->cryptlen / AES_BLOCK_SIZE);
	res_bits = (rctx->cryptlen % AES_BLOCK_SIZE) * 8;
	op = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
	     SE_AES_OP_LASTBUF | SE_AES_OP_START;

	/*
	 * If there is no assoc data,
	 * this will be the init command
	 */
	if (!rctx->assoclen)
		op |= SE_AES_OP_INIT;

	/*
	 * Hardware processes data_count + 1 blocks.
	 * Reduce 1 block if there is no residue
	 */
	if (!res_bits)
		data_count--;

	/* Program the 128-bit counter (J0-derived IV) */
	cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
	cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
	for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
		cpuvaddr[i++] = rctx->iv[j];

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
	cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
			SE_LAST_BLOCK_RES_BITS(res_bits);

	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
	cpuvaddr[i++] = rctx->config;
	cpuvaddr[i++] = rctx->crypto_config;

	/* Source Address */
	cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
			SE_ADDR_HI_SZ(rctx->cryptlen);

	/* Destination Address */
	cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
			SE_ADDR_HI_SZ(rctx->cryptlen);

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
	cpuvaddr[i++] = op;

	/* Increment the syncpoint on completion */
	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);

	dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);
	return i;
}
706
/*
 * Build the host1x command stream for GCM_FINAL: program the total AAD
 * and message bit lengths plus the counter, and have the hardware write
 * the 128-bit tag to rctx->outbuf. INIT is set only for the degenerate
 * zero-length (no AAD, no payload) case where no earlier pass ran.
 * Returns the number of command words written.
 */
static int tegra_gcm_prep_final_cmd(struct tegra_se *se, u32 *cpuvaddr,
				    struct tegra_aead_reqctx *rctx)
{
	unsigned int i = 0, j;
	u32 op;

	op = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
	     SE_AES_OP_LASTBUF | SE_AES_OP_START;

	/*
	 * Set init for zero sized vector
	 */
	if (!rctx->assoclen && !rctx->cryptlen)
		op |= SE_AES_OP_INIT;

	/* Lengths are programmed in bits, as two 32-bit halves */
	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->aad_len, 2);
	cpuvaddr[i++] = rctx->assoclen * 8;
	cpuvaddr[i++] = 0;

	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->cryp_msg_len, 2);
	cpuvaddr[i++] = rctx->cryptlen * 8;
	cpuvaddr[i++] = 0;

	cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
	cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
	for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
		cpuvaddr[i++] = rctx->iv[j];

	/* No source buffer for the final pass; source address is zero */
	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
	cpuvaddr[i++] = rctx->config;
	cpuvaddr[i++] = rctx->crypto_config;
	cpuvaddr[i++] = 0;
	cpuvaddr[i++] = 0;

	/* Destination Address */
	cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
			SE_ADDR_HI_SZ(0x10); /* HW always generates 128-bit tag */

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
	cpuvaddr[i++] = op;

	/* Increment the syncpoint on completion */
	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);

	dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);

	return i;
}
757
tegra_gcm_do_gmac(struct tegra_aead_ctx * ctx,struct tegra_aead_reqctx * rctx)758 static int tegra_gcm_do_gmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
759 {
760 struct tegra_se *se = ctx->se;
761 unsigned int cmdlen;
762
763 scatterwalk_map_and_copy(rctx->inbuf.buf,
764 rctx->src_sg, 0, rctx->assoclen, 0);
765
766 rctx->config = tegra234_aes_cfg(SE_ALG_GMAC, rctx->encrypt);
767 rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GMAC, rctx->encrypt) |
768 SE_AES_KEY_INDEX(rctx->key_id);
769
770 cmdlen = tegra_gmac_prep_cmd(ctx, rctx);
771
772 return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
773 }
774
tegra_gcm_do_crypt(struct tegra_aead_ctx * ctx,struct tegra_aead_reqctx * rctx)775 static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
776 {
777 struct tegra_se *se = ctx->se;
778 int cmdlen, ret;
779
780 scatterwalk_map_and_copy(rctx->inbuf.buf, rctx->src_sg,
781 rctx->assoclen, rctx->cryptlen, 0);
782
783 rctx->config = tegra234_aes_cfg(SE_ALG_GCM, rctx->encrypt);
784 rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM, rctx->encrypt) |
785 SE_AES_KEY_INDEX(rctx->key_id);
786
787 /* Prepare command and submit */
788 cmdlen = tegra_gcm_crypt_prep_cmd(ctx, rctx);
789 ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
790 if (ret)
791 return ret;
792
793 /* Copy the result */
794 scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
795 rctx->assoclen, rctx->cryptlen, 1);
796
797 return 0;
798 }
799
tegra_gcm_do_final(struct tegra_aead_ctx * ctx,struct tegra_aead_reqctx * rctx)800 static int tegra_gcm_do_final(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
801 {
802 struct tegra_se *se = ctx->se;
803 u32 *cpuvaddr = se->cmdbuf->addr;
804 int cmdlen, ret, offset;
805
806 rctx->config = tegra234_aes_cfg(SE_ALG_GCM_FINAL, rctx->encrypt);
807 rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM_FINAL, rctx->encrypt) |
808 SE_AES_KEY_INDEX(rctx->key_id);
809
810 /* Prepare command and submit */
811 cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx);
812 ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
813 if (ret)
814 return ret;
815
816 if (rctx->encrypt) {
817 /* Copy the result */
818 offset = rctx->assoclen + rctx->cryptlen;
819 scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
820 offset, rctx->authsize, 1);
821 }
822
823 return 0;
824 }
825
tegra_gcm_do_verify(struct tegra_se * se,struct tegra_aead_reqctx * rctx)826 static int tegra_gcm_do_verify(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
827 {
828 unsigned int offset;
829 u8 mac[16];
830
831 offset = rctx->assoclen + rctx->cryptlen;
832 scatterwalk_map_and_copy(mac, rctx->src_sg, offset, rctx->authsize, 0);
833
834 if (crypto_memneq(rctx->outbuf.buf, mac, rctx->authsize))
835 return -EBADMSG;
836
837 return 0;
838 }
839
tegra_ccm_check_iv(const u8 * iv)840 static inline int tegra_ccm_check_iv(const u8 *iv)
841 {
842 /* iv[0] gives value of q-1
843 * 2 <= q <= 8 as per NIST 800-38C notation
844 * 2 <= L <= 8, so 1 <= L' <= 7. as per rfc 3610 notation
845 */
846 if (iv[0] < 1 || iv[0] > 7) {
847 pr_debug("ccm_check_iv failed %d\n", iv[0]);
848 return -EINVAL;
849 }
850
851 return 0;
852 }
853
/*
 * Build the host1x command stream for a CBC-MAC pass over rctx->inbuf.
 * rctx->inbuf.size is expected to be a multiple of AES_BLOCK_SIZE
 * (callers pad with tegra_ccm_add_padding()). The final MAC is read
 * back from the result registers by tegra_ccm_mac_result(). Returns the
 * number of command words written.
 */
static unsigned int tegra_cbcmac_prep_cmd(struct tegra_aead_ctx *ctx,
					  struct tegra_aead_reqctx *rctx)
{
	unsigned int data_count, i = 0;
	struct tegra_se *se = ctx->se;
	u32 *cpuvaddr = se->cmdbuf->addr;

	/* Hardware processes data_count + 1 blocks; size is block-aligned */
	data_count = (rctx->inbuf.size / AES_BLOCK_SIZE) - 1;

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
	cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count);

	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
	cpuvaddr[i++] = rctx->config;
	cpuvaddr[i++] = rctx->crypto_config;

	/* Source address */
	cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
			SE_ADDR_HI_SZ(rctx->inbuf.size);

	/* Destination address */
	cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
			SE_ADDR_HI_SZ(0x10); /* HW always generates 128 bit tag */

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
	cpuvaddr[i++] = SE_AES_OP_WRSTALL |
			SE_AES_OP_LASTBUF | SE_AES_OP_START;

	/* Increment the syncpoint on completion */
	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);

	return i;
}
888
/*
 * Build the host1x command stream for the CTR pass used by CCM:
 * process rctx->inbuf.size bytes (block-aligned) from rctx->inbuf into
 * rctx->outbuf with the counter taken from rctx->iv. Returns the number
 * of command words written.
 */
static unsigned int tegra_ctr_prep_cmd(struct tegra_aead_ctx *ctx,
				       struct tegra_aead_reqctx *rctx)
{
	unsigned int i = 0, j;
	struct tegra_se *se = ctx->se;
	u32 *cpuvaddr = se->cmdbuf->addr;

	/* Program the counter registers */
	cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
	cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
	for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
		cpuvaddr[i++] = rctx->iv[j];

	/* Hardware processes last_blk + 1 blocks; size is block-aligned */
	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
	cpuvaddr[i++] = (rctx->inbuf.size / AES_BLOCK_SIZE) - 1;
	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
	cpuvaddr[i++] = rctx->config;
	cpuvaddr[i++] = rctx->crypto_config;

	/* Source address setting */
	cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
			SE_ADDR_HI_SZ(rctx->inbuf.size);

	/* Destination address setting */
	cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
			SE_ADDR_HI_SZ(rctx->inbuf.size);

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
	cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_LASTBUF |
			SE_AES_OP_START;

	/* Increment the syncpoint on completion */
	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);

	dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n",
		rctx->config, rctx->crypto_config);

	return i;
}
930
tegra_ccm_do_cbcmac(struct tegra_aead_ctx * ctx,struct tegra_aead_reqctx * rctx)931 static int tegra_ccm_do_cbcmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
932 {
933 struct tegra_se *se = ctx->se;
934 int cmdlen;
935
936 rctx->config = tegra234_aes_cfg(SE_ALG_CBC_MAC, rctx->encrypt);
937 rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CBC_MAC,
938 rctx->encrypt) |
939 SE_AES_KEY_INDEX(rctx->key_id);
940
941 /* Prepare command and submit */
942 cmdlen = tegra_cbcmac_prep_cmd(ctx, rctx);
943
944 return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
945 }
946
/*
 * Write @msglen big-endian into the trailing octets of the @csize-byte
 * Q field of the CCM B0 block (NIST 800-38C). Returns -EOVERFLOW when
 * the length does not fit in the field.
 */
static int tegra_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
{
	__be32 be_len;
	u8 *end = block + csize;

	memset(block, 0, csize);

	if (csize < 4) {
		if (msglen > (1 << (8 * csize)))
			return -EOVERFLOW;
	} else {
		/* Only the low four octets can carry a 32-bit length */
		csize = 4;
	}

	be_len = cpu_to_be32(msglen);
	memcpy(end - csize, (u8 *)&be_len + 4 - csize, csize);

	return 0;
}
964
/*
 * Build the CCM B0 block from the request IV: encode the tag length t,
 * the Adata flag and the message length Q (RFC 3610 / NIST 800-38C).
 */
static int tegra_ccm_format_nonce(struct tegra_aead_reqctx *rctx, u8 *nonce)
{
	u8 *iv = (u8 *)rctx->iv;
	unsigned int q;

	memcpy(nonce, rctx->iv, 16);

	/* Flags octet: encode t (the MAC length) */
	nonce[0] |= (((rctx->authsize - 2) / 2) << 3);

	/* Flags octet: set the Adata bit when assoc data is present */
	if (rctx->assoclen)
		nonce[0] |= (1 << 6);

	/* Q (the message length) occupies the last q = L' + 1 octets */
	q = iv[0] + 1;

	return tegra_ccm_set_msg_len(nonce + 16 - q, rctx->cryptlen, q);
}
988
/*
 * Encode the associated-data length prefix per RFC 3610 / NIST 800-38C:
 * two octets for short lengths, the 0xfffe marker plus four octets
 * otherwise. Returns the number of octets written.
 */
static int tegra_ccm_format_adata(u8 *adata, unsigned int a)
{
	if (a < 65280) {
		*(__be16 *)adata = cpu_to_be16(a);
		return 2;
	}

	*(__be16 *)adata = cpu_to_be16(0xfffe);
	*(__be32 *)&adata[2] = cpu_to_be32(a);
	return 6;
}
1007
/*
 * Zero-pad @buf so that @len becomes a multiple of the 16-byte AES
 * block size. Returns the number of padding bytes written (0 when @len
 * is already block aligned).
 */
static int tegra_ccm_add_padding(u8 *buf, unsigned int len)
{
	unsigned int padlen = 16 - (len % 16);

	if (padlen == 16)
		return 0;

	/* memset the zeros directly instead of copying a zeroed array */
	memset(buf, 0, padlen);

	return padlen;
}
1020
tegra_ccm_format_blocks(struct tegra_aead_reqctx * rctx)1021 static int tegra_ccm_format_blocks(struct tegra_aead_reqctx *rctx)
1022 {
1023 unsigned int alen = 0, offset = 0;
1024 u8 nonce[16], adata[16];
1025 int ret;
1026
1027 ret = tegra_ccm_format_nonce(rctx, nonce);
1028 if (ret)
1029 return ret;
1030
1031 memcpy(rctx->inbuf.buf, nonce, 16);
1032 offset = 16;
1033
1034 if (rctx->assoclen) {
1035 alen = tegra_ccm_format_adata(adata, rctx->assoclen);
1036 memcpy(rctx->inbuf.buf + offset, adata, alen);
1037 offset += alen;
1038
1039 scatterwalk_map_and_copy(rctx->inbuf.buf + offset,
1040 rctx->src_sg, 0, rctx->assoclen, 0);
1041
1042 offset += rctx->assoclen;
1043 offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset,
1044 rctx->assoclen + alen);
1045 }
1046
1047 return offset;
1048 }
1049
/*
 * Collect the CBC-MAC tag from the SE result registers and clear them.
 *
 * On encryption the tag is stashed in rctx->authdata for the following
 * CTR pass; on decryption it is compared against the tag recovered from
 * the ciphertext. Returns -EBADMSG on tag mismatch, 0 otherwise.
 */
static int tegra_ccm_mac_result(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
{
	u32 result[16];
	int i, ret;

	/* Read and clear Result */
	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
		result[i] = readl(se->base + se->hw->regs->result + (i * 4));

	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
		writel(0, se->base + se->hw->regs->result + (i * 4));

	if (rctx->encrypt) {
		memcpy(rctx->authdata, result, rctx->authsize);
	} else {
		/* crypto_memneq() avoids a timing side channel on compare */
		ret = crypto_memneq(rctx->authdata, result, rctx->authsize);
		if (ret)
			return -EBADMSG;
	}

	return 0;
}
1072
/*
 * Unpack the CTR pass output. The first 16 bytes of outbuf hold the
 * (en/de)crypted tag block; the payload follows at offset 16.
 *
 * On encrypt, the encrypted tag is appended after the ciphertext in the
 * destination scatterlist; on decrypt, the recovered tag is saved in
 * rctx->authdata for the later CBC-MAC comparison.
 */
static int tegra_ccm_ctr_result(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
{
	/* Copy result */
	scatterwalk_map_and_copy(rctx->outbuf.buf + 16, rctx->dst_sg,
				 rctx->assoclen, rctx->cryptlen, 1);

	if (rctx->encrypt)
		scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
					 rctx->assoclen + rctx->cryptlen,
					 rctx->authsize, 1);
	else
		memcpy(rctx->authdata, rctx->outbuf.buf, rctx->authsize);

	return 0;
}
1088
/*
 * Run the CBC-MAC half of CCM: format the header blocks, append the
 * block-padded plaintext, submit the CBC-MAC job, and then fetch or
 * verify the resulting tag via tegra_ccm_mac_result().
 */
static int tegra_ccm_compute_auth(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
{
	struct tegra_se *se = ctx->se;
	struct scatterlist *sg;
	int offset, ret;

	offset = tegra_ccm_format_blocks(rctx);
	if (offset < 0)
		return -EINVAL;

	/* Copy plain text to the buffer */
	/* On decrypt, the plaintext was just produced into dst_sg by CTR */
	sg = rctx->encrypt ? rctx->src_sg : rctx->dst_sg;

	scatterwalk_map_and_copy(rctx->inbuf.buf + offset,
				 sg, rctx->assoclen,
				 rctx->cryptlen, 0);
	offset += rctx->cryptlen;
	offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->cryptlen);

	rctx->inbuf.size = offset;

	ret = tegra_ccm_do_cbcmac(ctx, rctx);
	if (ret)
		return ret;

	return tegra_ccm_mac_result(se, rctx);
}
1116
/*
 * Run the CTR half of CCM over the tag and payload.
 *
 * Buffer layout: the (padded) tag block comes first — counter value 0
 * encrypts the tag per RFC 3610 — followed by the block-padded payload.
 * The output is unpacked by tegra_ccm_ctr_result().
 */
static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
{
	struct tegra_se *se = ctx->se;
	unsigned int cmdlen, offset = 0;
	struct scatterlist *sg = rctx->src_sg;
	int ret;

	rctx->config = tegra234_aes_cfg(SE_ALG_CTR, rctx->encrypt);
	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CTR, rctx->encrypt) |
			      SE_AES_KEY_INDEX(rctx->key_id);

	/* Copy authdata in the top of buffer for encryption/decryption */
	if (rctx->encrypt)
		memcpy(rctx->inbuf.buf, rctx->authdata, rctx->authsize);
	else
		scatterwalk_map_and_copy(rctx->inbuf.buf, sg,
					 rctx->assoclen + rctx->cryptlen,
					 rctx->authsize, 0);

	offset += rctx->authsize;
	offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->authsize);

	/* If there is no cryptlen, proceed to submit the task */
	if (rctx->cryptlen) {
		scatterwalk_map_and_copy(rctx->inbuf.buf + offset, sg,
					 rctx->assoclen, rctx->cryptlen, 0);
		offset += rctx->cryptlen;
		offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->cryptlen);
	}

	rctx->inbuf.size = offset;

	/* Prepare command and submit */
	cmdlen = tegra_ctr_prep_cmd(ctx, rctx);
	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
	if (ret)
		return ret;

	return tegra_ccm_ctr_result(se, rctx);
}
1157
/*
 * Per-request CCM setup: capture request parameters into the request
 * context, validate the IV, derive ctr_0 from the formatted IV, and
 * clear any stale hardware result registers.
 */
static int tegra_ccm_crypt_init(struct aead_request *req, struct tegra_se *se,
				struct tegra_aead_reqctx *rctx)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	u8 *iv = (u8 *)rctx->iv;
	int ret, i;

	rctx->src_sg = req->src;
	rctx->dst_sg = req->dst;
	rctx->assoclen = req->assoclen;
	rctx->authsize = crypto_aead_authsize(tfm);

	/* On decrypt, req->cryptlen includes the appended tag */
	if (rctx->encrypt)
		rctx->cryptlen = req->cryptlen;
	else
		rctx->cryptlen = req->cryptlen - rctx->authsize;

	memcpy(iv, req->iv, 16);

	ret = tegra_ccm_check_iv(iv);
	if (ret)
		return ret;

	/* Note: rfc 3610 and NIST 800-38C require counter (ctr_0) of
	 * zero to encrypt auth tag.
	 * req->iv has the formatted ctr_0 (i.e. Flags || N || 0).
	 */
	/* iv[0] is the L' flags field; zero the trailing counter bytes */
	memset(iv + 15 - iv[0], 0, iv[0] + 1);

	/* Clear any previous result */
	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
		writel(0, se->base + se->hw->regs->result + (i * 4));

	return 0;
}
1193
tegra_ccm_do_one_req(struct crypto_engine * engine,void * areq)1194 static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
1195 {
1196 struct aead_request *req = container_of(areq, struct aead_request, base);
1197 struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
1198 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1199 struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1200 struct tegra_se *se = ctx->se;
1201 int ret;
1202
1203 ret = tegra_ccm_crypt_init(req, se, rctx);
1204 if (ret)
1205 goto out_finalize;
1206
1207 rctx->key_id = ctx->key_id;
1208
1209 /* Allocate buffers required */
1210 rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100;
1211 rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size,
1212 &rctx->inbuf.addr, GFP_KERNEL);
1213 if (!rctx->inbuf.buf)
1214 goto out_finalize;
1215
1216 rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100;
1217 rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size,
1218 &rctx->outbuf.addr, GFP_KERNEL);
1219 if (!rctx->outbuf.buf) {
1220 ret = -ENOMEM;
1221 goto out_free_inbuf;
1222 }
1223
1224 if (!ctx->key_id) {
1225 ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key,
1226 ctx->keylen, ctx->alg, &rctx->key_id);
1227 if (ret)
1228 goto out;
1229 }
1230
1231 if (rctx->encrypt) {
1232 /* CBC MAC Operation */
1233 ret = tegra_ccm_compute_auth(ctx, rctx);
1234 if (ret)
1235 goto out;
1236
1237 /* CTR operation */
1238 ret = tegra_ccm_do_ctr(ctx, rctx);
1239 if (ret)
1240 goto out;
1241 } else {
1242 /* CTR operation */
1243 ret = tegra_ccm_do_ctr(ctx, rctx);
1244 if (ret)
1245 goto out;
1246
1247 /* CBC MAC Operation */
1248 ret = tegra_ccm_compute_auth(ctx, rctx);
1249 if (ret)
1250 goto out;
1251 }
1252
1253 out:
1254 dma_free_coherent(ctx->se->dev, rctx->inbuf.size,
1255 rctx->outbuf.buf, rctx->outbuf.addr);
1256
1257 out_free_inbuf:
1258 dma_free_coherent(ctx->se->dev, rctx->outbuf.size,
1259 rctx->inbuf.buf, rctx->inbuf.addr);
1260
1261 if (tegra_key_is_reserved(rctx->key_id))
1262 tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
1263
1264 out_finalize:
1265 crypto_finalize_aead_request(ctx->se->engine, req, ret);
1266
1267 return 0;
1268 }
1269
tegra_gcm_do_one_req(struct crypto_engine * engine,void * areq)1270 static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
1271 {
1272 struct aead_request *req = container_of(areq, struct aead_request, base);
1273 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1274 struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1275 struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
1276 int ret;
1277
1278 rctx->src_sg = req->src;
1279 rctx->dst_sg = req->dst;
1280 rctx->assoclen = req->assoclen;
1281 rctx->authsize = crypto_aead_authsize(tfm);
1282
1283 if (rctx->encrypt)
1284 rctx->cryptlen = req->cryptlen;
1285 else
1286 rctx->cryptlen = req->cryptlen - ctx->authsize;
1287
1288 memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
1289 rctx->iv[3] = (1 << 24);
1290
1291 rctx->key_id = ctx->key_id;
1292
1293 /* Allocate buffers required */
1294 rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen;
1295 rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size,
1296 &rctx->inbuf.addr, GFP_KERNEL);
1297 if (!rctx->inbuf.buf) {
1298 ret = -ENOMEM;
1299 goto out_finalize;
1300 }
1301
1302 rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen;
1303 rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size,
1304 &rctx->outbuf.addr, GFP_KERNEL);
1305 if (!rctx->outbuf.buf) {
1306 ret = -ENOMEM;
1307 goto out_free_inbuf;
1308 }
1309
1310 if (!ctx->key_id) {
1311 ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key,
1312 ctx->keylen, ctx->alg, &rctx->key_id);
1313 if (ret)
1314 goto out;
1315 }
1316
1317 /* If there is associated data perform GMAC operation */
1318 if (rctx->assoclen) {
1319 ret = tegra_gcm_do_gmac(ctx, rctx);
1320 if (ret)
1321 goto out;
1322 }
1323
1324 /* GCM Encryption/Decryption operation */
1325 if (rctx->cryptlen) {
1326 ret = tegra_gcm_do_crypt(ctx, rctx);
1327 if (ret)
1328 goto out;
1329 }
1330
1331 /* GCM_FINAL operation */
1332 ret = tegra_gcm_do_final(ctx, rctx);
1333 if (ret)
1334 goto out;
1335
1336 if (!rctx->encrypt)
1337 ret = tegra_gcm_do_verify(ctx->se, rctx);
1338
1339 out:
1340 dma_free_coherent(ctx->se->dev, rctx->outbuf.size,
1341 rctx->outbuf.buf, rctx->outbuf.addr);
1342
1343 out_free_inbuf:
1344 dma_free_coherent(ctx->se->dev, rctx->inbuf.size,
1345 rctx->inbuf.buf, rctx->inbuf.addr);
1346
1347 if (tegra_key_is_reserved(rctx->key_id))
1348 tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
1349
1350 out_finalize:
1351 crypto_finalize_aead_request(ctx->se->engine, req, ret);
1352
1353 return 0;
1354 }
1355
tegra_aead_cra_init(struct crypto_aead * tfm)1356 static int tegra_aead_cra_init(struct crypto_aead *tfm)
1357 {
1358 struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1359 struct aead_alg *alg = crypto_aead_alg(tfm);
1360 struct tegra_se_alg *se_alg;
1361 const char *algname;
1362 int ret;
1363
1364 algname = crypto_tfm_alg_name(&tfm->base);
1365
1366 se_alg = container_of(alg, struct tegra_se_alg, alg.aead.base);
1367
1368 crypto_aead_set_reqsize(tfm, sizeof(struct tegra_aead_reqctx));
1369
1370 ctx->se = se_alg->se_dev;
1371 ctx->key_id = 0;
1372 ctx->keylen = 0;
1373
1374 ret = se_algname_to_algid(algname);
1375 if (ret < 0) {
1376 dev_err(ctx->se->dev, "invalid algorithm\n");
1377 return ret;
1378 }
1379
1380 ctx->alg = ret;
1381
1382 return 0;
1383 }
1384
tegra_ccm_setauthsize(struct crypto_aead * tfm,unsigned int authsize)1385 static int tegra_ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
1386 {
1387 struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1388
1389 switch (authsize) {
1390 case 4:
1391 case 6:
1392 case 8:
1393 case 10:
1394 case 12:
1395 case 14:
1396 case 16:
1397 break;
1398 default:
1399 return -EINVAL;
1400 }
1401
1402 ctx->authsize = authsize;
1403
1404 return 0;
1405 }
1406
tegra_gcm_setauthsize(struct crypto_aead * tfm,unsigned int authsize)1407 static int tegra_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
1408 {
1409 struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1410 int ret;
1411
1412 ret = crypto_gcm_check_authsize(authsize);
1413 if (ret)
1414 return ret;
1415
1416 ctx->authsize = authsize;
1417
1418 return 0;
1419 }
1420
tegra_aead_cra_exit(struct crypto_aead * tfm)1421 static void tegra_aead_cra_exit(struct crypto_aead *tfm)
1422 {
1423 struct tegra_aead_ctx *ctx = crypto_tfm_ctx(&tfm->base);
1424
1425 if (ctx->key_id)
1426 tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
1427 }
1428
/*
 * Common entry for AEAD encrypt/decrypt: record the direction and hand
 * the request to the crypto engine worker.
 */
static int tegra_aead_crypt(struct aead_request *req, bool encrypt)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);

	rctx->encrypt = encrypt;

	return crypto_transfer_aead_request_to_engine(ctx->se->engine, req);
}
1439
/* Queue an AEAD encryption request on the engine. */
static int tegra_aead_encrypt(struct aead_request *req)
{
	return tegra_aead_crypt(req, true);
}
1444
/* Queue an AEAD decryption request on the engine. */
static int tegra_aead_decrypt(struct aead_request *req)
{
	return tegra_aead_crypt(req, false);
}
1449
/*
 * Install an AES key for the AEAD transform.
 *
 * tegra_key_submit() tries to program the key into a dedicated hardware
 * keyslot. If that fails (e.g. all slots busy), the key is deliberately
 * cached in the context instead; each request then programs it into a
 * reserved slot via tegra_key_submit_reserved_aes(). That is why a
 * nonzero ret is NOT propagated here — this always returns 0 for a
 * valid key length.
 */
static int tegra_aead_setkey(struct crypto_aead *tfm,
			     const u8 *key, u32 keylen)
{
	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	if (aes_check_keylen(keylen)) {
		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
		return -EINVAL;
	}

	ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
	if (ret) {
		/* Fallback path: keep the key for per-request reserved slots */
		ctx->keylen = keylen;
		memcpy(ctx->key, key, keylen);
	}

	return 0;
}
1469
/*
 * Build the host1x command stream for one CMAC job into se->cmdbuf.
 *
 * The stream programs (in order): a zero IV on the first task of a
 * hash, the last-block/residual-bits register, the config and crypto
 * config registers, the DMA source address, the operation trigger, and
 * finally a syncpoint increment so completion can be awaited.
 * Returns the number of 32-bit words written.
 */
static unsigned int tegra_cmac_prep_cmd(struct tegra_cmac_ctx *ctx,
					struct tegra_cmac_reqctx *rctx)
{
	unsigned int data_count, res_bits = 0, i = 0, j;
	struct tegra_se *se = ctx->se;
	u32 *cpuvaddr = se->cmdbuf->addr, op;

	data_count = (rctx->datbuf.size / AES_BLOCK_SIZE);

	op = SE_AES_OP_WRSTALL | SE_AES_OP_START | SE_AES_OP_LASTBUF;

	/* Only the final (non-update) task may carry a partial last block */
	if (!(rctx->task & SHA_UPDATE)) {
		op |= SE_AES_OP_FINAL;
		res_bits = (rctx->datbuf.size % AES_BLOCK_SIZE) * 8;
	}

	/* The last-block register is zero-based when the data is aligned */
	if (!res_bits && data_count)
		data_count--;

	if (rctx->task & SHA_FIRST) {
		rctx->task &= ~SHA_FIRST;

		cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
		cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
		/* Load 0 IV */
		for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
			cpuvaddr[i++] = 0;
	}

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
	cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
			SE_LAST_BLOCK_RES_BITS(res_bits);

	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
	cpuvaddr[i++] = rctx->config;
	cpuvaddr[i++] = rctx->crypto_config;

	/* Source Address */
	cpuvaddr[i++] = lower_32_bits(rctx->datbuf.addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->datbuf.addr)) |
			SE_ADDR_HI_SZ(rctx->datbuf.size);
	cpuvaddr[i++] = 0;
	cpuvaddr[i++] = SE_ADDR_HI_SZ(AES_BLOCK_SIZE);

	/* Kick off the operation */
	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
	cpuvaddr[i++] = op;

	/* Signal completion through the syncpoint */
	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);

	return i;
}
1523
tegra_cmac_copy_result(struct tegra_se * se,struct tegra_cmac_reqctx * rctx)1524 static void tegra_cmac_copy_result(struct tegra_se *se, struct tegra_cmac_reqctx *rctx)
1525 {
1526 int i;
1527
1528 for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1529 rctx->result[i] = readl(se->base + se->hw->regs->result + (i * 4));
1530 }
1531
tegra_cmac_paste_result(struct tegra_se * se,struct tegra_cmac_reqctx * rctx)1532 static void tegra_cmac_paste_result(struct tegra_se *se, struct tegra_cmac_reqctx *rctx)
1533 {
1534 int i;
1535
1536 for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1537 writel(rctx->result[i],
1538 se->base + se->hw->regs->result + (i * 4));
1539 }
1540
tegra_cmac_do_init(struct ahash_request * req)1541 static int tegra_cmac_do_init(struct ahash_request *req)
1542 {
1543 struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1544 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1545 struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1546 struct tegra_se *se = ctx->se;
1547 int i;
1548
1549 rctx->total_len = 0;
1550 rctx->datbuf.size = 0;
1551 rctx->residue.size = 0;
1552 rctx->key_id = ctx->key_id;
1553 rctx->task |= SHA_FIRST;
1554 rctx->blk_size = crypto_ahash_blocksize(tfm);
1555
1556 rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2,
1557 &rctx->residue.addr, GFP_KERNEL);
1558 if (!rctx->residue.buf)
1559 return -ENOMEM;
1560
1561 rctx->residue.size = 0;
1562
1563 /* Clear any previous result */
1564 for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1565 writel(0, se->base + se->hw->regs->result + (i * 4));
1566
1567 return 0;
1568 }
1569
/*
 * Process an update chunk. Whole blocks (minus a held-back final block)
 * are hashed by the hardware; the final block plus any tail bytes are
 * kept in the residue buffer for final(), because CMAC treats the last
 * block specially. The intermediate state is saved/restored across
 * submissions via the result registers.
 */
static int tegra_cmac_do_update(struct ahash_request *req)
{
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct tegra_se *se = ctx->se;
	unsigned int nblks, nresidue, cmdlen;
	int ret;

	if (!req->nbytes)
		return 0;

	nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size;
	nblks = (req->nbytes + rctx->residue.size) / rctx->blk_size;

	/*
	 * Reserve the last block as residue during final() to process.
	 */
	if (!nresidue && nblks) {
		nresidue += rctx->blk_size;
		nblks--;
	}

	rctx->src_sg = req->src;
	rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue;
	rctx->total_len += rctx->datbuf.size;
	rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
	rctx->crypto_config = SE_AES_KEY_INDEX(rctx->key_id);

	/*
	 * Keep one block and residue bytes in residue and
	 * return. The bytes will be processed in final()
	 */
	if (nblks < 1) {
		scatterwalk_map_and_copy(rctx->residue.buf + rctx->residue.size,
					 rctx->src_sg, 0, req->nbytes, 0);

		rctx->residue.size += req->nbytes;
		return 0;
	}

	rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size,
					      &rctx->datbuf.addr, GFP_KERNEL);
	if (!rctx->datbuf.buf)
		return -ENOMEM;

	/* Copy the previous residue first */
	if (rctx->residue.size)
		memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);

	scatterwalk_map_and_copy(rctx->datbuf.buf + rctx->residue.size,
				 rctx->src_sg, 0, req->nbytes - nresidue, 0);

	/* Stash the held-back tail for the final() pass */
	scatterwalk_map_and_copy(rctx->residue.buf, rctx->src_sg,
				 req->nbytes - nresidue, nresidue, 0);

	/* Update residue value with the residue after current block */
	rctx->residue.size = nresidue;

	/*
	 * If this is not the first task, paste the previous copied
	 * intermediate results to the registers so that it gets picked up.
	 */
	if (!(rctx->task & SHA_FIRST))
		tegra_cmac_paste_result(ctx->se, rctx);

	cmdlen = tegra_cmac_prep_cmd(ctx, rctx);
	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);

	/* Save intermediate state for the next update/final submission */
	tegra_cmac_copy_result(ctx->se, rctx);

	dma_free_coherent(ctx->se->dev, rctx->datbuf.size,
			  rctx->datbuf.buf, rctx->datbuf.addr);

	return ret;
}
1646
/*
 * Finish the CMAC: hash the held-back residue as the (special) last
 * block, then read the digest out of the result registers and clear
 * them. A zero-length message with no prior data is delegated to the
 * software fallback, since the hardware path needs at least one block.
 */
static int tegra_cmac_do_final(struct ahash_request *req)
{
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct tegra_se *se = ctx->se;
	u32 *result = (u32 *)req->result;
	int ret = 0, i, cmdlen;

	if (!req->nbytes && !rctx->total_len && ctx->fallback_tfm) {
		return crypto_shash_tfm_digest(ctx->fallback_tfm,
					       NULL, 0, req->result);
	}

	if (rctx->residue.size) {
		rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size,
						      &rctx->datbuf.addr, GFP_KERNEL);
		if (!rctx->datbuf.buf) {
			ret = -ENOMEM;
			goto out_free;
		}

		memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
	}

	/* NOTE(review): when residue.size == 0 the command below still uses
	 * rctx->datbuf.addr from the last update — confirm the engine ignores
	 * the source address when datbuf.size is 0.
	 */
	rctx->datbuf.size = rctx->residue.size;
	rctx->total_len += rctx->residue.size;
	rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);

	/*
	 * If this is not the first task, paste the previous copied
	 * intermediate results to the registers so that it gets picked up.
	 */
	if (!(rctx->task & SHA_FIRST))
		tegra_cmac_paste_result(ctx->se, rctx);

	/* Prepare command and submit */
	cmdlen = tegra_cmac_prep_cmd(ctx, rctx);
	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
	if (ret)
		goto out;

	/* Read and clear Result register */
	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
		result[i] = readl(se->base + se->hw->regs->result + (i * 4));

	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
		writel(0, se->base + se->hw->regs->result + (i * 4));

out:
	if (rctx->residue.size)
		dma_free_coherent(se->dev, rctx->datbuf.size,
				  rctx->datbuf.buf, rctx->datbuf.addr);
out_free:
	dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm) * 2,
			  rctx->residue.buf, rctx->residue.addr);
	return ret;
}
1705
/*
 * Crypto-engine callback dispatching the queued CMAC phases (INIT,
 * UPDATE, FINAL) recorded in rctx->task. If no dedicated keyslot is
 * held, the cached key is programmed into a reserved slot for the
 * duration of the request. Always returns 0; the request status is
 * reported through crypto_finalize_hash_request().
 */
static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = ahash_request_cast(areq);
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct tegra_se *se = ctx->se;
	int ret = 0;

	if (rctx->task & SHA_INIT) {
		ret = tegra_cmac_do_init(req);
		if (ret)
			goto out;

		rctx->task &= ~SHA_INIT;
	}

	/* key_id == 0 means setkey fell back to caching the key */
	if (!ctx->key_id) {
		ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key,
						    ctx->keylen, ctx->alg, &rctx->key_id);
		if (ret)
			goto out;
	}

	if (rctx->task & SHA_UPDATE) {
		ret = tegra_cmac_do_update(req);
		if (ret)
			goto out;

		rctx->task &= ~SHA_UPDATE;
	}

	if (rctx->task & SHA_FINAL) {
		ret = tegra_cmac_do_final(req);
		if (ret)
			goto out;

		rctx->task &= ~SHA_FINAL;
	}
out:
	/* Release the temporary reserved keyslot, if one was used */
	if (tegra_key_is_reserved(rctx->key_id))
		tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);

	crypto_finalize_hash_request(se->engine, req, ret);

	return 0;
}
1753
tegra_cmac_init_fallback(struct crypto_ahash * tfm,struct tegra_cmac_ctx * ctx,const char * algname)1754 static void tegra_cmac_init_fallback(struct crypto_ahash *tfm, struct tegra_cmac_ctx *ctx,
1755 const char *algname)
1756 {
1757 unsigned int statesize;
1758
1759 ctx->fallback_tfm = crypto_alloc_shash(algname, 0, CRYPTO_ALG_NEED_FALLBACK);
1760
1761 if (IS_ERR(ctx->fallback_tfm)) {
1762 dev_warn(ctx->se->dev, "failed to allocate fallback for %s\n", algname);
1763 ctx->fallback_tfm = NULL;
1764 return;
1765 }
1766
1767 statesize = crypto_shash_statesize(ctx->fallback_tfm);
1768
1769 if (statesize > sizeof(struct tegra_cmac_reqctx))
1770 crypto_ahash_set_statesize(tfm, statesize);
1771 }
1772
tegra_cmac_cra_init(struct crypto_tfm * tfm)1773 static int tegra_cmac_cra_init(struct crypto_tfm *tfm)
1774 {
1775 struct tegra_cmac_ctx *ctx = crypto_tfm_ctx(tfm);
1776 struct crypto_ahash *ahash_tfm = __crypto_ahash_cast(tfm);
1777 struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
1778 struct tegra_se_alg *se_alg;
1779 const char *algname;
1780 int ret;
1781
1782 algname = crypto_tfm_alg_name(tfm);
1783 se_alg = container_of(alg, struct tegra_se_alg, alg.ahash.base);
1784
1785 crypto_ahash_set_reqsize(ahash_tfm, sizeof(struct tegra_cmac_reqctx));
1786
1787 ctx->se = se_alg->se_dev;
1788 ctx->key_id = 0;
1789 ctx->keylen = 0;
1790
1791 ret = se_algname_to_algid(algname);
1792 if (ret < 0) {
1793 dev_err(ctx->se->dev, "invalid algorithm\n");
1794 return ret;
1795 }
1796
1797 ctx->alg = ret;
1798
1799 tegra_cmac_init_fallback(ahash_tfm, ctx, algname);
1800
1801 return 0;
1802 }
1803
tegra_cmac_cra_exit(struct crypto_tfm * tfm)1804 static void tegra_cmac_cra_exit(struct crypto_tfm *tfm)
1805 {
1806 struct tegra_cmac_ctx *ctx = crypto_tfm_ctx(tfm);
1807
1808 if (ctx->fallback_tfm)
1809 crypto_free_shash(ctx->fallback_tfm);
1810
1811 tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
1812 }
1813
/*
 * Install the CMAC key, mirroring tegra_aead_setkey(): on keyslot
 * exhaustion the key is cached and programmed into a reserved slot per
 * request, so a nonzero tegra_key_submit() result is intentionally not
 * propagated. Always returns 0 for a valid key length.
 */
static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
	int ret;

	if (aes_check_keylen(keylen)) {
		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
		return -EINVAL;
	}

	/* NOTE(review): crypto_shash_setkey() return value is ignored here —
	 * the fallback is only used for zero-length digests; confirm that is
	 * acceptable if the fallback rejects the key.
	 */
	if (ctx->fallback_tfm)
		crypto_shash_setkey(ctx->fallback_tfm, key, keylen);

	ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
	if (ret) {
		/* Fallback path: keep the key for per-request reserved slots */
		ctx->keylen = keylen;
		memcpy(ctx->key, key, keylen);
	}

	return 0;
}
1836
tegra_cmac_init(struct ahash_request * req)1837 static int tegra_cmac_init(struct ahash_request *req)
1838 {
1839 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1840 struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1841 struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1842
1843 rctx->task = SHA_INIT;
1844
1845 return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
1846 }
1847
tegra_cmac_update(struct ahash_request * req)1848 static int tegra_cmac_update(struct ahash_request *req)
1849 {
1850 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1851 struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1852 struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1853
1854 rctx->task |= SHA_UPDATE;
1855
1856 return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
1857 }
1858
tegra_cmac_final(struct ahash_request * req)1859 static int tegra_cmac_final(struct ahash_request *req)
1860 {
1861 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1862 struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1863 struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1864
1865 rctx->task |= SHA_FINAL;
1866
1867 return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
1868 }
1869
tegra_cmac_finup(struct ahash_request * req)1870 static int tegra_cmac_finup(struct ahash_request *req)
1871 {
1872 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1873 struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1874 struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1875
1876 rctx->task |= SHA_UPDATE | SHA_FINAL;
1877
1878 return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
1879 }
1880
tegra_cmac_digest(struct ahash_request * req)1881 static int tegra_cmac_digest(struct ahash_request *req)
1882 {
1883 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1884 struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1885 struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1886
1887 rctx->task |= SHA_INIT | SHA_UPDATE | SHA_FINAL;
1888
1889 return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
1890 }
1891
tegra_cmac_export(struct ahash_request * req,void * out)1892 static int tegra_cmac_export(struct ahash_request *req, void *out)
1893 {
1894 struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1895
1896 memcpy(out, rctx, sizeof(*rctx));
1897
1898 return 0;
1899 }
1900
tegra_cmac_import(struct ahash_request * req,const void * in)1901 static int tegra_cmac_import(struct ahash_request *req, const void *in)
1902 {
1903 struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1904
1905 memcpy(rctx, in, sizeof(*rctx));
1906
1907 return 0;
1908 }
1909
/* AEAD algorithms offloaded to the SE: gcm(aes) and ccm(aes). */
static struct tegra_se_alg tegra_aead_algs[] = {
	{
		.alg.aead.op.do_one_request = tegra_gcm_do_one_req,
		.alg.aead.base = {
			.init = tegra_aead_cra_init,
			.exit = tegra_aead_cra_exit,
			.setkey = tegra_aead_setkey,
			.setauthsize = tegra_gcm_setauthsize,
			.encrypt = tegra_aead_encrypt,
			.decrypt = tegra_aead_decrypt,
			.maxauthsize = AES_BLOCK_SIZE,
			.ivsize	= GCM_AES_IV_SIZE,
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-tegra",
				.cra_priority = 500,
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = 1,
				.cra_ctxsize = sizeof(struct tegra_aead_ctx),
				.cra_alignmask = 0xf,
				.cra_module = THIS_MODULE,
			},
		}
	}, {
		.alg.aead.op.do_one_request = tegra_ccm_do_one_req,
		.alg.aead.base = {
			.init = tegra_aead_cra_init,
			.exit = tegra_aead_cra_exit,
			.setkey	= tegra_aead_setkey,
			.setauthsize = tegra_ccm_setauthsize,
			.encrypt = tegra_aead_encrypt,
			.decrypt = tegra_aead_decrypt,
			.maxauthsize = AES_BLOCK_SIZE,
			.ivsize	= AES_BLOCK_SIZE,
			.chunksize = AES_BLOCK_SIZE,
			.base = {
				.cra_name = "ccm(aes)",
				.cra_driver_name = "ccm-aes-tegra",
				.cra_priority = 500,
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = 1,
				.cra_ctxsize = sizeof(struct tegra_aead_ctx),
				.cra_alignmask = 0xf,
				.cra_module = THIS_MODULE,
			},
		}
	}
};
1958
/* Keyed-hash algorithms offloaded to the SE: cmac(aes). */
static struct tegra_se_alg tegra_cmac_algs[] = {
	{
		.alg.ahash.op.do_one_request = tegra_cmac_do_one_req,
		.alg.ahash.base = {
			.init = tegra_cmac_init,
			.setkey	= tegra_cmac_setkey,
			.update = tegra_cmac_update,
			.final = tegra_cmac_final,
			.finup = tegra_cmac_finup,
			.digest = tegra_cmac_digest,
			.export = tegra_cmac_export,
			.import = tegra_cmac_import,
			.halg.digestsize = AES_BLOCK_SIZE,
			.halg.statesize = sizeof(struct tegra_cmac_reqctx),
			.halg.base = {
				.cra_name = "cmac(aes)",
				.cra_driver_name = "tegra-se-cmac",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct tegra_cmac_ctx),
				.cra_alignmask = 0,
				.cra_module = THIS_MODULE,
				.cra_init = tegra_cmac_cra_init,
				.cra_exit = tegra_cmac_cra_exit,
			}
		}
	}
};
1988
/*
 * Register all SE AES-family algorithms (skcipher, AEAD, CMAC) with the
 * crypto engine. On failure, previously registered algorithms are
 * unregistered in reverse order; the `i = ARRAY_SIZE(...)` assignments
 * before the earlier labels make each fall-through unwind loop cover
 * the fully-registered preceding group.
 */
int tegra_init_aes(struct tegra_se *se)
{
	struct aead_engine_alg *aead_alg;
	struct ahash_engine_alg *ahash_alg;
	struct skcipher_engine_alg *sk_alg;
	int i, ret;

	se->manifest = tegra_aes_kac_manifest;

	for (i = 0; i < ARRAY_SIZE(tegra_aes_algs); i++) {
		sk_alg = &tegra_aes_algs[i].alg.skcipher;
		tegra_aes_algs[i].se_dev = se;

		ret = crypto_engine_register_skcipher(sk_alg);
		if (ret) {
			dev_err(se->dev, "failed to register %s\n",
				sk_alg->base.base.cra_name);
			goto err_aes;
		}
	}

	for (i = 0; i < ARRAY_SIZE(tegra_aead_algs); i++) {
		aead_alg = &tegra_aead_algs[i].alg.aead;
		tegra_aead_algs[i].se_dev = se;

		ret = crypto_engine_register_aead(aead_alg);
		if (ret) {
			dev_err(se->dev, "failed to register %s\n",
				aead_alg->base.base.cra_name);
			goto err_aead;
		}
	}

	for (i = 0; i < ARRAY_SIZE(tegra_cmac_algs); i++) {
		ahash_alg = &tegra_cmac_algs[i].alg.ahash;
		tegra_cmac_algs[i].se_dev = se;

		ret = crypto_engine_register_ahash(ahash_alg);
		if (ret) {
			dev_err(se->dev, "failed to register %s\n",
				ahash_alg->base.halg.base.cra_name);
			goto err_cmac;
		}
	}

	return 0;

err_cmac:
	/* Unwind the partially-registered cmac group */
	while (i--)
		crypto_engine_unregister_ahash(&tegra_cmac_algs[i].alg.ahash);

	/* All aead algs were registered; unwind them fully */
	i = ARRAY_SIZE(tegra_aead_algs);
err_aead:
	while (i--)
		crypto_engine_unregister_aead(&tegra_aead_algs[i].alg.aead);

	/* All skcipher algs were registered; unwind them fully */
	i = ARRAY_SIZE(tegra_aes_algs);
err_aes:
	while (i--)
		crypto_engine_unregister_skcipher(&tegra_aes_algs[i].alg.skcipher);

	return ret;
}
2052
/* Unregister every AES-family algorithm registered by tegra_init_aes(). */
void tegra_deinit_aes(struct tegra_se *se)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tegra_aes_algs); i++)
		crypto_engine_unregister_skcipher(&tegra_aes_algs[i].alg.skcipher);

	for (i = 0; i < ARRAY_SIZE(tegra_aead_algs); i++)
		crypto_engine_unregister_aead(&tegra_aead_algs[i].alg.aead);

	for (i = 0; i < ARRAY_SIZE(tegra_cmac_algs); i++)
		crypto_engine_unregister_ahash(&tegra_cmac_algs[i].alg.ahash);
}
2066