// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cipher algorithms supported by the CESA: DES, 3DES and AES.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */

#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include "cesa.h"

struct mv_cesa_des_ctx {
        struct mv_cesa_ctx base;
        u8 key[DES_KEY_SIZE];
};

struct mv_cesa_des3_ctx {
        struct mv_cesa_ctx base;
        u8 key[DES3_EDE_KEY_SIZE];
};

struct mv_cesa_aes_ctx {
        struct mv_cesa_ctx base;
        struct crypto_aes_ctx aes;
};

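/*
 * DMA iterator covering a whole skcipher request: the base iterator tracks
 * how much of req->cryptlen is left, while src and dst walk the input and
 * output scatterlists one engine-sized chunk at a time.
 */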
struct mv_cesa_skcipher_dma_iter {
        struct mv_cesa_dma_iter base;
        struct mv_cesa_sg_dma_iter src;
        struct mv_cesa_sg_dma_iter dst;
};

static inline void
mv_cesa_skcipher_req_iter_init(struct mv_cesa_skcipher_dma_iter *iter,
                               struct skcipher_request *req)
{
        mv_cesa_req_dma_iter_init(&iter->base, req->cryptlen);
        mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
        mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);
}

static inline bool
mv_cesa_skcipher_req_iter_next_op(struct mv_cesa_skcipher_dma_iter *iter)
{
        iter->src.op_offset = 0;
        iter->dst.op_offset = 0;

        return mv_cesa_req_dma_iter_next_op(&iter->base);
}

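/*
 * Unmap the source/destination scatterlists and release the TDMA chain
 * allocated for a DMA-backed request.
 */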
static inline void
mv_cesa_skcipher_dma_cleanup(struct skcipher_request *req)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);

        if (req->dst != req->src) {
                dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
                             DMA_FROM_DEVICE);
                dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
                             DMA_TO_DEVICE);
        } else {
                dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
                             DMA_BIDIRECTIONAL);
        }
        mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_skcipher_cleanup(struct skcipher_request *req)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        struct mv_cesa_engine *engine = creq->base.engine;

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_skcipher_dma_cleanup(req);

        atomic_sub(req->cryptlen, &engine->load);
}

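/*
 * Standard (CPU-driven) mode: copy the next chunk of input data and the
 * operation descriptor into the engine SRAM, then kick accelerator 0.
 * After the first chunk only the descriptor part of the op is rewritten;
 * the key/IV context already in SRAM is reused (skip_ctx).
 */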
static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        struct mv_cesa_skcipher_std_req *sreq = &creq->std;
        struct mv_cesa_engine *engine = creq->base.engine;
        size_t len = min_t(size_t, req->cryptlen - sreq->offset,
                           CESA_SA_SRAM_PAYLOAD_SIZE);

        mv_cesa_adjust_op(engine, &sreq->op);
        if (engine->pool)
                memcpy(engine->sram_pool, &sreq->op, sizeof(sreq->op));
        else
                memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));

        len = mv_cesa_sg_copy_to_sram(engine, req->src, creq->src_nents,
                                      CESA_SA_DATA_SRAM_OFFSET, len,
                                      sreq->offset);

        sreq->size = len;
        mv_cesa_set_crypt_op_len(&sreq->op, len);

        /* FIXME: only update enc_len field */
        if (!sreq->skip_ctx) {
                if (engine->pool)
                        memcpy(engine->sram_pool, &sreq->op, sizeof(sreq->op));
                else
                        memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
                sreq->skip_ctx = true;
        } else if (engine->pool) {
                memcpy(engine->sram_pool, &sreq->op, sizeof(sreq->op.desc));
        } else {
                memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op.desc));
        }

        mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
        writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
        WARN_ON(readl(engine->regs + CESA_SA_CMD) &
                CESA_SA_CMD_EN_CESA_SA_ACCL0);
        writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

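/*
 * Copy the processed chunk back from SRAM to the destination scatterlist.
 * Returns -EINPROGRESS while more chunks remain to be pushed through the
 * engine, 0 once the whole request has been handled.
 */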
static int mv_cesa_skcipher_std_process(struct skcipher_request *req,
                                        u32 status)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        struct mv_cesa_skcipher_std_req *sreq = &creq->std;
        struct mv_cesa_engine *engine = creq->base.engine;
        size_t len;

        len = mv_cesa_sg_copy_from_sram(engine, req->dst, creq->dst_nents,
                                        CESA_SA_DATA_SRAM_OFFSET, sreq->size,
                                        sreq->offset);

        sreq->offset += len;
        if (sreq->offset < req->cryptlen)
                return -EINPROGRESS;

        return 0;
}

static int mv_cesa_skcipher_process(struct crypto_async_request *req,
                                    u32 status)
{
        struct skcipher_request *skreq = skcipher_request_cast(req);
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
        struct mv_cesa_req *basereq = &creq->base;

        if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ)
                return mv_cesa_skcipher_std_process(skreq, status);

        return mv_cesa_dma_process(basereq, status);
}

static void mv_cesa_skcipher_step(struct crypto_async_request *req)
{
        struct skcipher_request *skreq = skcipher_request_cast(req);
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_dma_step(&creq->base);
        else
                mv_cesa_skcipher_std_step(skreq);
}

static inline void
mv_cesa_skcipher_dma_prepare(struct skcipher_request *req)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        struct mv_cesa_req *basereq = &creq->base;

        mv_cesa_dma_prepare(basereq, basereq->engine);
}

static inline void
mv_cesa_skcipher_std_prepare(struct skcipher_request *req)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        struct mv_cesa_skcipher_std_req *sreq = &creq->std;

        sreq->size = 0;
        sreq->offset = 0;
}

static inline void mv_cesa_skcipher_prepare(struct crypto_async_request *req,
                                            struct mv_cesa_engine *engine)
{
        struct skcipher_request *skreq = skcipher_request_cast(req);
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);

        creq->base.engine = engine;

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_skcipher_dma_prepare(skreq);
        else
                mv_cesa_skcipher_std_prepare(skreq);
}

static inline void
mv_cesa_skcipher_req_cleanup(struct crypto_async_request *req)
{
        struct skcipher_request *skreq = skcipher_request_cast(req);

        mv_cesa_skcipher_cleanup(skreq);
}

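/*
 * Completion handler: copy the output IV back into the request, as the
 * skcipher API expects for chained CBC calls. For DMA requests the IV is
 * taken from the last op descriptor of the TDMA chain; otherwise it is
 * fetched from engine SRAM.
 */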
static void
mv_cesa_skcipher_complete(struct crypto_async_request *req)
{
        struct skcipher_request *skreq = skcipher_request_cast(req);
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
        struct mv_cesa_engine *engine = creq->base.engine;
        unsigned int ivsize;

        ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(skreq));

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
                struct mv_cesa_req *basereq;

                basereq = &creq->base;
                memcpy(skreq->iv, basereq->chain.last->op->ctx.skcipher.iv,
                       ivsize);
        } else if (engine->pool) {
                memcpy(skreq->iv,
                       engine->sram_pool + CESA_SA_CRYPT_IV_SRAM_OFFSET,
                       ivsize);
        } else {
                memcpy_fromio(skreq->iv,
                              engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
                              ivsize);
        }
}

static const struct mv_cesa_req_ops mv_cesa_skcipher_req_ops = {
        .step = mv_cesa_skcipher_step,
        .process = mv_cesa_skcipher_process,
        .cleanup = mv_cesa_skcipher_req_cleanup,
        .complete = mv_cesa_skcipher_complete,
};

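/* Wipe the whole transform context (including key material) on teardown. */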
static void mv_cesa_skcipher_cra_exit(struct crypto_tfm *tfm)
{
        void *ctx = crypto_tfm_ctx(tfm);

        memzero_explicit(ctx, tfm->__crt_alg->cra_ctxsize);
}

static int mv_cesa_skcipher_cra_init(struct crypto_tfm *tfm)
{
        struct mv_cesa_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->ops = &mv_cesa_skcipher_req_ops;

        crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
                                    sizeof(struct mv_cesa_skcipher_req));

        return 0;
}

static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
                              unsigned int len)
{
        struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
        struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
        int remaining;
        int offset;
        int ret;
        int i;

        ret = aes_expandkey(&ctx->aes, key, len);
        if (ret)
                return ret;

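        /*
         * The engine expects the tail of the expanded encryption schedule
         * as its decryption key. aes_expandkey() already leaves the last
         * round key in key_dec[0..3]; for 192/256-bit keys, copy the
         * preceding schedule words over from key_enc.
         */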
        remaining = (ctx->aes.key_length - 16) / 4;
        offset = ctx->aes.key_length + 24 - remaining;
        for (i = 0; i < remaining; i++)
                ctx->aes.key_dec[4 + i] = ctx->aes.key_enc[offset + i];

        return 0;
}

static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
                              unsigned int len)
{
        struct mv_cesa_des_ctx *ctx = crypto_skcipher_ctx(cipher);
        int err;

        err = verify_skcipher_des_key(cipher, key);
        if (err)
                return err;

        memcpy(ctx->key, key, DES_KEY_SIZE);

        return 0;
}

static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
                                   const u8 *key, unsigned int len)
{
        struct mv_cesa_des3_ctx *ctx = crypto_skcipher_ctx(cipher);
        int err;

        err = verify_skcipher_des3_key(cipher, key);
        if (err)
                return err;

        memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);

        return 0;
}

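/*
 * Build the TDMA descriptor chain for a DMA-backed request: for each
 * engine-sized chunk, emit an op descriptor (the key/IV context only once,
 * see skip_ctx), the input transfers, a dummy descriptor that launches the
 * crypto operation, and the output transfers. A final result descriptor
 * copies the output IV out of SRAM.
 */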
static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
                                         const struct mv_cesa_op_ctx *op_templ)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                      GFP_KERNEL : GFP_ATOMIC;
        struct mv_cesa_req *basereq = &creq->base;
        struct mv_cesa_skcipher_dma_iter iter;
        bool skip_ctx = false;
        int ret;

        basereq->chain.first = NULL;
        basereq->chain.last = NULL;

        if (req->src != req->dst) {
                ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
                                 DMA_TO_DEVICE);
                if (!ret)
                        return -ENOMEM;

                ret = dma_map_sg(cesa_dev->dev, req->dst, creq->dst_nents,
                                 DMA_FROM_DEVICE);
                if (!ret) {
                        ret = -ENOMEM;
                        goto err_unmap_src;
                }
        } else {
                ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
                                 DMA_BIDIRECTIONAL);
                if (!ret)
                        return -ENOMEM;
        }

        mv_cesa_tdma_desc_iter_init(&basereq->chain);
        mv_cesa_skcipher_req_iter_init(&iter, req);

        do {
                struct mv_cesa_op_ctx *op;

                op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx,
                                        flags);
                if (IS_ERR(op)) {
                        ret = PTR_ERR(op);
                        goto err_free_tdma;
                }
                skip_ctx = true;

                mv_cesa_set_crypt_op_len(op, iter.base.op_len);

                /* Add input transfers */
                ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
                                                   &iter.src, flags);
                if (ret)
                        goto err_free_tdma;

                /* Add dummy desc to launch the crypto operation */
                ret = mv_cesa_dma_add_dummy_launch(&basereq->chain, flags);
                if (ret)
                        goto err_free_tdma;

                /* Add output transfers */
                ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
                                                   &iter.dst, flags);
                if (ret)
                        goto err_free_tdma;

        } while (mv_cesa_skcipher_req_iter_next_op(&iter));

        /* Add output data for IV */
        ret = mv_cesa_dma_add_result_op(&basereq->chain,
                                        CESA_SA_CFG_SRAM_OFFSET,
                                        CESA_SA_DATA_SRAM_OFFSET,
                                        CESA_TDMA_SRC_IN_SRAM, flags);

        if (ret)
                goto err_free_tdma;

        basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

        return 0;

err_free_tdma:
        mv_cesa_dma_cleanup(basereq);
        if (req->dst != req->src)
                dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
                             DMA_FROM_DEVICE);

err_unmap_src:
        dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
                     req->dst != req->src ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

        return ret;
}

static inline int
mv_cesa_skcipher_std_req_init(struct skcipher_request *req,
                              const struct mv_cesa_op_ctx *op_templ)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        struct mv_cesa_skcipher_std_req *sreq = &creq->std;
        struct mv_cesa_req *basereq = &creq->base;

        sreq->op = *op_templ;
        sreq->skip_ctx = false;
        basereq->chain.first = NULL;
        basereq->chain.last = NULL;

        return 0;
}

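/*
 * Common request initialization: validate the length against the cipher
 * block size, count the scatterlist entries, and pick the DMA or standard
 * backend depending on whether the engine has a TDMA unit.
 */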
static int mv_cesa_skcipher_req_init(struct skcipher_request *req,
                                     struct mv_cesa_op_ctx *tmpl)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        unsigned int blksize = crypto_skcipher_blocksize(tfm);
        int ret;

        if (!IS_ALIGNED(req->cryptlen, blksize))
                return -EINVAL;

        creq->src_nents = sg_nents_for_len(req->src, req->cryptlen);
        if (creq->src_nents < 0) {
                dev_err(cesa_dev->dev, "Invalid number of src SG");
                return creq->src_nents;
        }
        creq->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
        if (creq->dst_nents < 0) {
                dev_err(cesa_dev->dev, "Invalid number of dst SG");
                return creq->dst_nents;
        }

        mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY,
                              CESA_SA_DESC_CFG_OP_MSK);

        if (cesa_dev->caps->has_tdma)
                ret = mv_cesa_skcipher_dma_req_init(req, tmpl);
        else
                ret = mv_cesa_skcipher_std_req_init(req, tmpl);

        return ret;
}

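/*
 * Initialize the request, select an engine based on the current load, and
 * queue the request on it. Zero-length requests complete immediately.
 */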
static int mv_cesa_skcipher_queue_req(struct skcipher_request *req,
                                      struct mv_cesa_op_ctx *tmpl)
{
        int ret;
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        struct mv_cesa_engine *engine;

        if (!req->cryptlen)
                return 0;

        ret = mv_cesa_skcipher_req_init(req, tmpl);
        if (ret)
                return ret;

        engine = mv_cesa_select_engine(req->cryptlen);
        mv_cesa_skcipher_prepare(&req->base, engine);

        ret = mv_cesa_queue_req(&req->base, &creq->base);

        if (mv_cesa_req_needs_cleanup(&req->base, ret))
                mv_cesa_skcipher_cleanup(req);

        return ret;
}

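/*
 * Per-algorithm helpers below fill the op template with the cipher mode,
 * direction, key and (for CBC) IV before queueing the request.
 */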
static int mv_cesa_des_op(struct skcipher_request *req,
                          struct mv_cesa_op_ctx *tmpl)
{
        struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

        mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES,
                              CESA_SA_DESC_CFG_CRYPTM_MSK);

        memcpy(tmpl->ctx.skcipher.key, ctx->key, DES_KEY_SIZE);

        return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des_encrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_ECB |
                           CESA_SA_DESC_CFG_DIR_ENC);

        return mv_cesa_des_op(req, &tmpl);
}

static int mv_cesa_ecb_des_decrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_ECB |
                           CESA_SA_DESC_CFG_DIR_DEC);

        return mv_cesa_des_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_des_alg = {
        .setkey = mv_cesa_des_setkey,
        .encrypt = mv_cesa_ecb_des_encrypt,
        .decrypt = mv_cesa_ecb_des_decrypt,
        .min_keysize = DES_KEY_SIZE,
        .max_keysize = DES_KEY_SIZE,
        .base = {
                .cra_name = "ecb(des)",
                .cra_driver_name = "mv-ecb-des",
                .cra_priority = 300,
                .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
                             CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_blocksize = DES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
                .cra_init = mv_cesa_skcipher_cra_init,
                .cra_exit = mv_cesa_skcipher_cra_exit,
        },
};

static int mv_cesa_cbc_des_op(struct skcipher_request *req,
                              struct mv_cesa_op_ctx *tmpl)
{
        mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
                              CESA_SA_DESC_CFG_CRYPTCM_MSK);

        memcpy(tmpl->ctx.skcipher.iv, req->iv, DES_BLOCK_SIZE);

        return mv_cesa_des_op(req, tmpl);
}

static int mv_cesa_cbc_des_encrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

        return mv_cesa_cbc_des_op(req, &tmpl);
}

static int mv_cesa_cbc_des_decrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

        return mv_cesa_cbc_des_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_des_alg = {
        .setkey = mv_cesa_des_setkey,
        .encrypt = mv_cesa_cbc_des_encrypt,
        .decrypt = mv_cesa_cbc_des_decrypt,
        .min_keysize = DES_KEY_SIZE,
        .max_keysize = DES_KEY_SIZE,
        .ivsize = DES_BLOCK_SIZE,
        .base = {
                .cra_name = "cbc(des)",
                .cra_driver_name = "mv-cbc-des",
                .cra_priority = 300,
                .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
                             CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_blocksize = DES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
                .cra_init = mv_cesa_skcipher_cra_init,
                .cra_exit = mv_cesa_skcipher_cra_exit,
        },
};

static int mv_cesa_des3_op(struct skcipher_request *req,
                           struct mv_cesa_op_ctx *tmpl)
{
        struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

        mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES,
                              CESA_SA_DESC_CFG_CRYPTM_MSK);

        memcpy(tmpl->ctx.skcipher.key, ctx->key, DES3_EDE_KEY_SIZE);

        return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des3_ede_encrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_ECB |
                           CESA_SA_DESC_CFG_3DES_EDE |
                           CESA_SA_DESC_CFG_DIR_ENC);

        return mv_cesa_des3_op(req, &tmpl);
}

static int mv_cesa_ecb_des3_ede_decrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_ECB |
                           CESA_SA_DESC_CFG_3DES_EDE |
                           CESA_SA_DESC_CFG_DIR_DEC);

        return mv_cesa_des3_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_des3_ede_alg = {
        .setkey = mv_cesa_des3_ede_setkey,
        .encrypt = mv_cesa_ecb_des3_ede_encrypt,
        .decrypt = mv_cesa_ecb_des3_ede_decrypt,
        .min_keysize = DES3_EDE_KEY_SIZE,
        .max_keysize = DES3_EDE_KEY_SIZE,
        .base = {
                .cra_name = "ecb(des3_ede)",
                .cra_driver_name = "mv-ecb-des3-ede",
                .cra_priority = 300,
                .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
                             CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
                .cra_init = mv_cesa_skcipher_cra_init,
                .cra_exit = mv_cesa_skcipher_cra_exit,
        },
};

static int mv_cesa_cbc_des3_op(struct skcipher_request *req,
                               struct mv_cesa_op_ctx *tmpl)
{
        memcpy(tmpl->ctx.skcipher.iv, req->iv, DES3_EDE_BLOCK_SIZE);

        return mv_cesa_des3_op(req, tmpl);
}

static int mv_cesa_cbc_des3_ede_encrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_CBC |
                           CESA_SA_DESC_CFG_3DES_EDE |
                           CESA_SA_DESC_CFG_DIR_ENC);

        return mv_cesa_cbc_des3_op(req, &tmpl);
}

static int mv_cesa_cbc_des3_ede_decrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_CBC |
                           CESA_SA_DESC_CFG_3DES_EDE |
                           CESA_SA_DESC_CFG_DIR_DEC);

        return mv_cesa_cbc_des3_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_des3_ede_alg = {
        .setkey = mv_cesa_des3_ede_setkey,
        .encrypt = mv_cesa_cbc_des3_ede_encrypt,
        .decrypt = mv_cesa_cbc_des3_ede_decrypt,
        .min_keysize = DES3_EDE_KEY_SIZE,
        .max_keysize = DES3_EDE_KEY_SIZE,
        .ivsize = DES3_EDE_BLOCK_SIZE,
        .base = {
                .cra_name = "cbc(des3_ede)",
                .cra_driver_name = "mv-cbc-des3-ede",
                .cra_priority = 300,
                .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
                             CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
                .cra_init = mv_cesa_skcipher_cra_init,
                .cra_exit = mv_cesa_skcipher_cra_exit,
        },
};

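/*
 * AES: pick the encryption or decryption key schedule prepared by
 * mv_cesa_aes_setkey() and encode the key length in the op config.
 */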
static int mv_cesa_aes_op(struct skcipher_request *req,
                          struct mv_cesa_op_ctx *tmpl)
{
        struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        int i;
        u32 *key;
        u32 cfg;

        cfg = CESA_SA_DESC_CFG_CRYPTM_AES;

        if (mv_cesa_get_op_cfg(tmpl) & CESA_SA_DESC_CFG_DIR_DEC)
                key = ctx->aes.key_dec;
        else
                key = ctx->aes.key_enc;

        for (i = 0; i < ctx->aes.key_length / sizeof(u32); i++)
                tmpl->ctx.skcipher.key[i] = cpu_to_le32(key[i]);

        if (ctx->aes.key_length == 24)
                cfg |= CESA_SA_DESC_CFG_AES_LEN_192;
        else if (ctx->aes.key_length == 32)
                cfg |= CESA_SA_DESC_CFG_AES_LEN_256;

        mv_cesa_update_op_cfg(tmpl, cfg,
                              CESA_SA_DESC_CFG_CRYPTM_MSK |
                              CESA_SA_DESC_CFG_AES_LEN_MSK);

        return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_aes_encrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_ECB |
                           CESA_SA_DESC_CFG_DIR_ENC);

        return mv_cesa_aes_op(req, &tmpl);
}

static int mv_cesa_ecb_aes_decrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_ECB |
                           CESA_SA_DESC_CFG_DIR_DEC);

        return mv_cesa_aes_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_aes_alg = {
        .setkey = mv_cesa_aes_setkey,
        .encrypt = mv_cesa_ecb_aes_encrypt,
        .decrypt = mv_cesa_ecb_aes_decrypt,
        .min_keysize = AES_MIN_KEY_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE,
        .base = {
                .cra_name = "ecb(aes)",
                .cra_driver_name = "mv-ecb-aes",
                .cra_priority = 300,
                .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
                             CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
                .cra_init = mv_cesa_skcipher_cra_init,
                .cra_exit = mv_cesa_skcipher_cra_exit,
        },
};

static int mv_cesa_cbc_aes_op(struct skcipher_request *req,
                              struct mv_cesa_op_ctx *tmpl)
{
        mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
                              CESA_SA_DESC_CFG_CRYPTCM_MSK);
        memcpy(tmpl->ctx.skcipher.iv, req->iv, AES_BLOCK_SIZE);

        return mv_cesa_aes_op(req, tmpl);
}

static int mv_cesa_cbc_aes_encrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

        return mv_cesa_cbc_aes_op(req, &tmpl);
}

static int mv_cesa_cbc_aes_decrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

        return mv_cesa_cbc_aes_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_aes_alg = {
        .setkey = mv_cesa_aes_setkey,
        .encrypt = mv_cesa_cbc_aes_encrypt,
        .decrypt = mv_cesa_cbc_aes_decrypt,
        .min_keysize = AES_MIN_KEY_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE,
        .ivsize = AES_BLOCK_SIZE,
        .base = {
                .cra_name = "cbc(aes)",
                .cra_driver_name = "mv-cbc-aes",
                .cra_priority = 300,
                .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
                             CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
                .cra_init = mv_cesa_skcipher_cra_init,
                .cra_exit = mv_cesa_skcipher_cra_exit,
        },
};

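/*
 * Usage sketch (not part of this driver): once these skcipher_algs are
 * registered by the CESA core, consumers reach them through the generic
 * crypto API, e.g.:
 *
 *      tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *      crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *      skcipher_request_set_crypt(req, src, dst, len, iv);
 *      ret = crypto_skcipher_encrypt(req);
 */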