// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ce-cipher.c - hardware cryptographic offloader for
 * Allwinner H3/A64/H5/H2+/H6/R40 SoC
 *
 * Copyright (C) 2016-2019 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * keysizes in CBC and ECB mode.
 *
 * You can find a link to the datasheet in Documentation/arch/arm/sunxi.rst
 */
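/*
 * Illustrative sketch only (not part of the driver): once this driver is
 * registered, the offloaded ciphers are reached through the generic
 * kernel skcipher API by algorithm name, e.g.:
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (!IS_ERR(tfm))
 *		crypto_skcipher_setkey(tfm, key, 16);
 *
 * crypto_alloc_skcipher() and crypto_skcipher_setkey() are the standard
 * crypto API entry points; "key" here is a caller-provided buffer.
 */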

#include <linux/bottom_half.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
#include "sun8i-ce.h"

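/*
 * Decide whether a request must be handled by the software fallback
 * instead of the CE hardware: too many SG entries, cryptlen shorter than
 * the IV, zero length, length not a multiple of 16, or any SG entry whose
 * offset or length is not 4-byte aligned. Each reason bumps its own
 * debugfs counter when CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG is set.
 */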
static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct scatterlist *sg;
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun8i_ce_alg_template *algt;
	unsigned int todo, len;

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher.base);

	if (sg_nents_for_len(areq->src, areq->cryptlen) > MAX_SG ||
	    sg_nents_for_len(areq->dst, areq->cryptlen) > MAX_SG) {
		if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
			algt->stat_fb_maxsg++;

		return true;
	}

	if (areq->cryptlen < crypto_skcipher_ivsize(tfm)) {
		if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
			algt->stat_fb_leniv++;

		return true;
	}

	if (areq->cryptlen == 0) {
		if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
			algt->stat_fb_len0++;

		return true;
	}

	if (areq->cryptlen % 16) {
		if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
			algt->stat_fb_mod16++;

		return true;
	}

	len = areq->cryptlen;
	sg = areq->src;
	while (sg) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
			if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
				algt->stat_fb_srcali++;

			return true;
		}
		todo = min(len, sg->length);
		if (todo % 4) {
			if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
				algt->stat_fb_srclen++;

			return true;
		}
		len -= todo;
		sg = sg_next(sg);
	}

	len = areq->cryptlen;
	sg = areq->dst;
	while (sg) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
			if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
				algt->stat_fb_dstali++;

			return true;
		}
		todo = min(len, sg->length);
		if (todo % 4) {
			if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
				algt->stat_fb_dstlen++;

			return true;
		}
		len -= todo;
		sg = sg_next(sg);
	}
	return false;
}

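/*
 * Run the request on the software fallback skcipher allocated at tfm init
 * time, forwarding src/dst, length, IV and the completion callback as-is.
 */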
static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	int err;

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
		struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
		struct sun8i_ce_alg_template *algt;

		algt = container_of(alg, struct sun8i_ce_alg_template,
				    alg.skcipher.base);

		algt->stat_fb++;
	}

	skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (rctx->op_dir & CE_DECRYPTION)
		err = crypto_skcipher_decrypt(&rctx->fallback_req);
	else
		err = crypto_skcipher_encrypt(&rctx->fallback_req);
	return err;
}

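/*
 * Build the CE task descriptor for a request: program algorithm, direction
 * and key size, DMA-map the key, the bounced IV and the src/dst
 * scatterlists, then fill the t_src/t_dst slots. On failure, every mapping
 * done so far is released again via the theend_* error paths.
 */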
static int sun8i_ce_cipher_prepare(struct skcipher_request *areq,
				   struct ce_task *cet)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun8i_ce_alg_template *algt;
	struct scatterlist *sg;
	unsigned int todo, len, offset, ivsize;
	u32 common, sym;
	int i;
	int nr_sgs = 0;
	int nr_sgd = 0;
	int err = 0;
	int ns = sg_nents_for_len(areq->src, areq->cryptlen);
	int nd = sg_nents_for_len(areq->dst, areq->cryptlen);

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher.base);

	dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
		crypto_tfm_alg_name(areq->base.tfm),
		areq->cryptlen,
		rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm),
		op->keylen);

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
		algt->stat_req++;

	memset(cet, 0, sizeof(struct ce_task));

	cet->t_id = cpu_to_le32(rctx->flow);
	common = ce->variant->alg_cipher[algt->ce_algo_id];
	common |= rctx->op_dir | CE_COMM_INT;
	cet->t_common_ctl = cpu_to_le32(common);
	/* CTS and recent CE (H6) need the length in bytes, in words otherwise */
	if (ce->variant->cipher_t_dlen_in_bytes)
		cet->t_dlen = cpu_to_le32(areq->cryptlen);
	else
		cet->t_dlen = cpu_to_le32(areq->cryptlen / 4);

	sym = ce->variant->op_mode[algt->ce_blockmode];
	len = op->keylen;
	switch (len) {
	case 128 / 8:
		sym |= CE_AES_128BITS;
		break;
	case 192 / 8:
		sym |= CE_AES_192BITS;
		break;
	case 256 / 8:
		sym |= CE_AES_256BITS;
		break;
	}

	cet->t_sym_ctl = cpu_to_le32(sym);
	cet->t_asym_ctl = 0;

	rctx->addr_key = dma_map_single(ce->dev, op->key, op->keylen, DMA_TO_DEVICE);
	if (dma_mapping_error(ce->dev, rctx->addr_key)) {
		dev_err(ce->dev, "Cannot DMA MAP KEY\n");
		err = -EFAULT;
		goto theend;
	}
	cet->t_key = desc_addr_val_le32(ce, rctx->addr_key);

	ivsize = crypto_skcipher_ivsize(tfm);
	if (areq->iv && ivsize > 0) {
		if (rctx->op_dir & CE_DECRYPTION) {
			offset = areq->cryptlen - ivsize;
			scatterwalk_map_and_copy(rctx->backup_iv, areq->src,
						 offset, ivsize, 0);
		}
		memcpy(rctx->bounce_iv, areq->iv, ivsize);
		rctx->addr_iv = dma_map_single(ce->dev, rctx->bounce_iv, ivsize,
					       DMA_TO_DEVICE);
		if (dma_mapping_error(ce->dev, rctx->addr_iv)) {
			dev_err(ce->dev, "Cannot DMA MAP IV\n");
			err = -ENOMEM;
			goto theend_iv;
		}
		cet->t_iv = desc_addr_val_le32(ce, rctx->addr_iv);
	}

	if (areq->src == areq->dst) {
		nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_BIDIRECTIONAL);
		if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend_iv;
		}
		nr_sgd = nr_sgs;
	} else {
		nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
		if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend_iv;
		}
		nr_sgd = dma_map_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE);
		if (nr_sgd <= 0 || nr_sgd > MAX_SG) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgd);
			err = -EINVAL;
			goto theend_sgs;
		}
	}

	len = areq->cryptlen;
	for_each_sg(areq->src, sg, nr_sgs, i) {
		cet->t_src[i].addr = desc_addr_val_le32(ce, sg_dma_address(sg));
		todo = min(len, sg_dma_len(sg));
		cet->t_src[i].len = cpu_to_le32(todo / 4);
		dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, cet->t_src[i].len, sg->offset, todo);
		len -= todo;
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %d\n", len);
		err = -EINVAL;
		goto theend_sgs;
	}

	len = areq->cryptlen;
	for_each_sg(areq->dst, sg, nr_sgd, i) {
		cet->t_dst[i].addr = desc_addr_val_le32(ce, sg_dma_address(sg));
		todo = min(len, sg_dma_len(sg));
		cet->t_dst[i].len = cpu_to_le32(todo / 4);
		dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, cet->t_dst[i].len, sg->offset, todo);
		len -= todo;
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %d\n", len);
		err = -EINVAL;
		goto theend_sgs;
	}

	rctx->nr_sgs = ns;
	rctx->nr_sgd = nd;
	return 0;

theend_sgs:
	if (areq->src == areq->dst) {
		dma_unmap_sg(ce->dev, areq->src, ns, DMA_BIDIRECTIONAL);
	} else {
		if (nr_sgs > 0)
			dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);

		if (nr_sgd > 0)
			dma_unmap_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE);
	}

theend_iv:
	if (areq->iv && ivsize > 0) {
		if (!dma_mapping_error(ce->dev, rctx->addr_iv))
			dma_unmap_single(ce->dev, rctx->addr_iv, ivsize,
					 DMA_TO_DEVICE);

		offset = areq->cryptlen - ivsize;
		if (rctx->op_dir & CE_DECRYPTION) {
			memcpy(areq->iv, rctx->backup_iv, ivsize);
			memzero_explicit(rctx->backup_iv, ivsize);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
						 ivsize, 0);
		}
		memzero_explicit(rctx->bounce_iv, ivsize);
	}

	dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);

theend:
	return err;
}

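/*
 * Undo sun8i_ce_cipher_prepare() once the task has completed: unmap the
 * scatterlists, IV and key, and store the IV needed for chaining back into
 * areq->iv (from the backup copy for decryption, from the last ciphertext
 * block for encryption).
 */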
static void sun8i_ce_cipher_unprepare(struct skcipher_request *areq,
				      struct ce_task *cet)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	unsigned int ivsize, offset;
	int nr_sgs = rctx->nr_sgs;
	int nr_sgd = rctx->nr_sgd;

	ivsize = crypto_skcipher_ivsize(tfm);

	if (areq->src == areq->dst) {
		dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
	} else {
		if (nr_sgs > 0)
			dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
		dma_unmap_sg(ce->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
	}

	if (areq->iv && ivsize > 0) {
		if (cet->t_iv)
			dma_unmap_single(ce->dev, rctx->addr_iv, ivsize,
					 DMA_TO_DEVICE);
		offset = areq->cryptlen - ivsize;
		if (rctx->op_dir & CE_DECRYPTION) {
			memcpy(areq->iv, rctx->backup_iv, ivsize);
			memzero_explicit(rctx->backup_iv, ivsize);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
						 ivsize, 0);
		}
		memzero_explicit(rctx->bounce_iv, ivsize);
	}

	dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
}

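/*
 * crypto_engine do_one_request() callback: prepare the task on the flow
 * chosen at submit time, run it, then unmap everything and complete the
 * request with BHs disabled.
 */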
int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct sun8i_cipher_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = ctx->ce;
	struct sun8i_ce_flow *chan;
	int err;

	chan = &ce->chanlist[rctx->flow];

	err = sun8i_ce_cipher_prepare(req, chan->tl);
	if (err)
		return err;

	err = sun8i_ce_run_task(ce, rctx->flow,
				crypto_tfm_alg_name(req->base.tfm));

	sun8i_ce_cipher_unprepare(req, chan->tl);

	local_bh_disable();
	crypto_finalize_skcipher_request(engine, req, err);
	local_bh_enable();

	return 0;
}

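/*
 * Entry points for the skcipher decrypt/encrypt operations: set the
 * direction, bounce unsupported requests to the software fallback,
 * otherwise pick a flow and queue the request on its crypto engine.
 */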
int sun8i_ce_skdecrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	rctx->op_dir = CE_DECRYPTION;
	if (sun8i_ce_cipher_need_fallback(areq))
		return sun8i_ce_cipher_fallback(areq);

	e = sun8i_ce_get_engine_number(op->ce);
	rctx->flow = e;
	engine = op->ce->chanlist[e].engine;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

int sun8i_ce_skencrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	rctx->op_dir = CE_ENCRYPTION;
	if (sun8i_ce_cipher_need_fallback(areq))
		return sun8i_ce_cipher_fallback(areq);

	e = sun8i_ce_get_engine_number(op->ce);
	rctx->flow = e;
	engine = op->ce->chanlist[e].engine;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

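/*
 * Per-tfm init: allocate the software fallback, size the request context
 * so it can carry the fallback request, and take a runtime PM reference on
 * the CE device for the lifetime of the tfm.
 */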
int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct sun8i_ce_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);
	struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);
	int err;

	memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx));

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher.base);
	op->ce = algt->ce;

	op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->ce->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

	crypto_skcipher_set_reqsize(sktfm, sizeof(struct sun8i_cipher_req_ctx) +
				    crypto_skcipher_reqsize(op->fallback_tfm));

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
		memcpy(algt->fbname,
		       crypto_skcipher_driver_name(op->fallback_tfm),
		       CRYPTO_MAX_ALG_NAME);

	err = pm_runtime_resume_and_get(op->ce->dev);
	if (err < 0)
		goto error_pm;

	return 0;
error_pm:
	crypto_free_skcipher(op->fallback_tfm);
	return err;
}

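/* Per-tfm teardown: drop the key, the fallback tfm and the PM reference. */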
void sun8i_ce_cipher_exit(struct crypto_tfm *tfm)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);

	kfree_sensitive(op->key);
	crypto_free_skcipher(op->fallback_tfm);
	pm_runtime_put_sync_suspend(op->ce->dev);
}

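/*
 * Set an AES key: accept only 128/192/256-bit lengths, keep a DMA-able
 * copy of the key and mirror it into the fallback tfm.
 */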
int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;

	switch (keylen) {
	case 128 / 8:
		break;
	case 192 / 8:
		break;
	case 256 / 8:
		break;
	default:
		dev_dbg(ce->dev, "ERROR: Invalid keylen %u\n", keylen);
		return -EINVAL;
	}
	kfree_sensitive(op->key);
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}

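/*
 * Set a DES3 key: validate it with verify_skcipher_des3_key(), then keep a
 * DMA-able copy and mirror it into the fallback tfm, as in the AES case.
 */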
int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des3_key(tfm, key);
	if (err)
		return err;

	kfree_sensitive(op->key);
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}