// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ce-hash.c - hardware cryptographic offloader for
 * Allwinner H3/A64/H5/H2+/H6/R40 SoC
 *
 * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com>
 *
 * This file adds support for MD5 and SHA1/SHA224/SHA256/SHA384/SHA512.
 *
 * You can find the datasheet in Documentation/arch/arm/sunxi.rst
 */

#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <linux/bottom_half.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "sun8i-ce.h"

static void sun8i_ce_hash_stat_fb_inc(struct crypto_ahash *tfm)
{
	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
		struct sun8i_ce_alg_template *algt __maybe_unused;
		struct ahash_alg *alg = crypto_ahash_alg(tfm);

		algt = container_of(alg, struct sun8i_ce_alg_template,
				    alg.hash.base);
		algt->stat_fb++;
	}
}

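/*
 * Set up a TFM: record the CE device, allocate the software fallback
 * ahash and size the request context so it can hold both our state and
 * the fallback's request.
 */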
int sun8i_ce_hash_init_tfm(struct crypto_ahash *tfm)
{
	struct sun8i_ce_hash_tfm_ctx *op = crypto_ahash_ctx(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(tfm);
	struct sun8i_ce_alg_template *algt;
	int err;

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
	op->ce = algt->ce;

	/* FALLBACK */
	op->fallback_tfm = crypto_alloc_ahash(crypto_ahash_alg_name(tfm), 0,
					      CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(algt->ce->dev, "Fallback driver could not be loaded\n");
		return PTR_ERR(op->fallback_tfm);
	}

	crypto_ahash_set_statesize(tfm,
				   crypto_ahash_statesize(op->fallback_tfm));

	crypto_ahash_set_reqsize(tfm,
				 sizeof(struct sun8i_ce_hash_reqctx) +
				 crypto_ahash_reqsize(op->fallback_tfm));

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
		memcpy(algt->fbname,
		       crypto_ahash_driver_name(op->fallback_tfm),
		       CRYPTO_MAX_ALG_NAME);

	err = pm_runtime_resume_and_get(op->ce->dev);
	if (err < 0)
		goto error_pm;
	return 0;
error_pm:
	crypto_free_ahash(op->fallback_tfm);
	return err;
}

void sun8i_ce_hash_exit_tfm(struct crypto_ahash *tfm)
{
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	crypto_free_ahash(tfmctx->fallback_tfm);
	pm_runtime_put_sync_suspend(tfmctx->ce->dev);
}

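/*
 * init/update/final/finup/export/import are delegated entirely to the
 * software fallback; the hardware is only used through digest(), which
 * processes a whole request in one pass.
 */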
int sun8i_ce_hash_init(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	memset(rctx, 0, sizeof(struct sun8i_ce_hash_reqctx));

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	ahash_request_set_callback(&rctx->fallback_req,
				   areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   areq->base.complete, areq->base.data);

	return crypto_ahash_init(&rctx->fallback_req);
}

int sun8i_ce_hash_export(struct ahash_request *areq, void *out)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	ahash_request_set_callback(&rctx->fallback_req,
				   areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   areq->base.complete, areq->base.data);

	return crypto_ahash_export(&rctx->fallback_req, out);
}

int sun8i_ce_hash_import(struct ahash_request *areq, const void *in)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	ahash_request_set_callback(&rctx->fallback_req,
				   areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   areq->base.complete, areq->base.data);

	return crypto_ahash_import(&rctx->fallback_req, in);
}

int sun8i_ce_hash_final(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	sun8i_ce_hash_stat_fb_inc(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	ahash_request_set_callback(&rctx->fallback_req,
				   areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   areq->base.complete, areq->base.data);
	ahash_request_set_crypt(&rctx->fallback_req, NULL, areq->result, 0);

	return crypto_ahash_final(&rctx->fallback_req);
}

int sun8i_ce_hash_update(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	ahash_request_set_callback(&rctx->fallback_req,
				   areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   areq->base.complete, areq->base.data);
	ahash_request_set_crypt(&rctx->fallback_req, areq->src, NULL, areq->nbytes);

	return crypto_ahash_update(&rctx->fallback_req);
}

int sun8i_ce_hash_finup(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	sun8i_ce_hash_stat_fb_inc(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	ahash_request_set_callback(&rctx->fallback_req,
				   areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   areq->base.complete, areq->base.data);
	ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result,
				areq->nbytes);

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int sun8i_ce_hash_digest_fb(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	sun8i_ce_hash_stat_fb_inc(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	ahash_request_set_callback(&rctx->fallback_req,
				   areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   areq->base.complete, areq->base.data);
	ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result,
				areq->nbytes);

	return crypto_ahash_digest(&rctx->fallback_req);
}

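/*
 * Decide whether a request must go to the software fallback: empty
 * messages, scatterlists with more entries than the task descriptor can
 * hold (one slot is reserved for the padding block), and source segments
 * whose length or offset is not 32-bit aligned cannot be handled by the
 * hardware.
 */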
static bool sun8i_ce_hash_need_fallback(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ce_alg_template *algt;
	struct scatterlist *sg;

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);

	if (areq->nbytes == 0) {
		if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
			algt->stat_fb_len0++;

		return true;
	}
	/* we need to reserve one SG for the padding block */
	if (sg_nents_for_len(areq->src, areq->nbytes) > MAX_SG - 1) {
		if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
			algt->stat_fb_maxsg++;

		return true;
	}
	sg = areq->src;
	while (sg) {
		if (sg->length % 4) {
			if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
				algt->stat_fb_srclen++;

			return true;
		}
		if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
			if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
				algt->stat_fb_srcali++;

			return true;
		}
		sg = sg_next(sg);
	}
	return false;
}

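/*
 * digest() entry point: use the fallback when the request cannot be
 * handled by the hardware, otherwise pick a flow and queue the request
 * on that flow's crypto engine.
 */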
int sun8i_ce_hash_digest(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct sun8i_ce_alg_template *algt;
	struct sun8i_ce_dev *ce;
	struct crypto_engine *engine;
	int e;

	if (sun8i_ce_hash_need_fallback(areq))
		return sun8i_ce_hash_digest_fb(areq);

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
	ce = algt->ce;

	e = sun8i_ce_get_engine_number(ce);
	rctx->flow = e;
	engine = ce->chanlist[e].engine;

	return crypto_transfer_hash_request_to_engine(engine, areq);
}

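/*
 * Build the standard MD5/SHA trailing padding in buf, starting at 32-bit
 * word index padi: a 0x80 byte, zero fill so that the length field ends
 * exactly on a block boundary, then the message length in bits
 * (little-endian for MD5, big-endian for SHA, 128-bit wide for
 * SHA-384/512). Returns the new word index, or 0 if the padding would
 * overflow bufsize bytes.
 */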
static u64 hash_pad(__le32 *buf, unsigned int bufsize, u64 padi, u64 byte_count, bool le, int bs)
{
	u64 fill, min_fill, j, k;
	__be64 *bebits;
	__le64 *lebits;

	j = padi;
	buf[j++] = cpu_to_le32(0x80);

	if (bs == 64) {
		fill = 64 - (byte_count % 64);
		min_fill = 2 * sizeof(u32) + sizeof(u32);
	} else {
		fill = 128 - (byte_count % 128);
		min_fill = 4 * sizeof(u32) + sizeof(u32);
	}

	if (fill < min_fill)
		fill += bs;

	k = j;
	j += (fill - min_fill) / sizeof(u32);
	if (j * 4 > bufsize) {
		pr_err("%s OVERFLOW %llu\n", __func__, j);
		return 0;
	}
	for (; k < j; k++)
		buf[k] = 0;

	if (le) {
		/* MD5 */
		lebits = (__le64 *)&buf[j];
		*lebits = cpu_to_le64(byte_count << 3);
		j += 2;
	} else {
		if (bs == 64) {
			/* sha1 sha224 sha256 */
			bebits = (__be64 *)&buf[j];
			*bebits = cpu_to_be64(byte_count << 3);
			j += 2;
		} else {
			/* sha384 sha512 */
			bebits = (__be64 *)&buf[j];
			*bebits = cpu_to_be64(byte_count >> 61);
			j += 2;
			bebits = (__be64 *)&buf[j];
			*bebits = cpu_to_be64(byte_count << 3);
			j += 2;
		}
	}
	if (j * 4 > bufsize) {
		pr_err("%s OVERFLOW %llu\n", __func__, j);
		return 0;
	}

	return j;
}

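/*
 * Run one digest request on the hardware: build a single CE task that
 * chains the source scatterlist plus a synthesised padding block, point
 * the destination at a bounce buffer for the digest, then kick the flow
 * and copy the result back. Called from the crypto engine, so the
 * request is always finalized here, even on error.
 */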
int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
{
	struct ahash_request *areq = container_of(breq, struct ahash_request, base);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct sun8i_ce_alg_template *algt;
	struct sun8i_ce_dev *ce;
	struct sun8i_ce_flow *chan;
	struct ce_task *cet;
	struct scatterlist *sg;
	int nr_sgs, flow, err;
	unsigned int len;
	u32 common;
	u64 byte_count;
	__le32 *bf;
	void *buf, *result;
	int j, i, todo;
	u64 bs;
	int digestsize;
	dma_addr_t addr_res, addr_pad;
	int ns = sg_nents_for_len(areq->src, areq->nbytes);

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
	ce = algt->ce;

	bs = crypto_ahash_blocksize(tfm);
	digestsize = crypto_ahash_digestsize(tfm);
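	/*
	 * SHA-224 and SHA-384 are truncated forms of SHA-256/SHA-512; the
	 * engine produces the full-size state, so reserve room for the full
	 * digest and copy back only the requested length at the end.
	 */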
	if (digestsize == SHA224_DIGEST_SIZE)
		digestsize = SHA256_DIGEST_SIZE;
	if (digestsize == SHA384_DIGEST_SIZE)
		digestsize = SHA512_DIGEST_SIZE;

	/* the padding could be up to two blocks. */
	buf = kcalloc(2, bs, GFP_KERNEL | GFP_DMA);
	if (!buf) {
		err = -ENOMEM;
		goto err_out;
	}
	bf = (__le32 *)buf;

	result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
	if (!result) {
		err = -ENOMEM;
		goto err_free_buf;
	}

	flow = rctx->flow;
	chan = &ce->chanlist[flow];

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
		algt->stat_req++;

	dev_dbg(ce->dev, "%s %s len=%d\n", __func__, crypto_tfm_alg_name(areq->base.tfm), areq->nbytes);

	cet = chan->tl;
	memset(cet, 0, sizeof(struct ce_task));

	cet->t_id = cpu_to_le32(flow);
	common = ce->variant->alg_hash[algt->ce_algo_id];
	common |= CE_COMM_INT;
	cet->t_common_ctl = cpu_to_le32(common);

	cet->t_sym_ctl = 0;
	cet->t_asym_ctl = 0;

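	/*
	 * Map the source scatterlist and fill the task's source slots; the
	 * descriptor stores each segment length in 32-bit words.
	 */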
	nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
	if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
		dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
		err = -EINVAL;
		goto err_free_result;
	}

	len = areq->nbytes;
	for_each_sg(areq->src, sg, nr_sgs, i) {
		cet->t_src[i].addr = desc_addr_val_le32(ce, sg_dma_address(sg));
		todo = min(len, sg_dma_len(sg));
		cet->t_src[i].len = cpu_to_le32(todo / 4);
		len -= todo;
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %d\n", len);
		err = -EINVAL;
		goto err_unmap_src;
	}
	addr_res = dma_map_single(ce->dev, result, digestsize, DMA_FROM_DEVICE);
	cet->t_dst[0].addr = desc_addr_val_le32(ce, addr_res);
	cet->t_dst[0].len = cpu_to_le32(digestsize / 4);
	if (dma_mapping_error(ce->dev, addr_res)) {
		dev_err(ce->dev, "DMA map dest\n");
		err = -EINVAL;
		goto err_unmap_src;
	}

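	/*
	 * Append the padding: hash_pad() writes the 0x80 marker, zero fill
	 * and the bit-length of the message into the bounce buffer, which
	 * then occupies one extra source slot (index i left by the loop
	 * above).
	 */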
	byte_count = areq->nbytes;
	j = 0;

	switch (algt->ce_algo_id) {
	case CE_ID_HASH_MD5:
		j = hash_pad(bf, 2 * bs, j, byte_count, true, bs);
		break;
	case CE_ID_HASH_SHA1:
	case CE_ID_HASH_SHA224:
	case CE_ID_HASH_SHA256:
		j = hash_pad(bf, 2 * bs, j, byte_count, false, bs);
		break;
	case CE_ID_HASH_SHA384:
	case CE_ID_HASH_SHA512:
		j = hash_pad(bf, 2 * bs, j, byte_count, false, bs);
		break;
	}
	if (!j) {
		err = -EINVAL;
		goto err_unmap_result;
	}

	addr_pad = dma_map_single(ce->dev, buf, j * 4, DMA_TO_DEVICE);
	cet->t_src[i].addr = desc_addr_val_le32(ce, addr_pad);
	cet->t_src[i].len = cpu_to_le32(j);
	if (dma_mapping_error(ce->dev, addr_pad)) {
		dev_err(ce->dev, "DMA error on padding SG\n");
		err = -EINVAL;
		goto err_unmap_result;
	}

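	/*
	 * The total length (data + padding) is expressed either in bits or
	 * in 32-bit words, depending on the SoC variant.
	 */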
	if (ce->variant->hash_t_dlen_in_bits)
		cet->t_dlen = cpu_to_le32((areq->nbytes + j * 4) * 8);
	else
		cet->t_dlen = cpu_to_le32(areq->nbytes / 4 + j);

	chan->timeout = areq->nbytes;

	err = sun8i_ce_run_task(ce, flow, crypto_ahash_alg_name(tfm));

	dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);

err_unmap_result:
	dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);
	if (!err)
		memcpy(areq->result, result, crypto_ahash_digestsize(tfm));

err_unmap_src:
	dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);

err_free_result:
	kfree(result);

err_free_buf:
	kfree(buf);

err_out:
	local_bh_disable();
	crypto_finalize_hash_request(engine, breq, err);
	local_bh_enable();

	return 0;
}