// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2021 Aspeed Technology Inc.
 */

#include "aspeed-hace.h"
#include <crypto/engine.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
#define AHASH_DBG(h, fmt, ...)	\
        dev_info((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
#else
#define AHASH_DBG(h, fmt, ...)	\
        dev_dbg((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
#endif

/* Initialization Vectors for SHA-family */
static const __be32 sha1_iv[8] = {
        cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
        cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
        cpu_to_be32(SHA1_H4), 0, 0, 0
};

static const __be32 sha224_iv[8] = {
        cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
        cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
        cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
        cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
};

static const __be32 sha256_iv[8] = {
        cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
        cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
        cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
        cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};

static const __be64 sha384_iv[8] = {
        cpu_to_be64(SHA384_H0), cpu_to_be64(SHA384_H1),
        cpu_to_be64(SHA384_H2), cpu_to_be64(SHA384_H3),
        cpu_to_be64(SHA384_H4), cpu_to_be64(SHA384_H5),
        cpu_to_be64(SHA384_H6), cpu_to_be64(SHA384_H7)
};

static const __be64 sha512_iv[8] = {
        cpu_to_be64(SHA512_H0), cpu_to_be64(SHA512_H1),
        cpu_to_be64(SHA512_H2), cpu_to_be64(SHA512_H3),
        cpu_to_be64(SHA512_H4), cpu_to_be64(SHA512_H5),
        cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7)
};
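
/*
 * Note (editorial, inferred from the code below): sha1_iv carries only
 * five hash words; the zero padding keeps all 32-bit IVs at the same
 * eight-word size, matching the fixed rctx->ivsize of 32 bytes used
 * for SHA-1/224/256 in aspeed_sham_init().  The digest buffer itself
 * is always DMA-mapped at SHA512_DIGEST_SIZE regardless of algorithm.
 */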

static int aspeed_sham_init(struct ahash_request *req);
static int aspeed_ahash_req_update(struct aspeed_hace_dev *hace_dev);

static int aspeed_sham_export(struct ahash_request *req, void *out)
{
        struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
        union {
                u8 *u8;
                u64 *u64;
        } p = { .u8 = out };

        memcpy(out, rctx->digest, rctx->ivsize);
        p.u8 += rctx->ivsize;
        put_unaligned(rctx->digcnt[0], p.u64++);
        if (rctx->ivsize == 64)
                put_unaligned(rctx->digcnt[1], p.u64);
        return 0;
}

static int aspeed_sham_import(struct ahash_request *req, const void *in)
{
        struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
        union {
                const u8 *u8;
                const u64 *u64;
        } p = { .u8 = in };
        int err;

        err = aspeed_sham_init(req);
        if (err)
                return err;

        memcpy(rctx->digest, in, rctx->ivsize);
        p.u8 += rctx->ivsize;
        rctx->digcnt[0] = get_unaligned(p.u64++);
        if (rctx->ivsize == 64)
                rctx->digcnt[1] = get_unaligned(p.u64);
        return 0;
}
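
/*
 * Exported/imported partial-state layout, as implemented above (shown
 * here for reference):
 *
 *   offset 0          : rctx->digest, rctx->ivsize bytes
 *                       (32 for SHA-1/224/256, 64 for SHA-384/512)
 *   offset ivsize     : digcnt[0], unaligned u64 byte count
 *   offset ivsize + 8 : digcnt[1], unaligned u64, 512-bit family only
 *
 * statesize is sizeof(struct aspeed_sham_reqctx), which is large
 * enough for any of these layouts.
 */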

/* The purpose of this padding is to ensure that the padded message is a
 * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
 * The bit "1" is appended at the end of the message followed by
 * "padlen-1" zero bits. Then a 64-bit block (SHA1/SHA224/SHA256) or
 * 128-bit block (SHA384/SHA512) equal to the message length in bits
 * is appended.
 *
 * For SHA1/SHA224/SHA256, padlen is calculated as follows:
 * - if message length < 56 bytes then padlen = 56 - message length
 * - else padlen = 64 + 56 - message length
 *
 * For SHA384/SHA512, padlen is calculated as follows:
 * - if message length < 112 bytes then padlen = 112 - message length
 * - else padlen = 128 + 112 - message length
 */
static int aspeed_ahash_fill_padding(struct aspeed_hace_dev *hace_dev,
                                     struct aspeed_sham_reqctx *rctx, u8 *buf)
{
        unsigned int index, padlen, bitslen;
        __be64 bits[2];

        AHASH_DBG(hace_dev, "rctx flags:0x%x\n", (u32)rctx->flags);

        switch (rctx->flags & SHA_FLAGS_MASK) {
        case SHA_FLAGS_SHA1:
        case SHA_FLAGS_SHA224:
        case SHA_FLAGS_SHA256:
                bits[0] = cpu_to_be64(rctx->digcnt[0] << 3);
                index = rctx->digcnt[0] & 0x3f;
                padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
                bitslen = 8;
                break;
        default:
                bits[1] = cpu_to_be64(rctx->digcnt[0] << 3);
                bits[0] = cpu_to_be64(rctx->digcnt[1] << 3 |
                                      rctx->digcnt[0] >> 61);
                index = rctx->digcnt[0] & 0x7f;
                padlen = (index < 112) ? (112 - index) : ((128 + 112) - index);
                bitslen = 16;
                break;
        }
        buf[0] = 0x80;
        memset(buf + 1, 0, padlen - 1);
        memcpy(buf + padlen, bits, bitslen);
        return padlen + bitslen;
}
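
/*
 * Worked example (for illustration only): after hashing 3 bytes with
 * SHA-256, index = 3, so padlen = 56 - 3 = 53 and bitslen = 8.  The
 * function emits 0x80, 52 zero bytes, then the 64-bit big-endian bit
 * count (3 << 3 = 24), and returns 61, completing a single 64-byte
 * block.
 */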

static void aspeed_ahash_update_counter(struct aspeed_sham_reqctx *rctx,
                                        unsigned int len)
{
        rctx->offset += len;
        rctx->digcnt[0] += len;
        if (rctx->digcnt[0] < len)
                rctx->digcnt[1]++;
}

/*
 * Prepare DMA buffer before hardware engine
 * processing.
 */
static int aspeed_ahash_dma_prepare(struct aspeed_hace_dev *hace_dev)
{
        struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
        struct ahash_request *req = hash_engine->req;
        struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
        unsigned int length, remain;
        bool final = false;

        length = rctx->total - rctx->offset;
        remain = length - round_down(length, rctx->block_size);

        AHASH_DBG(hace_dev, "length:0x%x, remain:0x%x\n", length, remain);

        if (length > ASPEED_HASH_SRC_DMA_BUF_LEN)
                length = ASPEED_HASH_SRC_DMA_BUF_LEN;
        else if (rctx->flags & SHA_FLAGS_FINUP) {
                if (round_up(length, rctx->block_size) + rctx->block_size >
                    ASPEED_CRYPTO_SRC_DMA_BUF_LEN)
                        length = round_down(length - 1, rctx->block_size);
                else
                        final = true;
        } else
                length -= remain;
        scatterwalk_map_and_copy(hash_engine->ahash_src_addr, rctx->src_sg,
                                 rctx->offset, length, 0);
        aspeed_ahash_update_counter(rctx, length);
        if (final)
                length += aspeed_ahash_fill_padding(
                        hace_dev, rctx, hash_engine->ahash_src_addr + length);

        rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
                                               SHA512_DIGEST_SIZE,
                                               DMA_BIDIRECTIONAL);
        if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
                dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
                return -ENOMEM;
        }

        hash_engine->src_length = length;
        hash_engine->src_dma = hash_engine->ahash_src_dma_addr;
        hash_engine->digest_dma = rctx->digest_dma_addr;

        return 0;
}
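
/*
 * Illustrative example of the non-final trimming above: a 100-byte
 * update with a 64-byte block size copies and hashes only 64 bytes.
 * The leftover 36 bytes are reported back through the value passed to
 * crypto_finalize_hash_request() in aspeed_ahash_complete(), since
 * block-only ahash drivers let the crypto core carry partial blocks.
 */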

/*
 * Prepare DMA buffer as SG list buffer before
 * hardware engine processing.
 */
static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev)
{
        struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
        struct ahash_request *req = hash_engine->req;
        struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
        bool final = rctx->flags & SHA_FLAGS_FINUP;
        int remain, sg_len, i, max_sg_nents;
        unsigned int length, offset, total;
        struct aspeed_sg_list *src_list;
        struct scatterlist *s;
        int rc = 0;

        offset = rctx->offset;
        length = rctx->total - offset;
        remain = final ? 0 : length - round_down(length, rctx->block_size);
        length -= remain;

        AHASH_DBG(hace_dev, "%s:0x%x, %s:0x%x, %s:0x%x\n",
                  "rctx total", rctx->total,
                  "length", length, "remain", remain);

        sg_len = dma_map_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
                            DMA_TO_DEVICE);
        if (!sg_len) {
                dev_warn(hace_dev->dev, "dma_map_sg() src error\n");
                rc = -ENOMEM;
                goto end;
        }

        max_sg_nents = ASPEED_HASH_SRC_DMA_BUF_LEN / sizeof(*src_list) - final;
        sg_len = min(sg_len, max_sg_nents);
        src_list = (struct aspeed_sg_list *)hash_engine->ahash_src_addr;
        rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
                                               SHA512_DIGEST_SIZE,
                                               DMA_BIDIRECTIONAL);
        if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
                dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
                rc = -ENOMEM;
                goto free_src_sg;
        }

        total = 0;
        for_each_sg(rctx->src_sg, s, sg_len, i) {
                u32 phy_addr = sg_dma_address(s);
                u32 len = sg_dma_len(s);

                if (len <= offset) {
                        offset -= len;
                        continue;
                }

                len -= offset;
                phy_addr += offset;
                offset = 0;

                if (length > len)
                        length -= len;
                else {
                        /* Last sg list */
                        len = length;
                        length = 0;
                }

                total += len;
                src_list[i].phy_addr = cpu_to_le32(phy_addr);
                src_list[i].len = cpu_to_le32(len);
        }

        if (length != 0) {
                total = round_down(total, rctx->block_size);
                final = false;
        }

        aspeed_ahash_update_counter(rctx, total);
        if (final) {
                int len = aspeed_ahash_fill_padding(hace_dev, rctx,
                                                    rctx->buffer);

                total += len;
                rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
                                                       rctx->buffer,
                                                       sizeof(rctx->buffer),
                                                       DMA_TO_DEVICE);
                if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
                        dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
                        rc = -ENOMEM;
                        goto free_rctx_digest;
                }

                src_list[i].phy_addr = cpu_to_le32(rctx->buffer_dma_addr);
                src_list[i].len = cpu_to_le32(len);
                i++;
        }
        src_list[i - 1].len |= cpu_to_le32(HASH_SG_LAST_LIST);

        hash_engine->src_length = total;
        hash_engine->src_dma = hash_engine->ahash_src_dma_addr;
        hash_engine->digest_dma = rctx->digest_dma_addr;

        return 0;

free_rctx_digest:
        dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
                         SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
free_src_sg:
        dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
                     DMA_TO_DEVICE);
end:
        return rc;
}
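
/*
 * Note on max_sg_nents above: when this pass is final, one descriptor
 * slot is reserved (the "- final" term) so that the padding block in
 * rctx->buffer can be appended as the last aspeed_sg_list entry and
 * tagged with HASH_SG_LAST_LIST.
 */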

static int aspeed_ahash_complete(struct aspeed_hace_dev *hace_dev)
{
        struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
        struct ahash_request *req = hash_engine->req;
        struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);

        AHASH_DBG(hace_dev, "\n");

        dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
                         SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);

        if (rctx->total - rctx->offset >= rctx->block_size ||
            (rctx->total != rctx->offset && rctx->flags & SHA_FLAGS_FINUP))
                return aspeed_ahash_req_update(hace_dev);

        hash_engine->flags &= ~CRYPTO_FLAGS_BUSY;

        if (rctx->flags & SHA_FLAGS_FINUP)
                memcpy(req->result, rctx->digest, rctx->digsize);

        crypto_finalize_hash_request(hace_dev->crypt_engine_hash, req,
                                     rctx->total - rctx->offset);

        return 0;
}

/*
 * Trigger hardware engines to do the math.
 */
static int aspeed_hace_ahash_trigger(struct aspeed_hace_dev *hace_dev,
                                     aspeed_hace_fn_t resume)
{
        struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
        struct ahash_request *req = hash_engine->req;
        struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);

        AHASH_DBG(hace_dev, "src_dma:%pad, digest_dma:%pad, length:%zu\n",
                  &hash_engine->src_dma, &hash_engine->digest_dma,
                  hash_engine->src_length);

        rctx->cmd |= HASH_CMD_INT_ENABLE;
        hash_engine->resume = resume;

        ast_hace_write(hace_dev, hash_engine->src_dma, ASPEED_HACE_HASH_SRC);
        ast_hace_write(hace_dev, hash_engine->digest_dma,
                       ASPEED_HACE_HASH_DIGEST_BUFF);
        ast_hace_write(hace_dev, hash_engine->digest_dma,
                       ASPEED_HACE_HASH_KEY_BUFF);
        ast_hace_write(hace_dev, hash_engine->src_length,
                       ASPEED_HACE_HASH_DATA_LEN);

        /* Memory barrier to ensure all data setup before engine starts */
        mb();

        ast_hace_write(hace_dev, rctx->cmd, ASPEED_HACE_HASH_CMD);

        return -EINPROGRESS;
}

static int aspeed_ahash_update_resume_sg(struct aspeed_hace_dev *hace_dev)
{
        struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
        struct ahash_request *req = hash_engine->req;
        struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);

        AHASH_DBG(hace_dev, "\n");

        dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
                     DMA_TO_DEVICE);

        if (rctx->flags & SHA_FLAGS_FINUP && rctx->total == rctx->offset)
                dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
                                 sizeof(rctx->buffer), DMA_TO_DEVICE);

        rctx->cmd &= ~HASH_CMD_HASH_SRC_SG_CTRL;

        return aspeed_ahash_complete(hace_dev);
}

static int aspeed_ahash_req_update(struct aspeed_hace_dev *hace_dev)
{
        struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
        struct ahash_request *req = hash_engine->req;
        struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
        aspeed_hace_fn_t resume;
        int ret;

        AHASH_DBG(hace_dev, "\n");

        if (hace_dev->version == AST2600_VERSION) {
                rctx->cmd |= HASH_CMD_HASH_SRC_SG_CTRL;
                resume = aspeed_ahash_update_resume_sg;
        } else {
                resume = aspeed_ahash_complete;
        }

        ret = hash_engine->dma_prepare(hace_dev);
        if (ret)
                return ret;

        return aspeed_hace_ahash_trigger(hace_dev, resume);
}

static int aspeed_hace_hash_handle_queue(struct aspeed_hace_dev *hace_dev,
                                         struct ahash_request *req)
{
        return crypto_transfer_hash_request_to_engine(
                        hace_dev->crypt_engine_hash, req);
}

static noinline int aspeed_ahash_fallback(struct ahash_request *req)
{
        struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
        HASH_FBREQ_ON_STACK(fbreq, req);
        u8 *state = rctx->buffer;
        struct scatterlist sg[2];
        struct scatterlist *ssg;
        int ret;

        ssg = scatterwalk_ffwd(sg, req->src, rctx->offset);
        ahash_request_set_crypt(fbreq, ssg, req->result,
                                rctx->total - rctx->offset);

        ret = aspeed_sham_export(req, state) ?:
              crypto_ahash_import_core(fbreq, state);

        if (rctx->flags & SHA_FLAGS_FINUP)
                ret = ret ?: crypto_ahash_finup(fbreq);
        else
                ret = ret ?: crypto_ahash_update(fbreq) ?:
                             crypto_ahash_export_core(fbreq, state) ?:
                             aspeed_sham_import(req, state);
        HASH_REQUEST_ZERO(fbreq);
        return ret;
}
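
/*
 * The fallback path above hands unfinished work to the software
 * implementation: it exports the hardware state, imports it into the
 * on-stack fallback request, runs finup() or update() there, and (for
 * a plain update) imports the software state back so the hardware can
 * resume later.  rctx->buffer is reused as scratch space for the
 * exported state.
 */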

static int aspeed_ahash_do_request(struct crypto_engine *engine, void *areq)
{
        struct ahash_request *req = ahash_request_cast(areq);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
        struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
        struct aspeed_engine_hash *hash_engine;
        int ret;

        hash_engine = &hace_dev->hash_engine;
        hash_engine->flags |= CRYPTO_FLAGS_BUSY;

        ret = aspeed_ahash_req_update(hace_dev);
        if (ret != -EINPROGRESS)
                return aspeed_ahash_fallback(req);

        return 0;
}

static void aspeed_ahash_prepare_request(struct crypto_engine *engine,
                                         void *areq)
{
        struct ahash_request *req = ahash_request_cast(areq);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
        struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
        struct aspeed_engine_hash *hash_engine;

        hash_engine = &hace_dev->hash_engine;
        hash_engine->req = req;

        if (hace_dev->version == AST2600_VERSION)
                hash_engine->dma_prepare = aspeed_ahash_dma_prepare_sg;
        else
                hash_engine->dma_prepare = aspeed_ahash_dma_prepare;
}

static int aspeed_ahash_do_one(struct crypto_engine *engine, void *areq)
{
        aspeed_ahash_prepare_request(engine, areq);
        return aspeed_ahash_do_request(engine, areq);
}

static int aspeed_sham_update(struct ahash_request *req)
{
        struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
        struct aspeed_hace_dev *hace_dev = tctx->hace_dev;

        AHASH_DBG(hace_dev, "req->nbytes: %d\n", req->nbytes);

        rctx->total = req->nbytes;
        rctx->src_sg = req->src;
        rctx->offset = 0;
        rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);

        return aspeed_hace_hash_handle_queue(hace_dev, req);
}

static int aspeed_sham_finup(struct ahash_request *req)
{
        struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
        struct aspeed_hace_dev *hace_dev = tctx->hace_dev;

        AHASH_DBG(hace_dev, "req->nbytes: %d\n", req->nbytes);

        rctx->flags |= SHA_FLAGS_FINUP;

        return aspeed_sham_update(req);
}

static int aspeed_sham_init(struct ahash_request *req)
{
        struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
        struct aspeed_hace_dev *hace_dev = tctx->hace_dev;

        AHASH_DBG(hace_dev, "%s: digest size:%d\n",
                  crypto_tfm_alg_name(&tfm->base),
                  crypto_ahash_digestsize(tfm));

        rctx->cmd = HASH_CMD_ACC_MODE;
        rctx->flags = 0;

        switch (crypto_ahash_digestsize(tfm)) {
        case SHA1_DIGEST_SIZE:
                rctx->cmd |= HASH_CMD_SHA1 | HASH_CMD_SHA_SWAP;
                rctx->flags |= SHA_FLAGS_SHA1;
                rctx->digsize = SHA1_DIGEST_SIZE;
                rctx->block_size = SHA1_BLOCK_SIZE;
                rctx->ivsize = 32;
                memcpy(rctx->digest, sha1_iv, rctx->ivsize);
                break;
        case SHA224_DIGEST_SIZE:
                rctx->cmd |= HASH_CMD_SHA224 | HASH_CMD_SHA_SWAP;
                rctx->flags |= SHA_FLAGS_SHA224;
                rctx->digsize = SHA224_DIGEST_SIZE;
                rctx->block_size = SHA224_BLOCK_SIZE;
                rctx->ivsize = 32;
                memcpy(rctx->digest, sha224_iv, rctx->ivsize);
                break;
        case SHA256_DIGEST_SIZE:
                rctx->cmd |= HASH_CMD_SHA256 | HASH_CMD_SHA_SWAP;
                rctx->flags |= SHA_FLAGS_SHA256;
                rctx->digsize = SHA256_DIGEST_SIZE;
                rctx->block_size = SHA256_BLOCK_SIZE;
                rctx->ivsize = 32;
                memcpy(rctx->digest, sha256_iv, rctx->ivsize);
                break;
        case SHA384_DIGEST_SIZE:
                rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA384 |
                             HASH_CMD_SHA_SWAP;
                rctx->flags |= SHA_FLAGS_SHA384;
                rctx->digsize = SHA384_DIGEST_SIZE;
                rctx->block_size = SHA384_BLOCK_SIZE;
                rctx->ivsize = 64;
                memcpy(rctx->digest, sha384_iv, rctx->ivsize);
                break;
        case SHA512_DIGEST_SIZE:
                rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512 |
                             HASH_CMD_SHA_SWAP;
                rctx->flags |= SHA_FLAGS_SHA512;
                rctx->digsize = SHA512_DIGEST_SIZE;
                rctx->block_size = SHA512_BLOCK_SIZE;
                rctx->ivsize = 64;
                memcpy(rctx->digest, sha512_iv, rctx->ivsize);
                break;
        default:
                dev_warn(tctx->hace_dev->dev, "digest size %d not supported\n",
                         crypto_ahash_digestsize(tfm));
                return -EINVAL;
        }

        rctx->total = 0;
        rctx->digcnt[0] = 0;
        rctx->digcnt[1] = 0;

        return 0;
}

static int aspeed_sham_digest(struct ahash_request *req)
{
        return aspeed_sham_init(req) ?: aspeed_sham_finup(req);
}

static int aspeed_sham_cra_init(struct crypto_ahash *tfm)
{
        struct ahash_alg *alg = crypto_ahash_alg(tfm);
        struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
        struct aspeed_hace_alg *ast_alg;

        ast_alg = container_of(alg, struct aspeed_hace_alg, alg.ahash.base);
        tctx->hace_dev = ast_alg->hace_dev;

        return 0;
}

static struct aspeed_hace_alg aspeed_ahash_algs[] = {
        {
                .alg.ahash.base = {
                        .init = aspeed_sham_init,
                        .update = aspeed_sham_update,
                        .finup = aspeed_sham_finup,
                        .digest = aspeed_sham_digest,
                        .export = aspeed_sham_export,
                        .import = aspeed_sham_import,
                        .init_tfm = aspeed_sham_cra_init,
                        .halg = {
                                .digestsize = SHA1_DIGEST_SIZE,
                                .statesize = sizeof(struct aspeed_sham_reqctx),
                                .base = {
                                        .cra_name = "sha1",
                                        .cra_driver_name = "aspeed-sha1",
                                        .cra_priority = 300,
                                        .cra_flags = CRYPTO_ALG_TYPE_AHASH |
                                                     CRYPTO_ALG_ASYNC |
                                                     CRYPTO_AHASH_ALG_BLOCK_ONLY |
                                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                                        .cra_blocksize = SHA1_BLOCK_SIZE,
                                        .cra_ctxsize = sizeof(struct aspeed_sham_ctx),
                                        .cra_reqsize = sizeof(struct aspeed_sham_reqctx),
                                        .cra_alignmask = 0,
                                        .cra_module = THIS_MODULE,
                                }
                        }
                },
                .alg.ahash.op = {
                        .do_one_request = aspeed_ahash_do_one,
                },
        },
        {
                .alg.ahash.base = {
                        .init = aspeed_sham_init,
                        .update = aspeed_sham_update,
                        .finup = aspeed_sham_finup,
                        .digest = aspeed_sham_digest,
                        .export = aspeed_sham_export,
                        .import = aspeed_sham_import,
                        .init_tfm = aspeed_sham_cra_init,
                        .halg = {
                                .digestsize = SHA256_DIGEST_SIZE,
                                .statesize = sizeof(struct aspeed_sham_reqctx),
                                .base = {
                                        .cra_name = "sha256",
                                        .cra_driver_name = "aspeed-sha256",
                                        .cra_priority = 300,
                                        .cra_flags = CRYPTO_ALG_TYPE_AHASH |
                                                     CRYPTO_ALG_ASYNC |
                                                     CRYPTO_AHASH_ALG_BLOCK_ONLY |
                                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                                        .cra_blocksize = SHA256_BLOCK_SIZE,
                                        .cra_ctxsize = sizeof(struct aspeed_sham_ctx),
                                        .cra_reqsize = sizeof(struct aspeed_sham_reqctx),
                                        .cra_alignmask = 0,
                                        .cra_module = THIS_MODULE,
                                }
                        }
                },
                .alg.ahash.op = {
                        .do_one_request = aspeed_ahash_do_one,
                },
        },
        {
                .alg.ahash.base = {
                        .init = aspeed_sham_init,
                        .update = aspeed_sham_update,
                        .finup = aspeed_sham_finup,
                        .digest = aspeed_sham_digest,
                        .export = aspeed_sham_export,
                        .import = aspeed_sham_import,
                        .init_tfm = aspeed_sham_cra_init,
                        .halg = {
                                .digestsize = SHA224_DIGEST_SIZE,
                                .statesize = sizeof(struct aspeed_sham_reqctx),
                                .base = {
                                        .cra_name = "sha224",
                                        .cra_driver_name = "aspeed-sha224",
                                        .cra_priority = 300,
                                        .cra_flags = CRYPTO_ALG_TYPE_AHASH |
                                                     CRYPTO_ALG_ASYNC |
                                                     CRYPTO_AHASH_ALG_BLOCK_ONLY |
                                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                                        .cra_blocksize = SHA224_BLOCK_SIZE,
                                        .cra_ctxsize = sizeof(struct aspeed_sham_ctx),
                                        .cra_reqsize = sizeof(struct aspeed_sham_reqctx),
                                        .cra_alignmask = 0,
                                        .cra_module = THIS_MODULE,
                                }
                        }
                },
                .alg.ahash.op = {
                        .do_one_request = aspeed_ahash_do_one,
                },
        },
};

static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
        {
                .alg.ahash.base = {
                        .init = aspeed_sham_init,
                        .update = aspeed_sham_update,
                        .finup = aspeed_sham_finup,
                        .digest = aspeed_sham_digest,
                        .export = aspeed_sham_export,
                        .import = aspeed_sham_import,
                        .init_tfm = aspeed_sham_cra_init,
                        .halg = {
                                .digestsize = SHA384_DIGEST_SIZE,
                                .statesize = sizeof(struct aspeed_sham_reqctx),
                                .base = {
                                        .cra_name = "sha384",
                                        .cra_driver_name = "aspeed-sha384",
                                        .cra_priority = 300,
                                        .cra_flags = CRYPTO_ALG_TYPE_AHASH |
                                                     CRYPTO_ALG_ASYNC |
                                                     CRYPTO_AHASH_ALG_BLOCK_ONLY |
                                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                                        .cra_blocksize = SHA384_BLOCK_SIZE,
                                        .cra_ctxsize = sizeof(struct aspeed_sham_ctx),
                                        .cra_reqsize = sizeof(struct aspeed_sham_reqctx),
                                        .cra_alignmask = 0,
                                        .cra_module = THIS_MODULE,
                                }
                        }
                },
                .alg.ahash.op = {
                        .do_one_request = aspeed_ahash_do_one,
                },
        },
        {
                .alg.ahash.base = {
                        .init = aspeed_sham_init,
                        .update = aspeed_sham_update,
                        .finup = aspeed_sham_finup,
                        .digest = aspeed_sham_digest,
                        .export = aspeed_sham_export,
                        .import = aspeed_sham_import,
                        .init_tfm = aspeed_sham_cra_init,
                        .halg = {
                                .digestsize = SHA512_DIGEST_SIZE,
                                .statesize = sizeof(struct aspeed_sham_reqctx),
                                .base = {
                                        .cra_name = "sha512",
                                        .cra_driver_name = "aspeed-sha512",
                                        .cra_priority = 300,
                                        .cra_flags = CRYPTO_ALG_TYPE_AHASH |
                                                     CRYPTO_ALG_ASYNC |
                                                     CRYPTO_AHASH_ALG_BLOCK_ONLY |
                                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                                        .cra_blocksize = SHA512_BLOCK_SIZE,
                                        .cra_ctxsize = sizeof(struct aspeed_sham_ctx),
                                        .cra_reqsize = sizeof(struct aspeed_sham_reqctx),
                                        .cra_alignmask = 0,
                                        .cra_module = THIS_MODULE,
                                }
                        }
                },
                .alg.ahash.op = {
                        .do_one_request = aspeed_ahash_do_one,
                },
        },
};
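
/*
 * Userspace sketch (illustrative only, not part of the driver): once
 * one of the algorithms above is registered, it can be exercised from
 * userspace through the standard AF_ALG interface.  The salg_name
 * below matches the cra_name fields above; on this hardware the
 * request is served by the corresponding "aspeed-*" driver when it
 * wins priority selection.
 *
 *      int tfmfd, opfd;
 *      struct sockaddr_alg sa = {
 *              .salg_family = AF_ALG,
 *              .salg_type   = "hash",
 *              .salg_name   = "sha256",
 *      };
 *      unsigned char digest[32];
 *
 *      tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *      bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *      opfd = accept(tfmfd, NULL, 0);
 *      write(opfd, "abc", 3);
 *      read(opfd, digest, sizeof(digest));
 */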

void aspeed_unregister_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs); i++)
                crypto_engine_unregister_ahash(&aspeed_ahash_algs[i].alg.ahash);

        if (hace_dev->version != AST2600_VERSION)
                return;

        for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++)
                crypto_engine_unregister_ahash(&aspeed_ahash_algs_g6[i].alg.ahash);
}

void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
{
        int rc, i;

        AHASH_DBG(hace_dev, "\n");

        for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs); i++) {
                aspeed_ahash_algs[i].hace_dev = hace_dev;
                rc = crypto_engine_register_ahash(&aspeed_ahash_algs[i].alg.ahash);
                if (rc) {
                        AHASH_DBG(hace_dev, "Failed to register %s\n",
                                  aspeed_ahash_algs[i].alg.ahash.base.halg.base.cra_name);
                }
        }

        if (hace_dev->version != AST2600_VERSION)
                return;

        for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++) {
                aspeed_ahash_algs_g6[i].hace_dev = hace_dev;
                rc = crypto_engine_register_ahash(&aspeed_ahash_algs_g6[i].alg.ahash);
                if (rc) {
                        AHASH_DBG(hace_dev, "Failed to register %s\n",
                                  aspeed_ahash_algs_g6[i].alg.ahash.base.halg.base.cra_name);
                }
        }
}