// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the implementation of the ahash (asynchronous hash) API.  It
 * differs from shash (synchronous hash) in that ahash supports asynchronous
 * operations, and it hashes data from scatterlists instead of virtually
 * addressed buffers.
 *
 * The ahash API provides access to both ahash and shash algorithms; the
 * shash API only provides access to shash algorithms.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 */
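
/*
 * A minimal usage sketch (illustrative only; "sha256", the buffer names and
 * the abbreviated error handling are assumptions, not part of this file).
 * It hashes a linear buffer through the ahash API and waits synchronously
 * for completion:
 *
 *	struct crypto_ahash *tfm;
 *	struct ahash_request *req;
 *	struct scatterlist sg;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	u8 out[32];
 *	int err;
 *
 *	tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, &sg, out, len);
 *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */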

#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/string_choices.h>
#include <net/netlink.h>

#include "hash.h"

#define CRYPTO_ALG_TYPE_AHASH_MASK	0x0000000e

struct crypto_hash_walk {
	const char *data;

	unsigned int offset;
	unsigned int flags;

	struct page *pg;
	unsigned int entrylen;

	unsigned int total;
	struct scatterlist *sg;
};
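
/*
 * The walk state above is advanced by crypto_hash_walk_first() and
 * crypto_hash_walk_done() further down: each step kmaps one page's worth
 * of the current scatterlist entry and leaves the number of contiguous
 * bytes available at walk->data for the caller to hash.
 */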

struct ahash_save_req_state {
	struct list_head head;
	struct ahash_request *req0;
	struct ahash_request *cur;
	int (*op)(struct ahash_request *req);
	crypto_completion_t compl;
	void *data;
	struct scatterlist sg;
	const u8 *src;
	u8 *page;
	unsigned int offset;
	unsigned int nbytes;
};
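
/*
 * Bookkeeping shared by the request-chaining and default-implementation
 * paths: req0 is the original request whose completion callback was saved
 * in compl/data, cur is the request currently being processed, and page
 * (mapped by sg) is the bounce buffer through which virtually addressed
 * input is fed to drivers in PAGE_SIZE chunks.
 */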

static void ahash_reqchain_done(void *data, int err);
static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt);
static void ahash_restore_req(struct ahash_request *req);
static void ahash_def_finup_done1(void *data, int err);
static int ahash_def_finup_finish1(struct ahash_request *req, int err);
static int ahash_def_finup(struct ahash_request *req);

static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	walk->data = kmap_local_page(walk->pg);
	walk->data += offset;
	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->offset = sg->offset;
	walk->pg = nth_page(sg_page(walk->sg), (walk->offset >> PAGE_SHIFT));
	walk->offset = offset_in_page(walk->offset);
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

static int crypto_hash_walk_first(struct ahash_request *req,
				  struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;
	walk->entrylen = 0;

	if (!walk->total)
		return 0;

	walk->flags = req->base.flags;

	if (ahash_request_isvirt(req)) {
		walk->data = req->svirt;
		walk->total = 0;
		return req->nbytes;
	}

	walk->sg = req->src;

	return hash_walk_new_entry(walk);
}

static int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	if (walk->flags & CRYPTO_AHASH_REQ_VIRT)
		return err;

	walk->data -= walk->offset;

	kunmap_local(walk->data);
	crypto_yield(walk->flags);

	if (err)
		return err;

	if (walk->entrylen) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}

static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk)
{
	return !(walk->entrylen | walk->total);
}

/*
 * For an ahash tfm that is using an shash algorithm (instead of an ahash
 * algorithm), this returns the underlying shash tfm.
 */
static inline struct crypto_shash *ahash_to_shash(struct crypto_ahash *tfm)
{
	return *(struct crypto_shash **)crypto_ahash_ctx(tfm);
}

static inline struct shash_desc *prepare_shash_desc(struct ahash_request *req,
						    struct crypto_ahash *tfm)
{
	struct shash_desc *desc = ahash_request_ctx(req);

	desc->tfm = ahash_to_shash(tfm);
	return desc;
}

int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc)
{
	struct crypto_hash_walk walk;
	int nbytes;

	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
	     nbytes = crypto_hash_walk_done(&walk, nbytes))
		nbytes = crypto_shash_update(desc, walk.data, nbytes);

	return nbytes;
}
EXPORT_SYMBOL_GPL(shash_ahash_update);

int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc)
{
	struct crypto_hash_walk walk;
	int nbytes;

	nbytes = crypto_hash_walk_first(req, &walk);
	if (!nbytes)
		return crypto_shash_final(desc, req->result);

	do {
		nbytes = crypto_hash_walk_last(&walk) ?
			 crypto_shash_finup(desc, walk.data, nbytes,
					    req->result) :
			 crypto_shash_update(desc, walk.data, nbytes);
		nbytes = crypto_hash_walk_done(&walk, nbytes);
	} while (nbytes > 0);

	return nbytes;
}
EXPORT_SYMBOL_GPL(shash_ahash_finup);

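/*
 * Single-shot digest: if the input is virtually addressed, or sits in one
 * scatterlist entry (and, under CONFIG_HIGHMEM, within one page), it can be
 * hashed with a single crypto_shash_digest() call; anything larger falls
 * back to an init + finup walk over the scatterlist.
 */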
int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
{
	unsigned int nbytes = req->nbytes;
	struct scatterlist *sg;
	unsigned int offset;
	struct page *page;
	const u8 *data;
	int err;

	data = req->svirt;
	if (!nbytes || ahash_request_isvirt(req))
		return crypto_shash_digest(desc, data, nbytes, req->result);

	sg = req->src;
	if (nbytes > sg->length)
		return crypto_shash_init(desc) ?:
		       shash_ahash_finup(req, desc);

	page = sg_page(sg);
	offset = sg->offset;
	data = lowmem_page_address(page) + offset;
	if (!IS_ENABLED(CONFIG_HIGHMEM))
		return crypto_shash_digest(desc, data, nbytes, req->result);

	page = nth_page(page, offset >> PAGE_SHIFT);
	offset = offset_in_page(offset);

	if (nbytes > (unsigned int)PAGE_SIZE - offset)
		return crypto_shash_init(desc) ?:
		       shash_ahash_finup(req, desc);

	data = kmap_local_page(page);
	err = crypto_shash_digest(desc, data + offset, nbytes,
				  req->result);
	kunmap_local(data);
	return err;
}
EXPORT_SYMBOL_GPL(shash_ahash_digest);

static void crypto_exit_ahash_using_shash(struct crypto_tfm *tfm)
{
	struct crypto_shash **ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(*ctx);
}

static int crypto_init_ahash_using_shash(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_ahash *crt = __crypto_ahash_cast(tfm);
	struct crypto_shash **ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *shash;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	shash = crypto_create_tfm(calg, &crypto_shash_type);
	if (IS_ERR(shash)) {
		crypto_mod_put(calg);
		return PTR_ERR(shash);
	}

	crt->using_shash = true;
	*ctx = shash;
	tfm->exit = crypto_exit_ahash_using_shash;

	crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) &
				    CRYPTO_TFM_NEED_KEY);
	crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash);

	return 0;
}

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

static void ahash_set_needkey(struct crypto_ahash *tfm, struct ahash_alg *alg)
{
	if (alg->setkey != ahash_nosetkey &&
	    !(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
		crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

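/*
 * Set the key and clear CRYPTO_TFM_NEED_KEY on success.  On failure the
 * flag is re-armed, so operations that require a key keep failing with
 * -ENOKEY until a valid key has been set.
 */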
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	if (likely(tfm->using_shash)) {
		struct crypto_shash *shash = ahash_to_shash(tfm);
		int err;

		err = crypto_shash_setkey(shash, key, keylen);
		if (unlikely(err)) {
			crypto_ahash_set_flags(tfm,
					       crypto_shash_get_flags(shash) &
					       CRYPTO_TFM_NEED_KEY);
			return err;
		}
	} else {
		struct ahash_alg *alg = crypto_ahash_alg(tfm);
		int err;

		err = alg->setkey(tfm, key, keylen);
		if (unlikely(err)) {
			ahash_set_needkey(tfm, alg);
			return err;
		}
	}
	crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static bool ahash_request_hasvirt(struct ahash_request *req)
{
	return ahash_request_isvirt(req);
}

static int ahash_reqchain_virt(struct ahash_save_req_state *state,
			       int err, u32 mask)
{
	struct ahash_request *req = state->cur;

	for (;;) {
		unsigned int len = state->nbytes;

		req->base.err = err;

		if (!state->offset)
			break;

		if (state->offset == len || err) {
			u8 *result = req->result;

			ahash_request_set_virt(req, state->src, result, len);
			state->offset = 0;
			break;
		}

		len -= state->offset;

		len = min(PAGE_SIZE, len);
		memcpy(state->page, state->src + state->offset, len);
		state->offset += len;
		req->nbytes = len;

		err = state->op(req);
		if (err == -EINPROGRESS) {
			if (!list_empty(&state->head) ||
			    state->offset < state->nbytes)
				err = -EBUSY;
			break;
		}

		if (err == -EBUSY)
			break;
	}

	return err;
}

static int ahash_reqchain_finish(struct ahash_request *req0,
				 struct ahash_save_req_state *state,
				 int err, u32 mask)
{
	struct ahash_request *req = state->cur;
	struct crypto_ahash *tfm;
	struct ahash_request *n;
	bool update;
	u8 *page;

	err = ahash_reqchain_virt(state, err, mask);
	if (err == -EINPROGRESS || err == -EBUSY)
		goto out;

	if (req != req0)
		list_add_tail(&req->base.list, &req0->base.list);

	tfm = crypto_ahash_reqtfm(req);
	update = state->op == crypto_ahash_alg(tfm)->update;

	list_for_each_entry_safe(req, n, &state->head, base.list) {
		list_del_init(&req->base.list);

		req->base.flags &= mask;
		req->base.complete = ahash_reqchain_done;
		req->base.data = state;
		state->cur = req;

		if (update && ahash_request_isvirt(req) && req->nbytes) {
			unsigned int len = req->nbytes;
			u8 *result = req->result;

			state->src = req->svirt;
			state->nbytes = len;

			len = min(PAGE_SIZE, len);

			memcpy(state->page, req->svirt, len);
			state->offset = len;

			ahash_request_set_crypt(req, &state->sg, result, len);
		}

		err = state->op(req);

		if (err == -EINPROGRESS) {
			if (!list_empty(&state->head) ||
			    state->offset < state->nbytes)
				err = -EBUSY;
			goto out;
		}

		if (err == -EBUSY)
			goto out;

		err = ahash_reqchain_virt(state, err, mask);
		if (err == -EINPROGRESS || err == -EBUSY)
			goto out;

		list_add_tail(&req->base.list, &req0->base.list);
	}

	page = state->page;
	if (page) {
		memset(page, 0, PAGE_SIZE);
		free_page((unsigned long)page);
	}
	ahash_restore_req(req0);

out:
	return err;
}

static void ahash_reqchain_done(void *data, int err)
{
	struct ahash_save_req_state *state = data;
	crypto_completion_t compl = state->compl;

	data = state->data;

	if (err == -EINPROGRESS) {
		if (!list_empty(&state->head) || state->offset < state->nbytes)
			return;
		goto notify;
	}

	err = ahash_reqchain_finish(state->req0, state, err,
				    CRYPTO_TFM_REQ_MAY_BACKLOG);
	if (err == -EBUSY)
		return;

notify:
	compl(data, err);
}

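/*
 * Run @op on @req.  Algorithms that handle request chaining natively, and
 * plain requests that need no help, are invoked directly.  Otherwise the
 * chain is emulated here: a virtually addressed update is bounced through
 * state->page in PAGE_SIZE chunks, and any chained requests queued on
 * state->head are processed one at a time by ahash_reqchain_finish().
 */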
static int ahash_do_req_chain(struct ahash_request *req,
			      int (*op)(struct ahash_request *req))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	bool update = op == crypto_ahash_alg(tfm)->update;
	struct ahash_save_req_state *state;
	struct ahash_save_req_state state0;
	u8 *page = NULL;
	int err;

	if (crypto_ahash_req_chain(tfm) ||
	    (!ahash_request_chained(req) &&
	     (!update || !ahash_request_isvirt(req))))
		return op(req);

	if (update && ahash_request_hasvirt(req)) {
		gfp_t gfp;
		u32 flags;

		flags = ahash_request_flags(req);
		gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
		page = (void *)__get_free_page(gfp);
		err = -ENOMEM;
		if (!page)
			goto out_set_chain;
	}

	state = &state0;
	if (ahash_is_async(tfm)) {
		err = ahash_save_req(req, ahash_reqchain_done);
		if (err)
			goto out_free_page;

		state = req->base.data;
	}

	state->op = op;
	state->cur = req;
	state->page = page;
	state->offset = 0;
	state->nbytes = 0;
	INIT_LIST_HEAD(&state->head);

	if (page)
		sg_init_one(&state->sg, page, PAGE_SIZE);

	if (update && ahash_request_isvirt(req) && req->nbytes) {
		unsigned int len = req->nbytes;
		u8 *result = req->result;

		state->src = req->svirt;
		state->nbytes = len;

		len = min(PAGE_SIZE, len);

		memcpy(page, req->svirt, len);
		state->offset = len;

		ahash_request_set_crypt(req, &state->sg, result, len);
	}

	err = op(req);
	if (err == -EBUSY || err == -EINPROGRESS)
		return -EBUSY;

	return ahash_reqchain_finish(req, state, err, ~0);

out_free_page:
	free_page((unsigned long)page);

out_set_chain:
	req->base.err = err;
	return err;
}

int crypto_ahash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash)) {
		int err;

		err = crypto_shash_init(prepare_shash_desc(req, tfm));
		req->base.err = err;
		return err;
	}

	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;

	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->init);
}
EXPORT_SYMBOL_GPL(crypto_ahash_init);

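/*
 * For async tfms, interpose @cplt as the completion callback, stashing the
 * caller's callback and data in a freshly allocated ahash_save_req_state
 * that ahash_restore_req() later unwinds.  Synchronous tfms need no saved
 * state and return immediately.
 */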
static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ahash_save_req_state *state;
	gfp_t gfp;
	u32 flags;

	if (!ahash_is_async(tfm))
		return 0;

	flags = ahash_request_flags(req);
	gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC;
	state = kmalloc(sizeof(*state), gfp);
	if (!state)
		return -ENOMEM;

	state->compl = req->base.complete;
	state->data = req->base.data;
	req->base.complete = cplt;
	req->base.data = state;
	state->req0 = req;

	return 0;
}

static void ahash_restore_req(struct ahash_request *req)
{
	struct ahash_save_req_state *state;
	struct crypto_ahash *tfm;

	tfm = crypto_ahash_reqtfm(req);
	if (!ahash_is_async(tfm))
		return;

	state = req->base.data;

	req->base.complete = state->compl;
	req->base.data = state->data;
	kfree(state);
}

int crypto_ahash_update(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash)) {
		int err;

		err = shash_ahash_update(req, ahash_request_ctx(req));
		req->base.err = err;
		return err;
	}

	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->update);
}
EXPORT_SYMBOL_GPL(crypto_ahash_update);

int crypto_ahash_final(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash)) {
		int err;

		err = crypto_shash_final(ahash_request_ctx(req), req->result);
		req->base.err = err;
		return err;
	}

	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash)) {
		int err;

		err = shash_ahash_finup(req, ahash_request_ctx(req));
		req->base.err = err;
		return err;
	}

	if (!crypto_ahash_alg(tfm)->finup ||
	    (!crypto_ahash_req_chain(tfm) && ahash_request_hasvirt(req)))
		return ahash_def_finup(req);

	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

static int ahash_def_digest_finish(struct ahash_request *req, int err)
{
	struct crypto_ahash *tfm;

	if (err)
		goto out;

	tfm = crypto_ahash_reqtfm(req);
	if (ahash_is_async(tfm))
		req->base.complete = ahash_def_finup_done1;

	err = crypto_ahash_update(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	return ahash_def_finup_finish1(req, err);

out:
	ahash_restore_req(req);
	return err;
}

static void ahash_def_digest_done(void *data, int err)
{
	struct ahash_save_req_state *state0 = data;
	struct ahash_save_req_state state;
	struct ahash_request *areq;

	state = *state0;
	areq = state.req0;
	if (err == -EINPROGRESS)
		goto out;

	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	err = ahash_def_digest_finish(areq, err);
	if (err == -EINPROGRESS || err == -EBUSY)
		return;

out:
	state.compl(state.data, err);
}

static int ahash_def_digest(struct ahash_request *req)
{
	int err;

	err = ahash_save_req(req, ahash_def_digest_done);
	if (err)
		return err;

	err = crypto_ahash_init(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	return ahash_def_digest_finish(req, err);
}

int crypto_ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash)) {
		int err;

		err = shash_ahash_digest(req, prepare_shash_desc(req, tfm));
		req->base.err = err;
		return err;
	}

	if (!crypto_ahash_req_chain(tfm) && ahash_request_hasvirt(req))
		return ahash_def_digest(req);

	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;

	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

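/*
 * Default finup continuation: update is followed by final, with the
 * caller's completion callback restored only after both steps are done.
 * The _done1/_done2 callbacks re-enter this sequence when an asynchronous
 * step completes.
 */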
static void ahash_def_finup_done2(void *data, int err)
{
	struct ahash_save_req_state *state = data;
	struct ahash_request *areq = state->req0;

	if (err == -EINPROGRESS)
		return;

	ahash_restore_req(areq);
	ahash_request_complete(areq, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	struct crypto_ahash *tfm;

	if (err)
		goto out;

	tfm = crypto_ahash_reqtfm(req);
	if (ahash_is_async(tfm))
		req->base.complete = ahash_def_finup_done2;

	err = crypto_ahash_final(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

out:
	ahash_restore_req(req);
	return err;
}

static void ahash_def_finup_done1(void *data, int err)
{
	struct ahash_save_req_state *state0 = data;
	struct ahash_save_req_state state;
	struct ahash_request *areq;

	state = *state0;
	areq = state.req0;
	if (err == -EINPROGRESS)
		goto out;

	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	err = ahash_def_finup_finish1(areq, err);
	if (err == -EINPROGRESS || err == -EBUSY)
		return;

out:
	state.compl(state.data, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = crypto_ahash_update(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	return ahash_def_finup_finish1(req, err);
}

int crypto_ahash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash))
		return crypto_shash_export(ahash_request_ctx(req), out);
	return crypto_ahash_alg(tfm)->export(req, out);
}
EXPORT_SYMBOL_GPL(crypto_ahash_export);

int crypto_ahash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash))
		return crypto_shash_import(prepare_shash_desc(req, tfm), in);
	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;
	return crypto_ahash_alg(tfm)->import(req, in);
}
EXPORT_SYMBOL_GPL(crypto_ahash_import);

static void crypto_ahash_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	alg->exit_tfm(hash);
}

static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	crypto_ahash_set_statesize(hash, alg->halg.statesize);
	crypto_ahash_set_reqsize(hash, alg->reqsize);

	if (tfm->__crt_alg->cra_type == &crypto_shash_type)
		return crypto_init_ahash_using_shash(tfm);

	ahash_set_needkey(hash, alg);

	if (alg->exit_tfm)
		tfm->exit = crypto_ahash_exit_tfm;

	return alg->init_tfm ? alg->init_tfm(hash) : 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_shash_type)
		return sizeof(struct crypto_shash *);

	return crypto_alg_extsize(alg);
}

static void crypto_ahash_free_instance(struct crypto_instance *inst)
{
	struct ahash_instance *ahash = ahash_instance(inst);

	ahash->free(ahash);
}

static int __maybe_unused crypto_ahash_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	memset(&rhash, 0, sizeof(rhash));

	strscpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n",
		   str_yes_no(alg->cra_flags & CRYPTO_ALG_ASYNC));
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

static const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
	.free = crypto_ahash_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_ahash_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};

int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
		      struct crypto_instance *inst,
		      const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_ahash_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_ahash);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

static bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
	struct crypto_alg *alg = &halg->base;

	if (alg->cra_type == &crypto_shash_type)
		return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));

	return __crypto_ahash_alg(alg)->setkey != ahash_nosetkey;
}

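/*
 * Clone a transform, including any key it holds.  Keyless algorithms just
 * gain a reference to the existing tfm; keyed ones need a genuine copy so
 * that the clone carries the key independently of the original.
 */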
struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash)
{
	struct hash_alg_common *halg = crypto_hash_alg_common(hash);
	struct crypto_tfm *tfm = crypto_ahash_tfm(hash);
	struct crypto_ahash *nhash;
	struct ahash_alg *alg;
	int err;

	if (!crypto_hash_alg_has_setkey(halg)) {
		tfm = crypto_tfm_get(tfm);
		if (IS_ERR(tfm))
			return ERR_CAST(tfm);

		return hash;
	}

	nhash = crypto_clone_tfm(&crypto_ahash_type, tfm);

	if (IS_ERR(nhash))
		return nhash;

	nhash->reqsize = hash->reqsize;
	nhash->statesize = hash->statesize;

	if (likely(hash->using_shash)) {
		struct crypto_shash **nctx = crypto_ahash_ctx(nhash);
		struct crypto_shash *shash;

		shash = crypto_clone_shash(ahash_to_shash(hash));
		if (IS_ERR(shash)) {
			err = PTR_ERR(shash);
			goto out_free_nhash;
		}
		nhash->using_shash = true;
		*nctx = shash;
		return nhash;
	}

	err = -ENOSYS;
	alg = crypto_ahash_alg(hash);
	if (!alg->clone_tfm)
		goto out_free_nhash;

	err = alg->clone_tfm(nhash, hash);
	if (err)
		goto out_free_nhash;

	return nhash;

out_free_nhash:
	crypto_free_ahash(nhash);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_clone_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	if (alg->halg.statesize == 0)
		return -EINVAL;

	if (alg->reqsize && alg->reqsize < alg->halg.statesize)
		return -EINVAL;

	err = hash_prepare_alg(&alg->halg);
	if (err)
		return err;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	if (!alg->setkey)
		alg->setkey = ahash_nosetkey;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);
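
/*
 * A hedged registration sketch for driver authors; every "foo" name below
 * is a placeholder, not an API from this file.  Note that halg.statesize
 * must be non-zero (enforced by ahash_prepare_alg() above) and that
 * export/import handlers are expected, since crypto_ahash_export/import()
 * call them unconditionally:
 *
 *	static struct ahash_alg foo_sha256_alg = {
 *		.init   = foo_init,
 *		.update = foo_update,
 *		.final  = foo_final,
 *		.export = foo_export,
 *		.import = foo_import,
 *		.halg = {
 *			.digestsize = SHA256_DIGEST_SIZE,
 *			.statesize  = sizeof(struct foo_state),
 *			.base = {
 *				.cra_name        = "sha256",
 *				.cra_driver_name = "sha256-foo",
 *				.cra_priority    = 300,
 *				.cra_flags       = CRYPTO_ALG_ASYNC,
 *				.cra_blocksize   = SHA256_BLOCK_SIZE,
 *				.cra_module      = THIS_MODULE,
 *			},
 *		},
 *	};
 *
 *	err = crypto_register_ahash(&foo_sha256_alg);
 */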

void crypto_unregister_ahash(struct ahash_alg *alg)
{
	crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int crypto_register_ahashes(struct ahash_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_ahash(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_ahashes);

void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");