// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Scatterlist Cryptographic API.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2002 David S. Miller (davem@redhat.com)
 * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
 * and Nettle, by Niels Möller.
 */

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/param.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/completion.h>
#include "internal.h"

LIST_HEAD(crypto_alg_list);
EXPORT_SYMBOL_GPL(crypto_alg_list);
DECLARE_RWSEM(crypto_alg_sem);
EXPORT_SYMBOL_GPL(crypto_alg_sem);

BLOCKING_NOTIFIER_HEAD(crypto_chain);
EXPORT_SYMBOL_GPL(crypto_chain);

#if IS_BUILTIN(CONFIG_CRYPTO_ALGAPI) && IS_ENABLED(CONFIG_CRYPTO_SELFTESTS)
DEFINE_STATIC_KEY_FALSE(__crypto_boot_test_finished);
#endif

static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg,
                                             u32 type, u32 mask);
static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type,
                                            u32 mask);

struct crypto_alg *crypto_mod_get(struct crypto_alg *alg)
{
        return try_module_get(alg->cra_module) ? crypto_alg_get(alg) : NULL;
}
EXPORT_SYMBOL_GPL(crypto_mod_get);

void crypto_mod_put(struct crypto_alg *alg)
{
        struct module *module = alg->cra_module;

        crypto_alg_put(alg);
        module_put(module);
}
EXPORT_SYMBOL_GPL(crypto_mod_put);

static struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type,
                                              u32 mask)
{
        struct crypto_alg *q, *alg = NULL;
        int best = -2;

        list_for_each_entry(q, &crypto_alg_list, cra_list) {
                int exact, fuzzy;

                if (crypto_is_moribund(q))
                        continue;

                if ((q->cra_flags ^ type) & mask)
                        continue;

                exact = !strcmp(q->cra_driver_name, name);
                fuzzy = !strcmp(q->cra_name, name);
                if (!exact && !(fuzzy && q->cra_priority > best))
                        continue;

                if (unlikely(!crypto_mod_get(q)))
                        continue;

                best = q->cra_priority;
                if (alg)
                        crypto_mod_put(alg);
                alg = q;

                if (exact)
                        break;
        }

        return alg;
}

static void crypto_larval_destroy(struct crypto_alg *alg)
{
        struct crypto_larval *larval = (void *)alg;

        BUG_ON(!crypto_is_larval(alg));
        if (!IS_ERR_OR_NULL(larval->adult))
                crypto_mod_put(larval->adult);
        kfree(larval);
}

struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask)
{
        struct crypto_larval *larval;

        larval = kzalloc(sizeof(*larval), GFP_KERNEL);
        if (!larval)
                return ERR_PTR(-ENOMEM);

        type &= ~CRYPTO_ALG_TYPE_MASK | (mask ?: CRYPTO_ALG_TYPE_MASK);

        larval->mask = mask;
        larval->alg.cra_flags = CRYPTO_ALG_LARVAL | type;
        larval->alg.cra_priority = -1;
        larval->alg.cra_destroy = crypto_larval_destroy;

        strscpy(larval->alg.cra_name, name, CRYPTO_MAX_ALG_NAME);
        init_completion(&larval->completion);

        return larval;
}
EXPORT_SYMBOL_GPL(crypto_larval_alloc);

static struct crypto_alg *crypto_larval_add(const char *name, u32 type,
                                            u32 mask)
{
        struct crypto_alg *alg;
        struct crypto_larval *larval;

        larval = crypto_larval_alloc(name, type, mask);
        if (IS_ERR(larval))
                return ERR_CAST(larval);

        refcount_set(&larval->alg.cra_refcnt, 2);

        down_write(&crypto_alg_sem);
        alg = __crypto_alg_lookup(name, type, mask);
        if (!alg) {
                alg = &larval->alg;
                list_add(&alg->cra_list, &crypto_alg_list);
        }
        up_write(&crypto_alg_sem);

        if (alg != &larval->alg) {
                kfree(larval);
                if (crypto_is_larval(alg))
                        alg = crypto_larval_wait(alg, type, mask);
        }

        return alg;
}

static void crypto_larval_kill(struct crypto_larval *larval)
{
        bool unlinked;

        down_write(&crypto_alg_sem);
        unlinked = list_empty(&larval->alg.cra_list);
        if (!unlinked)
                list_del_init(&larval->alg.cra_list);
        up_write(&crypto_alg_sem);

        if (unlinked)
                return;

        complete_all(&larval->completion);
        crypto_alg_put(&larval->alg);
}

void crypto_schedule_test(struct crypto_larval *larval)
{
        int err;

        err = crypto_probing_notify(CRYPTO_MSG_ALG_REGISTER, larval->adult);
        WARN_ON_ONCE(err != NOTIFY_STOP);
}
EXPORT_SYMBOL_GPL(crypto_schedule_test);

static void crypto_start_test(struct crypto_larval *larval)
{
        if (!crypto_is_test_larval(larval))
                return;

        if (larval->test_started)
                return;

        down_write(&crypto_alg_sem);
        if (larval->test_started) {
                up_write(&crypto_alg_sem);
                return;
        }

        larval->test_started = true;
        up_write(&crypto_alg_sem);

        crypto_schedule_test(larval);
}

static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg,
                                             u32 type, u32 mask)
{
        struct crypto_larval *larval;
        long time_left;

again:
        larval = container_of(alg, struct crypto_larval, alg);

        if (!crypto_boot_test_finished())
                crypto_start_test(larval);

        time_left = wait_for_completion_killable_timeout(
                &larval->completion, 60 * HZ);

        alg = larval->adult;
        if (time_left < 0)
                alg = ERR_PTR(-EINTR);
        else if (!time_left) {
                if (crypto_is_test_larval(larval))
                        crypto_larval_kill(larval);
                alg = ERR_PTR(-ETIMEDOUT);
        } else if (!alg || PTR_ERR(alg) == -EEXIST) {
                int err = alg ? -EEXIST : -EAGAIN;

                /*
                 * EEXIST is expected because two probes can be scheduled
                 * at the same time with one using alg_name and the other
                 * using driver_name.  Do a re-lookup but do not retry in
                 * case we hit a quirk like gcm_base(ctr(aes),...) which
                 * will never match.
                 */
                alg = &larval->alg;
                alg = crypto_alg_lookup(alg->cra_name, type, mask) ?:
                      ERR_PTR(err);
        } else if (IS_ERR(alg))
                ;
        else if (crypto_is_test_larval(larval) &&
                 !(alg->cra_flags & CRYPTO_ALG_TESTED))
                alg = ERR_PTR(-EAGAIN);
        else if (alg->cra_flags & CRYPTO_ALG_FIPS_INTERNAL)
                alg = ERR_PTR(-EAGAIN);
        else if (!crypto_mod_get(alg))
                alg = ERR_PTR(-EAGAIN);
        crypto_mod_put(&larval->alg);

        if (!IS_ERR(alg) && crypto_is_larval(alg))
                goto again;

        return alg;
}

static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type,
                                            u32 mask)
{
        const u32 fips = CRYPTO_ALG_FIPS_INTERNAL;
        struct crypto_alg *alg;
        u32 test = 0;

        if (!((type | mask) & CRYPTO_ALG_TESTED))
                test |= CRYPTO_ALG_TESTED;

        down_read(&crypto_alg_sem);
        alg = __crypto_alg_lookup(name, (type | test) & ~fips,
                                  (mask | test) & ~fips);
        if (alg) {
                if (((type | mask) ^ fips) & fips)
                        mask |= fips;
                mask &= fips;

                if (!crypto_is_larval(alg) &&
                    ((type ^ alg->cra_flags) & mask)) {
                        /* Algorithm is disallowed in FIPS mode. */
                        crypto_mod_put(alg);
                        alg = ERR_PTR(-ENOENT);
                }
        } else if (test) {
                alg = __crypto_alg_lookup(name, type, mask);
                if (alg && !crypto_is_larval(alg)) {
                        /* Test failed */
                        crypto_mod_put(alg);
                        alg = ERR_PTR(-ELIBBAD);
                }
        }
        up_read(&crypto_alg_sem);

        return alg;
}

static struct crypto_alg *crypto_larval_lookup(const char *name, u32 type,
                                               u32 mask)
{
        struct crypto_alg *alg;

        if (!name)
                return ERR_PTR(-ENOENT);

        type &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);
        mask &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);

        alg = crypto_alg_lookup(name, type, mask);
        if (!alg && !(mask & CRYPTO_NOLOAD)) {
                request_module("crypto-%s", name);

                if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask &
                      CRYPTO_ALG_NEED_FALLBACK))
                        request_module("crypto-%s-all", name);

                alg = crypto_alg_lookup(name, type, mask);
        }

        if (!IS_ERR_OR_NULL(alg) && crypto_is_larval(alg))
                alg = crypto_larval_wait(alg, type, mask);
        else if (alg)
                ;
        else if (!(mask & CRYPTO_ALG_TESTED))
                alg = crypto_larval_add(name, type, mask);
        else
                alg = ERR_PTR(-ENOENT);

        return alg;
}

int crypto_probing_notify(unsigned long val, void *v)
{
        int ok;

        ok = blocking_notifier_call_chain(&crypto_chain, val, v);
        if (ok == NOTIFY_DONE) {
                request_module("cryptomgr");
                ok = blocking_notifier_call_chain(&crypto_chain, val, v);
        }

        return ok;
}
EXPORT_SYMBOL_GPL(crypto_probing_notify);

struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
{
        struct crypto_alg *alg;
        struct crypto_alg *larval;
        int ok;

        /*
         * If the internal flag is set for a cipher, a caller must also
         * set that flag in @type in order to use the cipher.  A caller
         * that wants to allocate a cipher that may or may not be an
         * internal cipher should use type | CRYPTO_ALG_INTERNAL and
         * !(mask & CRYPTO_ALG_INTERNAL).
         */
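        /*
         * For example, a caller willing to accept either an internal or
         * a non-internal implementation could do (an illustrative sketch,
         * not taken from in-tree code):
         *
         *      alg = crypto_alg_mod_lookup(name, type | CRYPTO_ALG_INTERNAL,
         *                                  mask & ~CRYPTO_ALG_INTERNAL);
         */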
        if (!((type | mask) & CRYPTO_ALG_INTERNAL))
                mask |= CRYPTO_ALG_INTERNAL;

        larval = crypto_larval_lookup(name, type, mask);
        if (IS_ERR(larval) || !crypto_is_larval(larval))
                return larval;

        ok = crypto_probing_notify(CRYPTO_MSG_ALG_REQUEST, larval);

        if (ok == NOTIFY_STOP)
                alg = crypto_larval_wait(larval, type, mask);
        else {
                crypto_mod_put(larval);
                alg = ERR_PTR(-ENOENT);
        }
        crypto_larval_kill(container_of(larval, struct crypto_larval, alg));
        return alg;
}
EXPORT_SYMBOL_GPL(crypto_alg_mod_lookup);

static void crypto_exit_ops(struct crypto_tfm *tfm)
{
        const struct crypto_type *type = tfm->__crt_alg->cra_type;

        if (type && tfm->exit)
                tfm->exit(tfm);
}

static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask)
{
        const struct crypto_type *type_obj = alg->cra_type;
        unsigned int len;

        len = alg->cra_alignmask & ~(crypto_tfm_ctx_alignment() - 1);
        if (type_obj)
                return len + type_obj->ctxsize(alg, type, mask);

        switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
        default:
                BUG();

        case CRYPTO_ALG_TYPE_CIPHER:
                len += crypto_cipher_ctxsize(alg);
                break;
        }

        return len;
}

void crypto_shoot_alg(struct crypto_alg *alg)
{
        down_write(&crypto_alg_sem);
        alg->cra_flags |= CRYPTO_ALG_DYING;
        up_write(&crypto_alg_sem);
}
EXPORT_SYMBOL_GPL(crypto_shoot_alg);

struct crypto_tfm *__crypto_alloc_tfmgfp(struct crypto_alg *alg, u32 type,
                                         u32 mask, gfp_t gfp)
{
        struct crypto_tfm *tfm;
        unsigned int tfm_size;
        int err = -ENOMEM;

        tfm_size = sizeof(*tfm) + crypto_ctxsize(alg, type, mask);
        tfm = kzalloc(tfm_size, gfp);
        if (tfm == NULL)
                goto out_err;

        tfm->__crt_alg = alg;
        refcount_set(&tfm->refcnt, 1);

        if (!tfm->exit && alg->cra_init && (err = alg->cra_init(tfm)))
                goto cra_init_failed;

        goto out;

cra_init_failed:
        crypto_exit_ops(tfm);
        if (err == -EAGAIN)
                crypto_shoot_alg(alg);
        kfree(tfm);
out_err:
        tfm = ERR_PTR(err);
out:
        return tfm;
}
EXPORT_SYMBOL_GPL(__crypto_alloc_tfmgfp);

struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
                                      u32 mask)
{
        return __crypto_alloc_tfmgfp(alg, type, mask, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(__crypto_alloc_tfm);

/**
 * crypto_alloc_base - Locate algorithm and allocate transform
 * @alg_name: Name of algorithm
 * @type: Type of algorithm
 * @mask: Mask for type comparison
 *
 * This function should not be used by new algorithm types.
 * Please use crypto_alloc_tfm() instead.
 *
 * crypto_alloc_base() will first attempt to locate an already loaded
 * algorithm.  If that fails and the kernel supports dynamically loadable
 * modules, it will then attempt to load a module of the same name or
 * alias.  If that fails it will send a query to any loaded crypto manager
 * to construct an algorithm on the fly.  A refcount is grabbed on the
 * algorithm which is then associated with the new transform.
 *
 * The returned transform is of an indeterminate type.  Most people
 * should use one of the more specific allocation functions such as
 * crypto_alloc_skcipher().
 *
 * In case of error the return value is an error pointer.
 */
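/*
 * Minimal usage sketch (illustrative only; error handling is trimmed and
 * the algorithm name is just an example).  For a base transform the
 * allocation starts at the tfm itself, so it doubles as @mem on free:
 *
 *      struct crypto_tfm *tfm;
 *
 *      tfm = crypto_alloc_base("sha256", 0, 0);
 *      if (IS_ERR(tfm))
 *              return PTR_ERR(tfm);
 *      ...
 *      crypto_destroy_tfm(tfm, tfm);
 */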
struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask)
{
        struct crypto_tfm *tfm;
        int err;

        for (;;) {
                struct crypto_alg *alg;

                alg = crypto_alg_mod_lookup(alg_name, type, mask);
                if (IS_ERR(alg)) {
                        err = PTR_ERR(alg);
                        goto err;
                }

                tfm = __crypto_alloc_tfm(alg, type, mask);
                if (!IS_ERR(tfm))
                        return tfm;

                crypto_mod_put(alg);
                err = PTR_ERR(tfm);

err:
                if (err != -EAGAIN)
                        break;
                if (fatal_signal_pending(current)) {
                        err = -EINTR;
                        break;
                }
        }

        return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_base);

static void *crypto_alloc_tfmmem(struct crypto_alg *alg,
                                 const struct crypto_type *frontend, int node,
                                 gfp_t gfp)
{
        struct crypto_tfm *tfm;
        unsigned int tfmsize;
        unsigned int total;
        char *mem;

        tfmsize = frontend->tfmsize;
        total = tfmsize + sizeof(*tfm) + frontend->extsize(alg);

        mem = kzalloc_node(total, gfp, node);
        if (mem == NULL)
                return ERR_PTR(-ENOMEM);

        tfm = (struct crypto_tfm *)(mem + tfmsize);
        tfm->__crt_alg = alg;
        tfm->node = node;
        refcount_set(&tfm->refcnt, 1);

        return mem;
}

void *crypto_create_tfm_node(struct crypto_alg *alg,
                             const struct crypto_type *frontend,
                             int node)
{
        struct crypto_tfm *tfm;
        char *mem;
        int err;

        mem = crypto_alloc_tfmmem(alg, frontend, node, GFP_KERNEL);
        if (IS_ERR(mem))
                goto out;

        tfm = (struct crypto_tfm *)(mem + frontend->tfmsize);
        tfm->fb = tfm;

        err = frontend->init_tfm(tfm);
        if (err)
                goto out_free_tfm;

        if (!tfm->exit && alg->cra_init && (err = alg->cra_init(tfm)))
                goto cra_init_failed;

        goto out;

cra_init_failed:
        crypto_exit_ops(tfm);
out_free_tfm:
        if (err == -EAGAIN)
                crypto_shoot_alg(alg);
        kfree(mem);
        mem = ERR_PTR(err);
out:
        return mem;
}
EXPORT_SYMBOL_GPL(crypto_create_tfm_node);

void *crypto_clone_tfm(const struct crypto_type *frontend,
                       struct crypto_tfm *otfm)
{
        struct crypto_alg *alg = otfm->__crt_alg;
        struct crypto_tfm *tfm;
        char *mem;

        mem = ERR_PTR(-ESTALE);
        if (unlikely(!crypto_mod_get(alg)))
                goto out;

        mem = crypto_alloc_tfmmem(alg, frontend, otfm->node, GFP_ATOMIC);
        if (IS_ERR(mem)) {
                crypto_mod_put(alg);
                goto out;
        }

        tfm = (struct crypto_tfm *)(mem + frontend->tfmsize);
        tfm->crt_flags = otfm->crt_flags;
        tfm->fb = tfm;

out:
        return mem;
}
EXPORT_SYMBOL_GPL(crypto_clone_tfm);

struct crypto_alg *crypto_find_alg(const char *alg_name,
                                   const struct crypto_type *frontend,
                                   u32 type, u32 mask)
{
        if (frontend) {
                type &= frontend->maskclear;
                mask &= frontend->maskclear;
                type |= frontend->type;
                mask |= frontend->maskset;
        }

        return crypto_alg_mod_lookup(alg_name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_find_alg);

/**
 * crypto_alloc_tfm_node - Locate algorithm and allocate transform
 * @alg_name: Name of algorithm
 * @frontend: Frontend algorithm type
 * @type: Type of algorithm
 * @mask: Mask for type comparison
 * @node: NUMA node on which the transform's requests should preferably be
 *        processed; NUMA_NO_NODE means the caller has no preference.
 *
 * crypto_alloc_tfm_node() will first attempt to locate an already loaded
 * algorithm.  If that fails and the kernel supports dynamically loadable
 * modules, it will then attempt to load a module of the same name or
 * alias.  If that fails it will send a query to any loaded crypto manager
 * to construct an algorithm on the fly.  A refcount is grabbed on the
 * algorithm which is then associated with the new transform.
 *
 * The returned transform is of an indeterminate type.  Most people
 * should use one of the more specific allocation functions such as
 * crypto_alloc_skcipher().
 *
 * In case of error the return value is an error pointer.
 */
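/*
 * This helper is what the type-specific allocators are built on.  As an
 * illustrative sketch (assuming a frontend object such as the skcipher
 * type's crypto_skcipher_type), a wrapper looks roughly like:
 *
 *      return crypto_alloc_tfm_node(alg_name, &crypto_skcipher_type,
 *                                   type, mask, node);
 */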
void *crypto_alloc_tfm_node(const char *alg_name,
                            const struct crypto_type *frontend, u32 type,
                            u32 mask, int node)
{
        void *tfm;
        int err;

        for (;;) {
                struct crypto_alg *alg;

                alg = crypto_find_alg(alg_name, frontend, type, mask);
                if (IS_ERR(alg)) {
                        err = PTR_ERR(alg);
                        goto err;
                }

                tfm = crypto_create_tfm_node(alg, frontend, node);
                if (!IS_ERR(tfm))
                        return tfm;

                crypto_mod_put(alg);
                err = PTR_ERR(tfm);

err:
                if (err != -EAGAIN)
                        break;
                if (fatal_signal_pending(current)) {
                        err = -EINTR;
                        break;
                }
        }

        return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_tfm_node);

/**
 * crypto_destroy_tfm - Free crypto transform
 * @mem: Start of tfm slab
 * @tfm: Transform to free
 *
 * This function frees up the transform and any associated resources,
 * then drops the refcount on the associated algorithm.
 */
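/*
 * The type-specific free helpers pass the start of the allocation as
 * @mem.  A sketch of such a wrapper (modeled on the skcipher one; names
 * shown for illustration):
 *
 *      void crypto_free_skcipher(struct crypto_skcipher *tfm)
 *      {
 *              crypto_destroy_tfm(tfm, crypto_skcipher_tfm(tfm));
 *      }
 */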
void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm)
{
        struct crypto_alg *alg;

        if (IS_ERR_OR_NULL(mem))
                return;

        if (!refcount_dec_and_test(&tfm->refcnt))
                return;
        alg = tfm->__crt_alg;

        if (!tfm->exit && alg->cra_exit)
                alg->cra_exit(tfm);
        crypto_exit_ops(tfm);
        crypto_mod_put(alg);
        kfree_sensitive(mem);
}
EXPORT_SYMBOL_GPL(crypto_destroy_tfm);

int crypto_has_alg(const char *name, u32 type, u32 mask)
{
        int ret = 0;
        struct crypto_alg *alg = crypto_alg_mod_lookup(name, type, mask);

        if (!IS_ERR(alg)) {
                crypto_mod_put(alg);
                ret = 1;
        }

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_has_alg);
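
/*
 * Example (sketch): probe for algorithm availability before enabling an
 * optional feature; the algorithm name here is purely illustrative.
 *
 *      if (!crypto_has_alg("gcm(aes)", 0, 0))
 *              return -ENOENT;
 */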

void crypto_req_done(void *data, int err)
{
        struct crypto_wait *wait = data;

        if (err == -EINPROGRESS)
                return;

        wait->err = err;
        complete(&wait->completion);
}
EXPORT_SYMBOL_GPL(crypto_req_done);
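
/*
 * Typical synchronous-wait usage together with crypto_wait_req() (a
 * sketch of the standard pattern, here assuming an skcipher request):
 *
 *      DECLARE_CRYPTO_WAIT(wait);
 *
 *      skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *                                    CRYPTO_TFM_REQ_MAY_SLEEP,
 *                                    crypto_req_done, &wait);
 *      err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 */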

void crypto_destroy_alg(struct crypto_alg *alg)
{
        if (alg->cra_type && alg->cra_type->destroy)
                alg->cra_type->destroy(alg);
        if (alg->cra_destroy)
                alg->cra_destroy(alg);
}
EXPORT_SYMBOL_GPL(crypto_destroy_alg);

struct crypto_async_request *crypto_request_clone(
        struct crypto_async_request *req, size_t total, gfp_t gfp)
{
        struct crypto_tfm *tfm = req->tfm;
        struct crypto_async_request *nreq;

        nreq = kmemdup(req, total, gfp);
        if (!nreq) {
                req->tfm = tfm->fb;
                return req;
        }

        nreq->flags &= ~CRYPTO_TFM_REQ_ON_STACK;
        return nreq;
}
EXPORT_SYMBOL_GPL(crypto_request_clone);

MODULE_DESCRIPTION("Cryptographic core API");
MODULE_LICENSE("GPL");