// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/cryptd.h>
#include <linux/refcount.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static unsigned int cryptd_max_cpu_qlen = 1000;
module_param(cryptd_max_cpu_qlen, uint, 0);
MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");

static struct workqueue_struct *cryptd_wq;

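/*
 * One request queue per CPU: requests are enqueued on the local CPU and
 * drained by a work item that runs on that same CPU.
 */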
struct cryptd_cpu_queue {
	local_lock_t bh_lock;
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	/*
	 * Protected by disabling BH to allow enqueueing from softirq and
	 * dequeuing from kworker (cryptd_queue_worker()).
	 */
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct skcipherd_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct aead_instance_ctx {
	struct crypto_aead_spawn aead_spawn;
	struct cryptd_queue *queue;
};

struct cryptd_skcipher_ctx {
	refcount_t refcnt;
	struct crypto_skcipher *child;
};

struct cryptd_skcipher_request_ctx {
	struct skcipher_request req;
};

struct cryptd_hash_ctx {
	refcount_t refcnt;
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	void *data;
	struct shash_desc desc;
};

struct cryptd_aead_ctx {
	refcount_t refcnt;
	struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
	struct aead_request req;
};

static void cryptd_queue_worker(struct work_struct *work);

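/*
 * Allocate the per-CPU queues and bind each one to the shared worker
 * callback; called once at module init.
 */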
static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
		local_lock_init(&cpu_queue->bh_lock);
	}
	pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

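/*
 * Queue a request on the current CPU's queue and schedule its work item.
 * The transform's refcount (the first member of its context) is bumped so
 * it stays alive while the request is pending; a full queue (-ENOSPC)
 * skips both the kick and the refcount increment.
 */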
static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int err;
	struct cryptd_cpu_queue *cpu_queue;
	refcount_t *refcnt;

	local_bh_disable();
	local_lock_nested_bh(&queue->cpu_queue->bh_lock);
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);

	refcnt = crypto_tfm_ctx(request->tfm);

	if (err == -ENOSPC)
		goto out;

	queue_work_on(smp_processor_id(), cryptd_wq, &cpu_queue->work);

	if (!refcount_read(refcnt))
		goto out;

	refcount_inc(refcnt);

out:
	local_unlock_nested_bh(&queue->cpu_queue->bh_lock);
	local_bh_enable();

	return err;
}

/*
 * Called in workqueue context; do one unit of real crypto work (via
 * req->complete) and reschedule itself if there is more work to do.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging crypto workqueue.
	 */
	local_bh_disable();
	__local_lock_nested_bh(&cpu_queue->bh_lock);
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	__local_unlock_nested_bh(&cpu_queue->bh_lock);
	local_bh_enable();

	if (!req)
		return;

	if (backlog)
		crypto_request_complete(backlog, -EINPROGRESS);
	crypto_request_complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(cryptd_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

	return ictx->queue;
}

static void cryptd_type_and_mask(struct crypto_attr_type *algt,
				 u32 *type, u32 *mask)
{
	/*
	 * cryptd is allowed to wrap internal algorithms, but in that case the
	 * resulting cryptd instance will be marked as internal as well.
	 */
	*type = algt->type & CRYPTO_ALG_INTERNAL;
	*mask = algt->mask & CRYPTO_ALG_INTERNAL;

	/* No point in cryptd wrapping an algorithm that's already async. */
	*mask |= CRYPTO_ALG_ASYNC;

	*mask |= crypto_algt_inherited_mask(algt);
}

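/*
 * Fill in the common crypto_alg fields of a cryptd instance: derive the
 * "cryptd(...)" driver name and inherit priority (+50), block size and
 * alignmask from the wrapped algorithm.
 */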
static int cryptd_init_instance(struct crypto_instance *inst,
				struct crypto_alg *alg)
{
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)",
		     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return 0;
}

static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
				  const u8 *key, unsigned int keylen)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child = ctx->child;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child,
				  crypto_skcipher_get_flags(parent) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(child, key, keylen);
}

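/*
 * Restore the caller's completion and, unless the request is merely being
 * backlogged (-EINPROGRESS), set up the subrequest for the child skcipher.
 * Returns NULL in the backlog case.
 */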
static struct skcipher_request *cryptd_skcipher_prepare(
	struct skcipher_request *req, int err)
{
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->req;
	struct cryptd_skcipher_ctx *ctx;
	struct crypto_skcipher *child;

	req->base.complete = subreq->base.complete;
	req->base.data = subreq->base.data;

	if (unlikely(err == -EINPROGRESS))
		return NULL;

	ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	child = ctx->child;

	skcipher_request_set_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	return subreq;
}

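/*
 * Complete the request with BH disabled.  If it was only backlogged,
 * re-arm the cryptd completion; otherwise drop the reference taken at
 * enqueue time and free the transform if it was the last one.
 */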
static void cryptd_skcipher_complete(struct skcipher_request *req, int err,
				     crypto_completion_t complete)
{
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_request *subreq = &rctx->req;
	int refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	skcipher_request_complete(req, err);
	local_bh_enable();

	if (unlikely(err == -EINPROGRESS)) {
		subreq->base.complete = req->base.complete;
		subreq->base.data = req->base.data;
		req->base.complete = complete;
		req->base.data = req;
	} else if (refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(tfm);
}

static void cryptd_skcipher_encrypt(void *data, int err)
{
	struct skcipher_request *req = data;
	struct skcipher_request *subreq;

	subreq = cryptd_skcipher_prepare(req, err);
	if (likely(subreq))
		err = crypto_skcipher_encrypt(subreq);

	cryptd_skcipher_complete(req, err, cryptd_skcipher_encrypt);
}

static void cryptd_skcipher_decrypt(void *data, int err)
{
	struct skcipher_request *req = data;
	struct skcipher_request *subreq;

	subreq = cryptd_skcipher_prepare(req, err);
	if (likely(subreq))
		err = crypto_skcipher_decrypt(subreq);

	cryptd_skcipher_complete(req, err, cryptd_skcipher_decrypt);
}

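/*
 * Stash the caller's completion in the subrequest, substitute the cryptd
 * callback and push the request onto the per-CPU queue.
 */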
static int cryptd_skcipher_enqueue(struct skcipher_request *req,
				   crypto_completion_t compl)
{
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_request *subreq = &rctx->req;
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
	subreq->base.complete = req->base.complete;
	subreq->base.data = req->base.data;
	req->base.complete = compl;
	req->base.data = req;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
}

static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
}

static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct crypto_skcipher_spawn *spawn = &ictx->spawn;
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_skcipher_set_reqsize(
		tfm, sizeof(struct cryptd_skcipher_request_ctx) +
		     crypto_skcipher_reqsize(cipher));
	return 0;
}

static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->child);
}

static void cryptd_skcipher_free(struct skcipher_instance *inst)
{
	struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ctx->spawn);
	kfree(inst);
}

static int cryptd_create_skcipher(struct crypto_template *tmpl,
				  struct rtattr **tb,
				  struct crypto_attr_type *algt,
				  struct cryptd_queue *queue)
{
	struct skcipherd_instance_ctx *ctx;
	struct skcipher_instance *inst;
	struct skcipher_alg_common *alg;
	u32 type;
	u32 mask;
	int err;

	cryptd_type_and_mask(algt, &type, &mask);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
				   crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_spawn_skcipher_alg_common(&ctx->spawn);
	err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
	if (err)
		goto err_free_inst;

	inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
		(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.ivsize = alg->ivsize;
	inst->alg.chunksize = alg->chunksize;
	inst->alg.min_keysize = alg->min_keysize;
	inst->alg.max_keysize = alg->max_keysize;

	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);

	inst->alg.init = cryptd_skcipher_init_tfm;
	inst->alg.exit = cryptd_skcipher_exit_tfm;

	inst->alg.setkey = cryptd_skcipher_setkey;
	inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
	inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;

	inst->free = cryptd_skcipher_free;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		cryptd_skcipher_free(inst);
	}
	return err;
}

static int cryptd_hash_init_tfm(struct crypto_ahash *tfm)
{
	struct ahash_instance *inst = ahash_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = ahash_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(tfm,
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static int cryptd_hash_clone_tfm(struct crypto_ahash *ntfm,
				 struct crypto_ahash *tfm)
{
	struct cryptd_hash_ctx *nctx = crypto_ahash_ctx(ntfm);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_clone_shash(ctx->child);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	nctx->child = hash;
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	return crypto_shash_setkey(child, key, keylen);
}

static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	rctx->data = req->base.data;
	req->base.complete = compl;
	req->base.data = req;

	return cryptd_enqueue_request(queue, &req->base);
}

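/*
 * Hash counterpart of cryptd_skcipher_prepare(): restore the caller's
 * completion and return the shash descriptor, or NULL if the request is
 * only being backlogged (-EINPROGRESS).
 */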
static struct shash_desc *cryptd_hash_prepare(struct ahash_request *req,
					      int err)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	req->base.complete = rctx->complete;
	req->base.data = rctx->data;

	if (unlikely(err == -EINPROGRESS))
		return NULL;

	return &rctx->desc;
}

static void cryptd_hash_complete(struct ahash_request *req, int err,
				 crypto_completion_t complete)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	int refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	ahash_request_complete(req, err);
	local_bh_enable();

	if (err == -EINPROGRESS) {
		req->base.complete = complete;
		req->base.data = req;
	} else if (refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(tfm);
}

static void cryptd_hash_init(void *data, int err)
{
	struct ahash_request *req = data;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct crypto_shash *child = ctx->child;
	struct shash_desc *desc;

	desc = cryptd_hash_prepare(req, err);
	if (unlikely(!desc))
		goto out;

	desc->tfm = child;

	err = crypto_shash_init(desc);

out:
	cryptd_hash_complete(req, err, cryptd_hash_init);
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(void *data, int err)
{
	struct ahash_request *req = data;
	struct shash_desc *desc;

	desc = cryptd_hash_prepare(req, err);
	if (likely(desc))
		err = shash_ahash_update(req, desc);

	cryptd_hash_complete(req, err, cryptd_hash_update);
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(void *data, int err)
{
	struct ahash_request *req = data;
	struct shash_desc *desc;

	desc = cryptd_hash_prepare(req, err);
	if (likely(desc))
		err = crypto_shash_final(desc, req->result);

	cryptd_hash_complete(req, err, cryptd_hash_final);
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(void *data, int err)
{
	struct ahash_request *req = data;
	struct shash_desc *desc;

	desc = cryptd_hash_prepare(req, err);
	if (likely(desc))
		err = shash_ahash_finup(req, desc);

	cryptd_hash_complete(req, err, cryptd_hash_finup);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(void *data, int err)
{
	struct ahash_request *req = data;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct crypto_shash *child = ctx->child;
	struct shash_desc *desc;

	desc = cryptd_hash_prepare(req, err);
	if (unlikely(!desc))
		goto out;

	desc->tfm = child;

	err = shash_ahash_digest(req, desc);

out:
	cryptd_hash_complete(req, err, cryptd_hash_digest);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct shash_desc *desc = cryptd_shash_desc(req);

	desc->tfm = ctx->child;

	return crypto_shash_import(desc, in);
}

static void cryptd_hash_free(struct ahash_instance *inst)
{
	struct hashd_instance_ctx *ctx = ahash_instance_ctx(inst);

	crypto_drop_shash(&ctx->spawn);
	kfree(inst);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct crypto_attr_type *algt,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *alg;
	u32 type;
	u32 mask;
	int err;

	cryptd_type_and_mask(algt, &type, &mask);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_shash(&ctx->spawn, ahash_crypto_instance(inst),
				crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
		goto err_free_inst;
	alg = crypto_spawn_shash_alg(&ctx->spawn);

	err = cryptd_init_instance(ahash_crypto_instance(inst), &alg->base);
	if (err)
		goto err_free_inst;

	inst->alg.halg.base.cra_flags |= CRYPTO_ALG_ASYNC |
		(alg->base.cra_flags & (CRYPTO_ALG_INTERNAL |
					CRYPTO_ALG_OPTIONAL_KEY));
	inst->alg.halg.digestsize = alg->digestsize;
	inst->alg.halg.statesize = alg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.init_tfm = cryptd_hash_init_tfm;
	inst->alg.clone_tfm = cryptd_hash_clone_tfm;
	inst->alg.exit_tfm = cryptd_hash_exit_tfm;

	inst->alg.init = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final = cryptd_hash_final_enqueue;
	inst->alg.finup = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	if (crypto_shash_alg_has_setkey(alg))
		inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	inst->free = cryptd_hash_free;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		cryptd_hash_free(inst);
	}
	return err;
}

static int cryptd_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setauthsize(child, authsize);
}

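/*
 * Common worker body for AEAD encrypt/decrypt: run the child transform
 * (with CRYPTO_TFM_REQ_MAY_SLEEP set), then complete the original request
 * and handle the backlog/refcount bookkeeping as in the skcipher path.
 */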
static void cryptd_aead_crypt(struct aead_request *req,
			      struct crypto_aead *child, int err,
			      int (*crypt)(struct aead_request *req),
			      crypto_completion_t compl)
{
	struct cryptd_aead_request_ctx *rctx;
	struct aead_request *subreq;
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *tfm;
	int refcnt;

	rctx = aead_request_ctx(req);
	subreq = &rctx->req;
	req->base.complete = subreq->base.complete;
	req->base.data = subreq->base.data;

	tfm = crypto_aead_reqtfm(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	aead_request_set_tfm(subreq, child);
	aead_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				  NULL, NULL);
	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
			       req->iv);
	aead_request_set_ad(subreq, req->assoclen);

	err = crypt(subreq);

out:
	ctx = crypto_aead_ctx(tfm);
	refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	aead_request_complete(req, err);
	local_bh_enable();

	if (err == -EINPROGRESS) {
		subreq->base.complete = req->base.complete;
		subreq->base.data = req->base.data;
		req->base.complete = compl;
		req->base.data = req;
	} else if (refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_aead(tfm);
}

static void cryptd_aead_encrypt(void *data, int err)
{
	struct aead_request *req = data;
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *child;

	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	child = ctx->child;
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt,
			  cryptd_aead_encrypt);
}

static void cryptd_aead_decrypt(void *data, int err)
{
	struct aead_request *req = data;
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *child;

	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	child = ctx->child;
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt,
			  cryptd_aead_decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));
	struct aead_request *subreq = &rctx->req;

	subreq->base.complete = req->base.complete;
	subreq->base.data = req->base.data;
	req->base.complete = compl;
	req->base.data = req;
	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(
		tfm, sizeof(struct cryptd_aead_request_ctx) +
		     crypto_aead_reqsize(cipher));
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->child);
}

static void cryptd_aead_free(struct aead_instance *inst)
{
	struct aead_instance_ctx *ctx = aead_instance_ctx(inst);

	crypto_drop_aead(&ctx->aead_spawn);
	kfree(inst);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
			      struct crypto_attr_type *algt,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct aead_instance *inst;
	struct aead_alg *alg;
	u32 type;
	u32 mask;
	int err;

	cryptd_type_and_mask(algt, &type, &mask);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_aead(&ctx->aead_spawn, aead_crypto_instance(inst),
			       crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto err_free_inst;

	inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
		(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.init = cryptd_aead_init_tfm;
	inst->alg.exit = cryptd_aead_exit_tfm;
	inst->alg.setkey = cryptd_aead_setkey;
	inst->alg.setauthsize = cryptd_aead_setauthsize;
	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

	inst->free = cryptd_aead_free;

	err = aead_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		cryptd_aead_free(inst);
	}
	return err;
}

static struct cryptd_queue queue;

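/*
 * Template entry point: dispatch "cryptd(...)" instantiation by the wrapped
 * algorithm's type (skcipher, hash or AEAD).
 */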
static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_LSKCIPHER:
		return cryptd_create_skcipher(tmpl, tb, algt, &queue);
	case CRYPTO_ALG_TYPE_HASH:
		return cryptd_create_hash(tmpl, tb, algt, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, algt, &queue);
	}

	return -EINVAL;
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.module = THIS_MODULE,
};

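/*
 * Exported helpers for users that allocate "cryptd(alg)" transforms
 * directly.  The refcount starts at 1 and is also used by the
 * cryptd_*_queued() helpers to report whether requests are still pending.
 */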
struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_skcipher_ctx *ctx;
	struct crypto_skcipher *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);

	tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_skcipher_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);

struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);

bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);

void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_hash_ctx *ctx;
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_ahash_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_aead_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx;

	ctx = crypto_aead_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

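/*
 * Module init: create the (CPU-intensive, memory-reclaim capable)
 * workqueue, allocate the per-CPU request queues and register the
 * "cryptd" template.
 */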
static int __init cryptd_init(void)
{
	int err;

	cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
				    1);
	if (!cryptd_wq)
		return -ENOMEM;

	err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
	if (err)
		goto err_destroy_wq;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		goto err_fini_queue;

	return 0;

err_fini_queue:
	cryptd_fini_queue(&queue);
err_destroy_wq:
	destroy_workqueue(cryptd_wq);
	return err;
}

static void __exit cryptd_exit(void)
{
	destroy_workqueue(cryptd_wq);
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

module_init(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");