// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handle async block requests by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/engine.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/kpp.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

struct crypto_engine_alg {
	struct crypto_alg base;
	struct crypto_engine_op op;
};
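
/*
 * Note added for clarity (not in the original source): the engine-aware
 * wrapper types (struct aead_engine_alg, struct ahash_engine_alg,
 * struct skcipher_engine_alg, ...) are assumed to end up with their
 * struct crypto_engine_op immediately after the embedded struct crypto_alg
 * in memory, matching the layout above, so that crypto_pump_requests() can
 * recover the op via container_of() on async_req->tfm->__crt_alg whatever
 * the algorithm type is.
 */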

/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
				    struct crypto_async_request *req, int err)
{
	unsigned long flags;

	/*
	 * If the hardware cannot enqueue more requests and the retry
	 * mechanism is not supported, make sure we are completing the
	 * current request.
	 */
	if (!engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		if (engine->cur_req == req) {
			engine->cur_req = NULL;
		}
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

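	/*
	 * Note added for clarity (not in the original source): the assertion
	 * below encodes the expectation that requests are finalized from
	 * softirq/bottom-half context (typically the driver's completion
	 * tasklet), not from hard-IRQ context.
	 */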
	lockdep_assert_in_softirq();
	crypto_request_complete(req, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	struct crypto_engine_alg *alg;
	struct crypto_engine_op *op;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (!engine->retry_support && engine->cur_req)
		goto out;

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		goto out;
	}

start_request:
	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	/*
	 * If hardware doesn't support the retry mechanism,
	 * keep track of the request we are processing now.
	 * We'll need it on completion (crypto_finalize_request).
	 */
	if (!engine->retry_support)
		engine->cur_req = async_req;

	if (!engine->busy)
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	alg = container_of(async_req->tfm->__crt_alg,
			   struct crypto_engine_alg, base);
	op = &alg->op;
	ret = op->do_one_request(engine, async_req);

	/* Request unsuccessfully executed by hardware */
	if (ret < 0) {
		/*
		 * If the hardware queue is full (-ENOSPC), requeue the
		 * request regardless of the backlog flag.
		 * Otherwise, complete the request with the error code.
		 */
		if (!engine->retry_support ||
		    (ret != -ENOSPC)) {
			dev_err(engine->dev,
				"Failed to do one request from queue: %d\n",
				ret);
			goto req_err_1;
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		/*
		 * If hardware was unable to execute request, enqueue it
		 * back in front of crypto-engine queue, to keep the order
		 * of requests.
		 */
		crypto_enqueue_request_head(&engine->queue, async_req);

		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	goto retry;

req_err_1:
	crypto_request_complete(async_req, ret);

retry:
	if (backlog)
		crypto_request_complete(backlog, -EINPROGRESS);

	/* If retry mechanism is supported, send new requests to engine */
	if (engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		goto start_request;
	}
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	return;
}
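
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * given the handling in crypto_pump_requests() above, a driver's
 * ->do_one_request() callback is expected to return -ENOSPC when its
 * hardware queue is full (the request is then re-queued at the head if
 * retry_support is set), any other negative value to complete the request
 * with that error, or a non-negative value once the request has been
 * accepted, in which case the driver finalizes it later with one of the
 * crypto_finalize_*_request() helpers.  The names below are hypothetical.
 *
 *	static int my_do_one_request(struct crypto_engine *engine, void *areq)
 *	{
 *		struct skcipher_request *req =
 *			container_of(areq, struct skcipher_request, base);
 *		struct my_dev *dd = ...;
 *
 *		if (my_hw_queue_full(dd))
 *			return -ENOSPC;
 *
 *		if (my_start_dma(dd, req))
 *			return -EIO;
 *
 *		return 0;
 *	}
 */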

static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request to be enqueued on the engine queue
 * @need_pump: indicates whether to queue the request pump work to the kworker
 */
static int crypto_transfer_request(struct crypto_engine *engine,
				   struct crypto_async_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = crypto_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}

/**
 * crypto_transfer_request_to_engine - transfer one request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request to be enqueued on the engine queue
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
					     struct crypto_async_request *req)
{
	return crypto_transfer_request(engine, req, true);
}

/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to be enqueued on the engine queue
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
					   struct aead_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);

/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to be enqueued on the engine queue
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
					       struct akcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);

/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to be enqueued on the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_transfer_kpp_request_to_engine - transfer one kpp_request into
 * the engine queue
 * @engine: the hardware engine
 * @req: the request to be enqueued on the engine queue
 */
int crypto_transfer_kpp_request_to_engine(struct crypto_engine *engine,
					  struct kpp_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_kpp_request_to_engine);

/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to be enqueued on the engine queue
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
					       struct skcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);

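/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * a driver's algorithm entry point typically just hands the request over to
 * the engine, and its completion path (e.g. a tasklet) finalizes it later.
 * my_skcipher_encrypt(), my_done_tasklet() and struct my_dev are
 * hypothetical.
 *
 *	static int my_skcipher_encrypt(struct skcipher_request *req)
 *	{
 *		struct my_dev *dd = ...;
 *
 *		return crypto_transfer_skcipher_request_to_engine(dd->engine,
 *								  req);
 *	}
 *
 *	static void my_done_tasklet(unsigned long data)
 *	{
 *		struct my_dev *dd = (struct my_dev *)data;
 *
 *		crypto_finalize_skcipher_request(dd->engine, dd->cur_req,
 *						 dd->err);
 *	}
 */
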
/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
				  struct aead_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);

/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
				      struct akcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);

/**
 * crypto_finalize_hash_request - finalize one ahash_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_finalize_kpp_request - finalize one kpp_request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_kpp_request(struct crypto_engine *engine,
				 struct kpp_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_kpp_request);

/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
				      struct skcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);

/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is busy, wait
	 * (up to 500 iterations of 20 ms, i.e. roughly 10 seconds) for the
	 * queued requests to be pumped and completed.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		dev_warn(engine->dev, "could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure
 * and initialize it by setting the maximum number of entries in the software
 * crypto-engine queue.
 * @dev: the device attached to the hardware engine
 * @retry_support: whether the hardware supports the retry mechanism
 * @rt: whether this queue is set to run as a realtime task
 * @qlen: maximum size of the crypto-engine queue
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
						       bool retry_support,
						       bool rt, int qlen)
{
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->dev = dev;
	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->retry_support = retry_support;
	engine->priv_data = dev;

	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, qlen);
	spin_lock_init(&engine->queue_lock);

	engine->kworker = kthread_run_worker(0, "%s", engine->name);
	if (IS_ERR(engine->kworker)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_set_fifo(engine->kworker->task);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to the hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	return crypto_engine_alloc_init_and_set(dev, false, rt,
						CRYPTO_ENGINE_MAX_QLEN);
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
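
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * a typical driver probe() allocates the engine, starts it, and only then
 * registers its engine-aware algorithms.  my_probe(), my_register_algs()
 * and struct my_dev are hypothetical.
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct my_dev *dd = ...;
 *		int ret;
 *
 *		dd->engine = crypto_engine_alloc_init(&pdev->dev, true);
 *		if (!dd->engine)
 *			return -ENOMEM;
 *
 *		ret = crypto_engine_start(dd->engine);
 *		if (ret)
 *			goto err_engine_exit;
 *
 *		ret = my_register_algs(dd);
 *		if (ret)
 *			goto err_engine_exit;
 *
 *		return 0;
 *
 *	err_engine_exit:
 *		crypto_engine_exit(dd->engine);
 *		return ret;
 *	}
 */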

/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine that needs to be freed
 */
void crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

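	/*
	 * Note added for clarity (not in the original source): if the engine
	 * cannot be stopped because requests are still queued or in flight,
	 * bail out and leave the kworker alive rather than destroying it
	 * underneath outstanding work.
	 */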
	ret = crypto_engine_stop(engine);
	if (ret)
		return;

	kthread_destroy_worker(engine->kworker);
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);

int crypto_engine_register_aead(struct aead_engine_alg *alg)
{
	if (!alg->op.do_one_request)
		return -EINVAL;
	return crypto_register_aead(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_aead);
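
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * an engine-aware algorithm pairs the usual algorithm definition with the
 * engine op.  The my_* callbacks and driver name below are hypothetical.
 *
 *	static struct aead_engine_alg my_gcm_alg = {
 *		.base = {
 *			.setkey		= my_aead_setkey,
 *			.encrypt	= my_aead_encrypt,
 *			.decrypt	= my_aead_decrypt,
 *			.base = {
 *				.cra_name	 = "gcm(aes)",
 *				.cra_driver_name = "gcm-aes-mydev",
 *				...
 *			},
 *		},
 *		.op = {
 *			.do_one_request	= my_aead_do_one_request,
 *		},
 *	};
 *
 *	ret = crypto_engine_register_aead(&my_gcm_alg);
 */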

void crypto_engine_unregister_aead(struct aead_engine_alg *alg)
{
	crypto_unregister_aead(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_aead);

int crypto_engine_register_aeads(struct aead_engine_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_engine_register_aead(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	crypto_engine_unregister_aeads(algs, i);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_register_aeads);

void crypto_engine_unregister_aeads(struct aead_engine_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_engine_unregister_aead(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_aeads);

int crypto_engine_register_ahash(struct ahash_engine_alg *alg)
{
	if (!alg->op.do_one_request)
		return -EINVAL;
	return crypto_register_ahash(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_ahash);

void crypto_engine_unregister_ahash(struct ahash_engine_alg *alg)
{
	crypto_unregister_ahash(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_ahash);

int crypto_engine_register_ahashes(struct ahash_engine_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_engine_register_ahash(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	crypto_engine_unregister_ahashes(algs, i);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_register_ahashes);

void crypto_engine_unregister_ahashes(struct ahash_engine_alg *algs,
				      int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_engine_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_ahashes);

int crypto_engine_register_akcipher(struct akcipher_engine_alg *alg)
{
	if (!alg->op.do_one_request)
		return -EINVAL;
	return crypto_register_akcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_akcipher);

void crypto_engine_unregister_akcipher(struct akcipher_engine_alg *alg)
{
	crypto_unregister_akcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_akcipher);

int crypto_engine_register_kpp(struct kpp_engine_alg *alg)
{
	if (!alg->op.do_one_request)
		return -EINVAL;
	return crypto_register_kpp(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_kpp);

void crypto_engine_unregister_kpp(struct kpp_engine_alg *alg)
{
	crypto_unregister_kpp(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_kpp);

int crypto_engine_register_skcipher(struct skcipher_engine_alg *alg)
{
	if (!alg->op.do_one_request)
		return -EINVAL;
	return crypto_register_skcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_skcipher);

void crypto_engine_unregister_skcipher(struct skcipher_engine_alg *alg)
{
	return crypto_unregister_skcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_skcipher);

int crypto_engine_register_skciphers(struct skcipher_engine_alg *algs,
				     int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_engine_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	crypto_engine_unregister_skciphers(algs, i);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_register_skciphers);

void crypto_engine_unregister_skciphers(struct skcipher_engine_alg *algs,
					int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_engine_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_skciphers);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");