// SPDX-License-Identifier: GPL-2.0-or-later
/* Algorithms supported by virtio crypto device
 *
 * Authors: Gonglei <arei.gonglei@huawei.com>
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 */

#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"

struct virtio_crypto_skcipher_ctx {
	struct virtio_crypto *vcrypto;

	struct virtio_crypto_sym_session_info enc_sess_info;
	struct virtio_crypto_sym_session_info dec_sess_info;
};

struct virtio_crypto_sym_request {
	struct virtio_crypto_request base;

	/* Cipher or aead operation type */
	uint32_t type;
	/* Backing buffer for the per-request IV */
	uint8_t *iv;
	/* True for encryption, false for decryption */
	bool encrypt;
};

struct virtio_crypto_algo {
	uint32_t algonum;
	uint32_t service;
	/* Number of devices currently offering this algorithm */
	unsigned int active_devs;
	struct skcipher_engine_alg algo;
};

/*
 * The algs_lock protects the per-algorithm active_devs counts below
 * and crypto algorithm registration/unregistration.
 */
static DEFINE_MUTEX(algs_lock);
static void virtio_crypto_skcipher_finalize_req(
	struct virtio_crypto_sym_request *vc_sym_req,
	struct skcipher_request *req,
	int err);

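/*
 * Data virtqueue completion callback for symmetric requests: map the
 * device status to an errno and finalize the skcipher request.
 */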
static void virtio_crypto_dataq_sym_callback
		(struct virtio_crypto_request *vc_req, int len)
{
	struct virtio_crypto_sym_request *vc_sym_req =
		container_of(vc_req, struct virtio_crypto_sym_request, base);
	struct skcipher_request *ablk_req =
		container_of((void *)vc_sym_req, struct skcipher_request,
			     __ctx);
	int error;

	/* Finish the encrypt or decrypt process */
	if (vc_sym_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
		switch (vc_req->status) {
		case VIRTIO_CRYPTO_OK:
			error = 0;
			break;
		case VIRTIO_CRYPTO_INVSESS:
		case VIRTIO_CRYPTO_ERR:
			error = -EINVAL;
			break;
		case VIRTIO_CRYPTO_BADMSG:
			error = -EBADMSG;
			break;
		default:
			error = -EIO;
			break;
		}
		virtio_crypto_skcipher_finalize_req(vc_sym_req,
							ablk_req, error);
	}
}

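/* Sum the byte length of all entries in a scatterlist. */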
static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
{
	u64 total = 0;

	for (; sg; sg = sg_next(sg))
		total += sg->length;

	return total;
}

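/*
 * Only AES-CBC is supported: any valid AES key size selects
 * VIRTIO_CRYPTO_CIPHER_AES_CBC, anything else is rejected.
 */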
static int
virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
{
	switch (key_len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		*alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

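/*
 * Create a session on the device through the control virtqueue: send a
 * CREATE_SESSION request carrying the key and cipher parameters, then
 * read the session id back from the device.
 */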
static int virtio_crypto_alg_skcipher_init_session(
		struct virtio_crypto_skcipher_ctx *ctx,
		uint32_t alg, const uint8_t *key,
		unsigned int keylen,
		int encrypt)
{
	struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
	int err;
	unsigned int num_out = 0, num_in = 0;
	struct virtio_crypto_op_ctrl_req *ctrl;
	struct virtio_crypto_session_input *input;
	struct virtio_crypto_sym_create_session_req *sym_create_session;
	struct virtio_crypto_ctrl_request *vc_ctrl_req;

	/*
	 * Avoid DMA from the stack: use a dynamically-allocated
	 * buffer for the key.
	 */
	uint8_t *cipher_key = kmemdup(key, keylen, GFP_ATOMIC);

	if (!cipher_key)
		return -ENOMEM;

	vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
	if (!vc_ctrl_req) {
		err = -ENOMEM;
		goto out;
	}

	/* Fill in the control request header */
	ctrl = &vc_ctrl_req->ctrl;
	ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
	ctrl->header.algo = cpu_to_le32(alg);
	/* Set the default dataqueue id to 0 */
	ctrl->header.queue_id = 0;

	input = &vc_ctrl_req->input;
	input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
	/* Fill in the cipher parameters */
	sym_create_session = &ctrl->u.sym_create_session;
	sym_create_session->op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	sym_create_session->u.cipher.para.algo = ctrl->header.algo;
	sym_create_session->u.cipher.para.keylen = cpu_to_le32(keylen);
	sym_create_session->u.cipher.para.op = cpu_to_le32(op);

	sg_init_one(&outhdr, ctrl, sizeof(*ctrl));
	sgs[num_out++] = &outhdr;

	/* Set key */
	sg_init_one(&key_sg, cipher_key, keylen);
	sgs[num_out++] = &key_sg;

	/* Return status and session id back */
	sg_init_one(&inhdr, input, sizeof(*input));
	sgs[num_out + num_in++] = &inhdr;

	err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
	if (err < 0)
		goto out;

	if (le32_to_cpu(input->status) != VIRTIO_CRYPTO_OK) {
		pr_err("virtio_crypto: Create session failed status: %u\n",
			le32_to_cpu(input->status));
		err = -EINVAL;
		goto out;
	}

	if (encrypt)
		ctx->enc_sess_info.session_id = le64_to_cpu(input->session_id);
	else
		ctx->dec_sess_info.session_id = le64_to_cpu(input->session_id);

	err = 0;
out:
	kfree(vc_ctrl_req);
	kfree_sensitive(cipher_key);
	return err;
}

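/*
 * Tear down a previously created session: send a DESTROY_SESSION
 * request on the control virtqueue and check the returned status.
 */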
static int virtio_crypto_alg_skcipher_close_session(
		struct virtio_crypto_skcipher_ctx *ctx,
		int encrypt)
{
	struct scatterlist outhdr, status_sg, *sgs[2];
	struct virtio_crypto_destroy_session_req *destroy_session;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int err;
	unsigned int num_out = 0, num_in = 0;
	struct virtio_crypto_op_ctrl_req *ctrl;
	struct virtio_crypto_inhdr *ctrl_status;
	struct virtio_crypto_ctrl_request *vc_ctrl_req;

	vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
	if (!vc_ctrl_req)
		return -ENOMEM;

	ctrl_status = &vc_ctrl_req->ctrl_status;
	ctrl_status->status = VIRTIO_CRYPTO_ERR;
	/* Fill in the control request header */
	ctrl = &vc_ctrl_req->ctrl;
	ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
	/* Set the default virtqueue id to 0 */
	ctrl->header.queue_id = 0;

	destroy_session = &ctrl->u.destroy_session;

	if (encrypt)
		destroy_session->session_id = cpu_to_le64(ctx->enc_sess_info.session_id);
	else
		destroy_session->session_id = cpu_to_le64(ctx->dec_sess_info.session_id);

	sg_init_one(&outhdr, ctrl, sizeof(*ctrl));
	sgs[num_out++] = &outhdr;

	/* Return status back */
	sg_init_one(&status_sg, &ctrl_status->status, sizeof(ctrl_status->status));
	sgs[num_out + num_in++] = &status_sg;

	err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
	if (err < 0)
		goto out;

	if (ctrl_status->status != VIRTIO_CRYPTO_OK) {
		pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
			ctrl_status->status,
			le64_to_cpu(destroy_session->session_id));
		err = -EINVAL;
		goto out;
	}

	err = 0;
out:
	kfree(vc_ctrl_req);
	return err;
}

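/*
 * The device uses separate sessions for each direction, so create both
 * an encryption and a decryption session for the same key.
 */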
static int virtio_crypto_alg_skcipher_init_sessions(
		struct virtio_crypto_skcipher_ctx *ctx,
		const uint8_t *key, unsigned int keylen)
{
	uint32_t alg;
	int ret;
	struct virtio_crypto *vcrypto = ctx->vcrypto;

	if (keylen > vcrypto->max_cipher_key_len) {
		pr_err("virtio_crypto: the key is too long\n");
		return -EINVAL;
	}

	if (virtio_crypto_alg_validate_key(keylen, &alg))
		return -EINVAL;

	/* Create encryption session */
	ret = virtio_crypto_alg_skcipher_init_session(ctx,
			alg, key, keylen, 1);
	if (ret)
		return ret;
	/* Create decryption session */
	ret = virtio_crypto_alg_skcipher_init_session(ctx,
			alg, key, keylen, 0);
	if (ret) {
		virtio_crypto_alg_skcipher_close_session(ctx, 1);
		return ret;
	}
	return 0;
}

/* Note: kernel crypto API implementation */
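/*
 * setkey: bind the tfm to a virtio crypto device on first use, or close
 * the existing sessions on rekey, then create fresh encryption and
 * decryption sessions for the new key.
 */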
static int virtio_crypto_skcipher_setkey(struct crypto_skcipher *tfm,
					 const uint8_t *key,
					 unsigned int keylen)
{
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	uint32_t alg;
	int ret;

	ret = virtio_crypto_alg_validate_key(keylen, &alg);
	if (ret)
		return ret;

	if (!ctx->vcrypto) {
		/* New key */
		int node = virtio_crypto_get_current_node();
		struct virtio_crypto *vcrypto =
				      virtcrypto_get_dev_node(node,
				      VIRTIO_CRYPTO_SERVICE_CIPHER, alg);
		if (!vcrypto) {
			pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
			return -ENODEV;
		}

		ctx->vcrypto = vcrypto;
	} else {
		/* Rekeying: close the previously created sessions first */
		virtio_crypto_alg_skcipher_close_session(ctx, 1);
		virtio_crypto_alg_skcipher_close_session(ctx, 0);
	}

	ret = virtio_crypto_alg_skcipher_init_sessions(ctx, key, keylen);
	if (ret) {
		virtcrypto_dev_put(ctx->vcrypto);
		ctx->vcrypto = NULL;

		return ret;
	}

	return 0;
}

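/*
 * Build and submit one symmetric crypto request to a data virtqueue:
 * the request header, IV, source buffers, destination buffers and a
 * status byte are chained into a single scatterlist array.
 */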
static int
__virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
		struct skcipher_request *req,
		struct data_queue *data_vq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	struct virtio_crypto_op_data_req *req_data;
	int src_nents, dst_nents;
	int err;
	unsigned long flags;
	struct scatterlist outhdr, iv_sg, status_sg, **sgs;
	u64 dst_len;
	unsigned int num_out = 0, num_in = 0;
	int sg_total;
	uint8_t *iv;
	struct scatterlist *sg;

	src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (src_nents < 0) {
		pr_err("Invalid number of src SG.\n");
		return src_nents;
	}

	dst_nents = sg_nents(req->dst);

	pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
			src_nents, dst_nents);

	/* Why 3?  outhdr + iv + inhdr */
	sg_total = src_nents + dst_nents + 3;
	sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_KERNEL,
				dev_to_node(&vcrypto->vdev->dev));
	if (!sgs)
		return -ENOMEM;

	req_data = kzalloc_node(sizeof(*req_data), GFP_KERNEL,
				dev_to_node(&vcrypto->vdev->dev));
	if (!req_data) {
		kfree(sgs);
		return -ENOMEM;
	}

	vc_req->req_data = req_data;
	vc_sym_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
	/* Head of operation */
	if (vc_sym_req->encrypt) {
		req_data->header.session_id =
			cpu_to_le64(ctx->enc_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
	} else {
		req_data->header.session_id =
			cpu_to_le64(ctx->dec_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
	}
	req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
	req_data->u.sym_req.u.cipher.para.src_data_len =
			cpu_to_le32(req->cryptlen);

	dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
	if (unlikely(dst_len > U32_MAX)) {
		pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
		err = -EINVAL;
		goto free;
	}

	dst_len = min_t(unsigned int, req->cryptlen, dst_len);
	pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
			req->cryptlen, dst_len);

	if (unlikely(req->cryptlen + dst_len + ivsize +
		sizeof(vc_req->status) > vcrypto->max_size)) {
		pr_err("virtio_crypto: The length is too big\n");
		err = -EINVAL;
		goto free;
	}

	req_data->u.sym_req.u.cipher.para.dst_data_len =
			cpu_to_le32((uint32_t)dst_len);

	/* Outhdr */
	sg_init_one(&outhdr, req_data, sizeof(*req_data));
	sgs[num_out++] = &outhdr;

	/* IV */

	/*
	 * Avoid DMA from the stack: use a dynamically-allocated
	 * buffer for the IV.
	 */
	iv = kzalloc_node(ivsize, GFP_ATOMIC,
				dev_to_node(&vcrypto->vdev->dev));
	if (!iv) {
		err = -ENOMEM;
		goto free;
	}
	memcpy(iv, req->iv, ivsize);
	/*
	 * For decryption, save the last ciphertext block as the next IV
	 * now, before an in-place operation can overwrite the source.
	 */
	if (!vc_sym_req->encrypt)
		scatterwalk_map_and_copy(req->iv, req->src,
					 req->cryptlen - AES_BLOCK_SIZE,
					 AES_BLOCK_SIZE, 0);

	sg_init_one(&iv_sg, iv, ivsize);
	sgs[num_out++] = &iv_sg;
	vc_sym_req->iv = iv;

	/* Source data */
	for (sg = req->src; src_nents; sg = sg_next(sg), src_nents--)
		sgs[num_out++] = sg;

	/* Destination data */
	for (sg = req->dst; sg; sg = sg_next(sg))
		sgs[num_out + num_in++] = sg;

	/* Status */
	sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
	sgs[num_out + num_in++] = &status_sg;

	vc_req->sgs = sgs;

	spin_lock_irqsave(&data_vq->lock, flags);
	err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
				num_in, vc_req, GFP_ATOMIC);
	virtqueue_kick(data_vq->vq);
	spin_unlock_irqrestore(&data_vq->lock, flags);
	if (unlikely(err < 0))
		goto free_iv;

	return 0;

free_iv:
	kfree_sensitive(iv);
free:
	kfree_sensitive(req_data);
	kfree(sgs);
	return err;
}

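/*
 * skcipher encrypt entry point: validate the request length and hand
 * the request off to the crypto engine of the first data virtqueue.
 */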
static int virtio_crypto_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
	struct virtio_crypto_sym_request *vc_sym_req =
				skcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	if (!req->cryptlen)
		return 0;
	if (req->cryptlen % AES_BLOCK_SIZE)
		return -EINVAL;

	vc_req->dataq = data_vq;
	vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
	vc_sym_req->encrypt = true;

	return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
}

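/* skcipher decrypt entry point: mirrors the encrypt path above. */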
static int virtio_crypto_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
	struct virtio_crypto_sym_request *vc_sym_req =
				skcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	if (!req->cryptlen)
		return 0;
	if (req->cryptlen % AES_BLOCK_SIZE)
		return -EINVAL;

	vc_req->dataq = data_vq;
	vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
	vc_sym_req->encrypt = false;

	return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
}

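/* Reserve per-request context space for our symmetric request state. */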
static int virtio_crypto_skcipher_init(struct crypto_skcipher *tfm)
{
	crypto_skcipher_set_reqsize(tfm, sizeof(struct virtio_crypto_sym_request));

	return 0;
}

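/* Close both sessions and release the device reference, if any. */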
static void virtio_crypto_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (!ctx->vcrypto)
		return;

	virtio_crypto_alg_skcipher_close_session(ctx, 1);
	virtio_crypto_alg_skcipher_close_session(ctx, 0);
	virtcrypto_dev_put(ctx->vcrypto);
	ctx->vcrypto = NULL;
}

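/*
 * crypto engine do_one_request callback: submit the request to the
 * data virtqueue that was chosen when it was transferred to the engine.
 */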
int virtio_crypto_skcipher_crypt_req(
	struct crypto_engine *engine, void *vreq)
{
	struct skcipher_request *req = container_of(vreq, struct skcipher_request, base);
	struct virtio_crypto_sym_request *vc_sym_req =
				skcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct data_queue *data_vq = vc_req->dataq;
	int ret;

	ret = __virtio_crypto_skcipher_do_req(vc_sym_req, req, data_vq);
	if (ret < 0)
		return ret;

	virtqueue_kick(data_vq->vq);

	return 0;
}

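/*
 * Completion path: on encryption, save the last ciphertext block as the
 * next IV (CBC chaining), then free per-request resources and complete
 * the request through the crypto engine.
 */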
static void virtio_crypto_skcipher_finalize_req(
	struct virtio_crypto_sym_request *vc_sym_req,
	struct skcipher_request *req,
	int err)
{
	if (vc_sym_req->encrypt)
		scatterwalk_map_and_copy(req->iv, req->dst,
					 req->cryptlen - AES_BLOCK_SIZE,
					 AES_BLOCK_SIZE, 0);
	kfree_sensitive(vc_sym_req->iv);
	virtcrypto_clear_request(&vc_sym_req->base);

	crypto_finalize_skcipher_request(vc_sym_req->base.dataq->engine,
					   req, err);
}

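/* Table of algorithms this driver can expose; only AES-CBC for now. */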
static struct virtio_crypto_algo virtio_crypto_algs[] = { {
	.algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC,
	.service = VIRTIO_CRYPTO_SERVICE_CIPHER,
	.algo.base = {
		.base.cra_name		= "cbc(aes)",
		.base.cra_driver_name	= "virtio_crypto_aes_cbc",
		.base.cra_priority	= 150,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_ALLOCATES_MEMORY,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct virtio_crypto_skcipher_ctx),
		.base.cra_module	= THIS_MODULE,
		.init			= virtio_crypto_skcipher_init,
		.exit			= virtio_crypto_skcipher_exit,
		.setkey			= virtio_crypto_skcipher_setkey,
		.decrypt		= virtio_crypto_skcipher_decrypt,
		.encrypt		= virtio_crypto_skcipher_encrypt,
		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.ivsize			= AES_BLOCK_SIZE,
	},
	.algo.op = {
		.do_one_request = virtio_crypto_skcipher_crypt_req,
	},
} };

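/*
 * Register every supported algorithm with the crypto engine the first
 * time a device that offers it shows up; later devices only bump the
 * active_devs count.
 */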
int virtio_crypto_skcipher_algs_register(struct virtio_crypto *vcrypto)
{
	int ret = 0;
	int i = 0;

	mutex_lock(&algs_lock);

	for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {
		uint32_t service = virtio_crypto_algs[i].service;
		uint32_t algonum = virtio_crypto_algs[i].algonum;

		if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
			continue;

		if (virtio_crypto_algs[i].active_devs == 0) {
			ret = crypto_engine_register_skcipher(&virtio_crypto_algs[i].algo);
			if (ret)
				goto unlock;
		}

		virtio_crypto_algs[i].active_devs++;
		dev_info(&vcrypto->vdev->dev, "Registered algo %s\n",
			 virtio_crypto_algs[i].algo.base.base.cra_name);
	}

unlock:
	mutex_unlock(&algs_lock);
	return ret;
}

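/*
 * Drop this device's contribution to each algorithm; unregister an
 * algorithm once the last device offering it goes away.
 */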
void virtio_crypto_skcipher_algs_unregister(struct virtio_crypto *vcrypto)
{
	int i = 0;

	mutex_lock(&algs_lock);

	for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {
		uint32_t service = virtio_crypto_algs[i].service;
		uint32_t algonum = virtio_crypto_algs[i].algonum;

		if (virtio_crypto_algs[i].active_devs == 0 ||
		    !virtcrypto_algo_is_supported(vcrypto, service, algonum))
			continue;

		if (virtio_crypto_algs[i].active_devs == 1)
			crypto_engine_unregister_skcipher(&virtio_crypto_algs[i].algo);

		virtio_crypto_algs[i].active_devs--;
	}

	mutex_unlock(&algs_lock);
}