xref: /linux/drivers/crypto/hisilicon/sec2/sec_crypto.c (revision f7f0adfe64de08803990dc4cbecd2849c04e314a)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019 HiSilicon Limited. */
3 
4 #include <crypto/aes.h>
5 #include <crypto/aead.h>
6 #include <crypto/algapi.h>
7 #include <crypto/authenc.h>
8 #include <crypto/des.h>
9 #include <crypto/hash.h>
10 #include <crypto/internal/aead.h>
11 #include <crypto/internal/des.h>
12 #include <crypto/sha1.h>
13 #include <crypto/sha2.h>
14 #include <crypto/skcipher.h>
15 #include <crypto/xts.h>
16 #include <linux/crypto.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/idr.h>
19 
20 #include "sec.h"
21 #include "sec_crypto.h"
22 
23 #define SEC_PRIORITY		4001
24 #define SEC_XTS_MIN_KEY_SIZE	(2 * AES_MIN_KEY_SIZE)
25 #define SEC_XTS_MID_KEY_SIZE	(3 * AES_MIN_KEY_SIZE)
26 #define SEC_XTS_MAX_KEY_SIZE	(2 * AES_MAX_KEY_SIZE)
27 #define SEC_DES3_2KEY_SIZE	(2 * DES_KEY_SIZE)
28 #define SEC_DES3_3KEY_SIZE	(3 * DES_KEY_SIZE)
29 
30 /* Bit field offsets and masks used when building SEC sqe (bd) descriptors */
31 #define SEC_DE_OFFSET		1
32 #define SEC_CIPHER_OFFSET	4
33 #define SEC_SCENE_OFFSET	3
34 #define SEC_DST_SGL_OFFSET	2
35 #define SEC_SRC_SGL_OFFSET	7
36 #define SEC_CKEY_OFFSET		9
37 #define SEC_CMODE_OFFSET	12
38 #define SEC_AKEY_OFFSET         5
39 #define SEC_AEAD_ALG_OFFSET     11
40 #define SEC_AUTH_OFFSET		6
41 
42 #define SEC_DE_OFFSET_V3		9
43 #define SEC_SCENE_OFFSET_V3	5
44 #define SEC_CKEY_OFFSET_V3	13
45 #define SEC_CTR_CNT_OFFSET	25
46 #define SEC_CTR_CNT_ROLLOVER	2
47 #define SEC_SRC_SGL_OFFSET_V3	11
48 #define SEC_DST_SGL_OFFSET_V3	14
49 #define SEC_CALG_OFFSET_V3	4
50 #define SEC_AKEY_OFFSET_V3	9
51 #define SEC_MAC_OFFSET_V3	4
52 #define SEC_AUTH_ALG_OFFSET_V3	15
53 #define SEC_CIPHER_AUTH_V3	0xbf
54 #define SEC_AUTH_CIPHER_V3	0x40
55 #define SEC_FLAG_OFFSET		7
56 #define SEC_FLAG_MASK		0x0780
57 #define SEC_TYPE_MASK		0x0F
58 #define SEC_DONE_MASK		0x0001
59 #define SEC_ICV_MASK		0x000E
60 #define SEC_SQE_LEN_RATE_MASK	0x3
61 
62 #define SEC_TOTAL_IV_SZ(depth)	(SEC_IV_SIZE * (depth))
63 #define SEC_SGL_SGE_NR		128
64 #define SEC_CIPHER_AUTH		0xfe
65 #define SEC_AUTH_CIPHER		0x1
66 #define SEC_MAX_MAC_LEN		64
67 #define SEC_MAX_AAD_LEN		65535
68 #define SEC_MAX_CCM_AAD_LEN	65279
69 #define SEC_TOTAL_MAC_SZ(depth) (SEC_MAX_MAC_LEN * (depth))
70 
71 #define SEC_PBUF_SZ			512
72 #define SEC_PBUF_IV_OFFSET		SEC_PBUF_SZ
73 #define SEC_PBUF_MAC_OFFSET		(SEC_PBUF_SZ + SEC_IV_SIZE)
74 #define SEC_PBUF_PKG		(SEC_PBUF_SZ + SEC_IV_SIZE +	\
75 			SEC_MAX_MAC_LEN * 2)
76 #define SEC_PBUF_NUM		(PAGE_SIZE / SEC_PBUF_PKG)
77 #define SEC_PBUF_PAGE_NUM(depth)	((depth) / SEC_PBUF_NUM)
78 #define SEC_PBUF_LEFT_SZ(depth)		(SEC_PBUF_PKG * ((depth) -	\
79 				SEC_PBUF_PAGE_NUM(depth) * SEC_PBUF_NUM))
80 #define SEC_TOTAL_PBUF_SZ(depth)	(PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) +	\
81 				SEC_PBUF_LEFT_SZ(depth))
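/*
 * A worked example of the pbuf sizing above (illustrative only; assumes
 * 4 KiB pages and a SEC_IV_SIZE of 24 bytes as defined in sec.h):
 *   SEC_PBUF_PKG = 512 + 24 + 64 * 2 = 664 bytes per request
 *   SEC_PBUF_NUM = 4096 / 664 = 6 packages per page
 *   For a queue depth of 1024: 1024 / 6 = 170 full pages, plus a
 *   leftover area of 664 * (1024 - 170 * 6) = 2656 bytes.
 */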
82 
83 #define SEC_SQE_LEN_RATE	4
84 #define SEC_SQE_CFLAG		2
85 #define SEC_SQE_AEAD_FLAG	3
86 #define SEC_SQE_DONE		0x1
87 #define SEC_ICV_ERR		0x2
88 #define MIN_MAC_LEN		4
89 #define MAC_LEN_MASK		0x1U
90 #define MAX_INPUT_DATA_LEN	0xFFFE00
91 #define BITS_MASK		0xFF
92 #define BYTE_BITS		0x8
93 #define SEC_XTS_NAME_SZ		0x3
94 #define IV_CM_CAL_NUM		2
95 #define IV_CL_MASK		0x7
96 #define IV_CL_MIN		2
97 #define IV_CL_MID		4
98 #define IV_CL_MAX		8
99 #define IV_FLAGS_OFFSET	0x6
100 #define IV_CM_OFFSET		0x3
101 #define IV_LAST_BYTE1		1
102 #define IV_LAST_BYTE2		2
103 #define IV_LAST_BYTE_MASK	0xFF
104 #define IV_CTR_INIT		0x1
105 #define IV_BYTE_OFFSET		0x8
106 
107 static DEFINE_MUTEX(sec_algs_lock);
108 static unsigned int sec_available_devs;
109 
110 struct sec_skcipher {
111 	u64 alg_msk;
112 	struct skcipher_alg alg;
113 };
114 
115 struct sec_aead {
116 	u64 alg_msk;
117 	struct aead_alg alg;
118 };
119 
120 /* Pick an encrypt/decrypt queue cyclically to balance the load across the TFM's queues */
121 static inline u32 sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
122 {
123 	if (req->c_req.encrypt)
124 		return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
125 				 ctx->hlf_q_num;
126 
127 	return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
128 				 ctx->hlf_q_num;
129 }
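/*
 * Illustrative example: with hlf_q_num == 2, encrypt requests rotate
 * over qp_ctx[0..1] while decrypt requests rotate over qp_ctx[2..3],
 * so the two directions never contend for the same queue.
 */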
130 
131 static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
132 {
133 	if (req->c_req.encrypt)
134 		atomic_dec(&ctx->enc_qcyclic);
135 	else
136 		atomic_dec(&ctx->dec_qcyclic);
137 }
138 
139 static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
140 {
141 	int req_id;
142 
143 	spin_lock_bh(&qp_ctx->req_lock);
144 	req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 0, qp_ctx->qp->sq_depth, GFP_ATOMIC);
145 	spin_unlock_bh(&qp_ctx->req_lock);
146 	if (unlikely(req_id < 0)) {
147 		dev_err(req->ctx->dev, "alloc req id fail!\n");
148 		return req_id;
149 	}
150 
151 	req->qp_ctx = qp_ctx;
152 	qp_ctx->req_list[req_id] = req;
153 
154 	return req_id;
155 }
156 
157 static void sec_free_req_id(struct sec_req *req)
158 {
159 	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
160 	int req_id = req->req_id;
161 
162 	if (unlikely(req_id < 0 || req_id >= qp_ctx->qp->sq_depth)) {
163 		dev_err(req->ctx->dev, "free request id invalid!\n");
164 		return;
165 	}
166 
167 	qp_ctx->req_list[req_id] = NULL;
168 	req->qp_ctx = NULL;
169 
170 	spin_lock_bh(&qp_ctx->req_lock);
171 	idr_remove(&qp_ctx->req_idr, req_id);
172 	spin_unlock_bh(&qp_ctx->req_lock);
173 }
174 
175 static u8 pre_parse_finished_bd(struct bd_status *status, void *resp)
176 {
177 	struct sec_sqe *bd = resp;
178 
179 	status->done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
180 	status->icv = (le16_to_cpu(bd->type2.done_flag) & SEC_ICV_MASK) >> 1;
181 	status->flag = (le16_to_cpu(bd->type2.done_flag) &
182 					SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
183 	status->tag = le16_to_cpu(bd->type2.tag);
184 	status->err_type = bd->type2.error_type;
185 
186 	return bd->type_cipher_auth & SEC_TYPE_MASK;
187 }
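/*
 * Bit layout of done_flag as implied by the masks above: bit 0 is the
 * done bit (SEC_DONE_MASK), bits 1-3 hold the ICV status (SEC_ICV_MASK
 * shifted right by one) and bits 7-10 hold the completion flag
 * (SEC_FLAG_MASK >> SEC_FLAG_OFFSET).
 */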
188 
189 static u8 pre_parse_finished_bd3(struct bd_status *status, void *resp)
190 {
191 	struct sec_sqe3 *bd3 = resp;
192 
193 	status->done = le16_to_cpu(bd3->done_flag) & SEC_DONE_MASK;
194 	status->icv = (le16_to_cpu(bd3->done_flag) & SEC_ICV_MASK) >> 1;
195 	status->flag = (le16_to_cpu(bd3->done_flag) &
196 					SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
197 	status->tag = le64_to_cpu(bd3->tag);
198 	status->err_type = bd3->error_type;
199 
200 	return le32_to_cpu(bd3->bd_param) & SEC_TYPE_MASK;
201 }
202 
203 static int sec_cb_status_check(struct sec_req *req,
204 			       struct bd_status *status)
205 {
206 	struct sec_ctx *ctx = req->ctx;
207 
208 	if (unlikely(req->err_type || status->done != SEC_SQE_DONE)) {
209 		dev_err_ratelimited(ctx->dev, "err_type[%d], done[%u]\n",
210 				    req->err_type, status->done);
211 		return -EIO;
212 	}
213 
214 	if (unlikely(ctx->alg_type == SEC_SKCIPHER)) {
215 		if (unlikely(status->flag != SEC_SQE_CFLAG)) {
216 			dev_err_ratelimited(ctx->dev, "flag[%u]\n",
217 					    status->flag);
218 			return -EIO;
219 		}
220 	} else if (unlikely(ctx->alg_type == SEC_AEAD)) {
221 		if (unlikely(status->flag != SEC_SQE_AEAD_FLAG ||
222 			     status->icv == SEC_ICV_ERR)) {
223 			dev_err_ratelimited(ctx->dev,
224 					    "flag[%u], icv[%u]\n",
225 					    status->flag, status->icv);
226 			return -EBADMSG;
227 		}
228 	}
229 
230 	return 0;
231 }
232 
233 static void sec_req_cb(struct hisi_qp *qp, void *resp)
234 {
235 	struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
236 	struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx;
237 	u8 type_supported = qp_ctx->ctx->type_supported;
238 	struct bd_status status;
239 	struct sec_ctx *ctx;
240 	struct sec_req *req;
241 	int err;
242 	u8 type;
243 
244 	if (type_supported == SEC_BD_TYPE2) {
245 		type = pre_parse_finished_bd(&status, resp);
246 		req = qp_ctx->req_list[status.tag];
247 	} else {
248 		type = pre_parse_finished_bd3(&status, resp);
249 		req = (void *)(uintptr_t)status.tag;
250 	}
251 
252 	if (unlikely(type != type_supported)) {
253 		atomic64_inc(&dfx->err_bd_cnt);
254 		pr_err("err bd type [%u]\n", type);
255 		return;
256 	}
257 
258 	if (unlikely(!req)) {
259 		atomic64_inc(&dfx->invalid_req_cnt);
260 		atomic_inc(&qp->qp_status.used);
261 		return;
262 	}
263 
264 	req->err_type = status.err_type;
265 	ctx = req->ctx;
266 	err = sec_cb_status_check(req, &status);
267 	if (err)
268 		atomic64_inc(&dfx->done_flag_cnt);
269 
270 	atomic64_inc(&dfx->recv_cnt);
271 
272 	ctx->req_op->buf_unmap(ctx, req);
273 
274 	ctx->req_op->callback(ctx, req, err);
275 }
276 
277 static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
278 {
279 	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
280 	int ret;
281 
282 	if (ctx->fake_req_limit <=
283 	    atomic_read(&qp_ctx->qp->qp_status.used) &&
284 	    !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG))
285 		return -EBUSY;
286 
287 	spin_lock_bh(&qp_ctx->req_lock);
288 	ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
289 	if (ctx->fake_req_limit <=
290 	    atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
291 		list_add_tail(&req->backlog_head, &qp_ctx->backlog);
292 		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
293 		atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
294 		spin_unlock_bh(&qp_ctx->req_lock);
295 		return -EBUSY;
296 	}
297 	spin_unlock_bh(&qp_ctx->req_lock);
298 
299 	if (unlikely(ret == -EBUSY))
300 		return -ENOBUFS;
301 
302 	if (likely(!ret)) {
303 		ret = -EINPROGRESS;
304 		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
305 	}
306 
307 	return ret;
308 }
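/*
 * Return value summary for sec_bd_send(): -EBUSY means the queue is
 * saturated, and the request was either rejected up front (no
 * CRYPTO_TFM_REQ_MAY_BACKLOG) or accepted and tracked on the backlog
 * list; -ENOBUFS means the hardware queue rejected the descriptor;
 * -EINPROGRESS means the descriptor was queued normally and the
 * completion will arrive via sec_req_cb().
 */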
309 
310 /* Get DMA memory resources */
311 static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
312 {
313 	u16 q_depth = res->depth;
314 	int i;
315 
316 	res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth),
317 					 &res->c_ivin_dma, GFP_KERNEL);
318 	if (!res->c_ivin)
319 		return -ENOMEM;
320 
321 	for (i = 1; i < q_depth; i++) {
322 		res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
323 		res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
324 	}
325 
326 	return 0;
327 }
328 
329 static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
330 {
331 	if (res->c_ivin)
332 		dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth),
333 				  res->c_ivin, res->c_ivin_dma);
334 }
335 
336 static int sec_alloc_aiv_resource(struct device *dev, struct sec_alg_res *res)
337 {
338 	u16 q_depth = res->depth;
339 	int i;
340 
341 	res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth),
342 					 &res->a_ivin_dma, GFP_KERNEL);
343 	if (!res->a_ivin)
344 		return -ENOMEM;
345 
346 	for (i = 1; i < q_depth; i++) {
347 		res[i].a_ivin_dma = res->a_ivin_dma + i * SEC_IV_SIZE;
348 		res[i].a_ivin = res->a_ivin + i * SEC_IV_SIZE;
349 	}
350 
351 	return 0;
352 }
353 
354 static void sec_free_aiv_resource(struct device *dev, struct sec_alg_res *res)
355 {
356 	if (res->a_ivin)
357 		dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth),
358 				  res->a_ivin, res->a_ivin_dma);
359 }
360 
361 static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
362 {
363 	u16 q_depth = res->depth;
364 	int i;
365 
366 	res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ(q_depth) << 1,
367 					  &res->out_mac_dma, GFP_KERNEL);
368 	if (!res->out_mac)
369 		return -ENOMEM;
370 
371 	for (i = 1; i < q_depth; i++) {
372 		res[i].out_mac_dma = res->out_mac_dma +
373 				     i * (SEC_MAX_MAC_LEN << 1);
374 		res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1);
375 	}
376 
377 	return 0;
378 }
379 
380 static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
381 {
382 	if (res->out_mac)
383 		dma_free_coherent(dev, SEC_TOTAL_MAC_SZ(res->depth) << 1,
384 				  res->out_mac, res->out_mac_dma);
385 }
386 
387 static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
388 {
389 	if (res->pbuf)
390 		dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ(res->depth),
391 				  res->pbuf, res->pbuf_dma);
392 }
393 
394 /*
395  * To improve performance, a pre-mapped pbuf is used for small
396  * packets (< 512 bytes) when IOMMU translation is in use.
397  */
398 static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
399 {
400 	u16 q_depth = res->depth;
401 	int size = SEC_PBUF_PAGE_NUM(q_depth);
402 	int pbuf_page_offset;
403 	int i, j, k;
404 
405 	res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ(q_depth),
406 				&res->pbuf_dma, GFP_KERNEL);
407 	if (!res->pbuf)
408 		return -ENOMEM;
409 
410 	/*
411 	 * Each SEC_PBUF_PKG holds the data pbuf, the IV and the
412 	 * out_mac: <SEC_PBUF|SEC_IV|SEC_MAC>.
413 	 * Every page holds SEC_PBUF_NUM (six, for 4 KiB pages) packages,
414 	 * and the sec_qp_ctx needs q_depth of them in total, so
415 	 * SEC_PBUF_PAGE_NUM full pages plus the leftover area make up
416 	 * SEC_TOTAL_PBUF_SZ.
417 	 */
418 	for (i = 0; i <= size; i++) {
419 		pbuf_page_offset = PAGE_SIZE * i;
420 		for (j = 0; j < SEC_PBUF_NUM; j++) {
421 			k = i * SEC_PBUF_NUM + j;
422 			if (k == q_depth)
423 				break;
424 			res[k].pbuf = res->pbuf +
425 				j * SEC_PBUF_PKG + pbuf_page_offset;
426 			res[k].pbuf_dma = res->pbuf_dma +
427 				j * SEC_PBUF_PKG + pbuf_page_offset;
428 		}
429 	}
430 
431 	return 0;
432 }
433 
434 static int sec_alg_resource_alloc(struct sec_ctx *ctx,
435 				  struct sec_qp_ctx *qp_ctx)
436 {
437 	struct sec_alg_res *res = qp_ctx->res;
438 	struct device *dev = ctx->dev;
439 	int ret;
440 
441 	ret = sec_alloc_civ_resource(dev, res);
442 	if (ret)
443 		return ret;
444 
445 	if (ctx->alg_type == SEC_AEAD) {
446 		ret = sec_alloc_aiv_resource(dev, res);
447 		if (ret)
448 			goto alloc_aiv_fail;
449 
450 		ret = sec_alloc_mac_resource(dev, res);
451 		if (ret)
452 			goto alloc_mac_fail;
453 	}
454 	if (ctx->pbuf_supported) {
455 		ret = sec_alloc_pbuf_resource(dev, res);
456 		if (ret) {
457 			dev_err(dev, "fail to alloc pbuf dma resource!\n");
458 			goto alloc_pbuf_fail;
459 		}
460 	}
461 
462 	return 0;
463 
464 alloc_pbuf_fail:
465 	if (ctx->alg_type == SEC_AEAD)
466 		sec_free_mac_resource(dev, qp_ctx->res);
467 alloc_mac_fail:
468 	if (ctx->alg_type == SEC_AEAD)
469 		sec_free_aiv_resource(dev, res);
470 alloc_aiv_fail:
471 	sec_free_civ_resource(dev, res);
472 	return ret;
473 }
474 
475 static void sec_alg_resource_free(struct sec_ctx *ctx,
476 				  struct sec_qp_ctx *qp_ctx)
477 {
478 	struct device *dev = ctx->dev;
479 
480 	sec_free_civ_resource(dev, qp_ctx->res);
481 
482 	if (ctx->pbuf_supported)
483 		sec_free_pbuf_resource(dev, qp_ctx->res);
484 	if (ctx->alg_type == SEC_AEAD) {
485 		sec_free_mac_resource(dev, qp_ctx->res);
486 		sec_free_aiv_resource(dev, qp_ctx->res);
487 	}
488 }
489 
490 static int sec_alloc_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
491 {
492 	u16 q_depth = qp_ctx->qp->sq_depth;
493 	struct device *dev = ctx->dev;
494 	int ret = -ENOMEM;
495 
496 	qp_ctx->req_list = kcalloc(q_depth, sizeof(struct sec_req *), GFP_KERNEL);
497 	if (!qp_ctx->req_list)
498 		return ret;
499 
500 	qp_ctx->res = kcalloc(q_depth, sizeof(struct sec_alg_res), GFP_KERNEL);
501 	if (!qp_ctx->res)
502 		goto err_free_req_list;
503 	qp_ctx->res->depth = q_depth;
504 
505 	qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR);
506 	if (IS_ERR(qp_ctx->c_in_pool)) {
507 		dev_err(dev, "fail to create sgl pool for input!\n");
508 		goto err_free_res;
509 	}
510 
511 	qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR);
512 	if (IS_ERR(qp_ctx->c_out_pool)) {
513 		dev_err(dev, "fail to create sgl pool for output!\n");
514 		goto err_free_c_in_pool;
515 	}
516 
517 	ret = sec_alg_resource_alloc(ctx, qp_ctx);
518 	if (ret)
519 		goto err_free_c_out_pool;
520 
521 	return 0;
522 
523 err_free_c_out_pool:
524 	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
525 err_free_c_in_pool:
526 	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
527 err_free_res:
528 	kfree(qp_ctx->res);
529 err_free_req_list:
530 	kfree(qp_ctx->req_list);
531 	return ret;
532 }
533 
534 static void sec_free_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
535 {
536 	struct device *dev = ctx->dev;
537 
538 	sec_alg_resource_free(ctx, qp_ctx);
539 	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
540 	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
541 	kfree(qp_ctx->res);
542 	kfree(qp_ctx->req_list);
543 }
544 
545 static int sec_create_qp_ctx(struct sec_ctx *ctx, int qp_ctx_id)
546 {
547 	struct sec_qp_ctx *qp_ctx;
548 	struct hisi_qp *qp;
549 	int ret;
550 
551 	qp_ctx = &ctx->qp_ctx[qp_ctx_id];
552 	qp = ctx->qps[qp_ctx_id];
553 	qp->req_type = 0;
554 	qp->qp_ctx = qp_ctx;
555 	qp_ctx->qp = qp;
556 	qp_ctx->ctx = ctx;
557 
558 	qp->req_cb = sec_req_cb;
559 
560 	spin_lock_init(&qp_ctx->req_lock);
561 	idr_init(&qp_ctx->req_idr);
562 	INIT_LIST_HEAD(&qp_ctx->backlog);
563 
564 	ret = sec_alloc_qp_ctx_resource(ctx, qp_ctx);
565 	if (ret)
566 		goto err_destroy_idr;
567 
568 	ret = hisi_qm_start_qp(qp, 0);
569 	if (ret < 0)
570 		goto err_resource_free;
571 
572 	return 0;
573 
574 err_resource_free:
575 	sec_free_qp_ctx_resource(ctx, qp_ctx);
576 err_destroy_idr:
577 	idr_destroy(&qp_ctx->req_idr);
578 	return ret;
579 }
580 
581 static void sec_release_qp_ctx(struct sec_ctx *ctx,
582 			       struct sec_qp_ctx *qp_ctx)
583 {
584 	hisi_qm_stop_qp(qp_ctx->qp);
585 	sec_free_qp_ctx_resource(ctx, qp_ctx);
586 	idr_destroy(&qp_ctx->req_idr);
587 }
588 
589 static int sec_ctx_base_init(struct sec_ctx *ctx)
590 {
591 	struct sec_dev *sec;
592 	int i, ret;
593 
594 	ctx->qps = sec_create_qps();
595 	if (!ctx->qps) {
596 		pr_err("Can not create sec qps!\n");
597 		return -ENODEV;
598 	}
599 
600 	sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
601 	ctx->sec = sec;
602 	ctx->dev = &sec->qm.pdev->dev;
603 	ctx->hlf_q_num = sec->ctx_q_num >> 1;
604 
605 	ctx->pbuf_supported = ctx->sec->iommu_used;
606 
607 	/* Half of the queue depth is used as the fake-busy limit for backlogging requests. */
608 	ctx->fake_req_limit = ctx->qps[0]->sq_depth >> 1;
609 	ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
610 			      GFP_KERNEL);
611 	if (!ctx->qp_ctx) {
612 		ret = -ENOMEM;
613 		goto err_destroy_qps;
614 	}
615 
616 	for (i = 0; i < sec->ctx_q_num; i++) {
617 		ret = sec_create_qp_ctx(ctx, i);
618 		if (ret)
619 			goto err_sec_release_qp_ctx;
620 	}
621 
622 	return 0;
623 
624 err_sec_release_qp_ctx:
625 	for (i = i - 1; i >= 0; i--)
626 		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
627 	kfree(ctx->qp_ctx);
628 err_destroy_qps:
629 	sec_destroy_qps(ctx->qps, sec->ctx_q_num);
630 	return ret;
631 }
632 
633 static void sec_ctx_base_uninit(struct sec_ctx *ctx)
634 {
635 	int i;
636 
637 	for (i = 0; i < ctx->sec->ctx_q_num; i++)
638 		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
639 
640 	sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num);
641 	kfree(ctx->qp_ctx);
642 }
643 
644 static int sec_cipher_init(struct sec_ctx *ctx)
645 {
646 	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
647 
648 	c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
649 					  &c_ctx->c_key_dma, GFP_KERNEL);
650 	if (!c_ctx->c_key)
651 		return -ENOMEM;
652 
653 	return 0;
654 }
655 
656 static void sec_cipher_uninit(struct sec_ctx *ctx)
657 {
658 	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
659 
660 	memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
661 	dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
662 			  c_ctx->c_key, c_ctx->c_key_dma);
663 }
664 
665 static int sec_auth_init(struct sec_ctx *ctx)
666 {
667 	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
668 
669 	a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_AKEY_SIZE,
670 					  &a_ctx->a_key_dma, GFP_KERNEL);
671 	if (!a_ctx->a_key)
672 		return -ENOMEM;
673 
674 	return 0;
675 }
676 
677 static void sec_auth_uninit(struct sec_ctx *ctx)
678 {
679 	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
680 
681 	memzero_explicit(a_ctx->a_key, SEC_MAX_AKEY_SIZE);
682 	dma_free_coherent(ctx->dev, SEC_MAX_AKEY_SIZE,
683 			  a_ctx->a_key, a_ctx->a_key_dma);
684 }
685 
686 static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm)
687 {
688 	const char *alg = crypto_tfm_alg_name(&tfm->base);
689 	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
690 	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
691 
692 	c_ctx->fallback = false;
693 
694 	/* Currently, only XTS mode needs a fallback tfm when a 192-bit key is used */
695 	if (likely(strncmp(alg, "xts", SEC_XTS_NAME_SZ)))
696 		return 0;
697 
698 	c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0,
699 						  CRYPTO_ALG_NEED_FALLBACK);
700 	if (IS_ERR(c_ctx->fbtfm)) {
701 		pr_err("failed to alloc xts mode fallback tfm!\n");
702 		return PTR_ERR(c_ctx->fbtfm);
703 	}
704 
705 	return 0;
706 }
707 
708 static int sec_skcipher_init(struct crypto_skcipher *tfm)
709 {
710 	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
711 	int ret;
712 
713 	ctx->alg_type = SEC_SKCIPHER;
714 	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
715 	ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
716 	if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
717 		pr_err("get error skcipher iv size!\n");
718 		return -EINVAL;
719 	}
720 
721 	ret = sec_ctx_base_init(ctx);
722 	if (ret)
723 		return ret;
724 
725 	ret = sec_cipher_init(ctx);
726 	if (ret)
727 		goto err_cipher_init;
728 
729 	ret = sec_skcipher_fbtfm_init(tfm);
730 	if (ret)
731 		goto err_fbtfm_init;
732 
733 	return 0;
734 
735 err_fbtfm_init:
736 	sec_cipher_uninit(ctx);
737 err_cipher_init:
738 	sec_ctx_base_uninit(ctx);
739 	return ret;
740 }
741 
742 static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
743 {
744 	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
745 
746 	if (ctx->c_ctx.fbtfm)
747 		crypto_free_sync_skcipher(ctx->c_ctx.fbtfm);
748 
749 	sec_cipher_uninit(ctx);
750 	sec_ctx_base_uninit(ctx);
751 }
752 
753 static int sec_skcipher_3des_setkey(struct crypto_skcipher *tfm, const u8 *key, const u32 keylen)
754 {
755 	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
756 	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
757 	int ret;
758 
759 	ret = verify_skcipher_des3_key(tfm, key);
760 	if (ret)
761 		return ret;
762 
763 	switch (keylen) {
764 	case SEC_DES3_2KEY_SIZE:
765 		c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
766 		break;
767 	case SEC_DES3_3KEY_SIZE:
768 		c_ctx->c_key_len = SEC_CKEY_3DES_3KEY;
769 		break;
770 	default:
771 		return -EINVAL;
772 	}
773 
774 	return 0;
775 }
776 
777 static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
778 				       const u32 keylen,
779 				       const enum sec_cmode c_mode)
780 {
781 	if (c_mode == SEC_CMODE_XTS) {
782 		switch (keylen) {
783 		case SEC_XTS_MIN_KEY_SIZE:
784 			c_ctx->c_key_len = SEC_CKEY_128BIT;
785 			break;
786 		case SEC_XTS_MID_KEY_SIZE:
787 			c_ctx->fallback = true;
788 			break;
789 		case SEC_XTS_MAX_KEY_SIZE:
790 			c_ctx->c_key_len = SEC_CKEY_256BIT;
791 			break;
792 		default:
793 			pr_err("hisi_sec2: xts mode key error!\n");
794 			return -EINVAL;
795 		}
796 	} else {
797 		if (c_ctx->c_alg == SEC_CALG_SM4 &&
798 		    keylen != AES_KEYSIZE_128) {
799 			pr_err("hisi_sec2: sm4 key error!\n");
800 			return -EINVAL;
801 		} else {
802 			switch (keylen) {
803 			case AES_KEYSIZE_128:
804 				c_ctx->c_key_len = SEC_CKEY_128BIT;
805 				break;
806 			case AES_KEYSIZE_192:
807 				c_ctx->c_key_len = SEC_CKEY_192BIT;
808 				break;
809 			case AES_KEYSIZE_256:
810 				c_ctx->c_key_len = SEC_CKEY_256BIT;
811 				break;
812 			default:
813 				pr_err("hisi_sec2: aes key error!\n");
814 				return -EINVAL;
815 			}
816 		}
817 	}
818 
819 	return 0;
820 }
821 
822 static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
823 			       const u32 keylen, const enum sec_calg c_alg,
824 			       const enum sec_cmode c_mode)
825 {
826 	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
827 	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
828 	struct device *dev = ctx->dev;
829 	int ret;
830 
831 	if (c_mode == SEC_CMODE_XTS) {
832 		ret = xts_verify_key(tfm, key, keylen);
833 		if (ret) {
834 			dev_err(dev, "xts mode key err!\n");
835 			return ret;
836 		}
837 	}
838 
839 	c_ctx->c_alg  = c_alg;
840 	c_ctx->c_mode = c_mode;
841 
842 	switch (c_alg) {
843 	case SEC_CALG_3DES:
844 		ret = sec_skcipher_3des_setkey(tfm, key, keylen);
845 		break;
846 	case SEC_CALG_AES:
847 	case SEC_CALG_SM4:
848 		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
849 		break;
850 	default:
851 		dev_err(dev, "sec c_alg err!\n");
852 		return -EINVAL;
853 	}
854 
855 	if (ret) {
856 		dev_err(dev, "set sec key err!\n");
857 		return ret;
858 	}
859 
860 	memcpy(c_ctx->c_key, key, keylen);
861 	if (c_ctx->fallback && c_ctx->fbtfm) {
862 		ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen);
863 		if (ret) {
864 			dev_err(dev, "failed to set fallback skcipher key!\n");
865 			return ret;
866 		}
867 	}
868 	return 0;
869 }
870 
871 #define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode)			\
872 static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
873 	u32 keylen)							\
874 {									\
875 	return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode);	\
876 }
877 
878 GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
879 GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
880 GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)
881 GEN_SEC_SETKEY_FUNC(aes_ctr, SEC_CALG_AES, SEC_CMODE_CTR)
882 GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
883 GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)
884 GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
885 GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)
886 GEN_SEC_SETKEY_FUNC(sm4_ctr, SEC_CALG_SM4, SEC_CMODE_CTR)
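/*
 * Each GEN_SEC_SETKEY_FUNC() expansion above produces a thin setkey
 * wrapper, e.g. sec_setkey_aes_cbc(), that pins the cipher algorithm and
 * mode before delegating to sec_skcipher_setkey().
 */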
887 
888 static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
889 			struct scatterlist *src)
890 {
891 	struct sec_aead_req *a_req = &req->aead_req;
892 	struct aead_request *aead_req = a_req->aead_req;
893 	struct sec_cipher_req *c_req = &req->c_req;
894 	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
895 	struct device *dev = ctx->dev;
896 	int copy_size, pbuf_length;
897 	int req_id = req->req_id;
898 	struct crypto_aead *tfm;
899 	size_t authsize;
900 	u8 *mac_offset;
901 
902 	if (ctx->alg_type == SEC_AEAD)
903 		copy_size = aead_req->cryptlen + aead_req->assoclen;
904 	else
905 		copy_size = c_req->c_len;
906 
907 	pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
908 			qp_ctx->res[req_id].pbuf, copy_size);
909 	if (unlikely(pbuf_length != copy_size)) {
910 		dev_err(dev, "copy src data to pbuf error!\n");
911 		return -EINVAL;
912 	}
913 	if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
914 		tfm = crypto_aead_reqtfm(aead_req);
915 		authsize = crypto_aead_authsize(tfm);
916 		mac_offset = qp_ctx->res[req_id].pbuf + copy_size - authsize;
917 		memcpy(a_req->out_mac, mac_offset, authsize);
918 	}
919 
920 	req->in_dma = qp_ctx->res[req_id].pbuf_dma;
921 	c_req->c_out_dma = req->in_dma;
922 
923 	return 0;
924 }
925 
926 static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
927 			struct scatterlist *dst)
928 {
929 	struct aead_request *aead_req = req->aead_req.aead_req;
930 	struct sec_cipher_req *c_req = &req->c_req;
931 	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
932 	int copy_size, pbuf_length;
933 	int req_id = req->req_id;
934 
935 	if (ctx->alg_type == SEC_AEAD)
936 		copy_size = c_req->c_len + aead_req->assoclen;
937 	else
938 		copy_size = c_req->c_len;
939 
940 	pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
941 			qp_ctx->res[req_id].pbuf, copy_size);
942 	if (unlikely(pbuf_length != copy_size))
943 		dev_err(ctx->dev, "copy pbuf data to dst error!\n");
944 }
945 
946 static int sec_aead_mac_init(struct sec_aead_req *req)
947 {
948 	struct aead_request *aead_req = req->aead_req;
949 	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
950 	size_t authsize = crypto_aead_authsize(tfm);
951 	struct scatterlist *sgl = aead_req->src;
952 	u8 *mac_out = req->out_mac;
953 	size_t copy_size;
954 	off_t skip_size;
955 
956 	/* Copy input mac */
957 	skip_size = aead_req->assoclen + aead_req->cryptlen - authsize;
958 	copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out, authsize, skip_size);
959 	if (unlikely(copy_size != authsize))
960 		return -EINVAL;
961 
962 	return 0;
963 }
964 
965 static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
966 			  struct scatterlist *src, struct scatterlist *dst)
967 {
968 	struct sec_cipher_req *c_req = &req->c_req;
969 	struct sec_aead_req *a_req = &req->aead_req;
970 	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
971 	struct sec_alg_res *res = &qp_ctx->res[req->req_id];
972 	struct device *dev = ctx->dev;
973 	int ret;
974 
975 	if (req->use_pbuf) {
976 		c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET;
977 		c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET;
978 		if (ctx->alg_type == SEC_AEAD) {
979 			a_req->a_ivin = res->a_ivin;
980 			a_req->a_ivin_dma = res->a_ivin_dma;
981 			a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET;
982 			a_req->out_mac_dma = res->pbuf_dma +
983 					SEC_PBUF_MAC_OFFSET;
984 		}
985 		ret = sec_cipher_pbuf_map(ctx, req, src);
986 
987 		return ret;
988 	}
989 	c_req->c_ivin = res->c_ivin;
990 	c_req->c_ivin_dma = res->c_ivin_dma;
991 	if (ctx->alg_type == SEC_AEAD) {
992 		a_req->a_ivin = res->a_ivin;
993 		a_req->a_ivin_dma = res->a_ivin_dma;
994 		a_req->out_mac = res->out_mac;
995 		a_req->out_mac_dma = res->out_mac_dma;
996 	}
997 
998 	req->in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
999 						qp_ctx->c_in_pool,
1000 						req->req_id,
1001 						&req->in_dma);
1002 	if (IS_ERR(req->in)) {
1003 		dev_err(dev, "fail to dma map input sgl buffers!\n");
1004 		return PTR_ERR(req->in);
1005 	}
1006 
1007 	if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
1008 		ret = sec_aead_mac_init(a_req);
1009 		if (unlikely(ret)) {
1010 			dev_err(dev, "fail to init mac data for ICV!\n");
1011 			hisi_acc_sg_buf_unmap(dev, src, req->in);
1012 			return ret;
1013 		}
1014 	}
1015 
1016 	if (dst == src) {
1017 		c_req->c_out = req->in;
1018 		c_req->c_out_dma = req->in_dma;
1019 	} else {
1020 		c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
1021 							     qp_ctx->c_out_pool,
1022 							     req->req_id,
1023 							     &c_req->c_out_dma);
1024 
1025 		if (IS_ERR(c_req->c_out)) {
1026 			dev_err(dev, "fail to dma map output sgl buffers!\n");
1027 			hisi_acc_sg_buf_unmap(dev, src, req->in);
1028 			return PTR_ERR(c_req->c_out);
1029 		}
1030 	}
1031 
1032 	return 0;
1033 }
1034 
1035 static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
1036 			     struct scatterlist *src, struct scatterlist *dst)
1037 {
1038 	struct sec_cipher_req *c_req = &req->c_req;
1039 	struct device *dev = ctx->dev;
1040 
1041 	if (req->use_pbuf) {
1042 		sec_cipher_pbuf_unmap(ctx, req, dst);
1043 	} else {
1044 		if (dst != src)
1045 			hisi_acc_sg_buf_unmap(dev, src, req->in);
1046 
1047 		hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);
1048 	}
1049 }
1050 
1051 static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
1052 {
1053 	struct skcipher_request *sq = req->c_req.sk_req;
1054 
1055 	return sec_cipher_map(ctx, req, sq->src, sq->dst);
1056 }
1057 
1058 static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
1059 {
1060 	struct skcipher_request *sq = req->c_req.sk_req;
1061 
1062 	sec_cipher_unmap(ctx, req, sq->src, sq->dst);
1063 }
1064 
1065 static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx,
1066 				struct crypto_authenc_keys *keys)
1067 {
1068 	switch (keys->enckeylen) {
1069 	case AES_KEYSIZE_128:
1070 		c_ctx->c_key_len = SEC_CKEY_128BIT;
1071 		break;
1072 	case AES_KEYSIZE_192:
1073 		c_ctx->c_key_len = SEC_CKEY_192BIT;
1074 		break;
1075 	case AES_KEYSIZE_256:
1076 		c_ctx->c_key_len = SEC_CKEY_256BIT;
1077 		break;
1078 	default:
1079 		pr_err("hisi_sec2: aead aes key error!\n");
1080 		return -EINVAL;
1081 	}
1082 	memcpy(c_ctx->c_key, keys->enckey, keys->enckeylen);
1083 
1084 	return 0;
1085 }
1086 
1087 static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
1088 				 struct crypto_authenc_keys *keys)
1089 {
1090 	struct crypto_shash *hash_tfm = ctx->hash_tfm;
1091 	int blocksize, digestsize, ret;
1092 
1093 	if (!keys->authkeylen) {
1094 		pr_err("hisi_sec2: aead auth key error!\n");
1095 		return -EINVAL;
1096 	}
1097 
1098 	blocksize = crypto_shash_blocksize(hash_tfm);
1099 	digestsize = crypto_shash_digestsize(hash_tfm);
1100 	if (keys->authkeylen > blocksize) {
1101 		ret = crypto_shash_tfm_digest(hash_tfm, keys->authkey,
1102 					      keys->authkeylen, ctx->a_key);
1103 		if (ret) {
1104 			pr_err("hisi_sec2: aead auth digest error!\n");
1105 			return -EINVAL;
1106 		}
1107 		ctx->a_key_len = digestsize;
1108 	} else {
1109 		memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
1110 		ctx->a_key_len = keys->authkeylen;
1111 	}
1112 
1113 	return 0;
1114 }
1115 
1116 static int sec_aead_setauthsize(struct crypto_aead *aead, unsigned int authsize)
1117 {
1118 	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
1119 	struct sec_ctx *ctx = crypto_tfm_ctx(tfm);
1120 	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
1121 
1122 	return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);
1123 }
1124 
1125 static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
1126 				    struct crypto_aead *tfm, const u8 *key,
1127 				    unsigned int keylen)
1128 {
1129 	crypto_aead_clear_flags(a_ctx->fallback_aead_tfm, CRYPTO_TFM_REQ_MASK);
1130 	crypto_aead_set_flags(a_ctx->fallback_aead_tfm,
1131 			      crypto_aead_get_flags(tfm) & CRYPTO_TFM_REQ_MASK);
1132 	return crypto_aead_setkey(a_ctx->fallback_aead_tfm, key, keylen);
1133 }
1134 
1135 static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
1136 			   const u32 keylen, const enum sec_hash_alg a_alg,
1137 			   const enum sec_calg c_alg,
1138 			   const enum sec_cmode c_mode)
1139 {
1140 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1141 	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
1142 	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
1143 	struct device *dev = ctx->dev;
1144 	struct crypto_authenc_keys keys;
1145 	int ret;
1146 
1147 	ctx->a_ctx.a_alg = a_alg;
1148 	ctx->c_ctx.c_alg = c_alg;
1149 	c_ctx->c_mode = c_mode;
1150 
1151 	if (c_mode == SEC_CMODE_CCM || c_mode == SEC_CMODE_GCM) {
1152 		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
1153 		if (ret) {
1154 			dev_err(dev, "set sec aes ccm cipher key err!\n");
1155 			return ret;
1156 		}
1157 		memcpy(c_ctx->c_key, key, keylen);
1158 
1159 		return sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
1160 	}
1161 
1162 	ret = crypto_authenc_extractkeys(&keys, key, keylen);
1163 	if (ret)
1164 		goto bad_key;
1165 
1166 	ret = sec_aead_aes_set_key(c_ctx, &keys);
1167 	if (ret) {
1168 		dev_err(dev, "set sec cipher key err!\n");
1169 		goto bad_key;
1170 	}
1171 
1172 	ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys);
1173 	if (ret) {
1174 		dev_err(dev, "set sec auth key err!\n");
1175 		goto bad_key;
1176 	}
1177 
1178 	if (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK) {
1179 		ret = -EINVAL;
1180 		dev_err(dev, "AUTH key length error!\n");
1181 		goto bad_key;
1182 	}
1183 
1184 	ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
1185 	if (ret) {
1186 		dev_err(dev, "set sec fallback key err!\n");
1187 		goto bad_key;
1188 	}
1189 
1190 	return 0;
1191 
1192 bad_key:
1193 	memzero_explicit(&keys, sizeof(struct crypto_authenc_keys));
1194 	return ret;
1195 }
1196 
1197 
1198 #define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, cmode)				\
1199 static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, u32 keylen)	\
1200 {											\
1201 	return sec_aead_setkey(tfm, key, keylen, aalg, calg, cmode);			\
1202 }
1203 
1204 GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1, SEC_CALG_AES, SEC_CMODE_CBC)
1205 GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256, SEC_CALG_AES, SEC_CMODE_CBC)
1206 GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512, SEC_CALG_AES, SEC_CMODE_CBC)
1207 GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES, SEC_CMODE_CCM)
1208 GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES, SEC_CMODE_GCM)
1209 GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4, SEC_CMODE_CCM)
1210 GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4, SEC_CMODE_GCM)
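/*
 * Likewise, each GEN_SEC_AEAD_SETKEY_FUNC() expansion above yields a
 * per-algorithm AEAD setkey callback, e.g. sec_setkey_aes_cbc_sha1(),
 * wrapping sec_aead_setkey() with fixed hash, cipher and mode values.
 */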
1211 
1212 static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
1213 {
1214 	struct aead_request *aq = req->aead_req.aead_req;
1215 
1216 	return sec_cipher_map(ctx, req, aq->src, aq->dst);
1217 }
1218 
1219 static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
1220 {
1221 	struct aead_request *aq = req->aead_req.aead_req;
1222 
1223 	sec_cipher_unmap(ctx, req, aq->src, aq->dst);
1224 }
1225 
1226 static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
1227 {
1228 	int ret;
1229 
1230 	ret = ctx->req_op->buf_map(ctx, req);
1231 	if (unlikely(ret))
1232 		return ret;
1233 
1234 	ctx->req_op->do_transfer(ctx, req);
1235 
1236 	ret = ctx->req_op->bd_fill(ctx, req);
1237 	if (unlikely(ret))
1238 		goto unmap_req_buf;
1239 
1240 	return ret;
1241 
1242 unmap_req_buf:
1243 	ctx->req_op->buf_unmap(ctx, req);
1244 	return ret;
1245 }
1246 
1247 static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
1248 {
1249 	ctx->req_op->buf_unmap(ctx, req);
1250 }
1251 
1252 static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
1253 {
1254 	struct skcipher_request *sk_req = req->c_req.sk_req;
1255 	struct sec_cipher_req *c_req = &req->c_req;
1256 
1257 	memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
1258 }
1259 
1260 static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
1261 {
1262 	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
1263 	struct sec_cipher_req *c_req = &req->c_req;
1264 	struct sec_sqe *sec_sqe = &req->sec_sqe;
1265 	u8 scene, sa_type, da_type;
1266 	u8 bd_type, cipher;
1267 	u8 de = 0;
1268 
1269 	memset(sec_sqe, 0, sizeof(struct sec_sqe));
1270 
1271 	sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
1272 	sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
1273 	sec_sqe->type2.data_src_addr = cpu_to_le64(req->in_dma);
1274 	sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);
1275 
1276 	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
1277 						SEC_CMODE_OFFSET);
1278 	sec_sqe->type2.c_alg = c_ctx->c_alg;
1279 	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
1280 						SEC_CKEY_OFFSET);
1281 
1282 	bd_type = SEC_BD_TYPE2;
1283 	if (c_req->encrypt)
1284 		cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET;
1285 	else
1286 		cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
1287 	sec_sqe->type_cipher_auth = bd_type | cipher;
1288 
1289 	/* Set destination and source address type */
1290 	if (req->use_pbuf) {
1291 		sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET;
1292 		da_type = SEC_PBUF << SEC_DST_SGL_OFFSET;
1293 	} else {
1294 		sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
1295 		da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
1296 	}
1297 
1298 	sec_sqe->sdm_addr_type |= da_type;
1299 	scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
1300 	if (req->in_dma != c_req->c_out_dma)
1301 		de = 0x1 << SEC_DE_OFFSET;
1302 
1303 	sec_sqe->sds_sa_type = (de | scene | sa_type);
1304 
1305 	sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
1306 	sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);
1307 
1308 	return 0;
1309 }
1310 
1311 static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
1312 {
1313 	struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3;
1314 	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
1315 	struct sec_cipher_req *c_req = &req->c_req;
1316 	u32 bd_param = 0;
1317 	u16 cipher;
1318 
1319 	memset(sec_sqe3, 0, sizeof(struct sec_sqe3));
1320 
1321 	sec_sqe3->c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
1322 	sec_sqe3->no_scene.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
1323 	sec_sqe3->data_src_addr = cpu_to_le64(req->in_dma);
1324 	sec_sqe3->data_dst_addr = cpu_to_le64(c_req->c_out_dma);
1325 
1326 	sec_sqe3->c_mode_alg = ((u8)c_ctx->c_alg << SEC_CALG_OFFSET_V3) |
1327 						c_ctx->c_mode;
1328 	sec_sqe3->c_icv_key |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
1329 						SEC_CKEY_OFFSET_V3);
1330 
1331 	if (c_req->encrypt)
1332 		cipher = SEC_CIPHER_ENC;
1333 	else
1334 		cipher = SEC_CIPHER_DEC;
1335 	sec_sqe3->c_icv_key |= cpu_to_le16(cipher);
1336 
1337 	/* Set the CTR counter rollover to 128-bit mode */
1338 	sec_sqe3->auth_mac_key = cpu_to_le32((u32)SEC_CTR_CNT_ROLLOVER <<
1339 					SEC_CTR_CNT_OFFSET);
1340 
1341 	if (req->use_pbuf) {
1342 		bd_param |= SEC_PBUF << SEC_SRC_SGL_OFFSET_V3;
1343 		bd_param |= SEC_PBUF << SEC_DST_SGL_OFFSET_V3;
1344 	} else {
1345 		bd_param |= SEC_SGL << SEC_SRC_SGL_OFFSET_V3;
1346 		bd_param |= SEC_SGL << SEC_DST_SGL_OFFSET_V3;
1347 	}
1348 
1349 	bd_param |= SEC_COMM_SCENE << SEC_SCENE_OFFSET_V3;
1350 	if (req->in_dma != c_req->c_out_dma)
1351 		bd_param |= 0x1 << SEC_DE_OFFSET_V3;
1352 
1353 	bd_param |= SEC_BD_TYPE3;
1354 	sec_sqe3->bd_param = cpu_to_le32(bd_param);
1355 
1356 	sec_sqe3->c_len_ivin |= cpu_to_le32(c_req->c_len);
1357 	sec_sqe3->tag = cpu_to_le64((unsigned long)req);
1358 
1359 	return 0;
1360 }
1361 
1362 /* increment counter (128-bit int) */
1363 static void ctr_iv_inc(__u8 *counter, __u8 bits, __u32 nums)
1364 {
1365 	do {
1366 		--bits;
1367 		nums += counter[bits];
1368 		counter[bits] = nums & BITS_MASK;
1369 		nums >>= BYTE_BITS;
1370 	} while (bits && nums);
1371 }
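/*
 * Illustrative example: for a 16-byte counter ending in ...fe ff and
 * nums == 2, the last byte wraps to 0x01 and the carry increments the
 * next byte, giving ...ff 01.
 */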
1372 
1373 static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
1374 {
1375 	struct aead_request *aead_req = req->aead_req.aead_req;
1376 	struct skcipher_request *sk_req = req->c_req.sk_req;
1377 	u32 iv_size = req->ctx->c_ctx.ivsize;
1378 	struct scatterlist *sgl;
1379 	unsigned int cryptlen;
1380 	size_t sz;
1381 	u8 *iv;
1382 
1383 	if (req->c_req.encrypt)
1384 		sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst;
1385 	else
1386 		sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src;
1387 
1388 	if (alg_type == SEC_SKCIPHER) {
1389 		iv = sk_req->iv;
1390 		cryptlen = sk_req->cryptlen;
1391 	} else {
1392 		iv = aead_req->iv;
1393 		cryptlen = aead_req->cryptlen;
1394 	}
1395 
1396 	if (req->ctx->c_ctx.c_mode == SEC_CMODE_CBC) {
1397 		sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
1398 					cryptlen - iv_size);
1399 		if (unlikely(sz != iv_size))
1400 			dev_err(req->ctx->dev, "copy output iv error!\n");
1401 	} else {
1402 		sz = cryptlen / iv_size;
1403 		if (cryptlen % iv_size)
1404 			sz += 1;
1405 		ctr_iv_inc(iv, iv_size, sz);
1406 	}
1407 }
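/*
 * In CBC mode the next IV is the last ciphertext block, copied here from
 * the data scatterlist; in CTR mode the counter is advanced by the number
 * of blocks processed (cryptlen rounded up to the IV size).
 */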
1408 
1409 static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
1410 				struct sec_qp_ctx *qp_ctx)
1411 {
1412 	struct sec_req *backlog_req = NULL;
1413 
1414 	spin_lock_bh(&qp_ctx->req_lock);
1415 	if (ctx->fake_req_limit >=
1416 	    atomic_read(&qp_ctx->qp->qp_status.used) &&
1417 	    !list_empty(&qp_ctx->backlog)) {
1418 		backlog_req = list_first_entry(&qp_ctx->backlog,
1419 				typeof(*backlog_req), backlog_head);
1420 		list_del(&backlog_req->backlog_head);
1421 	}
1422 	spin_unlock_bh(&qp_ctx->req_lock);
1423 
1424 	return backlog_req;
1425 }
1426 
1427 static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
1428 				  int err)
1429 {
1430 	struct skcipher_request *sk_req = req->c_req.sk_req;
1431 	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
1432 	struct skcipher_request *backlog_sk_req;
1433 	struct sec_req *backlog_req;
1434 
1435 	sec_free_req_id(req);
1436 
1437 	/* Copy out the updated IV after encryption in CBC/CTR mode */
1438 	if (!err && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
1439 	    ctx->c_ctx.c_mode == SEC_CMODE_CTR) && req->c_req.encrypt)
1440 		sec_update_iv(req, SEC_SKCIPHER);
1441 
1442 	while (1) {
1443 		backlog_req = sec_back_req_clear(ctx, qp_ctx);
1444 		if (!backlog_req)
1445 			break;
1446 
1447 		backlog_sk_req = backlog_req->c_req.sk_req;
1448 		skcipher_request_complete(backlog_sk_req, -EINPROGRESS);
1449 		atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt);
1450 	}
1451 
1452 	skcipher_request_complete(sk_req, err);
1453 }
1454 
1455 static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
1456 {
1457 	struct aead_request *aead_req = req->aead_req.aead_req;
1458 	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
1459 	size_t authsize = crypto_aead_authsize(tfm);
1460 	struct sec_aead_req *a_req = &req->aead_req;
1461 	struct sec_cipher_req *c_req = &req->c_req;
1462 	u32 data_size = aead_req->cryptlen;
1463 	u8 flage = 0;
1464 	u8 cm, cl;
1465 
1466 	/* The IV specification has already been checked in aead_iv_demension_check() */
1467 	cl = c_req->c_ivin[0] + 1;
1468 	c_req->c_ivin[ctx->c_ctx.ivsize - cl] = 0x00;
1469 	memset(&c_req->c_ivin[ctx->c_ctx.ivsize - cl], 0, cl);
1470 	c_req->c_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] = IV_CTR_INIT;
1471 
1472 	/* the low 3 bits are L' */
1473 	flage |= c_req->c_ivin[0] & IV_CL_MASK;
1474 
1475 	/* M' occupies bits 3~5, the Adata flag is bit 6 */
1476 	cm = (authsize - IV_CM_CAL_NUM) / IV_CM_CAL_NUM;
1477 	flage |= cm << IV_CM_OFFSET;
1478 	if (aead_req->assoclen)
1479 		flage |= 0x01 << IV_FLAGS_OFFSET;
1480 
1481 	memcpy(a_req->a_ivin, c_req->c_ivin, ctx->c_ctx.ivsize);
1482 	a_req->a_ivin[0] = flage;
1483 
1484 	/*
1485 	 * The last 32 bits hold the counter's initial value, but the
1486 	 * nonce occupies the first 16 of them, so only the trailing
1487 	 * 16 bits are filled with the cipher text length.
1488 	 */
1489 	if (!c_req->encrypt)
1490 		data_size = aead_req->cryptlen - authsize;
1491 
1492 	a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] =
1493 			data_size & IV_LAST_BYTE_MASK;
1494 	data_size >>= IV_BYTE_OFFSET;
1495 	a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE2] =
1496 			data_size & IV_LAST_BYTE_MASK;
1497 }
1498 
1499 static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
1500 {
1501 	struct aead_request *aead_req = req->aead_req.aead_req;
1502 	struct sec_aead_req *a_req = &req->aead_req;
1503 	struct sec_cipher_req *c_req = &req->c_req;
1504 
1505 	memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
1506 
1507 	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM) {
1508 		/*
1509 		 * CCM 16-byte Cipher_IV: {1B_Flag, 13B_IV, 2B_counter};
1510 		 * the counter must be set to 0x01.
1511 		 * CCM 16-byte Auth_IV: {1B_AFlag, 13B_IV, 2B_Ptext_length}
1512 		 */
1513 		set_aead_auth_iv(ctx, req);
1514 	} else if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
1515 		/* GCM 12Byte Cipher_IV == Auth_IV */
1516 		memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE);
1517 	}
1518 }
1519 
1520 static void sec_auth_bd_fill_xcm(struct sec_auth_ctx *ctx, int dir,
1521 				 struct sec_req *req, struct sec_sqe *sec_sqe)
1522 {
1523 	struct sec_aead_req *a_req = &req->aead_req;
1524 	struct aead_request *aq = a_req->aead_req;
1525 	struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
1526 	size_t authsize = crypto_aead_authsize(tfm);
1527 
1528 	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
1529 	sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)authsize);
1530 
1531 	/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
1532 	sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr;
1533 	sec_sqe->type2.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma);
1534 	sec_sqe->type_cipher_auth |= SEC_NO_AUTH << SEC_AUTH_OFFSET;
1535 
1536 	if (dir)
1537 		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
1538 	else
1539 		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;
1540 
1541 	sec_sqe->type2.alen_ivllen = cpu_to_le32(aq->assoclen);
1542 	sec_sqe->type2.auth_src_offset = cpu_to_le16(0x0);
1543 	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
1544 
1545 	sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
1546 }
1547 
1548 static void sec_auth_bd_fill_xcm_v3(struct sec_auth_ctx *ctx, int dir,
1549 				    struct sec_req *req, struct sec_sqe3 *sqe3)
1550 {
1551 	struct sec_aead_req *a_req = &req->aead_req;
1552 	struct aead_request *aq = a_req->aead_req;
1553 	struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
1554 	size_t authsize = crypto_aead_authsize(tfm);
1555 
1556 	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
1557 	sqe3->c_icv_key |= cpu_to_le16((u16)authsize << SEC_MAC_OFFSET_V3);
1558 
1559 	/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
1560 	sqe3->a_key_addr = sqe3->c_key_addr;
1561 	sqe3->auth_ivin.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma);
1562 	sqe3->auth_mac_key |= SEC_NO_AUTH;
1563 
1564 	if (dir)
1565 		sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3;
1566 	else
1567 		sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3;
1568 
1569 	sqe3->a_len_key = cpu_to_le32(aq->assoclen);
1570 	sqe3->auth_src_offset = cpu_to_le16(0x0);
1571 	sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
1572 	sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma);
1573 }
1574 
1575 static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
1576 			       struct sec_req *req, struct sec_sqe *sec_sqe)
1577 {
1578 	struct sec_aead_req *a_req = &req->aead_req;
1579 	struct sec_cipher_req *c_req = &req->c_req;
1580 	struct aead_request *aq = a_req->aead_req;
1581 	struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
1582 	size_t authsize = crypto_aead_authsize(tfm);
1583 
1584 	sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);
1585 
1586 	sec_sqe->type2.mac_key_alg = cpu_to_le32(authsize / SEC_SQE_LEN_RATE);
1587 
1588 	sec_sqe->type2.mac_key_alg |=
1589 			cpu_to_le32((u32)((ctx->a_key_len) /
1590 			SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET);
1591 
1592 	sec_sqe->type2.mac_key_alg |=
1593 			cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);
1594 
1595 	if (dir) {
1596 		sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET;
1597 		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
1598 	} else {
1599 		sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE2 << SEC_AUTH_OFFSET;
1600 		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;
1601 	}
1602 	sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen);
1603 
1604 	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
1605 
1606 	sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
1607 }
1608 
1609 static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
1610 {
1611 	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
1612 	struct sec_sqe *sec_sqe = &req->sec_sqe;
1613 	int ret;
1614 
1615 	ret = sec_skcipher_bd_fill(ctx, req);
1616 	if (unlikely(ret)) {
1617 		dev_err(ctx->dev, "skcipher bd fill is error!\n");
1618 		return ret;
1619 	}
1620 
1621 	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM ||
1622 	    ctx->c_ctx.c_mode == SEC_CMODE_GCM)
1623 		sec_auth_bd_fill_xcm(auth_ctx, req->c_req.encrypt, req, sec_sqe);
1624 	else
1625 		sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);
1626 
1627 	return 0;
1628 }
1629 
1630 static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir,
1631 				   struct sec_req *req, struct sec_sqe3 *sqe3)
1632 {
1633 	struct sec_aead_req *a_req = &req->aead_req;
1634 	struct sec_cipher_req *c_req = &req->c_req;
1635 	struct aead_request *aq = a_req->aead_req;
1636 	struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
1637 	size_t authsize = crypto_aead_authsize(tfm);
1638 
1639 	sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma);
1640 
1641 	sqe3->auth_mac_key |=
1642 			cpu_to_le32((u32)(authsize /
1643 			SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3);
1644 
1645 	sqe3->auth_mac_key |=
1646 			cpu_to_le32((u32)(ctx->a_key_len /
1647 			SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET_V3);
1648 
1649 	sqe3->auth_mac_key |=
1650 			cpu_to_le32((u32)(ctx->a_alg) << SEC_AUTH_ALG_OFFSET_V3);
1651 
1652 	if (dir) {
1653 		sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE1);
1654 		sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3;
1655 	} else {
1656 		sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE2);
1657 		sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3;
1658 	}
1659 	sqe3->a_len_key = cpu_to_le32(c_req->c_len + aq->assoclen);
1660 
1661 	sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
1662 
1663 	sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma);
1664 }
1665 
1666 static int sec_aead_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
1667 {
1668 	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
1669 	struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3;
1670 	int ret;
1671 
1672 	ret = sec_skcipher_bd_fill_v3(ctx, req);
1673 	if (unlikely(ret)) {
1674 		dev_err(ctx->dev, "skcipher bd3 fill is error!\n");
1675 		return ret;
1676 	}
1677 
1678 	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM ||
1679 	    ctx->c_ctx.c_mode == SEC_CMODE_GCM)
1680 		sec_auth_bd_fill_xcm_v3(auth_ctx, req->c_req.encrypt,
1681 					req, sec_sqe3);
1682 	else
1683 		sec_auth_bd_fill_ex_v3(auth_ctx, req->c_req.encrypt,
1684 				       req, sec_sqe3);
1685 
1686 	return 0;
1687 }
1688 
1689 static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
1690 {
1691 	struct aead_request *a_req = req->aead_req.aead_req;
1692 	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
1693 	size_t authsize = crypto_aead_authsize(tfm);
1694 	struct sec_aead_req *aead_req = &req->aead_req;
1695 	struct sec_cipher_req *c_req = &req->c_req;
1696 	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
1697 	struct aead_request *backlog_aead_req;
1698 	struct sec_req *backlog_req;
1699 	size_t sz;
1700 
1701 	if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
1702 		sec_update_iv(req, SEC_AEAD);
1703 
1704 	/* Copy output mac */
1705 	if (!err && c_req->encrypt) {
1706 		struct scatterlist *sgl = a_req->dst;
1707 
1708 		sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl), aead_req->out_mac,
1709 					  authsize, a_req->cryptlen + a_req->assoclen);
1710 		if (unlikely(sz != authsize)) {
1711 			dev_err(c->dev, "copy out mac err!\n");
1712 			err = -EINVAL;
1713 		}
1714 	}
1715 
1716 	sec_free_req_id(req);
1717 
1718 	while (1) {
1719 		backlog_req = sec_back_req_clear(c, qp_ctx);
1720 		if (!backlog_req)
1721 			break;
1722 
1723 		backlog_aead_req = backlog_req->aead_req.aead_req;
1724 		aead_request_complete(backlog_aead_req, -EINPROGRESS);
1725 		atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt);
1726 	}
1727 
1728 	aead_request_complete(a_req, err);
1729 }
1730 
1731 static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
1732 {
1733 	sec_free_req_id(req);
1734 	sec_free_queue_id(ctx, req);
1735 }
1736 
1737 static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
1738 {
1739 	struct sec_qp_ctx *qp_ctx;
1740 	int queue_id;
1741 
1742 	/* Pick a queue cyclically to balance the load */
1743 	queue_id = sec_alloc_queue_id(ctx, req);
1744 	qp_ctx = &ctx->qp_ctx[queue_id];
1745 
1746 	req->req_id = sec_alloc_req_id(req, qp_ctx);
1747 	if (unlikely(req->req_id < 0)) {
1748 		sec_free_queue_id(ctx, req);
1749 		return req->req_id;
1750 	}
1751 
1752 	return 0;
1753 }
1754 
1755 static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
1756 {
1757 	struct sec_cipher_req *c_req = &req->c_req;
1758 	int ret;
1759 
1760 	ret = sec_request_init(ctx, req);
1761 	if (unlikely(ret))
1762 		return ret;
1763 
1764 	ret = sec_request_transfer(ctx, req);
1765 	if (unlikely(ret))
1766 		goto err_uninit_req;
1767 
1768 	/* For decryption, the output IV is taken from the input data before the request is sent */
1769 	if (!req->c_req.encrypt && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
1770 	    ctx->c_ctx.c_mode == SEC_CMODE_CTR))
1771 		sec_update_iv(req, ctx->alg_type);
1772 
1773 	ret = ctx->req_op->bd_send(ctx, req);
1774 	if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) ||
1775 		(ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1776 		dev_err_ratelimited(ctx->dev, "send sec request failed!\n");
1777 		goto err_send_req;
1778 	}
1779 
1780 	return ret;
1781 
1782 err_send_req:
1783 	/* On failure, restore the IV from the original user request */
1784 	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
1785 		if (ctx->alg_type == SEC_SKCIPHER)
1786 			memcpy(req->c_req.sk_req->iv, c_req->c_ivin,
1787 			       ctx->c_ctx.ivsize);
1788 		else
1789 			memcpy(req->aead_req.aead_req->iv, c_req->c_ivin,
1790 			       ctx->c_ctx.ivsize);
1791 	}
1792 
1793 	sec_request_untransfer(ctx, req);
1794 err_uninit_req:
1795 	sec_request_uninit(ctx, req);
1796 	return ret;
1797 }
1798 
1799 static const struct sec_req_op sec_skcipher_req_ops = {
1800 	.buf_map	= sec_skcipher_sgl_map,
1801 	.buf_unmap	= sec_skcipher_sgl_unmap,
1802 	.do_transfer	= sec_skcipher_copy_iv,
1803 	.bd_fill	= sec_skcipher_bd_fill,
1804 	.bd_send	= sec_bd_send,
1805 	.callback	= sec_skcipher_callback,
1806 	.process	= sec_process,
1807 };
1808 
1809 static const struct sec_req_op sec_aead_req_ops = {
1810 	.buf_map	= sec_aead_sgl_map,
1811 	.buf_unmap	= sec_aead_sgl_unmap,
1812 	.do_transfer	= sec_aead_set_iv,
1813 	.bd_fill	= sec_aead_bd_fill,
1814 	.bd_send	= sec_bd_send,
1815 	.callback	= sec_aead_callback,
1816 	.process	= sec_process,
1817 };
1818 
1819 static const struct sec_req_op sec_skcipher_req_ops_v3 = {
1820 	.buf_map	= sec_skcipher_sgl_map,
1821 	.buf_unmap	= sec_skcipher_sgl_unmap,
1822 	.do_transfer	= sec_skcipher_copy_iv,
1823 	.bd_fill	= sec_skcipher_bd_fill_v3,
1824 	.bd_send	= sec_bd_send,
1825 	.callback	= sec_skcipher_callback,
1826 	.process	= sec_process,
1827 };
1828 
1829 static const struct sec_req_op sec_aead_req_ops_v3 = {
1830 	.buf_map	= sec_aead_sgl_map,
1831 	.buf_unmap	= sec_aead_sgl_unmap,
1832 	.do_transfer	= sec_aead_set_iv,
1833 	.bd_fill	= sec_aead_bd_fill_v3,
1834 	.bd_send	= sec_bd_send,
1835 	.callback	= sec_aead_callback,
1836 	.process	= sec_process,
1837 };
1838 
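     /*
      * Init the skcipher context and select the BD format (v2 or v3) and
      * request ops for the hardware version.
      */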
1839 static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
1840 {
1841 	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
1842 	int ret;
1843 
1844 	ret = sec_skcipher_init(tfm);
1845 	if (ret)
1846 		return ret;
1847 
1848 	if (ctx->sec->qm.ver < QM_HW_V3) {
1849 		ctx->type_supported = SEC_BD_TYPE2;
1850 		ctx->req_op = &sec_skcipher_req_ops;
1851 	} else {
1852 		ctx->type_supported = SEC_BD_TYPE3;
1853 		ctx->req_op = &sec_skcipher_req_ops_v3;
1854 	}
1855 
1856 	return ret;
1857 }
1858 
1859 static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
1860 {
1861 	sec_skcipher_uninit(tfm);
1862 }
1863 
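     /*
      * Common AEAD setup: validate the IV size, initialise the base, auth and
      * cipher contexts, and select the BD format for this hardware version.
      */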
1864 static int sec_aead_init(struct crypto_aead *tfm)
1865 {
1866 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1867 	int ret;
1868 
1869 	crypto_aead_set_reqsize(tfm, sizeof(struct sec_req));
1870 	ctx->alg_type = SEC_AEAD;
1871 	ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
1872 	if (ctx->c_ctx.ivsize < SEC_AIV_SIZE ||
1873 	    ctx->c_ctx.ivsize > SEC_IV_SIZE) {
1874 		pr_err("invalid aead iv size!\n");
1875 		return -EINVAL;
1876 	}
1877 
1878 	ret = sec_ctx_base_init(ctx);
1879 	if (ret)
1880 		return ret;
1881 	if (ctx->sec->qm.ver < QM_HW_V3) {
1882 		ctx->type_supported = SEC_BD_TYPE2;
1883 		ctx->req_op = &sec_aead_req_ops;
1884 	} else {
1885 		ctx->type_supported = SEC_BD_TYPE3;
1886 		ctx->req_op = &sec_aead_req_ops_v3;
1887 	}
1888 
1889 	ret = sec_auth_init(ctx);
1890 	if (ret)
1891 		goto err_auth_init;
1892 
1893 	ret = sec_cipher_init(ctx);
1894 	if (ret)
1895 		goto err_cipher_init;
1896 
1897 	return ret;
1898 
1899 err_cipher_init:
1900 	sec_auth_uninit(ctx);
1901 err_auth_init:
1902 	sec_ctx_base_uninit(ctx);
1903 	return ret;
1904 }
1905 
1906 static void sec_aead_exit(struct crypto_aead *tfm)
1907 {
1908 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1909 
1910 	sec_cipher_uninit(ctx);
1911 	sec_auth_uninit(ctx);
1912 	sec_ctx_base_uninit(ctx);
1913 }
1914 
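     /*
      * Init for the authenc() templates: on top of the common AEAD setup,
      * allocate a shash for the given hash and a fallback AEAD transform.
      */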
1915 static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
1916 {
1917 	struct aead_alg *alg = crypto_aead_alg(tfm);
1918 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1919 	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
1920 	const char *aead_name = alg->base.cra_name;
1921 	int ret;
1922 
1923 	ret = sec_aead_init(tfm);
1924 	if (ret) {
1925 		pr_err("hisi_sec2: aead init error!\n");
1926 		return ret;
1927 	}
1928 
1929 	a_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
1930 	if (IS_ERR(a_ctx->hash_tfm)) {
1931 		dev_err(ctx->dev, "aead alloc shash error!\n");
1932 		sec_aead_exit(tfm);
1933 		return PTR_ERR(a_ctx->hash_tfm);
1934 	}
1935 
1936 	a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0,
1937 						     CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
1938 	if (IS_ERR(a_ctx->fallback_aead_tfm)) {
1939 		dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n");
1940 		crypto_free_shash(ctx->a_ctx.hash_tfm);
1941 		sec_aead_exit(tfm);
1942 		return PTR_ERR(a_ctx->fallback_aead_tfm);
1943 	}
1944 
1945 	return 0;
1946 }
1947 
1948 static void sec_aead_ctx_exit(struct crypto_aead *tfm)
1949 {
1950 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1951 
1952 	crypto_free_aead(ctx->a_ctx.fallback_aead_tfm);
1953 	crypto_free_shash(ctx->a_ctx.hash_tfm);
1954 	sec_aead_exit(tfm);
1955 }
1956 
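     /* Init for CCM/GCM: common AEAD setup plus a fallback AEAD transform. */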
1957 static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm)
1958 {
1959 	struct aead_alg *alg = crypto_aead_alg(tfm);
1960 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1961 	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
1962 	const char *aead_name = alg->base.cra_name;
1963 	int ret;
1964 
1965 	ret = sec_aead_init(tfm);
1966 	if (ret) {
1967 		dev_err(ctx->dev, "hisi_sec2: aead xcm init error!\n");
1968 		return ret;
1969 	}
1970 
1971 	a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0,
1972 						     CRYPTO_ALG_NEED_FALLBACK |
1973 						     CRYPTO_ALG_ASYNC);
1974 	if (IS_ERR(a_ctx->fallback_aead_tfm)) {
1975 		dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n");
1976 		sec_aead_exit(tfm);
1977 		return PTR_ERR(a_ctx->fallback_aead_tfm);
1978 	}
1979 
1980 	return 0;
1981 }
1982 
1983 static void sec_aead_xcm_ctx_exit(struct crypto_aead *tfm)
1984 {
1985 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1986 
1987 	crypto_free_aead(ctx->a_ctx.fallback_aead_tfm);
1988 	sec_aead_exit(tfm);
1989 }
1990 
1991 static int sec_aead_sha1_ctx_init(struct crypto_aead *tfm)
1992 {
1993 	return sec_aead_ctx_init(tfm, "sha1");
1994 }
1995 
1996 static int sec_aead_sha256_ctx_init(struct crypto_aead *tfm)
1997 {
1998 	return sec_aead_ctx_init(tfm, "sha256");
1999 }
2000 
2001 static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
2002 {
2003 	return sec_aead_ctx_init(tfm, "sha512");
2004 }
2005 
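     /*
      * Check the request length (and the hardware version for CTR) against
      * the constraints of the cipher mode.
      */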
2006 static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
2007 				       struct sec_req *sreq)
2008 {
2009 	u32 cryptlen = sreq->c_req.sk_req->cryptlen;
2010 	struct device *dev = ctx->dev;
2011 	u8 c_mode = ctx->c_ctx.c_mode;
2012 	int ret = 0;
2013 
2014 	switch (c_mode) {
2015 	case SEC_CMODE_XTS:
2016 		if (unlikely(cryptlen < AES_BLOCK_SIZE)) {
2017 			dev_err(dev, "skcipher XTS mode input length error!\n");
2018 			ret = -EINVAL;
2019 		}
2020 		break;
2021 	case SEC_CMODE_ECB:
2022 	case SEC_CMODE_CBC:
2023 		if (unlikely(cryptlen & (AES_BLOCK_SIZE - 1))) {
2024 			dev_err(dev, "skcipher AES input length error!\n");
2025 			ret = -EINVAL;
2026 		}
2027 		break;
2028 	case SEC_CMODE_CTR:
2029 		if (unlikely(ctx->sec->qm.ver < QM_HW_V3)) {
2030 			dev_err(dev, "skcipher HW version error!\n");
2031 			ret = -EINVAL;
2032 		}
2033 		break;
2034 	default:
2035 		ret = -EINVAL;
2036 	}
2037 
2038 	return ret;
2039 }
2040 
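     /*
      * Validate src/dst and the total length, decide whether the request is
      * small enough to use the pbuf, then apply the per-algorithm length checks.
      */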
2041 static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
2042 {
2043 	struct skcipher_request *sk_req = sreq->c_req.sk_req;
2044 	struct device *dev = ctx->dev;
2045 	u8 c_alg = ctx->c_ctx.c_alg;
2046 
2047 	if (unlikely(!sk_req->src || !sk_req->dst ||
2048 		     sk_req->cryptlen > MAX_INPUT_DATA_LEN)) {
2049 		dev_err(dev, "skcipher input param error!\n");
2050 		return -EINVAL;
2051 	}
2052 	sreq->c_req.c_len = sk_req->cryptlen;
2053 
2054 	if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
2055 		sreq->use_pbuf = true;
2056 	else
2057 		sreq->use_pbuf = false;
2058 
2059 	if (c_alg == SEC_CALG_3DES) {
2060 		if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
2061 			dev_err(dev, "skcipher 3des input length error!\n");
2062 			return -EINVAL;
2063 		}
2064 		return 0;
2065 	} else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
2066 		return sec_skcipher_cryptlen_check(ctx, sreq);
2067 	}
2068 
2069 	dev_err(dev, "skcipher algorithm error!\n");
2070 
2071 	return -EINVAL;
2072 }
2073 
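     /* Software fallback: run the request synchronously on the fallback sync tfm. */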
2074 static int sec_skcipher_soft_crypto(struct sec_ctx *ctx,
2075 				    struct skcipher_request *sreq, bool encrypt)
2076 {
2077 	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
2078 	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, c_ctx->fbtfm);
2079 	struct device *dev = ctx->dev;
2080 	int ret;
2081 
2082 	if (!c_ctx->fbtfm) {
2083 		dev_err_ratelimited(dev, "the soft tfm isn't supported in the current system.\n");
2084 		return -EINVAL;
2085 	}
2086 
2087 	skcipher_request_set_sync_tfm(subreq, c_ctx->fbtfm);
2088 
2089 	/* The software fallback must run in synchronous mode */
2090 	skcipher_request_set_callback(subreq, sreq->base.flags,
2091 				      NULL, NULL);
2092 	skcipher_request_set_crypt(subreq, sreq->src, sreq->dst,
2093 				   sreq->cryptlen, sreq->iv);
2094 	if (encrypt)
2095 		ret = crypto_skcipher_encrypt(subreq);
2096 	else
2097 		ret = crypto_skcipher_decrypt(subreq);
2098 
2099 	skcipher_request_zero(subreq);
2100 
2101 	return ret;
2102 }
2103 
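     /*
      * skcipher entry point: zero-length requests are rejected for XTS and
      * completed immediately otherwise; contexts flagged for fallback are
      * handled by the software implementation.
      */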
2104 static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
2105 {
2106 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
2107 	struct sec_req *req = skcipher_request_ctx(sk_req);
2108 	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
2109 	int ret;
2110 
2111 	if (!sk_req->cryptlen) {
2112 		if (ctx->c_ctx.c_mode == SEC_CMODE_XTS)
2113 			return -EINVAL;
2114 		return 0;
2115 	}
2116 
2117 	req->flag = sk_req->base.flags;
2118 	req->c_req.sk_req = sk_req;
2119 	req->c_req.encrypt = encrypt;
2120 	req->ctx = ctx;
2121 
2122 	ret = sec_skcipher_param_check(ctx, req);
2123 	if (unlikely(ret))
2124 		return -EINVAL;
2125 
2126 	if (unlikely(ctx->c_ctx.fallback))
2127 		return sec_skcipher_soft_crypto(ctx, sk_req, encrypt);
2128 
2129 	return ctx->req_op->process(ctx, req);
2130 }
2131 
2132 static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
2133 {
2134 	return sec_skcipher_crypto(sk_req, true);
2135 }
2136 
2137 static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
2138 {
2139 	return sec_skcipher_crypto(sk_req, false);
2140 }
2141 
2142 #define SEC_SKCIPHER_ALG(sec_cra_name, sec_set_key, \
2143 	sec_min_key_size, sec_max_key_size, blk_size, iv_size)\
2144 {\
2145 	.base = {\
2146 		.cra_name = sec_cra_name,\
2147 		.cra_driver_name = "hisi_sec_"sec_cra_name,\
2148 		.cra_priority = SEC_PRIORITY,\
2149 		.cra_flags = CRYPTO_ALG_ASYNC |\
2150 		 CRYPTO_ALG_NEED_FALLBACK,\
2151 		.cra_blocksize = blk_size,\
2152 		.cra_ctxsize = sizeof(struct sec_ctx),\
2153 		.cra_module = THIS_MODULE,\
2154 	},\
2155 	.init = sec_skcipher_ctx_init,\
2156 	.exit = sec_skcipher_ctx_exit,\
2157 	.setkey = sec_set_key,\
2158 	.decrypt = sec_skcipher_decrypt,\
2159 	.encrypt = sec_skcipher_encrypt,\
2160 	.min_keysize = sec_min_key_size,\
2161 	.max_keysize = sec_max_key_size,\
2162 	.ivsize = iv_size,\
2163 }
2164 
2165 static struct sec_skcipher sec_skciphers[] = {
2166 	{
2167 		.alg_msk = BIT(0),
2168 		.alg = SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb, AES_MIN_KEY_SIZE,
2169 					AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, 0),
2170 	},
2171 	{
2172 		.alg_msk = BIT(1),
2173 		.alg = SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc, AES_MIN_KEY_SIZE,
2174 					AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
2175 	},
2176 	{
2177 		.alg_msk = BIT(2),
2178 		.alg = SEC_SKCIPHER_ALG("ctr(aes)", sec_setkey_aes_ctr, AES_MIN_KEY_SIZE,
2179 					AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
2180 	},
2181 	{
2182 		.alg_msk = BIT(3),
2183 		.alg = SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts, SEC_XTS_MIN_KEY_SIZE,
2184 					SEC_XTS_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
2185 	},
2186 	{
2187 		.alg_msk = BIT(12),
2188 		.alg = SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc, AES_MIN_KEY_SIZE,
2189 					AES_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
2190 	},
2191 	{
2192 		.alg_msk = BIT(13),
2193 		.alg = SEC_SKCIPHER_ALG("ctr(sm4)", sec_setkey_sm4_ctr, AES_MIN_KEY_SIZE,
2194 					AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
2195 	},
2196 	{
2197 		.alg_msk = BIT(14),
2198 		.alg = SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts, SEC_XTS_MIN_KEY_SIZE,
2199 					SEC_XTS_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
2200 	},
2201 	{
2202 		.alg_msk = BIT(23),
2203 		.alg = SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb, SEC_DES3_3KEY_SIZE,
2204 					SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE, 0),
2205 	},
2206 	{
2207 		.alg_msk = BIT(24),
2208 		.alg = SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc, SEC_DES3_3KEY_SIZE,
2209 					SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE,
2210 					DES3_EDE_BLOCK_SIZE),
2211 	},
2212 };
2213 
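     /*
      * The first CCM IV byte encodes L - 1, where L is the size of the length
      * field in bytes; check that L is in range and that cryptlen fits in it.
      */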
2214 static int aead_iv_dimension_check(struct aead_request *aead_req)
2215 {
2216 	u8 cl;
2217 
2218 	cl = aead_req->iv[0] + 1;
2219 	if (cl < IV_CL_MIN || cl > IV_CL_MAX)
2220 		return -EINVAL;
2221 
2222 	if (cl < IV_CL_MID && aead_req->cryptlen >> (BYTE_BITS * cl))
2223 		return -EOVERFLOW;
2224 
2225 	return 0;
2226 }
2227 
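     /*
      * Extra AEAD constraints: minimum MAC length, total input and AAD limits,
      * CCM IV sanity and CBC block alignment of the ciphertext length.
      */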
2228 static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
2229 {
2230 	struct aead_request *req = sreq->aead_req.aead_req;
2231 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2232 	size_t sz = crypto_aead_authsize(tfm);
2233 	u8 c_mode = ctx->c_ctx.c_mode;
2234 	struct device *dev = ctx->dev;
2235 	int ret;
2236 
2237 	/* Hardware does not handle cases where authsize is less than 4 bytes */
2238 	if (unlikely(sz < MIN_MAC_LEN)) {
2239 		sreq->aead_req.fallback = true;
2240 		return -EINVAL;
2241 	}
2242 
2243 	if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
2244 	    req->assoclen > SEC_MAX_AAD_LEN)) {
2245 		dev_err(dev, "aead input spec error!\n");
2246 		return -EINVAL;
2247 	}
2248 
2249 	if (c_mode == SEC_CMODE_CCM) {
2250 		if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN)) {
2251 			dev_err_ratelimited(dev, "CCM input aad parameter is too long!\n");
2252 			return -EINVAL;
2253 		}
2254 		ret = aead_iv_dimension_check(req);
2255 		if (ret) {
2256 			dev_err(dev, "aead input iv param error!\n");
2257 			return ret;
2258 		}
2259 	}
2260 
2261 	if (sreq->c_req.encrypt)
2262 		sreq->c_req.c_len = req->cryptlen;
2263 	else
2264 		sreq->c_req.c_len = req->cryptlen - sz;
2265 	if (c_mode == SEC_CMODE_CBC) {
2266 		if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
2267 			dev_err(dev, "aead crypto length error!\n");
2268 			return -EINVAL;
2269 		}
2270 	}
2271 
2272 	return 0;
2273 }
2274 
2275 static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
2276 {
2277 	struct aead_request *req = sreq->aead_req.aead_req;
2278 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2279 	size_t authsize = crypto_aead_authsize(tfm);
2280 	struct device *dev = ctx->dev;
2281 	u8 c_alg = ctx->c_ctx.c_alg;
2282 
2283 	if (unlikely(!req->src || !req->dst)) {
2284 		dev_err(dev, "aead input param error!\n");
2285 		return -EINVAL;
2286 	}
2287 
2288 	if (ctx->sec->qm.ver == QM_HW_V2) {
2289 		if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
2290 			     req->cryptlen <= authsize))) {
2291 			sreq->aead_req.fallback = true;
2292 			return -EINVAL;
2293 		}
2294 	}
2295 
2296 	/* Only AES and SM4 are supported */
2297 	if (unlikely(c_alg != SEC_CALG_AES && c_alg != SEC_CALG_SM4)) {
2298 		dev_err(dev, "aead crypto alg error!\n");
2299 		return -EINVAL;
2300 	}
2301 
2302 	if (unlikely(sec_aead_spec_check(ctx, sreq)))
2303 		return -EINVAL;
2304 
2305 	if (ctx->pbuf_supported &&
2306 	    (req->cryptlen + req->assoclen) <= SEC_PBUF_SZ)
2307 		sreq->use_pbuf = true;
2308 	else
2309 		sreq->use_pbuf = false;
2310 
2311 	return 0;
2312 }
2313 
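     /* Software fallback for AEAD requests, run on the fallback AEAD transform. */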
2314 static int sec_aead_soft_crypto(struct sec_ctx *ctx,
2315 				struct aead_request *aead_req,
2316 				bool encrypt)
2317 {
2318 	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
2319 	struct aead_request *subreq;
2320 	int ret;
2321 
2322 	subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL);
2323 	if (!subreq)
2324 		return -ENOMEM;
2325 
2326 	aead_request_set_tfm(subreq, a_ctx->fallback_aead_tfm);
2327 	aead_request_set_callback(subreq, aead_req->base.flags,
2328 				  aead_req->base.complete, aead_req->base.data);
2329 	aead_request_set_crypt(subreq, aead_req->src, aead_req->dst,
2330 			       aead_req->cryptlen, aead_req->iv);
2331 	aead_request_set_ad(subreq, aead_req->assoclen);
2332 
2333 	if (encrypt)
2334 		ret = crypto_aead_encrypt(subreq);
2335 	else
2336 		ret = crypto_aead_decrypt(subreq);
2337 	aead_request_free(subreq);
2338 
2339 	return ret;
2340 }
2341 
2342 static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
2343 {
2344 	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
2345 	struct sec_req *req = aead_request_ctx(a_req);
2346 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
2347 	int ret;
2348 
2349 	req->flag = a_req->base.flags;
2350 	req->aead_req.aead_req = a_req;
2351 	req->c_req.encrypt = encrypt;
2352 	req->ctx = ctx;
2353 	req->aead_req.fallback = false;
2354 
2355 	ret = sec_aead_param_check(ctx, req);
2356 	if (unlikely(ret)) {
2357 		if (req->aead_req.fallback)
2358 			return sec_aead_soft_crypto(ctx, a_req, encrypt);
2359 		return -EINVAL;
2360 	}
2361 
2362 	return ctx->req_op->process(ctx, req);
2363 }
2364 
2365 static int sec_aead_encrypt(struct aead_request *a_req)
2366 {
2367 	return sec_aead_crypto(a_req, true);
2368 }
2369 
2370 static int sec_aead_decrypt(struct aead_request *a_req)
2371 {
2372 	return sec_aead_crypto(a_req, false);
2373 }
2374 
2375 #define SEC_AEAD_ALG(sec_cra_name, sec_set_key, ctx_init,\
2376 			 ctx_exit, blk_size, iv_size, max_authsize)\
2377 {\
2378 	.base = {\
2379 		.cra_name = sec_cra_name,\
2380 		.cra_driver_name = "hisi_sec_"sec_cra_name,\
2381 		.cra_priority = SEC_PRIORITY,\
2382 		.cra_flags = CRYPTO_ALG_ASYNC |\
2383 		 CRYPTO_ALG_NEED_FALLBACK,\
2384 		.cra_blocksize = blk_size,\
2385 		.cra_ctxsize = sizeof(struct sec_ctx),\
2386 		.cra_module = THIS_MODULE,\
2387 	},\
2388 	.init = ctx_init,\
2389 	.exit = ctx_exit,\
2390 	.setkey = sec_set_key,\
2391 	.setauthsize = sec_aead_setauthsize,\
2392 	.decrypt = sec_aead_decrypt,\
2393 	.encrypt = sec_aead_encrypt,\
2394 	.ivsize = iv_size,\
2395 	.maxauthsize = max_authsize,\
2396 }
2397 
2398 static struct sec_aead sec_aeads[] = {
2399 	{
2400 		.alg_msk = BIT(6),
2401 		.alg = SEC_AEAD_ALG("ccm(aes)", sec_setkey_aes_ccm, sec_aead_xcm_ctx_init,
2402 				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE,
2403 				    AES_BLOCK_SIZE),
2404 	},
2405 	{
2406 		.alg_msk = BIT(7),
2407 		.alg = SEC_AEAD_ALG("gcm(aes)", sec_setkey_aes_gcm, sec_aead_xcm_ctx_init,
2408 				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE,
2409 				    AES_BLOCK_SIZE),
2410 	},
2411 	{
2412 		.alg_msk = BIT(17),
2413 		.alg = SEC_AEAD_ALG("ccm(sm4)", sec_setkey_sm4_ccm, sec_aead_xcm_ctx_init,
2414 				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE,
2415 				    AES_BLOCK_SIZE),
2416 	},
2417 	{
2418 		.alg_msk = BIT(18),
2419 		.alg = SEC_AEAD_ALG("gcm(sm4)", sec_setkey_sm4_gcm, sec_aead_xcm_ctx_init,
2420 				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE,
2421 				    AES_BLOCK_SIZE),
2422 	},
2423 	{
2424 		.alg_msk = BIT(43),
2425 		.alg = SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))", sec_setkey_aes_cbc_sha1,
2426 				    sec_aead_sha1_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
2427 				    AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),
2428 	},
2429 	{
2430 		.alg_msk = BIT(44),
2431 		.alg = SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))", sec_setkey_aes_cbc_sha256,
2432 				    sec_aead_sha256_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
2433 				    AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),
2434 	},
2435 	{
2436 		.alg_msk = BIT(45),
2437 		.alg = SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))", sec_setkey_aes_cbc_sha512,
2438 				    sec_aead_sha512_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
2439 				    AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
2440 	},
2441 };
2442 
2443 static void sec_unregister_skcipher(u64 alg_mask, int end)
2444 {
2445 	int i;
2446 
2447 	for (i = 0; i < end; i++)
2448 		if (sec_skciphers[i].alg_msk & alg_mask)
2449 			crypto_unregister_skcipher(&sec_skciphers[i].alg);
2450 }
2451 
2452 static int sec_register_skcipher(u64 alg_mask)
2453 {
2454 	int i, ret, count;
2455 
2456 	count = ARRAY_SIZE(sec_skciphers);
2457 
2458 	for (i = 0; i < count; i++) {
2459 		if (!(sec_skciphers[i].alg_msk & alg_mask))
2460 			continue;
2461 
2462 		ret = crypto_register_skcipher(&sec_skciphers[i].alg);
2463 		if (ret)
2464 			goto err;
2465 	}
2466 
2467 	return 0;
2468 
2469 err:
2470 	sec_unregister_skcipher(alg_mask, i);
2471 
2472 	return ret;
2473 }
2474 
2475 static void sec_unregister_aead(u64 alg_mask, int end)
2476 {
2477 	int i;
2478 
2479 	for (i = 0; i < end; i++)
2480 		if (sec_aeads[i].alg_msk & alg_mask)
2481 			crypto_unregister_aead(&sec_aeads[i].alg);
2482 }
2483 
2484 static int sec_register_aead(u64 alg_mask)
2485 {
2486 	int i, ret, count;
2487 
2488 	count = ARRAY_SIZE(sec_aeads);
2489 
2490 	for (i = 0; i < count; i++) {
2491 		if (!(sec_aeads[i].alg_msk & alg_mask))
2492 			continue;
2493 
2494 		ret = crypto_register_aead(&sec_aeads[i].alg);
2495 		if (ret)
2496 			goto err;
2497 	}
2498 
2499 	return 0;
2500 
2501 err:
2502 	sec_unregister_aead(alg_mask, i);
2503 
2504 	return ret;
2505 }
2506 
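     /*
      * Register the algorithms advertised in the device capability bitmap when
      * the first SEC device appears; later devices only increase the refcount.
      */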
2507 int sec_register_to_crypto(struct hisi_qm *qm)
2508 {
2509 	u64 alg_mask;
2510 	int ret = 0;
2511 
2512 	alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_TB,
2513 				      SEC_DRV_ALG_BITMAP_LOW_TB);
2514 
2515 	mutex_lock(&sec_algs_lock);
2516 	if (sec_available_devs) {
2517 		sec_available_devs++;
2518 		goto unlock;
2519 	}
2520 
2521 	ret = sec_register_skcipher(alg_mask);
2522 	if (ret)
2523 		goto unlock;
2524 
2525 	ret = sec_register_aead(alg_mask);
2526 	if (ret)
2527 		goto unreg_skcipher;
2528 
2529 	sec_available_devs++;
2530 	mutex_unlock(&sec_algs_lock);
2531 
2532 	return 0;
2533 
2534 unreg_skcipher:
2535 	sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));
2536 unlock:
2537 	mutex_unlock(&sec_algs_lock);
2538 	return ret;
2539 }
2540 
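     /* Drop a device reference; the last device unregisters the algorithms. */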
2541 void sec_unregister_from_crypto(struct hisi_qm *qm)
2542 {
2543 	u64 alg_mask;
2544 
2545 	alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_TB,
2546 				      SEC_DRV_ALG_BITMAP_LOW_TB);
2547 
2548 	mutex_lock(&sec_algs_lock);
2549 	if (--sec_available_devs)
2550 		goto unlock;
2551 
2552 	sec_unregister_aead(alg_mask, ARRAY_SIZE(sec_aeads));
2553 	sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));
2554 
2555 unlock:
2556 	mutex_unlock(&sec_algs_lock);
2557 }
2558