xref: /linux/drivers/crypto/hisilicon/sec2/sec_crypto.c (revision 44a8c96edd0ee9320a1ad87afc7b10f38e55d5ec)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019 HiSilicon Limited. */
3 
4 #include <crypto/aes.h>
5 #include <crypto/aead.h>
6 #include <crypto/algapi.h>
7 #include <crypto/authenc.h>
8 #include <crypto/des.h>
9 #include <crypto/hash.h>
10 #include <crypto/internal/aead.h>
11 #include <crypto/internal/des.h>
12 #include <crypto/sha1.h>
13 #include <crypto/sha2.h>
14 #include <crypto/skcipher.h>
15 #include <crypto/xts.h>
16 #include <linux/crypto.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/idr.h>
19 
20 #include "sec.h"
21 #include "sec_crypto.h"
22 
23 #define SEC_PRIORITY		4001
24 #define SEC_XTS_MIN_KEY_SIZE	(2 * AES_MIN_KEY_SIZE)
25 #define SEC_XTS_MID_KEY_SIZE	(3 * AES_MIN_KEY_SIZE)
26 #define SEC_XTS_MAX_KEY_SIZE	(2 * AES_MAX_KEY_SIZE)
27 #define SEC_DES3_2KEY_SIZE	(2 * DES_KEY_SIZE)
28 #define SEC_DES3_3KEY_SIZE	(3 * DES_KEY_SIZE)
29 
30 /* Bit offsets and masks used to build SEC sqe(bd) fields */
31 #define SEC_DE_OFFSET		1
32 #define SEC_CIPHER_OFFSET	4
33 #define SEC_SCENE_OFFSET	3
34 #define SEC_DST_SGL_OFFSET	2
35 #define SEC_SRC_SGL_OFFSET	7
36 #define SEC_CKEY_OFFSET		9
37 #define SEC_CMODE_OFFSET	12
38 #define SEC_AKEY_OFFSET         5
39 #define SEC_AEAD_ALG_OFFSET     11
40 #define SEC_AUTH_OFFSET		6
41 
42 #define SEC_DE_OFFSET_V3		9
43 #define SEC_SCENE_OFFSET_V3	5
44 #define SEC_CKEY_OFFSET_V3	13
45 #define SEC_CTR_CNT_OFFSET	25
46 #define SEC_CTR_CNT_ROLLOVER	2
47 #define SEC_SRC_SGL_OFFSET_V3	11
48 #define SEC_DST_SGL_OFFSET_V3	14
49 #define SEC_CALG_OFFSET_V3	4
50 #define SEC_AKEY_OFFSET_V3	9
51 #define SEC_MAC_OFFSET_V3	4
52 #define SEC_AUTH_ALG_OFFSET_V3	15
53 #define SEC_CIPHER_AUTH_V3	0xbf
54 #define SEC_AUTH_CIPHER_V3	0x40
55 #define SEC_FLAG_OFFSET		7
56 #define SEC_FLAG_MASK		0x0780
57 #define SEC_TYPE_MASK		0x0F
58 #define SEC_DONE_MASK		0x0001
59 #define SEC_ICV_MASK		0x000E
60 
61 #define SEC_TOTAL_IV_SZ(depth)	(SEC_IV_SIZE * (depth))
62 #define SEC_SGL_SGE_NR		128
63 #define SEC_CIPHER_AUTH		0xfe
64 #define SEC_AUTH_CIPHER		0x1
65 #define SEC_MAX_MAC_LEN		64
66 #define SEC_MAX_AAD_LEN		65535
67 #define SEC_MAX_CCM_AAD_LEN	65279
68 #define SEC_TOTAL_MAC_SZ(depth) (SEC_MAX_MAC_LEN * (depth))
69 
70 #define SEC_PBUF_IV_OFFSET		SEC_PBUF_SZ
71 #define SEC_PBUF_MAC_OFFSET		(SEC_PBUF_SZ + SEC_IV_SIZE)
72 #define SEC_PBUF_PKG		(SEC_PBUF_SZ + SEC_IV_SIZE +	\
73 			SEC_MAX_MAC_LEN * 2)
74 #define SEC_PBUF_NUM		(PAGE_SIZE / SEC_PBUF_PKG)
75 #define SEC_PBUF_PAGE_NUM(depth)	((depth) / SEC_PBUF_NUM)
76 #define SEC_PBUF_LEFT_SZ(depth)		(SEC_PBUF_PKG * ((depth) -	\
77 				SEC_PBUF_PAGE_NUM(depth) * SEC_PBUF_NUM))
78 #define SEC_TOTAL_PBUF_SZ(depth)	(PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) +	\
79 				SEC_PBUF_LEFT_SZ(depth))
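
/*
 * Worked example of the sizing above (a sketch only; it assumes
 * SEC_PBUF_SZ = 512 and SEC_IV_SIZE = 24 from sec.h, and 4 KB pages):
 * SEC_PBUF_PKG = 512 + 24 + 2 * 64 = 664 bytes, so SEC_PBUF_NUM =
 * 4096 / 664 = 6 packages per page. For a queue depth of 256 that
 * gives SEC_PBUF_PAGE_NUM(256) = 256 / 6 = 42 full pages, with the
 * remaining 256 - 42 * 6 = 4 packages (SEC_PBUF_LEFT_SZ = 2656 bytes)
 * carried in the tail of the allocation.
 */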
80 
81 #define SEC_SQE_CFLAG		2
82 #define SEC_SQE_AEAD_FLAG	3
83 #define SEC_SQE_DONE		0x1
84 #define SEC_ICV_ERR		0x2
85 #define MAC_LEN_MASK		0x1U
86 #define MAX_INPUT_DATA_LEN	0xFFFE00
87 #define BITS_MASK		0xFF
88 #define WORD_MASK		0x3
89 #define BYTE_BITS		0x8
90 #define BYTES_TO_WORDS(bcount)	((bcount) >> 2)
91 #define SEC_XTS_NAME_SZ		0x3
92 #define IV_CM_CAL_NUM		2
93 #define IV_CL_MASK		0x7
94 #define IV_CL_MIN		2
95 #define IV_CL_MID		4
96 #define IV_CL_MAX		8
97 #define IV_FLAGS_OFFSET	0x6
98 #define IV_CM_OFFSET		0x3
99 #define IV_LAST_BYTE1		1
100 #define IV_LAST_BYTE2		2
101 #define IV_LAST_BYTE_MASK	0xFF
102 #define IV_CTR_INIT		0x1
103 #define IV_BYTE_OFFSET		0x8
104 #define SEC_GCM_MIN_AUTH_SZ	0x8
105 #define SEC_RETRY_MAX_CNT	5U
106 
107 static DEFINE_MUTEX(sec_algs_lock);
108 static unsigned int sec_available_devs;
109 
110 struct sec_skcipher {
111 	u64 alg_msk;
112 	struct skcipher_alg alg;
113 };
114 
115 struct sec_aead {
116 	u64 alg_msk;
117 	struct aead_alg alg;
118 };
119 
120 static int sec_aead_soft_crypto(struct sec_ctx *ctx,
121 				struct aead_request *aead_req,
122 				bool encrypt);
123 static int sec_skcipher_soft_crypto(struct sec_ctx *ctx,
124 				    struct skcipher_request *sreq, bool encrypt);
125 
126 static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
127 {
128 	int req_id;
129 
130 	spin_lock_bh(&qp_ctx->id_lock);
131 	req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 0, qp_ctx->qp->sq_depth, GFP_ATOMIC);
132 	spin_unlock_bh(&qp_ctx->id_lock);
133 	return req_id;
134 }
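
/*
 * Note: idr_alloc_cyclic() hands out ids cyclically in [0, sq_depth)
 * and returns a negative errno (e.g. -ENOSPC) once the ring is full;
 * the callers in this file treat a negative req_id as "use the
 * per-request buffers instead of a pre-allocated ring slot".
 */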
135 
136 static void sec_free_req_id(struct sec_req *req)
137 {
138 	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
139 	int req_id = req->req_id;
140 
141 	if (unlikely(req_id < 0 || req_id >= qp_ctx->qp->sq_depth)) {
142 		dev_err(req->ctx->dev, "free request id invalid!\n");
143 		return;
144 	}
145 
146 	spin_lock_bh(&qp_ctx->id_lock);
147 	idr_remove(&qp_ctx->req_idr, req_id);
148 	spin_unlock_bh(&qp_ctx->id_lock);
149 }
150 
151 static u8 pre_parse_finished_bd(struct bd_status *status, void *resp)
152 {
153 	struct sec_sqe *bd = resp;
154 
155 	status->done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
156 	status->icv = (le16_to_cpu(bd->type2.done_flag) & SEC_ICV_MASK) >> 1;
157 	status->flag = (le16_to_cpu(bd->type2.done_flag) &
158 					SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
159 	status->tag = le16_to_cpu(bd->type2.tag);
160 	status->err_type = bd->type2.error_type;
161 
162 	return bd->type_cipher_auth & SEC_TYPE_MASK;
163 }
164 
165 static u8 pre_parse_finished_bd3(struct bd_status *status, void *resp)
166 {
167 	struct sec_sqe3 *bd3 = resp;
168 
169 	status->done = le16_to_cpu(bd3->done_flag) & SEC_DONE_MASK;
170 	status->icv = (le16_to_cpu(bd3->done_flag) & SEC_ICV_MASK) >> 1;
171 	status->flag = (le16_to_cpu(bd3->done_flag) &
172 					SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
173 	status->tag = le64_to_cpu(bd3->tag);
174 	status->err_type = bd3->error_type;
175 
176 	return le32_to_cpu(bd3->bd_param) & SEC_TYPE_MASK;
177 }
178 
179 static int sec_cb_status_check(struct sec_req *req,
180 			       struct bd_status *status)
181 {
182 	struct sec_ctx *ctx = req->ctx;
183 
184 	if (unlikely(req->err_type || status->done != SEC_SQE_DONE)) {
185 		dev_err_ratelimited(ctx->dev, "err_type[%d], done[%u]\n",
186 				    req->err_type, status->done);
187 		return -EIO;
188 	}
189 
190 	if (unlikely(ctx->alg_type == SEC_SKCIPHER)) {
191 		if (unlikely(status->flag != SEC_SQE_CFLAG)) {
192 			dev_err_ratelimited(ctx->dev, "flag[%u]\n",
193 					    status->flag);
194 			return -EIO;
195 		}
196 	} else if (unlikely(ctx->alg_type == SEC_AEAD)) {
197 		if (unlikely(status->flag != SEC_SQE_AEAD_FLAG ||
198 			     status->icv == SEC_ICV_ERR)) {
199 			dev_err_ratelimited(ctx->dev,
200 					    "flag[%u], icv[%u]\n",
201 					    status->flag, status->icv);
202 			return -EBADMSG;
203 		}
204 	}
205 
206 	return 0;
207 }
208 
209 static int qp_send_message(struct sec_req *req)
210 {
211 	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
212 	int ret;
213 
214 	if (atomic_read(&qp_ctx->qp->qp_status.used) == qp_ctx->qp->sq_depth - 1)
215 		return -EBUSY;
216 
217 	spin_lock_bh(&qp_ctx->req_lock);
218 	if (atomic_read(&qp_ctx->qp->qp_status.used) == qp_ctx->qp->sq_depth - 1) {
219 		spin_unlock_bh(&qp_ctx->req_lock);
220 		return -EBUSY;
221 	}
222 
223 	if (qp_ctx->ctx->type_supported == SEC_BD_TYPE2) {
224 		req->sec_sqe.type2.tag = cpu_to_le16((u16)qp_ctx->send_head);
225 		qp_ctx->req_list[qp_ctx->send_head] = req;
226 	}
227 
228 	ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
229 	if (ret) {
230 		spin_unlock_bh(&qp_ctx->req_lock);
231 		return ret;
232 	}
233 	if (qp_ctx->ctx->type_supported == SEC_BD_TYPE2)
234 		qp_ctx->send_head = (qp_ctx->send_head + 1) % qp_ctx->qp->sq_depth;
235 
236 	spin_unlock_bh(&qp_ctx->req_lock);
237 
238 	atomic64_inc(&req->ctx->sec->debug.dfx.send_cnt);
239 	return -EINPROGRESS;
240 }
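
/*
 * qp_send_message() checks the ring occupancy twice on purpose: once
 * lock-free as a cheap fast path, then again under req_lock before
 * touching the ring. Returning -EINPROGRESS follows the usual async
 * crypto convention: the request was accepted and will complete via
 * the qp callback.
 */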
241 
242 static void sec_alg_send_backlog_soft(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
243 {
244 	struct sec_req *req, *tmp;
245 	int ret;
246 
247 	list_for_each_entry_safe(req, tmp, &qp_ctx->backlog.list, list) {
248 		list_del(&req->list);
249 		ctx->req_op->buf_unmap(ctx, req);
250 		if (req->req_id >= 0)
251 			sec_free_req_id(req);
252 
253 		if (ctx->alg_type == SEC_AEAD)
254 			ret = sec_aead_soft_crypto(ctx, req->aead_req.aead_req,
255 						   req->c_req.encrypt);
256 		else
257 			ret = sec_skcipher_soft_crypto(ctx, req->c_req.sk_req,
258 						       req->c_req.encrypt);
259 
260 		/* Wake up the busy thread first, then return the errno. */
261 		crypto_request_complete(req->base, -EINPROGRESS);
262 		crypto_request_complete(req->base, ret);
263 	}
264 }
265 
266 static void sec_alg_send_backlog(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
267 {
268 	struct sec_req *req, *tmp;
269 	int ret;
270 
271 	spin_lock_bh(&qp_ctx->backlog.lock);
272 	list_for_each_entry_safe(req, tmp, &qp_ctx->backlog.list, list) {
273 		ret = qp_send_message(req);
274 		switch (ret) {
275 		case -EINPROGRESS:
276 			list_del(&req->list);
277 			crypto_request_complete(req->base, -EINPROGRESS);
278 			break;
279 		case -EBUSY:
280 			/* Device is busy; stop sending requests for now. */
281 			goto unlock;
282 		default:
283 			/* Release memory resources and process all backlogged requests in software. */
284 			sec_alg_send_backlog_soft(ctx, qp_ctx);
285 			goto unlock;
286 		}
287 	}
288 
289 unlock:
290 	spin_unlock_bh(&qp_ctx->backlog.lock);
291 }
292 
293 static void sec_req_cb(struct hisi_qp *qp, void *resp)
294 {
295 	struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
296 	struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx;
297 	u8 type_supported = qp_ctx->ctx->type_supported;
298 	struct bd_status status;
299 	struct sec_ctx *ctx;
300 	struct sec_req *req;
301 	int err;
302 	u8 type;
303 
304 	if (type_supported == SEC_BD_TYPE2) {
305 		type = pre_parse_finished_bd(&status, resp);
306 		req = qp_ctx->req_list[status.tag];
307 	} else {
308 		type = pre_parse_finished_bd3(&status, resp);
309 		req = (void *)(uintptr_t)status.tag;
310 	}
311 
312 	if (unlikely(type != type_supported)) {
313 		atomic64_inc(&dfx->err_bd_cnt);
314 		pr_err("err bd type [%u]\n", type);
315 		return;
316 	}
317 
318 	if (unlikely(!req)) {
319 		atomic64_inc(&dfx->invalid_req_cnt);
320 		atomic_inc(&qp->qp_status.used);
321 		return;
322 	}
323 
324 	req->err_type = status.err_type;
325 	ctx = req->ctx;
326 	err = sec_cb_status_check(req, &status);
327 	if (err)
328 		atomic64_inc(&dfx->done_flag_cnt);
329 
330 	atomic64_inc(&dfx->recv_cnt);
331 
332 	ctx->req_op->buf_unmap(ctx, req);
333 
334 	ctx->req_op->callback(ctx, req, err);
335 }
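
/*
 * Note the two tag conventions used in sec_req_cb() above: a TYPE2 BD
 * carries a 16-bit ring index that is looked up in qp_ctx->req_list,
 * while a TYPE3 BD has a 64-bit tag wide enough to carry the
 * struct sec_req pointer itself.
 */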
336 
337 static int sec_alg_send_message_retry(struct sec_req *req)
338 {
339 	int ctr = 0;
340 	int ret;
341 
342 	do {
343 		ret = qp_send_message(req);
344 	} while (ret == -EBUSY && ctr++ < SEC_RETRY_MAX_CNT);
345 
346 	return ret;
347 }
348 
349 static int sec_alg_try_enqueue(struct sec_req *req)
350 {
351 	/* Check if any request is already backlogged */
352 	if (!list_empty(&req->backlog->list))
353 		return -EBUSY;
354 
355 	/* Try to enqueue to HW ring */
356 	return qp_send_message(req);
357 }
358 
359 
360 static int sec_alg_send_message_maybacklog(struct sec_req *req)
361 {
362 	int ret;
363 
364 	ret = sec_alg_try_enqueue(req);
365 	if (ret != -EBUSY)
366 		return ret;
367 
368 	spin_lock_bh(&req->backlog->lock);
369 	ret = sec_alg_try_enqueue(req);
370 	if (ret == -EBUSY)
371 		list_add_tail(&req->list, &req->backlog->list);
372 	spin_unlock_bh(&req->backlog->lock);
373 
374 	return ret;
375 }
376 
377 static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
378 {
379 	if (req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)
380 		return sec_alg_send_message_maybacklog(req);
381 
382 	return sec_alg_send_message_retry(req);
383 }
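
/*
 * Dispatch policy in sec_bd_send(): requests flagged
 * CRYPTO_TFM_REQ_MAY_BACKLOG are parked on the per-qp backlog when the
 * ring is full and replayed later from the completion path, while all
 * other requests simply retry the ring up to SEC_RETRY_MAX_CNT times.
 */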
384 
385 static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
386 {
387 	u16 q_depth = res->depth;
388 	int i;
389 
390 	res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth),
391 					 &res->c_ivin_dma, GFP_KERNEL);
392 	if (!res->c_ivin)
393 		return -ENOMEM;
394 
395 	for (i = 1; i < q_depth; i++) {
396 		res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
397 		res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
398 	}
399 
400 	return 0;
401 }
402 
403 static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
404 {
405 	if (res->c_ivin)
406 		dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth),
407 				  res->c_ivin, res->c_ivin_dma);
408 }
409 
410 static int sec_alloc_aiv_resource(struct device *dev, struct sec_alg_res *res)
411 {
412 	u16 q_depth = res->depth;
413 	int i;
414 
415 	res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth),
416 					 &res->a_ivin_dma, GFP_KERNEL);
417 	if (!res->a_ivin)
418 		return -ENOMEM;
419 
420 	for (i = 1; i < q_depth; i++) {
421 		res[i].a_ivin_dma = res->a_ivin_dma + i * SEC_IV_SIZE;
422 		res[i].a_ivin = res->a_ivin + i * SEC_IV_SIZE;
423 	}
424 
425 	return 0;
426 }
427 
428 static void sec_free_aiv_resource(struct device *dev, struct sec_alg_res *res)
429 {
430 	if (res->a_ivin)
431 		dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth),
432 				  res->a_ivin, res->a_ivin_dma);
433 }
434 
435 static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
436 {
437 	u16 q_depth = res->depth;
438 	int i;
439 
440 	res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ(q_depth) << 1,
441 					  &res->out_mac_dma, GFP_KERNEL);
442 	if (!res->out_mac)
443 		return -ENOMEM;
444 
445 	for (i = 1; i < q_depth; i++) {
446 		res[i].out_mac_dma = res->out_mac_dma +
447 				     i * (SEC_MAX_MAC_LEN << 1);
448 		res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1);
449 	}
450 
451 	return 0;
452 }
453 
454 static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
455 {
456 	if (res->out_mac)
457 		dma_free_coherent(dev, SEC_TOTAL_MAC_SZ(res->depth) << 1,
458 				  res->out_mac, res->out_mac_dma);
459 }
460 
461 static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
462 {
463 	if (res->pbuf)
464 		dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ(res->depth),
465 				  res->pbuf, res->pbuf_dma);
466 }
467 
468 /*
469  * To improve performance, pbuffer is used for
470  * small packets (< 512 bytes) to avoid IOMMU translation overhead.
471  */
472 static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
473 {
474 	u16 q_depth = res->depth;
475 	int size = SEC_PBUF_PAGE_NUM(q_depth);
476 	int pbuf_page_offset;
477 	int i, j, k;
478 
479 	res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ(q_depth),
480 				&res->pbuf_dma, GFP_KERNEL);
481 	if (!res->pbuf)
482 		return -ENOMEM;
483 
484 	/*
485 	 * Each SEC_PBUF_PKG holds the data pbuf, IV and
486 	 * out_mac: <SEC_PBUF|SEC_IV|SEC_MAC>.
487 	 * Every page holds SEC_PBUF_NUM such packages, and the
488 	 * qp context needs one package per queue-depth entry,
489 	 * so SEC_PBUF_PAGE_NUM full pages plus a partial tail
490 	 * make up SEC_TOTAL_PBUF_SZ.
491 	 */
492 	for (i = 0; i <= size; i++) {
493 		pbuf_page_offset = PAGE_SIZE * i;
494 		for (j = 0; j < SEC_PBUF_NUM; j++) {
495 			k = i * SEC_PBUF_NUM + j;
496 			if (k == q_depth)
497 				break;
498 			res[k].pbuf = res->pbuf +
499 				j * SEC_PBUF_PKG + pbuf_page_offset;
500 			res[k].pbuf_dma = res->pbuf_dma +
501 				j * SEC_PBUF_PKG + pbuf_page_offset;
502 		}
503 	}
504 
505 	return 0;
506 }
507 
508 static int sec_alg_resource_alloc(struct sec_ctx *ctx,
509 				  struct sec_qp_ctx *qp_ctx)
510 {
511 	struct sec_alg_res *res = qp_ctx->res;
512 	struct device *dev = ctx->dev;
513 	int ret;
514 
515 	ret = sec_alloc_civ_resource(dev, res);
516 	if (ret)
517 		return ret;
518 
519 	if (ctx->alg_type == SEC_AEAD) {
520 		ret = sec_alloc_aiv_resource(dev, res);
521 		if (ret)
522 			goto alloc_aiv_fail;
523 
524 		ret = sec_alloc_mac_resource(dev, res);
525 		if (ret)
526 			goto alloc_mac_fail;
527 	}
528 	if (ctx->pbuf_supported) {
529 		ret = sec_alloc_pbuf_resource(dev, res);
530 		if (ret) {
531 			dev_err(dev, "fail to alloc pbuf dma resource!\n");
532 			goto alloc_pbuf_fail;
533 		}
534 	}
535 
536 	return 0;
537 
538 alloc_pbuf_fail:
539 	if (ctx->alg_type == SEC_AEAD)
540 		sec_free_mac_resource(dev, qp_ctx->res);
541 alloc_mac_fail:
542 	if (ctx->alg_type == SEC_AEAD)
543 		sec_free_aiv_resource(dev, res);
544 alloc_aiv_fail:
545 	sec_free_civ_resource(dev, res);
546 	return ret;
547 }
548 
549 static void sec_alg_resource_free(struct sec_ctx *ctx,
550 				  struct sec_qp_ctx *qp_ctx)
551 {
552 	struct device *dev = ctx->dev;
553 
554 	sec_free_civ_resource(dev, qp_ctx->res);
555 
556 	if (ctx->pbuf_supported)
557 		sec_free_pbuf_resource(dev, qp_ctx->res);
558 	if (ctx->alg_type == SEC_AEAD) {
559 		sec_free_mac_resource(dev, qp_ctx->res);
560 		sec_free_aiv_resource(dev, qp_ctx->res);
561 	}
562 }
563 
564 static int sec_alloc_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
565 {
566 	u16 q_depth = qp_ctx->qp->sq_depth;
567 	struct device *dev = ctx->dev;
568 	int ret = -ENOMEM;
569 
570 	qp_ctx->req_list = kcalloc(q_depth, sizeof(struct sec_req *), GFP_KERNEL);
571 	if (!qp_ctx->req_list)
572 		return ret;
573 
574 	qp_ctx->res = kcalloc(q_depth, sizeof(struct sec_alg_res), GFP_KERNEL);
575 	if (!qp_ctx->res)
576 		goto err_free_req_list;
577 	qp_ctx->res->depth = q_depth;
578 
579 	qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR);
580 	if (IS_ERR(qp_ctx->c_in_pool)) {
581 		dev_err(dev, "fail to create sgl pool for input!\n");
582 		goto err_free_res;
583 	}
584 
585 	qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR);
586 	if (IS_ERR(qp_ctx->c_out_pool)) {
587 		dev_err(dev, "fail to create sgl pool for output!\n");
588 		goto err_free_c_in_pool;
589 	}
590 
591 	ret = sec_alg_resource_alloc(ctx, qp_ctx);
592 	if (ret)
593 		goto err_free_c_out_pool;
594 
595 	return 0;
596 
597 err_free_c_out_pool:
598 	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
599 err_free_c_in_pool:
600 	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
601 err_free_res:
602 	kfree(qp_ctx->res);
603 err_free_req_list:
604 	kfree(qp_ctx->req_list);
605 	return ret;
606 }
607 
608 static void sec_free_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
609 {
610 	struct device *dev = ctx->dev;
611 
612 	sec_alg_resource_free(ctx, qp_ctx);
613 	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
614 	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
615 	kfree(qp_ctx->res);
616 	kfree(qp_ctx->req_list);
617 }
618 
619 static int sec_create_qp_ctx(struct sec_ctx *ctx, int qp_ctx_id)
620 {
621 	struct sec_qp_ctx *qp_ctx;
622 	struct hisi_qp *qp;
623 	int ret;
624 
625 	qp_ctx = &ctx->qp_ctx[qp_ctx_id];
626 	qp = ctx->qps[qp_ctx_id];
627 	qp->req_type = 0;
628 	qp->qp_ctx = qp_ctx;
629 	qp_ctx->qp = qp;
630 	qp_ctx->ctx = ctx;
631 
632 	qp->req_cb = sec_req_cb;
633 
634 	spin_lock_init(&qp_ctx->req_lock);
635 	idr_init(&qp_ctx->req_idr);
636 	spin_lock_init(&qp_ctx->backlog.lock);
637 	spin_lock_init(&qp_ctx->id_lock);
638 	INIT_LIST_HEAD(&qp_ctx->backlog.list);
639 	qp_ctx->send_head = 0;
640 
641 	ret = sec_alloc_qp_ctx_resource(ctx, qp_ctx);
642 	if (ret)
643 		goto err_destroy_idr;
644 
645 	ret = hisi_qm_start_qp(qp, 0);
646 	if (ret < 0)
647 		goto err_resource_free;
648 
649 	return 0;
650 
651 err_resource_free:
652 	sec_free_qp_ctx_resource(ctx, qp_ctx);
653 err_destroy_idr:
654 	idr_destroy(&qp_ctx->req_idr);
655 	return ret;
656 }
657 
658 static void sec_release_qp_ctx(struct sec_ctx *ctx,
659 			       struct sec_qp_ctx *qp_ctx)
660 {
661 	hisi_qm_stop_qp(qp_ctx->qp);
662 	sec_free_qp_ctx_resource(ctx, qp_ctx);
663 	idr_destroy(&qp_ctx->req_idr);
664 }
665 
666 static int sec_ctx_base_init(struct sec_ctx *ctx)
667 {
668 	struct sec_dev *sec;
669 	int i, ret;
670 
671 	ctx->qps = sec_create_qps();
672 	if (!ctx->qps) {
673 		pr_err("Can not create sec qps!\n");
674 		return -ENODEV;
675 	}
676 
677 	sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
678 	ctx->sec = sec;
679 	ctx->dev = &sec->qm.pdev->dev;
680 	ctx->hlf_q_num = sec->ctx_q_num >> 1;
681 
682 	ctx->pbuf_supported = ctx->sec->iommu_used;
683 	ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
684 			      GFP_KERNEL);
685 	if (!ctx->qp_ctx) {
686 		ret = -ENOMEM;
687 		goto err_destroy_qps;
688 	}
689 
690 	for (i = 0; i < sec->ctx_q_num; i++) {
691 		ret = sec_create_qp_ctx(ctx, i);
692 		if (ret)
693 			goto err_sec_release_qp_ctx;
694 	}
695 
696 	return 0;
697 
698 err_sec_release_qp_ctx:
699 	for (i = i - 1; i >= 0; i--)
700 		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
701 	kfree(ctx->qp_ctx);
702 err_destroy_qps:
703 	sec_destroy_qps(ctx->qps, sec->ctx_q_num);
704 	return ret;
705 }
706 
707 static void sec_ctx_base_uninit(struct sec_ctx *ctx)
708 {
709 	int i;
710 
711 	for (i = 0; i < ctx->sec->ctx_q_num; i++)
712 		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
713 
714 	sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num);
715 	kfree(ctx->qp_ctx);
716 }
717 
718 static int sec_cipher_init(struct sec_ctx *ctx)
719 {
720 	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
721 
722 	c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
723 					  &c_ctx->c_key_dma, GFP_KERNEL);
724 	if (!c_ctx->c_key)
725 		return -ENOMEM;
726 
727 	return 0;
728 }
729 
730 static void sec_cipher_uninit(struct sec_ctx *ctx)
731 {
732 	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
733 
734 	memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
735 	dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
736 			  c_ctx->c_key, c_ctx->c_key_dma);
737 }
738 
739 static int sec_auth_init(struct sec_ctx *ctx)
740 {
741 	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
742 
743 	a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_AKEY_SIZE,
744 					  &a_ctx->a_key_dma, GFP_KERNEL);
745 	if (!a_ctx->a_key)
746 		return -ENOMEM;
747 
748 	return 0;
749 }
750 
751 static void sec_auth_uninit(struct sec_ctx *ctx)
752 {
753 	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
754 
755 	memzero_explicit(a_ctx->a_key, SEC_MAX_AKEY_SIZE);
756 	dma_free_coherent(ctx->dev, SEC_MAX_AKEY_SIZE,
757 			  a_ctx->a_key, a_ctx->a_key_dma);
758 }
759 
760 static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm)
761 {
762 	const char *alg = crypto_tfm_alg_name(&tfm->base);
763 	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
764 	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
765 
766 	c_ctx->fallback = false;
767 
768 	c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0,
769 						  CRYPTO_ALG_NEED_FALLBACK);
770 	if (IS_ERR(c_ctx->fbtfm)) {
771 		pr_err("failed to alloc fallback tfm for %s!\n", alg);
772 		return PTR_ERR(c_ctx->fbtfm);
773 	}
774 
775 	return 0;
776 }
777 
778 static int sec_skcipher_init(struct crypto_skcipher *tfm)
779 {
780 	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
781 	int ret;
782 
783 	ctx->alg_type = SEC_SKCIPHER;
784 	crypto_skcipher_set_reqsize_dma(tfm, sizeof(struct sec_req));
785 	ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
786 	if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
787 		pr_err("get error skcipher iv size!\n");
788 		return -EINVAL;
789 	}
790 
791 	ret = sec_ctx_base_init(ctx);
792 	if (ret)
793 		return ret;
794 
795 	ret = sec_cipher_init(ctx);
796 	if (ret)
797 		goto err_cipher_init;
798 
799 	ret = sec_skcipher_fbtfm_init(tfm);
800 	if (ret)
801 		goto err_fbtfm_init;
802 
803 	return 0;
804 
805 err_fbtfm_init:
806 	sec_cipher_uninit(ctx);
807 err_cipher_init:
808 	sec_ctx_base_uninit(ctx);
809 	return ret;
810 }
811 
812 static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
813 {
814 	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
815 
816 	if (ctx->c_ctx.fbtfm)
817 		crypto_free_sync_skcipher(ctx->c_ctx.fbtfm);
818 
819 	sec_cipher_uninit(ctx);
820 	sec_ctx_base_uninit(ctx);
821 }
822 
823 static int sec_skcipher_3des_setkey(struct crypto_skcipher *tfm, const u8 *key, const u32 keylen)
824 {
825 	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
826 	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
827 	int ret;
828 
829 	ret = verify_skcipher_des3_key(tfm, key);
830 	if (ret)
831 		return ret;
832 
833 	switch (keylen) {
834 	case SEC_DES3_2KEY_SIZE:
835 		c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
836 		break;
837 	case SEC_DES3_3KEY_SIZE:
838 		c_ctx->c_key_len = SEC_CKEY_3DES_3KEY;
839 		break;
840 	default:
841 		return -EINVAL;
842 	}
843 
844 	return 0;
845 }
846 
847 static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
848 				       const u32 keylen,
849 				       const enum sec_cmode c_mode)
850 {
851 	if (c_mode == SEC_CMODE_XTS) {
852 		switch (keylen) {
853 		case SEC_XTS_MIN_KEY_SIZE:
854 			c_ctx->c_key_len = SEC_CKEY_128BIT;
855 			break;
856 		case SEC_XTS_MID_KEY_SIZE:
857 			c_ctx->fallback = true;
858 			break;
859 		case SEC_XTS_MAX_KEY_SIZE:
860 			c_ctx->c_key_len = SEC_CKEY_256BIT;
861 			break;
862 		default:
863 			pr_err("hisi_sec2: xts mode key error!\n");
864 			return -EINVAL;
865 		}
866 	} else {
867 		if (c_ctx->c_alg == SEC_CALG_SM4 &&
868 		    keylen != AES_KEYSIZE_128) {
869 			pr_err("hisi_sec2: sm4 key error!\n");
870 			return -EINVAL;
871 		} else {
872 			switch (keylen) {
873 			case AES_KEYSIZE_128:
874 				c_ctx->c_key_len = SEC_CKEY_128BIT;
875 				break;
876 			case AES_KEYSIZE_192:
877 				c_ctx->c_key_len = SEC_CKEY_192BIT;
878 				break;
879 			case AES_KEYSIZE_256:
880 				c_ctx->c_key_len = SEC_CKEY_256BIT;
881 				break;
882 			default:
883 				pr_err("hisi_sec2: aes key error!\n");
884 				return -EINVAL;
885 			}
886 		}
887 	}
888 
889 	return 0;
890 }
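
/*
 * The SEC_XTS_MID_KEY_SIZE case above (a 48-byte key, i.e. XTS with
 * AES-192) only sets c_ctx->fallback, which suggests the engine does
 * not handle this key length itself and such requests are meant for
 * the software fallback tfm.
 */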
891 
892 static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
893 			       const u32 keylen, const enum sec_calg c_alg,
894 			       const enum sec_cmode c_mode)
895 {
896 	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
897 	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
898 	struct device *dev = ctx->dev;
899 	int ret;
900 
901 	if (c_mode == SEC_CMODE_XTS) {
902 		ret = xts_verify_key(tfm, key, keylen);
903 		if (ret) {
904 			dev_err(dev, "xts mode key err!\n");
905 			return ret;
906 		}
907 	}
908 
909 	c_ctx->c_alg  = c_alg;
910 	c_ctx->c_mode = c_mode;
911 
912 	switch (c_alg) {
913 	case SEC_CALG_3DES:
914 		ret = sec_skcipher_3des_setkey(tfm, key, keylen);
915 		break;
916 	case SEC_CALG_AES:
917 	case SEC_CALG_SM4:
918 		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
919 		break;
920 	default:
921 		dev_err(dev, "sec c_alg err!\n");
922 		return -EINVAL;
923 	}
924 
925 	if (ret) {
926 		dev_err(dev, "set sec key err!\n");
927 		return ret;
928 	}
929 
930 	memcpy(c_ctx->c_key, key, keylen);
931 	if (c_ctx->fbtfm) {
932 		ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen);
933 		if (ret) {
934 			dev_err(dev, "failed to set fallback skcipher key!\n");
935 			return ret;
936 		}
937 	}
938 	return 0;
939 }
940 
941 #define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode)			\
942 static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
943 	u32 keylen)							\
944 {									\
945 	return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode);	\
946 }
947 
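/*
 * For illustration, the first invocation below,
 * GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB),
 * expands to:
 *
 *	static int sec_setkey_aes_ecb(struct crypto_skcipher *tfm,
 *				      const u8 *key, u32 keylen)
 *	{
 *		return sec_skcipher_setkey(tfm, key, keylen,
 *					   SEC_CALG_AES, SEC_CMODE_ECB);
 *	}
 */
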
948 GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
949 GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
950 GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)
951 GEN_SEC_SETKEY_FUNC(aes_ctr, SEC_CALG_AES, SEC_CMODE_CTR)
952 GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
953 GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)
954 GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
955 GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)
956 GEN_SEC_SETKEY_FUNC(sm4_ctr, SEC_CALG_SM4, SEC_CMODE_CTR)
957 
958 static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
959 			struct scatterlist *src)
960 {
961 	struct aead_request *aead_req = req->aead_req.aead_req;
962 	struct sec_cipher_req *c_req = &req->c_req;
963 	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
964 	struct sec_request_buf *buf = &req->buf;
965 	struct device *dev = ctx->dev;
966 	int copy_size, pbuf_length;
967 	int req_id = req->req_id;
968 	struct crypto_aead *tfm;
969 	u8 *mac_offset, *pbuf;
970 	size_t authsize;
971 
972 	if (ctx->alg_type == SEC_AEAD)
973 		copy_size = aead_req->cryptlen + aead_req->assoclen;
974 	else
975 		copy_size = c_req->c_len;
976 
977 
978 	pbuf = req->req_id < 0 ? buf->pbuf : qp_ctx->res[req_id].pbuf;
979 	pbuf_length = sg_copy_to_buffer(src, sg_nents(src), pbuf, copy_size);
980 	if (unlikely(pbuf_length != copy_size)) {
981 		dev_err(dev, "copy src data to pbuf error!\n");
982 		return -EINVAL;
983 	}
984 	if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
985 		tfm = crypto_aead_reqtfm(aead_req);
986 		authsize = crypto_aead_authsize(tfm);
987 		mac_offset = pbuf + copy_size - authsize;
988 		memcpy(req->aead_req.out_mac, mac_offset, authsize);
989 	}
990 
991 	if (req->req_id < 0) {
992 		buf->in_dma = dma_map_single(dev, buf->pbuf, SEC_PBUF_SZ, DMA_BIDIRECTIONAL);
993 		if (unlikely(dma_mapping_error(dev, buf->in_dma)))
994 			return -ENOMEM;
995 
996 		buf->out_dma = buf->in_dma;
997 		return 0;
998 	}
999 
1000 	req->in_dma = qp_ctx->res[req_id].pbuf_dma;
1001 	c_req->c_out_dma = req->in_dma;
1002 
1003 	return 0;
1004 }
1005 
1006 static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
1007 			struct scatterlist *dst)
1008 {
1009 	struct aead_request *aead_req = req->aead_req.aead_req;
1010 	struct sec_cipher_req *c_req = &req->c_req;
1011 	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
1012 	struct sec_request_buf *buf = &req->buf;
1013 	int copy_size, pbuf_length;
1014 	int req_id = req->req_id;
1015 
1016 	if (ctx->alg_type == SEC_AEAD)
1017 		copy_size = c_req->c_len + aead_req->assoclen;
1018 	else
1019 		copy_size = c_req->c_len;
1020 
1021 	if (req->req_id < 0)
1022 		pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst), buf->pbuf, copy_size);
1023 	else
1024 		pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst), qp_ctx->res[req_id].pbuf,
1025 						  copy_size);
1026 	if (unlikely(pbuf_length != copy_size))
1027 		dev_err(ctx->dev, "copy pbuf data to dst error!\n");
1028 
1029 	if (req->req_id < 0)
1030 		dma_unmap_single(ctx->dev, buf->in_dma, SEC_PBUF_SZ, DMA_BIDIRECTIONAL);
1031 }
1032 
1033 static int sec_aead_mac_init(struct sec_aead_req *req)
1034 {
1035 	struct aead_request *aead_req = req->aead_req;
1036 	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
1037 	size_t authsize = crypto_aead_authsize(tfm);
1038 	struct scatterlist *sgl = aead_req->src;
1039 	u8 *mac_out = req->out_mac;
1040 	size_t copy_size;
1041 	off_t skip_size;
1042 
1043 	/* Copy input mac */
1044 	skip_size = aead_req->assoclen + aead_req->cryptlen - authsize;
1045 	copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out, authsize, skip_size);
1046 	if (unlikely(copy_size != authsize))
1047 		return -EINVAL;
1048 
1049 	return 0;
1050 }
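
/*
 * For AEAD decryption the received ICV sits in the last authsize bytes
 * of the source scatterlist (offset assoclen + cryptlen - authsize);
 * sec_aead_mac_init() stages it into out_mac ahead of the hardware
 * operation, and a mismatch later shows up as SEC_ICV_ERR in the
 * completion status.
 */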
1051 
1052 static void fill_sg_to_hw_sge(struct scatterlist *sgl, struct sec_hw_sge *hw_sge)
1053 {
1054 	hw_sge->buf = sg_dma_address(sgl);
1055 	hw_sge->len = cpu_to_le32(sg_dma_len(sgl));
1056 	hw_sge->page_ctrl = sg_virt(sgl);
1057 }
1058 
1059 static int sec_cipher_to_hw_sgl(struct device *dev, struct scatterlist *src,
1060 				struct sec_hw_sgl *src_in, dma_addr_t *hw_sgl_dma,
1061 				int dma_dir)
1062 {
1063 	struct sec_hw_sge *curr_hw_sge = src_in->sge_entries;
1064 	u32 i, sg_n, sg_n_mapped;
1065 	struct scatterlist *sg;
1066 	u32 sge_var = 0;
1067 
1068 	sg_n = sg_nents(src);
1069 	sg_n_mapped = dma_map_sg(dev, src, sg_n, dma_dir);
1070 	if (unlikely(!sg_n_mapped)) {
1071 		dev_err(dev, "dma mapping for SG error!\n");
1072 		return -EINVAL;
1073 	} else if (unlikely(sg_n_mapped > SEC_SGE_NR_NUM)) {
1074 		dev_err(dev, "the number of entries in input scatterlist error!\n");
1075 		dma_unmap_sg(dev, src, sg_n, dma_dir);
1076 		return -EINVAL;
1077 	}
1078 
1079 	for_each_sg(src, sg, sg_n_mapped, i) {
1080 		fill_sg_to_hw_sge(sg, curr_hw_sge);
1081 		curr_hw_sge++;
1082 		sge_var++;
1083 	}
1084 
1085 	src_in->entry_sum_in_sgl = cpu_to_le16(sge_var);
1086 	src_in->entry_sum_in_chain = cpu_to_le16(SEC_SGE_NR_NUM);
1087 	src_in->entry_length_in_sgl = cpu_to_le16(SEC_SGE_NR_NUM);
1088 	*hw_sgl_dma = dma_map_single(dev, src_in, sizeof(struct sec_hw_sgl), dma_dir);
1089 	if (unlikely(dma_mapping_error(dev, *hw_sgl_dma))) {
1090 		dma_unmap_sg(dev, src, sg_n, dma_dir);
1091 		return -ENOMEM;
1092 	}
1093 
1094 	return 0;
1095 }
1096 
1097 static void sec_cipher_put_hw_sgl(struct device *dev, struct scatterlist *src,
1098 				  dma_addr_t src_in, int dma_dir)
1099 {
1100 	dma_unmap_single(dev, src_in, sizeof(struct sec_hw_sgl), dma_dir);
1101 	dma_unmap_sg(dev, src, sg_nents(src), dma_dir);
1102 }
1103 
1104 static int sec_cipher_map_sgl(struct device *dev, struct sec_req *req,
1105 			      struct scatterlist *src, struct scatterlist *dst)
1106 {
1107 	struct sec_hw_sgl *src_in = &req->buf.data_buf.in;
1108 	struct sec_hw_sgl *dst_out = &req->buf.data_buf.out;
1109 	int ret;
1110 
1111 	if (dst == src) {
1112 		ret = sec_cipher_to_hw_sgl(dev, src, src_in, &req->buf.in_dma,
1113 					    DMA_BIDIRECTIONAL);
1114 		req->buf.out_dma = req->buf.in_dma;
1115 		return ret;
1116 	}
1117 
1118 	ret = sec_cipher_to_hw_sgl(dev, src, src_in, &req->buf.in_dma, DMA_TO_DEVICE);
1119 	if (unlikely(ret))
1120 		return ret;
1121 
1122 	ret = sec_cipher_to_hw_sgl(dev, dst, dst_out, &req->buf.out_dma,
1123 				   DMA_FROM_DEVICE);
1124 	if (unlikely(ret)) {
1125 		sec_cipher_put_hw_sgl(dev, src, req->buf.in_dma, DMA_TO_DEVICE);
1126 		return ret;
1127 	}
1128 
1129 	return 0;
1130 }
1131 
1132 static int sec_cipher_map_inner(struct sec_ctx *ctx, struct sec_req *req,
1133 				struct scatterlist *src, struct scatterlist *dst)
1134 {
1135 	struct sec_cipher_req *c_req = &req->c_req;
1136 	struct sec_aead_req *a_req = &req->aead_req;
1137 	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
1138 	struct sec_alg_res *res = &qp_ctx->res[req->req_id];
1139 	struct device *dev = ctx->dev;
1140 	enum dma_data_direction src_direction;
1141 	int ret;
1142 
1143 	if (req->use_pbuf) {
1144 		c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET;
1145 		c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET;
1146 		if (ctx->alg_type == SEC_AEAD) {
1147 			a_req->a_ivin = res->a_ivin;
1148 			a_req->a_ivin_dma = res->a_ivin_dma;
1149 			a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET;
1150 			a_req->out_mac_dma = res->pbuf_dma +
1151 					SEC_PBUF_MAC_OFFSET;
1152 		}
1153 		return sec_cipher_pbuf_map(ctx, req, src);
1154 	}
1155 
1156 	c_req->c_ivin = res->c_ivin;
1157 	c_req->c_ivin_dma = res->c_ivin_dma;
1158 	if (ctx->alg_type == SEC_AEAD) {
1159 		a_req->a_ivin = res->a_ivin;
1160 		a_req->a_ivin_dma = res->a_ivin_dma;
1161 		a_req->out_mac = res->out_mac;
1162 		a_req->out_mac_dma = res->out_mac_dma;
1163 	}
1164 
1165 	src_direction = dst == src ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
1166 	req->in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
1167 						qp_ctx->c_in_pool,
1168 						req->req_id,
1169 						&req->in_dma, src_direction);
1170 	if (IS_ERR(req->in)) {
1171 		dev_err(dev, "fail to dma map input sgl buffers!\n");
1172 		return PTR_ERR(req->in);
1173 	}
1174 
1175 	if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
1176 		ret = sec_aead_mac_init(a_req);
1177 		if (unlikely(ret)) {
1178 			dev_err(dev, "fail to init mac data for ICV!\n");
1179 			hisi_acc_sg_buf_unmap(dev, src, req->in, src_direction);
1180 			return ret;
1181 		}
1182 	}
1183 
1184 	if (dst == src) {
1185 		c_req->c_out = req->in;
1186 		c_req->c_out_dma = req->in_dma;
1187 	} else {
1188 		c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
1189 							     qp_ctx->c_out_pool,
1190 							     req->req_id,
1191 							     &c_req->c_out_dma,
1192 							     DMA_FROM_DEVICE);
1193 
1194 		if (IS_ERR(c_req->c_out)) {
1195 			dev_err(dev, "fail to dma map output sgl buffers!\n");
1196 			hisi_acc_sg_buf_unmap(dev, src, req->in, src_direction);
1197 			return PTR_ERR(c_req->c_out);
1198 		}
1199 	}
1200 
1201 	return 0;
1202 }
1203 
1204 static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
1205 			  struct scatterlist *src, struct scatterlist *dst)
1206 {
1207 	struct sec_aead_req *a_req = &req->aead_req;
1208 	struct sec_cipher_req *c_req = &req->c_req;
1209 	bool is_aead = (ctx->alg_type == SEC_AEAD);
1210 	struct device *dev = ctx->dev;
1211 	int ret = -ENOMEM;
1212 
1213 	if (req->req_id >= 0)
1214 		return sec_cipher_map_inner(ctx, req, src, dst);
1215 
1216 	c_req->c_ivin = c_req->c_ivin_buf;
1217 	c_req->c_ivin_dma = dma_map_single(dev, c_req->c_ivin,
1218 					   SEC_IV_SIZE, DMA_TO_DEVICE);
1219 	if (unlikely(dma_mapping_error(dev, c_req->c_ivin_dma)))
1220 		return -ENOMEM;
1221 
1222 	if (is_aead) {
1223 		a_req->a_ivin = a_req->a_ivin_buf;
1224 		a_req->out_mac = a_req->out_mac_buf;
1225 		a_req->a_ivin_dma = dma_map_single(dev, a_req->a_ivin,
1226 						   SEC_IV_SIZE, DMA_TO_DEVICE);
1227 		if (unlikely(dma_mapping_error(dev, a_req->a_ivin_dma)))
1228 			goto free_c_ivin_dma;
1229 
1230 		a_req->out_mac_dma = dma_map_single(dev, a_req->out_mac,
1231 						    SEC_MAX_MAC_LEN, DMA_BIDIRECTIONAL);
1232 		if (unlikely(dma_mapping_error(dev, a_req->out_mac_dma)))
1233 			goto free_a_ivin_dma;
1234 	}
1235 	if (req->use_pbuf) {
1236 		ret = sec_cipher_pbuf_map(ctx, req, src);
1237 		if (unlikely(ret))
1238 			goto free_out_mac_dma;
1239 
1240 		return 0;
1241 	}
1242 
1243 	if (!c_req->encrypt && is_aead) {
1244 		ret = sec_aead_mac_init(a_req);
1245 		if (unlikely(ret)) {
1246 			dev_err(dev, "fail to init mac data for ICV!\n");
1247 			goto free_out_mac_dma;
1248 		}
1249 	}
1250 
1251 	ret = sec_cipher_map_sgl(dev, req, src, dst);
1252 	if (unlikely(ret)) {
1253 		dev_err(dev, "fail to dma map input sgl buffers!\n");
1254 		goto free_out_mac_dma;
1255 	}
1256 
1257 	return 0;
1258 
1259 free_out_mac_dma:
1260 	if (is_aead)
1261 		dma_unmap_single(dev, a_req->out_mac_dma, SEC_MAX_MAC_LEN, DMA_BIDIRECTIONAL);
1262 free_a_ivin_dma:
1263 	if (is_aead)
1264 		dma_unmap_single(dev, a_req->a_ivin_dma, SEC_IV_SIZE, DMA_TO_DEVICE);
1265 free_c_ivin_dma:
1266 	dma_unmap_single(dev, c_req->c_ivin_dma, SEC_IV_SIZE, DMA_TO_DEVICE);
1267 	return ret;
1268 }
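
/*
 * Two mapping strategies meet in sec_cipher_map(): requests that hold
 * a ring id (req_id >= 0) reuse the pre-allocated per-slot IV/MAC/pbuf
 * resources via sec_cipher_map_inner(), while requests without one
 * build standalone DMA mappings on struct sec_req itself so they can
 * still sit on the software backlog.
 */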
1269 
1270 static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
1271 			     struct scatterlist *src, struct scatterlist *dst)
1272 {
1273 	struct sec_aead_req *a_req = &req->aead_req;
1274 	struct sec_cipher_req *c_req = &req->c_req;
1275 	struct device *dev = ctx->dev;
1276 
1277 	if (req->req_id >= 0) {
1278 		if (req->use_pbuf) {
1279 			sec_cipher_pbuf_unmap(ctx, req, dst);
1280 		} else {
1281 			if (dst != src) {
1282 				hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out, DMA_FROM_DEVICE);
1283 				hisi_acc_sg_buf_unmap(dev, src, req->in, DMA_TO_DEVICE);
1284 			} else {
1285 				hisi_acc_sg_buf_unmap(dev, src, req->in, DMA_BIDIRECTIONAL);
1286 			}
1287 		}
1288 		return;
1289 	}
1290 
1291 	if (req->use_pbuf) {
1292 		sec_cipher_pbuf_unmap(ctx, req, dst);
1293 	} else {
1294 		if (dst != src) {
1295 			sec_cipher_put_hw_sgl(dev, dst, req->buf.out_dma, DMA_FROM_DEVICE);
1296 			sec_cipher_put_hw_sgl(dev, src, req->buf.in_dma, DMA_TO_DEVICE);
1297 		} else {
1298 			sec_cipher_put_hw_sgl(dev, src, req->buf.in_dma, DMA_BIDIRECTIONAL);
1299 		}
1300 	}
1301 
1302 	dma_unmap_single(dev, c_req->c_ivin_dma, SEC_IV_SIZE, DMA_TO_DEVICE);
1303 	if (ctx->alg_type == SEC_AEAD) {
1304 		dma_unmap_single(dev, a_req->a_ivin_dma, SEC_IV_SIZE, DMA_TO_DEVICE);
1305 		dma_unmap_single(dev, a_req->out_mac_dma, SEC_MAX_MAC_LEN, DMA_BIDIRECTIONAL);
1306 	}
1307 }
1308 
1309 static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
1310 {
1311 	struct skcipher_request *sq = req->c_req.sk_req;
1312 
1313 	return sec_cipher_map(ctx, req, sq->src, sq->dst);
1314 }
1315 
1316 static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
1317 {
1318 	struct skcipher_request *sq = req->c_req.sk_req;
1319 
1320 	sec_cipher_unmap(ctx, req, sq->src, sq->dst);
1321 }
1322 
1323 static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx,
1324 				struct crypto_authenc_keys *keys)
1325 {
1326 	switch (keys->enckeylen) {
1327 	case AES_KEYSIZE_128:
1328 		c_ctx->c_key_len = SEC_CKEY_128BIT;
1329 		break;
1330 	case AES_KEYSIZE_192:
1331 		c_ctx->c_key_len = SEC_CKEY_192BIT;
1332 		break;
1333 	case AES_KEYSIZE_256:
1334 		c_ctx->c_key_len = SEC_CKEY_256BIT;
1335 		break;
1336 	default:
1337 		pr_err("hisi_sec2: aead aes key error!\n");
1338 		return -EINVAL;
1339 	}
1340 	memcpy(c_ctx->c_key, keys->enckey, keys->enckeylen);
1341 
1342 	return 0;
1343 }
1344 
1345 static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
1346 				 struct crypto_authenc_keys *keys)
1347 {
1348 	struct crypto_shash *hash_tfm = ctx->hash_tfm;
1349 	int blocksize, digestsize, ret;
1350 
1351 	blocksize = crypto_shash_blocksize(hash_tfm);
1352 	digestsize = crypto_shash_digestsize(hash_tfm);
1353 	if (keys->authkeylen > blocksize) {
1354 		ret = crypto_shash_tfm_digest(hash_tfm, keys->authkey,
1355 					      keys->authkeylen, ctx->a_key);
1356 		if (ret) {
1357 			pr_err("hisi_sec2: aead auth digest error!\n");
1358 			return -EINVAL;
1359 		}
1360 		ctx->a_key_len = digestsize;
1361 	} else {
1362 		if (keys->authkeylen)
1363 			memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
1364 		ctx->a_key_len = keys->authkeylen;
1365 	}
1366 
1367 	return 0;
1368 }
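
/*
 * This follows the standard HMAC convention: an auth key longer than
 * the hash block size is first digested down to digestsize bytes,
 * while a shorter key is used verbatim.
 */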
1369 
1370 static int sec_aead_setauthsize(struct crypto_aead *aead, unsigned int authsize)
1371 {
1372 	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
1373 	struct sec_ctx *ctx = crypto_tfm_ctx(tfm);
1374 	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
1375 
1376 	return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);
1377 }
1378 
1379 static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
1380 				    struct crypto_aead *tfm, const u8 *key,
1381 				    unsigned int keylen)
1382 {
1383 	crypto_aead_clear_flags(a_ctx->fallback_aead_tfm, CRYPTO_TFM_REQ_MASK);
1384 	crypto_aead_set_flags(a_ctx->fallback_aead_tfm,
1385 			      crypto_aead_get_flags(tfm) & CRYPTO_TFM_REQ_MASK);
1386 	return crypto_aead_setkey(a_ctx->fallback_aead_tfm, key, keylen);
1387 }
1388 
1389 static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
1390 			   const u32 keylen, const enum sec_hash_alg a_alg,
1391 			   const enum sec_calg c_alg,
1392 			   const enum sec_cmode c_mode)
1393 {
1394 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1395 	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
1396 	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
1397 	struct device *dev = ctx->dev;
1398 	struct crypto_authenc_keys keys;
1399 	int ret;
1400 
1401 	ctx->a_ctx.a_alg = a_alg;
1402 	ctx->c_ctx.c_alg = c_alg;
1403 	c_ctx->c_mode = c_mode;
1404 
1405 	if (c_mode == SEC_CMODE_CCM || c_mode == SEC_CMODE_GCM) {
1406 		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
1407 		if (ret) {
1408 			dev_err(dev, "set sec aes ccm cipher key err!\n");
1409 			return ret;
1410 		}
1411 		memcpy(c_ctx->c_key, key, keylen);
1412 
1413 		return sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
1414 	}
1415 
1416 	ret = crypto_authenc_extractkeys(&keys, key, keylen);
1417 	if (ret) {
1418 		dev_err(dev, "sec extract aead keys err!\n");
1419 		goto bad_key;
1420 	}
1421 
1422 	ret = sec_aead_aes_set_key(c_ctx, &keys);
1423 	if (ret) {
1424 		dev_err(dev, "set sec cipher key err!\n");
1425 		goto bad_key;
1426 	}
1427 
1428 	ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys);
1429 	if (ret) {
1430 		dev_err(dev, "set sec auth key err!\n");
1431 		goto bad_key;
1432 	}
1433 
1434 	ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
1435 	if (ret) {
1436 		dev_err(dev, "set sec fallback key err!\n");
1437 		goto bad_key;
1438 	}
1439 
1440 	return 0;
1441 
1442 bad_key:
1443 	memzero_explicit(&keys, sizeof(struct crypto_authenc_keys));
1444 	return ret;
1445 }
1446 
1447 
1448 #define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, cmode)				\
1449 static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, u32 keylen)	\
1450 {											\
1451 	return sec_aead_setkey(tfm, key, keylen, aalg, calg, cmode);			\
1452 }
1453 
1454 GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1, SEC_CALG_AES, SEC_CMODE_CBC)
1455 GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256, SEC_CALG_AES, SEC_CMODE_CBC)
1456 GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512, SEC_CALG_AES, SEC_CMODE_CBC)
1457 GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES, SEC_CMODE_CCM)
1458 GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES, SEC_CMODE_GCM)
1459 GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4, SEC_CMODE_CCM)
1460 GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4, SEC_CMODE_GCM)
1461 
1462 static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
1463 {
1464 	struct aead_request *aq = req->aead_req.aead_req;
1465 
1466 	return sec_cipher_map(ctx, req, aq->src, aq->dst);
1467 }
1468 
1469 static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
1470 {
1471 	struct aead_request *aq = req->aead_req.aead_req;
1472 
1473 	sec_cipher_unmap(ctx, req, aq->src, aq->dst);
1474 }
1475 
1476 static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
1477 {
1478 	int ret;
1479 
1480 	ret = ctx->req_op->buf_map(ctx, req);
1481 	if (unlikely(ret))
1482 		return ret;
1483 
1484 	ctx->req_op->do_transfer(ctx, req);
1485 
1486 	ret = ctx->req_op->bd_fill(ctx, req);
1487 	if (unlikely(ret))
1488 		goto unmap_req_buf;
1489 
1490 	return ret;
1491 
1492 unmap_req_buf:
1493 	ctx->req_op->buf_unmap(ctx, req);
1494 	return ret;
1495 }
1496 
1497 static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
1498 {
1499 	ctx->req_op->buf_unmap(ctx, req);
1500 }
1501 
1502 static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
1503 {
1504 	struct skcipher_request *sk_req = req->c_req.sk_req;
1505 	struct sec_cipher_req *c_req = &req->c_req;
1506 
1507 	memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
1508 }
1509 
1510 static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
1511 {
1512 	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
1513 	struct sec_cipher_req *c_req = &req->c_req;
1514 	struct sec_sqe *sec_sqe = &req->sec_sqe;
1515 	u8 scene, sa_type, da_type;
1516 	u8 bd_type, cipher;
1517 	u8 de = 0;
1518 
1519 	memset(sec_sqe, 0, sizeof(struct sec_sqe));
1520 
1521 	sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
1522 	sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
1523 	if (req->req_id < 0) {
1524 		sec_sqe->type2.data_src_addr = cpu_to_le64(req->buf.in_dma);
1525 		sec_sqe->type2.data_dst_addr = cpu_to_le64(req->buf.out_dma);
1526 	} else {
1527 		sec_sqe->type2.data_src_addr = cpu_to_le64(req->in_dma);
1528 		sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);
1529 	}
1530 	if (sec_sqe->type2.data_src_addr != sec_sqe->type2.data_dst_addr)
1531 		de = 0x1 << SEC_DE_OFFSET;
1532 
1533 	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
1534 						SEC_CMODE_OFFSET);
1535 	sec_sqe->type2.c_alg = c_ctx->c_alg;
1536 	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
1537 						SEC_CKEY_OFFSET);
1538 
1539 	bd_type = SEC_BD_TYPE2;
1540 	if (c_req->encrypt)
1541 		cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET;
1542 	else
1543 		cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
1544 	sec_sqe->type_cipher_auth = bd_type | cipher;
1545 
1546 	/* Set destination and source address type */
1547 	if (req->use_pbuf) {
1548 		sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET;
1549 		da_type = SEC_PBUF << SEC_DST_SGL_OFFSET;
1550 	} else {
1551 		sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
1552 		da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
1553 	}
1554 
1555 	sec_sqe->sdm_addr_type |= da_type;
1556 	scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
1557 
1558 	sec_sqe->sds_sa_type = (de | scene | sa_type);
1559 
1560 	sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
1561 
1562 	return 0;
1563 }
1564 
1565 static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
1566 {
1567 	struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3;
1568 	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
1569 	struct sec_cipher_req *c_req = &req->c_req;
1570 	u32 bd_param = 0;
1571 	u16 cipher;
1572 
1573 	memset(sec_sqe3, 0, sizeof(struct sec_sqe3));
1574 
1575 	sec_sqe3->c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
1576 	sec_sqe3->no_scene.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
1577 	if (req->req_id < 0) {
1578 		sec_sqe3->data_src_addr = cpu_to_le64(req->buf.in_dma);
1579 		sec_sqe3->data_dst_addr = cpu_to_le64(req->buf.out_dma);
1580 	} else {
1581 		sec_sqe3->data_src_addr = cpu_to_le64(req->in_dma);
1582 		sec_sqe3->data_dst_addr = cpu_to_le64(c_req->c_out_dma);
1583 	}
1584 	if (sec_sqe3->data_src_addr != sec_sqe3->data_dst_addr)
1585 		bd_param |= 0x1 << SEC_DE_OFFSET_V3;
1586 
1587 	sec_sqe3->c_mode_alg = ((u8)c_ctx->c_alg << SEC_CALG_OFFSET_V3) |
1588 						c_ctx->c_mode;
1589 	sec_sqe3->c_icv_key |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
1590 						SEC_CKEY_OFFSET_V3);
1591 
1592 	if (c_req->encrypt)
1593 		cipher = SEC_CIPHER_ENC;
1594 	else
1595 		cipher = SEC_CIPHER_DEC;
1596 	sec_sqe3->c_icv_key |= cpu_to_le16(cipher);
1597 
1598 	/* Set the CTR counter rollover width to 128 bits */
1599 	sec_sqe3->auth_mac_key = cpu_to_le32((u32)SEC_CTR_CNT_ROLLOVER <<
1600 					SEC_CTR_CNT_OFFSET);
1601 
1602 	if (req->use_pbuf) {
1603 		bd_param |= SEC_PBUF << SEC_SRC_SGL_OFFSET_V3;
1604 		bd_param |= SEC_PBUF << SEC_DST_SGL_OFFSET_V3;
1605 	} else {
1606 		bd_param |= SEC_SGL << SEC_SRC_SGL_OFFSET_V3;
1607 		bd_param |= SEC_SGL << SEC_DST_SGL_OFFSET_V3;
1608 	}
1609 
1610 	bd_param |= SEC_COMM_SCENE << SEC_SCENE_OFFSET_V3;
1611 
1612 	bd_param |= SEC_BD_TYPE3;
1613 	sec_sqe3->bd_param = cpu_to_le32(bd_param);
1614 
1615 	sec_sqe3->c_len_ivin |= cpu_to_le32(c_req->c_len);
1616 	sec_sqe3->tag = cpu_to_le64((unsigned long)req);
1617 
1618 	return 0;
1619 }
1620 
1621 /* increment counter (128-bit int) */
1622 static void ctr_iv_inc(__u8 *counter, __u8 bits, __u32 nums)
1623 {
1624 	do {
1625 		--bits;
1626 		nums += counter[bits];
1627 		counter[bits] = nums & BITS_MASK;
1628 		nums >>= BYTE_BITS;
1629 	} while (bits && nums);
1630 }
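
/*
 * Worked example: for a 16-byte counter of all 0xff and nums = 2, the
 * carry propagates through every byte and the loop leaves
 * 00 00 ... 00 01, i.e. the big-endian 128-bit value wraps modulo
 * 2^128 as CTR mode requires.
 */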
1631 
1632 static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
1633 {
1634 	struct aead_request *aead_req = req->aead_req.aead_req;
1635 	struct skcipher_request *sk_req = req->c_req.sk_req;
1636 	u32 iv_size = req->ctx->c_ctx.ivsize;
1637 	struct scatterlist *sgl;
1638 	unsigned int cryptlen;
1639 	size_t sz;
1640 	u8 *iv;
1641 
1642 	if (alg_type == SEC_SKCIPHER) {
1643 		sgl = req->c_req.encrypt ? sk_req->dst : sk_req->src;
1644 		iv = sk_req->iv;
1645 		cryptlen = sk_req->cryptlen;
1646 	} else {
1647 		sgl = req->c_req.encrypt ? aead_req->dst : aead_req->src;
1648 		iv = aead_req->iv;
1649 		cryptlen = aead_req->cryptlen;
1650 	}
1651 
1652 	if (req->ctx->c_ctx.c_mode == SEC_CMODE_CBC) {
1653 		sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
1654 					cryptlen - iv_size);
1655 		if (unlikely(sz != iv_size))
1656 			dev_err(req->ctx->dev, "copy output iv error!\n");
1657 	} else {
1658 		sz = (cryptlen + iv_size - 1) / iv_size;
1659 		ctr_iv_inc(iv, iv_size, sz);
1660 	}
1661 }
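
/*
 * In sec_update_iv(), CBC chaining takes the final ciphertext block as
 * the next IV (hence the copy from offset cryptlen - iv_size), while
 * CTR advances the counter by the number of blocks processed,
 * DIV_ROUND_UP(cryptlen, iv_size).
 */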
1662 
1663 static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
1664 				  int err)
1665 {
1666 	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
1667 
1668 	if (req->req_id >= 0)
1669 		sec_free_req_id(req);
1670 
1671 	/* Update the IV after encryption in CBC/CTR mode */
1672 	if (!err && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
1673 	    ctx->c_ctx.c_mode == SEC_CMODE_CTR) && req->c_req.encrypt)
1674 		sec_update_iv(req, SEC_SKCIPHER);
1675 
1676 	crypto_request_complete(req->base, err);
1677 	sec_alg_send_backlog(ctx, qp_ctx);
1678 }
1679 
1680 static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
1681 {
1682 	struct aead_request *aead_req = req->aead_req.aead_req;
1683 	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
1684 	size_t authsize = crypto_aead_authsize(tfm);
1685 	struct sec_aead_req *a_req = &req->aead_req;
1686 	struct sec_cipher_req *c_req = &req->c_req;
1687 	u32 data_size = aead_req->cryptlen;
1688 	u8 flage = 0;
1689 	u8 cm, cl;
1690 
1691 	/* the specification has been checked in aead_iv_demension_check() */
1692 	cl = c_req->c_ivin[0] + 1;
1693 	c_req->c_ivin[ctx->c_ctx.ivsize - cl] = 0x00;
1694 	memset(&c_req->c_ivin[ctx->c_ctx.ivsize - cl], 0, cl);
1695 	c_req->c_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] = IV_CTR_INIT;
1696 
1697 	/* the low 3 bits are L' */
1698 	flag |= c_req->c_ivin[0] & IV_CL_MASK;
1699 
1700 	/* M' occupies bits 3~5, the Adata flag is bit 6 */
1701 	cm = (authsize - IV_CM_CAL_NUM) / IV_CM_CAL_NUM;
1702 	flag |= cm << IV_CM_OFFSET;
1703 	if (aead_req->assoclen)
1704 		flag |= 0x01 << IV_FLAGS_OFFSET;
1705 
1706 	memcpy(a_req->a_ivin, c_req->c_ivin, ctx->c_ctx.ivsize);
1707 	a_req->a_ivin[0] = flag;
1708 
1709 	/*
1710 	 * The last 32 bits hold the counter's initial value, but the
1711 	 * nonce occupies the first 16 of them; the tail 16 bits are
1712 	 * filled with the ciphertext length.
1713 	 */
1714 	if (!c_req->encrypt)
1715 		data_size = aead_req->cryptlen - authsize;
1716 
1717 	a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] =
1718 			data_size & IV_LAST_BYTE_MASK;
1719 	data_size >>= IV_BYTE_OFFSET;
1720 	a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE2] =
1721 			data_size & IV_LAST_BYTE_MASK;
1722 }
1723 
1724 static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
1725 {
1726 	struct aead_request *aead_req = req->aead_req.aead_req;
1727 	struct sec_aead_req *a_req = &req->aead_req;
1728 	struct sec_cipher_req *c_req = &req->c_req;
1729 
1730 	memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
1731 
1732 	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM) {
1733 		/*
1734 		 * CCM 16-byte Cipher_IV: {1B_Flag, 13B_IV, 2B_counter};
1735 		 * the counter must be set to 0x01.
1736 		 * CCM 16-byte Auth_IV: {1B_AFlag, 13B_IV, 2B_Ptext_length}
1737 		 */
1738 		set_aead_auth_iv(ctx, req);
1739 	} else if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
1740 		/* GCM 12-byte Cipher_IV == Auth_IV */
1741 		memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE);
1742 	}
1743 }
1744 
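/*
 * Fill the auth fields of a v2 BD for CCM/GCM: the cipher key is
 * reused as the auth key, the MAC length goes into the ICV field and
 * @dir selects cipher-then-auth (encrypt) or auth-then-cipher.
 */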
1745 static void sec_auth_bd_fill_xcm(struct sec_auth_ctx *ctx, int dir,
1746 				 struct sec_req *req, struct sec_sqe *sec_sqe)
1747 {
1748 	struct sec_aead_req *a_req = &req->aead_req;
1749 	struct aead_request *aq = a_req->aead_req;
1750 	struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
1751 	size_t authsize = crypto_aead_authsize(tfm);
1752 
1753 	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
1754 	sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)authsize);
1755 
1756 	/* mode is CCM/GCM, so {A_Alg, AKey_Len, MAC_Len} stay unset */
1757 	sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr;
1758 	sec_sqe->type2.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma);
1759 	sec_sqe->type_cipher_auth |= SEC_NO_AUTH << SEC_AUTH_OFFSET;
1760 
1761 	if (dir)
1762 		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
1763 	else
1764 		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;
1765 
1766 	sec_sqe->type2.alen_ivllen = cpu_to_le32(aq->assoclen);
1767 	sec_sqe->type2.auth_src_offset = cpu_to_le16(0x0);
1768 	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
1769 
1770 	sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
1771 }
1772 
1773 static void sec_auth_bd_fill_xcm_v3(struct sec_auth_ctx *ctx, int dir,
1774 				    struct sec_req *req, struct sec_sqe3 *sqe3)
1775 {
1776 	struct sec_aead_req *a_req = &req->aead_req;
1777 	struct aead_request *aq = a_req->aead_req;
1778 	struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
1779 	size_t authsize = crypto_aead_authsize(tfm);
1780 
1781 	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
1782 	sqe3->c_icv_key |= cpu_to_le16((u16)authsize << SEC_MAC_OFFSET_V3);
1783 
1784 	/* mode is CCM/GCM, so {A_Alg, AKey_Len, MAC_Len} stay unset */
1785 	sqe3->a_key_addr = sqe3->c_key_addr;
1786 	sqe3->auth_ivin.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma);
1787 	sqe3->auth_mac_key |= SEC_NO_AUTH;
1788 
1789 	if (dir)
1790 		sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3;
1791 	else
1792 		sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3;
1793 
1794 	sqe3->a_len_key = cpu_to_le32(aq->assoclen);
1795 	sqe3->auth_src_offset = cpu_to_le16(0x0);
1796 	sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
1797 	sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma);
1798 }
1799 
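/*
 * Fill the auth fields of a v2 BD for the authenc() algorithms: the
 * MAC length, HMAC key length (both in 32-bit words) and the auth
 * algorithm are packed into mac_key_alg.
 */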
1800 static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
1801 			       struct sec_req *req, struct sec_sqe *sec_sqe)
1802 {
1803 	struct sec_aead_req *a_req = &req->aead_req;
1804 	struct sec_cipher_req *c_req = &req->c_req;
1805 	struct aead_request *aq = a_req->aead_req;
1806 	struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
1807 	size_t authsize = crypto_aead_authsize(tfm);
1808 
1809 	sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);
1810 
1811 	sec_sqe->type2.mac_key_alg = cpu_to_le32(BYTES_TO_WORDS(authsize));
1812 
1813 	sec_sqe->type2.mac_key_alg |=
1814 			cpu_to_le32((u32)BYTES_TO_WORDS(ctx->a_key_len) << SEC_AKEY_OFFSET);
1815 
1816 	sec_sqe->type2.mac_key_alg |=
1817 			cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);
1818 
1819 	if (dir) {
1820 		sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET;
1821 		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
1822 	} else {
1823 		sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE2 << SEC_AUTH_OFFSET;
1824 		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;
1825 	}
1826 	sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen);
1827 
1828 	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
1829 
1830 	sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
1831 }
1832 
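/* Build an AEAD BD: cipher part first, then the mode-specific auth part. */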
1833 static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
1834 {
1835 	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
1836 	struct sec_sqe *sec_sqe = &req->sec_sqe;
1837 	int ret;
1838 
1839 	ret = sec_skcipher_bd_fill(ctx, req);
1840 	if (unlikely(ret)) {
1841 		dev_err(ctx->dev, "skcipher bd fill failed!\n");
1842 		return ret;
1843 	}
1844 
1845 	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM ||
1846 	    ctx->c_ctx.c_mode == SEC_CMODE_GCM)
1847 		sec_auth_bd_fill_xcm(auth_ctx, req->c_req.encrypt, req, sec_sqe);
1848 	else
1849 		sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);
1850 
1851 	return 0;
1852 }
1853 
1854 static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir,
1855 				   struct sec_req *req, struct sec_sqe3 *sqe3)
1856 {
1857 	struct sec_aead_req *a_req = &req->aead_req;
1858 	struct sec_cipher_req *c_req = &req->c_req;
1859 	struct aead_request *aq = a_req->aead_req;
1860 	struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
1861 	size_t authsize = crypto_aead_authsize(tfm);
1862 
1863 	sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma);
1864 
1865 	sqe3->auth_mac_key |=
1866 			cpu_to_le32(BYTES_TO_WORDS(authsize) << SEC_MAC_OFFSET_V3);
1867 
1868 	sqe3->auth_mac_key |=
1869 			cpu_to_le32((u32)BYTES_TO_WORDS(ctx->a_key_len) << SEC_AKEY_OFFSET_V3);
1870 
1871 	sqe3->auth_mac_key |=
1872 			cpu_to_le32((u32)(ctx->a_alg) << SEC_AUTH_ALG_OFFSET_V3);
1873 
1874 	if (dir) {
1875 		sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE1);
1876 		sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3;
1877 	} else {
1878 		sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE2);
1879 		sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3;
1880 	}
1881 	sqe3->a_len_key = cpu_to_le32(c_req->c_len + aq->assoclen);
1882 
1883 	sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
1884 
1885 	sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma);
1886 }
1887 
1888 static int sec_aead_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
1889 {
1890 	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
1891 	struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3;
1892 	int ret;
1893 
1894 	ret = sec_skcipher_bd_fill_v3(ctx, req);
1895 	if (unlikely(ret)) {
1896 		dev_err(ctx->dev, "skcipher bd3 fill failed!\n");
1897 		return ret;
1898 	}
1899 
1900 	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM ||
1901 	    ctx->c_ctx.c_mode == SEC_CMODE_GCM)
1902 		sec_auth_bd_fill_xcm_v3(auth_ctx, req->c_req.encrypt,
1903 					req, sec_sqe3);
1904 	else
1905 		sec_auth_bd_fill_ex_v3(auth_ctx, req->c_req.encrypt,
1906 				       req, sec_sqe3);
1907 
1908 	return 0;
1909 }
1910 
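/*
 * AEAD completion: after a successful encryption, update the CBC IV
 * and copy the computed MAC right behind the ciphertext in the
 * destination SGL.
 */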
1911 static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
1912 {
1913 	struct aead_request *a_req = req->aead_req.aead_req;
1914 	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
1915 	size_t authsize = crypto_aead_authsize(tfm);
1916 	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
1917 	size_t sz;
1918 
1919 	if (!err && req->c_req.encrypt) {
1920 		if (c->c_ctx.c_mode == SEC_CMODE_CBC)
1921 			sec_update_iv(req, SEC_AEAD);
1922 
1923 		sz = sg_pcopy_from_buffer(a_req->dst, sg_nents(a_req->dst), req->aead_req.out_mac,
1924 					  authsize, a_req->cryptlen + a_req->assoclen);
1925 		if (unlikely(sz != authsize)) {
1926 			dev_err(c->dev, "copy out mac err!\n");
1927 			err = -EINVAL;
1928 		}
1929 	}
1930 
1931 	if (req->req_id >= 0)
1932 		sec_free_req_id(req);
1933 
1934 	crypto_request_complete(req->base, err);
1935 	sec_alg_send_backlog(c, qp_ctx);
1936 }
1937 
1938 static void sec_request_uninit(struct sec_req *req)
1939 {
1940 	if (req->req_id >= 0)
1941 		sec_free_req_id(req);
1942 }
1943 
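/*
 * Bind the request to a qp_ctx: take the first queue with a free
 * request id; if every queue is busy the request proceeds without an
 * id and relies on the last qp's backlog.
 */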
1944 static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
1945 {
1946 	struct sec_qp_ctx *qp_ctx;
1947 	int i;
1948 
1949 	for (i = 0; i < ctx->sec->ctx_q_num; i++) {
1950 		qp_ctx = &ctx->qp_ctx[i];
1951 		req->req_id = sec_alloc_req_id(req, qp_ctx);
1952 		if (req->req_id >= 0)
1953 			break;
1954 	}
1955 
1956 	req->qp_ctx = qp_ctx;
1957 	req->backlog = &qp_ctx->backlog;
1958 
1959 	return 0;
1960 }
1961 
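/*
 * Common request path: init, map and transfer buffers, output the IV
 * ahead of decryption for CBC/CTR, then send the BD. If the send
 * fails, the IV is restored and the software fallback is tried.
 */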
1962 static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
1963 {
1964 	int ret;
1965 
1966 	ret = sec_request_init(ctx, req);
1967 	if (unlikely(ret))
1968 		return ret;
1969 
1970 	ret = sec_request_transfer(ctx, req);
1971 	if (unlikely(ret))
1972 		goto err_uninit_req;
1973 
1974 	/* Output the IV before decryption in CBC/CTR mode */
1975 	if (!req->c_req.encrypt && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
1976 	    ctx->c_ctx.c_mode == SEC_CMODE_CTR))
1977 		sec_update_iv(req, ctx->alg_type);
1978 
1979 	ret = ctx->req_op->bd_send(ctx, req);
1980 	if (unlikely((ret != -EBUSY && ret != -EINPROGRESS))) {
1981 		dev_err_ratelimited(ctx->dev, "send sec request failed!\n");
1982 		goto err_send_req;
1983 	}
1984 
1985 	return ret;
1986 
1987 err_send_req:
1988 	/* On failure, restore the user's IV */
1989 	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
1990 		if (ctx->alg_type == SEC_SKCIPHER)
1991 			memcpy(req->c_req.sk_req->iv, req->c_req.c_ivin,
1992 			       ctx->c_ctx.ivsize);
1993 		else
1994 			memcpy(req->aead_req.aead_req->iv, req->c_req.c_ivin,
1995 			       ctx->c_ctx.ivsize);
1996 	}
1997 
1998 	sec_request_untransfer(ctx, req);
1999 
2000 err_uninit_req:
2001 	sec_request_uninit(req);
2002 	if (ctx->alg_type == SEC_AEAD)
2003 		ret = sec_aead_soft_crypto(ctx, req->aead_req.aead_req,
2004 					   req->c_req.encrypt);
2005 	else
2006 		ret = sec_skcipher_soft_crypto(ctx, req->c_req.sk_req,
2007 					       req->c_req.encrypt);
2008 	return ret;
2009 }
2010 
2011 static const struct sec_req_op sec_skcipher_req_ops = {
2012 	.buf_map	= sec_skcipher_sgl_map,
2013 	.buf_unmap	= sec_skcipher_sgl_unmap,
2014 	.do_transfer	= sec_skcipher_copy_iv,
2015 	.bd_fill	= sec_skcipher_bd_fill,
2016 	.bd_send	= sec_bd_send,
2017 	.callback	= sec_skcipher_callback,
2018 	.process	= sec_process,
2019 };
2020 
2021 static const struct sec_req_op sec_aead_req_ops = {
2022 	.buf_map	= sec_aead_sgl_map,
2023 	.buf_unmap	= sec_aead_sgl_unmap,
2024 	.do_transfer	= sec_aead_set_iv,
2025 	.bd_fill	= sec_aead_bd_fill,
2026 	.bd_send	= sec_bd_send,
2027 	.callback	= sec_aead_callback,
2028 	.process	= sec_process,
2029 };
2030 
2031 static const struct sec_req_op sec_skcipher_req_ops_v3 = {
2032 	.buf_map	= sec_skcipher_sgl_map,
2033 	.buf_unmap	= sec_skcipher_sgl_unmap,
2034 	.do_transfer	= sec_skcipher_copy_iv,
2035 	.bd_fill	= sec_skcipher_bd_fill_v3,
2036 	.bd_send	= sec_bd_send,
2037 	.callback	= sec_skcipher_callback,
2038 	.process	= sec_process,
2039 };
2040 
2041 static const struct sec_req_op sec_aead_req_ops_v3 = {
2042 	.buf_map	= sec_aead_sgl_map,
2043 	.buf_unmap	= sec_aead_sgl_unmap,
2044 	.do_transfer	= sec_aead_set_iv,
2045 	.bd_fill	= sec_aead_bd_fill_v3,
2046 	.bd_send	= sec_bd_send,
2047 	.callback	= sec_aead_callback,
2048 	.process	= sec_process,
2049 };
2050 
2051 static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
2052 {
2053 	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
2054 	int ret;
2055 
2056 	ret = sec_skcipher_init(tfm);
2057 	if (ret)
2058 		return ret;
2059 
2060 	if (ctx->sec->qm.ver < QM_HW_V3) {
2061 		ctx->type_supported = SEC_BD_TYPE2;
2062 		ctx->req_op = &sec_skcipher_req_ops;
2063 	} else {
2064 		ctx->type_supported = SEC_BD_TYPE3;
2065 		ctx->req_op = &sec_skcipher_req_ops_v3;
2066 	}
2067 
2068 	return ret;
2069 }
2070 
2071 static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
2072 {
2073 	sec_skcipher_uninit(tfm);
2074 }
2075 
2076 static int sec_aead_init(struct crypto_aead *tfm)
2077 {
2078 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
2079 	int ret;
2080 
2081 	crypto_aead_set_reqsize_dma(tfm, sizeof(struct sec_req));
2082 	ctx->alg_type = SEC_AEAD;
2083 	ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
2084 	if (ctx->c_ctx.ivsize < SEC_AIV_SIZE ||
2085 	    ctx->c_ctx.ivsize > SEC_IV_SIZE) {
2086 		pr_err("invalid aead iv size!\n");
2087 		return -EINVAL;
2088 	}
2089 
2090 	ret = sec_ctx_base_init(ctx);
2091 	if (ret)
2092 		return ret;
2093 	if (ctx->sec->qm.ver < QM_HW_V3) {
2094 		ctx->type_supported = SEC_BD_TYPE2;
2095 		ctx->req_op = &sec_aead_req_ops;
2096 	} else {
2097 		ctx->type_supported = SEC_BD_TYPE3;
2098 		ctx->req_op = &sec_aead_req_ops_v3;
2099 	}
2100 
2101 	ret = sec_auth_init(ctx);
2102 	if (ret)
2103 		goto err_auth_init;
2104 
2105 	ret = sec_cipher_init(ctx);
2106 	if (ret)
2107 		goto err_cipher_init;
2108 
2109 	return ret;
2110 
2111 err_cipher_init:
2112 	sec_auth_uninit(ctx);
2113 err_auth_init:
2114 	sec_ctx_base_uninit(ctx);
2115 	return ret;
2116 }
2117 
2118 static void sec_aead_exit(struct crypto_aead *tfm)
2119 {
2120 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
2121 
2122 	sec_cipher_uninit(ctx);
2123 	sec_auth_uninit(ctx);
2124 	sec_ctx_base_uninit(ctx);
2125 }
2126 
2127 static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
2128 {
2129 	struct aead_alg *alg = crypto_aead_alg(tfm);
2130 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
2131 	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
2132 	const char *aead_name = alg->base.cra_name;
2133 	int ret;
2134 
2135 	ret = sec_aead_init(tfm);
2136 	if (ret) {
2137 		pr_err("hisi_sec2: aead init error!\n");
2138 		return ret;
2139 	}
2140 
2141 	a_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
2142 	if (IS_ERR(a_ctx->hash_tfm)) {
2143 		dev_err(ctx->dev, "aead alloc shash error!\n");
2144 		sec_aead_exit(tfm);
2145 		return PTR_ERR(a_ctx->hash_tfm);
2146 	}
2147 
2148 	a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0,
2149 						     CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
2150 	if (IS_ERR(a_ctx->fallback_aead_tfm)) {
2151 		dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n");
2152 		crypto_free_shash(ctx->a_ctx.hash_tfm);
2153 		sec_aead_exit(tfm);
2154 		return PTR_ERR(a_ctx->fallback_aead_tfm);
2155 	}
2156 
2157 	return 0;
2158 }
2159 
2160 static void sec_aead_ctx_exit(struct crypto_aead *tfm)
2161 {
2162 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
2163 
2164 	crypto_free_aead(ctx->a_ctx.fallback_aead_tfm);
2165 	crypto_free_shash(ctx->a_ctx.hash_tfm);
2166 	sec_aead_exit(tfm);
2167 }
2168 
2169 static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm)
2170 {
2171 	struct aead_alg *alg = crypto_aead_alg(tfm);
2172 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
2173 	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
2174 	const char *aead_name = alg->base.cra_name;
2175 	int ret;
2176 
2177 	ret = sec_aead_init(tfm);
2178 	if (ret) {
2179 		dev_err(ctx->dev, "hisi_sec2: aead xcm init error!\n");
2180 		return ret;
2181 	}
2182 
2183 	a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0,
2184 						     CRYPTO_ALG_NEED_FALLBACK |
2185 						     CRYPTO_ALG_ASYNC);
2186 	if (IS_ERR(a_ctx->fallback_aead_tfm)) {
2187 		dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n");
2188 		sec_aead_exit(tfm);
2189 		return PTR_ERR(a_ctx->fallback_aead_tfm);
2190 	}
2191 
2192 	return 0;
2193 }
2194 
2195 static void sec_aead_xcm_ctx_exit(struct crypto_aead *tfm)
2196 {
2197 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
2198 
2199 	crypto_free_aead(ctx->a_ctx.fallback_aead_tfm);
2200 	sec_aead_exit(tfm);
2201 }
2202 
2203 static int sec_aead_sha1_ctx_init(struct crypto_aead *tfm)
2204 {
2205 	return sec_aead_ctx_init(tfm, "sha1");
2206 }
2207 
2208 static int sec_aead_sha256_ctx_init(struct crypto_aead *tfm)
2209 {
2210 	return sec_aead_ctx_init(tfm, "sha256");
2211 }
2212 
2213 static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
2214 {
2215 	return sec_aead_ctx_init(tfm, "sha512");
2216 }
2217 
2218 static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx, struct sec_req *sreq)
2219 {
2220 	u32 cryptlen = sreq->c_req.sk_req->cryptlen;
2221 	struct device *dev = ctx->dev;
2222 	u8 c_mode = ctx->c_ctx.c_mode;
2223 	int ret = 0;
2224 
2225 	switch (c_mode) {
2226 	case SEC_CMODE_XTS:
2227 		if (unlikely(cryptlen < AES_BLOCK_SIZE)) {
2228 			dev_err(dev, "skcipher XTS mode input length error!\n");
2229 			ret = -EINVAL;
2230 		}
2231 		break;
2232 	case SEC_CMODE_ECB:
2233 	case SEC_CMODE_CBC:
2234 		if (unlikely(cryptlen & (AES_BLOCK_SIZE - 1))) {
2235 			dev_err(dev, "skcipher AES input length error!\n");
2236 			ret = -EINVAL;
2237 		}
2238 		break;
2239 	case SEC_CMODE_CTR:
2240 		break;
2241 	default:
2242 		ret = -EINVAL;
2243 	}
2244 
2245 	return ret;
2246 }
2247 
2248 static int sec_skcipher_param_check(struct sec_ctx *ctx,
2249 				    struct sec_req *sreq, bool *need_fallback)
2250 {
2251 	struct skcipher_request *sk_req = sreq->c_req.sk_req;
2252 	struct device *dev = ctx->dev;
2253 	u8 c_alg = ctx->c_ctx.c_alg;
2254 
2255 	if (unlikely(!sk_req->src || !sk_req->dst)) {
2256 		dev_err(dev, "skcipher input param error!\n");
2257 		return -EINVAL;
2258 	}
2259 
2260 	if (sk_req->cryptlen > MAX_INPUT_DATA_LEN)
2261 		*need_fallback = true;
2262 
2263 	sreq->c_req.c_len = sk_req->cryptlen;
2264 
2265 	if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
2266 		sreq->use_pbuf = true;
2267 	else
2268 		sreq->use_pbuf = false;
2269 
2270 	if (c_alg == SEC_CALG_3DES) {
2271 		if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
2272 			dev_err(dev, "skcipher 3des input length error!\n");
2273 			return -EINVAL;
2274 		}
2275 		return 0;
2276 	} else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
2277 		return sec_skcipher_cryptlen_check(ctx, sreq);
2278 	}
2279 
2280 	dev_err(dev, "skcipher algorithm error!\n");
2281 
2282 	return -EINVAL;
2283 }
2284 
2285 static int sec_skcipher_soft_crypto(struct sec_ctx *ctx,
2286 				    struct skcipher_request *sreq, bool encrypt)
2287 {
2288 	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
2289 	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, c_ctx->fbtfm);
2290 	struct device *dev = ctx->dev;
2291 	int ret;
2292 
2293 	if (!c_ctx->fbtfm) {
2294 		dev_err_ratelimited(dev, "the soft tfm isn't supported in the current system.\n");
2295 		return -EINVAL;
2296 	}
2297 
2298 	skcipher_request_set_sync_tfm(subreq, c_ctx->fbtfm);
2299 
2300 	/* the software fallback needs sync mode to do crypto */
2301 	skcipher_request_set_callback(subreq, sreq->base.flags,
2302 				      NULL, NULL);
2303 	skcipher_request_set_crypt(subreq, sreq->src, sreq->dst,
2304 				   sreq->cryptlen, sreq->iv);
2305 	if (encrypt)
2306 		ret = crypto_skcipher_encrypt(subreq);
2307 	else
2308 		ret = crypto_skcipher_decrypt(subreq);
2309 
2310 	skcipher_request_zero(subreq);
2311 
2312 	return ret;
2313 }
2314 
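/*
 * skcipher entry point: a zero-length request succeeds trivially
 * (except in XTS mode); otherwise validate the parameters and
 * dispatch to the hardware queue or to the software fallback.
 */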
2315 static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
2316 {
2317 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
2318 	struct sec_req *req = skcipher_request_ctx_dma(sk_req);
2319 	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
2320 	bool need_fallback = false;
2321 	int ret;
2322 
2323 	if (!sk_req->cryptlen) {
2324 		if (ctx->c_ctx.c_mode == SEC_CMODE_XTS)
2325 			return -EINVAL;
2326 		return 0;
2327 	}
2328 
2329 	req->flag = sk_req->base.flags;
2330 	req->c_req.sk_req = sk_req;
2331 	req->c_req.encrypt = encrypt;
2332 	req->ctx = ctx;
2333 	req->base = &sk_req->base;
2334 
2335 	ret = sec_skcipher_param_check(ctx, req, &need_fallback);
2336 	if (unlikely(ret))
2337 		return -EINVAL;
2338 
2339 	if (unlikely(ctx->c_ctx.fallback || need_fallback))
2340 		return sec_skcipher_soft_crypto(ctx, sk_req, encrypt);
2341 
2342 	return ctx->req_op->process(ctx, req);
2343 }
2344 
2345 static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
2346 {
2347 	return sec_skcipher_crypto(sk_req, true);
2348 }
2349 
2350 static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
2351 {
2352 	return sec_skcipher_crypto(sk_req, false);
2353 }
2354 
2355 #define SEC_SKCIPHER_ALG(sec_cra_name, sec_set_key, \
2356 	sec_min_key_size, sec_max_key_size, blk_size, iv_size)\
2357 {\
2358 	.base = {\
2359 		.cra_name = sec_cra_name,\
2360 		.cra_driver_name = "hisi_sec_"sec_cra_name,\
2361 		.cra_priority = SEC_PRIORITY,\
2362 		.cra_flags = CRYPTO_ALG_ASYNC |\
2363 		 CRYPTO_ALG_NEED_FALLBACK,\
2364 		.cra_blocksize = blk_size,\
2365 		.cra_ctxsize = sizeof(struct sec_ctx),\
2366 		.cra_module = THIS_MODULE,\
2367 	},\
2368 	.init = sec_skcipher_ctx_init,\
2369 	.exit = sec_skcipher_ctx_exit,\
2370 	.setkey = sec_set_key,\
2371 	.decrypt = sec_skcipher_decrypt,\
2372 	.encrypt = sec_skcipher_encrypt,\
2373 	.min_keysize = sec_min_key_size,\
2374 	.max_keysize = sec_max_key_size,\
2375 	.ivsize = iv_size,\
2376 }
2377 
2378 static struct sec_skcipher sec_skciphers[] = {
2379 	{
2380 		.alg_msk = BIT(0),
2381 		.alg = SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb, AES_MIN_KEY_SIZE,
2382 					AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, 0),
2383 	},
2384 	{
2385 		.alg_msk = BIT(1),
2386 		.alg = SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc, AES_MIN_KEY_SIZE,
2387 					AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
2388 	},
2389 	{
2390 		.alg_msk = BIT(2),
2391 		.alg = SEC_SKCIPHER_ALG("ctr(aes)", sec_setkey_aes_ctr, AES_MIN_KEY_SIZE,
2392 					AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
2393 	},
2394 	{
2395 		.alg_msk = BIT(3),
2396 		.alg = SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts, SEC_XTS_MIN_KEY_SIZE,
2397 					SEC_XTS_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
2398 	},
2399 	{
2400 		.alg_msk = BIT(12),
2401 		.alg = SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc, AES_MIN_KEY_SIZE,
2402 					AES_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
2403 	},
2404 	{
2405 		.alg_msk = BIT(13),
2406 		.alg = SEC_SKCIPHER_ALG("ctr(sm4)", sec_setkey_sm4_ctr, AES_MIN_KEY_SIZE,
2407 					AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
2408 	},
2409 	{
2410 		.alg_msk = BIT(14),
2411 		.alg = SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts, SEC_XTS_MIN_KEY_SIZE,
2412 					SEC_XTS_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
2413 	},
2414 	{
2415 		.alg_msk = BIT(23),
2416 		.alg = SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb, SEC_DES3_3KEY_SIZE,
2417 					SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE, 0),
2418 	},
2419 	{
2420 		.alg_msk = BIT(24),
2421 		.alg = SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc, SEC_DES3_3KEY_SIZE,
2422 					SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE,
2423 					DES3_EDE_BLOCK_SIZE),
2424 	},
2425 };
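
/*
 * A minimal usage sketch (illustrative only, error handling elided):
 * these algorithms are reached through the generic crypto API, e.g.
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	crypto_skcipher_setkey(tfm, key, AES_MIN_KEY_SIZE);
 *	...
 *	crypto_free_skcipher(tfm);
 *
 * With a SEC device present, the "hisi_sec_" implementation should be
 * preferred because of its high cra_priority (SEC_PRIORITY).
 */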
2426 
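/*
 * Validate the CCM length field: L = iv[0] + 1 must be 2..8 bytes,
 * and for L < 4 the text length must fit into L bytes.
 */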
2427 static int aead_iv_demension_check(struct aead_request *aead_req)
2428 {
2429 	u8 cl;
2430 
2431 	cl = aead_req->iv[0] + 1;
2432 	if (cl < IV_CL_MIN || cl > IV_CL_MAX)
2433 		return -EINVAL;
2434 
2435 	if (cl < IV_CL_MID && aead_req->cryptlen >> (BYTE_BITS * cl))
2436 		return -EOVERFLOW;
2437 
2438 	return 0;
2439 }
2440 
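/*
 * Mode-specific AEAD constraints: total and AAD length limits plus
 * per-mode checks on the CCM IV, CBC MAC/key alignment and the GCM
 * minimum tag size. A failure here routes the request to the
 * software fallback.
 */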
2441 static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
2442 {
2443 	struct aead_request *req = sreq->aead_req.aead_req;
2444 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2445 	size_t sz = crypto_aead_authsize(tfm);
2446 	u8 c_mode = ctx->c_ctx.c_mode;
2447 	int ret;
2448 
2449 	if (unlikely(ctx->sec->qm.ver == QM_HW_V2 && !sreq->c_req.c_len))
2450 		return -EINVAL;
2451 
2452 	if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
2453 		     req->assoclen > SEC_MAX_AAD_LEN))
2454 		return -EINVAL;
2455 
2456 	if (c_mode == SEC_CMODE_CCM) {
2457 		if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN))
2458 			return -EINVAL;
2459 
2460 		ret = aead_iv_demension_check(req);
2461 		if (unlikely(ret))
2462 			return -EINVAL;
2463 	} else if (c_mode == SEC_CMODE_CBC) {
2464 		if (unlikely(sz & WORD_MASK))
2465 			return -EINVAL;
2466 		if (unlikely(ctx->a_ctx.a_key_len & WORD_MASK))
2467 			return -EINVAL;
2468 	} else if (c_mode == SEC_CMODE_GCM) {
2469 		if (unlikely(sz < SEC_GCM_MIN_AUTH_SZ))
2470 			return -EINVAL;
2471 	}
2472 
2473 	return 0;
2474 }
2475 
2476 static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq, bool *need_fallback)
2477 {
2478 	struct aead_request *req = sreq->aead_req.aead_req;
2479 	struct device *dev = ctx->dev;
2480 	u8 c_alg = ctx->c_ctx.c_alg;
2481 
2482 	if (unlikely(!req->src || !req->dst)) {
2483 		dev_err(dev, "aead input param error!\n");
2484 		return -EINVAL;
2485 	}
2486 
2487 	if (unlikely(ctx->c_ctx.c_mode == SEC_CMODE_CBC &&
2488 		     sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
2489 		dev_err(dev, "aead cbc mode input data length error!\n");
2490 		return -EINVAL;
2491 	}
2492 
2493 	/* Support AES or SM4 */
2494 	if (unlikely(c_alg != SEC_CALG_AES && c_alg != SEC_CALG_SM4)) {
2495 		dev_err(dev, "aead crypto alg error!\n");
2496 		return -EINVAL;
2497 	}
2498 
2499 	if (unlikely(sec_aead_spec_check(ctx, sreq))) {
2500 		*need_fallback = true;
2501 		return -EINVAL;
2502 	}
2503 
2504 	if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
2505 		SEC_PBUF_SZ)
2506 		sreq->use_pbuf = true;
2507 	else
2508 		sreq->use_pbuf = false;
2509 
2510 	return 0;
2511 }
2512 
2513 static int sec_aead_soft_crypto(struct sec_ctx *ctx,
2514 				struct aead_request *aead_req,
2515 				bool encrypt)
2516 {
2517 	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
2518 	struct aead_request *subreq;
2519 	int ret;
2520 
2521 	subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL);
2522 	if (!subreq)
2523 		return -ENOMEM;
2524 
2525 	aead_request_set_tfm(subreq, a_ctx->fallback_aead_tfm);
2526 	aead_request_set_callback(subreq, aead_req->base.flags,
2527 				  aead_req->base.complete, aead_req->base.data);
2528 	aead_request_set_crypt(subreq, aead_req->src, aead_req->dst,
2529 			       aead_req->cryptlen, aead_req->iv);
2530 	aead_request_set_ad(subreq, aead_req->assoclen);
2531 
2532 	if (encrypt)
2533 		ret = crypto_aead_encrypt(subreq);
2534 	else
2535 		ret = crypto_aead_decrypt(subreq);
2536 	aead_request_free(subreq);
2537 
2538 	return ret;
2539 }
2540 
2541 static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
2542 {
2543 	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
2544 	struct sec_req *req = aead_request_ctx_dma(a_req);
2545 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
2546 	size_t sz = crypto_aead_authsize(tfm);
2547 	bool need_fallback = false;
2548 	int ret;
2549 
2550 	req->flag = a_req->base.flags;
2551 	req->aead_req.aead_req = a_req;
2552 	req->c_req.encrypt = encrypt;
2553 	req->ctx = ctx;
2554 	req->base = &a_req->base;
2555 	req->c_req.c_len = a_req->cryptlen - (req->c_req.encrypt ? 0 : sz);
2556 
2557 	ret = sec_aead_param_check(ctx, req, &need_fallback);
2558 	if (unlikely(ret)) {
2559 		if (need_fallback)
2560 			return sec_aead_soft_crypto(ctx, a_req, encrypt);
2561 		return -EINVAL;
2562 	}
2563 
2564 	return ctx->req_op->process(ctx, req);
2565 }
2566 
2567 static int sec_aead_encrypt(struct aead_request *a_req)
2568 {
2569 	return sec_aead_crypto(a_req, true);
2570 }
2571 
2572 static int sec_aead_decrypt(struct aead_request *a_req)
2573 {
2574 	return sec_aead_crypto(a_req, false);
2575 }
2576 
2577 #define SEC_AEAD_ALG(sec_cra_name, sec_set_key, ctx_init,\
2578 			 ctx_exit, blk_size, iv_size, max_authsize)\
2579 {\
2580 	.base = {\
2581 		.cra_name = sec_cra_name,\
2582 		.cra_driver_name = "hisi_sec_"sec_cra_name,\
2583 		.cra_priority = SEC_PRIORITY,\
2584 		.cra_flags = CRYPTO_ALG_ASYNC |\
2585 		 CRYPTO_ALG_NEED_FALLBACK,\
2586 		.cra_blocksize = blk_size,\
2587 		.cra_ctxsize = sizeof(struct sec_ctx),\
2588 		.cra_module = THIS_MODULE,\
2589 	},\
2590 	.init = ctx_init,\
2591 	.exit = ctx_exit,\
2592 	.setkey = sec_set_key,\
2593 	.setauthsize = sec_aead_setauthsize,\
2594 	.decrypt = sec_aead_decrypt,\
2595 	.encrypt = sec_aead_encrypt,\
2596 	.ivsize = iv_size,\
2597 	.maxauthsize = max_authsize,\
2598 }
2599 
2600 static struct sec_aead sec_aeads[] = {
2601 	{
2602 		.alg_msk = BIT(6),
2603 		.alg = SEC_AEAD_ALG("ccm(aes)", sec_setkey_aes_ccm, sec_aead_xcm_ctx_init,
2604 				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE,
2605 				    AES_BLOCK_SIZE),
2606 	},
2607 	{
2608 		.alg_msk = BIT(7),
2609 		.alg = SEC_AEAD_ALG("gcm(aes)", sec_setkey_aes_gcm, sec_aead_xcm_ctx_init,
2610 				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE,
2611 				    AES_BLOCK_SIZE),
2612 	},
2613 	{
2614 		.alg_msk = BIT(17),
2615 		.alg = SEC_AEAD_ALG("ccm(sm4)", sec_setkey_sm4_ccm, sec_aead_xcm_ctx_init,
2616 				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE,
2617 				    AES_BLOCK_SIZE),
2618 	},
2619 	{
2620 		.alg_msk = BIT(18),
2621 		.alg = SEC_AEAD_ALG("gcm(sm4)", sec_setkey_sm4_gcm, sec_aead_xcm_ctx_init,
2622 				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE,
2623 				    AES_BLOCK_SIZE),
2624 	},
2625 	{
2626 		.alg_msk = BIT(43),
2627 		.alg = SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))", sec_setkey_aes_cbc_sha1,
2628 				    sec_aead_sha1_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
2629 				    AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),
2630 	},
2631 	{
2632 		.alg_msk = BIT(44),
2633 		.alg = SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))", sec_setkey_aes_cbc_sha256,
2634 				    sec_aead_sha256_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
2635 				    AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),
2636 	},
2637 	{
2638 		.alg_msk = BIT(45),
2639 		.alg = SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))", sec_setkey_aes_cbc_sha512,
2640 				    sec_aead_sha512_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
2641 				    AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
2642 	},
2643 };
2644 
2645 static void sec_unregister_skcipher(u64 alg_mask, int end)
2646 {
2647 	int i;
2648 
2649 	for (i = 0; i < end; i++)
2650 		if (sec_skciphers[i].alg_msk & alg_mask)
2651 			crypto_unregister_skcipher(&sec_skciphers[i].alg);
2652 }
2653 
2654 static int sec_register_skcipher(u64 alg_mask)
2655 {
2656 	int i, ret, count;
2657 
2658 	count = ARRAY_SIZE(sec_skciphers);
2659 
2660 	for (i = 0; i < count; i++) {
2661 		if (!(sec_skciphers[i].alg_msk & alg_mask))
2662 			continue;
2663 
2664 		ret = crypto_register_skcipher(&sec_skciphers[i].alg);
2665 		if (ret)
2666 			goto err;
2667 	}
2668 
2669 	return 0;
2670 
2671 err:
2672 	sec_unregister_skcipher(alg_mask, i);
2673 
2674 	return ret;
2675 }
2676 
2677 static void sec_unregister_aead(u64 alg_mask, int end)
2678 {
2679 	int i;
2680 
2681 	for (i = 0; i < end; i++)
2682 		if (sec_aeads[i].alg_msk & alg_mask)
2683 			crypto_unregister_aead(&sec_aeads[i].alg);
2684 }
2685 
2686 static int sec_register_aead(u64 alg_mask)
2687 {
2688 	int i, ret, count;
2689 
2690 	count = ARRAY_SIZE(sec_aeads);
2691 
2692 	for (i = 0; i < count; i++) {
2693 		if (!(sec_aeads[i].alg_msk & alg_mask))
2694 			continue;
2695 
2696 		ret = crypto_register_aead(&sec_aeads[i].alg);
2697 		if (ret)
2698 			goto err;
2699 	}
2700 
2701 	return 0;
2702 
2703 err:
2704 	sec_unregister_aead(alg_mask, i);
2705 
2706 	return ret;
2707 }
2708 
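/*
 * Register the algorithms enabled in the device's capability bitmap.
 * Registration happens once: sec_available_devs acts as a refcount
 * across SEC devices.
 */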
2709 int sec_register_to_crypto(struct hisi_qm *qm)
2710 {
2711 	u64 alg_mask;
2712 	int ret = 0;
2713 
2714 	alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_TB,
2715 				      SEC_DRV_ALG_BITMAP_LOW_TB);
2716 
2717 	mutex_lock(&sec_algs_lock);
2718 	if (sec_available_devs) {
2719 		sec_available_devs++;
2720 		goto unlock;
2721 	}
2722 
2723 	ret = sec_register_skcipher(alg_mask);
2724 	if (ret)
2725 		goto unlock;
2726 
2727 	ret = sec_register_aead(alg_mask);
2728 	if (ret)
2729 		goto unreg_skcipher;
2730 
2731 	sec_available_devs++;
2732 	mutex_unlock(&sec_algs_lock);
2733 
2734 	return 0;
2735 
2736 unreg_skcipher:
2737 	sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));
2738 unlock:
2739 	mutex_unlock(&sec_algs_lock);
2740 	return ret;
2741 }
2742 
2743 void sec_unregister_from_crypto(struct hisi_qm *qm)
2744 {
2745 	u64 alg_mask;
2746 
2747 	alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_TB,
2748 				      SEC_DRV_ALG_BITMAP_LOW_TB);
2749 
2750 	mutex_lock(&sec_algs_lock);
2751 	if (--sec_available_devs)
2752 		goto unlock;
2753 
2754 	sec_unregister_aead(alg_mask, ARRAY_SIZE(sec_aeads));
2755 	sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));
2756 
2757 unlock:
2758 	mutex_unlock(&sec_algs_lock);
2759 }
2760