Lines Matching +full:input +full:- +full:depth

1 // SPDX-License-Identifier: GPL-2.0
17 #include <linux/dma-mapping.h>
62 #define SEC_TOTAL_IV_SZ(depth) (SEC_IV_SIZE * (depth)) argument
69 #define SEC_TOTAL_MAC_SZ(depth) (SEC_MAX_MAC_LEN * (depth)) argument
77 #define SEC_PBUF_PAGE_NUM(depth) ((depth) / SEC_PBUF_NUM) argument
78 #define SEC_PBUF_LEFT_SZ(depth) (SEC_PBUF_PKG * ((depth) - \ argument
79 SEC_PBUF_PAGE_NUM(depth) * SEC_PBUF_NUM))
80 #define SEC_TOTAL_PBUF_SZ(depth) (PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) + \ argument
81 SEC_PBUF_LEFT_SZ(depth))
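
The three pbuf macros above split a queue's packet-buffer area into whole pages plus a tail that does not fill a page. Below is a minimal user-space sketch of the same arithmetic; the values chosen for PAGE_SIZE, SEC_PBUF_PKG (bytes reserved per request) and SEC_PBUF_NUM (requests that fit in one page) are illustrative assumptions, not taken from this listing.

#include <stdio.h>

/* Assumed example values; the real driver derives these from its own constants. */
#define PAGE_SIZE     4096UL
#define SEC_PBUF_PKG   608UL                       /* hypothetical per-request pbuf package size */
#define SEC_PBUF_NUM  (PAGE_SIZE / SEC_PBUF_PKG)   /* packages that fit in one page */

#define SEC_PBUF_PAGE_NUM(depth)  ((depth) / SEC_PBUF_NUM)
#define SEC_PBUF_LEFT_SZ(depth)   (SEC_PBUF_PKG * ((depth) - \
                                   SEC_PBUF_PAGE_NUM(depth) * SEC_PBUF_NUM))
#define SEC_TOTAL_PBUF_SZ(depth)  (PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) + \
                                   SEC_PBUF_LEFT_SZ(depth))

int main(void)
{
	unsigned long depth = 1024;   /* example queue depth */

	printf("full pages : %lu\n", SEC_PBUF_PAGE_NUM(depth));
	printf("tail bytes : %lu\n", SEC_PBUF_LEFT_SZ(depth));
	printf("total bytes: %lu\n", SEC_TOTAL_PBUF_SZ(depth));
	return 0;
}
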
120 /* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
123 if (req->c_req.encrypt) in sec_alloc_queue_id()
124 return (u32)atomic_inc_return(&ctx->enc_qcyclic) % in sec_alloc_queue_id()
125 ctx->hlf_q_num; in sec_alloc_queue_id()
127 return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num + in sec_alloc_queue_id()
128 ctx->hlf_q_num; in sec_alloc_queue_id()
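
sec_alloc_queue_id() above spreads work over the context's queues: encrypt requests cycle through the lower half [0, hlf_q_num) and decrypt requests through the upper half [hlf_q_num, 2 * hlf_q_num). A stand-alone sketch of that selection policy, using C11 atomics in place of the kernel's atomic_t; the names here are illustrative, not the driver's.

#include <stdatomic.h>
#include <stdio.h>

struct balancer {
	atomic_uint enc_cyclic;   /* round-robin counter for encrypt requests */
	atomic_uint dec_cyclic;   /* round-robin counter for decrypt requests */
	unsigned int hlf_q_num;   /* half of the queues owned by this context */
};

/* Encrypt -> queues [0, hlf_q_num), decrypt -> queues [hlf_q_num, 2 * hlf_q_num). */
static unsigned int pick_queue(struct balancer *b, int encrypt)
{
	if (encrypt)
		return atomic_fetch_add(&b->enc_cyclic, 1) % b->hlf_q_num;

	return atomic_fetch_add(&b->dec_cyclic, 1) % b->hlf_q_num + b->hlf_q_num;
}

int main(void)
{
	struct balancer b = { .hlf_q_num = 4 };

	for (int i = 0; i < 6; i++)
		printf("enc -> q%u, dec -> q%u\n",
		       pick_queue(&b, 1), pick_queue(&b, 0));
	return 0;
}
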
133 if (req->c_req.encrypt) in sec_free_queue_id()
134 atomic_dec(&ctx->enc_qcyclic); in sec_free_queue_id()
136 atomic_dec(&ctx->dec_qcyclic); in sec_free_queue_id()
143 spin_lock_bh(&qp_ctx->req_lock); in sec_alloc_req_id()
144 req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 0, qp_ctx->qp->sq_depth, GFP_ATOMIC); in sec_alloc_req_id()
145 spin_unlock_bh(&qp_ctx->req_lock); in sec_alloc_req_id()
147 dev_err(req->ctx->dev, "alloc req id fail!\n"); in sec_alloc_req_id()
151 req->qp_ctx = qp_ctx; in sec_alloc_req_id()
152 qp_ctx->req_list[req_id] = req; in sec_alloc_req_id()
159 struct sec_qp_ctx *qp_ctx = req->qp_ctx; in sec_free_req_id()
160 int req_id = req->req_id; in sec_free_req_id()
162 if (unlikely(req_id < 0 || req_id >= qp_ctx->qp->sq_depth)) { in sec_free_req_id()
163 dev_err(req->ctx->dev, "free request id invalid!\n"); in sec_free_req_id()
167 qp_ctx->req_list[req_id] = NULL; in sec_free_req_id()
168 req->qp_ctx = NULL; in sec_free_req_id()
170 spin_lock_bh(&qp_ctx->req_lock); in sec_free_req_id()
171 idr_remove(&qp_ctx->req_idr, req_id); in sec_free_req_id()
172 spin_unlock_bh(&qp_ctx->req_lock); in sec_free_req_id()
179 status->done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK; in pre_parse_finished_bd()
180 status->icv = (le16_to_cpu(bd->type2.done_flag) & SEC_ICV_MASK) >> 1; in pre_parse_finished_bd()
181 status->flag = (le16_to_cpu(bd->type2.done_flag) & in pre_parse_finished_bd()
183 status->tag = le16_to_cpu(bd->type2.tag); in pre_parse_finished_bd()
184 status->err_type = bd->type2.error_type; in pre_parse_finished_bd()
186 return bd->type_cipher_auth & SEC_TYPE_MASK; in pre_parse_finished_bd()
193 status->done = le16_to_cpu(bd3->done_flag) & SEC_DONE_MASK; in pre_parse_finished_bd3()
194 status->icv = (le16_to_cpu(bd3->done_flag) & SEC_ICV_MASK) >> 1; in pre_parse_finished_bd3()
195 status->flag = (le16_to_cpu(bd3->done_flag) & in pre_parse_finished_bd3()
197 status->tag = le64_to_cpu(bd3->tag); in pre_parse_finished_bd3()
198 status->err_type = bd3->error_type; in pre_parse_finished_bd3()
200 return le32_to_cpu(bd3->bd_param) & SEC_TYPE_MASK; in pre_parse_finished_bd3()
206 struct sec_ctx *ctx = req->ctx; in sec_cb_status_check()
208 if (unlikely(req->err_type || status->done != SEC_SQE_DONE)) { in sec_cb_status_check()
209 dev_err_ratelimited(ctx->dev, "err_type[%d], done[%u]\n", in sec_cb_status_check()
210 req->err_type, status->done); in sec_cb_status_check()
211 return -EIO; in sec_cb_status_check()
214 if (unlikely(ctx->alg_type == SEC_SKCIPHER)) { in sec_cb_status_check()
215 if (unlikely(status->flag != SEC_SQE_CFLAG)) { in sec_cb_status_check()
216 dev_err_ratelimited(ctx->dev, "flag[%u]\n", in sec_cb_status_check()
217 status->flag); in sec_cb_status_check()
218 return -EIO; in sec_cb_status_check()
220 } else if (unlikely(ctx->alg_type == SEC_AEAD)) { in sec_cb_status_check()
221 if (unlikely(status->flag != SEC_SQE_AEAD_FLAG || in sec_cb_status_check()
222 status->icv == SEC_ICV_ERR)) { in sec_cb_status_check()
223 dev_err_ratelimited(ctx->dev, in sec_cb_status_check()
225 status->flag, status->icv); in sec_cb_status_check()
226 return -EBADMSG; in sec_cb_status_check()
235 struct sec_qp_ctx *qp_ctx = qp->qp_ctx; in sec_req_cb()
236 struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx; in sec_req_cb()
237 u8 type_supported = qp_ctx->ctx->type_supported; in sec_req_cb()
246 req = qp_ctx->req_list[status.tag]; in sec_req_cb()
253 atomic64_inc(&dfx->err_bd_cnt); in sec_req_cb()
259 atomic64_inc(&dfx->invalid_req_cnt); in sec_req_cb()
260 atomic_inc(&qp->qp_status.used); in sec_req_cb()
264 req->err_type = status.err_type; in sec_req_cb()
265 ctx = req->ctx; in sec_req_cb()
268 atomic64_inc(&dfx->done_flag_cnt); in sec_req_cb()
270 atomic64_inc(&dfx->recv_cnt); in sec_req_cb()
272 ctx->req_op->buf_unmap(ctx, req); in sec_req_cb()
274 ctx->req_op->callback(ctx, req, err); in sec_req_cb()
279 struct sec_qp_ctx *qp_ctx = req->qp_ctx; in sec_bd_send()
282 if (ctx->fake_req_limit <= in sec_bd_send()
283 atomic_read(&qp_ctx->qp->qp_status.used) && in sec_bd_send()
284 !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)) in sec_bd_send()
285 return -EBUSY; in sec_bd_send()
287 spin_lock_bh(&qp_ctx->req_lock); in sec_bd_send()
288 ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe); in sec_bd_send()
289 if (ctx->fake_req_limit <= in sec_bd_send()
290 atomic_read(&qp_ctx->qp->qp_status.used) && !ret) { in sec_bd_send()
291 list_add_tail(&req->backlog_head, &qp_ctx->backlog); in sec_bd_send()
292 atomic64_inc(&ctx->sec->debug.dfx.send_cnt); in sec_bd_send()
293 atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt); in sec_bd_send()
294 spin_unlock_bh(&qp_ctx->req_lock); in sec_bd_send()
295 return -EBUSY; in sec_bd_send()
297 spin_unlock_bh(&qp_ctx->req_lock); in sec_bd_send()
299 if (unlikely(ret == -EBUSY)) in sec_bd_send()
300 return -ENOBUFS; in sec_bd_send()
303 ret = -EINPROGRESS; in sec_bd_send()
304 atomic64_inc(&ctx->sec->debug.dfx.send_cnt); in sec_bd_send()
313 u16 q_depth = res->depth; in sec_alloc_civ_resource()
316 res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth), in sec_alloc_civ_resource()
317 &res->c_ivin_dma, GFP_KERNEL); in sec_alloc_civ_resource()
318 if (!res->c_ivin) in sec_alloc_civ_resource()
319 return -ENOMEM; in sec_alloc_civ_resource()
322 res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE; in sec_alloc_civ_resource()
323 res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE; in sec_alloc_civ_resource()
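
sec_alloc_civ_resource() shows the allocation pattern repeated for every per-queue resource in this file: a single dma_alloc_coherent() call sized for the whole queue depth, then each per-request slot pointing at its own slice of that buffer. A condensed kernel-style sketch of the pattern, based only on the lines above; the struct and function names are invented for illustration and error unwinding is trimmed.

#include <linux/dma-mapping.h>

/* Illustrative slot layout mirroring the c_ivin/a_ivin/out_mac resources. */
struct iv_slot {
	u8 *c_ivin;
	dma_addr_t c_ivin_dma;
};

/* 'res' points to an array of 'depth' slots; slot 0 owns the whole buffer. */
static int alloc_iv_slots(struct device *dev, struct iv_slot *res,
			  u16 depth, size_t iv_size)
{
	int i;

	/* One coherent buffer for the whole queue... */
	res->c_ivin = dma_alloc_coherent(dev, depth * iv_size,
					 &res->c_ivin_dma, GFP_KERNEL);
	if (!res->c_ivin)
		return -ENOMEM;

	/* ...then every request slot points into its own slice of it. */
	for (i = 1; i < depth; i++) {
		res[i].c_ivin = res->c_ivin + i * iv_size;
		res[i].c_ivin_dma = res->c_ivin_dma + i * iv_size;
	}

	return 0;
}
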
331 if (res->c_ivin) in sec_free_civ_resource()
332 dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth), in sec_free_civ_resource()
333 res->c_ivin, res->c_ivin_dma); in sec_free_civ_resource()
338 u16 q_depth = res->depth; in sec_alloc_aiv_resource()
341 res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth), in sec_alloc_aiv_resource()
342 &res->a_ivin_dma, GFP_KERNEL); in sec_alloc_aiv_resource()
343 if (!res->a_ivin) in sec_alloc_aiv_resource()
344 return -ENOMEM; in sec_alloc_aiv_resource()
347 res[i].a_ivin_dma = res->a_ivin_dma + i * SEC_IV_SIZE; in sec_alloc_aiv_resource()
348 res[i].a_ivin = res->a_ivin + i * SEC_IV_SIZE; in sec_alloc_aiv_resource()
356 if (res->a_ivin) in sec_free_aiv_resource()
357 dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth), in sec_free_aiv_resource()
358 res->a_ivin, res->a_ivin_dma); in sec_free_aiv_resource()
363 u16 q_depth = res->depth; in sec_alloc_mac_resource()
366 res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ(q_depth) << 1, in sec_alloc_mac_resource()
367 &res->out_mac_dma, GFP_KERNEL); in sec_alloc_mac_resource()
368 if (!res->out_mac) in sec_alloc_mac_resource()
369 return -ENOMEM; in sec_alloc_mac_resource()
372 res[i].out_mac_dma = res->out_mac_dma + in sec_alloc_mac_resource()
374 res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1); in sec_alloc_mac_resource()
382 if (res->out_mac) in sec_free_mac_resource()
383 dma_free_coherent(dev, SEC_TOTAL_MAC_SZ(res->depth) << 1, in sec_free_mac_resource()
384 res->out_mac, res->out_mac_dma); in sec_free_mac_resource()
389 if (res->pbuf) in sec_free_pbuf_resource()
390 dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ(res->depth), in sec_free_pbuf_resource()
391 res->pbuf, res->pbuf_dma); in sec_free_pbuf_resource()
400 u16 q_depth = res->depth; in sec_alloc_pbuf_resource()
405 res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ(q_depth), in sec_alloc_pbuf_resource()
406 &res->pbuf_dma, GFP_KERNEL); in sec_alloc_pbuf_resource()
407 if (!res->pbuf) in sec_alloc_pbuf_resource()
408 return -ENOMEM; in sec_alloc_pbuf_resource()
424 res[k].pbuf = res->pbuf + in sec_alloc_pbuf_resource()
426 res[k].pbuf_dma = res->pbuf_dma + in sec_alloc_pbuf_resource()
437 struct sec_alg_res *res = qp_ctx->res; in sec_alg_resource_alloc()
438 struct device *dev = ctx->dev; in sec_alg_resource_alloc()
445 if (ctx->alg_type == SEC_AEAD) { in sec_alg_resource_alloc()
454 if (ctx->pbuf_supported) { in sec_alg_resource_alloc()
465 if (ctx->alg_type == SEC_AEAD) in sec_alg_resource_alloc()
466 sec_free_mac_resource(dev, qp_ctx->res); in sec_alg_resource_alloc()
468 if (ctx->alg_type == SEC_AEAD) in sec_alg_resource_alloc()
478 struct device *dev = ctx->dev; in sec_alg_resource_free()
480 sec_free_civ_resource(dev, qp_ctx->res); in sec_alg_resource_free()
482 if (ctx->pbuf_supported) in sec_alg_resource_free()
483 sec_free_pbuf_resource(dev, qp_ctx->res); in sec_alg_resource_free()
484 if (ctx->alg_type == SEC_AEAD) in sec_alg_resource_free()
485 sec_free_mac_resource(dev, qp_ctx->res); in sec_alg_resource_free()
491 u16 q_depth = qp_ctx->qp->sq_depth; in sec_alloc_qp_ctx_resource()
492 struct device *dev = ctx->dev; in sec_alloc_qp_ctx_resource()
493 int ret = -ENOMEM; in sec_alloc_qp_ctx_resource()
495 qp_ctx->req_list = kcalloc(q_depth, sizeof(struct sec_req *), GFP_KERNEL); in sec_alloc_qp_ctx_resource()
496 if (!qp_ctx->req_list) in sec_alloc_qp_ctx_resource()
499 qp_ctx->res = kcalloc(q_depth, sizeof(struct sec_alg_res), GFP_KERNEL); in sec_alloc_qp_ctx_resource()
500 if (!qp_ctx->res) in sec_alloc_qp_ctx_resource()
502 qp_ctx->res->depth = q_depth; in sec_alloc_qp_ctx_resource()
504 qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR); in sec_alloc_qp_ctx_resource()
505 if (IS_ERR(qp_ctx->c_in_pool)) { in sec_alloc_qp_ctx_resource()
506 dev_err(dev, "fail to create sgl pool for input!\n"); in sec_alloc_qp_ctx_resource()
510 qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR); in sec_alloc_qp_ctx_resource()
511 if (IS_ERR(qp_ctx->c_out_pool)) { in sec_alloc_qp_ctx_resource()
523 hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool); in sec_alloc_qp_ctx_resource()
525 hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool); in sec_alloc_qp_ctx_resource()
527 kfree(qp_ctx->res); in sec_alloc_qp_ctx_resource()
529 kfree(qp_ctx->req_list); in sec_alloc_qp_ctx_resource()
535 struct device *dev = ctx->dev; in sec_free_qp_ctx_resource()
538 hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool); in sec_free_qp_ctx_resource()
539 hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool); in sec_free_qp_ctx_resource()
540 kfree(qp_ctx->res); in sec_free_qp_ctx_resource()
541 kfree(qp_ctx->req_list); in sec_free_qp_ctx_resource()
551 qp_ctx = &ctx->qp_ctx[qp_ctx_id]; in sec_create_qp_ctx()
552 qp = ctx->qps[qp_ctx_id]; in sec_create_qp_ctx()
553 qp->req_type = 0; in sec_create_qp_ctx()
554 qp->qp_ctx = qp_ctx; in sec_create_qp_ctx()
555 qp_ctx->qp = qp; in sec_create_qp_ctx()
556 qp_ctx->ctx = ctx; in sec_create_qp_ctx()
558 qp->req_cb = sec_req_cb; in sec_create_qp_ctx()
560 spin_lock_init(&qp_ctx->req_lock); in sec_create_qp_ctx()
561 idr_init(&qp_ctx->req_idr); in sec_create_qp_ctx()
562 INIT_LIST_HEAD(&qp_ctx->backlog); in sec_create_qp_ctx()
577 idr_destroy(&qp_ctx->req_idr); in sec_create_qp_ctx()
584 hisi_qm_stop_qp(qp_ctx->qp); in sec_release_qp_ctx()
586 idr_destroy(&qp_ctx->req_idr); in sec_release_qp_ctx()
594 ctx->qps = sec_create_qps(); in sec_ctx_base_init()
595 if (!ctx->qps) { in sec_ctx_base_init()
597 return -ENODEV; in sec_ctx_base_init()
600 sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm); in sec_ctx_base_init()
601 ctx->sec = sec; in sec_ctx_base_init()
602 ctx->dev = &sec->qm.pdev->dev; in sec_ctx_base_init()
603 ctx->hlf_q_num = sec->ctx_q_num >> 1; in sec_ctx_base_init()
605 ctx->pbuf_supported = ctx->sec->iommu_used; in sec_ctx_base_init()
607 /* Half of queue depth is taken as fake requests limit in the queue. */ in sec_ctx_base_init()
608 ctx->fake_req_limit = ctx->qps[0]->sq_depth >> 1; in sec_ctx_base_init()
609 ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx), in sec_ctx_base_init()
611 if (!ctx->qp_ctx) { in sec_ctx_base_init()
612 ret = -ENOMEM; in sec_ctx_base_init()
616 for (i = 0; i < sec->ctx_q_num; i++) { in sec_ctx_base_init()
617 ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0); in sec_ctx_base_init()
625 for (i = i - 1; i >= 0; i--) in sec_ctx_base_init()
626 sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]); in sec_ctx_base_init()
627 kfree(ctx->qp_ctx); in sec_ctx_base_init()
629 sec_destroy_qps(ctx->qps, sec->ctx_q_num); in sec_ctx_base_init()
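
The comment at line 607 explains the soft limit used by sec_bd_send() earlier in this listing: half of the hardware queue depth is treated as the usable capacity, and once that many requests are in flight, new work is either backlogged (if the caller set CRYPTO_TFM_REQ_MAY_BACKLOG) or refused with -EBUSY. A small sketch of that admission decision; the function and flag names are invented for illustration.

#include <stdio.h>

#define MAY_BACKLOG 0x1   /* stands in for CRYPTO_TFM_REQ_MAY_BACKLOG */

/*
 * Returns 0 when the request may be sent to hardware, 1 when it should be
 * parked on the software backlog, and -1 (think -EBUSY) when it must be
 * bounced back to the caller.
 */
static int admit_request(unsigned int in_flight, unsigned int sq_depth,
			 unsigned int flags)
{
	unsigned int fake_req_limit = sq_depth >> 1;   /* half of queue depth */

	if (in_flight < fake_req_limit)
		return 0;
	if (flags & MAY_BACKLOG)
		return 1;
	return -1;
}

int main(void)
{
	printf("%d\n", admit_request(100, 1024, 0));            /* 0: send  */
	printf("%d\n", admit_request(512, 1024, MAY_BACKLOG));  /* 1: queue */
	printf("%d\n", admit_request(512, 1024, 0));            /* -1: busy */
	return 0;
}
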
637 for (i = 0; i < ctx->sec->ctx_q_num; i++) in sec_ctx_base_uninit()
638 sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]); in sec_ctx_base_uninit()
640 sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num); in sec_ctx_base_uninit()
641 kfree(ctx->qp_ctx); in sec_ctx_base_uninit()
646 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; in sec_cipher_init()
648 c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE, in sec_cipher_init()
649 &c_ctx->c_key_dma, GFP_KERNEL); in sec_cipher_init()
650 if (!c_ctx->c_key) in sec_cipher_init()
651 return -ENOMEM; in sec_cipher_init()
658 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; in sec_cipher_uninit()
660 memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE); in sec_cipher_uninit()
661 dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE, in sec_cipher_uninit()
662 c_ctx->c_key, c_ctx->c_key_dma); in sec_cipher_uninit()
667 struct sec_auth_ctx *a_ctx = &ctx->a_ctx; in sec_auth_init()
669 a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_AKEY_SIZE, in sec_auth_init()
670 &a_ctx->a_key_dma, GFP_KERNEL); in sec_auth_init()
671 if (!a_ctx->a_key) in sec_auth_init()
672 return -ENOMEM; in sec_auth_init()
679 struct sec_auth_ctx *a_ctx = &ctx->a_ctx; in sec_auth_uninit()
681 memzero_explicit(a_ctx->a_key, SEC_MAX_AKEY_SIZE); in sec_auth_uninit()
682 dma_free_coherent(ctx->dev, SEC_MAX_AKEY_SIZE, in sec_auth_uninit()
683 a_ctx->a_key, a_ctx->a_key_dma); in sec_auth_uninit()
688 const char *alg = crypto_tfm_alg_name(&tfm->base); in sec_skcipher_fbtfm_init()
690 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; in sec_skcipher_fbtfm_init()
692 c_ctx->fallback = false; in sec_skcipher_fbtfm_init()
698 c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0, in sec_skcipher_fbtfm_init()
700 if (IS_ERR(c_ctx->fbtfm)) { in sec_skcipher_fbtfm_init()
702 return PTR_ERR(c_ctx->fbtfm); in sec_skcipher_fbtfm_init()
713 ctx->alg_type = SEC_SKCIPHER; in sec_skcipher_init()
715 ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm); in sec_skcipher_init()
716 if (ctx->c_ctx.ivsize > SEC_IV_SIZE) { in sec_skcipher_init()
718 return -EINVAL; in sec_skcipher_init()
746 if (ctx->c_ctx.fbtfm) in sec_skcipher_uninit()
747 crypto_free_sync_skcipher(ctx->c_ctx.fbtfm); in sec_skcipher_uninit()
758 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; in sec_skcipher_3des_setkey()
767 c_ctx->c_key_len = SEC_CKEY_3DES_2KEY; in sec_skcipher_3des_setkey()
770 c_ctx->c_key_len = SEC_CKEY_3DES_3KEY; in sec_skcipher_3des_setkey()
773 return -EINVAL; in sec_skcipher_3des_setkey()
786 c_ctx->c_key_len = SEC_CKEY_128BIT; in sec_skcipher_aes_sm4_setkey()
789 c_ctx->fallback = true; in sec_skcipher_aes_sm4_setkey()
792 c_ctx->c_key_len = SEC_CKEY_256BIT; in sec_skcipher_aes_sm4_setkey()
796 return -EINVAL; in sec_skcipher_aes_sm4_setkey()
799 if (c_ctx->c_alg == SEC_CALG_SM4 && in sec_skcipher_aes_sm4_setkey()
802 return -EINVAL; in sec_skcipher_aes_sm4_setkey()
806 c_ctx->c_key_len = SEC_CKEY_128BIT; in sec_skcipher_aes_sm4_setkey()
809 c_ctx->c_key_len = SEC_CKEY_192BIT; in sec_skcipher_aes_sm4_setkey()
812 c_ctx->c_key_len = SEC_CKEY_256BIT; in sec_skcipher_aes_sm4_setkey()
816 return -EINVAL; in sec_skcipher_aes_sm4_setkey()
829 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; in sec_skcipher_setkey()
830 struct device *dev = ctx->dev; in sec_skcipher_setkey()
841 c_ctx->c_alg = c_alg; in sec_skcipher_setkey()
842 c_ctx->c_mode = c_mode; in sec_skcipher_setkey()
854 return -EINVAL; in sec_skcipher_setkey()
862 memcpy(c_ctx->c_key, key, keylen); in sec_skcipher_setkey()
863 if (c_ctx->fallback && c_ctx->fbtfm) { in sec_skcipher_setkey()
864 ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen); in sec_skcipher_setkey()
893 struct sec_aead_req *a_req = &req->aead_req; in sec_cipher_pbuf_map()
894 struct aead_request *aead_req = a_req->aead_req; in sec_cipher_pbuf_map()
895 struct sec_cipher_req *c_req = &req->c_req; in sec_cipher_pbuf_map()
896 struct sec_qp_ctx *qp_ctx = req->qp_ctx; in sec_cipher_pbuf_map()
897 struct device *dev = ctx->dev; in sec_cipher_pbuf_map()
899 int req_id = req->req_id; in sec_cipher_pbuf_map()
904 if (ctx->alg_type == SEC_AEAD) in sec_cipher_pbuf_map()
905 copy_size = aead_req->cryptlen + aead_req->assoclen; in sec_cipher_pbuf_map()
907 copy_size = c_req->c_len; in sec_cipher_pbuf_map()
910 qp_ctx->res[req_id].pbuf, copy_size); in sec_cipher_pbuf_map()
913 return -EINVAL; in sec_cipher_pbuf_map()
915 if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) { in sec_cipher_pbuf_map()
918 mac_offset = qp_ctx->res[req_id].pbuf + copy_size - authsize; in sec_cipher_pbuf_map()
919 memcpy(a_req->out_mac, mac_offset, authsize); in sec_cipher_pbuf_map()
922 req->in_dma = qp_ctx->res[req_id].pbuf_dma; in sec_cipher_pbuf_map()
923 c_req->c_out_dma = req->in_dma; in sec_cipher_pbuf_map()
931 struct aead_request *aead_req = req->aead_req.aead_req; in sec_cipher_pbuf_unmap()
932 struct sec_cipher_req *c_req = &req->c_req; in sec_cipher_pbuf_unmap()
933 struct sec_qp_ctx *qp_ctx = req->qp_ctx; in sec_cipher_pbuf_unmap()
935 int req_id = req->req_id; in sec_cipher_pbuf_unmap()
937 if (ctx->alg_type == SEC_AEAD) in sec_cipher_pbuf_unmap()
938 copy_size = c_req->c_len + aead_req->assoclen; in sec_cipher_pbuf_unmap()
940 copy_size = c_req->c_len; in sec_cipher_pbuf_unmap()
943 qp_ctx->res[req_id].pbuf, copy_size); in sec_cipher_pbuf_unmap()
945 dev_err(ctx->dev, "copy pbuf data to dst error!\n"); in sec_cipher_pbuf_unmap()
950 struct aead_request *aead_req = req->aead_req; in sec_aead_mac_init()
953 u8 *mac_out = req->out_mac; in sec_aead_mac_init()
954 struct scatterlist *sgl = aead_req->src; in sec_aead_mac_init()
958 /* Copy input mac */ in sec_aead_mac_init()
959 skip_size = aead_req->assoclen + aead_req->cryptlen - authsize; in sec_aead_mac_init()
963 return -EINVAL; in sec_aead_mac_init()
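
sec_aead_mac_init() copies the expected authentication tag out of the request before decryption: the tag occupies the last 'authsize' bytes of the associated data plus ciphertext, so skip_size above marks where it starts in the source scatterlist. A hedged kernel-style sketch of that copy; the helper name is invented, and sg_pcopy_to_buffer() is shown as one way to pull bytes out of a scatterlist at an offset, not as the driver's exact code.

#include <linux/scatterlist.h>
#include <linux/errno.h>

/*
 * Copy the trailing tag (last 'authsize' bytes of assoclen + cryptlen)
 * from the request's source scatterlist into mac_out, so it can be
 * checked after the hardware has decrypted the payload.
 */
static int copy_input_mac(struct scatterlist *src, unsigned int assoclen,
			  unsigned int cryptlen, unsigned int authsize,
			  u8 *mac_out)
{
	size_t skip_size = assoclen + cryptlen - authsize;
	size_t copied;

	copied = sg_pcopy_to_buffer(src, sg_nents(src), mac_out,
				    authsize, skip_size);
	if (copied != authsize)
		return -EINVAL;

	return 0;
}
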
971 struct sec_cipher_req *c_req = &req->c_req; in sec_cipher_map()
972 struct sec_aead_req *a_req = &req->aead_req; in sec_cipher_map()
973 struct sec_qp_ctx *qp_ctx = req->qp_ctx; in sec_cipher_map()
974 struct sec_alg_res *res = &qp_ctx->res[req->req_id]; in sec_cipher_map()
975 struct device *dev = ctx->dev; in sec_cipher_map()
978 if (req->use_pbuf) { in sec_cipher_map()
979 c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET; in sec_cipher_map()
980 c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET; in sec_cipher_map()
981 if (ctx->alg_type == SEC_AEAD) { in sec_cipher_map()
982 a_req->a_ivin = res->a_ivin; in sec_cipher_map()
983 a_req->a_ivin_dma = res->a_ivin_dma; in sec_cipher_map()
984 a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET; in sec_cipher_map()
985 a_req->out_mac_dma = res->pbuf_dma + in sec_cipher_map()
992 c_req->c_ivin = res->c_ivin; in sec_cipher_map()
993 c_req->c_ivin_dma = res->c_ivin_dma; in sec_cipher_map()
994 if (ctx->alg_type == SEC_AEAD) { in sec_cipher_map()
995 a_req->a_ivin = res->a_ivin; in sec_cipher_map()
996 a_req->a_ivin_dma = res->a_ivin_dma; in sec_cipher_map()
997 a_req->out_mac = res->out_mac; in sec_cipher_map()
998 a_req->out_mac_dma = res->out_mac_dma; in sec_cipher_map()
1001 req->in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src, in sec_cipher_map()
1002 qp_ctx->c_in_pool, in sec_cipher_map()
1003 req->req_id, in sec_cipher_map()
1004 &req->in_dma); in sec_cipher_map()
1005 if (IS_ERR(req->in)) { in sec_cipher_map()
1006 dev_err(dev, "fail to dma map input sgl buffers!\n"); in sec_cipher_map()
1007 return PTR_ERR(req->in); in sec_cipher_map()
1010 if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) { in sec_cipher_map()
1014 hisi_acc_sg_buf_unmap(dev, src, req->in); in sec_cipher_map()
1020 c_req->c_out = req->in; in sec_cipher_map()
1021 c_req->c_out_dma = req->in_dma; in sec_cipher_map()
1023 c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst, in sec_cipher_map()
1024 qp_ctx->c_out_pool, in sec_cipher_map()
1025 req->req_id, in sec_cipher_map()
1026 &c_req->c_out_dma); in sec_cipher_map()
1028 if (IS_ERR(c_req->c_out)) { in sec_cipher_map()
1030 hisi_acc_sg_buf_unmap(dev, src, req->in); in sec_cipher_map()
1031 return PTR_ERR(c_req->c_out); in sec_cipher_map()
1041 struct sec_cipher_req *c_req = &req->c_req; in sec_cipher_unmap()
1042 struct device *dev = ctx->dev; in sec_cipher_unmap()
1044 if (req->use_pbuf) { in sec_cipher_unmap()
1048 hisi_acc_sg_buf_unmap(dev, src, req->in); in sec_cipher_unmap()
1050 hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out); in sec_cipher_unmap()
1056 struct skcipher_request *sq = req->c_req.sk_req; in sec_skcipher_sgl_map()
1058 return sec_cipher_map(ctx, req, sq->src, sq->dst); in sec_skcipher_sgl_map()
1063 struct skcipher_request *sq = req->c_req.sk_req; in sec_skcipher_sgl_unmap()
1065 sec_cipher_unmap(ctx, req, sq->src, sq->dst); in sec_skcipher_sgl_unmap()
1071 switch (keys->enckeylen) { in sec_aead_aes_set_key()
1073 c_ctx->c_key_len = SEC_CKEY_128BIT; in sec_aead_aes_set_key()
1076 c_ctx->c_key_len = SEC_CKEY_192BIT; in sec_aead_aes_set_key()
1079 c_ctx->c_key_len = SEC_CKEY_256BIT; in sec_aead_aes_set_key()
1083 return -EINVAL; in sec_aead_aes_set_key()
1085 memcpy(c_ctx->c_key, keys->enckey, keys->enckeylen); in sec_aead_aes_set_key()
1093 struct crypto_shash *hash_tfm = ctx->hash_tfm; in sec_aead_auth_set_key()
1096 if (!keys->authkeylen) { in sec_aead_auth_set_key()
1098 return -EINVAL; in sec_aead_auth_set_key()
1103 if (keys->authkeylen > blocksize) { in sec_aead_auth_set_key()
1104 ret = crypto_shash_tfm_digest(hash_tfm, keys->authkey, in sec_aead_auth_set_key()
1105 keys->authkeylen, ctx->a_key); in sec_aead_auth_set_key()
1108 return -EINVAL; in sec_aead_auth_set_key()
1110 ctx->a_key_len = digestsize; in sec_aead_auth_set_key()
1112 memcpy(ctx->a_key, keys->authkey, keys->authkeylen); in sec_aead_auth_set_key()
1113 ctx->a_key_len = keys->authkeylen; in sec_aead_auth_set_key()
1123 struct sec_auth_ctx *a_ctx = &ctx->a_ctx; in sec_aead_setauthsize()
1125 if (unlikely(a_ctx->fallback_aead_tfm)) in sec_aead_setauthsize()
1126 return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize); in sec_aead_setauthsize()
1135 crypto_aead_clear_flags(a_ctx->fallback_aead_tfm, CRYPTO_TFM_REQ_MASK); in sec_aead_fallback_setkey()
1136 crypto_aead_set_flags(a_ctx->fallback_aead_tfm, in sec_aead_fallback_setkey()
1138 return crypto_aead_setkey(a_ctx->fallback_aead_tfm, key, keylen); in sec_aead_fallback_setkey()
1148 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; in sec_aead_setkey()
1149 struct sec_auth_ctx *a_ctx = &ctx->a_ctx; in sec_aead_setkey()
1150 struct device *dev = ctx->dev; in sec_aead_setkey()
1154 ctx->a_ctx.a_alg = a_alg; in sec_aead_setkey()
1155 ctx->c_ctx.c_alg = c_alg; in sec_aead_setkey()
1156 ctx->a_ctx.mac_len = mac_len; in sec_aead_setkey()
1157 c_ctx->c_mode = c_mode; in sec_aead_setkey()
1165 memcpy(c_ctx->c_key, key, keylen); in sec_aead_setkey()
1167 if (unlikely(a_ctx->fallback_aead_tfm)) { in sec_aead_setkey()
1186 ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys); in sec_aead_setkey()
1192 if ((ctx->a_ctx.mac_len & SEC_SQE_LEN_RATE_MASK) || in sec_aead_setkey()
1193 (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK)) { in sec_aead_setkey()
1194 ret = -EINVAL; in sec_aead_setkey()
1231 struct aead_request *aq = req->aead_req.aead_req; in sec_aead_sgl_map()
1233 return sec_cipher_map(ctx, req, aq->src, aq->dst); in sec_aead_sgl_map()
1238 struct aead_request *aq = req->aead_req.aead_req; in sec_aead_sgl_unmap()
1240 sec_cipher_unmap(ctx, req, aq->src, aq->dst); in sec_aead_sgl_unmap()
1247 ret = ctx->req_op->buf_map(ctx, req); in sec_request_transfer()
1251 ctx->req_op->do_transfer(ctx, req); in sec_request_transfer()
1253 ret = ctx->req_op->bd_fill(ctx, req); in sec_request_transfer()
1260 ctx->req_op->buf_unmap(ctx, req); in sec_request_transfer()
1266 ctx->req_op->buf_unmap(ctx, req); in sec_request_untransfer()
1271 struct skcipher_request *sk_req = req->c_req.sk_req; in sec_skcipher_copy_iv()
1272 struct sec_cipher_req *c_req = &req->c_req; in sec_skcipher_copy_iv()
1274 memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize); in sec_skcipher_copy_iv()
1279 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; in sec_skcipher_bd_fill()
1280 struct sec_cipher_req *c_req = &req->c_req; in sec_skcipher_bd_fill()
1281 struct sec_sqe *sec_sqe = &req->sec_sqe; in sec_skcipher_bd_fill()
1288 sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma); in sec_skcipher_bd_fill()
1289 sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma); in sec_skcipher_bd_fill()
1290 sec_sqe->type2.data_src_addr = cpu_to_le64(req->in_dma); in sec_skcipher_bd_fill()
1291 sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma); in sec_skcipher_bd_fill()
1293 sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) << in sec_skcipher_bd_fill()
1295 sec_sqe->type2.c_alg = c_ctx->c_alg; in sec_skcipher_bd_fill()
1296 sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) << in sec_skcipher_bd_fill()
1300 if (c_req->encrypt) in sec_skcipher_bd_fill()
1304 sec_sqe->type_cipher_auth = bd_type | cipher; in sec_skcipher_bd_fill()
1307 if (req->use_pbuf) { in sec_skcipher_bd_fill()
1315 sec_sqe->sdm_addr_type |= da_type; in sec_skcipher_bd_fill()
1317 if (req->in_dma != c_req->c_out_dma) in sec_skcipher_bd_fill()
1320 sec_sqe->sds_sa_type = (de | scene | sa_type); in sec_skcipher_bd_fill()
1322 sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len); in sec_skcipher_bd_fill()
1323 sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id); in sec_skcipher_bd_fill()
1330 struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3; in sec_skcipher_bd_fill_v3()
1331 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; in sec_skcipher_bd_fill_v3()
1332 struct sec_cipher_req *c_req = &req->c_req; in sec_skcipher_bd_fill_v3()
1338 sec_sqe3->c_key_addr = cpu_to_le64(c_ctx->c_key_dma); in sec_skcipher_bd_fill_v3()
1339 sec_sqe3->no_scene.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma); in sec_skcipher_bd_fill_v3()
1340 sec_sqe3->data_src_addr = cpu_to_le64(req->in_dma); in sec_skcipher_bd_fill_v3()
1341 sec_sqe3->data_dst_addr = cpu_to_le64(c_req->c_out_dma); in sec_skcipher_bd_fill_v3()
1343 sec_sqe3->c_mode_alg = ((u8)c_ctx->c_alg << SEC_CALG_OFFSET_V3) | in sec_skcipher_bd_fill_v3()
1344 c_ctx->c_mode; in sec_skcipher_bd_fill_v3()
1345 sec_sqe3->c_icv_key |= cpu_to_le16(((u16)c_ctx->c_key_len) << in sec_skcipher_bd_fill_v3()
1348 if (c_req->encrypt) in sec_skcipher_bd_fill_v3()
1352 sec_sqe3->c_icv_key |= cpu_to_le16(cipher); in sec_skcipher_bd_fill_v3()
1355 sec_sqe3->auth_mac_key = cpu_to_le32((u32)SEC_CTR_CNT_ROLLOVER << in sec_skcipher_bd_fill_v3()
1358 if (req->use_pbuf) { in sec_skcipher_bd_fill_v3()
1367 if (req->in_dma != c_req->c_out_dma) in sec_skcipher_bd_fill_v3()
1371 sec_sqe3->bd_param = cpu_to_le32(bd_param); in sec_skcipher_bd_fill_v3()
1373 sec_sqe3->c_len_ivin |= cpu_to_le32(c_req->c_len); in sec_skcipher_bd_fill_v3()
1374 sec_sqe3->tag = cpu_to_le64(req); in sec_skcipher_bd_fill_v3()
1379 /* increment counter (128-bit int) */
1383 --bits; in ctr_iv_inc()
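
ctr_iv_inc() advances the big-endian block counter at the tail of the IV by however many blocks the completed request consumed; only its comment and the '--bits' step survive in this listing. Below is a stand-alone sketch of that kind of multi-byte, carry-propagating increment, a plausible reconstruction rather than the driver's exact code.

#include <stdio.h>

/* Add 'blocks' to a big-endian counter stored in counter[0..len-1]. */
static void ctr_counter_add(unsigned char *counter, unsigned int len,
			    unsigned int blocks)
{
	unsigned int carry = blocks;

	while (len && carry) {
		--len;
		carry += counter[len];
		counter[len] = carry & 0xff;   /* keep the low byte */
		carry >>= 8;                   /* propagate the rest upward */
	}
}

int main(void)
{
	unsigned char iv[16] = { [14] = 0x00, [15] = 0xff };

	ctr_counter_add(iv, sizeof(iv), 2);     /* 0x00ff + 2 = 0x0101 */
	printf("%02x %02x\n", iv[14], iv[15]);  /* prints: 01 01 */
	return 0;
}
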
1392 struct aead_request *aead_req = req->aead_req.aead_req; in sec_update_iv()
1393 struct skcipher_request *sk_req = req->c_req.sk_req; in sec_update_iv()
1394 u32 iv_size = req->ctx->c_ctx.ivsize; in sec_update_iv()
1400 if (req->c_req.encrypt) in sec_update_iv()
1401 sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst; in sec_update_iv()
1403 sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src; in sec_update_iv()
1406 iv = sk_req->iv; in sec_update_iv()
1407 cryptlen = sk_req->cryptlen; in sec_update_iv()
1409 iv = aead_req->iv; in sec_update_iv()
1410 cryptlen = aead_req->cryptlen; in sec_update_iv()
1413 if (req->ctx->c_ctx.c_mode == SEC_CMODE_CBC) { in sec_update_iv()
1415 cryptlen - iv_size); in sec_update_iv()
1417 dev_err(req->ctx->dev, "copy output iv error!\n"); in sec_update_iv()
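
For CBC, sec_update_iv() refreshes the caller's IV after an operation by copying the last ciphertext block out of the data scatterlist (cryptlen - iv_size is the offset of that block), so the next request can chain from it. A minimal user-space illustration of that rule, with flat buffers standing in for the scatterlist and an invented function name.

#include <stdio.h>
#include <string.h>

/* CBC chaining rule: the next IV is the final ciphertext block. */
static void cbc_update_iv(const unsigned char *ciphertext, size_t cryptlen,
			  unsigned char *iv, size_t iv_size)
{
	if (cryptlen < iv_size)
		return;                     /* nothing sensible to copy */

	memcpy(iv, ciphertext + cryptlen - iv_size, iv_size);
}

int main(void)
{
	unsigned char ct[32] = { [16] = 0xaa, [31] = 0xbb };  /* two fake blocks */
	unsigned char iv[16] = { 0 };

	cbc_update_iv(ct, sizeof(ct), iv, sizeof(iv));
	printf("%02x .. %02x\n", iv[0], iv[15]);   /* prints: aa .. bb */
	return 0;
}
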
1431 spin_lock_bh(&qp_ctx->req_lock); in sec_back_req_clear()
1432 if (ctx->fake_req_limit >= in sec_back_req_clear()
1433 atomic_read(&qp_ctx->qp->qp_status.used) && in sec_back_req_clear()
1434 !list_empty(&qp_ctx->backlog)) { in sec_back_req_clear()
1435 backlog_req = list_first_entry(&qp_ctx->backlog, in sec_back_req_clear()
1437 list_del(&backlog_req->backlog_head); in sec_back_req_clear()
1439 spin_unlock_bh(&qp_ctx->req_lock); in sec_back_req_clear()
1447 struct skcipher_request *sk_req = req->c_req.sk_req; in sec_skcipher_callback()
1448 struct sec_qp_ctx *qp_ctx = req->qp_ctx; in sec_skcipher_callback()
1455 if (!err && (ctx->c_ctx.c_mode == SEC_CMODE_CBC || in sec_skcipher_callback()
1456 ctx->c_ctx.c_mode == SEC_CMODE_CTR) && req->c_req.encrypt) in sec_skcipher_callback()
1464 backlog_sk_req = backlog_req->c_req.sk_req; in sec_skcipher_callback()
1465 skcipher_request_complete(backlog_sk_req, -EINPROGRESS); in sec_skcipher_callback()
1466 atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt); in sec_skcipher_callback()
1474 struct aead_request *aead_req = req->aead_req.aead_req; in set_aead_auth_iv()
1475 struct sec_cipher_req *c_req = &req->c_req; in set_aead_auth_iv()
1476 struct sec_aead_req *a_req = &req->aead_req; in set_aead_auth_iv()
1477 size_t authsize = ctx->a_ctx.mac_len; in set_aead_auth_iv()
1478 u32 data_size = aead_req->cryptlen; in set_aead_auth_iv()
1483 cl = c_req->c_ivin[0] + 1; in set_aead_auth_iv()
1484 c_req->c_ivin[ctx->c_ctx.ivsize - cl] = 0x00; in set_aead_auth_iv()
1485 memset(&c_req->c_ivin[ctx->c_ctx.ivsize - cl], 0, cl); in set_aead_auth_iv()
1486 c_req->c_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] = IV_CTR_INIT; in set_aead_auth_iv()
1489 flage |= c_req->c_ivin[0] & IV_CL_MASK; in set_aead_auth_iv()
1492 cm = (authsize - IV_CM_CAL_NUM) / IV_CM_CAL_NUM; in set_aead_auth_iv()
1494 if (aead_req->assoclen) in set_aead_auth_iv()
1497 memcpy(a_req->a_ivin, c_req->c_ivin, ctx->c_ctx.ivsize); in set_aead_auth_iv()
1498 a_req->a_ivin[0] = flage; in set_aead_auth_iv()
1505 if (!c_req->encrypt) in set_aead_auth_iv()
1506 data_size = aead_req->cryptlen - authsize; in set_aead_auth_iv()
1508 a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] = in set_aead_auth_iv()
1511 a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE2] = in set_aead_auth_iv()
1517 struct aead_request *aead_req = req->aead_req.aead_req; in sec_aead_set_iv()
1520 struct sec_cipher_req *c_req = &req->c_req; in sec_aead_set_iv()
1521 struct sec_aead_req *a_req = &req->aead_req; in sec_aead_set_iv()
1523 memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize); in sec_aead_set_iv()
1525 if (ctx->c_ctx.c_mode == SEC_CMODE_CCM) { in sec_aead_set_iv()
1530 ctx->a_ctx.mac_len = authsize; in sec_aead_set_iv()
1536 if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) { in sec_aead_set_iv()
1537 ctx->a_ctx.mac_len = authsize; in sec_aead_set_iv()
1538 memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE); in sec_aead_set_iv()
1545 struct sec_aead_req *a_req = &req->aead_req; in sec_auth_bd_fill_xcm()
1546 struct aead_request *aq = a_req->aead_req; in sec_auth_bd_fill_xcm()
1549 sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)ctx->mac_len); in sec_auth_bd_fill_xcm()
1552 sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr; in sec_auth_bd_fill_xcm()
1553 sec_sqe->type2.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma); in sec_auth_bd_fill_xcm()
1554 sec_sqe->type_cipher_auth |= SEC_NO_AUTH << SEC_AUTH_OFFSET; in sec_auth_bd_fill_xcm()
1557 sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH; in sec_auth_bd_fill_xcm()
1559 sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER; in sec_auth_bd_fill_xcm()
1561 sec_sqe->type2.alen_ivllen = cpu_to_le32(aq->assoclen); in sec_auth_bd_fill_xcm()
1562 sec_sqe->type2.auth_src_offset = cpu_to_le16(0x0); in sec_auth_bd_fill_xcm()
1563 sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen); in sec_auth_bd_fill_xcm()
1565 sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma); in sec_auth_bd_fill_xcm()
1571 struct sec_aead_req *a_req = &req->aead_req; in sec_auth_bd_fill_xcm_v3()
1572 struct aead_request *aq = a_req->aead_req; in sec_auth_bd_fill_xcm_v3()
1575 sqe3->c_icv_key |= cpu_to_le16((u16)ctx->mac_len << SEC_MAC_OFFSET_V3); in sec_auth_bd_fill_xcm_v3()
1578 sqe3->a_key_addr = sqe3->c_key_addr; in sec_auth_bd_fill_xcm_v3()
1579 sqe3->auth_ivin.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma); in sec_auth_bd_fill_xcm_v3()
1580 sqe3->auth_mac_key |= SEC_NO_AUTH; in sec_auth_bd_fill_xcm_v3()
1583 sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3; in sec_auth_bd_fill_xcm_v3()
1585 sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3; in sec_auth_bd_fill_xcm_v3()
1587 sqe3->a_len_key = cpu_to_le32(aq->assoclen); in sec_auth_bd_fill_xcm_v3()
1588 sqe3->auth_src_offset = cpu_to_le16(0x0); in sec_auth_bd_fill_xcm_v3()
1589 sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen); in sec_auth_bd_fill_xcm_v3()
1590 sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma); in sec_auth_bd_fill_xcm_v3()
1596 struct sec_aead_req *a_req = &req->aead_req; in sec_auth_bd_fill_ex()
1597 struct sec_cipher_req *c_req = &req->c_req; in sec_auth_bd_fill_ex()
1598 struct aead_request *aq = a_req->aead_req; in sec_auth_bd_fill_ex()
1600 sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma); in sec_auth_bd_fill_ex()
1602 sec_sqe->type2.mac_key_alg = in sec_auth_bd_fill_ex()
1603 cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE); in sec_auth_bd_fill_ex()
1605 sec_sqe->type2.mac_key_alg |= in sec_auth_bd_fill_ex()
1606 cpu_to_le32((u32)((ctx->a_key_len) / in sec_auth_bd_fill_ex()
1609 sec_sqe->type2.mac_key_alg |= in sec_auth_bd_fill_ex()
1610 cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET); in sec_auth_bd_fill_ex()
1613 sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET; in sec_auth_bd_fill_ex()
1614 sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH; in sec_auth_bd_fill_ex()
1616 sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE2 << SEC_AUTH_OFFSET; in sec_auth_bd_fill_ex()
1617 sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER; in sec_auth_bd_fill_ex()
1619 sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen); in sec_auth_bd_fill_ex()
1621 sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen); in sec_auth_bd_fill_ex()
1623 sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma); in sec_auth_bd_fill_ex()
1628 struct sec_auth_ctx *auth_ctx = &ctx->a_ctx; in sec_aead_bd_fill()
1629 struct sec_sqe *sec_sqe = &req->sec_sqe; in sec_aead_bd_fill()
1634 dev_err(ctx->dev, "skcipher bd fill is error!\n"); in sec_aead_bd_fill()
1638 if (ctx->c_ctx.c_mode == SEC_CMODE_CCM || in sec_aead_bd_fill()
1639 ctx->c_ctx.c_mode == SEC_CMODE_GCM) in sec_aead_bd_fill()
1640 sec_auth_bd_fill_xcm(auth_ctx, req->c_req.encrypt, req, sec_sqe); in sec_aead_bd_fill()
1642 sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe); in sec_aead_bd_fill()
1650 struct sec_aead_req *a_req = &req->aead_req; in sec_auth_bd_fill_ex_v3()
1651 struct sec_cipher_req *c_req = &req->c_req; in sec_auth_bd_fill_ex_v3()
1652 struct aead_request *aq = a_req->aead_req; in sec_auth_bd_fill_ex_v3()
1654 sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma); in sec_auth_bd_fill_ex_v3()
1656 sqe3->auth_mac_key |= in sec_auth_bd_fill_ex_v3()
1657 cpu_to_le32((u32)(ctx->mac_len / in sec_auth_bd_fill_ex_v3()
1660 sqe3->auth_mac_key |= in sec_auth_bd_fill_ex_v3()
1661 cpu_to_le32((u32)(ctx->a_key_len / in sec_auth_bd_fill_ex_v3()
1664 sqe3->auth_mac_key |= in sec_auth_bd_fill_ex_v3()
1665 cpu_to_le32((u32)(ctx->a_alg) << SEC_AUTH_ALG_OFFSET_V3); in sec_auth_bd_fill_ex_v3()
1668 sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE1); in sec_auth_bd_fill_ex_v3()
1669 sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3; in sec_auth_bd_fill_ex_v3()
1671 sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE2); in sec_auth_bd_fill_ex_v3()
1672 sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3; in sec_auth_bd_fill_ex_v3()
1674 sqe3->a_len_key = cpu_to_le32(c_req->c_len + aq->assoclen); in sec_auth_bd_fill_ex_v3()
1676 sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen); in sec_auth_bd_fill_ex_v3()
1678 sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma); in sec_auth_bd_fill_ex_v3()
1683 struct sec_auth_ctx *auth_ctx = &ctx->a_ctx; in sec_aead_bd_fill_v3()
1684 struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3; in sec_aead_bd_fill_v3()
1689 dev_err(ctx->dev, "skcipher bd3 fill is error!\n"); in sec_aead_bd_fill_v3()
1693 if (ctx->c_ctx.c_mode == SEC_CMODE_CCM || in sec_aead_bd_fill_v3()
1694 ctx->c_ctx.c_mode == SEC_CMODE_GCM) in sec_aead_bd_fill_v3()
1695 sec_auth_bd_fill_xcm_v3(auth_ctx, req->c_req.encrypt, in sec_aead_bd_fill_v3()
1698 sec_auth_bd_fill_ex_v3(auth_ctx, req->c_req.encrypt, in sec_aead_bd_fill_v3()
1706 struct aead_request *a_req = req->aead_req.aead_req; in sec_aead_callback()
1708 struct sec_aead_req *aead_req = &req->aead_req; in sec_aead_callback()
1709 struct sec_cipher_req *c_req = &req->c_req; in sec_aead_callback()
1711 struct sec_qp_ctx *qp_ctx = req->qp_ctx; in sec_aead_callback()
1716 if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt) in sec_aead_callback()
1720 if (!err && c_req->encrypt) { in sec_aead_callback()
1721 struct scatterlist *sgl = a_req->dst; in sec_aead_callback()
1724 aead_req->out_mac, in sec_aead_callback()
1725 authsize, a_req->cryptlen + in sec_aead_callback()
1726 a_req->assoclen); in sec_aead_callback()
1728 dev_err(c->dev, "copy out mac err!\n"); in sec_aead_callback()
1729 err = -EINVAL; in sec_aead_callback()
1740 backlog_aead_req = backlog_req->aead_req.aead_req; in sec_aead_callback()
1741 aead_request_complete(backlog_aead_req, -EINPROGRESS); in sec_aead_callback()
1742 atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt); in sec_aead_callback()
1761 qp_ctx = &ctx->qp_ctx[queue_id]; in sec_request_init()
1763 req->req_id = sec_alloc_req_id(req, qp_ctx); in sec_request_init()
1764 if (unlikely(req->req_id < 0)) { in sec_request_init()
1766 return req->req_id; in sec_request_init()
1774 struct sec_cipher_req *c_req = &req->c_req; in sec_process()
1786 if (!req->c_req.encrypt && (ctx->c_ctx.c_mode == SEC_CMODE_CBC || in sec_process()
1787 ctx->c_ctx.c_mode == SEC_CMODE_CTR)) in sec_process()
1788 sec_update_iv(req, ctx->alg_type); in sec_process()
1790 ret = ctx->req_op->bd_send(ctx, req); in sec_process()
1791 if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) || in sec_process()
1792 (ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) { in sec_process()
1793 dev_err_ratelimited(ctx->dev, "send sec request failed!\n"); in sec_process()
1801 if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) { in sec_process()
1802 if (ctx->alg_type == SEC_SKCIPHER) in sec_process()
1803 memcpy(req->c_req.sk_req->iv, c_req->c_ivin, in sec_process()
1804 ctx->c_ctx.ivsize); in sec_process()
1806 memcpy(req->aead_req.aead_req->iv, c_req->c_ivin, in sec_process()
1807 ctx->c_ctx.ivsize); in sec_process()
1865 if (ctx->sec->qm.ver < QM_HW_V3) { in sec_skcipher_ctx_init()
1866 ctx->type_supported = SEC_BD_TYPE2; in sec_skcipher_ctx_init()
1867 ctx->req_op = &sec_skcipher_req_ops; in sec_skcipher_ctx_init()
1869 ctx->type_supported = SEC_BD_TYPE3; in sec_skcipher_ctx_init()
1870 ctx->req_op = &sec_skcipher_req_ops_v3; in sec_skcipher_ctx_init()
1887 ctx->alg_type = SEC_AEAD; in sec_aead_init()
1888 ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm); in sec_aead_init()
1889 if (ctx->c_ctx.ivsize < SEC_AIV_SIZE || in sec_aead_init()
1890 ctx->c_ctx.ivsize > SEC_IV_SIZE) { in sec_aead_init()
1892 return -EINVAL; in sec_aead_init()
1898 if (ctx->sec->qm.ver < QM_HW_V3) { in sec_aead_init()
1899 ctx->type_supported = SEC_BD_TYPE2; in sec_aead_init()
1900 ctx->req_op = &sec_aead_req_ops; in sec_aead_init()
1902 ctx->type_supported = SEC_BD_TYPE3; in sec_aead_init()
1903 ctx->req_op = &sec_aead_req_ops_v3; in sec_aead_init()
1935 struct sec_auth_ctx *auth_ctx = &ctx->a_ctx; in sec_aead_ctx_init()
1944 auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0); in sec_aead_ctx_init()
1945 if (IS_ERR(auth_ctx->hash_tfm)) { in sec_aead_ctx_init()
1946 dev_err(ctx->dev, "aead alloc shash error!\n"); in sec_aead_ctx_init()
1948 return PTR_ERR(auth_ctx->hash_tfm); in sec_aead_ctx_init()
1958 crypto_free_shash(ctx->a_ctx.hash_tfm); in sec_aead_ctx_exit()
1966 struct sec_auth_ctx *a_ctx = &ctx->a_ctx; in sec_aead_xcm_ctx_init()
1967 const char *aead_name = alg->base.cra_name; in sec_aead_xcm_ctx_init()
1972 dev_err(ctx->dev, "hisi_sec2: aead xcm init error!\n"); in sec_aead_xcm_ctx_init()
1976 a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0, in sec_aead_xcm_ctx_init()
1979 if (IS_ERR(a_ctx->fallback_aead_tfm)) { in sec_aead_xcm_ctx_init()
1980 dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n"); in sec_aead_xcm_ctx_init()
1982 return PTR_ERR(a_ctx->fallback_aead_tfm); in sec_aead_xcm_ctx_init()
1984 a_ctx->fallback = false; in sec_aead_xcm_ctx_init()
1993 crypto_free_aead(ctx->a_ctx.fallback_aead_tfm); in sec_aead_xcm_ctx_exit()
2015 u32 cryptlen = sreq->c_req.sk_req->cryptlen; in sec_skcipher_cryptlen_check()
2016 struct device *dev = ctx->dev; in sec_skcipher_cryptlen_check()
2017 u8 c_mode = ctx->c_ctx.c_mode; in sec_skcipher_cryptlen_check()
2023 dev_err(dev, "skcipher XTS mode input length error!\n"); in sec_skcipher_cryptlen_check()
2024 ret = -EINVAL; in sec_skcipher_cryptlen_check()
2029 if (unlikely(cryptlen & (AES_BLOCK_SIZE - 1))) { in sec_skcipher_cryptlen_check()
2030 dev_err(dev, "skcipher AES input length error!\n"); in sec_skcipher_cryptlen_check()
2031 ret = -EINVAL; in sec_skcipher_cryptlen_check()
2035 if (unlikely(ctx->sec->qm.ver < QM_HW_V3)) { in sec_skcipher_cryptlen_check()
2037 ret = -EINVAL; in sec_skcipher_cryptlen_check()
2041 ret = -EINVAL; in sec_skcipher_cryptlen_check()
2049 struct skcipher_request *sk_req = sreq->c_req.sk_req; in sec_skcipher_param_check()
2050 struct device *dev = ctx->dev; in sec_skcipher_param_check()
2051 u8 c_alg = ctx->c_ctx.c_alg; in sec_skcipher_param_check()
2053 if (unlikely(!sk_req->src || !sk_req->dst || in sec_skcipher_param_check()
2054 sk_req->cryptlen > MAX_INPUT_DATA_LEN)) { in sec_skcipher_param_check()
2055 dev_err(dev, "skcipher input param error!\n"); in sec_skcipher_param_check()
2056 return -EINVAL; in sec_skcipher_param_check()
2058 sreq->c_req.c_len = sk_req->cryptlen; in sec_skcipher_param_check()
2060 if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ) in sec_skcipher_param_check()
2061 sreq->use_pbuf = true; in sec_skcipher_param_check()
2063 sreq->use_pbuf = false; in sec_skcipher_param_check()
2066 if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) { in sec_skcipher_param_check()
2067 dev_err(dev, "skcipher 3des input length error!\n"); in sec_skcipher_param_check()
2068 return -EINVAL; in sec_skcipher_param_check()
2077 return -EINVAL; in sec_skcipher_param_check()
2083 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; in sec_skcipher_soft_crypto()
2084 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, c_ctx->fbtfm); in sec_skcipher_soft_crypto()
2085 struct device *dev = ctx->dev; in sec_skcipher_soft_crypto()
2088 if (!c_ctx->fbtfm) { in sec_skcipher_soft_crypto()
2090 return -EINVAL; in sec_skcipher_soft_crypto()
2093 skcipher_request_set_sync_tfm(subreq, c_ctx->fbtfm); in sec_skcipher_soft_crypto()
2096 skcipher_request_set_callback(subreq, sreq->base.flags, in sec_skcipher_soft_crypto()
2098 skcipher_request_set_crypt(subreq, sreq->src, sreq->dst, in sec_skcipher_soft_crypto()
2099 sreq->cryptlen, sreq->iv); in sec_skcipher_soft_crypto()
2117 if (!sk_req->cryptlen) { in sec_skcipher_crypto()
2118 if (ctx->c_ctx.c_mode == SEC_CMODE_XTS) in sec_skcipher_crypto()
2119 return -EINVAL; in sec_skcipher_crypto()
2123 req->flag = sk_req->base.flags; in sec_skcipher_crypto()
2124 req->c_req.sk_req = sk_req; in sec_skcipher_crypto()
2125 req->c_req.encrypt = encrypt; in sec_skcipher_crypto()
2126 req->ctx = ctx; in sec_skcipher_crypto()
2130 return -EINVAL; in sec_skcipher_crypto()
2132 if (unlikely(ctx->c_ctx.fallback)) in sec_skcipher_crypto()
2135 return ctx->req_op->process(ctx, req); in sec_skcipher_crypto()
2229 cl = aead_req->iv[0] + 1; in aead_iv_demension_check()
2231 return -EINVAL; in aead_iv_demension_check()
2233 if (cl < IV_CL_MID && aead_req->cryptlen >> (BYTE_BITS * cl)) in aead_iv_demension_check()
2234 return -EOVERFLOW; in aead_iv_demension_check()
2241 struct aead_request *req = sreq->aead_req.aead_req; in sec_aead_spec_check()
2244 u8 c_mode = ctx->c_ctx.c_mode; in sec_aead_spec_check()
2245 struct device *dev = ctx->dev; in sec_aead_spec_check()
2248 if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN || in sec_aead_spec_check()
2249 req->assoclen > SEC_MAX_AAD_LEN)) { in sec_aead_spec_check()
2250 dev_err(dev, "aead input spec error!\n"); in sec_aead_spec_check()
2251 return -EINVAL; in sec_aead_spec_check()
2257 dev_err(dev, "aead input mac length error!\n"); in sec_aead_spec_check()
2258 return -EINVAL; in sec_aead_spec_check()
2262 if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN)) { in sec_aead_spec_check()
2263 dev_err_ratelimited(dev, "CCM input aad parameter is too long!\n"); in sec_aead_spec_check()
2264 return -EINVAL; in sec_aead_spec_check()
2268 dev_err(dev, "aead input iv param error!\n"); in sec_aead_spec_check()
2273 if (sreq->c_req.encrypt) in sec_aead_spec_check()
2274 sreq->c_req.c_len = req->cryptlen; in sec_aead_spec_check()
2276 sreq->c_req.c_len = req->cryptlen - authsize; in sec_aead_spec_check()
2278 if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) { in sec_aead_spec_check()
2280 return -EINVAL; in sec_aead_spec_check()
2289 struct aead_request *req = sreq->aead_req.aead_req; in sec_aead_param_check()
2292 struct device *dev = ctx->dev; in sec_aead_param_check()
2293 u8 c_alg = ctx->c_ctx.c_alg; in sec_aead_param_check()
2295 if (unlikely(!req->src || !req->dst)) { in sec_aead_param_check()
2296 dev_err(dev, "aead input param error!\n"); in sec_aead_param_check()
2297 return -EINVAL; in sec_aead_param_check()
2300 if (ctx->sec->qm.ver == QM_HW_V2) { in sec_aead_param_check()
2301 if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt && in sec_aead_param_check()
2302 req->cryptlen <= authsize))) { in sec_aead_param_check()
2303 ctx->a_ctx.fallback = true; in sec_aead_param_check()
2304 return -EINVAL; in sec_aead_param_check()
2311 return -EINVAL; in sec_aead_param_check()
2315 return -EINVAL; in sec_aead_param_check()
2317 if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <= in sec_aead_param_check()
2319 sreq->use_pbuf = true; in sec_aead_param_check()
2321 sreq->use_pbuf = false; in sec_aead_param_check()
2330 struct sec_auth_ctx *a_ctx = &ctx->a_ctx; in sec_aead_soft_crypto()
2331 struct device *dev = ctx->dev; in sec_aead_soft_crypto()
2335 /* Kunpeng920 aead mode not support input 0 size */ in sec_aead_soft_crypto()
2336 if (!a_ctx->fallback_aead_tfm) { in sec_aead_soft_crypto()
2338 return -EINVAL; in sec_aead_soft_crypto()
2341 subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL); in sec_aead_soft_crypto()
2343 return -ENOMEM; in sec_aead_soft_crypto()
2345 aead_request_set_tfm(subreq, a_ctx->fallback_aead_tfm); in sec_aead_soft_crypto()
2346 aead_request_set_callback(subreq, aead_req->base.flags, in sec_aead_soft_crypto()
2347 aead_req->base.complete, aead_req->base.data); in sec_aead_soft_crypto()
2348 aead_request_set_crypt(subreq, aead_req->src, aead_req->dst, in sec_aead_soft_crypto()
2349 aead_req->cryptlen, aead_req->iv); in sec_aead_soft_crypto()
2350 aead_request_set_ad(subreq, aead_req->assoclen); in sec_aead_soft_crypto()
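
sec_aead_soft_crypto() falls back to a software AEAD transform because, per the comment at line 2335, the Kunpeng920 engine cannot process zero-length AEAD input; the request is simply re-issued through the generic crypto API with the same src/dst, cryptlen, IV and AAD length. A hedged kernel-style sketch of such a synchronous fallback; the helper name and the crypto_wait_req handling are illustrative, not taken from the driver.

#include <crypto/aead.h>
#include <linux/crypto.h>

/* Re-run an AEAD request on a fallback tfm and wait for the result. */
static int aead_fallback_crypt(struct crypto_aead *fb_tfm,
			       struct aead_request *orig, bool encrypt)
{
	struct aead_request *subreq;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	subreq = aead_request_alloc(fb_tfm, GFP_KERNEL);
	if (!subreq)
		return -ENOMEM;

	aead_request_set_callback(subreq, orig->base.flags,
				  crypto_req_done, &wait);
	aead_request_set_crypt(subreq, orig->src, orig->dst,
			       orig->cryptlen, orig->iv);
	aead_request_set_ad(subreq, orig->assoclen);

	ret = encrypt ? crypto_aead_encrypt(subreq)
		      : crypto_aead_decrypt(subreq);
	ret = crypto_wait_req(ret, &wait);

	aead_request_free(subreq);
	return ret;
}
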
2368 req->flag = a_req->base.flags; in sec_aead_crypto()
2369 req->aead_req.aead_req = a_req; in sec_aead_crypto()
2370 req->c_req.encrypt = encrypt; in sec_aead_crypto()
2371 req->ctx = ctx; in sec_aead_crypto()
2375 if (ctx->a_ctx.fallback) in sec_aead_crypto()
2377 return -EINVAL; in sec_aead_crypto()
2380 return ctx->req_op->process(ctx, req); in sec_aead_crypto()
2567 if (--sec_available_devs) in sec_unregister_from_crypto()