Lines matching defs:req in the HiSilicon SEC2 crypto driver (drivers/crypto/hisilicon/sec2/sec_crypto.c); the number before each match is its line in that file.
126 static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
136 static void sec_free_req_id(struct sec_req *req)
138 struct sec_qp_ctx *qp_ctx = req->qp_ctx;
139 int req_id = req->req_id;
142 dev_err(req->ctx->dev, "free request id invalid!\n");
179 static int sec_cb_status_check(struct sec_req *req,
182 struct sec_ctx *ctx = req->ctx;
184 if (unlikely(req->err_type || status->done != SEC_SQE_DONE)) {
186 req->err_type, status->done);
209 static int qp_send_message(struct sec_req *req)
211 struct sec_qp_ctx *qp_ctx = req->qp_ctx;
224 req->sec_sqe.type2.tag = cpu_to_le16((u16)qp_ctx->send_head);
225 qp_ctx->req_list[qp_ctx->send_head] = req;
228 ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
238 atomic64_inc(&req->ctx->sec->debug.dfx.send_cnt);
244 struct sec_req *req, *tmp;
247 list_for_each_entry_safe(req, tmp, &qp_ctx->backlog.list, list) {
248 list_del(&req->list);
249 ctx->req_op->buf_unmap(ctx, req);
250 if (req->req_id >= 0)
251 sec_free_req_id(req);
254 ret = sec_aead_soft_crypto(ctx, req->aead_req.aead_req,
255 req->c_req.encrypt);
257 ret = sec_skcipher_soft_crypto(ctx, req->c_req.sk_req,
258 req->c_req.encrypt);
261 crypto_request_complete(req->base, -EINPROGRESS);
262 crypto_request_complete(req->base, ret);
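
The excerpt at lines 244-262 drains a queue's backlog when the hardware path is abandoned: each parked request is unlinked with the _safe list iterator, its buffers are unmapped and its request id freed, the work is redone by the software fallback, and completion is signalled twice, first with -EINPROGRESS (the backlogged request was accepted after all) and then with the real status. Below is a minimal user-space model of that safe-unlink-and-complete loop over a singly linked list; the types and the soft_fallback helper are stand-ins, not the driver's own.

#include <errno.h>
#include <stdio.h>

struct parked_req {
    struct parked_req *next;
    int id;
};

static int soft_fallback(struct parked_req *r) { (void)r; return 0; }

static void complete_req(struct parked_req *r, int status)
{
    printf("req %d completes with %d\n", r->id, status);
}

static void drain_backlog(struct parked_req **head)
{
    struct parked_req *r = *head, *next;

    /* Save 'next' before completing 'r', mirroring list_for_each_entry_safe():
     * the current node may be freed or reused once completion has run. */
    while (r) {
        next = r->next;
        complete_req(r, -EINPROGRESS);       /* backlogged request was accepted */
        complete_req(r, soft_fallback(r));   /* final status from the soft path */
        r = next;
    }
    *head = NULL;
}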
268 struct sec_req *req, *tmp;
272 list_for_each_entry_safe(req, tmp, &qp_ctx->backlog.list, list) {
273 ret = qp_send_message(req);
276 list_del(&req->list);
277 crypto_request_complete(req->base, -EINPROGRESS);
300 struct sec_req *req;
306 req = qp_ctx->req_list[status.tag];
309 req = (void *)(uintptr_t)status.tag;
318 if (unlikely(!req)) {
324 req->err_type = status.err_type;
325 ctx = req->ctx;
326 err = sec_cb_status_check(req, &status);
332 ctx->req_op->buf_unmap(ctx, req);
334 ctx->req_op->callback(ctx, req, err);
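
The completion callback above (lines 300-334) recovers the software request from the hardware status in one of two ways: the v2 descriptor's 16-bit tag is the send-head index that qp_send_message recorded in the per-queue req_list (lines 224-225), while the v3 descriptor carries the request pointer itself in its 64-bit tag (line 1616) and only needs a cast. A minimal sketch of the two lookups follows; the struct names and QUEUE_DEPTH are placeholders, not the driver's types.

#include <stdint.h>

#define QUEUE_DEPTH 256     /* placeholder; the real depth comes from the QM queue */

struct sketch_req { int id; };

struct sketch_qp_ctx {
    struct sketch_req *req_list[QUEUE_DEPTH];  /* written at send time, indexed by the BD tag */
};

/* v2-style completion: the 16-bit tag is the send-head index recorded when the
 * BD was queued, so the request is found by table lookup (clamped here only
 * to keep the sketch safe). */
static struct sketch_req *lookup_by_index(struct sketch_qp_ctx *qp_ctx, uint16_t tag)
{
    return qp_ctx->req_list[tag % QUEUE_DEPTH];
}

/* v3-style completion: the 64-bit tag carries the request pointer itself,
 * so it is simply cast back. */
static struct sketch_req *lookup_by_pointer(uint64_t tag)
{
    return (struct sketch_req *)(uintptr_t)tag;
}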
337 static int sec_alg_send_message_retry(struct sec_req *req)
343 ret = qp_send_message(req);
349 static int sec_alg_try_enqueue(struct sec_req *req)
352 if (!list_empty(&req->backlog->list))
356 return qp_send_message(req);
360 static int sec_alg_send_message_maybacklog(struct sec_req *req)
364 ret = sec_alg_try_enqueue(req);
368 spin_lock_bh(&req->backlog->lock);
369 ret = sec_alg_try_enqueue(req);
371 list_add_tail(&req->list, &req->backlog->list);
372 spin_unlock_bh(&req->backlog->lock);
377 static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
379 if (req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)
380 return sec_alg_send_message_maybacklog(req);
382 return sec_alg_send_message_retry(req);
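
sec_bd_send (lines 377-382) splits the send path on CRYPTO_TFM_REQ_MAY_BACKLOG: backloggable requests go through an optimistic try-enqueue that refuses to overtake anything already parked on the backlog (lines 349-356), and only when that fails is the backlog lock taken, the enqueue retried, and the request appended to the list (lines 360-372). The sketch below models that fast-path-plus-recheck shape; the mutex and the integer counters are stand-ins for the driver's bottom-half spinlock and its lists.

#include <pthread.h>
#include <stdbool.h>

struct sketch_queue {
    pthread_mutex_t lock;   /* models the backlog spinlock */
    int backlogged;         /* number of requests parked on the backlog list */
    int free_slots;         /* free hardware queue slots */
};

/* Optimistic attempt: never overtake requests that are already backlogged,
 * then try to claim a hardware slot. */
static bool try_enqueue(struct sketch_queue *q)
{
    if (q->backlogged || !q->free_slots)
        return false;
    q->free_slots--;
    return true;
}

/* MAY_BACKLOG send: lock-free fast path, then a second attempt under the
 * lock, else park the request on the backlog. */
static int send_maybacklog(struct sketch_queue *q)
{
    if (try_enqueue(q))
        return 0;                   /* sent: -EINPROGRESS in the driver */

    pthread_mutex_lock(&q->lock);
    if (try_enqueue(q)) {
        pthread_mutex_unlock(&q->lock);
        return 0;                   /* a slot freed up between the attempts */
    }
    q->backlogged++;                /* list_add_tail(&req->list, &backlog->list) */
    pthread_mutex_unlock(&q->lock);
    return 1;                       /* backlogged: -EBUSY to the caller */
}

The second attempt under the lock matters because a hardware slot can free up between the lock-free try and the list insertion; without it a request could sit parked while the queue is idle.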
958 static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
961 struct aead_request *aead_req = req->aead_req.aead_req;
962 struct sec_cipher_req *c_req = &req->c_req;
963 struct sec_qp_ctx *qp_ctx = req->qp_ctx;
964 struct sec_request_buf *buf = &req->buf;
967 int req_id = req->req_id;
978 pbuf = req->req_id < 0 ? buf->pbuf : qp_ctx->res[req_id].pbuf;
988 memcpy(req->aead_req.out_mac, mac_offset, authsize);
991 if (req->req_id < 0) {
1000 req->in_dma = qp_ctx->res[req_id].pbuf_dma;
1001 c_req->c_out_dma = req->in_dma;
1006 static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
1009 struct aead_request *aead_req = req->aead_req.aead_req;
1010 struct sec_cipher_req *c_req = &req->c_req;
1011 struct sec_qp_ctx *qp_ctx = req->qp_ctx;
1012 struct sec_request_buf *buf = &req->buf;
1014 int req_id = req->req_id;
1021 if (req->req_id < 0)
1029 if (req->req_id < 0)
1033 static int sec_aead_mac_init(struct sec_aead_req *req)
1035 struct aead_request *aead_req = req->aead_req;
1039 u8 *mac_out = req->out_mac;
1104 static int sec_cipher_map_sgl(struct device *dev, struct sec_req *req,
1107 struct sec_hw_sgl *src_in = &req->buf.data_buf.in;
1108 struct sec_hw_sgl *dst_out = &req->buf.data_buf.out;
1112 ret = sec_cipher_to_hw_sgl(dev, src, src_in, &req->buf.in_dma,
1114 req->buf.out_dma = req->buf.in_dma;
1118 ret = sec_cipher_to_hw_sgl(dev, src, src_in, &req->buf.in_dma, DMA_TO_DEVICE);
1122 ret = sec_cipher_to_hw_sgl(dev, dst, dst_out, &req->buf.out_dma,
1125 sec_cipher_put_hw_sgl(dev, src, req->buf.in_dma, DMA_TO_DEVICE);
1132 static int sec_cipher_map_inner(struct sec_ctx *ctx, struct sec_req *req,
1135 struct sec_cipher_req *c_req = &req->c_req;
1136 struct sec_aead_req *a_req = &req->aead_req;
1137 struct sec_qp_ctx *qp_ctx = req->qp_ctx;
1138 struct sec_alg_res *res = &qp_ctx->res[req->req_id];
1143 if (req->use_pbuf) {
1153 return sec_cipher_pbuf_map(ctx, req, src);
1166 req->in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
1168 req->req_id,
1169 &req->in_dma, src_direction);
1170 if (IS_ERR(req->in)) {
1172 return PTR_ERR(req->in);
1179 hisi_acc_sg_buf_unmap(dev, src, req->in, src_direction);
1185 c_req->c_out = req->in;
1186 c_req->c_out_dma = req->in_dma;
1190 req->req_id,
1196 hisi_acc_sg_buf_unmap(dev, src, req->in, src_direction);
1204 static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
1207 struct sec_aead_req *a_req = &req->aead_req;
1208 struct sec_cipher_req *c_req = &req->c_req;
1213 if (req->req_id >= 0)
1214 return sec_cipher_map_inner(ctx, req, src, dst);
1235 if (req->use_pbuf) {
1236 ret = sec_cipher_pbuf_map(ctx, req, src);
1251 ret = sec_cipher_map_sgl(dev, req, src, dst);
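
sec_cipher_map (lines 1204-1251) chooses among three buffer strategies: requests that hold an inner request id reuse the queue's preallocated resources (sec_cipher_map_inner), small payloads are copied through a flat pbuf, and everything else is mapped into per-request hardware scatter-gather lists. In the driver the pbuf decision (use_pbuf) is made earlier, during parameter checking against pbuf_supported and the payload size (line 2504), and the inner path can use a preallocated pbuf as well; the sketch below collapses all of that into one illustrative decision with placeholder names and limits.

#include <stdbool.h>
#include <stddef.h>

/* Placeholder for the driver's pbuf capacity (SEC_PBUF_SZ in the source). */
#define PBUF_LIMIT 512

enum buf_strategy { BUF_INNER_POOL, BUF_PBUF_COPY, BUF_DYNAMIC_SGL };

struct map_hint {
    int req_id;             /* >= 0: the request holds a slot in the per-queue pool */
    bool pbuf_supported;    /* queue supports the flat pbuf copy path */
    size_t payload;         /* cryptlen (+ assoclen for AEAD) */
};

static enum buf_strategy pick_strategy(const struct map_hint *h)
{
    if (h->req_id >= 0)
        return BUF_INNER_POOL;  /* sec_cipher_map_inner(): preallocated resources */
    if (h->pbuf_supported && h->payload <= PBUF_LIMIT)
        return BUF_PBUF_COPY;   /* sec_cipher_pbuf_map(): copy through a flat buffer */
    return BUF_DYNAMIC_SGL;     /* sec_cipher_map_sgl(): per-request hardware SGLs */
}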
1270 static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
1273 struct sec_aead_req *a_req = &req->aead_req;
1274 struct sec_cipher_req *c_req = &req->c_req;
1277 if (req->req_id >= 0) {
1278 if (req->use_pbuf) {
1279 sec_cipher_pbuf_unmap(ctx, req, dst);
1283 hisi_acc_sg_buf_unmap(dev, src, req->in, DMA_TO_DEVICE);
1285 hisi_acc_sg_buf_unmap(dev, src, req->in, DMA_BIDIRECTIONAL);
1291 if (req->use_pbuf) {
1292 sec_cipher_pbuf_unmap(ctx, req, dst);
1295 sec_cipher_put_hw_sgl(dev, dst, req->buf.out_dma, DMA_FROM_DEVICE);
1296 sec_cipher_put_hw_sgl(dev, src, req->buf.in_dma, DMA_TO_DEVICE);
1298 sec_cipher_put_hw_sgl(dev, src, req->buf.in_dma, DMA_BIDIRECTIONAL);
1309 static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
1311 struct skcipher_request *sq = req->c_req.sk_req;
1313 return sec_cipher_map(ctx, req, sq->src, sq->dst);
1316 static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
1318 struct skcipher_request *sq = req->c_req.sk_req;
1320 sec_cipher_unmap(ctx, req, sq->src, sq->dst);
1462 static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
1464 struct aead_request *aq = req->aead_req.aead_req;
1466 return sec_cipher_map(ctx, req, aq->src, aq->dst);
1469 static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
1471 struct aead_request *aq = req->aead_req.aead_req;
1473 sec_cipher_unmap(ctx, req, aq->src, aq->dst);
1476 static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
1480 ret = ctx->req_op->buf_map(ctx, req);
1484 ctx->req_op->do_transfer(ctx, req);
1486 ret = ctx->req_op->bd_fill(ctx, req);
1493 ctx->req_op->buf_unmap(ctx, req);
1497 static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
1499 ctx->req_op->buf_unmap(ctx, req);
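
sec_request_transfer (lines 1476-1493) drives the per-algorithm operation table: map the buffers, stage whatever must be copied (IV, MAC), fill the block descriptor, and unmap again if descriptor filling fails; sec_request_untransfer (line 1499) is the matching unwind. A compact sketch of that table-driven pipeline, with invented names for the context and request types:

struct fake_ctx;
struct fake_req;

/* Invented operation table mirroring the shape of the driver's req_op hooks. */
struct req_ops {
    int  (*buf_map)(struct fake_ctx *, struct fake_req *);
    void (*do_transfer)(struct fake_ctx *, struct fake_req *);
    int  (*bd_fill)(struct fake_ctx *, struct fake_req *);
    void (*buf_unmap)(struct fake_ctx *, struct fake_req *);
};

static int request_transfer(const struct req_ops *ops,
                            struct fake_ctx *ctx, struct fake_req *req)
{
    int ret = ops->buf_map(ctx, req);
    if (ret)
        return ret;

    ops->do_transfer(ctx, req);     /* stage IV/MAC into the mapped buffers */

    ret = ops->bd_fill(ctx, req);   /* build the hardware descriptor */
    if (ret)
        ops->buf_unmap(ctx, req);   /* unwind the DMA mapping on error */

    return ret;
}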
1502 static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
1504 struct skcipher_request *sk_req = req->c_req.sk_req;
1505 struct sec_cipher_req *c_req = &req->c_req;
1510 static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
1513 struct sec_cipher_req *c_req = &req->c_req;
1514 struct sec_sqe *sec_sqe = &req->sec_sqe;
1523 if (req->req_id < 0) {
1524 sec_sqe->type2.data_src_addr = cpu_to_le64(req->buf.in_dma);
1525 sec_sqe->type2.data_dst_addr = cpu_to_le64(req->buf.out_dma);
1527 sec_sqe->type2.data_src_addr = cpu_to_le64(req->in_dma);
1547 if (req->use_pbuf) {
1565 static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
1567 struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3;
1569 struct sec_cipher_req *c_req = &req->c_req;
1577 if (req->req_id < 0) {
1578 sec_sqe3->data_src_addr = cpu_to_le64(req->buf.in_dma);
1579 sec_sqe3->data_dst_addr = cpu_to_le64(req->buf.out_dma);
1581 sec_sqe3->data_src_addr = cpu_to_le64(req->in_dma);
1602 if (req->use_pbuf) {
1616 sec_sqe3->tag = cpu_to_le64((unsigned long)req);
1632 static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
1634 struct aead_request *aead_req = req->aead_req.aead_req;
1635 struct skcipher_request *sk_req = req->c_req.sk_req;
1636 u32 iv_size = req->ctx->c_ctx.ivsize;
1643 sgl = req->c_req.encrypt ? sk_req->dst : sk_req->src;
1647 sgl = req->c_req.encrypt ? aead_req->dst : aead_req->src;
1652 if (req->ctx->c_ctx.c_mode == SEC_CMODE_CBC) {
1656 dev_err(req->ctx->dev, "copy output iv error!\n");
1663 static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
1666 struct sec_qp_ctx *qp_ctx = req->qp_ctx;
1668 if (req->req_id >= 0)
1669 sec_free_req_id(req);
1673 ctx->c_ctx.c_mode == SEC_CMODE_CTR) && req->c_req.encrypt)
1674 sec_update_iv(req, SEC_SKCIPHER);
1676 crypto_request_complete(req->base, err);
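
Together, sec_update_iv (lines 1632-1656) and the skcipher callback (lines 1663-1676) propagate chaining IVs: for CBC the next IV is the final ciphertext block, read back out of the destination scatterlist, while for CTR the counter is advanced by the number of blocks processed. On encryption this happens in the completion callback once the ciphertext exists; on decryption it is done before the send (lines 1975-1977), because an in-place operation would destroy the source data. A user-space sketch of the two updates over flat buffers, with the scatterlist walking elided:

#include <string.h>

#define AES_BLOCK 16    /* block and IV size for the CBC/CTR cases sketched here */

/* CBC chaining: the IV for a follow-up request is the last ciphertext block;
 * the driver pulls it from the dst scatterlist, modelled here as a flat copy. */
static void cbc_next_iv(const unsigned char *ct, size_t ct_len,
                        unsigned char iv[AES_BLOCK])
{
    if (ct_len >= AES_BLOCK)
        memcpy(iv, ct + ct_len - AES_BLOCK, AES_BLOCK);
}

/* CTR chaining: advance the big-endian counter by the number of blocks just
 * processed (a partial final block still consumes a counter value). */
static void ctr_next_iv(unsigned char iv[AES_BLOCK], unsigned long blocks)
{
    for (int i = AES_BLOCK - 1; i >= 0 && blocks; i--) {
        unsigned long sum = iv[i] + (blocks & 0xff);

        iv[i] = (unsigned char)sum;
        blocks = (blocks >> 8) + (sum >> 8);
    }
}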
1680 static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
1682 struct aead_request *aead_req = req->aead_req.aead_req;
1685 struct sec_aead_req *a_req = &req->aead_req;
1686 struct sec_cipher_req *c_req = &req->c_req;
1724 static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
1726 struct aead_request *aead_req = req->aead_req.aead_req;
1727 struct sec_aead_req *a_req = &req->aead_req;
1728 struct sec_cipher_req *c_req = &req->c_req;
1738 set_aead_auth_iv(ctx, req);
1746 struct sec_req *req, struct sec_sqe *sec_sqe)
1748 struct sec_aead_req *a_req = &req->aead_req;
1774 struct sec_req *req, struct sec_sqe3 *sqe3)
1776 struct sec_aead_req *a_req = &req->aead_req;
1801 struct sec_req *req, struct sec_sqe *sec_sqe)
1803 struct sec_aead_req *a_req = &req->aead_req;
1804 struct sec_cipher_req *c_req = &req->c_req;
1833 static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
1836 struct sec_sqe *sec_sqe = &req->sec_sqe;
1839 ret = sec_skcipher_bd_fill(ctx, req);
1847 sec_auth_bd_fill_xcm(auth_ctx, req->c_req.encrypt, req, sec_sqe);
1849 sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);
1855 struct sec_req *req, struct sec_sqe3 *sqe3)
1857 struct sec_aead_req *a_req = &req->aead_req;
1858 struct sec_cipher_req *c_req = &req->c_req;
1888 static int sec_aead_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
1891 struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3;
1894 ret = sec_skcipher_bd_fill_v3(ctx, req);
1902 sec_auth_bd_fill_xcm_v3(auth_ctx, req->c_req.encrypt,
1903 req, sec_sqe3);
1905 sec_auth_bd_fill_ex_v3(auth_ctx, req->c_req.encrypt,
1906 req, sec_sqe3);
1911 static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
1913 struct aead_request *a_req = req->aead_req.aead_req;
1916 struct sec_qp_ctx *qp_ctx = req->qp_ctx;
1919 if (!err && req->c_req.encrypt) {
1921 sec_update_iv(req, SEC_AEAD);
1923 sz = sg_pcopy_from_buffer(a_req->dst, sg_nents(a_req->dst), req->aead_req.out_mac,
1931 if (req->req_id >= 0)
1932 sec_free_req_id(req);
1934 crypto_request_complete(req->base, err);
1938 static void sec_request_uninit(struct sec_req *req)
1940 if (req->req_id >= 0)
1941 sec_free_req_id(req);
1944 static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
1951 req->req_id = sec_alloc_req_id(req, qp_ctx);
1952 if (req->req_id >= 0)
1956 req->qp_ctx = qp_ctx;
1957 req->backlog = &qp_ctx->backlog;
1962 static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
1966 ret = sec_request_init(ctx, req);
1970 ret = sec_request_transfer(ctx, req);
1975 if (!req->c_req.encrypt && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
1977 sec_update_iv(req, ctx->alg_type);
1979 ret = ctx->req_op->bd_send(ctx, req);
1989 if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
1991 memcpy(req->c_req.sk_req->iv, req->c_req.c_ivin,
1994 memcpy(req->aead_req.aead_req->iv, req->c_req.c_ivin,
1998 sec_request_untransfer(ctx, req);
2001 sec_request_uninit(req);
2003 ret = sec_aead_soft_crypto(ctx, req->aead_req.aead_req,
2004 req->c_req.encrypt);
2006 ret = sec_skcipher_soft_crypto(ctx, req->c_req.sk_req,
2007 req->c_req.encrypt);
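
sec_process (lines 1962-2007) is the common engine behind both algorithms: initialise the request (taking an inner request id when one is free, lines 1944-1957), transfer buffers and build the descriptor, capture the follow-up IV for CBC/CTR decryption up front, then send. If the send fails on a full queue and backlogging is not allowed, the caller-visible IV is restored from the staged copy (lines 1989-1994), the request is unwound, and the operation is retried with the software fallback. A control-flow sketch with stubbed helpers standing in for the driver's functions:

#include <errno.h>
#include <stdbool.h>

struct proc_req { bool encrypt; bool cbc; bool may_backlog; };

/* Stubs; the names are illustrative, not the driver's API. */
static int  req_init(struct proc_req *r)        { (void)r; return 0; }
static int  req_transfer(struct proc_req *r)    { (void)r; return 0; }
static void save_next_iv(struct proc_req *r)    { (void)r; }
static void restore_user_iv(struct proc_req *r) { (void)r; }
static int  bd_send(struct proc_req *r)         { (void)r; return -EBUSY; }
static void req_untransfer(struct proc_req *r)  { (void)r; }
static void req_uninit(struct proc_req *r)      { (void)r; }
static int  soft_crypto(struct proc_req *r)     { (void)r; return 0; }

static int process(struct proc_req *r)
{
    int ret = req_init(r);
    if (ret)
        return ret;

    ret = req_transfer(r);
    if (ret)
        goto err_uninit;

    /* Decryption consumes the chaining IV, so capture it before the send. */
    if (!r->encrypt && r->cbc)
        save_next_iv(r);

    ret = bd_send(r);
    if (ret == -EINPROGRESS || (ret == -EBUSY && r->may_backlog))
        return ret;

    /* Queue full and no backlog allowed: undo everything and go soft. */
    if (r->cbc && !r->encrypt)
        restore_user_iv(r);
    req_untransfer(r);
err_uninit:
    req_uninit(r);
    return soft_crypto(r);      /* retry the whole request in software */
}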
2318 struct sec_req *req = skcipher_request_ctx_dma(sk_req);
2329 req->flag = sk_req->base.flags;
2330 req->c_req.sk_req = sk_req;
2331 req->c_req.encrypt = encrypt;
2332 req->ctx = ctx;
2333 req->base = &sk_req->base;
2335 ret = sec_skcipher_param_check(ctx, req, &need_fallback);
2342 return ctx->req_op->process(ctx, req);
2443 struct aead_request *req = sreq->aead_req.aead_req;
2444 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2452 if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
2453 req->assoclen > SEC_MAX_AAD_LEN))
2457 if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN))
2460 ret = aead_iv_demension_check(req);
2478 struct aead_request *req = sreq->aead_req.aead_req;
2482 if (unlikely(!req->src || !req->dst)) {
2504 if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
2544 struct sec_req *req = aead_request_ctx_dma(a_req);
2550 req->flag = a_req->base.flags;
2551 req->aead_req.aead_req = a_req;
2552 req->c_req.encrypt = encrypt;
2553 req->ctx = ctx;
2554 req->base = &a_req->base;
2555 req->c_req.c_len = a_req->cryptlen - (req->c_req.encrypt ? 0 : sz);
2557 ret = sec_aead_param_check(ctx, req, &need_fallback);
2564 return ctx->req_op->process(ctx, req);
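
The AEAD entry point (lines 2544-2564) wires the crypto API request into the driver's sec_req and fixes up the cipher length for direction: on decryption the incoming cryptlen still includes the authentication tag, so authsize is subtracted before anything is mapped or filled (line 2555). A one-function sketch of that convention:

#include <stddef.h>

/* On AEAD decryption, cryptlen covers ciphertext plus tag, so the amount of
 * data actually run through the cipher is cryptlen - authsize; on encryption
 * cryptlen is plaintext only. Mirrors line 2555 above. */
static size_t aead_cipher_len(size_t cryptlen, size_t authsize, int encrypt)
{
    return encrypt ? cryptlen : cryptlen - authsize;
}

For example, a 64-byte decrypt request with a 16-byte tag runs 48 bytes through the cipher, while the same 64 bytes on encryption are all plaintext.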