Lines matching defs:edesc

Every source line that declares or dereferences the rsa_edesc extended descriptor. The identifiers (rsa_edesc, sec4_sg, caam job-ring helpers) are consistent with the CAAM RSA driver, drivers/crypto/caam/caampkc.c in the Linux kernel; each entry below is prefixed with its line number in that file.

49 static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
54 dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
55 dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE);
57 if (edesc->sec4_sg_bytes)
58 dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
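The fragments at lines 49-58 fit together as the I/O unmap helper. A reconstruction from those matches, assuming the usual CAAM request-context layout (the req_ctx local is not shown in the listing):

    static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
                             struct akcipher_request *req)
    {
        struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

        /* Undo the scatterlist mappings taken when the request was built. */
        dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
        dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE);

        /* The sec4 S/G link table is mapped only when one was needed. */
        if (edesc->sec4_sg_bytes)
            dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
                             DMA_TO_DEVICE);
    }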
62 static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
68 struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
74 static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
80 struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
86 static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
92 struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
103 static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
109 struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
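Lines 62-109 name four PDB unmap helpers, one per RSA key form. A sketch of the simplest, form 1; the PDB field names (n_dma, d_dma) and the key size fields are assumptions based on the CAAM PDB layout and are not confirmed by the matches above:

    static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
                                  struct akcipher_request *req)
    {
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct caam_rsa_key *key = &ctx->key;
        struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;

        /* Form 1 carries only the modulus n and the private exponent d. */
        dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
        dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
    }

Forms 2 and 3 unmap the additional CRT components (p, q, and for form 3 dp, dq, qinv) the same way.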
128 struct rsa_edesc *edesc;
135 edesc = req_ctx->edesc;
136 has_bklog = edesc->bklog;
138 rsa_pub_unmap(dev, edesc, req);
139 rsa_io_unmap(dev, edesc, req);
140 kfree(edesc);
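Lines 128-140 are the tail of the public-operation completion callback. A sketch of its likely shape; caam_jr_strstatus, the jrp->engine handoff, and crypto_finalize_akcipher_request are assumptions drawn from the standard CAAM job-ring completion pattern:

    static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
    {
        struct akcipher_request *req = context;
        struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
        struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
        struct rsa_edesc *edesc;
        bool has_bklog;
        int ecode = 0;

        if (err)
            ecode = caam_jr_strstatus(dev, err);

        /* Cache the backlog flag: edesc is freed before completion. */
        edesc = req_ctx->edesc;
        has_bklog = edesc->bklog;

        rsa_pub_unmap(dev, edesc, req);
        rsa_io_unmap(dev, edesc, req);
        kfree(edesc);

        /* Backlogged requests must be finalized through crypto-engine. */
        if (!has_bklog)
            akcipher_request_complete(req, ecode);
        else
            crypto_finalize_akcipher_request(jrp->engine, req, ecode);
    }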
161 struct rsa_edesc *edesc;
168 edesc = req_ctx->edesc;
169 has_bklog = edesc->bklog;
173 rsa_priv_f1_unmap(dev, edesc, req);
176 rsa_priv_f2_unmap(dev, edesc, req);
179 rsa_priv_f3_unmap(dev, edesc, req);
182 rsa_io_unmap(dev, edesc, req);
183 kfree(edesc);
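The private-side callback at lines 161-183 differs only in dispatching the unmap on the private-key form, as the three adjacent calls at lines 173-179 suggest. A sketch of that switch (key->priv_form and the FORM1/FORM2/FORM3 constants are assumed names):

    switch (key->priv_form) {
    case FORM1:
        rsa_priv_f1_unmap(dev, edesc, req);
        break;
    case FORM2:
        rsa_priv_f2_unmap(dev, edesc, req);
        break;
    case FORM3:
        rsa_priv_f3_unmap(dev, edesc, req);
    }

    rsa_io_unmap(dev, edesc, req);
    kfree(edesc);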
257 struct rsa_edesc *edesc;
320 /* allocate space for base edesc, hw desc commands and link tables */
321 edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes, flags);
322 if (!edesc)
325 edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
327 dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size,
332 edesc->sec4_sg + !!diff_size, 0);
336 edesc->sec4_sg + sec4_sg_index, 0);
339 edesc->src_nents = src_nents;
340 edesc->dst_nents = dst_nents;
342 req_ctx->edesc = edesc;
345 return edesc;
347 edesc->mapped_src_nents = mapped_src_nents;
348 edesc->mapped_dst_nents = mapped_dst_nents;
350 edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
352 if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
357 edesc->sec4_sg_bytes = sec4_sg_bytes;
360 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
361 edesc->sec4_sg_bytes, 1);
363 return edesc;
366 kfree(edesc);
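Lines 257-366 cover the allocator. The key idea, visible at lines 321 and 325, is a single allocation laid out as [edesc | hw descriptor | sec4 S/G table]. A sketch of that core; error unwinding of the already-mapped src/dst scatterlists is omitted here but needed in full:

    /* One allocation holds the edesc header, the hardware job
     * descriptor (desclen bytes) and the sec4 S/G link table. */
    edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes, flags);
    if (!edesc)
        return ERR_PTR(-ENOMEM);

    /* The link table begins right after the hw_desc commands. */
    edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;

    /* ... populate the table entries and save nents (lines 327-340) ... */

    if (!sec4_sg_bytes)
        return edesc;   /* src and dst both contiguous: no table needed */

    edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
                                        sec4_sg_bytes, DMA_TO_DEVICE);
    if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
        kfree(edesc);
        return ERR_PTR(-ENOMEM);
    }
    edesc->sec4_sg_bytes = sec4_sg_bytes;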
383 u32 *desc = req_ctx->edesc->hw_desc;
386 req_ctx->edesc->bklog = true;
394 rsa_pub_unmap(jrdev, req_ctx->edesc, req);
395 rsa_io_unmap(jrdev, req_ctx->edesc, req);
396 kfree(req_ctx->edesc);
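Lines 383-396 belong to the crypto-engine worker that resubmits backlogged requests. A sketch, assuming the function is the engine's do_one_request hook and that ctx->dev is the job-ring device:

    static int akcipher_do_one_req(struct crypto_engine *engine, void *areq)
    {
        struct akcipher_request *req = container_of(areq,
                                                    struct akcipher_request,
                                                    base);
        struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(crypto_akcipher_reqtfm(req));
        struct device *jrdev = ctx->dev;
        u32 *desc = req_ctx->edesc->hw_desc;
        int ret;

        /* Mark the request so the done callback finalizes via the engine. */
        req_ctx->edesc->bklog = true;

        ret = caam_jr_enqueue(jrdev, desc, req_ctx->akcipher_op_done, req);

        /* If the ring is full and the engine can retry, hand it back. */
        if (ret == -ENOSPC && engine->retry_support)
            return ret;

        if (ret != -EINPROGRESS) {
            rsa_pub_unmap(jrdev, req_ctx->edesc, req);
            rsa_io_unmap(jrdev, req_ctx->edesc, req);
            kfree(req_ctx->edesc);
        } else {
            ret = 0;
        }
        return ret;
    }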
405 struct rsa_edesc *edesc)
412 struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
428 if (edesc->mapped_src_nents > 1) {
430 pdb->f_dma = edesc->sec4_sg_dma;
431 sec4_sg_index += edesc->mapped_src_nents;
436 if (edesc->mapped_dst_nents > 1) {
438 pdb->g_dma = edesc->sec4_sg_dma +
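In the public PDB setup (lines 405-438), f is the input and g the output; whenever a mapped scatterlist has more than one segment, the PDB points into the pre-built sec4 table instead of at a flat DMA address. A sketch of that selection; the sgf flag names and struct sec4_sg_entry are assumptions from the CAAM PDB headers:

    int sec4_sg_index = 0;

    if (edesc->mapped_src_nents > 1) {
        pdb->sgf |= RSA_PDB_SGF_F;          /* input via S/G table */
        pdb->f_dma = edesc->sec4_sg_dma;
        sec4_sg_index += edesc->mapped_src_nents;
    } else {
        pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
    }

    if (edesc->mapped_dst_nents > 1) {
        pdb->sgf |= RSA_PDB_SGF_G;          /* output via S/G table */
        pdb->g_dma = edesc->sec4_sg_dma +
                     sec4_sg_index * sizeof(struct sec4_sg_entry);
    } else {
        pdb->g_dma = sg_dma_address(req->dst);
    }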
451 struct rsa_edesc *edesc)
457 struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
473 if (edesc->mapped_src_nents > 1) {
475 pdb->g_dma = edesc->sec4_sg_dma;
476 sec4_sg_index += edesc->mapped_src_nents;
484 if (edesc->mapped_dst_nents > 1) {
486 pdb->f_dma = edesc->sec4_sg_dma +
498 struct rsa_edesc *edesc)
504 struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
539 if (edesc->mapped_src_nents > 1) {
541 pdb->g_dma = edesc->sec4_sg_dma;
542 sec4_sg_index += edesc->mapped_src_nents;
549 if (edesc->mapped_dst_nents > 1) {
551 pdb->f_dma = edesc->sec4_sg_dma +
575 struct rsa_edesc *edesc)
581 struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
628 if (edesc->mapped_src_nents > 1) {
630 pdb->g_dma = edesc->sec4_sg_dma;
631 sec4_sg_index += edesc->mapped_src_nents;
638 if (edesc->mapped_dst_nents > 1) {
640 pdb->f_dma = edesc->sec4_sg_dma +
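The three private-key PDB setups (lines 451-640) repeat one pattern, with the roles reversed relative to the public case: g is the ciphertext input and f the plaintext output, which is why g_dma tracks mapped_src_nents and f_dma tracks mapped_dst_nents above. A sketch of the shared fragment; the flag names are assumed:

    if (edesc->mapped_src_nents > 1) {
        pdb->sgf |= RSA_PRIV_PDB_SGF_G;     /* input via S/G table */
        pdb->g_dma = edesc->sec4_sg_dma;
        sec4_sg_index += edesc->mapped_src_nents;
    } else {
        pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
    }

    if (edesc->mapped_dst_nents > 1) {
        pdb->sgf |= RSA_PRIV_PDB_SGF_F;     /* output via S/G table */
        pdb->f_dma = edesc->sec4_sg_dma +
                     sec4_sg_index * sizeof(struct sec4_sg_entry);
    } else {
        pdb->f_dma = sg_dma_address(req->dst);
    }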
677 struct rsa_edesc *edesc = req_ctx->edesc;
678 u32 *desc = edesc->hw_desc;
696 rsa_priv_f1_unmap(jrdev, edesc, req);
699 rsa_priv_f2_unmap(jrdev, edesc, req);
702 rsa_priv_f3_unmap(jrdev, edesc, req);
705 rsa_pub_unmap(jrdev, edesc, req);
707 rsa_io_unmap(jrdev, edesc, req);
708 kfree(edesc);
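Lines 677-708 are the common enqueue helper: only requests flagged MAY_BACKLOG take the crypto-engine detour, everything else goes straight into the job ring, and a synchronous failure unwinds the mappings on the spot. A sketch of the core; the jrpriv/engine plumbing is an assumption:

    req_ctx->akcipher_op_done = cbk;

    /* The job ring is deep enough for normal traffic; only backlogged
     * requests are queued through crypto-engine. */
    if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
        ret = crypto_transfer_akcipher_request_to_engine(jrpriv->engine,
                                                         req);
    else
        ret = caam_jr_enqueue(jrdev, edesc->hw_desc, cbk, req);

    if (ret != -EINPROGRESS && ret != -EBUSY) {
        /* Synchronous failure: undo the PDB mapping for the key form
         * in use (the dispatch at lines 696-705), then the I/O. */
        rsa_pub_unmap(jrdev, edesc, req);   /* or the f1/f2/f3 variant */
        rsa_io_unmap(jrdev, edesc, req);
        kfree(edesc);
    }
    return ret;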
720 struct rsa_edesc *edesc;
733 edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
734 if (IS_ERR(edesc))
735 return PTR_ERR(edesc);
738 ret = set_rsa_pub_pdb(req, edesc);
743 init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);
748 rsa_io_unmap(jrdev, edesc, req);
749 kfree(edesc);
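Lines 720-749 show the whole encrypt path: allocate an edesc sized for the public-key descriptor, fill the PDB, build the job descriptor, enqueue; if the PDB setup fails, only the I/O mappings need undoing. A sketch stitched from those matches (the enqueue call and callback name are assumptions):

    edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
    if (IS_ERR(edesc))
        return PTR_ERR(edesc);

    ret = set_rsa_pub_pdb(req, edesc);
    if (ret)
        goto init_fail;

    init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

    return akcipher_enqueue_req(jrdev, rsa_pub_done, req);

init_fail:
    rsa_io_unmap(jrdev, edesc, req);
    kfree(edesc);
    return ret;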
758 struct rsa_edesc *edesc;
762 edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
763 if (IS_ERR(edesc))
764 return PTR_ERR(edesc);
767 ret = set_rsa_priv_f1_pdb(req, edesc);
772 init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);
777 rsa_io_unmap(jrdev, edesc, req);
778 kfree(edesc);
787 struct rsa_edesc *edesc;
791 edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
792 if (IS_ERR(edesc))
793 return PTR_ERR(edesc);
796 ret = set_rsa_priv_f2_pdb(req, edesc);
801 init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);
806 rsa_io_unmap(jrdev, edesc, req);
807 kfree(edesc);
816 struct rsa_edesc *edesc;
820 edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
821 if (IS_ERR(edesc))
822 return PTR_ERR(edesc);
825 ret = set_rsa_priv_f3_pdb(req, edesc);
830 init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);
835 rsa_io_unmap(jrdev, edesc, req);
836 kfree(edesc);
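The three decrypt entry points (lines 758-836) are structurally identical to the encrypt path, differing only in descriptor length and PDB type; a caller presumably selects among them by private-key form. A hypothetical dispatcher to make the relationship explicit (caam_rsa_dec and key->priv_form are assumed names, not shown in the matches):

    static int caam_rsa_dec(struct akcipher_request *req)
    {
        struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(crypto_akcipher_reqtfm(req));
        struct caam_rsa_key *key = &ctx->key;

        /* Pick the SEC protocol matching how the key was loaded. */
        if (key->priv_form == FORM3)
            return caam_rsa_dec_priv_f3(req);   /* p, q, dp, dq, qinv */
        else if (key->priv_form == FORM2)
            return caam_rsa_dec_priv_f2(req);   /* d plus p, q */
        return caam_rsa_dec_priv_f1(req);       /* n, d only */
    }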