xref: /linux/drivers/crypto/caam/caampkc.c (revision e30685204711a6be40dec2622606950ccd37dafe)
1618b5dc4SHoria Geantă // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
28c419778STudor Ambarus /*
38c419778STudor Ambarus  * caam - Freescale FSL CAAM support for Public Key Cryptography
48c419778STudor Ambarus  *
58c419778STudor Ambarus  * Copyright 2016 Freescale Semiconductor, Inc.
6ae1dd17dSHoria Geantă  * Copyright 2018-2019, 2023 NXP
78c419778STudor Ambarus  *
88c419778STudor Ambarus  * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
98c419778STudor Ambarus  * all the key parameters as well as the input and output pointers.
108c419778STudor Ambarus  */
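/*
 * Informal overview of the request flow implemented in this file:
 * rsa_edesc_alloc() maps the source/destination scatterlists and builds the
 * optional sec4 S/G tables, set_rsa_{pub,priv_f1,priv_f2,priv_f3}_pdb() fills
 * in the Protocol Data Block, init_rsa_*_desc() constructs the Job Descriptor,
 * and akcipher_enqueue_req() submits it either directly to a Job Ring or via
 * crypto-engine when the request may be backlogged.
 */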
118c419778STudor Ambarus #include "compat.h"
128c419778STudor Ambarus #include "regs.h"
138c419778STudor Ambarus #include "intern.h"
148c419778STudor Ambarus #include "jr.h"
158c419778STudor Ambarus #include "error.h"
168c419778STudor Ambarus #include "desc_constr.h"
178c419778STudor Ambarus #include "sg_sw_sec4.h"
188c419778STudor Ambarus #include "caampkc.h"
19199354d7SHerbert Xu #include <linux/dma-mapping.h>
20199354d7SHerbert Xu #include <linux/kernel.h>
218c419778STudor Ambarus 
22a1cf573eSAndrey Smirnov #define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB)
238c419778STudor Ambarus #define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
24a1cf573eSAndrey Smirnov 				 SIZEOF_RSA_PRIV_F1_PDB)
2552e26d77SRadu Alexe #define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
26a1cf573eSAndrey Smirnov 				 SIZEOF_RSA_PRIV_F2_PDB)
274a651b12SRadu Alexe #define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
28a1cf573eSAndrey Smirnov 				 SIZEOF_RSA_PRIV_F3_PDB)
29c3725f7cSIuliana Prodan #define CAAM_RSA_MAX_INPUT_SIZE	512 /* for a 4096-bit modulus */
30c3725f7cSIuliana Prodan 
31c3725f7cSIuliana Prodan /* buffer filled with zeros, used for padding */
32c3725f7cSIuliana Prodan static u8 *zero_buffer;
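/*
 * Note: zero_buffer is DMA-mapped per transform in caam_rsa_init_tfm() and
 * referenced through ctx->padding_dma in rsa_edesc_alloc() to left-pad inputs
 * that are shorter than the RSA modulus.
 */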
338c419778STudor Ambarus 
344e3a61c5SIuliana Prodan /*
354e3a61c5SIuliana Prodan  * variable used to avoid double free of resources in case
364e3a61c5SIuliana Prodan  * algorithm registration was unsuccessful
374e3a61c5SIuliana Prodan  */
384e3a61c5SIuliana Prodan static bool init_done;
394e3a61c5SIuliana Prodan 
4058068cfcSIuliana Prodan struct caam_akcipher_alg {
4158068cfcSIuliana Prodan 	struct akcipher_alg akcipher;
4258068cfcSIuliana Prodan 	bool registered;
4358068cfcSIuliana Prodan };
4458068cfcSIuliana Prodan 
458c419778STudor Ambarus static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
468c419778STudor Ambarus 			 struct akcipher_request *req)
478c419778STudor Ambarus {
483b2614cbSIuliana Prodan 	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
493b2614cbSIuliana Prodan 
508c419778STudor Ambarus 	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
513b2614cbSIuliana Prodan 	dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE);
528c419778STudor Ambarus 
538c419778STudor Ambarus 	if (edesc->sec4_sg_bytes)
548c419778STudor Ambarus 		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
558c419778STudor Ambarus 				 DMA_TO_DEVICE);
568c419778STudor Ambarus }
578c419778STudor Ambarus 
588c419778STudor Ambarus static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
598c419778STudor Ambarus 			  struct akcipher_request *req)
608c419778STudor Ambarus {
618c419778STudor Ambarus 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
624cb4f7c1SHerbert Xu 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
638c419778STudor Ambarus 	struct caam_rsa_key *key = &ctx->key;
648c419778STudor Ambarus 	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
658c419778STudor Ambarus 
668c419778STudor Ambarus 	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
678c419778STudor Ambarus 	dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
688c419778STudor Ambarus }
698c419778STudor Ambarus 
708c419778STudor Ambarus static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
718c419778STudor Ambarus 			      struct akcipher_request *req)
728c419778STudor Ambarus {
738c419778STudor Ambarus 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
744cb4f7c1SHerbert Xu 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
758c419778STudor Ambarus 	struct caam_rsa_key *key = &ctx->key;
768c419778STudor Ambarus 	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
778c419778STudor Ambarus 
788c419778STudor Ambarus 	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
798c419778STudor Ambarus 	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
808c419778STudor Ambarus }
818c419778STudor Ambarus 
8252e26d77SRadu Alexe static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
8352e26d77SRadu Alexe 			      struct akcipher_request *req)
8452e26d77SRadu Alexe {
8552e26d77SRadu Alexe 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
864cb4f7c1SHerbert Xu 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
8752e26d77SRadu Alexe 	struct caam_rsa_key *key = &ctx->key;
8852e26d77SRadu Alexe 	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
8952e26d77SRadu Alexe 	size_t p_sz = key->p_sz;
904bffaab3SHoria Geantă 	size_t q_sz = key->q_sz;
9152e26d77SRadu Alexe 
9252e26d77SRadu Alexe 	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
9352e26d77SRadu Alexe 	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
9452e26d77SRadu Alexe 	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
95f1bf9e60SHoria Geantă 	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
96f1bf9e60SHoria Geantă 	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
9752e26d77SRadu Alexe }
9852e26d77SRadu Alexe 
994a651b12SRadu Alexe static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
1004a651b12SRadu Alexe 			      struct akcipher_request *req)
1014a651b12SRadu Alexe {
1024a651b12SRadu Alexe 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
1034cb4f7c1SHerbert Xu 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
1044a651b12SRadu Alexe 	struct caam_rsa_key *key = &ctx->key;
1054a651b12SRadu Alexe 	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
1064a651b12SRadu Alexe 	size_t p_sz = key->p_sz;
1074bffaab3SHoria Geantă 	size_t q_sz = key->q_sz;
1084a651b12SRadu Alexe 
1094a651b12SRadu Alexe 	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
1104a651b12SRadu Alexe 	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
1114a651b12SRadu Alexe 	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
1124a651b12SRadu Alexe 	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
1134a651b12SRadu Alexe 	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
114f1bf9e60SHoria Geantă 	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
115f1bf9e60SHoria Geantă 	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
1164a651b12SRadu Alexe }
1174a651b12SRadu Alexe 
1188c419778STudor Ambarus /* RSA Job Completion handler */
1198c419778STudor Ambarus static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
1208c419778STudor Ambarus {
1218c419778STudor Ambarus 	struct akcipher_request *req = context;
122bf537950SIuliana Prodan 	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
123bf537950SIuliana Prodan 	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
1248c419778STudor Ambarus 	struct rsa_edesc *edesc;
1251984aaeeSHoria Geantă 	int ecode = 0;
12680994e3fSIuliana Prodan 	bool has_bklog;
1278c419778STudor Ambarus 
1288c419778STudor Ambarus 	if (err)
1291984aaeeSHoria Geantă 		ecode = caam_jr_strstatus(dev, err);
1308c419778STudor Ambarus 
131bf537950SIuliana Prodan 	edesc = req_ctx->edesc;
13280994e3fSIuliana Prodan 	has_bklog = edesc->bklog;
1338c419778STudor Ambarus 
1348c419778STudor Ambarus 	rsa_pub_unmap(dev, edesc, req);
1358c419778STudor Ambarus 	rsa_io_unmap(dev, edesc, req);
1368c419778STudor Ambarus 	kfree(edesc);
1378c419778STudor Ambarus 
138bf537950SIuliana Prodan 	/*
139bf537950SIuliana Prodan 	 * If the backlog flag is not set, the request is completed
140bf537950SIuliana Prodan 	 * by CAAM, not by the crypto engine.
141bf537950SIuliana Prodan 	 */
14280994e3fSIuliana Prodan 	if (!has_bklog)
1431984aaeeSHoria Geantă 		akcipher_request_complete(req, ecode);
144bf537950SIuliana Prodan 	else
145bf537950SIuliana Prodan 		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
1468c419778STudor Ambarus }
1478c419778STudor Ambarus 
148d53e44feSIuliana Prodan static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
1498c419778STudor Ambarus 			    void *context)
1508c419778STudor Ambarus {
1518c419778STudor Ambarus 	struct akcipher_request *req = context;
152d53e44feSIuliana Prodan 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
153bf537950SIuliana Prodan 	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
1544cb4f7c1SHerbert Xu 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
155d53e44feSIuliana Prodan 	struct caam_rsa_key *key = &ctx->key;
156bf537950SIuliana Prodan 	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
1578c419778STudor Ambarus 	struct rsa_edesc *edesc;
1581984aaeeSHoria Geantă 	int ecode = 0;
15980994e3fSIuliana Prodan 	bool has_bklog;
1608c419778STudor Ambarus 
1618c419778STudor Ambarus 	if (err)
1621984aaeeSHoria Geantă 		ecode = caam_jr_strstatus(dev, err);
1638c419778STudor Ambarus 
164bf537950SIuliana Prodan 	edesc = req_ctx->edesc;
16580994e3fSIuliana Prodan 	has_bklog = edesc->bklog;
1668c419778STudor Ambarus 
167d53e44feSIuliana Prodan 	switch (key->priv_form) {
168d53e44feSIuliana Prodan 	case FORM1:
1698c419778STudor Ambarus 		rsa_priv_f1_unmap(dev, edesc, req);
170d53e44feSIuliana Prodan 		break;
171d53e44feSIuliana Prodan 	case FORM2:
17252e26d77SRadu Alexe 		rsa_priv_f2_unmap(dev, edesc, req);
173d53e44feSIuliana Prodan 		break;
174d53e44feSIuliana Prodan 	case FORM3:
175d53e44feSIuliana Prodan 		rsa_priv_f3_unmap(dev, edesc, req);
17652e26d77SRadu Alexe 	}
17752e26d77SRadu Alexe 
1784a651b12SRadu Alexe 	rsa_io_unmap(dev, edesc, req);
1794a651b12SRadu Alexe 	kfree(edesc);
1804a651b12SRadu Alexe 
181bf537950SIuliana Prodan 	/*
182bf537950SIuliana Prodan 	 * If the backlog flag is not set, the request is completed
183bf537950SIuliana Prodan 	 * by CAAM, not by the crypto engine.
184bf537950SIuliana Prodan 	 */
18580994e3fSIuliana Prodan 	if (!has_bklog)
1861984aaeeSHoria Geantă 		akcipher_request_complete(req, ecode);
187bf537950SIuliana Prodan 	else
188bf537950SIuliana Prodan 		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
1894a651b12SRadu Alexe }
1904a651b12SRadu Alexe 
191c3725f7cSIuliana Prodan /**
1920beb2b60SLee Jones  * caam_rsa_count_leading_zeros - Count the leading zero bytes to be stripped
1930beb2b60SLee Jones  *                                from a given scatterlist
194c3725f7cSIuliana Prodan  *
195c3725f7cSIuliana Prodan  * @sgl   : scatterlist to count zeros from
196c3725f7cSIuliana Prodan  * @nbytes: maximum number of leading zero bytes to strip
197c3725f7cSIuliana Prodan  * @flags : operation flags
198c3725f7cSIuliana Prodan  */
1998a2a0dd3SHoria Geantă static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
2008a2a0dd3SHoria Geantă 					unsigned int nbytes,
2018a2a0dd3SHoria Geantă 					unsigned int flags)
2028a2a0dd3SHoria Geantă {
2038a2a0dd3SHoria Geantă 	struct sg_mapping_iter miter;
2048a2a0dd3SHoria Geantă 	int lzeros, ents;
2058a2a0dd3SHoria Geantă 	unsigned int len;
2068a2a0dd3SHoria Geantă 	unsigned int tbytes = nbytes;
2078a2a0dd3SHoria Geantă 	const u8 *buff;
2088a2a0dd3SHoria Geantă 
2098a2a0dd3SHoria Geantă 	ents = sg_nents_for_len(sgl, nbytes);
2108a2a0dd3SHoria Geantă 	if (ents < 0)
2118a2a0dd3SHoria Geantă 		return ents;
2128a2a0dd3SHoria Geantă 
2138a2a0dd3SHoria Geantă 	sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);
2148a2a0dd3SHoria Geantă 
2158a2a0dd3SHoria Geantă 	lzeros = 0;
2168a2a0dd3SHoria Geantă 	len = 0;
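	/*
	 * Walk the scatterlist with the sg mapping iterator: skip zero bytes
	 * inside the current mapped chunk, stop at the first non-zero byte,
	 * and advance to the next chunk once the current one is exhausted.
	 */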
2178a2a0dd3SHoria Geantă 	while (nbytes > 0) {
218c3725f7cSIuliana Prodan 		/* do not strip more than given bytes */
219c3725f7cSIuliana Prodan 		while (len && !*buff && lzeros < nbytes) {
2208a2a0dd3SHoria Geantă 			lzeros++;
2218a2a0dd3SHoria Geantă 			len--;
2228a2a0dd3SHoria Geantă 			buff++;
2238a2a0dd3SHoria Geantă 		}
2248a2a0dd3SHoria Geantă 
2258a2a0dd3SHoria Geantă 		if (len && *buff)
2268a2a0dd3SHoria Geantă 			break;
2278a2a0dd3SHoria Geantă 
228e3068520SGaurav Jain 		if (!sg_miter_next(&miter))
229e3068520SGaurav Jain 			break;
230e3068520SGaurav Jain 
2318a2a0dd3SHoria Geantă 		buff = miter.addr;
2328a2a0dd3SHoria Geantă 		len = miter.length;
2338a2a0dd3SHoria Geantă 
2348a2a0dd3SHoria Geantă 		nbytes -= lzeros;
2358a2a0dd3SHoria Geantă 		lzeros = 0;
2368a2a0dd3SHoria Geantă 	}
2378a2a0dd3SHoria Geantă 
2388a2a0dd3SHoria Geantă 	miter.consumed = lzeros;
2398a2a0dd3SHoria Geantă 	sg_miter_stop(&miter);
2408a2a0dd3SHoria Geantă 	nbytes -= lzeros;
2418a2a0dd3SHoria Geantă 
2428a2a0dd3SHoria Geantă 	return tbytes - nbytes;
2438a2a0dd3SHoria Geantă }
2448a2a0dd3SHoria Geantă 
2458c419778STudor Ambarus static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
2468c419778STudor Ambarus 					 size_t desclen)
2478c419778STudor Ambarus {
2488c419778STudor Ambarus 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
2494cb4f7c1SHerbert Xu 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
2508c419778STudor Ambarus 	struct device *dev = ctx->dev;
2518a2a0dd3SHoria Geantă 	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
252c3725f7cSIuliana Prodan 	struct caam_rsa_key *key = &ctx->key;
2538c419778STudor Ambarus 	struct rsa_edesc *edesc;
254019d62dbSHoria Geantă 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
255019d62dbSHoria Geantă 		       GFP_KERNEL : GFP_ATOMIC;
2568a2a0dd3SHoria Geantă 	int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
2578c419778STudor Ambarus 	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
2588c419778STudor Ambarus 	int src_nents, dst_nents;
259eff9771dSIuliana Prodan 	int mapped_src_nents, mapped_dst_nents;
260c3725f7cSIuliana Prodan 	unsigned int diff_size = 0;
2618a2a0dd3SHoria Geantă 	int lzeros;
2628a2a0dd3SHoria Geantă 
263c3725f7cSIuliana Prodan 	if (req->src_len > key->n_sz) {
264c3725f7cSIuliana Prodan 		/*
265c3725f7cSIuliana Prodan 		 * strip leading zeros and
266c3725f7cSIuliana Prodan 		 * return the number of zeros to skip
267c3725f7cSIuliana Prodan 		 */
268c3725f7cSIuliana Prodan 		lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len -
269c3725f7cSIuliana Prodan 						      key->n_sz, sg_flags);
2708a2a0dd3SHoria Geantă 		if (lzeros < 0)
2718a2a0dd3SHoria Geantă 			return ERR_PTR(lzeros);
2728a2a0dd3SHoria Geantă 
2733b2614cbSIuliana Prodan 		req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src,
2743b2614cbSIuliana Prodan 						      lzeros);
2753b2614cbSIuliana Prodan 		req_ctx->fixup_src_len = req->src_len - lzeros;
276c3725f7cSIuliana Prodan 	} else {
277c3725f7cSIuliana Prodan 		/*
278c3725f7cSIuliana Prodan 		 * input src is shorter than the key modulus n,
279c3725f7cSIuliana Prodan 		 * so there will be zero padding
280c3725f7cSIuliana Prodan 		 */
281c3725f7cSIuliana Prodan 		diff_size = key->n_sz - req->src_len;
2823b2614cbSIuliana Prodan 		req_ctx->fixup_src = req->src;
2833b2614cbSIuliana Prodan 		req_ctx->fixup_src_len = req->src_len;
284c3725f7cSIuliana Prodan 	}
2858c419778STudor Ambarus 
2863b2614cbSIuliana Prodan 	src_nents = sg_nents_for_len(req_ctx->fixup_src,
2873b2614cbSIuliana Prodan 				     req_ctx->fixup_src_len);
2888c419778STudor Ambarus 	dst_nents = sg_nents_for_len(req->dst, req->dst_len);
2898c419778STudor Ambarus 
290eff9771dSIuliana Prodan 	mapped_src_nents = dma_map_sg(dev, req_ctx->fixup_src, src_nents,
291eff9771dSIuliana Prodan 				      DMA_TO_DEVICE);
292eff9771dSIuliana Prodan 	if (unlikely(!mapped_src_nents)) {
293eff9771dSIuliana Prodan 		dev_err(dev, "unable to map source\n");
294eff9771dSIuliana Prodan 		return ERR_PTR(-ENOMEM);
295eff9771dSIuliana Prodan 	}
296eff9771dSIuliana Prodan 	mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
297eff9771dSIuliana Prodan 				      DMA_FROM_DEVICE);
298eff9771dSIuliana Prodan 	if (unlikely(!mapped_dst_nents)) {
299eff9771dSIuliana Prodan 		dev_err(dev, "unable to map destination\n");
300eff9771dSIuliana Prodan 		goto src_fail;
301eff9771dSIuliana Prodan 	}
302eff9771dSIuliana Prodan 
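	/*
	 * Size the input hw S/G table: none is needed for a single mapped
	 * source segment with no zero padding; otherwise reserve one entry
	 * per mapped source segment plus one entry for the padding chunk.
	 * Destination entries (when more than one) are appended after the
	 * input ones, with pad_sg_nents() rounding the counts up as needed.
	 */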
303eff9771dSIuliana Prodan 	if (!diff_size && mapped_src_nents == 1)
304c3725f7cSIuliana Prodan 		sec4_sg_len = 0; /* no need for an input hw s/g table */
305c3725f7cSIuliana Prodan 	else
306eff9771dSIuliana Prodan 		sec4_sg_len = mapped_src_nents + !!diff_size;
307c3725f7cSIuliana Prodan 	sec4_sg_index = sec4_sg_len;
308eff9771dSIuliana Prodan 
309eff9771dSIuliana Prodan 	if (mapped_dst_nents > 1)
310eff9771dSIuliana Prodan 		sec4_sg_len += pad_sg_nents(mapped_dst_nents);
311a5e5c133SHoria Geantă 	else
312a5e5c133SHoria Geantă 		sec4_sg_len = pad_sg_nents(sec4_sg_len);
3138c419778STudor Ambarus 
3148c419778STudor Ambarus 	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
3158c419778STudor Ambarus 
3168c419778STudor Ambarus 	/* allocate space for base edesc, hw desc commands and link tables */
317199354d7SHerbert Xu 	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes, flags);
3188c419778STudor Ambarus 	if (!edesc)
3198c419778STudor Ambarus 		goto dst_fail;
3208c419778STudor Ambarus 
3218c419778STudor Ambarus 	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
322c3725f7cSIuliana Prodan 	if (diff_size)
323c3725f7cSIuliana Prodan 		dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size,
324c3725f7cSIuliana Prodan 				   0);
3258c419778STudor Ambarus 
326c3725f7cSIuliana Prodan 	if (sec4_sg_index)
327059d73eeSHoria Geantă 		sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
3283b2614cbSIuliana Prodan 				   edesc->sec4_sg + !!diff_size, 0);
329c3725f7cSIuliana Prodan 
330eff9771dSIuliana Prodan 	if (mapped_dst_nents > 1)
331059d73eeSHoria Geantă 		sg_to_sec4_sg_last(req->dst, req->dst_len,
3328c419778STudor Ambarus 				   edesc->sec4_sg + sec4_sg_index, 0);
3338c419778STudor Ambarus 
3348c419778STudor Ambarus 	/* Save nents for later use in Job Descriptor */
3358c419778STudor Ambarus 	edesc->src_nents = src_nents;
3368c419778STudor Ambarus 	edesc->dst_nents = dst_nents;
3378c419778STudor Ambarus 
338bf537950SIuliana Prodan 	req_ctx->edesc = edesc;
339bf537950SIuliana Prodan 
3408c419778STudor Ambarus 	if (!sec4_sg_bytes)
3418c419778STudor Ambarus 		return edesc;
3428c419778STudor Ambarus 
343eff9771dSIuliana Prodan 	edesc->mapped_src_nents = mapped_src_nents;
344eff9771dSIuliana Prodan 	edesc->mapped_dst_nents = mapped_dst_nents;
345eff9771dSIuliana Prodan 
3468c419778STudor Ambarus 	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
3478c419778STudor Ambarus 					    sec4_sg_bytes, DMA_TO_DEVICE);
3488c419778STudor Ambarus 	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
3498c419778STudor Ambarus 		dev_err(dev, "unable to map S/G table\n");
3508c419778STudor Ambarus 		goto sec4_sg_fail;
3518c419778STudor Ambarus 	}
3528c419778STudor Ambarus 
3538c419778STudor Ambarus 	edesc->sec4_sg_bytes = sec4_sg_bytes;
3548c419778STudor Ambarus 
355c3725f7cSIuliana Prodan 	print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ",
356c3725f7cSIuliana Prodan 			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
357c3725f7cSIuliana Prodan 			     edesc->sec4_sg_bytes, 1);
358c3725f7cSIuliana Prodan 
3598c419778STudor Ambarus 	return edesc;
3608c419778STudor Ambarus 
3618c419778STudor Ambarus sec4_sg_fail:
3628c419778STudor Ambarus 	kfree(edesc);
363eff9771dSIuliana Prodan dst_fail:
364eff9771dSIuliana Prodan 	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
365eff9771dSIuliana Prodan src_fail:
366eff9771dSIuliana Prodan 	dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
3678c419778STudor Ambarus 	return ERR_PTR(-ENOMEM);
3688c419778STudor Ambarus }
3698c419778STudor Ambarus 
370bf537950SIuliana Prodan static int akcipher_do_one_req(struct crypto_engine *engine, void *areq)
371bf537950SIuliana Prodan {
372bf537950SIuliana Prodan 	struct akcipher_request *req = container_of(areq,
373bf537950SIuliana Prodan 						    struct akcipher_request,
374bf537950SIuliana Prodan 						    base);
375bf537950SIuliana Prodan 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
376bf537950SIuliana Prodan 	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
3774cb4f7c1SHerbert Xu 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
378bf537950SIuliana Prodan 	struct device *jrdev = ctx->dev;
379bf537950SIuliana Prodan 	u32 *desc = req_ctx->edesc->hw_desc;
380bf537950SIuliana Prodan 	int ret;
381bf537950SIuliana Prodan 
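	/*
	 * Mark the request as coming through the crypto-engine backlog path,
	 * so the completion callback finalizes it via
	 * crypto_finalize_akcipher_request() instead of completing it directly.
	 */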
382bf537950SIuliana Prodan 	req_ctx->edesc->bklog = true;
383bf537950SIuliana Prodan 
384bf537950SIuliana Prodan 	ret = caam_jr_enqueue(jrdev, desc, req_ctx->akcipher_op_done, req);
385bf537950SIuliana Prodan 
386087e1d71SGaurav Jain 	if (ret == -ENOSPC && engine->retry_support)
387087e1d71SGaurav Jain 		return ret;
388087e1d71SGaurav Jain 
389bf537950SIuliana Prodan 	if (ret != -EINPROGRESS) {
390bf537950SIuliana Prodan 		rsa_pub_unmap(jrdev, req_ctx->edesc, req);
391bf537950SIuliana Prodan 		rsa_io_unmap(jrdev, req_ctx->edesc, req);
392bf537950SIuliana Prodan 		kfree(req_ctx->edesc);
393bf537950SIuliana Prodan 	} else {
394bf537950SIuliana Prodan 		ret = 0;
395bf537950SIuliana Prodan 	}
396bf537950SIuliana Prodan 
397bf537950SIuliana Prodan 	return ret;
398bf537950SIuliana Prodan }
399bf537950SIuliana Prodan 
4008c419778STudor Ambarus static int set_rsa_pub_pdb(struct akcipher_request *req,
4018c419778STudor Ambarus 			   struct rsa_edesc *edesc)
4028c419778STudor Ambarus {
4038c419778STudor Ambarus 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
4043b2614cbSIuliana Prodan 	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
4054cb4f7c1SHerbert Xu 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
4068c419778STudor Ambarus 	struct caam_rsa_key *key = &ctx->key;
4078c419778STudor Ambarus 	struct device *dev = ctx->dev;
4088c419778STudor Ambarus 	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
4098c419778STudor Ambarus 	int sec4_sg_index = 0;
4108c419778STudor Ambarus 
4118c419778STudor Ambarus 	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
4128c419778STudor Ambarus 	if (dma_mapping_error(dev, pdb->n_dma)) {
4138c419778STudor Ambarus 		dev_err(dev, "Unable to map RSA modulus memory\n");
4148c419778STudor Ambarus 		return -ENOMEM;
4158c419778STudor Ambarus 	}
4168c419778STudor Ambarus 
4178c419778STudor Ambarus 	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
4188c419778STudor Ambarus 	if (dma_mapping_error(dev, pdb->e_dma)) {
4198c419778STudor Ambarus 		dev_err(dev, "Unable to map RSA public exponent memory\n");
4208c419778STudor Ambarus 		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
4218c419778STudor Ambarus 		return -ENOMEM;
4228c419778STudor Ambarus 	}
4238c419778STudor Ambarus 
424eff9771dSIuliana Prodan 	if (edesc->mapped_src_nents > 1) {
4258c419778STudor Ambarus 		pdb->sgf |= RSA_PDB_SGF_F;
4268c419778STudor Ambarus 		pdb->f_dma = edesc->sec4_sg_dma;
427eff9771dSIuliana Prodan 		sec4_sg_index += edesc->mapped_src_nents;
4288c419778STudor Ambarus 	} else {
4293b2614cbSIuliana Prodan 		pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
4308c419778STudor Ambarus 	}
4318c419778STudor Ambarus 
432eff9771dSIuliana Prodan 	if (edesc->mapped_dst_nents > 1) {
4338c419778STudor Ambarus 		pdb->sgf |= RSA_PDB_SGF_G;
4348c419778STudor Ambarus 		pdb->g_dma = edesc->sec4_sg_dma +
4358c419778STudor Ambarus 			     sec4_sg_index * sizeof(struct sec4_sg_entry);
4368c419778STudor Ambarus 	} else {
4378c419778STudor Ambarus 		pdb->g_dma = sg_dma_address(req->dst);
4388c419778STudor Ambarus 	}
4398c419778STudor Ambarus 
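	/* sgf also packs the public exponent size and the modulus size. */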
4408c419778STudor Ambarus 	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
4413b2614cbSIuliana Prodan 	pdb->f_len = req_ctx->fixup_src_len;
4428c419778STudor Ambarus 
4438c419778STudor Ambarus 	return 0;
4448c419778STudor Ambarus }
4458c419778STudor Ambarus 
4468c419778STudor Ambarus static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
4478c419778STudor Ambarus 			       struct rsa_edesc *edesc)
4488c419778STudor Ambarus {
4498c419778STudor Ambarus 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
4504cb4f7c1SHerbert Xu 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
4518c419778STudor Ambarus 	struct caam_rsa_key *key = &ctx->key;
4528c419778STudor Ambarus 	struct device *dev = ctx->dev;
4538c419778STudor Ambarus 	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
4548c419778STudor Ambarus 	int sec4_sg_index = 0;
4558c419778STudor Ambarus 
4568c419778STudor Ambarus 	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
4578c419778STudor Ambarus 	if (dma_mapping_error(dev, pdb->n_dma)) {
4588c419778STudor Ambarus 		dev_err(dev, "Unable to map modulus memory\n");
4598c419778STudor Ambarus 		return -ENOMEM;
4608c419778STudor Ambarus 	}
4618c419778STudor Ambarus 
4628c419778STudor Ambarus 	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
4638c419778STudor Ambarus 	if (dma_mapping_error(dev, pdb->d_dma)) {
4648c419778STudor Ambarus 		dev_err(dev, "Unable to map RSA private exponent memory\n");
4658c419778STudor Ambarus 		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
4668c419778STudor Ambarus 		return -ENOMEM;
4678c419778STudor Ambarus 	}
4688c419778STudor Ambarus 
469eff9771dSIuliana Prodan 	if (edesc->mapped_src_nents > 1) {
4708c419778STudor Ambarus 		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
4718c419778STudor Ambarus 		pdb->g_dma = edesc->sec4_sg_dma;
472eff9771dSIuliana Prodan 		sec4_sg_index += edesc->mapped_src_nents;
473eff9771dSIuliana Prodan 
4748c419778STudor Ambarus 	} else {
4753b2614cbSIuliana Prodan 		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
4763b2614cbSIuliana Prodan 
4773b2614cbSIuliana Prodan 		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
4788c419778STudor Ambarus 	}
4798c419778STudor Ambarus 
480eff9771dSIuliana Prodan 	if (edesc->mapped_dst_nents > 1) {
4818c419778STudor Ambarus 		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
4828c419778STudor Ambarus 		pdb->f_dma = edesc->sec4_sg_dma +
4838c419778STudor Ambarus 			     sec4_sg_index * sizeof(struct sec4_sg_entry);
4848c419778STudor Ambarus 	} else {
4858c419778STudor Ambarus 		pdb->f_dma = sg_dma_address(req->dst);
4868c419778STudor Ambarus 	}
4878c419778STudor Ambarus 
4888c419778STudor Ambarus 	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
4898c419778STudor Ambarus 
4908c419778STudor Ambarus 	return 0;
4918c419778STudor Ambarus }
4928c419778STudor Ambarus 
49352e26d77SRadu Alexe static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
49452e26d77SRadu Alexe 			       struct rsa_edesc *edesc)
49552e26d77SRadu Alexe {
49652e26d77SRadu Alexe 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
4974cb4f7c1SHerbert Xu 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
49852e26d77SRadu Alexe 	struct caam_rsa_key *key = &ctx->key;
49952e26d77SRadu Alexe 	struct device *dev = ctx->dev;
50052e26d77SRadu Alexe 	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
50152e26d77SRadu Alexe 	int sec4_sg_index = 0;
50252e26d77SRadu Alexe 	size_t p_sz = key->p_sz;
5034bffaab3SHoria Geantă 	size_t q_sz = key->q_sz;
50452e26d77SRadu Alexe 
50552e26d77SRadu Alexe 	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
50652e26d77SRadu Alexe 	if (dma_mapping_error(dev, pdb->d_dma)) {
50752e26d77SRadu Alexe 		dev_err(dev, "Unable to map RSA private exponent memory\n");
50852e26d77SRadu Alexe 		return -ENOMEM;
50952e26d77SRadu Alexe 	}
51052e26d77SRadu Alexe 
51152e26d77SRadu Alexe 	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
51252e26d77SRadu Alexe 	if (dma_mapping_error(dev, pdb->p_dma)) {
51352e26d77SRadu Alexe 		dev_err(dev, "Unable to map RSA prime factor p memory\n");
51452e26d77SRadu Alexe 		goto unmap_d;
51552e26d77SRadu Alexe 	}
51652e26d77SRadu Alexe 
51752e26d77SRadu Alexe 	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
51852e26d77SRadu Alexe 	if (dma_mapping_error(dev, pdb->q_dma)) {
51952e26d77SRadu Alexe 		dev_err(dev, "Unable to map RSA prime factor q memory\n");
52052e26d77SRadu Alexe 		goto unmap_p;
52152e26d77SRadu Alexe 	}
52252e26d77SRadu Alexe 
523f1bf9e60SHoria Geantă 	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
52452e26d77SRadu Alexe 	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
52552e26d77SRadu Alexe 		dev_err(dev, "Unable to map RSA tmp1 memory\n");
52652e26d77SRadu Alexe 		goto unmap_q;
52752e26d77SRadu Alexe 	}
52852e26d77SRadu Alexe 
529f1bf9e60SHoria Geantă 	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
53052e26d77SRadu Alexe 	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
53152e26d77SRadu Alexe 		dev_err(dev, "Unable to map RSA tmp2 memory\n");
53252e26d77SRadu Alexe 		goto unmap_tmp1;
53352e26d77SRadu Alexe 	}
53452e26d77SRadu Alexe 
535eff9771dSIuliana Prodan 	if (edesc->mapped_src_nents > 1) {
53652e26d77SRadu Alexe 		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
53752e26d77SRadu Alexe 		pdb->g_dma = edesc->sec4_sg_dma;
538eff9771dSIuliana Prodan 		sec4_sg_index += edesc->mapped_src_nents;
53952e26d77SRadu Alexe 	} else {
5403b2614cbSIuliana Prodan 		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
5413b2614cbSIuliana Prodan 
5423b2614cbSIuliana Prodan 		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
54352e26d77SRadu Alexe 	}
54452e26d77SRadu Alexe 
545eff9771dSIuliana Prodan 	if (edesc->mapped_dst_nents > 1) {
54652e26d77SRadu Alexe 		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
54752e26d77SRadu Alexe 		pdb->f_dma = edesc->sec4_sg_dma +
54852e26d77SRadu Alexe 			     sec4_sg_index * sizeof(struct sec4_sg_entry);
54952e26d77SRadu Alexe 	} else {
55052e26d77SRadu Alexe 		pdb->f_dma = sg_dma_address(req->dst);
55152e26d77SRadu Alexe 	}
55252e26d77SRadu Alexe 
55352e26d77SRadu Alexe 	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
55452e26d77SRadu Alexe 	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;
55552e26d77SRadu Alexe 
55652e26d77SRadu Alexe 	return 0;
55752e26d77SRadu Alexe 
55852e26d77SRadu Alexe unmap_tmp1:
559f1bf9e60SHoria Geantă 	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
56052e26d77SRadu Alexe unmap_q:
56152e26d77SRadu Alexe 	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
56252e26d77SRadu Alexe unmap_p:
56352e26d77SRadu Alexe 	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
56452e26d77SRadu Alexe unmap_d:
56552e26d77SRadu Alexe 	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
56652e26d77SRadu Alexe 
56752e26d77SRadu Alexe 	return -ENOMEM;
56852e26d77SRadu Alexe }
56952e26d77SRadu Alexe 
5704a651b12SRadu Alexe static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
5714a651b12SRadu Alexe 			       struct rsa_edesc *edesc)
5724a651b12SRadu Alexe {
5734a651b12SRadu Alexe 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
5744cb4f7c1SHerbert Xu 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
5754a651b12SRadu Alexe 	struct caam_rsa_key *key = &ctx->key;
5764a651b12SRadu Alexe 	struct device *dev = ctx->dev;
5774a651b12SRadu Alexe 	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
5784a651b12SRadu Alexe 	int sec4_sg_index = 0;
5794a651b12SRadu Alexe 	size_t p_sz = key->p_sz;
5804bffaab3SHoria Geantă 	size_t q_sz = key->q_sz;
5814a651b12SRadu Alexe 
5824a651b12SRadu Alexe 	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
5834a651b12SRadu Alexe 	if (dma_mapping_error(dev, pdb->p_dma)) {
5844a651b12SRadu Alexe 		dev_err(dev, "Unable to map RSA prime factor p memory\n");
5854a651b12SRadu Alexe 		return -ENOMEM;
5864a651b12SRadu Alexe 	}
5874a651b12SRadu Alexe 
5884a651b12SRadu Alexe 	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
5894a651b12SRadu Alexe 	if (dma_mapping_error(dev, pdb->q_dma)) {
5904a651b12SRadu Alexe 		dev_err(dev, "Unable to map RSA prime factor q memory\n");
5914a651b12SRadu Alexe 		goto unmap_p;
5924a651b12SRadu Alexe 	}
5934a651b12SRadu Alexe 
5944a651b12SRadu Alexe 	pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
5954a651b12SRadu Alexe 	if (dma_mapping_error(dev, pdb->dp_dma)) {
5964a651b12SRadu Alexe 		dev_err(dev, "Unable to map RSA exponent dp memory\n");
5974a651b12SRadu Alexe 		goto unmap_q;
5984a651b12SRadu Alexe 	}
5994a651b12SRadu Alexe 
6004a651b12SRadu Alexe 	pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
6014a651b12SRadu Alexe 	if (dma_mapping_error(dev, pdb->dq_dma)) {
6024a651b12SRadu Alexe 		dev_err(dev, "Unable to map RSA exponent dq memory\n");
6034a651b12SRadu Alexe 		goto unmap_dp;
6044a651b12SRadu Alexe 	}
6054a651b12SRadu Alexe 
6064a651b12SRadu Alexe 	pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
6074a651b12SRadu Alexe 	if (dma_mapping_error(dev, pdb->c_dma)) {
6084a651b12SRadu Alexe 		dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
6094a651b12SRadu Alexe 		goto unmap_dq;
6104a651b12SRadu Alexe 	}
6114a651b12SRadu Alexe 
612f1bf9e60SHoria Geantă 	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
6134a651b12SRadu Alexe 	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
6144a651b12SRadu Alexe 		dev_err(dev, "Unable to map RSA tmp1 memory\n");
6154a651b12SRadu Alexe 		goto unmap_qinv;
6164a651b12SRadu Alexe 	}
6174a651b12SRadu Alexe 
618f1bf9e60SHoria Geantă 	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
6194a651b12SRadu Alexe 	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
6204a651b12SRadu Alexe 		dev_err(dev, "Unable to map RSA tmp2 memory\n");
6214a651b12SRadu Alexe 		goto unmap_tmp1;
6224a651b12SRadu Alexe 	}
6234a651b12SRadu Alexe 
624eff9771dSIuliana Prodan 	if (edesc->mapped_src_nents > 1) {
6254a651b12SRadu Alexe 		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
6264a651b12SRadu Alexe 		pdb->g_dma = edesc->sec4_sg_dma;
627eff9771dSIuliana Prodan 		sec4_sg_index += edesc->mapped_src_nents;
6284a651b12SRadu Alexe 	} else {
6293b2614cbSIuliana Prodan 		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
6303b2614cbSIuliana Prodan 
6313b2614cbSIuliana Prodan 		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
6324a651b12SRadu Alexe 	}
6334a651b12SRadu Alexe 
634eff9771dSIuliana Prodan 	if (edesc->mapped_dst_nents > 1) {
6354a651b12SRadu Alexe 		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
6364a651b12SRadu Alexe 		pdb->f_dma = edesc->sec4_sg_dma +
6374a651b12SRadu Alexe 			     sec4_sg_index * sizeof(struct sec4_sg_entry);
6384a651b12SRadu Alexe 	} else {
6394a651b12SRadu Alexe 		pdb->f_dma = sg_dma_address(req->dst);
6404a651b12SRadu Alexe 	}
6414a651b12SRadu Alexe 
6424a651b12SRadu Alexe 	pdb->sgf |= key->n_sz;
6434a651b12SRadu Alexe 	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;
6444a651b12SRadu Alexe 
6454a651b12SRadu Alexe 	return 0;
6464a651b12SRadu Alexe 
6474a651b12SRadu Alexe unmap_tmp1:
648f1bf9e60SHoria Geantă 	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
6494a651b12SRadu Alexe unmap_qinv:
6504a651b12SRadu Alexe 	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
6514a651b12SRadu Alexe unmap_dq:
6524a651b12SRadu Alexe 	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
6534a651b12SRadu Alexe unmap_dp:
6544a651b12SRadu Alexe 	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
6554a651b12SRadu Alexe unmap_q:
6564a651b12SRadu Alexe 	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
6574a651b12SRadu Alexe unmap_p:
6584a651b12SRadu Alexe 	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
6594a651b12SRadu Alexe 
6604a651b12SRadu Alexe 	return -ENOMEM;
6614a651b12SRadu Alexe }
6624a651b12SRadu Alexe 
663bf537950SIuliana Prodan static int akcipher_enqueue_req(struct device *jrdev,
664bf537950SIuliana Prodan 				void (*cbk)(struct device *jrdev, u32 *desc,
665bf537950SIuliana Prodan 					    u32 err, void *context),
666bf537950SIuliana Prodan 				struct akcipher_request *req)
667bf537950SIuliana Prodan {
668bf537950SIuliana Prodan 	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
669bf537950SIuliana Prodan 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
6704cb4f7c1SHerbert Xu 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
671bf537950SIuliana Prodan 	struct caam_rsa_key *key = &ctx->key;
672bf537950SIuliana Prodan 	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
673bf537950SIuliana Prodan 	struct rsa_edesc *edesc = req_ctx->edesc;
674bf537950SIuliana Prodan 	u32 *desc = edesc->hw_desc;
675bf537950SIuliana Prodan 	int ret;
676bf537950SIuliana Prodan 
677bf537950SIuliana Prodan 	req_ctx->akcipher_op_done = cbk;
678bf537950SIuliana Prodan 	/*
679bf537950SIuliana Prodan 	 * Only backlog requests are sent to crypto-engine since the others
680bf537950SIuliana Prodan 	 * can be handled by CAAM, if free, especially since JR has up to 1024
681bf537950SIuliana Prodan 	 * entries (more than the 10 entries from crypto-engine).
682bf537950SIuliana Prodan 	 */
683bf537950SIuliana Prodan 	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
684bf537950SIuliana Prodan 		ret = crypto_transfer_akcipher_request_to_engine(jrpriv->engine,
685bf537950SIuliana Prodan 								 req);
686bf537950SIuliana Prodan 	else
687bf537950SIuliana Prodan 		ret = caam_jr_enqueue(jrdev, desc, cbk, req);
688bf537950SIuliana Prodan 
689bf537950SIuliana Prodan 	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
690bf537950SIuliana Prodan 		switch (key->priv_form) {
691bf537950SIuliana Prodan 		case FORM1:
692bf537950SIuliana Prodan 			rsa_priv_f1_unmap(jrdev, edesc, req);
693bf537950SIuliana Prodan 			break;
694bf537950SIuliana Prodan 		case FORM2:
695bf537950SIuliana Prodan 			rsa_priv_f2_unmap(jrdev, edesc, req);
696bf537950SIuliana Prodan 			break;
697bf537950SIuliana Prodan 		case FORM3:
698bf537950SIuliana Prodan 			rsa_priv_f3_unmap(jrdev, edesc, req);
699bf537950SIuliana Prodan 			break;
700bf537950SIuliana Prodan 		default:
701bf537950SIuliana Prodan 			rsa_pub_unmap(jrdev, edesc, req);
702bf537950SIuliana Prodan 		}
703bf537950SIuliana Prodan 		rsa_io_unmap(jrdev, edesc, req);
704bf537950SIuliana Prodan 		kfree(edesc);
705bf537950SIuliana Prodan 	}
706bf537950SIuliana Prodan 
707bf537950SIuliana Prodan 	return ret;
708bf537950SIuliana Prodan }
709bf537950SIuliana Prodan 
7108c419778STudor Ambarus static int caam_rsa_enc(struct akcipher_request *req)
7118c419778STudor Ambarus {
7128c419778STudor Ambarus 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
7134cb4f7c1SHerbert Xu 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
7148c419778STudor Ambarus 	struct caam_rsa_key *key = &ctx->key;
7158c419778STudor Ambarus 	struct device *jrdev = ctx->dev;
7168c419778STudor Ambarus 	struct rsa_edesc *edesc;
7178c419778STudor Ambarus 	int ret;
7188c419778STudor Ambarus 
7198c419778STudor Ambarus 	if (unlikely(!key->n || !key->e))
7208c419778STudor Ambarus 		return -EINVAL;
7218c419778STudor Ambarus 
7228c419778STudor Ambarus 	if (req->dst_len < key->n_sz) {
7238c419778STudor Ambarus 		req->dst_len = key->n_sz;
7248c419778STudor Ambarus 		dev_err(jrdev, "Output buffer length less than parameter n\n");
7258c419778STudor Ambarus 		return -EOVERFLOW;
7268c419778STudor Ambarus 	}
7278c419778STudor Ambarus 
7288c419778STudor Ambarus 	/* Allocate extended descriptor */
7298c419778STudor Ambarus 	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
7308c419778STudor Ambarus 	if (IS_ERR(edesc))
7318c419778STudor Ambarus 		return PTR_ERR(edesc);
7328c419778STudor Ambarus 
7338c419778STudor Ambarus 	/* Set RSA Encrypt Protocol Data Block */
7348c419778STudor Ambarus 	ret = set_rsa_pub_pdb(req, edesc);
7358c419778STudor Ambarus 	if (ret)
7368c419778STudor Ambarus 		goto init_fail;
7378c419778STudor Ambarus 
7388c419778STudor Ambarus 	/* Initialize Job Descriptor */
7398c419778STudor Ambarus 	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);
7408c419778STudor Ambarus 
741bf537950SIuliana Prodan 	return akcipher_enqueue_req(jrdev, rsa_pub_done, req);
7428c419778STudor Ambarus 
7438c419778STudor Ambarus init_fail:
7448c419778STudor Ambarus 	rsa_io_unmap(jrdev, edesc, req);
7458c419778STudor Ambarus 	kfree(edesc);
7468c419778STudor Ambarus 	return ret;
7478c419778STudor Ambarus }
7488c419778STudor Ambarus 
74952e26d77SRadu Alexe static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
7508c419778STudor Ambarus {
7518c419778STudor Ambarus 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
7524cb4f7c1SHerbert Xu 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
7538c419778STudor Ambarus 	struct device *jrdev = ctx->dev;
7548c419778STudor Ambarus 	struct rsa_edesc *edesc;
7558c419778STudor Ambarus 	int ret;
7568c419778STudor Ambarus 
7578c419778STudor Ambarus 	/* Allocate extended descriptor */
7588c419778STudor Ambarus 	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
7598c419778STudor Ambarus 	if (IS_ERR(edesc))
7608c419778STudor Ambarus 		return PTR_ERR(edesc);
7618c419778STudor Ambarus 
7628c419778STudor Ambarus 	/* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
7638c419778STudor Ambarus 	ret = set_rsa_priv_f1_pdb(req, edesc);
7648c419778STudor Ambarus 	if (ret)
7658c419778STudor Ambarus 		goto init_fail;
7668c419778STudor Ambarus 
7678c419778STudor Ambarus 	/* Initialize Job Descriptor */
7688c419778STudor Ambarus 	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);
7698c419778STudor Ambarus 
770bf537950SIuliana Prodan 	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);
7718c419778STudor Ambarus 
7728c419778STudor Ambarus init_fail:
7738c419778STudor Ambarus 	rsa_io_unmap(jrdev, edesc, req);
7748c419778STudor Ambarus 	kfree(edesc);
7758c419778STudor Ambarus 	return ret;
7768c419778STudor Ambarus }
7778c419778STudor Ambarus 
77852e26d77SRadu Alexe static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
77952e26d77SRadu Alexe {
78052e26d77SRadu Alexe 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
7814cb4f7c1SHerbert Xu 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
78252e26d77SRadu Alexe 	struct device *jrdev = ctx->dev;
78352e26d77SRadu Alexe 	struct rsa_edesc *edesc;
78452e26d77SRadu Alexe 	int ret;
78552e26d77SRadu Alexe 
78652e26d77SRadu Alexe 	/* Allocate extended descriptor */
78752e26d77SRadu Alexe 	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
78852e26d77SRadu Alexe 	if (IS_ERR(edesc))
78952e26d77SRadu Alexe 		return PTR_ERR(edesc);
79052e26d77SRadu Alexe 
79152e26d77SRadu Alexe 	/* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
79252e26d77SRadu Alexe 	ret = set_rsa_priv_f2_pdb(req, edesc);
79352e26d77SRadu Alexe 	if (ret)
79452e26d77SRadu Alexe 		goto init_fail;
79552e26d77SRadu Alexe 
79652e26d77SRadu Alexe 	/* Initialize Job Descriptor */
79752e26d77SRadu Alexe 	init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);
79852e26d77SRadu Alexe 
799bf537950SIuliana Prodan 	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);
80052e26d77SRadu Alexe 
80152e26d77SRadu Alexe init_fail:
80252e26d77SRadu Alexe 	rsa_io_unmap(jrdev, edesc, req);
80352e26d77SRadu Alexe 	kfree(edesc);
80452e26d77SRadu Alexe 	return ret;
80552e26d77SRadu Alexe }
80652e26d77SRadu Alexe 
8074a651b12SRadu Alexe static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
8084a651b12SRadu Alexe {
8094a651b12SRadu Alexe 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
8104cb4f7c1SHerbert Xu 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
8114a651b12SRadu Alexe 	struct device *jrdev = ctx->dev;
8124a651b12SRadu Alexe 	struct rsa_edesc *edesc;
8134a651b12SRadu Alexe 	int ret;
8144a651b12SRadu Alexe 
8154a651b12SRadu Alexe 	/* Allocate extended descriptor */
8164a651b12SRadu Alexe 	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
8174a651b12SRadu Alexe 	if (IS_ERR(edesc))
8184a651b12SRadu Alexe 		return PTR_ERR(edesc);
8194a651b12SRadu Alexe 
8204a651b12SRadu Alexe 	/* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
8214a651b12SRadu Alexe 	ret = set_rsa_priv_f3_pdb(req, edesc);
8224a651b12SRadu Alexe 	if (ret)
8234a651b12SRadu Alexe 		goto init_fail;
8244a651b12SRadu Alexe 
8254a651b12SRadu Alexe 	/* Initialize Job Descriptor */
8264a651b12SRadu Alexe 	init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);
8274a651b12SRadu Alexe 
828bf537950SIuliana Prodan 	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);
8294a651b12SRadu Alexe 
8304a651b12SRadu Alexe init_fail:
8314a651b12SRadu Alexe 	rsa_io_unmap(jrdev, edesc, req);
8324a651b12SRadu Alexe 	kfree(edesc);
8334a651b12SRadu Alexe 	return ret;
8344a651b12SRadu Alexe }
8354a651b12SRadu Alexe 
83652e26d77SRadu Alexe static int caam_rsa_dec(struct akcipher_request *req)
83752e26d77SRadu Alexe {
83852e26d77SRadu Alexe 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
8394cb4f7c1SHerbert Xu 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
84052e26d77SRadu Alexe 	struct caam_rsa_key *key = &ctx->key;
84152e26d77SRadu Alexe 	int ret;
84252e26d77SRadu Alexe 
84352e26d77SRadu Alexe 	if (unlikely(!key->n || !key->d))
84452e26d77SRadu Alexe 		return -EINVAL;
84552e26d77SRadu Alexe 
84652e26d77SRadu Alexe 	if (req->dst_len < key->n_sz) {
84752e26d77SRadu Alexe 		req->dst_len = key->n_sz;
84852e26d77SRadu Alexe 		dev_err(ctx->dev, "Output buffer length less than parameter n\n");
84952e26d77SRadu Alexe 		return -EOVERFLOW;
85052e26d77SRadu Alexe 	}
85152e26d77SRadu Alexe 
8524a651b12SRadu Alexe 	if (key->priv_form == FORM3)
8534a651b12SRadu Alexe 		ret = caam_rsa_dec_priv_f3(req);
8544a651b12SRadu Alexe 	else if (key->priv_form == FORM2)
85552e26d77SRadu Alexe 		ret = caam_rsa_dec_priv_f2(req);
85652e26d77SRadu Alexe 	else
85752e26d77SRadu Alexe 		ret = caam_rsa_dec_priv_f1(req);
85852e26d77SRadu Alexe 
85952e26d77SRadu Alexe 	return ret;
86052e26d77SRadu Alexe }
86152e26d77SRadu Alexe 
8628c419778STudor Ambarus static void caam_rsa_free_key(struct caam_rsa_key *key)
8638c419778STudor Ambarus {
864453431a5SWaiman Long 	kfree_sensitive(key->d);
865453431a5SWaiman Long 	kfree_sensitive(key->p);
866453431a5SWaiman Long 	kfree_sensitive(key->q);
867453431a5SWaiman Long 	kfree_sensitive(key->dp);
868453431a5SWaiman Long 	kfree_sensitive(key->dq);
869453431a5SWaiman Long 	kfree_sensitive(key->qinv);
870453431a5SWaiman Long 	kfree_sensitive(key->tmp1);
871453431a5SWaiman Long 	kfree_sensitive(key->tmp2);
8728c419778STudor Ambarus 	kfree(key->e);
8738c419778STudor Ambarus 	kfree(key->n);
87452e26d77SRadu Alexe 	memset(key, 0, sizeof(*key));
8758c419778STudor Ambarus }
8768c419778STudor Ambarus 
8777ca4a9a1SRadu Alexe static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
8787ca4a9a1SRadu Alexe {
8797ca4a9a1SRadu Alexe 	while (!**ptr && *nbytes) {
8807ca4a9a1SRadu Alexe 		(*ptr)++;
8817ca4a9a1SRadu Alexe 		(*nbytes)--;
8827ca4a9a1SRadu Alexe 	}
8837ca4a9a1SRadu Alexe }
8847ca4a9a1SRadu Alexe 
8858c419778STudor Ambarus /**
8864a651b12SRadu Alexe  * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
8874a651b12SRadu Alexe  * dP, dQ and qInv could decode to less than the corresponding p, q length, as
8884a651b12SRadu Alexe  * the BER encoding requires that the minimum number of bytes be used to encode
8894a651b12SRadu Alexe  * the integer. The decoded dP, dQ and qInv values therefore have to be
8904a651b12SRadu Alexe  * zero-padded to the appropriate length.
8914a651b12SRadu Alexe  *
8924a651b12SRadu Alexe  * @ptr   : pointer to {dP, dQ, qInv} CRT member
8934a651b12SRadu Alexe  * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
8944a651b12SRadu Alexe  * @dstlen: length in bytes of corresponding p or q prime factor
8954a651b12SRadu Alexe  */
8964a651b12SRadu Alexe static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
8974a651b12SRadu Alexe {
8984a651b12SRadu Alexe 	u8 *dst;
8994a651b12SRadu Alexe 
9004a651b12SRadu Alexe 	caam_rsa_drop_leading_zeros(&ptr, &nbytes);
9014a651b12SRadu Alexe 	if (!nbytes)
9024a651b12SRadu Alexe 		return NULL;
9034a651b12SRadu Alexe 
904199354d7SHerbert Xu 	dst = kzalloc(dstlen, GFP_KERNEL);
9054a651b12SRadu Alexe 	if (!dst)
9064a651b12SRadu Alexe 		return NULL;
9074a651b12SRadu Alexe 
9084a651b12SRadu Alexe 	memcpy(dst + (dstlen - nbytes), ptr, nbytes);
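	/*
	 * Right-align the value in the zeroed dstlen-byte buffer. Illustrative
	 * example (not from the source): nbytes = 2, ptr = {0x12, 0x34} and
	 * dstlen = 4 yield {0x00, 0x00, 0x12, 0x34}.
	 */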
9094a651b12SRadu Alexe 
9104a651b12SRadu Alexe 	return dst;
9114a651b12SRadu Alexe }
9124a651b12SRadu Alexe 
9134a651b12SRadu Alexe /**
9148c419778STudor Ambarus  * caam_read_raw_data - Read a raw byte stream as a positive integer.
9158c419778STudor Ambarus  * The function skips the buffer's leading zeros, copies the remaining data
916199354d7SHerbert Xu  * to a buffer allocated in the GFP_KERNEL zone and returns
9178c419778STudor Ambarus  * the address of the new buffer.
9188c419778STudor Ambarus  *
9198c419778STudor Ambarus  * @buf   : The data to read
9208c419778STudor Ambarus  * @nbytes: The amount of data to read
9218c419778STudor Ambarus  */
9228c419778STudor Ambarus static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
9238c419778STudor Ambarus {
9248c419778STudor Ambarus 
9257ca4a9a1SRadu Alexe 	caam_rsa_drop_leading_zeros(&buf, nbytes);
9267fcaf62aSTudor Ambarus 	if (!*nbytes)
9277fcaf62aSTudor Ambarus 		return NULL;
9288c419778STudor Ambarus 
929199354d7SHerbert Xu 	return kmemdup(buf, *nbytes, GFP_KERNEL);
9308c419778STudor Ambarus }
9318c419778STudor Ambarus 
9328c419778STudor Ambarus static int caam_rsa_check_key_length(unsigned int len)
9338c419778STudor Ambarus {
9348c419778STudor Ambarus 	if (len > 4096)
9358c419778STudor Ambarus 		return -EINVAL;
9368c419778STudor Ambarus 	return 0;
9378c419778STudor Ambarus }
9388c419778STudor Ambarus 
9398c419778STudor Ambarus static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
9408c419778STudor Ambarus 				unsigned int keylen)
9418c419778STudor Ambarus {
9424cb4f7c1SHerbert Xu 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
9438439e94fSHoria Geantă 	struct rsa_key raw_key = {NULL};
9448c419778STudor Ambarus 	struct caam_rsa_key *rsa_key = &ctx->key;
9458c419778STudor Ambarus 	int ret;
9468c419778STudor Ambarus 
9478c419778STudor Ambarus 	/* Free the old RSA key if any */
9488c419778STudor Ambarus 	caam_rsa_free_key(rsa_key);
9498c419778STudor Ambarus 
9508c419778STudor Ambarus 	ret = rsa_parse_pub_key(&raw_key, key, keylen);
9518c419778STudor Ambarus 	if (ret)
9528c419778STudor Ambarus 		return ret;
9538c419778STudor Ambarus 
9548c419778STudor Ambarus 	/* Copy key in DMA zone */
955199354d7SHerbert Xu 	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_KERNEL);
9568c419778STudor Ambarus 	if (!rsa_key->e)
9578c419778STudor Ambarus 		goto err;
9588c419778STudor Ambarus 
9598c419778STudor Ambarus 	/*
9608c419778STudor Ambarus 	 * Skip leading zeros and copy the positive integer to a buffer
961199354d7SHerbert Xu 	 * allocated in the GFP_KERNEL zone. The decryption descriptor
9628c419778STudor Ambarus 	 * expects a positive integer for the RSA modulus and uses its length as
9638c419778STudor Ambarus 	 * decryption output length.
9648c419778STudor Ambarus 	 */
9658c419778STudor Ambarus 	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
9668c419778STudor Ambarus 	if (!rsa_key->n)
9678c419778STudor Ambarus 		goto err;
9688c419778STudor Ambarus 
9698c419778STudor Ambarus 	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
9708c419778STudor Ambarus 		caam_rsa_free_key(rsa_key);
9718c419778STudor Ambarus 		return -EINVAL;
9728c419778STudor Ambarus 	}
9738c419778STudor Ambarus 
9748c419778STudor Ambarus 	rsa_key->e_sz = raw_key.e_sz;
9758c419778STudor Ambarus 	rsa_key->n_sz = raw_key.n_sz;
9768c419778STudor Ambarus 
9778c419778STudor Ambarus 	return 0;
9788c419778STudor Ambarus err:
9798c419778STudor Ambarus 	caam_rsa_free_key(rsa_key);
9808c419778STudor Ambarus 	return -ENOMEM;
9818c419778STudor Ambarus }
9828c419778STudor Ambarus 
98352e26d77SRadu Alexe static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
98452e26d77SRadu Alexe 				       struct rsa_key *raw_key)
98552e26d77SRadu Alexe {
98652e26d77SRadu Alexe 	struct caam_rsa_key *rsa_key = &ctx->key;
98752e26d77SRadu Alexe 	size_t p_sz = raw_key->p_sz;
98852e26d77SRadu Alexe 	size_t q_sz = raw_key->q_sz;
989199354d7SHerbert Xu 	unsigned int aligned_size;
99052e26d77SRadu Alexe 
99152e26d77SRadu Alexe 	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
99252e26d77SRadu Alexe 	if (!rsa_key->p)
99352e26d77SRadu Alexe 		return;
99452e26d77SRadu Alexe 	rsa_key->p_sz = p_sz;
99552e26d77SRadu Alexe 
99652e26d77SRadu Alexe 	rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
99752e26d77SRadu Alexe 	if (!rsa_key->q)
99852e26d77SRadu Alexe 		goto free_p;
99952e26d77SRadu Alexe 	rsa_key->q_sz = q_sz;
100052e26d77SRadu Alexe 
1001199354d7SHerbert Xu 	aligned_size = ALIGN(raw_key->p_sz, dma_get_cache_alignment());
1002199354d7SHerbert Xu 	rsa_key->tmp1 = kzalloc(aligned_size, GFP_KERNEL);
100352e26d77SRadu Alexe 	if (!rsa_key->tmp1)
100452e26d77SRadu Alexe 		goto free_q;
100552e26d77SRadu Alexe 
1006199354d7SHerbert Xu 	aligned_size = ALIGN(raw_key->q_sz, dma_get_cache_alignment());
1007199354d7SHerbert Xu 	rsa_key->tmp2 = kzalloc(aligned_size, GFP_KERNEL);
100852e26d77SRadu Alexe 	if (!rsa_key->tmp2)
100952e26d77SRadu Alexe 		goto free_tmp1;
101052e26d77SRadu Alexe 
101152e26d77SRadu Alexe 	rsa_key->priv_form = FORM2;
101252e26d77SRadu Alexe 
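	/*
	 * The key is now usable in FORM2 (p, q, d); the reads below attempt
	 * to upgrade it to FORM3 (CRT) by also decoding dp, dq and qinv.
	 */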
10134a651b12SRadu Alexe 	rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
10144a651b12SRadu Alexe 	if (!rsa_key->dp)
10154a651b12SRadu Alexe 		goto free_tmp2;
10164a651b12SRadu Alexe 
10174a651b12SRadu Alexe 	rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
10184a651b12SRadu Alexe 	if (!rsa_key->dq)
10194a651b12SRadu Alexe 		goto free_dp;
10204a651b12SRadu Alexe 
10214a651b12SRadu Alexe 	rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
10224a651b12SRadu Alexe 					  q_sz);
10234a651b12SRadu Alexe 	if (!rsa_key->qinv)
10244a651b12SRadu Alexe 		goto free_dq;
10254a651b12SRadu Alexe 
10264a651b12SRadu Alexe 	rsa_key->priv_form = FORM3;
10274a651b12SRadu Alexe 
102852e26d77SRadu Alexe 	return;
102952e26d77SRadu Alexe 
10304a651b12SRadu Alexe free_dq:
1031453431a5SWaiman Long 	kfree_sensitive(rsa_key->dq);
10324a651b12SRadu Alexe free_dp:
1033453431a5SWaiman Long 	kfree_sensitive(rsa_key->dp);
10344a651b12SRadu Alexe free_tmp2:
1035453431a5SWaiman Long 	kfree_sensitive(rsa_key->tmp2);
103652e26d77SRadu Alexe free_tmp1:
1037453431a5SWaiman Long 	kfree_sensitive(rsa_key->tmp1);
103852e26d77SRadu Alexe free_q:
1039453431a5SWaiman Long 	kfree_sensitive(rsa_key->q);
104052e26d77SRadu Alexe free_p:
1041453431a5SWaiman Long 	kfree_sensitive(rsa_key->p);
104252e26d77SRadu Alexe }
104352e26d77SRadu Alexe 
10448c419778STudor Ambarus static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
10458c419778STudor Ambarus 				 unsigned int keylen)
10468c419778STudor Ambarus {
10474cb4f7c1SHerbert Xu 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
10488439e94fSHoria Geantă 	struct rsa_key raw_key = {NULL};
10498c419778STudor Ambarus 	struct caam_rsa_key *rsa_key = &ctx->key;
10508c419778STudor Ambarus 	int ret;
10518c419778STudor Ambarus 
10528c419778STudor Ambarus 	/* Free the old RSA key if any */
10538c419778STudor Ambarus 	caam_rsa_free_key(rsa_key);
10548c419778STudor Ambarus 
10558c419778STudor Ambarus 	ret = rsa_parse_priv_key(&raw_key, key, keylen);
10568c419778STudor Ambarus 	if (ret)
10578c419778STudor Ambarus 		return ret;
10588c419778STudor Ambarus 
10598c419778STudor Ambarus 	/* Copy key in DMA zone */
1060199354d7SHerbert Xu 	rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_KERNEL);
10618c419778STudor Ambarus 	if (!rsa_key->d)
10628c419778STudor Ambarus 		goto err;
10638c419778STudor Ambarus 
1064199354d7SHerbert Xu 	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_KERNEL);
10658c419778STudor Ambarus 	if (!rsa_key->e)
10668c419778STudor Ambarus 		goto err;
10678c419778STudor Ambarus 
10688c419778STudor Ambarus 	/*
10698c419778STudor Ambarus 	 * Skip leading zeros and copy the positive integer to a buffer
1070199354d7SHerbert Xu 	 * allocated in the GFP_KERNEL zone. The decryption descriptor
10718c419778STudor Ambarus 	 * expects a positive integer for the RSA modulus and uses its length as
10728c419778STudor Ambarus 	 * decryption output length.
10738c419778STudor Ambarus 	 */
10748c419778STudor Ambarus 	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
10758c419778STudor Ambarus 	if (!rsa_key->n)
10768c419778STudor Ambarus 		goto err;
10778c419778STudor Ambarus 
10788c419778STudor Ambarus 	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
10798c419778STudor Ambarus 		caam_rsa_free_key(rsa_key);
10808c419778STudor Ambarus 		return -EINVAL;
10818c419778STudor Ambarus 	}
10828c419778STudor Ambarus 
10838c419778STudor Ambarus 	rsa_key->d_sz = raw_key.d_sz;
10848c419778STudor Ambarus 	rsa_key->e_sz = raw_key.e_sz;
10858c419778STudor Ambarus 	rsa_key->n_sz = raw_key.n_sz;
10868c419778STudor Ambarus 
108752e26d77SRadu Alexe 	caam_rsa_set_priv_key_form(ctx, &raw_key);
108852e26d77SRadu Alexe 
10898c419778STudor Ambarus 	return 0;
10908c419778STudor Ambarus 
10918c419778STudor Ambarus err:
10928c419778STudor Ambarus 	caam_rsa_free_key(rsa_key);
10938c419778STudor Ambarus 	return -ENOMEM;
10948c419778STudor Ambarus }
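
/*
 * Illustrative sketch (not part of this driver): the "skip leading
 * zeros, then duplicate the remaining positive integer into a DMA-able
 * GFP_KERNEL buffer" step described in the comment above.
 * caam_read_raw_data() provides this for the modulus; the helper below
 * is a hypothetical stand-in written against the same idea.
 */
static __maybe_unused u8 *dup_positive_int_example(const u8 *buf,
						   size_t *nbytes)
{
	/* Drop leading zero octets so only the positive integer remains. */
	while (*nbytes && !*buf) {
		buf++;
		(*nbytes)--;
	}
	if (!*nbytes)
		return NULL;

	/* Duplicate into kernel memory suitable for DMA mapping. */
	return kmemdup(buf, *nbytes, GFP_KERNEL);
}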
10958c419778STudor Ambarus 
1096e198429cSTudor-Dan Ambarus static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
10978c419778STudor Ambarus {
10984cb4f7c1SHerbert Xu 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
10998c419778STudor Ambarus 
1100e198429cSTudor-Dan Ambarus 	return ctx->key.n_sz;
11018c419778STudor Ambarus }
11028c419778STudor Ambarus 
11038c419778STudor Ambarus /* Per-session pkc driver context creation function */
11048c419778STudor Ambarus static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
11058c419778STudor Ambarus {
11064cb4f7c1SHerbert Xu 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
11078c419778STudor Ambarus 
1108908d383bSHerbert Xu 	akcipher_set_reqsize(tfm, sizeof(struct caam_rsa_req_ctx));
1109908d383bSHerbert Xu 
11108c419778STudor Ambarus 	ctx->dev = caam_jr_alloc();
11118c419778STudor Ambarus 
11128c419778STudor Ambarus 	if (IS_ERR(ctx->dev)) {
111333fa46d7SHoria Geantă 		pr_err("Job Ring Device allocation for transform failed\n");
11148c419778STudor Ambarus 		return PTR_ERR(ctx->dev);
11158c419778STudor Ambarus 	}
11168c419778STudor Ambarus 
1117c3725f7cSIuliana Prodan 	ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer,
1118c3725f7cSIuliana Prodan 					  CAAM_RSA_MAX_INPUT_SIZE - 1,
1119c3725f7cSIuliana Prodan 					  DMA_TO_DEVICE);
1120c3725f7cSIuliana Prodan 	if (dma_mapping_error(ctx->dev, ctx->padding_dma)) {
1121c3725f7cSIuliana Prodan 		dev_err(ctx->dev, "unable to map padding\n");
1122c3725f7cSIuliana Prodan 		caam_jr_free(ctx->dev);
1123c3725f7cSIuliana Prodan 		return -ENOMEM;
1124c3725f7cSIuliana Prodan 	}
1125c3725f7cSIuliana Prodan 
1126bf537950SIuliana Prodan 	ctx->enginectx.op.do_one_request = akcipher_do_one_req;
1127bf537950SIuliana Prodan 
11288c419778STudor Ambarus 	return 0;
11298c419778STudor Ambarus }
11308c419778STudor Ambarus 
11318c419778STudor Ambarus /* Per-session pkc driver context cleanup function */
11328c419778STudor Ambarus static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
11338c419778STudor Ambarus {
11344cb4f7c1SHerbert Xu 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
11358c419778STudor Ambarus 	struct caam_rsa_key *key = &ctx->key;
11368c419778STudor Ambarus 
1137c3725f7cSIuliana Prodan 	dma_unmap_single(ctx->dev, ctx->padding_dma,
1138c3725f7cSIuliana Prodan 			 CAAM_RSA_MAX_INPUT_SIZE - 1, DMA_TO_DEVICE);
11398c419778STudor Ambarus 	caam_rsa_free_key(key);
11408c419778STudor Ambarus 	caam_jr_free(ctx->dev);
11418c419778STudor Ambarus }
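
/*
 * Illustrative sketch (not part of this driver): the "map once at tfm
 * init, reuse for every request, unmap at tfm exit" lifecycle that
 * caam_rsa_init_tfm()/caam_rsa_exit_tfm() apply to the shared zero
 * padding buffer. The wrapper struct and helpers are hypothetical; only
 * the dma_map_single()/dma_mapping_error()/dma_unmap_single() calls
 * mirror the driver.
 */
struct padding_map_example {
	struct device *dev;
	dma_addr_t dma;
	size_t len;
};

static int __maybe_unused padding_map_example_init(struct padding_map_example *m,
						   struct device *dev,
						   void *buf, size_t len)
{
	m->dev = dev;
	m->len = len;
	m->dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, m->dma))
		return -ENOMEM;

	return 0;
}

static void __maybe_unused padding_map_example_exit(struct padding_map_example *m)
{
	dma_unmap_single(m->dev, m->dma, m->len, DMA_TO_DEVICE);
}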
11428c419778STudor Ambarus 
114358068cfcSIuliana Prodan static struct caam_akcipher_alg caam_rsa = {
114458068cfcSIuliana Prodan 	.akcipher = {
11458c419778STudor Ambarus 		.encrypt = caam_rsa_enc,
11468c419778STudor Ambarus 		.decrypt = caam_rsa_dec,
11478c419778STudor Ambarus 		.set_pub_key = caam_rsa_set_pub_key,
11488c419778STudor Ambarus 		.set_priv_key = caam_rsa_set_priv_key,
11498c419778STudor Ambarus 		.max_size = caam_rsa_max_size,
11508c419778STudor Ambarus 		.init = caam_rsa_init_tfm,
11518c419778STudor Ambarus 		.exit = caam_rsa_exit_tfm,
11528c419778STudor Ambarus 		.base = {
11538c419778STudor Ambarus 			.cra_name = "rsa",
11548c419778STudor Ambarus 			.cra_driver_name = "rsa-caam",
11558c419778STudor Ambarus 			.cra_priority = 3000,
11568c419778STudor Ambarus 			.cra_module = THIS_MODULE,
11574cb4f7c1SHerbert Xu 			.cra_ctxsize = sizeof(struct caam_rsa_ctx) +
11584cb4f7c1SHerbert Xu 				       CRYPTO_DMA_PADDING,
11598c419778STudor Ambarus 		},
116058068cfcSIuliana Prodan 	}
11618c419778STudor Ambarus };
11628c419778STudor Ambarus 
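/*
 * Illustrative sketch (not part of this driver): how a kernel caller
 * might reach "rsa-caam" through the generic akcipher API once the
 * algorithm above is registered. "rsa" resolves to the highest-priority
 * provider (this driver registers at priority 3000). A synchronous
 * wrapper via crypto_wait_req() is assumed for simplicity; the key,
 * scatterlists and lengths are the caller's. Declarations come from
 * <crypto/akcipher.h> and <linux/crypto.h>.
 */
static int __maybe_unused rsa_caam_usage_example(const void *pub_key,
						 unsigned int keylen,
						 struct scatterlist *src,
						 struct scatterlist *dst,
						 unsigned int src_len,
						 unsigned int dst_len)
{
	DECLARE_CRYPTO_WAIT(wait);
	struct crypto_akcipher *tfm;
	struct akcipher_request *req;
	int ret;

	tfm = crypto_alloc_akcipher("rsa", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_akcipher_set_pub_key(tfm, pub_key, keylen);
	if (ret)
		goto free_tfm;

	req = akcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_tfm;
	}

	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	akcipher_request_set_crypt(req, src, dst, src_len, dst_len);

	/* RSA public-key operation, offloaded to the CAAM job ring. */
	ret = crypto_wait_req(crypto_akcipher_encrypt(req), &wait);

	akcipher_request_free(req);
free_tfm:
	crypto_free_akcipher(tfm);
	return ret;
}
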
11638c419778STudor Ambarus /* Public Key Cryptography module initialization handler */
11641b46c90cSHoria Geantă int caam_pkc_init(struct device *ctrldev)
11658c419778STudor Ambarus {
11661b46c90cSHoria Geantă 	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
1167f20311ccSMichael Walle 	u32 pk_inst, pkha;
11688c419778STudor Ambarus 	int err;
11694e3a61c5SIuliana Prodan 	init_done = false;
11708c419778STudor Ambarus 
11718c419778STudor Ambarus 	/* Determine public key hardware accelerator presence. */
1172f20311ccSMichael Walle 	if (priv->era < 10) {
1173ae1dd17dSHoria GeantA 		pk_inst = (rd_reg32(&priv->jr[0]->perfmon.cha_num_ls) &
1174d239b10dSHoria Geantă 			   CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
1175f20311ccSMichael Walle 	} else {
1176ae1dd17dSHoria GeantA 		pkha = rd_reg32(&priv->jr[0]->vreg.pkha);
1177f20311ccSMichael Walle 		pk_inst = pkha & CHA_VER_NUM_MASK;
1178f20311ccSMichael Walle 
1179f20311ccSMichael Walle 		/*
1180f20311ccSMichael Walle 		 * Newer CAAMs support partially disabled functionality. In that
1181f20311ccSMichael Walle 		 * case the PKHA instance count is non-zero, but the NO_CRYPT bit
1182f20311ccSMichael Walle 		 * is set to indicate that no encryption or decryption is
1183f20311ccSMichael Walle 		 * supported; only signing and verification are supported.
1184f20311ccSMichael Walle 		 */
1185f20311ccSMichael Walle 		if (pkha & CHA_VER_MISC_PKHA_NO_CRYPT)
1186f20311ccSMichael Walle 			pk_inst = 0;
1187f20311ccSMichael Walle 	}
11888c419778STudor Ambarus 
11898c419778STudor Ambarus 	/* Do not register algorithms if PKHA is not present. */
11901b46c90cSHoria Geantă 	if (!pk_inst)
11911b46c90cSHoria Geantă 		return 0;
11928c419778STudor Ambarus 
1193c3725f7cSIuliana Prodan 	/* allocate zero buffer, used for padding input */
1194199354d7SHerbert Xu 	zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_KERNEL);
1195c3725f7cSIuliana Prodan 	if (!zero_buffer)
1196c3725f7cSIuliana Prodan 		return -ENOMEM;
1197c3725f7cSIuliana Prodan 
119858068cfcSIuliana Prodan 	err = crypto_register_akcipher(&caam_rsa.akcipher);
1200c3725f7cSIuliana Prodan 	if (err) {
1201c3725f7cSIuliana Prodan 		kfree(zero_buffer);
12028c419778STudor Ambarus 		dev_warn(ctrldev, "%s alg registration failed\n",
120358068cfcSIuliana Prodan 			 caam_rsa.akcipher.base.cra_driver_name);
1204c3725f7cSIuliana Prodan 	} else {
12054e3a61c5SIuliana Prodan 		init_done = true;
120658068cfcSIuliana Prodan 		caam_rsa.registered = true;
12078c419778STudor Ambarus 		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
1208c3725f7cSIuliana Prodan 	}
12098c419778STudor Ambarus 
12108c419778STudor Ambarus 	return err;
12118c419778STudor Ambarus }
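
/*
 * Illustrative sketch (not part of this driver): the era >= 10 PKHA
 * capability decision above, folded into a standalone predicate.
 * @pkha_vreg is assumed to hold the raw version-register value read in
 * caam_pkc_init(); the masks come from this driver's regs.h.
 */
static bool __maybe_unused pkha_supports_crypt_example(u32 pkha_vreg)
{
	/* No PKHA instances at all: nothing can be offloaded. */
	if (!(pkha_vreg & CHA_VER_NUM_MASK))
		return false;

	/* PKHA present, but encrypt/decrypt support is disabled. */
	if (pkha_vreg & CHA_VER_MISC_PKHA_NO_CRYPT)
		return false;

	return true;
}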
12128c419778STudor Ambarus 
12131b46c90cSHoria Geantă void caam_pkc_exit(void)
12148c419778STudor Ambarus {
12154e3a61c5SIuliana Prodan 	if (!init_done)
12164e3a61c5SIuliana Prodan 		return;
12174e3a61c5SIuliana Prodan 
121858068cfcSIuliana Prodan 	if (caam_rsa.registered)
121958068cfcSIuliana Prodan 		crypto_unregister_akcipher(&caam_rsa.akcipher);
122058068cfcSIuliana Prodan 
1221c3725f7cSIuliana Prodan 	kfree(zero_buffer);
12228c419778STudor Ambarus }
1223