xref: /linux/drivers/crypto/caam/caampkc.c (revision c771600c6af14749609b49565ffb4cac2959710d)
1618b5dc4SHoria Geantă // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
28c419778STudor Ambarus /*
38c419778STudor Ambarus  * caam - Freescale FSL CAAM support for Public Key Cryptography
48c419778STudor Ambarus  *
58c419778STudor Ambarus  * Copyright 2016 Freescale Semiconductor, Inc.
6ae1dd17dSHoria GeantA  * Copyright 2018-2019, 2023 NXP
78c419778STudor Ambarus  *
88c419778STudor Ambarus  * There is no Shared Descriptor for PKC so that the Job Descriptor must carry
98c419778STudor Ambarus  * all the desired key parameters, input and output pointers.
108c419778STudor Ambarus  */
118c419778STudor Ambarus #include "compat.h"
128c419778STudor Ambarus #include "regs.h"
138c419778STudor Ambarus #include "intern.h"
148c419778STudor Ambarus #include "jr.h"
158c419778STudor Ambarus #include "error.h"
168c419778STudor Ambarus #include "desc_constr.h"
178c419778STudor Ambarus #include "sg_sw_sec4.h"
188c419778STudor Ambarus #include "caampkc.h"
194ac1a2d8SHerbert Xu #include <crypto/internal/engine.h>
20199354d7SHerbert Xu #include <linux/dma-mapping.h>
21623814c0SHerbert Xu #include <linux/err.h>
22199354d7SHerbert Xu #include <linux/kernel.h>
23623814c0SHerbert Xu #include <linux/slab.h>
24623814c0SHerbert Xu #include <linux/string.h>
258c419778STudor Ambarus 
26a1cf573eSAndrey Smirnov #define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB)
278c419778STudor Ambarus #define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
28a1cf573eSAndrey Smirnov 				 SIZEOF_RSA_PRIV_F1_PDB)
2952e26d77SRadu Alexe #define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
30a1cf573eSAndrey Smirnov 				 SIZEOF_RSA_PRIV_F2_PDB)
314a651b12SRadu Alexe #define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
32a1cf573eSAndrey Smirnov 				 SIZEOF_RSA_PRIV_F3_PDB)
33c3725f7cSIuliana Prodan #define CAAM_RSA_MAX_INPUT_SIZE	512 /* for a 4096-bit modulus */
34c3725f7cSIuliana Prodan 
35c3725f7cSIuliana Prodan /* buffer filled with zeros, used for padding */
36c3725f7cSIuliana Prodan static u8 *zero_buffer;
378c419778STudor Ambarus 
384e3a61c5SIuliana Prodan /*
394e3a61c5SIuliana Prodan  * variable used to avoid double free of resources in case
404e3a61c5SIuliana Prodan  * algorithm registration was unsuccessful
414e3a61c5SIuliana Prodan  */
424e3a61c5SIuliana Prodan static bool init_done;
434e3a61c5SIuliana Prodan 
4458068cfcSIuliana Prodan struct caam_akcipher_alg {
45623814c0SHerbert Xu 	struct akcipher_engine_alg akcipher;
4658068cfcSIuliana Prodan 	bool registered;
4758068cfcSIuliana Prodan };
4858068cfcSIuliana Prodan 
rsa_io_unmap(struct device * dev,struct rsa_edesc * edesc,struct akcipher_request * req)498c419778STudor Ambarus static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
508c419778STudor Ambarus 			 struct akcipher_request *req)
518c419778STudor Ambarus {
523b2614cbSIuliana Prodan 	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
533b2614cbSIuliana Prodan 
548c419778STudor Ambarus 	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
553b2614cbSIuliana Prodan 	dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE);
568c419778STudor Ambarus 
578c419778STudor Ambarus 	if (edesc->sec4_sg_bytes)
588c419778STudor Ambarus 		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
598c419778STudor Ambarus 				 DMA_TO_DEVICE);
608c419778STudor Ambarus }
618c419778STudor Ambarus 
rsa_pub_unmap(struct device * dev,struct rsa_edesc * edesc,struct akcipher_request * req)628c419778STudor Ambarus static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
638c419778STudor Ambarus 			  struct akcipher_request *req)
648c419778STudor Ambarus {
658c419778STudor Ambarus 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
664cb4f7c1SHerbert Xu 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
678c419778STudor Ambarus 	struct caam_rsa_key *key = &ctx->key;
688c419778STudor Ambarus 	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
698c419778STudor Ambarus 
708c419778STudor Ambarus 	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
718c419778STudor Ambarus 	dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
728c419778STudor Ambarus }
738c419778STudor Ambarus 
rsa_priv_f1_unmap(struct device * dev,struct rsa_edesc * edesc,struct akcipher_request * req)748c419778STudor Ambarus static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
758c419778STudor Ambarus 			      struct akcipher_request *req)
768c419778STudor Ambarus {
778c419778STudor Ambarus 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
784cb4f7c1SHerbert Xu 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
798c419778STudor Ambarus 	struct caam_rsa_key *key = &ctx->key;
808c419778STudor Ambarus 	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
818c419778STudor Ambarus 
828c419778STudor Ambarus 	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
838c419778STudor Ambarus 	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
848c419778STudor Ambarus }
858c419778STudor Ambarus 
rsa_priv_f2_unmap(struct device * dev,struct rsa_edesc * edesc,struct akcipher_request * req)8652e26d77SRadu Alexe static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
8752e26d77SRadu Alexe 			      struct akcipher_request *req)
8852e26d77SRadu Alexe {
8952e26d77SRadu Alexe 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
904cb4f7c1SHerbert Xu 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
9152e26d77SRadu Alexe 	struct caam_rsa_key *key = &ctx->key;
9252e26d77SRadu Alexe 	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
9352e26d77SRadu Alexe 	size_t p_sz = key->p_sz;
944bffaab3SHoria Geantă 	size_t q_sz = key->q_sz;
9552e26d77SRadu Alexe 
9652e26d77SRadu Alexe 	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
9752e26d77SRadu Alexe 	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
9852e26d77SRadu Alexe 	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
99f1bf9e60SHoria Geantă 	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
100f1bf9e60SHoria Geantă 	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
10152e26d77SRadu Alexe }
10252e26d77SRadu Alexe 
rsa_priv_f3_unmap(struct device * dev,struct rsa_edesc * edesc,struct akcipher_request * req)1034a651b12SRadu Alexe static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
1044a651b12SRadu Alexe 			      struct akcipher_request *req)
1054a651b12SRadu Alexe {
1064a651b12SRadu Alexe 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
1074cb4f7c1SHerbert Xu 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
1084a651b12SRadu Alexe 	struct caam_rsa_key *key = &ctx->key;
1094a651b12SRadu Alexe 	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
1104a651b12SRadu Alexe 	size_t p_sz = key->p_sz;
1114bffaab3SHoria Geantă 	size_t q_sz = key->q_sz;
1124a651b12SRadu Alexe 
1134a651b12SRadu Alexe 	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
1144a651b12SRadu Alexe 	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
1154a651b12SRadu Alexe 	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
1164a651b12SRadu Alexe 	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
1174a651b12SRadu Alexe 	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
118f1bf9e60SHoria Geantă 	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
119f1bf9e60SHoria Geantă 	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
1204a651b12SRadu Alexe }
1214a651b12SRadu Alexe 
/*
 * rsa_pub_done - job ring completion callback for RSA public-key operations
 * @dev:     job ring device
 * @desc:    hardware descriptor that completed
 * @err:     raw CAAM job status (0 on success)
 * @context: the originating akcipher_request
 *
 * Translates the hardware status into an errno, releases all DMA mappings
 * taken for the job, frees the extended descriptor and completes the request
 * either directly or through the crypto engine, depending on whether the
 * request went through the engine's backlog.
 */
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
	struct akcipher_request *req = context;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct rsa_edesc *edesc;
	int ecode = 0;
	bool has_bklog;

	/* Decode the CAAM status word into an errno-style code */
	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = req_ctx->edesc;
	/* Cache the flag now: edesc is freed below, before it is consulted */
	has_bklog = edesc->bklog;

	rsa_pub_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		akcipher_request_complete(req, ecode);
	else
		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
}
1518c419778STudor Ambarus 
/*
 * rsa_priv_f_done - job ring completion callback for RSA private-key
 *                   operations (all three key forms)
 * @dev:     job ring device
 * @desc:    hardware descriptor that completed
 * @err:     raw CAAM job status (0 on success)
 * @context: the originating akcipher_request
 *
 * Mirrors rsa_pub_done(), but selects the PDB unmap routine matching the
 * private-key representation the job was built with.
 */
static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
			    void *context)
{
	struct akcipher_request *req = context;
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc;
	int ecode = 0;
	bool has_bklog;

	/* Decode the CAAM status word into an errno-style code */
	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = req_ctx->edesc;
	/* Cache the flag now: edesc is freed below, before it is consulted */
	has_bklog = edesc->bklog;

	/* Undo the PDB mappings taken for whichever key form was used */
	switch (key->priv_form) {
	case FORM1:
		rsa_priv_f1_unmap(dev, edesc, req);
		break;
	case FORM2:
		rsa_priv_f2_unmap(dev, edesc, req);
		break;
	case FORM3:
		rsa_priv_f3_unmap(dev, edesc, req);
	}

	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		akcipher_request_complete(req, ecode);
	else
		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
}
1944a651b12SRadu Alexe 
/**
 * caam_rsa_count_leading_zeros - Count the leading zero bytes that need to
 *                                be stripped from a given scatterlist
 *
 * @sgl   : scatterlist to count zeros from
 * @nbytes: maximum number of zero bytes, to strip
 * @flags : operation flags
 *
 * Return: number of leading zero bytes counted (at most @nbytes), or a
 *         negative error code propagated from sg_nents_for_len()
 */
static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
					unsigned int nbytes,
					unsigned int flags)
{
	struct sg_mapping_iter miter;
	int lzeros, ents;
	unsigned int len;
	/* remember the original budget so the total stripped can be computed */
	unsigned int tbytes = nbytes;
	const u8 *buff;

	ents = sg_nents_for_len(sgl, nbytes);
	if (ents < 0)
		return ents;

	sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);

	lzeros = 0;
	/* len == 0 forces sg_miter_next() before *buff is ever dereferenced */
	len = 0;
	while (nbytes > 0) {
		/* do not strip more than given bytes */
		while (len && !*buff && lzeros < nbytes) {
			lzeros++;
			len--;
			buff++;
		}

		/* first non-zero byte found: stop scanning */
		if (len && *buff)
			break;

		/* segment exhausted: advance to the next mapped chunk */
		if (!sg_miter_next(&miter))
			break;

		buff = miter.addr;
		len = miter.length;

		/* commit the zeros counted in the previous segment */
		nbytes -= lzeros;
		lzeros = 0;
	}

	/* tell the iterator how much of the last segment was consumed */
	miter.consumed = lzeros;
	sg_miter_stop(&miter);
	nbytes -= lzeros;

	return tbytes - nbytes;
}
2488a2a0dd3SHoria Geantă 
/*
 * rsa_edesc_alloc - allocate and set up an extended descriptor for a request
 * @req:     the akcipher request being serviced
 * @desclen: size in bytes of the hardware job descriptor to reserve
 *
 * Normalizes the source buffer to the key modulus size (stripping leading
 * zeros, or scheduling zero padding via the shared padding buffer), DMA-maps
 * source and destination, builds the hardware s/g table when one is needed,
 * and allocates the edesc holding descriptor space and that table.
 *
 * Return: the edesc on success, ERR_PTR(-ENOMEM) or a negative-errno
 * ERR_PTR from zero-counting on failure; on failure all mappings taken
 * here are undone.
 */
static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
					 size_t desclen)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct device *dev = ctx->dev;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_edesc *edesc;
	/* may sleep only if the caller said so */
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	int src_nents, dst_nents;
	int mapped_src_nents, mapped_dst_nents;
	unsigned int diff_size = 0;
	int lzeros;

	if (req->src_len > key->n_sz) {
		/*
		 * strip leading zeros and
		 * return the number of zeros to skip
		 */
		lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len -
						      key->n_sz, sg_flags);
		if (lzeros < 0)
			return ERR_PTR(lzeros);

		/* fast-forward past the stripped zeros */
		req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src,
						      lzeros);
		req_ctx->fixup_src_len = req->src_len - lzeros;
	} else {
		/*
		 * input src is less then n key modulus,
		 * so there will be zero padding
		 */
		diff_size = key->n_sz - req->src_len;
		req_ctx->fixup_src = req->src;
		req_ctx->fixup_src_len = req->src_len;
	}

	src_nents = sg_nents_for_len(req_ctx->fixup_src,
				     req_ctx->fixup_src_len);
	dst_nents = sg_nents_for_len(req->dst, req->dst_len);

	mapped_src_nents = dma_map_sg(dev, req_ctx->fixup_src, src_nents,
				      DMA_TO_DEVICE);
	if (unlikely(!mapped_src_nents)) {
		dev_err(dev, "unable to map source\n");
		return ERR_PTR(-ENOMEM);
	}
	mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
				      DMA_FROM_DEVICE);
	if (unlikely(!mapped_dst_nents)) {
		dev_err(dev, "unable to map destination\n");
		goto src_fail;
	}

	/* an extra input s/g entry is needed if padding is prepended */
	if (!diff_size && mapped_src_nents == 1)
		sec4_sg_len = 0; /* no need for an input hw s/g table */
	else
		sec4_sg_len = mapped_src_nents + !!diff_size;
	sec4_sg_index = sec4_sg_len;

	if (mapped_dst_nents > 1)
		sec4_sg_len += pad_sg_nents(mapped_dst_nents);
	else
		sec4_sg_len = pad_sg_nents(sec4_sg_len);

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc, hw desc commands and link tables */
	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes, flags);
	if (!edesc)
		goto dst_fail;

	/* s/g table lives right after the descriptor space */
	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
	if (diff_size)
		dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size,
				   0);

	if (sec4_sg_index)
		sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
				   edesc->sec4_sg + !!diff_size, 0);

	if (mapped_dst_nents > 1)
		sg_to_sec4_sg_last(req->dst, req->dst_len,
				   edesc->sec4_sg + sec4_sg_index, 0);

	/* Save nents for later use in Job Descriptor */
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;

	req_ctx->edesc = edesc;

	/* nothing left to do when no hw s/g table is required */
	if (!sec4_sg_bytes)
		return edesc;

	edesc->mapped_src_nents = mapped_src_nents;
	edesc->mapped_dst_nents = mapped_dst_nents;

	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		goto sec4_sg_fail;
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
			     edesc->sec4_sg_bytes, 1);

	return edesc;

	/* unwind in reverse order of acquisition */
sec4_sg_fail:
	kfree(edesc);
dst_fail:
	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
src_fail:
	dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
	return ERR_PTR(-ENOMEM);
}
3738c419778STudor Ambarus 
/*
 * akcipher_do_one_req - crypto engine callback: submit one prepared request
 * @engine: the crypto engine dispatching the request
 * @areq:   base of the akcipher_request to run
 *
 * Marks the edesc as engine-backlogged and enqueues the already-built job
 * descriptor on the job ring. Returns 0 once the job is in flight (the
 * completion callback finishes the request), or an error after undoing the
 * request's mappings.
 */
static int akcipher_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct akcipher_request *req = container_of(areq,
						    struct akcipher_request,
						    base);
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct device *jrdev = ctx->dev;
	u32 *desc = req_ctx->edesc->hw_desc;
	int ret;

	/* completion must go through crypto_finalize_akcipher_request() */
	req_ctx->edesc->bklog = true;

	ret = caam_jr_enqueue(jrdev, desc, req_ctx->akcipher_op_done, req);

	/* let the engine retry later if the ring is full and it supports it */
	if (ret == -ENOSPC && engine->retry_support)
		return ret;

	if (ret != -EINPROGRESS) {
		/*
		 * NOTE(review): this failure path always uses rsa_pub_unmap(),
		 * yet private-key requests are dispatched through here as
		 * well (req_ctx->akcipher_op_done selects the callback) —
		 * confirm the pub PDB unmap is valid for priv-form PDBs.
		 */
		rsa_pub_unmap(jrdev, req_ctx->edesc, req);
		rsa_io_unmap(jrdev, req_ctx->edesc, req);
		kfree(req_ctx->edesc);
	} else {
		ret = 0;
	}

	return ret;
}
403bf537950SIuliana Prodan 
/*
 * set_rsa_pub_pdb - fill the Protocol Data Block for an RSA public-key job
 * @req:   the akcipher request
 * @edesc: extended descriptor already holding the mapped I/O and s/g table
 *
 * DMA-maps the modulus (n) and public exponent (e) and points the PDB's
 * input (f) and output (g) at either the single mapped segment or the
 * hardware s/g table, setting the SGF flags accordingly.
 *
 * Return: 0 on success, -ENOMEM on mapping failure (with partial mappings
 * undone).
 */
static int set_rsa_pub_pdb(struct akcipher_request *req,
			   struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map RSA modulus memory\n");
		return -ENOMEM;
	}

	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->e_dma)) {
		dev_err(dev, "Unable to map RSA public exponent memory\n");
		/* roll back the modulus mapping taken above */
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	/* input f: s/g table if the source spans several segments */
	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
	}

	/* output g: s/g table entries follow the input's in the same table */
	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->g_dma = sg_dma_address(req->dst);
	}

	/* encode operand sizes into the same word as the SGF flags */
	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
	pdb->f_len = req_ctx->fixup_src_len;

	return 0;
}
4498c419778STudor Ambarus 
/*
 * set_rsa_priv_f1_pdb - fill the PDB for a form 1 (n, d) private-key job
 * @req:   the akcipher request
 * @edesc: extended descriptor already holding the mapped I/O and s/g table
 *
 * DMA-maps the modulus (n) and private exponent (d); input is g and output
 * is f in the private-key PDB layout (the reverse of the public PDB).
 *
 * Return: 0 on success, -ENOMEM on mapping failure (with partial mappings
 * undone).
 */
static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map modulus memory\n");
		return -ENOMEM;
	}

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		/* roll back the modulus mapping taken above */
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	/* input g: s/g table if the source spans several segments */
	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;

	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	/* output f: s/g table entries follow the input's in the same table */
	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	/* encode operand sizes into the same word as the SGF flags */
	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;

	return 0;
}
4968c419778STudor Ambarus 
/*
 * set_rsa_priv_f2_pdb - fill the PDB for a form 2 (d, p, q) private-key job
 * @req:   the akcipher request
 * @edesc: extended descriptor already holding the mapped I/O and s/g table
 *
 * DMA-maps the private exponent, both prime factors and the two scratch
 * buffers the accelerator needs (tmp1/tmp2, bidirectional), then wires the
 * PDB input (g) / output (f) pointers. Uses goto-chain cleanup so a failed
 * mapping undoes exactly the earlier ones.
 *
 * Return: 0 on success, -ENOMEM on mapping failure.
 */
static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		return -ENOMEM;
	}

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		goto unmap_d;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	/* scratch buffers are written back by the accelerator */
	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_q;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	/* input g: s/g table if the source spans several segments */
	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	/* output f: s/g table entries follow the input's in the same table */
	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	/* encode operand sizes into the flag words */
	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

	/* unwind mappings in reverse order of acquisition */
unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
unmap_d:
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}
57352e26d77SRadu Alexe 
/*
 * Fill the RSA private-key form #3 (CRT) Protocol Data Block for a decrypt
 * job.  Maps each CRT key component (p, q, dP, dQ, qInv) and the two scratch
 * buffers for DMA, then points the PDB at the input (g) and output (f) data,
 * using the sec4 scatter/gather table when either side has more than one
 * mapped segment.
 *
 * Returns 0 on success or -ENOMEM if any DMA mapping fails; mappings made so
 * far are unwound in reverse order through the goto chain at the bottom.
 */
static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		return -ENOMEM;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	/* dP was zero-padded to p_sz at setkey time (caam_read_rsa_crt). */
	pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dp_dma)) {
		dev_err(dev, "Unable to map RSA exponent dp memory\n");
		goto unmap_q;
	}

	/* dQ was zero-padded to q_sz at setkey time. */
	pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dq_dma)) {
		dev_err(dev, "Unable to map RSA exponent dq memory\n");
		goto unmap_dp;
	}

	pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->c_dma)) {
		dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
		goto unmap_dq;
	}

	/*
	 * Scratch buffers are mapped bidirectionally: the accelerator may
	 * both read and write them during the CRT computation.
	 */
	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_qinv;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	/* Input: scatter/gather table if multiple segments were mapped. */
	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	/* Output: S/G entries follow the input entries in the same table. */
	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	/* Low bits of sgf carry the modulus length. */
	pdb->sgf |= key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

	/* Error unwind: undo the mappings in reverse order of creation. */
unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_qinv:
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
unmap_dp:
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}
6664a651b12SRadu Alexe 
/*
 * Submit an RSA job descriptor to the hardware.
 *
 * Requests flagged CRYPTO_TFM_REQ_MAY_BACKLOG go through the crypto engine
 * (which can queue them); everything else is enqueued directly on the job
 * ring.  On any result other than -EINPROGRESS/-EBUSY the completion
 * callback will not run, so all DMA mappings are undone here — the PDB
 * unmap routine is selected by the private key form — and the extended
 * descriptor is freed.
 *
 * @jrdev: job ring device to enqueue on
 * @cbk:   completion callback to invoke when the job finishes
 * @req:   the akcipher request whose prepared edesc is submitted
 */
static int akcipher_enqueue_req(struct device *jrdev,
				void (*cbk)(struct device *jrdev, u32 *desc,
					    u32 err, void *context),
				struct akcipher_request *req)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc = req_ctx->edesc;
	u32 *desc = edesc->hw_desc;
	int ret;

	req_ctx->akcipher_op_done = cbk;
	/*
	 * Only the backlog request are sent to crypto-engine since the others
	 * can be handled by CAAM, if free, especially since JR has up to 1024
	 * entries (more than the 10 entries from crypto-engine).
	 */
	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		ret = crypto_transfer_akcipher_request_to_engine(jrpriv->engine,
								 req);
	else
		ret = caam_jr_enqueue(jrdev, desc, cbk, req);

	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
		/* Synchronous failure: unmap everything the PDB setup mapped. */
		switch (key->priv_form) {
		case FORM1:
			rsa_priv_f1_unmap(jrdev, edesc, req);
			break;
		case FORM2:
			rsa_priv_f2_unmap(jrdev, edesc, req);
			break;
		case FORM3:
			rsa_priv_f3_unmap(jrdev, edesc, req);
			break;
		default:
			rsa_pub_unmap(jrdev, edesc, req);
		}
		rsa_io_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
713bf537950SIuliana Prodan 
caam_rsa_enc(struct akcipher_request * req)7148c419778STudor Ambarus static int caam_rsa_enc(struct akcipher_request *req)
7158c419778STudor Ambarus {
7168c419778STudor Ambarus 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
7174cb4f7c1SHerbert Xu 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
7188c419778STudor Ambarus 	struct caam_rsa_key *key = &ctx->key;
7198c419778STudor Ambarus 	struct device *jrdev = ctx->dev;
7208c419778STudor Ambarus 	struct rsa_edesc *edesc;
7218c419778STudor Ambarus 	int ret;
7228c419778STudor Ambarus 
7238c419778STudor Ambarus 	if (unlikely(!key->n || !key->e))
7248c419778STudor Ambarus 		return -EINVAL;
7258c419778STudor Ambarus 
7268c419778STudor Ambarus 	if (req->dst_len < key->n_sz) {
7278c419778STudor Ambarus 		req->dst_len = key->n_sz;
7288c419778STudor Ambarus 		dev_err(jrdev, "Output buffer length less than parameter n\n");
7298c419778STudor Ambarus 		return -EOVERFLOW;
7308c419778STudor Ambarus 	}
7318c419778STudor Ambarus 
7328c419778STudor Ambarus 	/* Allocate extended descriptor */
7338c419778STudor Ambarus 	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
7348c419778STudor Ambarus 	if (IS_ERR(edesc))
7358c419778STudor Ambarus 		return PTR_ERR(edesc);
7368c419778STudor Ambarus 
7378c419778STudor Ambarus 	/* Set RSA Encrypt Protocol Data Block */
7388c419778STudor Ambarus 	ret = set_rsa_pub_pdb(req, edesc);
7398c419778STudor Ambarus 	if (ret)
7408c419778STudor Ambarus 		goto init_fail;
7418c419778STudor Ambarus 
7428c419778STudor Ambarus 	/* Initialize Job Descriptor */
7438c419778STudor Ambarus 	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);
7448c419778STudor Ambarus 
745bf537950SIuliana Prodan 	return akcipher_enqueue_req(jrdev, rsa_pub_done, req);
7468c419778STudor Ambarus 
7478c419778STudor Ambarus init_fail:
7488c419778STudor Ambarus 	rsa_io_unmap(jrdev, edesc, req);
7498c419778STudor Ambarus 	kfree(edesc);
7508c419778STudor Ambarus 	return ret;
7518c419778STudor Ambarus }
7528c419778STudor Ambarus 
caam_rsa_dec_priv_f1(struct akcipher_request * req)75352e26d77SRadu Alexe static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
7548c419778STudor Ambarus {
7558c419778STudor Ambarus 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
7564cb4f7c1SHerbert Xu 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
7578c419778STudor Ambarus 	struct device *jrdev = ctx->dev;
7588c419778STudor Ambarus 	struct rsa_edesc *edesc;
7598c419778STudor Ambarus 	int ret;
7608c419778STudor Ambarus 
7618c419778STudor Ambarus 	/* Allocate extended descriptor */
7628c419778STudor Ambarus 	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
7638c419778STudor Ambarus 	if (IS_ERR(edesc))
7648c419778STudor Ambarus 		return PTR_ERR(edesc);
7658c419778STudor Ambarus 
7668c419778STudor Ambarus 	/* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
7678c419778STudor Ambarus 	ret = set_rsa_priv_f1_pdb(req, edesc);
7688c419778STudor Ambarus 	if (ret)
7698c419778STudor Ambarus 		goto init_fail;
7708c419778STudor Ambarus 
7718c419778STudor Ambarus 	/* Initialize Job Descriptor */
7728c419778STudor Ambarus 	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);
7738c419778STudor Ambarus 
774bf537950SIuliana Prodan 	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);
7758c419778STudor Ambarus 
7768c419778STudor Ambarus init_fail:
7778c419778STudor Ambarus 	rsa_io_unmap(jrdev, edesc, req);
7788c419778STudor Ambarus 	kfree(edesc);
7798c419778STudor Ambarus 	return ret;
7808c419778STudor Ambarus }
7818c419778STudor Ambarus 
caam_rsa_dec_priv_f2(struct akcipher_request * req)78252e26d77SRadu Alexe static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
78352e26d77SRadu Alexe {
78452e26d77SRadu Alexe 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
7854cb4f7c1SHerbert Xu 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
78652e26d77SRadu Alexe 	struct device *jrdev = ctx->dev;
78752e26d77SRadu Alexe 	struct rsa_edesc *edesc;
78852e26d77SRadu Alexe 	int ret;
78952e26d77SRadu Alexe 
79052e26d77SRadu Alexe 	/* Allocate extended descriptor */
79152e26d77SRadu Alexe 	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
79252e26d77SRadu Alexe 	if (IS_ERR(edesc))
79352e26d77SRadu Alexe 		return PTR_ERR(edesc);
79452e26d77SRadu Alexe 
79552e26d77SRadu Alexe 	/* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
79652e26d77SRadu Alexe 	ret = set_rsa_priv_f2_pdb(req, edesc);
79752e26d77SRadu Alexe 	if (ret)
79852e26d77SRadu Alexe 		goto init_fail;
79952e26d77SRadu Alexe 
80052e26d77SRadu Alexe 	/* Initialize Job Descriptor */
80152e26d77SRadu Alexe 	init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);
80252e26d77SRadu Alexe 
803bf537950SIuliana Prodan 	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);
80452e26d77SRadu Alexe 
80552e26d77SRadu Alexe init_fail:
80652e26d77SRadu Alexe 	rsa_io_unmap(jrdev, edesc, req);
80752e26d77SRadu Alexe 	kfree(edesc);
80852e26d77SRadu Alexe 	return ret;
80952e26d77SRadu Alexe }
81052e26d77SRadu Alexe 
caam_rsa_dec_priv_f3(struct akcipher_request * req)8114a651b12SRadu Alexe static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
8124a651b12SRadu Alexe {
8134a651b12SRadu Alexe 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
8144cb4f7c1SHerbert Xu 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
8154a651b12SRadu Alexe 	struct device *jrdev = ctx->dev;
8164a651b12SRadu Alexe 	struct rsa_edesc *edesc;
8174a651b12SRadu Alexe 	int ret;
8184a651b12SRadu Alexe 
8194a651b12SRadu Alexe 	/* Allocate extended descriptor */
8204a651b12SRadu Alexe 	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
8214a651b12SRadu Alexe 	if (IS_ERR(edesc))
8224a651b12SRadu Alexe 		return PTR_ERR(edesc);
8234a651b12SRadu Alexe 
8244a651b12SRadu Alexe 	/* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
8254a651b12SRadu Alexe 	ret = set_rsa_priv_f3_pdb(req, edesc);
8264a651b12SRadu Alexe 	if (ret)
8274a651b12SRadu Alexe 		goto init_fail;
8284a651b12SRadu Alexe 
8294a651b12SRadu Alexe 	/* Initialize Job Descriptor */
8304a651b12SRadu Alexe 	init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);
8314a651b12SRadu Alexe 
832bf537950SIuliana Prodan 	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);
8334a651b12SRadu Alexe 
8344a651b12SRadu Alexe init_fail:
8354a651b12SRadu Alexe 	rsa_io_unmap(jrdev, edesc, req);
8364a651b12SRadu Alexe 	kfree(edesc);
8374a651b12SRadu Alexe 	return ret;
8384a651b12SRadu Alexe }
8394a651b12SRadu Alexe 
caam_rsa_dec(struct akcipher_request * req)84052e26d77SRadu Alexe static int caam_rsa_dec(struct akcipher_request *req)
84152e26d77SRadu Alexe {
84252e26d77SRadu Alexe 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
8434cb4f7c1SHerbert Xu 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
84452e26d77SRadu Alexe 	struct caam_rsa_key *key = &ctx->key;
84552e26d77SRadu Alexe 	int ret;
84652e26d77SRadu Alexe 
84752e26d77SRadu Alexe 	if (unlikely(!key->n || !key->d))
84852e26d77SRadu Alexe 		return -EINVAL;
84952e26d77SRadu Alexe 
85052e26d77SRadu Alexe 	if (req->dst_len < key->n_sz) {
85152e26d77SRadu Alexe 		req->dst_len = key->n_sz;
85252e26d77SRadu Alexe 		dev_err(ctx->dev, "Output buffer length less than parameter n\n");
85352e26d77SRadu Alexe 		return -EOVERFLOW;
85452e26d77SRadu Alexe 	}
85552e26d77SRadu Alexe 
8564a651b12SRadu Alexe 	if (key->priv_form == FORM3)
8574a651b12SRadu Alexe 		ret = caam_rsa_dec_priv_f3(req);
8584a651b12SRadu Alexe 	else if (key->priv_form == FORM2)
85952e26d77SRadu Alexe 		ret = caam_rsa_dec_priv_f2(req);
86052e26d77SRadu Alexe 	else
86152e26d77SRadu Alexe 		ret = caam_rsa_dec_priv_f1(req);
86252e26d77SRadu Alexe 
86352e26d77SRadu Alexe 	return ret;
86452e26d77SRadu Alexe }
86552e26d77SRadu Alexe 
caam_rsa_free_key(struct caam_rsa_key * key)8668c419778STudor Ambarus static void caam_rsa_free_key(struct caam_rsa_key *key)
8678c419778STudor Ambarus {
868453431a5SWaiman Long 	kfree_sensitive(key->d);
869453431a5SWaiman Long 	kfree_sensitive(key->p);
870453431a5SWaiman Long 	kfree_sensitive(key->q);
871453431a5SWaiman Long 	kfree_sensitive(key->dp);
872453431a5SWaiman Long 	kfree_sensitive(key->dq);
873453431a5SWaiman Long 	kfree_sensitive(key->qinv);
874453431a5SWaiman Long 	kfree_sensitive(key->tmp1);
875453431a5SWaiman Long 	kfree_sensitive(key->tmp2);
8768c419778STudor Ambarus 	kfree(key->e);
8778c419778STudor Ambarus 	kfree(key->n);
87852e26d77SRadu Alexe 	memset(key, 0, sizeof(*key));
8798c419778STudor Ambarus }
8808c419778STudor Ambarus 
/*
 * Advance *ptr past any leading zero bytes, shrinking *nbytes to match.
 * The remaining length is tested BEFORE dereferencing the buffer so that
 * a zero-length input never triggers an out-of-bounds read (the previous
 * order evaluated **ptr first, reading one byte even when *nbytes == 0).
 */
static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
{
	while (*nbytes && !**ptr) {
		(*ptr)++;
		(*nbytes)--;
	}
}
8887ca4a9a1SRadu Alexe 
8898c419778STudor Ambarus /**
8904a651b12SRadu Alexe  * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
8914a651b12SRadu Alexe  * dP, dQ and qInv could decode to less than corresponding p, q length, as the
8924a651b12SRadu Alexe  * BER-encoding requires that the minimum number of bytes be used to encode the
8934a651b12SRadu Alexe  * integer. dP, dQ, qInv decoded values have to be zero-padded to appropriate
8944a651b12SRadu Alexe  * length.
8954a651b12SRadu Alexe  *
8964a651b12SRadu Alexe  * @ptr   : pointer to {dP, dQ, qInv} CRT member
8974a651b12SRadu Alexe  * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
8984a651b12SRadu Alexe  * @dstlen: length in bytes of corresponding p or q prime factor
8994a651b12SRadu Alexe  */
static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
{
	u8 *buf;

	/* BER minimal encoding may have stripped high zero bytes. */
	caam_rsa_drop_leading_zeros(&ptr, &nbytes);
	if (nbytes == 0)
		return NULL;

	/* Zero-allocate dstlen bytes and right-align the value in them. */
	buf = kzalloc(dstlen, GFP_KERNEL);
	if (buf)
		memcpy(buf + dstlen - nbytes, ptr, nbytes);

	return buf;
}
9164a651b12SRadu Alexe 
9174a651b12SRadu Alexe /**
9188c419778STudor Ambarus  * caam_read_raw_data - Read a raw byte stream as a positive integer.
9198c419778STudor Ambarus  * The function skips buffer's leading zeros, copies the remained data
920199354d7SHerbert Xu  * to a buffer allocated in the GFP_KERNEL zone and returns
9218c419778STudor Ambarus  * the address of the new buffer.
9228c419778STudor Ambarus  *
9238c419778STudor Ambarus  * @buf   : The data to read
9248c419778STudor Ambarus  * @nbytes: The amount of data to read
9258c419778STudor Ambarus  */
static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
	/* Strip leading zeros; *nbytes is updated to the trimmed length. */
	caam_rsa_drop_leading_zeros(&buf, nbytes);

	/* NULL for an all-zero input; otherwise a GFP_KERNEL duplicate. */
	return *nbytes ? kmemdup(buf, *nbytes, GFP_KERNEL) : NULL;
}
9358c419778STudor Ambarus 
/* Reject moduli above 4096 bits; returns 0 if acceptable, -EINVAL if not. */
static int caam_rsa_check_key_length(unsigned int len)
{
	return (len > 4096) ? -EINVAL : 0;
}
9428c419778STudor Ambarus 
caam_rsa_set_pub_key(struct crypto_akcipher * tfm,const void * key,unsigned int keylen)9438c419778STudor Ambarus static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
9448c419778STudor Ambarus 				unsigned int keylen)
9458c419778STudor Ambarus {
9464cb4f7c1SHerbert Xu 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
9478439e94fSHoria Geantă 	struct rsa_key raw_key = {NULL};
9488c419778STudor Ambarus 	struct caam_rsa_key *rsa_key = &ctx->key;
9498c419778STudor Ambarus 	int ret;
9508c419778STudor Ambarus 
9518c419778STudor Ambarus 	/* Free the old RSA key if any */
9528c419778STudor Ambarus 	caam_rsa_free_key(rsa_key);
9538c419778STudor Ambarus 
9548c419778STudor Ambarus 	ret = rsa_parse_pub_key(&raw_key, key, keylen);
9558c419778STudor Ambarus 	if (ret)
9568c419778STudor Ambarus 		return ret;
9578c419778STudor Ambarus 
9588c419778STudor Ambarus 	/* Copy key in DMA zone */
959199354d7SHerbert Xu 	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_KERNEL);
9608c419778STudor Ambarus 	if (!rsa_key->e)
9618c419778STudor Ambarus 		goto err;
9628c419778STudor Ambarus 
9638c419778STudor Ambarus 	/*
9648c419778STudor Ambarus 	 * Skip leading zeros and copy the positive integer to a buffer
965199354d7SHerbert Xu 	 * allocated in the GFP_KERNEL zone. The decryption descriptor
9668c419778STudor Ambarus 	 * expects a positive integer for the RSA modulus and uses its length as
9678c419778STudor Ambarus 	 * decryption output length.
9688c419778STudor Ambarus 	 */
9698c419778STudor Ambarus 	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
9708c419778STudor Ambarus 	if (!rsa_key->n)
9718c419778STudor Ambarus 		goto err;
9728c419778STudor Ambarus 
9738c419778STudor Ambarus 	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
9748c419778STudor Ambarus 		caam_rsa_free_key(rsa_key);
9758c419778STudor Ambarus 		return -EINVAL;
9768c419778STudor Ambarus 	}
9778c419778STudor Ambarus 
9788c419778STudor Ambarus 	rsa_key->e_sz = raw_key.e_sz;
9798c419778STudor Ambarus 	rsa_key->n_sz = raw_key.n_sz;
9808c419778STudor Ambarus 
9818c419778STudor Ambarus 	return 0;
9828c419778STudor Ambarus err:
9838c419778STudor Ambarus 	caam_rsa_free_key(rsa_key);
9848c419778STudor Ambarus 	return -ENOMEM;
9858c419778STudor Ambarus }
9868c419778STudor Ambarus 
/*
 * Populate the CRT-related members of the private key so decryption can
 * use the faster form #2/#3 descriptors.
 *
 * Stores p and q (leading zeros stripped), allocates the two scratch
 * buffers (sized to a cache-alignment multiple for safe DMA), marks the
 * key FORM2, then reads dP, dQ and qInv (zero-padded to the prime size)
 * and upgrades it to FORM3.
 *
 * Returns 0 on success.  Any allocation failure — including a CRT member
 * that reads back as NULL — unwinds everything allocated here via the
 * label chain (in reverse order) and returns -ENOMEM.
 */
static int caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
				       struct rsa_key *raw_key)
{
	struct caam_rsa_key *rsa_key = &ctx->key;
	size_t p_sz = raw_key->p_sz;
	size_t q_sz = raw_key->q_sz;
	unsigned aligned_size;

	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
	if (!rsa_key->p)
		return -ENOMEM;
	rsa_key->p_sz = p_sz;

	rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
	if (!rsa_key->q)
		goto free_p;
	rsa_key->q_sz = q_sz;

	/* Scratch buffers padded to the DMA cache-alignment boundary. */
	aligned_size = ALIGN(raw_key->p_sz, dma_get_cache_alignment());
	rsa_key->tmp1 = kzalloc(aligned_size, GFP_KERNEL);
	if (!rsa_key->tmp1)
		goto free_q;

	aligned_size = ALIGN(raw_key->q_sz, dma_get_cache_alignment());
	rsa_key->tmp2 = kzalloc(aligned_size, GFP_KERNEL);
	if (!rsa_key->tmp2)
		goto free_tmp1;

	/* p, q and scratch available: form #2 is usable from here on. */
	rsa_key->priv_form = FORM2;

	rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
	if (!rsa_key->dp)
		goto free_tmp2;

	rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
	if (!rsa_key->dq)
		goto free_dp;

	rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
					  q_sz);
	if (!rsa_key->qinv)
		goto free_dq;

	/* All CRT members present: full form #3. */
	rsa_key->priv_form = FORM3;

	return 0;

	/* Unwind in reverse allocation order; zeroize secret material. */
free_dq:
	kfree_sensitive(rsa_key->dq);
free_dp:
	kfree_sensitive(rsa_key->dp);
free_tmp2:
	kfree_sensitive(rsa_key->tmp2);
free_tmp1:
	kfree_sensitive(rsa_key->tmp1);
free_q:
	kfree_sensitive(rsa_key->q);
free_p:
	kfree_sensitive(rsa_key->p);
	return -ENOMEM;
}
104852e26d77SRadu Alexe 
caam_rsa_set_priv_key(struct crypto_akcipher * tfm,const void * key,unsigned int keylen)10498c419778STudor Ambarus static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
10508c419778STudor Ambarus 				 unsigned int keylen)
10518c419778STudor Ambarus {
10524cb4f7c1SHerbert Xu 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
10538439e94fSHoria Geantă 	struct rsa_key raw_key = {NULL};
10548c419778STudor Ambarus 	struct caam_rsa_key *rsa_key = &ctx->key;
10558c419778STudor Ambarus 	int ret;
10568c419778STudor Ambarus 
10578c419778STudor Ambarus 	/* Free the old RSA key if any */
10588c419778STudor Ambarus 	caam_rsa_free_key(rsa_key);
10598c419778STudor Ambarus 
10608c419778STudor Ambarus 	ret = rsa_parse_priv_key(&raw_key, key, keylen);
10618c419778STudor Ambarus 	if (ret)
10628c419778STudor Ambarus 		return ret;
10638c419778STudor Ambarus 
10648c419778STudor Ambarus 	/* Copy key in DMA zone */
1065199354d7SHerbert Xu 	rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_KERNEL);
10668c419778STudor Ambarus 	if (!rsa_key->d)
10678c419778STudor Ambarus 		goto err;
10688c419778STudor Ambarus 
1069199354d7SHerbert Xu 	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_KERNEL);
10708c419778STudor Ambarus 	if (!rsa_key->e)
10718c419778STudor Ambarus 		goto err;
10728c419778STudor Ambarus 
10738c419778STudor Ambarus 	/*
10748c419778STudor Ambarus 	 * Skip leading zeros and copy the positive integer to a buffer
1075199354d7SHerbert Xu 	 * allocated in the GFP_KERNEL zone. The decryption descriptor
10768c419778STudor Ambarus 	 * expects a positive integer for the RSA modulus and uses its length as
10778c419778STudor Ambarus 	 * decryption output length.
10788c419778STudor Ambarus 	 */
10798c419778STudor Ambarus 	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
10808c419778STudor Ambarus 	if (!rsa_key->n)
10818c419778STudor Ambarus 		goto err;
10828c419778STudor Ambarus 
10838c419778STudor Ambarus 	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
10848c419778STudor Ambarus 		caam_rsa_free_key(rsa_key);
10858c419778STudor Ambarus 		return -EINVAL;
10868c419778STudor Ambarus 	}
10878c419778STudor Ambarus 
10888c419778STudor Ambarus 	rsa_key->d_sz = raw_key.d_sz;
10898c419778STudor Ambarus 	rsa_key->e_sz = raw_key.e_sz;
10908c419778STudor Ambarus 	rsa_key->n_sz = raw_key.n_sz;
10918c419778STudor Ambarus 
1092*b64140c7SChen Ridong 	ret = caam_rsa_set_priv_key_form(ctx, &raw_key);
1093*b64140c7SChen Ridong 	if (ret)
1094*b64140c7SChen Ridong 		goto err;
109552e26d77SRadu Alexe 
10968c419778STudor Ambarus 	return 0;
10978c419778STudor Ambarus 
10988c419778STudor Ambarus err:
10998c419778STudor Ambarus 	caam_rsa_free_key(rsa_key);
11008c419778STudor Ambarus 	return -ENOMEM;
11018c419778STudor Ambarus }
11028c419778STudor Ambarus 
caam_rsa_max_size(struct crypto_akcipher * tfm)1103e198429cSTudor-Dan Ambarus static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
11048c419778STudor Ambarus {
11054cb4f7c1SHerbert Xu 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
11068c419778STudor Ambarus 
1107e198429cSTudor-Dan Ambarus 	return ctx->key.n_sz;
11088c419778STudor Ambarus }
11098c419778STudor Ambarus 
11108c419778STudor Ambarus /* Per session pkc's driver context creation function */
caam_rsa_init_tfm(struct crypto_akcipher * tfm)11118c419778STudor Ambarus static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
11128c419778STudor Ambarus {
11134cb4f7c1SHerbert Xu 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
11148c419778STudor Ambarus 
1115908d383bSHerbert Xu 	akcipher_set_reqsize(tfm, sizeof(struct caam_rsa_req_ctx));
1116908d383bSHerbert Xu 
11178c419778STudor Ambarus 	ctx->dev = caam_jr_alloc();
11188c419778STudor Ambarus 
11198c419778STudor Ambarus 	if (IS_ERR(ctx->dev)) {
112033fa46d7SHoria Geantă 		pr_err("Job Ring Device allocation for transform failed\n");
11218c419778STudor Ambarus 		return PTR_ERR(ctx->dev);
11228c419778STudor Ambarus 	}
11238c419778STudor Ambarus 
1124c3725f7cSIuliana Prodan 	ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer,
1125c3725f7cSIuliana Prodan 					  CAAM_RSA_MAX_INPUT_SIZE - 1,
1126c3725f7cSIuliana Prodan 					  DMA_TO_DEVICE);
1127c3725f7cSIuliana Prodan 	if (dma_mapping_error(ctx->dev, ctx->padding_dma)) {
1128c3725f7cSIuliana Prodan 		dev_err(ctx->dev, "unable to map padding\n");
1129c3725f7cSIuliana Prodan 		caam_jr_free(ctx->dev);
1130c3725f7cSIuliana Prodan 		return -ENOMEM;
1131c3725f7cSIuliana Prodan 	}
1132c3725f7cSIuliana Prodan 
11338c419778STudor Ambarus 	return 0;
11348c419778STudor Ambarus }
11358c419778STudor Ambarus 
11368c419778STudor Ambarus /* Per session pkc's driver context cleanup function */
caam_rsa_exit_tfm(struct crypto_akcipher * tfm)11378c419778STudor Ambarus static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
11388c419778STudor Ambarus {
11394cb4f7c1SHerbert Xu 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
11408c419778STudor Ambarus 	struct caam_rsa_key *key = &ctx->key;
11418c419778STudor Ambarus 
1142c3725f7cSIuliana Prodan 	dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE -
1143c3725f7cSIuliana Prodan 			 1, DMA_TO_DEVICE);
11448c419778STudor Ambarus 	caam_rsa_free_key(key);
11458c419778STudor Ambarus 	caam_jr_free(ctx->dev);
11468c419778STudor Ambarus }
11478c419778STudor Ambarus 
114858068cfcSIuliana Prodan static struct caam_akcipher_alg caam_rsa = {
1149623814c0SHerbert Xu 	.akcipher.base = {
11508c419778STudor Ambarus 		.encrypt = caam_rsa_enc,
11518c419778STudor Ambarus 		.decrypt = caam_rsa_dec,
11528c419778STudor Ambarus 		.set_pub_key = caam_rsa_set_pub_key,
11538c419778STudor Ambarus 		.set_priv_key = caam_rsa_set_priv_key,
11548c419778STudor Ambarus 		.max_size = caam_rsa_max_size,
11558c419778STudor Ambarus 		.init = caam_rsa_init_tfm,
11568c419778STudor Ambarus 		.exit = caam_rsa_exit_tfm,
11578c419778STudor Ambarus 		.base = {
11588c419778STudor Ambarus 			.cra_name = "rsa",
11598c419778STudor Ambarus 			.cra_driver_name = "rsa-caam",
11608c419778STudor Ambarus 			.cra_priority = 3000,
11618c419778STudor Ambarus 			.cra_module = THIS_MODULE,
11624cb4f7c1SHerbert Xu 			.cra_ctxsize = sizeof(struct caam_rsa_ctx) +
11634cb4f7c1SHerbert Xu 				       CRYPTO_DMA_PADDING,
11648c419778STudor Ambarus 		},
1165623814c0SHerbert Xu 	},
1166623814c0SHerbert Xu 	.akcipher.op = {
1167623814c0SHerbert Xu 		.do_one_request = akcipher_do_one_req,
1168623814c0SHerbert Xu 	},
11698c419778STudor Ambarus };
11708c419778STudor Ambarus 
11718c419778STudor Ambarus /* Public Key Cryptography module initialization handler */
caam_pkc_init(struct device * ctrldev)11721b46c90cSHoria Geantă int caam_pkc_init(struct device *ctrldev)
11738c419778STudor Ambarus {
11741b46c90cSHoria Geantă 	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
1175f20311ccSMichael Walle 	u32 pk_inst, pkha;
11768c419778STudor Ambarus 	int err;
11774e3a61c5SIuliana Prodan 	init_done = false;
11788c419778STudor Ambarus 
11798c419778STudor Ambarus 	/* Determine public key hardware accelerator presence. */
1180f20311ccSMichael Walle 	if (priv->era < 10) {
1181ae1dd17dSHoria GeantA 		pk_inst = (rd_reg32(&priv->jr[0]->perfmon.cha_num_ls) &
1182d239b10dSHoria Geantă 			   CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
1183f20311ccSMichael Walle 	} else {
1184ae1dd17dSHoria GeantA 		pkha = rd_reg32(&priv->jr[0]->vreg.pkha);
1185f20311ccSMichael Walle 		pk_inst = pkha & CHA_VER_NUM_MASK;
1186f20311ccSMichael Walle 
1187f20311ccSMichael Walle 		/*
1188f20311ccSMichael Walle 		 * Newer CAAMs support partially disabled functionality. If this is the
1189f20311ccSMichael Walle 		 * case, the number is non-zero, but this bit is set to indicate that
1190f20311ccSMichael Walle 		 * no encryption or decryption is supported. Only signing and verifying
1191f20311ccSMichael Walle 		 * is supported.
1192f20311ccSMichael Walle 		 */
1193f20311ccSMichael Walle 		if (pkha & CHA_VER_MISC_PKHA_NO_CRYPT)
1194f20311ccSMichael Walle 			pk_inst = 0;
1195f20311ccSMichael Walle 	}
11968c419778STudor Ambarus 
11978c419778STudor Ambarus 	/* Do not register algorithms if PKHA is not present. */
11981b46c90cSHoria Geantă 	if (!pk_inst)
11991b46c90cSHoria Geantă 		return 0;
12008c419778STudor Ambarus 
1201c3725f7cSIuliana Prodan 	/* allocate zero buffer, used for padding input */
1202199354d7SHerbert Xu 	zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_KERNEL);
1203c3725f7cSIuliana Prodan 	if (!zero_buffer)
1204c3725f7cSIuliana Prodan 		return -ENOMEM;
1205c3725f7cSIuliana Prodan 
1206623814c0SHerbert Xu 	err = crypto_engine_register_akcipher(&caam_rsa.akcipher);
120758068cfcSIuliana Prodan 
1208c3725f7cSIuliana Prodan 	if (err) {
1209c3725f7cSIuliana Prodan 		kfree(zero_buffer);
12108c419778STudor Ambarus 		dev_warn(ctrldev, "%s alg registration failed\n",
1211623814c0SHerbert Xu 			 caam_rsa.akcipher.base.base.cra_driver_name);
1212c3725f7cSIuliana Prodan 	} else {
12134e3a61c5SIuliana Prodan 		init_done = true;
121458068cfcSIuliana Prodan 		caam_rsa.registered = true;
12158c419778STudor Ambarus 		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
1216c3725f7cSIuliana Prodan 	}
12178c419778STudor Ambarus 
12188c419778STudor Ambarus 	return err;
12198c419778STudor Ambarus }
12208c419778STudor Ambarus 
caam_pkc_exit(void)12211b46c90cSHoria Geantă void caam_pkc_exit(void)
12228c419778STudor Ambarus {
12234e3a61c5SIuliana Prodan 	if (!init_done)
12244e3a61c5SIuliana Prodan 		return;
12254e3a61c5SIuliana Prodan 
122658068cfcSIuliana Prodan 	if (caam_rsa.registered)
1227623814c0SHerbert Xu 		crypto_engine_unregister_akcipher(&caam_rsa.akcipher);
122858068cfcSIuliana Prodan 
1229c3725f7cSIuliana Prodan 	kfree(zero_buffer);
12308c419778STudor Ambarus }
1231