xref: /linux/drivers/crypto/marvell/cesa/hash.c (revision aec2f682d47c54ef434b2d440992626d80b1ebdc)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */

#include <crypto/hmac.h>
#include <crypto/md5.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include "cesa.h"

struct mv_cesa_ahash_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
};

static inline void
mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
			    struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int len = req->nbytes + creq->cache_ptr;

	if (!creq->last_req)
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;

	mv_cesa_req_dma_iter_init(&iter->base, len);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	iter->src.op_offset = creq->cache_ptr;
}

static inline bool
mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
{
	iter->src.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline int
mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)
{
	req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
				    &req->cache_dma);
	if (!req->cache)
		return -ENOMEM;

	return 0;
}

static inline void
mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->cache)
		return;

	dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
		      req->cache_dma);
}

static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
					   gfp_t flags)
{
	if (req->padding)
		return 0;

	req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags,
				      &req->padding_dma);
	if (!req->padding)
		return -ENOMEM;

	return 0;
}

static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->padding)
		return;

	dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
		      req->padding_dma);
	req->padding = NULL;
}

static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	mv_cesa_ahash_dma_free_padding(&creq->req.dma);
}

static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
	mv_cesa_ahash_dma_free_cache(&creq->req.dma);
	mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_engine *engine = creq->base.engine;

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_cleanup(req);

	atomic_sub(req->nbytes, &engine->load);
}

static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_last_cleanup(req);
}

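/*
 * Compute how many padding bytes are needed so that the message, the
 * padding and the trailing 8-byte length field end on a 64-byte block
 * boundary. MD5, SHA1 and SHA256 all use the same rule: pad to
 * 56 mod 64. Worked example: for creq->len == 60, index = 60, so
 * padlen = 64 + 56 - 60 = 60 (the trailer spills into a second block).
 */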
static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
{
	unsigned int index, padlen;

	index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
	padlen = (index < 56) ? (56 - index) : (64 + 56 - index);

	return padlen;
}

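/*
 * Write the standard MD5/SHA trailer into @buf: a 0x80 marker byte,
 * zero padding up to 56 mod 64, then the total message length in bits
 * as a 64-bit integer (little-endian for MD5, big-endian for SHA).
 * Returns the total trailer length in bytes.
 */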
static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
{
	unsigned int padlen;

	buf[0] = 0x80;
	/* Pad out to 56 mod 64 */
	padlen = mv_cesa_ahash_pad_len(creq);
	memset(buf + 1, 0, padlen - 1);

	if (creq->algo_le) {
		__le64 bits = cpu_to_le64(creq->len << 3);

		memcpy(buf + padlen, &bits, sizeof(bits));
	} else {
		__be64 bits = cpu_to_be64(creq->len << 3);

		memcpy(buf + padlen, &bits, sizeof(bits));
	}

	return padlen + 8;
}

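/*
 * One step of the standard (non-TDMA) path: the CPU copies the
 * operation descriptor, any cached left-over bytes and up to
 * CESA_SA_SRAM_PAYLOAD_SIZE bytes of new input into the engine SRAM,
 * picks the appropriate fragment mode (not/first/mid/last fragment),
 * appends the trailer in SRAM when the hash can be finished there, and
 * then kicks the accelerator. Input that does not fill a complete hash
 * block is carried over in creq->cache for the next step.
 */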
static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = creq->base.engine;
	struct mv_cesa_op_ctx *op;
	unsigned int new_cache_ptr = 0;
	u32 frag_mode;
	size_t len;
	unsigned int digsize;
	int i;

	mv_cesa_adjust_op(engine, &creq->op_tmpl);
	if (engine->pool)
		memcpy(engine->sram_pool, &creq->op_tmpl,
		       sizeof(creq->op_tmpl));
	else
		memcpy_toio(engine->sram, &creq->op_tmpl,
			    sizeof(creq->op_tmpl));

	if (!sreq->offset) {
		digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
		for (i = 0; i < digsize / 4; i++)
			writel_relaxed(creq->state[i],
				       engine->regs + CESA_IVDIG(i));
	}

	if (creq->cache_ptr) {
		if (engine->pool)
			memcpy(engine->sram_pool + CESA_SA_DATA_SRAM_OFFSET,
			       creq->cache, creq->cache_ptr);
		else
			memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				    creq->cache, creq->cache_ptr);
	}

	len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
		    CESA_SA_SRAM_PAYLOAD_SIZE);

	if (!creq->last_req) {
		new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;
	}

	if (len - creq->cache_ptr)
		sreq->offset += mv_cesa_sg_copy_to_sram(
			engine, req->src, creq->src_nents,
			CESA_SA_DATA_SRAM_OFFSET + creq->cache_ptr,
			len - creq->cache_ptr, sreq->offset);

	op = &creq->op_tmpl;

	frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;

	if (creq->last_req && sreq->offset == req->nbytes &&
	    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
		if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
			frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
		else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
			frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
	}

	if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
	    frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
		if (len &&
		    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
			mv_cesa_set_mac_op_total_len(op, creq->len);
		} else {
			int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;

			if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
				len &= CESA_HASH_BLOCK_SIZE_MSK;
				new_cache_ptr = 64 - trailerlen;
				if (engine->pool)
					memcpy(creq->cache,
					       engine->sram_pool +
					       CESA_SA_DATA_SRAM_OFFSET + len,
					       new_cache_ptr);
				else
					memcpy_fromio(creq->cache,
						      engine->sram +
						      CESA_SA_DATA_SRAM_OFFSET +
						      len,
						      new_cache_ptr);
			} else {
				i = mv_cesa_ahash_pad_req(creq, creq->cache);
				len += i;
				if (engine->pool)
					memcpy(engine->sram_pool + len +
					       CESA_SA_DATA_SRAM_OFFSET,
					       creq->cache, i);
				else
					memcpy_toio(engine->sram + len +
						    CESA_SA_DATA_SRAM_OFFSET,
						    creq->cache, i);
			}

			if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
				frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
			else
				frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
		}
	}

	mv_cesa_set_mac_op_frag_len(op, len);
	mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);

	/* FIXME: only update enc_len field */
	if (engine->pool)
		memcpy(engine->sram_pool, op, sizeof(*op));
	else
		memcpy_toio(engine->sram, op, sizeof(*op));

	if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
		mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->cache_ptr = new_cache_ptr;

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	WARN_ON(readl(engine->regs + CESA_SA_CMD) &
		CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	if (sreq->offset < (req->nbytes - creq->cache_ptr))
		return -EINPROGRESS;

	return 0;
}

static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_req *basereq = &creq->base;

	mv_cesa_dma_prepare(basereq, basereq->engine);
}

static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	sreq->offset = 0;
}

static void mv_cesa_ahash_dma_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_req *base = &creq->base;

	/* We must explicitly set the digest state. */
	if (base->chain.first->flags & CESA_TDMA_SET_STATE) {
		struct mv_cesa_engine *engine = base->engine;
		int i;

		/* Set the hash state in the IVDIG regs. */
		for (i = 0; i < ARRAY_SIZE(creq->state); i++)
			writel_relaxed(creq->state[i], engine->regs +
				       CESA_IVDIG(i));
	}

	mv_cesa_dma_step(base);
}

static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_step(ahashreq);
	else
		mv_cesa_ahash_std_step(ahashreq);
}

static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		return mv_cesa_dma_process(&creq->base, status);

	return mv_cesa_ahash_std_process(ahashreq, status);
}

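/*
 * Completion handler: when the chain ended with a CESA_TDMA_RESULT
 * descriptor, the engine already copied the digest (in the correct
 * byte order) into the last operation context, so it only needs to be
 * copied to the user buffer. Otherwise the intermediate or final
 * digest is read back from the IVDIG registers and, for the final
 * request, converted from the hardware byte order to the canonical
 * digest layout.
 */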
static void mv_cesa_ahash_complete(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int digsize;
	int i;

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
	    (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) ==
	     CESA_TDMA_RESULT) {
		const void *data;

		/*
		 * The result is already in the correct endianness when the
		 * SA is used.
		 */
		data = creq->base.chain.last->op->ctx.hash.hash;
		memcpy(ahashreq->result, data, digsize);
	} else {
		for (i = 0; i < digsize / 4; i++)
			creq->state[i] = readl_relaxed(engine->regs +
						       CESA_IVDIG(i));
		if (creq->last_req) {
			/*
			 * The hardware's MD5 digest is in little-endian
			 * format, but SHA digests are in big-endian format.
			 */
			if (creq->algo_le) {
				__le32 *result = (void *)ahashreq->result;

				for (i = 0; i < digsize / 4; i++)
					result[i] = cpu_to_le32(creq->state[i]);
			} else {
				__be32 *result = (void *)ahashreq->result;

				for (i = 0; i < digsize / 4; i++)
					result[i] = cpu_to_be32(creq->state[i]);
			}
		}
	}
}

static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
				  struct mv_cesa_engine *engine)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	creq->base.engine = engine;

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_prepare(ahashreq);
	else
		mv_cesa_ahash_std_prepare(ahashreq);
}

static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (creq->last_req)
		mv_cesa_ahash_last_cleanup(ahashreq);

	mv_cesa_ahash_cleanup(ahashreq);

	if (creq->cache_ptr)
		sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
				   creq->cache,
				   creq->cache_ptr,
				   ahashreq->nbytes - creq->cache_ptr);
}

static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
	.step = mv_cesa_ahash_step,
	.process = mv_cesa_ahash_process,
	.cleanup = mv_cesa_ahash_req_cleanup,
	.complete = mv_cesa_ahash_complete,
};

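/*
 * Common initialization for all hash flavours: reset the per-request
 * context and prepare the operation template as a MAC-only, "first
 * fragment" operation with zero lengths. @algo_le selects the
 * little-endian length/digest convention used by MD5, as opposed to
 * the big-endian one used by SHA1/SHA256.
 */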
static void mv_cesa_ahash_init(struct ahash_request *req,
			       struct mv_cesa_op_ctx *tmpl, bool algo_le)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	memset(creq, 0, sizeof(*creq));
	mv_cesa_update_op_cfg(tmpl,
			      CESA_SA_DESC_CFG_OP_MAC_ONLY |
			      CESA_SA_DESC_CFG_FIRST_FRAG,
			      CESA_SA_DESC_CFG_OP_MSK |
			      CESA_SA_DESC_CFG_FRAG_MSK);
	mv_cesa_set_mac_op_total_len(tmpl, 0);
	mv_cesa_set_mac_op_frag_len(tmpl, 0);
	creq->op_tmpl = *tmpl;
	creq->len = 0;
	creq->algo_le = algo_le;
}

static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));
	return 0;
}

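/*
 * If the data accumulated so far still fits in a single hash block and
 * this is not the final request, stash it in creq->cache and report
 * the request as fully handled: the engine is only ever fed whole
 * blocks until the last request arrives.
 */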
static bool mv_cesa_ahash_cache_req(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	bool cached = false;

	if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE &&
	    !creq->last_req) {
		cached = true;

		if (!req->nbytes)
			return cached;

		sg_pcopy_to_buffer(req->src, creq->src_nents,
				   creq->cache + creq->cache_ptr,
				   req->nbytes, 0);

		creq->cache_ptr += req->nbytes;
	}

	return cached;
}

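/*
 * Append an operation descriptor for one fragment to the TDMA chain,
 * followed by a dummy "launch" descriptor that triggers the engine.
 * If the template was still flagged as a first fragment, demote it to
 * a mid fragment so that subsequent operations chain correctly.
 */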
static struct mv_cesa_op_ctx *
mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain,
		     struct mv_cesa_op_ctx *tmpl, unsigned int frag_len,
		     gfp_t flags)
{
	struct mv_cesa_op_ctx *op;
	int ret;

	op = mv_cesa_dma_add_op(chain, tmpl, false, flags);
	if (IS_ERR(op))
		return op;

	/* Set the operation block fragment length. */
	mv_cesa_set_mac_op_frag_len(op, frag_len);

	/* Append a dummy descriptor to launch the operation. */
	ret = mv_cesa_dma_add_dummy_launch(chain, flags);
	if (ret)
		return ERR_PTR(ret);

	if (mv_cesa_mac_op_is_first_frag(tmpl))
		mv_cesa_update_op_cfg(tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	return op;
}

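/*
 * Transfer the bytes cached from a previous request into the engine
 * SRAM via DMA. The cache is copied into a buffer from the
 * DMA-coherent cache pool first, since creq->cache itself is not
 * DMA-mapped.
 */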
static int
mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
			    struct mv_cesa_ahash_req *creq,
			    gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	int ret;

	if (!creq->cache_ptr)
		return 0;

	ret = mv_cesa_ahash_dma_alloc_cache(ahashdreq, flags);
	if (ret)
		return ret;

	memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);

	return mv_cesa_dma_add_data_transfer(chain,
					     CESA_SA_DATA_SRAM_OFFSET,
					     ahashdreq->cache_dma,
					     creq->cache_ptr,
					     CESA_TDMA_DST_IN_SRAM,
					     flags);
}

static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
			   struct mv_cesa_ahash_dma_iter *dma_iter,
			   struct mv_cesa_ahash_req *creq,
			   unsigned int frag_len, gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	unsigned int len, trailerlen, padoff = 0;
	struct mv_cesa_op_ctx *op;
	int ret;

	/*
	 * If the transfer is smaller than our maximum length, and we have
	 * some data outstanding, we can ask the engine to finish the hash.
	 */
	if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
					  flags);
		if (IS_ERR(op))
			return op;

		mv_cesa_set_mac_op_total_len(op, creq->len);
		mv_cesa_update_op_cfg(op, mv_cesa_mac_op_is_first_frag(op) ?
						CESA_SA_DESC_CFG_NOT_FRAG :
						CESA_SA_DESC_CFG_LAST_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

		ret = mv_cesa_dma_add_result_op(chain,
						CESA_SA_CFG_SRAM_OFFSET,
						CESA_SA_DATA_SRAM_OFFSET,
						CESA_TDMA_SRC_IN_SRAM, flags);
		if (ret)
			return ERR_PTR(-ENOMEM);
		return op;
	}

	/*
	 * The request is longer than the engine can handle, or we have
	 * no data outstanding. Manually generate the padding, adding it
	 * as a "mid" fragment.
	 */
	ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags);
	if (ret)
		return ERR_PTR(ret);

	trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);

	len = min(CESA_SA_SRAM_PAYLOAD_SIZE - frag_len, trailerlen);
	if (len) {
		ret = mv_cesa_dma_add_data_transfer(chain,
						CESA_SA_DATA_SRAM_OFFSET +
						frag_len,
						ahashdreq->padding_dma,
						len, CESA_TDMA_DST_IN_SRAM,
						flags);
		if (ret)
			return ERR_PTR(ret);

		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len + len,
					  flags);
		if (IS_ERR(op))
			return op;

		if (len == trailerlen)
			return op;

		padoff += len;
	}

	ret = mv_cesa_dma_add_data_transfer(chain,
					    CESA_SA_DATA_SRAM_OFFSET,
					    ahashdreq->padding_dma +
					    padoff,
					    trailerlen - padoff,
					    CESA_TDMA_DST_IN_SRAM,
					    flags);
	if (ret)
		return ERR_PTR(ret);

	return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
				    flags);
}

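/*
 * Build the whole TDMA descriptor chain for this request: map the
 * source scatterlist, inject the cached left-over bytes, then emit one
 * (data transfer, operation, launch) group per SRAM-sized chunk of
 * input. The final chunk is handled specially: for the last request it
 * is closed by mv_cesa_ahash_dma_last_req() (possibly with CPU-built
 * padding); otherwise the outstanding sub-block bytes are recorded in
 * creq->cache_ptr so they can be replayed with the next request.
 */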
static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_req *basereq = &creq->base;
	struct mv_cesa_ahash_dma_iter iter;
	struct mv_cesa_op_ctx *op = NULL;
	unsigned int frag_len;
	bool set_state = false;
	int ret;
	u32 type;

	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
		set_state = true;

	if (creq->src_nents) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err;
		}
	}

	mv_cesa_tdma_desc_iter_init(&basereq->chain);
	mv_cesa_ahash_req_iter_init(&iter, req);

	/*
	 * Add the cache (left-over data from a previous block) first.
	 * This will never overflow the SRAM size.
	 */
	ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, creq, flags);
	if (ret)
		goto err_free_tdma;

	if (iter.base.len > iter.src.op_offset) {
		/*
		 * Add all the new data, inserting an operation block and
		 * launch command between each full SRAM block-worth of
		 * data. We intentionally do not add the final op block.
		 */
		while (true) {
			ret = mv_cesa_dma_add_op_transfers(&basereq->chain,
							   &iter.base,
							   &iter.src, flags);
			if (ret)
				goto err_free_tdma;

			frag_len = iter.base.op_len;

			if (!mv_cesa_ahash_req_iter_next_op(&iter))
				break;

			op = mv_cesa_dma_add_frag(&basereq->chain,
						  &creq->op_tmpl,
						  frag_len, flags);
			if (IS_ERR(op)) {
				ret = PTR_ERR(op);
				goto err_free_tdma;
			}
		}
	} else {
		/* Account for the data that was in the cache. */
		frag_len = iter.base.op_len;
	}

	/*
	 * At this point, frag_len indicates whether we have any data
	 * outstanding which needs an operation. Queue up the final
	 * operation, which depends on whether this is the final request.
	 */
	if (creq->last_req)
		op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq,
						frag_len, flags);
	else if (frag_len)
		op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
					  frag_len, flags);

	if (IS_ERR(op)) {
		ret = PTR_ERR(op);
		goto err_free_tdma;
	}

	/*
	 * If results are copied via DMA, this means that this
	 * request can be directly processed by the engine,
	 * without partial updates. So we can chain it at the
	 * DMA level with other requests.
	 */
	type = basereq->chain.last->flags & CESA_TDMA_TYPE_MSK;

	if (op && type != CESA_TDMA_RESULT) {
		/* Add a dummy desc to wait for the crypto operation to end. */
		ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
		if (ret)
			goto err_free_tdma;
	}

	if (!creq->last_req)
		creq->cache_ptr = req->nbytes + creq->cache_ptr -
				  iter.base.len;
	else
		creq->cache_ptr = 0;

	basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

	if (type != CESA_TDMA_RESULT)
		basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;

	if (set_state) {
		/*
		 * Put the CESA_TDMA_SET_STATE flag on the first tdma desc to
		 * let the step logic know that the IVDIG registers should be
		 * explicitly set before launching a TDMA chain.
		 */
		basereq->chain.first->flags |= CESA_TDMA_SET_STATE;
	}

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(basereq);
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);

err:
	mv_cesa_ahash_last_cleanup(req);

	return ret;
}

static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (creq->src_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of src SG");
		return creq->src_nents;
	}

	*cached = mv_cesa_ahash_cache_req(req);

	if (*cached)
		return 0;

	if (cesa_dev->caps->has_tdma)
		return mv_cesa_ahash_dma_req_init(req);
	else
		return 0;
}

static int mv_cesa_ahash_queue_req(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_engine *engine;
	bool cached = false;
	int ret;

	ret = mv_cesa_ahash_req_init(req, &cached);
	if (ret)
		return ret;

	if (cached)
		return 0;

	engine = mv_cesa_select_engine(req->nbytes);
	mv_cesa_ahash_prepare(&req->base, engine);

	ret = mv_cesa_queue_req(&req->base, &creq->base);

	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ahash_cleanup(req);

	return ret;
}

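/*
 * crypto_ahash entry points: update() accumulates data (possibly just
 * caching it), final() closes the hash with no new input, and finup()
 * handles the remaining data and the trailer in one go. All of them
 * funnel into mv_cesa_ahash_queue_req().
 */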
static int mv_cesa_ahash_update(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	creq->len += req->nbytes;

	return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_final(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;
	req->nbytes = 0;

	return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_finup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

	creq->len += req->nbytes;
	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;

	return mv_cesa_ahash_queue_req(req);
}

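/*
 * export/import serialize the request state in the generic md5/sha
 * state layout: total length in bytes, the intermediate digest and the
 * cached partial block (zero-padded to the block size on export).
 * import() recomputes cache_ptr as len % blocksize via do_div().
 */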
static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
				u64 *len, void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;

	blocksize = crypto_ahash_blocksize(ahash);

	*len = creq->len;
	memcpy(hash, creq->state, digsize);
	memcpy_and_pad(cache, blocksize, creq->cache, creq->cache_ptr, 0);

	return 0;
}

static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
				u64 len, const void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;
	unsigned int cache_ptr;
	int ret;

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	blocksize = crypto_ahash_blocksize(ahash);
	if (len >= blocksize)
		mv_cesa_update_op_cfg(&creq->op_tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->len = len;
	memcpy(creq->state, hash, digsize);
	creq->cache_ptr = 0;

	cache_ptr = do_div(len, blocksize);
	if (!cache_ptr)
		return 0;

	memcpy(creq->cache, cache, cache_ptr);
	creq->cache_ptr = cache_ptr;

	return 0;
}

static int mv_cesa_md5_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);

	mv_cesa_ahash_init(req, &tmpl, true);

	creq->state[0] = MD5_H0;
	creq->state[1] = MD5_H1;
	creq->state[2] = MD5_H2;
	creq->state[3] = MD5_H3;

	return 0;
}

static int mv_cesa_md5_export(struct ahash_request *req, void *out)
{
	struct md5_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->hash,
				    &out_state->byte_count, out_state->block);
}

static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
{
	const struct md5_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->hash, in_state->byte_count,
				    in_state->block);
}

static int mv_cesa_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_md5_alg = {
	.init = mv_cesa_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_md5_digest,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "md5",
			.cra_driver_name = "mv-md5",
			.cra_priority = 0,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);

	mv_cesa_ahash_init(req, &tmpl, false);

	creq->state[0] = SHA1_H0;
	creq->state[1] = SHA1_H1;
	creq->state[2] = SHA1_H2;
	creq->state[3] = SHA1_H3;
	creq->state[4] = SHA1_H4;

	return 0;
}

static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
{
	struct sha1_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buffer);
}

static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
{
	const struct sha1_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buffer);
}

static int mv_cesa_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_sha1_alg = {
	.init = mv_cesa_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha1_digest,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "sha1",
			.cra_driver_name = "mv-sha1",
			.cra_priority = 0,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);

	mv_cesa_ahash_init(req, &tmpl, false);

	creq->state[0] = SHA256_H0;
	creq->state[1] = SHA256_H1;
	creq->state[2] = SHA256_H2;
	creq->state[3] = SHA256_H3;
	creq->state[4] = SHA256_H4;
	creq->state[5] = SHA256_H5;
	creq->state[6] = SHA256_H6;
	creq->state[7] = SHA256_H7;

	return 0;
}

static int mv_cesa_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

static int mv_cesa_sha256_export(struct ahash_request *req, void *out)
{
	struct sha256_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buf);
}

static int mv_cesa_sha256_import(struct ahash_request *req, const void *in)
{
	const struct sha256_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buf);
}

struct ahash_alg mv_sha256_alg = {
	.init = mv_cesa_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha256_digest,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "sha256",
			.cra_driver_name = "mv-sha256",
			.cra_priority = 0,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

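/*
 * Hash one HMAC pad block (ipad or opad) synchronously and export the
 * resulting intermediate state. These precomputed inner/outer states
 * are later loaded into the engine as the HMAC IV.
 */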
static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
				       void *state, unsigned int blocksize)
{
	DECLARE_CRYPTO_WAIT(result);
	struct scatterlist sg;
	int ret;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &result);
	sg_init_one(&sg, pad, blocksize);
	ahash_request_set_crypt(req, &sg, pad, blocksize);

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	ret = crypto_ahash_update(req);
	ret = crypto_wait_req(ret, &result);

	if (ret)
		return ret;

	ret = crypto_ahash_export(req, state);
	if (ret)
		return ret;

	return 0;
}

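/*
 * RFC 2104 key preprocessing: keys longer than the block size are
 * first hashed down to the digest size; the key is then zero-padded to
 * a full block and XORed with the 0x36/0x5c constants to form the
 * inner (ipad) and outer (opad) blocks.
 */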
static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
				  const u8 *key, unsigned int keylen,
				  u8 *ipad, u8 *opad,
				  unsigned int blocksize)
{
	DECLARE_CRYPTO_WAIT(result);
	struct scatterlist sg;
	int ret;
	int i;

	if (keylen <= blocksize) {
		memcpy(ipad, key, keylen);
	} else {
		u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);

		if (!keydup)
			return -ENOMEM;

		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   crypto_req_done, &result);
		sg_init_one(&sg, keydup, keylen);
		ahash_request_set_crypt(req, &sg, ipad, keylen);

		ret = crypto_ahash_digest(req);
		ret = crypto_wait_req(ret, &result);

		/* Zero the key copy to avoid leaking key material. */
		kfree_sensitive(keydup);

		if (ret)
			return ret;

		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	}

	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}

	return 0;
}

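/*
 * Derive the HMAC inner and outer intermediate states by running the
 * driver's own "mv-*" implementation of the underlying hash over the
 * ipad and opad blocks. A single allocation of 2 * blocksize bytes
 * holds both pads (opad starts at ipad + blocksize).
 */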
static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
				const u8 *key, unsigned int keylen,
				void *istate, void *ostate)
{
	struct ahash_request *req;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad = NULL;
	u8 *opad;
	int ret;

	tfm = crypto_alloc_ahash(hash_alg_name, 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	ipad = kcalloc(2, blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_req;
	}

	opad = ipad + blocksize;

	ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);

free_ipad:
	kfree(ipad);
free_req:
	ahash_request_free(req);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}

static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));
	return 0;
}

static int mv_cesa_ahmac_md5_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, true);

	return 0;
}

static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
				    unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct md5_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.hash); i++)
		ctx->iv[i] = cpu_to_be32(istate.hash[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.hash); i++)
		ctx->iv[i + 8] = cpu_to_be32(ostate.hash[i]);

	return 0;
}

static int mv_cesa_ahmac_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_md5_alg = {
	.init = mv_cesa_ahmac_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_md5_digest,
	.setkey = mv_cesa_ahmac_md5_setkey,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "hmac(md5)",
			.cra_driver_name = "mv-hmac-md5",
			.cra_priority = 0,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
				     unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha1_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = cpu_to_be32(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = cpu_to_be32(ostate.state[i]);

	return 0;
}

static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha1_alg = {
	.init = mv_cesa_ahmac_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha1_digest,
	.setkey = mv_cesa_ahmac_sha1_setkey,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "hmac(sha1)",
			.cra_driver_name = "mv-hmac-sha1",
			.cra_priority = 0,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
				       unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha256_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = cpu_to_be32(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = cpu_to_be32(ostate.state[i]);

	return 0;
}

static int mv_cesa_ahmac_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha256_alg = {
	.init = mv_cesa_ahmac_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha256_digest,
	.setkey = mv_cesa_ahmac_sha256_setkey,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "hmac(sha256)",
			.cra_driver_name = "mv-hmac-sha256",
			.cra_priority = 0,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};
1455