xref: /linux/drivers/crypto/stm32/stm32-hash.c (revision 44a8c96edd0ee9320a1ad87afc7b10f38e55d5ec)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * This file is part of STM32 Crypto driver for Linux.
4  *
5  * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
6  * Author(s): Lionel DEBIEVE <lionel.debieve@st.com> for STMicroelectronics.
7  */
8 
9 #include <crypto/engine.h>
10 #include <crypto/internal/hash.h>
11 #include <crypto/md5.h>
12 #include <crypto/scatterwalk.h>
13 #include <crypto/sha1.h>
14 #include <crypto/sha2.h>
15 #include <crypto/sha3.h>
16 #include <linux/clk.h>
17 #include <linux/delay.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/dmaengine.h>
20 #include <linux/interrupt.h>
21 #include <linux/iopoll.h>
22 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <linux/of.h>
25 #include <linux/platform_device.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/reset.h>
28 #include <linux/string.h>
29 
30 #define HASH_CR				0x00
31 #define HASH_DIN			0x04
32 #define HASH_STR			0x08
33 #define HASH_UX500_HREG(x)		(0x0c + ((x) * 0x04))
34 #define HASH_IMR			0x20
35 #define HASH_SR				0x24
36 #define HASH_CSR(x)			(0x0F8 + ((x) * 0x04))
37 #define HASH_HREG(x)			(0x310 + ((x) * 0x04))
38 #define HASH_HWCFGR			0x3F0
39 #define HASH_VER			0x3F4
40 #define HASH_ID				0x3F8
41 
42 /* Control Register */
43 #define HASH_CR_INIT			BIT(2)
44 #define HASH_CR_DMAE			BIT(3)
45 #define HASH_CR_DATATYPE_POS		4
46 #define HASH_CR_MODE			BIT(6)
47 #define HASH_CR_ALGO_POS		7
48 #define HASH_CR_MDMAT			BIT(13)
49 #define HASH_CR_DMAA			BIT(14)
50 #define HASH_CR_LKEY			BIT(16)
51 
52 /* Interrupt */
53 #define HASH_DINIE			BIT(0)
54 #define HASH_DCIE			BIT(1)
55 
56 /* Interrupt Mask */
57 #define HASH_MASK_CALC_COMPLETION	BIT(0)
58 #define HASH_MASK_DATA_INPUT		BIT(1)
59 
60 /* Status Flags */
61 #define HASH_SR_DATA_INPUT_READY	BIT(0)
62 #define HASH_SR_OUTPUT_READY		BIT(1)
63 #define HASH_SR_DMA_ACTIVE		BIT(2)
64 #define HASH_SR_BUSY			BIT(3)
65 
66 /* STR Register */
67 #define HASH_STR_NBLW_MASK		GENMASK(4, 0)
68 #define HASH_STR_DCAL			BIT(8)
69 
70 /* HWCFGR Register */
71 #define HASH_HWCFG_DMA_MASK		GENMASK(3, 0)
72 
73 /* Context swap register */
74 #define HASH_CSR_NB_SHA256_HMAC		54
75 #define HASH_CSR_NB_SHA256		38
76 #define HASH_CSR_NB_SHA512_HMAC		103
77 #define HASH_CSR_NB_SHA512		91
78 #define HASH_CSR_NB_SHA3_HMAC		88
79 #define HASH_CSR_NB_SHA3		72
80 #define HASH_CSR_NB_MAX			HASH_CSR_NB_SHA512_HMAC
81 
82 #define HASH_FLAGS_INIT			BIT(0)
83 #define HASH_FLAGS_OUTPUT_READY		BIT(1)
84 #define HASH_FLAGS_CPU			BIT(2)
85 #define HASH_FLAGS_DMA_ACTIVE		BIT(3)
86 #define HASH_FLAGS_HMAC_INIT		BIT(4)
87 #define HASH_FLAGS_HMAC_FINAL		BIT(5)
88 #define HASH_FLAGS_HMAC_KEY		BIT(6)
89 #define HASH_FLAGS_SHA3_MODE		BIT(7)
90 #define HASH_FLAGS_FINAL		BIT(15)
91 #define HASH_FLAGS_FINUP		BIT(16)
92 #define HASH_FLAGS_ALGO_MASK		GENMASK(20, 17)
93 #define HASH_FLAGS_ALGO_SHIFT		17
94 #define HASH_FLAGS_ERRORS		BIT(21)
95 #define HASH_FLAGS_EMPTY		BIT(22)
96 #define HASH_FLAGS_HMAC			BIT(23)
97 #define HASH_FLAGS_SGS_COPIED		BIT(24)
98 
99 #define HASH_OP_UPDATE			1
100 #define HASH_OP_FINAL			2
101 
102 #define HASH_BURST_LEVEL		4
103 
104 enum stm32_hash_data_format {
105 	HASH_DATA_32_BITS		= 0x0,
106 	HASH_DATA_16_BITS		= 0x1,
107 	HASH_DATA_8_BITS		= 0x2,
108 	HASH_DATA_1_BIT			= 0x3
109 };
110 
111 #define HASH_BUFLEN			(SHA3_224_BLOCK_SIZE + 4)
112 #define HASH_MAX_KEY_SIZE		(SHA512_BLOCK_SIZE * 8)
113 
114 enum stm32_hash_algo {
115 	HASH_SHA1			= 0,
116 	HASH_MD5			= 1,
117 	HASH_SHA224			= 2,
118 	HASH_SHA256			= 3,
119 	HASH_SHA3_224			= 4,
120 	HASH_SHA3_256			= 5,
121 	HASH_SHA3_384			= 6,
122 	HASH_SHA3_512			= 7,
123 	HASH_SHA384			= 12,
124 	HASH_SHA512			= 15,
125 };
126 
127 enum ux500_hash_algo {
128 	HASH_SHA256_UX500		= 0,
129 	HASH_SHA1_UX500			= 1,
130 };
131 
132 #define HASH_AUTOSUSPEND_DELAY		50
133 
134 struct stm32_hash_ctx {
135 	struct stm32_hash_dev	*hdev;
136 	struct crypto_shash	*xtfm;
137 	unsigned long		flags;
138 
139 	u8			key[HASH_MAX_KEY_SIZE];
140 	int			keylen;
141 };
142 
143 struct stm32_hash_state {
144 	u32			flags;
145 
146 	u16			bufcnt;
147 	u16			blocklen;
148 
149 	u8 buffer[HASH_BUFLEN] __aligned(sizeof(u32));
150 
151 	/* hash state */
152 	u32			hw_context[3 + HASH_CSR_NB_MAX];
153 };
154 
155 struct stm32_hash_request_ctx {
156 	struct stm32_hash_dev	*hdev;
157 	unsigned long		op;
158 
159 	u8 digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32));
160 	size_t			digcnt;
161 
162 	struct scatterlist	*sg;
163 	struct scatterlist	sgl[2]; /* scatterlist used to achieve alignment */
164 	unsigned int		offset;
165 	unsigned int		total;
166 	struct scatterlist	sg_key;
167 
168 	dma_addr_t		dma_addr;
169 	size_t			dma_ct;
170 	int			nents;
171 
172 	u8			data_type;
173 
174 	struct stm32_hash_state state;
175 };
176 
177 struct stm32_hash_algs_info {
178 	struct ahash_engine_alg	*algs_list;
179 	size_t			size;
180 };
181 
182 struct stm32_hash_pdata {
183 	const int				alg_shift;
184 	const struct stm32_hash_algs_info	*algs_info;
185 	size_t					algs_info_size;
186 	bool					has_sr;
187 	bool					has_mdmat;
188 	bool					context_secured;
189 	bool					broken_emptymsg;
190 	bool					ux500;
191 };
192 
193 struct stm32_hash_dev {
194 	struct list_head	list;
195 	struct device		*dev;
196 	struct clk		*clk;
197 	struct reset_control	*rst;
198 	void __iomem		*io_base;
199 	phys_addr_t		phys_base;
200 	u8			xmit_buf[HASH_BUFLEN] __aligned(sizeof(u32));
201 	u32			dma_mode;
202 	bool			polled;
203 
204 	struct ahash_request	*req;
205 	struct crypto_engine	*engine;
206 
207 	unsigned long		flags;
208 
209 	struct dma_chan		*dma_lch;
210 	struct completion	dma_completion;
211 
212 	const struct stm32_hash_pdata	*pdata;
213 };
214 
215 struct stm32_hash_drv {
216 	struct list_head	dev_list;
217 	spinlock_t		lock; /* Protects dev_list access */
218 };
219 
220 static struct stm32_hash_drv stm32_hash = {
221 	.dev_list = LIST_HEAD_INIT(stm32_hash.dev_list),
222 	.lock = __SPIN_LOCK_UNLOCKED(stm32_hash.lock),
223 };
224 
225 static void stm32_hash_dma_callback(void *param);
226 static int stm32_hash_prepare_request(struct ahash_request *req);
227 static void stm32_hash_unprepare_request(struct ahash_request *req);
228 
229 static inline u32 stm32_hash_read(struct stm32_hash_dev *hdev, u32 offset)
230 {
231 	return readl_relaxed(hdev->io_base + offset);
232 }
233 
234 static inline void stm32_hash_write(struct stm32_hash_dev *hdev,
235 				    u32 offset, u32 value)
236 {
237 	writel_relaxed(value, hdev->io_base + offset);
238 }
239 
240 /**
241  * stm32_hash_wait_busy - wait until the hash processor is available. It returns
242  * an error if the hash core keeps processing a block of data for more than 10 ms.
243  * @hdev: the stm32_hash_dev device.
244  */
245 static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev)
246 {
247 	u32 status;
248 
249 	/* The Ux500 lacks the special status register, so we poll the DCAL bit instead */
250 	if (!hdev->pdata->has_sr)
251 		return readl_relaxed_poll_timeout(hdev->io_base + HASH_STR, status,
252 						  !(status & HASH_STR_DCAL), 10, 10000);
253 
254 	return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status,
255 				   !(status & HASH_SR_BUSY), 10, 10000);
256 }
257 
258 /**
259  * stm32_hash_set_nblw - set the number of valid bits in the last word.
260  * @hdev: the stm32_hash_dev device.
261  * @length: the length in bytes of the final data.
262  */
263 static void stm32_hash_set_nblw(struct stm32_hash_dev *hdev, int length)
264 {
265 	u32 reg;
266 
267 	reg = stm32_hash_read(hdev, HASH_STR);
268 	reg &= ~(HASH_STR_NBLW_MASK);
269 	reg |= (8U * ((length) % 4U));
270 	stm32_hash_write(hdev, HASH_STR, reg);
271 }
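/*
 * A worked example of the arithmetic above (the helper is hypothetical,
 * shown for illustration only): NBLW holds the number of valid *bits* in
 * the last 32-bit word, so a 7-byte final chunk has 3 valid bytes in its
 * last word, i.e. NBLW = 8 * (7 % 4) = 24.
 */
static inline u32 example_nblw(int length)
{
	return 8U * (length % 4U);	/* length = 7 -> 24 valid bits */
}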
272 
273 static int stm32_hash_write_key(struct stm32_hash_dev *hdev)
274 {
275 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
276 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
277 	u32 reg;
278 	int keylen = ctx->keylen;
279 	void *key = ctx->key;
280 
281 	if (keylen) {
282 		stm32_hash_set_nblw(hdev, keylen);
283 
284 		while (keylen > 0) {
285 			stm32_hash_write(hdev, HASH_DIN, *(u32 *)key);
286 			keylen -= 4;
287 			key += 4;
288 		}
289 
290 		reg = stm32_hash_read(hdev, HASH_STR);
291 		reg |= HASH_STR_DCAL;
292 		stm32_hash_write(hdev, HASH_STR, reg);
293 
294 		return -EINPROGRESS;
295 	}
296 
297 	return 0;
298 }
299 
300 /**
301  * stm32_hash_write_ctrl - Initialize the hash processor, only if
302  * HASH_FLAGS_INIT is set.
303  * @hdev: the stm32_hash_dev device
304  */
305 static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev)
306 {
307 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
308 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
309 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
310 	struct stm32_hash_state *state = &rctx->state;
311 	u32 alg = (state->flags & HASH_FLAGS_ALGO_MASK) >> HASH_FLAGS_ALGO_SHIFT;
312 
313 	u32 reg = HASH_CR_INIT;
314 
315 	if (!(hdev->flags & HASH_FLAGS_INIT)) {
316 		if (hdev->pdata->ux500) {
317 			reg |= ((alg & BIT(0)) << HASH_CR_ALGO_POS);
318 		} else {
319 			if (hdev->pdata->alg_shift == HASH_CR_ALGO_POS)
320 				reg |= ((alg & BIT(1)) << 17) |
321 				       ((alg & BIT(0)) << HASH_CR_ALGO_POS);
322 			else
323 				reg |= alg << hdev->pdata->alg_shift;
324 		}
325 
326 		reg |= (rctx->data_type << HASH_CR_DATATYPE_POS);
327 
328 		if (state->flags & HASH_FLAGS_HMAC) {
329 			hdev->flags |= HASH_FLAGS_HMAC;
330 			reg |= HASH_CR_MODE;
331 			if (ctx->keylen > crypto_ahash_blocksize(tfm))
332 				reg |= HASH_CR_LKEY;
333 		}
334 
335 		if (!hdev->polled)
336 			stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);
337 
338 		stm32_hash_write(hdev, HASH_CR, reg);
339 
340 		hdev->flags |= HASH_FLAGS_INIT;
341 
342 		/*
343 		 * After the first block + 1 words are filled up,
344 		 * we only need to fill one block to start a partial computation.
345 		 */
346 		rctx->state.blocklen -= sizeof(u32);
347 
348 		dev_dbg(hdev->dev, "Write Control %x\n", reg);
349 	}
350 }
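/*
 * A hedged sketch (not part of the driver) of the control word composed
 * above, assuming an HMAC-SHA256 request with a short key, 8-bit data and
 * a variant whose pdata->alg_shift equals HASH_CR_ALGO_POS:
 */
static u32 example_hmac_sha256_cr(void)
{
	u32 reg = HASH_CR_INIT;

	/* HASH_SHA256 = 3: bit 1 lands at position 18, bit 0 at ALGO_POS */
	reg |= ((HASH_SHA256 & BIT(1)) << 17) |
	       ((HASH_SHA256 & BIT(0)) << HASH_CR_ALGO_POS);
	reg |= HASH_DATA_8_BITS << HASH_CR_DATATYPE_POS;
	reg |= HASH_CR_MODE;	/* HMAC; HASH_CR_LKEY only for long keys */

	return reg;
}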
351 
352 static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
353 {
354 	struct stm32_hash_state *state = &rctx->state;
355 	size_t count;
356 
357 	while ((state->bufcnt < state->blocklen) && rctx->total) {
358 		count = min(rctx->sg->length - rctx->offset, rctx->total);
359 		count = min_t(size_t, count, state->blocklen - state->bufcnt);
360 
361 		if (count <= 0) {
362 			if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
363 				rctx->sg = sg_next(rctx->sg);
364 				continue;
365 			} else {
366 				break;
367 			}
368 		}
369 
370 		scatterwalk_map_and_copy(state->buffer + state->bufcnt,
371 					 rctx->sg, rctx->offset, count, 0);
372 
373 		state->bufcnt += count;
374 		rctx->offset += count;
375 		rctx->total -= count;
376 
377 		if (rctx->offset == rctx->sg->length) {
378 			rctx->sg = sg_next(rctx->sg);
379 			if (rctx->sg)
380 				rctx->offset = 0;
381 			else
382 				rctx->total = 0;
383 		}
384 	}
385 }
386 
387 static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
388 			       const u8 *buf, size_t length, int final)
389 {
390 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
391 	struct stm32_hash_state *state = &rctx->state;
392 	unsigned int count, len32;
393 	const u32 *buffer = (const u32 *)buf;
394 	u32 reg;
395 
396 	if (final) {
397 		hdev->flags |= HASH_FLAGS_FINAL;
398 
399 		/* Do not process empty messages if hw is buggy. */
400 		if (!(hdev->flags & HASH_FLAGS_INIT) && !length &&
401 		    hdev->pdata->broken_emptymsg) {
402 			state->flags |= HASH_FLAGS_EMPTY;
403 			return 0;
404 		}
405 	}
406 
407 	len32 = DIV_ROUND_UP(length, sizeof(u32));
408 
409 	dev_dbg(hdev->dev, "%s: length: %zd, final: %x len32 %i\n",
410 		__func__, length, final, len32);
411 
412 	hdev->flags |= HASH_FLAGS_CPU;
413 
414 	stm32_hash_write_ctrl(hdev);
415 
416 	if (stm32_hash_wait_busy(hdev))
417 		return -ETIMEDOUT;
418 
419 	if ((hdev->flags & HASH_FLAGS_HMAC) &&
420 	    (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) {
421 		hdev->flags |= HASH_FLAGS_HMAC_KEY;
422 		stm32_hash_write_key(hdev);
423 		if (stm32_hash_wait_busy(hdev))
424 			return -ETIMEDOUT;
425 	}
426 
427 	for (count = 0; count < len32; count++)
428 		stm32_hash_write(hdev, HASH_DIN, buffer[count]);
429 
430 	if (final) {
431 		if (stm32_hash_wait_busy(hdev))
432 			return -ETIMEDOUT;
433 
434 		stm32_hash_set_nblw(hdev, length);
435 		reg = stm32_hash_read(hdev, HASH_STR);
436 		reg |= HASH_STR_DCAL;
437 		stm32_hash_write(hdev, HASH_STR, reg);
438 		if (hdev->flags & HASH_FLAGS_HMAC) {
439 			if (stm32_hash_wait_busy(hdev))
440 				return -ETIMEDOUT;
441 			stm32_hash_write_key(hdev);
442 		}
443 		return -EINPROGRESS;
444 	}
445 
446 	return 0;
447 }
448 
449 static int hash_swap_reg(struct stm32_hash_request_ctx *rctx)
450 {
451 	struct stm32_hash_state *state = &rctx->state;
452 
453 	switch ((state->flags & HASH_FLAGS_ALGO_MASK) >>
454 		HASH_FLAGS_ALGO_SHIFT) {
455 	case HASH_MD5:
456 	case HASH_SHA1:
457 	case HASH_SHA224:
458 	case HASH_SHA256:
459 		if (state->flags & HASH_FLAGS_HMAC)
460 			return HASH_CSR_NB_SHA256_HMAC;
461 		else
462 			return HASH_CSR_NB_SHA256;
463 		break;
464 
465 	case HASH_SHA384:
466 	case HASH_SHA512:
467 		if (state->flags & HASH_FLAGS_HMAC)
468 			return HASH_CSR_NB_SHA512_HMAC;
469 		else
470 			return HASH_CSR_NB_SHA512;
471 		break;
472 
473 	case HASH_SHA3_224:
474 	case HASH_SHA3_256:
475 	case HASH_SHA3_384:
476 	case HASH_SHA3_512:
477 		if (state->flags & HASH_FLAGS_HMAC)
478 			return HASH_CSR_NB_SHA3_HMAC;
479 		else
480 			return HASH_CSR_NB_SHA3;
481 		break;
482 
483 	default:
484 		return -EINVAL;
485 	}
486 }
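/*
 * The return value above sizes the context save/restore loops: 3 words
 * (IMR, STR, CR) plus the CSR count are moved through hw_context[]. For
 * HMAC-SHA256, for instance, that is 3 + 54 = 57 words, i.e. 228 bytes
 * (the helper below is hypothetical, shown for illustration):
 */
static inline size_t example_ctx_bytes(int swap_reg)
{
	return (3 + swap_reg) * sizeof(u32);
}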
487 
488 static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
489 {
490 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
491 	struct stm32_hash_state *state = &rctx->state;
492 	int bufcnt, err = 0, final;
493 
494 	dev_dbg(hdev->dev, "%s flags %x\n", __func__, state->flags);
495 
496 	final = state->flags & HASH_FLAGS_FINAL;
497 
498 	while ((rctx->total >= state->blocklen) ||
499 	       (state->bufcnt + rctx->total >= state->blocklen)) {
500 		stm32_hash_append_sg(rctx);
501 		bufcnt = state->bufcnt;
502 		state->bufcnt = 0;
503 		err = stm32_hash_xmit_cpu(hdev, state->buffer, bufcnt, 0);
504 		if (err)
505 			return err;
506 	}
507 
508 	stm32_hash_append_sg(rctx);
509 
510 	if (final) {
511 		bufcnt = state->bufcnt;
512 		state->bufcnt = 0;
513 		return stm32_hash_xmit_cpu(hdev, state->buffer, bufcnt, 1);
514 	}
515 
516 	return err;
517 }
518 
519 static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
520 			       struct scatterlist *sg, int length, int mdmat)
521 {
522 	struct dma_async_tx_descriptor *in_desc;
523 	dma_cookie_t cookie;
524 	u32 reg;
525 	int err;
526 
527 	dev_dbg(hdev->dev, "%s mdmat: %x length: %d\n", __func__, mdmat, length);
528 
529 	/* Do not use DMA if there is no data to send */
530 	if (length <= 0)
531 		return 0;
532 
533 	in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1,
534 					  DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT |
535 					  DMA_CTRL_ACK);
536 	if (!in_desc) {
537 		dev_err(hdev->dev, "dmaengine_prep_slave error\n");
538 		return -ENOMEM;
539 	}
540 
541 	reinit_completion(&hdev->dma_completion);
542 	in_desc->callback = stm32_hash_dma_callback;
543 	in_desc->callback_param = hdev;
544 
545 	hdev->flags |= HASH_FLAGS_DMA_ACTIVE;
546 
547 	reg = stm32_hash_read(hdev, HASH_CR);
548 
549 	if (hdev->pdata->has_mdmat) {
550 		if (mdmat)
551 			reg |= HASH_CR_MDMAT;
552 		else
553 			reg &= ~HASH_CR_MDMAT;
554 	}
555 	reg |= HASH_CR_DMAE;
556 
557 	stm32_hash_write(hdev, HASH_CR, reg);
558 
559 
560 	cookie = dmaengine_submit(in_desc);
561 	err = dma_submit_error(cookie);
562 	if (err)
563 		return -ENOMEM;
564 
565 	dma_async_issue_pending(hdev->dma_lch);
566 
567 	if (!wait_for_completion_timeout(&hdev->dma_completion,
568 					 msecs_to_jiffies(100)))
569 		err = -ETIMEDOUT;
570 
571 	if (dma_async_is_tx_complete(hdev->dma_lch, cookie,
572 				     NULL, NULL) != DMA_COMPLETE)
573 		err = -ETIMEDOUT;
574 
575 	if (err) {
576 		dev_err(hdev->dev, "DMA Error %i\n", err);
577 		dmaengine_terminate_all(hdev->dma_lch);
578 		return err;
579 	}
580 
581 	return -EINPROGRESS;
582 }
583 
584 static void stm32_hash_dma_callback(void *param)
585 {
586 	struct stm32_hash_dev *hdev = param;
587 
588 	complete(&hdev->dma_completion);
589 }
590 
591 static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
592 {
593 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
594 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
595 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
596 	int err;
597 
598 	if (ctx->keylen < rctx->state.blocklen || hdev->dma_mode > 0) {
599 		err = stm32_hash_write_key(hdev);
600 		if (stm32_hash_wait_busy(hdev))
601 			return -ETIMEDOUT;
602 	} else {
603 		if (!(hdev->flags & HASH_FLAGS_HMAC_KEY))
604 			sg_init_one(&rctx->sg_key, ctx->key,
605 				    ALIGN(ctx->keylen, sizeof(u32)));
606 
607 		rctx->dma_ct = dma_map_sg(hdev->dev, &rctx->sg_key, 1,
608 					  DMA_TO_DEVICE);
609 		if (rctx->dma_ct == 0) {
610 			dev_err(hdev->dev, "dma_map_sg error\n");
611 			return -ENOMEM;
612 		}
613 
614 		err = stm32_hash_xmit_dma(hdev, &rctx->sg_key, ctx->keylen, 0);
615 
616 		dma_unmap_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE);
617 	}
618 
619 	return err;
620 }
621 
622 static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
623 {
624 	struct dma_slave_config dma_conf;
625 	struct dma_chan *chan;
626 	int err;
627 
628 	memset(&dma_conf, 0, sizeof(dma_conf));
629 
630 	dma_conf.direction = DMA_MEM_TO_DEV;
631 	dma_conf.dst_addr = hdev->phys_base + HASH_DIN;
632 	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
633 	dma_conf.src_maxburst = HASH_BURST_LEVEL;
634 	dma_conf.dst_maxburst = HASH_BURST_LEVEL;
635 	dma_conf.device_fc = false;
636 
637 	chan = dma_request_chan(hdev->dev, "in");
638 	if (IS_ERR(chan))
639 		return PTR_ERR(chan);
640 
641 	hdev->dma_lch = chan;
642 
643 	err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
644 	if (err) {
645 		dma_release_channel(hdev->dma_lch);
646 		hdev->dma_lch = NULL;
647 		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
648 		return err;
649 	}
650 
651 	init_completion(&hdev->dma_completion);
652 
653 	return 0;
654 }
655 
656 static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
657 {
658 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
659 	u32 *buffer = (void *)rctx->state.buffer;
660 	struct scatterlist sg[1], *tsg;
661 	int err = 0, reg, ncp = 0;
662 	unsigned int i, len = 0, bufcnt = 0;
663 	bool final = hdev->flags & HASH_FLAGS_FINAL;
664 	bool is_last = false;
665 	u32 last_word;
666 
667 	dev_dbg(hdev->dev, "%s total: %d bufcnt: %d final: %d\n",
668 		__func__, rctx->total, rctx->state.bufcnt, final);
669 
670 	if (rctx->nents < 0)
671 		return -EINVAL;
672 
673 	stm32_hash_write_ctrl(hdev);
674 
675 	if (hdev->flags & HASH_FLAGS_HMAC && (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) {
676 		hdev->flags |= HASH_FLAGS_HMAC_KEY;
677 		err = stm32_hash_hmac_dma_send(hdev);
678 		if (err != -EINPROGRESS)
679 			return err;
680 	}
681 
682 	for_each_sg(rctx->sg, tsg, rctx->nents, i) {
683 		sg[0] = *tsg;
684 		len = sg->length;
685 
686 		if (sg_is_last(sg) || (bufcnt + sg[0].length) >= rctx->total) {
687 			if (!final) {
688 				/* Always manually put the last word of a non-final transfer. */
689 				len -= sizeof(u32);
690 				sg_pcopy_to_buffer(rctx->sg, rctx->nents, &last_word, 4, len);
691 				sg->length -= sizeof(u32);
692 			} else {
693 				/*
694 				 * In Multiple DMA mode, DMA must be aborted before the final
695 				 * transfer.
696 				 */
697 				sg->length = rctx->total - bufcnt;
698 				if (hdev->dma_mode > 0) {
699 					len = (ALIGN(sg->length, 16) - 16);
700 
701 					ncp = sg_pcopy_to_buffer(rctx->sg, rctx->nents,
702 								 rctx->state.buffer,
703 								 sg->length - len,
704 								 rctx->total - sg->length + len);
705 
706 					if (!len)
707 						break;
708 
709 					sg->length = len;
710 				} else {
711 					is_last = true;
712 					if (!(IS_ALIGNED(sg->length, sizeof(u32)))) {
713 						len = sg->length;
714 						sg->length = ALIGN(sg->length,
715 								   sizeof(u32));
716 					}
717 				}
718 			}
719 		}
720 
721 		rctx->dma_ct = dma_map_sg(hdev->dev, sg, 1,
722 					  DMA_TO_DEVICE);
723 		if (rctx->dma_ct == 0) {
724 			dev_err(hdev->dev, "dma_map_sg error\n");
725 			return -ENOMEM;
726 		}
727 
728 		err = stm32_hash_xmit_dma(hdev, sg, len, !is_last);
729 
730 		/* The last word of a non-final transfer is sent manually. */
731 		if (!final) {
732 			stm32_hash_write(hdev, HASH_DIN, last_word);
733 			len += sizeof(u32);
734 		}
735 
736 		rctx->total -= len;
737 
738 		bufcnt += sg[0].length;
739 		dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
740 
741 		if (err == -ENOMEM || err == -ETIMEDOUT)
742 			return err;
743 		if (is_last)
744 			break;
745 	}
746 
747 	/*
748 	 * When the second-to-last block transfer of 4 words is performed by the DMA,
749 	 * the software must set the DMA Abort bit (DMAA) to 1 before completing the
750 	 * last transfer of 4 words or less.
751 	 */
752 	if (final) {
753 		if (hdev->dma_mode > 0) {
754 			if (stm32_hash_wait_busy(hdev))
755 				return -ETIMEDOUT;
756 			reg = stm32_hash_read(hdev, HASH_CR);
757 			reg &= ~HASH_CR_DMAE;
758 			reg |= HASH_CR_DMAA;
759 			stm32_hash_write(hdev, HASH_CR, reg);
760 
761 			if (ncp) {
762 				memset(buffer + ncp, 0, 4 - DIV_ROUND_UP(ncp, sizeof(u32)));
763 				writesl(hdev->io_base + HASH_DIN, buffer,
764 					DIV_ROUND_UP(ncp, sizeof(u32)));
765 			}
766 
767 			stm32_hash_set_nblw(hdev, ncp);
768 			reg = stm32_hash_read(hdev, HASH_STR);
769 			reg |= HASH_STR_DCAL;
770 			stm32_hash_write(hdev, HASH_STR, reg);
771 			err = -EINPROGRESS;
772 		}
773 
774 		/*
775 		 * The hash processor needs the key to be loaded a second time in order
776 		 * to process the HMAC.
777 		 */
778 		if (hdev->flags & HASH_FLAGS_HMAC) {
779 			if (stm32_hash_wait_busy(hdev))
780 				return -ETIMEDOUT;
781 			err = stm32_hash_hmac_dma_send(hdev);
782 		}
783 
784 		return err;
785 	}
786 
787 	if (err != -EINPROGRESS)
788 		return err;
789 
790 	return 0;
791 }
792 
793 static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx)
794 {
795 	struct stm32_hash_dev *hdev = NULL, *tmp;
796 
797 	spin_lock_bh(&stm32_hash.lock);
798 	if (!ctx->hdev) {
799 		list_for_each_entry(tmp, &stm32_hash.dev_list, list) {
800 			hdev = tmp;
801 			break;
802 		}
803 		ctx->hdev = hdev;
804 	} else {
805 		hdev = ctx->hdev;
806 	}
807 
808 	spin_unlock_bh(&stm32_hash.lock);
809 
810 	return hdev;
811 }
812 
813 static int stm32_hash_init(struct ahash_request *req)
814 {
815 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
816 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
817 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
818 	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
819 	struct stm32_hash_state *state = &rctx->state;
820 	bool sha3_mode = ctx->flags & HASH_FLAGS_SHA3_MODE;
821 
822 	rctx->hdev = hdev;
823 	state->flags = 0;
824 
825 	if (!(hdev->dma_lch &&  hdev->pdata->has_mdmat))
826 		state->flags |= HASH_FLAGS_CPU;
827 
828 	if (sha3_mode)
829 		state->flags |= HASH_FLAGS_SHA3_MODE;
830 
831 	rctx->digcnt = crypto_ahash_digestsize(tfm);
832 	switch (rctx->digcnt) {
833 	case MD5_DIGEST_SIZE:
834 		state->flags |= HASH_MD5 << HASH_FLAGS_ALGO_SHIFT;
835 		break;
836 	case SHA1_DIGEST_SIZE:
837 		if (hdev->pdata->ux500)
838 			state->flags |= HASH_SHA1_UX500 << HASH_FLAGS_ALGO_SHIFT;
839 		else
840 			state->flags |= HASH_SHA1 << HASH_FLAGS_ALGO_SHIFT;
841 		break;
842 	case SHA224_DIGEST_SIZE:
843 		if (sha3_mode)
844 			state->flags |= HASH_SHA3_224 << HASH_FLAGS_ALGO_SHIFT;
845 		else
846 			state->flags |= HASH_SHA224 << HASH_FLAGS_ALGO_SHIFT;
847 		break;
848 	case SHA256_DIGEST_SIZE:
849 		if (sha3_mode) {
850 			state->flags |= HASH_SHA3_256 << HASH_FLAGS_ALGO_SHIFT;
851 		} else {
852 			if (hdev->pdata->ux500)
853 				state->flags |= HASH_SHA256_UX500 << HASH_FLAGS_ALGO_SHIFT;
854 			else
855 				state->flags |= HASH_SHA256 << HASH_FLAGS_ALGO_SHIFT;
856 		}
857 		break;
858 	case SHA384_DIGEST_SIZE:
859 		if (sha3_mode)
860 			state->flags |= HASH_SHA3_384 << HASH_FLAGS_ALGO_SHIFT;
861 		else
862 			state->flags |= HASH_SHA384 << HASH_FLAGS_ALGO_SHIFT;
863 		break;
864 	case SHA512_DIGEST_SIZE:
865 		if (sha3_mode)
866 			state->flags |= HASH_SHA3_512 << HASH_FLAGS_ALGO_SHIFT;
867 		else
868 			state->flags |= HASH_SHA512 << HASH_FLAGS_ALGO_SHIFT;
869 		break;
870 	default:
871 		return -EINVAL;
872 	}
873 
874 	rctx->state.bufcnt = 0;
875 	rctx->state.blocklen = crypto_ahash_blocksize(tfm) + sizeof(u32);
876 	if (rctx->state.blocklen > HASH_BUFLEN) {
877 		dev_err(hdev->dev, "Error, block too large");
878 		return -EINVAL;
879 	}
880 	rctx->nents = 0;
881 	rctx->total = 0;
882 	rctx->offset = 0;
883 	rctx->data_type = HASH_DATA_8_BITS;
884 
885 	if (ctx->flags & HASH_FLAGS_HMAC)
886 		state->flags |= HASH_FLAGS_HMAC;
887 
888 	dev_dbg(hdev->dev, "%s Flags %x\n", __func__, state->flags);
889 
890 	return 0;
891 }
892 
893 static int stm32_hash_update_req(struct stm32_hash_dev *hdev)
894 {
895 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
896 	struct stm32_hash_state *state = &rctx->state;
897 
898 	dev_dbg(hdev->dev, "update_req: total: %u, digcnt: %zd, final: 0",
899 		rctx->total, rctx->digcnt);
900 
901 	if (!(state->flags & HASH_FLAGS_CPU))
902 		return stm32_hash_dma_send(hdev);
903 
904 	return stm32_hash_update_cpu(hdev);
905 }
906 
907 static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
908 {
909 	struct ahash_request *req = hdev->req;
910 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
911 	struct stm32_hash_state *state = &rctx->state;
912 	int buflen = state->bufcnt;
913 
914 	if (!(state->flags & HASH_FLAGS_CPU)) {
915 		hdev->flags |= HASH_FLAGS_FINAL;
916 		return stm32_hash_dma_send(hdev);
917 	}
918 
919 	if (state->flags & HASH_FLAGS_FINUP)
920 		return stm32_hash_update_req(hdev);
921 
922 	state->bufcnt = 0;
923 
924 	return stm32_hash_xmit_cpu(hdev, state->buffer, buflen, 1);
925 }
926 
927 static void stm32_hash_emptymsg_fallback(struct ahash_request *req)
928 {
929 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
930 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(ahash);
931 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
932 	struct stm32_hash_dev *hdev = rctx->hdev;
933 	int ret;
934 
935 	dev_dbg(hdev->dev, "use fallback message size 0 key size %d\n",
936 		ctx->keylen);
937 
938 	if (!ctx->xtfm) {
939 		dev_err(hdev->dev, "no fallback engine\n");
940 		return;
941 	}
942 
943 	if (ctx->keylen) {
944 		ret = crypto_shash_setkey(ctx->xtfm, ctx->key, ctx->keylen);
945 		if (ret) {
946 			dev_err(hdev->dev, "failed to set key ret=%d\n", ret);
947 			return;
948 		}
949 	}
950 
951 	ret = crypto_shash_tfm_digest(ctx->xtfm, NULL, 0, rctx->digest);
952 	if (ret)
953 		dev_err(hdev->dev, "shash digest error\n");
954 }
955 
956 static void stm32_hash_copy_hash(struct ahash_request *req)
957 {
958 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
959 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
960 	struct stm32_hash_state *state = &rctx->state;
961 	struct stm32_hash_dev *hdev = rctx->hdev;
962 	__be32 *hash = (void *)rctx->digest;
963 	unsigned int i, hashsize;
964 
965 	if (hdev->pdata->broken_emptymsg && (state->flags & HASH_FLAGS_EMPTY))
966 		return stm32_hash_emptymsg_fallback(req);
967 
968 	hashsize = crypto_ahash_digestsize(tfm);
969 
970 	for (i = 0; i < hashsize / sizeof(u32); i++) {
971 		if (hdev->pdata->ux500)
972 			hash[i] = cpu_to_be32(stm32_hash_read(hdev,
973 					      HASH_UX500_HREG(i)));
974 		else
975 			hash[i] = cpu_to_be32(stm32_hash_read(hdev,
976 					      HASH_HREG(i)));
977 	}
978 }
979 
980 static int stm32_hash_finish(struct ahash_request *req)
981 {
982 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
983 	u32 reg;
984 
985 	reg = stm32_hash_read(rctx->hdev, HASH_SR);
986 	reg &= ~HASH_SR_OUTPUT_READY;
987 	stm32_hash_write(rctx->hdev, HASH_SR, reg);
988 
989 	if (!req->result)
990 		return -EINVAL;
991 
992 	memcpy(req->result, rctx->digest, rctx->digcnt);
993 
994 	return 0;
995 }
996 
997 static void stm32_hash_finish_req(struct ahash_request *req, int err)
998 {
999 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1000 	struct stm32_hash_state *state = &rctx->state;
1001 	struct stm32_hash_dev *hdev = rctx->hdev;
1002 
1003 	if (hdev->flags & HASH_FLAGS_DMA_ACTIVE)
1004 		state->flags |= HASH_FLAGS_DMA_ACTIVE;
1005 	else
1006 		state->flags &= ~HASH_FLAGS_DMA_ACTIVE;
1007 
1008 	if (!err && (HASH_FLAGS_FINAL & hdev->flags)) {
1009 		stm32_hash_copy_hash(req);
1010 		err = stm32_hash_finish(req);
1011 	}
1012 
1013 	/* A finalized request must be unprepared here */
1014 	stm32_hash_unprepare_request(req);
1015 
1016 	crypto_finalize_hash_request(hdev->engine, req, err);
1017 }
1018 
1019 static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev,
1020 				   struct ahash_request *req)
1021 {
1022 	return crypto_transfer_hash_request_to_engine(hdev->engine, req);
1023 }
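/*
 * Requests are only queued here; the engine later calls
 * stm32_hash_one_request() below. A minimal sketch of the probe-side
 * wiring, based on the generic crypto_engine API (the actual probe code
 * is outside this section, so treat this as an assumption):
 */
static int example_engine_setup(struct stm32_hash_dev *hdev)
{
	hdev->engine = crypto_engine_alloc_init(hdev->dev, 1);
	if (!hdev->engine)
		return -ENOMEM;

	/* Start the kthread that dequeues and processes requests */
	return crypto_engine_start(hdev->engine);
}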
1024 
1025 static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
1026 {
1027 	struct ahash_request *req = container_of(areq, struct ahash_request,
1028 						 base);
1029 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
1030 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1031 	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
1032 	struct stm32_hash_state *state = &rctx->state;
1033 	int swap_reg;
1034 	int err = 0;
1035 
1036 	if (!hdev)
1037 		return -ENODEV;
1038 
1039 	dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n",
1040 		rctx->op, req->nbytes);
1041 
1042 	pm_runtime_get_sync(hdev->dev);
1043 
1044 	err = stm32_hash_prepare_request(req);
1045 	if (err)
1046 		return err;
1047 
1048 	hdev->req = req;
1049 	hdev->flags = 0;
1050 	swap_reg = hash_swap_reg(rctx);
1051 
1052 	if (state->flags & HASH_FLAGS_INIT) {
1053 		u32 *preg = rctx->state.hw_context;
1054 		u32 reg;
1055 		int i;
1056 
1057 		if (!hdev->pdata->ux500)
1058 			stm32_hash_write(hdev, HASH_IMR, *preg++);
1059 		stm32_hash_write(hdev, HASH_STR, *preg++);
1060 		stm32_hash_write(hdev, HASH_CR, *preg);
1061 		reg = *preg++ | HASH_CR_INIT;
1062 		stm32_hash_write(hdev, HASH_CR, reg);
1063 
1064 		for (i = 0; i < swap_reg; i++)
1065 			stm32_hash_write(hdev, HASH_CSR(i), *preg++);
1066 
1067 		hdev->flags |= HASH_FLAGS_INIT;
1068 
1069 		if (state->flags & HASH_FLAGS_HMAC)
1070 			hdev->flags |= HASH_FLAGS_HMAC |
1071 				       HASH_FLAGS_HMAC_KEY;
1072 
1073 		if (state->flags & HASH_FLAGS_CPU)
1074 			hdev->flags |= HASH_FLAGS_CPU;
1075 
1076 		if (state->flags & HASH_FLAGS_DMA_ACTIVE)
1077 			hdev->flags |= HASH_FLAGS_DMA_ACTIVE;
1078 	}
1079 
1080 	if (rctx->op == HASH_OP_UPDATE)
1081 		err = stm32_hash_update_req(hdev);
1082 	else if (rctx->op == HASH_OP_FINAL)
1083 		err = stm32_hash_final_req(hdev);
1084 
1085 	/* If we have an IRQ, wait for that, else poll for completion */
1086 	if (err == -EINPROGRESS && hdev->polled) {
1087 		if (stm32_hash_wait_busy(hdev))
1088 			err = -ETIMEDOUT;
1089 		else {
1090 			hdev->flags |= HASH_FLAGS_OUTPUT_READY;
1091 			err = 0;
1092 		}
1093 	}
1094 
1095 	if (err != -EINPROGRESS)
1096 	/* done task will not finish it, so do it here */
1097 		stm32_hash_finish_req(req, err);
1098 
1099 	return 0;
1100 }
1101 
1102 static int stm32_hash_copy_sgs(struct stm32_hash_request_ctx *rctx,
1103 			       struct scatterlist *sg, int bs,
1104 			       unsigned int new_len)
1105 {
1106 	struct stm32_hash_state *state = &rctx->state;
1107 	int pages;
1108 	void *buf;
1109 
1110 	pages = get_order(new_len);
1111 
1112 	buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
1113 	if (!buf) {
1114 		pr_err("Couldn't allocate pages for unaligned cases.\n");
1115 		return -ENOMEM;
1116 	}
1117 
1118 	if (state->bufcnt)
1119 		memcpy(buf, rctx->hdev->xmit_buf, state->bufcnt);
1120 
1121 	scatterwalk_map_and_copy(buf + state->bufcnt, sg, rctx->offset,
1122 				 min(new_len, rctx->total) - state->bufcnt, 0);
1123 	sg_init_table(rctx->sgl, 1);
1124 	sg_set_buf(rctx->sgl, buf, new_len);
1125 	rctx->sg = rctx->sgl;
1126 	state->flags |= HASH_FLAGS_SGS_COPIED;
1127 	rctx->nents = 1;
1128 	rctx->offset += new_len - state->bufcnt;
1129 	state->bufcnt = 0;
1130 	rctx->total = new_len;
1131 
1132 	return 0;
1133 }
1134 
1135 static int stm32_hash_align_sgs(struct scatterlist *sg,
1136 				int nbytes, int bs, bool init, bool final,
1137 				struct stm32_hash_request_ctx *rctx)
1138 {
1139 	struct stm32_hash_state *state = &rctx->state;
1140 	struct stm32_hash_dev *hdev = rctx->hdev;
1141 	struct scatterlist *sg_tmp = sg;
1142 	int offset = rctx->offset;
1143 	int new_len;
1144 	int n = 0;
1145 	int bufcnt = state->bufcnt;
1146 	bool secure_ctx = hdev->pdata->context_secured;
1147 	bool aligned = true;
1148 
1149 	if (!sg || !sg->length || !nbytes) {
1150 		if (bufcnt) {
1151 			bufcnt = DIV_ROUND_UP(bufcnt, bs) * bs;
1152 			sg_init_table(rctx->sgl, 1);
1153 			sg_set_buf(rctx->sgl, rctx->hdev->xmit_buf, bufcnt);
1154 			rctx->sg = rctx->sgl;
1155 			rctx->nents = 1;
1156 		}
1157 
1158 		return 0;
1159 	}
1160 
1161 	new_len = nbytes;
1162 
1163 	if (offset)
1164 		aligned = false;
1165 
1166 	if (final) {
1167 		new_len = DIV_ROUND_UP(new_len, bs) * bs;
1168 	} else {
1169 		new_len = (new_len - 1) / bs * bs; /* round down, keeping back at least one byte */
1170 
1171 		/*
1172 		 * On some versions of the HASH IP, the context can only be saved
1173 		 * when the FIFO is ready to accept a new block. This implies sending
1174 		 * n blocks plus a 32-bit word in the first DMA transfer.
1175 		 */
1176 		if (init && secure_ctx) {
1177 			new_len += sizeof(u32);
1178 			if (unlikely(new_len > nbytes))
1179 				new_len -= bs;
1180 		}
1181 	}
1182 
1183 	if (!new_len)
1184 		return 0;
1185 
1186 	if (nbytes != new_len)
1187 		aligned = false;
1188 
1189 	while (nbytes > 0 && sg_tmp) {
1190 		n++;
1191 
1192 		if (bufcnt) {
1193 			if (!IS_ALIGNED(bufcnt, bs)) {
1194 				aligned = false;
1195 				break;
1196 			}
1197 			nbytes -= bufcnt;
1198 			bufcnt = 0;
1199 			if (!nbytes)
1200 				aligned = false;
1201 
1202 			continue;
1203 		}
1204 
1205 		if (offset < sg_tmp->length) {
1206 			if (!IS_ALIGNED(offset + sg_tmp->offset, 4)) {
1207 				aligned = false;
1208 				break;
1209 			}
1210 
1211 			if (!IS_ALIGNED(sg_tmp->length - offset, bs)) {
1212 				aligned = false;
1213 				break;
1214 			}
1215 		}
1216 
1217 		if (offset) {
1218 			offset -= sg_tmp->length;
1219 			if (offset < 0) {
1220 				nbytes += offset;
1221 				offset = 0;
1222 			}
1223 		} else {
1224 			nbytes -= sg_tmp->length;
1225 		}
1226 
1227 		sg_tmp = sg_next(sg_tmp);
1228 
1229 		if (nbytes < 0) {
1230 			aligned = false;
1231 			break;
1232 		}
1233 	}
1234 
1235 	if (!aligned)
1236 		return stm32_hash_copy_sgs(rctx, sg, bs, new_len);
1237 
1238 	rctx->total = new_len;
1239 	rctx->offset += new_len;
1240 	rctx->nents = n;
1241 	if (state->bufcnt) {
1242 		sg_init_table(rctx->sgl, 2);
1243 		sg_set_buf(rctx->sgl, rctx->hdev->xmit_buf, state->bufcnt);
1244 		sg_chain(rctx->sgl, 2, sg);
1245 		rctx->sg = rctx->sgl;
1246 	} else {
1247 		rctx->sg = sg;
1248 	}
1249 
1250 	return 0;
1251 }
1252 
1253 static int stm32_hash_prepare_request(struct ahash_request *req)
1254 {
1255 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1256 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1257 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1258 	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
1259 	struct stm32_hash_state *state = &rctx->state;
1260 	unsigned int nbytes;
1261 	int ret, hash_later, bs;
1262 	bool update = rctx->op & HASH_OP_UPDATE;
1263 	bool init = !(state->flags & HASH_FLAGS_INIT);
1264 	bool finup = state->flags & HASH_FLAGS_FINUP;
1265 	bool final = state->flags & HASH_FLAGS_FINAL;
1266 
1267 	if (!hdev->dma_lch || state->flags & HASH_FLAGS_CPU)
1268 		return 0;
1269 
1270 	bs = crypto_ahash_blocksize(tfm);
1271 
1272 	nbytes = state->bufcnt;
1273 
1274 	/*
1275 	 * For an update request, nbytes is the buffered byte count plus the
1276 	 * part of the request payload that has not yet been copied into the
1277 	 * buffer.
1278 	 */
1279 	if (update || finup)
1280 		nbytes += req->nbytes - rctx->offset;
1281 
1282 	dev_dbg(hdev->dev,
1283 		"%s: nbytes=%d, bs=%d, total=%d, offset=%d, bufcnt=%d\n",
1284 		__func__, nbytes, bs, rctx->total, rctx->offset, state->bufcnt);
1285 
1286 	if (!nbytes)
1287 		return 0;
1288 
1289 	rctx->total = nbytes;
1290 
1291 	if (update && req->nbytes && (!IS_ALIGNED(state->bufcnt, bs))) {
1292 		int len = bs - state->bufcnt % bs;
1293 
1294 		if (len > req->nbytes)
1295 			len = req->nbytes;
1296 		scatterwalk_map_and_copy(state->buffer + state->bufcnt, req->src,
1297 					 0, len, 0);
1298 		state->bufcnt += len;
1299 		rctx->offset = len;
1300 	}
1301 
1302 	/* copy the buffer into a temporary one used for sg alignment */
1303 	if (state->bufcnt)
1304 		memcpy(hdev->xmit_buf, state->buffer, state->bufcnt);
1305 
1306 	ret = stm32_hash_align_sgs(req->src, nbytes, bs, init, final, rctx);
1307 	if (ret)
1308 		return ret;
1309 
1310 	hash_later = nbytes - rctx->total;
1311 	if (hash_later < 0)
1312 		hash_later = 0;
1313 
1314 	if (hash_later && hash_later <= state->blocklen) {
1315 		scatterwalk_map_and_copy(state->buffer,
1316 					 req->src,
1317 					 req->nbytes - hash_later,
1318 					 hash_later, 0);
1319 
1320 		state->bufcnt = hash_later;
1321 	} else {
1322 		state->bufcnt = 0;
1323 	}
1324 
1325 	if (hash_later > state->blocklen) {
1326 		/* FIXME: add support of this case */
1327 		pr_err("Buffer contains more than one block.\n");
1328 		return -ENOMEM;
1329 	}
1330 
1331 	rctx->total = min(nbytes, rctx->total);
1332 
1333 	return 0;
1334 }
1335 
1336 static void stm32_hash_unprepare_request(struct ahash_request *req)
1337 {
1338 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1339 	struct stm32_hash_state *state = &rctx->state;
1340 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
1341 	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
1342 	u32 *preg = state->hw_context;
1343 	int swap_reg, i;
1344 
1345 	if (hdev->dma_lch)
1346 		dmaengine_terminate_sync(hdev->dma_lch);
1347 
1348 	if (state->flags & HASH_FLAGS_SGS_COPIED)
1349 		free_pages((unsigned long)sg_virt(rctx->sg), get_order(rctx->sg->length));
1350 
1351 	rctx->sg = NULL;
1352 	rctx->offset = 0;
1353 
1354 	state->flags &= ~(HASH_FLAGS_SGS_COPIED);
1355 
1356 	if (!(hdev->flags & HASH_FLAGS_INIT))
1357 		goto pm_runtime;
1358 
1359 	state->flags |= HASH_FLAGS_INIT;
1360 
1361 	if (stm32_hash_wait_busy(hdev)) {
1362 		dev_warn(hdev->dev, "Wait busy failed.");
1363 		return;
1364 	}
1365 
1366 	swap_reg = hash_swap_reg(rctx);
1367 
1368 	if (!hdev->pdata->ux500)
1369 		*preg++ = stm32_hash_read(hdev, HASH_IMR);
1370 	*preg++ = stm32_hash_read(hdev, HASH_STR);
1371 	*preg++ = stm32_hash_read(hdev, HASH_CR);
1372 	for (i = 0; i < swap_reg; i++)
1373 		*preg++ = stm32_hash_read(hdev, HASH_CSR(i));
1374 
1375 pm_runtime:
1376 	pm_runtime_put_autosuspend(hdev->dev);
1377 }
1378 
1379 static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
1380 {
1381 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1382 	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1383 	struct stm32_hash_dev *hdev = ctx->hdev;
1384 
1385 	rctx->op = op;
1386 
1387 	return stm32_hash_handle_queue(hdev, req);
1388 }
1389 
1390 static int stm32_hash_update(struct ahash_request *req)
1391 {
1392 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1393 	struct stm32_hash_state *state = &rctx->state;
1394 
1395 	if (!req->nbytes)
1396 		return 0;
1397 
1398 
1399 	if (state->flags & HASH_FLAGS_CPU) {
1400 		rctx->total = req->nbytes;
1401 		rctx->sg = req->src;
1402 		rctx->offset = 0;
1403 
1404 		if ((state->bufcnt + rctx->total < state->blocklen)) {
1405 			stm32_hash_append_sg(rctx);
1406 			return 0;
1407 		}
1408 	} else { /* DMA mode */
1409 		if (state->bufcnt + req->nbytes <= state->blocklen) {
1410 			scatterwalk_map_and_copy(state->buffer + state->bufcnt, req->src,
1411 						 0, req->nbytes, 0);
1412 			state->bufcnt += req->nbytes;
1413 			return 0;
1414 		}
1415 	}
1416 
1417 	return stm32_hash_enqueue(req, HASH_OP_UPDATE);
1418 }
1419 
1420 static int stm32_hash_final(struct ahash_request *req)
1421 {
1422 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1423 	struct stm32_hash_state *state = &rctx->state;
1424 
1425 	state->flags |= HASH_FLAGS_FINAL;
1426 
1427 	return stm32_hash_enqueue(req, HASH_OP_FINAL);
1428 }
1429 
1430 static int stm32_hash_finup(struct ahash_request *req)
1431 {
1432 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1433 	struct stm32_hash_state *state = &rctx->state;
1434 
1435 	if (!req->nbytes)
1436 		goto out;
1437 
1438 	state->flags |= HASH_FLAGS_FINUP;
1439 
1440 	if ((state->flags & HASH_FLAGS_CPU)) {
1441 		rctx->total = req->nbytes;
1442 		rctx->sg = req->src;
1443 		rctx->offset = 0;
1444 	}
1445 
1446 out:
1447 	return stm32_hash_final(req);
1448 }
1449 
1450 static int stm32_hash_digest(struct ahash_request *req)
1451 {
1452 	return stm32_hash_init(req) ?: stm32_hash_finup(req);
1453 }
1454 
1455 static int stm32_hash_export(struct ahash_request *req, void *out)
1456 {
1457 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1458 
1459 	memcpy(out, &rctx->state, sizeof(rctx->state));
1460 
1461 	return 0;
1462 }
1463 
1464 static int stm32_hash_import(struct ahash_request *req, const void *in)
1465 {
1466 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1467 
1468 	stm32_hash_init(req);
1469 	memcpy(&rctx->state, in, sizeof(rctx->state));
1470 
1471 	return 0;
1472 }
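/*
 * Export/import serialize the whole struct stm32_hash_state, letting a
 * caller suspend and resume a partial hash. A hedged caller-side sketch
 * using the generic ahash API (function and buffer names are illustrative):
 */
static int example_partial_hash(struct ahash_request *req,
				struct scatterlist *sg, unsigned int len)
{
	char state[sizeof(struct stm32_hash_state)];
	int ret;

	ret = crypto_ahash_export(req, state);	/* snapshot mid-stream state */
	if (ret)
		return ret;

	/* ... the request could be reused for other work here ... */

	ret = crypto_ahash_import(req, state);	/* resume where we left off */
	if (ret)
		return ret;

	ahash_request_set_crypt(req, sg, NULL, len);
	return crypto_ahash_update(req);
}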
1473 
1474 static int stm32_hash_setkey(struct crypto_ahash *tfm,
1475 			     const u8 *key, unsigned int keylen)
1476 {
1477 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1478 
1479 	if (keylen <= HASH_MAX_KEY_SIZE) {
1480 		memcpy(ctx->key, key, keylen);
1481 		ctx->keylen = keylen;
1482 	} else {
1483 		return -ENOMEM;
1484 	}
1485 
1486 	return 0;
1487 }
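/*
 * A minimal, hedged sketch of driving the HMAC algorithms registered below
 * from kernel code ("hmac(sha256)" resolves to stm32-hmac-sha256 when this
 * driver wins on priority); the key and data here are placeholders:
 */
static int example_hmac_digest(const u8 *key, unsigned int keylen,
			       struct scatterlist *sg, unsigned int len,
			       u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ahash_setkey(tfm, key, keylen);
	if (ret)
		goto out_free_tfm;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, sg, out, len);
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return ret;
}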
1488 
1489 static int stm32_hash_init_fallback(struct crypto_tfm *tfm)
1490 {
1491 	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1492 	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
1493 	const char *name = crypto_tfm_alg_name(tfm);
1494 	struct crypto_shash *xtfm;
1495 
1496 	/* The fallback is only needed on Ux500 */
1497 	if (!hdev->pdata->ux500)
1498 		return 0;
1499 
1500 	xtfm = crypto_alloc_shash(name, 0, CRYPTO_ALG_NEED_FALLBACK);
1501 	if (IS_ERR(xtfm)) {
1502 		dev_err(hdev->dev, "failed to allocate %s fallback\n",
1503 			name);
1504 		return PTR_ERR(xtfm);
1505 	}
1506 	dev_info(hdev->dev, "allocated %s fallback\n", name);
1507 	ctx->xtfm = xtfm;
1508 
1509 	return 0;
1510 }
1511 
1512 static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm, u32 algs_flags)
1513 {
1514 	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1515 
1516 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1517 				 sizeof(struct stm32_hash_request_ctx));
1518 
1519 	ctx->keylen = 0;
1520 
1521 	if (algs_flags)
1522 		ctx->flags |= algs_flags;
1523 
1524 	return stm32_hash_init_fallback(tfm);
1525 }
1526 
1527 static int stm32_hash_cra_init(struct crypto_tfm *tfm)
1528 {
1529 	return stm32_hash_cra_init_algs(tfm, 0);
1530 }
1531 
1532 static int stm32_hash_cra_hmac_init(struct crypto_tfm *tfm)
1533 {
1534 	return stm32_hash_cra_init_algs(tfm, HASH_FLAGS_HMAC);
1535 }
1536 
1537 static int stm32_hash_cra_sha3_init(struct crypto_tfm *tfm)
1538 {
1539 	return stm32_hash_cra_init_algs(tfm, HASH_FLAGS_SHA3_MODE);
1540 }
1541 
1542 static int stm32_hash_cra_sha3_hmac_init(struct crypto_tfm *tfm)
1543 {
1544 	return stm32_hash_cra_init_algs(tfm, HASH_FLAGS_SHA3_MODE |
1545 					HASH_FLAGS_HMAC);
1546 }
1547 
1548 static void stm32_hash_cra_exit(struct crypto_tfm *tfm)
1549 {
1550 	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1551 
1552 	if (ctx->xtfm)
1553 		crypto_free_shash(ctx->xtfm);
1554 }
1555 
1556 static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
1557 {
1558 	struct stm32_hash_dev *hdev = dev_id;
1559 
1560 	if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
1561 		hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
1562 		goto finish;
1563 	}
1564 
1565 	return IRQ_HANDLED;
1566 
1567 finish:
1568 	/* Finish current request */
1569 	stm32_hash_finish_req(hdev->req, 0);
1570 
1571 	return IRQ_HANDLED;
1572 }
1573 
1574 static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
1575 {
1576 	struct stm32_hash_dev *hdev = dev_id;
1577 	u32 reg;
1578 
1579 	reg = stm32_hash_read(hdev, HASH_SR);
1580 	if (reg & HASH_SR_OUTPUT_READY) {
1581 		hdev->flags |= HASH_FLAGS_OUTPUT_READY;
1582 		/* Disable the interrupt */
1583 		stm32_hash_write(hdev, HASH_IMR, 0);
1584 		return IRQ_WAKE_THREAD;
1585 	}
1586 
1587 	return IRQ_NONE;
1588 }
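/*
 * The hard handler above masks the interrupt and wakes the threaded
 * handler, which finalizes the request. A hedged sketch of how the two
 * are typically tied together at probe time (the probe function itself is
 * outside this section, so treat this as an assumption):
 */
static int example_request_irq(struct platform_device *pdev,
			       struct stm32_hash_dev *hdev, int irq)
{
	return devm_request_threaded_irq(&pdev->dev, irq,
					 stm32_hash_irq_handler,
					 stm32_hash_irq_thread,
					 IRQF_ONESHOT,
					 dev_name(&pdev->dev), hdev);
}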
1589 
1590 static struct ahash_engine_alg algs_md5[] = {
1591 	{
1592 		.base.init = stm32_hash_init,
1593 		.base.update = stm32_hash_update,
1594 		.base.final = stm32_hash_final,
1595 		.base.finup = stm32_hash_finup,
1596 		.base.digest = stm32_hash_digest,
1597 		.base.export = stm32_hash_export,
1598 		.base.import = stm32_hash_import,
1599 		.base.halg = {
1600 			.digestsize = MD5_DIGEST_SIZE,
1601 			.statesize = sizeof(struct stm32_hash_state),
1602 			.base = {
1603 				.cra_name = "md5",
1604 				.cra_driver_name = "stm32-md5",
1605 				.cra_priority = 200,
1606 				.cra_flags = CRYPTO_ALG_ASYNC |
1607 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1608 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
1609 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1610 				.cra_init = stm32_hash_cra_init,
1611 				.cra_exit = stm32_hash_cra_exit,
1612 				.cra_module = THIS_MODULE,
1613 			}
1614 		},
1615 		.op = {
1616 			.do_one_request = stm32_hash_one_request,
1617 		},
1618 	},
1619 	{
1620 		.base.init = stm32_hash_init,
1621 		.base.update = stm32_hash_update,
1622 		.base.final = stm32_hash_final,
1623 		.base.finup = stm32_hash_finup,
1624 		.base.digest = stm32_hash_digest,
1625 		.base.export = stm32_hash_export,
1626 		.base.import = stm32_hash_import,
1627 		.base.setkey = stm32_hash_setkey,
1628 		.base.halg = {
1629 			.digestsize = MD5_DIGEST_SIZE,
1630 			.statesize = sizeof(struct stm32_hash_state),
1631 			.base = {
1632 				.cra_name = "hmac(md5)",
1633 				.cra_driver_name = "stm32-hmac-md5",
1634 				.cra_priority = 200,
1635 				.cra_flags = CRYPTO_ALG_ASYNC |
1636 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1637 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
1638 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1639 				.cra_init = stm32_hash_cra_hmac_init,
1640 				.cra_exit = stm32_hash_cra_exit,
1641 				.cra_module = THIS_MODULE,
1642 			}
1643 		},
1644 		.op = {
1645 			.do_one_request = stm32_hash_one_request,
1646 		},
1647 	}
1648 };
1649 
1650 static struct ahash_engine_alg algs_sha1[] = {
1651 	{
1652 		.base.init = stm32_hash_init,
1653 		.base.update = stm32_hash_update,
1654 		.base.final = stm32_hash_final,
1655 		.base.finup = stm32_hash_finup,
1656 		.base.digest = stm32_hash_digest,
1657 		.base.export = stm32_hash_export,
1658 		.base.import = stm32_hash_import,
1659 		.base.halg = {
1660 			.digestsize = SHA1_DIGEST_SIZE,
1661 			.statesize = sizeof(struct stm32_hash_state),
1662 			.base = {
1663 				.cra_name = "sha1",
1664 				.cra_driver_name = "stm32-sha1",
1665 				.cra_priority = 200,
1666 				.cra_flags = CRYPTO_ALG_ASYNC |
1667 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1668 				.cra_blocksize = SHA1_BLOCK_SIZE,
1669 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1670 				.cra_init = stm32_hash_cra_init,
1671 				.cra_exit = stm32_hash_cra_exit,
1672 				.cra_module = THIS_MODULE,
1673 			}
1674 		},
1675 		.op = {
1676 			.do_one_request = stm32_hash_one_request,
1677 		},
1678 	},
1679 	{
1680 		.base.init = stm32_hash_init,
1681 		.base.update = stm32_hash_update,
1682 		.base.final = stm32_hash_final,
1683 		.base.finup = stm32_hash_finup,
1684 		.base.digest = stm32_hash_digest,
1685 		.base.export = stm32_hash_export,
1686 		.base.import = stm32_hash_import,
1687 		.base.setkey = stm32_hash_setkey,
1688 		.base.halg = {
1689 			.digestsize = SHA1_DIGEST_SIZE,
1690 			.statesize = sizeof(struct stm32_hash_state),
1691 			.base = {
1692 				.cra_name = "hmac(sha1)",
1693 				.cra_driver_name = "stm32-hmac-sha1",
1694 				.cra_priority = 200,
1695 				.cra_flags = CRYPTO_ALG_ASYNC |
1696 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1697 				.cra_blocksize = SHA1_BLOCK_SIZE,
1698 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1699 				.cra_init = stm32_hash_cra_hmac_init,
1700 				.cra_exit = stm32_hash_cra_exit,
1701 				.cra_module = THIS_MODULE,
1702 			}
1703 		},
1704 		.op = {
1705 			.do_one_request = stm32_hash_one_request,
1706 		},
1707 	},
1708 };
1709 
1710 static struct ahash_engine_alg algs_sha224[] = {
1711 	{
1712 		.base.init = stm32_hash_init,
1713 		.base.update = stm32_hash_update,
1714 		.base.final = stm32_hash_final,
1715 		.base.finup = stm32_hash_finup,
1716 		.base.digest = stm32_hash_digest,
1717 		.base.export = stm32_hash_export,
1718 		.base.import = stm32_hash_import,
1719 		.base.halg = {
1720 			.digestsize = SHA224_DIGEST_SIZE,
1721 			.statesize = sizeof(struct stm32_hash_state),
1722 			.base = {
1723 				.cra_name = "sha224",
1724 				.cra_driver_name = "stm32-sha224",
1725 				.cra_priority = 200,
1726 				.cra_flags = CRYPTO_ALG_ASYNC |
1727 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1728 				.cra_blocksize = SHA224_BLOCK_SIZE,
1729 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1730 				.cra_init = stm32_hash_cra_init,
1731 				.cra_exit = stm32_hash_cra_exit,
1732 				.cra_module = THIS_MODULE,
1733 			}
1734 		},
1735 		.op = {
1736 			.do_one_request = stm32_hash_one_request,
1737 		},
1738 	},
1739 	{
1740 		.base.init = stm32_hash_init,
1741 		.base.update = stm32_hash_update,
1742 		.base.final = stm32_hash_final,
1743 		.base.finup = stm32_hash_finup,
1744 		.base.digest = stm32_hash_digest,
1745 		.base.setkey = stm32_hash_setkey,
1746 		.base.export = stm32_hash_export,
1747 		.base.import = stm32_hash_import,
1748 		.base.halg = {
1749 			.digestsize = SHA224_DIGEST_SIZE,
1750 			.statesize = sizeof(struct stm32_hash_state),
1751 			.base = {
1752 				.cra_name = "hmac(sha224)",
1753 				.cra_driver_name = "stm32-hmac-sha224",
1754 				.cra_priority = 200,
1755 				.cra_flags = CRYPTO_ALG_ASYNC |
1756 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1757 				.cra_blocksize = SHA224_BLOCK_SIZE,
1758 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1759 				.cra_init = stm32_hash_cra_hmac_init,
1760 				.cra_exit = stm32_hash_cra_exit,
1761 				.cra_module = THIS_MODULE,
1762 			}
1763 		},
1764 		.op = {
1765 			.do_one_request = stm32_hash_one_request,
1766 		},
1767 	},
1768 };
1769 
1770 static struct ahash_engine_alg algs_sha256[] = {
1771 	{
1772 		.base.init = stm32_hash_init,
1773 		.base.update = stm32_hash_update,
1774 		.base.final = stm32_hash_final,
1775 		.base.finup = stm32_hash_finup,
1776 		.base.digest = stm32_hash_digest,
1777 		.base.export = stm32_hash_export,
1778 		.base.import = stm32_hash_import,
1779 		.base.halg = {
1780 			.digestsize = SHA256_DIGEST_SIZE,
1781 			.statesize = sizeof(struct stm32_hash_state),
1782 			.base = {
1783 				.cra_name = "sha256",
1784 				.cra_driver_name = "stm32-sha256",
1785 				.cra_priority = 200,
1786 				.cra_flags = CRYPTO_ALG_ASYNC |
1787 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1788 				.cra_blocksize = SHA256_BLOCK_SIZE,
1789 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1790 				.cra_init = stm32_hash_cra_init,
1791 				.cra_exit = stm32_hash_cra_exit,
1792 				.cra_module = THIS_MODULE,
1793 			}
1794 		},
1795 		.op = {
1796 			.do_one_request = stm32_hash_one_request,
1797 		},
1798 	},
1799 	{
1800 		.base.init = stm32_hash_init,
1801 		.base.update = stm32_hash_update,
1802 		.base.final = stm32_hash_final,
1803 		.base.finup = stm32_hash_finup,
1804 		.base.digest = stm32_hash_digest,
1805 		.base.export = stm32_hash_export,
1806 		.base.import = stm32_hash_import,
1807 		.base.setkey = stm32_hash_setkey,
1808 		.base.halg = {
1809 			.digestsize = SHA256_DIGEST_SIZE,
1810 			.statesize = sizeof(struct stm32_hash_state),
1811 			.base = {
1812 				.cra_name = "hmac(sha256)",
1813 				.cra_driver_name = "stm32-hmac-sha256",
1814 				.cra_priority = 200,
1815 				.cra_flags = CRYPTO_ALG_ASYNC |
1816 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1817 				.cra_blocksize = SHA256_BLOCK_SIZE,
1818 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1819 				.cra_init = stm32_hash_cra_hmac_init,
1820 				.cra_exit = stm32_hash_cra_exit,
1821 				.cra_module = THIS_MODULE,
1822 			}
1823 		},
1824 		.op = {
1825 			.do_one_request = stm32_hash_one_request,
1826 		},
1827 	},
1828 };
1829 
static struct ahash_engine_alg algs_sha384_sha512[] = {
	{
		.base.init = stm32_hash_init,
		.base.update = stm32_hash_update,
		.base.final = stm32_hash_final,
		.base.finup = stm32_hash_finup,
		.base.digest = stm32_hash_digest,
		.base.export = stm32_hash_export,
		.base.import = stm32_hash_import,
		.base.halg = {
			.digestsize = SHA384_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_state),
			.base = {
				.cra_name = "sha384",
				.cra_driver_name = "stm32-sha384",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_init = stm32_hash_cra_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		},
		.op = {
			.do_one_request = stm32_hash_one_request,
		},
	},
	{
		.base.init = stm32_hash_init,
		.base.update = stm32_hash_update,
		.base.final = stm32_hash_final,
		.base.finup = stm32_hash_finup,
		.base.digest = stm32_hash_digest,
		.base.setkey = stm32_hash_setkey,
		.base.export = stm32_hash_export,
		.base.import = stm32_hash_import,
		.base.halg = {
			.digestsize = SHA384_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_state),
			.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "stm32-hmac-sha384",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_init = stm32_hash_cra_hmac_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		},
		.op = {
			.do_one_request = stm32_hash_one_request,
		},
	},
	{
		.base.init = stm32_hash_init,
		.base.update = stm32_hash_update,
		.base.final = stm32_hash_final,
		.base.finup = stm32_hash_finup,
		.base.digest = stm32_hash_digest,
		.base.export = stm32_hash_export,
		.base.import = stm32_hash_import,
		.base.halg = {
			.digestsize = SHA512_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_state),
			.base = {
				.cra_name = "sha512",
				.cra_driver_name = "stm32-sha512",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_init = stm32_hash_cra_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		},
		.op = {
			.do_one_request = stm32_hash_one_request,
		},
	},
	{
		.base.init = stm32_hash_init,
		.base.update = stm32_hash_update,
		.base.final = stm32_hash_final,
		.base.finup = stm32_hash_finup,
		.base.digest = stm32_hash_digest,
		.base.export = stm32_hash_export,
		.base.import = stm32_hash_import,
		.base.setkey = stm32_hash_setkey,
		.base.halg = {
			.digestsize = SHA512_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_state),
			.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "stm32-hmac-sha512",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_init = stm32_hash_cra_hmac_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		},
		.op = {
			.do_one_request = stm32_hash_one_request,
		},
	},
};

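/*
 * SHA3-224/256/384/512 and their HMAC variants, for SHA-3 capable
 * hardware (the STM32MP13 table below).
 */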
static struct ahash_engine_alg algs_sha3[] = {
	{
		.base.init = stm32_hash_init,
		.base.update = stm32_hash_update,
		.base.final = stm32_hash_final,
		.base.finup = stm32_hash_finup,
		.base.digest = stm32_hash_digest,
		.base.export = stm32_hash_export,
		.base.import = stm32_hash_import,
		.base.halg = {
			.digestsize = SHA3_224_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_state),
			.base = {
				.cra_name = "sha3-224",
				.cra_driver_name = "stm32-sha3-224",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA3_224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_init = stm32_hash_cra_sha3_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		},
		.op = {
			.do_one_request = stm32_hash_one_request,
		},
	},
	{
		.base.init = stm32_hash_init,
		.base.update = stm32_hash_update,
		.base.final = stm32_hash_final,
		.base.finup = stm32_hash_finup,
		.base.digest = stm32_hash_digest,
		.base.export = stm32_hash_export,
		.base.import = stm32_hash_import,
		.base.setkey = stm32_hash_setkey,
		.base.halg = {
			.digestsize = SHA3_224_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_state),
			.base = {
				.cra_name = "hmac(sha3-224)",
				.cra_driver_name = "stm32-hmac-sha3-224",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA3_224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_init = stm32_hash_cra_sha3_hmac_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		},
		.op = {
			.do_one_request = stm32_hash_one_request,
		},
	},
	{
		.base.init = stm32_hash_init,
		.base.update = stm32_hash_update,
		.base.final = stm32_hash_final,
		.base.finup = stm32_hash_finup,
		.base.digest = stm32_hash_digest,
		.base.export = stm32_hash_export,
		.base.import = stm32_hash_import,
		.base.halg = {
			.digestsize = SHA3_256_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_state),
			.base = {
				.cra_name = "sha3-256",
				.cra_driver_name = "stm32-sha3-256",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA3_256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_init = stm32_hash_cra_sha3_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		},
		.op = {
			.do_one_request = stm32_hash_one_request,
		},
	},
	{
		.base.init = stm32_hash_init,
		.base.update = stm32_hash_update,
		.base.final = stm32_hash_final,
		.base.finup = stm32_hash_finup,
		.base.digest = stm32_hash_digest,
		.base.export = stm32_hash_export,
		.base.import = stm32_hash_import,
		.base.setkey = stm32_hash_setkey,
		.base.halg = {
			.digestsize = SHA3_256_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_state),
			.base = {
				.cra_name = "hmac(sha3-256)",
				.cra_driver_name = "stm32-hmac-sha3-256",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA3_256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_init = stm32_hash_cra_sha3_hmac_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		},
		.op = {
			.do_one_request = stm32_hash_one_request,
		},
	},
	{
		.base.init = stm32_hash_init,
		.base.update = stm32_hash_update,
		.base.final = stm32_hash_final,
		.base.finup = stm32_hash_finup,
		.base.digest = stm32_hash_digest,
		.base.export = stm32_hash_export,
		.base.import = stm32_hash_import,
		.base.halg = {
			.digestsize = SHA3_384_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_state),
			.base = {
				.cra_name = "sha3-384",
				.cra_driver_name = "stm32-sha3-384",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA3_384_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_init = stm32_hash_cra_sha3_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		},
		.op = {
			.do_one_request = stm32_hash_one_request,
		},
	},
	{
		.base.init = stm32_hash_init,
		.base.update = stm32_hash_update,
		.base.final = stm32_hash_final,
		.base.finup = stm32_hash_finup,
		.base.digest = stm32_hash_digest,
		.base.export = stm32_hash_export,
		.base.import = stm32_hash_import,
		.base.setkey = stm32_hash_setkey,
		.base.halg = {
			.digestsize = SHA3_384_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_state),
			.base = {
				.cra_name = "hmac(sha3-384)",
				.cra_driver_name = "stm32-hmac-sha3-384",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA3_384_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_init = stm32_hash_cra_sha3_hmac_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		},
		.op = {
			.do_one_request = stm32_hash_one_request,
		},
	},
	{
		.base.init = stm32_hash_init,
		.base.update = stm32_hash_update,
		.base.final = stm32_hash_final,
		.base.finup = stm32_hash_finup,
		.base.digest = stm32_hash_digest,
		.base.export = stm32_hash_export,
		.base.import = stm32_hash_import,
		.base.halg = {
			.digestsize = SHA3_512_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_state),
			.base = {
				.cra_name = "sha3-512",
				.cra_driver_name = "stm32-sha3-512",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA3_512_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_init = stm32_hash_cra_sha3_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		},
		.op = {
			.do_one_request = stm32_hash_one_request,
		},
	},
	{
		.base.init = stm32_hash_init,
		.base.update = stm32_hash_update,
		.base.final = stm32_hash_final,
		.base.finup = stm32_hash_finup,
		.base.digest = stm32_hash_digest,
		.base.export = stm32_hash_export,
		.base.import = stm32_hash_import,
		.base.setkey = stm32_hash_setkey,
		.base.halg = {
			.digestsize = SHA3_512_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_state),
			.base = {
				.cra_name = "hmac(sha3-512)",
				.cra_driver_name = "stm32-hmac-sha3-512",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA3_512_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_init = stm32_hash_cra_sha3_hmac_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		},
		.op = {
			.do_one_request = stm32_hash_one_request,
		},
	}
};

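/*
 * Register every transformation listed in the per-SoC capability
 * table with the crypto engine, unwinding on the first failure.
 */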
static int stm32_hash_register_algs(struct stm32_hash_dev *hdev)
{
	unsigned int i, j;
	int err;

	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
		for (j = 0; j < hdev->pdata->algs_info[i].size; j++) {
			err = crypto_engine_register_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);
			if (err)
				goto err_algs;
		}
	}

	return 0;
err_algs:
	dev_err(hdev->dev, "Algo %u : %u failed\n", i, j);
	/* Unregister the partially registered list i, then all lists below it. */
	while (j--)
		crypto_engine_unregister_ahash(
			&hdev->pdata->algs_info[i].algs_list[j]);
	while (i--) {
		for (j = hdev->pdata->algs_info[i].size; j--;)
			crypto_engine_unregister_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);
	}

	return err;
}

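/* Mirror of stm32_hash_register_algs(), called on driver removal. */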
static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev)
{
	unsigned int i, j;

	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
		for (j = 0; j < hdev->pdata->algs_info[i].size; j++)
			crypto_engine_unregister_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);
	}

	return 0;
}

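/*
 * Per-SoC capability tables: each entry describes the set of
 * transformations a given IP revision can offload, selected through
 * the OF compatible below.
 */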
static struct stm32_hash_algs_info stm32_hash_algs_info_ux500[] = {
	{
		.algs_list	= algs_sha1,
		.size		= ARRAY_SIZE(algs_sha1),
	},
	{
		.algs_list	= algs_sha256,
		.size		= ARRAY_SIZE(algs_sha256),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_ux500 = {
	.alg_shift	= 7,
	.algs_info	= stm32_hash_algs_info_ux500,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_ux500),
	.broken_emptymsg = true,
	.ux500		= true,
};

static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = {
	{
		.algs_list	= algs_md5,
		.size		= ARRAY_SIZE(algs_md5),
	},
	{
		.algs_list	= algs_sha1,
		.size		= ARRAY_SIZE(algs_sha1),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = {
	.alg_shift	= 7,
	.algs_info	= stm32_hash_algs_info_stm32f4,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f4),
	.has_sr		= true,
	.has_mdmat	= true,
};

static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = {
	{
		.algs_list	= algs_md5,
		.size		= ARRAY_SIZE(algs_md5),
	},
	{
		.algs_list	= algs_sha1,
		.size		= ARRAY_SIZE(algs_sha1),
	},
	{
		.algs_list	= algs_sha224,
		.size		= ARRAY_SIZE(algs_sha224),
	},
	{
		.algs_list	= algs_sha256,
		.size		= ARRAY_SIZE(algs_sha256),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = {
	.alg_shift	= 7,
	.algs_info	= stm32_hash_algs_info_stm32f7,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f7),
	.has_sr		= true,
	.has_mdmat	= true,
};

static struct stm32_hash_algs_info stm32_hash_algs_info_stm32mp13[] = {
	{
		.algs_list	= algs_sha1,
		.size		= ARRAY_SIZE(algs_sha1),
	},
	{
		.algs_list	= algs_sha224,
		.size		= ARRAY_SIZE(algs_sha224),
	},
	{
		.algs_list	= algs_sha256,
		.size		= ARRAY_SIZE(algs_sha256),
	},
	{
		.algs_list	= algs_sha384_sha512,
		.size		= ARRAY_SIZE(algs_sha384_sha512),
	},
	{
		.algs_list	= algs_sha3,
		.size		= ARRAY_SIZE(algs_sha3),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_stm32mp13 = {
	.alg_shift	= 17,
	.algs_info	= stm32_hash_algs_info_stm32mp13,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32mp13),
	.has_sr		= true,
	.has_mdmat	= true,
	.context_secured = true,
};

static const struct of_device_id stm32_hash_of_match[] = {
	{ .compatible = "stericsson,ux500-hash", .data = &stm32_hash_pdata_ux500 },
	{ .compatible = "st,stm32f456-hash", .data = &stm32_hash_pdata_stm32f4 },
	{ .compatible = "st,stm32f756-hash", .data = &stm32_hash_pdata_stm32f7 },
	{ .compatible = "st,stm32mp13-hash", .data = &stm32_hash_pdata_stm32mp13 },
	{},
};

MODULE_DEVICE_TABLE(of, stm32_hash_of_match);

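/*
 * Illustrative device-tree node for this driver. This is a sketch
 * only: the compatible string matches the table above, but the unit
 * address, interrupt, clock, reset and DMA specifiers are placeholder
 * values, not taken from a real board file.
 *
 *	hash: hash@54002000 {
 *		compatible = "st,stm32mp13-hash";
 *		reg = <0x54002000 0x400>;
 *		interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
 *		clocks = <&rcc HASH1>;
 *		resets = <&rcc HASH1_R>;
 *		dmas = <&mdma 31 0x2 0x1000a02 0x0 0x0>;
 *		dma-names = "in";
 *	};
 */
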
static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
				   struct device *dev)
{
	hdev->pdata = of_device_get_match_data(dev);
	if (!hdev->pdata) {
		dev_err(dev, "no compatible OF match\n");
		return -EINVAL;
	}

	return 0;
}

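/*
 * Probe: map the HASH registers, request the (optional) IRQ, clock,
 * reset and DMA channel, then start a crypto engine and register the
 * transformations listed in the matched platform data.
 */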
static int stm32_hash_probe(struct platform_device *pdev)
{
	struct stm32_hash_dev *hdev;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret, irq;

	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(hdev->io_base))
		return PTR_ERR(hdev->io_base);

	hdev->phys_base = res->start;

	ret = stm32_hash_get_of_match(hdev, dev);
	if (ret)
		return ret;

	irq = platform_get_irq_optional(pdev, 0);
	if (irq < 0 && irq != -ENXIO)
		return irq;

	if (irq > 0) {
		ret = devm_request_threaded_irq(dev, irq,
						stm32_hash_irq_handler,
						stm32_hash_irq_thread,
						IRQF_ONESHOT,
						dev_name(dev), hdev);
		if (ret) {
			dev_err(dev, "Cannot grab IRQ\n");
			return ret;
		}
	} else {
		dev_info(dev, "No IRQ, using polling mode\n");
		hdev->polled = true;
	}

	hdev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(hdev->clk))
		return dev_err_probe(dev, PTR_ERR(hdev->clk),
				     "failed to get clock for hash\n");

	ret = clk_prepare_enable(hdev->clk);
	if (ret) {
		dev_err(dev, "failed to enable hash clock (%d)\n", ret);
		return ret;
	}

	pm_runtime_set_autosuspend_delay(dev, HASH_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);

	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	hdev->rst = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(hdev->rst)) {
		if (PTR_ERR(hdev->rst) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto err_reset;
		}
	} else {
		reset_control_assert(hdev->rst);
		udelay(2);
		reset_control_deassert(hdev->rst);
	}

	hdev->dev = dev;

	platform_set_drvdata(pdev, hdev);

	ret = stm32_hash_dma_init(hdev);
	switch (ret) {
	case 0:
		break;
	case -ENOENT:
	case -ENODEV:
		dev_info(dev, "DMA mode not available\n");
		break;
	default:
		dev_err(dev, "DMA init error %d\n", ret);
		goto err_dma;
	}

	spin_lock(&stm32_hash.lock);
	list_add_tail(&hdev->list, &stm32_hash.dev_list);
	spin_unlock(&stm32_hash.lock);

	/* Initialize crypto engine */
	hdev->engine = crypto_engine_alloc_init(dev, 1);
	if (!hdev->engine) {
		ret = -ENOMEM;
		goto err_engine;
	}

	ret = crypto_engine_start(hdev->engine);
	if (ret)
		goto err_engine_start;

	if (hdev->pdata->ux500)
		/* FIXME: implement DMA mode for Ux500 */
		hdev->dma_mode = 0;
	else
		hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR) & HASH_HWCFG_DMA_MASK;

	/* Register algos */
	ret = stm32_hash_register_algs(hdev);
	if (ret)
		goto err_algs;

	dev_info(dev, "Init HASH done HW ver %x DMA mode %u\n",
		 stm32_hash_read(hdev, HASH_VER), hdev->dma_mode);

	pm_runtime_put_sync(dev);

	return 0;

err_algs:
err_engine_start:
	crypto_engine_exit(hdev->engine);
err_engine:
	spin_lock(&stm32_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&stm32_hash.lock);
err_dma:
	if (hdev->dma_lch)
		dma_release_channel(hdev->dma_lch);
err_reset:
	pm_runtime_disable(dev);
	pm_runtime_put_noidle(dev);

	clk_disable_unprepare(hdev->clk);

	return ret;
}

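/*
 * Remove: reverse of probe. Unregister the algorithms, stop the
 * engine, release the DMA channel and gate the clock.
 */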
static void stm32_hash_remove(struct platform_device *pdev)
{
	struct stm32_hash_dev *hdev = platform_get_drvdata(pdev);
	int ret;

	ret = pm_runtime_get_sync(hdev->dev);

	stm32_hash_unregister_algs(hdev);

	crypto_engine_exit(hdev->engine);

	spin_lock(&stm32_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&stm32_hash.lock);

	if (hdev->dma_lch)
		dma_release_channel(hdev->dma_lch);

	pm_runtime_disable(hdev->dev);
	pm_runtime_put_noidle(hdev->dev);

	if (ret >= 0)
		clk_disable_unprepare(hdev->clk);
}

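/* Runtime PM only gates the HASH peripheral clock. */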
#ifdef CONFIG_PM
static int stm32_hash_runtime_suspend(struct device *dev)
{
	struct stm32_hash_dev *hdev = dev_get_drvdata(dev);

	clk_disable_unprepare(hdev->clk);

	return 0;
}

static int stm32_hash_runtime_resume(struct device *dev)
{
	struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(hdev->clk);
	if (ret) {
		dev_err(hdev->dev, "Failed to prepare_enable clock\n");
		return ret;
	}

	return 0;
}
#endif

static const struct dev_pm_ops stm32_hash_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(stm32_hash_runtime_suspend,
			   stm32_hash_runtime_resume, NULL)
};

static struct platform_driver stm32_hash_driver = {
	.probe		= stm32_hash_probe,
	.remove		= stm32_hash_remove,
	.driver		= {
		.name	= "stm32-hash",
		.pm = &stm32_hash_pm_ops,
		.of_match_table	= stm32_hash_of_match,
	}
};

module_platform_driver(stm32_hash_driver);

MODULE_DESCRIPTION("STM32 SHA1/SHA2/SHA3 & MD5 (HMAC) hw accelerator driver");
MODULE_AUTHOR("Lionel Debieve <lionel.debieve@st.com>");
MODULE_LICENSE("GPL v2");