// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2024
 *
 * Christian Marangi <ansuelsmth@gmail.com>
 */

#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/md5.h>
#include <crypto/hmac.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>

#include "eip93-cipher.h"
#include "eip93-hash.h"
#include "eip93-main.h"
#include "eip93-common.h"
#include "eip93-regs.h"

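/*
 * Hash flow implemented below: init() seeds sa_state with the standard
 * IV, update() slices the request into SHA256_BLOCK_SIZE chunks queued
 * to the ring, and final()/finup() send the buffered trailing block
 * with the FINAL bit set (and, for HMAC, a dedicated sa_record that
 * applies the outer hash).
 */
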
static void eip93_hash_free_data_blocks(struct ahash_request *req)
{
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct eip93_device *eip93 = ctx->eip93;
	struct mkt_hash_block *block, *tmp;

	list_for_each_entry_safe(block, tmp, &rctx->blocks, list) {
		dma_unmap_single(eip93->dev, block->data_dma,
				 SHA256_BLOCK_SIZE, DMA_TO_DEVICE);
		kfree(block);
	}
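	/* Entries were freed without list_del(), so reset the list head */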
	if (!list_empty(&rctx->blocks))
		INIT_LIST_HEAD(&rctx->blocks);

	if (rctx->finalize)
		dma_unmap_single(eip93->dev, rctx->data_dma,
				 rctx->data_used,
				 DMA_TO_DEVICE);
}

static void eip93_hash_free_sa_record(struct ahash_request *req)
{
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct eip93_device *eip93 = ctx->eip93;

	if (IS_HMAC(ctx->flags))
		dma_unmap_single(eip93->dev, rctx->sa_record_hmac_base,
				 sizeof(rctx->sa_record_hmac), DMA_TO_DEVICE);

	dma_unmap_single(eip93->dev, rctx->sa_record_base,
			 sizeof(rctx->sa_record), DMA_TO_DEVICE);
}

void eip93_hash_handle_result(struct crypto_async_request *async, int err)
{
	struct ahash_request *req = ahash_request_cast(async);
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct sa_state *sa_state = &rctx->sa_state;
	struct eip93_device *eip93 = ctx->eip93;
	int i;

	dma_unmap_single(eip93->dev, rctx->sa_state_base,
			 sizeof(*sa_state), DMA_FROM_DEVICE);

	/*
	 * With partial_hash, assume a SHA256_DIGEST_SIZE buffer is passed.
	 * This handles SHA224, which has a 32-byte intermediate digest.
	 */
	if (rctx->partial_hash)
		digestsize = SHA256_DIGEST_SIZE;

	if (rctx->finalize || rctx->partial_hash) {
		/* Bytes need to be swapped for req->result */
		if (!IS_HASH_MD5(ctx->flags)) {
			for (i = 0; i < digestsize / sizeof(u32); i++) {
				u32 *digest = (u32 *)sa_state->state_i_digest;

				digest[i] = be32_to_cpu((__be32 __force)digest[i]);
			}
		}

		memcpy(req->result, sa_state->state_i_digest, digestsize);
	}

	eip93_hash_free_sa_record(req);
	eip93_hash_free_data_blocks(req);

	ahash_request_complete(req, err);
}

static void eip93_hash_init_sa_state_digest(u32 hash, u8 *digest)
{
	u32 sha256_init[] = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
			      SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7 };
	u32 sha224_init[] = { SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
			      SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7 };
	u32 sha1_init[] = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 };
	u32 md5_init[] = { MD5_H0, MD5_H1, MD5_H2, MD5_H3 };

	/* Init HASH constant */
	switch (hash) {
	case EIP93_HASH_SHA256:
		memcpy(digest, sha256_init, sizeof(sha256_init));
		return;
	case EIP93_HASH_SHA224:
		memcpy(digest, sha224_init, sizeof(sha224_init));
		return;
	case EIP93_HASH_SHA1:
		memcpy(digest, sha1_init, sizeof(sha1_init));
		return;
	case EIP93_HASH_MD5:
		memcpy(digest, md5_init, sizeof(md5_init));
		return;
	default: /* Impossible */
		return;
	}
}

static void eip93_hash_export_sa_state(struct ahash_request *req,
				       struct eip93_hash_export_state *state)
{
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct sa_state *sa_state = &rctx->sa_state;

	/*
	 * The EIP93 has special handling for state_byte_cnt in sa_state.
	 * Even if a zero-length packet is passed (and a BADMSG is returned),
	 * state_byte_cnt is still incremented for the handled digest (by the
	 * hash primitive). This is problematic for export/import as the EIP93
	 * expects a zero state_byte_cnt for the very first iteration.
	 */
	if (!rctx->len)
		memset(state->state_len, 0, sizeof(u32) * 2);
	else
		memcpy(state->state_len, sa_state->state_byte_cnt,
		       sizeof(u32) * 2);
	memcpy(state->state_hash, sa_state->state_i_digest,
	       SHA256_DIGEST_SIZE);
	state->len = rctx->len;
	state->data_used = rctx->data_used;
}

static void __eip93_hash_init(struct ahash_request *req)
{
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct sa_record *sa_record = &rctx->sa_record;
	int digestsize;

	digestsize = crypto_ahash_digestsize(ahash);

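	/* Basic out-hash: load the IV from sa_state, save the digest back */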
	eip93_set_sa_record(sa_record, 0, ctx->flags);
	sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_FROM_STATE;
	sa_record->sa_cmd0_word |= EIP93_SA_CMD_SAVE_HASH;
	sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_OPCODE;
	sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_OPCODE,
					      EIP93_SA_CMD_OPCODE_BASIC_OUT_HASH);
	sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_DIGEST_LENGTH;
	sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH,
					      digestsize / sizeof(u32));

	/*
	 * HMAC special handling
	 * Enabling CMD_HMAC forces the inner hash to always be finalized.
	 * This causes problems when handling messages > 64 bytes, as we
	 * need to produce intermediate inner hashes while sending the
	 * intermediate 64-byte blocks.
	 *
	 * To handle this, enable CMD_HMAC only on the last block.
	 * We make a duplicate of sa_record and, on the last descriptor,
	 * we pass a dedicated sa_record with CMD_HMAC enabled to make
	 * the EIP93 apply the outer hash.
	 */
	if (IS_HMAC(ctx->flags)) {
		struct sa_record *sa_record_hmac = &rctx->sa_record_hmac;

		memcpy(sa_record_hmac, sa_record, sizeof(*sa_record));
		/* Copy pre-hashed opad for HMAC */
		memcpy(sa_record_hmac->sa_o_digest, ctx->opad, SHA256_DIGEST_SIZE);

		/* Disable HMAC for the normal hash sa_record */
		sa_record->sa_cmd1_word &= ~EIP93_SA_CMD_HMAC;
	}

	rctx->len = 0;
	rctx->data_used = 0;
	rctx->partial_hash = false;
	rctx->finalize = false;
	INIT_LIST_HEAD(&rctx->blocks);
}

static int eip93_send_hash_req(struct crypto_async_request *async, u8 *data,
			       dma_addr_t *data_dma, u32 len, bool last)
{
	struct ahash_request *req = ahash_request_cast(async);
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct eip93_device *eip93 = ctx->eip93;
	struct eip93_descriptor cdesc = { };
	dma_addr_t src_addr;
	int ret;

	/* Map block data to DMA */
	src_addr = dma_map_single(eip93->dev, data, len, DMA_TO_DEVICE);
	ret = dma_mapping_error(eip93->dev, src_addr);
	if (ret)
		return ret;

	cdesc.pe_ctrl_stat_word = FIELD_PREP(EIP93_PE_CTRL_PE_READY_DES_TRING_OWN,
					     EIP93_PE_CTRL_HOST_READY);
	cdesc.sa_addr = rctx->sa_record_base;
	cdesc.arc4_addr = 0;

	cdesc.state_addr = rctx->sa_state_base;
	cdesc.src_addr = src_addr;
	cdesc.pe_length_word = FIELD_PREP(EIP93_PE_LENGTH_HOST_PE_READY,
					  EIP93_PE_LENGTH_HOST_READY);
	cdesc.pe_length_word |= FIELD_PREP(EIP93_PE_LENGTH_LENGTH,
					   len);

	cdesc.user_id |= FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS, EIP93_DESC_HASH);

	if (last) {
		int crypto_async_idr;

		if (rctx->finalize && !rctx->partial_hash) {
			/* For last block, pass sa_record with CMD_HMAC enabled */
			if (IS_HMAC(ctx->flags)) {
				struct sa_record *sa_record_hmac = &rctx->sa_record_hmac;

				rctx->sa_record_hmac_base = dma_map_single(eip93->dev,
									   sa_record_hmac,
									   sizeof(*sa_record_hmac),
									   DMA_TO_DEVICE);
				ret = dma_mapping_error(eip93->dev, rctx->sa_record_hmac_base);
				if (ret)
					return ret;

				cdesc.sa_addr = rctx->sa_record_hmac_base;
			}

			cdesc.pe_ctrl_stat_word |= EIP93_PE_CTRL_PE_HASH_FINAL;
		}

		scoped_guard(spinlock_bh, &eip93->ring->idr_lock)
			crypto_async_idr = idr_alloc(&eip93->ring->crypto_async_idr, async, 0,
						     EIP93_RING_NUM - 1, GFP_ATOMIC);

		cdesc.user_id |= FIELD_PREP(EIP93_PE_USER_ID_CRYPTO_IDR, (u16)crypto_async_idr) |
				 FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS, EIP93_DESC_LAST);
	}

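	/* The ring may be full; retry until a descriptor slot frees up */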
again:
	scoped_guard(spinlock_irqsave, &eip93->ring->write_lock)
		ret = eip93_put_descriptor(eip93, &cdesc);
	if (ret) {
		usleep_range(EIP93_RING_BUSY_DELAY,
			     EIP93_RING_BUSY_DELAY * 2);
		goto again;
	}

	/* Writing new descriptor count starts DMA action */
	writel(1, eip93->base + EIP93_REG_PE_CD_COUNT);

	*data_dma = src_addr;
	return 0;
}

static int eip93_hash_init(struct ahash_request *req)
{
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct sa_state *sa_state = &rctx->sa_state;

	memset(sa_state->state_byte_cnt, 0, sizeof(u32) * 2);
	eip93_hash_init_sa_state_digest(ctx->flags & EIP93_HASH_MASK,
					sa_state->state_i_digest);

	__eip93_hash_init(req);

	/* For HMAC, set up the initial block with the ipad */
	if (IS_HMAC(ctx->flags)) {
		memcpy(rctx->data, ctx->ipad, SHA256_BLOCK_SIZE);

		rctx->data_used = SHA256_BLOCK_SIZE;
		rctx->len += SHA256_BLOCK_SIZE;
	}

	return 0;
}

/*
 * With complete_req true, we wait for the engine to consume all the blocks
 * in the list; otherwise we just queue the blocks to the engine, as final()
 * will do the waiting. This is useful for finup().
 */
static int __eip93_hash_update(struct ahash_request *req, bool complete_req)
{
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct crypto_async_request *async = &req->base;
	unsigned int read, to_consume = req->nbytes;
	unsigned int max_read, consumed = 0;
	struct mkt_hash_block *block;
	bool wait_req = false;
	int offset;
	int ret;

	/* Get the offset and available space to fill req data */
	offset = rctx->data_used;
	max_read = SHA256_BLOCK_SIZE - offset;

	/* Consume req in blocks of SHA256_BLOCK_SIZE.
	 * max_read is initially set to the space available in the req data
	 * buffer and is then reset to SHA256_BLOCK_SIZE.
	 */
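	/*
	 * Example: with 16 bytes already buffered and a 120-byte req,
	 * the first block takes 48 new bytes (16 + 48 = 64), the second
	 * a full 64, and the remaining 8 bytes stay in rctx->data for a
	 * later update() or final(). A final full block is also kept
	 * buffered, since the loop only runs while to_consume > max_read.
	 */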
	while (to_consume > max_read) {
		block = kzalloc(sizeof(*block), GFP_ATOMIC);
		if (!block) {
			ret = -ENOMEM;
			goto free_blocks;
		}

		read = sg_pcopy_to_buffer(req->src, sg_nents(req->src),
					  block->data + offset,
					  max_read, consumed);

		/*
		 * On the first iteration only, prepend the buffered req data
		 * to the block and reset offset and max_read for the
		 * following iterations.
		 */
		if (offset > 0) {
			memcpy(block->data, rctx->data, offset);
			offset = 0;
			max_read = SHA256_BLOCK_SIZE;
		}

		list_add(&block->list, &rctx->blocks);
		to_consume -= read;
		consumed += read;
	}

	/* Write the remaining data to req data */
	read = sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				  rctx->data + offset, to_consume,
				  consumed);
	rctx->data_used = offset + read;

	/* Update counter with processed bytes */
	rctx->len += read + consumed;

	/* Consume all the blocks added to the list */
	list_for_each_entry_reverse(block, &rctx->blocks, list) {
		wait_req = complete_req &&
			    list_is_first(&block->list, &rctx->blocks);

		ret = eip93_send_hash_req(async, block->data,
					  &block->data_dma,
					  SHA256_BLOCK_SIZE, wait_req);
		if (ret)
			goto free_blocks;
	}

	return wait_req ? -EINPROGRESS : 0;

free_blocks:
	eip93_hash_free_data_blocks(req);

	return ret;
}

static int eip93_hash_update(struct ahash_request *req)
{
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct sa_record *sa_record = &rctx->sa_record;
	struct sa_state *sa_state = &rctx->sa_state;
	struct eip93_device *eip93 = ctx->eip93;
	int ret;

	if (!req->nbytes)
		return 0;

	rctx->sa_state_base = dma_map_single(eip93->dev, sa_state,
					     sizeof(*sa_state),
					     DMA_TO_DEVICE);
	ret = dma_mapping_error(eip93->dev, rctx->sa_state_base);
	if (ret)
		return ret;

	rctx->sa_record_base = dma_map_single(eip93->dev, sa_record,
					      sizeof(*sa_record),
					      DMA_TO_DEVICE);
	ret = dma_mapping_error(eip93->dev, rctx->sa_record_base);
	if (ret)
		goto free_sa_state;

	ret = __eip93_hash_update(req, true);
	if (ret && ret != -EINPROGRESS)
		goto free_sa_record;

	return ret;

free_sa_record:
	dma_unmap_single(eip93->dev, rctx->sa_record_base,
			 sizeof(*sa_record), DMA_TO_DEVICE);

free_sa_state:
	dma_unmap_single(eip93->dev, rctx->sa_state_base,
			 sizeof(*sa_state), DMA_TO_DEVICE);

	return ret;
}

/*
 * With map_dma true, we map the sa_record and sa_state. finup() passes
 * false as they are already mapped before it calls update().
 */
static int __eip93_hash_final(struct ahash_request *req, bool map_dma)
{
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct crypto_async_request *async = &req->base;
	struct sa_record *sa_record = &rctx->sa_record;
	struct sa_state *sa_state = &rctx->sa_state;
	struct eip93_device *eip93 = ctx->eip93;
	int ret;

	/* The EIP93 can't hash zero-length messages; use precomputed digests */
	if (!rctx->len && !IS_HMAC(ctx->flags)) {
		switch ((ctx->flags & EIP93_HASH_MASK)) {
		case EIP93_HASH_SHA256:
			memcpy(req->result, sha256_zero_message_hash,
			       SHA256_DIGEST_SIZE);
			break;
		case EIP93_HASH_SHA224:
			memcpy(req->result, sha224_zero_message_hash,
			       SHA224_DIGEST_SIZE);
			break;
		case EIP93_HASH_SHA1:
			memcpy(req->result, sha1_zero_message_hash,
			       SHA1_DIGEST_SIZE);
			break;
		case EIP93_HASH_MD5:
			memcpy(req->result, md5_zero_message_hash,
			       MD5_DIGEST_SIZE);
			break;
		default: /* Impossible */
			return -EINVAL;
		}

		return 0;
	}

	/* Signal that the interrupt from the engine is for the last block */
	rctx->finalize = true;

	if (map_dma) {
		rctx->sa_state_base = dma_map_single(eip93->dev, sa_state,
						     sizeof(*sa_state),
						     DMA_TO_DEVICE);
		ret = dma_mapping_error(eip93->dev, rctx->sa_state_base);
		if (ret)
			return ret;

		rctx->sa_record_base = dma_map_single(eip93->dev, sa_record,
						      sizeof(*sa_record),
						      DMA_TO_DEVICE);
		ret = dma_mapping_error(eip93->dev, rctx->sa_record_base);
		if (ret)
			goto free_sa_state;
	}

	/* Send last block */
	ret = eip93_send_hash_req(async, rctx->data, &rctx->data_dma,
				  rctx->data_used, true);
	if (ret)
		goto free_blocks;

	return -EINPROGRESS;

free_blocks:
	eip93_hash_free_data_blocks(req);

	dma_unmap_single(eip93->dev, rctx->sa_record_base,
			 sizeof(*sa_record), DMA_TO_DEVICE);

free_sa_state:
	dma_unmap_single(eip93->dev, rctx->sa_state_base,
			 sizeof(*sa_state), DMA_TO_DEVICE);

	return ret;
}

static int eip93_hash_final(struct ahash_request *req)
{
	return __eip93_hash_final(req, true);
}

static int eip93_hash_finup(struct ahash_request *req)
{
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct sa_record *sa_record = &rctx->sa_record;
	struct sa_state *sa_state = &rctx->sa_state;
	struct eip93_device *eip93 = ctx->eip93;
	int ret;

	if (rctx->len + req->nbytes || IS_HMAC(ctx->flags)) {
		rctx->sa_state_base = dma_map_single(eip93->dev, sa_state,
						     sizeof(*sa_state),
						     DMA_TO_DEVICE);
		ret = dma_mapping_error(eip93->dev, rctx->sa_state_base);
		if (ret)
			return ret;

		rctx->sa_record_base = dma_map_single(eip93->dev, sa_record,
						      sizeof(*sa_record),
						      DMA_TO_DEVICE);
		ret = dma_mapping_error(eip93->dev, rctx->sa_record_base);
		if (ret)
			goto free_sa_state;

		ret = __eip93_hash_update(req, false);
		if (ret)
			goto free_sa_record;
	}

	return __eip93_hash_final(req, false);

free_sa_record:
	dma_unmap_single(eip93->dev, rctx->sa_record_base,
			 sizeof(*sa_record), DMA_TO_DEVICE);
free_sa_state:
	dma_unmap_single(eip93->dev, rctx->sa_state_base,
			 sizeof(*sa_state), DMA_TO_DEVICE);

	return ret;
}

static int eip93_hash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
				  u32 keylen)
{
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	struct crypto_tfm *tfm = crypto_ahash_tfm(ahash);
	struct eip93_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	return eip93_hmac_setkey(ctx->flags, key, keylen, digestsize,
				 ctx->ipad, ctx->opad, true);
}

static int eip93_hash_cra_init(struct crypto_tfm *tfm)
{
	struct eip93_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct eip93_alg_template *tmpl = container_of(tfm->__crt_alg,
				struct eip93_alg_template, alg.ahash.halg.base);

	crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm),
				     sizeof(struct eip93_hash_reqctx));

	ctx->eip93 = tmpl->eip93;
	ctx->flags = tmpl->flags;

	return 0;
}

static int eip93_hash_digest(struct ahash_request *req)
{
	int ret;

	ret = eip93_hash_init(req);
	if (ret)
		return ret;

	return eip93_hash_finup(req);
}

static int eip93_hash_import(struct ahash_request *req, const void *in)
{
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	const struct eip93_hash_export_state *state = in;
	struct sa_state *sa_state = &rctx->sa_state;

	memcpy(sa_state->state_byte_cnt, state->state_len, sizeof(u32) * 2);
	memcpy(sa_state->state_i_digest, state->state_hash, SHA256_DIGEST_SIZE);

	__eip93_hash_init(req);

	rctx->len = state->len;
	rctx->data_used = state->data_used;

	/* Skip copying data if we have nothing to copy */
	if (rctx->len)
		memcpy(rctx->data, state->data, rctx->data_used);

	return 0;
}

static int eip93_hash_export(struct ahash_request *req, void *out)
{
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct eip93_hash_export_state *state = out;

	/* Save the first block in state data */
	if (rctx->len)
		memcpy(state->data, rctx->data, rctx->data_used);

	eip93_hash_export_sa_state(req, state);

	return 0;
}

struct eip93_alg_template eip93_alg_md5 = {
	.type = EIP93_ALG_TYPE_HASH,
	.flags = EIP93_HASH_MD5,
	.alg.ahash = {
		.init = eip93_hash_init,
		.update = eip93_hash_update,
		.final = eip93_hash_final,
		.finup = eip93_hash_finup,
		.digest = eip93_hash_digest,
		.export = eip93_hash_export,
		.import = eip93_hash_import,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct eip93_hash_export_state),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "md5-eip93",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ALLOCATES_MEMORY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct eip93_hash_ctx),
				.cra_init = eip93_hash_cra_init,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

struct eip93_alg_template eip93_alg_sha1 = {
	.type = EIP93_ALG_TYPE_HASH,
	.flags = EIP93_HASH_SHA1,
	.alg.ahash = {
		.init = eip93_hash_init,
		.update = eip93_hash_update,
		.final = eip93_hash_final,
		.finup = eip93_hash_finup,
		.digest = eip93_hash_digest,
		.export = eip93_hash_export,
		.import = eip93_hash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct eip93_hash_export_state),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-eip93",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ALLOCATES_MEMORY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct eip93_hash_ctx),
				.cra_init = eip93_hash_cra_init,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

struct eip93_alg_template eip93_alg_sha224 = {
	.type = EIP93_ALG_TYPE_HASH,
	.flags = EIP93_HASH_SHA224,
	.alg.ahash = {
		.init = eip93_hash_init,
		.update = eip93_hash_update,
		.final = eip93_hash_final,
		.finup = eip93_hash_finup,
		.digest = eip93_hash_digest,
		.export = eip93_hash_export,
		.import = eip93_hash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct eip93_hash_export_state),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-eip93",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ALLOCATES_MEMORY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct eip93_hash_ctx),
				.cra_init = eip93_hash_cra_init,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

struct eip93_alg_template eip93_alg_sha256 = {
	.type = EIP93_ALG_TYPE_HASH,
	.flags = EIP93_HASH_SHA256,
	.alg.ahash = {
		.init = eip93_hash_init,
		.update = eip93_hash_update,
		.final = eip93_hash_final,
		.finup = eip93_hash_finup,
		.digest = eip93_hash_digest,
		.export = eip93_hash_export,
		.import = eip93_hash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct eip93_hash_export_state),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-eip93",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ALLOCATES_MEMORY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct eip93_hash_ctx),
				.cra_init = eip93_hash_cra_init,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

struct eip93_alg_template eip93_alg_hmac_md5 = {
	.type = EIP93_ALG_TYPE_HASH,
	.flags = EIP93_HASH_HMAC | EIP93_HASH_MD5,
	.alg.ahash = {
		.init = eip93_hash_init,
		.update = eip93_hash_update,
		.final = eip93_hash_final,
		.finup = eip93_hash_finup,
		.digest = eip93_hash_digest,
		.setkey = eip93_hash_hmac_setkey,
		.export = eip93_hash_export,
		.import = eip93_hash_import,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct eip93_hash_export_state),
			.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "hmac(md5-eip93)",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ALLOCATES_MEMORY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct eip93_hash_ctx),
				.cra_init = eip93_hash_cra_init,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

struct eip93_alg_template eip93_alg_hmac_sha1 = {
	.type = EIP93_ALG_TYPE_HASH,
	.flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1,
	.alg.ahash = {
		.init = eip93_hash_init,
		.update = eip93_hash_update,
		.final = eip93_hash_final,
		.finup = eip93_hash_finup,
		.digest = eip93_hash_digest,
		.setkey = eip93_hash_hmac_setkey,
		.export = eip93_hash_export,
		.import = eip93_hash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct eip93_hash_export_state),
			.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac(sha1-eip93)",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ALLOCATES_MEMORY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct eip93_hash_ctx),
				.cra_init = eip93_hash_cra_init,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

struct eip93_alg_template eip93_alg_hmac_sha224 = {
	.type = EIP93_ALG_TYPE_HASH,
	.flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224,
	.alg.ahash = {
		.init = eip93_hash_init,
		.update = eip93_hash_update,
		.final = eip93_hash_final,
		.finup = eip93_hash_finup,
		.digest = eip93_hash_digest,
		.setkey = eip93_hash_hmac_setkey,
		.export = eip93_hash_export,
		.import = eip93_hash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct eip93_hash_export_state),
			.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac(sha224-eip93)",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ALLOCATES_MEMORY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct eip93_hash_ctx),
				.cra_init = eip93_hash_cra_init,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

struct eip93_alg_template eip93_alg_hmac_sha256 = {
	.type = EIP93_ALG_TYPE_HASH,
	.flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256,
	.alg.ahash = {
		.init = eip93_hash_init,
		.update = eip93_hash_update,
		.final = eip93_hash_final,
		.finup = eip93_hash_finup,
		.digest = eip93_hash_digest,
		.setkey = eip93_hash_hmac_setkey,
		.export = eip93_hash_export,
		.import = eip93_hash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct eip93_hash_export_state),
			.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac(sha256-eip93)",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ALLOCATES_MEMORY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct eip93_hash_ctx),
				.cra_init = eip93_hash_cra_init,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
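
/*
 * Usage sketch (illustrative, not part of this driver): once these
 * templates are registered, a kernel consumer reaches them through the
 * generic ahash API. done_cb, sg, digest and nbytes below are
 * caller-provided placeholders.
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   done_cb, NULL);
 *	ahash_request_set_crypt(req, sg, digest, nbytes);
 *	err = crypto_ahash_digest(req);	(may return -EINPROGRESS)
 *
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */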